Merge tag 'lsk-v4.4-17.02-android' into branch 'msm-4.4'

* refs/heads/tmp-26c8156:
  Linux 4.4.49
  drm/i915: fix use-after-free in page_flip_completed()
  ALSA: seq: Don't handle loop timeout at snd_seq_pool_done()
  ALSA: seq: Fix race at creating a queue
  xen-netfront: Delete rx_refill_timer in xennet_disconnect_backend()
  scsi: mpt3sas: disable ASPM for MPI2 controllers
  scsi: aacraid: Fix INTx/MSI-x issue with older controllers
  scsi: zfcp: fix use-after-free by not tracing WKA port open/close on failed send
  netvsc: Set maximum GSO size in the right place
  mac80211: Fix adding of mesh vendor IEs
  ARM: 8642/1: LPAE: catch pending imprecise abort on unmask
  target: Fix COMPARE_AND_WRITE ref leak for non GOOD status
  target: Fix early transport_generic_handle_tmr abort scenario
  target: Use correct SCSI status during EXTENDED_COPY exception
  target: Don't BUG_ON during NodeACL dynamic -> explicit conversion
  ARM: 8643/3: arm/ptrace: Preserve previous registers for short regset write
  hns: avoid stack overflow with CONFIG_KASAN
  cpumask: use nr_cpumask_bits for parsing functions
  Revert "x86/ioapic: Restore IO-APIC irq_chip retrigger callback"
  selinux: fix off-by-one in setprocattr
  ARC: [arcompact] brown paper bag bug in unaligned access delay slot fixup
  Linux 4.4.48
  base/memory, hotplug: fix a kernel oops in show_valid_zones()
  x86/irq: Make irq activate operations symmetric
  USB: serial: option: add device ID for HP lt2523 (Novatel E371)
  usb: gadget: f_fs: Assorted buffer overflow checks.
  USB: Add quirk for WORLDE easykey.25 MIDI keyboard
  USB: serial: pl2303: add ATEN device ID
  USB: serial: qcserial: add Dell DW5570 QDL
  KVM: x86: do not save guest-unsupported XSAVE state
  HID: wacom: Fix poor prox handling in 'wacom_pl_irq'
  percpu-refcount: fix reference leak during percpu-atomic transition
  mmc: sdhci: Ignore unexpected CARD_INT interrupts
  can: bcm: fix hrtimer/tasklet termination in bcm op removal
  mm, fs: check for fatal signals in do_generic_file_read()
  mm/memory_hotplug.c: check start_pfn in test_pages_in_a_zone()
  cifs: initialize file_info_lock
  zswap: disable changing params if init fails
  svcrpc: fix oops in absence of krb5 module
  NFSD: Fix a null reference case in find_or_create_lock_stateid()
  powerpc: Add missing error check to prom_find_boot_cpu()
  powerpc/eeh: Fix wrong flag passed to eeh_unfreeze_pe()
  libata: apply MAX_SEC_1024 to all CX1-JB*-HP devices
  ata: sata_mv:- Handle return value of devm_ioremap.
  perf/core: Fix PERF_RECORD_MMAP2 prot/flags for anonymous memory
  crypto: arm64/aes-blk - honour iv_out requirement in CBC and CTR modes
  crypto: api - Clear CRYPTO_ALG_DEAD bit before registering an alg
  drm/nouveau/nv1a,nv1f/disp: fix memory clock rate retrieval
  drm/nouveau/disp/gt215: Fix HDA ELD handling (thus, HDMI audio) on gt215
  ext4: validate s_first_meta_bg at mount time
  PCI/ASPM: Handle PCI-to-PCIe bridges as roots of PCIe hierarchies
  ANDROID: security: export security_path_chown()
  Linux 4.4.47
  net: dsa: Bring back device detaching in dsa_slave_suspend()
  qmi_wwan/cdc_ether: add device ID for HP lt2523 (Novatel E371) WWAN card
  af_unix: move unix_mknod() out of bindlock
  r8152: don't execute runtime suspend if the tx is not empty
  bridge: netlink: call br_changelink() during br_dev_newlink()
  tcp: initialize max window for a new fastopen socket
  ipv6: addrconf: Avoid addrconf_disable_change() using RCU read-side lock
  net: phy: bcm63xx: Utilize correct config_intr function
  net: fix harmonize_features() vs NETIF_F_HIGHDMA
  ax25: Fix segfault after sock connection timeout
  ravb: do not use zero-length alignment DMA descriptor
  openvswitch: maintain correct checksum state in conntrack actions
  tcp: fix tcp_fastopen unaligned access complaints on sparc
  net: systemport: Decouple flow control from __bcm_sysport_tx_reclaim
  net: ipv4: fix table id in getroute response
  net: lwtunnel: Handle lwtunnel_fill_encap failure
  mlxsw: pci: Fix EQE structure definition
  mlxsw: switchx2: Fix memory leak at skb reallocation
  mlxsw: spectrum: Fix memory leak at skb reallocation
  r8152: fix the sw rx checksum is unavailable
  ANDROID: sdcardfs: Switch strcasecmp for internal call
  ANDROID: sdcardfs: switch to full_name_hash and qstr
  ANDROID: sdcardfs: Add GID Derivation to sdcardfs
  ANDROID: sdcardfs: Remove redundant operation
  ANDROID: sdcardfs: add support for user permission isolation
  ANDROID: sdcardfs: Refactor configfs interface
  ANDROID: sdcardfs: Allow non-owners to touch
  ANDROID: binder: fix format specifier for type binder_size_t
  ANDROID: fs: Export vfs_rmdir2
  ANDROID: fs: Export free_fs_struct and set_fs_pwd
  ANDROID: mnt: remount should propagate to slaves of slaves
  ANDROID: sdcardfs: Switch ->d_inode to d_inode()
  ANDROID: sdcardfs: Fix locking issue with permision fix up
  ANDROID: sdcardfs: Change magic value
  ANDROID: sdcardfs: Use per mount permissions
  ANDROID: sdcardfs: Add gid and mask to private mount data
  ANDROID: sdcardfs: User new permission2 functions
  ANDROID: vfs: Add setattr2 for filesystems with per mount permissions
  ANDROID: vfs: Add permission2 for filesystems with per mount permissions
  ANDROID: vfs: Allow filesystems to access their private mount data
  ANDROID: mnt: Add filesystem private data to mount points
  ANDROID: sdcardfs: Move directory unlock before touch
  ANDROID: sdcardfs: fix external storage exporting incorrect uid
  ANDROID: sdcardfs: Added top to sdcardfs_inode_info
  ANDROID: sdcardfs: Switch package list to RCU
  ANDROID: sdcardfs: Fix locking for permission fix up
  ANDROID: sdcardfs: Check for other cases on path lookup
  ANDROID: sdcardfs: override umask on mkdir and create
  Linux 4.4.46
  mm, memcg: do not retry precharge charges
  platform/x86: intel_mid_powerbtn: Set IRQ_ONESHOT
  pinctrl: broxton: Use correct PADCFGLOCK offset
  s5k4ecgx: select CRC32 helper
  IB/umem: Release pid in error and ODP flow
  IB/ipoib: move back IB LL address into the hard header
  drm/i915: Don't leak edid in intel_crt_detect_ddc()
  SUNRPC: cleanup ida information when removing sunrpc module
  NFSv4.0: always send mode in SETATTR after EXCLUSIVE4
  nfs: Don't increment lock sequence ID after NFS4ERR_MOVED
  parisc: Don't use BITS_PER_LONG in userspace-exported swab.h header
  ARC: [arcompact] handle unaligned access delay slot corner case
  ARC: udelay: fix inline assembler by adding LP_COUNT to clobber list
  can: ti_hecc: add missing prepare and unprepare of the clock
  can: c_can_pci: fix null-pointer-deref in c_can_start() - set device pointer
  s390/ptrace: Preserve previous registers for short regset write
  RDMA/cma: Fix unknown symbol when CONFIG_IPV6 is not enabled
  ISDN: eicon: silence misleading array-bounds warning
  sysctl: fix proc_doulongvec_ms_jiffies_minmax()
  mm/mempolicy.c: do not put mempolicy before using its nodemask
  drm: Fix broken VT switch with video=1366x768 option
  tile/ptrace: Preserve previous registers for short regset write
  fbdev: color map copying bounds checking
  Linux 4.4.45
  arm64: avoid returning from bad_mode
  selftest/powerpc: Wrong PMC initialized in pmc56_overflow test
  dmaengine: pl330: Fix runtime PM support for terminated transfers
  ite-cir: initialize use_demodulator before using it
  blackfin: check devm_pinctrl_get() for errors
  ARM: 8613/1: Fix the uaccess crash on PB11MPCore
  ARM: ux500: fix prcmu_is_cpu_in_wfi() calculation
  ARM: dts: imx6qdl-nitrogen6_max: fix sgtl5000 pinctrl init
  arm64/ptrace: Reject attempts to set incomplete hardware breakpoint fields
  arm64/ptrace: Avoid uninitialised struct padding in fpr_set()
  arm64/ptrace: Preserve previous registers for short regset write - 3
  arm64/ptrace: Preserve previous registers for short regset write - 2
  arm64/ptrace: Preserve previous registers for short regset write
  ARM: dts: da850-evm: fix read access to SPI flash
  ceph: fix bad endianness handling in parse_reply_info_extra
  ARM: 8634/1: hw_breakpoint: blacklist Scorpion CPUs
  svcrdma: avoid duplicate dma unmapping during error recovery
  clocksource/exynos_mct: Clear interrupt when cpu is shut down
  ubifs: Fix journal replay wrt. xattr nodes
  qla2xxx: Fix crash due to null pointer access
  x86/ioapic: Restore IO-APIC irq_chip retrigger callback
  mtd: nand: xway: disable module support
  ieee802154: atusb: do not use the stack for buffers to make them DMA able
  mmc: mxs-mmc: Fix additional cycles after transmission stop
  HID: corsair: fix control-transfer error handling
  HID: corsair: fix DMA buffers on stack
  PCI: Enumerate switches below PCI-to-PCIe bridges
  fuse: clear FR_PENDING flag when moving requests out of pending queue
  svcrpc: don't leak contexts on PROC_DESTROY
  x86/PCI: Ignore _CRS on Supermicro X8DTH-i/6/iF/6F
  tmpfs: clear S_ISGID when setting posix ACLs
  ARM: dts: imx31: fix AVIC base address
  ARM: dts: imx31: move CCM device node to AIPS2 bus devices
  ARM: dts: imx31: fix clock control module interrupts description
  perf scripting: Avoid leaking the scripting_context variable
  IB/IPoIB: Remove can't use GFP_NOIO warning
  IB/mlx4: When no DMFS for IPoIB, don't allow NET_IF QPs
  IB/mlx4: Fix port query for 56Gb Ethernet links
  IB/mlx4: Fix out-of-range array index in destroy qp flow
  IB/mlx4: Set traffic class in AH
  IB/mlx5: Wait for all async command completions to complete
  ftrace/x86: Set ftrace_stub to weak to prevent gcc from using short jumps to it
  Linux 4.4.44
  pinctrl: sh-pfc: Do not unconditionally support PIN_CONFIG_BIAS_DISABLE
  powerpc/ibmebus: Fix device reference leaks in sysfs interface
  powerpc/ibmebus: Fix further device reference leaks
  bus: vexpress-config: fix device reference leak
  blk-mq: Always schedule hctx->next_cpu
  ACPI / APEI: Fix NMI notification handling
  block: cfq_cpd_alloc() should use @gfp
  cpufreq: powernv: Disable preemption while checking CPU throttling state
  NFSv4.1: nfs4_fl_prepare_ds must be careful about reporting success.
  NFS: Fix a performance regression in readdir
  pNFS: Fix race in pnfs_wait_on_layoutreturn
  pinctrl: meson: fix gpio request disabling other modes
  btrfs: fix error handling when run_delayed_extent_op fails
  btrfs: fix locking when we put back a delayed ref that's too new
  x86/cpu: Fix bootup crashes by sanitizing the argument of the 'clearcpuid=' command-line option
  USB: serial: ch341: fix modem-control and B0 handling
  USB: serial: ch341: fix resume after reset
  drm/radeon: drop verde dpm quirks
  sysctl: Drop reference added by grab_header in proc_sys_readdir
  sysrq: attach sysrq handler correctly for 32-bit kernel
  tty/serial: atmel_serial: BUG: stop DMA from transmitting in stop_tx
  mnt: Protect the mountpoint hashtable with mount_lock
  vme: Fix wrong pointer utilization in ca91cx42_slave_get
  xhci: fix deadlock at host remove by running watchdog correctly
  i2c: fix kernel memory disclosure in dev interface
  i2c: print correct device invalid address
  Input: elants_i2c - avoid divide by 0 errors on bad touchscreen data
  USB: serial: ch341: fix open and resume after B0
  USB: serial: ch341: fix control-message error handling
  USB: serial: ch341: fix open error handling
  USB: serial: ch341: fix initial modem-control state
  USB: serial: kl5kusb105: fix line-state error handling
  nl80211: fix sched scan netlink socket owner destruction
  KVM: x86: Introduce segmented_write_std
  KVM: x86: emulate FXSAVE and FXRSTOR
  KVM: x86: add asm_safe wrapper
  KVM: x86: add Align16 instruction flag
  KVM: x86: flush pending lapic jump label updates on module unload
  jump_labels: API for flushing deferred jump label updates
  KVM: eventfd: fix NULL deref irqbypass consumer
  KVM: x86: fix emulation of "MOV SS, null selector"
  mm/hugetlb.c: fix reservation race when freeing surplus pages
  ocfs2: fix crash caused by stale lvb with fsdlm plugin
  mm: fix devm_memremap_pages crash, use mem_hotplug_{begin, done}
  selftests: do not require bash for the generated test
  selftests: do not require bash to run netsocktests testcase
  Input: i8042 - add Pegatron touchpad to noloop table
  Input: xpad - use correct product id for x360w controllers
  DEBUG: sched/fair: Fix sched_load_avg_cpu events for task_groups
  DEBUG: sched/fair: Fix missing sched_load_avg_cpu events
  net: socket: don't set sk_uid to garbage value in ->setattr()
  ANDROID: configs: CONFIG_ARM64_SW_TTBR0_PAN=y
  UPSTREAM: arm64: Disable PAN on uaccess_enable()
  UPSTREAM: arm64: Enable CONFIG_ARM64_SW_TTBR0_PAN
  UPSTREAM: arm64: xen: Enable user access before a privcmd hvc call
  UPSTREAM: arm64: Handle faults caused by inadvertent user access with PAN enabled
  BACKPORT: arm64: Disable TTBR0_EL1 during normal kernel execution
  BACKPORT: arm64: Introduce uaccess_{disable,enable} functionality based on TTBR0_EL1
  BACKPORT: arm64: Factor out TTBR0_EL1 post-update workaround into a specific asm macro
  BACKPORT: arm64: Factor out PAN enabling/disabling into separate uaccess_* macros
  UPSTREAM: arm64: alternative: add auto-nop infrastructure
  UPSTREAM: arm64: barriers: introduce nops and __nops macros for NOP sequences
  Revert "FROMLIST: arm64: Factor out PAN enabling/disabling into separate uaccess_* macros"
  Revert "FROMLIST: arm64: Factor out TTBR0_EL1 post-update workaround into a specific asm macro"
  Revert "FROMLIST: arm64: Introduce uaccess_{disable,enable} functionality based on TTBR0_EL1"
  Revert "FROMLIST: arm64: Disable TTBR0_EL1 during normal kernel execution"
  Revert "FROMLIST: arm64: Handle faults caused by inadvertent user access with PAN enabled"
  Revert "FROMLIST: arm64: xen: Enable user access before a privcmd hvc call"
  Revert "FROMLIST: arm64: Enable CONFIG_ARM64_SW_TTBR0_PAN"
  ANDROID: sched/walt: fix build failure if FAIR_GROUP_SCHED=n
  Linux 4.4.43
  mm/init: fix zone boundary creation
  ALSA: usb-audio: Add a quirk for Plantronics BT600
  spi: mvebu: fix baudrate calculation for armada variant
  ARM: OMAP4+: Fix bad fallthrough for cpuidle
  ARM: zynq: Reserve correct amount of non-DMA RAM
  powerpc: Fix build warning on 32-bit PPC
  ALSA: firewire-tascam: Fix to handle error from initialization of stream data
  HID: hid-cypress: validate length of report
  net: vrf: do not allow table id 0
  net: ipv4: Fix multipath selection with vrf
  gro: Disable frag0 optimization on IPv6 ext headers
  gro: use min_t() in skb_gro_reset_offset()
  gro: Enter slow-path if there is no tailroom
  r8152: fix rx issue for runtime suspend
  r8152: split rtl8152_suspend function
  ipv4: Do not allow MAIN to be alias for new LOCAL w/ custom rules
  igmp: Make igmp group member RFC 3376 compliant
  drop_monitor: consider inserted data in genlmsg_end
  drop_monitor: add missing call to genlmsg_end
  net/mlx5: Avoid shadowing numa_node
  net/mlx5: Check FW limitations on log_max_qp before setting it
  net: stmmac: Fix race between stmmac_drv_probe and stmmac_open
  net, sched: fix soft lockup in tc_classify
  ipv6: handle -EFAULT from skb_copy_bits
  net: vrf: Drop conntrack data after pass through VRF device on Tx
  ser_gigaset: return -ENOMEM on error instead of success
  netvsc: reduce maximum GSO size
  Linux 4.4.42
  usb: gadget: composite: always set ep->mult to a sensible value
  Revert "usb: gadget: composite: always set ep->mult to a sensible value"
  tick/broadcast: Prevent NULL pointer dereference
  drm/radeon: Always store CRTC relative radeon_crtc->cursor_x/y values
  cx23885-dvb: move initialization of a8293_pdata
  net: vxge: avoid unused function warnings
  net: ti: cpmac: Fix compiler warning due to type confusion
  cred/userns: define current_user_ns() as a function
  staging: comedi: dt282x: tidy up register bit defines
  powerpc/pci/rpadlpar: Fix device reference leaks
  md: MD_RECOVERY_NEEDED is set for mddev->recovery
  crypto: arm64/aes-ce - fix for big endian
  crypto: arm64/aes-xts-ce: fix for big endian
  crypto: arm64/sha1-ce - fix for big endian
  crypto: arm64/aes-neon - fix for big endian
  crypto: arm64/aes-ccm-ce: fix for big endian
  crypto: arm/aes-ce - fix for big endian
  crypto: arm64/ghash-ce - fix for big endian
  crypto: arm64/sha2-ce - fix for big endian
  s390/crypto: unlock on error in prng_tdes_read()
  mmc: mmc_test: Uninitialized return value
  PM / wakeirq: Fix dedicated wakeirq for drivers not using autosuspend
  irqchip/bcm7038-l1: Implement irq_cpu_offline() callback
  target/iscsi: Fix double free in lio_target_tiqn_addtpg()
  scsi: mvsas: fix command_active typo
  ASoC: samsung: i2s: Fixup last IRQ unsafe spin lock call
  iommu/vt-d: Flush old iommu caches for kdump when the device gets context mapped
  iommu/vt-d: Fix pasid table size encoding
  iommu/amd: Fix the left value check of cmd buffer
  iommu/amd: Missing error code in amd_iommu_init_device()
  clk: imx31: fix rewritten input argument of mx31_clocks_init()
  clk: clk-wm831x: fix a logic error
  hwmon: (g762) Fix overflows and crash seen when writing limit attributes
  hwmon: (nct7802) Fix overflows seen when writing into limit attributes
  hwmon: (ds620) Fix overflows seen when writing temperature limits
  hwmon: (amc6821) sign extension temperature
  hwmon: (scpi) Fix module autoload
  cris: Only build flash rescue image if CONFIG_ETRAX_AXISFLASHMAP is selected
  ath10k: use the right length of "background"
  stable-fixup: hotplug: fix unused function warning
  usb: dwc3: ep0: explicitly call dwc3_ep0_prepare_one_trb()
  usb: dwc3: ep0: add dwc3_ep0_prepare_one_trb()
  usb: dwc3: gadget: always unmap EP0 requests
  staging: iio: ad7606: fix improper setting of oversampling pins
  mei: bus: fix mei_cldev_enable KDoc
  USB: serial: io_ti: bind to interface after fw download
  USB: phy: am335x-control: fix device and of_node leaks
  ARM: dts: r8a7794: Correct hsusb parent clock
  USB: serial: kl5kusb105: abort on open exception path
  ALSA: usb-audio: Fix bogus error return in snd_usb_create_stream()
  usb: musb: blackfin: add bfin_fifo_offset in bfin_ops
  usb: hub: Move hub_port_disable() to fix warning if PM is disabled
  usb: musb: Fix trying to free already-free IRQ 4
  usb: dwc3: pci: add Intel Gemini Lake PCI ID
  xhci: Fix race related to abort operation
  xhci: Use delayed_work instead of timer for command timeout
  usb: xhci-mem: use passed in GFP flags instead of GFP_KERNEL
  USB: serial: mos7720: fix parallel probe
  USB: serial: mos7720: fix parport use-after-free on probe errors
  USB: serial: mos7720: fix use-after-free on probe errors
  USB: serial: mos7720: fix NULL-deref at open
  USB: serial: mos7840: fix NULL-deref at open
  USB: serial: kobil_sct: fix NULL-deref in write
  USB: serial: cyberjack: fix NULL-deref at open
  USB: serial: oti6858: fix NULL-deref at open
  USB: serial: io_edgeport: fix NULL-deref at open
  USB: serial: ti_usb_3410_5052: fix NULL-deref at open
  USB: serial: garmin_gps: fix memory leak on failed URB submit
  USB: serial: iuu_phoenix: fix NULL-deref at open
  USB: serial: io_ti: fix I/O after disconnect
  USB: serial: io_ti: fix another NULL-deref at open
  USB: serial: io_ti: fix NULL-deref at open
  USB: serial: spcp8x5: fix NULL-deref at open
  USB: serial: keyspan_pda: verify endpoints at probe
  USB: serial: pl2303: fix NULL-deref at open
  USB: serial: quatech2: fix sleep-while-atomic in close
  USB: serial: omninet: fix NULL-derefs at open and disconnect
  usb: xhci: hold lock over xhci_abort_cmd_ring()
  xhci: Handle command completion and timeout race
  usb: host: xhci: Fix possible wild pointer when handling abort command
  usb: xhci: fix return value of xhci_setup_device()
  xhci: free xhci virtual devices with leaf nodes first
  usb: xhci: apply XHCI_PME_STUCK_QUIRK to Intel Apollo Lake
  xhci: workaround for hosts missing CAS bit
  usb: xhci: fix possible wild pointer
  usb: dwc3: core: avoid Overflow events
  usb: gadget: composite: Test get_alt() presence instead of set_alt()
  USB: dummy-hcd: fix bug in stop_activity (handle ep0)
  USB: fix problems with duplicate endpoint addresses
  USB: gadgetfs: fix checks of wTotalLength in config descriptors
  USB: gadgetfs: fix use-after-free bug
  USB: gadgetfs: fix unbounded memory allocation bug
  usb: gadgetfs: restrict upper bound on device configuration size
  usb: storage: unusual_uas: Add JMicron JMS56x to unusual device
  usb: musb: dsps: implement clear_ep_rxintr() callback
  usb: musb: core: add clear_ep_rxintr() to musb_platform_ops
  KVM: MIPS: Flush KVM entry code from icache globally
  KVM: x86: reset MMU on KVM_SET_VCPU_EVENTS
  mac80211: initialize fast-xmit 'info' later
  ARM: davinci: da850: don't add emac clock to lookup table twice
  ALSA: usb-audio: Fix irq/process data synchronization
  ALSA: hda - Apply asus-mode8 fixup to ASUS X71SL
  ALSA: hda - Fix up GPIO for ASUS ROG Ranger
  Linux 4.4.41
  net: mvpp2: fix dma unmapping of TX buffers for fragments
  sg_write()/bsg_write() is not fit to be called under KERNEL_DS
  kconfig/nconf: Fix hang when editing symbol with a long prompt
  target/user: Fix use-after-free of tcmu_cmds if they are expired
  powerpc: Convert cmp to cmpd in idle enter sequence
  powerpc/ps3: Fix system hang with GCC 5 builds
  nfs_write_end(): fix handling of short copies
  libceph: verify authorize reply on connect
  PCI: Check for PME in targeted sleep state
  Input: drv260x - fix input device's parent assignment
  media: solo6x10: fix lockup by avoiding delayed register write
  IB/cma: Fix a race condition in iboe_addr_get_sgid()
  IB/multicast: Check ib_find_pkey() return value
  IPoIB: Avoid reading an uninitialized member variable
  IB/mad: Fix an array index check
  fgraph: Handle a case where a tracer ignores set_graph_notrace
  platform/x86: asus-nb-wmi.c: Add X45U quirk
  ftrace/x86_32: Set ftrace_stub to weak to prevent gcc from using short jumps to it
  kvm: nVMX: Allow L1 to intercept software exceptions (#BP and #OF)
  KVM: PPC: Book3S HV: Don't lose hardware R/C bit updates in H_PROTECT
  KVM: PPC: Book3S HV: Save/restore XER in checkpointed register state
  md/raid5: limit request size according to implementation limits
  sc16is7xx: Drop bogus use of IRQF_ONESHOT
  s390/vmlogrdr: fix IUCV buffer allocation
  firmware: fix usermode helper fallback loading
  ARC: mm: arc700: Don't assume 2 colours for aliasing VIPT dcache
  scsi: avoid a permanent stop of the scsi device's request queue
  scsi: zfcp: fix rport unblock race with LUN recovery
  scsi: zfcp: do not trace pure benign residual HBA responses at default level
  scsi: zfcp: fix use-after-"free" in FC ingress path after TMF
  scsi: megaraid_sas: Do not set MPI2_TYPE_CUDA for JBOD FP path for FW which does not support JBOD sequence map
  scsi: megaraid_sas: For SRIOV enabled firmware, ensure VF driver waits for 30secs before reset
  vt: fix Scroll Lock LED trigger name
  block: protect iterate_bdevs() against concurrent close
  mei: request async autosuspend at the end of enumeration
  drivers/gpu/drm/ast: Fix infinite loop if read fails
  drm/gma500: Add compat ioctl
  drm/radeon: add additional pci revision to dpm workaround
  drm/radeon: Hide the HW cursor while it's out of bounds
  drm/radeon: Also call cursor_move_locked when the cursor size changes
  drm/nouveau/i2c/gk110b,gm10x: use the correct implementation
  drm/nouveau/fifo/gf100-: protect channel preempt with subdev mutex
  drm/nouveau/ltc: protect clearing of comptags with mutex
  drm/nouveau/bios: require checksum to match for fast acpi shadow method
  drm/nouveau/kms: lvds panel strap moved again on maxwell
  ACPI / video: Add force_native quirk for HP Pavilion dv6
  ACPI / video: Add force_native quirk for Dell XPS 17 L702X
  staging: comedi: ni_mio_common: fix E series ni_ai_insn_read() data
  staging: comedi: ni_mio_common: fix M Series ni_ai_insn_read() data mask
  thermal: hwmon: Properly report critical temperature in sysfs
  clk: bcm2835: Avoid overwriting the div info when disabling a pll_div clk
  timekeeping_Force_unsigned_clocksource_to_nanoseconds_conversion
  regulator: stw481x-vmmc: fix ages old enable error
  mmc: sdhci: Fix recovery from tuning timeout
  ath9k: Really fix LED polarity for some Mini PCI AR9220 MB92 cards.
  cfg80211/mac80211: fix BSS leaks when abandoning assoc attempts
  rtlwifi: Fix enter/exit power_save
  ssb: Fix error routine when fallback SPROM fails
  Linux 4.4.40
  ppp: defer netns reference release for ppp channel
  driver core: fix race between creating/querying glue dir and its cleanup
  xfs: set AGI buffer type in xlog_recover_clear_agi_bucket
  arm/xen: Use alloc_percpu rather than __alloc_percpu
  xen/gntdev: Use VM_MIXEDMAP instead of VM_IO to avoid NUMA balancing
  tpm xen: Remove bogus tpm_chip_unregister
  kernel/debug/debug_core.c: more properly delay for secondary CPUs
  kernel/watchdog: use nmi registers snapshot in hardlockup handler
  CIFS: Fix a possible memory corruption in push locks
  CIFS: Fix missing nls unload in smb2_reconnect()
  CIFS: Fix a possible memory corruption during reconnect
  ASoC: intel: Fix crash at suspend/resume without card registration
  dm space map metadata: fix 'struct sm_metadata' leak on failed create
  dm crypt: mark key as invalid until properly loaded
  dm flakey: return -EINVAL on interval bounds error in flakey_ctr()
  blk-mq: Do not invoke .queue_rq() for a stopped queue
  usb: gadget: composite: always set ep->mult to a sensible value
  exec: Ensure mm->user_ns contains the execed files
  fs: exec: apply CLOEXEC before changing dumpable task flags
  mm/vmscan.c: set correct defer count for shrinker
  loop: return proper error from loop_queue_rq()
  f2fs: set ->owner for debugfs status file's file_operations
  ext4: do not perform data journaling when data is encrypted
  ext4: return -ENOMEM instead of success
  ext4: reject inodes with negative size
  ext4: add sanity checking to count_overhead()
  ext4: fix in-superblock mount options processing
  ext4: use more strict checks for inodes_per_block on mount
  ext4: fix stack memory corruption with 64k block size
  ext4: fix mballoc breakage with 64k block size
  crypto: caam - fix AEAD givenc descriptors
  ptrace: Capture the ptracer's creds not PT_PTRACE_CAP
  mm: Add a user_ns owner to mm_struct and fix ptrace permission checks
  block_dev: don't test bdev->bd_contains when it is not stable
  btrfs: make file clone aware of fatal signals
  Btrfs: don't BUG() during drop snapshot
  Btrfs: fix memory leak in do_walk_down
  Btrfs: don't leak reloc root nodes on error
  Btrfs: return gracefully from balance if fs tree is corrupted
  Btrfs: bail out if block group has different mixed flag
  Btrfs: fix memory leak in reading btree blocks
  clk: ti: omap36xx: Work around sprz319 advisory 2.1
  ALSA: hda: when comparing pin configurations, ignore assoc in addition to seq
  ALSA: hda - Gate the mic jack on HP Z1 Gen3 AiO
  ALSA: hda - fix headset-mic problem on a Dell laptop
  ALSA: hda - ignore the assoc and seq when comparing pin configurations
  ALSA: hda/ca0132 - Add quirk for Alienware 15 R2 2016
  ALSA: hiface: Fix M2Tech hiFace driver sampling rate change
  ALSA: usb-audio: Add QuickCam Communicate Deluxe/S7500 to volume_control_quirks
  USB: UHCI: report non-PME wakeup signalling for Intel hardware
  usb: gadget: composite: correctly initialize ep->maxpacket
  usb: gadget: f_uac2: fix error handling at afunc_bind
  usb: hub: Fix auto-remount of safely removed or ejected USB-3 devices
  USB: cdc-acm: add device id for GW Instek AFG-125
  USB: serial: kl5kusb105: fix open error path
  USB: serial: option: add dlink dwm-158
  USB: serial: option: add support for Telit LE922A PIDs 0x1040, 0x1041
  Btrfs: fix qgroup rescan worker initialization
  btrfs: store and load values of stripes_min/stripes_max in balance status item
  Btrfs: fix tree search logic when replaying directory entry deletes
  btrfs: limit async_work allocation and worker func duration
  ANDROID: trace: net: use %pK for kernel pointers
  ANDROID: android-base: Enable QUOTA related configs
  net: ipv4: Don't crash if passing a null sk to ip_rt_update_pmtu.
  net: inet: Support UID-based routing in IP protocols.
  Revert "net: ipv6: fix virtual tunneling build"
  net: core: add UID to flows, rules, and routes
  net: core: Add a UID field to struct sock.
  Revert "net: core: Support UID-based routing."
  Revert "net: core: Handle 'sk' being NULL in UID-based routing"
  Revert "ANDROID: net: fix 'const' warnings"
  Revert "ANDROID: net: fib: remove duplicate assignment"
  Revert "ANDROID: net: core: fix UID-based routing"
  UPSTREAM: efi/arm64: Don't apply MEMBLOCK_NOMAP to UEFI memory map mapping
  UPSTREAM: arm64: enable CONFIG_DEBUG_RODATA by default
  goldfish: enable CONFIG_INET_DIAG_DESTROY
  sched/walt: kill {min,max}_capacity
  sched: fix wrong truncation of walt_avg
  ANDROID: dm verity: add minimum prefetch size
  Linux 4.4.39
  crypto: rsa - Add Makefile dependencies to fix parallel builds
  hotplug: Make register and unregister notifier API symmetric
  batman-adv: Check for alloc errors when preparing TT local data
  m68k: Fix ndelay() macro
  arm64: futex.h: Add missing PAN toggling
  can: peak: fix bad memory access and free sequence
  can: raw: raw_setsockopt: limit number of can_filter that can be set
  crypto: mcryptd - Check mcryptd algorithm compatibility
  perf/x86: Fix full width counter, counter overflow
  locking/rtmutex: Use READ_ONCE() in rt_mutex_owner()
  locking/rtmutex: Prevent dequeue vs. unlock race
  zram: restrict add/remove attributes to root only
  parisc: Fix TLB related boot crash on SMP machines
  parisc: Remove unnecessary TLB purges from flush_dcache_page_asm and flush_icache_page_asm
  parisc: Purge TLB before setting PTE
  powerpc/eeh: Fix deadlock when PE frozen state can't be cleared

Conflicts:
	arch/arm64/kernel/traps.c
	drivers/usb/dwc3/core.h
	drivers/usb/dwc3/ep0.c
	drivers/usb/gadget/function/f_fs.c
	drivers/usb/host/xhci-mem.c
	drivers/usb/host/xhci-ring.c
	drivers/usb/host/xhci.c
	drivers/video/fbdev/core/fbcmap.c
	include/trace/events/sched.h
	mm/vmscan.c

Change-Id: I3faa0010ecb98972cd8e6470377a493b56d95f89
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>
Signed-off-by: Runmin Wang <runminw@codeaurora.org>

@@ -77,7 +77,7 @@ Examples:
clks: ccm@53f80000{
compatible = "fsl,imx31-ccm";
reg = <0x53f80000 0x4000>;
interrupts = <0 31 0x04 0 53 0x04>;
interrupts = <31>, <53>;
#clock-cells = <1>;
};


@@ -1991,6 +1991,7 @@ registers, find a list below:
PPC | KVM_REG_PPC_TM_VSCR | 32
PPC | KVM_REG_PPC_TM_DSCR | 64
PPC | KVM_REG_PPC_TM_TAR | 64
PPC | KVM_REG_PPC_TM_XER | 64
| |
MIPS | KVM_REG_MIPS_R0 | 64
...


@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 38
SUBLEVEL = 49
EXTRAVERSION =
NAME = Blurry Fish Butt


@@ -139,7 +139,11 @@ CONFIG_PPP_DEFLATE=y
CONFIG_PPP_MPPE=y
CONFIG_PREEMPT=y
CONFIG_PROFILING=y
CONFIG_QFMT_V2=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QUOTA_TREE=y
CONFIG_QUOTACTL=y
CONFIG_RANDOMIZE_BASE=y
CONFIG_RTC_CLASS=y
CONFIG_RT_GROUP_SCHED=y


@@ -8,6 +8,7 @@
# CONFIG_VT is not set
CONFIG_ANDROID_TIMED_GPIO=y
CONFIG_ARM_KERNMEM_PERMS=y
CONFIG_ARM64_SW_TTBR0_PAN=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y


@@ -85,6 +85,10 @@ void flush_anon_page(struct vm_area_struct *vma,
*/
#define PG_dc_clean PG_arch_1
#define CACHE_COLORS_NUM 4
#define CACHE_COLORS_MSK (CACHE_COLORS_NUM - 1)
#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & CACHE_COLORS_MSK)
/*
* Simple wrapper over config option
* Bootup code ensures that hardware matches kernel configuration
@@ -94,8 +98,6 @@ static inline int cache_is_vipt_aliasing(void)
return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
}
#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & 1)
/*
* checks if two addresses (after page aligning) index into same cache set
*/


@@ -26,7 +26,9 @@ static inline void __delay(unsigned long loops)
" lp 1f \n"
" nop \n"
"1: \n"
: : "r"(loops));
:
: "r"(loops)
: "lp_count");
}
extern void __bad_udelay(void);


@@ -241,8 +241,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
if (state.fault)
goto fault;
/* clear any remanants of delay slot */
if (delay_mode(regs)) {
regs->ret = regs->bta;
regs->ret = regs->bta & ~1U;
regs->status32 &= ~STATUS_DE_MASK;
} else {
regs->ret += state.instr_len;


@@ -960,11 +960,16 @@ void arc_cache_init(void)
/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
if (is_isa_arcompact()) {
int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);
if (dc->alias && !handled)
panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
else if (!dc->alias && handled)
if (dc->alias) {
if (!handled)
panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
if (CACHE_COLORS_NUM != num_colors)
panic("CACHE_COLORS_NUM not optimized for config\n");
} else if (!dc->alias && handled) {
panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
}
}
}


@@ -85,6 +85,7 @@
#size-cells = <1>;
compatible = "m25p64";
spi-max-frequency = <30000000>;
m25p,fast-read;
reg = <0>;
partition@0 {
label = "U-Boot-SPL";


@@ -30,11 +30,11 @@
};
};
avic: avic-interrupt-controller@60000000 {
avic: interrupt-controller@68000000 {
compatible = "fsl,imx31-avic", "fsl,avic";
interrupt-controller;
#interrupt-cells = <1>;
reg = <0x60000000 0x100000>;
reg = <0x68000000 0x100000>;
};
soc {
@@ -110,13 +110,6 @@
interrupts = <19>;
clocks = <&clks 25>;
};
clks: ccm@53f80000{
compatible = "fsl,imx31-ccm";
reg = <0x53f80000 0x4000>;
interrupts = <0 31 0x04 0 53 0x04>;
#clock-cells = <1>;
};
};
aips@53f00000 { /* AIPS2 */
@@ -126,6 +119,13 @@
reg = <0x53f00000 0x100000>;
ranges;
clks: ccm@53f80000{
compatible = "fsl,imx31-ccm";
reg = <0x53f80000 0x4000>;
interrupts = <31>, <53>;
#clock-cells = <1>;
};
gpt: timer@53f90000 {
compatible = "fsl,imx31-gpt";
reg = <0x53f90000 0x4000>;


@@ -319,8 +319,6 @@
compatible = "fsl,imx6q-nitrogen6_max-sgtl5000",
"fsl,imx-audio-sgtl5000";
model = "imx6q-nitrogen6_max-sgtl5000";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sgtl5000>;
ssi-controller = <&ssi1>;
audio-codec = <&codec>;
audio-routing =
@@ -401,6 +399,8 @@
codec: sgtl5000@0a {
compatible = "fsl,sgtl5000";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sgtl5000>;
reg = <0x0a>;
clocks = <&clks 201>;
VDDA-supply = <&reg_2p5v>;


@@ -1023,7 +1023,7 @@
mstp7_clks: mstp7_clks@e615014c {
compatible = "renesas,r8a7794-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0 0xe615014c 0 4>, <0 0xe61501c4 0 4>;
clocks = <&mp_clk>, <&mp_clk>,
clocks = <&mp_clk>, <&hp_clk>,
<&zs_clk>, <&p_clk>, <&p_clk>, <&zs_clk>,
<&zs_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>;
#clock-cells = <1>;


@@ -48,6 +48,7 @@ CONFIG_UNIX=y
CONFIG_XFRM_USER=y
CONFIG_NET_KEY=y
CONFIG_INET=y
CONFIG_INET_DIAG_DESTROY=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y


@@ -87,8 +87,13 @@ static int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
u32 *rki = ctx->key_enc + (i * kwords);
u32 *rko = rki + kwords;
#ifndef CONFIG_CPU_BIG_ENDIAN
rko[0] = ror32(ce_aes_sub(rki[kwords - 1]), 8);
rko[0] = rko[0] ^ rki[0] ^ rcon[i];
#else
rko[0] = rol32(ce_aes_sub(rki[kwords - 1]), 8);
rko[0] = rko[0] ^ rki[0] ^ (rcon[i] << 24);
#endif
rko[1] = rko[0] ^ rki[1];
rko[2] = rko[1] ^ rki[2];
rko[3] = rko[2] ^ rki[3];


@@ -81,6 +81,9 @@
#define ARM_CPU_XSCALE_ARCH_V2 0x4000
#define ARM_CPU_XSCALE_ARCH_V3 0x6000
/* Qualcomm implemented cores */
#define ARM_CPU_PART_SCORPION 0x510002d0
extern unsigned int processor_id;
#ifdef CONFIG_CPU_CP15


@@ -1066,6 +1066,22 @@ static int __init arch_hw_breakpoint_init(void)
return 0;
}
/*
* Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD
* whenever a WFI is issued, even if the core is not powered down, in
* violation of the architecture. When DBGPRSR.SPD is set, accesses to
* breakpoint and watchpoint registers are treated as undefined, so
* this results in boot time and runtime failures when these are
* accessed and we unexpectedly take a trap.
*
* It's not clear if/how this can be worked around, so we blacklist
* Scorpion CPUs to avoid these issues.
*/
if (read_cpuid_part() == ARM_CPU_PART_SCORPION) {
pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n");
return 0;
}
has_ossr = core_has_os_save_restore();
/* Determine how many BRPs/WRPs are available. */


@@ -600,7 +600,7 @@ static int gpr_set(struct task_struct *target,
const void *kbuf, const void __user *ubuf)
{
int ret;
struct pt_regs newregs;
struct pt_regs newregs = *task_pt_regs(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&newregs,


@@ -9,6 +9,7 @@
*/
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/uaccess.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
@@ -40,8 +41,11 @@ static inline void ipi_flush_tlb_mm(void *arg)
static inline void ipi_flush_tlb_page(void *arg)
{
struct tlb_args *ta = (struct tlb_args *)arg;
unsigned int __ua_flags = uaccess_save_and_enable();
local_flush_tlb_page(ta->ta_vma, ta->ta_start);
uaccess_restore(__ua_flags);
}
static inline void ipi_flush_tlb_kernel_page(void *arg)
@@ -54,8 +58,11 @@ static inline void ipi_flush_tlb_kernel_page(void *arg)
static inline void ipi_flush_tlb_range(void *arg)
{
struct tlb_args *ta = (struct tlb_args *)arg;
unsigned int __ua_flags = uaccess_save_and_enable();
local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
uaccess_restore(__ua_flags);
}
static inline void ipi_flush_tlb_kernel_range(void *arg)


@@ -298,6 +298,16 @@ static struct clk emac_clk = {
.gpsc = 1,
};
/*
* In order to avoid adding the emac_clk to the clock lookup table twice (and
* screwing up the linked list in the process) create a separate clock for
* mdio inheriting the rate from emac_clk.
*/
static struct clk mdio_clk = {
.name = "mdio",
.parent = &emac_clk,
};
static struct clk mcasp_clk = {
.name = "mcasp",
.parent = &pll0_sysclk2,
@@ -462,7 +472,7 @@ static struct clk_lookup da850_clks[] = {
CLK(NULL, "arm", &arm_clk),
CLK(NULL, "rmii", &rmii_clk),
CLK("davinci_emac.1", NULL, &emac_clk),
CLK("davinci_mdio.0", "fck", &emac_clk),
CLK("davinci_mdio.0", "fck", &mdio_clk),
CLK("davinci-mcasp.0", NULL, &mcasp_clk),
CLK("da8xx_lcdc.0", "fck", &lcdc_clk),
CLK("da830-mmc.0", NULL, &mmcsd0_clk),


@@ -243,10 +243,9 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
save_state = 1;
break;
case PWRDM_POWER_RET:
if (IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE)) {
if (IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE))
save_state = 0;
break;
}
break;
default:
/*
* CPUx CSWR is invalid hardware state. Also CPUx OSWR


@@ -134,8 +134,8 @@ bool prcmu_pending_irq(void)
*/
bool prcmu_is_cpu_in_wfi(int cpu)
{
return readl(PRCM_ARM_WFI_STANDBY) & cpu ? PRCM_ARM_WFI_STANDBY_WFI1 :
PRCM_ARM_WFI_STANDBY_WFI0;
return readl(PRCM_ARM_WFI_STANDBY) &
(cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : PRCM_ARM_WFI_STANDBY_WFI0);
}
/*


@@ -59,7 +59,7 @@ void __iomem *zynq_scu_base;
static void __init zynq_memory_init(void)
{
if (!__pa(PAGE_OFFSET))
memblock_reserve(__pa(PAGE_OFFSET), __pa(swapper_pg_dir));
memblock_reserve(__pa(PAGE_OFFSET), 0x80000);
}
static struct platform_device zynq_cpuidle_device = {


@@ -610,9 +610,9 @@ static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
void __init early_abt_enable(void)
{
fsr_info[22].fn = early_abort_handler;
fsr_info[FSR_FS_AEA].fn = early_abort_handler;
local_abt_enable();
fsr_info[22].fn = do_bad;
fsr_info[FSR_FS_AEA].fn = do_bad;
}
#ifndef CONFIG_ARM_LPAE


@@ -11,11 +11,15 @@
#define FSR_FS5_0 (0x3f)
#ifdef CONFIG_ARM_LPAE
#define FSR_FS_AEA 17
static inline int fsr_fs(unsigned int fsr)
{
return fsr & FSR_FS5_0;
}
#else
#define FSR_FS_AEA 22
static inline int fsr_fs(unsigned int fsr)
{
return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;


@@ -239,8 +239,7 @@ static int __init xen_guest_init(void)
* for secondary CPUs as they are brought up.
* For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
*/
xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
sizeof(struct vcpu_info));
xen_vcpu_info = alloc_percpu(struct vcpu_info);
if (xen_vcpu_info == NULL)
return -ENOMEM;


@@ -812,7 +812,7 @@ config SETEND_EMULATION
endif
config ARM64_SW_TTBR0_PAN
bool "Emulate Priviledged Access Never using TTBR0_EL1 switching"
bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
help
Enabling this option prevents the kernel from accessing
user-space memory directly by pointing TTBR0_EL1 to a reserved


@@ -64,13 +64,13 @@ config DEBUG_SET_MODULE_RONX
config DEBUG_RODATA
bool "Make kernel text and rodata read-only"
default y
help
If this is set, kernel text and rodata will be made read-only. This
is to help catch accidental or malicious attempts to change the
kernel's executable code. Additionally splits rodata from kernel
text so it can be made explicitly non-executable.
kernel's executable code.
If in doubt, say Y
If in doubt, say Y
config DEBUG_ALIGN_RODATA
depends on DEBUG_RODATA


@@ -579,7 +579,6 @@ CONFIG_TIMER_STATS=y
CONFIG_IPC_LOGGING=y
CONFIG_CPU_FREQ_SWITCH_PROFILER=y
CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_DEBUG_RODATA=y
CONFIG_DEBUG_ALIGN_RODATA=y
CONFIG_PFK=y
CONFIG_SECURITY=y


@@ -609,7 +609,6 @@ CONFIG_TIMER_STATS=y
CONFIG_IPC_LOGGING=y
CONFIG_CPU_FREQ_SWITCH_PROFILER=y
CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_DEBUG_RODATA=y
CONFIG_DEBUG_ALIGN_RODATA=y
CONFIG_CORESIGHT=y
CONFIG_CORESIGHT_EVENT=y


@@ -677,7 +677,6 @@ CONFIG_PANIC_ON_DATA_CORRUPTION=y
CONFIG_ARM64_PTDUMP=y
CONFIG_PID_IN_CONTEXTIDR=y
CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_DEBUG_RODATA=y
CONFIG_FREE_PAGES_RDONLY=y
CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y
CONFIG_CORESIGHT=y


@@ -658,7 +658,6 @@ CONFIG_PANIC_ON_DATA_CORRUPTION=y
CONFIG_ARM64_PTDUMP=y
CONFIG_PID_IN_CONTEXTIDR=y
CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_DEBUG_RODATA=y
CONFIG_FREE_PAGES_RDONLY=y
CONFIG_CORESIGHT=y
CONFIG_CORESIGHT_EVENT=y


@@ -50,6 +50,7 @@ CONFIG_UNIX=y
CONFIG_XFRM_USER=y
CONFIG_NET_KEY=y
CONFIG_INET=y
CONFIG_INET_DIAG_DESTROY=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y


@@ -623,7 +623,6 @@ CONFIG_TIMER_STATS=y
CONFIG_IPC_LOGGING=y
CONFIG_CPU_FREQ_SWITCH_PROFILER=y
CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_DEBUG_RODATA=y
CONFIG_DEBUG_ALIGN_RODATA=y
CONFIG_CORESIGHT=y
CONFIG_CORESIGHT_EVENT=y


@@ -688,7 +688,6 @@ CONFIG_PANIC_ON_DATA_CORRUPTION=y
CONFIG_ARM64_PTDUMP=y
CONFIG_PID_IN_CONTEXTIDR=y
CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_DEBUG_RODATA=y
CONFIG_FREE_PAGES_RDONLY=y
CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y
CONFIG_CORESIGHT=y


@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
.arch armv8-a+crypto
@@ -19,7 +20,7 @@
*/
ENTRY(ce_aes_ccm_auth_data)
ldr w8, [x3] /* leftover from prev round? */
ld1 {v0.2d}, [x0] /* load mac */
ld1 {v0.16b}, [x0] /* load mac */
cbz w8, 1f
sub w8, w8, #16
eor v1.16b, v1.16b, v1.16b
@@ -31,7 +32,7 @@ ENTRY(ce_aes_ccm_auth_data)
beq 8f /* out of input? */
cbnz w8, 0b
eor v0.16b, v0.16b, v1.16b
1: ld1 {v3.2d}, [x4] /* load first round key */
1: ld1 {v3.16b}, [x4] /* load first round key */
prfm pldl1strm, [x1]
cmp w5, #12 /* which key size? */
add x6, x4, #16
@@ -41,17 +42,17 @@ ENTRY(ce_aes_ccm_auth_data)
mov v5.16b, v3.16b
b 4f
2: mov v4.16b, v3.16b
ld1 {v5.2d}, [x6], #16 /* load 2nd round key */
ld1 {v5.16b}, [x6], #16 /* load 2nd round key */
3: aese v0.16b, v4.16b
aesmc v0.16b, v0.16b
4: ld1 {v3.2d}, [x6], #16 /* load next round key */
4: ld1 {v3.16b}, [x6], #16 /* load next round key */
aese v0.16b, v5.16b
aesmc v0.16b, v0.16b
5: ld1 {v4.2d}, [x6], #16 /* load next round key */
5: ld1 {v4.16b}, [x6], #16 /* load next round key */
subs w7, w7, #3
aese v0.16b, v3.16b
aesmc v0.16b, v0.16b
ld1 {v5.2d}, [x6], #16 /* load next round key */
ld1 {v5.16b}, [x6], #16 /* load next round key */
bpl 3b
aese v0.16b, v4.16b
subs w2, w2, #16 /* last data? */
@@ -60,7 +61,7 @@ ENTRY(ce_aes_ccm_auth_data)
ld1 {v1.16b}, [x1], #16 /* load next input block */
eor v0.16b, v0.16b, v1.16b /* xor with mac */
bne 1b
6: st1 {v0.2d}, [x0] /* store mac */
6: st1 {v0.16b}, [x0] /* store mac */
beq 10f
adds w2, w2, #16
beq 10f
@@ -79,7 +80,7 @@ ENTRY(ce_aes_ccm_auth_data)
adds w7, w7, #1
bne 9b
eor v0.16b, v0.16b, v1.16b
st1 {v0.2d}, [x0]
st1 {v0.16b}, [x0]
10: str w8, [x3]
ret
ENDPROC(ce_aes_ccm_auth_data)
@@ -89,27 +90,27 @@ ENDPROC(ce_aes_ccm_auth_data)
* u32 rounds);
*/
ENTRY(ce_aes_ccm_final)
ld1 {v3.2d}, [x2], #16 /* load first round key */
ld1 {v0.2d}, [x0] /* load mac */
ld1 {v3.16b}, [x2], #16 /* load first round key */
ld1 {v0.16b}, [x0] /* load mac */
cmp w3, #12 /* which key size? */
sub w3, w3, #2 /* modified # of rounds */
ld1 {v1.2d}, [x1] /* load 1st ctriv */
ld1 {v1.16b}, [x1] /* load 1st ctriv */
bmi 0f
bne 3f
mov v5.16b, v3.16b
b 2f
0: mov v4.16b, v3.16b
1: ld1 {v5.2d}, [x2], #16 /* load next round key */
1: ld1 {v5.16b}, [x2], #16 /* load next round key */
aese v0.16b, v4.16b
aesmc v0.16b, v0.16b
aese v1.16b, v4.16b
aesmc v1.16b, v1.16b
2: ld1 {v3.2d}, [x2], #16 /* load next round key */
2: ld1 {v3.16b}, [x2], #16 /* load next round key */
aese v0.16b, v5.16b
aesmc v0.16b, v0.16b
aese v1.16b, v5.16b
aesmc v1.16b, v1.16b
3: ld1 {v4.2d}, [x2], #16 /* load next round key */
3: ld1 {v4.16b}, [x2], #16 /* load next round key */
subs w3, w3, #3
aese v0.16b, v3.16b
aesmc v0.16b, v0.16b
@@ -120,47 +121,47 @@ ENTRY(ce_aes_ccm_final)
aese v1.16b, v4.16b
/* final round key cancels out */
eor v0.16b, v0.16b, v1.16b /* en-/decrypt the mac */
st1 {v0.2d}, [x0] /* store result */
st1 {v0.16b}, [x0] /* store result */
ret
ENDPROC(ce_aes_ccm_final)
.macro aes_ccm_do_crypt,enc
ldr x8, [x6, #8] /* load lower ctr */
ld1 {v0.2d}, [x5] /* load mac */
rev x8, x8 /* keep swabbed ctr in reg */
ld1 {v0.16b}, [x5] /* load mac */
CPU_LE( rev x8, x8 ) /* keep swabbed ctr in reg */
0: /* outer loop */
ld1 {v1.1d}, [x6] /* load upper ctr */
ld1 {v1.8b}, [x6] /* load upper ctr */
prfm pldl1strm, [x1]
add x8, x8, #1
rev x9, x8
cmp w4, #12 /* which key size? */
sub w7, w4, #2 /* get modified # of rounds */
ins v1.d[1], x9 /* no carry in lower ctr */
ld1 {v3.2d}, [x3] /* load first round key */
ld1 {v3.16b}, [x3] /* load first round key */
add x10, x3, #16
bmi 1f
bne 4f
mov v5.16b, v3.16b
b 3f
1: mov v4.16b, v3.16b
ld1 {v5.2d}, [x10], #16 /* load 2nd round key */
ld1 {v5.16b}, [x10], #16 /* load 2nd round key */
2: /* inner loop: 3 rounds, 2x interleaved */
aese v0.16b, v4.16b
aesmc v0.16b, v0.16b
aese v1.16b, v4.16b
aesmc v1.16b, v1.16b
3: ld1 {v3.2d}, [x10], #16 /* load next round key */
3: ld1 {v3.16b}, [x10], #16 /* load next round key */
aese v0.16b, v5.16b
aesmc v0.16b, v0.16b
aese v1.16b, v5.16b
aesmc v1.16b, v1.16b
4: ld1 {v4.2d}, [x10], #16 /* load next round key */
4: ld1 {v4.16b}, [x10], #16 /* load next round key */
subs w7, w7, #3
aese v0.16b, v3.16b
aesmc v0.16b, v0.16b
aese v1.16b, v3.16b
aesmc v1.16b, v1.16b
ld1 {v5.2d}, [x10], #16 /* load next round key */
ld1 {v5.16b}, [x10], #16 /* load next round key */
bpl 2b
aese v0.16b, v4.16b
aese v1.16b, v4.16b
@@ -177,14 +178,14 @@ ENDPROC(ce_aes_ccm_final)
eor v0.16b, v0.16b, v2.16b /* xor mac with pt ^ rk[last] */
st1 {v1.16b}, [x0], #16 /* write output block */
bne 0b
rev x8, x8
st1 {v0.2d}, [x5] /* store mac */
CPU_LE( rev x8, x8 )
st1 {v0.16b}, [x5] /* store mac */
str x8, [x6, #8] /* store lsb end of ctr (BE) */
5: ret
6: eor v0.16b, v0.16b, v5.16b /* final round mac */
eor v1.16b, v1.16b, v5.16b /* final round enc */
st1 {v0.2d}, [x5] /* store mac */
st1 {v0.16b}, [x5] /* store mac */
add w2, w2, #16 /* process partial tail block */
7: ldrb w9, [x1], #1 /* get 1 byte of input */
umov w6, v1.b[0] /* get top crypted ctr byte */


@@ -47,24 +47,24 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
kernel_neon_begin_partial(4);
__asm__(" ld1 {v0.16b}, %[in] ;"
" ld1 {v1.2d}, [%[key]], #16 ;"
" ld1 {v1.16b}, [%[key]], #16 ;"
" cmp %w[rounds], #10 ;"
" bmi 0f ;"
" bne 3f ;"
" mov v3.16b, v1.16b ;"
" b 2f ;"
"0: mov v2.16b, v1.16b ;"
" ld1 {v3.2d}, [%[key]], #16 ;"
" ld1 {v3.16b}, [%[key]], #16 ;"
"1: aese v0.16b, v2.16b ;"
" aesmc v0.16b, v0.16b ;"
"2: ld1 {v1.2d}, [%[key]], #16 ;"
"2: ld1 {v1.16b}, [%[key]], #16 ;"
" aese v0.16b, v3.16b ;"
" aesmc v0.16b, v0.16b ;"
"3: ld1 {v2.2d}, [%[key]], #16 ;"
"3: ld1 {v2.16b}, [%[key]], #16 ;"
" subs %w[rounds], %w[rounds], #3 ;"
" aese v0.16b, v1.16b ;"
" aesmc v0.16b, v0.16b ;"
" ld1 {v3.2d}, [%[key]], #16 ;"
" ld1 {v3.16b}, [%[key]], #16 ;"
" bpl 1b ;"
" aese v0.16b, v2.16b ;"
" eor v0.16b, v0.16b, v3.16b ;"
@@ -92,24 +92,24 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
kernel_neon_begin_partial(4);
__asm__(" ld1 {v0.16b}, %[in] ;"
" ld1 {v1.2d}, [%[key]], #16 ;"
" ld1 {v1.16b}, [%[key]], #16 ;"
" cmp %w[rounds], #10 ;"
" bmi 0f ;"
" bne 3f ;"
" mov v3.16b, v1.16b ;"
" b 2f ;"
"0: mov v2.16b, v1.16b ;"
" ld1 {v3.2d}, [%[key]], #16 ;"
" ld1 {v3.16b}, [%[key]], #16 ;"
"1: aesd v0.16b, v2.16b ;"
" aesimc v0.16b, v0.16b ;"
"2: ld1 {v1.2d}, [%[key]], #16 ;"
"2: ld1 {v1.16b}, [%[key]], #16 ;"
" aesd v0.16b, v3.16b ;"
" aesimc v0.16b, v0.16b ;"
"3: ld1 {v2.2d}, [%[key]], #16 ;"
"3: ld1 {v2.16b}, [%[key]], #16 ;"
" subs %w[rounds], %w[rounds], #3 ;"
" aesd v0.16b, v1.16b ;"
" aesimc v0.16b, v0.16b ;"
" ld1 {v3.2d}, [%[key]], #16 ;"
" ld1 {v3.16b}, [%[key]], #16 ;"
" bpl 1b ;"
" aesd v0.16b, v2.16b ;"
" eor v0.16b, v0.16b, v3.16b ;"
@@ -173,7 +173,12 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
u32 *rki = ctx->key_enc + (i * kwords);
u32 *rko = rki + kwords;
#ifndef CONFIG_CPU_BIG_ENDIAN
rko[0] = ror32(aes_sub(rki[kwords - 1]), 8) ^ rcon[i] ^ rki[0];
#else
rko[0] = rol32(aes_sub(rki[kwords - 1]), 8) ^ (rcon[i] << 24) ^
rki[0];
#endif
rko[1] = rko[0] ^ rki[1];
rko[2] = rko[1] ^ rki[2];
rko[3] = rko[2] ^ rki[3];


@@ -10,6 +10,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#define AES_ENTRY(func) ENTRY(ce_ ## func)
#define AES_ENDPROC(func) ENDPROC(ce_ ## func)


@@ -193,15 +193,16 @@ AES_ENTRY(aes_cbc_encrypt)
cbz w6, .Lcbcencloop
ld1 {v0.16b}, [x5] /* get iv */
enc_prepare w3, x2, x5
enc_prepare w3, x2, x6
.Lcbcencloop:
ld1 {v1.16b}, [x1], #16 /* get next pt block */
eor v0.16b, v0.16b, v1.16b /* ..and xor with iv */
encrypt_block v0, w3, x2, x5, w6
encrypt_block v0, w3, x2, x6, w7
st1 {v0.16b}, [x0], #16
subs w4, w4, #1
bne .Lcbcencloop
st1 {v0.16b}, [x5] /* return iv */
ret
AES_ENDPROC(aes_cbc_encrypt)
@@ -211,7 +212,7 @@ AES_ENTRY(aes_cbc_decrypt)
cbz w6, .LcbcdecloopNx
ld1 {v7.16b}, [x5] /* get iv */
dec_prepare w3, x2, x5
dec_prepare w3, x2, x6
.LcbcdecloopNx:
#if INTERLEAVE >= 2
@@ -248,7 +249,7 @@ AES_ENTRY(aes_cbc_decrypt)
.Lcbcdecloop:
ld1 {v1.16b}, [x1], #16 /* get next ct block */
mov v0.16b, v1.16b /* ...and copy to v0 */
decrypt_block v0, w3, x2, x5, w6
decrypt_block v0, w3, x2, x6, w7
eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */
mov v7.16b, v1.16b /* ct is next iv */
st1 {v0.16b}, [x0], #16
@@ -256,6 +257,7 @@ AES_ENTRY(aes_cbc_decrypt)
bne .Lcbcdecloop
.Lcbcdecout:
FRAME_POP
st1 {v7.16b}, [x5] /* return iv */
ret
AES_ENDPROC(aes_cbc_decrypt)
@@ -267,24 +269,15 @@ AES_ENDPROC(aes_cbc_decrypt)
AES_ENTRY(aes_ctr_encrypt)
FRAME_PUSH
cbnz w6, .Lctrfirst /* 1st time around? */
umov x5, v4.d[1] /* keep swabbed ctr in reg */
rev x5, x5
#if INTERLEAVE >= 2
cmn w5, w4 /* 32 bit overflow? */
bcs .Lctrinc
add x5, x5, #1 /* increment BE ctr */
b .LctrincNx
#else
b .Lctrinc
#endif
.Lctrfirst:
cbz w6, .Lctrnotfirst /* 1st time around? */
enc_prepare w3, x2, x6
ld1 {v4.16b}, [x5]
umov x5, v4.d[1] /* keep swabbed ctr in reg */
rev x5, x5
.Lctrnotfirst:
umov x8, v4.d[1] /* keep swabbed ctr in reg */
rev x8, x8
#if INTERLEAVE >= 2
cmn w5, w4 /* 32 bit overflow? */
cmn w8, w4 /* 32 bit overflow? */
bcs .Lctrloop
.LctrloopNx:
subs w4, w4, #INTERLEAVE
@@ -292,11 +285,11 @@ AES_ENTRY(aes_ctr_encrypt)
#if INTERLEAVE == 2
mov v0.8b, v4.8b
mov v1.8b, v4.8b
rev x7, x5
add x5, x5, #1
rev x7, x8
add x8, x8, #1
ins v0.d[1], x7
rev x7, x5
add x5, x5, #1
rev x7, x8
add x8, x8, #1
ins v1.d[1], x7
ld1 {v2.16b-v3.16b}, [x1], #32 /* get 2 input blocks */
do_encrypt_block2x
@ -305,7 +298,7 @@ AES_ENTRY(aes_ctr_encrypt)
st1 {v0.16b-v1.16b}, [x0], #32
#else
ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */
dup v7.4s, w5
dup v7.4s, w8
mov v0.16b, v4.16b
add v7.4s, v7.4s, v8.4s
mov v1.16b, v4.16b
@@ -323,18 +316,12 @@ AES_ENTRY(aes_ctr_encrypt)
eor v2.16b, v7.16b, v2.16b
eor v3.16b, v5.16b, v3.16b
st1 {v0.16b-v3.16b}, [x0], #64
add x5, x5, #INTERLEAVE
add x8, x8, #INTERLEAVE
#endif
cbz w4, .LctroutNx
.LctrincNx:
rev x7, x5
rev x7, x8
ins v4.d[1], x7
cbz w4, .Lctrout
b .LctrloopNx
.LctroutNx:
sub x5, x5, #1
rev x7, x5
ins v4.d[1], x7
b .Lctrout
.Lctr1x:
adds w4, w4, #INTERLEAVE
beq .Lctrout
@@ -342,30 +329,39 @@ AES_ENTRY(aes_ctr_encrypt)
.Lctrloop:
mov v0.16b, v4.16b
encrypt_block v0, w3, x2, x6, w7
adds x8, x8, #1 /* increment BE ctr */
rev x7, x8
ins v4.d[1], x7
bcs .Lctrcarry /* overflow? */
.Lctrcarrydone:
subs w4, w4, #1
bmi .Lctrhalfblock /* blocks < 0 means 1/2 block */
ld1 {v3.16b}, [x1], #16
eor v3.16b, v0.16b, v3.16b
st1 {v3.16b}, [x0], #16
beq .Lctrout
.Lctrinc:
adds x5, x5, #1 /* increment BE ctr */
rev x7, x5
ins v4.d[1], x7
bcc .Lctrloop /* no overflow? */
bne .Lctrloop
.Lctrout:
st1 {v4.16b}, [x5] /* return next CTR value */
FRAME_POP
ret
.Lctrhalfblock:
ld1 {v3.8b}, [x1]
eor v3.8b, v0.8b, v3.8b
st1 {v3.8b}, [x0]
FRAME_POP
ret
.Lctrcarry:
umov x7, v4.d[0] /* load upper word of ctr */
rev x7, x7 /* ... to handle the carry */
add x7, x7, #1
rev x7, x7
ins v4.d[0], x7
b .Lctrloop
.Lctrhalfblock:
ld1 {v3.8b}, [x1]
eor v3.8b, v0.8b, v3.8b
st1 {v3.8b}, [x0]
.Lctrout:
FRAME_POP
ret
b .Lctrcarrydone
AES_ENDPROC(aes_ctr_encrypt)
.ltorg
@@ -386,7 +382,8 @@ AES_ENDPROC(aes_ctr_encrypt)
.endm
.Lxts_mul_x:
.word 1, 0, 0x87, 0
CPU_LE( .quad 1, 0x87 )
CPU_BE( .quad 0x87, 1 )
AES_ENTRY(aes_xts_encrypt)
FRAME_PUSH


@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#define AES_ENTRY(func) ENTRY(neon_ ## func)
#define AES_ENDPROC(func) ENDPROC(neon_ ## func)
@@ -83,13 +84,13 @@
.endm
.macro do_block, enc, in, rounds, rk, rkp, i
ld1 {v15.16b}, [\rk]
ld1 {v15.4s}, [\rk]
add \rkp, \rk, #16
mov \i, \rounds
1111: eor \in\().16b, \in\().16b, v15.16b /* ^round key */
tbl \in\().16b, {\in\().16b}, v13.16b /* ShiftRows */
sub_bytes \in
ld1 {v15.16b}, [\rkp], #16
ld1 {v15.4s}, [\rkp], #16
subs \i, \i, #1
beq 2222f
.if \enc == 1
@@ -229,7 +230,7 @@
.endm
.macro do_block_2x, enc, in0, in1 rounds, rk, rkp, i
ld1 {v15.16b}, [\rk]
ld1 {v15.4s}, [\rk]
add \rkp, \rk, #16
mov \i, \rounds
1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */
@@ -237,7 +238,7 @@
sub_bytes_2x \in0, \in1
tbl \in0\().16b, {\in0\().16b}, v13.16b /* ShiftRows */
tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */
ld1 {v15.16b}, [\rkp], #16
ld1 {v15.4s}, [\rkp], #16
subs \i, \i, #1
beq 2222f
.if \enc == 1
@@ -254,7 +255,7 @@
.endm
.macro do_block_4x, enc, in0, in1, in2, in3, rounds, rk, rkp, i
ld1 {v15.16b}, [\rk]
ld1 {v15.4s}, [\rk]
add \rkp, \rk, #16
mov \i, \rounds
1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */
@@ -266,7 +267,7 @@
tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */
tbl \in2\().16b, {\in2\().16b}, v13.16b /* ShiftRows */
tbl \in3\().16b, {\in3\().16b}, v13.16b /* ShiftRows */
ld1 {v15.16b}, [\rkp], #16
ld1 {v15.4s}, [\rkp], #16
subs \i, \i, #1
beq 2222f
.if \enc == 1
@@ -306,12 +307,16 @@
.text
.align 4
.LForward_ShiftRows:
.byte 0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3
.byte 0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb
CPU_LE( .byte 0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3 )
CPU_LE( .byte 0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb )
CPU_BE( .byte 0xb, 0x6, 0x1, 0xc, 0x7, 0x2, 0xd, 0x8 )
CPU_BE( .byte 0x3, 0xe, 0x9, 0x4, 0xf, 0xa, 0x5, 0x0 )
.LReverse_ShiftRows:
.byte 0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb
.byte 0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3
CPU_LE( .byte 0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb )
CPU_LE( .byte 0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3 )
CPU_BE( .byte 0x3, 0x6, 0x9, 0xc, 0xf, 0x2, 0x5, 0x8 )
CPU_BE( .byte 0xb, 0xe, 0x1, 0x4, 0x7, 0xa, 0xd, 0x0 )
.LForward_Sbox:
.byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5


@@ -29,8 +29,8 @@
* struct ghash_key const *k, const char *head)
*/
ENTRY(pmull_ghash_update)
ld1 {SHASH.16b}, [x3]
ld1 {XL.16b}, [x1]
ld1 {SHASH.2d}, [x3]
ld1 {XL.2d}, [x1]
movi MASK.16b, #0xe1
ext SHASH2.16b, SHASH.16b, SHASH.16b, #8
shl MASK.2d, MASK.2d, #57
@ -74,6 +74,6 @@ CPU_LE( rev64 T1.16b, T1.16b )
cbnz w0, 0b
st1 {XL.16b}, [x1]
st1 {XL.2d}, [x1]
ret
ENDPROC(pmull_ghash_update)


@ -78,7 +78,7 @@ ENTRY(sha1_ce_transform)
ld1r {k3.4s}, [x6]
/* load state */
ldr dga, [x0]
ld1 {dgav.4s}, [x0]
ldr dgb, [x0, #16]
/* load sha1_ce_state::finalize */
@ -144,7 +144,7 @@ CPU_LE( rev32 v11.16b, v11.16b )
b 1b
/* store new state */
3: str dga, [x0]
3: st1 {dgav.4s}, [x0]
str dgb, [x0, #16]
ret
ENDPROC(sha1_ce_transform)


@ -85,7 +85,7 @@ ENTRY(sha2_ce_transform)
ld1 {v12.4s-v15.4s}, [x8]
/* load state */
ldp dga, dgb, [x0]
ld1 {dgav.4s, dgbv.4s}, [x0]
/* load sha256_ce_state::finalize */
ldr w4, [x0, #:lo12:sha256_ce_offsetof_finalize]
@ -148,6 +148,6 @@ CPU_LE( rev32 v19.16b, v19.16b )
b 1b
/* store new state */
3: stp dga, dgb, [x0]
3: st1 {dgav.4s, dgbv.4s}, [x0]
ret
ENDPROC(sha2_ce_transform)


@ -2,6 +2,7 @@
#define __ASM_ALTERNATIVE_H
#include <asm/cpufeature.h>
#include <asm/insn.h>
#ifndef __ASSEMBLY__
@ -90,24 +91,15 @@ void apply_alternatives(void *start, size_t length);
.endm
/*
* Begin an alternative code sequence.
* Alternative sequences
*
* The code that follows this macro will be assembled and linked as
* normal. There are no restrictions on this code.
*/
.macro alternative_if_not cap
.pushsection .altinstructions, "a"
altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
.popsection
661:
.endm
/*
* Provide the alternative code sequence.
* The code for the case where the capability is not present will be
* assembled and linked as normal. There are no restrictions on this
* code.
*
* The code that follows this macro is assembled into a special
* section to be used for dynamic patching. Code that follows this
* macro must:
* The code for the case where the capability is present will be
* assembled into a special section to be used for dynamic patching.
* Code for that case must:
*
* 1. Be exactly the same length (in bytes) as the default code
* sequence.
@ -116,8 +108,38 @@ void apply_alternatives(void *start, size_t length);
* alternative sequence it is defined in (branches into an
* alternative sequence are not fixed up).
*/
/*
* Begin an alternative code sequence.
*/
.macro alternative_if_not cap
.set .Lasm_alt_mode, 0
.pushsection .altinstructions, "a"
altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
.popsection
661:
.endm
.macro alternative_if cap
.set .Lasm_alt_mode, 1
.pushsection .altinstructions, "a"
altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f
.popsection
.pushsection .altinstr_replacement, "ax"
.align 2 /* So GAS knows label 661 is suitably aligned */
661:
.endm
/*
* Provide the other half of the alternative code sequence.
*/
.macro alternative_else
662: .pushsection .altinstr_replacement, "ax"
662:
.if .Lasm_alt_mode==0
.pushsection .altinstr_replacement, "ax"
.else
.popsection
.endif
663:
.endm
@ -125,11 +147,25 @@ void apply_alternatives(void *start, size_t length);
* Complete an alternative code sequence.
*/
.macro alternative_endif
664: .popsection
664:
.if .Lasm_alt_mode==0
.popsection
.endif
.org . - (664b-663b) + (662b-661b)
.org . - (662b-661b) + (664b-663b)
.endm
/*
* Provides a trivial alternative or default sequence consisting solely
* of NOPs. The number of NOPs is chosen automatically to match the
* previous case.
*/
.macro alternative_else_nop_endif
alternative_else
nops (662b-661b) / AARCH64_INSN_SIZE
alternative_endif
.endm
#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \
alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)


@ -119,6 +119,15 @@
dmb \opt
.endm
/*
* NOP sequence
*/
.macro nops, num
.rept \num
nop
.endr
.endm
/*
* Emit an entry into the exception table
*/
@ -395,15 +404,11 @@ alternative_endif
*/
.macro post_ttbr0_update_workaround
#ifdef CONFIG_CAVIUM_ERRATUM_27456
alternative_if_not ARM64_WORKAROUND_CAVIUM_27456
nop
nop
nop
alternative_else
alternative_if ARM64_WORKAROUND_CAVIUM_27456
ic iallu
dsb nsh
isb
alternative_endif
alternative_else_nop_endif
#endif
.endm


@ -20,6 +20,9 @@
#ifndef __ASSEMBLY__
#define __nops(n) ".rept " #n "\nnop\n.endr\n"
#define nops(n) asm volatile(__nops(n))
#define sev() asm volatile("sev" : : : "memory")
#define wfe() asm volatile("wfe" : : : "memory")
#define wfi() asm volatile("wfi" : : : "memory")


@ -21,10 +21,7 @@
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/sysreg.h>
#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
do { \


@ -223,9 +223,11 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
* Update the saved TTBR0_EL1 of the scheduled-in task as the previous
* value may have not been initialised yet (activate_mm caller) or the
* ASID has changed since the last run (following the context switch
* of another thread of the same process).
* of another thread of the same process). Avoid setting the reserved
* TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit).
*/
update_saved_ttbr0(tsk, next);
if (next != &init_mm)
update_saved_ttbr0(tsk, next);
}
#define deactivate_mm(tsk,mm) do { } while (0)


@ -21,8 +21,6 @@
#include <uapi/asm/ptrace.h>
#define _PSR_PAN_BIT 22
/* Current Exception Level values, as contained in CurrentEL */
#define CurrentEL_EL1 (1 << 2)
#define CurrentEL_EL2 (2 << 2)


@ -47,10 +47,10 @@ typedef unsigned long mm_segment_t;
struct thread_info {
unsigned long flags; /* low level flags */
mm_segment_t addr_limit; /* address limit */
struct task_struct *task; /* main task structure */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
u64 ttbr0; /* saved TTBR0_EL1 */
#endif
struct task_struct *task; /* main task structure */
int preempt_count; /* 0 => preemptable, <0 => bug */
int cpu; /* cpu */
};


@ -18,6 +18,10 @@
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H
#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>
#ifndef __ASSEMBLY__
/*
@ -26,11 +30,8 @@
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/kernel-pgtable.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/compiler.h>
@ -130,7 +131,7 @@ static inline void set_fs(mm_segment_t fs)
* User access enabling/disabling.
*/
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void uaccess_ttbr0_disable(void)
static inline void __uaccess_ttbr0_disable(void)
{
unsigned long ttbr;
@ -140,7 +141,7 @@ static inline void uaccess_ttbr0_disable(void)
isb();
}
static inline void uaccess_ttbr0_enable(void)
static inline void __uaccess_ttbr0_enable(void)
{
unsigned long flags;
@ -154,30 +155,44 @@ static inline void uaccess_ttbr0_enable(void)
isb();
local_irq_restore(flags);
}
#else
static inline void uaccess_ttbr0_disable(void)
static inline bool uaccess_ttbr0_disable(void)
{
if (!system_uses_ttbr0_pan())
return false;
__uaccess_ttbr0_disable();
return true;
}
static inline void uaccess_ttbr0_enable(void)
static inline bool uaccess_ttbr0_enable(void)
{
if (!system_uses_ttbr0_pan())
return false;
__uaccess_ttbr0_enable();
return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
return false;
}
static inline bool uaccess_ttbr0_enable(void)
{
return false;
}
#endif
#define __uaccess_disable(alt) \
do { \
if (system_uses_ttbr0_pan()) \
uaccess_ttbr0_disable(); \
else \
if (!uaccess_ttbr0_disable()) \
asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt, \
CONFIG_ARM64_PAN)); \
} while (0)
#define __uaccess_enable(alt) \
do { \
if (system_uses_ttbr0_pan()) \
uaccess_ttbr0_enable(); \
else \
if (!uaccess_ttbr0_enable()) \
asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt, \
CONFIG_ARM64_PAN)); \
} while (0)
@ -407,69 +422,62 @@ extern __must_check long strnlen_user(const char __user *str, long n);
#else /* __ASSEMBLY__ */
#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/kernel-pgtable.h>
/*
* User access enabling/disabling macros.
*/
.macro uaccess_ttbr0_disable, tmp1
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
.macro __uaccess_ttbr0_disable, tmp1
mrs \tmp1, ttbr1_el1 // swapper_pg_dir
add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
isb
.endm
.macro uaccess_ttbr0_enable, tmp1
.macro __uaccess_ttbr0_enable, tmp1
get_thread_info \tmp1
ldr \tmp1, [\tmp1, #TI_TTBR0] // load saved TTBR0_EL1
ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1
msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1
isb
.endm
.macro uaccess_ttbr0_disable, tmp1
alternative_if_not ARM64_HAS_PAN
__uaccess_ttbr0_disable \tmp1
alternative_else_nop_endif
.endm
.macro uaccess_ttbr0_enable, tmp1, tmp2
alternative_if_not ARM64_HAS_PAN
save_and_disable_irq \tmp2 // avoid preemption
__uaccess_ttbr0_enable \tmp1
restore_irq \tmp2
alternative_else_nop_endif
.endm
#else
.macro uaccess_ttbr0_disable, tmp1
.endm
.macro uaccess_ttbr0_enable, tmp1, tmp2
.endm
#endif
/*
* These macros are no-ops when UAO is present.
*/
.macro uaccess_disable_not_uao, tmp1
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
uaccess_ttbr0_disable \tmp1
alternative_else
nop
nop
nop
nop
alternative_endif
#endif
alternative_if_not ARM64_ALT_PAN_NOT_UAO
nop
alternative_else
alternative_if ARM64_ALT_PAN_NOT_UAO
SET_PSTATE_PAN(1)
alternative_endif
alternative_else_nop_endif
.endm
.macro uaccess_enable_not_uao, tmp1, tmp2
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
save_and_disable_irq \tmp2 // avoid preemption
uaccess_ttbr0_enable \tmp1
restore_irq \tmp2
alternative_else
nop
nop
nop
nop
nop
nop
nop
alternative_endif
#endif
alternative_if_not ARM64_ALT_PAN_NOT_UAO
nop
alternative_else
uaccess_ttbr0_enable \tmp1, \tmp2
alternative_if ARM64_ALT_PAN_NOT_UAO
SET_PSTATE_PAN(0)
alternative_endif
alternative_else_nop_endif
.endm
#endif /* __ASSEMBLY__ */


@ -77,6 +77,7 @@ struct user_fpsimd_state {
__uint128_t vregs[32];
__u32 fpsr;
__u32 fpcr;
__u32 __reserved[2];
};
struct user_hwdebug_state {


@ -14,7 +14,6 @@
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <asm/opcodes.h>


@ -38,11 +38,11 @@ int main(void)
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
DEFINE(TI_TTBR0, offsetof(struct thread_info, ttbr0));
#endif
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
DEFINE(TSK_TI_TTBR0, offsetof(struct thread_info, ttbr0));
#endif
BLANK();
DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context));
BLANK();


@ -121,11 +121,9 @@
* feature as all TTBR0_EL1 accesses are disabled, not just those to
* user mappings.
*/
alternative_if_not ARM64_HAS_PAN
nop
alternative_else
alternative_if ARM64_HAS_PAN
b 1f // skip TTBR0 PAN
alternative_endif
alternative_else_nop_endif
.if \el != 0
mrs x21, ttbr0_el1
@ -135,7 +133,7 @@ alternative_endif
and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR
.endif
uaccess_ttbr0_disable x21
__uaccess_ttbr0_disable x21
1:
#endif
@ -184,17 +182,15 @@ alternative_endif
* Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
* PAN bit checking.
*/
alternative_if_not ARM64_HAS_PAN
nop
alternative_else
alternative_if ARM64_HAS_PAN
b 2f // skip TTBR0 PAN
alternative_endif
alternative_else_nop_endif
.if \el != 0
tbnz x22, #_PSR_PAN_BIT, 1f // Skip re-enabling TTBR0 access if previously disabled
tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
.endif
uaccess_ttbr0_enable x0
__uaccess_ttbr0_enable x0
.if \el == 0
/*
@ -687,7 +683,7 @@ el0_inv:
mov x0, sp
mov x1, #BAD_SYNC
mov x2, x25
bl bad_mode
bl bad_el0_sync
b ret_to_user
ENDPROC(el0_sync)


@ -550,6 +550,8 @@ static int hw_break_set(struct task_struct *target,
/* (address, ctrl) registers */
limit = regset->n * regset->size;
while (count && offset < limit) {
if (count < PTRACE_HBP_ADDR_SZ)
return -EINVAL;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
offset, offset + PTRACE_HBP_ADDR_SZ);
if (ret)
@ -559,6 +561,8 @@ static int hw_break_set(struct task_struct *target,
return ret;
offset += PTRACE_HBP_ADDR_SZ;
if (!count)
break;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
offset, offset + PTRACE_HBP_CTRL_SZ);
if (ret)
@ -595,7 +599,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
int ret;
struct user_pt_regs newregs;
struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
if (ret)
@ -625,7 +629,8 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
int ret;
struct user_fpsimd_state newstate;
struct user_fpsimd_state newstate =
target->thread.fpsimd_state.user_fpsimd;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
if (ret)
@ -649,7 +654,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
int ret;
unsigned long tls;
unsigned long tls = target->thread.tp_value;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
if (ret)
@ -675,7 +680,8 @@ static int system_call_set(struct task_struct *target,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int syscallno, ret;
int syscallno = task_pt_regs(target)->syscallno;
int ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
if (ret)
@ -947,7 +953,7 @@ static int compat_tls_set(struct task_struct *target,
const void __user *ubuf)
{
int ret;
compat_ulong_t tls;
compat_ulong_t tls = target->thread.tp_value;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
if (ret)

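The ptrace hunks above add missing bounds checks and, more importantly, seed the temporary regset buffers (newregs, newstate, tls, syscallno) from the task's current state, so a short write from the tracer only overwrites the leading bytes instead of filling the rest from an uninitialised local. A stand-alone C sketch of that idea, with illustrative types and sizes:

#include <stdio.h>
#include <string.h>

struct regs { unsigned long r[4]; };

/* Mimics a short regset write: only 'count' bytes arrive from the tracer. */
static void regs_set(struct regs *task_regs, const void *buf, size_t count)
{
	struct regs newregs = *task_regs;	/* seed with the current values */

	if (count > sizeof(newregs))
		count = sizeof(newregs);
	memcpy(&newregs, buf, count);		/* partial overwrite only */
	*task_regs = newregs;			/* untouched tail is preserved */
}

int main(void)
{
	struct regs t = { { 1, 2, 3, 4 } };
	unsigned long first = 42;

	regs_set(&t, &first, sizeof(first));	/* short write: only r[0] */
	printf("%lu %lu %lu %lu\n", t.r[0], t.r[1], t.r[2], t.r[3]);	/* 42 2 3 4 */
	return 0;
}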

@ -509,22 +509,15 @@ const char *esr_get_class_string(u32 esr)
}
/*
* bad_mode handles the impossible case in the exception vector.
* bad_mode handles the impossible case in the exception vector. This is always
* fatal.
*/
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
siginfo_t info;
void __user *pc = (void __user *)instruction_pointer(regs);
console_verbose();
pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n",
handler[reason], esr, esr_get_class_string(esr));
__show_regs(regs);
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_ILLOPC;
info.si_addr = pc;
if (esr >> ESR_ELx_EC_SHIFT == ESR_ELx_EC_SERROR) {
pr_crit("System error detected. ESR.ISS = %08x\n",
@ -532,7 +525,34 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
arm64_check_cache_ecc(NULL);
}
arm64_notify_die("Oops - bad mode", regs, &info, 0);
die("Oops - bad mode", regs, 0);
local_irq_disable();
panic("bad mode");
}
/*
* bad_el0_sync handles unexpected, but potentially recoverable synchronous
* exceptions taken from EL0. Unlike bad_mode, this returns.
*/
asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
siginfo_t info;
void __user *pc = (void __user *)instruction_pointer(regs);
console_verbose();
pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n",
smp_processor_id(), esr, esr_get_class_string(esr));
__show_regs(regs);
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_ILLOPC;
info.si_addr = pc;
current->thread.fault_address = 0;
current->thread.fault_code = 0;
force_sig_info(info.si_signo, &info, current);
}
void __pte_error(const char *file, int line, unsigned long val)


@ -17,9 +17,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>
#include <asm/uaccess.h>
.text


@ -16,10 +16,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/cache.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>
#include <asm/uaccess.h>
/*


@ -18,10 +18,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/cache.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>
#include <asm/uaccess.h>
/*


@ -16,10 +16,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/cache.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>
#include <asm/uaccess.h>
/*


@ -23,6 +23,7 @@
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/uaccess.h>
/*
* __flush_dcache_all()
@ -121,6 +122,7 @@ ENTRY(flush_icache_range)
* - end - virtual end address of region
*/
ENTRY(__flush_cache_user_range)
uaccess_ttbr0_enable x2, x3
dcache_line_size x2, x3
sub x3, x2, #1
bic x4, x0, x3
@ -142,10 +144,12 @@ USER(9f, ic ivau, x4 ) // invalidate I line PoU
dsb ish
isb
mov x0, #0
1:
uaccess_ttbr0_disable x1
ret
9:
mov x0, #-EFAULT
ret
b 1b
ENDPROC(flush_icache_range)
ENDPROC(__flush_cache_user_range)


@ -528,10 +528,10 @@ static const struct fault_info {
{ do_bad, SIGBUS, 0, "unknown 17" },
{ do_bad, SIGBUS, 0, "unknown 18" },
{ do_bad, SIGBUS, 0, "unknown 19" },
{ do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous parity error" },
{ do_bad, SIGBUS, 0, "unknown 25" },
{ do_bad, SIGBUS, 0, "unknown 26" },


@ -90,7 +90,6 @@ ENTRY(privcmd_call)
mov x2, x3
mov x3, x4
mov x4, x5
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
/*
* Privcmd calls are issued by the userspace. The kernel needs to
* enable access to TTBR0_EL1 as the hypervisor would issue stage 1
@ -99,15 +98,12 @@ ENTRY(privcmd_call)
* need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
* is enabled (it implies that hardware UAO and PAN disabled).
*/
uaccess_enable_not_uao x6, x7
#endif
uaccess_ttbr0_enable x6, x7
hvc XEN_IMM
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
/*
* Disable userspace access from kernel once the hyp call completed.
*/
uaccess_disable_not_uao x6
#endif
uaccess_ttbr0_disable x6
ret
ENDPROC(privcmd_call);


@ -10,6 +10,9 @@
asflags-y += $(LINUXINCLUDE)
ccflags-y += -O2 $(LINUXINCLUDE)
ifdef CONFIG_ETRAX_AXISFLASHMAP
arch-$(CONFIG_ETRAX_ARCH_V10) = v10
arch-$(CONFIG_ETRAX_ARCH_V32) = v32
@ -28,6 +31,11 @@ $(obj)/rescue.bin: $(obj)/rescue.o FORCE
$(call if_changed,objcopy)
cp -p $(obj)/rescue.bin $(objtree)
else
$(obj)/rescue.bin:
endif
$(obj)/testrescue.bin: $(obj)/testrescue.o
$(OBJCOPY) $(OBJCOPYFLAGS) $(obj)/testrescue.o tr.bin
# Pad it to 784 bytes


@ -114,6 +114,6 @@ static inline void __udelay(unsigned long usecs)
*/
#define HZSCALE (268435456 / (1000000 / HZ))
#define ndelay(n) __delay(DIV_ROUND_UP((n) * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6), 1000));
#define ndelay(n) __delay(DIV_ROUND_UP((n) * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6), 1000))
#endif /* defined(_M68K_DELAY_H) */
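The only change in this header is dropping the stray trailing semicolon from the ndelay() macro body. With it in place, the expansion contains an extra empty statement, which breaks any caller that uses the macro as a single statement in an if/else. A self-contained illustration of the failure mode (the caller and the stand-in __delay are hypothetical):

#include <stdio.h>

#define __delay(n) ((void)(n))
/* Old, broken form (note the trailing semicolon):
 *     #define ndelay(n) __delay(n);
 * It expands the if body below to "__delay(100);;", so the else has no
 * matching if and the file fails to compile. The fixed form: */
#define ndelay(n) __delay(n)

static void wait_for_device(int ready)
{
	if (!ready)
		ndelay(100);
	else
		puts("already ready");
}

int main(void)
{
	wait_for_device(1);
	return 0;
}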


@ -324,8 +324,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
#endif
/* Invalidate the icache for these ranges */
local_flush_icache_range((unsigned long)gebase,
(unsigned long)gebase + ALIGN(size, PAGE_SIZE));
flush_icache_range((unsigned long)gebase,
(unsigned long)gebase + ALIGN(size, PAGE_SIZE));
/*
* Allocate comm page for guest kernel, a TLB will be reserved for


@ -6,7 +6,7 @@
#endif
#include <linux/compiler.h>
#include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */
#include <asm/types.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>
#include <linux/atomic.h>
@ -17,6 +17,12 @@
* to include/asm-i386/bitops.h or kerneldoc
*/
#if __BITS_PER_LONG == 64
#define SHIFT_PER_LONG 6
#else
#define SHIFT_PER_LONG 5
#endif
#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
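SHIFT_PER_LONG moves out of the exported uapi header and is derived from __BITS_PER_LONG here instead; it is simply log2 of the word size, used by the bitops together with CHOP_SHIFTCOUNT() to split a bit number into a word index and a bit offset. A small stand-alone sketch of that arithmetic for the 64-bit case (values are illustrative):

#include <assert.h>

#define BITS_PER_LONG	64
#define SHIFT_PER_LONG	6	/* log2(64) */
#define CHOP_SHIFTCOUNT(x) (((unsigned long)(x)) & (BITS_PER_LONG - 1))

int main(void)
{
	unsigned long nr = 130;				/* bit 130 of a bitmap */

	assert((nr >> SHIFT_PER_LONG) == 2);		/* stored in word 2 */
	assert(CHOP_SHIFTCOUNT(nr) == 2);		/* at bit 2 within that word */
	return 0;
}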


@ -65,9 +65,9 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
unsigned long flags; \
spin_lock_irqsave(&pa_tlb_lock, flags); \
old_pte = *ptep; \
set_pte(ptep, pteval); \
if (pte_inserted(old_pte)) \
purge_tlb_entries(mm, addr); \
set_pte(ptep, pteval); \
spin_unlock_irqrestore(&pa_tlb_lock, flags); \
} while (0)
@ -478,8 +478,8 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
spin_unlock_irqrestore(&pa_tlb_lock, flags);
return 0;
}
set_pte(ptep, pte_mkold(pte));
purge_tlb_entries(vma->vm_mm, addr);
set_pte(ptep, pte_mkold(pte));
spin_unlock_irqrestore(&pa_tlb_lock, flags);
return 1;
}
@ -492,9 +492,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
spin_lock_irqsave(&pa_tlb_lock, flags);
old_pte = *ptep;
set_pte(ptep, __pte(0));
if (pte_inserted(old_pte))
purge_tlb_entries(mm, addr);
set_pte(ptep, __pte(0));
spin_unlock_irqrestore(&pa_tlb_lock, flags);
return old_pte;
@ -504,8 +504,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
{
unsigned long flags;
spin_lock_irqsave(&pa_tlb_lock, flags);
set_pte(ptep, pte_wrprotect(*ptep));
purge_tlb_entries(mm, addr);
set_pte(ptep, pte_wrprotect(*ptep));
spin_unlock_irqrestore(&pa_tlb_lock, flags);
}


@ -3,10 +3,8 @@
#if defined(__LP64__)
#define __BITS_PER_LONG 64
#define SHIFT_PER_LONG 6
#else
#define __BITS_PER_LONG 32
#define SHIFT_PER_LONG 5
#endif
#include <asm-generic/bitsperlong.h>


@ -1,6 +1,7 @@
#ifndef _PARISC_SWAB_H
#define _PARISC_SWAB_H
#include <asm/bitsperlong.h>
#include <linux/types.h>
#include <linux/compiler.h>
@ -38,7 +39,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
}
#define __arch_swab32 __arch_swab32
#if BITS_PER_LONG > 32
#if __BITS_PER_LONG > 32
/*
** From "PA-RISC 2.0 Architecture", HP Professional Books.
** See Appendix I page 8 , "Endian Byte Swapping".
@ -61,6 +62,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
return x;
}
#define __arch_swab64 __arch_swab64
#endif /* BITS_PER_LONG > 32 */
#endif /* __BITS_PER_LONG > 32 */
#endif /* _PARISC_SWAB_H */


@ -375,6 +375,15 @@ void __init parisc_setup_cache_timing(void)
/* calculate TLB flush threshold */
/* On SMP machines, skip the TLB measure of kernel text which
* has been mapped as huge pages. */
if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
threshold = max(cache_info.it_size, cache_info.dt_size);
threshold *= PAGE_SIZE;
threshold /= num_online_cpus();
goto set_tlb_threshold;
}
alltime = mfctl(16);
flush_tlb_all();
alltime = mfctl(16) - alltime;
@ -393,6 +402,8 @@ void __init parisc_setup_cache_timing(void)
alltime, size, rangetime);
threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime);
set_tlb_threshold:
if (threshold)
parisc_tlb_flush_threshold = threshold;
printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",


@ -886,19 +886,10 @@ ENTRY(flush_dcache_page_asm)
fdc,m r31(%r28)
fdc,m r31(%r28)
fdc,m r31(%r28)
cmpb,COND(<<) %r28, %r25,1b
cmpb,COND(<<) %r28, %r25,1b
fdc,m r31(%r28)
sync
#ifdef CONFIG_PA20
pdtlb,l %r0(%r25)
#else
tlb_lock %r20,%r21,%r22
pdtlb %r0(%r25)
tlb_unlock %r20,%r21,%r22
#endif
bv %r0(%r2)
nop
.exit
@ -973,17 +964,6 @@ ENTRY(flush_icache_page_asm)
fic,m %r31(%sr4,%r28)
sync
#ifdef CONFIG_PA20
pdtlb,l %r0(%r28)
pitlb,l %r0(%sr4,%r25)
#else
tlb_lock %r20,%r21,%r22
pdtlb %r0(%r28)
pitlb %r0(%sr4,%r25)
tlb_unlock %r20,%r21,%r22
#endif
bv %r0(%r2)
nop
.exit


@ -57,11 +57,6 @@ __system_reset_overlay:
bctr
1:
/* Save the value at addr zero for a null pointer write check later. */
li r4, 0
lwz r3, 0(r4)
/* Primary delays then goes to _zimage_start in wrapper. */
or 31, 31, 31 /* db16cyc */


@ -119,13 +119,12 @@ void ps3_copy_vectors(void)
flush_cache((void *)0x100, 512);
}
void platform_init(unsigned long null_check)
void platform_init(void)
{
const u32 heapsize = 0x1000000 - (u32)_end; /* 16MiB */
void *chosen;
unsigned long ft_addr;
u64 rm_size;
unsigned long val;
console_ops.write = ps3_console_write;
platform_ops.exit = ps3_exit;
@ -153,11 +152,6 @@ void platform_init(unsigned long null_check)
printf(" flat tree at 0x%lx\n\r", ft_addr);
val = *(unsigned long *)0;
if (val != null_check)
printf("null check failed: %lx != %lx\n\r", val, null_check);
((kernel_entry_t)0)(ft_addr, 0, NULL);
ps3_exit();


@ -545,6 +545,7 @@ struct kvm_vcpu_arch {
u64 tfiar;
u32 cr_tm;
u64 xer_tm;
u64 lr_tm;
u64 ctr_tm;
u64 amr_tm;


@ -587,6 +587,7 @@ struct kvm_get_htab_header {
#define KVM_REG_PPC_TM_VSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
#define KVM_REG_PPC_TM_DSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
#define KVM_REG_PPC_TM_TAR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
#define KVM_REG_PPC_TM_XER (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x6a)
/* PPC64 eXternal Interrupt Controller Specification */
#define KVM_DEV_XICS_GRP_SOURCES 1 /* 64-bit source attributes */


@ -584,6 +584,7 @@ int main(void)
DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr));
DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm));
DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm));
DEFINE(VCPU_XER_TM, offsetof(struct kvm_vcpu, arch.xer_tm));
DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm));
DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm));
DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm));


@ -485,7 +485,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
{
struct eeh_pe *pe = (struct eeh_pe *)data;
bool *clear_sw_state = flag;
bool clear_sw_state = *(bool *)flag;
int i, rc = 1;
for (i = 0; rc && i < 3; i++)
@ -612,8 +612,10 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
/* Clear frozen state */
rc = eeh_clear_pe_frozen_state(pe, false);
if (rc)
if (rc) {
pci_unlock_rescan_remove();
return rc;
}
/* Give the system 5 seconds to finish running the user-space
* hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,


@ -180,6 +180,7 @@ static int ibmebus_create_device(struct device_node *dn)
static int ibmebus_create_devices(const struct of_device_id *matches)
{
struct device_node *root, *child;
struct device *dev;
int ret = 0;
root = of_find_node_by_path("/");
@ -188,9 +189,12 @@ static int ibmebus_create_devices(const struct of_device_id *matches)
if (!of_match_node(matches, child))
continue;
if (bus_find_device(&ibmebus_bus_type, NULL, child,
ibmebus_match_node))
dev = bus_find_device(&ibmebus_bus_type, NULL, child,
ibmebus_match_node);
if (dev) {
put_device(dev);
continue;
}
ret = ibmebus_create_device(child);
if (ret) {
@ -262,6 +266,7 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus,
const char *buf, size_t count)
{
struct device_node *dn = NULL;
struct device *dev;
char *path;
ssize_t rc = 0;
@ -269,8 +274,10 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus,
if (!path)
return -ENOMEM;
if (bus_find_device(&ibmebus_bus_type, NULL, path,
ibmebus_match_path)) {
dev = bus_find_device(&ibmebus_bus_type, NULL, path,
ibmebus_match_path);
if (dev) {
put_device(dev);
printk(KERN_WARNING "%s: %s has already been probed\n",
__func__, path);
rc = -EEXIST;
@ -307,6 +314,7 @@ static ssize_t ibmebus_store_remove(struct bus_type *bus,
if ((dev = bus_find_device(&ibmebus_bus_type, NULL, path,
ibmebus_match_path))) {
of_device_unregister(to_platform_device(dev));
put_device(dev);
kfree(path);
return count;


@ -44,7 +44,7 @@
std r0,0(r1); \
ptesync; \
ld r0,0(r1); \
1: cmp cr0,r0,r0; \
1: cmpd cr0,r0,r0; \
bne 1b; \
IDLE_INST; \
b .

View file

@ -313,7 +313,7 @@ _GLOBAL(flush_instruction_cache)
lis r3, KERNELBASE@h
iccci 0,r3
#endif
#elif CONFIG_FSL_BOOKE
#elif defined(CONFIG_FSL_BOOKE)
BEGIN_FTR_SECTION
mfspr r3,SPRN_L1CSR0
ori r3,r3,L1CSR0_CFI|L1CSR0_CLFC


@ -2664,6 +2664,9 @@ static void __init prom_find_boot_cpu(void)
cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
if (!PHANDLE_VALID(cpu_pkg))
return;
prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
prom.cpu = be32_to_cpu(rval);

View file

@ -1186,6 +1186,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_TM_CR:
*val = get_reg_val(id, vcpu->arch.cr_tm);
break;
case KVM_REG_PPC_TM_XER:
*val = get_reg_val(id, vcpu->arch.xer_tm);
break;
case KVM_REG_PPC_TM_LR:
*val = get_reg_val(id, vcpu->arch.lr_tm);
break;
@ -1393,6 +1396,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_TM_CR:
vcpu->arch.cr_tm = set_reg_val(id, *val);
break;
case KVM_REG_PPC_TM_XER:
vcpu->arch.xer_tm = set_reg_val(id, *val);
break;
case KVM_REG_PPC_TM_LR:
vcpu->arch.lr_tm = set_reg_val(id, *val);
break;


@ -653,6 +653,8 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
HPTE_V_ABSENT);
do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags),
true);
/* Don't lose R/C bit updates done by hardware */
r |= be64_to_cpu(hpte[1]) & (HPTE_R_R | HPTE_R_C);
hpte[1] = cpu_to_be64(r);
}
}


@ -2514,11 +2514,13 @@ kvmppc_save_tm:
mfctr r7
mfspr r8, SPRN_AMR
mfspr r10, SPRN_TAR
mfxer r11
std r5, VCPU_LR_TM(r9)
stw r6, VCPU_CR_TM(r9)
std r7, VCPU_CTR_TM(r9)
std r8, VCPU_AMR_TM(r9)
std r10, VCPU_TAR_TM(r9)
std r11, VCPU_XER_TM(r9)
/* Restore r12 as trap number. */
lwz r12, VCPU_TRAP(r9)
@ -2611,11 +2613,13 @@ kvmppc_restore_tm:
ld r7, VCPU_CTR_TM(r4)
ld r8, VCPU_AMR_TM(r4)
ld r9, VCPU_TAR_TM(r4)
ld r10, VCPU_XER_TM(r4)
mtlr r5
mtcr r6
mtctr r7
mtspr SPRN_AMR, r8
mtspr SPRN_TAR, r9
mtxer r10
/*
* Load up PPR and DSCR values but don't put them in the actual SPRs


@ -565,8 +565,10 @@ static ssize_t prng_tdes_read(struct file *file, char __user *ubuf,
prng_data->prngws.byte_counter += n;
prng_data->prngws.reseed_counter += n;
if (copy_to_user(ubuf, prng_data->buf, chunk))
return -EFAULT;
if (copy_to_user(ubuf, prng_data->buf, chunk)) {
ret = -EFAULT;
break;
}
nbytes -= chunk;
ret += chunk;


@ -963,6 +963,11 @@ static int s390_fpregs_set(struct task_struct *target,
if (target == current)
save_fpu_regs();
if (MACHINE_HAS_VX)
convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
else
memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
/* If setting FPC, must validate it first. */
if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
@ -1067,6 +1072,9 @@ static int s390_vxrs_low_set(struct task_struct *target,
if (target == current)
save_fpu_regs();
for (i = 0; i < __NUM_VXRS_LOW; i++)
vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
if (rc == 0)
for (i = 0; i < __NUM_VXRS_LOW; i++)


@ -111,7 +111,7 @@ static int tile_gpr_set(struct task_struct *target,
const void *kbuf, const void __user *ubuf)
{
int ret;
struct pt_regs regs;
struct pt_regs regs = *task_pt_regs(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
sizeof(regs));


@ -89,7 +89,7 @@ CONFIG_SYN_COOKIES=y
CONFIG_INET_ESP=y
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
CONFIG_INET_DIAG_DESTROY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_IPV6_OPTIMISTIC_DAD=y


@ -87,7 +87,7 @@ CONFIG_SYN_COOKIES=y
CONFIG_INET_ESP=y
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
CONFIG_INET_DIAG_DESTROY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_IPV6_OPTIMISTIC_DAD=y


@ -766,8 +766,8 @@ ftrace_graph_call:
jmp ftrace_stub
#endif
.globl ftrace_stub
ftrace_stub:
/* This is weak to keep gas from relaxing the jumps */
WEAK(ftrace_stub)
ret
END(ftrace_caller)


@ -2115,6 +2115,7 @@ static inline void __init check_timer(void)
if (idx != -1 && irq_trigger(idx))
unmask_ioapic_irq(irq_get_chip_data(0));
}
irq_domain_deactivate_irq(irq_data);
irq_domain_activate_irq(irq_data);
if (timer_irq_works()) {
if (disable_timer_pin_1 > 0)
@ -2136,6 +2137,7 @@ static inline void __init check_timer(void)
* legacy devices should be connected to IO APIC #0
*/
replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
irq_domain_deactivate_irq(irq_data);
irq_domain_activate_irq(irq_data);
legacy_pic->unmask(0);
if (timer_irq_works()) {


@ -1129,7 +1129,7 @@ static __init int setup_disablecpuid(char *arg)
{
int bit;
if (get_option(&arg, &bit) && bit < NCAPINTS*32)
if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32)
setup_clear_cpu_cap(bit);
else
return 0;


@ -67,7 +67,7 @@ u64 x86_perf_event_update(struct perf_event *event)
int shift = 64 - x86_pmu.cntval_bits;
u64 prev_raw_count, new_raw_count;
int idx = hwc->idx;
s64 delta;
u64 delta;
if (idx == INTEL_PMC_IDX_FIXED_BTS)
return 0;


@ -3636,7 +3636,7 @@ __init int intel_pmu_init(void)
/* Support full width counters using alternative MSR range */
if (x86_pmu.intel_cap.full_width_write) {
x86_pmu.max_period = x86_pmu.cntval_mask;
x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
x86_pmu.perfctr = MSR_IA32_PMC0;
pr_cont("full-width counters, ");
}


@ -351,6 +351,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
} else {
struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
disable_irq(hdev->irq);
irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));


@ -180,7 +180,8 @@ GLOBAL(ftrace_graph_call)
jmp ftrace_stub
#endif
GLOBAL(ftrace_stub)
/* This is weak to keep gas from relaxing the jumps */
WEAK(ftrace_stub)
retq
END(ftrace_caller)


@ -172,6 +172,7 @@
#define NearBranch ((u64)1 << 52) /* Near branches */
#define No16 ((u64)1 << 53) /* No 16 bit operand */
#define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */
#define Aligned16 ((u64)1 << 55) /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
@ -434,6 +435,26 @@ FOP_END;
FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;
/*
* XXX: inoutclob user must know where the argument is being expanded.
* Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
*/
#define asm_safe(insn, inoutclob...) \
({ \
int _fault = 0; \
\
asm volatile("1:" insn "\n" \
"2:\n" \
".pushsection .fixup, \"ax\"\n" \
"3: movl $1, %[_fault]\n" \
" jmp 2b\n" \
".popsection\n" \
_ASM_EXTABLE(1b, 3b) \
: [_fault] "+qm"(_fault) inoutclob ); \
\
_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
enum x86_intercept intercept,
enum x86_intercept_stage stage)
@ -620,21 +641,24 @@ static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
* depending on whether they're AVX encoded or not.
*
* Also included is CMPXCHG16B which is not a vector instruction, yet it is
* subject to the same check.
* subject to the same check. FXSAVE and FXRSTOR are checked here too as their
* 512 bytes of data must be aligned to a 16 byte boundary.
*/
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
if (likely(size < 16))
return false;
return 1;
if (ctxt->d & Aligned)
return true;
return size;
else if (ctxt->d & Unaligned)
return false;
return 1;
else if (ctxt->d & Avx)
return false;
return 1;
else if (ctxt->d & Aligned16)
return 16;
else
return true;
return size;
}
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
@ -692,7 +716,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
}
break;
}
if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
if (la & (insn_alignment(ctxt, size) - 1))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
bad:
@ -779,6 +803,20 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}
static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
void *data,
unsigned int size)
{
int rc;
ulong linear;
rc = linearize(ctxt, addr, size, true, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
}
/*
* Prefetch the remaining bytes of the instruction without crossing page
* boundary if they are not in fetch_cache yet.
@ -1532,7 +1570,6 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
&ctxt->exception);
}
/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg, u8 cpl,
enum x86_transfer_type transfer,
@ -1569,20 +1606,34 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
rpl = selector & 3;
/* NULL selector is not valid for TR, CS and SS (except for long mode) */
if ((seg == VCPU_SREG_CS
|| (seg == VCPU_SREG_SS
&& (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
|| seg == VCPU_SREG_TR)
&& null_selector)
goto exception;
/* TR should be in GDT only */
if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
goto exception;
if (null_selector) /* for NULL selector skip all following checks */
/* NULL selector is not valid for TR, CS and (except for long mode) SS */
if (null_selector) {
if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
goto exception;
if (seg == VCPU_SREG_SS) {
if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
goto exception;
/*
* ctxt->ops->set_segment expects the CPL to be in
* SS.DPL, so fake an expand-up 32-bit data segment.
*/
seg_desc.type = 3;
seg_desc.p = 1;
seg_desc.s = 1;
seg_desc.dpl = cpl;
seg_desc.d = 1;
seg_desc.g = 1;
}
/* Skip all following checks */
goto load;
}
ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
@ -1698,6 +1749,21 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg)
{
u8 cpl = ctxt->ops->cpl(ctxt);
/*
* None of MOV, POP and LSS can load a NULL selector in CPL=3, but
* they can load it at CPL<3 (Intel's manual says only LSS can,
* but it's wrong).
*
* However, the Intel manual says that putting IST=1/DPL=3 in
* an interrupt gate will result in SS=3 (the AMD manual instead
* says it doesn't), so allow SS=3 in __load_segment_descriptor
* and only forbid it here.
*/
if (seg == VCPU_SREG_SS && selector == 3 &&
ctxt->mode == X86EMUL_MODE_PROT64)
return emulate_exception(ctxt, GP_VECTOR, 0, true);
return __load_segment_descriptor(ctxt, selector, seg, cpl,
X86_TRANSFER_NONE, NULL);
}
@ -3646,8 +3712,8 @@ static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
}
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return segmented_write(ctxt, ctxt->dst.addr.mem,
&desc_ptr, 2 + ctxt->op_bytes);
return segmented_write_std(ctxt, ctxt->dst.addr.mem,
&desc_ptr, 2 + ctxt->op_bytes);
}
static int em_sgdt(struct x86_emulate_ctxt *ctxt)
@ -3830,6 +3896,131 @@ static int em_movsxd(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
static int check_fxsr(struct x86_emulate_ctxt *ctxt)
{
u32 eax = 1, ebx, ecx = 0, edx;
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
if (!(edx & FFL(FXSR)))
return emulate_ud(ctxt);
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
/*
* Don't emulate a case that should never be hit, instead of working
* around a lack of fxsave64/fxrstor64 on old compilers.
*/
if (ctxt->mode >= X86EMUL_MODE_PROT64)
return X86EMUL_UNHANDLEABLE;
return X86EMUL_CONTINUE;
}
/*
* FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
* 1) 16 bit mode
* 2) 32 bit mode
* - like (1), but FIP and FDP (foo) are only 16 bit. At least Intel CPUs
* preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
* save and restore
* 3) 64-bit mode with REX.W prefix
* - like (2), but XMM 8-15 are being saved and restored
* 4) 64-bit mode without REX.W prefix
* - like (3), but FIP and FDP are 64 bit
*
* Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
* desired result. (4) is not emulated.
*
* Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
* and FPU DS) should match.
*/
static int em_fxsave(struct x86_emulate_ctxt *ctxt)
{
struct fxregs_state fx_state;
size_t size;
int rc;
rc = check_fxsr(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->ops->get_fpu(ctxt);
rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
ctxt->ops->put_fpu(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR)
size = offsetof(struct fxregs_state, xmm_space[8 * 16/4]);
else
size = offsetof(struct fxregs_state, xmm_space[0]);
return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
}
static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
struct fxregs_state *new)
{
int rc = X86EMUL_CONTINUE;
struct fxregs_state old;
rc = asm_safe("fxsave %[fx]", , [fx] "+m"(old));
if (rc != X86EMUL_CONTINUE)
return rc;
/*
* 64 bit host will restore XMM 8-15, which is not correct on non-64
* bit guests. Load the current values in order to preserve 64 bit
* XMMs after fxrstor.
*/
#ifdef CONFIG_X86_64
/* XXX: accessing XMM 8-15 very awkwardly */
memcpy(&new->xmm_space[8 * 16/4], &old.xmm_space[8 * 16/4], 8 * 16);
#endif
/*
* Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but
* does save and restore MXCSR.
*/
if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))
memcpy(new->xmm_space, old.xmm_space, 8 * 16);
return rc;
}
static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
{
struct fxregs_state fx_state;
int rc;
rc = check_fxsr(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
if (rc != X86EMUL_CONTINUE)
return rc;
if (fx_state.mxcsr >> 16)
return emulate_gp(ctxt, 0);
ctxt->ops->get_fpu(ctxt);
if (ctxt->mode < X86EMUL_MODE_PROT64)
rc = fxrstor_fixup(ctxt, &fx_state);
if (rc == X86EMUL_CONTINUE)
rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
ctxt->ops->put_fpu(ctxt);
return rc;
}
static bool valid_cr(int nr)
{
switch (nr) {
@ -4182,7 +4373,9 @@ static const struct gprefix pfx_0f_ae_7 = {
};
static const struct group_dual group15 = { {
N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
I(ModRM | Aligned16, em_fxsave),
I(ModRM | Aligned16, em_fxrstor),
N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
N, N, N, N, N, N, N, N,
} };
@ -5054,21 +5247,13 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
bool fault = false;
int rc;
ctxt->ops->get_fpu(ctxt);
asm volatile("1: fwait \n\t"
"2: \n\t"
".pushsection .fixup,\"ax\" \n\t"
"3: \n\t"
"movb $1, %[fault] \n\t"
"jmp 2b \n\t"
".popsection \n\t"
_ASM_EXTABLE(1b, 3b)
: [fault]"+qm"(fault));
rc = asm_safe("fwait");
ctxt->ops->put_fpu(ctxt);
if (unlikely(fault))
if (unlikely(rc != X86EMUL_CONTINUE))
return emulate_exception(ctxt, MF_VECTOR, 0, false);
return X86EMUL_CONTINUE;

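Among other things, the emulator change above replaces the boolean insn_aligned() with insn_alignment(), which returns the required alignment (16 for the FXSAVE/FXRSTOR operand, 1 for unaligned and AVX forms, otherwise the operand size), so the fault test collapses to a single mask of the linear address. A stand-alone sketch of that test, with illustrative addresses:

#include <assert.h>
#include <stdint.h>

/* A linear address faults iff it is not a multiple of the required alignment. */
static int misaligned(uint64_t la, unsigned int alignment)
{
	return (la & (uint64_t)(alignment - 1)) != 0;
}

int main(void)
{
	assert(!misaligned(0x1003, 1));		/* alignment 1: never faults */
	assert(!misaligned(0x1010, 16));	/* 16-byte-aligned FXSAVE operand */
	assert(misaligned(0x1008, 16));		/* off by 8: #GP in the emulator */
	return 0;
}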

@ -2187,3 +2187,9 @@ void kvm_lapic_init(void)
jump_label_rate_limit(&apic_hw_disabled, HZ);
jump_label_rate_limit(&apic_sw_disabled, HZ);
}
void kvm_lapic_exit(void)
{
static_key_deferred_flush(&apic_hw_disabled);
static_key_deferred_flush(&apic_sw_disabled);
}

Some files were not shown because too many files have changed in this diff Show more