android_kernel_oneplus_msm8998/arch/arm64/mm/mmu.c
David Keitel f2b1fed1bd Merge remote-tracking branch 'lsk-44/linux-linaro-lsk-v4.4' into 44rc2
* lsk-44/linux-linaro-lsk-v4.4:
  Linux 4.4.3
  modules: fix modparam async_probe request
  module: wrapper for symbol name.
  itimers: Handle relative timers with CONFIG_TIME_LOW_RES proper
  posix-timers: Handle relative timers with CONFIG_TIME_LOW_RES proper
  timerfd: Handle relative timers with CONFIG_TIME_LOW_RES proper
  prctl: take mmap sem for writing to protect against others
  xfs: log mount failures don't wait for buffers to be released
  Revert "xfs: clear PF_NOFREEZE for xfsaild kthread"
  xfs: inode recovery readahead can race with inode buffer creation
  libxfs: pack the agfl header structure so XFS_AGFL_SIZE is correct
  ovl: setattr: check permissions before copy-up
  ovl: root: copy attr
  ovl: check dentry positiveness in ovl_cleanup_whiteouts()
  ovl: use a minimal buffer in ovl_copy_xattr
  ovl: allow zero size xattr
  futex: Drop refcount if requeue_pi() acquired the rtmutex
  devm_memremap_release(): fix memremap'd addr handling
  ipc/shm: handle removed segments gracefully in shm_mmap()
  intel_scu_ipcutil: underflow in scu_reg_access()
  mm,thp: khugepaged: call pte flush at the time of collapse
  dump_stack: avoid potential deadlocks
  radix-tree: fix oops after radix_tree_iter_retry
  drivers/hwspinlock: fix race between radix tree insertion and lookup
  radix-tree: fix race in gang lookup
  MAINTAINERS: return arch/sh to maintained state, with new maintainers
  memcg: only free spare array when readers are done
  numa: fix /proc/<pid>/numa_maps for hugetlbfs on s390
  fs/hugetlbfs/inode.c: fix bugs in hugetlb_vmtruncate_list()
  scripts/bloat-o-meter: fix python3 syntax error
  dma-debug: switch check from _text to _stext
  m32r: fix m32104ut_defconfig build fail
  xhci: Fix list corruption in urb dequeue at host removal
  Revert "xhci: don't finish a TD if we get a short-transfer event mid TD"
  iommu/vt-d: Clear PPR bit to ensure we get more page request interrupts
  iommu/vt-d: Fix 64-bit accesses to 32-bit DMAR_GSTS_REG
  iommu/vt-d: Fix mm refcounting to hold mm_count not mm_users
  iommu/amd: Correct the wrong setting of alias DTE in do_attach
  iommu/vt-d: Don't skip PCI devices when disabling IOTLB
  Input: vmmouse - fix absolute device registration
  string_helpers: fix precision loss for some inputs
  Input: i8042 - add Fujitsu Lifebook U745 to the nomux list
  Input: elantech - mark protocols v2 and v3 as semi-mt
  mm: fix regression in remap_file_pages() emulation
  mm: replace vma_lock_anon_vma with anon_vma_lock_read/write
  mm: fix mlock accouting
  libnvdimm: fix namespace object confusion in is_uuid_busy()
  mm: soft-offline: check return value in second __get_any_page() call
  perf kvm record/report: 'unprocessable sample' error while recording/reporting guest data
  KVM: PPC: Fix ONE_REG AltiVec support
  KVM: PPC: Fix emulation of H_SET_DABR/X on POWER8
  KVM: arm/arm64: Fix reference to uninitialised VGIC
  arm64: dma-mapping: fix handling of devices registered before arch_initcall
  ARM: OMAP2+: Fix ppa_zero_params and ppa_por_params for rodata
  ARM: OMAP2+: Fix save_secure_ram_context for rodata
  ARM: OMAP2+: Fix l2dis_3630 for rodata
  ARM: OMAP2+: Fix l2_inv_api_params for rodata
  ARM: OMAP2+: Fix wait_dll_lock_timed for rodata
  ARM: dts: at91: sama5d4ek: add phy address and IRQ for macb0
  ARM: dts: at91: sama5d4 xplained: fix phy0 IRQ type
  ARM: dts: at91: sama5d4: fix instance id of DBGU
  ARM: dts: at91: sama5d4 xplained: properly mux phy interrupt
  ARM: dts: omap5-board-common: enable rtc and charging of backup battery
  ARM: dts: Fix omap5 PMIC control lines for RTC writes
  ARM: dts: Fix wl12xx missing clocks that cause hangs
  ARM: nomadik: fix up SD/MMC DT settings
  ARM: 8517/1: ICST: avoid arithmetic overflow in icst_hz()
  ARM: 8519/1: ICST: try other dividends than 1
  arm64: mm: avoid calling apply_to_page_range on empty range
  ARM: mvebu: remove duplicated regulator definition in Armada 388 GP
  powerpc/ioda: Set "read" permission when "write" is set
  powerpc/powernv: Fix stale PE primary bus
  powerpc/eeh: Fix stale cached primary bus
  powerpc/eeh: Fix PE location code
  SUNRPC: Fixup socket wait for memory
  udf: Check output buffer length when converting name to CS0
  udf: Prevent buffer overrun with multi-byte characters
  udf: limit the maximum number of indirect extents in a row
  pNFS/flexfiles: Fix an XDR encoding bug in layoutreturn
  nfs: Fix race in __update_open_stateid()
  pNFS/flexfiles: Fix an Oopsable typo in ff_mirror_match_fh()
  NFS: Fix attribute cache revalidation
  cifs: fix erroneous return value
  cifs_dbg() outputs an uninitialized buffer in cifs_readdir()
  cifs: fix race between call_async() and reconnect()
  cifs: Ratelimit kernel log messages
  iio: inkern: fix a NULL dereference on error
  iio: pressure: mpl115: fix temperature offset sign
  iio: light: acpi-als: Report data as processed
  iio: dac: mcp4725: set iio name property in sysfs
  iio: add IIO_TRIGGER dependency to STK8BA50
  iio: add HAS_IOMEM dependency to VF610_ADC
  iio-light: Use a signed return type for ltr501_match_samp_freq()
  iio:adc:ti_am335x_adc Fix buffered mode by identifying as software buffer.
  iio: adis_buffer: Fix out-of-bounds memory access
  scsi: fix soft lockup in scsi_remove_target() on module removal
  SCSI: Add Marvell Console to VPD blacklist
  scsi_dh_rdac: always retry MODE SELECT on command lock violation
  drivers/scsi/sg.c: mark VMA as VM_IO to prevent migration
  SCSI: fix crashes in sd and sr runtime PM
  iscsi-target: Fix potential dead-lock during node acl delete
  scsi: add Synology to 1024 sector blacklist
  klist: fix starting point removed bug in klist iterators
  tracepoints: Do not trace when cpu is offline
  tracing: Fix freak link error caused by branch tracer
  perf tools: tracepoint_error() can receive e=NULL, robustify it
  tools lib traceevent: Fix output of %llu for 64 bit values read on 32 bit machines
  ptrace: use fsuid, fsgid, effective creds for fs access checks
  Btrfs: fix direct IO requests not reporting IO error to user space
  Btrfs: fix hang on extent buffer lock caused by the inode_paths ioctl
  Btrfs: fix page reading in extent_same ioctl leading to csum errors
  Btrfs: fix invalid page accesses in extent_same (dedup) ioctl
  btrfs: properly set the termination value of ctx->pos in readdir
  Revert "btrfs: clear PF_NOFREEZE in cleaner_kthread()"
  Btrfs: fix fitrim discarding device area reserved for boot loader's use
  btrfs: handle invalid num_stripes in sys_array
  ext4: don't read blocks from disk after extents being swapped
  ext4: fix potential integer overflow
  ext4: fix scheduling in atomic on group checksum failure
  serial: omap: Prevent DoS using unprivileged ioctl(TIOCSRS485)
  serial: 8250_pci: Add Intel Broadwell ports
  tty: Add support for PCIe WCH382 2S multi-IO card
  pty: make sure super_block is still valid in final /dev/tty close
  pty: fix possible use after free of tty->driver_data
  staging/speakup: Use tty_ldisc_ref() for paste kworker
  phy: twl4030-usb: Fix unbalanced pm_runtime_enable on module reload
  phy: twl4030-usb: Relase usb phy on unload
  ALSA: seq: Fix double port list deletion
  ALSA: seq: Fix leak of pool buffer at concurrent writes
  ALSA: pcm: Fix rwsem deadlock for non-atomic PCM stream
  ALSA: hda - Cancel probe work instead of flush at remove
  x86/mm: Fix vmalloc_fault() to handle large pages properly
  x86/uaccess/64: Handle the caching of 4-byte nocache copies properly in __copy_user_nocache()
  x86/uaccess/64: Make the __copy_user_nocache() assembly code more readable
  x86/mm/pat: Avoid truncation when converting cpa->numpages to address
  x86/mm: Fix types used in pgprot cacheability flags translations
  Linux 4.4.2
  HID: multitouch: fix input mode switching on some Elan panels
  mm, vmstat: fix wrong WQ sleep when memory reclaim doesn't make any progress
  zsmalloc: fix migrate_zspage-zs_free race condition
  zram: don't call idr_remove() from zram_remove()
  zram: try vmalloc() after kmalloc()
  zram/zcomp: use GFP_NOIO to allocate streams
  rtlwifi: rtl8821ae: Fix 5G failure when EEPROM is incorrectly encoded
  rtlwifi: rtl8821ae: Fix errors in parameter initialization
  crypto: marvell/cesa - fix test in mv_cesa_dev_dma_init()
  crypto: atmel-sha - remove calls of clk_prepare() from atomic contexts
  crypto: atmel-sha - fix atmel_sha_remove()
  crypto: algif_skcipher - Do not set MAY_BACKLOG on the async path
  crypto: algif_skcipher - Do not dereference ctx without socket lock
  crypto: algif_skcipher - Do not assume that req is unchanged
  crypto: user - lock crypto_alg_list on alg dump
  EVM: Use crypto_memneq() for digest comparisons
  crypto: algif_hash - wait for crypto_ahash_init() to complete
  crypto: shash - Fix has_key setting
  crypto: chacha20-ssse3 - Align stack pointer to 64 bytes
  crypto: caam - make write transactions bufferable on PPC platforms
  crypto: algif_skcipher - sendmsg SG marking is off by one
  crypto: algif_skcipher - Load TX SG list after waiting
  crypto: crc32c - Fix crc32c soft dependency
  crypto: algif_skcipher - Fix race condition in skcipher_check_key
  crypto: algif_hash - Fix race condition in hash_check_key
  crypto: af_alg - Forbid bind(2) when nokey child sockets are present
  crypto: algif_skcipher - Remove custom release parent function
  crypto: algif_hash - Remove custom release parent function
  crypto: af_alg - Allow af_af_alg_release_parent to be called on nokey path
  ahci: Intel DNV device IDs SATA
  libata: disable forced PORTS_IMPL for >= AHCI 1.3
  crypto: algif_skcipher - Add key check exception for cipher_null
  crypto: skcipher - Add crypto_skcipher_has_setkey
  crypto: algif_hash - Require setkey before accept(2)
  crypto: hash - Add crypto_ahash_has_setkey
  crypto: algif_skcipher - Add nokey compatibility path
  crypto: af_alg - Add nokey compatibility path
  crypto: af_alg - Fix socket double-free when accept fails
  crypto: af_alg - Disallow bind/setkey/... after accept(2)
  crypto: algif_skcipher - Require setkey before accept(2)
  sched: Fix crash in sched_init_numa()
  ext4 crypto: add missing locking for keyring_key access
  iommu/io-pgtable-arm: Ensure we free the final level on teardown
  tty: Fix unsafe ldisc reference via ioctl(TIOCGETD)
  tty: Retry failed reopen if tty teardown in-progress
  tty: Wait interruptibly for tty lock on reopen
  n_tty: Fix unsafe reference to "other" ldisc
  usb: xhci: apply XHCI_PME_STUCK_QUIRK to Intel Broxton-M platforms
  usb: xhci: handle both SSIC ports in PME stuck quirk
  usb: phy: msm: fix error handling in probe.
  usb: cdc-acm: send zero packet for intel 7260 modem
  usb: cdc-acm: handle unlinked urb in acm read callback
  USB: option: fix Cinterion AHxx enumeration
  USB: serial: option: Adding support for Telit LE922
  USB: cp210x: add ID for IAI USB to RS485 adaptor
  USB: serial: ftdi_sio: add support for Yaesu SCU-18 cable
  usb: hub: do not clear BOS field during reset device
  USB: visor: fix null-deref at probe
  USB: serial: visor: fix crash on detecting device without write_urbs
  ASoC: rt5645: fix the shift bit of IN1 boost
  saa7134-alsa: Only frees registered sound cards
  ALSA: dummy: Implement timer backend switching more safely
  ALSA: hda - Fix bad dereference of jack object
  ALSA: hda - Fix speaker output from VAIO AiO machines
  Revert "ALSA: hda - Fix noise on Gigabyte Z170X mobo"
  ALSA: hda - Fix static checker warning in patch_hdmi.c
  ALSA: hda - Add fixup for Mac Mini 7,1 model
  ALSA: timer: Fix race between stop and interrupt
  ALSA: timer: Fix wrong instance passed to slave callbacks
  ALSA: timer: Fix race at concurrent reads
  ALSA: timer: Fix link corruption due to double start or stop
  ALSA: timer: Fix leftover link at closing
  ALSA: timer: Code cleanup
  ALSA: seq: Fix lockdep warnings due to double mutex locks
  ALSA: seq: Fix race at closing in virmidi driver
  ALSA: seq: Fix yet another races among ALSA timer accesses
  ASoC: dpcm: fix the BE state on hw_free
  ALSA: pcm: Fix potential deadlock in OSS emulation
  ALSA: hda/realtek - Support Dell headset mode for ALC225
  ALSA: hda/realtek - Support headset mode for ALC225
  ALSA: hda/realtek - New codec support of ALC225
  ALSA: rawmidi: Fix race at copying & updating the position
  ALSA: rawmidi: Remove kernel WARNING for NULL user-space buffer check
  ALSA: rawmidi: Make snd_rawmidi_transmit() race-free
  ALSA: seq: Degrade the error message for too many opens
  ALSA: seq: Fix incorrect sanity check at snd_seq_oss_synth_cleanup()
  ALSA: dummy: Disable switching timer backend via sysfs
  ALSA: compress: Disable GET_CODEC_CAPS ioctl for some architectures
  ALSA: hda - disable dynamic clock gating on Broxton before reset
  ALSA: Add missing dependency on CONFIG_SND_TIMER
  ALSA: bebob: Use a signed return type for get_formation_index
  ALSA: usb-audio: avoid freeing umidi object twice
  ALSA: usb-audio: Add native DSD support for PS Audio NuWave DAC
  ALSA: usb-audio: Fix OPPO HA-1 vendor ID
  ALSA: usb-audio: Add quirk for Microsoft LifeCam HD-6000
  ALSA: usb-audio: Fix TEAC UD-501/UD-503/NT-503 usb delay
  hrtimer: Handle remaining time proper for TIME_LOW_RES
  md/raid: only permit hot-add of compatible integrity profiles
  media: i2c: Don't export ir-kbd-i2c module alias
  parisc: Fix __ARCH_SI_PREAMBLE_SIZE
  parisc: Protect huge page pte changes with spinlocks
  printk: do cond_resched() between lines while outputting to consoles
  tracing/stacktrace: Show entire trace if passed in function not found
  tracing: Fix stacktrace skip depth in trace_buffer_unlock_commit_regs()
  PCI: Fix minimum allocation address overwrite
  PCI: host: Mark PCIe/PCI (MSI) IRQ cascade handlers as IRQF_NO_THREAD
  mtd: nand: assign reasonable default name for NAND drivers
  wlcore/wl12xx: spi: fix NULL pointer dereference (Oops)
  wlcore/wl12xx: spi: fix oops on firmware load
  ocfs2/dlm: clear refmap bit of recovery lock while doing local recovery cleanup
  ocfs2/dlm: ignore cleaning the migration mle that is inuse
  ALSA: hda - Implement loopback control switch for Realtek and other codecs
  block: fix bio splitting on max sectors
  base/platform: Fix platform drivers with no probe callback
  HID: usbhid: fix recursive deadlock
  ocfs2: NFS hangs in __ocfs2_cluster_lock due to race with ocfs2_unblock_lock
  block: split bios to max possible length
  NFSv4.1/pnfs: Fixup an lo->plh_block_lgets imbalance in layoutreturn
  crypto: sun4i-ss - add missing statesize
  Linux 4.4.1
  arm64: kernel: fix architected PMU registers unconditional access
  arm64: kernel: enforce pmuserenr_el0 initialization and restore
  arm64: mm: ensure that the zero page is visible to the page table walker
  arm64: Clear out any singlestep state on a ptrace detach operation
  powerpc/module: Handle R_PPC64_ENTRY relocations
  scripts/recordmcount.pl: support data in text section on powerpc
  powerpc: Make {cmp}xchg* and their atomic_ versions fully ordered
  powerpc: Make value-returning atomics fully ordered
  powerpc/tm: Check for already reclaimed tasks
  batman-adv: Drop immediate orig_node free function
  batman-adv: Drop immediate batadv_hard_iface free function
  batman-adv: Drop immediate neigh_ifinfo free function
  batman-adv: Drop immediate batadv_neigh_node free function
  batman-adv: Drop immediate batadv_orig_ifinfo free function
  batman-adv: Avoid recursive call_rcu for batadv_nc_node
  batman-adv: Avoid recursive call_rcu for batadv_bla_claim
  team: Replace rcu_read_lock with a mutex in team_vlan_rx_kill_vid
  net/mlx5_core: Fix trimming down IRQ number
  bridge: fix lockdep addr_list_lock false positive splat
  ipv6: update skb->csum when CE mark is propagated
  net: bpf: reject invalid shifts
  phonet: properly unshare skbs in phonet_rcv()
  dwc_eth_qos: Fix dma address for multi-fragment skbs
  bonding: Prevent IPv6 link local address on enslaved devices
  net: preserve IP control block during GSO segmentation
  udp: disallow UFO for sockets with SO_NO_CHECK option
  net: pktgen: fix null ptr deref in skb allocation
  sched,cls_flower: set key address type when present
  tcp_yeah: don't set ssthresh below 2
  ipv6: tcp: add rcu locking in tcp_v6_send_synack()
  net: sctp: prevent writes to cookie_hmac_alg from accessing invalid memory
  vxlan: fix test which detect duplicate vxlan iface
  unix: properly account for FDs passed over unix sockets
  xhci: refuse loading if nousb is used
  usb: core: lpm: fix usb3_hardware_lpm sysfs node
  USB: cp210x: add ID for ELV Marble Sound Board 1
  rtlwifi: fix memory leak for USB device
  ASoC: compress: Fix compress device direction check
  ASoC: wm5110: Fix PGA clear when disabling DRE
  ALSA: timer: Handle disconnection more safely
  ALSA: hda - Flush the pending probe work at remove
  ALSA: hda - Fix missing module loading with model=generic option
  ALSA: hda - Fix bass pin fixup for ASUS N550JX
  ALSA: control: Avoid kernel warnings from tlv ioctl with numid 0
  ALSA: hrtimer: Fix stall by hrtimer_cancel()
  ALSA: pcm: Fix snd_pcm_hw_params struct copy in compat mode
  ALSA: seq: Fix snd_seq_call_port_info_ioctl in compat mode
  ALSA: hda - Add fixup for Dell Latitidue E6540
  ALSA: timer: Fix double unlink of active_list
  ALSA: timer: Fix race among timer ioctls
  ALSA: hda - fix the headset mic detection problem for a Dell laptop
  ALSA: timer: Harden slave timer list handling
  ALSA: usb-audio: Fix mixer ctl regression of Native Instrument devices
  ALSA: hda - Fix white noise on Dell Latitude E5550
  ALSA: seq: Fix race at timer setup and close
  ALSA: usb-audio: Avoid calling usb_autopm_put_interface() at disconnect
  ALSA: seq: Fix missing NULL check at remove_events ioctl
  ALSA: hda - Fixup inverted internal mic for Lenovo E50-80
  ALSA: usb: Add native DSD support for Oppo HA-1
  x86/mm: Improve switch_mm() barrier comments
  x86/mm: Add barriers and document switch_mm()-vs-flush synchronization
  x86/boot: Double BOOT_HEAP_SIZE to 64KB
  x86/reboot/quirks: Add iMac10,1 to pci_reboot_dmi_table[]
  kvm: x86: Fix vmwrite to SECONDARY_VM_EXEC_CONTROL
  KVM: x86: correctly print #AC in traces
  KVM: x86: expose MSR_TSC_AUX to userspace
  x86/xen: don't reset vcpu_info on a cancelled suspend
  KEYS: Fix keyring ref leak in join_session_keyring()

Conflicts:
	arch/arm64/kernel/perf_event.c
	drivers/scsi/sd.c
	sound/core/compress_offload.c

Change-Id: I9f77fe42aaae249c24cd6e170202110ab1426878
Signed-off-by: Trilok Soni <tsoni@codeaurora.org>
2016-03-23 20:51:00 -07:00

/*
* Based on arch/arm/mm/mmu.c
*
* Copyright (C) 1995-2005 Russell King
* Copyright (C) 2012 ARM Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/dma-contiguous.h>
#include <linux/cma.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>
#include "mm.h"
u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
/*
* Empty_zero_page is a special page that is used for zero-initialized data
* and COW.
*/
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
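/*
* Decide the pgprot for user mappings of physical memory (e.g. /dev/mem):
* non-RAM is mapped non-cached, O_SYNC mappings of RAM are write-combined,
* otherwise the vma's existing protection is left unchanged.
*/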
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
if (!pfn_valid(pfn))
return pgprot_noncached(vma_prot);
else if (file->f_flags & O_SYNC)
return pgprot_writecombine(vma_prot);
return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
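/*
* Early page-table allocator: returns a zeroed, naturally aligned block
* taken from memblock, for use before the core page allocator is up.
*/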
static void __init *early_alloc(unsigned long sz)
{
phys_addr_t phys;
void *ptr;
phys = memblock_alloc(sz, sz);
BUG_ON(!phys);
ptr = __va(phys);
memset(ptr, 0, sz);
return ptr;
}
/*
* remap a PMD into pages
*/
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
unsigned long pfn = pmd_pfn(*pmd);
int i = 0;
do {
/*
* Need to have the least restrictive permissions available;
* permissions will be fixed up later.
*/
set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
pfn++;
} while (pte++, i++, i < PTRS_PER_PTE);
}
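/*
* Create a PTE table covering [addr, end) and map it to the given pfn
* range with 'prot'. If the pmd currently holds a section mapping,
* split_pmd() copies it into the new table first so the existing
* contents stay mapped.
*/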
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
unsigned long end, unsigned long pfn,
pgprot_t prot,
void *(*alloc)(unsigned long size))
{
pte_t *pte;
if (pmd_none(*pmd) || pmd_sect(*pmd)) {
pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
if (pmd_sect(*pmd))
split_pmd(pmd, pte);
__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
flush_tlb_all();
}
BUG_ON(pmd_bad(*pmd));
pte = pte_offset_kernel(pmd, addr);
do {
set_pte(pte, pfn_pte(pfn, prot));
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
}
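/*
* Break a PUD (1G) block mapping down into PMD-level entries that cover
* the same physical range with the same attributes; the caller installs
* the new table into the pud.
*/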
static void split_pud(pud_t *old_pud, pmd_t *pmd)
{
unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
int i = 0;
do {
set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
addr += PMD_SIZE;
} while (pmd++, i++, i < PTRS_PER_PMD);
}
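/*
* Populate the PMD level for [addr, end). Section (2M) mappings are used
* where alignment allows and 'pages' is false; otherwise the range is
* mapped at PTE granularity via alloc_init_pte(). A pre-existing 1G pud
* block is split first so its contents remain mapped.
*/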
static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
void *(*alloc)(unsigned long size), bool pages)
{
pmd_t *pmd;
unsigned long next;
/*
* Check for initial section mappings in the pgd/pud and remove them.
*/
if (pud_none(*pud) || pud_sect(*pud)) {
pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
if (pud_sect(*pud)) {
/*
* the existing 1G of mappings must continue to be
* present while the block is split into 2M sections
*/
split_pud(pud, pmd);
}
pud_populate(mm, pud, pmd);
flush_tlb_all();
}
BUG_ON(pud_bad(*pud));
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
/* try section mapping first */
if (!pages && ((addr | next | phys) & ~SECTION_MASK) == 0) {
pmd_t old_pmd =*pmd;
set_pmd(pmd, __pmd(phys |
pgprot_val(mk_sect_prot(prot))));
/*
* Check for previous table entries created during
* boot (__create_page_tables) and flush them.
*/
if (!pmd_none(old_pmd)) {
flush_tlb_all();
if (pmd_table(old_pmd)) {
phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
if (!WARN_ON_ONCE(slab_is_available()))
memblock_free(table, PAGE_SIZE);
}
}
} else {
alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
prot, alloc);
}
phys += next - addr;
} while (pmd++, addr = next, addr != end);
}
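/*
* A 1G block mapping may only be used with the 4K granule and when the
* virtual range and physical address are all PUD_SIZE aligned.
*/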
static inline bool use_1G_block(unsigned long addr, unsigned long next,
unsigned long phys)
{
if (PAGE_SHIFT != 12)
return false;
if (((addr | next | phys) & ~PUD_MASK) != 0)
return false;
return true;
}
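/*
* Populate the PUD level for [addr, end), installing 1G block mappings
* where use_1G_block() allows (and CONFIG_FORCE_PAGES is not set) and
* otherwise descending to alloc_init_pmd(). An old pmd table that a new
* block replaces is freed back to memblock during early boot.
*/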
static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
void *(*alloc)(unsigned long size), bool force_pages)
{
pud_t *pud;
unsigned long next;
if (pgd_none(*pgd)) {
pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
pgd_populate(mm, pgd, pud);
}
BUG_ON(pgd_bad(*pgd));
pud = pud_offset(pgd, addr);
do {
next = pud_addr_end(addr, end);
/*
* For 4K granule only, attempt to put down a 1GB block
*/
if (use_1G_block(addr, next, phys) &&
!IS_ENABLED(CONFIG_FORCE_PAGES)) {
pud_t old_pud = *pud;
set_pud(pud, __pud(phys |
pgprot_val(mk_sect_prot(prot))));
/*
* If we have an old value for a pud, it will
* be pointing to a pmd table that we no longer
* need (from swapper_pg_dir).
*
* Look up the old pmd table and free it.
*/
if (!pud_none(old_pud)) {
flush_tlb_all();
if (pud_table(old_pud)) {
phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
if (!WARN_ON_ONCE(slab_is_available()))
memblock_free(table, PAGE_SIZE);
}
}
} else {
alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc, force_pages);
}
phys += next - addr;
} while (pud++, addr = next, addr != end);
}
/*
* Create the page directory entries and any necessary page tables for the
* mapping of 'size' bytes at physical address 'phys' to virtual address
* 'virt'.
*/
static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
phys_addr_t phys, unsigned long virt,
phys_addr_t size, pgprot_t prot,
void *(*alloc)(unsigned long size), bool force_pages)
{
unsigned long addr, length, end, next;
addr = virt & PAGE_MASK;
length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));
end = addr + length;
do {
next = pgd_addr_end(addr, end);
alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc, force_pages);
phys += next - addr;
} while (pgd++, addr = next, addr != end);
}
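/*
* Page-table allocator for mappings created after the core MM is up:
* allocate a single page from the page allocator instead of memblock.
*/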
static void *late_alloc(unsigned long size)
{
void *ptr;
BUG_ON(size > PAGE_SIZE);
ptr = (void *)__get_free_page(PGALLOC_GFP);
BUG_ON(!ptr);
return ptr;
}
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
phys_addr_t size, pgprot_t prot, bool force_pages)
{
if (virt < VMALLOC_START) {
pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
&phys, virt);
return;
}
__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
size, prot, early_alloc, force_pages);
}
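/*
* Create a mapping in an arbitrary mm (rather than init_mm), allocating
* any intermediate tables with late_alloc().
*/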
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot)
{
__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
late_alloc, false);
}
static inline pmd_t *pmd_off_k(unsigned long virt)
{
return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
}
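/*
* Remap the linear mapping of [start, start + size) using page mappings
* instead of sections: the range is widened to PMD_SIZE boundaries where
* the surrounding memory exists, any existing block entries covering it
* are cleared, and it is rebuilt via create_mapping() with force_pages
* set. Used for the CMA regions recorded below.
*/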
void __init remap_as_pages(unsigned long start, unsigned long size)
{
unsigned long addr;
unsigned long end = start + size;
/*
* Make start and end PMD_SIZE aligned, observing memory
* boundaries
*/
if (memblock_is_memory(start & PMD_MASK))
start = start & PMD_MASK;
if (memblock_is_memory(ALIGN(end, PMD_SIZE)))
end = ALIGN(end, PMD_SIZE);
size = end - start;
/*
* Clear previous low-memory mapping
*/
for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
addr += PMD_SIZE) {
pmd_t *pmd;
pmd = pmd_off_k(addr);
if (pmd_bad(*pmd) || pmd_sect(*pmd))
pmd_clear(pmd);
}
create_mapping(start, __phys_to_virt(start), size, PAGE_KERNEL, true);
}
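/*
* CMA areas reserved before paging_init() are recorded here so that their
* portion of the linear mapping can be converted to page mappings by
* dma_contiguous_remap() once the page tables have been set up.
*/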
struct dma_contig_early_reserve {
phys_addr_t base;
unsigned long size;
};
static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
static int dma_mmu_remap_num __initdata;
void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
dma_mmu_remap[dma_mmu_remap_num].base = base;
dma_mmu_remap[dma_mmu_remap_num].size = size;
dma_mmu_remap_num++;
}
static void __init dma_contiguous_remap(void)
{
int i;
for (i = 0; i < dma_mmu_remap_num; i++)
remap_as_pages(dma_mmu_remap[i].base,
dma_mmu_remap[i].size);
}
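/*
* Like create_mapping(), but for use once the core MM is initialised:
* intermediate tables come from late_alloc() rather than memblock.
*/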
static void create_mapping_late(phys_addr_t phys, unsigned long virt,
phys_addr_t size, pgprot_t prot)
{
if (virt < VMALLOC_START) {
pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
&phys, virt);
return;
}
return __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
phys, virt, size, prot, late_alloc, false);
}
#ifdef CONFIG_DEBUG_RODATA
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
/*
* Set up the executable regions using the existing section mappings
* for now. This will get more fine-grained later once all memory
* is mapped.
*/
unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
if (end < kernel_x_start) {
create_mapping(start, __phys_to_virt(start),
end - start, PAGE_KERNEL, false);
} else if (start >= kernel_x_end) {
create_mapping(start, __phys_to_virt(start),
end - start, PAGE_KERNEL, false);
} else {
if (start < kernel_x_start)
create_mapping(start, __phys_to_virt(start),
kernel_x_start - start,
PAGE_KERNEL, false);
create_mapping(kernel_x_start,
__phys_to_virt(kernel_x_start),
kernel_x_end - kernel_x_start,
PAGE_KERNEL_EXEC, false);
if (kernel_x_end < end)
create_mapping(kernel_x_end,
__phys_to_virt(kernel_x_end),
end - kernel_x_end,
PAGE_KERNEL, false);
}
}
#else
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
create_mapping(start, __phys_to_virt(start), end - start,
PAGE_KERNEL_EXEC, false);
}
#endif
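/*
* Map all memblock memory regions into the kernel linear mapping. The
* memblock allocation limit is kept clamped while this runs so that any
* page tables allocated here are themselves reachable through the
* mapping built so far.
*/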
static void __init map_mem(void)
{
struct memblock_region *reg;
phys_addr_t limit;
/*
* Temporarily limit the memblock range. We need to do this as
* create_mapping requires puds, pmds and ptes to be allocated from
* memory addressable from the initial direct kernel mapping.
*
* The initial direct kernel mapping, located at swapper_pg_dir, gives
* us PUD_SIZE (with SECTION maps) or PMD_SIZE (without SECTION maps) of
* memory starting from PHYS_OFFSET (which must be aligned to 2MB as per
* Documentation/arm64/booting.txt).
*/
limit = PHYS_OFFSET + SWAPPER_INIT_MAP_SIZE;
memblock_set_current_limit(limit);
/* map all the memory banks */
for_each_memblock(memory, reg) {
phys_addr_t start = reg->base;
phys_addr_t end = start + reg->size;
if (start >= end)
break;
if (ARM64_SWAPPER_USES_SECTION_MAPS) {
/*
* For the first memory bank align the start address and
* current memblock limit to prevent create_mapping() from
* allocating pte page tables from unmapped memory. With
* the section maps, if the first block doesn't end on section
* size boundary, create_mapping() will try to allocate a pte
* page, which may be returned from an unmapped area.
* When section maps are not used, the pte page table for the
* current limit is already present in swapper_pg_dir.
*/
if (start < limit)
start = ALIGN(start, SECTION_SIZE);
if (end < limit) {
limit = end & SECTION_MASK;
memblock_set_current_limit(limit);
}
}
__map_memblock(start, end);
}
/* Limit no longer required. */
memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}
#ifdef CONFIG_FORCE_PAGES
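/*
* Install a PTE table under 'pmd' mapping [addr, end) to the given pfn
* range page by page, replacing whatever section mapping (or empty entry)
* was there.
*/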
static noinline void __init split_and_set_pmd(pmd_t *pmd, unsigned long addr,
unsigned long end, unsigned long pfn)
{
pte_t *pte, *start_pte;
start_pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
pte = start_pte;
do {
set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
set_pmd(pmd, __pmd((__pa(start_pte)) | PMD_TYPE_TABLE));
}
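/*
* Walk the linear mapping of every memblock region and convert any
* remaining section-mapped (or empty) pmds to page mappings.
*/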
static noinline void __init remap_pages(void)
{
struct memblock_region *reg;
for_each_memblock(memory, reg) {
phys_addr_t phys_pgd = reg->base;
phys_addr_t phys_end = reg->base + reg->size;
unsigned long addr_pgd = (unsigned long)__va(phys_pgd);
unsigned long end = (unsigned long)__va(phys_end);
pmd_t *pmd = NULL;
pud_t *pud = NULL;
pgd_t *pgd = NULL;
unsigned long next_pud, next_pmd, next_pgd;
unsigned long addr_pmd, addr_pud;
phys_addr_t phys_pud, phys_pmd;
if (phys_pgd >= phys_end)
break;
pgd = pgd_offset(&init_mm, addr_pgd);
do {
next_pgd = pgd_addr_end(addr_pgd, end);
pud = pud_offset(pgd, addr_pgd);
addr_pud = addr_pgd;
phys_pud = phys_pgd;
do {
next_pud = pud_addr_end(addr_pud, next_pgd);
pmd = pmd_offset(pud, addr_pud);
addr_pmd = addr_pud;
phys_pmd = phys_pud;
do {
next_pmd = pmd_addr_end(addr_pmd,
next_pud);
if (pmd_none(*pmd) || pmd_bad(*pmd))
split_and_set_pmd(pmd, addr_pmd,
next_pmd, __phys_to_pfn(phys_pmd));
pmd++;
phys_pmd += next_pmd - addr_pmd;
} while (addr_pmd = next_pmd,
addr_pmd < next_pud);
phys_pud += next_pud - addr_pud;
} while (pud++, addr_pud = next_pud,
addr_pud < next_pgd);
phys_pgd += next_pgd - addr_pgd;
} while (pgd++, addr_pgd = next_pgd, addr_pgd < end);
}
}
#else
static void __init remap_pages(void)
{
}
#endif
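/*
* With CONFIG_DEBUG_RODATA, __map_memblock() mapped the kernel text on
* SWAPPER_BLOCK_SIZE boundaries; remap the over-mapped head and tail of
* that window as non-executable now that the full linear mapping exists.
*/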
static void __init fixup_executable(void)
{
#ifdef CONFIG_DEBUG_RODATA
/* now that we are actually fully mapped, make the start/end mappings more fine-grained */
if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
unsigned long aligned_start = round_down(__pa(_stext),
SWAPPER_BLOCK_SIZE);
create_mapping(aligned_start, __phys_to_virt(aligned_start),
__pa(_stext) - aligned_start,
PAGE_KERNEL, false);
}
if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
unsigned long aligned_end = round_up(__pa(__init_end),
SWAPPER_BLOCK_SIZE);
create_mapping(__pa(__init_end), (unsigned long)__init_end,
aligned_end - __pa(__init_end),
PAGE_KERNEL, false);
}
#endif
}
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
create_mapping_late(__pa(_stext), (unsigned long)_stext,
(unsigned long)_etext - (unsigned long)_stext,
PAGE_KERNEL_ROX);
}
#endif
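/*
* Remap the __init region with normal (non-executable) kernel attributes
* once the init code and data are no longer needed.
*/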
void fixup_init(void)
{
create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
(unsigned long)__init_end - (unsigned long)__init_begin,
PAGE_KERNEL);
}
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps and sets up the zero page.
*/
void __init paging_init(void)
{
void *zero_page;
map_mem();
fixup_executable();
dma_contiguous_remap();
remap_pages();
/*
* Finally flush the caches and tlb to ensure that we're in a
* consistent state.
*/
flush_tlb_all();
/* allocate the zero page. */
zero_page = early_alloc(PAGE_SIZE);
bootmem_init();
empty_zero_page = virt_to_page(zero_page);
/* Ensure the zero page is visible to the page table walker */
dsb(ishst);
/*
* TTBR0 is only used for the identity mapping at this stage. Make it
* point to zero page to avoid speculatively fetching new entries.
*/
cpu_set_reserved_ttbr0();
local_flush_tlb_all();
cpu_set_default_tcr_t0sz();
flush_tlb_all();
set_kernel_text_ro();
flush_tlb_all();
}
/*
* Check whether a kernel address is valid (derived from arch/x86/).
*/
int kern_addr_valid(unsigned long addr)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
if ((((long)addr) >> VA_BITS) != -1UL)
return 0;
pgd = pgd_offset_k(addr);
if (pgd_none(*pgd))
return 0;
pud = pud_offset(pgd, addr);
if (pud_none(*pud))
return 0;
if (pud_sect(*pud))
return pfn_valid(pud_pfn(*pud));
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
return 0;
if (pmd_sect(*pmd))
return pfn_valid(pmd_pfn(*pmd));
pte = pte_offset_kernel(pmd, addr);
if (pte_none(*pte))
return 0;
return pfn_valid(pte_pfn(*pte));
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
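/*
* Populate the vmemmap (the virtually contiguous struct page array).
* Without section maps this uses base pages; with section maps each 2M
* chunk is backed by a block allocated from the requested node.
*/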
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
return vmemmap_populate_basepages(start, end, node);
}
#else /* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
unsigned long addr = start;
unsigned long next;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
do {
next = pmd_addr_end(addr, end);
pgd = vmemmap_pgd_populate(addr, node);
if (!pgd)
return -ENOMEM;
pud = vmemmap_pud_populate(pgd, addr, node);
if (!pud)
return -ENOMEM;
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd)) {
void *p = NULL;
p = vmemmap_alloc_block_buf(PMD_SIZE, node);
if (!p)
return -ENOMEM;
set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
} else
vmemmap_verify((pte_t *)pmd, node, addr, next);
} while (addr = next, addr != end);
return 0;
}
#endif /* !ARM64_SWAPPER_USES_SECTION_MAPS */
void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif
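/*
* The fixmap is backed by the statically allocated bm_pud/bm_pmd/bm_pte
* tables so it can be brought up before any memory allocator exists.
* These helpers walk init_mm's page tables down to the fixmap slots.
*/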
static inline pud_t * fixmap_pud(unsigned long addr)
{
pgd_t *pgd = pgd_offset_k(addr);
BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
return pud_offset(pgd, addr);
}
static inline pmd_t * fixmap_pmd(unsigned long addr)
{
pud_t *pud = fixmap_pud(addr);
BUG_ON(pud_none(*pud) || pud_bad(*pud));
return pmd_offset(pud, addr);
}
static inline pte_t * fixmap_pte(unsigned long addr)
{
pmd_t *pmd = fixmap_pmd(addr);
BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));
return pte_offset_kernel(pmd, addr);
}
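/*
* Hook the static fixmap tables into swapper_pg_dir so the fixmap can be
* used before paging_init(), and check that the boot-time ioremap slots
* all fall within the single pmd covered by bm_pte.
*/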
void __init early_fixmap_init(void)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
unsigned long addr = FIXADDR_START;
pgd = pgd_offset_k(addr);
pgd_populate(&init_mm, pgd, bm_pud);
pud = pud_offset(pgd, addr);
pud_populate(&init_mm, pud, bm_pmd);
pmd = pmd_offset(pud, addr);
pmd_populate_kernel(&init_mm, pmd, bm_pte);
/*
* The boot-ioremap range spans multiple pmds, for which
* we are not prepared:
*/
BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
!= (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
|| pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
WARN_ON(1);
pr_warn("pmd %p != %p, %p\n",
pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
fix_to_virt(FIX_BTMAP_BEGIN));
pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
fix_to_virt(FIX_BTMAP_END));
pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
}
}
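/*
* Install or clear a single fixmap entry; clearing also flushes the TLB
* for the affected page.
*/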
void __set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags)
{
unsigned long addr = __fix_to_virt(idx);
pte_t *pte;
BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
pte = fixmap_pte(addr);
if (pgprot_val(flags)) {
set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
} else {
pte_clear(&init_mm, addr, pte);
flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
}
}
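/*
* Map the flattened device tree through the FIX_FDT fixmap slot so it can
* be parsed before the linear mapping exists. Returns the virtual address
* of the FDT, or NULL if the blob is missing, misaligned or too large.
*/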
void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
pgprot_t prot = PAGE_KERNEL_RO;
int size, offset;
void *dt_virt;
/*
* Check whether the physical FDT address is set and meets the minimum
* alignment requirement. We rely on MIN_FDT_ALIGN being at least 8 bytes
* so that we can always access the size field of the FDT header after
* mapping the first chunk; double-check that here.
*/
BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
return NULL;
/*
* Make sure that the FDT region can be mapped without the need to
* allocate additional translation table pages, so that it is safe
* to call create_mapping() this early.
*
* On 64k pages, the FDT will be mapped using PTEs, so we need to
* be in the same PMD as the rest of the fixmap.
* On 4k pages, we'll use section mappings for the FDT so we only
* have to be in the same PUD.
*/
BUILD_BUG_ON(dt_virt_base % SZ_2M);
BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
__fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);
offset = dt_phys % SWAPPER_BLOCK_SIZE;
dt_virt = (void *)dt_virt_base + offset;
/* map the first chunk so we can read the size from the header */
create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
SWAPPER_BLOCK_SIZE, prot, false);
if (fdt_check_header(dt_virt) != 0)
return NULL;
size = fdt_totalsize(dt_virt);
if (size > MAX_FDT_SIZE)
return NULL;
if (offset + size > SWAPPER_BLOCK_SIZE)
create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
round_up(offset + size, SWAPPER_BLOCK_SIZE), prot, false);
memblock_reserve(dt_phys, size);
return dt_virt;
}