android_kernel_oneplus_msm8998/mm/compaction.c
Runmin Wang 617229a3e9 Merge remote-tracking branch 'msm-4.4/tmp-510d0a3f' into msm-4.4
* msm-4.4/tmp-510d0a3f:
  Linux 4.4.11
  nf_conntrack: avoid kernel pointer value leak in slab name
  drm/radeon: fix DP link training issue with second 4K monitor
  drm/i915/bdw: Add missing delay during L3 SQC credit programming
  drm/i915: Bail out of pipe config compute loop on LPT
  drm/radeon: fix PLL sharing on DCE6.1 (v2)
  Revert "[media] videobuf2-v4l2: Verify planes array in buffer dequeueing"
  Input: max8997-haptic - fix NULL pointer dereference
  get_rock_ridge_filename(): handle malformed NM entries
  tools lib traceevent: Do not reassign parg after collapse_tree()
  qla1280: Don't allocate 512kb of host tags
  atomic_open(): fix the handling of create_error
  regulator: axp20x: Fix axp22x ldo_io voltage ranges
  regulator: s2mps11: Fix invalid selector mask and voltages for buck9
  workqueue: fix rebind bound workers warning
  ARM: dts: at91: sam9x5: Fix the memory range assigned to the PMC
  vfs: rename: check backing inode being equal
  vfs: add vfs_select_inode() helper
  perf/core: Disable the event on a truncated AUX record
  regmap: spmi: Fix regmap_spmi_ext_read in multi-byte case
  pinctrl: at91-pio4: fix pull-up/down logic
  spi: spi-ti-qspi: Handle truncated frames properly
  spi: spi-ti-qspi: Fix FLEN and WLEN settings if bits_per_word is overridden
  spi: pxa2xx: Do not detect number of enabled chip selects on Intel SPT
  ALSA: hda - Fix broken reconfig
  ALSA: hda - Fix white noise on Asus UX501VW headset
  ALSA: hda - Fix subwoofer pin on ASUS N751 and N551
  ALSA: usb-audio: Yet another Phoneix Audio device quirk
  ALSA: usb-audio: Quirk for yet another Phoenix Audio devices (v2)
  crypto: testmgr - Use kmalloc memory for RSA input
  crypto: hash - Fix page length clamping in hash walk
  crypto: qat - fix invalid pf2vf_resp_wq logic
  s390/mm: fix asce_bits handling with dynamic pagetable levels
  zsmalloc: fix zs_can_compact() integer overflow
  ocfs2: fix posix_acl_create deadlock
  ocfs2: revert using ocfs2_acl_chmod to avoid inode cluster lock hang
  net/route: enforce hoplimit max value
  tcp: refresh skb timestamp at retransmit time
  net: thunderx: avoid exposing kernel stack
  net: fix a kernel infoleak in x25 module
  uapi glibc compat: fix compile errors when glibc net/if.h included before linux/if.h
  bridge: fix igmp / mld query parsing
  net: bridge: fix old ioctl unlocked net device walk
  VSOCK: do not disconnect socket when peer has shutdown SEND only
  net/mlx4_en: Fix endianness bug in IPV6 csum calculation
  net: fix infoleak in rtnetlink
  net: fix infoleak in llc
  net: fec: only clear a queue's work bit if the queue was emptied
  netem: Segment GSO packets on enqueue
  sch_dsmark: update backlog as well
  sch_htb: update backlog as well
  net_sched: update hierarchical backlog too
  net_sched: introduce qdisc_replace() helper
  gre: do not pull header in ICMP error processing
  net: Implement net_dbg_ratelimited() for CONFIG_DYNAMIC_DEBUG case
  samples/bpf: fix trace_output example
  bpf: fix check_map_func_compatibility logic
  bpf: fix refcnt overflow
  bpf: fix double-fdput in replace_map_fd_with_map_ptr()
  net/mlx4_en: fix spurious timestamping callbacks
  ipv4/fib: don't warn when primary address is missing if in_dev is dead
  net/mlx5e: Fix minimum MTU
  net/mlx5e: Device's mtu field is u16 and not int
  openvswitch: use flow protocol when recalculating ipv6 checksums
  atl2: Disable unimplemented scatter/gather feature
  vlan: pull on __vlan_insert_tag error path and fix csum correction
  net: use skb_postpush_rcsum instead of own implementations
  cdc_mbim: apply "NDP to end" quirk to all Huawei devices
  bpf/verifier: reject invalid LD_ABS | BPF_DW instruction
  net: sched: do not requeue a NULL skb
  packet: fix heap info leak in PACKET_DIAG_MCLIST sock_diag interface
  route: do not cache fib route info on local routes with oif
  decnet: Do not build routes to devices without decnet private data.
  parisc: Use generic extable search and sort routines
  arm64: kasan: Use actual memory node when populating the kernel image shadow
  arm64: mm: treat memstart_addr as a signed quantity
  arm64: lse: deal with clobbered IP registers after branch via PLT
  arm64: mm: check at build time that PAGE_OFFSET divides the VA space evenly
  arm64: kasan: Fix zero shadow mapping overriding kernel image shadow
  arm64: consistently use p?d_set_huge
  arm64: fix KASLR boot-time I-cache maintenance
  arm64: hugetlb: partial revert of 66b3923a1a0f
  arm64: make irq_stack_ptr more robust
  arm64: efi: invoke EFI_RNG_PROTOCOL to supply KASLR randomness
  efi: stub: use high allocation for converted command line
  efi: stub: add implementation of efi_random_alloc()
  efi: stub: implement efi_get_random_bytes() based on EFI_RNG_PROTOCOL
  arm64: kaslr: randomize the linear region
  arm64: add support for kernel ASLR
  arm64: add support for building vmlinux as a relocatable PIE binary
  arm64: switch to relative exception tables
  extable: add support for relative extables to search and sort routines
  scripts/sortextable: add support for ET_DYN binaries
  arm64: futex.h: Add missing PAN toggling
  arm64: make asm/elf.h available to asm files
  arm64: avoid dynamic relocations in early boot code
  arm64: avoid R_AARCH64_ABS64 relocations for Image header fields
  arm64: add support for module PLTs
  arm64: move brk immediate argument definitions to separate header
  arm64: mm: use bit ops rather than arithmetic in pa/va translations
  arm64: mm: only perform memstart_addr sanity check if DEBUG_VM
  arm64: User die() instead of panic() in do_page_fault()
  arm64: allow kernel Image to be loaded anywhere in physical memory
  arm64: defer __va translation of initrd_start and initrd_end
  arm64: move kernel image to base of vmalloc area
  arm64: kvm: deal with kernel symbols outside of linear mapping
  arm64: decouple early fixmap init from linear mapping
  arm64: pgtable: implement static [pte|pmd|pud]_offset variants
  arm64: introduce KIMAGE_VADDR as the virtual base of the kernel region
  arm64: add support for ioremap() block mappings
  arm64: prevent potential circular header dependencies in asm/bug.h
  of/fdt: factor out assignment of initrd_start/initrd_end
  of/fdt: make memblock minimum physical address arch configurable
  arm64: Remove the get_thread_info() function
  arm64: kernel: Don't toggle PAN on systems with UAO
  arm64: cpufeature: Test 'matches' pointer to find the end of the list
  arm64: kernel: Add support for User Access Override
  arm64: add ARMv8.2 id_aa64mmfr2 boiler plate
  arm64: cpufeature: Change read_cpuid() to use sysreg's mrs_s macro
  arm64: use local label prefixes for __reg_num symbols
  arm64: vdso: Mark vDSO code as read-only
  arm64: ubsan: select ARCH_HAS_UBSAN_SANITIZE_ALL
  arm64: ptdump: Indicate whether memory should be faulting
  arm64: Add support for ARCH_SUPPORTS_DEBUG_PAGEALLOC
  arm64: Drop alloc function from create_mapping
  arm64: prefetch: add missing #include for spin_lock_prefetch
  arm64: lib: patch in prfm for copy_page if requested
  arm64: lib: improve copy_page to deal with 128 bytes at a time
  arm64: prefetch: add alternative pattern for CPUs without a prefetcher
  arm64: prefetch: don't provide spin_lock_prefetch with LSE
  arm64: allow vmalloc regions to be set with set_memory_*
  arm64: kernel: implement ACPI parking protocol
  arm64: mm: create new fine-grained mappings at boot
  arm64: ensure _stext and _etext are page-aligned
  arm64: mm: allow passing a pgdir to alloc_init_*
  arm64: mm: allocate pagetables anywhere
  arm64: mm: use fixmap when creating page tables
  arm64: mm: add functions to walk tables in fixmap
  arm64: mm: add __{pud,pgd}_populate
  arm64: mm: avoid redundant __pa(__va(x))
  arm64: mm: add functions to walk page tables by PA
  arm64: mm: move pte_* macros
  arm64: kasan: avoid TLB conflicts
  arm64: mm: add code to safely replace TTBR1_EL1
  arm64: add function to install the idmap
  arm64: unmap idmap earlier
  arm64: unify idmap removal
  arm64: mm: place empty_zero_page in bss
  arm64: mm: specialise pagetable allocators
  asm-generic: Fix local variable shadow in __set_fixmap_offset
  Eliminate the .eh_frame sections from the aarch64 vmlinux and kernel modules
  arm64: Fix an enum typo in mm/dump.c
  arm64: kasan: ensure that the KASAN zero page is mapped read-only
  arch/arm64/include/asm/pgtable.h: add pmd_mkclean for THP
  arm64: hide __efistub_ aliases from kallsyms
  Linux 4.4.10
  drm/i915/skl: Fix DMC load on Skylake J0 and K0
  lib/test-string_helpers.c: fix and improve string_get_size() tests
  ACPI / processor: Request native thermal interrupt handling via _OSC
  drm/i915: Fake HDMI live status
  drm/i915: Make RPS EI/thresholds multiple of 25 on SNB-BDW
  drm/i915: Fix eDP low vswing for Broadwell
  drm/i915/ddi: Fix eDP VDD handling during booting and suspend/resume
  drm/radeon: make sure vertical front porch is at least 1
  iio: ak8975: fix maybe-uninitialized warning
  iio: ak8975: Fix NULL pointer exception on early interrupt
  drm/amdgpu: set metadata pointer to NULL after freeing.
  drm/amdgpu: make sure vertical front porch is at least 1
  gpu: ipu-v3: Fix imx-ipuv3-crtc module autoloading
  nvmem: mxs-ocotp: fix buffer overflow in read
  USB: serial: cp210x: add Straizona Focusers device ids
  USB: serial: cp210x: add ID for Link ECU
  ata: ahci-platform: Add ports-implemented DT bindings.
  libahci: save port map for forced port map
  powerpc: Fix bad inline asm constraint in create_zero_mask()
  ACPICA: Dispatcher: Update thread ID for recursive method calls
  x86/sysfb_efi: Fix valid BAR address range check
  ARC: Add missing io barriers to io{read,write}{16,32}be()
  ARM: cpuidle: Pass on arm_cpuidle_suspend()'s return value
  propogate_mnt: Handle the first propogated copy being a slave
  fs/pnode.c: treat zero mnt_group_id-s as unequal
  x86/tsc: Read all ratio bits from MSR_PLATFORM_INFO
  MAINTAINERS: Remove asterisk from EFI directory names
  writeback: Fix performance regression in wb_over_bg_thresh()
  batman-adv: Reduce refcnt of removed router when updating route
  batman-adv: Fix broadcast/ogm queue limit on a removed interface
  batman-adv: Check skb size before using encapsulated ETH+VLAN header
  batman-adv: fix DAT candidate selection (must use vid)
  mm: update min_free_kbytes from khugepaged after core initialization
  proc: prevent accessing /proc/<PID>/environ until it's ready
  Input: zforce_ts - fix dual touch recognition
  HID: Fix boot delay for Creative SB Omni Surround 5.1 with quirk
  HID: wacom: Add support for DTK-1651
  xen/evtchn: fix ring resize when binding new events
  xen/balloon: Fix crash when ballooning on x86 32 bit PAE
  xen: Fix page <-> pfn conversion on 32 bit systems
  ARM: SoCFPGA: Fix secondary CPU startup in thumb2 kernel
  ARM: EXYNOS: Properly skip unitialized parent clock in power domain on
  mm/zswap: provide unique zpool name
  mm, cma: prevent nr_isolated_* counters from going negative
  Minimal fix-up of bad hashing behavior of hash_64()
  MD: make bio mergeable
  tracing: Don't display trigger file for events that can't be enabled
  mac80211: fix statistics leak if dev_alloc_name() fails
  ath9k: ar5008_hw_cmn_spur_mitigate: add missing mask_m & mask_p initialisation
  lpfc: fix misleading indentation
  clk: qcom: msm8960: Fix ce3_src register offset
  clk: versatile: sp810: support reentrance
  clk: qcom: msm8960: fix ce3_core clk enable register
  clk: meson: Fix meson_clk_register_clks() signature type mismatch
  clk: rockchip: free memory in error cases when registering clock branches
  soc: rockchip: power-domain: fix err handle while probing
  clk-divider: make sure read-only dividers do not write to their register
  CNS3xxx: Fix PCI cns3xxx_write_config()
  mwifiex: fix corner case association failure
  ata: ahci_xgene: dereferencing uninitialized pointer in probe
  nbd: ratelimit error msgs after socket close
  mfd: intel-lpss: Remove clock tree on error path
  ipvs: drop first packet to redirect conntrack
  ipvs: correct initial offset of Call-ID header search in SIP persistence engine
  ipvs: handle ip_vs_fill_iph_skb_off failure
  RDMA/iw_cxgb4: Fix bar2 virt addr calculation for T4 chips
  Revert: "powerpc/tm: Check for already reclaimed tasks"
  arm64: head.S: use memset to clear BSS
  efi: stub: define DISABLE_BRANCH_PROFILING for all architectures
  arm64: entry: remove pointless SPSR mode check
  arm64: mm: move pgd_cache initialisation to pgtable_cache_init
  arm64: module: avoid undefined shift behavior in reloc_data()
  arm64: module: fix relocation of movz instruction with negative immediate
  arm64: traps: address fallout from printk -> pr_* conversion
  arm64: ftrace: fix a stack tracer's output under function graph tracer
  arm64: pass a task parameter to unwind_frame()
  arm64: ftrace: modify a stack frame in a safe way
  arm64: remove irq_count and do_softirq_own_stack()
  arm64: hugetlb: add support for PTE contiguous bit
  arm64: Use PoU cache instr for I/D coherency
  arm64: Defer dcache flush in __cpu_copy_user_page
  arm64: reduce stack use in irq_handler
  arm64: Documentation: add list of software workarounds for errata
  arm64: mm: place __cpu_setup in .text
  arm64: cmpxchg: Don't incldue linux/mmdebug.h
  arm64: mm: fold alternatives into .init
  arm64: Remove redundant padding from linker script
  arm64: mm: remove pointless PAGE_MASKing
  arm64: don't call C code with el0's fp register
  arm64: when walking onto the task stack, check sp & fp are in current->stack
  arm64: Add this_cpu_ptr() assembler macro for use in entry.S
  arm64: irq: fix walking from irq stack to task stack
  arm64: Add do_softirq_own_stack() and enable irq_stacks
  arm64: Modify stack trace and dump for use with irq_stack
  arm64: Store struct thread_info in sp_el0
  arm64: Add trace_hardirqs_off annotation in ret_to_user
  arm64: ftrace: fix the comments for ftrace_modify_code
  arm64: ftrace: stop using kstop_machine to enable/disable tracing
  arm64: spinlock: serialise spin_unlock_wait against concurrent lockers
  arm64: enable HAVE_IRQ_TIME_ACCOUNTING
  arm64: fix COMPAT_SHMLBA definition for large pages
  arm64: add __init/__initdata section marker to some functions/variables
  arm64: pgtable: implement pte_accessible()
  arm64: mm: allow sections for unaligned bases
  arm64: mm: detect bad __create_mapping uses
  Linux 4.4.9
  extcon: max77843: Use correct size for reading the interrupt register
  stm class: Select CONFIG_SRCU
  megaraid_sas: add missing curly braces in ioctl handler
  sunrpc/cache: drop reference when sunrpc_cache_pipe_upcall() detects a race
  thermal: rockchip: fix a impossible condition caused by the warning
  unbreak allmodconfig KCONFIG_ALLCONFIG=...
  jme: Fix device PM wakeup API usage
  jme: Do not enable NIC WoL functions on S0
  bus: imx-weim: Take the 'status' property value into account
  ARM: dts: pxa: fix dma engine node to pxa3xx-nand
  ARM: dts: armada-375: use armada-370-sata for SATA
  ARM: EXYNOS: select THERMAL_OF
  ARM: prima2: always enable reset controller
  ARM: OMAP3: Add cpuidle parameters table for omap3430
  ext4: fix races of writeback with punch hole and zero range
  ext4: fix races between buffered IO and collapse / insert range
  ext4: move unlocked dio protection from ext4_alloc_file_blocks()
  ext4: fix races between page faults and hole punching
  perf stat: Document --detailed option
  perf tools: handle spaces in file names obtained from /proc/pid/maps
  perf hists browser: Only offer symbol scripting when a symbol is under the cursor
  mtd: nand: Drop mtd.owner requirement in nand_scan
  mtd: brcmnand: Fix v7.1 register offsets
  mtd: spi-nor: remove micron_quad_enable()
  serial: sh-sci: Remove cpufreq notifier to fix crash/deadlock
  ext4: fix NULL pointer dereference in ext4_mark_inode_dirty()
  x86/mm/kmmio: Fix mmiotrace for hugepages
  perf evlist: Reference count the cpu and thread maps at set_maps()
  drivers/misc/ad525x_dpot: AD5274 fix RDAC read back errors
  rtc: max77686: Properly handle regmap_irq_get_virq() error code
  rtc: rx8025: remove rv8803 id
  rtc: ds1685: passing bogus values to irq_restore
  rtc: vr41xx: Wire up alarm_irq_enable
  rtc: hym8563: fix invalid year calculation
  PM / Domains: Fix removal of a subdomain
  PM / OPP: Initialize u_volt_min/max to a valid value
  misc: mic/scif: fix wrap around tests
  misc/bmp085: Enable building as a module
  lib/mpi: Endianness fix
  fbdev: da8xx-fb: fix videomodes of lcd panels
  scsi_dh: force modular build if SCSI is a module
  paride: make 'verbose' parameter an 'int' again
  regulator: s5m8767: fix get_register() error handling
  irqchip/mxs: Fix error check of of_io_request_and_map()
  irqchip/sunxi-nmi: Fix error check of of_io_request_and_map()
  spi/rockchip: Make sure spi clk is on in rockchip_spi_set_cs
  locking/mcs: Fix mcs_spin_lock() ordering
  regulator: core: Fix nested locking of supplies
  regulator: core: Ensure we lock all regulators
  regulator: core: fix regulator_lock_supply regression
  Revert "regulator: core: Fix nested locking of supplies"
  videobuf2-v4l2: Verify planes array in buffer dequeueing
  videobuf2-core: Check user space planes array in dqbuf
  USB: usbip: fix potential out-of-bounds write
  cgroup: make sure a parent css isn't freed before its children
  mm/hwpoison: fix wrong num_poisoned_pages accounting
  mm: vmscan: reclaim highmem zone if buffer_heads is over limit
  numa: fix /proc/<pid>/numa_maps for THP
  mm/huge_memory: replace VM_NO_THP VM_BUG_ON with actual VMA check
  memcg: relocate charge moving from ->attach to ->post_attach
  cgroup, cpuset: replace cpuset_post_attach_flush() with cgroup_subsys->post_attach callback
  slub: clean up code for kmem cgroup support to kmem_cache_free_bulk
  workqueue: fix ghost PENDING flag while doing MQ IO
  x86/apic: Handle zero vector gracefully in clear_vector_irq()
  efi: Expose non-blocking set_variable() wrapper to efivars
  efi: Fix out-of-bounds read in variable_matches()
  IB/security: Restrict use of the write() interface
  IB/mlx5: Expose correct max_sge_rd limit
  cxl: Keep IRQ mappings on context teardown
  v4l2-dv-timings.h: fix polarity for 4k formats
  vb2-memops: Fix over allocation of frame vectors
  ASoC: rt5640: Correct the digital interface data select
  ASoC: dapm: Make sure we have a card when displaying component widgets
  ASoC: ssm4567: Reset device before regcache_sync()
  ASoC: s3c24xx: use const snd_soc_component_driver pointer
  EDAC: i7core, sb_edac: Don't return NOTIFY_BAD from mce_decoder callback
  toshiba_acpi: Fix regression caused by hotkey enabling value
  i2c: exynos5: Fix possible ABBA deadlock by keeping I2C clock prepared
  i2c: cpm: Fix build break due to incompatible pointer types
  perf intel-pt: Fix segfault tracing transactions
  drm/i915: Use fw_domains_put_with_fifo() on HSW
  drm/i915: Fixup the free space logic in ring_prepare
  drm/amdkfd: uninitialized variable in dbgdev_wave_control_set_registers()
  drm/i915: skl_update_scaler() wants a rotation bitmask instead of bit number
  drm/i915: Cleanup phys status page too
  pwm: brcmstb: Fix check of devm_ioremap_resource() return code
  drm/dp/mst: Get validated port ref in drm_dp_update_payload_part1()
  drm/dp/mst: Restore primary hub guid on resume
  drm/dp/mst: Validate port in drm_dp_payload_send_msg()
  drm/nouveau/gr/gf100: select a stream master to fixup tfb offset queries
  drm: Loongson-3 doesn't fully support wc memory
  drm/radeon: fix vertical bars appear on monitor (v2)
  drm/radeon: forbid mapping of userptr bo through radeon device file
  drm/radeon: fix initial connector audio value
  drm/radeon: add a quirk for a XFX R9 270X
  drm/amdgpu: fix regression on CIK (v2)
  amdgpu/uvd: add uvd fw version for amdgpu
  drm/amdgpu: bump the afmt limit for CZ, ST, Polaris
  drm/amdgpu: use defines for CRTCs and AMFT blocks
  drm/amdgpu: when suspending, if uvd/vce was running. need to cancel delay work.
  iommu/dma: Restore scatterlist offsets correctly
  iommu/amd: Fix checking of pci dma aliases
  pinctrl: single: Fix pcs_parse_bits_in_pinctrl_entry to use __ffs than ffs
  pinctrl: mediatek: correct debounce time unit in mtk_gpio_set_debounce
  xen kconfig: don't "select INPUT_XEN_KBDDEV_FRONTEND"
  Input: pmic8xxx-pwrkey - fix algorithm for converting trigger delay
  Input: gtco - fix crash on detecting device without endpoints
  netlink: don't send NETLINK_URELEASE for unbound sockets
  nl80211: check netlink protocol in socket release notification
  powerpc: Update TM user feature bits in scan_features()
  powerpc: Update cpu_user_features2 in scan_features()
  powerpc: scan_features() updates incorrect bits for REAL_LE
  crypto: talitos - fix AEAD tcrypt tests
  crypto: talitos - fix crash in talitos_cra_init()
  crypto: sha1-mb - use corrcet pointer while completing jobs
  crypto: ccp - Prevent information leakage on export
  iwlwifi: mvm: fix memory leak in paging
  iwlwifi: pcie: lower the debug level for RSA semaphore access
  s390/pci: add extra padding to function measurement block
  cpufreq: intel_pstate: Fix processing for turbo activation ratio
  Revert "drm/amdgpu: disable runtime pm on PX laptops without dGPU power control"
  Revert "drm/radeon: disable runtime pm on PX laptops without dGPU power control"
  drm/i915: Fix race condition in intel_dp_destroy_mst_connector()
  drm/qxl: fix cursor position with non-zero hotspot
  drm/nouveau/core: use vzalloc for allocating ramht
  futex: Acknowledge a new waiter in counter before plist
  futex: Handle unlock_pi race gracefully
  asm-generic/futex: Re-enable preemption in futex_atomic_cmpxchg_inatomic()
  ALSA: hda - Add dock support for ThinkPad X260
  ALSA: pcxhr: Fix missing mutex unlock
  ALSA: hda - add PCI ID for Intel Broxton-T
  ALSA: hda - Keep powering up ADCs on Cirrus codecs
  ALSA: hda/realtek - Add ALC3234 headset mode for Optiplex 9020m
  ALSA: hda - Don't trust the reported actual power state
  x86 EDAC, sb_edac.c: Repair damage introduced when "fixing" channel address
  x86/mm/xen: Suppress hugetlbfs in PV guests
  arm64: Update PTE_RDONLY in set_pte_at() for PROT_NONE permission
  arm64: Honour !PTE_WRITE in set_pte_at() for kernel mappings
  sched/cgroup: Fix/cleanup cgroup teardown/init
  dmaengine: pxa_dma: fix the maximum requestor line
  dmaengine: hsu: correct use of channel status register
  dmaengine: dw: fix master selection
  debugfs: Make automount point inodes permanently empty
  lib: lz4: fixed zram with lz4 on big endian machines
  dm cache metadata: fix cmd_read_lock() acquiring write lock
  dm cache metadata: fix READ_LOCK macros and cleanup WRITE_LOCK macros
  usb: gadget: f_fs: Fix use-after-free
  usb: hcd: out of bounds access in for_each_companion
  xhci: fix 10 second timeout on removal of PCI hotpluggable xhci controllers
  usb: xhci: fix wild pointers in xhci_mem_cleanup
  xhci: resume USB 3 roothub first
  usb: xhci: applying XHCI_PME_STUCK_QUIRK to Intel BXT B0 host
  assoc_array: don't call compare_object() on a node
  ARM: OMAP2+: hwmod: Fix updating of sysconfig register
  ARM: OMAP2: Fix up interconnect barrier initialization for DRA7
  ARM: mvebu: Correct unit address for linksys
  ARM: dts: AM43x-epos: Fix clk parent for synctimer
  KVM: arm/arm64: Handle forward time correction gracefully
  kvm: x86: do not leak guest xcr0 into host interrupt handlers
  x86/mce: Avoid using object after free in genpool
  block: loop: fix filesystem corruption in case of aio/dio
  block: partition: initialize percpuref before sending out KOBJ_ADD

Conflicts:
	arch/arm64/Kconfig
	arch/arm64/include/asm/cputype.h
	arch/arm64/include/asm/hardirq.h
	arch/arm64/include/asm/irq.h
	arch/arm64/include/asm/mmu_context.h
	arch/arm64/kernel/cpu_errata.c
	arch/arm64/kernel/cpuinfo.c
	arch/arm64/kernel/setup.c
	arch/arm64/kernel/smp.c
	arch/arm64/kernel/stacktrace.c
	arch/arm64/mm/init.c
	arch/arm64/mm/mmu.c
	arch/arm64/mm/pageattr.c
	mm/memcontrol.c

CRs-Fixed: 1069136
Signed-off-by: Bryan Huntsman <bryanh@codeaurora.org>
Signed-off-by: Runmin Wang <runminw@codeaurora.org>
Change-Id: Ie9a16debd0578331a66947376f3b787a7bb54d65
2016-10-21 18:00:55 -07:00


/*
* linux/mm/compaction.c
*
* Memory compaction for the reduction of external fragmentation. Note that
* this heavily depends upon page migration to do all the real heavy
* lifting
*
* Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
*/
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include "internal.h"
#ifdef CONFIG_COMPACTION
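/*
* Thin wrappers around count_vm_event()/count_vm_events() for compaction
* statistics. The #else branch below turns them into no-ops when
* CONFIG_COMPACTION is disabled, so callers need no ifdefs of their own.
*/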
static inline void count_compact_event(enum vm_event_item item)
{
count_vm_event(item);
}
static inline void count_compact_events(enum vm_event_item item, long delta)
{
count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>
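/*
* Return isolated free pages to the buddy allocator and report the highest
* PFN that was freed from this freelist.
*/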
static unsigned long release_freepages(struct list_head *freelist)
{
struct page *page, *next;
unsigned long high_pfn = 0;
list_for_each_entry_safe(page, next, freelist, lru) {
unsigned long pfn = page_to_pfn(page);
list_del(&page->lru);
__free_page(page);
if (pfn > high_pfn)
high_pfn = pfn;
}
return high_pfn;
}
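/*
* Prepare freshly split order-0 pages for use as migration targets: mark
* them allocated for KASAN and the architecture, and map them back into
* the kernel page tables (relevant with CONFIG_DEBUG_PAGEALLOC).
*/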
static void map_pages(struct list_head *list)
{
struct page *page;
list_for_each_entry(page, list, lru) {
kasan_alloc_pages(page, 0);
arch_alloc_page(page, 0);
kernel_map_pages(page, 1, 1);
}
}
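/* MOVABLE and CMA pageblocks are suitable targets for async compaction */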
static inline bool migrate_async_suitable(int migratetype)
{
return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}
/*
* Check that the whole (or subset of) a pageblock given by the interval of
* [start_pfn, end_pfn) is valid and within the same zone, before scanning it
* with the migrate and free compaction scanners. The scanners then need to
* use only pfn_valid_within() check for arches that allow holes within
* pageblocks.
*
* Return struct page pointer of start_pfn, or NULL if checks were not passed.
*
* It's possible on some configurations to have a setup like node0 node1 node0
* i.e. it's possible that all pages within a zone's range of pages do not
* belong to a single zone. We assume that a border between node0 and node1
* can occur within a single pageblock, but not a node0 node1 node0
* interleaving within a single pageblock. It is therefore sufficient to check
* the first and last page of a pageblock and avoid checking each individual
* page in a pageblock.
*/
static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
unsigned long end_pfn, struct zone *zone)
{
struct page *start_page;
struct page *end_page;
/* end_pfn is one past the range we are checking */
end_pfn--;
if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
return NULL;
start_page = pfn_to_page(start_pfn);
if (page_zone(start_page) != zone)
return NULL;
end_page = pfn_to_page(end_pfn);
/* This gives shorter code than deriving page_zone(end_page) */
if (page_zone_id(start_page) != page_zone_id(end_page))
return NULL;
return start_page;
}
#ifdef CONFIG_COMPACTION
/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6
/*
* Compaction is deferred when compaction fails to result in a page
* allocation success. 1 << compact_defer_shift compactions are skipped up
* to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
*/
void defer_compaction(struct zone *zone, int order)
{
zone->compact_considered = 0;
zone->compact_defer_shift++;
if (order < zone->compact_order_failed)
zone->compact_order_failed = order;
if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
trace_mm_compaction_defer_compaction(zone, order);
}
/* Returns true if compaction should be skipped this time */
bool compaction_deferred(struct zone *zone, int order)
{
unsigned long defer_limit = 1UL << zone->compact_defer_shift;
if (order < zone->compact_order_failed)
return false;
/* Avoid possible overflow */
if (++zone->compact_considered > defer_limit)
zone->compact_considered = defer_limit;
if (zone->compact_considered >= defer_limit)
return false;
trace_mm_compaction_deferred(zone, order);
return true;
}
/*
* Update defer tracking counters after successful compaction of given order,
* which means an allocation either succeeded (alloc_success == true) or is
* expected to succeed.
*/
void compaction_defer_reset(struct zone *zone, int order,
bool alloc_success)
{
if (alloc_success) {
zone->compact_considered = 0;
zone->compact_defer_shift = 0;
}
if (order >= zone->compact_order_failed)
zone->compact_order_failed = order + 1;
trace_mm_compaction_defer_reset(zone, order);
}
/* Returns true if restarting compaction after many failures */
bool compaction_restarting(struct zone *zone, int order)
{
if (order < zone->compact_order_failed)
return false;
return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
zone->compact_considered >= 1UL << zone->compact_defer_shift;
}
/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
struct page *page)
{
if (cc->ignore_skip_hint)
return true;
return !get_pageblock_skip(page);
}
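/*
* Reset the zone's cached scanner positions: migrate scanner pfns back to
* the start of the zone and the free scanner pfn to the end of the zone.
*/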
static void reset_cached_positions(struct zone *zone)
{
zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
zone->compact_cached_free_pfn = zone_end_pfn(zone);
}
/*
* This function is called to clear all cached information on pageblocks that
* should be skipped for page isolation when the migrate and free page scanner
* meet.
*/
static void __reset_isolation_suitable(struct zone *zone)
{
unsigned long start_pfn = zone->zone_start_pfn;
unsigned long end_pfn = zone_end_pfn(zone);
unsigned long pfn;
zone->compact_blockskip_flush = false;
/* Walk the zone and mark every pageblock as suitable for isolation */
for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
struct page *page;
cond_resched();
if (!pfn_valid(pfn))
continue;
page = pfn_to_page(pfn);
if (zone != page_zone(page))
continue;
clear_pageblock_skip(page);
}
reset_cached_positions(zone);
}
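/*
* Clear the pageblock skip hints of every populated zone in the node, but
* only for zones where a recently finished full compaction requested it.
*/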
void reset_isolation_suitable(pg_data_t *pgdat)
{
int zoneid;
for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
struct zone *zone = &pgdat->node_zones[zoneid];
if (!populated_zone(zone))
continue;
/* Only flush if a full compaction finished recently */
if (zone->compact_blockskip_flush)
__reset_isolation_suitable(zone);
}
}
/*
* If no pages were isolated then mark this pageblock to be skipped in the
* future. The information is later cleared by __reset_isolation_suitable().
*/
static void update_pageblock_skip(struct compact_control *cc,
struct page *page, unsigned long nr_isolated,
bool migrate_scanner)
{
struct zone *zone = cc->zone;
unsigned long pfn;
if (cc->ignore_skip_hint)
return;
if (!page)
return;
if (nr_isolated)
return;
set_pageblock_skip(page);
pfn = page_to_pfn(page);
/* Update where async and sync compaction should restart */
if (migrate_scanner) {
if (pfn > zone->compact_cached_migrate_pfn[0])
zone->compact_cached_migrate_pfn[0] = pfn;
if (cc->mode != MIGRATE_ASYNC &&
pfn > zone->compact_cached_migrate_pfn[1])
zone->compact_cached_migrate_pfn[1] = pfn;
} else {
if (pfn < zone->compact_cached_free_pfn)
zone->compact_cached_free_pfn = pfn;
}
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
struct page *page)
{
return true;
}
static void update_pageblock_skip(struct compact_control *cc,
struct page *page, unsigned long nr_isolated,
bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */
/*
* Compaction requires the taking of some coarse locks that are potentially
* very heavily contended. For async compaction, back out if the lock cannot
* be taken immediately. For sync compaction, spin on the lock if needed.
*
* Returns true if the lock is held
* Returns false if the lock is not held and compaction should abort
*/
static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
struct compact_control *cc)
{
if (cc->mode == MIGRATE_ASYNC) {
if (!spin_trylock_irqsave(lock, *flags)) {
cc->contended = COMPACT_CONTENDED_LOCK;
return false;
}
} else {
spin_lock_irqsave(lock, *flags);
}
return true;
}
/*
* Compaction requires the taking of some coarse locks that are potentially
* very heavily contended. The lock should be periodically unlocked to avoid
* having disabled IRQs for a long time, even when there is nobody waiting on
* the lock. It might also be that allowing the IRQs will result in
* need_resched() becoming true. If scheduling is needed, async compaction
* aborts. Sync compaction schedules.
* Either compaction type will also abort if a fatal signal is pending.
* In either case if the lock was locked, it is dropped and not regained.
*
* Returns true if compaction should abort due to fatal signal pending, or
* async compaction due to need_resched()
* Returns false when compaction can continue (sync compaction might have
* scheduled)
*/
static bool compact_unlock_should_abort(spinlock_t *lock,
unsigned long flags, bool *locked, struct compact_control *cc)
{
if (*locked) {
spin_unlock_irqrestore(lock, flags);
*locked = false;
}
if (fatal_signal_pending(current)) {
cc->contended = COMPACT_CONTENDED_SCHED;
return true;
}
if (need_resched()) {
if (cc->mode == MIGRATE_ASYNC) {
cc->contended = COMPACT_CONTENDED_SCHED;
return true;
}
cond_resched();
}
return false;
}
/*
* Aside from avoiding lock contention, compaction also periodically checks
* need_resched() and either schedules in sync compaction or aborts async
* compaction. This is similar to what compact_unlock_should_abort() does, but
* is used where no lock is concerned.
*
* Returns false when no scheduling was needed, or sync compaction scheduled.
* Returns true when async compaction should abort.
*/
static inline bool compact_should_abort(struct compact_control *cc)
{
/* async compaction aborts if contended */
if (need_resched()) {
if (cc->mode == MIGRATE_ASYNC) {
cc->contended = COMPACT_CONTENDED_SCHED;
return true;
}
cond_resched();
}
return false;
}
/*
* Isolate free pages onto a private freelist. If @strict is true, will abort
* returning 0 on any invalid PFNs or non-free pages inside of the pageblock
* (even though it may still end up isolating some pages).
*/
static unsigned long isolate_freepages_block(struct compact_control *cc,
unsigned long *start_pfn,
unsigned long end_pfn,
struct list_head *freelist,
bool strict)
{
int nr_scanned = 0, total_isolated = 0;
struct page *cursor, *valid_page = NULL;
unsigned long flags = 0;
bool locked = false;
unsigned long blockpfn = *start_pfn;
cursor = pfn_to_page(blockpfn);
/* Isolate free pages. */
for (; blockpfn < end_pfn; blockpfn++, cursor++) {
int isolated, i;
struct page *page = cursor;
/*
* Periodically drop the lock (if held) regardless of its
* contention, to give IRQs a chance. Abort if a fatal signal is
* pending or async compaction detects need_resched().
*/
if (!(blockpfn % SWAP_CLUSTER_MAX)
&& compact_unlock_should_abort(&cc->zone->lock, flags,
&locked, cc))
break;
nr_scanned++;
if (!pfn_valid_within(blockpfn))
goto isolate_fail;
if (!valid_page)
valid_page = page;
/*
* For compound pages such as THP and hugetlbfs, we can save
* potentially a lot of iterations if we skip them at once.
* The check is racy, but we can consider only valid values
* and the only danger is skipping too much.
*/
if (PageCompound(page)) {
unsigned int comp_order = compound_order(page);
if (likely(comp_order < MAX_ORDER)) {
blockpfn += (1UL << comp_order) - 1;
cursor += (1UL << comp_order) - 1;
}
goto isolate_fail;
}
if (!PageBuddy(page))
goto isolate_fail;
/*
* If we already hold the lock, we can skip some rechecking.
* Note that if we hold the lock now, checked_pageblock was
* already set in some previous iteration (or strict is true),
* so it is correct to skip the suitable migration target
* recheck as well.
*/
if (!locked) {
/*
* The zone lock must be held to isolate freepages.
* Unfortunately this is a very coarse lock and can be
* heavily contended if there are parallel allocations
* or parallel compactions. For async compaction do not
* spin on the lock and we acquire the lock as late as
* possible.
*/
locked = compact_trylock_irqsave(&cc->zone->lock,
&flags, cc);
if (!locked)
break;
/* Recheck this is a buddy page under lock */
if (!PageBuddy(page))
goto isolate_fail;
}
/* Found a free page, break it into order-0 pages */
isolated = split_free_page(page);
total_isolated += isolated;
for (i = 0; i < isolated; i++) {
list_add(&page->lru, freelist);
page++;
}
/* If a page was split, advance to the end of it */
if (isolated) {
cc->nr_freepages += isolated;
if (!strict &&
cc->nr_migratepages <= cc->nr_freepages) {
blockpfn += isolated;
break;
}
blockpfn += isolated - 1;
cursor += isolated - 1;
continue;
}
isolate_fail:
if (strict)
break;
else
continue;
}
/*
* There is a tiny chance that we have read bogus compound_order(),
* so be careful to not go outside of the pageblock.
*/
if (unlikely(blockpfn > end_pfn))
blockpfn = end_pfn;
trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
nr_scanned, total_isolated);
/* Record how far we have got within the block */
*start_pfn = blockpfn;
/*
* If strict isolation is requested by CMA then check that all the
* pages requested were isolated. If there were any failures, 0 is
* returned and CMA will fail.
*/
if (strict && blockpfn < end_pfn)
total_isolated = 0;
if (locked)
spin_unlock_irqrestore(&cc->zone->lock, flags);
/* Update the pageblock-skip if the whole pageblock was scanned */
if (blockpfn == end_pfn)
update_pageblock_skip(cc, valid_page, total_isolated, false);
count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
if (total_isolated)
count_compact_events(COMPACTISOLATED, total_isolated);
return total_isolated;
}
/**
* isolate_freepages_range() - isolate free pages.
* @start_pfn: The first PFN to start isolating.
* @end_pfn: The one-past-last PFN.
*
* Non-free pages, invalid PFNs, or zone boundaries within the
* [start_pfn, end_pfn) range are considered errors and cause the function to
* undo its actions and return zero.
*
* Otherwise, the function returns the one-past-the-last PFN of the isolated
* pages (which may be greater than end_pfn if the end fell in the middle of
* a free page).
*/
unsigned long
isolate_freepages_range(struct compact_control *cc,
unsigned long start_pfn, unsigned long end_pfn)
{
unsigned long isolated, pfn, block_end_pfn;
LIST_HEAD(freelist);
pfn = start_pfn;
block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
for (; pfn < end_pfn; pfn += isolated,
block_end_pfn += pageblock_nr_pages) {
/* Protect pfn from changing by isolate_freepages_block */
unsigned long isolate_start_pfn = pfn;
block_end_pfn = min(block_end_pfn, end_pfn);
/*
* pfn could pass the block_end_pfn if isolated freepage
* is more than pageblock order. In this case, we adjust
* scanning range to right one.
*/
if (pfn >= block_end_pfn) {
block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
block_end_pfn = min(block_end_pfn, end_pfn);
}
if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
break;
isolated = isolate_freepages_block(cc, &isolate_start_pfn,
block_end_pfn, &freelist, true);
/*
* In strict mode, isolate_freepages_block() returns 0 if
* there are any holes in the block (ie. invalid PFNs or
* non-free pages).
*/
if (!isolated)
break;
/*
* If we managed to isolate pages, it is always (1 << n) *
* pageblock_nr_pages for some non-negative n. (Max order
* page may span two pageblocks).
*/
}
/* split_free_page does not map the pages */
map_pages(&freelist);
if (pfn < end_pfn) {
/* Loop terminated early, cleanup. */
release_freepages(&freelist);
return 0;
}
/* We don't use freelists for anything. */
return pfn;
}
/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
struct page *page;
unsigned int count[2] = { 0, };
if (list_empty(&cc->migratepages))
return;
list_for_each_entry(page, &cc->migratepages, lru)
count[!!page_is_file_cache(page)]++;
mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
}
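/*
* Too many pages isolated from the LRU? Compares isolated pages against
* half of the zone's active plus inactive pages. With safe != 0 the
* slower but more accurate zone_page_state_snapshot() counters are used.
*/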
static bool __too_many_isolated(struct zone *zone, int safe)
{
unsigned long active, inactive, isolated;
if (safe) {
inactive = zone_page_state_snapshot(zone, NR_INACTIVE_FILE) +
zone_page_state_snapshot(zone, NR_INACTIVE_ANON);
active = zone_page_state_snapshot(zone, NR_ACTIVE_FILE) +
zone_page_state_snapshot(zone, NR_ACTIVE_ANON);
isolated = zone_page_state_snapshot(zone, NR_ISOLATED_FILE) +
zone_page_state_snapshot(zone, NR_ISOLATED_ANON);
} else {
inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
zone_page_state(zone, NR_INACTIVE_ANON);
active = zone_page_state(zone, NR_ACTIVE_FILE) +
zone_page_state(zone, NR_ACTIVE_ANON);
isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
zone_page_state(zone, NR_ISOLATED_ANON);
}
return isolated > (inactive + active) / 2;
}
/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct compact_control *cc)
{
/*
* __too_many_isolated(safe=0) is fast but inaccurate, because it
* doesn't account for the vm_stat_diff[] counters. So if it looks
* like too_many_isolated() is about to return true, fall back to the
* slower, more accurate zone_page_state_snapshot().
*/
if (unlikely(__too_many_isolated(cc->zone, 0))) {
if (cc->mode != MIGRATE_ASYNC)
return __too_many_isolated(cc->zone, 1);
}
return false;
}
/**
* isolate_migratepages_block() - isolate all migrate-able pages within
* a single pageblock
* @cc: Compaction control structure.
* @low_pfn: The first PFN to isolate
* @end_pfn: The one-past-the-last PFN to isolate, within same pageblock
* @isolate_mode: Isolation mode to be used.
*
* Isolate all pages that can be migrated from the range specified by
* [low_pfn, end_pfn). The range is expected to be within same pageblock.
* Returns zero if there is a fatal signal pending, otherwise the PFN of the
* first page that was not scanned (which may be less than, equal to, or more
* than end_pfn).
*
* The pages are isolated on cc->migratepages list (not required to be empty),
* and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
* is neither read nor updated.
*/
static unsigned long
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
unsigned long end_pfn, isolate_mode_t isolate_mode)
{
struct zone *zone = cc->zone;
unsigned long nr_scanned = 0, nr_isolated = 0;
struct list_head *migratelist = &cc->migratepages;
struct lruvec *lruvec;
unsigned long flags = 0;
bool locked = false;
struct page *page = NULL, *valid_page = NULL;
unsigned long start_pfn = low_pfn;
/*
* Ensure that there are not too many pages isolated from the LRU
* list by either parallel reclaimers or compaction. If there are,
* delay for some time until fewer pages are isolated
*/
while (unlikely(too_many_isolated(cc))) {
/* async migration should just abort */
if (cc->mode == MIGRATE_ASYNC)
return 0;
congestion_wait(BLK_RW_ASYNC, HZ/10);
if (fatal_signal_pending(current))
return 0;
}
if (compact_should_abort(cc))
return 0;
/* Time to isolate some pages for migration */
for (; low_pfn < end_pfn; low_pfn++) {
bool is_lru;
/*
* Periodically drop the lock (if held) regardless of its
* contention, to give IRQs a chance. Abort async compaction
* if contended.
*/
if (!(low_pfn % SWAP_CLUSTER_MAX)
&& compact_unlock_should_abort(&zone->lru_lock, flags,
&locked, cc))
break;
if (!pfn_valid_within(low_pfn))
continue;
nr_scanned++;
page = pfn_to_page(low_pfn);
if (!valid_page)
valid_page = page;
/*
* Skip if free. We read page order here without zone lock
* which is generally unsafe, but the race window is small and
* the worst thing that can happen is that we skip some
* potential isolation targets.
*/
if (PageBuddy(page)) {
unsigned long freepage_order = page_order_unsafe(page);
/*
* Without lock, we cannot be sure that what we got is
* a valid page order. Consider only values in the
* valid order range to prevent low_pfn overflow.
*/
if (freepage_order > 0 && freepage_order < MAX_ORDER)
low_pfn += (1UL << freepage_order) - 1;
continue;
}
/*
* Check may be lockless but that's ok as we recheck later.
* It's possible to migrate LRU pages and balloon pages
* Skip any other type of page
*/
is_lru = PageLRU(page);
if (!is_lru) {
if (unlikely(balloon_page_movable(page))) {
if (balloon_page_isolate(page)) {
/* Successfully isolated */
goto isolate_success;
}
}
}
/*
* Regardless of being on LRU, compound pages such as THP and
* hugetlbfs are not to be compacted. We can potentially save
* a lot of iterations if we skip them at once. The check is
* racy, but we can consider only valid values and the only
* danger is skipping too much.
*/
if (PageCompound(page)) {
unsigned int comp_order = compound_order(page);
if (likely(comp_order < MAX_ORDER))
low_pfn += (1UL << comp_order) - 1;
continue;
}
if (!is_lru)
continue;
/*
* Migration will fail if an anonymous page is pinned in memory,
* so avoid taking lru_lock and isolating it unnecessarily in an
* admittedly racy check.
*/
if (!page_mapping(page) &&
page_count(page) > page_mapcount(page))
continue;
/* If we already hold the lock, we can skip some rechecking */
if (!locked) {
locked = compact_trylock_irqsave(&zone->lru_lock,
&flags, cc);
if (!locked)
break;
/* Recheck PageLRU and PageCompound under lock */
if (!PageLRU(page))
continue;
/*
* Page became compound since the non-locked check,
* and it's on LRU. It can only be a THP so the order
* is safe to read and it's 0 for tail pages.
*/
if (unlikely(PageCompound(page))) {
low_pfn += (1UL << compound_order(page)) - 1;
continue;
}
}
lruvec = mem_cgroup_page_lruvec(page, zone);
/* Try isolate the page */
if (__isolate_lru_page(page, isolate_mode) != 0)
continue;
VM_BUG_ON_PAGE(PageCompound(page), page);
/* Successfully isolated */
del_page_from_lru_list(page, lruvec, page_lru(page));
isolate_success:
list_add(&page->lru, migratelist);
cc->nr_migratepages++;
nr_isolated++;
/* Avoid isolating too much */
if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
++low_pfn;
break;
}
}
/*
* The PageBuddy() check could have potentially brought us outside
* the range to be scanned.
*/
if (unlikely(low_pfn > end_pfn))
low_pfn = end_pfn;
if (locked)
spin_unlock_irqrestore(&zone->lru_lock, flags);
/*
* Update the pageblock-skip information and cached scanner pfn,
* if the whole pageblock was scanned without isolating any page.
*/
if (low_pfn == end_pfn)
update_pageblock_skip(cc, valid_page, nr_isolated, true);
trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
nr_scanned, nr_isolated);
count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
if (nr_isolated)
count_compact_events(COMPACTISOLATED, nr_isolated);
return low_pfn;
}
/**
* isolate_migratepages_range() - isolate migrate-able pages in a PFN range
* @cc: Compaction control structure.
* @start_pfn: The first PFN to start isolating.
* @end_pfn: The one-past-last PFN.
*
* Returns zero if isolation fails fatally due to e.g. pending signal.
* Otherwise, the function returns the one-past-the-last PFN of the isolated
* pages (which may be greater than end_pfn if the end fell in the middle of
* a THP page).
*/
unsigned long
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
unsigned long end_pfn)
{
unsigned long pfn, block_end_pfn;
/* Scan block by block. First and last block may be incomplete */
pfn = start_pfn;
block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
for (; pfn < end_pfn; pfn = block_end_pfn,
block_end_pfn += pageblock_nr_pages) {
block_end_pfn = min(block_end_pfn, end_pfn);
if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
continue;
pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
ISOLATE_UNEVICTABLE);
if (!pfn)
break;
if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
break;
}
acct_isolated(cc->zone, cc);
return pfn;
}
#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
/* If the page is a large free page, then disallow migration */
if (PageBuddy(page)) {
/*
* We are checking page_order without zone->lock taken. But
* the only small danger is that we skip a potentially suitable
* pageblock, so it is not worth checking that the order is within the
* valid range.
*/
if (page_order_unsafe(page) >= pageblock_order)
return false;
}
/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
if (migrate_async_suitable(get_pageblock_migratetype(page)))
return true;
/* Otherwise skip the block */
return false;
}
/*
* Test whether the free scanner has reached the same or lower pageblock than
* the migration scanner, and compaction should thus terminate.
*/
static inline bool compact_scanners_met(struct compact_control *cc)
{
return (cc->free_pfn >> pageblock_order)
<= (cc->migrate_pfn >> pageblock_order);
}
/*
* Based on information in the current compact_control, find blocks
* suitable for isolating free pages from and then isolate them.
*/
static void isolate_freepages(struct compact_control *cc)
{
struct zone *zone = cc->zone;
struct page *page;
unsigned long block_start_pfn; /* start of current pageblock */
unsigned long isolate_start_pfn; /* exact pfn we start at */
unsigned long block_end_pfn; /* end of current pageblock */
unsigned long low_pfn; /* lowest pfn scanner is able to scan */
struct list_head *freelist = &cc->freepages;
/*
* Initialise the free scanner. The starting point is where we last
* successfully isolated from, zone-cached value, or the end of the
* zone when isolating for the first time. For looping we also need
* this pfn aligned down to the pageblock boundary, because we do
* block_start_pfn -= pageblock_nr_pages in the for loop.
* For the ending point, take care when isolating in the last pageblock of
* a zone which ends in the middle of a pageblock.
* The low boundary is the end of the pageblock the migration scanner
* is using.
*/
isolate_start_pfn = cc->free_pfn;
block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
zone_end_pfn(zone));
low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
/*
* Isolate free pages until enough are available to migrate the
* pages on cc->migratepages. We stop searching if the migrate
* and free page scanners meet or enough free pages are isolated.
*/
for (; block_start_pfn >= low_pfn;
block_end_pfn = block_start_pfn,
block_start_pfn -= pageblock_nr_pages,
isolate_start_pfn = block_start_pfn) {
/*
* This can iterate a massively long zone without finding any
* suitable migration targets, so periodically check if we need
* to schedule, or even abort async compaction.
*/
if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
&& compact_should_abort(cc))
break;
page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
zone);
if (!page)
continue;
/* Check the block is suitable for migration */
if (!suitable_migration_target(page))
continue;
/* If isolation recently failed, do not retry */
if (!isolation_suitable(cc, page))
continue;
/* Found a block suitable for isolating free pages from. */
isolate_freepages_block(cc, &isolate_start_pfn,
block_end_pfn, freelist, false);
/*
* If we isolated enough freepages, or aborted due to async
* compaction being contended, terminate the loop.
* Remember where the free scanner should restart next time,
* which is where isolate_freepages_block() left off.
* But if it scanned the whole pageblock, isolate_start_pfn
* now points at block_end_pfn, which is the start of the next
* pageblock.
* In that case we will however want to restart at the start
* of the previous pageblock.
*/
if ((cc->nr_freepages >= cc->nr_migratepages)
|| cc->contended) {
if (isolate_start_pfn >= block_end_pfn)
isolate_start_pfn =
block_start_pfn - pageblock_nr_pages;
break;
} else {
/*
* isolate_freepages_block() should not terminate
* prematurely unless contended, or isolated enough
*/
VM_BUG_ON(isolate_start_pfn < block_end_pfn);
}
}
/* split_free_page does not map the pages */
map_pages(freelist);
/*
* Record where the free scanner will restart next time. Either we
* broke from the loop and set isolate_start_pfn based on the last
* call to isolate_freepages_block(), or we met the migration scanner
* and the loop terminated due to isolate_start_pfn < low_pfn
*/
cc->free_pfn = isolate_start_pfn;
}
/*
* This is a migrate-callback that "allocates" freepages by taking pages
* from the isolated freelists in the block we are migrating to.
*/
static struct page *compaction_alloc(struct page *migratepage,
unsigned long data,
int **result)
{
struct compact_control *cc = (struct compact_control *)data;
struct page *freepage;
/*
* Isolate free pages if necessary, and if we are not aborting due to
* contention.
*/
if (list_empty(&cc->freepages)) {
if (!cc->contended)
isolate_freepages(cc);
if (list_empty(&cc->freepages))
return NULL;
}
freepage = list_entry(cc->freepages.next, struct page, lru);
list_del(&freepage->lru);
cc->nr_freepages--;
return freepage;
}
/*
* This is a migrate-callback that "frees" freepages back to the isolated
* freelist. All pages on the freelist are from the same zone, so there is no
* special handling needed for NUMA.
*/
static void compaction_free(struct page *page, unsigned long data)
{
struct compact_control *cc = (struct compact_control *)data;
list_add(&page->lru, &cc->freepages);
cc->nr_freepages++;
}
/* possible outcome of isolate_migratepages */
typedef enum {
ISOLATE_ABORT, /* Abort compaction now */
ISOLATE_NONE, /* No pages isolated, continue scanning */
ISOLATE_SUCCESS, /* Pages isolated, migrate */
} isolate_migrate_t;
/*
* Allow userspace to control policy on scanning the unevictable LRU for
* compactable pages.
*/
int sysctl_compact_unevictable_allowed __read_mostly = 1;
/*
* Isolate all pages that can be migrated from the first suitable block,
* starting at the block pointed to by the migrate scanner pfn within
* compact_control.
*/
static isolate_migrate_t isolate_migratepages(struct zone *zone,
struct compact_control *cc)
{
unsigned long low_pfn, end_pfn;
unsigned long isolate_start_pfn;
struct page *page;
const isolate_mode_t isolate_mode =
(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
(cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0);
/*
* Start at where we last stopped, or beginning of the zone as
* initialized by compact_zone()
*/
low_pfn = cc->migrate_pfn;
/* Only scan within a pageblock boundary */
end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);
/*
* Iterate over whole pageblocks until we find the first suitable.
* Do not cross the free scanner.
*/
for (; end_pfn <= cc->free_pfn;
low_pfn = end_pfn, end_pfn += pageblock_nr_pages) {
/*
* This can potentially iterate a massively long zone with
* many pageblocks unsuitable, so periodically check if we
* need to schedule, or even abort async compaction.
*/
if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
&& compact_should_abort(cc))
break;
page = pageblock_pfn_to_page(low_pfn, end_pfn, zone);
if (!page)
continue;
/* If isolation recently failed, do not retry */
if (!isolation_suitable(cc, page))
continue;
/*
* For async compaction, also only scan in MOVABLE blocks.
* Async compaction is optimistic to see if the minimum amount
* of work satisfies the allocation.
*/
if (cc->mode == MIGRATE_ASYNC &&
!migrate_async_suitable(get_pageblock_migratetype(page)))
continue;
/* Perform the isolation */
isolate_start_pfn = low_pfn;
low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn,
isolate_mode);
if (!low_pfn || cc->contended) {
acct_isolated(zone, cc);
return ISOLATE_ABORT;
}
/*
* Record where we could have freed pages by migration and not
* yet flushed them to the buddy allocator.
* - this is the lowest page that could have been isolated and
* then freed by migration.
*/
if (cc->nr_migratepages && !cc->last_migrated_pfn)
cc->last_migrated_pfn = isolate_start_pfn;
/*
* Either we isolated something and will proceed with migration, or we
* failed and compact_zone() should decide whether to continue or not.
*/
break;
}
acct_isolated(zone, cc);
/* Record where migration scanner will be restarted. */
cc->migrate_pfn = low_pfn;
return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
}
/*
* order == -1 is expected when compacting via
* /proc/sys/vm/compact_memory
*/
static inline bool is_via_compact_memory(int order)
{
return order == -1;
}
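/*
* Decide whether this compaction run is done: contended, complete because
* the scanners met, continuing because watermarks are not yet met, partial
* because a free page of the requested order is now available, or no
* suitable page was produced.
*/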
static int __compact_finished(struct zone *zone, struct compact_control *cc,
const int migratetype)
{
unsigned int order;
unsigned long watermark;
if (cc->contended || fatal_signal_pending(current))
return COMPACT_CONTENDED;
/* Compaction run completes if the migrate and free scanner meet */
if (compact_scanners_met(cc)) {
/* Let the next compaction start anew. */
reset_cached_positions(zone);
		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kswapd does not set the
		 * flag itself as the decision to clear it should be based
		 * directly on an allocation request.
		 */
if (!current_is_kswapd())
zone->compact_blockskip_flush = true;
return COMPACT_COMPLETE;
}
if (is_via_compact_memory(cc->order))
return COMPACT_CONTINUE;
/* Compaction run is not finished if the watermark is not met */
watermark = low_wmark_pages(zone);
if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx,
cc->alloc_flags))
return COMPACT_CONTINUE;
/* Direct compactor: Is a suitable page free? */
for (order = cc->order; order < MAX_ORDER; order++) {
struct free_area *area = &zone->free_area[order];
bool can_steal;
/* Job done if page is free of the right migratetype */
if (!list_empty(&area->free_list[migratetype]))
return COMPACT_PARTIAL;
#ifdef CONFIG_CMA
/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
if (migratetype == MIGRATE_MOVABLE &&
!list_empty(&area->free_list[MIGRATE_CMA]))
return COMPACT_PARTIAL;
#endif
/*
* Job done if allocation would steal freepages from
* other migratetype buddy lists.
*/
if (find_suitable_fallback(area, order, migratetype,
true, &can_steal) != -1)
return COMPACT_PARTIAL;
}
return COMPACT_NO_SUITABLE_PAGE;
}
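
/*
 * Wrapper that traces the result and folds the internal
 * COMPACT_NO_SUITABLE_PAGE value back into COMPACT_CONTINUE for callers.
 */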
static int compact_finished(struct zone *zone, struct compact_control *cc,
const int migratetype)
{
int ret;
ret = __compact_finished(zone, cc, migratetype);
trace_mm_compaction_finished(zone, cc->order, ret);
if (ret == COMPACT_NO_SUITABLE_PAGE)
ret = COMPACT_CONTINUE;
return ret;
}
/*
 * compaction_suitable: Is this zone suitable to run compaction on right now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
static unsigned long __compaction_suitable(struct zone *zone, int order,
int alloc_flags, int classzone_idx)
{
int fragindex;
unsigned long watermark;
if (is_via_compact_memory(order))
return COMPACT_CONTINUE;
watermark = low_wmark_pages(zone);
/*
* If watermarks for high-order allocation are already met, there
* should be no need for compaction at all.
*/
if (zone_watermark_ok(zone, order, watermark, classzone_idx,
alloc_flags))
return COMPACT_PARTIAL;
	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL:
	 * during migration, copies of pages need to be allocated, so for a
	 * short time the memory footprint is higher.
	 */
watermark += (2UL << order);
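	/* e.g. an order-3 request adds 2UL << 3 == 16 pages of headroom */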
if (!zone_watermark_ok(zone, 0, watermark, classzone_idx, alloc_flags))
return COMPACT_SKIPPED;
/*
* fragmentation index determines if allocation failures are due to
* low memory or external fragmentation
*
* index of -1000 would imply allocations might succeed depending on
* watermarks, but we already failed the high-order watermark check
* index towards 0 implies failure is due to lack of memory
* index towards 1000 implies failure is due to fragmentation
*
* Only compact if a failure would be due to fragmentation.
*/
fragindex = fragmentation_index(zone, order);
if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
return COMPACT_NOT_SUITABLE_ZONE;
return COMPACT_CONTINUE;
}
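
/*
 * As with compact_finished(): trace the result of __compaction_suitable() and
 * report the internal COMPACT_NOT_SUITABLE_ZONE value as COMPACT_SKIPPED.
 */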
unsigned long compaction_suitable(struct zone *zone, int order,
int alloc_flags, int classzone_idx)
{
unsigned long ret;
ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx);
trace_mm_compaction_suitable(zone, order, ret);
if (ret == COMPACT_NOT_SUITABLE_ZONE)
ret = COMPACT_SKIPPED;
return ret;
}
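
/*
 * Main per-zone compaction loop: check suitability, position the scanners
 * from the cached pfns, then repeatedly isolate a pageblock's worth of
 * migratable pages and migrate them to free pages taken from the free
 * scanner, until compact_finished() says we are done or we hit contention.
 */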
static int compact_zone(struct zone *zone, struct compact_control *cc)
{
int ret;
unsigned long start_pfn = zone->zone_start_pfn;
unsigned long end_pfn = zone_end_pfn(zone);
const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
const bool sync = cc->mode != MIGRATE_ASYNC;
ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
cc->classzone_idx);
switch (ret) {
case COMPACT_PARTIAL:
case COMPACT_SKIPPED:
/* Compaction is likely to fail */
return ret;
case COMPACT_CONTINUE:
/* Fall through to compaction */
;
}
/*
* Clear pageblock skip if there were failures recently and compaction
* is about to be retried after being deferred. kswapd does not do
* this reset as it'll reset the cached information when going to sleep.
*/
if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
__reset_isolation_suitable(zone);
	/*
	 * Set up to move all movable pages to the end of the zone. Use cached
	 * information on where the scanners should start, but check that it
	 * is initialised by ensuring the values are within zone boundaries.
	 */
cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
cc->free_pfn = zone->compact_cached_free_pfn;
if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
zone->compact_cached_free_pfn = cc->free_pfn;
}
if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
cc->migrate_pfn = start_pfn;
zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
}
cc->last_migrated_pfn = 0;
trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
cc->free_pfn, end_pfn, sync);
migrate_prep_local();
while ((ret = compact_finished(zone, cc, migratetype)) ==
COMPACT_CONTINUE) {
int err;
switch (isolate_migratepages(zone, cc)) {
case ISOLATE_ABORT:
ret = COMPACT_CONTENDED;
putback_movable_pages(&cc->migratepages);
cc->nr_migratepages = 0;
goto out;
case ISOLATE_NONE:
			/*
			 * We haven't isolated and migrated anything, but
			 * there might still be unflushed migrations from
			 * the previous cc->order aligned block.
			 */
goto check_drain;
case ISOLATE_SUCCESS:
;
}
err = migrate_pages(&cc->migratepages, compaction_alloc,
compaction_free, (unsigned long)cc, cc->mode,
MR_COMPACTION);
trace_mm_compaction_migratepages(cc->nr_migratepages, err,
&cc->migratepages);
/* All pages were either migrated or will be released */
cc->nr_migratepages = 0;
if (err) {
putback_movable_pages(&cc->migratepages);
/*
* migrate_pages() may return -ENOMEM when scanners meet
* and we want compact_finished() to detect it
*/
if (err == -ENOMEM && !compact_scanners_met(cc)) {
ret = COMPACT_CONTENDED;
goto out;
}
}
check_drain:
		/*
		 * Has the migration scanner moved past the previous
		 * cc->order aligned block we migrated from? If so, flush the
		 * pages that were freed, so that they can merge and
		 * compact_finished() can detect immediately if allocation
		 * would succeed.
		 */
if (cc->order > 0 && cc->last_migrated_pfn) {
int cpu;
unsigned long current_block_start =
cc->migrate_pfn & ~((1UL << cc->order) - 1);
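			/*
			 * current_block_start rounds migrate_pfn down to the
			 * start of its cc->order aligned block; e.g. for an
			 * order-9 request the mask gives 512-pfn alignment
			 * (illustrative value).
			 */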
if (cc->last_migrated_pfn < current_block_start) {
cpu = get_cpu();
lru_add_drain_cpu(cpu);
drain_local_pages(zone);
put_cpu();
/* No more flushing until we migrate again */
cc->last_migrated_pfn = 0;
}
}
}
out:
/*
* Release free pages and update where the free scanner should restart,
* so we don't leave any returned pages behind in the next attempt.
*/
if (cc->nr_freepages > 0) {
unsigned long free_pfn = release_freepages(&cc->freepages);
cc->nr_freepages = 0;
VM_BUG_ON(free_pfn == 0);
/* The cached pfn is always the first in a pageblock */
free_pfn &= ~(pageblock_nr_pages-1);
		/*
		 * Only go back, not forward. The cached pfn might already
		 * have been reset to the zone end in compact_finished().
		 */
if (free_pfn > zone->compact_cached_free_pfn)
zone->compact_cached_free_pfn = free_pfn;
}
trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
cc->free_pfn, end_pfn, sync, ret);
if (ret == COMPACT_CONTENDED)
ret = COMPACT_PARTIAL;
return ret;
}
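
/*
 * Set up a compact_control for a single zone and run compact_zone() on it;
 * this is the per-zone worker for direct compaction via
 * try_to_compact_pages().
 */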
static unsigned long compact_zone_order(struct zone *zone, int order,
gfp_t gfp_mask, enum migrate_mode mode, int *contended,
int alloc_flags, int classzone_idx)
{
unsigned long ret;
struct compact_control cc = {
.nr_freepages = 0,
.nr_migratepages = 0,
.order = order,
.gfp_mask = gfp_mask,
.zone = zone,
.mode = mode,
.alloc_flags = alloc_flags,
.classzone_idx = classzone_idx,
};
INIT_LIST_HEAD(&cc.freepages);
INIT_LIST_HEAD(&cc.migratepages);
ret = compact_zone(zone, &cc);
VM_BUG_ON(!list_empty(&cc.freepages));
VM_BUG_ON(!list_empty(&cc.migratepages));
*contended = cc.contended;
return ret;
}
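
/*
 * A zone is skipped when its fragmentation index (0..1000) is at or below
 * this threshold; see __compaction_suitable(). Tunable via
 * /proc/sys/vm/extfrag_threshold.
 */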
int sysctl_extfrag_threshold = 500;
/**
* try_to_compact_pages - Direct compact to satisfy a high-order allocation
* @gfp_mask: The GFP mask of the current allocation
* @order: The order of the current allocation
* @alloc_flags: The allocation flags of the current allocation
* @ac: The context of current allocation
* @mode: The migration mode for async, sync light, or sync migration
* @contended: Return value that determines if compaction was aborted due to
* need_resched() or lock contention
*
* This is the main entry point for direct page compaction.
*/
unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
int alloc_flags, const struct alloc_context *ac,
enum migrate_mode mode, int *contended)
{
int may_enter_fs = gfp_mask & __GFP_FS;
int may_perform_io = gfp_mask & __GFP_IO;
struct zoneref *z;
struct zone *zone;
int rc = COMPACT_DEFERRED;
int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */
*contended = COMPACT_CONTENDED_NONE;
/* Check if the GFP flags allow compaction */
if (!order || !may_enter_fs || !may_perform_io)
return COMPACT_SKIPPED;
trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode);
/* Compact each zone in the list */
for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
ac->nodemask) {
int status;
int zone_contended;
if (compaction_deferred(zone, order))
continue;
status = compact_zone_order(zone, order, gfp_mask, mode,
&zone_contended, alloc_flags,
ac->classzone_idx);
rc = max(status, rc);
/*
* It takes at least one zone that wasn't lock contended
* to clear all_zones_contended.
*/
all_zones_contended &= zone_contended;
/* If a normal allocation would succeed, stop compacting */
if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
ac->classzone_idx, alloc_flags)) {
/*
* We think the allocation will succeed in this zone,
* but it is not certain, hence the false. The caller
* will repeat this with true if allocation indeed
* succeeds in this zone.
*/
compaction_defer_reset(zone, order, false);
			/*
			 * It is possible that async compaction aborted due to
			 * need_resched() and the watermarks were ok thanks to
			 * somebody else freeing memory. The allocation can
			 * still fail, however, so we had better signal the
			 * need_resched() contention anyway (this will not
			 * prevent the allocation attempt).
			 */
if (zone_contended == COMPACT_CONTENDED_SCHED)
*contended = COMPACT_CONTENDED_SCHED;
goto break_loop;
}
if (mode != MIGRATE_ASYNC && status == COMPACT_COMPLETE) {
/*
* We think that allocation won't succeed in this zone
* so we defer compaction there. If it ends up
* succeeding after all, it will be reset.
*/
defer_compaction(zone, order);
}
		/*
		 * We might have stopped compacting due to need_resched() in
		 * async compaction, or because a fatal signal was detected.
		 * In that case do not try further zones, and signal
		 * need_resched() contention.
		 */
if ((zone_contended == COMPACT_CONTENDED_SCHED)
|| fatal_signal_pending(current)) {
*contended = COMPACT_CONTENDED_SCHED;
goto break_loop;
}
continue;
break_loop:
/*
* We might not have tried all the zones, so be conservative
* and assume they are not all lock contended.
*/
all_zones_contended = 0;
break;
}
/*
* If at least one zone wasn't deferred or skipped, we report if all
* zones that were tried were lock contended.
*/
if (rc > COMPACT_SKIPPED && all_zones_contended)
*contended = COMPACT_CONTENDED_LOCK;
return rc;
}
/* Compact all zones within a node */
static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
int zoneid;
struct zone *zone;
for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
zone = &pgdat->node_zones[zoneid];
if (!populated_zone(zone))
continue;
cc->nr_freepages = 0;
cc->nr_migratepages = 0;
cc->zone = zone;
INIT_LIST_HEAD(&cc->freepages);
INIT_LIST_HEAD(&cc->migratepages);
/*
* When called via /proc/sys/vm/compact_memory
* this makes sure we compact the whole zone regardless of
* cached scanner positions.
*/
if (is_via_compact_memory(cc->order))
__reset_isolation_suitable(zone);
if (is_via_compact_memory(cc->order) ||
!compaction_deferred(zone, cc->order))
compact_zone(zone, cc);
if (cc->order > 0) {
if (zone_watermark_ok(zone, cc->order,
low_wmark_pages(zone), 0, 0))
compaction_defer_reset(zone, cc->order, false);
}
VM_BUG_ON(!list_empty(&cc->freepages));
VM_BUG_ON(!list_empty(&cc->migratepages));
}
}
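
/*
 * Asynchronous, order-specific compaction of all zones in a node; a no-op
 * for order-0 requests. Typically invoked from kswapd after reclaim.
 */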
void compact_pgdat(pg_data_t *pgdat, int order)
{
struct compact_control cc = {
.order = order,
.mode = MIGRATE_ASYNC,
};
if (!order)
return;
__compact_pgdat(pgdat, &cc);
}
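
/*
 * Fully compact a single node: synchronous, order -1 (compact everything),
 * ignoring pageblock skip hints. Used by the sysctl and sysfs triggers below.
 */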
static void compact_node(int nid)
{
struct compact_control cc = {
.order = -1,
.mode = MIGRATE_SYNC,
.ignore_skip_hint = true,
};
__compact_pgdat(NODE_DATA(nid), &cc);
}
/* Compact all nodes in the system */
static void compact_nodes(void)
{
int nid;
/* Flush pending updates to the LRU lists */
lru_add_drain_all();
for_each_online_node(nid)
compact_node(nid);
}
/* The written value is actually unused; any write compacts all memory */
int sysctl_compact_memory;
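/* e.g. "echo 1 > /proc/sys/vm/compact_memory" compacts every online node */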
/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
if (write)
compact_nodes();
return 0;
}
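
/*
 * Handler for /proc/sys/vm/extfrag_threshold; simply clamps and stores the
 * value via proc_dointvec_minmax().
 */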
int sysctl_extfrag_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
proc_dointvec_minmax(table, write, buffer, length, ppos);
return 0;
}
#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
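/*
 * Per-node sysfs trigger: writing anything to
 * /sys/devices/system/node/nodeN/compact compacts that node (path per the
 * usual node sysfs layout).
 */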
static ssize_t sysfs_compact_node(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int nid = dev->id;
if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
/* Flush pending updates to the LRU lists */
lru_add_drain_all();
compact_node(nid);
}
return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
int compaction_register_node(struct node *node)
{
return device_create_file(&node->dev, &dev_attr_compact);
}
void compaction_unregister_node(struct node *node)
{
return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */
#endif /* CONFIG_COMPACTION */