/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */
#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/slab.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#include "tick-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);
/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
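
/*
 * Wheel geometry, worked through for the !CONFIG_BASE_SMALL case
 * (TVR_BITS = 8, TVN_BITS = 6):
 *
 *	tv1: 256 slots, 1 jiffy each   -> expiries <  2^8 jiffies out
 *	tv2:  64 slots, 2^8 each       -> expiries <  2^14
 *	tv3:  64 slots, 2^14 each      -> expiries <  2^20
 *	tv4:  64 slots, 2^20 each      -> expiries <  2^26
 *	tv5:  64 slots, 2^26 each      -> expiries <= MAX_TVAL = 2^32 - 1
 *
 * At HZ=1000, tv1 therefore covers roughly a quarter of a second and
 * tv5 roughly 49 days; anything further out is clamped to MAX_TVAL in
 * __internal_add_timer() below.
 */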

struct tvec {
	struct hlist_head vec[TVN_SIZE];
};

struct tvec_root {
	struct hlist_head vec[TVR_SIZE];
};

struct tvec_base {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	unsigned long next_timer;
	unsigned long active_timers;
	unsigned long all_timers;
	int cpu;
	bool migration_enabled;
	bool nohz_active;
	struct tvec_root tv1;
	struct tvec tv2;
	struct tvec tv3;
	struct tvec tv4;
	struct tvec tv5;
} ____cacheline_aligned;

static inline void __run_timers(struct tvec_base *base);

static DEFINE_PER_CPU(struct tvec_base, tvec_bases);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
unsigned int sysctl_timer_migration = 1;

struct tvec_base tvec_base_deferrable;

void timers_update_migration(bool update_nohz)
{
	bool on = sysctl_timer_migration && tick_nohz_active;
	unsigned int cpu;

	/* Avoid the loop, if nothing to update */
	if (this_cpu_read(tvec_bases.migration_enabled) == on)
		return;

	for_each_possible_cpu(cpu) {
		per_cpu(tvec_bases.migration_enabled, cpu) = on;
		per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
		if (!update_nohz)
			continue;
		per_cpu(tvec_bases.nohz_active, cpu) = true;
		per_cpu(hrtimer_bases.nohz_active, cpu) = true;
	}
}

int timer_migration_handler(struct ctl_table *table, int write,
			    void __user *buffer, size_t *lenp,
			    loff_t *ppos)
{
	static DEFINE_MUTEX(mutex);
	int ret;

	mutex_lock(&mutex);
	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (!ret && write)
		timers_update_migration(false);
	mutex_unlock(&mutex);
	return ret;
}
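
/*
 * Illustrative note: the handler above backs the kernel.timer_migration
 * sysctl, so timer migration can be toggled at run time, e.g.
 *
 *	echo 0 > /proc/sys/kernel/timer_migration
 *
 * (assuming the usual procfs mount point).
 */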

static inline struct tvec_base *get_target_base(struct tvec_base *base,
						int pinned, u32 timer_flags)
{
	if (!pinned && !(timer_flags & TIMER_PINNED_ON_CPU) &&
	    (timer_flags & TIMER_DEFERRABLE))
		return &tvec_base_deferrable;
	if (pinned || !base->migration_enabled)
		return this_cpu_ptr(&tvec_bases);
	return per_cpu_ptr(&tvec_bases, get_nohz_timer_target());
}

static inline void __run_deferrable_timers(void)
{
	if (smp_processor_id() == tick_do_timer_cpu &&
	    time_after_eq(jiffies, tvec_base_deferrable.timer_jiffies))
		__run_timers(&tvec_base_deferrable);
}

static inline void init_timer_deferrable_global(void)
{
	tvec_base_deferrable.cpu = nr_cpu_ids;
	spin_lock_init(&tvec_base_deferrable.lock);
	tvec_base_deferrable.timer_jiffies = jiffies;
	tvec_base_deferrable.next_timer = tvec_base_deferrable.timer_jiffies;
}

static inline struct tvec_base *get_timer_base(u32 timer_flags)
{
	if (!(timer_flags & TIMER_PINNED_ON_CPU) &&
	    timer_flags & TIMER_DEFERRABLE)
		return &tvec_base_deferrable;
	else
		return per_cpu_ptr(&tvec_bases, timer_flags & TIMER_CPUMASK);
}
#else
static inline struct tvec_base *get_target_base(struct tvec_base *base,
						int pinned, u32 timer_flags)
{
	return this_cpu_ptr(&tvec_bases);
}

static inline void __run_deferrable_timers(void)
{
}

static inline void init_timer_deferrable_global(void)
{
	/*
	 * Initialize the cpu-unbound deferrable timer base only when
	 * CONFIG_SMP is set; a UP kernel handles these timers with the
	 * cpu 0 timer base.
	 */
}

static inline struct tvec_base *get_timer_base(u32 timer_flags)
{
	return per_cpu_ptr(&tvec_bases, timer_flags & TIMER_CPUMASK);
}
#endif
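
/*
 * A minimal usage sketch (illustrative, not part of this file): a
 * driver whose callback only needs to run "eventually" and must not
 * wake an idle CPU can mark the timer deferrable, so it is queued on
 * the unbound base above. poll_timer and poll_fn are hypothetical
 * names:
 *
 *	init_timer_deferrable(&poll_timer);
 *	poll_timer.function = poll_fn;
 *	mod_timer(&poll_timer, jiffies + 10 * HZ);
 */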

static unsigned long round_jiffies_common(unsigned long j, int cpu,
					  bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. These 3 jiffies came originally from the mm/ code,
	 * which already did this.
	 * The skew is done by adding 3*cpunr, then rounding, then
	 * subtracting this extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffy is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	/*
	 * Make sure j is still in the future. Otherwise return the
	 * unmodified value.
	 */
	return time_is_after_jiffies(j) ? j : original;
}
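
/*
 * Worked example (HZ=1000, cpu=2): j = 5120 becomes 5126 after the
 * +3*cpu skew; rem = 126 < HZ/4, so we round down to 5000 and return
 * 5000 - 6 = 4994. CPU 3 doing the same computation lands on 4991,
 * so the two timers still fire on different ticks.
 */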

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);
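
/*
 * A minimal usage sketch (illustrative, gc_timer is a hypothetical
 * name): a once-a-minute housekeeping timer that does not care about
 * its exact firing time can batch its wakeups with other rounded
 * timers:
 *
 *	mod_timer(&gc_timer, round_jiffies(jiffies + 60 * HZ));
 */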

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);

/**
 * set_timer_slack - set the allowed slack for a timer
 * @timer: the timer to be modified
 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 *
 * Set the amount of time, in jiffies, that a certain timer has
 * in terms of slack. By setting this value, the timer subsystem
 * will schedule the actual timer somewhere between
 * the time mod_timer() asks for, and that time plus the slack.
 *
 * By setting the slack to -1, a percentage of the delay is used
 * instead.
 */
void set_timer_slack(struct timer_list *timer, int slack_hz)
{
	timer->slack = slack_hz;
}
EXPORT_SYMBOL_GPL(set_timer_slack);
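
/*
 * Illustrative sketch (my_timer is a hypothetical name): a timeout
 * that may fire up to 100 jiffies late can advertise that, letting
 * the timer subsystem coalesce it with nearby expiries:
 *
 *	set_timer_slack(&my_timer, 100);
 *	mod_timer(&my_timer, jiffies + 10 * HZ);
 */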

static void
__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct hlist_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than MAX_TVAL (on 64-bit
		 * architectures or with CONFIG_BASE_SMALL=1) then we
		 * use the maximum timeout.
		 */
		if (idx > MAX_TVAL) {
			idx = MAX_TVAL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}

	hlist_add_head(&timer->entry, vec);
}
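
/*
 * Worked example (!CONFIG_BASE_SMALL, so TVR_BITS=8, TVN_BITS=6):
 * with base->timer_jiffies = 1000 and expires = 1300, idx = 300,
 * which is >= TVR_SIZE (256) but < 1 << 14, so the timer goes into
 * tv2 at slot (1300 >> 8) & 63 = 5. It is cascaded back into tv1
 * once the wheel has turned close enough for tv1 to resolve it.
 */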

static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	/* Advance base->timer_jiffies, if the base is empty */
	if (!base->all_timers++)
		base->timer_jiffies = jiffies;

	__internal_add_timer(base, timer);
	/*
	 * Update base->active_timers and base->next_timer
	 */
	if (!(timer->flags & TIMER_DEFERRABLE)) {
		if (!base->active_timers++ ||
		    time_before(timer->expires, base->next_timer))
			base->next_timer = timer->expires;
	}

	/*
	 * Check whether the other CPU is in dynticks mode and needs
	 * to be triggered to reevaluate the timer wheel.
	 * We are protected against the other CPU fiddling
	 * with the timer by holding the timer base lock. This also
	 * makes sure that a CPU on the way to stop its tick can not
	 * evaluate the timer wheel.
	 *
	 * Spare the IPI for deferrable timers on idle targets though.
	 * The next busy ticks will take care of it. Except full dynticks
	 * require special care against races with idle_cpu(); let's deal
	 * with that later.
	 */
	if (base->nohz_active) {
		if (!(timer->flags & TIMER_DEFERRABLE) ||
		    tick_nohz_full_cpu(base->cpu))
			wake_up_nohz_cpu(base->cpu);
	}
}

#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	void *site;

	/*
	 * start_site can be concurrently reset by
	 * timer_stats_timer_clear_start_info()
	 */
	site = READ_ONCE(timer->start_site);
	if (likely(!site))
		return;

	timer_stats_update_stats(timer, timer->start_pid, site,
				 timer->function, timer->start_comm,
				 timer->flags);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
|
|
|
|
static struct debug_obj_descr timer_debug_descr;
|
|
|
|
static void *timer_debug_hint(void *addr)
|
|
{
|
|
return ((struct timer_list *) addr)->function;
|
|
}
|
|
|
|
/*
|
|
* fixup_init is called when:
|
|
* - an active object is initialized
|
|
*/
|
|
static int timer_fixup_init(void *addr, enum debug_obj_state state)
|
|
{
|
|
struct timer_list *timer = addr;
|
|
|
|
switch (state) {
|
|
case ODEBUG_STATE_ACTIVE:
|
|
del_timer_sync(timer);
|
|
debug_object_init(timer, &timer_debug_descr);
|
|
return 1;
|
|
default:
|
|
return 0;
|
|
}
|
|
}
|
|
|
|
/* Stub timer callback for improperly used timers. */
|
|
static void stub_timer(unsigned long data)
|
|
{
|
|
WARN_ON(1);
|
|
}
|
|
|
|
/*
|
|
* fixup_activate is called when:
|
|
* - an active object is activated
|
|
* - an unknown object is activated (might be a statically initialized object)
|
|
*/
|
|
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
|
|
{
|
|
struct timer_list *timer = addr;
|
|
|
|
switch (state) {
|
|
|
|
case ODEBUG_STATE_NOTAVAILABLE:
|
|
/*
|
|
* This is not really a fixup. The timer was
|
|
* statically initialized. We just make sure that it
|
|
* is tracked in the object tracker.
|
|
*/
|
|
if (timer->entry.pprev == NULL &&
|
|
timer->entry.next == TIMER_ENTRY_STATIC) {
|
|
debug_object_init(timer, &timer_debug_descr);
|
|
debug_object_activate(timer, &timer_debug_descr);
|
|
return 0;
|
|
} else {
|
|
setup_timer(timer, stub_timer, 0);
|
|
return 1;
|
|
}
|
|
return 0;
|
|
|
|
case ODEBUG_STATE_ACTIVE:
|
|
WARN_ON(1);
|
|
|
|
default:
|
|
return 0;
|
|
}
|
|
}
|
|
|
|
/*
|
|
* fixup_free is called when:
|
|
* - an active object is freed
|
|
*/
|
|
static int timer_fixup_free(void *addr, enum debug_obj_state state)
|
|
{
|
|
struct timer_list *timer = addr;
|
|
|
|
switch (state) {
|
|
case ODEBUG_STATE_ACTIVE:
|
|
del_timer_sync(timer);
|
|
debug_object_free(timer, &timer_debug_descr);
|
|
return 1;
|
|
default:
|
|
return 0;
|
|
}
|
|
}
|
|
|
|
/*
|
|
* fixup_assert_init is called when:
|
|
* - an untracked/uninit-ed object is found
|
|
*/
|
|
static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
|
|
{
|
|
struct timer_list *timer = addr;
|
|
|
|
switch (state) {
|
|
case ODEBUG_STATE_NOTAVAILABLE:
|
|
if (timer->entry.next == TIMER_ENTRY_STATIC) {
|
|
/*
|
|
* This is not really a fixup. The timer was
|
|
* statically initialized. We just make sure that it
|
|
* is tracked in the object tracker.
|
|
*/
|
|
debug_object_init(timer, &timer_debug_descr);
|
|
return 0;
|
|
} else {
|
|
setup_timer(timer, stub_timer, 0);
|
|
return 1;
|
|
}
|
|
default:
|
|
return 0;
|
|
}
|
|
}
|
|
|
|
static struct debug_obj_descr timer_debug_descr = {
|
|
.name = "timer_list",
|
|
.debug_hint = timer_debug_hint,
|
|
.fixup_init = timer_fixup_init,
|
|
.fixup_activate = timer_fixup_activate,
|
|
.fixup_free = timer_fixup_free,
|
|
.fixup_assert_init = timer_fixup_assert_init,
|
|
};
|
|
|
|
static inline void debug_timer_init(struct timer_list *timer)
|
|
{
|
|
debug_object_init(timer, &timer_debug_descr);
|
|
}
|
|
|
|
static inline void debug_timer_activate(struct timer_list *timer)
|
|
{
|
|
debug_object_activate(timer, &timer_debug_descr);
|
|
}
|
|
|
|
static inline void debug_timer_deactivate(struct timer_list *timer)
|
|
{
|
|
debug_object_deactivate(timer, &timer_debug_descr);
|
|
}
|
|
|
|
static inline void debug_timer_free(struct timer_list *timer)
|
|
{
|
|
debug_object_free(timer, &timer_debug_descr);
|
|
}
|
|
|
|
static inline void debug_timer_assert_init(struct timer_list *timer)
|
|
{
|
|
debug_object_assert_init(timer, &timer_debug_descr);
|
|
}
|
|
|
|
static void do_init_timer(struct timer_list *timer, unsigned int flags,
|
|
const char *name, struct lock_class_key *key);
|
|
|
|
void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
|
|
const char *name, struct lock_class_key *key)
|
|
{
|
|
debug_object_init_on_stack(timer, &timer_debug_descr);
|
|
do_init_timer(timer, flags, name, key);
|
|
}
|
|
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
|
|
|
|
void destroy_timer_on_stack(struct timer_list *timer)
|
|
{
|
|
debug_object_free(timer, &timer_debug_descr);
|
|
}
|
|
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
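
/*
 * Illustrative sketch (not compiled into this file, hence the #if 0): the
 * debugobjects hooks above are what catch an on-stack timer that goes out
 * of scope while still pending. The pairing they expect from callers looks
 * like this; my_stack_timer_fn() and wait_with_stack_timer() are made-up
 * placeholder names.
 */
#if 0
static void my_stack_timer_fn(unsigned long data)
{
	complete((struct completion *)data);
}

static void wait_with_stack_timer(struct completion *done)
{
	struct timer_list t;

	/* Use the on-stack variant so debugobjects tracks the object. */
	setup_timer_on_stack(&t, my_stack_timer_fn, (unsigned long)done);
	mod_timer(&t, jiffies + HZ);

	wait_for_completion(done);

	del_timer_sync(&t);
	/* Tell the object tracker the stack slot is about to go away. */
	destroy_timer_on_stack(&t);
}
#endif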

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif

static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
	debug_timer_activate(timer);
	trace_timer_start(timer, expires, timer->flags);
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
	debug_timer_assert_init(timer);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key)
{
	timer->entry.pprev = NULL;
	timer->flags = flags | raw_smp_processor_id();
	timer->slack = -1;
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer, unsigned int flags,
		    const char *name, struct lock_class_key *key)
{
	debug_init(timer);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);
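
/*
 * Illustrative sketch (not compiled into this file): drivers normally reach
 * init_timer_key() through the setup_timer()/init_timer() wrappers. A
 * minimal lifecycle with made-up names (struct my_dev, my_timeout_fn),
 * assuming the timer is embedded in a driver structure:
 */
#if 0
struct my_dev {
	struct timer_list watchdog;
};

static void my_timeout_fn(unsigned long data)
{
	struct my_dev *dev = (struct my_dev *)data;

	pr_warn("my_dev %p: watchdog expired\n", dev);
}

static void my_dev_start(struct my_dev *dev)
{
	setup_timer(&dev->watchdog, my_timeout_fn, (unsigned long)dev);
	mod_timer(&dev->watchdog, jiffies + 5 * HZ);
}

static void my_dev_stop(struct my_dev *dev)
{
	del_timer_sync(&dev->watchdog);
}
#endif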

static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
	struct hlist_node *entry = &timer->entry;

	debug_deactivate(timer);

	__hlist_del(entry);
	if (clear_pending)
		entry->pprev = NULL;
	entry->next = LIST_POISON2;
}

static inline void
detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
{
	detach_timer(timer, true);
	if (!(timer->flags & TIMER_DEFERRABLE))
		base->active_timers--;
	base->all_timers--;
}

static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
			     bool clear_pending)
{
	if (!timer_pending(timer))
		return 0;

	detach_timer(timer, clear_pending);
	if (!(timer->flags & TIMER_DEFERRABLE)) {
		base->active_timers--;
		if (timer->expires == base->next_timer)
			base->next_timer = base->timer_jiffies;
	}
	/* If this was the last timer, advance base->timer_jiffies */
	if (!--base->all_timers)
		base->timer_jiffies = jiffies;
	return 1;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * While a timer is being moved to a new base, the TIMER_MIGRATING flag is
 * set in timer->flags; lock_timer_base() spins until the flag clears and
 * the base pointer can be trusted again.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
					 unsigned long *flags)
	__acquires(timer->base->lock)
{
	for (;;) {
		u32 tf = timer->flags;
		struct tvec_base *base;

		if (!(tf & TIMER_MIGRATING)) {
			base = get_timer_base(tf);
			spin_lock_irqsave(&base->lock, *flags);
			if (timer->flags == tf)
				return base;
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
	    bool pending_only, int pinned)
{
	struct tvec_base *base, *new_base;
	unsigned long flags;
	int ret = 0;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	ret = detach_if_pending(timer, base, false);
	if (!ret && pending_only)
		goto out_unlock;

	debug_activate(timer, expires);

	new_base = get_target_base(base, pinned, timer->flags);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change the timer's base while it is
		 * running, otherwise del_timer_sync() can't detect that
		 * the timer's handler has not finished yet. This also
		 * guarantees that the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer->flags |= TIMER_MIGRATING;

			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			WRITE_ONCE(timer->flags,
				   (timer->flags & ~TIMER_BASEMASK) | base->cpu);
		}
	}

	if (pinned == TIMER_PINNED)
		timer->flags |= TIMER_PINNED_ON_CPU;
	else
		timer->flags &= ~TIMER_PINNED_ON_CPU;
	timer->expires = expires;
	internal_add_timer(base, timer);

out_unlock:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);

/*
 * Decide where to put the timer while taking the slack into account
 *
 * Algorithm:
 * 1) calculate the maximum (absolute) time
 * 2) calculate the highest bit where the expires and new max are different
 * 3) use this bit to make a mask
 * 4) use the bitmask to round down the maximum time, so that all last
 *    bits are zeros
 */
static inline
unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
{
	unsigned long expires_limit, mask;
	int bit;

	if (timer->slack >= 0) {
		expires_limit = expires + timer->slack;
	} else {
		long delta = expires - jiffies;

		if (delta < 256)
			return expires;

		expires_limit = expires + delta / 256;
	}
	mask = expires ^ expires_limit;
	if (mask == 0)
		return expires;

	bit = __fls(mask);

	mask = (1UL << bit) - 1;

	expires_limit = expires_limit & ~(mask);

	return expires_limit;
}
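
/*
 * Worked example for apply_slack(), with the default slack of -1 and
 * illustrative numbers: for jiffies == 0 and expires == 10000, delta is
 * 10000 and expires_limit becomes 10000 + 10000/256 = 10039. Then
 * 10000 ^ 10039 == 39 (0b100111), __fls(39) == 5, so the low five bits
 * are cleared: 10039 & ~31 == 10016. The timer is queued for jiffy 10016,
 * a slot that nearby timeouts round to as well, which lets them expire in
 * one batch instead of in separate ticks.
 */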

/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	expires = apply_slack(timer, expires);

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to have the same timeout then just return:
	 */
	if (timer_pending(timer) && timer->expires == expires)
		return 1;

	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);
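
/*
 * Illustrative sketch (not compiled into this file): a periodic timer is
 * normally built by re-arming from the callback with mod_timer(). The
 * names my_poll_timer and my_poll_fn are placeholders.
 */
#if 0
static struct timer_list my_poll_timer;

static void my_poll_fn(unsigned long data)
{
	/* ... do the periodic work ... */

	/* Re-arm one second out; mod_timer() from the handler is fine. */
	mod_timer(&my_poll_timer, jiffies + HZ);
}
#endif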

/**
 * mod_timer_pinned - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pinned() is a way to update the expires field of an
 * active timer (if the timer is inactive it will be activated)
 * and to ensure that the timer is scheduled on the current CPU.
 *
 * Note that this does not prevent the timer from being migrated
 * when the current CPU goes offline. If this is a problem for
 * you, use CPU-hotplug notifiers to handle it correctly, for
 * example, cancelling the timer when the corresponding CPU goes
 * offline.
 *
 * mod_timer_pinned(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 */
int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
{
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires, false, TIMER_PINNED);
}
EXPORT_SYMBOL(mod_timer_pinned);

/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct tvec_base *new_base = per_cpu_ptr(&tvec_bases, cpu);
	struct tvec_base *base;
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);

	/*
	 * If @timer was on a different CPU, it should be migrated with the
	 * old base locked to prevent other operations proceeding with the
	 * wrong base locked. See lock_timer_base().
	 */
	base = lock_timer_base(timer, &flags);
	if (base != new_base) {
		timer->flags |= TIMER_MIGRATING;

		spin_unlock(&base->lock);
		base = new_base;
		spin_lock(&base->lock);
		WRITE_ONCE(timer->flags,
			   (timer->flags & ~TIMER_BASEMASK) | cpu);
	}

	timer->flags |= TIMER_PINNED_ON_CPU;
	debug_activate(timer, timer->expires);
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);
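
/*
 * Illustrative sketch (not compiled into this file): add_timer_on() is
 * typically used to run housekeeping on one specific CPU. The names
 * my_cpu_timer, my_cpu_fn and my_start_on are placeholders.
 */
#if 0
static DEFINE_PER_CPU(struct timer_list, my_cpu_timer);

static void my_cpu_fn(unsigned long unused)
{
	pr_debug("housekeeping on CPU %d\n", smp_processor_id());
}

static void my_start_on(int cpu)
{
	struct timer_list *t = &per_cpu(my_cpu_timer, cpu);

	setup_timer(t, my_cpu_fn, 0);
	t->expires = jiffies + HZ;	/* must be set before add_timer_on() */
	add_timer_on(t, cpu);
}
#endif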

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = 0;

	debug_assert_init(timer);

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		ret = detach_if_pending(timer, base, true);
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);

/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to delete
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = -1;

	debug_assert_init(timer);

	base = lock_timer_base(timer, &flags);

	if (base->running_timer != timer) {
		timer_stats_timer_clear_start_info(timer);
		ret = detach_if_pending(timer, base, true);
	}
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);

#ifdef CONFIG_SMP
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts unless the timer is an irqsafe one. The caller must
 * not hold locks which would prevent completion of the timer's
 * handler. The timer's handler must not call add_timer_on(). Upon exit the
 * timer is not queued and the handler is not running on any CPU.
 *
 * Note: For !irqsafe timers, you must not hold locks that are held in
 * interrupt context while calling this function. Even if the lock has
 * nothing to do with the timer in question. Here's why:
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                     <SOFTIRQ>
 *                                        call_timer_fn();
 *                                        base->running_timer = mytimer;
 *    spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                        spin_lock(somelock);
 *    del_timer_sync(mytimer);
 *    while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
 * The interrupt on the other CPU is waiting to grab somelock but
 * it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
	unsigned long flags;

	/*
	 * If lockdep gives a backtrace here, please reference
	 * the synchronization rules above.
	 */
	local_irq_save(flags);
	lock_map_acquire(&timer->lockdep_map);
	lock_map_release(&timer->lockdep_map);
	local_irq_restore(flags);
#endif
	/*
	 * Don't use del_timer_sync() in hardirq context, because it
	 * could lead to deadlock.
	 */
	WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL(del_timer_sync);
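
/*
 * Illustrative sketch (not compiled into this file): the synchronization
 * rules above boil down to "stop re-arming first, then del_timer_sync(),
 * and don't hold irq-unsafe locks across the call". A typical teardown,
 * with placeholder names (struct my_dev, ->shutting_down, ->watchdog);
 * the flag/barrier pairing is only sketched here:
 */
#if 0
static void my_dev_teardown(struct my_dev *dev)
{
	/* 1) Prevent the handler from re-arming the timer. */
	dev->shutting_down = true;
	smp_wmb();	/* paired with a read of ->shutting_down in the handler */

	/* 2) Now waiting for a running handler to finish cannot livelock. */
	del_timer_sync(&dev->watchdog);

	/* 3) Only after this point may dev's resources be freed. */
}
#endif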

#endif

static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer;
	struct hlist_node *tmp;
	struct hlist_head tv_list;

	hlist_move_list(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	hlist_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		/* No accounting, while moving them */
		__internal_add_timer(base, timer);
	}

	return index;
}

static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
			  unsigned long data)
{
	int count = preempt_count();

#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the timer from inside the
	 * function that is called from it. This we need to take into
	 * account for lockdep too. To avoid bogus "held lock freed"
	 * warnings as well as problems when looking into
	 * timer->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map;

	lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
#endif
	/*
	 * Couple the lock chain with the lock chain at
	 * del_timer_sync() by acquiring the lock_map around the fn()
	 * call here and in del_timer_sync().
	 */
	lock_map_acquire(&lockdep_map);

	trace_timer_expire_entry(timer);
	fn(data);
	trace_timer_expire_exit(timer);

	lock_map_release(&lockdep_map);

	if (count != preempt_count()) {
		WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
			  fn, count, preempt_count());
		/*
		 * Restore the preempt count. That gives us a decent
		 * chance to survive and extract information. If the
		 * callback kept a lock held, bad luck, but not worse
		 * than the BUG() we had.
		 */
		preempt_count_set(count);
	}
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
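
/*
 * Worked example for INDEX() (illustrative; assumes the default
 * CONFIG_BASE_SMALL=0 geometry, i.e. TVR_BITS == 8 and TVN_BITS == 6):
 * tv1 is indexed by bits 0-7 of timer_jiffies, INDEX(0) extracts bits
 * 8-13 for tv2, INDEX(1) bits 14-19 for tv3, INDEX(2) bits 20-25 for tv4
 * and INDEX(3) bits 26-31 for tv5. For timer_jiffies == 0x12345678,
 * INDEX(0) == (0x12345678 >> 8) & 0x3f == 0x16.
 */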

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);

	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct hlist_head work_list;
		struct hlist_head *head = &work_list;
		int index;

		if (!base->all_timers) {
			base->timer_jiffies = jiffies;
			break;
		}

		index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		hlist_move_list(base->tv1.vec + index, head);
		while (!hlist_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;
			bool irqsafe;

			timer = hlist_entry(head->first, struct timer_list, entry);
			fn = timer->function;
			data = timer->data;
			irqsafe = timer->flags & TIMER_IRQSAFE;

			timer_stats_account_timer(timer);

			base->running_timer = timer;
			detach_expired_timer(timer, base);

			if (irqsafe) {
				spin_unlock(&base->lock);
				call_timer_fn(timer, fn, data);
				spin_lock(&base->lock);
			} else {
				spin_unlock_irq(&base->lock);
				call_timer_fn(timer, fn, data);
				spin_lock_irq(&base->lock);
			}
		}
	}
	base->running_timer = NULL;
	spin_unlock_irq(&base->lock);
}

#ifdef CONFIG_NO_HZ_COMMON
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
	unsigned long timer_jiffies = base->timer_jiffies;
	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
	int index, slot, array, found = 0;
	struct timer_list *nte;
	struct tvec *varray[4];

	/* Look for timer events in tv1. */
	index = slot = timer_jiffies & TVR_MASK;
	do {
		hlist_for_each_entry(nte, base->tv1.vec + slot, entry) {
			if (nte->flags & TIMER_DEFERRABLE)
				continue;

			found = 1;
			expires = nte->expires;
			/* Look at the cascade bucket(s)? */
			if (!index || slot < index)
				goto cascade;
			return expires;
		}
		slot = (slot + 1) & TVR_MASK;
	} while (slot != index);

cascade:
	/* Calculate the next cascade event */
	if (index)
		timer_jiffies += TVR_SIZE - index;
	timer_jiffies >>= TVR_BITS;

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;

	for (array = 0; array < 4; array++) {
		struct tvec *varp = varray[array];

		index = slot = timer_jiffies & TVN_MASK;
		do {
			hlist_for_each_entry(nte, varp->vec + slot, entry) {
				if (nte->flags & TIMER_DEFERRABLE)
					continue;

				found = 1;
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			}
			/*
			 * Are we still searching for the first timer, or
			 * are we looking at the cascade buckets?
			 */
			if (found) {
				/* Look at the cascade bucket(s)? */
				if (!index || slot < index)
					break;
				return expires;
			}
			slot = (slot + 1) & TVN_MASK;
		} while (slot != index);

		if (index)
			timer_jiffies += TVN_SIZE - index;
		timer_jiffies >>= TVN_BITS;
	}
	return expires;
}

/*
 * Check, if the next hrtimer event is before the next timer wheel
 * event:
 */
static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
{
	u64 nextevt = hrtimer_get_next_event();

	/*
	 * If high resolution timers are enabled
	 * hrtimer_get_next_event() returns KTIME_MAX.
	 */
	if (expires <= nextevt)
		return expires;

	/*
	 * If the next timer is already expired, return the tick base
	 * time so the tick is fired immediately.
	 */
	if (nextevt <= basem)
		return basem;

	/*
	 * Round up to the next jiffy. High resolution timers are
	 * off, so the hrtimers are expired in the tick and we need to
	 * make sure that this tick really expires the timer to avoid
	 * a ping pong of the nohz stop code.
	 *
	 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
	 */
	return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
}

/**
 * get_next_timer_interrupt - return the time (clock mono) of the next timer
 * @basej: base time jiffies
 * @basem: base time clock monotonic
 *
 * Returns the tick aligned clock monotonic time of the next pending
 * timer or KTIME_MAX if no timer is pending.
 */
u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
{
	struct tvec_base *base = this_cpu_ptr(&tvec_bases);
	u64 expires = KTIME_MAX;
	unsigned long nextevt;

	/*
	 * Pretend that there is no timer pending if the cpu is offline.
	 * Possible pending timers will be migrated later to an active cpu.
	 */
	if (cpu_is_offline(smp_processor_id()))
		return expires;

	spin_lock(&base->lock);
	if (base->active_timers) {
		if (time_before_eq(base->next_timer, base->timer_jiffies))
			base->next_timer = __next_timer_interrupt(base);
		nextevt = base->next_timer;
		if (time_before_eq(nextevt, basej))
			expires = basem;
		else
			expires = basem + (nextevt - basej) * TICK_NSEC;
	}
	spin_unlock(&base->lock);

	return cmp_next_hrtimer_event(basem, expires);
}
#endif

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process. user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;

	/* Note: this timer irq context must be accounted for as well. */
	account_process_tick(p, user_tick);
	run_local_timers();
	rcu_check_callbacks(user_tick);
#ifdef CONFIG_IRQ_WORK
	if (in_irq())
		irq_work_tick();
#endif
	scheduler_tick();
	run_posix_cpu_timers(p);
}

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	struct tvec_base *base = this_cpu_ptr(&tvec_bases);

	__run_deferrable_timers();

	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	hrtimer_run_queues();
	raise_softirq(TIMER_SOFTIRQ);
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility? This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
	return alarm_setitimer(seconds);
}

#endif

static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
	schedule();
	del_singleshot_timer_sync(&timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer);

	timeout = expire - jiffies;

out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
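
/*
 * Illustrative sketch (not compiled into this file): as the kernel-doc
 * above says, schedule_timeout() returns immediately unless the task
 * state is set first. The usual interruptible wait, with a placeholder
 * function name:
 */
#if 0
static long my_wait_for_event(long timeout_jiffies)
{
	set_current_state(TASK_INTERRUPTIBLE);
	timeout_jiffies = schedule_timeout(timeout_jiffies);

	/* 0: the full timeout elapsed; > 0: woken (or signalled) early. */
	return timeout_jiffies;
}
#endif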

/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
	__set_current_state(TASK_KILLABLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

#if defined(CONFIG_HOTPLUG_CPU)
static void migrate_timer_list(struct tvec_base *new_base,
			       struct hlist_head *head, bool remove_pinned)
{
	struct timer_list *timer;
	int cpu = new_base->cpu;
	struct hlist_node *n;
	int is_pinned;

	hlist_for_each_entry_safe(timer, n, head, entry) {
		is_pinned = timer->flags & TIMER_PINNED_ON_CPU;
		if (!remove_pinned && is_pinned)
			continue;

		detach_if_pending(timer, get_timer_base(timer->flags), false);
		timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
		internal_add_timer(new_base, timer);
	}
}

static void __migrate_timers(int cpu, bool wait, bool remove_pinned)
{
	struct tvec_base *old_base;
	struct tvec_base *new_base;
	unsigned long flags;
	int i;

	old_base = per_cpu_ptr(&tvec_bases, cpu);
	new_base = get_cpu_ptr(&tvec_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
	 */
	spin_lock_irqsave(&new_base->lock, flags);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	if (wait) {
		/* Ensure timers are done running before continuing */
		while (old_base->running_timer) {
			spin_unlock(&old_base->lock);
			spin_unlock_irqrestore(&new_base->lock, flags);
			cpu_relax();
			spin_lock_irqsave(&new_base->lock, flags);
			spin_lock_nested(&old_base->lock,
					 SINGLE_DEPTH_NESTING);
		}
	} else {
		BUG_ON(old_base->running_timer);
	}

	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i,
				   remove_pinned);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i,
				   remove_pinned);
		migrate_timer_list(new_base, old_base->tv3.vec + i,
				   remove_pinned);
		migrate_timer_list(new_base, old_base->tv4.vec + i,
				   remove_pinned);
		migrate_timer_list(new_base, old_base->tv5.vec + i,
				   remove_pinned);
	}

	spin_unlock(&old_base->lock);
	spin_unlock_irqrestore(&new_base->lock, flags);
	put_cpu_ptr(&tvec_bases);
}

/* Migrate timers from 'cpu' to this_cpu */
static void migrate_timers(int cpu)
{
	BUG_ON(cpu_online(cpu));
	__migrate_timers(cpu, false, true);
}

void timer_quiesce_cpu(void *cpup)
{
	__migrate_timers(*(int *)cpup, true, false);
}

static int timer_cpu_notify(struct notifier_block *self,
			    unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		migrate_timers((long)hcpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static inline void timer_register_cpu_notifier(void)
{
	cpu_notifier(timer_cpu_notify, 0);
}
#else
static inline void timer_register_cpu_notifier(void) { }
#endif /* CONFIG_HOTPLUG_CPU */

static void __init init_timer_cpu(int cpu)
{
	struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu);

	base->cpu = cpu;
	spin_lock_init(&base->lock);

	base->timer_jiffies = jiffies;
	base->next_timer = base->timer_jiffies;
}

static void __init init_timer_cpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		init_timer_cpu(cpu);

	init_timer_deferrable_global();
}

void __init init_timers(void)
{
	init_timer_cpus();
	init_timer_stats();
	timer_register_cpu_notifier();
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);

static void __sched do_usleep_range(unsigned long min, unsigned long max)
{
	ktime_t kmin;
	u64 delta;

	kmin = ktime_set(0, min * NSEC_PER_USEC);
	delta = (u64)(max - min) * NSEC_PER_USEC;
	schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
}

/**
 * usleep_range - Drop in replacement for udelay where wakeup is flexible
 * @min: Minimum time in usecs to sleep
 * @max: Maximum time in usecs to sleep
 */
void __sched usleep_range(unsigned long min, unsigned long max)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	do_usleep_range(min, max);
}
EXPORT_SYMBOL(usleep_range);
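
/*
 * Illustrative sketch (not compiled into this file): usleep_range() gives
 * the scheduler a coalescing window, so it is preferred over udelay() and
 * msleep() for short non-atomic delays. All names below (struct my_dev,
 * CMD_SAMPLE, REG_CMD, REG_STATUS) are placeholders.
 */
#if 0
static u32 my_read_status(struct my_dev *dev)
{
	writel(CMD_SAMPLE, dev->regs + REG_CMD);
	/* Anywhere in [100us, 200us] is fine; the range allows batching. */
	usleep_range(100, 200);
	return readl(dev->regs + REG_STATUS);
}
#endif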