#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of_device.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/debugfs.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>

static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

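/*
 * Map a perf event (attr->type / attr->config) to a hardware event number
 * for this PMU, dispatching to the raw, hardware or cache map as
 * appropriate.  Returns -ENOENT for events this PMU cannot count.
 */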
int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}

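/*
 * Program the counter so that it overflows after hwc->sample_period more
 * events.  The value written is -(events left), capped at half of
 * max_period so interrupt latency cannot make the counter overtake the
 * value we are about to program.  Returns 1 if a new period was started.
 */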
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (armpmu->max_period >> 1))
		left = armpmu->max_period >> 1;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

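/*
 * Read the hardware counter, fold the delta since the last read (modulo
 * max_period) into event->count and hwc->period_left, and retry with
 * cmpxchg if another context updated prev_count underneath us.
 */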
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);
	if (armpmu->clear_event_idx)
		armpmu->clear_event_idx(hw_events, event);

	perf_event_update_userpage(event);
}

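/*
 * pmu::add callback: claim a counter index for @event on this CPU, make
 * sure the counter is stopped before it is (re)used, and start counting
 * if PERF_EF_START is set.
 */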
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	/* An event following a process won't be stopped earlier */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return -ENOENT;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, event) >= 0;
}

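/*
 * Check that the whole group @event belongs to (leader, siblings and the
 * new event itself) can be scheduled onto this PMU's counters at the same
 * time, using a fake pmu_hw_events to track counter usage.
 */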
static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}

static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu)
{
	struct platform_device *pdev = armpmu->plat_device;

	return pdev ? dev_get_platdata(&pdev->dev) : NULL;
}

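/*
 * Common PMU interrupt entry point: dispatch to the platform handle_irq
 * hook if one is provided, otherwise straight to the PMU back-end, and
 * report how long the handler took so perf can throttle the sampling
 * rate if overflow handling becomes too expensive.
 */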
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	struct arm_pmu_platdata *plat;
	int ret;
	u64 start_clock, finish_clock;

	/*
	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
	 * do any necessary shifting, we just need to perform the first
	 * dereference.
	 */
	armpmu = *(void **)dev;

	plat = armpmu_get_platdata(armpmu);

	start_clock = sched_clock();
	if (plat && plat->handle_irq)
		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
	else
		ret = armpmu->handle_irq(irq, armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	armpmu->free_irq(armpmu);
}

static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
	if (err) {
		armpmu_release_hardware(armpmu);
		return err;
	}

	armpmu->pmu_state = ARM_PMU_STATE_RUNNING;

	return 0;
}

static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

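/*
 * Per-event setup: map the event to a hardware encoding, apply any mode
 * exclusion the back-end supports, pick a default sample period for pure
 * counting (non-sampling) events and validate the event group.
 */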
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	    event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period = armpmu->max_period >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	/*
	 * Reject CPU-affine events for CPUs that are of a different class to
	 * that which this PMU handles. Process-following events (where
	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
	 * reject them later (in armpmu_add) if they're scheduled on a
	 * different class of CPU.
	 */
	if (event->cpu != -1 &&
	    !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);

		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static int armpmu_filter_match(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	unsigned int cpu = smp_processor_id();
	return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}

static void armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);

	armpmu->pmu = (struct pmu) {
		.pmu_enable = armpmu_enable,
		.pmu_disable = armpmu_disable,
		.event_init = armpmu_event_init,
		.add = armpmu_add,
		.del = armpmu_del,
		.start = armpmu_start,
		.stop = armpmu_stop,
		.read = armpmu_read,
		.filter_match = armpmu_filter_match,
		.events_across_hotplug = 1,
	};
}

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
	if (!__oprofile_cpu_pmu)
		return NULL;

	return __oprofile_cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	int max_events = 0;

	if (__oprofile_cpu_pmu != NULL)
		max_events = __oprofile_cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

static void cpu_pmu_enable_percpu_irq(void *data)
{
	int irq = *(int *)data;

	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

static void cpu_pmu_disable_percpu_irq(void *data)
{
	int irq = *(int *)data;

	disable_percpu_irq(irq);
}

static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
	int i, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

	cpu_pmu->pmu_state = ARM_PMU_STATE_GOING_DOWN;

	irqs = min(pmu_device->num_resources, num_possible_cpus());

	irq = platform_get_irq(pmu_device, 0);
	if (irq > 0 && irq_is_percpu(irq)) {
		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
		free_percpu_irq(irq, &hw_events->percpu_pmu);
	} else {
		for (i = 0; i < irqs; ++i) {
			int cpu = i;

			if (cpu_pmu->irq_affinity)
				cpu = cpu_pmu->irq_affinity[i];

			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
				continue;
			irq = platform_get_irq(pmu_device, i);
			if (irq > 0)
				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
		}
	}
	cpu_pmu->pmu_state = ARM_PMU_STATE_OFF;
}

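/*
 * Request the PMU interrupt(s): either a single per-CPU (PPI) interrupt
 * that is enabled on every CPU, or one SPI per CPU whose affinity is set
 * from cpu_pmu->irq_affinity when available.
 */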
static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
	int i, err, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

	if (!pmu_device)
		return -ENODEV;

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (irqs < 1) {
		pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
		return 0;
	}

	irq = platform_get_irq(pmu_device, 0);
	if (irq > 0 && irq_is_percpu(irq)) {
		err = request_percpu_irq(irq, handler, "arm-pmu",
					 &hw_events->percpu_pmu);
		if (err) {
			pr_err("unable to request IRQ%d for ARM PMU counters\n",
				irq);
			return err;
		}
		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
		cpu_pmu->percpu_irq = irq;
	} else {
		for (i = 0; i < irqs; ++i) {
			int cpu = i;

			err = 0;
			irq = platform_get_irq(pmu_device, i);
			if (irq < 0)
				continue;

			if (cpu_pmu->irq_affinity)
				cpu = cpu_pmu->irq_affinity[i];

			/*
			 * If we have a single PMU interrupt that we can't shift,
			 * assume that we're running on a uniprocessor machine and
			 * continue. Otherwise, continue without this interrupt.
			 */
			if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
					irq, cpu);
				continue;
			}

			err = request_irq(irq, handler,
					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
					  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
			if (err) {
				pr_err("unable to request IRQ%d for ARM PMU counters\n",
					irq);
				return err;
			}

			cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
		}
	}

	return 0;
}

struct cpu_pm_pmu_args {
	struct arm_pmu *armpmu;
	unsigned long cmd;
	int cpu;
	int ret;
};

#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct perf_event *event;
	int idx;

	for (idx = 0; idx < armpmu->num_events; idx++) {
		/*
		 * If the counter is not used skip it, there is no
		 * need of stopping/restarting it.
		 */
		if (!test_bit(idx, hw_events->used_mask))
			continue;

		event = hw_events->events[idx];

		switch (cmd) {
		case CPU_PM_ENTER:
			/*
			 * Stop and update the counter
			 */
			armpmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			/*
			 * Restore and enable the counter.
			 * armpmu_start() indirectly calls
			 *
			 * perf_event_update_userpage()
			 *
			 * that requires RCU read locking to be functional,
			 * wrap the call within RCU_NONIDLE to make the
			 * RCU subsystem aware this cpu is not idle from
			 * an RCU perspective for the armpmu_start() call
			 * duration.
			 */
			RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
			break;
		default:
			break;
		}
	}
}

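/*
 * Common worker for the CPU PM and hotplug notifiers: if this PMU is
 * responsible for the CPU described in @info and has active counters,
 * stop or restart the PMU and its events according to the requested
 * command.
 */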
static void cpu_pm_pmu_common(void *info)
{
	struct cpu_pm_pmu_args *data = info;
	struct arm_pmu *armpmu = data->armpmu;
	unsigned long cmd = data->cmd;
	int cpu = data->cpu;
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (!cpumask_test_cpu(cpu, &armpmu->supported_cpus)) {
		data->ret = NOTIFY_DONE;
		return;
	}

	if (!enabled) {
		data->ret = NOTIFY_OK;
		return;
	}

	data->ret = NOTIFY_OK;

	switch (cmd) {
	case CPU_PM_ENTER:
		armpmu->stop(armpmu);
		cpu_pm_pmu_setup(armpmu, cmd);
		break;
	case CPU_PM_EXIT:
	case CPU_PM_ENTER_FAILED:
		cpu_pm_pmu_setup(armpmu, cmd);
		armpmu->start(armpmu);
		break;
	default:
		data->ret = NOTIFY_DONE;
		break;
	}

	return;
}

static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			     void *v)
{
	struct cpu_pm_pmu_args data = {
		.armpmu = container_of(b, struct arm_pmu, cpu_pm_nb),
		.cmd = cmd,
		.cpu = smp_processor_id(),
	};

	/*
	 * Always reset the PMU registers on power-up even if
	 * there are no events running.
	 */
	if (cmd == CPU_PM_EXIT && data.armpmu->reset)
		data.armpmu->reset(data.armpmu);

	cpu_pm_pmu_common(&data);
	return data.ret;
}

static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
}

static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
static inline void cpu_pm_pmu_common(void *info) { }
#endif

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
			  void *hcpu)
{
	int irq = -1;
	unsigned long masked_action = (action & ~CPU_TASKS_FROZEN);
	struct cpu_pm_pmu_args data = {
		.armpmu = container_of(b, struct arm_pmu, hotplug_nb),
		.cpu = (unsigned long)hcpu,
	};

	if (!cpumask_test_cpu(data.cpu, &data.armpmu->supported_cpus))
		return NOTIFY_DONE;

	switch (masked_action) {
	case CPU_STARTING:
	case CPU_DOWN_FAILED:
		/*
		 * Always reset the PMU registers on power-up even if
		 * there are no events running.
		 */
		if (data.armpmu->reset)
			data.armpmu->reset(data.armpmu);
		if (data.armpmu->pmu_state == ARM_PMU_STATE_RUNNING) {
			if (data.armpmu->plat_device)
				irq = data.armpmu->percpu_irq;
			/* Arm the PMU IRQ before appearing. */
			if (irq > 0 && irq_is_percpu(irq))
				cpu_pmu_enable_percpu_irq(&irq);
			data.cmd = CPU_PM_EXIT;
			cpu_pm_pmu_common(&data);
		}
		return NOTIFY_OK;
	case CPU_DYING:
		if (data.armpmu->pmu_state != ARM_PMU_STATE_OFF) {
			data.cmd = CPU_PM_ENTER;
			cpu_pm_pmu_common(&data);
			/* Disarm the PMU IRQ before disappearing. */
			if (data.armpmu->plat_device)
				irq = data.armpmu->percpu_irq;
			if (irq > 0 && irq_is_percpu(irq))
				cpu_pmu_disable_percpu_irq(&irq);
		}
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

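/*
 * Set up the per-CPU state for a CPU PMU: allocate the per-CPU
 * pmu_hw_events, register the hotplug and CPU PM notifiers, hook up the
 * IRQ request/free helpers and reset the PMU on all supported CPUs.
 */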
static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int err;
	int cpu;
	struct pmu_hw_events __percpu *cpu_hw_events;

	cpu_hw_events = alloc_percpu(struct pmu_hw_events);
	if (!cpu_hw_events)
		return -ENOMEM;

	cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
	err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
	if (err)
		goto out_hw_events;

	err = cpu_pm_pmu_register(cpu_pmu);
	if (err)
		goto out_unregister;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		events->percpu_pmu = cpu_pmu;
	}

	cpu_pmu->hw_events = cpu_hw_events;
	cpu_pmu->request_irq = cpu_pmu_request_irq;
	cpu_pmu->free_irq = cpu_pmu_free_irq;

	/* Ensure the PMU has sane values out of reset. */
	if (cpu_pmu->reset)
		on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
				 cpu_pmu, 1);

	/* If no interrupts available, set the corresponding capability flag */
	if (!platform_get_irq(cpu_pmu->plat_device, 0))
		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	return 0;

out_unregister:
	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
out_hw_events:
	free_percpu(cpu_hw_events);
	return err;
}

static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
	cpu_pm_pmu_unregister(cpu_pmu);
	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
	free_percpu(cpu_pmu->hw_events);
}

/*
 * CPU PMU identification and probing.
 */
static int probe_current_pmu(struct arm_pmu *pmu,
			     const struct pmu_probe_info *info)
{
	int cpu = get_cpu();
	unsigned int cpuid = read_cpuid_id();
	int ret = -ENODEV;

	pr_info("probing PMU on CPU %d\n", cpu);

	for (; info->init != NULL; info++) {
		if ((cpuid & info->mask) != info->cpuid)
			continue;
		ret = info->init(pmu);
		break;
	}

	put_cpu();
	return ret;
}

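/*
 * Parse the optional "interrupt-affinity" DT property: determine whether
 * the PMU uses a single per-CPU interrupt (PPI) or one SPI per CPU,
 * record which CPUs this PMU supports and, for SPIs, remember the
 * per-interrupt CPU affinity in pmu->irq_affinity.
 */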
static int of_pmu_irq_cfg(struct arm_pmu *pmu)
{
	int *irqs, i = 0;
	bool using_spi = false;
	struct platform_device *pdev = pmu->plat_device;

	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
	if (!irqs)
		return -ENOMEM;

	do {
		struct device_node *dn;
		int cpu, irq;

		/* See if we have an affinity entry */
		dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", i);
		if (!dn)
			break;

		/* Check the IRQ type and prohibit a mix of PPIs and SPIs */
		irq = platform_get_irq(pdev, i);
		if (irq > 0) {
			bool spi = !irq_is_percpu(irq);

			if (i > 0 && spi != using_spi) {
				pr_err("PPI/SPI IRQ type mismatch for %s!\n",
					dn->name);
				of_node_put(dn);
				kfree(irqs);
				return -EINVAL;
			}

			using_spi = spi;
		}

		/* Now look up the logical CPU number */
		for_each_possible_cpu(cpu) {
			struct device_node *cpu_dn;

			cpu_dn = of_cpu_device_node_get(cpu);
			of_node_put(cpu_dn);

			if (dn == cpu_dn)
				break;
		}

		if (cpu >= nr_cpu_ids) {
			pr_warn("Failed to find logical CPU for %s\n",
				dn->name);
			of_node_put(dn);
			cpumask_setall(&pmu->supported_cpus);
			break;
		}
		of_node_put(dn);

		/* For SPIs, we need to track the affinity per IRQ */
		if (using_spi) {
			if (i >= pdev->num_resources) {
				of_node_put(dn);
				break;
			}

			irqs[i] = cpu;
		}

		/* Keep track of the CPUs containing this PMU type */
		cpumask_set_cpu(cpu, &pmu->supported_cpus);
		of_node_put(dn);
		i++;
	} while (1);

	/* If we didn't manage to parse anything, claim to support all CPUs */
	if (cpumask_weight(&pmu->supported_cpus) == 0)
		cpumask_setall(&pmu->supported_cpus);

	/* If we matched up the IRQ affinities, use them to route the SPIs */
	if (using_spi && i == pdev->num_resources)
		pmu->irq_affinity = irqs;
	else
		kfree(irqs);

	return 0;
}

int arm_pmu_device_probe(struct platform_device *pdev,
			 const struct of_device_id *of_table,
			 const struct pmu_probe_info *probe_table)
{
	const struct of_device_id *of_id;
	const int (*init_fn)(struct arm_pmu *);
	struct device_node *node = pdev->dev.of_node;
	struct arm_pmu *pmu;
	int ret = -ENODEV;

	pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
	if (!pmu) {
		pr_info("failed to allocate PMU device!\n");
		return -ENOMEM;
	}

	armpmu_init(pmu);

	if (!__oprofile_cpu_pmu)
		__oprofile_cpu_pmu = pmu;

	pmu->plat_device = pdev;

	ret = cpu_pmu_init(pmu);
	if (ret)
		goto out_free;

	if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
		init_fn = of_id->data;

		pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
							   "secure-reg-access");

		/* arm64 systems boot only as non-secure */
		if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
			pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
			pmu->secure_access = false;
		}

		ret = of_pmu_irq_cfg(pmu);
		if (!ret)
			ret = init_fn(pmu);
	} else {
		ret = probe_current_pmu(pmu, probe_table);
		cpumask_setall(&pmu->supported_cpus);
	}

	if (ret) {
		pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
		goto out_destroy;
	}

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (ret)
		goto out_destroy;

	pmu->pmu_state = ARM_PMU_STATE_OFF;
	pmu->percpu_irq = -1;

	pr_info("enabled with %s PMU driver, %d counters available\n",
		pmu->name, pmu->num_events);

	return 0;

out_destroy:
	cpu_pmu_destroy(pmu);
out_free:
	pr_info("%s: failed to register PMU devices!\n",
		of_node_full_name(node));
	kfree(pmu);
	return ret;
}

static struct dentry *perf_debug_dir;

struct dentry *perf_create_debug_dir(void)
{
	if (!perf_debug_dir)
		perf_debug_dir = debugfs_create_dir("msm_perf", NULL);
	return perf_debug_dir;
}

#ifdef CONFIG_PERF_EVENTS_RESET_PMU_DEBUGFS
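/*
 * Debugfs-triggered recovery path: temporarily bring any offline CPUs up
 * so the PMU reset runs on every core, release the PMU interrupts, then
 * take those CPUs back down again.
 */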
static __ref void reset_pmu_force(void)
{
	int cpu, ret;
	u32 save_online_mask = 0;

	for_each_possible_cpu(cpu) {
		if (!cpu_online(cpu)) {
			save_online_mask |= BIT(cpu);
			ret = cpu_up(cpu);
			if (ret)
				pr_err("Failed to bring up CPU: %d, ret: %d\n",
				       cpu, ret);
		}
	}
	if (cpu_pmu && cpu_pmu->reset)
		on_each_cpu(cpu_pmu->reset, NULL, 1);
	if (cpu_pmu && cpu_pmu->plat_device)
		armpmu_release_hardware(cpu_pmu);
	for_each_possible_cpu(cpu) {
		if ((save_online_mask & BIT(cpu)) && cpu_online(cpu)) {
			ret = cpu_down(cpu);
			if (ret)
				pr_err("Failed to bring down CPU: %d, ret: %d\n",
				       cpu, ret);
		}
	}
}

static int write_enabled_perfpmu_action(void *data, u64 val)
{
	if (val != 0)
		reset_pmu_force();
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_pmuaction,
		NULL, write_enabled_perfpmu_action, "%llu\n");

int __init init_pmu_actions(void)
{
	struct dentry *dir;
	struct dentry *file;
	unsigned int value = 1;

	dir = perf_create_debug_dir();
	if (!dir)
		return -ENOMEM;
	file = debugfs_create_file("resetpmu", 0220, dir,
		&value, &fops_pmuaction);
	if (!file)
		return -ENOMEM;
	return 0;
}
#else
int __init init_pmu_actions(void)
{
	return 0;
}
#endif
late_initcall(init_pmu_actions);