Merge "sched: handle frequency alert notifications better"
This commit is contained in:
commit
1b7819036e
22 changed files with 5404 additions and 6614 deletions
File diff suppressed because it is too large
Load diff
File diff suppressed because it is too large
Load diff
560
arch/arm/configs/msmcortex_defconfig
Normal file
560
arch/arm/configs/msmcortex_defconfig
Normal file
|
@ -0,0 +1,560 @@
|
|||
# CONFIG_LOCALVERSION_AUTO is not set
|
||||
CONFIG_AUDIT=y
|
||||
CONFIG_NO_HZ=y
|
||||
CONFIG_HIGH_RES_TIMERS=y
|
||||
CONFIG_IRQ_TIME_ACCOUNTING=y
|
||||
CONFIG_RCU_EXPERT=y
|
||||
CONFIG_IKCONFIG=y
|
||||
CONFIG_IKCONFIG_PROC=y
|
||||
CONFIG_CGROUPS=y
|
||||
CONFIG_CGROUP_DEBUG=y
|
||||
CONFIG_CGROUP_FREEZER=y
|
||||
CONFIG_CGROUP_CPUACCT=y
|
||||
CONFIG_CGROUP_SCHED=y
|
||||
CONFIG_RT_GROUP_SCHED=y
|
||||
CONFIG_SCHED_HMP=y
|
||||
CONFIG_NAMESPACES=y
|
||||
# CONFIG_UTS_NS is not set
|
||||
# CONFIG_PID_NS is not set
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
# CONFIG_RD_XZ is not set
|
||||
# CONFIG_RD_LZO is not set
|
||||
# CONFIG_RD_LZ4 is not set
|
||||
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
|
||||
CONFIG_KALLSYMS_ALL=y
|
||||
# CONFIG_MEMBARRIER is not set
|
||||
CONFIG_EMBEDDED=y
|
||||
# CONFIG_COMPAT_BRK is not set
|
||||
CONFIG_PROFILING=y
|
||||
CONFIG_CC_STACKPROTECTOR_REGULAR=y
|
||||
CONFIG_MODULES=y
|
||||
CONFIG_MODULE_UNLOAD=y
|
||||
CONFIG_MODULE_FORCE_UNLOAD=y
|
||||
CONFIG_MODVERSIONS=y
|
||||
CONFIG_MODULE_SIG=y
|
||||
CONFIG_MODULE_SIG_SHA512=y
|
||||
# CONFIG_BLK_DEV_BSG is not set
|
||||
CONFIG_PARTITION_ADVANCED=y
|
||||
# CONFIG_IOSCHED_DEADLINE is not set
|
||||
CONFIG_ARCH_QCOM=y
|
||||
CONFIG_ARCH_MSMFALCON=y
|
||||
CONFIG_SMP=y
|
||||
CONFIG_SCHED_MC=y
|
||||
CONFIG_NR_CPUS=8
|
||||
CONFIG_ARM_PSCI=y
|
||||
CONFIG_PREEMPT=y
|
||||
CONFIG_AEABI=y
|
||||
CONFIG_HIGHMEM=y
|
||||
# CONFIG_HIGHPTE is not set
|
||||
CONFIG_CLEANCACHE=y
|
||||
CONFIG_CMA=y
|
||||
CONFIG_ZSMALLOC=y
|
||||
CONFIG_SECCOMP=y
|
||||
CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
|
||||
# CONFIG_CPU_FREQ_STAT is not set
|
||||
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
|
||||
CONFIG_CPU_FREQ_GOV_USERSPACE=y
|
||||
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
|
||||
CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
|
||||
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
|
||||
CONFIG_CPU_IDLE=y
|
||||
CONFIG_VFP=y
|
||||
CONFIG_NEON=y
|
||||
CONFIG_KERNEL_MODE_NEON=y
|
||||
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
|
||||
CONFIG_PM_AUTOSLEEP=y
|
||||
CONFIG_PM_WAKELOCKS=y
|
||||
CONFIG_PM_WAKELOCKS_LIMIT=0
|
||||
# CONFIG_PM_WAKELOCKS_GC is not set
|
||||
CONFIG_PM_DEBUG=y
|
||||
CONFIG_NET=y
|
||||
CONFIG_PACKET=y
|
||||
CONFIG_UNIX=y
|
||||
CONFIG_XFRM_USER=y
|
||||
CONFIG_XFRM_STATISTICS=y
|
||||
CONFIG_NET_KEY=y
|
||||
CONFIG_INET=y
|
||||
CONFIG_IP_ADVANCED_ROUTER=y
|
||||
CONFIG_IP_MULTIPLE_TABLES=y
|
||||
CONFIG_IP_ROUTE_VERBOSE=y
|
||||
CONFIG_IP_PNP=y
|
||||
CONFIG_IP_PNP_DHCP=y
|
||||
CONFIG_INET_AH=y
|
||||
CONFIG_INET_ESP=y
|
||||
CONFIG_INET_IPCOMP=y
|
||||
# CONFIG_INET_LRO is not set
|
||||
CONFIG_IPV6_ROUTER_PREF=y
|
||||
CONFIG_IPV6_ROUTE_INFO=y
|
||||
CONFIG_IPV6_OPTIMISTIC_DAD=y
|
||||
CONFIG_INET6_AH=y
|
||||
CONFIG_INET6_ESP=y
|
||||
CONFIG_INET6_IPCOMP=y
|
||||
CONFIG_IPV6_MIP6=y
|
||||
CONFIG_IPV6_MULTIPLE_TABLES=y
|
||||
CONFIG_IPV6_SUBTREES=y
|
||||
CONFIG_NETFILTER=y
|
||||
CONFIG_NF_CONNTRACK=y
|
||||
CONFIG_NF_CONNTRACK_SECMARK=y
|
||||
CONFIG_NF_CONNTRACK_EVENTS=y
|
||||
CONFIG_NF_CT_PROTO_DCCP=y
|
||||
CONFIG_NF_CT_PROTO_SCTP=y
|
||||
CONFIG_NF_CT_PROTO_UDPLITE=y
|
||||
CONFIG_NF_CONNTRACK_AMANDA=y
|
||||
CONFIG_NF_CONNTRACK_FTP=y
|
||||
CONFIG_NF_CONNTRACK_H323=y
|
||||
CONFIG_NF_CONNTRACK_IRC=y
|
||||
CONFIG_NF_CONNTRACK_NETBIOS_NS=y
|
||||
CONFIG_NF_CONNTRACK_PPTP=y
|
||||
CONFIG_NF_CONNTRACK_SANE=y
|
||||
CONFIG_NF_CONNTRACK_TFTP=y
|
||||
CONFIG_NF_CT_NETLINK=y
|
||||
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
|
||||
CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
|
||||
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
|
||||
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
|
||||
CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
|
||||
CONFIG_NETFILTER_XT_TARGET_LOG=y
|
||||
CONFIG_NETFILTER_XT_TARGET_MARK=y
|
||||
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
|
||||
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
|
||||
CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
|
||||
CONFIG_NETFILTER_XT_TARGET_TEE=y
|
||||
CONFIG_NETFILTER_XT_TARGET_TPROXY=y
|
||||
CONFIG_NETFILTER_XT_TARGET_TRACE=y
|
||||
CONFIG_NETFILTER_XT_TARGET_SECMARK=y
|
||||
CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
|
||||
CONFIG_NETFILTER_XT_MATCH_COMMENT=y
|
||||
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
|
||||
CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
|
||||
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
|
||||
CONFIG_NETFILTER_XT_MATCH_DSCP=y
|
||||
CONFIG_NETFILTER_XT_MATCH_ESP=y
|
||||
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
|
||||
CONFIG_NETFILTER_XT_MATCH_HELPER=y
|
||||
CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
|
||||
# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
|
||||
CONFIG_NETFILTER_XT_MATCH_LENGTH=y
|
||||
CONFIG_NETFILTER_XT_MATCH_LIMIT=y
|
||||
CONFIG_NETFILTER_XT_MATCH_MAC=y
|
||||
CONFIG_NETFILTER_XT_MATCH_MARK=y
|
||||
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
|
||||
CONFIG_NETFILTER_XT_MATCH_POLICY=y
|
||||
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
|
||||
CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
|
||||
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
|
||||
CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
|
||||
CONFIG_NETFILTER_XT_MATCH_SOCKET=y
|
||||
CONFIG_NETFILTER_XT_MATCH_STATE=y
|
||||
CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
|
||||
CONFIG_NETFILTER_XT_MATCH_STRING=y
|
||||
CONFIG_NETFILTER_XT_MATCH_TIME=y
|
||||
CONFIG_NETFILTER_XT_MATCH_U32=y
|
||||
CONFIG_NF_CONNTRACK_IPV4=y
|
||||
CONFIG_IP_NF_IPTABLES=y
|
||||
CONFIG_IP_NF_MATCH_AH=y
|
||||
CONFIG_IP_NF_MATCH_ECN=y
|
||||
CONFIG_IP_NF_MATCH_TTL=y
|
||||
CONFIG_IP_NF_FILTER=y
|
||||
CONFIG_IP_NF_TARGET_REJECT=y
|
||||
CONFIG_IP_NF_NAT=y
|
||||
CONFIG_IP_NF_TARGET_MASQUERADE=y
|
||||
CONFIG_IP_NF_TARGET_NETMAP=y
|
||||
CONFIG_IP_NF_TARGET_REDIRECT=y
|
||||
CONFIG_IP_NF_MANGLE=y
|
||||
CONFIG_IP_NF_RAW=y
|
||||
CONFIG_IP_NF_SECURITY=y
|
||||
CONFIG_IP_NF_ARPTABLES=y
|
||||
CONFIG_IP_NF_ARPFILTER=y
|
||||
CONFIG_IP_NF_ARP_MANGLE=y
|
||||
CONFIG_NF_CONNTRACK_IPV6=y
|
||||
CONFIG_IP6_NF_IPTABLES=y
|
||||
CONFIG_IP6_NF_FILTER=y
|
||||
CONFIG_IP6_NF_TARGET_REJECT=y
|
||||
CONFIG_IP6_NF_MANGLE=y
|
||||
CONFIG_IP6_NF_RAW=y
|
||||
CONFIG_BRIDGE_NF_EBTABLES=y
|
||||
CONFIG_BRIDGE_EBT_BROUTE=y
|
||||
CONFIG_L2TP=y
|
||||
CONFIG_L2TP_DEBUGFS=y
|
||||
CONFIG_L2TP_V3=y
|
||||
CONFIG_L2TP_IP=y
|
||||
CONFIG_L2TP_ETH=y
|
||||
CONFIG_BRIDGE=y
|
||||
CONFIG_NET_SCHED=y
|
||||
CONFIG_NET_SCH_HTB=y
|
||||
CONFIG_NET_SCH_PRIO=y
|
||||
CONFIG_NET_SCH_MULTIQ=y
|
||||
CONFIG_NET_SCH_INGRESS=y
|
||||
CONFIG_NET_CLS_FW=y
|
||||
CONFIG_NET_CLS_U32=y
|
||||
CONFIG_CLS_U32_MARK=y
|
||||
CONFIG_NET_CLS_FLOW=y
|
||||
CONFIG_NET_EMATCH=y
|
||||
CONFIG_NET_EMATCH_CMP=y
|
||||
CONFIG_NET_EMATCH_NBYTE=y
|
||||
CONFIG_NET_EMATCH_U32=y
|
||||
CONFIG_NET_EMATCH_META=y
|
||||
CONFIG_NET_EMATCH_TEXT=y
|
||||
CONFIG_NET_CLS_ACT=y
|
||||
CONFIG_NET_ACT_GACT=y
|
||||
CONFIG_NET_ACT_MIRRED=y
|
||||
CONFIG_NET_ACT_SKBEDIT=y
|
||||
CONFIG_DNS_RESOLVER=y
|
||||
CONFIG_RMNET_DATA=y
|
||||
CONFIG_RMNET_DATA_FC=y
|
||||
CONFIG_RMNET_DATA_DEBUG_PKT=y
|
||||
CONFIG_SOCKEV_NLMCAST=y
|
||||
CONFIG_BT=y
|
||||
CONFIG_MSM_BT_POWER=y
|
||||
CONFIG_BTFM_SLIM=y
|
||||
CONFIG_BTFM_SLIM_WCN3990=y
|
||||
CONFIG_CFG80211=y
|
||||
CONFIG_CFG80211_INTERNAL_REGDB=y
|
||||
# CONFIG_CFG80211_CRDA_SUPPORT is not set
|
||||
CONFIG_RFKILL=y
|
||||
CONFIG_NFC_NQ=y
|
||||
CONFIG_IPC_ROUTER=y
|
||||
CONFIG_IPC_ROUTER_SECURITY=y
|
||||
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
|
||||
CONFIG_DMA_CMA=y
|
||||
CONFIG_CMA_SIZE_MBYTES=40
|
||||
CONFIG_ZRAM=y
|
||||
CONFIG_BLK_DEV_LOOP=y
|
||||
CONFIG_BLK_DEV_RAM=y
|
||||
CONFIG_BLK_DEV_RAM_SIZE=8192
|
||||
CONFIG_UID_CPUTIME=y
|
||||
CONFIG_MSM_ULTRASOUND=y
|
||||
CONFIG_SCSI=y
|
||||
CONFIG_BLK_DEV_SD=y
|
||||
CONFIG_CHR_DEV_SG=y
|
||||
CONFIG_CHR_DEV_SCH=y
|
||||
CONFIG_SCSI_CONSTANTS=y
|
||||
CONFIG_SCSI_LOGGING=y
|
||||
CONFIG_SCSI_SCAN_ASYNC=y
|
||||
CONFIG_SCSI_UFSHCD=y
|
||||
CONFIG_SCSI_UFSHCD_PLATFORM=y
|
||||
CONFIG_SCSI_UFS_QCOM=y
|
||||
CONFIG_MD=y
|
||||
CONFIG_BLK_DEV_DM=y
|
||||
CONFIG_DM_UEVENT=y
|
||||
CONFIG_DM_VERITY=y
|
||||
CONFIG_NETDEVICES=y
|
||||
CONFIG_BONDING=y
|
||||
CONFIG_DUMMY=y
|
||||
CONFIG_TUN=y
|
||||
CONFIG_RNDIS_IPA=y
|
||||
CONFIG_PHYLIB=y
|
||||
CONFIG_PPP=y
|
||||
CONFIG_PPP_BSDCOMP=y
|
||||
CONFIG_PPP_DEFLATE=y
|
||||
CONFIG_PPP_MPPE=y
|
||||
CONFIG_PPPOLAC=y
|
||||
CONFIG_PPPOPNS=y
|
||||
CONFIG_WCNSS_MEM_PRE_ALLOC=y
|
||||
CONFIG_ATH_CARDS=y
|
||||
CONFIG_CLD_LL_CORE=y
|
||||
CONFIG_QPNP_POWER_ON=y
|
||||
CONFIG_INPUT_EVDEV=y
|
||||
CONFIG_INPUT_KEYRESET=y
|
||||
CONFIG_KEYBOARD_GPIO=y
|
||||
# CONFIG_INPUT_MOUSE is not set
|
||||
CONFIG_INPUT_JOYSTICK=y
|
||||
CONFIG_INPUT_TOUCHSCREEN=y
|
||||
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
|
||||
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v21=y
|
||||
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
|
||||
CONFIG_INPUT_MISC=y
|
||||
CONFIG_INPUT_HBTP_INPUT=y
|
||||
CONFIG_INPUT_KEYCHORD=y
|
||||
CONFIG_INPUT_UINPUT=y
|
||||
CONFIG_INPUT_GPIO=y
|
||||
# CONFIG_SERIO_SERPORT is not set
|
||||
# CONFIG_VT is not set
|
||||
# CONFIG_LEGACY_PTYS is not set
|
||||
CONFIG_SERIAL_MSM=y
|
||||
CONFIG_SERIAL_MSM_CONSOLE=y
|
||||
CONFIG_SERIAL_MSM_HS=y
|
||||
CONFIG_SERIAL_MSM_SMD=y
|
||||
CONFIG_HW_RANDOM=y
|
||||
CONFIG_HW_RANDOM_MSM_LEGACY=y
|
||||
CONFIG_MSM_ADSPRPC=y
|
||||
CONFIG_I2C_CHARDEV=y
|
||||
CONFIG_I2C_MSM_V2=y
|
||||
CONFIG_SLIMBUS=y
|
||||
CONFIG_SLIMBUS_MSM_NGD=y
|
||||
CONFIG_SOUNDWIRE=y
|
||||
CONFIG_SPI=y
|
||||
CONFIG_SPI_QUP=y
|
||||
CONFIG_SPI_SPIDEV=y
|
||||
CONFIG_SPMI=y
|
||||
CONFIG_PINCTRL_MSMCOBALT=y
|
||||
CONFIG_PINCTRL_MSMFALCON=y
|
||||
CONFIG_GPIO_SYSFS=y
|
||||
CONFIG_GPIO_QPNP_PIN=y
|
||||
CONFIG_APSS_CORE_EA=y
|
||||
CONFIG_MSM_APM=y
|
||||
CONFIG_QPNP_SMBCHARGER=y
|
||||
CONFIG_SMB135X_CHARGER=y
|
||||
CONFIG_SMB1351_USB_CHARGER=y
|
||||
CONFIG_MSM_BCL_CTL=y
|
||||
CONFIG_MSM_BCL_PERIPHERAL_CTL=y
|
||||
CONFIG_QPNP_SMB2=y
|
||||
CONFIG_SMB138X_CHARGER=y
|
||||
CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
|
||||
CONFIG_THERMAL=y
|
||||
CONFIG_MFD_SPMI_PMIC=y
|
||||
CONFIG_MFD_I2C_PMIC=y
|
||||
CONFIG_MSM_CDC_PINCTRL=y
|
||||
CONFIG_MSM_CDC_SUPPLY=y
|
||||
CONFIG_REGULATOR=y
|
||||
CONFIG_REGULATOR_FIXED_VOLTAGE=y
|
||||
CONFIG_REGULATOR_RPM_SMD=y
|
||||
CONFIG_REGULATOR_QPNP=y
|
||||
CONFIG_REGULATOR_QPNP_LABIBB=y
|
||||
CONFIG_REGULATOR_SPM=y
|
||||
CONFIG_REGULATOR_CPR3_HMSS=y
|
||||
CONFIG_REGULATOR_CPR3_MMSS=y
|
||||
CONFIG_REGULATOR_CPRH_KBSS=y
|
||||
CONFIG_REGULATOR_MEM_ACC=y
|
||||
CONFIG_REGULATOR_PROXY_CONSUMER=y
|
||||
CONFIG_REGULATOR_STUB=y
|
||||
CONFIG_MEDIA_SUPPORT=y
|
||||
CONFIG_MEDIA_CAMERA_SUPPORT=y
|
||||
CONFIG_MEDIA_CONTROLLER=y
|
||||
CONFIG_VIDEO_V4L2_SUBDEV_API=y
|
||||
CONFIG_VIDEO_ADV_DEBUG=y
|
||||
CONFIG_VIDEO_FIXED_MINOR_RANGES=y
|
||||
CONFIG_V4L_PLATFORM_DRIVERS=y
|
||||
CONFIG_MSM_CAMERA=y
|
||||
CONFIG_MSM_CAMERA_DEBUG=y
|
||||
CONFIG_MSM_SDE_ROTATOR=y
|
||||
CONFIG_QCOM_KGSL=y
|
||||
CONFIG_FB=y
|
||||
CONFIG_FB_VIRTUAL=y
|
||||
CONFIG_LOGO=y
|
||||
# CONFIG_LOGO_LINUX_MONO is not set
|
||||
# CONFIG_LOGO_LINUX_VGA16 is not set
|
||||
CONFIG_SOUND=y
|
||||
CONFIG_SND=y
|
||||
CONFIG_SND_DYNAMIC_MINORS=y
|
||||
CONFIG_SND_SOC=y
|
||||
CONFIG_UHID=y
|
||||
CONFIG_HID_APPLE=y
|
||||
CONFIG_HID_MICROSOFT=y
|
||||
CONFIG_USB=y
|
||||
CONFIG_USB_XHCI_HCD=y
|
||||
CONFIG_USB_EHCI_HCD=y
|
||||
CONFIG_USB_EHCI_HCD_PLATFORM=y
|
||||
CONFIG_USB_OHCI_HCD=y
|
||||
CONFIG_USB_OHCI_HCD_PLATFORM=y
|
||||
CONFIG_USB_STORAGE=y
|
||||
CONFIG_USB_DWC3=y
|
||||
CONFIG_USB_ISP1760=y
|
||||
CONFIG_USB_ISP1760_HOST_ROLE=y
|
||||
CONFIG_USB_OTG_WAKELOCK=y
|
||||
CONFIG_NOP_USB_XCEIV=y
|
||||
CONFIG_USB_MSM_SSPHY_QMP=y
|
||||
CONFIG_MSM_QUSB_PHY=y
|
||||
CONFIG_DUAL_ROLE_USB_INTF=y
|
||||
CONFIG_USB_GADGET=y
|
||||
CONFIG_USB_GADGET_VBUS_DRAW=500
|
||||
CONFIG_USB_CONFIGFS=y
|
||||
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
|
||||
CONFIG_USB_CONFIGFS_F_FS=y
|
||||
CONFIG_USB_CONFIGFS_F_MTP=y
|
||||
CONFIG_USB_CONFIGFS_F_PTP=y
|
||||
CONFIG_USB_CONFIGFS_F_ACC=y
|
||||
CONFIG_USB_CONFIGFS_UEVENT=y
|
||||
CONFIG_USB_CONFIGFS_F_HID=y
|
||||
CONFIG_USB_CONFIGFS_F_DIAG=y
|
||||
CONFIG_USB_CONFIGFS_F_GSI=y
|
||||
CONFIG_USB_CONFIGFS_F_CDEV=y
|
||||
CONFIG_USB_CONFIGFS_F_QDSS=y
|
||||
CONFIG_MMC=y
|
||||
CONFIG_MMC_PERF_PROFILING=y
|
||||
CONFIG_MMC_PARANOID_SD_INIT=y
|
||||
CONFIG_MMC_CLKGATE=y
|
||||
CONFIG_MMC_BLOCK_MINORS=32
|
||||
CONFIG_MMC_TEST=y
|
||||
CONFIG_MMC_SDHCI=y
|
||||
CONFIG_MMC_SDHCI_PLTFM=y
|
||||
CONFIG_MMC_SDHCI_MSM=y
|
||||
CONFIG_NEW_LEDS=y
|
||||
CONFIG_LEDS_CLASS=y
|
||||
CONFIG_LEDS_QPNP=y
|
||||
CONFIG_LEDS_QPNP_FLASH_V2=y
|
||||
CONFIG_LEDS_QPNP_WLED=y
|
||||
CONFIG_LEDS_TRIGGERS=y
|
||||
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
|
||||
CONFIG_LEDS_TRIGGER_CPU=y
|
||||
CONFIG_SWITCH=y
|
||||
CONFIG_EDAC=y
|
||||
CONFIG_EDAC_MM_EDAC=y
|
||||
CONFIG_RTC_CLASS=y
|
||||
CONFIG_RTC_DRV_QPNP=y
|
||||
CONFIG_DMADEVICES=y
|
||||
CONFIG_QCOM_SPS_DMA=y
|
||||
CONFIG_UIO=y
|
||||
CONFIG_UIO_MSM_SHAREDMEM=y
|
||||
CONFIG_STAGING=y
|
||||
CONFIG_ASHMEM=y
|
||||
CONFIG_ANDROID_TIMED_GPIO=y
|
||||
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
|
||||
CONFIG_SYNC=y
|
||||
CONFIG_ION=y
|
||||
CONFIG_ION_MSM=y
|
||||
CONFIG_QPNP_REVID=y
|
||||
CONFIG_QPNP_COINCELL=y
|
||||
CONFIG_SPS=y
|
||||
CONFIG_SPS_SUPPORT_NDP_BAM=y
|
||||
CONFIG_IPA=y
|
||||
CONFIG_RMNET_IPA=y
|
||||
CONFIG_GSI=y
|
||||
CONFIG_IPA3=y
|
||||
CONFIG_RMNET_IPA3=y
|
||||
CONFIG_GPIO_USB_DETECT=y
|
||||
CONFIG_USB_BAM=y
|
||||
CONFIG_REMOTE_SPINLOCK_MSM=y
|
||||
CONFIG_ARM_SMMU=y
|
||||
CONFIG_QCOM_COMMON_LOG=y
|
||||
CONFIG_MSM_SMEM=y
|
||||
CONFIG_QPNP_HAPTIC=y
|
||||
CONFIG_MSM_SMD=y
|
||||
CONFIG_MSM_SMD_DEBUG=y
|
||||
CONFIG_MSM_GLINK=y
|
||||
CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
|
||||
CONFIG_MSM_GLINK_SMD_XPRT=y
|
||||
CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
|
||||
CONFIG_MSM_SPCOM=y
|
||||
CONFIG_MSM_SMEM_LOGGING=y
|
||||
CONFIG_MSM_SMP2P=y
|
||||
CONFIG_MSM_SMP2P_TEST=y
|
||||
CONFIG_MSM_QMI_INTERFACE=y
|
||||
CONFIG_QCOM_BUS_SCALING=y
|
||||
CONFIG_MSM_SERVICE_LOCATOR=y
|
||||
CONFIG_QCOM_DCC=y
|
||||
CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
|
||||
CONFIG_MSM_SYSMON_GLINK_COMM=y
|
||||
CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
|
||||
CONFIG_MSM_GLINK_PKT=y
|
||||
CONFIG_MSM_SPM=y
|
||||
CONFIG_QCOM_SCM=y
|
||||
CONFIG_QCOM_WATCHDOG_V2=y
|
||||
CONFIG_QCOM_MEMORY_DUMP_V2=y
|
||||
CONFIG_ICNSS=y
|
||||
CONFIG_MSM_GLADIATOR_ERP_V2=y
|
||||
CONFIG_PANIC_ON_GLADIATOR_ERROR_V2=y
|
||||
CONFIG_MSM_GLADIATOR_HANG_DETECT=y
|
||||
CONFIG_MSM_CORE_HANG_DETECT=y
|
||||
CONFIG_MSM_RUN_QUEUE_STATS=y
|
||||
CONFIG_MSM_BOOT_STATS=y
|
||||
CONFIG_QCOM_CPUSS_DUMP=y
|
||||
CONFIG_MSM_QDSP6_APRV2_GLINK=y
|
||||
CONFIG_MSM_PERFORMANCE=y
|
||||
CONFIG_MSM_SUBSYSTEM_RESTART=y
|
||||
CONFIG_MSM_PIL=y
|
||||
CONFIG_MSM_PIL_SSR_GENERIC=y
|
||||
CONFIG_MSM_PIL_MSS_QDSP6V5=y
|
||||
CONFIG_TRACER_PKT=y
|
||||
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
|
||||
CONFIG_MSM_MPM_OF=y
|
||||
CONFIG_MSM_EVENT_TIMER=y
|
||||
CONFIG_MSM_CORE_CTL_HELPER=y
|
||||
CONFIG_MSM_SERVICE_NOTIFIER=y
|
||||
CONFIG_MEM_SHARE_QMI_SERVICE=y
|
||||
CONFIG_QCOM_BIMC_BWMON=y
|
||||
CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
|
||||
CONFIG_QCOM_DEVFREQ_DEVBW=y
|
||||
CONFIG_EXTCON=y
|
||||
CONFIG_IIO=y
|
||||
CONFIG_QCOM_RRADC=y
|
||||
CONFIG_PWM=y
|
||||
CONFIG_PWM_QPNP=y
|
||||
CONFIG_ARM_GIC_V3_ACL=y
|
||||
CONFIG_ANDROID=y
|
||||
CONFIG_ANDROID_BINDER_IPC=y
|
||||
CONFIG_MSM_TZ_LOG=y
|
||||
CONFIG_SENSORS_SSC=y
|
||||
CONFIG_EXT2_FS=y
|
||||
CONFIG_EXT2_FS_XATTR=y
|
||||
CONFIG_EXT3_FS=y
|
||||
CONFIG_EXT4_FS_SECURITY=y
|
||||
CONFIG_FUSE_FS=y
|
||||
CONFIG_MSDOS_FS=y
|
||||
CONFIG_VFAT_FS=y
|
||||
CONFIG_TMPFS=y
|
||||
CONFIG_TMPFS_POSIX_ACL=y
|
||||
CONFIG_ECRYPT_FS=y
|
||||
CONFIG_ECRYPT_FS_MESSAGING=y
|
||||
# CONFIG_NETWORK_FILESYSTEMS is not set
|
||||
CONFIG_NLS_CODEPAGE_437=y
|
||||
CONFIG_NLS_ISO8859_1=y
|
||||
CONFIG_PRINTK_TIME=y
|
||||
CONFIG_DYNAMIC_DEBUG=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_PAGE_OWNER=y
|
||||
CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_SLUB_DEBUG_PANIC_ON=y
|
||||
CONFIG_DEBUG_OBJECTS=y
|
||||
CONFIG_DEBUG_OBJECTS_FREE=y
|
||||
CONFIG_DEBUG_OBJECTS_TIMERS=y
|
||||
CONFIG_DEBUG_OBJECTS_WORK=y
|
||||
CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
|
||||
CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
|
||||
CONFIG_SLUB_DEBUG_ON=y
|
||||
CONFIG_DEBUG_KMEMLEAK=y
|
||||
CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
|
||||
CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
|
||||
CONFIG_DEBUG_STACK_USAGE=y
|
||||
CONFIG_DEBUG_MEMORY_INIT=y
|
||||
CONFIG_LOCKUP_DETECTOR=y
|
||||
CONFIG_WQ_WATCHDOG=y
|
||||
CONFIG_PANIC_TIMEOUT=5
|
||||
CONFIG_PANIC_ON_SCHED_BUG=y
|
||||
CONFIG_PANIC_ON_RT_THROTTLING=y
|
||||
CONFIG_SCHEDSTATS=y
|
||||
CONFIG_SCHED_STACK_END_CHECK=y
|
||||
CONFIG_TIMER_STATS=y
|
||||
# CONFIG_DEBUG_PREEMPT is not set
|
||||
CONFIG_DEBUG_SPINLOCK=y
|
||||
CONFIG_DEBUG_MUTEXES=y
|
||||
CONFIG_DEBUG_ATOMIC_SLEEP=y
|
||||
CONFIG_DEBUG_LIST=y
|
||||
CONFIG_FAULT_INJECTION=y
|
||||
CONFIG_FAIL_PAGE_ALLOC=y
|
||||
CONFIG_UFS_FAULT_INJECTION=y
|
||||
CONFIG_FAULT_INJECTION_DEBUG_FS=y
|
||||
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
|
||||
CONFIG_IPC_LOGGING=y
|
||||
CONFIG_QCOM_RTB=y
|
||||
CONFIG_FUNCTION_TRACER=y
|
||||
CONFIG_TRACER_SNAPSHOT=y
|
||||
CONFIG_BLK_DEV_IO_TRACE=y
|
||||
CONFIG_CPU_FREQ_SWITCH_PROFILER=y
|
||||
CONFIG_MEMTEST=y
|
||||
CONFIG_PANIC_ON_DATA_CORRUPTION=y
|
||||
CONFIG_PID_IN_CONTEXTIDR=y
|
||||
CONFIG_DEBUG_SET_MODULE_RONX=y
|
||||
CONFIG_CORESIGHT=y
|
||||
CONFIG_CORESIGHT_REMOTE_ETM=y
|
||||
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
|
||||
CONFIG_CORESIGHT_CTI=y
|
||||
CONFIG_CORESIGHT_TPDA=y
|
||||
CONFIG_CORESIGHT_TPDM=y
|
||||
CONFIG_CORESIGHT_QPDI=y
|
||||
CONFIG_CORESIGHT_SOURCE_DUMMY=y
|
||||
CONFIG_SECURITY=y
|
||||
CONFIG_SECURITY_SELINUX=y
|
||||
CONFIG_SECURITY_SMACK=y
|
||||
CONFIG_CRYPTO_ECHAINIV=y
|
||||
CONFIG_CRYPTO_XCBC=y
|
||||
CONFIG_CRYPTO_MD4=y
|
||||
CONFIG_CRYPTO_TWOFISH=y
|
||||
CONFIG_CRYPTO_ANSI_CPRNG=y
|
||||
CONFIG_CRYPTO_DEV_QCRYPTO=y
|
||||
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
|
||||
CONFIG_CRYPTO_DEV_QCEDEV=y
|
||||
CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
|
||||
CONFIG_CRYPTO_DEV_QCE=y
|
||||
CONFIG_XZ_DEC=y
|
|
@ -51,7 +51,6 @@ CONFIG_CMA=y
|
|||
CONFIG_ZSMALLOC=y
|
||||
CONFIG_SECCOMP=y
|
||||
CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
|
||||
CONFIG_SCHED_FREQ_INPUT=y
|
||||
# CONFIG_CPU_FREQ_STAT is not set
|
||||
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
|
||||
CONFIG_CPU_FREQ_GOV_USERSPACE=y
|
||||
|
@ -223,7 +222,6 @@ CONFIG_ZRAM=y
|
|||
CONFIG_BLK_DEV_LOOP=y
|
||||
CONFIG_BLK_DEV_RAM=y
|
||||
CONFIG_BLK_DEV_RAM_SIZE=8192
|
||||
CONFIG_UID_STAT=y
|
||||
CONFIG_UID_CPUTIME=y
|
||||
CONFIG_MSM_ULTRASOUND=y
|
||||
CONFIG_SCSI=y
|
||||
|
|
|
@ -64,7 +64,6 @@ CONFIG_PM_WAKELOCKS=y
|
|||
CONFIG_PM_WAKELOCKS_LIMIT=0
|
||||
# CONFIG_PM_WAKELOCKS_GC is not set
|
||||
CONFIG_CPU_FREQ=y
|
||||
CONFIG_SCHED_FREQ_INPUT=y
|
||||
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
|
||||
CONFIG_CPU_FREQ_GOV_USERSPACE=y
|
||||
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
|
||||
|
|
|
@ -64,7 +64,6 @@ CONFIG_PM_WAKELOCKS_LIMIT=0
|
|||
# CONFIG_PM_WAKELOCKS_GC is not set
|
||||
CONFIG_PM_DEBUG=y
|
||||
CONFIG_CPU_FREQ=y
|
||||
CONFIG_SCHED_FREQ_INPUT=y
|
||||
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
|
||||
CONFIG_CPU_FREQ_GOV_USERSPACE=y
|
||||
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
|
||||
|
|
|
@ -70,7 +70,6 @@ CONFIG_PM_WAKELOCKS=y
|
|||
CONFIG_PM_WAKELOCKS_LIMIT=0
|
||||
# CONFIG_PM_WAKELOCKS_GC is not set
|
||||
CONFIG_CPU_FREQ=y
|
||||
CONFIG_SCHED_FREQ_INPUT=y
|
||||
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
|
||||
CONFIG_CPU_FREQ_GOV_USERSPACE=y
|
||||
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
|
||||
|
|
|
@ -69,7 +69,6 @@ CONFIG_PM_WAKELOCKS_LIMIT=0
|
|||
# CONFIG_PM_WAKELOCKS_GC is not set
|
||||
CONFIG_PM_DEBUG=y
|
||||
CONFIG_CPU_FREQ=y
|
||||
CONFIG_SCHED_FREQ_INPUT=y
|
||||
# CONFIG_CPU_FREQ_STAT is not set
|
||||
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
|
||||
CONFIG_CPU_FREQ_GOV_USERSPACE=y
|
||||
|
|
|
@ -11,6 +11,7 @@ CONFIG_IKCONFIG=y
|
|||
CONFIG_IKCONFIG_PROC=y
|
||||
CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
|
||||
CONFIG_CGROUP_FREEZER=y
|
||||
CONFIG_CPUSETS=y
|
||||
CONFIG_CGROUP_CPUACCT=y
|
||||
CONFIG_RT_GROUP_SCHED=y
|
||||
CONFIG_SCHED_HMP=y
|
||||
|
@ -51,6 +52,7 @@ CONFIG_PREEMPT=y
|
|||
CONFIG_HZ_100=y
|
||||
CONFIG_ARM64_REG_REBALANCE_ON_CTX_SW=y
|
||||
CONFIG_CMA=y
|
||||
CONFIG_CMA_DEBUGFS=y
|
||||
CONFIG_ZSMALLOC=y
|
||||
CONFIG_BALANCE_ANON_FILE_RECLAIM=y
|
||||
CONFIG_FORCE_ALLOC_FROM_DMA_ZONE=y
|
||||
|
@ -68,7 +70,6 @@ CONFIG_PM_WAKELOCKS=y
|
|||
CONFIG_PM_WAKELOCKS_LIMIT=0
|
||||
# CONFIG_PM_WAKELOCKS_GC is not set
|
||||
CONFIG_CPU_FREQ=y
|
||||
CONFIG_SCHED_FREQ_INPUT=y
|
||||
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
|
||||
CONFIG_CPU_FREQ_GOV_USERSPACE=y
|
||||
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
|
||||
|
@ -238,8 +239,11 @@ CONFIG_SCSI_SCAN_ASYNC=y
|
|||
CONFIG_SCSI_UFSHCD=y
|
||||
CONFIG_SCSI_UFSHCD_PLATFORM=y
|
||||
CONFIG_SCSI_UFS_QCOM=y
|
||||
CONFIG_SCSI_UFS_QCOM_ICE=y
|
||||
CONFIG_MD=y
|
||||
CONFIG_BLK_DEV_DM=y
|
||||
CONFIG_DM_CRYPT=y
|
||||
CONFIG_DM_REQ_CRYPT=y
|
||||
CONFIG_DM_UEVENT=y
|
||||
CONFIG_DM_VERITY=y
|
||||
CONFIG_NETDEVICES=y
|
||||
|
@ -399,6 +403,7 @@ CONFIG_HID_ELECOM=y
|
|||
CONFIG_HID_MAGICMOUSE=y
|
||||
CONFIG_HID_MICROSOFT=y
|
||||
CONFIG_HID_MULTITOUCH=y
|
||||
CONFIG_HID_PLANTRONICS=y
|
||||
CONFIG_USB=y
|
||||
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
|
||||
CONFIG_USB_XHCI_HCD=y
|
||||
|
@ -468,6 +473,7 @@ CONFIG_GSI=y
|
|||
CONFIG_IPA3=y
|
||||
CONFIG_RMNET_IPA3=y
|
||||
CONFIG_GPIO_USB_DETECT=y
|
||||
CONFIG_SEEMP_CORE=y
|
||||
CONFIG_USB_BAM=y
|
||||
CONFIG_MSM_MDSS_PLL=y
|
||||
CONFIG_REMOTE_SPINLOCK_MSM=y
|
||||
|
@ -491,6 +497,7 @@ CONFIG_MSM_SMP2P_TEST=y
|
|||
CONFIG_MSM_QMI_INTERFACE=y
|
||||
CONFIG_MSM_RPM_SMD=y
|
||||
CONFIG_QCOM_BUS_SCALING=y
|
||||
CONFIG_MSM_SERVICE_LOCATOR=y
|
||||
CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
|
||||
CONFIG_MSM_SYSMON_GLINK_COMM=y
|
||||
CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
|
||||
|
@ -513,8 +520,10 @@ CONFIG_TRACER_PKT=y
|
|||
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
|
||||
CONFIG_MSM_MPM_OF=y
|
||||
CONFIG_MSM_EVENT_TIMER=y
|
||||
CONFIG_MSM_AVTIMER=y
|
||||
CONFIG_MSM_CORE_CTL_HELPER=y
|
||||
CONFIG_QCOM_REMOTEQDSS=y
|
||||
CONFIG_MSM_SERVICE_NOTIFIER=y
|
||||
CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
|
||||
CONFIG_MSM_RPM_LOG=y
|
||||
CONFIG_MSM_RPM_STATS_LOG=y
|
||||
|
@ -532,6 +541,7 @@ CONFIG_IIO=y
|
|||
CONFIG_QCOM_RRADC=y
|
||||
CONFIG_PWM=y
|
||||
CONFIG_PWM_QPNP=y
|
||||
CONFIG_ARM_GIC_V3_ACL=y
|
||||
CONFIG_ANDROID=y
|
||||
CONFIG_ANDROID_BINDER_IPC=y
|
||||
CONFIG_SENSORS_SSC=y
|
||||
|
@ -561,10 +571,8 @@ CONFIG_DEBUG_SET_MODULE_RONX=y
|
|||
CONFIG_DEBUG_RODATA=y
|
||||
CONFIG_DEBUG_ALIGN_RODATA=y
|
||||
CONFIG_CORESIGHT=y
|
||||
CONFIG_CORESIGHT_LINKS_AND_SINKS=y
|
||||
CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
|
||||
CONFIG_CORESIGHT_SOURCE_ETM4X=y
|
||||
CONFIG_CORESIGHT_REMOTE_ETM=y
|
||||
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
|
||||
CONFIG_CORESIGHT_QCOM_REPLICATOR=y
|
||||
CONFIG_CORESIGHT_STM=y
|
||||
CONFIG_CORESIGHT_HWEVENT=y
|
||||
|
|
|
@ -11,6 +11,7 @@ CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
|
|||
CONFIG_CGROUPS=y
|
||||
CONFIG_CGROUP_DEBUG=y
|
||||
CONFIG_CGROUP_FREEZER=y
|
||||
CONFIG_CPUSETS=y
|
||||
CONFIG_CGROUP_CPUACCT=y
|
||||
CONFIG_CGROUP_SCHED=y
|
||||
CONFIG_RT_GROUP_SCHED=y
|
||||
|
@ -52,6 +53,7 @@ CONFIG_PREEMPT=y
|
|||
CONFIG_HZ_100=y
|
||||
CONFIG_CLEANCACHE=y
|
||||
CONFIG_CMA=y
|
||||
CONFIG_CMA_DEBUGFS=y
|
||||
CONFIG_ZSMALLOC=y
|
||||
CONFIG_SECCOMP=y
|
||||
CONFIG_ARMV8_DEPRECATED=y
|
||||
|
@ -67,7 +69,6 @@ CONFIG_PM_WAKELOCKS_LIMIT=0
|
|||
# CONFIG_PM_WAKELOCKS_GC is not set
|
||||
CONFIG_PM_DEBUG=y
|
||||
CONFIG_CPU_FREQ=y
|
||||
CONFIG_SCHED_FREQ_INPUT=y
|
||||
# CONFIG_CPU_FREQ_STAT is not set
|
||||
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
|
||||
CONFIG_CPU_FREQ_GOV_USERSPACE=y
|
||||
|
@ -240,8 +241,11 @@ CONFIG_SCSI_SCAN_ASYNC=y
|
|||
CONFIG_SCSI_UFSHCD=y
|
||||
CONFIG_SCSI_UFSHCD_PLATFORM=y
|
||||
CONFIG_SCSI_UFS_QCOM=y
|
||||
CONFIG_SCSI_UFS_QCOM_ICE=y
|
||||
CONFIG_MD=y
|
||||
CONFIG_BLK_DEV_DM=y
|
||||
CONFIG_DM_CRYPT=y
|
||||
CONFIG_DM_REQ_CRYPT=y
|
||||
CONFIG_DM_UEVENT=y
|
||||
CONFIG_DM_VERITY=y
|
||||
CONFIG_NETDEVICES=y
|
||||
|
@ -402,6 +406,7 @@ CONFIG_HID_ELECOM=y
|
|||
CONFIG_HID_MAGICMOUSE=y
|
||||
CONFIG_HID_MICROSOFT=y
|
||||
CONFIG_HID_MULTITOUCH=y
|
||||
CONFIG_HID_PLANTRONICS=y
|
||||
CONFIG_USB=y
|
||||
CONFIG_USB_XHCI_HCD=y
|
||||
CONFIG_USB_EHCI_HCD=y
|
||||
|
@ -478,6 +483,7 @@ CONFIG_GSI=y
|
|||
CONFIG_IPA3=y
|
||||
CONFIG_RMNET_IPA3=y
|
||||
CONFIG_GPIO_USB_DETECT=y
|
||||
CONFIG_SEEMP_CORE=y
|
||||
CONFIG_USB_BAM=y
|
||||
CONFIG_MSM_MDSS_PLL=y
|
||||
CONFIG_REMOTE_SPINLOCK_MSM=y
|
||||
|
@ -533,6 +539,7 @@ CONFIG_TRACER_PKT=y
|
|||
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
|
||||
CONFIG_MSM_MPM_OF=y
|
||||
CONFIG_MSM_EVENT_TIMER=y
|
||||
CONFIG_MSM_AVTIMER=y
|
||||
CONFIG_MSM_CORE_CTL_HELPER=y
|
||||
CONFIG_QCOM_REMOTEQDSS=y
|
||||
CONFIG_MSM_SERVICE_NOTIFIER=y
|
||||
|
@ -616,7 +623,8 @@ CONFIG_IPC_LOGGING=y
|
|||
CONFIG_QCOM_RTB=y
|
||||
CONFIG_QCOM_RTB_SEPARATE_CPUS=y
|
||||
CONFIG_FUNCTION_TRACER=y
|
||||
CONFIG_TRACER_SNAPSHOT=y
|
||||
CONFIG_IRQSOFF_TRACER=y
|
||||
CONFIG_PREEMPT_TRACER=y
|
||||
CONFIG_BLK_DEV_IO_TRACE=y
|
||||
CONFIG_CPU_FREQ_SWITCH_PROFILER=y
|
||||
CONFIG_MEMTEST=y
|
||||
|
|
|
@ -25,16 +25,6 @@ config CPU_FREQ_BOOST_SW
|
|||
bool
|
||||
depends on THERMAL
|
||||
|
||||
config SCHED_FREQ_INPUT
|
||||
bool "Scheduler inputs to cpufreq governor"
|
||||
depends on SCHED_HMP
|
||||
help
|
||||
This option enables support for scheduler based CPU utilization
|
||||
calculations which may then be used by any cpufreq governor. The
|
||||
scheduler keeps track of "recent" cpu demand of tasks, which can
|
||||
help determine need for changing frequency well in advance of what
|
||||
a governor would have been able to detect on its own.
|
||||
|
||||
config CPU_FREQ_STAT
|
||||
tristate "CPU frequency translation statistics"
|
||||
default y
|
||||
|
|
|
@ -2947,7 +2947,7 @@ static const struct pid_entry tgid_base_stuff[] = {
|
|||
#endif
|
||||
#ifdef CONFIG_SCHED_HMP
|
||||
REG("sched_init_task_load", S_IRUGO|S_IWUSR, proc_pid_sched_init_task_load_operations),
|
||||
REG("sched_group_id", S_IRUGO|S_IWUSR, proc_pid_sched_group_id_operations),
|
||||
REG("sched_group_id", S_IRUGO|S_IWUGO, proc_pid_sched_group_id_operations),
|
||||
#endif
|
||||
#ifdef CONFIG_SCHED_DEBUG
|
||||
REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
|
||||
|
|
|
@ -1232,9 +1232,6 @@ struct sched_avg {
|
|||
u64 last_update_time, load_sum;
|
||||
u32 util_sum, period_contrib;
|
||||
unsigned long load_avg, util_avg;
|
||||
#ifdef CONFIG_SCHED_HMP
|
||||
u32 runnable_avg_sum_scaled;
|
||||
#endif
|
||||
};
|
||||
|
||||
#ifdef CONFIG_SCHEDSTATS
|
||||
|
@ -1308,12 +1305,10 @@ struct ravg {
|
|||
u64 mark_start;
|
||||
u32 sum, demand;
|
||||
u32 sum_history[RAVG_HIST_SIZE_MAX];
|
||||
#ifdef CONFIG_SCHED_FREQ_INPUT
|
||||
u32 curr_window, prev_window;
|
||||
u16 active_windows;
|
||||
u32 pred_demand;
|
||||
u8 busy_buckets[NUM_BUSY_BUCKETS];
|
||||
#endif
|
||||
};
|
||||
|
||||
struct sched_entity {
|
||||
|
@ -2155,32 +2150,6 @@ static inline cputime_t task_gtime(struct task_struct *t)
|
|||
extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
|
||||
extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
|
||||
|
||||
struct sched_load {
|
||||
unsigned long prev_load;
|
||||
unsigned long new_task_load;
|
||||
unsigned long predicted_load;
|
||||
};
|
||||
|
||||
#if defined(CONFIG_SCHED_FREQ_INPUT)
|
||||
extern int sched_set_window(u64 window_start, unsigned int window_size);
|
||||
extern unsigned long sched_get_busy(int cpu);
|
||||
extern void sched_get_cpus_busy(struct sched_load *busy,
|
||||
const struct cpumask *query_cpus);
|
||||
extern void sched_set_io_is_busy(int val);
|
||||
#else
|
||||
static inline int sched_set_window(u64 window_start, unsigned int window_size)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
static inline unsigned long sched_get_busy(int cpu)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline void sched_get_cpus_busy(struct sched_load *busy,
|
||||
const struct cpumask *query_cpus) {};
|
||||
static inline void sched_set_io_is_busy(int val) {};
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Per process flags
|
||||
*/
|
||||
|
@ -2349,10 +2318,6 @@ extern void do_set_cpus_allowed(struct task_struct *p,
|
|||
|
||||
extern int set_cpus_allowed_ptr(struct task_struct *p,
|
||||
const struct cpumask *new_mask);
|
||||
extern void sched_set_cpu_cstate(int cpu, int cstate,
|
||||
int wakeup_energy, int wakeup_latency);
|
||||
extern void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate,
|
||||
int wakeup_energy, int wakeup_latency);
|
||||
#else
|
||||
static inline void do_set_cpus_allowed(struct task_struct *p,
|
||||
const struct cpumask *new_mask)
|
||||
|
@ -2365,24 +2330,27 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
|
|||
return -EINVAL;
|
||||
return 0;
|
||||
}
|
||||
static inline void
|
||||
sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void sched_set_cluster_dstate(const cpumask_t *cluster_cpus,
|
||||
int dstate, int wakeup_energy, int wakeup_latency)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
struct sched_load {
|
||||
unsigned long prev_load;
|
||||
unsigned long new_task_load;
|
||||
unsigned long predicted_load;
|
||||
};
|
||||
|
||||
extern int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle);
|
||||
extern u32 sched_get_wake_up_idle(struct task_struct *p);
|
||||
extern int sched_set_group_id(struct task_struct *p, unsigned int group_id);
|
||||
extern unsigned int sched_get_group_id(struct task_struct *p);
|
||||
|
||||
struct cpu_cycle_counter_cb {
|
||||
u64 (*get_cpu_cycle_counter)(int cpu);
|
||||
};
|
||||
|
||||
#ifdef CONFIG_SCHED_HMP
|
||||
|
||||
extern int sched_set_window(u64 window_start, unsigned int window_size);
|
||||
extern unsigned long sched_get_busy(int cpu);
|
||||
extern void sched_get_cpus_busy(struct sched_load *busy,
|
||||
const struct cpumask *query_cpus);
|
||||
extern void sched_set_io_is_busy(int val);
|
||||
extern int sched_set_boost(int enable);
|
||||
extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
|
||||
extern u32 sched_get_init_task_load(struct task_struct *p);
|
||||
|
@ -2391,9 +2359,42 @@ extern unsigned int sched_get_static_cpu_pwr_cost(int cpu);
|
|||
extern int sched_set_static_cluster_pwr_cost(int cpu, unsigned int cost);
|
||||
extern unsigned int sched_get_static_cluster_pwr_cost(int cpu);
|
||||
extern int sched_update_freq_max_load(const cpumask_t *cpumask);
|
||||
extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin, u32
|
||||
fmax);
|
||||
#else
|
||||
extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
|
||||
u32 fmin, u32 fmax);
|
||||
extern void sched_set_cpu_cstate(int cpu, int cstate,
|
||||
int wakeup_energy, int wakeup_latency);
|
||||
extern void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate,
|
||||
int wakeup_energy, int wakeup_latency);
|
||||
extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
|
||||
extern u64 sched_ktime_clock(void);
|
||||
extern int sched_set_group_id(struct task_struct *p, unsigned int group_id);
|
||||
extern unsigned int sched_get_group_id(struct task_struct *p);
|
||||
|
||||
#else /* CONFIG_SCHED_HMP */
|
||||
static inline u64 sched_ktime_clock(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int
|
||||
register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int sched_set_window(u64 window_start, unsigned int window_size)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
static inline unsigned long sched_get_busy(int cpu)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline void sched_get_cpus_busy(struct sched_load *busy,
|
||||
const struct cpumask *query_cpus) {};
|
||||
|
||||
static inline void sched_set_io_is_busy(int val) {};
|
||||
|
||||
static inline int sched_set_boost(int enable)
|
||||
{
|
||||
return -EINVAL;
|
||||
|
@ -2406,7 +2407,17 @@ static inline int sched_update_freq_max_load(const cpumask_t *cpumask)
|
|||
|
||||
static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
|
||||
u32 fmin, u32 fmax) { }
|
||||
#endif
|
||||
|
||||
static inline void
|
||||
sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void sched_set_cluster_dstate(const cpumask_t *cluster_cpus,
|
||||
int dstate, int wakeup_energy, int wakeup_latency)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_SCHED_HMP */
|
||||
|
||||
#ifdef CONFIG_NO_HZ_COMMON
|
||||
void calc_load_enter_idle(void);
|
||||
|
@ -2441,8 +2452,6 @@ extern u64 local_clock(void);
|
|||
extern u64 running_clock(void);
|
||||
extern u64 sched_clock_cpu(int cpu);
|
||||
|
||||
extern u64 sched_ktime_clock(void);
|
||||
|
||||
extern void sched_clock_init(void);
|
||||
extern int sched_clock_initialized(void);
|
||||
|
||||
|
@ -3379,9 +3388,4 @@ static inline unsigned long rlimit_max(unsigned int limit)
|
|||
return task_rlimit_max(current, limit);
|
||||
}
|
||||
|
||||
struct cpu_cycle_counter_cb {
|
||||
u64 (*get_cpu_cycle_counter)(int cpu);
|
||||
};
|
||||
int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -40,21 +40,14 @@ extern unsigned int sysctl_sched_min_granularity;
|
|||
extern unsigned int sysctl_sched_wakeup_granularity;
|
||||
extern unsigned int sysctl_sched_child_runs_first;
|
||||
extern unsigned int sysctl_sched_wake_to_idle;
|
||||
extern unsigned int sysctl_sched_wakeup_load_threshold;
|
||||
|
||||
#ifdef CONFIG_SCHED_HMP
|
||||
extern int sysctl_sched_freq_inc_notify;
|
||||
extern int sysctl_sched_freq_dec_notify;
|
||||
extern unsigned int sysctl_sched_window_stats_policy;
|
||||
extern unsigned int sysctl_sched_ravg_hist_size;
|
||||
extern unsigned int sysctl_sched_cpu_high_irqload;
|
||||
|
||||
#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
|
||||
extern unsigned int sysctl_sched_init_task_load_pct;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SCHED_FREQ_INPUT
|
||||
extern int sysctl_sched_freq_inc_notify;
|
||||
extern int sysctl_sched_freq_dec_notify;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SCHED_HMP
|
||||
extern unsigned int sysctl_sched_spill_nr_run;
|
||||
extern unsigned int sysctl_sched_spill_load_pct;
|
||||
extern unsigned int sysctl_sched_upmigrate_pct;
|
||||
|
@ -66,11 +59,10 @@ extern unsigned int sysctl_sched_big_waker_task_load_pct;
|
|||
extern unsigned int sysctl_sched_select_prev_cpu_us;
|
||||
extern unsigned int sysctl_sched_enable_colocation;
|
||||
extern unsigned int sysctl_sched_restrict_cluster_spill;
|
||||
#if defined(CONFIG_SCHED_FREQ_INPUT)
|
||||
extern unsigned int sysctl_sched_new_task_windows;
|
||||
extern unsigned int sysctl_sched_pred_alert_freq;
|
||||
extern unsigned int sysctl_sched_freq_aggregate;
|
||||
#endif
|
||||
extern unsigned int sysctl_sched_enable_thread_grouping;
|
||||
|
||||
#else /* CONFIG_SCHED_HMP */
|
||||
|
||||
|
|
|
@ -76,9 +76,7 @@ TRACE_EVENT(sched_enq_deq_task,
|
|||
__field(unsigned int, cpus_allowed )
|
||||
#ifdef CONFIG_SCHED_HMP
|
||||
__field(unsigned int, demand )
|
||||
#ifdef CONFIG_SCHED_FREQ_INPUT
|
||||
__field(unsigned int, pred_demand )
|
||||
#endif
|
||||
#endif
|
||||
),
|
||||
|
||||
|
@ -94,18 +92,13 @@ TRACE_EVENT(sched_enq_deq_task,
|
|||
__entry->cpus_allowed = cpus_allowed;
|
||||
#ifdef CONFIG_SCHED_HMP
|
||||
__entry->demand = p->ravg.demand;
|
||||
#ifdef CONFIG_SCHED_FREQ_INPUT
|
||||
__entry->pred_demand = p->ravg.pred_demand;
|
||||
#endif
|
||||
#endif
|
||||
),
|
||||
|
||||
TP_printk("cpu=%d %s comm=%s pid=%d prio=%d nr_running=%u cpu_load=%lu rt_nr_running=%u affine=%x"
|
||||
#ifdef CONFIG_SCHED_HMP
|
||||
" demand=%u"
|
||||
#ifdef CONFIG_SCHED_FREQ_INPUT
|
||||
" pred_demand=%u"
|
||||
#endif
|
||||
" demand=%u pred_demand=%u"
|
||||
#endif
|
||||
, __entry->cpu,
|
||||
__entry->enqueue ? "enqueue" : "dequeue",
|
||||
|
@ -113,10 +106,7 @@ TRACE_EVENT(sched_enq_deq_task,
|
|||
__entry->prio, __entry->nr_running,
|
||||
__entry->cpu_load, __entry->rt_nr_running, __entry->cpus_allowed
|
||||
#ifdef CONFIG_SCHED_HMP
|
||||
, __entry->demand
|
||||
#ifdef CONFIG_SCHED_FREQ_INPUT
|
||||
, __entry->pred_demand
|
||||
#endif
|
||||
, __entry->demand, __entry->pred_demand
|
||||
#endif
|
||||
)
|
||||
);
|
||||
|
@ -126,9 +116,9 @@ TRACE_EVENT(sched_enq_deq_task,
|
|||
TRACE_EVENT(sched_task_load,
|
||||
|
||||
TP_PROTO(struct task_struct *p, bool boost, int reason,
|
||||
bool sync, bool need_idle, bool fast_path, int best_cpu),
|
||||
bool sync, bool need_idle, u32 flags, int best_cpu),
|
||||
|
||||
TP_ARGS(p, boost, reason, sync, need_idle, fast_path, best_cpu),
|
||||
TP_ARGS(p, boost, reason, sync, need_idle, flags, best_cpu),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__array( char, comm, TASK_COMM_LEN )
|
||||
|
@ -138,7 +128,7 @@ TRACE_EVENT(sched_task_load,
|
|||
__field( int, reason )
|
||||
__field( bool, sync )
|
||||
__field( bool, need_idle )
|
||||
__field( bool, fast_path )
|
||||
__field( u32, flags )
|
||||
__field( int, best_cpu )
|
||||
__field( u64, latency )
|
||||
),
|
||||
|
@ -151,17 +141,17 @@ TRACE_EVENT(sched_task_load,
|
|||
__entry->reason = reason;
|
||||
__entry->sync = sync;
|
||||
__entry->need_idle = need_idle;
|
||||
__entry->fast_path = fast_path;
|
||||
__entry->flags = flags;
|
||||
__entry->best_cpu = best_cpu;
|
||||
__entry->latency = p->state == TASK_WAKING ?
|
||||
sched_ktime_clock() -
|
||||
p->ravg.mark_start : 0;
|
||||
),
|
||||
|
||||
TP_printk("%d (%s): demand=%u boost=%d reason=%d sync=%d need_idle=%d fast_path=%d best_cpu=%d latency=%llu",
|
||||
TP_printk("%d (%s): demand=%u boost=%d reason=%d sync=%d need_idle=%d flags=%x best_cpu=%d latency=%llu",
|
||||
__entry->pid, __entry->comm, __entry->demand,
|
||||
__entry->boost, __entry->reason, __entry->sync,
|
||||
__entry->need_idle, __entry->fast_path,
|
||||
__entry->need_idle, __entry->flags,
|
||||
__entry->best_cpu, __entry->latency)
|
||||
);
|
||||
|
||||
|
@ -291,7 +281,6 @@ TRACE_EVENT(sched_update_task_ravg,
|
|||
__field(unsigned int, demand )
|
||||
__field(unsigned int, sum )
|
||||
__field( int, cpu )
|
||||
#ifdef CONFIG_SCHED_FREQ_INPUT
|
||||
__field(unsigned int, pred_demand )
|
||||
__field( u64, rq_cs )
|
||||
__field( u64, rq_ps )
|
||||
|
@ -304,7 +293,6 @@ TRACE_EVENT(sched_update_task_ravg,
|
|||
__field( u64, nt_cs )
|
||||
__field( u64, nt_ps )
|
||||
__field( u32, active_windows )
|
||||
#endif
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
|
@ -322,7 +310,6 @@ TRACE_EVENT(sched_update_task_ravg,
|
|||
__entry->demand = p->ravg.demand;
|
||||
__entry->sum = p->ravg.sum;
|
||||
__entry->irqtime = irqtime;
|
||||
#ifdef CONFIG_SCHED_FREQ_INPUT
|
||||
__entry->pred_demand = p->ravg.pred_demand;
|
||||
__entry->rq_cs = rq->curr_runnable_sum;
|
||||
__entry->rq_ps = rq->prev_runnable_sum;
|
||||
|
@ -335,28 +322,19 @@ TRACE_EVENT(sched_update_task_ravg,
|
|||
__entry->nt_cs = rq->nt_curr_runnable_sum;
|
||||
__entry->nt_ps = rq->nt_prev_runnable_sum;
|
||||
__entry->active_windows = p->ravg.active_windows;
|
||||
#endif
|
||||
),
|
||||
|
||||
TP_printk("wc %llu ws %llu delta %llu event %s cpu %d cur_freq %u cur_pid %d task %d (%s) ms %llu delta %llu demand %u sum %u irqtime %llu"
|
||||
#ifdef CONFIG_SCHED_FREQ_INPUT
|
||||
" pred_demand %u rq_cs %llu rq_ps %llu cur_window %u prev_window %u nt_cs %llu nt_ps %llu active_wins %u grp_cs %lld grp_ps %lld, grp_nt_cs %llu, grp_nt_ps: %llu"
|
||||
#endif
|
||||
TP_printk("wc %llu ws %llu delta %llu event %s cpu %d cur_freq %u cur_pid %d task %d (%s) ms %llu delta %llu demand %u sum %u irqtime %llu pred_demand %u rq_cs %llu rq_ps %llu cur_window %u prev_window %u nt_cs %llu nt_ps %llu active_wins %u grp_cs %lld grp_ps %lld, grp_nt_cs %llu, grp_nt_ps: %llu"
|
||||
, __entry->wallclock, __entry->win_start, __entry->delta,
|
||||
task_event_names[__entry->evt], __entry->cpu,
|
||||
__entry->cur_freq, __entry->cur_pid,
|
||||
__entry->pid, __entry->comm, __entry->mark_start,
|
||||
__entry->delta_m, __entry->demand,
|
||||
__entry->sum, __entry->irqtime
|
||||
#ifdef CONFIG_SCHED_FREQ_INPUT
|
||||
, __entry->pred_demand, __entry->rq_cs, __entry->rq_ps,
|
||||
__entry->curr_window, __entry->prev_window,
|
||||
__entry->nt_cs, __entry->nt_ps,
|
||||
__entry->active_windows,
|
||||
__entry->grp_cs, __entry->grp_ps,
|
||||
__entry->grp_nt_cs, __entry->grp_nt_ps
|
||||
#endif
|
||||
)
|
||||
__entry->sum, __entry->irqtime, __entry->pred_demand,
|
||||
__entry->rq_cs, __entry->rq_ps, __entry->curr_window,
|
||||
__entry->prev_window, __entry->nt_cs, __entry->nt_ps,
|
||||
__entry->active_windows, __entry->grp_cs,
|
||||
__entry->grp_ps, __entry->grp_nt_cs, __entry->grp_nt_ps)
|
||||
);
|
||||
|
||||
TRACE_EVENT(sched_get_task_cpu_cycles,
|
||||
|
@ -402,9 +380,7 @@ TRACE_EVENT(sched_update_history,
|
|||
__field( int, samples )
|
||||
__field(enum task_event, evt )
|
||||
__field(unsigned int, demand )
|
||||
#ifdef CONFIG_SCHED_FREQ_INPUT
|
||||
__field(unsigned int, pred_demand )
|
||||
#endif
|
||||
__array( u32, hist, RAVG_HIST_SIZE_MAX)
|
||||
__field(unsigned int, nr_big_tasks )
|
||||
__field( int, cpu )
|
||||
|
@ -417,27 +393,19 @@ TRACE_EVENT(sched_update_history,
|
|||
__entry->samples = samples;
|
||||
__entry->evt = evt;
|
||||
__entry->demand = p->ravg.demand;
|
||||
#ifdef CONFIG_SCHED_FREQ_INPUT
|
||||
__entry->pred_demand = p->ravg.pred_demand;
|
||||
#endif
|
||||
memcpy(__entry->hist, p->ravg.sum_history,
|
||||
RAVG_HIST_SIZE_MAX * sizeof(u32));
|
||||
__entry->nr_big_tasks = rq->hmp_stats.nr_big_tasks;
|
||||
__entry->cpu = rq->cpu;
|
||||
),
|
||||
|
||||
TP_printk("%d (%s): runtime %u samples %d event %s demand %u"
|
||||
#ifdef CONFIG_SCHED_FREQ_INPUT
|
||||
" pred_demand %u"
|
||||
#endif
|
||||
TP_printk("%d (%s): runtime %u samples %d event %s demand %u pred_demand %u"
|
||||
" (hist: %u %u %u %u %u) cpu %d nr_big %u",
|
||||
__entry->pid, __entry->comm,
|
||||
__entry->runtime, __entry->samples,
|
||||
task_event_names[__entry->evt],
|
||||
__entry->demand,
|
||||
#ifdef CONFIG_SCHED_FREQ_INPUT
|
||||
__entry->pred_demand,
|
||||
#endif
|
||||
__entry->demand, __entry->pred_demand,
|
||||
__entry->hist[0], __entry->hist[1],
|
||||
__entry->hist[2], __entry->hist[3],
|
||||
__entry->hist[4], __entry->cpu, __entry->nr_big_tasks)
|
||||
|
@ -476,8 +444,6 @@ TRACE_EVENT(sched_reset_all_window_stats,
|
|||
__entry->old_val, __entry->new_val)
|
||||
);
|
||||
|
||||
#ifdef CONFIG_SCHED_FREQ_INPUT
|
||||
|
||||
TRACE_EVENT(sched_update_pred_demand,
|
||||
|
||||
TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int pct,
|
||||
|
@ -637,8 +603,6 @@ TRACE_EVENT(sched_freq_alert,
|
|||
__entry->old_pred, __entry->new_pred)
|
||||
);
|
||||
|
||||
#endif /* CONFIG_SCHED_FREQ_INPUT */
|
||||
|
||||
#endif /* CONFIG_SCHED_HMP */
|
||||
|
||||
/*
|
||||
|
|
|
@ -15,6 +15,7 @@ obj-y += core.o loadavg.o clock.o cputime.o
|
|||
obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
|
||||
obj-y += wait.o completion.o idle.o sched_avg.o
|
||||
obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
|
||||
obj-$(CONFIG_SCHED_HMP) += hmp.o
|
||||
obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
|
||||
obj-$(CONFIG_SCHEDSTATS) += stats.o
|
||||
obj-$(CONFIG_SCHED_DEBUG) += debug.o
|
||||
|
|
3394
kernel/sched/core.c
3394
kernel/sched/core.c
File diff suppressed because it is too large
Load diff
|
@ -327,8 +327,6 @@ do { \
|
|||
P(cluster->cur_freq);
|
||||
P(cluster->max_freq);
|
||||
P(cluster->exec_scale_factor);
|
||||
#endif
|
||||
#ifdef CONFIG_SCHED_HMP
|
||||
P(hmp_stats.nr_big_tasks);
|
||||
SEQ_printf(m, " .%-30s: %llu\n", "hmp_stats.cumulative_runnable_avg",
|
||||
rq->hmp_stats.cumulative_runnable_avg);
|
||||
|
@ -417,10 +415,8 @@ static void sched_debug_header(struct seq_file *m)
|
|||
P(sched_upmigrate);
|
||||
P(sched_downmigrate);
|
||||
P(sched_init_task_load_windows);
|
||||
P(sched_init_task_load_pelt);
|
||||
P(min_capacity);
|
||||
P(max_capacity);
|
||||
P(sched_use_pelt);
|
||||
P(sched_ravg_window);
|
||||
#endif
|
||||
#undef PN
|
||||
|
@ -644,7 +640,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
|
|||
__P(load_avg);
|
||||
#ifdef CONFIG_SCHED_HMP
|
||||
P(ravg.demand);
|
||||
P(se.avg.runnable_avg_sum_scaled);
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
|
1219
kernel/sched/fair.c
1219
kernel/sched/fair.c
File diff suppressed because it is too large
Load diff
3970
kernel/sched/hmp.c
Normal file
3970
kernel/sched/hmp.c
Normal file
File diff suppressed because it is too large
Load diff
|
@ -254,7 +254,6 @@ struct cfs_bandwidth {
|
|||
struct task_group {
|
||||
struct cgroup_subsys_state css;
|
||||
|
||||
bool notify_on_migrate;
|
||||
#ifdef CONFIG_SCHED_HMP
|
||||
bool upmigrate_discouraged;
|
||||
#endif
|
||||
|
@ -356,6 +355,7 @@ extern void sched_move_task(struct task_struct *tsk);
|
|||
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
|
||||
#endif
|
||||
|
||||
extern struct task_group *css_tg(struct cgroup_subsys_state *css);
|
||||
#else /* CONFIG_CGROUP_SCHED */
|
||||
|
||||
struct cfs_bandwidth { };
|
||||
|
@ -367,9 +367,7 @@ struct cfs_bandwidth { };
|
|||
struct hmp_sched_stats {
|
||||
int nr_big_tasks;
|
||||
u64 cumulative_runnable_avg;
|
||||
#ifdef CONFIG_SCHED_FREQ_INPUT
|
||||
u64 pred_demands_sum;
|
||||
#endif
|
||||
};
|
||||
|
||||
struct sched_cluster {
|
||||
|
@ -393,6 +391,7 @@ struct sched_cluster {
|
|||
bool freq_init_done;
|
||||
int dstate, dstate_wakeup_latency, dstate_wakeup_energy;
|
||||
unsigned int static_cluster_pwr_cost;
|
||||
int notifier_sent;
|
||||
};
|
||||
|
||||
extern unsigned long all_cluster_ids[];
|
||||
|
@ -410,23 +409,17 @@ struct related_thread_group {
|
|||
struct sched_cluster *preferred_cluster;
|
||||
struct rcu_head rcu;
|
||||
u64 last_update;
|
||||
#ifdef CONFIG_SCHED_FREQ_INPUT
|
||||
struct group_cpu_time __percpu *cpu_time; /* one per cluster */
|
||||
#endif
|
||||
};
|
||||
|
||||
struct migration_sum_data {
|
||||
struct rq *src_rq, *dst_rq;
|
||||
#ifdef CONFIG_SCHED_FREQ_INPUT
|
||||
struct group_cpu_time *src_cpu_time, *dst_cpu_time;
|
||||
#endif
|
||||
};
|
||||
|
||||
extern struct list_head cluster_head;
|
||||
extern int num_clusters;
|
||||
extern struct sched_cluster *sched_cluster[NR_CPUS];
|
||||
extern int group_will_fit(struct sched_cluster *cluster,
|
||||
struct related_thread_group *grp, u64 demand);
|
||||
|
||||
struct cpu_cycle {
|
||||
u64 cycles;
|
||||
|
@ -436,7 +429,7 @@ struct cpu_cycle {
|
|||
#define for_each_sched_cluster(cluster) \
|
||||
list_for_each_entry_rcu(cluster, &cluster_head, list)
|
||||
|
||||
#endif
|
||||
#endif /* CONFIG_SCHED_HMP */
|
||||
|
||||
/* CFS-related fields in a runqueue */
|
||||
struct cfs_rq {
|
||||
|
@ -736,7 +729,6 @@ struct rq {
|
|||
u64 age_stamp;
|
||||
u64 idle_stamp;
|
||||
u64 avg_idle;
|
||||
int cstate, wakeup_latency, wakeup_energy;
|
||||
|
||||
/* This is used to determine avg_idle's max value */
|
||||
u64 max_idle_balance_cost;
|
||||
|
@ -747,6 +739,7 @@ struct rq {
|
|||
struct cpumask freq_domain_cpumask;
|
||||
struct hmp_sched_stats hmp_stats;
|
||||
|
||||
int cstate, wakeup_latency, wakeup_energy;
|
||||
u64 window_start;
|
||||
unsigned long hmp_flags;
|
||||
|
||||
|
@ -756,15 +749,8 @@ struct rq {
|
|||
unsigned int static_cpu_pwr_cost;
|
||||
struct task_struct *ed_task;
|
||||
struct cpu_cycle cc;
|
||||
|
||||
#ifdef CONFIG_SCHED_FREQ_INPUT
|
||||
u64 old_busy_time, old_busy_time_group;
|
||||
int notifier_sent;
|
||||
u64 old_estimated_time;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SCHED_FREQ_INPUT
|
||||
u64 curr_runnable_sum;
|
||||
u64 prev_runnable_sum;
|
||||
u64 nt_curr_runnable_sum;
|
||||
|
@ -1036,8 +1022,6 @@ static inline void sched_ttwu_pending(void) { }
|
|||
#include "stats.h"
|
||||
#include "auto_group.h"
|
||||
|
||||
extern void init_new_task_load(struct task_struct *p);
|
||||
|
||||
#ifdef CONFIG_SCHED_HMP
|
||||
|
||||
#define WINDOW_STATS_RECENT 0
|
||||
|
@ -1046,9 +1030,16 @@ extern void init_new_task_load(struct task_struct *p);
|
|||
#define WINDOW_STATS_AVG 3
|
||||
#define WINDOW_STATS_INVALID_POLICY 4
|
||||
|
||||
#define MAJOR_TASK_PCT 85
|
||||
#define SCHED_UPMIGRATE_MIN_NICE 15
|
||||
#define EXITING_TASK_MARKER 0xdeaddead
|
||||
|
||||
#define UP_MIGRATION 1
|
||||
#define DOWN_MIGRATION 2
|
||||
#define IRQLOAD_MIGRATION 3
|
||||
|
||||
extern struct mutex policy_mutex;
|
||||
extern unsigned int sched_ravg_window;
|
||||
extern unsigned int sched_use_pelt;
|
||||
extern unsigned int sched_disable_window_stats;
|
||||
extern unsigned int sched_enable_hmp;
|
||||
extern unsigned int max_possible_freq;
|
||||
|
@ -1063,28 +1054,59 @@ extern unsigned int max_possible_capacity;
|
|||
extern unsigned int min_max_possible_capacity;
|
||||
extern unsigned int sched_upmigrate;
|
||||
extern unsigned int sched_downmigrate;
|
||||
extern unsigned int sched_init_task_load_pelt;
|
||||
extern unsigned int sched_init_task_load_windows;
|
||||
extern unsigned int up_down_migrate_scale_factor;
|
||||
extern unsigned int sysctl_sched_restrict_cluster_spill;
|
||||
extern unsigned int sched_pred_alert_load;
|
||||
|
||||
#ifdef CONFIG_SCHED_FREQ_INPUT
|
||||
#define MAJOR_TASK_PCT 85
|
||||
extern unsigned int sched_major_task_runtime;
|
||||
#endif
|
||||
extern struct sched_cluster init_cluster;
|
||||
extern unsigned int __read_mostly sched_short_sleep_task_threshold;
|
||||
extern unsigned int __read_mostly sched_long_cpu_selection_threshold;
|
||||
extern unsigned int __read_mostly sched_big_waker_task_load;
|
||||
extern unsigned int __read_mostly sched_small_wakee_task_load;
|
||||
extern unsigned int __read_mostly sched_spill_load;
|
||||
extern unsigned int __read_mostly sched_upmigrate;
|
||||
extern unsigned int __read_mostly sched_downmigrate;
|
||||
extern unsigned int __read_mostly sysctl_sched_spill_nr_run;
|
||||
|
||||
extern void init_new_task_load(struct task_struct *p);
|
||||
extern u64 sched_ktime_clock(void);
|
||||
extern int got_boost_kick(void);
|
||||
extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
|
||||
extern void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
|
||||
u64 wallclock, u64 irqtime);
|
||||
extern bool early_detection_notify(struct rq *rq, u64 wallclock);
|
||||
extern void clear_ed_task(struct task_struct *p, struct rq *rq);
|
||||
extern void fixup_busy_time(struct task_struct *p, int new_cpu);
|
||||
extern void clear_boost_kick(int cpu);
|
||||
extern void clear_hmp_request(int cpu);
|
||||
extern void mark_task_starting(struct task_struct *p);
|
||||
extern void set_window_start(struct rq *rq);
|
||||
extern void migrate_sync_cpu(int cpu);
|
||||
extern void update_cluster_topology(void);
|
||||
extern void set_task_last_wake(struct task_struct *p, u64 wallclock);
|
||||
extern void set_task_last_switch_out(struct task_struct *p, u64 wallclock);
|
||||
extern void init_clusters(void);
|
||||
extern int __init set_sched_enable_hmp(char *str);
|
||||
extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
|
||||
extern unsigned int max_task_load(void);
|
||||
extern void sched_account_irqtime(int cpu, struct task_struct *curr,
|
||||
u64 delta, u64 wallclock);
|
||||
extern void sched_account_irqstart(int cpu, struct task_struct *curr,
|
||||
u64 wallclock);
|
||||
|
||||
unsigned int cpu_temp(int cpu);
|
||||
int sched_set_group_id(struct task_struct *p, unsigned int group_id);
|
||||
extern unsigned int cpu_temp(int cpu);
|
||||
extern unsigned int nr_eligible_big_tasks(int cpu);
|
||||
extern void update_up_down_migrate(void);
|
||||
extern int update_preferred_cluster(struct related_thread_group *grp,
|
||||
struct task_struct *p, u32 old_load);
|
||||
extern void set_preferred_cluster(struct related_thread_group *grp);
|
||||
extern void add_new_task_to_grp(struct task_struct *new);
|
||||
|
||||
enum sched_boost_type {
|
||||
SCHED_BOOST_NONE,
|
||||
SCHED_BOOST_ON_BIG,
|
||||
SCHED_BOOST_ON_ALL,
|
||||
};
|
||||
|
||||
static inline struct sched_cluster *cpu_cluster(int cpu)
|
||||
{
|
||||
|
@ -1180,20 +1202,9 @@ static inline u64 scale_load_to_cpu(u64 task_load, int cpu)
|
|||
|
||||
static inline unsigned int task_load(struct task_struct *p)
|
||||
{
|
||||
if (sched_use_pelt)
|
||||
return p->se.avg.runnable_avg_sum_scaled;
|
||||
|
||||
return p->ravg.demand;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SCHED_FREQ_INPUT
|
||||
#define set_pred_demands_sum(stats, x) ((stats)->pred_demands_sum = (x))
|
||||
#define verify_pred_demands_sum(stat) BUG_ON((s64)(stat)->pred_demands_sum < 0)
|
||||
#else
|
||||
#define set_pred_demands_sum(stats, x)
|
||||
#define verify_pred_demands_sum(stat)
|
||||
#endif
|
||||
|
||||
static inline void
|
||||
inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
|
||||
struct task_struct *p)
|
||||
|
@ -1203,33 +1214,29 @@ inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
|
|||
if (!sched_enable_hmp || sched_disable_window_stats)
|
||||
return;
|
||||
|
||||
task_load = sched_use_pelt ? p->se.avg.runnable_avg_sum_scaled :
|
||||
(sched_disable_window_stats ? 0 : p->ravg.demand);
|
||||
task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
|
||||
|
||||
stats->cumulative_runnable_avg += task_load;
|
||||
set_pred_demands_sum(stats, stats->pred_demands_sum +
|
||||
p->ravg.pred_demand);
|
||||
stats->pred_demands_sum += p->ravg.pred_demand;
|
||||
}
|
||||
|
||||
static inline void
|
||||
dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
|
||||
struct task_struct *p)
|
||||
struct task_struct *p)
|
||||
{
|
||||
u32 task_load;
|
||||
|
||||
if (!sched_enable_hmp || sched_disable_window_stats)
|
||||
return;
|
||||
|
||||
task_load = sched_use_pelt ? p->se.avg.runnable_avg_sum_scaled :
|
||||
(sched_disable_window_stats ? 0 : p->ravg.demand);
|
||||
task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
|
||||
|
||||
stats->cumulative_runnable_avg -= task_load;
|
||||
|
||||
BUG_ON((s64)stats->cumulative_runnable_avg < 0);
|
||||
|
||||
set_pred_demands_sum(stats, stats->pred_demands_sum -
|
||||
p->ravg.pred_demand);
|
||||
verify_pred_demands_sum(stats);
|
||||
stats->pred_demands_sum -= p->ravg.pred_demand;
|
||||
BUG_ON((s64)stats->pred_demands_sum < 0);
|
||||
}
|
||||
|
||||
static inline void
|
||||
|
@ -1243,12 +1250,10 @@ fixup_cumulative_runnable_avg(struct hmp_sched_stats *stats,
|
|||
stats->cumulative_runnable_avg += task_load_delta;
|
||||
BUG_ON((s64)stats->cumulative_runnable_avg < 0);
|
||||
|
||||
set_pred_demands_sum(stats, stats->pred_demands_sum +
|
||||
pred_demand_delta);
|
||||
verify_pred_demands_sum(stats);
|
||||
stats->pred_demands_sum += pred_demand_delta;
|
||||
BUG_ON((s64)stats->pred_demands_sum < 0);
|
||||
}
|
||||
|
||||
|
||||
#define pct_to_real(tunable) \
|
||||
(div64_u64((u64)tunable * (u64)max_task_load(), 100))
|
||||
|
||||
|
@@ -1280,90 +1285,25 @@ static inline int sched_cpu_high_irqload(int cpu)
	return sched_irqload(cpu) >= sysctl_sched_cpu_high_irqload;
}

static inline bool task_in_related_thread_group(struct task_struct *p)
{
	return !!(rcu_access_pointer(p->grp) != NULL);
}

static inline
struct related_thread_group *task_related_thread_group(struct task_struct *p)
{
	return rcu_dereference(p->grp);
}

#else /* CONFIG_SCHED_HMP */

#define sched_use_pelt 0

struct hmp_sched_stats;
struct related_thread_group;

static inline u64 scale_load_to_cpu(u64 load, int cpu)
{
	return load;
}

static inline unsigned int nr_eligible_big_tasks(int cpu)
{
	return 0;
}

static inline int pct_task_load(struct task_struct *p) { return 0; }

static inline int cpu_capacity(int cpu)
{
	return SCHED_LOAD_SCALE;
}

static inline int same_cluster(int src_cpu, int dst_cpu) { return 1; }

static inline void inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
					       struct task_struct *p)
{
}

static inline void dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
					       struct task_struct *p)
{
}

static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
					 u64 delta, u64 wallclock)
{
}

static inline void sched_account_irqstart(int cpu, struct task_struct *curr,
					  u64 wallclock)
{
}

static inline int sched_cpu_high_irqload(int cpu) { return 0; }

static inline void set_preferred_cluster(struct related_thread_group *grp) { }

static inline
struct related_thread_group *task_related_thread_group(struct task_struct *p)
{
	return NULL;
}

static inline u32 task_load(struct task_struct *p) { return 0; }

static inline int update_preferred_cluster(struct related_thread_group *grp,
		struct task_struct *p, u32 old_load)
{
	return 0;
}

#endif /* CONFIG_SCHED_HMP */

/*
 * Returns the rq capacity of any rq in a group. This does not play
 * well with groups where rq capacity can change independently.
 */
#define group_rq_capacity(group) cpu_capacity(group_first_cpu(group))

#ifdef CONFIG_SCHED_FREQ_INPUT
#define PRED_DEMAND_DELTA ((s64)new_pred_demand - p->ravg.pred_demand)

extern void
check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups);

extern void notify_migration(int src_cpu, int dest_cpu,
			     bool src_cpu_dead, struct task_struct *p);

struct group_cpu_time {
	u64 curr_runnable_sum;
	u64 prev_runnable_sum;
@@ -1383,23 +1323,6 @@ static inline int same_freq_domain(int src_cpu, int dst_cpu)
	return cpumask_test_cpu(dst_cpu, &rq->freq_domain_cpumask);
}

#else /* CONFIG_SCHED_FREQ_INPUT */

#define sched_migration_fixup 0
#define PRED_DEMAND_DELTA (0)

static inline void
check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups) { }

static inline int same_freq_domain(int src_cpu, int dst_cpu)
{
	return 1;
}

#endif /* CONFIG_SCHED_FREQ_INPUT */

#ifdef CONFIG_SCHED_HMP

#define BOOST_KICK 0
#define CPU_RESERVED 1

@@ -1456,11 +1379,220 @@ extern unsigned int power_cost(int cpu, u64 demand);
extern void reset_all_window_stats(u64 window_start, unsigned int window_size);
extern void boost_kick(int cpu);
extern int sched_boost(void);
extern int task_load_will_fit(struct task_struct *p, u64 task_load, int cpu,
			      enum sched_boost_type boost_type);
extern enum sched_boost_type sched_boost_type(void);
extern int task_will_fit(struct task_struct *p, int cpu);
extern int group_will_fit(struct sched_cluster *cluster,
			  struct related_thread_group *grp, u64 demand);
extern u64 cpu_load(int cpu);
extern u64 cpu_load_sync(int cpu, int sync);
extern int preferred_cluster(struct sched_cluster *cluster,
			     struct task_struct *p);
extern void inc_nr_big_task(struct hmp_sched_stats *stats,
			    struct task_struct *p);
extern void dec_nr_big_task(struct hmp_sched_stats *stats,
			    struct task_struct *p);
extern void inc_rq_hmp_stats(struct rq *rq,
			     struct task_struct *p, int change_cra);
extern void dec_rq_hmp_stats(struct rq *rq,
			     struct task_struct *p, int change_cra);
extern int is_big_task(struct task_struct *p);
extern int upmigrate_discouraged(struct task_struct *p);
extern struct sched_cluster *rq_cluster(struct rq *rq);
extern int nr_big_tasks(struct rq *rq);
extern void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
			       struct task_struct *p, s64 delta);
extern void reset_task_stats(struct task_struct *p);
extern void reset_cfs_rq_hmp_stats(int cpu, int reset_cra);
extern void _inc_hmp_sched_stats_fair(struct rq *rq,
				      struct task_struct *p, int change_cra);
extern u64 cpu_upmigrate_discourage_read_u64(struct cgroup_subsys_state *css,
					     struct cftype *cft);
extern int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css,
				struct cftype *cft, u64 upmigrate_discourage);

#else /* CONFIG_SCHED_HMP */

struct hmp_sched_stats;
struct related_thread_group;
struct sched_cluster;

static inline int got_boost_kick(void)
{
	return 0;
}

static inline void update_task_ravg(struct task_struct *p, struct rq *rq,
				int event, u64 wallclock, u64 irqtime) { }

static inline bool early_detection_notify(struct rq *rq, u64 wallclock)
{
	return 0;
}

static inline void clear_ed_task(struct task_struct *p, struct rq *rq) { }
static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { }
static inline void clear_boost_kick(int cpu) { }
static inline void clear_hmp_request(int cpu) { }
static inline void mark_task_starting(struct task_struct *p) { }
static inline void set_window_start(struct rq *rq) { }
static inline void migrate_sync_cpu(int cpu) { }
static inline void update_cluster_topology(void) { }
static inline void set_task_last_wake(struct task_struct *p, u64 wallclock) { }
static inline void set_task_last_switch_out(struct task_struct *p,
					    u64 wallclock) { }

static inline int task_will_fit(struct task_struct *p, int cpu)
{
	return 1;
}

static inline int select_best_cpu(struct task_struct *p, int target,
				  int reason, int sync)
{
	return 0;
}

static inline unsigned int power_cost(int cpu, u64 demand)
{
	return SCHED_CAPACITY_SCALE;
}

static inline int sched_boost(void)
{
	return 0;
}

static inline int is_big_task(struct task_struct *p)
{
	return 0;
}

static inline int nr_big_tasks(struct rq *rq)
{
	return 0;
}

static inline int is_cpu_throttling_imminent(int cpu)
{
	return 0;
}

static inline int is_task_migration_throttled(struct task_struct *p)
{
	return 0;
}

static inline unsigned int cpu_temp(int cpu)
{
	return 0;
}

static inline void
inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }

static inline void
dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }

static inline void
inc_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p) { }

static inline void
dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p) { }

static inline int
preferred_cluster(struct sched_cluster *cluster, struct task_struct *p)
{
	return 1;
}

static inline struct sched_cluster *rq_cluster(struct rq *rq)
{
	return NULL;
}

static inline void init_new_task_load(struct task_struct *p) { }

static inline u64 scale_load_to_cpu(u64 load, int cpu)
{
	return load;
}

static inline unsigned int nr_eligible_big_tasks(int cpu)
{
	return 0;
}

static inline int pct_task_load(struct task_struct *p) { return 0; }

static inline int cpu_capacity(int cpu)
{
	return SCHED_LOAD_SCALE;
}

static inline int same_cluster(int src_cpu, int dst_cpu) { return 1; }

static inline void inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
					       struct task_struct *p)
{
}

static inline void dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
					       struct task_struct *p)
{
}

static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
					 u64 delta, u64 wallclock)
{
}

static inline void sched_account_irqstart(int cpu, struct task_struct *curr,
					  u64 wallclock)
{
}

static inline int sched_cpu_high_irqload(int cpu) { return 0; }

static inline void set_preferred_cluster(struct related_thread_group *grp) { }

static inline bool task_in_related_thread_group(struct task_struct *p)
{
	return false;
}

static inline
struct related_thread_group *task_related_thread_group(struct task_struct *p)
{
	return NULL;
}

static inline u32 task_load(struct task_struct *p) { return 0; }

static inline int update_preferred_cluster(struct related_thread_group *grp,
		struct task_struct *p, u32 old_load)
{
	return 0;
}

static inline void add_new_task_to_grp(struct task_struct *new) {}

#define sched_enable_hmp 0
#define sched_freq_legacy_mode 1
#define sched_migration_fixup 0
#define PRED_DEMAND_DELTA (0)

static inline void
check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups) { }

static inline void notify_migration(int src_cpu, int dest_cpu,
				    bool src_cpu_dead, struct task_struct *p) { }

static inline int same_freq_domain(int src_cpu, int dst_cpu)
{
	return 1;
}

static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
static inline void pre_big_task_count_change(void) { }
@@ -1474,7 +1606,13 @@ static inline void clear_reserved(int cpu) { }
#define trace_sched_cpu_load_cgroup(...)
#define trace_sched_cpu_load_wakeup(...)

#endif /* CONFIG_SCHED_HMP */

/*
 * Returns the rq capacity of any rq in a group. This does not play
 * well with groups where rq capacity can change independently.
 */
#define group_rq_capacity(group) cpu_capacity(group_first_cpu(group))

#ifdef CONFIG_CGROUP_SCHED

@@ -1496,11 +1634,6 @@ static inline struct task_group *task_group(struct task_struct *p)
	return p->sched_task_group;
}

static inline bool task_notify_on_migrate(struct task_struct *p)
{
	return task_group(p)->notify_on_migrate;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
@@ -1526,10 +1659,6 @@ static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}
static inline bool task_notify_on_migrate(struct task_struct *p)
{
	return false;
}
#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
@@ -292,14 +292,7 @@ static struct ctl_table kern_table[] = {
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "sched_wakeup_load_threshold",
		.data		= &sysctl_sched_wakeup_load_threshold,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#ifdef CONFIG_SCHED_FREQ_INPUT
#ifdef CONFIG_SCHED_HMP
	{
		.procname	= "sched_freq_inc_notify",
		.data		= &sysctl_sched_freq_inc_notify,
@@ -316,8 +309,6 @@ static struct ctl_table kern_table[] = {
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
	},
#endif
#ifdef CONFIG_SCHED_HMP
	{
		.procname	= "sched_cpu_high_irqload",
		.data		= &sysctl_sched_cpu_high_irqload,
@@ -414,7 +405,13 @@ static struct ctl_table kern_table[] = {
		.mode		= 0644,
		.proc_handler	= sched_hmp_proc_update_handler,
	},
#ifdef CONFIG_SCHED_FREQ_INPUT
	{
		.procname	= "sched_enable_thread_grouping",
		.data		= &sysctl_sched_enable_thread_grouping,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "sched_new_task_windows",
		.data		= &sysctl_sched_new_task_windows,
@@ -437,7 +434,6 @@ static struct ctl_table kern_table[] = {
		.mode		= 0644,
		.proc_handler	= sched_window_update_handler,
	},
#endif
	{
		.procname	= "sched_boost",
		.data		= &sysctl_sched_boost,