diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 9fe71c774543..6848454c5447 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -44,6 +44,7 @@ extern unsigned int sysctl_sched_wake_to_idle;
 #ifdef CONFIG_SCHED_HMP
 extern int sysctl_sched_freq_inc_notify;
 extern int sysctl_sched_freq_dec_notify;
+extern unsigned int sysctl_sched_freq_reporting_policy;
 extern unsigned int sysctl_sched_window_stats_policy;
 extern unsigned int sysctl_sched_ravg_hist_size;
 extern unsigned int sysctl_sched_cpu_high_irqload;
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index dffe18ebab74..b3dad1289ed4 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -789,6 +789,12 @@ __read_mostly unsigned int sysctl_sched_new_task_windows = 5;
 
 #define SCHED_FREQ_ACCOUNT_WAIT_TIME 0
 
+/*
+ * This governs which load is used when reporting CPU busy time
+ * to the cpufreq governor.
+ */
+__read_mostly unsigned int sysctl_sched_freq_reporting_policy;
+
 /*
  * For increase, send notification if
  *      freq_required - cur_freq > sysctl_sched_freq_inc_notify
@@ -2198,7 +2204,7 @@ void clear_top_tasks_bitmap(unsigned long *bitmap)
  * Note that sched_load_granule can change underneath us if we are not
  * holding any runqueue locks while calling the two functions below.
  */
-static u32  __maybe_unused top_task_load(struct rq *rq)
+static u32 top_task_load(struct rq *rq)
 {
 	int index = rq->prev_top;
 	u8 prev = 1 - rq->curr_table;
@@ -3268,6 +3274,26 @@ static inline void account_load_subtractions(struct rq *rq)
 	BUG_ON((s64)rq->nt_curr_runnable_sum < 0);
 }
 
+static inline u64 freq_policy_load(struct rq *rq, u64 load)
+{
+	unsigned int reporting_policy = sysctl_sched_freq_reporting_policy;
+
+	switch (reporting_policy) {
+	case FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK:
+		load = max_t(u64, load, top_task_load(rq));
+		break;
+	case FREQ_REPORT_TOP_TASK:
+		load = top_task_load(rq);
+		break;
+	case FREQ_REPORT_CPU_LOAD:
+		break;
+	default:
+		WARN_ON_ONCE(1);
+	}
+
+	return load;
+}
+
 static inline void
 sync_window_start(struct rq *rq, struct group_cpu_time *cpu_time);
 
@@ -3385,6 +3411,8 @@ void sched_get_cpus_busy(struct sched_load *busy,
 
 		load[i] += group_load[i];
 		nload[i] += ngload[i];
+
+		load[i] = freq_policy_load(rq, load[i]);
 		/*
 		 * Scale load in reference to cluster max_possible_freq.
 		 *
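
The net effect of the two hmp.c hunks above is that sched_get_cpus_busy()
now filters the aggregated window load through freq_policy_load() before
scaling it against the cluster maximum frequency. A standalone sketch of
the selection logic follows; the cpu_load/top_load numbers are made up
and stand in for the runqueue window sums, and pick_reported_load() is a
hypothetical name, not a function in the patch:

#include <stdint.h>
#include <stdio.h>

#define FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK	0
#define FREQ_REPORT_CPU_LOAD			1
#define FREQ_REPORT_TOP_TASK			2

/* Mirrors the switch in freq_policy_load(); a sketch, not kernel code. */
static uint64_t pick_reported_load(unsigned int policy,
				   uint64_t cpu_load, uint64_t top_load)
{
	switch (policy) {
	case FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK:
		return cpu_load > top_load ? cpu_load : top_load;
	case FREQ_REPORT_TOP_TASK:
		return top_load;
	case FREQ_REPORT_CPU_LOAD:
	default:
		return cpu_load;
	}
}

int main(void)
{
	uint64_t cpu_load = 700, top_load = 900;	/* illustrative values */
	unsigned int p;

	for (p = 0; p <= 2; p++)
		printf("policy %u -> reported load %llu\n", p,
		       (unsigned long long)pick_reported_load(p, cpu_load,
							      top_load));
	return 0;
}

With the default policy of 0 (sysctl_sched_freq_reporting_policy is
zero-initialized), the reported value is max(cpu_load, top_load), so a
single heavy task is not under-reported when the rest of the window is
idle.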
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4fd56b04c336..471dc9faab35 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1034,6 +1034,10 @@ static inline void sched_ttwu_pending(void) { }
 #define WINDOW_STATS_AVG		3
 #define WINDOW_STATS_INVALID_POLICY	4
 
+#define FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK	0
+#define FREQ_REPORT_CPU_LOAD			1
+#define FREQ_REPORT_TOP_TASK			2
+
 #define MAJOR_TASK_PCT 85
 #define SCHED_UPMIGRATE_MIN_NICE 15
 #define EXITING_TASK_MARKER	0xdeaddead
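
Note that the sysctl entry below clamps writes at zero via extra1 but
sets no upper bound; an out-of-range policy is instead caught at use
time by the WARN_ON_ONCE() default in freq_policy_load(). If a hard
bound were wanted at write time, a helper along these lines could back
a custom handler; this is a sketch only, and the name is not in the
patch:

#include <stdbool.h>

#define FREQ_REPORT_TOP_TASK	2	/* highest valid policy, per sched.h above */

/* Hypothetical range check; the patch itself relies on WARN_ON_ONCE(). */
static inline bool freq_reporting_policy_valid(unsigned int policy)
{
	return policy <= FREQ_REPORT_TOP_TASK;
}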
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 587dbe09c47d..c72cb2053da7 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -296,6 +296,14 @@ static struct ctl_table kern_table[] = {
 		.proc_handler	= proc_dointvec,
 	},
 #ifdef CONFIG_SCHED_HMP
+	{
+		.procname	= "sched_freq_reporting_policy",
+		.data		= &sysctl_sched_freq_reporting_policy,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+	},
 	{
 		.procname	= "sched_freq_inc_notify",
 		.data		= &sysctl_sched_freq_inc_notify,
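
Since the entry is added to kern_table, the knob should surface as
/proc/sys/kernel/sched_freq_reporting_policy. An illustrative userspace
snippet switching to top-task reporting, assuming that path:

#include <stdio.h>

int main(void)
{
	/* 2 == FREQ_REPORT_TOP_TASK; path assumes the kern_table entry above */
	FILE *f = fopen("/proc/sys/kernel/sched_freq_reporting_policy", "w");

	if (!f) {
		perror("sched_freq_reporting_policy");
		return 1;
	}
	fprintf(f, "2\n");
	fclose(f);
	return 0;
}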