PM / devfreq: cache_hwmon: Move IRQ handling to device drivers
The cache monitoring devices might have more than one IRQ to handle or
might have notifications from other drivers instead of using actual
IRQs. So, refactor the governor to move the IRQ handling to the cache
monitoring device specific drivers and just provide an API that can be
used to request a re-evaluation. The device specific driver can call
this API to request an immediate re-evaluation whenever the cache
request has exceeded the previously set limit instead of waiting for
the periodic update.

Change-Id: Ib2e9f53f95749d659f440739a1b074b5a0d94fd8
Signed-off-by: Junjie Wu <junjiew@codeaurora.org>
parent 822ccc45e4
commit 95f77a9ab3
3 changed files with 44 additions and 35 deletions
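With this change, a monitoring driver owns its interrupt (or any other
notification source) and calls update_cache_hwmon() whenever the governor
should re-evaluate immediately. A minimal sketch of that driver-side
pattern, assuming a hypothetical my_counter_overflowed() hardware check
(the real handler introduced by this patch is mon_intr_handler in the
Krait L2 PM driver, third file below):

	#include <linux/types.h>
	#include <linux/interrupt.h>
	#include "governor_cache_hwmon.h"

	static bool my_counter_overflowed(void);	/* hypothetical HW check */

	static irqreturn_t my_mon_intr_handler(int irq, void *dev)
	{
		/* dev_id passed to request_threaded_irq() is the hwmon,
		 * as in the Krait driver below. */
		struct cache_hwmon *hw = dev;

		/* On a shared IRQ line, only claim interrupts from our counter. */
		if (!my_counter_overflowed())
			return IRQ_NONE;

		/* Ask the governor to re-evaluate now instead of waiting
		 * for the next periodic update. */
		update_cache_hwmon(hw);
		return IRQ_HANDLED;
	}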
@@ -42,6 +42,7 @@ struct cache_hwmon_node {
 	unsigned int decay_rate;
 	unsigned long prev_mhz;
 	ktime_t prev_ts;
+	bool mon_started;
 	struct list_head list;
 	void *orig_data;
 	struct cache_hwmon *hw;
@@ -167,18 +168,26 @@ static void compute_cache_freq(struct cache_hwmon_node *node,
 }
 
 #define TOO_SOON_US	(1 * USEC_PER_MSEC)
-static irqreturn_t mon_intr_handler(int irq, void *dev)
+int update_cache_hwmon(struct cache_hwmon *hwmon)
 {
-	struct cache_hwmon_node *node = dev;
-	struct devfreq *df = node->hw->df;
+	struct cache_hwmon_node *node;
+	struct devfreq *df;
 	ktime_t ts;
 	unsigned int us;
 	int ret;
 
-	if (!node->hw->is_valid_irq(node->hw))
-		return IRQ_NONE;
+	if (!hwmon)
+		return -EINVAL;
+	df = hwmon->df;
+	if (!df)
+		return -ENODEV;
+	node = df->data;
+	if (!node)
+		return -ENODEV;
+	if (!node->mon_started)
+		return -EBUSY;
 
-	dev_dbg(df->dev.parent, "Got interrupt\n");
+	dev_dbg(df->dev.parent, "Got update request\n");
 	devfreq_monitor_stop(df);
 
 	/*
@@ -200,13 +209,13 @@ static irqreturn_t mon_intr_handler(int irq, void *dev)
 		ret = update_devfreq(df);
 		if (ret)
 			dev_err(df->dev.parent,
-				"Unable to update freq on IRQ!\n");
+				"Unable to update freq on request!\n");
 		mutex_unlock(&df->lock);
 	}
 
 	devfreq_monitor_start(df);
 
-	return IRQ_HANDLED;
+	return 0;
 }
 
 static int devfreq_cache_hwmon_get_freq(struct devfreq *df,
@@ -280,15 +289,7 @@ static int start_monitoring(struct devfreq *df)
 	}
 
 	devfreq_monitor_start(df);
-
-	if (hw->irq)
-		ret = request_threaded_irq(hw->irq, NULL, mon_intr_handler,
-				IRQF_ONESHOT | IRQF_SHARED,
-				"cache_hwmon", node);
-	if (ret) {
-		dev_err(dev, "Unable to register interrupt handler!\n");
-		goto req_irq_fail;
-	}
+	node->mon_started = true;
 
 	ret = sysfs_create_group(&df->dev.kobj, &dev_attr_group);
 	if (ret) {
@@ -299,11 +300,7 @@ static int start_monitoring(struct devfreq *df)
 	return 0;
 
 sysfs_fail:
-	if (hw->irq) {
-		disable_irq(hw->irq);
-		free_irq(hw->irq, node);
-	}
-req_irq_fail:
+	node->mon_started = false;
 	devfreq_monitor_stop(df);
 	hw->stop_hwmon(hw);
 err_start:
@@ -319,10 +316,7 @@ static void stop_monitoring(struct devfreq *df)
 	struct cache_hwmon *hw = node->hw;
 
 	sysfs_remove_group(&df->dev.kobj, &dev_attr_group);
-	if (hw->irq) {
-		disable_irq(hw->irq);
-		free_irq(hw->irq, node);
-	}
+	node->mon_started = false;
 	devfreq_monitor_stop(df);
 	hw->stop_hwmon(hw);
 	df->data = node->orig_data;

@@ -28,12 +28,9 @@ struct mrps_stats {
  * struct cache_hwmon - devfreq Cache HW monitor info
  * @start_hwmon:	Start the HW monitoring
  * @stop_hwmon:		Stop the HW monitoring
- * @is_valid_irq:	Check whether the IRQ was triggered by the counter
- *			used to monitor cache activity.
  * @meas_mrps_and_set_irq:	Return the measured count and set up the
  *				IRQ to fire if usage exceeds current
  *				measurement by @tol percent.
- * @irq:		IRQ number that corresponds to this HW monitor.
  * @dev:		device that this HW monitor can monitor.
  * @of_node:		OF node of device that this HW monitor can monitor.
  * @df:			Devfreq node that this HW montior is being used
@@ -43,11 +40,9 @@ struct mrps_stats {
 struct cache_hwmon {
 	int (*start_hwmon)(struct cache_hwmon *hw, struct mrps_stats *mrps);
 	void (*stop_hwmon)(struct cache_hwmon *hw);
-	bool (*is_valid_irq)(struct cache_hwmon *hw);
 	unsigned long (*meas_mrps_and_set_irq)(struct cache_hwmon *hw,
 					unsigned int tol, unsigned int us,
 					struct mrps_stats *mrps);
-	int irq;
 	struct device *dev;
 	struct device_node *of_node;
 	struct devfreq *df;
@@ -55,12 +50,17 @@ struct cache_hwmon {
 
 #ifdef CONFIG_DEVFREQ_GOV_MSM_CACHE_HWMON
 int register_cache_hwmon(struct device *dev, struct cache_hwmon *hwmon);
+int update_cache_hwmon(struct cache_hwmon *hwmon);
 #else
 static inline int register_cache_hwmon(struct device *dev,
 				       struct cache_hwmon *hwmon)
 {
 	return 0;
 }
+int update_cache_hwmon(struct cache_hwmon *hwmon)
+{
+	return 0;
+}
 #endif
 
 #endif /* _GOVERNOR_CACHE_HWMON_H */

@@ -267,6 +267,7 @@ static struct bw_hwmon cpubw_hwmon = {
 /* ********** Cache reqs specific code ********** */
 
 static u32 prev_req_start_val;
+static int cache_irq;
 
 static void mon_mrps_init(void)
 {
@@ -335,14 +336,27 @@ static unsigned long meas_mrps_and_set_irq(struct cache_hwmon *hw,
 	return 0;
 }
 
-static bool is_valid_mrps_irq(struct cache_hwmon *hw)
+static irqreturn_t mon_intr_handler(int irq, void *dev)
 {
-	return mon_overflow(L2_H_REQ_MON) || mon_overflow(L2_M_REQ_MON);
+	if (mon_overflow(L2_H_REQ_MON) || mon_overflow(L2_M_REQ_MON)) {
+		update_cache_hwmon(dev);
+		return IRQ_HANDLED;
+	}
+	return IRQ_NONE;
 }
 
 static int start_mrps_hwmon(struct cache_hwmon *hw, struct mrps_stats *mrps)
 {
 	u32 limit;
+	int ret;
+
+	ret = request_threaded_irq(cache_irq, NULL, mon_intr_handler,
+			IRQF_ONESHOT | IRQF_SHARED,
+			"cache_hwmon", hw);
+	if (ret) {
+		pr_err("Unable to register interrupt handler!\n");
+		return ret;
+	}
 
 	mon_mrps_init();
 	mon_disable(L2_H_REQ_MON);
@@ -366,6 +380,8 @@ static int start_mrps_hwmon(struct cache_hwmon *hw, struct mrps_stats *mrps)
 
 static void stop_mrps_hwmon(struct cache_hwmon *hw)
 {
+	disable_irq(cache_irq);
+	free_irq(cache_irq, hw);
 	global_mon_enable(false);
 	mon_disable(L2_H_REQ_MON);
 	mon_disable(L2_M_REQ_MON);
@@ -377,7 +393,6 @@ static void stop_mrps_hwmon(struct cache_hwmon *hw)
 static struct cache_hwmon mrps_hwmon = {
 	.start_hwmon = &start_mrps_hwmon,
 	.stop_hwmon = &stop_mrps_hwmon,
-	.is_valid_irq = &is_valid_mrps_irq,
 	.meas_mrps_and_set_irq = &meas_mrps_and_set_irq,
 };
 
@@ -405,7 +420,7 @@ static int krait_l2pm_driver_probe(struct platform_device *pdev)
 	if (ret)
 		pr_err("CPUBW hwmon registration failed\n");
 
-	mrps_hwmon.irq = bw_irq;
+	cache_irq = bw_irq;
 	mrps_hwmon.of_node = of_parse_phandle(dev->of_node, "qcom,target-dev",
 					      0);
 	if (!mrps_hwmon.of_node)