Merge "msm: kgsl: remove un-used GPU power states"

Authored by Linux Build Service Account on 2016-08-26 14:48:31 -07:00; committed by Gerrit - the friendly Code Review server
commit cd7ec1bbe6
7 changed files with 8 additions and 273 deletions

View file

@@ -89,14 +89,10 @@ Optional Properties:
 - qcom,gpubw-dev: a phandle to a device representing bus bandwidth requirements
 		(see devdw.txt)
 - qcom,idle-timeout: This property represents the time in milliseconds for idle timeout.
-- qcom,deep-nap-timeout: This property represents the time in milliseconds for entering deeper
-		power state.
 - qcom,no-nap: If it exists software clockgating will be disabled at boot time.
 - qcom,chipid: If it exists this property is used to replace
 		the chip identification read from the GPU hardware.
 		This is used to override faulty hardware readings.
-- qcom,strtstp-sleepwake: Boolean. Enables use of GPU SLUMBER instead of SLEEP for power savings
-- qcom,gx-retention: Boolean. Enables use of GX rail RETENTION voltage
 - qcom,disable-busy-time-burst:
 		Boolean. Disables the busy time burst to avoid switching
 		of power level for large frames based on the busy time limit.
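
For illustration only (not part of this commit): a minimal gpu node fragment using the idle-timeout and no-nap properties that remain documented above. The node name, unit address, and timeout value are assumptions, not taken from any shipping dtsi.

	gpu@fdb00000 {
		/* Request a lower power state after 80 ms of inactivity */
		qcom,idle-timeout = <80>;
		/* Keep software clock gating (NAP) disabled */
		qcom,no-nap;
	};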

View file

@@ -68,12 +68,6 @@
 		qcom,idle-timeout = <80>; //<HZ/12>
 		qcom,no-nap;
-		/*
-		 * Timeout to enter deeper power saving state
-		 * from NAP.
-		 */
-		qcom,deep-nap-timeout = <200>;
-		qcom,strtstp-sleepwake;
 		qcom,highest-bank-bit = <15>;

View file

@@ -874,9 +874,6 @@ static int adreno_of_get_power(struct adreno_device *adreno_dev,
 	device->pwrctrl.interval_timeout = msecs_to_jiffies(timeout);
 
-	device->pwrctrl.strtstp_sleepwake =
-		of_property_read_bool(node, "qcom,strtstp-sleepwake");
-
 	device->pwrctrl.bus_control = of_property_read_bool(node,
 					"qcom,bus-control");

View file

@@ -43,11 +43,9 @@
 #define KGSL_STATE_INIT		0x00000001
 #define KGSL_STATE_ACTIVE	0x00000002
 #define KGSL_STATE_NAP		0x00000004
-#define KGSL_STATE_SLEEP	0x00000008
 #define KGSL_STATE_SUSPEND	0x00000010
 #define KGSL_STATE_AWARE	0x00000020
 #define KGSL_STATE_SLUMBER	0x00000080
-#define KGSL_STATE_DEEP_NAP	0x00000100
 
 /**
  * enum kgsl_event_results - result codes passed to an event callback when the

View file

@@ -31,7 +31,6 @@
 #define KGSL_PWRFLAGS_CLK_ON   1
 #define KGSL_PWRFLAGS_AXI_ON   2
 #define KGSL_PWRFLAGS_IRQ_ON   3
-#define KGSL_PWRFLAGS_RETENTION_ON 4
 #define KGSL_PWRFLAGS_NAP_OFF  5
 
 #define UPDATE_BUSY_VAL        1000000
@@ -80,10 +79,8 @@ static void kgsl_pwrctrl_set_state(struct kgsl_device *device,
 				unsigned int state);
 static void kgsl_pwrctrl_request_state(struct kgsl_device *device,
 				unsigned int state);
-static void kgsl_pwrctrl_retention_clk(struct kgsl_device *device, int state);
 static int _isense_clk_set_rate(struct kgsl_pwrctrl *pwr, int level);
 
 /**
  * _record_pwrevent() - Record the history of the new event
  * @device: Pointer to the kgsl_device struct
@@ -816,8 +813,6 @@ static ssize_t __timer_store(struct device *dev, struct device_attribute *attr,
 	/* Let the timeout be requested in ms, but convert to jiffies. */
 	if (timer == KGSL_PWR_IDLE_TIMER)
 		device->pwrctrl.interval_timeout = msecs_to_jiffies(val);
-	else if (timer == KGSL_PWR_DEEP_NAP_TIMER)
-		device->pwrctrl.deep_nap_timeout = msecs_to_jiffies(val);
 
 	mutex_unlock(&device->mutex);
@@ -843,27 +838,6 @@ static ssize_t kgsl_pwrctrl_idle_timer_show(struct device *dev,
 		jiffies_to_msecs(device->pwrctrl.interval_timeout));
 }
 
-static ssize_t kgsl_pwrctrl_deep_nap_timer_store(struct device *dev,
-					struct device_attribute *attr,
-					const char *buf, size_t count)
-{
-	return __timer_store(dev, attr, buf, count, KGSL_PWR_DEEP_NAP_TIMER);
-}
-
-static ssize_t kgsl_pwrctrl_deep_nap_timer_show(struct device *dev,
-					struct device_attribute *attr,
-					char *buf)
-{
-	struct kgsl_device *device = kgsl_device_from_dev(dev);
-
-	if (device == NULL)
-		return 0;
-	/* Show the idle_timeout converted to msec */
-	return snprintf(buf, PAGE_SIZE, "%u\n",
-		jiffies_to_msecs(device->pwrctrl.deep_nap_timeout));
-}
-
 static ssize_t kgsl_pwrctrl_pmqos_active_latency_store(struct device *dev,
 					struct device_attribute *attr,
 					const char *buf, size_t count)
@@ -985,9 +959,6 @@ static void __force_on(struct kgsl_device *device, int flag, int on)
 		case KGSL_PWRFLAGS_POWER_ON:
 			kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
 			break;
-		case KGSL_PWRFLAGS_RETENTION_ON:
-			kgsl_pwrctrl_retention_clk(device, KGSL_PWRFLAGS_ON);
-			break;
 		}
 		set_bit(flag, &device->pwrctrl.ctrl_flags);
 	} else {
@@ -1071,21 +1042,6 @@ static ssize_t kgsl_pwrctrl_force_rail_on_store(struct device *dev,
 	return __force_on_store(dev, attr, buf, count, KGSL_PWRFLAGS_POWER_ON);
 }
 
-static ssize_t kgsl_pwrctrl_force_non_retention_on_show(struct device *dev,
-					struct device_attribute *attr,
-					char *buf)
-{
-	return __force_on_show(dev, attr, buf, KGSL_PWRFLAGS_RETENTION_ON);
-}
-
-static ssize_t kgsl_pwrctrl_force_non_retention_on_store(struct device *dev,
-					struct device_attribute *attr,
-					const char *buf, size_t count)
-{
-	return __force_on_store(dev, attr, buf, count,
-					KGSL_PWRFLAGS_RETENTION_ON);
-}
-
 static ssize_t kgsl_pwrctrl_force_no_nap_show(struct device *dev,
 					struct device_attribute *attr,
 					char *buf)
@@ -1221,8 +1177,6 @@ static DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
 	kgsl_pwrctrl_max_gpuclk_store);
 static DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
 	kgsl_pwrctrl_idle_timer_store);
-static DEVICE_ATTR(deep_nap_timer, 0644, kgsl_pwrctrl_deep_nap_timer_show,
-	kgsl_pwrctrl_deep_nap_timer_store);
 static DEVICE_ATTR(gpubusy, 0444, kgsl_pwrctrl_gpubusy_show,
 	NULL);
 static DEVICE_ATTR(gpu_available_frequencies, 0444,
@@ -1265,9 +1219,6 @@ static DEVICE_ATTR(default_pwrlevel, 0644,
 	kgsl_pwrctrl_default_pwrlevel_show,
 	kgsl_pwrctrl_default_pwrlevel_store);
 static DEVICE_ATTR(popp, 0644, kgsl_popp_show, kgsl_popp_store);
-static DEVICE_ATTR(force_non_retention_on, 0644,
-	kgsl_pwrctrl_force_non_retention_on_show,
-	kgsl_pwrctrl_force_non_retention_on_store);
 static DEVICE_ATTR(force_no_nap, 0644,
 	kgsl_pwrctrl_force_no_nap_show,
 	kgsl_pwrctrl_force_no_nap_store);
@@ -1276,7 +1227,6 @@ static const struct device_attribute *pwrctrl_attr_list[] = {
 	&dev_attr_gpuclk,
 	&dev_attr_max_gpuclk,
 	&dev_attr_idle_timer,
-	&dev_attr_deep_nap_timer,
 	&dev_attr_gpubusy,
 	&dev_attr_gpu_available_frequencies,
 	&dev_attr_gpu_clock_stats,
@@ -1289,7 +1239,6 @@ static const struct device_attribute *pwrctrl_attr_list[] = {
 	&dev_attr_force_clk_on,
 	&dev_attr_force_bus_on,
 	&dev_attr_force_rail_on,
-	&dev_attr_force_non_retention_on,
 	&dev_attr_force_no_nap,
 	&dev_attr_bus_split,
 	&dev_attr_default_pwrlevel,
@@ -1328,54 +1277,6 @@ void kgsl_pwrctrl_busy_time(struct kgsl_device *device, u64 time, u64 busy)
 }
 EXPORT_SYMBOL(kgsl_pwrctrl_busy_time);
 
-static void kgsl_pwrctrl_retention_clk(struct kgsl_device *device, int state)
-{
-	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
-	int i = 0;
-
-	if (!(pwr->gx_retention) || test_bit(KGSL_PWRFLAGS_RETENTION_ON,
-				&device->pwrctrl.ctrl_flags))
-		return;
-
-	if (state == KGSL_PWRFLAGS_OFF) {
-		if (test_and_clear_bit(KGSL_PWRFLAGS_RETENTION_ON,
-				&pwr->power_flags)) {
-			trace_kgsl_retention_clk(device, state);
-			/* prepare the mx clk to avoid RPM transactions*/
-			clk_set_rate(pwr->dummy_mx_clk,
-				pwr->pwrlevels
-				[pwr->active_pwrlevel].
-				gpu_freq);
-			clk_prepare(pwr->dummy_mx_clk);
-			/*
-			 * Unprepare Gfx clocks to put Gfx rail to
-			 * retention voltage.
-			 */
-			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
-				if (pwr->grp_clks[i])
-					clk_unprepare(pwr->grp_clks[i]);
-		}
-	} else if (state == KGSL_PWRFLAGS_ON) {
-		if (!test_and_set_bit(KGSL_PWRFLAGS_RETENTION_ON,
-				&pwr->power_flags)) {
-			trace_kgsl_retention_clk(device, state);
-			/*
-			 * Prepare Gfx clocks to put Gfx rail out
-			 * of rentention
-			 */
-			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
-				if (pwr->grp_clks[i])
-					clk_prepare(pwr->grp_clks[i]);
-			/* unprepare the dummy mx clk*/
-			clk_unprepare(pwr->dummy_mx_clk);
-			clk_set_rate(pwr->dummy_mx_clk,
-				pwr->pwrlevels[pwr->num_pwrlevels - 1].
-				gpu_freq);
-		}
-	}
-}
-
 static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
 					int requested_state)
 {
@@ -1401,9 +1302,7 @@ static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
 				clk_disable(pwr->grp_clks[i]);
 			/* High latency clock maintenance. */
 			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
-				(requested_state != KGSL_STATE_NAP) &&
-				(requested_state !=
-						KGSL_STATE_DEEP_NAP)) {
+				(requested_state != KGSL_STATE_NAP)) {
 				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
 					clk_unprepare(pwr->grp_clks[i]);
 				clk_set_rate(pwr->grp_clks[0],
@@ -1415,7 +1314,7 @@ static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
 			/* Turn off the IOMMU clocks */
 			kgsl_mmu_disable_clk(&device->mmu);
-		} else if (requested_state == KGSL_STATE_SLEEP) {
+		} else if (requested_state == KGSL_STATE_SLUMBER) {
 			/* High latency clock maintenance. */
 			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
 				clk_unprepare(pwr->grp_clks[i]);
@@ -1433,8 +1332,7 @@ static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
 		trace_kgsl_clk(device, state,
 			kgsl_pwrctrl_active_freq(pwr));
 		/* High latency clock maintenance. */
-		if ((device->state != KGSL_STATE_NAP) &&
-			(device->state != KGSL_STATE_DEEP_NAP)) {
+		if (device->state != KGSL_STATE_NAP) {
 			if (pwr->pwrlevels[0].gpu_freq > 0) {
 				clk_set_rate(pwr->grp_clks[0],
 					pwr->pwrlevels
@@ -1658,16 +1556,6 @@ static void kgsl_thermal_timer(unsigned long data)
 		kgsl_schedule_work(&device->pwrctrl.thermal_cycle_ws);
 }
 
-void kgsl_deep_nap_timer(unsigned long data)
-{
-	struct kgsl_device *device = (struct kgsl_device *) data;
-
-	if (device->state == KGSL_STATE_NAP) {
-		kgsl_pwrctrl_request_state(device, KGSL_STATE_DEEP_NAP);
-		kgsl_schedule_work(&device->idle_check_ws);
-	}
-}
-
 #ifdef CONFIG_DEVFREQ_GOV_QCOM_GPUBW_MON
 static int kgsl_pwrctrl_vbif_init(void)
 {
@@ -1815,22 +1703,6 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
 	if (pwr->grp_clks[0] == NULL)
 		pwr->grp_clks[0] = pwr->grp_clks[1];
 
-	if (of_property_read_u32(pdev->dev.of_node, "qcom,deep-nap-timeout",
-		&result))
-		result = 20;
-	pwr->deep_nap_timeout = msecs_to_jiffies(result);
-
-	pwr->gx_retention = of_property_read_bool(pdev->dev.of_node,
-					"qcom,gx-retention");
-	if (pwr->gx_retention) {
-		pwr->dummy_mx_clk = clk_get(&pdev->dev, "mx_clk");
-		if (IS_ERR(pwr->dummy_mx_clk)) {
-			pwr->gx_retention = 0;
-			pwr->dummy_mx_clk = NULL;
-			KGSL_CORE_ERR("Couldn't get clock: mx_clk\n");
-		}
-	}
-
 	/* Getting gfx-bimc-interface-clk frequency */
 	if (!of_property_read_u32(pdev->dev.of_node,
 			"qcom,gpu-bimc-interface-clk-freq",
@@ -1838,8 +1710,6 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
 		pwr->gpu_bimc_int_clk = devm_clk_get(&pdev->dev,
 					"bimc_gpu_clk");
 
-	pwr->power_flags = BIT(KGSL_PWRFLAGS_RETENTION_ON);
-
 	if (of_property_read_bool(pdev->dev.of_node, "qcom,no-nap"))
 		device->pwrctrl.ctrl_flags |= BIT(KGSL_PWRFLAGS_NAP_OFF);
@@ -1986,9 +1856,6 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
 	kgsl_pwrctrl_vbif_init();
 
-	setup_timer(&pwr->deep_nap_timer, kgsl_deep_nap_timer,
-			(unsigned long) device);
-
 	return result;
 }
@@ -2049,8 +1916,7 @@ void kgsl_idle_check(struct work_struct *work)
 	mutex_lock(&device->mutex);
 
 	if (device->state == KGSL_STATE_ACTIVE
-		   || device->state == KGSL_STATE_NAP
-		   || device->state == KGSL_STATE_DEEP_NAP) {
+		   || device->state == KGSL_STATE_NAP) {
 		if (!atomic_read(&device->active_cnt))
 			kgsl_pwrctrl_change_state(device,
@@ -2062,8 +1928,7 @@ void kgsl_idle_check(struct work_struct *work)
 					jiffies +
 					device->pwrctrl.interval_timeout);
 	}
-	if (device->state != KGSL_STATE_DEEP_NAP)
-		kgsl_pwrscale_update(device);
+	kgsl_pwrscale_update(device);
 	mutex_unlock(&device->mutex);
 }
 EXPORT_SYMBOL(kgsl_idle_check);
@@ -2074,10 +1939,7 @@ void kgsl_timer(unsigned long data)
 	KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
 	if (device->requested_state != KGSL_STATE_SUSPEND) {
-		if (device->pwrctrl.strtstp_sleepwake)
-			kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
-		else
-			kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
+		kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
 		/* Have work run in a non-interrupt context. */
 		kgsl_schedule_work(&device->idle_check_ws);
 	}
@@ -2139,7 +2001,7 @@ static void kgsl_pwrctrl_disable(struct kgsl_device *device)
 	/* Order pwrrail/clk sequence based upon platform */
 	device->ftbl->regulator_disable(device);
 	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
-	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
+	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLUMBER);
 	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
 }
@@ -2151,14 +2013,7 @@ static int _init(struct kgsl_device *device)
 {
 	int status = 0;
 	switch (device->state) {
-	case KGSL_STATE_DEEP_NAP:
-		pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
-				device->pwrctrl.pm_qos_active_latency);
-		/* Get the device out of retention */
-		kgsl_pwrctrl_retention_clk(device, KGSL_PWRFLAGS_ON);
-		/* fall through */
 	case KGSL_STATE_NAP:
-	case KGSL_STATE_SLEEP:
 		/* Force power on to do the stop */
 		status = kgsl_pwrctrl_enable(device);
 	case KGSL_STATE_ACTIVE:
@@ -2178,7 +2033,7 @@ static int _init(struct kgsl_device *device)
 }
 
 /**
- * _wake() - Power up the GPU from a slumber/sleep state
+ * _wake() - Power up the GPU from a slumber state
  * @device - Pointer to the kgsl_device struct
  *
  * Resume the GPU from a lower power state to ACTIVE.
@@ -2204,18 +2059,10 @@ static int _wake(struct kgsl_device *device)
 			KGSL_DRV_ERR(device, "start failed %d\n", status);
 			break;
 		}
-		/* fall through */
-	case KGSL_STATE_SLEEP:
 		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
 		kgsl_pwrscale_wake(device);
 		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
 		/* fall through */
-	case KGSL_STATE_DEEP_NAP:
-		pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
-				device->pwrctrl.pm_qos_active_latency);
-		/* Get the device out of retention */
-		kgsl_pwrctrl_retention_clk(device, KGSL_PWRFLAGS_ON);
-		/* fall through */
 	case KGSL_STATE_NAP:
 		/* Turn on the core clocks */
 		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
@@ -2237,8 +2084,6 @@ static int _wake(struct kgsl_device *device)
 		pwr->previous_pwrlevel = pwr->active_pwrlevel;
 		mod_timer(&device->idle_timer, jiffies +
 				device->pwrctrl.interval_timeout);
-		del_timer_sync(&device->pwrctrl.deep_nap_timer);
-
 		break;
 	case KGSL_STATE_AWARE:
 		/* Enable state before turning on irq */
@@ -2246,7 +2091,6 @@ static int _wake(struct kgsl_device *device)
 		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
 		mod_timer(&device->idle_timer, jiffies +
 				device->pwrctrl.interval_timeout);
-		del_timer_sync(&device->pwrctrl.deep_nap_timer);
 		break;
 	default:
 		KGSL_PWR_WARN(device, "unhandled state %s\n",
@@ -2277,9 +2121,7 @@ _aware(struct kgsl_device *device)
 		status = kgsl_pwrctrl_enable(device);
 		break;
 	/* The following 3 cases shouldn't occur, but don't panic. */
-	case KGSL_STATE_DEEP_NAP:
 	case KGSL_STATE_NAP:
-	case KGSL_STATE_SLEEP:
 		status = _wake(device);
 	case KGSL_STATE_ACTIVE:
 		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
@@ -2317,12 +2159,8 @@ _nap(struct kgsl_device *device)
 		 */
 		kgsl_pwrscale_update_stats(device);
 
-		mod_timer(&device->pwrctrl.deep_nap_timer, jiffies +
-			device->pwrctrl.deep_nap_timeout);
-
 		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
 		kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
-	case KGSL_STATE_SLEEP:
 	case KGSL_STATE_SLUMBER:
 		break;
 	case KGSL_STATE_AWARE:
@@ -2335,63 +2173,6 @@ _nap(struct kgsl_device *device)
 	return 0;
 }
 
-static int
-_deep_nap(struct kgsl_device *device)
-{
-	switch (device->state) {
-	/*
-	 * Device is expected to be clock gated to move to
-	 * a deeper low power state. No other transition is permitted
-	 */
-	case KGSL_STATE_NAP:
-		kgsl_pwrctrl_retention_clk(device, KGSL_PWRFLAGS_OFF);
-		pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
-				PM_QOS_DEFAULT_VALUE);
-		kgsl_pwrctrl_set_state(device, KGSL_STATE_DEEP_NAP);
-		break;
-	default:
-		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
-		break;
-	}
-	return 0;
-}
-
-static int
-_sleep(struct kgsl_device *device)
-{
-	switch (device->state) {
-	case KGSL_STATE_ACTIVE:
-		if (!device->ftbl->is_hw_collapsible(device)) {
-			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
-			return -EBUSY;
-		}
-		/* fall through */
-	case KGSL_STATE_NAP:
-		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
-		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
-		kgsl_pwrscale_sleep(device);
-		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
-		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
-		pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
-				PM_QOS_DEFAULT_VALUE);
-		if (device->pwrctrl.l2pc_cpus_mask)
-			pm_qos_update_request(
-					&device->pwrctrl.l2pc_cpus_qos,
-					PM_QOS_DEFAULT_VALUE);
-		break;
-	case KGSL_STATE_SLUMBER:
-		break;
-	case KGSL_STATE_AWARE:
-		KGSL_PWR_WARN(device,
-			"transition AWARE -> SLEEP is not permitted\n");
-	default:
-		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
-		break;
-	}
-	return 0;
-}
-
 static int
 _slumber(struct kgsl_device *device)
 {
@@ -2404,17 +2185,12 @@ _slumber(struct kgsl_device *device)
 		}
 		/* fall through */
 	case KGSL_STATE_NAP:
-	case KGSL_STATE_SLEEP:
-	case KGSL_STATE_DEEP_NAP:
 		del_timer_sync(&device->idle_timer);
 		if (device->pwrctrl.thermal_cycle == CYCLE_ACTIVE) {
 			device->pwrctrl.thermal_cycle = CYCLE_ENABLE;
 			del_timer_sync(&device->pwrctrl.thermal_timer);
 		}
-		del_timer_sync(&device->pwrctrl.deep_nap_timer);
 		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
-		/* Get the device out of retention */
-		kgsl_pwrctrl_retention_clk(device, KGSL_PWRFLAGS_ON);
 		/* make sure power is on to stop the device*/
 		status = kgsl_pwrctrl_enable(device);
 		device->ftbl->suspend_context(device);
@@ -2519,18 +2295,12 @@ int kgsl_pwrctrl_change_state(struct kgsl_device *device, int state)
 	case KGSL_STATE_NAP:
 		status = _nap(device);
 		break;
-	case KGSL_STATE_SLEEP:
-		status = _sleep(device);
-		break;
 	case KGSL_STATE_SLUMBER:
 		status = _slumber(device);
 		break;
 	case KGSL_STATE_SUSPEND:
 		status = _suspend(device);
 		break;
-	case KGSL_STATE_DEEP_NAP:
-		status = _deep_nap(device);
-		break;
 	default:
 		KGSL_PWR_INFO(device, "bad state request 0x%x\n", state);
 		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
@@ -2576,10 +2346,6 @@ const char *kgsl_pwrstate_to_str(unsigned int state)
 		return "ACTIVE";
 	case KGSL_STATE_NAP:
 		return "NAP";
-	case KGSL_STATE_DEEP_NAP:
-		return "DEEP_NAP";
-	case KGSL_STATE_SLEEP:
-		return "SLEEP";
 	case KGSL_STATE_SUSPEND:
 		return "SUSPEND";
 	case KGSL_STATE_SLUMBER:

View file

@@ -52,7 +52,6 @@
 enum kgsl_pwrctrl_timer_type {
 	KGSL_PWR_IDLE_TIMER,
-	KGSL_PWR_DEEP_NAP_TIMER,
 };
 
 /*
@@ -111,7 +110,6 @@ struct kgsl_regulator {
  * struct kgsl_pwrctrl - Power control settings for a KGSL device
  * @interrupt_num - The interrupt number for the device
  * @grp_clks - Array of clocks structures that we control
- * @dummy_mx_clk - mx clock that is contolled during retention
  * @power_flags - Control flags for power
  * @pwrlevels - List of supported power levels
  * @active_pwrlevel - The currently active power level
@@ -123,7 +121,6 @@ struct kgsl_regulator {
 * @num_pwrlevels - number of available power levels
 * @interval_timeout - timeout in jiffies to be idle before a power event
 * @clock_times - Each GPU frequency's accumulated active time in us
-* @strtstp_sleepwake - true if the device supports low latency GPU start/stop
 * @regulators - array of pointers to kgsl_regulator structs
 * @pcl - bus scale identifier
 * @ocmem - ocmem bus scale identifier
@@ -153,9 +150,6 @@ struct kgsl_regulator {
 * @limits - list head for limits
 * @limits_lock - spin lock to protect limits list
 * @sysfs_pwr_limit - pointer to the sysfs limits node
-* @deep_nap_timer - Timer struct for entering deep nap
-* @deep_nap_timeout - Timeout for entering deep nap
-* @gx_retention - true if retention voltage is allowed
 * isense_clk_indx - index of isense clock, 0 if no isense
 * isense_clk_on_level - isense clock rate is XO rate below this level.
 */
@@ -163,7 +157,6 @@ struct kgsl_regulator {
 struct kgsl_pwrctrl {
 	int interrupt_num;
 	struct clk *grp_clks[KGSL_MAX_CLKS];
-	struct clk *dummy_mx_clk;
 	struct clk *gpu_bimc_int_clk;
 	int isense_clk_indx;
 	int isense_clk_on_level;
@@ -180,7 +173,6 @@ struct kgsl_pwrctrl {
 	unsigned int num_pwrlevels;
 	unsigned long interval_timeout;
 	u64 clock_times[KGSL_MAX_PWRLEVELS];
-	bool strtstp_sleepwake;
 	struct kgsl_regulator regulators[KGSL_MAX_REGULATORS];
 	uint32_t pcl;
 	uint32_t ocmem_pcl;
@@ -210,9 +202,6 @@ struct kgsl_pwrctrl {
 	struct list_head limits;
 	spinlock_t limits_lock;
 	struct kgsl_pwr_limit *sysfs_pwr_limit;
-	struct timer_list deep_nap_timer;
-	uint32_t deep_nap_timeout;
-	bool gx_retention;
 	unsigned int gpu_bimc_int_clk_freq;
 	bool gpu_bimc_interface_enabled;
 };

View file

@@ -221,11 +221,6 @@ DEFINE_EVENT(kgsl_pwr_template, kgsl_rail,
 	TP_ARGS(device, on)
 );
 
-DEFINE_EVENT(kgsl_pwr_template, kgsl_retention_clk,
-	TP_PROTO(struct kgsl_device *device, int on),
-	TP_ARGS(device, on)
-);
-
 TRACE_EVENT(kgsl_clk,
 	TP_PROTO(struct kgsl_device *device, unsigned int on,