soc: qcom: idle: Snapshot of idle/sleep driver as of msm-3.18

This is a snapshot of the Sleep driver and related functionality as of
commit e70ad0cd5efdd9dc91a77dcdac31d6132e1315c1 on the msm-3.18 branch.

Change-Id: I98fec26849898c5c66abbb1b094439780c23964d
Author: Mahesh Sivasubramanian, 2016-02-01 10:40:26 -07:00 (committed by David Keitel)
parent a3b546e509
commit c184ee865f
21 changed files with 5704 additions and 0 deletions

@@ -27,3 +27,4 @@ obj-$(CONFIG_MIPS_CPS_CPUIDLE) += cpuidle-cps.o
# POWERPC drivers
obj-$(CONFIG_PSERIES_CPUIDLE) += cpuidle-pseries.o
obj-$(CONFIG_POWERNV_CPUIDLE) += cpuidle-powernv.o
obj-$(CONFIG_MSM_PM) += lpm-levels.o lpm-levels-of.o lpm-workarounds.o

@@ -0,0 +1,852 @@
/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/err.h>
#include <linux/sysfs.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/moduleparam.h>
#include "lpm-levels.h"
bool use_psci;
enum lpm_type {
IDLE = 0,
SUSPEND,
LPM_TYPE_NR
};
struct lpm_type_str {
enum lpm_type type;
char *str;
};
static const struct lpm_type_str lpm_types[] = {
{IDLE, "idle_enabled"},
{SUSPEND, "suspend_enabled"},
};
static struct lpm_level_avail *cpu_level_available[NR_CPUS];
static struct platform_device *lpm_pdev;
static void *get_avail_val(struct kobject *kobj, struct kobj_attribute *attr)
{
void *arg = NULL;
struct lpm_level_avail *avail = NULL;
if (!strcmp(attr->attr.name, lpm_types[IDLE].str)) {
avail = container_of(attr, struct lpm_level_avail,
idle_enabled_attr);
arg = (void *) &avail->idle_enabled;
} else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str)) {
avail = container_of(attr, struct lpm_level_avail,
suspend_enabled_attr);
arg = (void *) &avail->suspend_enabled;
}
return arg;
}
ssize_t lpm_enable_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
int ret = 0;
struct kernel_param kp;
kp.arg = get_avail_val(kobj, attr);
ret = param_get_bool(buf, &kp);
if (ret > 0) {
strlcat(buf, "\n", PAGE_SIZE);
ret++;
}
return ret;
}
ssize_t lpm_enable_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t len)
{
int ret = 0;
struct kernel_param kp;
kp.arg = get_avail_val(kobj, attr);
ret = param_set_bool(buf, &kp);
return ret ? ret : len;
}
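/*
* Illustrative use of these knobs (the sysfs path is an assumption based
* on the kobject hierarchy built below, not taken from this commit):
* writing N to a level's idle_enabled file removes that level from the
* idle path, e.g.
*
*   echo N > /sys/.../cpu0/pc/idle_enabled
*
* param_set_bool() also accepts Y/1/0, so "0" works equally well.
*/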
static int create_lvl_avail_nodes(const char *name,
struct kobject *parent, struct lpm_level_avail *avail)
{
struct attribute_group *attr_group = NULL;
struct attribute **attr = NULL;
struct kobject *kobj = NULL;
int ret = 0;
kobj = kobject_create_and_add(name, parent);
if (!kobj)
return -ENOMEM;
attr_group = devm_kzalloc(&lpm_pdev->dev, sizeof(*attr_group),
GFP_KERNEL);
if (!attr_group) {
ret = -ENOMEM;
goto failed;
}
attr = devm_kzalloc(&lpm_pdev->dev,
sizeof(*attr) * (LPM_TYPE_NR + 1), GFP_KERNEL);
if (!attr) {
ret = -ENOMEM;
goto failed;
}
sysfs_attr_init(&avail->idle_enabled_attr.attr);
avail->idle_enabled_attr.attr.name = lpm_types[IDLE].str;
avail->idle_enabled_attr.attr.mode = 0644;
avail->idle_enabled_attr.show = lpm_enable_show;
avail->idle_enabled_attr.store = lpm_enable_store;
sysfs_attr_init(&avail->suspend_enabled_attr.attr);
avail->suspend_enabled_attr.attr.name = lpm_types[SUSPEND].str;
avail->suspend_enabled_attr.attr.mode = 0644;
avail->suspend_enabled_attr.show = lpm_enable_show;
avail->suspend_enabled_attr.store = lpm_enable_store;
attr[0] = &avail->idle_enabled_attr.attr;
attr[1] = &avail->suspend_enabled_attr.attr;
attr[2] = NULL;
attr_group->attrs = attr;
ret = sysfs_create_group(kobj, attr_group);
if (ret) {
ret = -ENOMEM;
goto failed;
}
avail->idle_enabled = true;
avail->suspend_enabled = true;
avail->kobj = kobj;
return ret;
failed:
kobject_put(kobj);
return ret;
}
static int create_cpu_lvl_nodes(struct lpm_cluster *p, struct kobject *parent)
{
int cpu;
int i, cpu_idx;
struct kobject **cpu_kobj = NULL;
struct lpm_level_avail *level_list = NULL;
char cpu_name[20] = {0};
int ret = 0;
cpu_kobj = devm_kzalloc(&lpm_pdev->dev, sizeof(*cpu_kobj) *
cpumask_weight(&p->child_cpus), GFP_KERNEL);
if (!cpu_kobj)
return -ENOMEM;
cpu_idx = 0;
for_each_cpu(cpu, &p->child_cpus) {
snprintf(cpu_name, sizeof(cpu_name), "cpu%d", cpu);
cpu_kobj[cpu_idx] = kobject_create_and_add(cpu_name, parent);
if (!cpu_kobj[cpu_idx]) {
ret = -ENOMEM;
goto release_kobj;
}
level_list = devm_kzalloc(&lpm_pdev->dev,
p->cpu->nlevels * sizeof(*level_list),
GFP_KERNEL);
if (!level_list) {
ret = -ENOMEM;
goto release_kobj;
}
for (i = 0; i < p->cpu->nlevels; i++) {
ret = create_lvl_avail_nodes(p->cpu->levels[i].name,
cpu_kobj[cpu_idx], &level_list[i]);
if (ret)
goto release_kobj;
}
cpu_level_available[cpu] = level_list;
cpu_idx++;
}
return ret;
release_kobj:
for (i = 0; i < cpumask_weight(&p->child_cpus); i++)
kobject_put(cpu_kobj[i]);
return ret;
}
int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj)
{
int ret = 0;
struct lpm_cluster *child = NULL;
int i;
struct kobject *cluster_kobj = NULL;
if (!p)
return -ENODEV;
cluster_kobj = kobject_create_and_add(p->cluster_name, kobj);
if (!cluster_kobj)
return -ENOMEM;
for (i = 0; i < p->nlevels; i++) {
ret = create_lvl_avail_nodes(p->levels[i].level_name,
cluster_kobj, &p->levels[i].available);
if (ret)
return ret;
}
list_for_each_entry(child, &p->child, list) {
ret = create_cluster_lvl_nodes(child, cluster_kobj);
if (ret)
return ret;
}
if (p->cpu) {
ret = create_cpu_lvl_nodes(p, cluster_kobj);
if (ret)
return ret;
}
return 0;
}
bool lpm_cpu_mode_allow(unsigned int cpu,
unsigned int index, bool from_idle)
{
struct lpm_level_avail *avail = cpu_level_available[cpu];
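/* Before the driver probes, veto idle low power modes but permit suspend */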
if (!lpm_pdev || !avail)
return !from_idle;
return !!(from_idle ? avail[index].idle_enabled :
avail[index].suspend_enabled);
}
bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
unsigned int mode, bool from_idle)
{
struct lpm_level_avail *avail = &cluster->levels[mode].available;
if (!lpm_pdev || !avail)
return false;
return !!(from_idle ? avail->idle_enabled :
avail->suspend_enabled);
}
static int parse_legacy_cluster_params(struct device_node *node,
struct lpm_cluster *c)
{
int i;
char *key;
int ret;
struct lpm_match {
char *devname;
int (*set_mode)(struct low_power_ops *, int, bool);
};
struct lpm_match match_tbl[] = {
{"l2", set_l2_mode},
{"cci", set_system_mode},
{"l3", set_l3_mode},
{"cbf", set_system_mode},
};
key = "qcom,spm-device-names";
c->ndevices = of_property_count_strings(node, key);
if (c->ndevices < 0) {
pr_info("%s(): Ignoring cluster params\n", __func__);
c->no_saw_devices = true;
c->ndevices = 0;
return 0;
}
c->name = devm_kzalloc(&lpm_pdev->dev, c->ndevices * sizeof(*c->name),
GFP_KERNEL);
c->lpm_dev = devm_kzalloc(&lpm_pdev->dev,
c->ndevices * sizeof(*c->lpm_dev),
GFP_KERNEL);
if (!c->name || !c->lpm_dev) {
ret = -ENOMEM;
goto failed;
}
for (i = 0; i < c->ndevices; i++) {
char device_name[20];
int j;
ret = of_property_read_string_index(node, key, i, &c->name[i]);
if (ret)
goto failed;
snprintf(device_name, sizeof(device_name), "%s-%s",
c->cluster_name, c->name[i]);
c->lpm_dev[i].spm = msm_spm_get_device_by_name(device_name);
if (IS_ERR_OR_NULL(c->lpm_dev[i].spm)) {
pr_err("Failed to get spm device by name:%s\n",
device_name);
ret = PTR_ERR(c->lpm_dev[i].spm);
goto failed;
}
for (j = 0; j < ARRAY_SIZE(match_tbl); j++) {
if (!strcmp(c->name[i], match_tbl[j].devname))
c->lpm_dev[i].set_mode = match_tbl[j].set_mode;
}
if (!c->lpm_dev[i].set_mode) {
ret = -ENODEV;
goto failed;
}
}
key = "qcom,default-level";
if (of_property_read_u32(node, key, &c->default_level))
c->default_level = 0;
return 0;
failed:
pr_err("%s(): Failed reading %s\n", __func__, key);
kfree(c->name);
kfree(c->lpm_dev);
c->name = NULL;
c->lpm_dev = NULL;
return ret;
}
static int parse_cluster_params(struct device_node *node,
struct lpm_cluster *c)
{
char *key;
int ret;
key = "label";
ret = of_property_read_string(node, key, &c->cluster_name);
if (ret) {
pr_err("%s(): Cannot read required param %s\n", __func__, key);
return ret;
}
if (use_psci) {
key = "qcom,psci-mode-shift";
ret = of_property_read_u32(node, key,
&c->psci_mode_shift);
if (ret) {
pr_err("%s(): Failed to read param: %s\n",
__func__, key);
return ret;
}
key = "qcom,psci-mode-mask";
ret = of_property_read_u32(node, key,
&c->psci_mode_mask);
if (ret) {
pr_err("%s(): Failed to read param: %s\n",
__func__, key);
return ret;
}
/* Set ndevices to 1 by default */
c->ndevices = 1;
return 0;
} else
return parse_legacy_cluster_params(node, c);
}
static int parse_lpm_mode(const char *str)
{
int i;
struct lpm_lookup_table mode_lookup[] = {
{MSM_SPM_MODE_POWER_COLLAPSE, "pc"},
{MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE, "spc"},
{MSM_SPM_MODE_FASTPC, "fpc"},
{MSM_SPM_MODE_GDHS, "gdhs"},
{MSM_SPM_MODE_RETENTION, "retention"},
{MSM_SPM_MODE_CLOCK_GATING, "wfi"},
{MSM_SPM_MODE_DISABLED, "active"}
};
for (i = 0; i < ARRAY_SIZE(mode_lookup); i++)
if (!strcmp(str, mode_lookup[i].mode_name))
return mode_lookup[i].modes;
return -EINVAL;
}
static int parse_power_params(struct device_node *node,
struct power_params *pwr)
{
char *key;
int ret;
key = "qcom,latency-us";
ret = of_property_read_u32(node, key, &pwr->latency_us);
if (ret)
goto fail;
key = "qcom,ss-power";
ret = of_property_read_u32(node, key, &pwr->ss_power);
if (ret)
goto fail;
key = "qcom,energy-overhead";
ret = of_property_read_u32(node, key, &pwr->energy_overhead);
if (ret)
goto fail;
key = "qcom,time-overhead";
ret = of_property_read_u32(node, key, &pwr->time_overhead_us);
fail:
if (ret)
pr_err("%s(): %s Error reading %s\n", __func__, node->name,
key);
return ret;
}
static int parse_cluster_level(struct device_node *node,
struct lpm_cluster *cluster)
{
int i = 0;
struct lpm_cluster_level *level = &cluster->levels[cluster->nlevels];
int ret = -ENOMEM;
char *key;
key = "label";
ret = of_property_read_string(node, key, &level->level_name);
if (ret)
goto failed;
if (use_psci) {
char *k = "qcom,psci-mode";
ret = of_property_read_u32(node, k, &level->psci_id);
if (ret)
goto failed;
level->is_reset = of_property_read_bool(node, "qcom,is-reset");
} else if (!cluster->no_saw_devices) {
key = "no saw-devices";
level->mode = devm_kzalloc(&lpm_pdev->dev,
cluster->ndevices * sizeof(*level->mode),
GFP_KERNEL);
if (!level->mode) {
pr_err("Memory allocation failed\n");
goto failed;
}
for (i = 0; i < cluster->ndevices; i++) {
const char *spm_mode;
char key[25] = {0};
snprintf(key, 25, "qcom,spm-%s-mode", cluster->name[i]);
ret = of_property_read_string(node, key, &spm_mode);
if (ret)
goto failed;
level->mode[i] = parse_lpm_mode(spm_mode);
if (level->mode[i] < 0)
goto failed;
if (level->mode[i] == MSM_SPM_MODE_POWER_COLLAPSE
|| level->mode[i] ==
MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE)
level->is_reset |= true;
}
}
key = "label";
ret = of_property_read_string(node, key, &level->level_name);
if (ret)
goto failed;
if (cluster->nlevels != cluster->default_level) {
key = "min child idx";
ret = of_property_read_u32(node, "qcom,min-child-idx",
&level->min_child_level);
if (ret)
goto failed;
if (cluster->min_child_level > level->min_child_level)
cluster->min_child_level = level->min_child_level;
}
level->notify_rpm = of_property_read_bool(node, "qcom,notify-rpm");
level->disable_dynamic_routing = of_property_read_bool(node,
"qcom,disable-dynamic-int-routing");
level->last_core_only = of_property_read_bool(node,
"qcom,last-core-only");
key = "parse_power_params";
ret = parse_power_params(node, &level->pwr);
if (ret)
goto failed;
cluster->nlevels++;
return 0;
failed:
pr_err("Failed %s() key = %s ret = %d\n", __func__, key, ret);
kfree(level->mode);
level->mode = NULL;
return ret;
}
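/*
* Illustrative device-tree node (names and values hypothetical, not from
* this commit) showing what parse_cluster_level() consumes in the
* non-PSCI path for a cluster whose SPM device is named "l2":
*
*   qcom,pm-cluster-level@0 {
*           label = "l2-retention";
*           qcom,spm-l2-mode = "retention";
*           qcom,latency-us = <100>;
*           qcom,ss-power = <200>;
*           qcom,energy-overhead = <300>;
*           qcom,time-overhead = <150>;
*           qcom,min-child-idx = <0>;
*   };
*/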
static int parse_cpu_spm_mode(const char *mode_name)
{
struct lpm_lookup_table pm_sm_lookup[] = {
{MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT,
"wfi"},
{MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE,
"standalone_pc"},
{MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
"pc"},
{MSM_PM_SLEEP_MODE_RETENTION,
"retention"},
{MSM_PM_SLEEP_MODE_FASTPC,
"fpc"},
};
int i;
int ret = -EINVAL;
for (i = 0; i < ARRAY_SIZE(pm_sm_lookup); i++) {
if (!strcmp(mode_name, pm_sm_lookup[i].mode_name)) {
ret = pm_sm_lookup[i].modes;
break;
}
}
return ret;
}
static int parse_cpu_mode(struct device_node *n, struct lpm_cpu_level *l)
{
char *key;
int ret;
key = "qcom,spm-cpu-mode";
ret = of_property_read_string(n, key, &l->name);
if (ret) {
pr_err("Failed %s %d\n", n->name, __LINE__);
return ret;
}
if (use_psci) {
key = "qcom,psci-cpu-mode";
ret = of_property_read_u32(n, key, &l->psci_id);
if (ret) {
pr_err("Failed reading %s on device %s\n", key,
n->name);
return ret;
}
} else {
l->mode = parse_cpu_spm_mode(l->name);
if (l->mode < 0)
return l->mode;
}
return 0;
}
static int get_cpumask_for_node(struct device_node *node, struct cpumask *mask)
{
struct device_node *cpu_node;
int cpu;
int idx = 0;
cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
if (!cpu_node) {
pr_info("%s: No CPU phandle, assuming single cluster\n",
node->full_name);
/*
* Not all targets have the cpu node populated in the device
* tree. If the cpu node is not populated, assume all possible
* CPUs belong to this cluster.
*/
cpumask_copy(mask, cpu_possible_mask);
return 0;
}
while (cpu_node) {
for_each_possible_cpu(cpu) {
if (of_get_cpu_node(cpu, NULL) == cpu_node) {
cpumask_set_cpu(cpu, mask);
break;
}
}
cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
}
return 0;
}
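/*
* Illustrative pm-cpu wiring (labels hypothetical): qcom,cpu is a list
* of phandles to cpu nodes, which the loop above matches against
* of_get_cpu_node() to build the cluster's cpumask:
*
*   qcom,pm-cpu {
*           qcom,cpu = <&CPU0>, <&CPU1>, <&CPU2>, <&CPU3>;
*   };
*/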
static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
{
struct device_node *n;
int ret = -ENOMEM;
int i;
char *key;
c->cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*c->cpu), GFP_KERNEL);
if (!c->cpu)
return ret;
c->cpu->parent = c;
if (use_psci) {
key = "qcom,psci-mode-shift";
ret = of_property_read_u32(node, key, &c->cpu->psci_mode_shift);
if (ret) {
pr_err("Failed reading %s on device %s\n", key,
node->name);
return ret;
}
key = "qcom,psci-mode-mask";
ret = of_property_read_u32(node, key, &c->cpu->psci_mode_mask);
if (ret) {
pr_err("Failed reading %s on device %s\n", key,
node->name);
return ret;
}
}
for_each_child_of_node(node, n) {
struct lpm_cpu_level *l = &c->cpu->levels[c->cpu->nlevels];
c->cpu->nlevels++;
ret = parse_cpu_mode(n, l);
if (ret < 0) {
pr_info("Failed %s\n", l->name);
goto failed;
}
ret = parse_power_params(n, &l->pwr);
if (ret)
goto failed;
key = "qcom,use-broadcast-timer";
l->use_bc_timer = of_property_read_bool(n, key);
l->is_reset = of_property_read_bool(n, "qcom,is-reset");
key = "qcom,jtag-save-restore";
l->jtag_save_restore = of_property_read_bool(n, key);
}
return 0;
failed:
for (i = 0; i < c->cpu->nlevels; i++) {
kfree(c->cpu->levels[i].name);
c->cpu->levels[i].name = NULL;
}
kfree(c->cpu);
c->cpu = NULL;
pr_err("%s(): Failed with error code:%d\n", __func__, ret);
return ret;
}
void free_cluster_node(struct lpm_cluster *cluster)
{
struct list_head *list, *next;
int i;
/* Use the _safe variant since each child is unlinked inside the loop */
list_for_each_safe(list, next, &cluster->child) {
struct lpm_cluster *n;
n = list_entry(list, typeof(*n), list);
list_del(list);
free_cluster_node(n);
}
if (cluster->cpu) {
for (i = 0; i < cluster->cpu->nlevels; i++) {
kfree(cluster->cpu->levels[i].name);
cluster->cpu->levels[i].name = NULL;
}
}
for (i = 0; i < cluster->nlevels; i++) {
kfree(cluster->levels[i].mode);
cluster->levels[i].mode = NULL;
}
kfree(cluster->cpu);
kfree(cluster->name);
kfree(cluster->lpm_dev);
cluster->cpu = NULL;
cluster->name = NULL;
cluster->lpm_dev = NULL;
cluster->ndevices = 0;
}
/*
* TODO:
* Expects a CPU or a cluster only. This ensures that affinity
* level of a cluster is consistent with reference to its
* child nodes.
*/
struct lpm_cluster *parse_cluster(struct device_node *node,
struct lpm_cluster *parent)
{
struct lpm_cluster *c;
struct device_node *n;
char *key;
int ret = 0;
c = devm_kzalloc(&lpm_pdev->dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
ret = parse_cluster_params(node, c);
if (ret)
goto failed_parse_params;
INIT_LIST_HEAD(&c->child);
c->parent = parent;
spin_lock_init(&c->sync_lock);
c->min_child_level = NR_LPM_LEVELS;
for_each_child_of_node(node, n) {
if (!n->name)
continue;
key = "qcom,pm-cluster-level";
if (!of_node_cmp(n->name, key)) {
WARN_ON(!use_psci && c->no_saw_devices);
if (parse_cluster_level(n, c))
goto failed_parse_cluster;
continue;
}
key = "qcom,pm-cluster";
if (!of_node_cmp(n->name, key)) {
struct lpm_cluster *child;
WARN_ON(!use_psci && c->no_saw_devices);
child = parse_cluster(n, c);
if (!child)
goto failed_parse_cluster;
list_add(&child->list, &c->child);
cpumask_or(&c->child_cpus, &c->child_cpus,
&child->child_cpus);
c->aff_level = child->aff_level + 1;
continue;
}
key = "qcom,pm-cpu";
if (!of_node_cmp(n->name, key)) {
/*
* Parse the cpu node only if a pm-cpu node
* is available, though the mask is defined at the
* cluster level
*/
if (get_cpumask_for_node(node, &c->child_cpus))
goto failed_parse_cluster;
if (parse_cpu_levels(n, c))
goto failed_parse_cluster;
c->aff_level = 1;
}
}
if (cpumask_intersects(&c->child_cpus, cpu_online_mask))
c->last_level = c->default_level;
else
c->last_level = c->nlevels-1;
return c;
failed_parse_cluster:
pr_err("Failed parse cluster:%s\n", key);
if (parent)
list_del(&c->list);
free_cluster_node(c);
failed_parse_params:
c->parent = NULL;
pr_err("Failed parse params\n");
kfree(c);
return NULL;
}
struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev)
{
struct device_node *top = NULL;
use_psci = of_property_read_bool(pdev->dev.of_node, "qcom,use-psci");
top = of_find_node_by_name(pdev->dev.of_node, "qcom,pm-cluster");
if (!top) {
pr_err("Failed to find root node\n");
return ERR_PTR(-ENODEV);
}
lpm_pdev = pdev;
return parse_cluster(top, NULL);
}
void cluster_dt_walkthrough(struct lpm_cluster *cluster)
{
struct list_head *list;
int i, j;
static int id;
char str[10] = {0};
if (!cluster)
return;
for (i = 0; i < id; i++)
snprintf(str+i, 10 - i, "\t");
pr_info("%d\n", __LINE__);
for (i = 0; i < cluster->nlevels; i++) {
struct lpm_cluster_level *l = &cluster->levels[i];
pr_info("%d ndevices:%d\n", __LINE__, cluster->ndevices);
for (j = 0; j < cluster->ndevices; j++)
pr_info("%sDevice: %p id:%p\n", str,
&cluster->name[j], &l->mode[i]);
}
if (cluster->cpu) {
pr_info("%d\n", __LINE__);
for (j = 0; j < cluster->cpu->nlevels; j++)
pr_info("%s\tCPU mode: %s id:%d\n", str,
cluster->cpu->levels[j].name,
cluster->cpu->levels[j].mode);
}
id++;
list_for_each(list, &cluster->child) {
struct lpm_cluster *n;
pr_info("%d\n", __LINE__);
n = list_entry(list, typeof(*n), list);
cluster_dt_walkthrough(n);
}
id--;
}

drivers/cpuidle/lpm-levels.c (new file, 1336 lines): diff suppressed because it is too large.

@@ -0,0 +1,139 @@
/* Copyright (c) 2014, 2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <soc/qcom/pm.h>
#include <soc/qcom/spm.h>
#define NR_LPM_LEVELS 8
extern bool use_psci;
struct lpm_lookup_table {
uint32_t modes;
const char *mode_name;
};
struct power_params {
uint32_t latency_us; /* Enter + Exit latency */
uint32_t ss_power; /* Steady state power */
uint32_t energy_overhead; /* Enter + exit over head */
uint32_t time_overhead_us; /* Enter + exit overhead */
};
struct lpm_cpu_level {
const char *name;
enum msm_pm_sleep_mode mode;
bool use_bc_timer;
struct power_params pwr;
unsigned int psci_id;
bool is_reset;
bool jtag_save_restore;
};
struct lpm_cpu {
struct lpm_cpu_level levels[NR_LPM_LEVELS];
int nlevels;
unsigned int psci_mode_shift;
unsigned int psci_mode_mask;
struct lpm_cluster *parent;
};
struct lpm_level_avail {
bool idle_enabled;
bool suspend_enabled;
struct kobject *kobj;
struct kobj_attribute idle_enabled_attr;
struct kobj_attribute suspend_enabled_attr;
};
struct lpm_cluster_level {
const char *level_name;
int *mode; /* SPM mode to enter */
int min_child_level;
struct cpumask num_cpu_votes;
struct power_params pwr;
bool notify_rpm;
bool disable_dynamic_routing;
bool sync_level;
bool last_core_only;
struct lpm_level_avail available;
unsigned int psci_id;
bool is_reset;
};
struct low_power_ops {
struct msm_spm_device *spm;
int (*set_mode)(struct low_power_ops *ops, int mode, bool notify_rpm);
enum msm_pm_l2_scm_flag tz_flag;
};
struct lpm_cluster {
struct list_head list;
struct list_head child;
const char *cluster_name;
const char **name;
unsigned long aff_level; /* Affinity level of the node */
struct low_power_ops *lpm_dev;
int ndevices;
struct lpm_cluster_level levels[NR_LPM_LEVELS];
int nlevels;
enum msm_pm_l2_scm_flag l2_flag;
int min_child_level;
int default_level;
int last_level;
struct lpm_cpu *cpu;
struct cpuidle_driver *drv;
spinlock_t sync_lock;
struct cpumask child_cpus;
struct cpumask num_children_in_sync;
struct lpm_cluster *parent;
struct lpm_stats *stats;
unsigned int psci_mode_shift;
unsigned int psci_mode_mask;
bool no_saw_devices;
};
int set_l2_mode(struct low_power_ops *ops, int mode, bool notify_rpm);
int set_system_mode(struct low_power_ops *ops, int mode, bool notify_rpm);
int set_l3_mode(struct low_power_ops *ops, int mode, bool notify_rpm);
void lpm_suspend_wake_time(uint64_t wakeup_time);
struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev);
void free_cluster_node(struct lpm_cluster *cluster);
void cluster_dt_walkthrough(struct lpm_cluster *cluster);
int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj);
bool lpm_cpu_mode_allow(unsigned int cpu,
unsigned int mode, bool from_idle);
bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
unsigned int mode, bool from_idle);
extern struct lpm_cluster *lpm_root_node;
#ifdef CONFIG_SMP
DECLARE_PER_CPU(bool, pending_ipi);
static inline bool is_IPI_pending(const struct cpumask *mask)
{
unsigned int cpu;
for_each_cpu(cpu, mask) {
if (per_cpu(pending_ipi, cpu))
return true;
}
return false;
}
#else
static inline bool is_IPI_pending(const struct cpumask *mask)
{
return false;
}
#endif
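/*
* Sketch of an assumed call site for is_IPI_pending() (not part of this
* header): before committing a cluster to a low power mode, bail out if
* any CPU in the cluster has an IPI queued, since that CPU is about to
* wake up anyway.
*
*   if (is_IPI_pending(&cluster->num_children_in_sync))
*           return -EBUSY;
*/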

@@ -0,0 +1,134 @@
/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/rpm-smd-regulator.h>
static struct regulator *lpm_cx_reg;
static struct work_struct dummy_vote_work;
static struct workqueue_struct *lpm_wa_wq;
static bool lpm_wa_cx_turbo_unvote;
/* While exiting from RPM-assisted power collapse on some targets like
* MSM8939, CX is bumped to turbo mode by the RPM. To reduce the power
* impact, the APSS low power driver needs to remove the CX turbo vote.
*/
static void send_dummy_cx_vote(struct work_struct *w)
{
if (lpm_cx_reg) {
regulator_set_voltage(lpm_cx_reg,
RPM_REGULATOR_CORNER_SUPER_TURBO,
RPM_REGULATOR_CORNER_SUPER_TURBO);
regulator_set_voltage(lpm_cx_reg,
RPM_REGULATOR_CORNER_NONE,
RPM_REGULATOR_CORNER_SUPER_TURBO);
}
}
/*
* lpm_wa_cx_unvote_send(): Unvote for CX turbo mode
*/
void lpm_wa_cx_unvote_send(void)
{
if (lpm_wa_cx_turbo_unvote)
queue_work(lpm_wa_wq, &dummy_vote_work);
}
EXPORT_SYMBOL(lpm_wa_cx_unvote_send);
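/*
* Assumed call site (not shown in this file): the sleep driver calls
* lpm_wa_cx_unvote_send() on resume from RPM-assisted power collapse.
* The vote/unvote itself then runs in workqueue context, so the resume
* path is not blocked on the RPM regulator transaction.
*/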
static int lpm_wa_cx_unvote_init(struct platform_device *pdev)
{
int ret = 0;
lpm_cx_reg = devm_regulator_get(&pdev->dev, "lpm-cx");
if (IS_ERR(lpm_cx_reg)) {
ret = PTR_ERR(lpm_cx_reg);
if (ret != -EPROBE_DEFER)
pr_err("Unable to get the CX regulator\n");
return ret;
}
INIT_WORK(&dummy_vote_work, send_dummy_cx_vote);
lpm_wa_wq = alloc_workqueue("lpm-wa",
WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
return ret;
}
static int lpm_wa_cx_unvote_exit(void)
{
if (lpm_wa_wq)
destroy_workqueue(lpm_wa_wq);
return 0;
}
static int lpm_wa_probe(struct platform_device *pdev)
{
int ret = 0;
lpm_wa_cx_turbo_unvote = of_property_read_bool(pdev->dev.of_node,
"qcom,lpm-wa-cx-turbo-unvote");
if (lpm_wa_cx_turbo_unvote) {
ret = lpm_wa_cx_unvote_init(pdev);
if (ret) {
pr_err("%s: Failed to initialize lpm_wa_cx_unvote (%d)\n",
__func__, ret);
return ret;
}
}
return ret;
}
static int lpm_wa_remove(struct platform_device *pdev)
{
int ret = 0;
if (lpm_wa_cx_turbo_unvote)
ret = lpm_wa_cx_unvote_exit();
return ret;
}
static struct of_device_id lpm_wa_mtch_tbl[] = {
{.compatible = "qcom,lpm-workarounds"},
{},
};
static struct platform_driver lpm_wa_driver = {
.probe = lpm_wa_probe,
.remove = lpm_wa_remove,
.driver = {
.name = "lpm-workarounds",
.owner = THIS_MODULE,
.of_match_table = lpm_wa_mtch_tbl,
},
};
static int __init lpm_wa_module_init(void)
{
int ret;
ret = platform_driver_register(&lpm_wa_driver);
if (ret)
pr_info("Error registering %s\n", lpm_wa_driver.driver.name);
return ret;
}
late_initcall(lpm_wa_module_init);

@@ -0,0 +1,19 @@
/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __LPM_WA_H
#define __LPM_WA_H
void lpm_wa_cx_unvote_send(void);
#endif /* __LPM_WA_H */

@@ -0,0 +1,778 @@
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <soc/qcom/spm.h>
#include <soc/qcom/pm.h>
#define MAX_STR_LEN 256
const char *lpm_stats_reset = "reset";
const char *lpm_stats_suspend = "suspend";
struct level_stats {
const char *name;
struct lpm_stats *owner;
int64_t first_bucket_time;
int bucket[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
int64_t min_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
int64_t max_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
int success_count;
int failed_count;
int64_t total_time;
uint64_t enter_time;
};
struct lifo_stats {
uint32_t last_in;
uint32_t first_out;
};
struct lpm_stats {
char name[MAX_STR_LEN];
struct level_stats *time_stats;
uint32_t num_levels;
struct lifo_stats lifo;
struct lpm_stats *parent;
struct list_head sibling;
struct list_head child;
struct cpumask mask;
struct dentry *directory;
bool is_cpu;
};
static struct level_stats suspend_time_stats;
static DEFINE_PER_CPU_SHARED_ALIGNED(struct lpm_stats, cpu_stats);
static void update_level_stats(struct level_stats *stats, uint64_t t,
bool success)
{
uint64_t bt;
int i;
if (!success) {
stats->failed_count++;
return;
}
stats->success_count++;
stats->total_time += t;
bt = t;
do_div(bt, stats->first_bucket_time);
if (bt < 1ULL << (CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT *
(CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1)))
i = DIV_ROUND_UP(fls((uint32_t)bt),
CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT);
else
i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
if (i >= CONFIG_MSM_IDLE_STATS_BUCKET_COUNT)
i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
stats->bucket[i]++;
if (t < stats->min_time[i] || !stats->max_time[i])
stats->min_time[i] = t;
if (t > stats->max_time[i])
stats->max_time[i] = t;
return;
}
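/*
* Worked example of the bucketing above (values depend on the kernel
* config): with first_bucket_time = 1000000 ns and
* CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT = 2, bt = t / 1000000 and the
* bucket index grows by one for every two extra bits in bt, so the
* bucket upper bounds are 1 ms, 4 ms, 16 ms, 64 ms, ..., with all
* longer residencies falling into the final bucket.
*/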
static void level_stats_print(struct seq_file *m, struct level_stats *stats)
{
int i = 0;
int64_t bucket_time = 0;
char seqs[MAX_STR_LEN] = {0};
int64_t s = stats->total_time;
uint32_t ns = do_div(s, NSEC_PER_SEC);
snprintf(seqs, MAX_STR_LEN,
"[%s] %s:\n"
" success count: %7d\n"
" total success time: %lld.%09u\n",
stats->owner->name,
stats->name,
stats->success_count,
s, ns);
seq_puts(m, seqs);
if (stats->failed_count) {
snprintf(seqs, MAX_STR_LEN, " failed count: %7d\n",
stats->failed_count);
seq_puts(m, seqs);
}
bucket_time = stats->first_bucket_time;
for (i = 0;
i < CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
i++) {
s = bucket_time;
ns = do_div(s, NSEC_PER_SEC);
snprintf(seqs, MAX_STR_LEN,
"\t<%6lld.%09u: %7d (%lld-%lld)\n",
s, ns, stats->bucket[i],
stats->min_time[i],
stats->max_time[i]);
seq_puts(m, seqs);
bucket_time <<= CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT;
}
snprintf(seqs, MAX_STR_LEN,
"\t>=%5lld.%09u:%8d (%lld-%lld)\n",
s, ns, stats->bucket[i],
stats->min_time[i],
stats->max_time[i]);
seq_puts(m, seqs);
}
static int level_stats_file_show(struct seq_file *m, void *v)
{
struct level_stats *stats = NULL;
if (!m->private)
return -EINVAL;
stats = (struct level_stats *) m->private;
level_stats_print(m, stats);
return 0;
}
static int level_stats_file_open(struct inode *inode, struct file *file)
{
return single_open(file, level_stats_file_show, inode->i_private);
}
static void level_stats_print_all(struct seq_file *m, struct lpm_stats *stats)
{
struct list_head *centry = NULL;
struct lpm_stats *pos = NULL;
int i = 0;
for (i = 0; i < stats->num_levels; i++)
level_stats_print(m, &stats->time_stats[i]);
if (list_empty(&stats->child))
return;
centry = &stats->child;
list_for_each_entry(pos, centry, sibling) {
level_stats_print_all(m, pos);
}
}
static void level_stats_reset(struct level_stats *stats)
{
memset(stats->bucket, 0, sizeof(stats->bucket));
memset(stats->min_time, 0, sizeof(stats->min_time));
memset(stats->max_time, 0, sizeof(stats->max_time));
stats->success_count = 0;
stats->failed_count = 0;
stats->total_time = 0;
}
static void level_stats_reset_all(struct lpm_stats *stats)
{
struct list_head *centry = NULL;
struct lpm_stats *pos = NULL;
int i = 0;
for (i = 0; i < stats->num_levels; i++)
level_stats_reset(&stats->time_stats[i]);
if (list_empty(&stats->child))
return;
centry = &stats->child;
list_for_each_entry(pos, centry, sibling) {
level_stats_reset_all(pos);
}
}
static int lpm_stats_file_show(struct seq_file *m, void *v)
{
struct lpm_stats *stats = (struct lpm_stats *)m->private;
if (!m->private) {
pr_err("%s: Invalid pdata, Cannot print stats\n", __func__);
return -EINVAL;
}
level_stats_print_all(m, stats);
level_stats_print(m, &suspend_time_stats);
return 0;
}
static int lpm_stats_file_open(struct inode *inode, struct file *file)
{
return single_open(file, lpm_stats_file_show, inode->i_private);
}
static ssize_t level_stats_file_write(struct file *file,
const char __user *buffer, size_t count, loff_t *off)
{
char buf[MAX_STR_LEN] = {0};
struct inode *in = file->f_inode;
struct level_stats *stats = (struct level_stats *)in->i_private;
size_t len = strnlen(lpm_stats_reset, MAX_STR_LEN);
if (!stats)
return -EINVAL;
if (count != len+1)
return -EINVAL;
if (copy_from_user(buf, buffer, len))
return -EFAULT;
if (strcmp(buf, lpm_stats_reset))
return -EINVAL;
level_stats_reset(stats);
return count;
}
static ssize_t lpm_stats_file_write(struct file *file,
const char __user *buffer, size_t count, loff_t *off)
{
char buf[MAX_STR_LEN] = {0};
struct inode *in = file->f_inode;
struct lpm_stats *stats = (struct lpm_stats *)in->i_private;
size_t len = strnlen(lpm_stats_reset, MAX_STR_LEN);
if (!stats)
return -EINVAL;
if (count != len+1)
return -EINVAL;
if (copy_from_user(buf, buffer, len))
return -EFAULT;
if (strcmp(buf, lpm_stats_reset))
return -EINVAL;
level_stats_reset_all(stats);
return count;
}
int lifo_stats_file_show(struct seq_file *m, void *v)
{
struct lpm_stats *stats = NULL;
struct list_head *centry = NULL;
struct lpm_stats *pos = NULL;
char seqs[MAX_STR_LEN] = {0};
if (!m->private)
return -EINVAL;
stats = (struct lpm_stats *)m->private;
if (list_empty(&stats->child)) {
pr_err("%s: ERROR: Lifo level with no children.\n",
__func__);
return -EINVAL;
}
centry = &stats->child;
list_for_each_entry(pos, centry, sibling) {
snprintf(seqs, MAX_STR_LEN,
"%s:\n"
"\tLast-In:%u\n"
"\tFirst-Out:%u\n",
pos->name,
pos->lifo.last_in,
pos->lifo.first_out);
seq_puts(m, seqs);
}
return 0;
}
static int lifo_stats_file_open(struct inode *inode, struct file *file)
{
return single_open(file, lifo_stats_file_show, inode->i_private);
}
static void lifo_stats_reset_all(struct lpm_stats *stats)
{
struct list_head *centry = NULL;
struct lpm_stats *pos = NULL;
centry = &stats->child;
list_for_each_entry(pos, centry, sibling) {
pos->lifo.last_in = 0;
pos->lifo.first_out = 0;
if (!list_empty(&pos->child))
lifo_stats_reset_all(pos);
}
}
static ssize_t lifo_stats_file_write(struct file *file,
const char __user *buffer, size_t count, loff_t *off)
{
char buf[MAX_STR_LEN] = {0};
struct inode *in = file->f_inode;
struct lpm_stats *stats = (struct lpm_stats *)in->i_private;
size_t len = strnlen(lpm_stats_reset, MAX_STR_LEN);
if (!stats)
return -EINVAL;
if (count != len+1)
return -EINVAL;
if (copy_from_user(buf, buffer, len))
return -EFAULT;
if (strcmp(buf, lpm_stats_reset))
return -EINVAL;
lifo_stats_reset_all(stats);
return count;
}
static const struct file_operations level_stats_fops = {
.owner = THIS_MODULE,
.open = level_stats_file_open,
.read = seq_read,
.release = single_release,
.llseek = no_llseek,
.write = level_stats_file_write,
};
static const struct file_operations lpm_stats_fops = {
.owner = THIS_MODULE,
.open = lpm_stats_file_open,
.read = seq_read,
.release = single_release,
.llseek = no_llseek,
.write = lpm_stats_file_write,
};
static const struct file_operations lifo_stats_fops = {
.owner = THIS_MODULE,
.open = lifo_stats_file_open,
.read = seq_read,
.release = single_release,
.llseek = no_llseek,
.write = lifo_stats_file_write,
};
static void update_last_in_stats(struct lpm_stats *stats)
{
struct list_head *centry = NULL;
struct lpm_stats *pos = NULL;
if (list_empty(&stats->child))
return;
centry = &stats->child;
list_for_each_entry(pos, centry, sibling) {
if (cpumask_test_cpu(smp_processor_id(), &pos->mask)) {
pos->lifo.last_in++;
return;
}
}
WARN(1, "Should not reach here\n");
}
static void update_first_out_stats(struct lpm_stats *stats)
{
struct list_head *centry = NULL;
struct lpm_stats *pos = NULL;
if (list_empty(&stats->child))
return;
centry = &stats->child;
list_for_each_entry(pos, centry, sibling) {
if (cpumask_test_cpu(smp_processor_id(), &pos->mask)) {
pos->lifo.first_out++;
return;
}
}
WARN(1, "Should not reach here\n");
}
static inline void update_exit_stats(struct lpm_stats *stats, uint32_t index,
bool success)
{
uint64_t exit_time = 0;
/* Update time stats only when exit is preceded by enter */
if (stats->time_stats[index].enter_time) {
exit_time = sched_clock() -
stats->time_stats[index].enter_time;
update_level_stats(&stats->time_stats[index], exit_time,
success);
stats->time_stats[index].enter_time = 0;
}
}
static int config_level(const char *name, const char **levels,
int num_levels, struct lpm_stats *parent, struct lpm_stats *stats)
{
int i = 0;
struct dentry *directory = NULL;
const char *rootname = "lpm_stats";
const char *dirname = rootname;
strlcpy(stats->name, name, MAX_STR_LEN);
stats->num_levels = num_levels;
stats->parent = parent;
INIT_LIST_HEAD(&stats->sibling);
INIT_LIST_HEAD(&stats->child);
stats->time_stats = kzalloc(sizeof(struct level_stats) *
num_levels, GFP_KERNEL);
if (!stats->time_stats) {
pr_err("%s: Insufficient memory for %s level time stats\n",
__func__, name);
return -ENOMEM;
}
if (parent) {
list_add_tail(&stats->sibling, &parent->child);
directory = parent->directory;
dirname = name;
}
stats->directory = debugfs_create_dir(dirname, directory);
if (!stats->directory) {
pr_err("%s: Unable to create %s debugfs directory\n",
__func__, dirname);
kfree(stats->time_stats);
return -EPERM;
}
for (i = 0; i < num_levels; i++) {
stats->time_stats[i].name = levels[i];
stats->time_stats[i].owner = stats;
stats->time_stats[i].first_bucket_time =
CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
stats->time_stats[i].enter_time = 0;
if (!debugfs_create_file(stats->time_stats[i].name, S_IRUGO,
stats->directory, (void *)&stats->time_stats[i],
&level_stats_fops)) {
pr_err("%s: Unable to create %s %s level-stats file\n",
__func__, stats->name,
stats->time_stats[i].name);
kfree(stats->time_stats);
return -EPERM;
}
}
if (!debugfs_create_file("stats", S_IRUGO, stats->directory,
(void *)stats, &lpm_stats_fops)) {
pr_err("%s: Unable to create %s's overall 'stats' file\n",
__func__, stats->name);
kfree(stats->time_stats);
return -EPERM;
}
return 0;
}
static struct lpm_stats *config_cpu_level(const char *name,
const char **levels, int num_levels, struct lpm_stats *parent,
struct cpumask *mask)
{
int cpu = 0;
struct lpm_stats *pstats = NULL;
struct lpm_stats *stats = NULL;
for (pstats = parent; pstats; pstats = pstats->parent)
cpumask_or(&pstats->mask, &pstats->mask, mask);
for_each_cpu(cpu, mask) {
int ret = 0;
char cpu_name[MAX_STR_LEN] = {0};
stats = &per_cpu(cpu_stats, cpu);
snprintf(cpu_name, MAX_STR_LEN, "%s%d", name, cpu);
cpumask_set_cpu(cpu, &stats->mask);
stats->is_cpu = true;
ret = config_level(cpu_name, levels, num_levels, parent,
stats);
if (ret) {
pr_err("%s: Unable to create %s stats\n",
__func__, cpu_name);
return ERR_PTR(ret);
}
}
return stats;
}
static void config_suspend_level(struct lpm_stats *stats)
{
suspend_time_stats.name = lpm_stats_suspend;
suspend_time_stats.owner = stats;
suspend_time_stats.first_bucket_time =
CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET;
suspend_time_stats.enter_time = 0;
suspend_time_stats.success_count = 0;
suspend_time_stats.failed_count = 0;
if (!debugfs_create_file(suspend_time_stats.name, S_IRUGO,
stats->directory, (void *)&suspend_time_stats,
&level_stats_fops))
pr_err("%s: Unable to create %s Suspend stats file\n",
__func__, stats->name);
}
static struct lpm_stats *config_cluster_level(const char *name,
const char **levels, int num_levels, struct lpm_stats *parent)
{
struct lpm_stats *stats = NULL;
int ret = 0;
stats = kzalloc(sizeof(struct lpm_stats), GFP_KERNEL);
if (!stats) {
pr_err("%s: Insufficient memory for %s stats\n",
__func__, name);
return ERR_PTR(-ENOMEM);
}
stats->is_cpu = false;
ret = config_level(name, levels, num_levels, parent, stats);
if (ret) {
pr_err("%s: Unable to create %s stats\n", __func__,
name);
kfree(stats);
return ERR_PTR(ret);
}
if (!debugfs_create_file("lifo", S_IRUGO, stats->directory,
(void *)stats, &lifo_stats_fops)) {
pr_err("%s: Unable to create %s lifo stats file\n",
__func__, stats->name);
kfree(stats);
return ERR_PTR(-EPERM);
}
if (!parent)
config_suspend_level(stats);
return stats;
}
static void cleanup_stats(struct lpm_stats *stats)
{
struct list_head *centry = NULL;
struct lpm_stats *pos = NULL;
centry = &stats->child;
list_for_each_entry_reverse(pos, centry, sibling) {
if (!list_empty(&pos->child))
cleanup_stats(pos);
list_del_init(&pos->child);
kfree(pos->time_stats);
if (!pos->is_cpu)
kfree(pos);
}
kfree(stats->time_stats);
kfree(stats);
}
static void lpm_stats_cleanup(struct lpm_stats *stats)
{
struct lpm_stats *pstats = stats;
if (!pstats)
return;
while (pstats->parent)
pstats = pstats->parent;
debugfs_remove_recursive(pstats->directory);
cleanup_stats(pstats);
}
/**
* lpm_stats_config_level() - API to configure levels stats.
*
* @name: Name of the cluster/cpu.
* @levels: Low power mode level names.
* @num_levels: Number of levels supported.
* @parent: Pointer to the parent's lpm_stats object.
* @mask: cpumask, if configuring cpu stats, else NULL.
*
* Function to communicate the low power mode levels supported by
* cpus or a cluster.
*
* Return: Pointer to the lpm_stats object or ERR_PTR(-ERRNO)
*/
struct lpm_stats *lpm_stats_config_level(const char *name,
const char **levels, int num_levels, struct lpm_stats *parent,
struct cpumask *mask)
{
struct lpm_stats *stats = NULL;
if (!levels || num_levels <= 0 || IS_ERR(parent)) {
pr_err("%s: Invalid input\n\t\tlevels = %p\n\t\t"
"num_levels = %d\n\t\tparent = %ld\n",
__func__, levels, num_levels, PTR_ERR(parent));
return ERR_PTR(-EINVAL);
}
if (mask)
stats = config_cpu_level(name, levels, num_levels, parent,
mask);
else
stats = config_cluster_level(name, levels, num_levels,
parent);
if (IS_ERR(stats)) {
lpm_stats_cleanup(parent);
return stats;
}
return stats;
}
EXPORT_SYMBOL(lpm_stats_config_level);
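/*
* Illustrative registration (names hypothetical): a caller such as the
* sleep driver builds the hierarchy top down, cluster first, then the
* per-CPU entries underneath it:
*
*   const char *cl_levels[] = { "l2-retention", "l2-pc" };
*   const char *cpu_levels[] = { "wfi", "spc", "pc" };
*   struct lpm_stats *cl;
*
*   cl = lpm_stats_config_level("system-cluster", cl_levels, 2,
*                               NULL, NULL);
*   if (!IS_ERR(cl))
*           lpm_stats_config_level("cpu", cpu_levels, 3, cl,
*                                  &cluster_cpu_mask);
*/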
/**
* lpm_stats_cluster_enter() - API to communicate the lpm level a cluster
* is prepared to enter.
*
* @stats: Pointer to the cluster's lpm_stats object.
* @index: Index of the lpm level that the cluster is going to enter.
*
* Function to communicate the low power mode level that the cluster is
* prepared to enter.
*/
void lpm_stats_cluster_enter(struct lpm_stats *stats, uint32_t index)
{
if (IS_ERR_OR_NULL(stats))
return;
stats->time_stats[index].enter_time = sched_clock();
update_last_in_stats(stats);
}
EXPORT_SYMBOL(lpm_stats_cluster_enter);
/**
* lpm_stats_cluster_exit() - API to communicate the lpm level a cluster
* exited.
*
* @stats: Pointer to the cluster's lpm_stats object.
* @index: Index of the cluster lpm level.
* @success: Success/Failure of the low power mode execution.
*
* Function to communicate the low power mode level that the cluster
* exited.
*/
void lpm_stats_cluster_exit(struct lpm_stats *stats, uint32_t index,
bool success)
{
if (IS_ERR_OR_NULL(stats))
return;
update_exit_stats(stats, index, success);
update_first_out_stats(stats);
}
EXPORT_SYMBOL(lpm_stats_cluster_exit);
/**
* lpm_stats_cpu_enter() - API to communicate the lpm level a cpu
* is prepared to enter.
*
* @index: cpu's lpm level index.
*
* Function to communicate the low power mode level that the cpu is
* prepared to enter.
*/
void lpm_stats_cpu_enter(uint32_t index)
{
struct lpm_stats *stats = &__get_cpu_var(cpu_stats);
if (!stats->time_stats)
return;
stats->time_stats[index].enter_time = sched_clock();
}
EXPORT_SYMBOL(lpm_stats_cpu_enter);
/**
* lpm_stats_cpu_exit() - API to communicate the lpm level that the cpu exited.
*
* @index: cpu's lpm level index.
* @success: Success/Failure of the low power mode execution.
*
* Function to communicate the low power mode level that the cpu exited.
*/
void lpm_stats_cpu_exit(uint32_t index, bool success)
{
struct lpm_stats *stats = &__get_cpu_var(cpu_stats);
if (!stats->time_stats)
return;
update_exit_stats(stats, index, success);
}
EXPORT_SYMBOL(lpm_stats_cpu_exit);
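/*
* Sketch of the expected enter/exit pairing (caller assumed, e.g. the
* cpuidle driver): enter is recorded immediately before entering the low
* power mode and exit immediately after, so the delta measured by
* update_exit_stats() is the actual residency:
*
*   lpm_stats_cpu_enter(idx);
*   success = enter_sleep(idx);   // hypothetical entry helper
*   lpm_stats_cpu_exit(idx, success);
*/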
/**
* lpm_stats_suspend_enter() - API to communicate system entering suspend.
*
* Function to communicate that the system is ready to enter suspend.
*/
void lpm_stats_suspend_enter(void)
{
struct timespec ts;
getnstimeofday(&ts);
suspend_time_stats.enter_time = timespec_to_ns(&ts);
}
EXPORT_SYMBOL(lpm_stats_suspend_enter);
/**
* lpm_stats_suspend_exit() - API to communicate system exiting suspend.
*
* Function to communicate that the system exited suspend.
*/
void lpm_stats_suspend_exit(void)
{
struct timespec ts;
uint64_t exit_time = 0;
getnstimeofday(&ts);
exit_time = timespec_to_ns(&ts) - suspend_time_stats.enter_time;
update_level_stats(&suspend_time_stats, exit_time, true);
}
EXPORT_SYMBOL(lpm_stats_suspend_exit);

File diff suppressed because it is too large.

@@ -282,3 +282,12 @@ config MSM_MPM_OF
interrupts when going to a system wide sleep mode. This config option
enables the MPM driver that supports initialization from a device
tree
config MSM_EVENT_TIMER
bool "Event timer"
help
This option enables a module that manages a list of event timers that
need to be monitored by the PM. This enables the PM code to monitor
events that require the core to be awake and ready to handle the
event.

@@ -31,3 +31,4 @@ obj-$(CONFIG_SOC_BUS) += socinfo.o
obj-$(CONFIG_QCOM_BUS_SCALING) += msm_bus/
obj-$(CONFIG_MSM_SECURE_BUFFER) += secure_buffer.o
obj-$(CONFIG_MSM_MPM_OF) += mpm-of.o
obj-$(CONFIG_MSM_EVENT_TIMER) += event_timer.o

@@ -0,0 +1,505 @@
/* Copyright (c) 2012, 2014-2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/module.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <soc/qcom/event_timer.h>
/**
* struct event_timer_info - basic event timer structure
* @node: timerqueue node to track time ordered data structure
* of event timers
* @notify: irq affinity notifier.
* @timer: hrtimer created for this event.
* @function : callback function for event timer.
* @data : callback data for event timer.
* @irq: irq number for which event timer is created.
* @cpu: event timer associated cpu.
*/
struct event_timer_info {
struct timerqueue_node node;
struct irq_affinity_notify notify;
void (*function)(void *);
void *data;
int irq;
int cpu;
};
struct hrtimer_info {
struct hrtimer event_hrtimer;
bool timer_initialized;
};
static DEFINE_PER_CPU(struct hrtimer_info, per_cpu_hrtimer);
static DEFINE_PER_CPU(struct timerqueue_head, timer_head) = {
.head = RB_ROOT,
.next = NULL,
};
static DEFINE_SPINLOCK(event_timer_lock);
static DEFINE_SPINLOCK(event_setup_lock);
static void create_timer_smp(void *data);
static void setup_event_hrtimer(struct event_timer_info *event);
static enum hrtimer_restart event_hrtimer_cb(struct hrtimer *hrtimer);
static void irq_affinity_change_notifier(struct irq_affinity_notify *notify,
const cpumask_t *new_cpu_mask);
static void irq_affinity_release(struct kref *ref);
static int msm_event_debug_mask;
module_param_named(
debug_mask, msm_event_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);
enum {
MSM_EVENT_TIMER_DEBUG = 1U << 0,
};
/**
* add_event_timer() : Add a wakeup event. Intended to be called
* by clients once. Returns a handle to be used
* for future transactions.
* @irq: event associated irq number.
* @function : The callback function to be called when the event
* timer expires.
* @data: callback data provided by client.
*/
struct event_timer_info *add_event_timer(uint32_t irq,
void (*function)(void *), void *data)
{
struct event_timer_info *event_info =
kzalloc(sizeof(struct event_timer_info), GFP_KERNEL);
if (!event_info)
return NULL;
event_info->function = function;
event_info->data = data;
if (irq) {
struct irq_desc *desc = irq_to_desc(irq);
struct cpumask *mask = desc->irq_data.affinity;
get_online_cpus();
event_info->cpu = cpumask_any_and(mask, cpu_online_mask);
if (event_info->cpu >= nr_cpu_ids)
event_info->cpu = cpumask_first(cpu_online_mask);
event_info->notify.notify = irq_affinity_change_notifier;
event_info->notify.release = irq_affinity_release;
irq_set_affinity_notifier(irq, &event_info->notify);
put_online_cpus();
}
/* Init rb node and hr timer */
timerqueue_init(&event_info->node);
pr_debug("New Event Added. Event %p(on cpu%d). irq %d.\n",
event_info, event_info->cpu, irq);
return event_info;
}
EXPORT_SYMBOL(add_event_timer);
/**
* is_event_next(): Helper function to check if the event is the next
* expiring event
* @event : handle to the event to be checked.
*/
static bool is_event_next(struct event_timer_info *event)
{
struct event_timer_info *next_event;
struct timerqueue_node *next;
bool ret = false;
next = timerqueue_getnext(&per_cpu(timer_head, event->cpu));
if (!next)
goto exit_is_next_event;
next_event = container_of(next, struct event_timer_info, node);
if (!next_event)
goto exit_is_next_event;
if (next_event == event)
ret = true;
exit_is_next_event:
return ret;
}
/**
* is_event_active(): Helper function to check if the timer for a given event
* has been started.
* @event : handle to the event to be checked.
*/
static bool is_event_active(struct event_timer_info *event)
{
struct timerqueue_node *next;
struct event_timer_info *next_event;
bool ret = false;
for (next = timerqueue_getnext(&per_cpu(timer_head, event->cpu)); next;
next = timerqueue_iterate_next(next)) {
next_event = container_of(next, struct event_timer_info, node);
if (event == next_event) {
ret = true;
break;
}
}
return ret;
}
/**
* create_hrtimer(): Helper function to setup hrtimer.
*/
static void create_hrtimer(struct event_timer_info *event)
{
bool timer_initialized = per_cpu(per_cpu_hrtimer.timer_initialized,
event->cpu);
struct hrtimer *event_hrtimer = &per_cpu(per_cpu_hrtimer.event_hrtimer,
event->cpu);
if (!timer_initialized) {
hrtimer_init(event_hrtimer, CLOCK_MONOTONIC,
HRTIMER_MODE_ABS_PINNED);
per_cpu(per_cpu_hrtimer.timer_initialized, event->cpu) = true;
}
event_hrtimer->function = event_hrtimer_cb;
hrtimer_start(event_hrtimer, event->node.expires,
HRTIMER_MODE_ABS_PINNED);
}
/**
* event_hrtimer_cb() : Callback function for hr timer.
* Make the client CB from here and remove the event
* from the time ordered queue.
*/
static enum hrtimer_restart event_hrtimer_cb(struct hrtimer *hrtimer)
{
struct event_timer_info *event;
struct timerqueue_node *next;
unsigned long flags;
int cpu;
spin_lock_irqsave(&event_timer_lock, flags);
cpu = smp_processor_id();
next = timerqueue_getnext(&per_cpu(timer_head, cpu));
while (next && (ktime_to_ns(next->expires)
<= ktime_to_ns(hrtimer->node.expires))) {
event = container_of(next, struct event_timer_info, node);
if (!event)
goto hrtimer_cb_exit;
WARN_ON_ONCE(event->cpu != cpu);
if (msm_event_debug_mask && MSM_EVENT_TIMER_DEBUG)
pr_debug("Deleting event %p @ %lu(on cpu%d)\n", event,
(unsigned long)ktime_to_ns(next->expires), cpu);
timerqueue_del(&per_cpu(timer_head, cpu), &event->node);
if (event->function)
event->function(event->data);
next = timerqueue_getnext(&per_cpu(timer_head, cpu));
}
if (next) {
event = container_of(next, struct event_timer_info, node);
create_hrtimer(event);
}
hrtimer_cb_exit:
spin_unlock_irqrestore(&event_timer_lock, flags);
return HRTIMER_NORESTART;
}
/**
* create_timer_smp(): Helper function for setting up the timer on a CPU.
*/
static void create_timer_smp(void *data)
{
unsigned long flags;
struct event_timer_info *event =
(struct event_timer_info *)data;
struct timerqueue_node *next;
spin_lock_irqsave(&event_timer_lock, flags);
if (is_event_active(event))
timerqueue_del(&per_cpu(timer_head, event->cpu), &event->node);
next = timerqueue_getnext(&per_cpu(timer_head, event->cpu));
timerqueue_add(&per_cpu(timer_head, event->cpu), &event->node);
if (msm_event_debug_mask && MSM_EVENT_TIMER_DEBUG)
pr_debug("Adding Event %p(on cpu%d) for %lu\n", event,
event->cpu,
(unsigned long)ktime_to_ns(event->node.expires));
if (!next || (ktime_to_ns(event->node.expires) <
ktime_to_ns(next->expires))) {
if (msm_event_debug_mask && MSM_EVENT_TIMER_DEBUG)
pr_debug("Setting timer for %lu(on cpu%d)\n",
(unsigned long)ktime_to_ns(event->node.expires),
event->cpu);
create_hrtimer(event);
}
spin_unlock_irqrestore(&event_timer_lock, flags);
}
/**
* setup_event_hrtimer() : Helper function to set up the timer
* on the event's CPU.
* @event: event handle causing the wakeup.
*/
static void setup_event_hrtimer(struct event_timer_info *event)
{
smp_call_function_single(event->cpu, create_timer_smp, event, 1);
}
static void irq_affinity_release(struct kref *ref)
{
struct event_timer_info *event;
struct irq_affinity_notify *notify =
container_of(ref, struct irq_affinity_notify, kref);
event = container_of(notify, struct event_timer_info, notify);
pr_debug("event = %p\n", event);
}
static void irq_affinity_change_notifier(struct irq_affinity_notify *notify,
const cpumask_t *mask_val)
{
struct event_timer_info *event;
unsigned long flags;
unsigned int irq;
int old_cpu = -EINVAL, new_cpu = -EINVAL;
bool next_event = false;
event = container_of(notify, struct event_timer_info, notify);
irq = notify->irq;
if (!event)
return;
/*
* This logic is in line with irq-gic.c for finding
* the next affinity CPU.
*/
new_cpu = cpumask_any_and(mask_val, cpu_online_mask);
if (new_cpu >= nr_cpu_ids)
return;
old_cpu = event->cpu;
if (msm_event_debug_mask && MSM_EVENT_TIMER_DEBUG)
pr_debug("irq %d, event %p, old_cpu(%d)->new_cpu(%d).\n",
irq, event, old_cpu, new_cpu);
/* No change in IRQ affinity */
if (old_cpu == new_cpu)
return;
spin_lock_irqsave(&event_timer_lock, flags);
/*
* If the event is not active, OR if it is the next event and its
* timer is already running its callback, just update event->cpu
* and return.
*/
if (!is_event_active(event) ||
(is_event_next(event) &&
(hrtimer_try_to_cancel(&per_cpu(per_cpu_hrtimer.
event_hrtimer, old_cpu)) < 0))) {
event->cpu = new_cpu;
spin_unlock_irqrestore(&event_timer_lock, flags);
if (msm_event_debug_mask && MSM_EVENT_TIMER_DEBUG)
pr_debug("Event:%p is not active or in callback\n",
event);
return;
}
/* Note whether this event is the next one to expire */
if (is_event_next(event))
next_event = true;
event->cpu = new_cpu;
/*
* We are here either because the hrtimer was active or the event is
* not the next one; delete the event from the timer queue either way.
*/
timerqueue_del(&per_cpu(timer_head, old_cpu), &event->node);
if (msm_event_debug_mask && MSM_EVENT_TIMER_DEBUG)
pr_debug("Event:%p is in the list\n", event);
spin_unlock_irqrestore(&event_timer_lock, flags);
/*
* Migrating the event timer to a new CPU is taken care of
* automatically, since we have already updated event->cpu with
* the new CPU.
*
* Typical cases are
*
* 1)
* C0 C1
* | ^
* ----------------- |
* | | | |
* E1 E2 E3 |
* |(migrating) |
* -------------------------
*
* 2)
* C0 C1
* | ^
* ---------------- |
* | | | |
* E1 E2 E3 |
* |(migrating) |
* ---------------------------------
*
* Here, after moving E1 to C1, we need to start
* E2 on C0.
*/
spin_lock(&event_setup_lock);
/* Setup event timer on new cpu*/
setup_event_hrtimer(event);
/* Setup event on the old cpu*/
if (next_event) {
struct timerqueue_node *next;
next = timerqueue_getnext(&per_cpu(timer_head, old_cpu));
if (next) {
event = container_of(next,
struct event_timer_info, node);
setup_event_hrtimer(event);
}
}
spin_unlock(&event_setup_lock);
}
/**
* activate_event_timer() : Set the expiration time for an event in absolute
* ktime. This is a one-shot event timer; clients
* should call this again to set another expiration.
* @event : event handle.
* @event_time : event time in absolute ktime.
*/
void activate_event_timer(struct event_timer_info *event, ktime_t event_time)
{
if (!event)
return;
if (msm_event_debug_mask && MSM_EVENT_TIMER_DEBUG)
pr_debug("Adding event %p timer @ %lu(on cpu%d)\n", event,
(unsigned long)ktime_to_us(event_time),
event->cpu);
spin_lock(&event_setup_lock);
event->node.expires = event_time;
/* Start hrtimer and add event to rb tree */
setup_event_hrtimer(event);
spin_unlock(&event_setup_lock);
}
EXPORT_SYMBOL(activate_event_timer);
/**
* deactivate_event_timer() : Deactivate an event timer; this removes the event from
* the time ordered queue of event timers.
* @event: event handle.
*/
void deactivate_event_timer(struct event_timer_info *event)
{
unsigned long flags;
if (msm_event_debug_mask && MSM_EVENT_TIMER_DEBUG)
pr_debug("Deactivate timer\n");
spin_lock_irqsave(&event_timer_lock, flags);
if (is_event_active(event)) {
if (is_event_next(event))
hrtimer_try_to_cancel(&per_cpu(
per_cpu_hrtimer.event_hrtimer, event->cpu));
timerqueue_del(&per_cpu(timer_head, event->cpu), &event->node);
}
spin_unlock_irqrestore(&event_timer_lock, flags);
}
/**
* destroy_event_timer() : Free the event info data structure allocated during
* add_event_timer().
* @event: event handle.
*/
void destroy_event_timer(struct event_timer_info *event)
{
unsigned long flags;
spin_lock_irqsave(&event_timer_lock, flags);
if (is_event_active(event)) {
if (is_event_next(event))
hrtimer_try_to_cancel(&per_cpu(
per_cpu_hrtimer.event_hrtimer, event->cpu));
timerqueue_del(&per_cpu(timer_head, event->cpu), &event->node);
}
spin_unlock_irqrestore(&event_timer_lock, flags);
kfree(event);
}
EXPORT_SYMBOL(destroy_event_timer);
/**
 * get_next_event_time() - Get the next wakeup event. Returns
 *                         a ktime value of the next expiring event.
 * @cpu: CPU for which to look up the next queued event.
 */
ktime_t get_next_event_time(int cpu)
{
unsigned long flags;
struct timerqueue_node *next;
struct event_timer_info *event = NULL;
ktime_t next_event = ns_to_ktime(0);
spin_lock_irqsave(&event_timer_lock, flags);
next = timerqueue_getnext(&per_cpu(timer_head, cpu));
/* Only compute container_of() when a node is actually queued */
if (next)
	event = container_of(next, struct event_timer_info, node);
spin_unlock_irqrestore(&event_timer_lock, flags);
if (!next || event->cpu != cpu)
	return next_event;
next_event = hrtimer_get_remaining(
&per_cpu(per_cpu_hrtimer.event_hrtimer, cpu));
if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
pr_debug("Next Event %lu(on cpu%d)\n",
(unsigned long)ktime_to_us(next_event), cpu);
return next_event;
}
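/*
 * Illustrative sketch only (not part of the original driver): a low power
 * mode driver can clamp its predicted sleep length to the next armed event
 * on this CPU. The helper name and EVENT_TIMER_USAGE_EXAMPLE guard are
 * hypothetical.
 */
#ifdef EVENT_TIMER_USAGE_EXAMPLE
static uint32_t example_sleep_budget_us(int cpu, uint32_t tick_sleep_us)
{
	ktime_t next = get_next_event_time(cpu);

	/* ns_to_ktime(0) means no event timer is queued for this CPU */
	if (ktime_to_ns(next) > 0)
		tick_sleep_us = min_t(uint32_t, tick_sleep_us,
				(uint32_t)ktime_to_us(next));
	return tick_sleep_us;
}
#endif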

View file

@ -0,0 +1,93 @@
/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _LINUX_CORESIGHT_CTI_H
#define _LINUX_CORESIGHT_CTI_H
#include <linux/list.h>
struct coresight_cti_data {
int nr_ctis;
const char **names;
};
struct coresight_cti {
const char *name;
struct list_head link;
};
#ifdef CONFIG_CORESIGHT_CTI
extern struct coresight_cti *coresight_cti_get(const char *name);
extern void coresight_cti_put(struct coresight_cti *cti);
extern int coresight_cti_map_trigin(
struct coresight_cti *cti, int trig, int ch);
extern int coresight_cti_map_trigout(
struct coresight_cti *cti, int trig, int ch);
extern void coresight_cti_unmap_trigin(
struct coresight_cti *cti, int trig, int ch);
extern void coresight_cti_unmap_trigout(
struct coresight_cti *cti, int trig, int ch);
extern void coresight_cti_reset(struct coresight_cti *cti);
extern int coresight_cti_set_trig(struct coresight_cti *cti, int ch);
extern void coresight_cti_clear_trig(struct coresight_cti *cti, int ch);
extern int coresight_cti_pulse_trig(struct coresight_cti *cti, int ch);
extern int coresight_cti_enable_gate(struct coresight_cti *cti, int ch);
extern void coresight_cti_disable_gate(struct coresight_cti *cti, int ch);
extern void coresight_cti_ctx_save(void);
extern void coresight_cti_ctx_restore(void);
extern int coresight_cti_ack_trig(struct coresight_cti *cti, int trig);
#else
static inline struct coresight_cti *coresight_cti_get(const char *name)
{
return NULL;
}
static inline void coresight_cti_put(struct coresight_cti *cti) {}
static inline int coresight_cti_map_trigin(
struct coresight_cti *cti, int trig, int ch)
{
return -ENOSYS;
}
static inline int coresight_cti_map_trigout(
struct coresight_cti *cti, int trig, int ch)
{
return -ENOSYS;
}
static inline void coresight_cti_unmap_trigin(
struct coresight_cti *cti, int trig, int ch) {}
static inline void coresight_cti_unmap_trigout(
struct coresight_cti *cti, int trig, int ch) {}
static inline void coresight_cti_reset(struct coresight_cti *cti) {}
static inline int coresight_cti_set_trig(struct coresight_cti *cti, int ch)
{
return -ENOSYS;
}
static inline void coresight_cti_clear_trig(struct coresight_cti *cti, int ch)
{}
static inline int coresight_cti_pulse_trig(struct coresight_cti *cti, int ch)
{
return -ENOSYS;
}
static inline int coresight_cti_enable_gate(struct coresight_cti *cti, int ch)
{
return -ENOSYS;
}
static inline void coresight_cti_disable_gate(struct coresight_cti *cti, int ch)
{}
static inline void coresight_cti_ctx_save(void) {}
static inline void coresight_cti_ctx_restore(void) {}
static inline int coresight_cti_ack_trig(struct coresight_cti *cti, int trig)
{
return -ENOSYS;
}
#endif
#endif
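/*
 * Illustrative sketch only (not part of this header): typical CTI usage is
 * to look up a CTI block by name and route a trigger input onto a channel.
 * The CTI name, trigger/channel numbers, and CORESIGHT_CTI_USAGE_EXAMPLE
 * guard are hypothetical.
 */
#ifdef CORESIGHT_CTI_USAGE_EXAMPLE
#include <linux/err.h>

static int example_map_cpu_trigger(void)
{
	struct coresight_cti *cti = coresight_cti_get("cti-cpu0");
	int ret;

	if (IS_ERR_OR_NULL(cti))
		return -ENODEV;
	ret = coresight_cti_map_trigin(cti, 0 /* trigger */, 2 /* channel */);
	if (ret)
		coresight_cti_put(cti);
	return ret;
}
#endif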

View file

@ -0,0 +1,13 @@
/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <uapi/linux/msm-core-interface.h>

View file

@ -0,0 +1,80 @@
/* Copyright (c) 2012, 2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __ARCH_ARM_MACH_MSM_EVENT_TIMER_H
#define __ARCH_ARM_MACH_MSM_EVENT_TIMER_H
#include <linux/hrtimer.h>
struct event_timer_info;
#ifdef CONFIG_MSM_EVENT_TIMER
/**
* add_event_timer() : Add a wakeup event. Intended to be called
* by clients once. Returns a handle to be used
* for future transactions.
* @irq : Interrupt number to track affinity.
* @function : The callback function will be called when event
* timer expires.
* @data : Callback data provided by client.
*/
struct event_timer_info *add_event_timer(uint32_t irq,
void (*function)(void *), void *data);
/**
 * activate_event_timer() : Set the expiration time for an event in absolute
 *                          ktime. This is a one-shot event timer; clients
 *                          should call this again to set another expiration.
* @event : Event handle.
* @event_time : Event time in absolute ktime.
*/
void activate_event_timer(struct event_timer_info *event, ktime_t event_time);
/**
* deactivate_event_timer() : Deactivate an event timer.
* @event: event handle.
*/
void deactivate_event_timer(struct event_timer_info *event);
/**
* destroy_event_timer() : Free the event info data structure allocated during
* add_event_timer().
* @event: event handle.
*/
void destroy_event_timer(struct event_timer_info *event);
/**
 * get_next_event_time() : Get the next wakeup event. Returns
 *                         a ktime value of the next expiring event.
 * @cpu: CPU for which to look up the next queued event.
*/
ktime_t get_next_event_time(int cpu);
#else
static inline void *add_event_timer(uint32_t irq, void (*function)(void *),
void *data)
{
return NULL;
}
static inline void activate_event_timer(void *event, ktime_t event_time) {}
static inline void deactivate_event_timer(void *event) {}
static inline void destroy_event_timer(void *event) {}
static inline ktime_t get_next_event_time(int cpu)
{
return ns_to_ktime(0);
}
#endif /* CONFIG_MSM_EVENT_TIMER */
#endif /* __ARCH_ARM_MACH_MSM_EVENT_TIMER_H */

include/soc/qcom/jtag.h
View file

@ -0,0 +1,54 @@
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __MACH_JTAG_H
#define __MACH_JTAG_H
#if defined(CONFIG_MSM_JTAG) || defined(CONFIG_MSM_JTAG_MM) || \
defined(CONFIG_MSM_JTAGV8)
extern void msm_jtag_save_state(void);
extern void msm_jtag_restore_state(void);
extern void msm_jtag_etm_save_state(void);
extern void msm_jtag_etm_restore_state(void);
extern bool msm_jtag_fuse_apps_access_disabled(void);
#else
static inline void msm_jtag_save_state(void) {}
static inline void msm_jtag_restore_state(void) {}
static inline void msm_jtag_etm_save_state(void) {}
static inline void msm_jtag_etm_restore_state(void) {}
static inline bool msm_jtag_fuse_apps_access_disabled(void) { return false; }
#endif
#ifdef CONFIG_MSM_JTAGV8
extern int msm_jtag_save_register(struct notifier_block *nb);
extern int msm_jtag_save_unregister(struct notifier_block *nb);
extern int msm_jtag_restore_register(struct notifier_block *nb);
extern int msm_jtag_restore_unregister(struct notifier_block *nb);
#else
static inline int msm_jtag_save_register(struct notifier_block *nb)
{
return 0;
}
static inline int msm_jtag_save_unregister(struct notifier_block *nb)
{
return 0;
}
static inline int msm_jtag_restore_register(struct notifier_block *nb)
{
return 0;
}
static inline int msm_jtag_restore_unregister(struct notifier_block *nb)
{
return 0;
}
#endif
#endif
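/*
 * Illustrative sketch only (not part of this header): JTAG/ETM debug state
 * is saved on the way into power collapse and restored on the way out. The
 * surrounding function and MSM_JTAG_USAGE_EXAMPLE guard are hypothetical.
 */
#ifdef MSM_JTAG_USAGE_EXAMPLE
static void example_cpu_power_collapse(void)
{
	msm_jtag_save_state();
	/* ... enter the low power state here ... */
	msm_jtag_restore_state();
}
#endif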

View file

@ -0,0 +1,71 @@
/*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __ARCH_ARM_MACH_MSM_LPM_STATS_H
#define __ARCH_ARM_MACH_MSM_LPM_STATS_H
struct lpm_stats;
#ifdef CONFIG_MSM_IDLE_STATS
struct lpm_stats *lpm_stats_config_level(const char *name,
const char **levels, int num_levels, struct lpm_stats *parent,
struct cpumask *mask);
void lpm_stats_cluster_enter(struct lpm_stats *stats, uint32_t index);
void lpm_stats_cluster_exit(struct lpm_stats *stats, uint32_t index,
bool success);
void lpm_stats_cpu_enter(uint32_t index);
void lpm_stats_cpu_exit(uint32_t index, bool success);
void lpm_stats_suspend_enter(void);
void lpm_stats_suspend_exit(void);
#else
static inline struct lpm_stats *lpm_stats_config_level(const char *name,
const char **levels, int num_levels, struct lpm_stats *parent,
struct cpumask *mask)
{
return ERR_PTR(-ENODEV);
}
static inline void lpm_stats_cluster_enter(struct lpm_stats *stats,
uint32_t index)
{
return;
}
static inline void lpm_stats_cluster_exit(struct lpm_stats *stats,
uint32_t index, bool success)
{
return;
}
static inline void lpm_stats_cpu_enter(uint32_t index)
{
return;
}
static inline void lpm_stats_cpu_exit(uint32_t index, bool success)
{
return;
}
static inline void lpm_stats_suspend_enter(void)
{
return;
}
static inline void lpm_stats_suspend_exit(void)
{
return;
}
#endif
#endif /* __ARCH_ARM_MACH_MSM_LPM_STATS_H */
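/*
 * Illustrative sketch only (not part of this header): a CPU idle path
 * brackets the low power state with the enter/exit hooks so residency and
 * success counts are recorded per level index. The function and
 * LPM_STATS_USAGE_EXAMPLE guard are hypothetical.
 */
#ifdef LPM_STATS_USAGE_EXAMPLE
static void example_cpu_idle(uint32_t idx)
{
	bool success;

	lpm_stats_cpu_enter(idx);
	success = true;	/* outcome of the actual low power entry */
	lpm_stats_cpu_exit(idx, success);
}
#endif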

View file

@ -0,0 +1,24 @@
/*
* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __ARCH_ARM_MACH_MSM_CORE_H
#define __ARCH_ARM_MACH_MSM_CORE_H
#ifdef CONFIG_APSS_CORE_EA
void set_cpu_throttled(struct cpumask *mask, bool throttling);
struct blocking_notifier_head *get_power_update_notifier(void);
#else
static inline void set_cpu_throttled(struct cpumask *mask, bool throttling) {}
static inline struct blocking_notifier_head *get_power_update_notifier(void)
{
	return NULL;
}
#endif
#endif
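/*
 * Illustrative sketch only (not part of this header): a thermal client can
 * mark CPUs throttled and listen for power table updates via the notifier
 * head exposed above. The function and MSM_CORE_USAGE_EXAMPLE guard are
 * hypothetical.
 */
#ifdef MSM_CORE_USAGE_EXAMPLE
#include <linux/notifier.h>

static int example_register_power_listener(struct notifier_block *nb)
{
	struct blocking_notifier_head *head = get_power_update_notifier();

	if (!head)
		return -ENODEV;
	return blocking_notifier_chain_register(head, nb);
}
#endif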

include/soc/qcom/pm.h
View file

@ -0,0 +1,172 @@
/*
* Copyright (C) 2007 Google, Inc.
* Copyright (c) 2009-2015, The Linux Foundation. All rights reserved.
* Author: San Mehat <san@android.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __ARCH_ARM_MACH_MSM_PM_H
#define __ARCH_ARM_MACH_MSM_PM_H
#include <linux/types.h>
#include <linux/cpuidle.h>
#include <asm/smp_plat.h>
#include <asm/barrier.h>
#if !defined(CONFIG_SMP)
#define msm_secondary_startup NULL
#elif defined(CONFIG_CPU_V7)
#define msm_secondary_startup secondary_startup
#else
#define msm_secondary_startup secondary_holding_pen
#endif
enum msm_pm_sleep_mode {
MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT,
MSM_PM_SLEEP_MODE_RETENTION,
MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE,
MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
MSM_PM_SLEEP_MODE_FASTPC,
MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND,
MSM_PM_SLEEP_MODE_NR,
MSM_PM_SLEEP_MODE_NOT_SELECTED,
};
enum msm_pm_l2_scm_flag {
MSM_SCM_L2_ON = 0,
MSM_SCM_L2_OFF = 1,
MSM_SCM_L2_GDHS = 2,
MSM_SCM_L3_PC_OFF = 4,
};
#define MSM_PM_MODE(cpu, mode_nr) ((cpu) * MSM_PM_SLEEP_MODE_NR + (mode_nr))
struct msm_pm_time_params {
uint32_t latency_us;
uint32_t sleep_us;
uint32_t next_event_us;
uint32_t modified_time_us;
};
struct msm_pm_sleep_status_data {
void *base_addr;
uint32_t mask;
};
/**
* lpm_cpu_pre_pc_cb(): API to get the L2 flag to pass to TZ
*
* @cpu: cpuid of the CPU going down.
*
 * Returns the L2 flush flag enum that is passed down to TZ during power
 * collapse.
*/
enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu);
/**
* msm_pm_sleep_mode_allow() - API to determine if sleep mode is allowed.
* @cpu: CPU on which to check for the sleep mode.
* @mode: Sleep Mode to check for.
* @idle: Idle or Suspend Sleep Mode.
*
 * Helper function to determine if an Idle or Suspend
 * sleep mode is allowed for a specific CPU.
*
* Return: 1 for allowed; 0 if not allowed.
*/
int msm_pm_sleep_mode_allow(unsigned int, unsigned int, bool);
/**
* msm_pm_sleep_mode_supported() - API to determine if sleep mode is
* supported.
* @cpu: CPU on which to check for the sleep mode.
* @mode: Sleep Mode to check for.
* @idle: Idle or Suspend Sleep Mode.
*
 * Helper function to determine if an Idle or Suspend
 * sleep mode is allowed and enabled for a specific CPU.
*
* Return: 1 for supported; 0 if not supported.
*/
int msm_pm_sleep_mode_supported(unsigned int, unsigned int, bool);
struct msm_pm_cpr_ops {
void (*cpr_suspend)(void);
void (*cpr_resume)(void);
};
void __init msm_pm_set_tz_retention_flag(unsigned int flag);
void msm_pm_enable_retention(bool enable);
bool msm_pm_retention_enabled(void);
bool msm_cpu_pm_enter_sleep(enum msm_pm_sleep_mode mode, bool from_idle);
static inline void msm_arch_idle(void)
{
mb();
wfi();
}
#ifdef CONFIG_MSM_PM
void msm_pm_set_rpm_wakeup_irq(unsigned int irq);
int msm_pm_wait_cpu_shutdown(unsigned int cpu);
int __init msm_pm_sleep_status_init(void);
void lpm_cpu_hotplug_enter(unsigned int cpu);
s32 msm_cpuidle_get_deep_idle_latency(void);
int msm_pm_collapse(unsigned long unused);
#else
static inline void msm_pm_set_rpm_wakeup_irq(unsigned int irq) {}
static inline int msm_pm_wait_cpu_shutdown(unsigned int cpu) { return 0; }
static inline int msm_pm_sleep_status_init(void) { return 0; }
static inline void lpm_cpu_hotplug_enter(unsigned int cpu)
{
	msm_arch_idle();
}
static inline s32 msm_cpuidle_get_deep_idle_latency(void) { return 0; }
#define msm_pm_collapse NULL
#endif
#ifdef CONFIG_HOTPLUG_CPU
int msm_platform_secondary_init(unsigned int cpu);
#else
static inline int msm_platform_secondary_init(unsigned int cpu) { return 0; }
#endif
enum msm_pm_time_stats_id {
MSM_PM_STAT_REQUESTED_IDLE = 0,
MSM_PM_STAT_IDLE_SPIN,
MSM_PM_STAT_IDLE_WFI,
MSM_PM_STAT_RETENTION,
MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE,
MSM_PM_STAT_IDLE_POWER_COLLAPSE,
MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE,
MSM_PM_STAT_SUSPEND,
MSM_PM_STAT_FAILED_SUSPEND,
MSM_PM_STAT_NOT_IDLE,
MSM_PM_STAT_COUNT
};
#ifdef CONFIG_MSM_IDLE_STATS
void msm_pm_add_stats(enum msm_pm_time_stats_id *enable_stats, int size);
void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t);
void msm_pm_l2_add_stat(uint32_t id, int64_t t);
#else
static inline void msm_pm_add_stats(enum msm_pm_time_stats_id *enable_stats,
int size) {}
static inline void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t) {}
static inline void msm_pm_l2_add_stat(uint32_t id, int64_t t) {}
#endif
void msm_pm_set_cpr_ops(struct msm_pm_cpr_ops *ops);
extern dma_addr_t msm_pc_debug_counters_phys;
#endif /* __ARCH_ARM_MACH_MSM_PM_H */
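/*
 * Illustrative sketch only (not part of this header): a governor-style
 * helper can walk the sleep modes from deepest to shallowest and pick the
 * first one the allow check accepts. The function and MSM_PM_USAGE_EXAMPLE
 * guard are hypothetical.
 */
#ifdef MSM_PM_USAGE_EXAMPLE
static enum msm_pm_sleep_mode example_pick_mode(unsigned int cpu, bool idle)
{
	int mode;

	for (mode = MSM_PM_SLEEP_MODE_NR - 1; mode >= 0; mode--)
		if (msm_pm_sleep_mode_allow(cpu, mode, idle))
			return mode;
	return MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT;
}
#endif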

View file

@ -0,0 +1,103 @@
/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM msm_core
#if !defined(_TRACE_MSM_CORE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MSM_CORE_H
#include <linux/tracepoint.h>
#include <linux/thermal.h>
TRACE_EVENT(cpu_stats,
TP_PROTO(unsigned int cpu, long temp,
uint64_t min_power, uint64_t max_power),
TP_ARGS(cpu, temp, min_power, max_power),
TP_STRUCT__entry(
__field(unsigned int, cpu)
__field(long, temp)
__field(uint64_t, min_power)
__field(uint64_t, max_power)
),
TP_fast_assign(
__entry->cpu = cpu;
__entry->temp = temp;
__entry->min_power = min_power;
__entry->max_power = max_power;
),
TP_printk("Cpu%d: temp:%ld power@minfreq:%llu power@maxfreq:%llu",
__entry->cpu, __entry->temp, __entry->min_power,
__entry->max_power)
);
TRACE_EVENT(temp_threshold,
TP_PROTO(unsigned int cpu, long temp,
long hi_thresh, long low_thresh),
TP_ARGS(cpu, temp, hi_thresh, low_thresh),
TP_STRUCT__entry(
__field(unsigned int, cpu)
__field(long, temp)
__field(long, hi_thresh)
__field(long, low_thresh)
),
TP_fast_assign(
__entry->cpu = cpu;
__entry->temp = temp;
__entry->hi_thresh = hi_thresh;
__entry->low_thresh = low_thresh;
),
TP_printk("Cpu%d: temp:%ld hi_thresh:%ld low_thresh:%ld",
__entry->cpu, __entry->temp, __entry->hi_thresh,
__entry->low_thresh)
);
TRACE_EVENT(temp_notification,
TP_PROTO(unsigned int sensor_id, enum thermal_trip_type type,
int temp, int prev_temp),
TP_ARGS(sensor_id, type, temp, prev_temp),
TP_STRUCT__entry(
__field(unsigned int, sensor_id)
__field(enum thermal_trip_type, type)
__field(int, temp)
__field(int, prev_temp)
),
TP_fast_assign(
__entry->sensor_id = sensor_id;
__entry->type = type;
__entry->temp = temp;
__entry->prev_temp = prev_temp;
),
TP_printk("Sensor_id%d: %s threshold triggered temp:%d(previous:%d)",
__entry->sensor_id,
__entry->type == THERMAL_TRIP_CONFIGURABLE_HI ? "High" : "Low",
__entry->temp, __entry->prev_temp)
);
#endif
#define TRACE_INCLUDE_FILE trace_msm_core
#include <trace/define_trace.h>
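/*
 * Illustrative sketch only (not part of this header): after one .c file
 * defines CREATE_TRACE_POINTS and includes this header, the events above
 * are emitted as plain function calls. The values and
 * MSM_CORE_TRACE_USAGE_EXAMPLE guard are hypothetical.
 */
#ifdef MSM_CORE_TRACE_USAGE_EXAMPLE
static void example_report_power(unsigned int cpu, long temp)
{
	trace_cpu_stats(cpu, temp, 150ULL /* power at fmin */,
			2100ULL /* power at fmax */);
}
#endif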

View file

@ -0,0 +1,167 @@
/* Copyright (c) 2012, 2014-2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM msm_low_power
#if !defined(_TRACE_MSM_LOW_POWER_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MSM_LOW_POWER_H_
#include <linux/tracepoint.h>
TRACE_EVENT(cpu_power_select,
TP_PROTO(int index, u32 sleep_us, u32 latency, u32 next_event_us),
TP_ARGS(index, sleep_us, latency, next_event_us),
TP_STRUCT__entry(
__field(int, index)
__field(u32, sleep_us)
__field(u32, latency)
__field(u32, next_event_us)
),
TP_fast_assign(
__entry->index = index;
__entry->sleep_us = sleep_us;
__entry->latency = latency;
__entry->next_event_us = next_event_us;
),
TP_printk("idx:%d sleep_time:%u latency:%u next_event:%u",
__entry->index, __entry->sleep_us, __entry->latency,
__entry->next_event_us)
);
TRACE_EVENT(cpu_idle_enter,
TP_PROTO(int index),
TP_ARGS(index),
TP_STRUCT__entry(
__field(int, index)
),
TP_fast_assign(
__entry->index = index;
),
TP_printk("idx:%d",
__entry->index)
);
TRACE_EVENT(cpu_idle_exit,
TP_PROTO(int index, bool success),
TP_ARGS(index, success),
TP_STRUCT__entry(
__field(int, index)
__field(bool, success)
),
TP_fast_assign(
__entry->index = index;
__entry->success = success;
),
TP_printk("idx:%d success:%d",
__entry->index,
__entry->success)
);
TRACE_EVENT(cluster_enter,
TP_PROTO(const char *name, int index, unsigned long sync_cpus,
unsigned long child_cpus, bool from_idle),
TP_ARGS(name, index, sync_cpus, child_cpus, from_idle),
TP_STRUCT__entry(
__field(const char *, name)
__field(int, index)
__field(unsigned long, sync_cpus)
__field(unsigned long, child_cpus)
__field(bool, from_idle)
),
TP_fast_assign(
__entry->name = name;
__entry->index = index;
__entry->sync_cpus = sync_cpus;
__entry->child_cpus = child_cpus;
__entry->from_idle = from_idle;
),
TP_printk("cluster_name:%s idx:%d sync:0x%lx child:0x%lx idle:%d",
__entry->name,
__entry->index,
__entry->sync_cpus,
__entry->child_cpus,
__entry->from_idle)
);
TRACE_EVENT(cluster_exit,
TP_PROTO(const char *name, int index, unsigned long sync_cpus,
unsigned long child_cpus, bool from_idle),
TP_ARGS(name, index, sync_cpus, child_cpus, from_idle),
TP_STRUCT__entry(
__field(const char *, name)
__field(int, index)
__field(unsigned long, sync_cpus)
__field(unsigned long, child_cpus)
__field(bool, from_idle)
),
TP_fast_assign(
__entry->name = name;
__entry->index = index;
__entry->sync_cpus = sync_cpus;
__entry->child_cpus = child_cpus;
__entry->from_idle = from_idle;
),
TP_printk("cluster_name:%s idx:%d sync:0x%lx child:0x%lx idle:%d",
__entry->name,
__entry->index,
__entry->sync_cpus,
__entry->child_cpus,
__entry->from_idle)
);
TRACE_EVENT(pre_pc_cb,
TP_PROTO(int tzflag),
TP_ARGS(tzflag),
TP_STRUCT__entry(
__field(int, tzflag)
),
TP_fast_assign(
__entry->tzflag = tzflag;
),
TP_printk("tzflag:%d",
__entry->tzflag
)
);
#endif
#define TRACE_INCLUDE_FILE trace_msm_low_power
#include <trace/define_trace.h>
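/*
 * Illustrative sketch only (not part of this header): the LPM driver emits
 * cluster_enter/cluster_exit around a cluster low power transition. The
 * cluster name, index, CPU masks, and MSM_LPM_TRACE_USAGE_EXAMPLE guard
 * are hypothetical.
 */
#ifdef MSM_LPM_TRACE_USAGE_EXAMPLE
static void example_trace_cluster_lpm(bool from_idle)
{
	trace_cluster_enter("l2", 1, 0x3UL, 0x3UL, from_idle);
	/* ... program and enter the cluster low power state ... */
	trace_cluster_exit("l2", 1, 0x3UL, 0x3UL, from_idle);
}
#endif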

View file

@ -0,0 +1,29 @@
#ifndef __MSM_CORE_LIB_H__
#define __MSM_CORE_LIB_H__
#include <linux/ioctl.h>
#define TEMP_DATA_POINTS 13
#define MAX_NUM_FREQ 200
enum msm_core_ioctl_params {
MSM_CORE_LEAKAGE,
MSM_CORE_VOLTAGE,
};
#define MSM_CORE_MAGIC 0x9D
struct sched_params {
uint32_t cpumask;
uint32_t cluster;
uint32_t power[TEMP_DATA_POINTS][MAX_NUM_FREQ];
uint32_t voltage[MAX_NUM_FREQ];
uint32_t freq[MAX_NUM_FREQ];
};
#define EA_LEAKAGE _IOWR(MSM_CORE_MAGIC, MSM_CORE_LEAKAGE,\
struct sched_params)
#define EA_VOLT _IOWR(MSM_CORE_MAGIC, MSM_CORE_VOLTAGE,\
struct sched_params)
#endif
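/*
 * Illustrative sketch only (not part of this header): user space fills in
 * a sched_params table and hands it to the kernel through the ioctls
 * defined above. The device node path and MSM_CORE_IOCTL_USAGE_EXAMPLE
 * guard are hypothetical.
 */
#ifdef MSM_CORE_IOCTL_USAGE_EXAMPLE
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

static int example_push_leakage(struct sched_params *params)
{
	int fd = open("/dev/msm_core" /* hypothetical node */, O_RDWR);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, EA_LEAKAGE, params);
	close(fd);
	return ret;
}
#endif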