diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile index 3ba81b1dffad..0782caa6e320 100644 --- a/drivers/cpuidle/Makefile +++ b/drivers/cpuidle/Makefile @@ -27,3 +27,4 @@ obj-$(CONFIG_MIPS_CPS_CPUIDLE) += cpuidle-cps.o # POWERPC drivers obj-$(CONFIG_PSERIES_CPUIDLE) += cpuidle-pseries.o obj-$(CONFIG_POWERNV_CPUIDLE) += cpuidle-powernv.o +obj-$(CONFIG_MSM_PM) += lpm-levels.o lpm-levels-of.o lpm-workarounds.o diff --git a/drivers/cpuidle/lpm-levels-of.c b/drivers/cpuidle/lpm-levels-of.c new file mode 100644 index 000000000000..bbe88b6fe8e6 --- /dev/null +++ b/drivers/cpuidle/lpm-levels-of.c @@ -0,0 +1,852 @@ +/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "lpm-levels.h" + +bool use_psci; +enum lpm_type { + IDLE = 0, + SUSPEND, + LPM_TYPE_NR +}; + +struct lpm_type_str { + enum lpm_type type; + char *str; +}; + +static const struct lpm_type_str lpm_types[] = { + {IDLE, "idle_enabled"}, + {SUSPEND, "suspend_enabled"}, +}; + +static struct lpm_level_avail *cpu_level_available[NR_CPUS]; +static struct platform_device *lpm_pdev; + +static void *get_avail_val(struct kobject *kobj, struct kobj_attribute *attr) +{ + void *arg = NULL; + struct lpm_level_avail *avail = NULL; + + if (!strcmp(attr->attr.name, lpm_types[IDLE].str)) { + avail = container_of(attr, struct lpm_level_avail, + idle_enabled_attr); + arg = (void *) &avail->idle_enabled; + } else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str)) { + avail = container_of(attr, struct lpm_level_avail, + suspend_enabled_attr); + arg = (void *) &avail->suspend_enabled; + } + + return arg; +} + +ssize_t lpm_enable_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + int ret = 0; + struct kernel_param kp; + + kp.arg = get_avail_val(kobj, attr); + ret = param_get_bool(buf, &kp); + if (ret > 0) { + strlcat(buf, "\n", PAGE_SIZE); + ret++; + } + + return ret; +} + +ssize_t lpm_enable_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t len) +{ + int ret = 0; + struct kernel_param kp; + + kp.arg = get_avail_val(kobj, attr); + ret = param_set_bool(buf, &kp); + + return ret ? 
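/* a negative ret means param_set_bool() failed; on success report the whole buffer as consumed */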
ret : len; +} + +static int create_lvl_avail_nodes(const char *name, + struct kobject *parent, struct lpm_level_avail *avail) +{ + struct attribute_group *attr_group = NULL; + struct attribute **attr = NULL; + struct kobject *kobj = NULL; + int ret = 0; + + kobj = kobject_create_and_add(name, parent); + if (!kobj) + return -ENOMEM; + + attr_group = devm_kzalloc(&lpm_pdev->dev, sizeof(*attr_group), + GFP_KERNEL); + if (!attr_group) { + ret = -ENOMEM; + goto failed; + } + + attr = devm_kzalloc(&lpm_pdev->dev, + sizeof(*attr) * (LPM_TYPE_NR + 1), GFP_KERNEL); + if (!attr) { + ret = -ENOMEM; + goto failed; + } + + sysfs_attr_init(&avail->idle_enabled_attr.attr); + avail->idle_enabled_attr.attr.name = lpm_types[IDLE].str; + avail->idle_enabled_attr.attr.mode = 0644; + avail->idle_enabled_attr.show = lpm_enable_show; + avail->idle_enabled_attr.store = lpm_enable_store; + + sysfs_attr_init(&avail->suspend_enabled_attr.attr); + avail->suspend_enabled_attr.attr.name = lpm_types[SUSPEND].str; + avail->suspend_enabled_attr.attr.mode = 0644; + avail->suspend_enabled_attr.show = lpm_enable_show; + avail->suspend_enabled_attr.store = lpm_enable_store; + + attr[0] = &avail->idle_enabled_attr.attr; + attr[1] = &avail->suspend_enabled_attr.attr; + attr[2] = NULL; + attr_group->attrs = attr; + + ret = sysfs_create_group(kobj, attr_group); + if (ret) { + ret = -ENOMEM; + goto failed; + } + + avail->idle_enabled = true; + avail->suspend_enabled = true; + avail->kobj = kobj; + + return ret; + +failed: + kobject_put(kobj); + return ret; +} + +static int create_cpu_lvl_nodes(struct lpm_cluster *p, struct kobject *parent) +{ + int cpu; + int i, cpu_idx; + struct kobject **cpu_kobj = NULL; + struct lpm_level_avail *level_list = NULL; + char cpu_name[20] = {0}; + int ret = 0; + + cpu_kobj = devm_kzalloc(&lpm_pdev->dev, sizeof(*cpu_kobj) * + cpumask_weight(&p->child_cpus), GFP_KERNEL); + if (!cpu_kobj) + return -ENOMEM; + + cpu_idx = 0; + for_each_cpu(cpu, &p->child_cpus) { + snprintf(cpu_name, sizeof(cpu_name), "cpu%d", cpu); + cpu_kobj[cpu_idx] = kobject_create_and_add(cpu_name, parent); + if (!cpu_kobj[cpu_idx]) { + ret = -ENOMEM; + goto release_kobj; + } + + level_list = devm_kzalloc(&lpm_pdev->dev, + p->cpu->nlevels * sizeof(*level_list), + GFP_KERNEL); + if (!level_list) { + ret = -ENOMEM; + goto release_kobj; + } + + for (i = 0; i < p->cpu->nlevels; i++) { + + ret = create_lvl_avail_nodes(p->cpu->levels[i].name, + cpu_kobj[cpu_idx], &level_list[i]); + if (ret) + goto release_kobj; + } + + cpu_level_available[cpu] = level_list; + cpu_idx++; + } + + return ret; + +release_kobj: + for (i = 0; i < cpumask_weight(&p->child_cpus); i++) + kobject_put(cpu_kobj[i]); + + return ret; +} + +int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj) +{ + int ret = 0; + struct lpm_cluster *child = NULL; + int i; + struct kobject *cluster_kobj = NULL; + + if (!p) + return -ENODEV; + + cluster_kobj = kobject_create_and_add(p->cluster_name, kobj); + if (!cluster_kobj) + return -ENOMEM; + + for (i = 0; i < p->nlevels; i++) { + ret = create_lvl_avail_nodes(p->levels[i].level_name, + cluster_kobj, &p->levels[i].available); + if (ret) + return ret; + } + + list_for_each_entry(child, &p->child, list) { + ret = create_cluster_lvl_nodes(child, cluster_kobj); + if (ret) + return ret; + } + + if (p->cpu) { + ret = create_cpu_lvl_nodes(p, cluster_kobj); + if (ret) + return ret; + } + + return 0; +} + +bool lpm_cpu_mode_allow(unsigned int cpu, + unsigned int index, bool from_idle) +{ + struct lpm_level_avail *avail = 
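/* per-CPU table populated by create_cpu_lvl_nodes(); stays NULL until the sysfs nodes are created */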
cpu_level_available[cpu]; + + if (!lpm_pdev || !avail) + return !from_idle; + + return !!(from_idle ? avail[index].idle_enabled : + avail[index].suspend_enabled); +} + +bool lpm_cluster_mode_allow(struct lpm_cluster *cluster, + unsigned int mode, bool from_idle) +{ + struct lpm_level_avail *avail = &cluster->levels[mode].available; + + if (!lpm_pdev || !avail) + return false; + + return !!(from_idle ? avail->idle_enabled : + avail->suspend_enabled); +} + +static int parse_legacy_cluster_params(struct device_node *node, + struct lpm_cluster *c) +{ + int i; + char *key; + int ret; + struct lpm_match { + char *devname; + int (*set_mode)(struct low_power_ops *, int, bool); + }; + struct lpm_match match_tbl[] = { + {"l2", set_l2_mode}, + {"cci", set_system_mode}, + {"l3", set_l3_mode}, + {"cbf", set_system_mode}, + }; + + + key = "qcom,spm-device-names"; + c->ndevices = of_property_count_strings(node, key); + + if (c->ndevices < 0) { + pr_info("%s(): Ignoring cluster params\n", __func__); + c->no_saw_devices = true; + c->ndevices = 0; + return 0; + } + + c->name = devm_kzalloc(&lpm_pdev->dev, c->ndevices * sizeof(*c->name), + GFP_KERNEL); + c->lpm_dev = devm_kzalloc(&lpm_pdev->dev, + c->ndevices * sizeof(*c->lpm_dev), + GFP_KERNEL); + if (!c->name || !c->lpm_dev) { + ret = -ENOMEM; + goto failed; + } + + for (i = 0; i < c->ndevices; i++) { + char device_name[20]; + int j; + + ret = of_property_read_string_index(node, key, i, &c->name[i]); + if (ret) + goto failed; + snprintf(device_name, sizeof(device_name), "%s-%s", + c->cluster_name, c->name[i]); + + c->lpm_dev[i].spm = msm_spm_get_device_by_name(device_name); + + if (IS_ERR_OR_NULL(c->lpm_dev[i].spm)) { + pr_err("Failed to get spm device by name:%s\n", + device_name); + ret = PTR_ERR(c->lpm_dev[i].spm); + goto failed; + } + for (j = 0; j < ARRAY_SIZE(match_tbl); j++) { + if (!strcmp(c->name[i], match_tbl[j].devname)) + c->lpm_dev[i].set_mode = match_tbl[j].set_mode; + } + + if (!c->lpm_dev[i].set_mode) { + ret = -ENODEV; + goto failed; + } + } + + key = "qcom,default-level"; + if (of_property_read_u32(node, key, &c->default_level)) + c->default_level = 0; + return 0; +failed: + pr_err("%s(): Failed reading %s\n", __func__, key); + kfree(c->name); + kfree(c->lpm_dev); + c->name = NULL; + c->lpm_dev = NULL; + return ret; +} + +static int parse_cluster_params(struct device_node *node, + struct lpm_cluster *c) +{ + char *key; + int ret; + + key = "label"; + ret = of_property_read_string(node, key, &c->cluster_name); + if (ret) { + pr_err("%s(): Cannot read required param %s\n", __func__, key); + return ret; + } + + if (use_psci) { + key = "qcom,psci-mode-shift"; + ret = of_property_read_u32(node, key, + &c->psci_mode_shift); + if (ret) { + pr_err("%s(): Failed to read param: %s\n", + __func__, key); + return ret; + } + + key = "qcom,psci-mode-mask"; + ret = of_property_read_u32(node, key, + &c->psci_mode_mask); + if (ret) { + pr_err("%s(): Failed to read param: %s\n", + __func__, key); + return ret; + } + + /* Set ndevice to 1 as default */ + c->ndevices = 1; + + return 0; + } else + return parse_legacy_cluster_params(node, c); +} + +static int parse_lpm_mode(const char *str) +{ + int i; + struct lpm_lookup_table mode_lookup[] = { + {MSM_SPM_MODE_POWER_COLLAPSE, "pc"}, + {MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE, "spc"}, + {MSM_SPM_MODE_FASTPC, "fpc"}, + {MSM_SPM_MODE_GDHS, "gdhs"}, + {MSM_SPM_MODE_RETENTION, "retention"}, + {MSM_SPM_MODE_CLOCK_GATING, "wfi"}, + {MSM_SPM_MODE_DISABLED, "active"} + }; + + for (i = 0; i < ARRAY_SIZE(mode_lookup); 
i++) + if (!strcmp(str, mode_lookup[i].mode_name)) + return mode_lookup[i].modes; + return -EINVAL; +} + +static int parse_power_params(struct device_node *node, + struct power_params *pwr) +{ + char *key; + int ret; + + key = "qcom,latency-us"; + ret = of_property_read_u32(node, key, &pwr->latency_us); + if (ret) + goto fail; + + key = "qcom,ss-power"; + ret = of_property_read_u32(node, key, &pwr->ss_power); + if (ret) + goto fail; + + key = "qcom,energy-overhead"; + ret = of_property_read_u32(node, key, &pwr->energy_overhead); + if (ret) + goto fail; + + key = "qcom,time-overhead"; + ret = of_property_read_u32(node, key, &pwr->time_overhead_us); +fail: + if (ret) + pr_err("%s(): %s Error reading %s\n", __func__, node->name, + key); + return ret; +} + +static int parse_cluster_level(struct device_node *node, + struct lpm_cluster *cluster) +{ + int i = 0; + struct lpm_cluster_level *level = &cluster->levels[cluster->nlevels]; + int ret = -ENOMEM; + char *key; + + key = "label"; + ret = of_property_read_string(node, key, &level->level_name); + if (ret) + goto failed; + + if (use_psci) { + char *k = "qcom,psci-mode"; + ret = of_property_read_u32(node, k, &level->psci_id); + if (ret) + goto failed; + + level->is_reset = of_property_read_bool(node, "qcom,is-reset"); + } else if (!cluster->no_saw_devices) { + key = "no saw-devices"; + + level->mode = devm_kzalloc(&lpm_pdev->dev, + cluster->ndevices * sizeof(*level->mode), + GFP_KERNEL); + if (!level->mode) { + pr_err("Memory allocation failed\n"); + goto failed; + } + + for (i = 0; i < cluster->ndevices; i++) { + const char *spm_mode; + char key[25] = {0}; + + snprintf(key, 25, "qcom,spm-%s-mode", cluster->name[i]); + ret = of_property_read_string(node, key, &spm_mode); + if (ret) + goto failed; + + level->mode[i] = parse_lpm_mode(spm_mode); + + if (level->mode[i] < 0) + goto failed; + + if (level->mode[i] == MSM_SPM_MODE_POWER_COLLAPSE + || level->mode[i] == + MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE) + level->is_reset |= true; + } + } + + key = "label"; + ret = of_property_read_string(node, key, &level->level_name); + if (ret) + goto failed; + + if (cluster->nlevels != cluster->default_level) { + key = "min child idx"; + ret = of_property_read_u32(node, "qcom,min-child-idx", + &level->min_child_level); + if (ret) + goto failed; + + if (cluster->min_child_level > level->min_child_level) + cluster->min_child_level = level->min_child_level; + } + + level->notify_rpm = of_property_read_bool(node, "qcom,notify-rpm"); + level->disable_dynamic_routing = of_property_read_bool(node, + "qcom,disable-dynamic-int-routing"); + level->last_core_only = of_property_read_bool(node, + "qcom,last-core-only"); + + key = "parse_power_params"; + ret = parse_power_params(node, &level->pwr); + if (ret) + goto failed; + + cluster->nlevels++; + return 0; +failed: + pr_err("Failed %s() key = %s ret = %d\n", __func__, key, ret); + kfree(level->mode); + level->mode = NULL; + return ret; +} + +static int parse_cpu_spm_mode(const char *mode_name) +{ + struct lpm_lookup_table pm_sm_lookup[] = { + {MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT, + "wfi"}, + {MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE, + "standalone_pc"}, + {MSM_PM_SLEEP_MODE_POWER_COLLAPSE, + "pc"}, + {MSM_PM_SLEEP_MODE_RETENTION, + "retention"}, + {MSM_PM_SLEEP_MODE_FASTPC, + "fpc"}, + }; + int i; + int ret = -EINVAL; + + for (i = 0; i < ARRAY_SIZE(pm_sm_lookup); i++) { + if (!strcmp(mode_name, pm_sm_lookup[i].mode_name)) { + ret = pm_sm_lookup[i].modes; + break; + } + } + return ret; +} + +static int 
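/* map a cpu-level DT node to a PSCI state id (psci path) or a legacy SPM sleep mode */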
parse_cpu_mode(struct device_node *n, struct lpm_cpu_level *l) +{ + char *key; + int ret; + + key = "qcom,spm-cpu-mode"; + ret = of_property_read_string(n, key, &l->name); + if (ret) { + pr_err("Failed %s %d\n", n->name, __LINE__); + return ret; + } + + if (use_psci) { + key = "qcom,psci-cpu-mode"; + + ret = of_property_read_u32(n, key, &l->psci_id); + if (ret) { + pr_err("Failed reading %s on device %s\n", key, + n->name); + return ret; + } + } else { + l->mode = parse_cpu_spm_mode(l->name); + + if (l->mode < 0) + return l->mode; + } + return 0; + +} + +static int get_cpumask_for_node(struct device_node *node, struct cpumask *mask) +{ + struct device_node *cpu_node; + int cpu; + int idx = 0; + + cpu_node = of_parse_phandle(node, "qcom,cpu", idx++); + if (!cpu_node) { + pr_info("%s: No CPU phandle, assuming single cluster\n", + node->full_name); + /* + * Not all targets have the cpu node populated in the device + * tree. If cpu node is not populated assume all possible + * nodes belong to this cluster + */ + cpumask_copy(mask, cpu_possible_mask); + return 0; + } + + while (cpu_node) { + for_each_possible_cpu(cpu) { + if (of_get_cpu_node(cpu, NULL) == cpu_node) { + cpumask_set_cpu(cpu, mask); + break; + } + } + cpu_node = of_parse_phandle(node, "qcom,cpu", idx++); + } + + return 0; +} + +static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c) +{ + struct device_node *n; + int ret = -ENOMEM; + int i; + char *key; + + c->cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*c->cpu), GFP_KERNEL); + if (!c->cpu) + return ret; + + c->cpu->parent = c; + if (use_psci) { + + key = "qcom,psci-mode-shift"; + + ret = of_property_read_u32(node, key, &c->cpu->psci_mode_shift); + if (ret) { + pr_err("Failed reading %s on device %s\n", key, + node->name); + return ret; + } + key = "qcom,psci-mode-mask"; + + ret = of_property_read_u32(node, key, &c->cpu->psci_mode_mask); + if (ret) { + pr_err("Failed reading %s on device %s\n", key, + node->name); + return ret; + } + } + for_each_child_of_node(node, n) { + struct lpm_cpu_level *l = &c->cpu->levels[c->cpu->nlevels]; + + c->cpu->nlevels++; + + ret = parse_cpu_mode(n, l); + if (ret < 0) { + pr_info("Failed %s\n", l->name); + goto failed; + } + + ret = parse_power_params(n, &l->pwr); + if (ret) + goto failed; + + key = "qcom,use-broadcast-timer"; + l->use_bc_timer = of_property_read_bool(n, key); + + l->is_reset = of_property_read_bool(n, "qcom,is-reset"); + + key = "qcom,jtag-save-restore"; + l->jtag_save_restore = of_property_read_bool(n, key); + } + return 0; +failed: + for (i = 0; i < c->cpu->nlevels; i++) { + kfree(c->cpu->levels[i].name); + c->cpu->levels[i].name = NULL; + } + kfree(c->cpu); + c->cpu = NULL; + pr_err("%s(): Failed with error code:%d\n", __func__, ret); + return ret; +} + +void free_cluster_node(struct lpm_cluster *cluster) +{ + struct list_head *list; + int i; + + list_for_each(list, &cluster->child) { + struct lpm_cluster *n; + n = list_entry(list, typeof(*n), list); + list_del(list); + free_cluster_node(n); + }; + + if (cluster->cpu) { + for (i = 0; i < cluster->cpu->nlevels; i++) { + kfree(cluster->cpu->levels[i].name); + cluster->cpu->levels[i].name = NULL; + } + } + for (i = 0; i < cluster->nlevels; i++) { + kfree(cluster->levels[i].mode); + cluster->levels[i].mode = NULL; + } + kfree(cluster->cpu); + kfree(cluster->name); + kfree(cluster->lpm_dev); + cluster->cpu = NULL; + cluster->name = NULL; + cluster->lpm_dev = NULL; + cluster->ndevices = 0; +} + +/* + * TODO: + * Expects a CPU or a cluster only. 
This ensures that affinity + * level of a cluster is consistent with reference to its + * child nodes. + */ +struct lpm_cluster *parse_cluster(struct device_node *node, + struct lpm_cluster *parent) +{ + struct lpm_cluster *c; + struct device_node *n; + char *key; + int ret = 0; + + c = devm_kzalloc(&lpm_pdev->dev, sizeof(*c), GFP_KERNEL); + if (!c) + return ERR_PTR(-ENOMEM); + + ret = parse_cluster_params(node, c); + + if (ret) + goto failed_parse_params; + + INIT_LIST_HEAD(&c->child); + c->parent = parent; + spin_lock_init(&c->sync_lock); + c->min_child_level = NR_LPM_LEVELS; + + for_each_child_of_node(node, n) { + + if (!n->name) + continue; + key = "qcom,pm-cluster-level"; + if (!of_node_cmp(n->name, key)) { + WARN_ON(!use_psci && c->no_saw_devices); + if (parse_cluster_level(n, c)) + goto failed_parse_cluster; + continue; + } + + key = "qcom,pm-cluster"; + if (!of_node_cmp(n->name, key)) { + struct lpm_cluster *child; + + WARN_ON(!use_psci && c->no_saw_devices); + child = parse_cluster(n, c); + if (!child) + goto failed_parse_cluster; + + list_add(&child->list, &c->child); + cpumask_or(&c->child_cpus, &c->child_cpus, + &child->child_cpus); + c->aff_level = child->aff_level + 1; + continue; + } + + key = "qcom,pm-cpu"; + if (!of_node_cmp(n->name, key)) { + /* + * Parse the the cpu node only if a pm-cpu node + * is available, though the mask is defined @ the + * cluster level + */ + if (get_cpumask_for_node(node, &c->child_cpus)) + goto failed_parse_cluster; + + if (parse_cpu_levels(n, c)) + goto failed_parse_cluster; + + c->aff_level = 1; + } + } + + if (cpumask_intersects(&c->child_cpus, cpu_online_mask)) + c->last_level = c->default_level; + else + c->last_level = c->nlevels-1; + + return c; + +failed_parse_cluster: + pr_err("Failed parse cluster:%s\n", key); + if (parent) + list_del(&c->list); + free_cluster_node(c); +failed_parse_params: + c->parent = NULL; + pr_err("Failed parse params\n"); + kfree(c); + return NULL; +} +struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev) +{ + struct device_node *top = NULL; + + use_psci = of_property_read_bool(pdev->dev.of_node, "qcom,use-psci"); + + top = of_find_node_by_name(pdev->dev.of_node, "qcom,pm-cluster"); + if (!top) { + pr_err("Failed to find root node\n"); + return ERR_PTR(-ENODEV); + } + + lpm_pdev = pdev; + return parse_cluster(top, NULL); +} + +void cluster_dt_walkthrough(struct lpm_cluster *cluster) +{ + struct list_head *list; + int i, j; + static int id; + char str[10] = {0}; + + if (!cluster) + return; + + for (i = 0; i < id; i++) + snprintf(str+i, 10 - i, "\t"); + pr_info("%d\n", __LINE__); + + for (i = 0; i < cluster->nlevels; i++) { + struct lpm_cluster_level *l = &cluster->levels[i]; + pr_info("%d ndevices:%d\n", __LINE__, cluster->ndevices); + for (j = 0; j < cluster->ndevices; j++) + pr_info("%sDevice: %p id:%p\n", str, + &cluster->name[j], &l->mode[i]); + } + + if (cluster->cpu) { + pr_info("%d\n", __LINE__); + for (j = 0; j < cluster->cpu->nlevels; j++) + pr_info("%s\tCPU mode: %s id:%d\n", str, + cluster->cpu->levels[j].name, + cluster->cpu->levels[j].mode); + } + + id++; + + + list_for_each(list, &cluster->child) { + struct lpm_cluster *n; + pr_info("%d\n", __LINE__); + n = list_entry(list, typeof(*n), list); + cluster_dt_walkthrough(n); + } + id--; +} diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c new file mode 100644 index 000000000000..2fbda5c7874b --- /dev/null +++ b/drivers/cpuidle/lpm-levels.c @@ -0,0 +1,1336 @@ +/* Copyright (c) 2012-2015, The Linux Foundation. 
All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "lpm-levels.h" +#include "lpm-workarounds.h" +#include +#define CREATE_TRACE_POINTS +#include +#include "../../drivers/clk/msm/clock.h" + +#define SCLK_HZ (32768) +#define SCM_HANDOFF_LOCK_ID "S:7" +#define PSCI_POWER_STATE(reset) (reset << 30) +#define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24) +static remote_spinlock_t scm_handoff_lock; + +enum { + MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0), + MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1), +}; + +enum debug_event { + CPU_ENTER, + CPU_EXIT, + CLUSTER_ENTER, + CLUSTER_EXIT, + PRE_PC_CB, +}; + +struct lpm_debug { + cycle_t time; + enum debug_event evt; + int cpu; + uint32_t arg1; + uint32_t arg2; + uint32_t arg3; + uint32_t arg4; +}; + +struct lpm_cluster *lpm_root_node; + +static DEFINE_PER_CPU(struct lpm_cluster*, cpu_cluster); +static bool suspend_in_progress; +static struct hrtimer lpm_hrtimer; +static struct lpm_debug *lpm_debug; +static phys_addr_t lpm_debug_phys; +static const int num_dbg_elements = 0x100; +static int lpm_cpu_callback(struct notifier_block *cpu_nb, + unsigned long action, void *hcpu); + +static void cluster_unprepare(struct lpm_cluster *cluster, + const struct cpumask *cpu, int child_idx, bool from_idle); +static void cluster_prepare(struct lpm_cluster *cluster, + const struct cpumask *cpu, int child_idx, bool from_idle); + +static struct notifier_block __refdata lpm_cpu_nblk = { + .notifier_call = lpm_cpu_callback, +}; + +static bool menu_select; +module_param_named( + menu_select, menu_select, bool, S_IRUGO | S_IWUSR | S_IWGRP +); + +static int msm_pm_sleep_time_override; +module_param_named(sleep_time_override, + msm_pm_sleep_time_override, int, S_IRUGO | S_IWUSR | S_IWGRP); +static uint64_t suspend_wake_time; + +static bool print_parsed_dt; +module_param_named( + print_parsed_dt, print_parsed_dt, bool, S_IRUGO | S_IWUSR | S_IWGRP +); + +static bool sleep_disabled; +module_param_named(sleep_disabled, + sleep_disabled, bool, S_IRUGO | S_IWUSR | S_IWGRP); + +s32 msm_cpuidle_get_deep_idle_latency(void) +{ + return 10; +} + +void lpm_suspend_wake_time(uint64_t wakeup_time) +{ + if (wakeup_time <= 0) { + suspend_wake_time = msm_pm_sleep_time_override; + return; + } + + if (msm_pm_sleep_time_override && + (msm_pm_sleep_time_override < wakeup_time)) + suspend_wake_time = msm_pm_sleep_time_override; + else + suspend_wake_time = wakeup_time; +} +EXPORT_SYMBOL(lpm_suspend_wake_time); + +static void update_debug_pc_event(enum debug_event event, uint32_t arg1, + uint32_t arg2, uint32_t arg3, uint32_t arg4) +{ + struct lpm_debug *dbg; + int idx; + static DEFINE_SPINLOCK(debug_lock); + static int pc_event_index; + + if (!lpm_debug) + return; + + spin_lock(&debug_lock); + idx = pc_event_index++; + dbg = 
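/* the index wraps via the mask below, so num_dbg_elements must stay a power of two */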
&lpm_debug[idx & (num_dbg_elements - 1)]; + + dbg->evt = event; + dbg->time = arch_counter_get_cntpct(); + dbg->cpu = raw_smp_processor_id(); + dbg->arg1 = arg1; + dbg->arg2 = arg2; + dbg->arg3 = arg3; + dbg->arg4 = arg4; + spin_unlock(&debug_lock); +} + +static void setup_broadcast_timer(void *arg) +{ + unsigned long reason = (unsigned long)arg; + int cpu = raw_smp_processor_id(); + + reason = reason ? + CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF; + + clockevents_notify(reason, &cpu); +} + +static int lpm_cpu_callback(struct notifier_block *cpu_nb, + unsigned long action, void *hcpu) +{ + unsigned long cpu = (unsigned long) hcpu; + struct lpm_cluster *cluster = per_cpu(cpu_cluster, (unsigned int) cpu); + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_DYING: + cluster_prepare(cluster, get_cpu_mask((unsigned int) cpu), + NR_LPM_LEVELS, false); + break; + case CPU_STARTING: + cluster_unprepare(cluster, get_cpu_mask((unsigned int) cpu), + NR_LPM_LEVELS, false); + break; + case CPU_ONLINE: + smp_call_function_single(cpu, setup_broadcast_timer, + (void *)true, 1); + break; + default: + break; + } + return NOTIFY_OK; +} + +static enum hrtimer_restart lpm_hrtimer_cb(struct hrtimer *h) +{ + return HRTIMER_NORESTART; +} + +static void msm_pm_set_timer(uint32_t modified_time_us) +{ + u64 modified_time_ns = modified_time_us * NSEC_PER_USEC; + ktime_t modified_ktime = ns_to_ktime(modified_time_ns); + lpm_hrtimer.function = lpm_hrtimer_cb; + hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED); +} + +int set_l2_mode(struct low_power_ops *ops, int mode, bool notify_rpm) +{ + int lpm = mode; + int rc = 0; + struct low_power_ops *cpu_ops = per_cpu(cpu_cluster, + smp_processor_id())->lpm_dev; + + if (cpu_ops->tz_flag & MSM_SCM_L2_OFF || + cpu_ops->tz_flag & MSM_SCM_L2_GDHS) + coresight_cti_ctx_restore(); + + switch (mode) { + case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE: + case MSM_SPM_MODE_POWER_COLLAPSE: + case MSM_SPM_MODE_FASTPC: + cpu_ops->tz_flag = MSM_SCM_L2_OFF; + coresight_cti_ctx_save(); + break; + case MSM_SPM_MODE_GDHS: + cpu_ops->tz_flag = MSM_SCM_L2_GDHS; + coresight_cti_ctx_save(); + break; + case MSM_SPM_MODE_CLOCK_GATING: + case MSM_SPM_MODE_RETENTION: + case MSM_SPM_MODE_DISABLED: + cpu_ops->tz_flag = MSM_SCM_L2_ON; + break; + default: + cpu_ops->tz_flag = MSM_SCM_L2_ON; + lpm = MSM_SPM_MODE_DISABLED; + break; + } + rc = msm_spm_config_low_power_mode(ops->spm, lpm, notify_rpm); + + if (rc) + pr_err("%s: Failed to set L2 low power mode %d, ERR %d", + __func__, lpm, rc); + + return rc; +} + +int set_l3_mode(struct low_power_ops *ops, int mode, bool notify_rpm) +{ + struct low_power_ops *cpu_ops = per_cpu(cpu_cluster, + smp_processor_id())->lpm_dev; + + switch (mode) { + case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE: + case MSM_SPM_MODE_POWER_COLLAPSE: + case MSM_SPM_MODE_FASTPC: + cpu_ops->tz_flag |= MSM_SCM_L3_PC_OFF; + break; + default: + break; + } + return msm_spm_config_low_power_mode(ops->spm, mode, notify_rpm); +} + + +int set_system_mode(struct low_power_ops *ops, int mode, bool notify_rpm) +{ + return msm_spm_config_low_power_mode(ops->spm, mode, notify_rpm); +} + +static int set_device_mode(struct lpm_cluster *cluster, int ndevice, + struct lpm_cluster_level *level) +{ + struct low_power_ops *ops; + + if (use_psci) + return 0; + + ops = &cluster->lpm_dev[ndevice]; + if (ops && ops->set_mode) + return ops->set_mode(ops, level->mode[ndevice], + level->notify_rpm); + else + return -EINVAL; +} + +static int cpu_power_select(struct cpuidle_device 
*dev, + struct lpm_cpu *cpu, int *index) +{ + int best_level = -1; + uint32_t best_level_pwr = ~0U; + uint32_t latency_us = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY, + dev->cpu); + uint32_t sleep_us = + (uint32_t)(ktime_to_us(tick_nohz_get_sleep_length())); + uint32_t modified_time_us = 0; + uint32_t next_event_us = 0; + uint32_t pwr; + int i; + uint32_t lvl_latency_us = 0; + uint32_t lvl_overhead_us = 0; + uint32_t lvl_overhead_energy = 0; + + if (!cpu) + return -EINVAL; + + if (sleep_disabled) + return 0; + + next_event_us = (uint32_t)(ktime_to_us(get_next_event_time(dev->cpu))); + + for (i = 0; i < cpu->nlevels; i++) { + struct lpm_cpu_level *level = &cpu->levels[i]; + struct power_params *pwr_params = &level->pwr; + uint32_t next_wakeup_us = sleep_us; + enum msm_pm_sleep_mode mode = level->mode; + bool allow; + + allow = lpm_cpu_mode_allow(dev->cpu, i, true); + + if (!allow) + continue; + + lvl_latency_us = pwr_params->latency_us; + + lvl_overhead_us = pwr_params->time_overhead_us; + + lvl_overhead_energy = pwr_params->energy_overhead; + + if (latency_us < lvl_latency_us) + continue; + + if (next_event_us) { + if (next_event_us < lvl_latency_us) + continue; + + if (((next_event_us - lvl_latency_us) < sleep_us) || + (next_event_us < sleep_us)) + next_wakeup_us = next_event_us - lvl_latency_us; + } + + if (next_wakeup_us <= pwr_params->time_overhead_us) + continue; + + /* + * If wakeup time greater than overhead by a factor of 1000 + * assume that core steady state power dominates the power + * equation + */ + if ((next_wakeup_us >> 10) > lvl_overhead_us) { + pwr = pwr_params->ss_power; + } else { + pwr = pwr_params->ss_power; + pwr -= (lvl_overhead_us * pwr_params->ss_power) / + next_wakeup_us; + pwr += pwr_params->energy_overhead / next_wakeup_us; + } + + if (best_level_pwr >= pwr) { + best_level = i; + best_level_pwr = pwr; + if (next_event_us && next_event_us < sleep_us && + (mode != MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT)) + modified_time_us + = next_event_us - lvl_latency_us; + else + modified_time_us = 0; + } + } + + if (modified_time_us) + msm_pm_set_timer(modified_time_us); + + trace_cpu_power_select(best_level, sleep_us, latency_us, next_event_us); + + return best_level; +} + +static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster, + struct cpumask *mask, bool from_idle) +{ + int cpu; + int next_cpu = raw_smp_processor_id(); + ktime_t next_event; + struct tick_device *td; + struct cpumask online_cpus_in_cluster; + + next_event.tv64 = KTIME_MAX; + if (!suspend_wake_time) + suspend_wake_time = msm_pm_sleep_time_override; + if (!from_idle) { + if (mask) + cpumask_copy(mask, cpumask_of(raw_smp_processor_id())); + if (!suspend_wake_time) + return ~0ULL; + else + return USEC_PER_SEC * suspend_wake_time; + } + + cpumask_and(&online_cpus_in_cluster, + &cluster->num_children_in_sync, cpu_online_mask); + + for_each_cpu(cpu, &online_cpus_in_cluster) { + td = &per_cpu(tick_cpu_device, cpu); + if (td->evtdev->next_event.tv64 < next_event.tv64) { + next_event.tv64 = td->evtdev->next_event.tv64; + next_cpu = cpu; + } + } + + if (mask) + cpumask_copy(mask, cpumask_of(next_cpu)); + + + if (ktime_to_us(next_event) > ktime_to_us(ktime_get())) + return ktime_to_us(ktime_sub(next_event, ktime_get())); + else + return 0; +} + +static int cluster_select(struct lpm_cluster *cluster, bool from_idle) +{ + int best_level = -1; + int i; + uint32_t best_level_pwr = ~0U; + uint32_t pwr; + struct cpumask mask; + uint32_t latency_us = ~0U; + uint32_t sleep_us; + + if (!cluster) + return 
-EINVAL; + + sleep_us = (uint32_t)get_cluster_sleep_time(cluster, NULL, from_idle); + + if (cpumask_and(&mask, cpu_online_mask, &cluster->child_cpus)) + latency_us = pm_qos_request_for_cpumask(PM_QOS_CPU_DMA_LATENCY, + &mask); + + /* + * If atleast one of the core in the cluster is online, the cluster + * low power modes should be determined by the idle characteristics + * even if the last core enters the low power mode as a part of + * hotplug. + */ + + if (!from_idle && num_online_cpus() > 1 && + cpumask_intersects(&cluster->child_cpus, cpu_online_mask)) + from_idle = true; + + for (i = 0; i < cluster->nlevels; i++) { + struct lpm_cluster_level *level = &cluster->levels[i]; + struct power_params *pwr_params = &level->pwr; + + if (!lpm_cluster_mode_allow(cluster, i, from_idle)) + continue; + + if (level->last_core_only && + cpumask_weight(cpu_online_mask) > 1) + continue; + + if (!cpumask_equal(&cluster->num_children_in_sync, + &level->num_cpu_votes)) + continue; + + if (from_idle && latency_us < pwr_params->latency_us) + continue; + + if (sleep_us < pwr_params->time_overhead_us) + continue; + + if (suspend_in_progress && from_idle && level->notify_rpm) + continue; + + if (level->notify_rpm && msm_rpm_waiting_for_ack()) + continue; + + if ((sleep_us >> 10) > pwr_params->time_overhead_us) { + pwr = pwr_params->ss_power; + } else { + pwr = pwr_params->ss_power; + pwr -= (pwr_params->time_overhead_us * + pwr_params->ss_power) / sleep_us; + pwr += pwr_params->energy_overhead / sleep_us; + } + + if (best_level_pwr >= pwr) { + best_level = i; + best_level_pwr = pwr; + } + } + + return best_level; +} + +static void cluster_notify(struct lpm_cluster *cluster, + struct lpm_cluster_level *level, bool enter) +{ + if (level->is_reset && enter) + cpu_cluster_pm_enter(cluster->aff_level); + else if (level->is_reset && !enter) + cpu_cluster_pm_exit(cluster->aff_level); +} + +static int cluster_configure(struct lpm_cluster *cluster, int idx, + bool from_idle) +{ + struct lpm_cluster_level *level = &cluster->levels[idx]; + int ret, i; + + if (!cpumask_equal(&cluster->num_children_in_sync, &cluster->child_cpus) + || is_IPI_pending(&cluster->num_children_in_sync)) { + return -EPERM; + } + + if (idx != cluster->default_level) { + update_debug_pc_event(CLUSTER_ENTER, idx, + cluster->num_children_in_sync.bits[0], + cluster->child_cpus.bits[0], from_idle); + trace_cluster_enter(cluster->cluster_name, idx, + cluster->num_children_in_sync.bits[0], + cluster->child_cpus.bits[0], from_idle); + lpm_stats_cluster_enter(cluster->stats, idx); + } + + for (i = 0; i < cluster->ndevices; i++) { + ret = set_device_mode(cluster, i, level); + if (ret) + goto failed_set_mode; + } + + if (level->notify_rpm) { + struct cpumask nextcpu, *cpumask; + uint32_t us; + + us = get_cluster_sleep_time(cluster, &nextcpu, from_idle); + cpumask = level->disable_dynamic_routing ? 
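/* with dynamic interrupt routing disabled, pass NULL so RPM/MPM get no next-wakeup CPU hint */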
NULL : &nextcpu; + + ret = msm_rpm_enter_sleep(0, cpumask); + if (ret) { + pr_info("Failed msm_rpm_enter_sleep() rc = %d\n", ret); + goto failed_set_mode; + } + + do_div(us, USEC_PER_SEC/SCLK_HZ); + msm_mpm_enter_sleep((uint32_t)us, from_idle, cpumask); + } + + /* Notify cluster enter event after successfully config completion */ + cluster_notify(cluster, level, true); + + sched_set_cluster_dstate(&cluster->child_cpus, idx, 0, 0); + + cluster->last_level = idx; + return 0; + +failed_set_mode: + + for (i = 0; i < cluster->ndevices; i++) { + int rc = 0; + level = &cluster->levels[cluster->default_level]; + rc = set_device_mode(cluster, i, level); + BUG_ON(rc); + } + return ret; +} + +static void cluster_prepare(struct lpm_cluster *cluster, + const struct cpumask *cpu, int child_idx, bool from_idle) +{ + int i; + + if (!cluster) + return; + + if (cluster->min_child_level > child_idx) + return; + + spin_lock(&cluster->sync_lock); + cpumask_or(&cluster->num_children_in_sync, cpu, + &cluster->num_children_in_sync); + + for (i = 0; i < cluster->nlevels; i++) { + struct lpm_cluster_level *lvl = &cluster->levels[i]; + + if (child_idx >= lvl->min_child_level) + cpumask_or(&lvl->num_cpu_votes, cpu, + &lvl->num_cpu_votes); + } + + /* + * cluster_select() does not make any configuration changes. So its ok + * to release the lock here. If a core wakes up for a rude request, + * it need not wait for another to finish its cluster selection and + * configuration process + */ + + if (!cpumask_equal(&cluster->num_children_in_sync, + &cluster->child_cpus)) + goto failed; + + i = cluster_select(cluster, from_idle); + + if (i < 0) + goto failed; + + if (cluster_configure(cluster, i, from_idle)) + goto failed; + + cluster_prepare(cluster->parent, &cluster->num_children_in_sync, i, + from_idle); +failed: + spin_unlock(&cluster->sync_lock); + return; +} + +static void cluster_unprepare(struct lpm_cluster *cluster, + const struct cpumask *cpu, int child_idx, bool from_idle) +{ + struct lpm_cluster_level *level; + bool first_cpu; + int last_level, i, ret; + + if (!cluster) + return; + + if (cluster->min_child_level > child_idx) + return; + + spin_lock(&cluster->sync_lock); + last_level = cluster->default_level; + first_cpu = cpumask_equal(&cluster->num_children_in_sync, + &cluster->child_cpus); + cpumask_andnot(&cluster->num_children_in_sync, + &cluster->num_children_in_sync, cpu); + + for (i = 0; i < cluster->nlevels; i++) { + struct lpm_cluster_level *lvl = &cluster->levels[i]; + + if (child_idx >= lvl->min_child_level) + cpumask_andnot(&lvl->num_cpu_votes, + &lvl->num_cpu_votes, cpu); + } + + if (!first_cpu || cluster->last_level == cluster->default_level) + goto unlock_return; + + lpm_stats_cluster_exit(cluster->stats, cluster->last_level, true); + + level = &cluster->levels[cluster->last_level]; + if (level->notify_rpm) { + msm_rpm_exit_sleep(); + + /* If RPM bumps up CX to turbo, unvote CX turbo vote + * during exit of rpm assisted power collapse to + * reduce the power impact + */ + + lpm_wa_cx_unvote_send(); + msm_mpm_exit_sleep(from_idle); + } + + update_debug_pc_event(CLUSTER_EXIT, cluster->last_level, + cluster->num_children_in_sync.bits[0], + cluster->child_cpus.bits[0], from_idle); + trace_cluster_exit(cluster->cluster_name, cluster->last_level, + cluster->num_children_in_sync.bits[0], + cluster->child_cpus.bits[0], from_idle); + + last_level = cluster->last_level; + cluster->last_level = cluster->default_level; + + for (i = 0; i < cluster->ndevices; i++) { + level = 
&cluster->levels[cluster->default_level]; + ret = set_device_mode(cluster, i, level); + + BUG_ON(ret); + + } + sched_set_cluster_dstate(&cluster->child_cpus, 0, 0, 0); + + cluster_notify(cluster, &cluster->levels[last_level], false); + cluster_unprepare(cluster->parent, &cluster->child_cpus, + last_level, from_idle); +unlock_return: + spin_unlock(&cluster->sync_lock); +} + +static inline void cpu_prepare(struct lpm_cluster *cluster, int cpu_index, + bool from_idle) +{ + struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index]; + unsigned int cpu = raw_smp_processor_id(); + bool jtag_save_restore = + cluster->cpu->levels[cpu_index].jtag_save_restore; + + /* Use broadcast timer for aggregating sleep mode within a cluster. + * A broadcast timer could be used in the following scenarios + * 1) The architected timer HW gets reset during certain low power + * modes and the core relies on a external(broadcast) timer to wake up + * from sleep. This information is passed through device tree. + * 2) The CPU low power mode could trigger a system low power mode. + * The low power module relies on Broadcast timer to aggregate the + * next wakeup within a cluster, in which case, CPU switches over to + * use broadcast timer. + */ + if (from_idle && (cpu_level->use_bc_timer || + (cpu_index >= cluster->min_child_level))) + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); + + if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) + || (cpu_level->mode == + MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE) + || (cpu_level->is_reset))) + cpu_pm_enter(); + + /* + * Save JTAG registers for 8996v1.0 & 8996v2.x in C4 LPM + */ + if (jtag_save_restore) + msm_jtag_save_state(); +} + +static inline void cpu_unprepare(struct lpm_cluster *cluster, int cpu_index, + bool from_idle) +{ + struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index]; + unsigned int cpu = raw_smp_processor_id(); + bool jtag_save_restore = + cluster->cpu->levels[cpu_index].jtag_save_restore; + + if (from_idle && (cpu_level->use_bc_timer || + (cpu_index >= cluster->min_child_level))) + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); + + if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) + || (cpu_level->mode == + MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE) + || cpu_level->is_reset)) + cpu_pm_exit(); + + /* + * Restore JTAG registers for 8996v1.0 & 8996v2.x in C4 LPM + */ + if (jtag_save_restore) + msm_jtag_restore_state(); +} + +int get_cluster_id(struct lpm_cluster *cluster, int *aff_lvl) +{ + int state_id = 0; + + if (!cluster) + return 0; + + spin_lock(&cluster->sync_lock); + + if (!cpumask_equal(&cluster->num_children_in_sync, + &cluster->child_cpus)) + goto unlock_and_return; + + state_id |= get_cluster_id(cluster->parent, aff_lvl); + + if (cluster->last_level != cluster->default_level) { + struct lpm_cluster_level *level + = &cluster->levels[cluster->last_level]; + + state_id |= (level->psci_id & cluster->psci_mode_mask) + << cluster->psci_mode_shift; + (*aff_lvl)++; + } +unlock_and_return: + spin_unlock(&cluster->sync_lock); + return state_id; +} + +#if !defined(CONFIG_CPU_V7) +bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle) +{ + /* + * idx = 0 is the default LPM state + */ + if (!idx) { + stop_critical_timings(); + wfi(); + start_critical_timings(); + return 1; + } else { + int affinity_level = 0; + int state_id = get_cluster_id(cluster, &affinity_level); + int power_state = + PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset); + bool 
success = false; + + affinity_level = PSCI_AFFINITY_LEVEL(affinity_level); + state_id |= (power_state | affinity_level + | cluster->cpu->levels[idx].psci_id); + + update_debug_pc_event(CPU_ENTER, state_id, + 0xdeaffeed, 0xdeaffeed, true); + stop_critical_timings(); + success = !cpu_suspend(state_id); + start_critical_timings(); + update_debug_pc_event(CPU_EXIT, state_id, + success, 0xdeaffeed, true); + return success; + } +} +#elif defined(CONFIG_ARM_PSCI) +bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle) +{ + int affinity_level = 0; + int state_id = get_cluster_id(cluster, &affinity_level); + int power_state = PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset); + + affinity_level = PSCI_AFFINITY_LEVEL(affinity_level); + if (!idx) { + wfi(); + return 1; + } + + state_id |= (power_state | affinity_level + | cluster->cpu->levels[idx].psci_id); + + return !cpu_suspend(state_id); +} +#else +bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle) +{ + WARN_ONCE(true, "PSCI cpu_suspend ops not supported\n"); + return false; +} +#endif + +static int lpm_cpuidle_enter(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu); + int64_t time = ktime_to_ns(ktime_get()); + bool success = true; + int idx = cpu_power_select(dev, cluster->cpu, &index); + const struct cpumask *cpumask = get_cpu_mask(dev->cpu); + struct power_params *pwr_params; + + if (idx < 0) { + local_irq_enable(); + return -EPERM; + } + + trace_cpu_idle_rcuidle(idx, dev->cpu); + + pwr_params = &cluster->cpu->levels[idx].pwr; + sched_set_cpu_cstate(smp_processor_id(), idx + 1, + pwr_params->energy_overhead, pwr_params->latency_us); + + trace_cpu_idle_enter(idx); + + cpu_prepare(cluster, idx, true); + cluster_prepare(cluster, cpumask, idx, true); + lpm_stats_cpu_enter(idx); + + if (need_resched() || (idx < 0)) + goto exit; + + if (!use_psci) { + if (idx > 0) + update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed, + 0xdeaffeed, true); + + success = msm_cpu_pm_enter_sleep(cluster->cpu->levels[idx].mode, + true); + + if (idx > 0) + update_debug_pc_event(CPU_EXIT, idx, success, + 0xdeaffeed, true); + } else { + success = psci_enter_sleep(cluster, idx, true); + } + +exit: + lpm_stats_cpu_exit(idx, success); + cluster_unprepare(cluster, cpumask, idx, true); + cpu_unprepare(cluster, idx, true); + + sched_set_cpu_cstate(smp_processor_id(), 0, 0, 0); + + time = ktime_to_ns(ktime_get()) - time; + do_div(time, 1000); + dev->last_residency = (int)time; + trace_cpu_idle_exit(idx, success); + trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); + local_irq_enable(); + return idx; +} + +#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS +static int cpuidle_register_cpu(struct cpuidle_driver *drv, + struct cpumask *mask) +{ + struct cpuidle_device *device; + int cpu, ret; + + + if (!mask || !drv) + return -EINVAL; + + drv->cpumask = mask; + ret = cpuidle_register_driver(drv); + if (ret) { + pr_err("Failed to register cpuidle driver %d\n", ret); + goto failed_driver_register; + } + + for_each_cpu(cpu, mask) { + device = &per_cpu(cpuidle_dev, cpu); + device->cpu = cpu; + + ret = cpuidle_register_device(device); + if (ret) { + pr_err("Failed to register cpuidle driver for cpu:%u\n", + cpu); + goto failed_driver_register; + } + } + return ret; +failed_driver_register: + for_each_cpu(cpu, mask) + cpuidle_unregister_driver(drv); + return ret; +} +#else +static int cpuidle_register_cpu(struct cpuidle_driver *drv, + struct cpumask *mask) +{ + return 
cpuidle_register(drv, NULL); +} +#endif + +static int cluster_cpuidle_register(struct lpm_cluster *cl) +{ + int i = 0, ret = 0; + unsigned cpu; + struct lpm_cluster *p = NULL; + + if (!cl->cpu) { + struct lpm_cluster *n; + + list_for_each_entry(n, &cl->child, list) { + ret = cluster_cpuidle_register(n); + if (ret) + break; + } + return ret; + } + + cl->drv = kzalloc(sizeof(*cl->drv), GFP_KERNEL); + if (!cl->drv) + return -ENOMEM; + + cl->drv->name = "msm_idle"; + + for (i = 0; i < cl->cpu->nlevels; i++) { + struct cpuidle_state *st = &cl->drv->states[i]; + struct lpm_cpu_level *cpu_level = &cl->cpu->levels[i]; + snprintf(st->name, CPUIDLE_NAME_LEN, "C%u\n", i); + snprintf(st->desc, CPUIDLE_DESC_LEN, cpu_level->name); + st->flags = 0; + st->exit_latency = cpu_level->pwr.latency_us; + st->power_usage = cpu_level->pwr.ss_power; + st->target_residency = 0; + st->enter = lpm_cpuidle_enter; + } + + cl->drv->state_count = cl->cpu->nlevels; + cl->drv->safe_state_index = 0; + for_each_cpu(cpu, &cl->child_cpus) + per_cpu(cpu_cluster, cpu) = cl; + + for_each_possible_cpu(cpu) { + if (cpu_online(cpu)) + continue; + p = per_cpu(cpu_cluster, cpu); + while (p) { + int j; + spin_lock(&p->sync_lock); + cpumask_set_cpu(cpu, &p->num_children_in_sync); + for (j = 0; j < p->nlevels; j++) + cpumask_copy(&p->levels[j].num_cpu_votes, + &p->num_children_in_sync); + spin_unlock(&p->sync_lock); + p = p->parent; + } + } + ret = cpuidle_register_cpu(cl->drv, &cl->child_cpus); + + if (ret) { + kfree(cl->drv); + return -ENOMEM; + } + + return 0; +} + +static void register_cpu_lpm_stats(struct lpm_cpu *cpu, + struct lpm_cluster *parent) +{ + const char **level_name; + int i; + + level_name = kzalloc(cpu->nlevels * sizeof(*level_name), GFP_KERNEL); + + if (!level_name) + return; + + for (i = 0; i < cpu->nlevels; i++) + level_name[i] = cpu->levels[i].name; + + lpm_stats_config_level("cpu", level_name, cpu->nlevels, + parent->stats, &parent->child_cpus); + + kfree(level_name); +} + +static void register_cluster_lpm_stats(struct lpm_cluster *cl, + struct lpm_cluster *parent) +{ + const char **level_name; + int i; + struct lpm_cluster *child; + + if (!cl) + return; + + level_name = kzalloc(cl->nlevels * sizeof(*level_name), GFP_KERNEL); + + if (!level_name) + return; + + for (i = 0; i < cl->nlevels; i++) + level_name[i] = cl->levels[i].level_name; + + cl->stats = lpm_stats_config_level(cl->cluster_name, level_name, + cl->nlevels, parent ? 
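/* the root cluster has no parent stats node */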
parent->stats : NULL, NULL); + + kfree(level_name); + + if (cl->cpu) { + register_cpu_lpm_stats(cl->cpu, cl); + return; + } + + list_for_each_entry(child, &cl->child, list) + register_cluster_lpm_stats(child, cl); +} + +static int lpm_suspend_prepare(void) +{ + suspend_in_progress = true; + msm_mpm_suspend_prepare(); + lpm_stats_suspend_enter(); + + return 0; +} + +static void lpm_suspend_wake(void) +{ + suspend_in_progress = false; + msm_mpm_suspend_wake(); + lpm_stats_suspend_exit(); +} + +static int lpm_suspend_enter(suspend_state_t state) +{ + int cpu = raw_smp_processor_id(); + struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu); + struct lpm_cpu *lpm_cpu = cluster->cpu; + const struct cpumask *cpumask = get_cpu_mask(cpu); + int idx; + + for (idx = lpm_cpu->nlevels - 1; idx >= 0; idx--) { + + if (lpm_cpu_mode_allow(cpu, idx, false)) + break; + } + if (idx < 0) { + pr_err("Failed suspend\n"); + return 0; + } + cpu_prepare(cluster, idx, false); + cluster_prepare(cluster, cpumask, idx, false); + if (idx > 0) + update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed, + 0xdeaffeed, false); + + /* + * Print the clocks which are enabled during system suspend + * This debug information is useful to know which are the + * clocks that are enabled and preventing the system level + * LPMs(XO and Vmin). + */ + clock_debug_print_enabled(); + + if (!use_psci) + msm_cpu_pm_enter_sleep(cluster->cpu->levels[idx].mode, false); + else + psci_enter_sleep(cluster, idx, true); + + if (idx > 0) + update_debug_pc_event(CPU_EXIT, idx, true, 0xdeaffeed, + false); + cluster_unprepare(cluster, cpumask, idx, false); + cpu_unprepare(cluster, idx, false); + return 0; +} + +static const struct platform_suspend_ops lpm_suspend_ops = { + .enter = lpm_suspend_enter, + .valid = suspend_valid_only_mem, + .prepare_late = lpm_suspend_prepare, + .wake = lpm_suspend_wake, +}; + +static int lpm_probe(struct platform_device *pdev) +{ + int ret; + int size; + struct kobject *module_kobj = NULL; + + lpm_root_node = lpm_of_parse_cluster(pdev); + + if (IS_ERR_OR_NULL(lpm_root_node)) { + pr_err("%s(): Failed to probe low power modes\n", __func__); + return PTR_ERR(lpm_root_node); + } + + if (print_parsed_dt) + cluster_dt_walkthrough(lpm_root_node); + + /* + * Register hotplug notifier before broadcast time to ensure there + * to prevent race where a broadcast timer might not be setup on for a + * core. BUG in existing code but no known issues possibly because of + * how late lpm_levels gets initialized. 
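+ * The on_each_cpu() call below then enables the broadcast timer on every
+ * CPU that is already online, closing that window.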
+ */ + register_hotcpu_notifier(&lpm_cpu_nblk); + get_cpu(); + on_each_cpu(setup_broadcast_timer, (void *)true, 1); + put_cpu(); + suspend_set_ops(&lpm_suspend_ops); + hrtimer_init(&lpm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + + ret = remote_spin_lock_init(&scm_handoff_lock, SCM_HANDOFF_LOCK_ID); + if (ret) { + pr_err("%s: Failed initializing scm_handoff_lock (%d)\n", + __func__, ret); + return ret; + } + + size = num_dbg_elements * sizeof(struct lpm_debug); + lpm_debug = dma_alloc_coherent(&pdev->dev, size, + &lpm_debug_phys, GFP_KERNEL); + register_cluster_lpm_stats(lpm_root_node, NULL); + + ret = cluster_cpuidle_register(lpm_root_node); + if (ret) { + pr_err("%s()Failed to register with cpuidle framework\n", + __func__); + goto failed; + } + + module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME); + if (!module_kobj) { + pr_err("%s: cannot find kobject for module %s\n", + __func__, KBUILD_MODNAME); + ret = -ENOENT; + goto failed; + } + + ret = create_cluster_lvl_nodes(lpm_root_node, module_kobj); + if (ret) { + pr_err("%s(): Failed to create cluster level nodes\n", + __func__); + goto failed; + } + + return 0; +failed: + free_cluster_node(lpm_root_node); + lpm_root_node = NULL; + return ret; +} + +static struct of_device_id lpm_mtch_tbl[] = { + {.compatible = "qcom,lpm-levels"}, + {}, +}; + +static struct platform_driver lpm_driver = { + .probe = lpm_probe, + .driver = { + .name = "lpm-levels", + .owner = THIS_MODULE, + .of_match_table = lpm_mtch_tbl, + }, +}; + +static int __init lpm_levels_module_init(void) +{ + int rc; + rc = platform_driver_register(&lpm_driver); + if (rc) { + pr_info("Error registering %s\n", lpm_driver.driver.name); + goto fail; + } + +fail: + return rc; +} +late_initcall(lpm_levels_module_init); + +enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu) +{ + struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu); + enum msm_pm_l2_scm_flag retflag = MSM_SCM_L2_ON; + + /* + * No need to acquire the lock if probe isn't completed yet + * In the event of the hotplug happening before lpm probe, we want to + * flush the cache to make sure that L2 is flushed. In particular, this + * could cause incoherencies for a cluster architecture. This wouldn't + * affect the idle case as the idle driver wouldn't be registered + * before the probe function + */ + if (!cluster) + return MSM_SCM_L2_OFF; + + /* + * Assumes L2 only. What/How parameters gets passed into TZ will + * determine how this function reports this info back in msm-pm.c + */ + spin_lock(&cluster->sync_lock); + + if (!cluster->lpm_dev) { + retflag = MSM_SCM_L2_OFF; + goto unlock_and_return; + } + + if (!cpumask_equal(&cluster->num_children_in_sync, + &cluster->child_cpus)) + goto unlock_and_return; + + if (cluster->lpm_dev) + retflag = cluster->lpm_dev->tz_flag; + /* + * The scm_handoff_lock will be release by the secure monitor. + * It is used to serialize power-collapses from this point on, + * so that both Linux and the secure context have a consistent + * view regarding the number of running cpus (cpu_count). + * + * It must be acquired before releasing the cluster lock. 
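+ * The remote_spin_lock_rlock_id() call below therefore takes it, using a
+ * TID derived from the CPU number, before sync_lock is released.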
+ */ +unlock_and_return: + update_debug_pc_event(PRE_PC_CB, retflag, 0xdeadbeef, 0xdeadbeef, + 0xdeadbeef); + trace_pre_pc_cb(retflag); + remote_spin_lock_rlock_id(&scm_handoff_lock, + REMOTE_SPINLOCK_TID_START + cpu); + spin_unlock(&cluster->sync_lock); + return retflag; +} + +/** + * lpm_cpu_hotplug_enter(): Called by dying CPU to terminate in low power mode + * + * @cpu: cpuid of the dying CPU + * + * Called from platform_cpu_kill() to terminate hotplug in a low power mode + */ +void lpm_cpu_hotplug_enter(unsigned int cpu) +{ + enum msm_pm_sleep_mode mode = MSM_PM_SLEEP_MODE_NR; + struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu); + int i; + int idx = -1; + + /* + * If lpm isn't probed yet, try to put cpu into the one of the modes + * available + */ + if (!cluster) { + if (msm_spm_is_mode_avail( + MSM_SPM_MODE_POWER_COLLAPSE)){ + mode = MSM_PM_SLEEP_MODE_POWER_COLLAPSE; + } else if (msm_spm_is_mode_avail( + MSM_SPM_MODE_FASTPC)) { + mode = MSM_PM_SLEEP_MODE_FASTPC; + } else if (msm_spm_is_mode_avail( + MSM_SPM_MODE_RETENTION)) { + mode = MSM_PM_SLEEP_MODE_RETENTION; + } else { + pr_err("No mode avail for cpu%d hotplug\n", cpu); + BUG_ON(1); + return; + } + } else { + struct lpm_cpu *lpm_cpu; + uint32_t ss_pwr = ~0U; + + lpm_cpu = cluster->cpu; + for (i = 0; i < lpm_cpu->nlevels; i++) { + if (ss_pwr < lpm_cpu->levels[i].pwr.ss_power) + continue; + ss_pwr = lpm_cpu->levels[i].pwr.ss_power; + idx = i; + mode = lpm_cpu->levels[i].mode; + } + + if (mode == MSM_PM_SLEEP_MODE_NR) + return; + + BUG_ON(idx < 0); + cluster_prepare(cluster, get_cpu_mask(cpu), idx, false); + } + + msm_cpu_pm_enter_sleep(mode, false); +} + diff --git a/drivers/cpuidle/lpm-levels.h b/drivers/cpuidle/lpm-levels.h new file mode 100644 index 000000000000..6117ba334fe6 --- /dev/null +++ b/drivers/cpuidle/lpm-levels.h @@ -0,0 +1,139 @@ +/* Copyright (c) 2014, 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include + +#define NR_LPM_LEVELS 8 + +extern bool use_psci; + +struct lpm_lookup_table { + uint32_t modes; + const char *mode_name; +}; + +struct power_params { + uint32_t latency_us; /* Enter + Exit latency */ + uint32_t ss_power; /* Steady state power */ + uint32_t energy_overhead; /* Enter + exit over head */ + uint32_t time_overhead_us; /* Enter + exit overhead */ +}; + +struct lpm_cpu_level { + const char *name; + enum msm_pm_sleep_mode mode; + bool use_bc_timer; + struct power_params pwr; + unsigned int psci_id; + bool is_reset; + bool jtag_save_restore; +}; + +struct lpm_cpu { + struct lpm_cpu_level levels[NR_LPM_LEVELS]; + int nlevels; + unsigned int psci_mode_shift; + unsigned int psci_mode_mask; + struct lpm_cluster *parent; +}; + +struct lpm_level_avail { + bool idle_enabled; + bool suspend_enabled; + struct kobject *kobj; + struct kobj_attribute idle_enabled_attr; + struct kobj_attribute suspend_enabled_attr; +}; + +struct lpm_cluster_level { + const char *level_name; + int *mode; /* SPM mode to enter */ + int min_child_level; + struct cpumask num_cpu_votes; + struct power_params pwr; + bool notify_rpm; + bool disable_dynamic_routing; + bool sync_level; + bool last_core_only; + struct lpm_level_avail available; + unsigned int psci_id; + bool is_reset; +}; + +struct low_power_ops { + struct msm_spm_device *spm; + int (*set_mode)(struct low_power_ops *ops, int mode, bool notify_rpm); + enum msm_pm_l2_scm_flag tz_flag; +}; + +struct lpm_cluster { + struct list_head list; + struct list_head child; + const char *cluster_name; + const char **name; + unsigned long aff_level; /* Affinity level of the node */ + struct low_power_ops *lpm_dev; + int ndevices; + struct lpm_cluster_level levels[NR_LPM_LEVELS]; + int nlevels; + enum msm_pm_l2_scm_flag l2_flag; + int min_child_level; + int default_level; + int last_level; + struct lpm_cpu *cpu; + struct cpuidle_driver *drv; + spinlock_t sync_lock; + struct cpumask child_cpus; + struct cpumask num_children_in_sync; + struct lpm_cluster *parent; + struct lpm_stats *stats; + unsigned int psci_mode_shift; + unsigned int psci_mode_mask; + bool no_saw_devices; +}; + +int set_l2_mode(struct low_power_ops *ops, int mode, bool notify_rpm); +int set_system_mode(struct low_power_ops *ops, int mode, bool notify_rpm); +int set_l3_mode(struct low_power_ops *ops, int mode, bool notify_rpm); +void lpm_suspend_wake_time(uint64_t wakeup_time); + +struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev); +void free_cluster_node(struct lpm_cluster *cluster); +void cluster_dt_walkthrough(struct lpm_cluster *cluster); + +int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj); +bool lpm_cpu_mode_allow(unsigned int cpu, + unsigned int mode, bool from_idle); +bool lpm_cluster_mode_allow(struct lpm_cluster *cluster, + unsigned int mode, bool from_idle); + +extern struct lpm_cluster *lpm_root_node; + +#ifdef CONFIG_SMP +extern DEFINE_PER_CPU(bool, pending_ipi); +static inline bool is_IPI_pending(const struct cpumask *mask) +{ + unsigned int cpu; + + for_each_cpu(cpu, mask) { + if per_cpu(pending_ipi, cpu) + return true; + } + return false; +} +#else +static inline bool is_IPI_pending(const struct cpumask *mask) +{ + return false; +} +#endif diff --git a/drivers/cpuidle/lpm-workarounds.c b/drivers/cpuidle/lpm-workarounds.c new file mode 100644 index 000000000000..104def7e71e4 --- /dev/null +++ b/drivers/cpuidle/lpm-workarounds.c @@ -0,0 +1,134 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +static struct regulator *lpm_cx_reg; +static struct work_struct dummy_vote_work; +static struct workqueue_struct *lpm_wa_wq; +static bool lpm_wa_cx_turbo_unvote; + +/* While exiting from RPM assisted power collapse on some targets like MSM8939 + * the CX is bumped to turbo mode by RPM. To reduce the power impact, APSS + * low power driver need to remove the CX turbo vote. + */ +static void send_dummy_cx_vote(struct work_struct *w) +{ + if (lpm_cx_reg) { + regulator_set_voltage(lpm_cx_reg, + RPM_REGULATOR_CORNER_SUPER_TURBO, + RPM_REGULATOR_CORNER_SUPER_TURBO); + + regulator_set_voltage(lpm_cx_reg, + RPM_REGULATOR_CORNER_NONE, + RPM_REGULATOR_CORNER_SUPER_TURBO); + } +} + +/* + * lpm_wa_cx_unvote_send(): Unvote for CX turbo mode + */ +void lpm_wa_cx_unvote_send(void) +{ + if (lpm_wa_cx_turbo_unvote) + queue_work(lpm_wa_wq, &dummy_vote_work); +} +EXPORT_SYMBOL(lpm_wa_cx_unvote_send); + +static int lpm_wa_cx_unvote_init(struct platform_device *pdev) +{ + int ret = 0; + + lpm_cx_reg = devm_regulator_get(&pdev->dev, "lpm-cx"); + if (IS_ERR(lpm_cx_reg)) { + ret = PTR_ERR(lpm_cx_reg); + if (ret != -EPROBE_DEFER) + pr_err("Unable to get the CX regulator\n"); + return ret; + } + + INIT_WORK(&dummy_vote_work, send_dummy_cx_vote); + + lpm_wa_wq = alloc_workqueue("lpm-wa", + WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1); + + return ret; +} + +static int lpm_wa_cx_unvote_exit(void) +{ + if (lpm_wa_wq) + destroy_workqueue(lpm_wa_wq); + + return 0; +} + +static int lpm_wa_probe(struct platform_device *pdev) +{ + int ret = 0; + + lpm_wa_cx_turbo_unvote = of_property_read_bool(pdev->dev.of_node, + "qcom,lpm-wa-cx-turbo-unvote"); + if (lpm_wa_cx_turbo_unvote) { + ret = lpm_wa_cx_unvote_init(pdev); + if (ret) { + pr_err("%s: Failed to initialize lpm_wa_cx_unvote (%d)\n", + __func__, ret); + return ret; + } + } + + return ret; +} + +static int lpm_wa_remove(struct platform_device *pdev) +{ + int ret = 0; + if (lpm_wa_cx_turbo_unvote) + ret = lpm_wa_cx_unvote_exit(); + + return ret; +} + +static struct of_device_id lpm_wa_mtch_tbl[] = { + {.compatible = "qcom,lpm-workarounds"}, + {}, +}; + +static struct platform_driver lpm_wa_driver = { + .probe = lpm_wa_probe, + .remove = lpm_wa_remove, + .driver = { + .name = "lpm-workarounds", + .owner = THIS_MODULE, + .of_match_table = lpm_wa_mtch_tbl, + }, +}; + +static int __init lpm_wa_module_init(void) +{ + int ret; + ret = platform_driver_register(&lpm_wa_driver); + if (ret) + pr_info("Error registering %s\n", lpm_wa_driver.driver.name); + + return ret; +} +late_initcall(lpm_wa_module_init); diff --git a/drivers/cpuidle/lpm-workarounds.h b/drivers/cpuidle/lpm-workarounds.h new file mode 100644 index 000000000000..c743691e1f58 --- /dev/null +++ b/drivers/cpuidle/lpm-workarounds.h @@ -0,0 +1,19 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. 
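/*
 * Illustrative sketch, not part of this patch: lpm_wa_cx_unvote_send() above
 * is the only exported entry point of this workaround; the low power driver
 * is expected to call it when the APSS comes out of an RPM-assisted power
 * collapse so that RPM drops the CX turbo corner it applied.  A hypothetical
 * call site (the surrounding resume function is assumed for the example):
 */
#include "lpm-workarounds.h"

static void hypothetical_cluster_resume(bool rpm_assisted_pc)
{
	if (rpm_assisted_pc)
		lpm_wa_cx_unvote_send();	/* queues the dummy SUPER_TURBO vote/unvote */
}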
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __LPM_WA_H +#define __LPM_WA_H + +void lpm_wa_cx_unvote_send(void); + +#endif /* __LPM_WA_H */ diff --git a/drivers/power/qcom/lpm-stats.c b/drivers/power/qcom/lpm-stats.c new file mode 100644 index 000000000000..321e13c2b7ea --- /dev/null +++ b/drivers/power/qcom/lpm-stats.c @@ -0,0 +1,778 @@ +/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_STR_LEN 256 +const char *lpm_stats_reset = "reset"; +const char *lpm_stats_suspend = "suspend"; + +struct level_stats { + const char *name; + struct lpm_stats *owner; + int64_t first_bucket_time; + int bucket[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT]; + int64_t min_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT]; + int64_t max_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT]; + int success_count; + int failed_count; + int64_t total_time; + uint64_t enter_time; +}; + +struct lifo_stats { + uint32_t last_in; + uint32_t first_out; +}; + +struct lpm_stats { + char name[MAX_STR_LEN]; + struct level_stats *time_stats; + uint32_t num_levels; + struct lifo_stats lifo; + struct lpm_stats *parent; + struct list_head sibling; + struct list_head child; + struct cpumask mask; + struct dentry *directory; + bool is_cpu; +}; + +static struct level_stats suspend_time_stats; + +static DEFINE_PER_CPU_SHARED_ALIGNED(struct lpm_stats, cpu_stats); + +static void update_level_stats(struct level_stats *stats, uint64_t t, + bool success) +{ + uint64_t bt; + int i; + + if (!success) { + stats->failed_count++; + return; + } + + stats->success_count++; + stats->total_time += t; + bt = t; + do_div(bt, stats->first_bucket_time); + + if (bt < 1ULL << (CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT * + (CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1))) + i = DIV_ROUND_UP(fls((uint32_t)bt), + CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT); + else + i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1; + + if (i >= CONFIG_MSM_IDLE_STATS_BUCKET_COUNT) + i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1; + + stats->bucket[i]++; + + if (t < stats->min_time[i] || !stats->max_time[i]) + stats->min_time[i] = t; + if (t > stats->max_time[i]) + stats->max_time[i] = t; + return; +} + +static void level_stats_print(struct seq_file *m, struct level_stats *stats) +{ + int i = 0; + int64_t bucket_time = 0; + char seqs[MAX_STR_LEN] = {0}; + int64_t s = stats->total_time; + uint32_t ns = do_div(s, NSEC_PER_SEC); + + snprintf(seqs, MAX_STR_LEN, + "[%s] %s:\n" + " success count: %7d\n" + " total success time: %lld.%09u\n", + stats->owner->name, + stats->name, + 
stats->success_count, + s, ns); + seq_puts(m, seqs); + + if (stats->failed_count) { + snprintf(seqs, MAX_STR_LEN, " failed count: %7d\n", + stats->failed_count); + seq_puts(m, seqs); + } + + bucket_time = stats->first_bucket_time; + for (i = 0; + i < CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1; + i++) { + s = bucket_time; + ns = do_div(s, NSEC_PER_SEC); + snprintf(seqs, MAX_STR_LEN, + "\t<%6lld.%09u: %7d (%lld-%lld)\n", + s, ns, stats->bucket[i], + stats->min_time[i], + stats->max_time[i]); + seq_puts(m, seqs); + bucket_time <<= CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT; + } + snprintf(seqs, MAX_STR_LEN, + "\t>=%5lld.%09u:%8d (%lld-%lld)\n", + s, ns, stats->bucket[i], + stats->min_time[i], + stats->max_time[i]); + seq_puts(m, seqs); +} + +static int level_stats_file_show(struct seq_file *m, void *v) +{ + struct level_stats *stats = NULL; + + if (!m->private) + return -EINVAL; + + stats = (struct level_stats *) m->private; + + level_stats_print(m, stats); + + return 0; +} + +static int level_stats_file_open(struct inode *inode, struct file *file) +{ + return single_open(file, level_stats_file_show, inode->i_private); +} + +static void level_stats_print_all(struct seq_file *m, struct lpm_stats *stats) +{ + struct list_head *centry = NULL; + struct lpm_stats *pos = NULL; + int i = 0; + + for (i = 0; i < stats->num_levels; i++) + level_stats_print(m, &stats->time_stats[i]); + + if (list_empty(&stats->child)) + return; + + centry = &stats->child; + list_for_each_entry(pos, centry, sibling) { + level_stats_print_all(m, pos); + } +} + +static void level_stats_reset(struct level_stats *stats) +{ + memset(stats->bucket, 0, sizeof(stats->bucket)); + memset(stats->min_time, 0, sizeof(stats->min_time)); + memset(stats->max_time, 0, sizeof(stats->max_time)); + stats->success_count = 0; + stats->failed_count = 0; + stats->total_time = 0; +} + +static void level_stats_reset_all(struct lpm_stats *stats) +{ + struct list_head *centry = NULL; + struct lpm_stats *pos = NULL; + int i = 0; + + for (i = 0; i < stats->num_levels; i++) + level_stats_reset(&stats->time_stats[i]); + + if (list_empty(&stats->child)) + return; + + centry = &stats->child; + list_for_each_entry(pos, centry, sibling) { + level_stats_reset_all(pos); + } +} + +static int lpm_stats_file_show(struct seq_file *m, void *v) +{ + struct lpm_stats *stats = (struct lpm_stats *)m->private; + + if (!m->private) { + pr_err("%s: Invalid pdata, Cannot print stats\n", __func__); + return -EINVAL; + } + + level_stats_print_all(m, stats); + level_stats_print(m, &suspend_time_stats); + + return 0; +} + +static int lpm_stats_file_open(struct inode *inode, struct file *file) +{ + return single_open(file, lpm_stats_file_show, inode->i_private); +} + +static ssize_t level_stats_file_write(struct file *file, + const char __user *buffer, size_t count, loff_t *off) +{ + char buf[MAX_STR_LEN] = {0}; + struct inode *in = file->f_inode; + struct level_stats *stats = (struct level_stats *)in->i_private; + size_t len = strnlen(lpm_stats_reset, MAX_STR_LEN); + + if (!stats) + return -EINVAL; + + if (count != len+1) + return -EINVAL; + + if (copy_from_user(buf, buffer, len)) + return -EFAULT; + + if (strcmp(buf, lpm_stats_reset)) + return -EINVAL; + + level_stats_reset(stats); + + return count; +} + +static ssize_t lpm_stats_file_write(struct file *file, + const char __user *buffer, size_t count, loff_t *off) +{ + char buf[MAX_STR_LEN] = {0}; + struct inode *in = file->f_inode; + struct lpm_stats *stats = (struct lpm_stats *)in->i_private; + size_t len = strnlen(lpm_stats_reset, 
MAX_STR_LEN); + + if (!stats) + return -EINVAL; + + if (count != len+1) + return -EINVAL; + + if (copy_from_user(buf, buffer, len)) + return -EFAULT; + + if (strcmp(buf, lpm_stats_reset)) + return -EINVAL; + + level_stats_reset_all(stats); + + return count; +} + +int lifo_stats_file_show(struct seq_file *m, void *v) +{ + struct lpm_stats *stats = NULL; + struct list_head *centry = NULL; + struct lpm_stats *pos = NULL; + char seqs[MAX_STR_LEN] = {0}; + + if (!m->private) + return -EINVAL; + + stats = (struct lpm_stats *)m->private; + + if (list_empty(&stats->child)) { + pr_err("%s: ERROR: Lifo level with no children.\n", + __func__); + return -EINVAL; + } + + centry = &stats->child; + list_for_each_entry(pos, centry, sibling) { + snprintf(seqs, MAX_STR_LEN, + "%s:\n" + "\tLast-In:%u\n" + "\tFirst-Out:%u\n", + pos->name, + pos->lifo.last_in, + pos->lifo.first_out); + seq_puts(m, seqs); + } + return 0; +} + +static int lifo_stats_file_open(struct inode *inode, struct file *file) +{ + return single_open(file, lifo_stats_file_show, inode->i_private); +} + +static void lifo_stats_reset_all(struct lpm_stats *stats) +{ + struct list_head *centry = NULL; + struct lpm_stats *pos = NULL; + + centry = &stats->child; + list_for_each_entry(pos, centry, sibling) { + pos->lifo.last_in = 0; + pos->lifo.first_out = 0; + if (!list_empty(&pos->child)) + lifo_stats_reset_all(pos); + } +} + +static ssize_t lifo_stats_file_write(struct file *file, + const char __user *buffer, size_t count, loff_t *off) +{ + char buf[MAX_STR_LEN] = {0}; + struct inode *in = file->f_inode; + struct lpm_stats *stats = (struct lpm_stats *)in->i_private; + size_t len = strnlen(lpm_stats_reset, MAX_STR_LEN); + + if (!stats) + return -EINVAL; + + if (count != len+1) + return -EINVAL; + + if (copy_from_user(buf, buffer, len)) + return -EFAULT; + + if (strcmp(buf, lpm_stats_reset)) + return -EINVAL; + + lifo_stats_reset_all(stats); + + return count; +} + +static const struct file_operations level_stats_fops = { + .owner = THIS_MODULE, + .open = level_stats_file_open, + .read = seq_read, + .release = single_release, + .llseek = no_llseek, + .write = level_stats_file_write, +}; + +static const struct file_operations lpm_stats_fops = { + .owner = THIS_MODULE, + .open = lpm_stats_file_open, + .read = seq_read, + .release = single_release, + .llseek = no_llseek, + .write = lpm_stats_file_write, +}; + +static const struct file_operations lifo_stats_fops = { + .owner = THIS_MODULE, + .open = lifo_stats_file_open, + .read = seq_read, + .release = single_release, + .llseek = no_llseek, + .write = lifo_stats_file_write, +}; + +static void update_last_in_stats(struct lpm_stats *stats) +{ + struct list_head *centry = NULL; + struct lpm_stats *pos = NULL; + + if (list_empty(&stats->child)) + return; + + centry = &stats->child; + list_for_each_entry(pos, centry, sibling) { + if (cpumask_test_cpu(smp_processor_id(), &pos->mask)) { + pos->lifo.last_in++; + return; + } + } + WARN(1, "Should not reach here\n"); +} + +static void update_first_out_stats(struct lpm_stats *stats) +{ + struct list_head *centry = NULL; + struct lpm_stats *pos = NULL; + + if (list_empty(&stats->child)) + return; + + centry = &stats->child; + list_for_each_entry(pos, centry, sibling) { + if (cpumask_test_cpu(smp_processor_id(), &pos->mask)) { + pos->lifo.first_out++; + return; + } + } + WARN(1, "Should not reach here\n"); +} + +static inline void update_exit_stats(struct lpm_stats *stats, uint32_t index, + bool success) +{ + uint64_t exit_time = 0; + + /* Update time stats only 
when exit is preceded by enter */ + if (stats->time_stats[index].enter_time) { + exit_time = sched_clock() - + stats->time_stats[index].enter_time; + update_level_stats(&stats->time_stats[index], exit_time, + success); + stats->time_stats[index].enter_time = 0; + } +} + +static int config_level(const char *name, const char **levels, + int num_levels, struct lpm_stats *parent, struct lpm_stats *stats) +{ + int i = 0; + struct dentry *directory = NULL; + const char *rootname = "lpm_stats"; + const char *dirname = rootname; + + strlcpy(stats->name, name, MAX_STR_LEN); + stats->num_levels = num_levels; + stats->parent = parent; + INIT_LIST_HEAD(&stats->sibling); + INIT_LIST_HEAD(&stats->child); + + stats->time_stats = kzalloc(sizeof(struct level_stats) * + num_levels, GFP_KERNEL); + if (!stats->time_stats) { + pr_err("%s: Insufficient memory for %s level time stats\n", + __func__, name); + return -ENOMEM; + } + + if (parent) { + list_add_tail(&stats->sibling, &parent->child); + directory = parent->directory; + dirname = name; + } + + stats->directory = debugfs_create_dir(dirname, directory); + if (!stats->directory) { + pr_err("%s: Unable to create %s debugfs directory\n", + __func__, dirname); + kfree(stats->time_stats); + return -EPERM; + } + + for (i = 0; i < num_levels; i++) { + stats->time_stats[i].name = levels[i]; + stats->time_stats[i].owner = stats; + stats->time_stats[i].first_bucket_time = + CONFIG_MSM_IDLE_STATS_FIRST_BUCKET; + stats->time_stats[i].enter_time = 0; + + if (!debugfs_create_file(stats->time_stats[i].name, S_IRUGO, + stats->directory, (void *)&stats->time_stats[i], + &level_stats_fops)) { + pr_err("%s: Unable to create %s %s level-stats file\n", + __func__, stats->name, + stats->time_stats[i].name); + kfree(stats->time_stats); + return -EPERM; + } + } + + if (!debugfs_create_file("stats", S_IRUGO, stats->directory, + (void *)stats, &lpm_stats_fops)) { + pr_err("%s: Unable to create %s's overall 'stats' file\n", + __func__, stats->name); + kfree(stats->time_stats); + return -EPERM; + } + + return 0; +} + +static struct lpm_stats *config_cpu_level(const char *name, + const char **levels, int num_levels, struct lpm_stats *parent, + struct cpumask *mask) +{ + int cpu = 0; + struct lpm_stats *pstats = NULL; + struct lpm_stats *stats = NULL; + + for (pstats = parent; pstats; pstats = pstats->parent) + cpumask_or(&pstats->mask, &pstats->mask, mask); + + for_each_cpu(cpu, mask) { + int ret = 0; + char cpu_name[MAX_STR_LEN] = {0}; + + stats = &per_cpu(cpu_stats, cpu); + snprintf(cpu_name, MAX_STR_LEN, "%s%d", name, cpu); + cpumask_set_cpu(cpu, &stats->mask); + + stats->is_cpu = true; + + ret = config_level(cpu_name, levels, num_levels, parent, + stats); + if (ret) { + pr_err("%s: Unable to create %s stats\n", + __func__, cpu_name); + return ERR_PTR(ret); + } + } + + return stats; +} + +static void config_suspend_level(struct lpm_stats *stats) +{ + suspend_time_stats.name = lpm_stats_suspend; + suspend_time_stats.owner = stats; + suspend_time_stats.first_bucket_time = + CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET; + suspend_time_stats.enter_time = 0; + suspend_time_stats.success_count = 0; + suspend_time_stats.failed_count = 0; + + if (!debugfs_create_file(suspend_time_stats.name, S_IRUGO, + stats->directory, (void *)&suspend_time_stats, + &level_stats_fops)) + pr_err("%s: Unable to create %s Suspend stats file\n", + __func__, stats->name); +} + +static struct lpm_stats *config_cluster_level(const char *name, + const char **levels, int num_levels, struct lpm_stats *parent) +{ + struct 
lpm_stats *stats = NULL; + int ret = 0; + + stats = kzalloc(sizeof(struct lpm_stats), GFP_KERNEL); + if (!stats) { + pr_err("%s: Insufficient memory for %s stats\n", + __func__, name); + return ERR_PTR(-ENOMEM); + } + + stats->is_cpu = false; + + ret = config_level(name, levels, num_levels, parent, stats); + if (ret) { + pr_err("%s: Unable to create %s stats\n", __func__, + name); + kfree(stats); + return ERR_PTR(ret); + } + + if (!debugfs_create_file("lifo", S_IRUGO, stats->directory, + (void *)stats, &lifo_stats_fops)) { + pr_err("%s: Unable to create %s lifo stats file\n", + __func__, stats->name); + kfree(stats); + return ERR_PTR(-EPERM); + } + + if (!parent) + config_suspend_level(stats); + + return stats; +} + +static void cleanup_stats(struct lpm_stats *stats) +{ + struct list_head *centry = NULL; + struct lpm_stats *pos = NULL; + + centry = &stats->child; + list_for_each_entry_reverse(pos, centry, sibling) { + if (!list_empty(&pos->child)) + cleanup_stats(pos); + + list_del_init(&pos->child); + + kfree(pos->time_stats); + if (!pos->is_cpu) + kfree(pos); + } + kfree(stats->time_stats); + kfree(stats); +} + +static void lpm_stats_cleanup(struct lpm_stats *stats) +{ + struct lpm_stats *pstats = stats; + + if (!pstats) + return; + + while (pstats->parent) + pstats = pstats->parent; + + debugfs_remove_recursive(pstats->directory); + + cleanup_stats(pstats); +} + +/** + * lpm_stats_config_level() - API to configure levels stats. + * + * @name: Name of the cluster/cpu. + * @levels: Low power mode level names. + * @num_levels: Number of leves supported. + * @parent: Pointer to the parent's lpm_stats object. + * @mask: cpumask, if configuring cpu stats, else NULL. + * + * Function to communicate the low power mode levels supported by + * cpus or a cluster. + * + * Return: Pointer to the lpm_stats object or ERR_PTR(-ERRNO) + */ +struct lpm_stats *lpm_stats_config_level(const char *name, + const char **levels, int num_levels, struct lpm_stats *parent, + struct cpumask *mask) +{ + struct lpm_stats *stats = NULL; + + if (!levels || num_levels <= 0 || IS_ERR(parent)) { + pr_err("%s: Invalid input\n\t\tlevels = %p\n\t\t" + "num_levels = %d\n\t\tparent = %ld\n", + __func__, levels, num_levels, PTR_ERR(parent)); + return ERR_PTR(-EINVAL); + } + + if (mask) + stats = config_cpu_level(name, levels, num_levels, parent, + mask); + else + stats = config_cluster_level(name, levels, num_levels, + parent); + + if (IS_ERR(stats)) { + lpm_stats_cleanup(parent); + return stats; + } + + return stats; +} +EXPORT_SYMBOL(lpm_stats_config_level); + +/** + * lpm_stats_cluster_enter() - API to communicate the lpm level a cluster + * is prepared to enter. + * + * @stats: Pointer to the cluster's lpm_stats object. + * @index: Index of the lpm level that the cluster is going to enter. + * + * Function to communicate the low power mode level that the cluster is + * prepared to enter. + */ +void lpm_stats_cluster_enter(struct lpm_stats *stats, uint32_t index) +{ + if (IS_ERR_OR_NULL(stats)) + return; + + stats->time_stats[index].enter_time = sched_clock(); + + update_last_in_stats(stats); +} +EXPORT_SYMBOL(lpm_stats_cluster_enter); + +/** + * lpm_stats_cluster_exit() - API to communicate the lpm level a cluster + * exited. + * + * @stats: Pointer to the cluster's lpm_stats object. + * @index: Index of the cluster lpm level. + * @success: Success/Failure of the low power mode execution. + * + * Function to communicate the low power mode level that the cluster + * exited. 
+ */ +void lpm_stats_cluster_exit(struct lpm_stats *stats, uint32_t index, + bool success) +{ + if (IS_ERR_OR_NULL(stats)) + return; + + update_exit_stats(stats, index, success); + + update_first_out_stats(stats); +} +EXPORT_SYMBOL(lpm_stats_cluster_exit); + +/** + * lpm_stats_cpu_enter() - API to communicate the lpm level a cpu + * is prepared to enter. + * + * @index: cpu's lpm level index. + * + * Function to communicate the low power mode level that the cpu is + * prepared to enter. + */ +void lpm_stats_cpu_enter(uint32_t index) +{ + struct lpm_stats *stats = &__get_cpu_var(cpu_stats); + + if (!stats->time_stats) + return; + + stats->time_stats[index].enter_time = sched_clock(); +} +EXPORT_SYMBOL(lpm_stats_cpu_enter); + +/** + * lpm_stats_cpu_exit() - API to communicate the lpm level that the cpu exited. + * + * @index: cpu's lpm level index. + * @success: Success/Failure of the low power mode execution. + * + * Function to communicate the low power mode level that the cpu exited. + */ +void lpm_stats_cpu_exit(uint32_t index, bool success) +{ + struct lpm_stats *stats = &__get_cpu_var(cpu_stats); + + if (!stats->time_stats) + return; + + update_exit_stats(stats, index, success); +} +EXPORT_SYMBOL(lpm_stats_cpu_exit); + +/** + * lpm_stats_suspend_enter() - API to communicate system entering suspend. + * + * Function to communicate that the system is ready to enter suspend. + */ +void lpm_stats_suspend_enter(void) +{ + struct timespec ts; + + getnstimeofday(&ts); + suspend_time_stats.enter_time = timespec_to_ns(&ts); +} +EXPORT_SYMBOL(lpm_stats_suspend_enter); + +/** + * lpm_stats_suspend_exit() - API to communicate system exiting suspend. + * + * Function to communicate that the system exited suspend. + */ +void lpm_stats_suspend_exit(void) +{ + struct timespec ts; + uint64_t exit_time = 0; + + getnstimeofday(&ts); + exit_time = timespec_to_ns(&ts) - suspend_time_stats.enter_time; + update_level_stats(&suspend_time_stats, exit_time, true); +} +EXPORT_SYMBOL(lpm_stats_suspend_exit); diff --git a/drivers/power/qcom/msm-core.c b/drivers/power/qcom/msm-core.c new file mode 100644 index 000000000000..6198413e380f --- /dev/null +++ b/drivers/power/qcom/msm-core.c @@ -0,0 +1,1124 @@ +/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
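/*
 * Illustrative sketch, not part of this patch: how a low power driver might
 * wire up the lpm-stats API above.  The level names, hierarchy and indices
 * are assumptions made for the example only.
 */
static struct lpm_stats *hypothetical_cluster_stats;

static int hypothetical_stats_setup(struct cpumask *cluster_cpus)
{
	static const char *cluster_levels[] = { "wfi", "retention", "pc" };
	static const char *cpu_levels[] = { "wfi", "pc" };
	struct lpm_stats *cpu_hdl;

	/* mask == NULL takes the cluster path; a NULL parent roots it at "lpm_stats" */
	hypothetical_cluster_stats = lpm_stats_config_level("cluster0",
			cluster_levels, ARRAY_SIZE(cluster_levels), NULL, NULL);
	if (IS_ERR(hypothetical_cluster_stats))
		return PTR_ERR(hypothetical_cluster_stats);

	/* mask != NULL creates per-cpu children ("cpu0", "cpu1", ...) */
	cpu_hdl = lpm_stats_config_level("cpu", cpu_levels,
			ARRAY_SIZE(cpu_levels), hypothetical_cluster_stats,
			cluster_cpus);
	return IS_ERR(cpu_hdl) ? PTR_ERR(cpu_hdl) : 0;
}

static void hypothetical_cpu_idle(uint32_t idx, bool success)
{
	lpm_stats_cpu_enter(idx);		/* timestamps with sched_clock() */
	/* ... enter and exit the chosen low power mode ... */
	lpm_stats_cpu_exit(idx, success);
}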
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#define CREATE_TRACE_POINTS +#include + +#define TEMP_BASE_POINT 35 +#define TEMP_MAX_POINT 95 +#define CPU_HOTPLUG_LIMIT 80 +#define CPU_BIT_MASK(cpu) BIT(cpu) +#define DEFAULT_TEMP 40 +#define DEFAULT_LOW_HYST_TEMP 10 +#define DEFAULT_HIGH_HYST_TEMP 5 +#define CLUSTER_OFFSET_FOR_MPIDR 8 +#define MAX_CORES_PER_CLUSTER 4 +#define MAX_NUM_OF_CLUSTERS 2 +#define NUM_OF_CORNERS 10 +#define DEFAULT_SCALING_FACTOR 1 + +#define ALLOCATE_2D_ARRAY(type)\ +static type **allocate_2d_array_##type(int idx)\ +{\ + int i;\ + type **ptr = NULL;\ + if (!idx) \ + return ERR_PTR(-EINVAL);\ + ptr = kzalloc(sizeof(*ptr) * TEMP_DATA_POINTS, \ + GFP_KERNEL);\ + if (!ptr) { \ + return ERR_PTR(-ENOMEM); \ + } \ + for (i = 0; i < TEMP_DATA_POINTS; i++) { \ + ptr[i] = kzalloc(sizeof(*ptr[i]) * \ + idx, GFP_KERNEL);\ + if (!ptr[i]) {\ + goto done;\ + } \ + } \ + return ptr;\ +done:\ + for (i = 0; i < TEMP_DATA_POINTS; i++) \ + kfree(ptr[i]);\ + kfree(ptr);\ + return ERR_PTR(-ENOMEM);\ +} + +struct cpu_activity_info { + int cpu; + int mpidr; + long temp; + int sensor_id; + struct sensor_threshold hi_threshold; + struct sensor_threshold low_threshold; + struct cpu_static_info *sp; +}; + +struct cpu_static_info { + uint32_t **power; + cpumask_t mask; + struct cpufreq_frequency_table *table; + uint32_t *voltage; + uint32_t num_of_freqs; +}; + +static DEFINE_MUTEX(policy_update_mutex); +static DEFINE_MUTEX(kthread_update_mutex); +static DEFINE_SPINLOCK(update_lock); +static struct delayed_work sampling_work; +static struct completion sampling_completion; +static struct task_struct *sampling_task; +static int low_hyst_temp; +static int high_hyst_temp; +static struct platform_device *msm_core_pdev; +static struct cpu_activity_info activity[NR_CPUS]; +DEFINE_PER_CPU(struct cpu_pstate_pwr *, ptable); +static struct cpu_pwr_stats cpu_stats[NR_CPUS]; +static uint32_t scaling_factor; +ALLOCATE_2D_ARRAY(uint32_t); + +static int poll_ms; +module_param_named(polling_interval, poll_ms, int, + S_IRUGO | S_IWUSR | S_IWGRP); + +static int disabled; +module_param_named(disabled, disabled, int, + S_IRUGO | S_IWUSR | S_IWGRP); +static bool in_suspend; +static bool activate_power_table; +static int max_throttling_temp = 80; /* in C */ +module_param_named(throttling_temp, max_throttling_temp, int, + S_IRUGO | S_IWUSR | S_IWGRP); + +/* + * Cannot be called from an interrupt context + */ +static void set_and_activate_threshold(uint32_t sensor_id, + struct sensor_threshold *threshold) +{ + if (sensor_set_trip(sensor_id, threshold)) { + pr_err("%s: Error in setting trip %d\n", + KBUILD_MODNAME, threshold->trip); + return; + } + + if (sensor_activate_trip(sensor_id, threshold, true)) { + sensor_cancel_trip(sensor_id, threshold); + pr_err("%s: Error in enabling trip %d\n", + KBUILD_MODNAME, threshold->trip); + return; + } +} + +static void set_threshold(struct cpu_activity_info *cpu_node) +{ + if (cpu_node->sensor_id < 0) + return; + + /* + * Before operating on the threshold structure which is used by + * thermal core ensure that the sensor is disabled to prevent + * incorrect operations on the sensor list maintained by thermal code. 
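/*
 * Illustrative sketch, not part of this patch: ALLOCATE_2D_ARRAY(uint32_t)
 * above generates allocate_2d_array_uint32_t(idx), which returns a
 * TEMP_DATA_POINTS x idx table (one row per temperature point, one column per
 * cpufreq entry).  A hypothetical in-file consumer that allocates and fills
 * such a table:
 */
static int hypothetical_fill_power_table(struct cpu_static_info *sp)
{
	int t, f;

	sp->power = allocate_2d_array_uint32_t(sp->num_of_freqs);
	if (IS_ERR_OR_NULL(sp->power))
		return PTR_ERR(sp->power);

	for (t = 0; t < TEMP_DATA_POINTS; t++)
		for (f = 0; f < sp->num_of_freqs; f++)
			sp->power[t][f] = 0;	/* power value for temp row t, freq column f */

	return 0;
}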
+ */ + sensor_activate_trip(cpu_node->sensor_id, + &cpu_node->hi_threshold, false); + sensor_activate_trip(cpu_node->sensor_id, + &cpu_node->low_threshold, false); + + cpu_node->hi_threshold.temp = (cpu_node->temp + high_hyst_temp) * + scaling_factor; + cpu_node->low_threshold.temp = (cpu_node->temp - low_hyst_temp) * + scaling_factor; + + /* + * Set the threshold only if we are below the hotplug limit + * Adding more work at this high temperature range, seems to + * fail hotplug notifications. + */ + if (cpu_node->hi_threshold.temp < (CPU_HOTPLUG_LIMIT * scaling_factor)) + set_and_activate_threshold(cpu_node->sensor_id, + &cpu_node->hi_threshold); + + set_and_activate_threshold(cpu_node->sensor_id, + &cpu_node->low_threshold); +} + +static void samplequeue_handle(struct work_struct *work) +{ + complete(&sampling_completion); +} + +/* May be called from an interrupt context */ +static void core_temp_notify(enum thermal_trip_type type, + int temp, void *data) +{ + struct cpu_activity_info *cpu_node = + (struct cpu_activity_info *) data; + + trace_temp_notification(cpu_node->sensor_id, + type, temp, cpu_node->temp); + + cpu_node->temp = temp / scaling_factor; + + complete(&sampling_completion); +} + +static void repopulate_stats(int cpu) +{ + int i; + struct cpu_activity_info *cpu_node = &activity[cpu]; + int temp_point; + struct cpu_pstate_pwr *pt = per_cpu(ptable, cpu); + + if (!pt) + return; + + if (cpu_node->temp < TEMP_BASE_POINT) + temp_point = 0; + else if (cpu_node->temp > TEMP_MAX_POINT) + temp_point = TEMP_DATA_POINTS - 1; + else + temp_point = (cpu_node->temp - TEMP_BASE_POINT) / 5; + + cpu_stats[cpu].temp = cpu_node->temp; + for (i = 0; i < cpu_node->sp->num_of_freqs; i++) + pt[i].power = cpu_node->sp->power[temp_point][i]; + + trace_cpu_stats(cpu, cpu_stats[cpu].temp, pt[0].power, + pt[cpu_node->sp->num_of_freqs-1].power); +}; + +void trigger_cpu_pwr_stats_calc(void) +{ + int cpu; + static long prev_temp[NR_CPUS]; + struct cpu_activity_info *cpu_node; + long temp; + + if (disabled) + return; + + spin_lock(&update_lock); + + for_each_online_cpu(cpu) { + cpu_node = &activity[cpu]; + if (cpu_node->sensor_id < 0) + continue; + + if (cpu_node->temp == prev_temp[cpu]) + sensor_get_temp(cpu_node->sensor_id, &temp); + + cpu_node->temp = temp / scaling_factor; + + prev_temp[cpu] = cpu_node->temp; + + /* + * Do not populate/update stats before policy and ptable have + * been updated. + */ + if (activate_power_table && cpu_stats[cpu].ptable + && cpu_node->sp->table) + repopulate_stats(cpu); + } + spin_unlock(&update_lock); +} +EXPORT_SYMBOL(trigger_cpu_pwr_stats_calc); + +void set_cpu_throttled(cpumask_t *mask, bool throttling) +{ + int cpu; + + if (!mask) + return; + + spin_lock(&update_lock); + for_each_cpu(cpu, mask) + cpu_stats[cpu].throttling = throttling; + spin_unlock(&update_lock); +} +EXPORT_SYMBOL(set_cpu_throttled); + +static void update_related_freq_table(struct cpufreq_policy *policy) +{ + int cpu, num_of_freqs; + struct cpufreq_frequency_table *table; + + table = cpufreq_frequency_get_table(policy->cpu); + if (!table) { + pr_err("Couldn't get freq table for cpu%d\n", + policy->cpu); + return; + } + + for (num_of_freqs = 0; table[num_of_freqs].frequency != + CPUFREQ_TABLE_END;) + num_of_freqs++; + + /* + * Synchronous cores within cluster have the same + * policy. Since these cores do not have the cpufreq + * table initialized for all of them, copy the same + * table to all the related cpus. 
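/*
 * Illustrative sketch, not part of this patch: repopulate_stats() above picks
 * the power-table row from the current core temperature in 5 degree C steps
 * between TEMP_BASE_POINT (35) and TEMP_MAX_POINT (95).  For example 30 C
 * clamps to row 0, 52 C maps to (52 - 35) / 5 = row 3, and anything above
 * 95 C clamps to the last row.  The same mapping, factored into a helper for
 * clarity:
 */
static int hypothetical_temp_to_row(long temp)
{
	if (temp < TEMP_BASE_POINT)
		return 0;
	if (temp > TEMP_MAX_POINT)
		return TEMP_DATA_POINTS - 1;
	return (temp - TEMP_BASE_POINT) / 5;	/* one table row per 5 C */
}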
+ */ + for_each_cpu(cpu, policy->related_cpus) { + activity[cpu].sp->table = table; + activity[cpu].sp->num_of_freqs = num_of_freqs; + } +} + +static __ref int do_sampling(void *data) +{ + int cpu; + struct cpu_activity_info *cpu_node; + static int prev_temp[NR_CPUS]; + + while (!kthread_should_stop()) { + wait_for_completion(&sampling_completion); + cancel_delayed_work(&sampling_work); + + mutex_lock(&kthread_update_mutex); + if (in_suspend) + goto unlock; + + trigger_cpu_pwr_stats_calc(); + + for_each_online_cpu(cpu) { + cpu_node = &activity[cpu]; + if (prev_temp[cpu] != cpu_node->temp) { + prev_temp[cpu] = cpu_node->temp; + set_threshold(cpu_node); + trace_temp_threshold(cpu, cpu_node->temp, + cpu_node->hi_threshold.temp / + scaling_factor, + cpu_node->low_threshold.temp / + scaling_factor); + } + } + if (!poll_ms) + goto unlock; + + schedule_delayed_work(&sampling_work, + msecs_to_jiffies(poll_ms)); +unlock: + mutex_unlock(&kthread_update_mutex); + } + return 0; +} + +static void clear_static_power(struct cpu_static_info *sp) +{ + int i; + + if (!sp) + return; + + if (cpumask_first(&sp->mask) < num_possible_cpus()) + return; + + for (i = 0; i < TEMP_DATA_POINTS; i++) + kfree(sp->power[i]); + kfree(sp->power); + kfree(sp); +} + +BLOCKING_NOTIFIER_HEAD(msm_core_stats_notifier_list); + +struct blocking_notifier_head *get_power_update_notifier(void) +{ + return &msm_core_stats_notifier_list; +} + +int register_cpu_pwr_stats_ready_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&msm_core_stats_notifier_list, + nb); +} + +static int update_userspace_power(struct sched_params __user *argp) +{ + int i; + int ret; + int cpu; + struct cpu_activity_info *node; + struct cpu_static_info *sp, *clear_sp; + int cpumask, cluster, mpidr; + + get_user(cpumask, &argp->cpumask); + get_user(cluster, &argp->cluster); + mpidr = cluster << 8; + + pr_debug("%s: cpumask %d, cluster: %d\n", __func__, cpumask, + cluster); + for (i = 0; i < MAX_CORES_PER_CLUSTER; i++, cpumask >>= 1) { + if (!(cpumask & 0x01)) + continue; + + mpidr |= i; + for_each_possible_cpu(cpu) { + if (cpu_logical_map(cpu) == mpidr) + break; + } + } + + if (cpu >= num_possible_cpus()) + return -EINVAL; + + node = &activity[cpu]; + /* Allocate new memory to copy cpumask specific power + * information. 
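/*
 * Illustrative sketch, not part of this patch: update_userspace_power() above
 * resolves the caller's (cluster, cpumask) pair to a logical CPU by rebuilding
 * the MPIDR affinity value.  For example cluster 1 with cpumask 0x4 (third
 * core of that cluster) gives (1 << CLUSTER_OFFSET_FOR_MPIDR) | 2 == 0x102,
 * and the logical CPU whose cpu_logical_map() equals 0x102 is used.  The same
 * lookup as a hypothetical helper:
 */
static int hypothetical_cluster_core_to_cpu(int cluster, int core)
{
	int mpidr = (cluster << CLUSTER_OFFSET_FOR_MPIDR) | core;
	int cpu;

	for_each_possible_cpu(cpu)
		if (cpu_logical_map(cpu) == mpidr)
			return cpu;

	return -EINVAL;	/* no possible CPU has that affinity value */
}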
+ */ + sp = kzalloc(sizeof(*sp), GFP_KERNEL); + if (!sp) + return -ENOMEM; + + + sp->power = allocate_2d_array_uint32_t(node->sp->num_of_freqs); + if (IS_ERR_OR_NULL(sp->power)) { + ret = PTR_ERR(sp->power); + kfree(sp); + return ret; + } + sp->num_of_freqs = node->sp->num_of_freqs; + sp->voltage = node->sp->voltage; + sp->table = node->sp->table; + + for (i = 0; i < TEMP_DATA_POINTS; i++) { + ret = copy_from_user(sp->power[i], &argp->power[i][0], + sizeof(sp->power[i][0]) * node->sp->num_of_freqs); + if (ret) + goto failed; + } + + /* Copy the same power values for all the cpus in the cpumask + * argp->cpumask within the cluster (argp->cluster) + */ + spin_lock(&update_lock); + get_user(cpumask, &argp->cpumask); + for (i = 0; i < MAX_CORES_PER_CLUSTER; i++, cpumask >>= 1) { + if (!(cpumask & 0x01)) + continue; + mpidr = (cluster << CLUSTER_OFFSET_FOR_MPIDR); + mpidr |= i; + for_each_possible_cpu(cpu) { + if (!(cpu_logical_map(cpu) == mpidr)) + continue; + + node = &activity[cpu]; + clear_sp = node->sp; + node->sp = sp; + cpumask_set_cpu(cpu, &sp->mask); + if (clear_sp) { + cpumask_clear_cpu(cpu, &clear_sp->mask); + clear_static_power(clear_sp); + } + cpu_stats[cpu].ptable = per_cpu(ptable, cpu); + repopulate_stats(cpu); + + blocking_notifier_call_chain( + &msm_core_stats_notifier_list, cpu, NULL); + } + } + spin_unlock(&update_lock); + + activate_power_table = true; + return 0; + +failed: + for (i = 0; i < TEMP_DATA_POINTS; i++) + kfree(sp->power[i]); + kfree(sp->power); + kfree(sp); + return ret; +} + +static long msm_core_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + long ret = 0; + struct cpu_activity_info *node = NULL; + struct sched_params __user *argp = (struct sched_params __user *)arg; + int i, cpu = num_possible_cpus(); + int mpidr, cluster, cpumask; + + if (!argp) + return -EINVAL; + + get_user(cluster, &argp->cluster); + mpidr = (argp->cluster << (MAX_CORES_PER_CLUSTER * + MAX_NUM_OF_CLUSTERS)); + cpumask = argp->cpumask; + + switch (cmd) { + case EA_LEAKAGE: + ret = update_userspace_power(argp); + if (ret) + pr_err("Userspace power update failed with %ld\n", ret); + break; + case EA_VOLT: + for (i = 0; cpumask > 0; i++, cpumask >>= 1) { + for_each_possible_cpu(cpu) { + if (cpu_logical_map(cpu) == (mpidr | i)) + break; + } + } + if (cpu >= num_possible_cpus()) + break; + + mutex_lock(&policy_update_mutex); + node = &activity[cpu]; + if (!node->sp->table) { + ret = -EINVAL; + goto unlock; + } + ret = copy_to_user((void __user *)&argp->voltage[0], + node->sp->voltage, + sizeof(uint32_t) * node->sp->num_of_freqs); + if (ret) + break; + for (i = 0; i < node->sp->num_of_freqs; i++) { + ret = copy_to_user((void __user *)&argp->freq[i], + &node->sp->table[i].frequency, + sizeof(uint32_t)); + if (ret) + break; + } +unlock: + mutex_unlock(&policy_update_mutex); + break; + default: + break; + } + + return ret; +} + +#ifdef CONFIG_COMPAT +static long msm_core_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + arg = (unsigned long)compat_ptr(arg); + return msm_core_ioctl(file, cmd, arg); +} +#endif + +static int msm_core_open(struct inode *inode, struct file *file) +{ + return 0; +} + +static int msm_core_release(struct inode *inode, struct file *file) +{ + return 0; +} + +static inline void init_sens_threshold(struct sensor_threshold *threshold, + enum thermal_trip_type trip, long temp, + void *data) +{ + threshold->trip = trip; + threshold->temp = temp; + threshold->data = data; + threshold->notify = (void *)core_temp_notify; +} + +static 
int msm_core_stats_init(struct device *dev, int cpu) +{ + int i; + struct cpu_activity_info *cpu_node; + struct cpu_pstate_pwr *pstate = NULL; + + cpu_node = &activity[cpu]; + cpu_stats[cpu].cpu = cpu; + cpu_stats[cpu].temp = cpu_node->temp; + cpu_stats[cpu].throttling = false; + + cpu_stats[cpu].len = cpu_node->sp->num_of_freqs; + pstate = devm_kzalloc(dev, + sizeof(*pstate) * cpu_node->sp->num_of_freqs, + GFP_KERNEL); + if (!pstate) + return -ENOMEM; + + for (i = 0; i < cpu_node->sp->num_of_freqs; i++) + pstate[i].freq = cpu_node->sp->table[i].frequency; + + per_cpu(ptable, cpu) = pstate; + + return 0; +} + +static int msm_core_task_init(struct device *dev) +{ + init_completion(&sampling_completion); + sampling_task = kthread_run(do_sampling, NULL, "msm-core:sampling"); + if (IS_ERR(sampling_task)) { + pr_err("Failed to create do_sampling err: %ld\n", + PTR_ERR(sampling_task)); + return PTR_ERR(sampling_task); + } + return 0; +} + +struct cpu_pwr_stats *get_cpu_pwr_stats(void) +{ + return cpu_stats; +} +EXPORT_SYMBOL(get_cpu_pwr_stats); + +static int msm_get_power_values(int cpu, struct cpu_static_info *sp) +{ + int i = 0, j; + int ret = 0; + uint64_t power; + + /* Calculate dynamic power spent for every frequency using formula: + * Power = V * V * f + * where V = voltage for frequency + * f = frequency + * */ + sp->power = allocate_2d_array_uint32_t(sp->num_of_freqs); + if (IS_ERR_OR_NULL(sp->power)) + return PTR_ERR(sp->power); + + for (i = 0; i < TEMP_DATA_POINTS; i++) { + for (j = 0; j < sp->num_of_freqs; j++) { + power = sp->voltage[j] * + sp->table[j].frequency; + do_div(power, 1000); + do_div(power, 1000); + power *= sp->voltage[j]; + do_div(power, 1000); + sp->power[i][j] = power; + } + } + return ret; +} + +static int msm_get_voltage_levels(struct device *dev, int cpu, + struct cpu_static_info *sp) +{ + unsigned int *voltage; + int i; + int corner; + struct dev_pm_opp *opp; + struct device *cpu_dev = get_cpu_device(cpu); + /* + * Convert cpr corner voltage to average voltage of both + * a53 and a57 votlage value + */ + int average_voltage[NUM_OF_CORNERS] = {0, 746, 841, 843, 940, 953, 976, + 1024, 1090, 1100}; + + if (!cpu_dev) + return -ENODEV; + + voltage = devm_kzalloc(dev, + sizeof(*voltage) * sp->num_of_freqs, GFP_KERNEL); + + if (!voltage) + return -ENOMEM; + + rcu_read_lock(); + for (i = 0; i < sp->num_of_freqs; i++) { + opp = dev_pm_opp_find_freq_exact(cpu_dev, + sp->table[i].frequency * 1000, true); + corner = dev_pm_opp_get_voltage(opp); + + if (corner > 400000) + voltage[i] = corner / 1000; + else if (corner > 0 && corner < ARRAY_SIZE(average_voltage)) + voltage[i] = average_voltage[corner]; + else + voltage[i] + = average_voltage[ARRAY_SIZE(average_voltage) - 1]; + } + rcu_read_unlock(); + + sp->voltage = voltage; + return 0; +} + +static int msm_core_dyn_pwr_init(struct platform_device *pdev, + int cpu) +{ + int ret = 0; + + if (!activity[cpu].sp->table) + return 0; + + ret = msm_get_voltage_levels(&pdev->dev, cpu, activity[cpu].sp); + if (ret) + return ret; + + ret = msm_get_power_values(cpu, activity[cpu].sp); + + return ret; +} + +static int msm_core_tsens_init(struct device_node *node, int cpu) +{ + int ret = 0; + char *key = NULL; + struct device_node *phandle; + const char *sensor_type = NULL; + struct cpu_activity_info *cpu_node = &activity[cpu]; + long temp; + + if (!node) + return -ENODEV; + + key = "sensor"; + phandle = of_parse_phandle(node, key, 0); + if (!phandle) { + pr_info("%s: No sensor mapping found for the core\n", + __func__); + /* Do not treat 
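/*
 * Illustrative sketch, not part of this patch: msm_get_power_values() above
 * computes a relative dynamic power figure as V * V * f, dividing by 1000
 * three times to keep the 64-bit intermediates small.  Worked example with an
 * assumed V = 900 (mV) and f = 1497600 (kHz from the cpufreq table):
 *   900 * 1497600 = 1347840000; / 1000 / 1000 = 1347;
 *   * 900 = 1212300; / 1000 = 1212 (unitless, proportional to V^2 * f).
 * The same arithmetic as a helper:
 */
static uint32_t hypothetical_dyn_power(uint32_t mv, uint32_t khz)
{
	uint64_t p = (uint64_t)mv * khz;	/* 900 * 1497600 = 1347840000 */

	do_div(p, 1000);
	do_div(p, 1000);			/* 1347 */
	p *= mv;				/* 1212300 */
	do_div(p, 1000);			/* 1212 */
	return (uint32_t)p;
}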
this as error as some targets might have + * temperature notification only in userspace. + * Use default temperature for the core. Userspace might + * update the temperature once it is up. + */ + cpu_node->sensor_id = -ENODEV; + cpu_node->temp = DEFAULT_TEMP; + return 0; + } + + key = "qcom,sensor-name"; + ret = of_property_read_string(phandle, key, + &sensor_type); + if (ret) { + pr_err("%s: Cannot read tsens id\n", __func__); + return ret; + } + + cpu_node->sensor_id = sensor_get_id((char *)sensor_type); + if (cpu_node->sensor_id < 0) + return cpu_node->sensor_id; + + key = "qcom,scaling-factor"; + ret = of_property_read_u32(phandle, key, + &scaling_factor); + if (ret) { + pr_info("%s: Cannot read tsens scaling factor\n", __func__); + scaling_factor = DEFAULT_SCALING_FACTOR; + } + + ret = sensor_get_temp(cpu_node->sensor_id, &temp); + if (ret) + return ret; + + cpu_node->temp = temp / scaling_factor; + + init_sens_threshold(&cpu_node->hi_threshold, + THERMAL_TRIP_CONFIGURABLE_HI, + (cpu_node->temp + high_hyst_temp) * scaling_factor, + (void *)cpu_node); + init_sens_threshold(&cpu_node->low_threshold, + THERMAL_TRIP_CONFIGURABLE_LOW, + (cpu_node->temp - low_hyst_temp) * scaling_factor, + (void *)cpu_node); + + return ret; +} + +static int msm_core_mpidr_init(struct device_node *phandle) +{ + int ret = 0; + char *key = NULL; + int mpidr; + + key = "reg"; + ret = of_property_read_u32(phandle, key, + &mpidr); + if (ret) { + pr_err("%s: Cannot read mpidr\n", __func__); + return ret; + } + return mpidr; +} + +static int msm_core_cpu_policy_handler(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct cpufreq_policy *policy = data; + struct cpu_activity_info *cpu_info = &activity[policy->cpu]; + int cpu; + int ret; + + if (cpu_info->sp->table) + return NOTIFY_OK; + + switch (val) { + case CPUFREQ_CREATE_POLICY: + mutex_lock(&policy_update_mutex); + update_related_freq_table(policy); + + for_each_cpu(cpu, policy->related_cpus) { + ret = msm_core_dyn_pwr_init(msm_core_pdev, cpu); + if (ret) + pr_debug("voltage-pwr table update failed\n"); + + ret = msm_core_stats_init(&msm_core_pdev->dev, cpu); + if (ret) + pr_debug("Stats table update failed\n"); + } + mutex_unlock(&policy_update_mutex); + break; + default: + break; + } + return NOTIFY_OK; +} + +struct notifier_block cpu_policy = { + .notifier_call = msm_core_cpu_policy_handler +}; + +static int system_suspend_handler(struct notifier_block *nb, + unsigned long val, void *data) +{ + int cpu; + + mutex_lock(&kthread_update_mutex); + switch (val) { + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + case PM_POST_RESTORE: + /* + * Set completion event to read temperature and repopulate + * stats + */ + in_suspend = 0; + complete(&sampling_completion); + break; + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + /* + * cancel delayed work to be able to restart immediately + * after system resume + */ + in_suspend = 1; + cancel_delayed_work(&sampling_work); + /* + * cancel TSENS interrupts as we do not want to wake up from + * suspend to take care of repopulate stats while the system is + * in suspend + */ + for_each_possible_cpu(cpu) { + if (activity[cpu].sensor_id < 0) + continue; + + sensor_activate_trip(activity[cpu].sensor_id, + &activity[cpu].hi_threshold, false); + sensor_activate_trip(activity[cpu].sensor_id, + &activity[cpu].low_threshold, false); + } + break; + default: + break; + } + mutex_unlock(&kthread_update_mutex); + + return NOTIFY_OK; +} + +static int msm_core_freq_init(void) +{ + int cpu; + struct 
cpufreq_policy *policy; + + for_each_possible_cpu(cpu) { + activity[cpu].sp = kzalloc(sizeof(*(activity[cpu].sp)), + GFP_KERNEL); + if (!activity[cpu].sp) + return -ENOMEM; + } + + for_each_online_cpu(cpu) { + if (activity[cpu].sp->table) + continue; + + policy = cpufreq_cpu_get(cpu); + if (!policy) + continue; + + update_related_freq_table(policy); + cpufreq_cpu_put(policy); + } + + return 0; +} + +static int msm_core_params_init(struct platform_device *pdev) +{ + int ret = 0; + unsigned long cpu = 0; + struct device_node *child_node = NULL; + struct device_node *ea_node = NULL; + char *key = NULL; + int mpidr; + + for_each_possible_cpu(cpu) { + child_node = of_get_cpu_node(cpu, NULL); + + if (!child_node) + continue; + + mpidr = msm_core_mpidr_init(child_node); + if (mpidr < 0) + return mpidr; + + if (cpu >= num_possible_cpus()) + continue; + + activity[cpu].mpidr = mpidr; + + key = "qcom,ea"; + ea_node = of_parse_phandle(child_node, key, 0); + if (!ea_node) { + pr_err("%s Couldn't find the ea_node for cpu%lu\n", + __func__, cpu); + return -ENODEV; + } + + ret = msm_core_tsens_init(ea_node, cpu); + if (ret) + return ret; + + if (!activity[cpu].sp->table) + continue; + + ret = msm_core_dyn_pwr_init(msm_core_pdev, cpu); + if (ret) + pr_debug("voltage-pwr table update failed\n"); + + ret = msm_core_stats_init(&msm_core_pdev->dev, cpu); + if (ret) + pr_debug("Stats table update failed\n"); + } + + return 0; +} + +static const struct file_operations msm_core_ops = { + .owner = THIS_MODULE, + .unlocked_ioctl = msm_core_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = msm_core_compat_ioctl, +#endif + .open = msm_core_open, + .release = msm_core_release, +}; + +static struct miscdevice msm_core_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "pta", + .fops = &msm_core_ops +}; + +static void free_dyn_memory(void) +{ + int i, cpu; + + for_each_possible_cpu(cpu) { + if (activity[cpu].sp) { + for (i = 0; i < TEMP_DATA_POINTS; i++) { + if (!activity[cpu].sp->power) + break; + + kfree(activity[cpu].sp->power[i]); + } + } + kfree(activity[cpu].sp); + } +} + +static int uio_init(struct platform_device *pdev) +{ + int ret = 0; + struct uio_info *info = NULL; + struct resource *clnt_res = NULL; + u32 ea_mem_size = 0; + phys_addr_t ea_mem_pyhsical = 0; + + clnt_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!clnt_res) { + pr_err("resource not found\n"); + return -ENODEV; + } + + info = devm_kzalloc(&pdev->dev, sizeof(struct uio_info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + ea_mem_size = resource_size(clnt_res); + ea_mem_pyhsical = clnt_res->start; + + if (ea_mem_size == 0) { + pr_err("msm-core: memory size is zero"); + return -EINVAL; + } + + /* Setup device */ + info->name = clnt_res->name; + info->version = "1.0"; + info->mem[0].addr = ea_mem_pyhsical; + info->mem[0].size = ea_mem_size; + info->mem[0].memtype = UIO_MEM_PHYS; + + ret = uio_register_device(&pdev->dev, info); + if (ret) { + pr_err("uio register failed ret=%d", ret); + return ret; + } + dev_set_drvdata(&pdev->dev, info); + + return 0; +} + +static int msm_core_dev_probe(struct platform_device *pdev) +{ + int ret = 0; + char *key = NULL; + struct device_node *node; + int cpu; + struct uio_info *info; + + if (!pdev) + return -ENODEV; + + msm_core_pdev = pdev; + node = pdev->dev.of_node; + if (!node) + return -ENODEV; + + key = "qcom,low-hyst-temp"; + ret = of_property_read_u32(node, key, &low_hyst_temp); + if (ret) + low_hyst_temp = DEFAULT_LOW_HYST_TEMP; + + key = "qcom,high-hyst-temp"; + ret = 
of_property_read_u32(node, key, &high_hyst_temp); + if (ret) + high_hyst_temp = DEFAULT_HIGH_HYST_TEMP; + + key = "qcom,polling-interval"; + ret = of_property_read_u32(node, key, &poll_ms); + if (ret) + pr_info("msm-core initialized without polling period\n"); + + key = "qcom,throttling-temp"; + ret = of_property_read_u32(node, key, &max_throttling_temp); + + ret = uio_init(pdev); + if (ret) + return ret; + + ret = msm_core_freq_init(); + if (ret) + goto failed; + + ret = misc_register(&msm_core_device); + if (ret) { + pr_err("%s: Error registering device %d\n", __func__, ret); + goto failed; + } + + ret = msm_core_params_init(pdev); + if (ret) + goto failed; + + ret = msm_core_task_init(&pdev->dev); + if (ret) + goto failed; + + for_each_possible_cpu(cpu) + set_threshold(&activity[cpu]); + + INIT_DEFERRABLE_WORK(&sampling_work, samplequeue_handle); + schedule_delayed_work(&sampling_work, msecs_to_jiffies(0)); + cpufreq_register_notifier(&cpu_policy, CPUFREQ_POLICY_NOTIFIER); + pm_notifier(system_suspend_handler, 0); + return 0; +failed: + info = dev_get_drvdata(&pdev->dev); + uio_unregister_device(info); + free_dyn_memory(); + return ret; +} + +static int msm_core_remove(struct platform_device *pdev) +{ + int cpu; + struct uio_info *info = dev_get_drvdata(&pdev->dev); + + uio_unregister_device(info); + + for_each_possible_cpu(cpu) { + if (activity[cpu].sensor_id < 0) + continue; + + sensor_cancel_trip(activity[cpu].sensor_id, + &activity[cpu].hi_threshold); + sensor_cancel_trip(activity[cpu].sensor_id, + &activity[cpu].low_threshold); + } + free_dyn_memory(); + misc_deregister(&msm_core_device); + return 0; +} + +static struct of_device_id msm_core_match_table[] = { + {.compatible = "qcom,apss-core-ea"}, + {}, +}; + +static struct platform_driver msm_core_driver = { + .probe = msm_core_dev_probe, + .driver = { + .name = "msm_core", + .owner = THIS_MODULE, + .of_match_table = msm_core_match_table, + }, + .remove = msm_core_remove, +}; + +static int __init msm_core_init(void) +{ + return platform_driver_register(&msm_core_driver); +} +late_initcall(msm_core_init); diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 0a657c9c7029..517e1a87a488 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -282,3 +282,12 @@ config MSM_MPM_OF interrupts when going to a system wide sleep mode. This config option enables the MPM driver that supports initialization from a device tree + + +config MSM_EVENT_TIMER + bool "Event timer" + help + This option enables a modules that manages a list of event timers that + need to be monitored by the PM. The enables the PM code to monitor + events that require the core to be awake and ready to handle the + event. diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 0bc315d86910..fbb3815aeb40 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -31,3 +31,4 @@ obj-$(CONFIG_SOC_BUS) += socinfo.o obj-$(CONFIG_QCOM_BUS_SCALING) += msm_bus/ obj-$(CONFIG_MSM_SECURE_BUFFER) += secure_buffer.o obj-$(CONFIG_MSM_MPM_OF) += mpm-of.o +obj-$(CONFIG_MSM_EVENT_TIMER) += event_timer.o diff --git a/drivers/soc/qcom/event_timer.c b/drivers/soc/qcom/event_timer.c new file mode 100644 index 000000000000..374fa56b0b28 --- /dev/null +++ b/drivers/soc/qcom/event_timer.c @@ -0,0 +1,505 @@ +/* Copyright (c) 2012, 2014-2015, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * struct event_timer_info - basic event timer structure + * @node: timerqueue node to track time ordered data structure + * of event timers + * @notify: irq affinity notifier. + * @timer: hrtimer created for this event. + * @function : callback function for event timer. + * @data : callback data for event timer. + * @irq: irq number for which event timer is created. + * @cpu: event timer associated cpu. + */ +struct event_timer_info { + struct timerqueue_node node; + struct irq_affinity_notify notify; + void (*function)(void *); + void *data; + int irq; + int cpu; +}; + +struct hrtimer_info { + struct hrtimer event_hrtimer; + bool timer_initialized; +}; + +static DEFINE_PER_CPU(struct hrtimer_info, per_cpu_hrtimer); + +static DEFINE_PER_CPU(struct timerqueue_head, timer_head) = { + .head = RB_ROOT, + .next = NULL, +}; + +static DEFINE_SPINLOCK(event_timer_lock); +static DEFINE_SPINLOCK(event_setup_lock); + +static void create_timer_smp(void *data); +static void setup_event_hrtimer(struct event_timer_info *event); +static enum hrtimer_restart event_hrtimer_cb(struct hrtimer *hrtimer); +static void irq_affinity_change_notifier(struct irq_affinity_notify *notify, + const cpumask_t *new_cpu_mask); +static void irq_affinity_release(struct kref *ref); + +static int msm_event_debug_mask; +module_param_named( + debug_mask, msm_event_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP +); + +enum { + MSM_EVENT_TIMER_DEBUG = 1U << 0, +}; + +/** + * add_event_timer() : Add a wakeup event. Intended to be called + * by clients once. Returns a handle to be used + * for future transactions. + * @irq: event associated irq number. + * @function : The callback function will be called when event + * timer expires. + * @data: callback data provided by client. + */ +struct event_timer_info *add_event_timer(uint32_t irq, + void (*function)(void *), void *data) +{ + struct event_timer_info *event_info = + kzalloc(sizeof(struct event_timer_info), GFP_KERNEL); + + if (!event_info) + return NULL; + + event_info->function = function; + event_info->data = data; + + if (irq) { + struct irq_desc *desc = irq_to_desc(irq); + struct cpumask *mask = desc->irq_data.affinity; + + get_online_cpus(); + event_info->cpu = cpumask_any_and(mask, cpu_online_mask); + if (event_info->cpu >= nr_cpu_ids) + event_info->cpu = cpumask_first(cpu_online_mask); + + event_info->notify.notify = irq_affinity_change_notifier; + event_info->notify.release = irq_affinity_release; + irq_set_affinity_notifier(irq, &event_info->notify); + put_online_cpus(); + } + + /* Init rb node and hr timer */ + timerqueue_init(&event_info->node); + pr_debug("New Event Added. Event %p(on cpu%d). irq %d.\n", + event_info, event_info->cpu, irq); + + return event_info; +} +EXPORT_SYMBOL(add_event_timer); + +/** + * is_event_next(): Helper function to check if the event is the next + * expiring event + * @event : handle to the event to be checked. 
+ */ +static bool is_event_next(struct event_timer_info *event) +{ + struct event_timer_info *next_event; + struct timerqueue_node *next; + bool ret = false; + + next = timerqueue_getnext(&per_cpu(timer_head, event->cpu)); + if (!next) + goto exit_is_next_event; + + next_event = container_of(next, struct event_timer_info, node); + if (!next_event) + goto exit_is_next_event; + + if (next_event == event) + ret = true; + +exit_is_next_event: + return ret; +} + +/** + * is_event_active(): Helper function to check if the timer for a given event + * has been started. + * @event : handle to the event to be checked. + */ +static bool is_event_active(struct event_timer_info *event) +{ + struct timerqueue_node *next; + struct event_timer_info *next_event; + bool ret = false; + + for (next = timerqueue_getnext(&per_cpu(timer_head, event->cpu)); next; + next = timerqueue_iterate_next(next)) { + next_event = container_of(next, struct event_timer_info, node); + + if (event == next_event) { + ret = true; + break; + } + } + return ret; +} + +/** + * create_hrtimer(): Helper function to setup hrtimer. + */ +static void create_hrtimer(struct event_timer_info *event) + +{ + bool timer_initialized = per_cpu(per_cpu_hrtimer.timer_initialized, + event->cpu); + struct hrtimer *event_hrtimer = &per_cpu(per_cpu_hrtimer.event_hrtimer, + event->cpu); + + if (!timer_initialized) { + hrtimer_init(event_hrtimer, CLOCK_MONOTONIC, + HRTIMER_MODE_ABS_PINNED); + per_cpu(per_cpu_hrtimer.timer_initialized, event->cpu) = true; + } + + event_hrtimer->function = event_hrtimer_cb; + hrtimer_start(event_hrtimer, event->node.expires, + HRTIMER_MODE_ABS_PINNED); +} + +/** + * event_hrtimer_cb() : Callback function for hr timer. + * Make the client CB from here and remove the event + * from the time ordered queue. + */ +static enum hrtimer_restart event_hrtimer_cb(struct hrtimer *hrtimer) +{ + struct event_timer_info *event; + struct timerqueue_node *next; + unsigned long flags; + int cpu; + + spin_lock_irqsave(&event_timer_lock, flags); + cpu = smp_processor_id(); + next = timerqueue_getnext(&per_cpu(timer_head, cpu)); + + while (next && (ktime_to_ns(next->expires) + <= ktime_to_ns(hrtimer->node.expires))) { + event = container_of(next, struct event_timer_info, node); + if (!event) + goto hrtimer_cb_exit; + + WARN_ON_ONCE(event->cpu != cpu); + + if (msm_event_debug_mask && MSM_EVENT_TIMER_DEBUG) + pr_debug("Deleting event %p @ %lu(on cpu%d)\n", event, + (unsigned long)ktime_to_ns(next->expires), cpu); + + timerqueue_del(&per_cpu(timer_head, cpu), &event->node); + + if (event->function) + event->function(event->data); + + next = timerqueue_getnext(&per_cpu(timer_head, cpu)); + } + + if (next) { + event = container_of(next, struct event_timer_info, node); + create_hrtimer(event); + } +hrtimer_cb_exit: + spin_unlock_irqrestore(&event_timer_lock, flags); + return HRTIMER_NORESTART; +} + +/** + * create_timer_smp(): Helper function used setting up timer on CPUs. 
+ */ +static void create_timer_smp(void *data) +{ + unsigned long flags; + struct event_timer_info *event = + (struct event_timer_info *)data; + struct timerqueue_node *next; + + spin_lock_irqsave(&event_timer_lock, flags); + + if (is_event_active(event)) + timerqueue_del(&per_cpu(timer_head, event->cpu), &event->node); + + next = timerqueue_getnext(&per_cpu(timer_head, event->cpu)); + timerqueue_add(&per_cpu(timer_head, event->cpu), &event->node); + + if (msm_event_debug_mask && MSM_EVENT_TIMER_DEBUG) + pr_debug("Adding Event %p(on cpu%d) for %lu\n", event, + event->cpu, + (unsigned long)ktime_to_ns(event->node.expires)); + + if (!next || (next && (ktime_to_ns(event->node.expires) < + ktime_to_ns(next->expires)))) { + if (msm_event_debug_mask && MSM_EVENT_TIMER_DEBUG) + pr_debug("Setting timer for %lu(on cpu%d)\n", + (unsigned long)ktime_to_ns(event->node.expires), + event->cpu); + + create_hrtimer(event); + } + spin_unlock_irqrestore(&event_timer_lock, flags); +} + +/** + * setup_timer() : Helper function to setup timer on primary + * core during hrtimer callback. + * @event: event handle causing the wakeup. + */ +static void setup_event_hrtimer(struct event_timer_info *event) +{ + smp_call_function_single(event->cpu, create_timer_smp, event, 1); +} + +static void irq_affinity_release(struct kref *ref) +{ + struct event_timer_info *event; + struct irq_affinity_notify *notify = + container_of(ref, struct irq_affinity_notify, kref); + + event = container_of(notify, struct event_timer_info, notify); + pr_debug("event = %p\n", event); +} + +static void irq_affinity_change_notifier(struct irq_affinity_notify *notify, + const cpumask_t *mask_val) +{ + struct event_timer_info *event; + unsigned long flags; + unsigned int irq; + int old_cpu = -EINVAL, new_cpu = -EINVAL; + bool next_event = false; + + event = container_of(notify, struct event_timer_info, notify); + irq = notify->irq; + + if (!event) + return; + + /* + * This logic is inline with irq-gic.c for finding + * the next affinity CPU. + */ + new_cpu = cpumask_any_and(mask_val, cpu_online_mask); + if (new_cpu >= nr_cpu_ids) + return; + + old_cpu = event->cpu; + + if (msm_event_debug_mask && MSM_EVENT_TIMER_DEBUG) + pr_debug("irq %d, event %p, old_cpu(%d)->new_cpu(%d).\n", + irq, event, old_cpu, new_cpu); + + /* No change in IRQ affinity */ + if (old_cpu == new_cpu) + return; + + spin_lock_irqsave(&event_timer_lock, flags); + + /* If the event is not active OR + * If it is the next event + * and the timer is already in callback + * Just reset cpu and return + */ + if (!is_event_active(event) || + (is_event_next(event) && + (hrtimer_try_to_cancel(&per_cpu(per_cpu_hrtimer. + event_hrtimer, old_cpu)) < 0))) { + event->cpu = new_cpu; + spin_unlock_irqrestore(&event_timer_lock, flags); + if (msm_event_debug_mask && MSM_EVENT_TIMER_DEBUG) + pr_debug("Event:%p is not active or in callback\n", + event); + return; + } + + /* Update the flag based on EVENT is next are not */ + if (is_event_next(event)) + next_event = true; + + event->cpu = new_cpu; + + /* + * We are here either because hrtimer was active or event is not next + * Delete the event from the timer queue anyway + */ + timerqueue_del(&per_cpu(timer_head, old_cpu), &event->node); + + if (msm_event_debug_mask && MSM_EVENT_TIMER_DEBUG) + pr_debug("Event:%p is in the list\n", event); + + spin_unlock_irqrestore(&event_timer_lock, flags); + + /* + * Migrating event timer to a new CPU is automatically + * taken care. Since we have already modify the event->cpu + * with new CPU. 
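+ * setup_event_hrtimer() below then re-queues the event on the new CPU's
+ * time ordered queue and re-programs that CPU's pinned hrtimer.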
+ * + * Typical cases are + * + * 1) + * C0 C1 + * | ^ + * ----------------- | + * | | | | + * E1 E2 E3 | + * |(migrating) | + * ------------------------- + * + * 2) + * C0 C1 + * | ^ + * ---------------- | + * | | | | + * E1 E2 E3 | + * |(migrating) | + * --------------------------------- + * + * Here after moving the E1 to C1. Need to start + * E2 on C0. + */ + spin_lock(&event_setup_lock); + /* Setup event timer on new cpu*/ + setup_event_hrtimer(event); + + /* Setup event on the old cpu*/ + if (next_event) { + struct timerqueue_node *next; + + next = timerqueue_getnext(&per_cpu(timer_head, old_cpu)); + if (next) { + event = container_of(next, + struct event_timer_info, node); + setup_event_hrtimer(event); + } + } + spin_unlock(&event_setup_lock); +} + +/** + * activate_event_timer() : Set the expiration time for an event in absolute + * ktime. This is a oneshot event timer, clients + * should call this again to set another expiration. + * @event : event handle. + * @event_time : event time in absolute ktime. + */ +void activate_event_timer(struct event_timer_info *event, ktime_t event_time) +{ + if (!event) + return; + + if (msm_event_debug_mask && MSM_EVENT_TIMER_DEBUG) + pr_debug("Adding event %p timer @ %lu(on cpu%d)\n", event, + (unsigned long)ktime_to_us(event_time), + event->cpu); + + spin_lock(&event_setup_lock); + event->node.expires = event_time; + /* Start hrtimer and add event to rb tree */ + setup_event_hrtimer(event); + spin_unlock(&event_setup_lock); +} +EXPORT_SYMBOL(activate_event_timer); + +/** + * deactivate_event_timer() : Deactivate an event timer, this removes the event from + * the time ordered queue of event timers. + * @event: event handle. + */ +void deactivate_event_timer(struct event_timer_info *event) +{ + unsigned long flags; + + if (msm_event_debug_mask && MSM_EVENT_TIMER_DEBUG) + pr_debug("Deactivate timer\n"); + + spin_lock_irqsave(&event_timer_lock, flags); + if (is_event_active(event)) { + if (is_event_next(event)) + hrtimer_try_to_cancel(&per_cpu( + per_cpu_hrtimer.event_hrtimer, event->cpu)); + + timerqueue_del(&per_cpu(timer_head, event->cpu), &event->node); + } + spin_unlock_irqrestore(&event_timer_lock, flags); +} + +/** + * destroy_event_timer() : Free the event info data structure allocated during + * add_event_timer(). + * @event: event handle. + */ +void destroy_event_timer(struct event_timer_info *event) +{ + unsigned long flags; + + spin_lock_irqsave(&event_timer_lock, flags); + if (is_event_active(event)) { + if (is_event_next(event)) + hrtimer_try_to_cancel(&per_cpu( + per_cpu_hrtimer.event_hrtimer, event->cpu)); + + timerqueue_del(&per_cpu(timer_head, event->cpu), &event->node); + } + spin_unlock_irqrestore(&event_timer_lock, flags); + kfree(event); +} +EXPORT_SYMBOL(destroy_event_timer); + +/** + * get_next_event_timer() - Get the next wakeup event. Returns + * a ktime value of the next expiring event. 
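+ * @cpu: CPU whose time ordered queue is examined.
+ *
+ * Return: remaining time of the next queued event timer on @cpu, or a
+ * zero ktime if no event timer is queued for that CPU.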
+ */ +ktime_t get_next_event_time(int cpu) +{ + unsigned long flags; + struct timerqueue_node *next; + struct event_timer_info *event; + ktime_t next_event = ns_to_ktime(0); + + spin_lock_irqsave(&event_timer_lock, flags); + next = timerqueue_getnext(&per_cpu(timer_head, cpu)); + event = container_of(next, struct event_timer_info, node); + spin_unlock_irqrestore(&event_timer_lock, flags); + + if (!next || event->cpu != cpu) + return next_event; + + next_event = hrtimer_get_remaining( + &per_cpu(per_cpu_hrtimer.event_hrtimer, cpu)); + + if (msm_event_debug_mask && MSM_EVENT_TIMER_DEBUG) + pr_debug("Next Event %lu(on cpu%d)\n", + (unsigned long)ktime_to_us(next_event), cpu); + + return next_event; +} diff --git a/include/linux/coresight-cti.h b/include/linux/coresight-cti.h new file mode 100644 index 000000000000..73f56b76cc18 --- /dev/null +++ b/include/linux/coresight-cti.h @@ -0,0 +1,93 @@ +/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _LINUX_CORESIGHT_CTI_H +#define _LINUX_CORESIGHT_CTI_H + +#include + +struct coresight_cti_data { + int nr_ctis; + const char **names; +}; + +struct coresight_cti { + const char *name; + struct list_head link; +}; + +#ifdef CONFIG_CORESIGHT_CTI +extern struct coresight_cti *coresight_cti_get(const char *name); +extern void coresight_cti_put(struct coresight_cti *cti); +extern int coresight_cti_map_trigin( + struct coresight_cti *cti, int trig, int ch); +extern int coresight_cti_map_trigout( + struct coresight_cti *cti, int trig, int ch); +extern void coresight_cti_unmap_trigin( + struct coresight_cti *cti, int trig, int ch); +extern void coresight_cti_unmap_trigout( + struct coresight_cti *cti, int trig, int ch); +extern void coresight_cti_reset(struct coresight_cti *cti); +extern int coresight_cti_set_trig(struct coresight_cti *cti, int ch); +extern void coresight_cti_clear_trig(struct coresight_cti *cti, int ch); +extern int coresight_cti_pulse_trig(struct coresight_cti *cti, int ch); +extern int coresight_cti_enable_gate(struct coresight_cti *cti, int ch); +extern void coresight_cti_disable_gate(struct coresight_cti *cti, int ch); +extern void coresight_cti_ctx_save(void); +extern void coresight_cti_ctx_restore(void); +extern int coresight_cti_ack_trig(struct coresight_cti *cti, int trig); +#else +static inline struct coresight_cti *coresight_cti_get(const char *name) +{ + return NULL; +} +static inline void coresight_cti_put(struct coresight_cti *cti) {} +static inline int coresight_cti_map_trigin( + struct coresight_cti *cti, int trig, int ch) +{ + return -ENOSYS; +} +static inline int coresight_cti_map_trigout( + struct coresight_cti *cti, int trig, int ch) +{ + return -ENOSYS; +} +static inline void coresight_cti_unmap_trigin( + struct coresight_cti *cti, int trig, int ch) {} +static inline void coresight_cti_unmap_trigout( + struct coresight_cti *cti, int trig, int ch) {} +static inline void coresight_cti_reset(struct coresight_cti *cti) {} +static inline int coresight_cti_set_trig(struct coresight_cti *cti, int ch) +{ + return -ENOSYS; +} +static 
inline void coresight_cti_clear_trig(struct coresight_cti *cti, int ch) +{} +static inline int coresight_cti_pulse_trig(struct coresight_cti *cti, int ch) +{ + return -ENOSYS; +} +static inline int coresight_cti_enable_gate(struct coresight_cti *cti, int ch) +{ + return -ENOSYS; +} +static inline void coresight_cti_disable_gate(struct coresight_cti *cti, int ch) +{} +static inline void coresight_cti_ctx_save(void){} +static inline void coresight_cti_ctx_restore(void){} +static inline int coresight_cti_ack_trig(struct coresight_cti *cti, int trig) +{ + return -ENOSYS; +} +#endif + +#endif diff --git a/include/linux/msm-core-interface.h b/include/linux/msm-core-interface.h new file mode 100644 index 000000000000..33bb40829dd7 --- /dev/null +++ b/include/linux/msm-core-interface.h @@ -0,0 +1,13 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include diff --git a/include/soc/qcom/event_timer.h b/include/soc/qcom/event_timer.h new file mode 100644 index 000000000000..6f2a1aa2f9b1 --- /dev/null +++ b/include/soc/qcom/event_timer.h @@ -0,0 +1,80 @@ +/* Copyright (c) 2012, 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __ARCH_ARM_MACH_MSM_EVENT_TIMER_H +#define __ARCH_ARM_MACH_MSM_EVENT_TIMER_H + +#include + +struct event_timer_info; + +#ifdef CONFIG_MSM_EVENT_TIMER +/** + * add_event_timer() : Add a wakeup event. Intended to be called + * by clients once. Returns a handle to be used + * for future transactions. + * @irq : Interrupt number to track affinity. + * @function : The callback function will be called when event + * timer expires. + * @data : Callback data provided by client. + */ +struct event_timer_info *add_event_timer(uint32_t irq, + void (*function)(void *), void *data); + +/** activate_event_timer() : Set the expiration time for an event in absolute + * ktime. This is a oneshot event timer, clients + * should call this again to set another expiration. + * @event : Event handle. + * @event_time : Event time in absolute ktime. + */ +void activate_event_timer(struct event_timer_info *event, ktime_t event_time); + +/** + * deactivate_event_timer() : Deactivate an event timer. + * @event: event handle. + */ +void deactivate_event_timer(struct event_timer_info *event); + +/** + * destroy_event_timer() : Free the event info data structure allocated during + * add_event_timer(). + * @event: event handle. + */ +void destroy_event_timer(struct event_timer_info *event); + +/** + * get_next_event_timer() : Get the next wakeup event. + * returns a ktime value of the next + * expiring event. 
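+ *
+ * A minimal client sketch (illustrative only; the callback below is a
+ * hypothetical example and not part of this interface):
+ *
+ *	static void client_wake_cb(void *data) { }
+ *
+ *	struct event_timer_info *et;
+ *
+ *	et = add_event_timer(0, client_wake_cb, NULL);
+ *	if (et) {
+ *		activate_event_timer(et,
+ *			ktime_add_ns(ktime_get(), 100 * NSEC_PER_MSEC));
+ *		...
+ *		destroy_event_timer(et);
+ *	}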
+ */ +ktime_t get_next_event_time(int cpu); +#else +static inline void *add_event_timer(uint32_t irq, void (*function)(void *), + void *data) +{ + return NULL; +} + +static inline void activate_event_timer(void *event, ktime_t event_time) {} + +static inline void deactivate_event_timer(void *event) {} + +static inline void destroy_event_timer(void *event) {} + +static inline ktime_t get_next_event_time(int cpu) +{ + return ns_to_ktime(0); +} + +#endif /* CONFIG_MSM_EVENT_TIMER_MANAGER */ +#endif /* __ARCH_ARM_MACH_MSM_EVENT_TIMER_H */ diff --git a/include/soc/qcom/jtag.h b/include/soc/qcom/jtag.h new file mode 100644 index 000000000000..a5de362cdb8c --- /dev/null +++ b/include/soc/qcom/jtag.h @@ -0,0 +1,54 @@ +/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MACH_JTAG_H +#define __MACH_JTAG_H + +#if defined(CONFIG_MSM_JTAG) || defined(CONFIG_MSM_JTAG_MM) || \ + defined(CONFIG_MSM_JTAGV8) +extern void msm_jtag_save_state(void); +extern void msm_jtag_restore_state(void); +extern void msm_jtag_etm_save_state(void); +extern void msm_jtag_etm_restore_state(void); +extern bool msm_jtag_fuse_apps_access_disabled(void); +#else +static inline void msm_jtag_save_state(void) {} +static inline void msm_jtag_restore_state(void) {} +static inline void msm_jtag_etm_save_state(void) {} +static inline void msm_jtag_etm_restore_state(void){} +static inline bool msm_jtag_fuse_apps_access_disabled(void) { return false; } +#endif +#ifdef CONFIG_MSM_JTAGV8 +extern int msm_jtag_save_register(struct notifier_block *nb); +extern int msm_jtag_save_unregister(struct notifier_block *nb); +extern int msm_jtag_restore_register(struct notifier_block *nb); +extern int msm_jtag_restore_unregister(struct notifier_block *nb); +#else +static inline int msm_jtag_save_register(struct notifier_block *nb) +{ + return 0; +} +static inline int msm_jtag_save_unregister(struct notifier_block *nb) +{ + return 0; +} +static inline int msm_jtag_restore_register(struct notifier_block *nb) +{ + return 0; +} +static inline int msm_jtag_restore_unregister(struct notifier_block *nb) +{ + return 0; +} +#endif + +#endif diff --git a/include/soc/qcom/lpm-stats.h b/include/soc/qcom/lpm-stats.h new file mode 100644 index 000000000000..05f5516f4177 --- /dev/null +++ b/include/soc/qcom/lpm-stats.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __ARCH_ARM_MACH_MSM_LPM_STATS_H +#define __ARCH_ARM_MACH_MSM_LPM_STATS_H + +struct lpm_stats; + +#ifdef CONFIG_MSM_IDLE_STATS +struct lpm_stats *lpm_stats_config_level(const char *name, + const char **levels, int num_levels, struct lpm_stats *parent, + struct cpumask *mask); +void lpm_stats_cluster_enter(struct lpm_stats *stats, uint32_t index); +void lpm_stats_cluster_exit(struct lpm_stats *stats, uint32_t index, + bool success); +void lpm_stats_cpu_enter(uint32_t index); +void lpm_stats_cpu_exit(uint32_t index, bool success); +void lpm_stats_suspend_enter(void); +void lpm_stats_suspend_exit(void); +#else +static inline struct lpm_stats *lpm_stats_config_level(const char *name, + const char **levels, int num_levels, struct lpm_stats *parent, + struct cpumask *mask) +{ + return ERR_PTR(-ENODEV); +} + +static inline void lpm_stats_cluster_enter(struct lpm_stats *stats, + uint32_t index) +{ + return; +} + +static inline void lpm_stats_cluster_exit(struct lpm_stats *stats, + uint32_t index, bool success) +{ + return; +} + +static inline void lpm_stats_cpu_enter(uint32_t index) +{ + return; +} + +static inline void lpm_stats_cpu_exit(uint32_t index, bool success) +{ + return; +} + +static inline void lpm_stats_suspend_enter(void) +{ + return; +} + +static inline void lpm_stats_suspend_exit(void) +{ + return; +} +#endif +#endif /* __ARCH_ARM_MACH_MSM_LPM_STATS_H */ diff --git a/include/soc/qcom/msm-core.h b/include/soc/qcom/msm-core.h new file mode 100644 index 000000000000..5e187183f07b --- /dev/null +++ b/include/soc/qcom/msm-core.h @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __ARCH_ARM_MACH_MSM_CORE_H +#define __ARCH_ARM_MACH_MSM_CORE_H +#ifdef CONFIG_APSS_CORE_EA +void set_cpu_throttled(struct cpumask *mask, bool throttling); +struct blocking_notifier_head *get_power_update_notifier(void); +#else +static inline void set_cpu_throttled(struct cpumask *mask, bool throttling) {} +struct blocking_notifier_head *get_power_update_notifier(void) {return NULL; } +#endif +#endif + diff --git a/include/soc/qcom/pm.h b/include/soc/qcom/pm.h new file mode 100644 index 000000000000..b7ad2e5e6c2c --- /dev/null +++ b/include/soc/qcom/pm.h @@ -0,0 +1,172 @@ +/* + * Copyright (C) 2007 Google, Inc. + * Copyright (c) 2009-2015, The Linux Foundation. All rights reserved. + * Author: San Mehat + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __ARCH_ARM_MACH_MSM_PM_H +#define __ARCH_ARM_MACH_MSM_PM_H + +#include +#include +#include +#include + +#if !defined(CONFIG_SMP) +#define msm_secondary_startup NULL +#elif defined(CONFIG_CPU_V7) +#define msm_secondary_startup secondary_startup +#else +#define msm_secondary_startup secondary_holding_pen +#endif + +enum msm_pm_sleep_mode { + MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT, + MSM_PM_SLEEP_MODE_RETENTION, + MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE, + MSM_PM_SLEEP_MODE_POWER_COLLAPSE, + MSM_PM_SLEEP_MODE_FASTPC, + MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND, + MSM_PM_SLEEP_MODE_NR, + MSM_PM_SLEEP_MODE_NOT_SELECTED, +}; + +enum msm_pm_l2_scm_flag { + MSM_SCM_L2_ON = 0, + MSM_SCM_L2_OFF = 1, + MSM_SCM_L2_GDHS = 2, + MSM_SCM_L3_PC_OFF = 4, +}; + +#define MSM_PM_MODE(cpu, mode_nr) ((cpu) * MSM_PM_SLEEP_MODE_NR + (mode_nr)) + +struct msm_pm_time_params { + uint32_t latency_us; + uint32_t sleep_us; + uint32_t next_event_us; + uint32_t modified_time_us; +}; + +struct msm_pm_sleep_status_data { + void *base_addr; + uint32_t mask; +}; + +/** + * lpm_cpu_pre_pc_cb(): API to get the L2 flag to pass to TZ + * + * @cpu: cpuid of the CPU going down. + * + * Returns the l2 flush flag enum that is passed down to TZ during power + * collaps + */ +enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu); + +/** + * msm_pm_sleep_mode_allow() - API to determine if sleep mode is allowed. + * @cpu: CPU on which to check for the sleep mode. + * @mode: Sleep Mode to check for. + * @idle: Idle or Suspend Sleep Mode. + * + * Helper function to determine if a Idle or Suspend + * Sleep mode is allowed for a specific CPU. + * + * Return: 1 for allowed; 0 if not allowed. + */ +int msm_pm_sleep_mode_allow(unsigned int, unsigned int, bool); + +/** + * msm_pm_sleep_mode_supported() - API to determine if sleep mode is + * supported. + * @cpu: CPU on which to check for the sleep mode. + * @mode: Sleep Mode to check for. + * @idle: Idle or Suspend Sleep Mode. + * + * Helper function to determine if a Idle or Suspend + * Sleep mode is allowed and enabled for a specific CPU. + * + * Return: 1 for supported; 0 if not supported. 
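+ *
+ * For example (illustrative only), an idle driver could gate a state with:
+ *
+ *	if (msm_pm_sleep_mode_supported(cpu,
+ *			MSM_PM_SLEEP_MODE_POWER_COLLAPSE, true))
+ *		... enter power collapse from idle ...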
+ */ +int msm_pm_sleep_mode_supported(unsigned int, unsigned int, bool); + +struct msm_pm_cpr_ops { + void (*cpr_suspend)(void); + void (*cpr_resume)(void); +}; + +void __init msm_pm_set_tz_retention_flag(unsigned int flag); +void msm_pm_enable_retention(bool enable); +bool msm_pm_retention_enabled(void); +bool msm_cpu_pm_enter_sleep(enum msm_pm_sleep_mode mode, bool from_idle); +static inline void msm_arch_idle(void) +{ + mb(); + wfi(); +} + +#ifdef CONFIG_MSM_PM +void msm_pm_set_rpm_wakeup_irq(unsigned int irq); +int msm_pm_wait_cpu_shutdown(unsigned int cpu); +int __init msm_pm_sleep_status_init(void); +void lpm_cpu_hotplug_enter(unsigned int cpu); +s32 msm_cpuidle_get_deep_idle_latency(void); +int msm_pm_collapse(unsigned long unused); +#else +static inline void msm_pm_set_rpm_wakeup_irq(unsigned int irq) {} +static inline int msm_pm_wait_cpu_shutdown(unsigned int cpu) { return 0; } +static inline int msm_pm_sleep_status_init(void) { return 0; }; + +static inline void lpm_cpu_hotplug_enter(unsigned int cpu) +{ + msm_arch_idle(); +}; + +static inline s32 msm_cpuidle_get_deep_idle_latency(void) { return 0; } +#define msm_pm_collapse NULL +#endif + +#ifdef CONFIG_HOTPLUG_CPU +int msm_platform_secondary_init(unsigned int cpu); +#else +static inline int msm_platform_secondary_init(unsigned int cpu) { return 0; } +#endif + +enum msm_pm_time_stats_id { + MSM_PM_STAT_REQUESTED_IDLE = 0, + MSM_PM_STAT_IDLE_SPIN, + MSM_PM_STAT_IDLE_WFI, + MSM_PM_STAT_RETENTION, + MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE, + MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE, + MSM_PM_STAT_IDLE_POWER_COLLAPSE, + MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE, + MSM_PM_STAT_SUSPEND, + MSM_PM_STAT_FAILED_SUSPEND, + MSM_PM_STAT_NOT_IDLE, + MSM_PM_STAT_COUNT +}; + +#ifdef CONFIG_MSM_IDLE_STATS +void msm_pm_add_stats(enum msm_pm_time_stats_id *enable_stats, int size); +void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t); +void msm_pm_l2_add_stat(uint32_t id, int64_t t); +#else +static inline void msm_pm_add_stats(enum msm_pm_time_stats_id *enable_stats, + int size) {} +static inline void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t) {} +static inline void msm_pm_l2_add_stat(uint32_t id, int64_t t) {} +#endif + +void msm_pm_set_cpr_ops(struct msm_pm_cpr_ops *ops); +extern dma_addr_t msm_pc_debug_counters_phys; +#endif /* __ARCH_ARM_MACH_MSM_PM_H */ diff --git a/include/trace/events/trace_msm_core.h b/include/trace/events/trace_msm_core.h new file mode 100644 index 000000000000..d99b72ca63fa --- /dev/null +++ b/include/trace/events/trace_msm_core.h @@ -0,0 +1,103 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM msm_core + +#if !defined(_TRACE_MSM_CORE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_MSM_CORE_H + +#include +#include + +TRACE_EVENT(cpu_stats, + + TP_PROTO(unsigned int cpu, long temp, + uint64_t min_power, uint64_t max_power), + + TP_ARGS(cpu, temp, min_power, max_power), + + TP_STRUCT__entry( + __field(unsigned int, cpu) + __field(long, temp) + __field(uint64_t, min_power) + __field(uint64_t, max_power) + ), + + TP_fast_assign( + __entry->cpu = cpu; + __entry->temp = temp; + __entry->min_power = min_power; + __entry->max_power = max_power; + ), + + TP_printk("Cpu%d: temp:%ld power@minfreq:%llu power@maxfreq:%llu", + __entry->cpu, __entry->temp, __entry->min_power, + __entry->max_power) +); + +TRACE_EVENT(temp_threshold, + + TP_PROTO(unsigned int cpu, long temp, + long hi_thresh, long low_thresh), + + TP_ARGS(cpu, temp, hi_thresh, low_thresh), + + TP_STRUCT__entry( + __field(unsigned int, cpu) + __field(long, temp) + __field(long, hi_thresh) + __field(long, low_thresh) + ), + + TP_fast_assign( + __entry->cpu = cpu; + __entry->temp = temp; + __entry->hi_thresh = hi_thresh; + __entry->low_thresh = low_thresh; + ), + + TP_printk("Cpu%d: temp:%ld hi_thresh:%ld low_thresh:%ld", + __entry->cpu, __entry->temp, __entry->hi_thresh, + __entry->low_thresh) +); + +TRACE_EVENT(temp_notification, + + TP_PROTO(unsigned int sensor_id, enum thermal_trip_type type, + int temp, int prev_temp), + + TP_ARGS(sensor_id, type, temp, prev_temp), + + TP_STRUCT__entry( + __field(unsigned int, sensor_id) + __field(enum thermal_trip_type, type) + __field(int, temp) + __field(int, prev_temp) + ), + + TP_fast_assign( + __entry->sensor_id = sensor_id; + __entry->type = type; + __entry->temp = temp; + __entry->prev_temp = prev_temp; + ), + + TP_printk("Sensor_id%d: %s threshold triggered temp:%d(previous:%d)", + __entry->sensor_id, + __entry->type == THERMAL_TRIP_CONFIGURABLE_HI ? "High" : "Low", + __entry->temp, __entry->prev_temp) +); + +#endif +#define TRACE_INCLUDE_FILE trace_msm_core +#include diff --git a/include/trace/events/trace_msm_low_power.h b/include/trace/events/trace_msm_low_power.h new file mode 100644 index 000000000000..691df1b2689b --- /dev/null +++ b/include/trace/events/trace_msm_low_power.h @@ -0,0 +1,167 @@ +/* Copyright (c) 2012, 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM msm_low_power + +#if !defined(_TRACE_MSM_LOW_POWER_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_MSM_LOW_POWER_H_ + +#include + +TRACE_EVENT(cpu_power_select, + + TP_PROTO(int index, u32 sleep_us, u32 latency, u32 next_event_us), + + TP_ARGS(index, sleep_us, latency, next_event_us), + + TP_STRUCT__entry( + __field(int, index) + __field(u32, sleep_us) + __field(u32, latency) + __field(u32, next_event_us) + ), + + TP_fast_assign( + __entry->index = index; + __entry->sleep_us = sleep_us; + __entry->latency = latency; + __entry->next_event_us = next_event_us; + ), + + TP_printk("idx:%d sleep_time:%u latency:%u next_event:%u", + __entry->index, __entry->sleep_us, __entry->latency, + __entry->next_event_us) +); + +TRACE_EVENT(cpu_idle_enter, + + TP_PROTO(int index), + + TP_ARGS(index), + + TP_STRUCT__entry( + __field(int, index) + ), + + TP_fast_assign( + __entry->index = index; + ), + + TP_printk("idx:%d", + __entry->index) +); + +TRACE_EVENT(cpu_idle_exit, + + TP_PROTO(int index, bool success), + + TP_ARGS(index, success), + + TP_STRUCT__entry( + __field(int, index) + __field(bool, success) + ), + + TP_fast_assign( + __entry->index = index; + __entry->success = success; + ), + + TP_printk("idx:%d success:%d", + __entry->index, + __entry->success) +); + +TRACE_EVENT(cluster_enter, + + TP_PROTO(const char *name, int index, unsigned long sync_cpus, + unsigned long child_cpus, bool from_idle), + + TP_ARGS(name, index, sync_cpus, child_cpus, from_idle), + + TP_STRUCT__entry( + __field(const char *, name) + __field(int, index) + __field(unsigned long, sync_cpus) + __field(unsigned long, child_cpus) + __field(bool, from_idle) + ), + + TP_fast_assign( + __entry->name = name; + __entry->index = index; + __entry->sync_cpus = sync_cpus; + __entry->child_cpus = child_cpus; + __entry->from_idle = from_idle; + ), + + TP_printk("cluster_name:%s idx:%d sync:0x%lx child:0x%lx idle:%d", + __entry->name, + __entry->index, + __entry->sync_cpus, + __entry->child_cpus, + __entry->from_idle) +); + +TRACE_EVENT(cluster_exit, + + TP_PROTO(const char *name, int index, unsigned long sync_cpus, + unsigned long child_cpus, bool from_idle), + + TP_ARGS(name, index, sync_cpus, child_cpus, from_idle), + + TP_STRUCT__entry( + __field(const char *, name) + __field(int, index) + __field(unsigned long, sync_cpus) + __field(unsigned long, child_cpus) + __field(bool, from_idle) + ), + + TP_fast_assign( + __entry->name = name; + __entry->index = index; + __entry->sync_cpus = sync_cpus; + __entry->child_cpus = child_cpus; + __entry->from_idle = from_idle; + ), + + TP_printk("cluster_name:%s idx:%d sync:0x%lx child:0x%lx idle:%d", + __entry->name, + __entry->index, + __entry->sync_cpus, + __entry->child_cpus, + __entry->from_idle) +); + +TRACE_EVENT(pre_pc_cb, + + TP_PROTO(int tzflag), + + TP_ARGS(tzflag), + + TP_STRUCT__entry( + __field(int, tzflag) + ), + + TP_fast_assign( + __entry->tzflag = tzflag; + ), + + TP_printk("tzflag:%d", + __entry->tzflag + ) +); +#endif +#define TRACE_INCLUDE_FILE trace_msm_low_power +#include diff --git a/include/uapi/linux/msm-core-interface.h b/include/uapi/linux/msm-core-interface.h new file mode 100644 index 000000000000..6c0dae46ab57 --- /dev/null +++ b/include/uapi/linux/msm-core-interface.h @@ -0,0 +1,29 @@ +#ifndef __MSM_CORE_LIB_H__ +#define __MSM_CORE_LIB_H__ + +#include + +#define TEMP_DATA_POINTS 13 +#define MAX_NUM_FREQ 200 + +enum msm_core_ioctl_params { + MSM_CORE_LEAKAGE, + MSM_CORE_VOLTAGE, +}; + +#define MSM_CORE_MAGIC 0x9D + 
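+
+/*
+ * Illustrative userspace sketch for the EA_* ioctls declared below
+ * (assumptions: the character device path is hypothetical and not defined
+ * by this header, and the data returned for EA_LEAKAGE is driver dependent):
+ *
+ *	struct sched_params sp = { .cpumask = 0x1, .cluster = 0 };
+ *	int fd = open("/dev/msm_core", O_RDWR);
+ *
+ *	if (fd >= 0)
+ *		ioctl(fd, EA_LEAKAGE, &sp);
+ */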
+struct sched_params { + uint32_t cpumask; + uint32_t cluster; + uint32_t power[TEMP_DATA_POINTS][MAX_NUM_FREQ]; + uint32_t voltage[MAX_NUM_FREQ]; + uint32_t freq[MAX_NUM_FREQ]; +}; + + +#define EA_LEAKAGE _IOWR(MSM_CORE_MAGIC, MSM_CORE_LEAKAGE,\ + struct sched_params) +#define EA_VOLT _IOWR(MSM_CORE_MAGIC, MSM_CORE_VOLTAGE,\ + struct sched_params) +#endif