From 59b22dae44a822d5d77c1cd9c7cbbb5851deaecc Mon Sep 17 00:00:00 2001 From: Vivek Kumar Date: Mon, 19 Nov 2018 13:11:48 +0530 Subject: [PATCH] soc: qcom: Implement early domain handoff driver Early domain in LK runs in parallel with kernel with some memory and cpu core(s) reserved for it. LK updates the status of early services on a shared memory location. In Kernel, this driver provides new APIs to check any early service's status and to communicate with them. It hot adds the reserved cpu(s) and frees lk text once all early services end. Change-Id: I0b29b1886abea3280543d76492044c8946d7690e Signed-off-by: Vivek Kumar --- .../bindings/platform/msm/early_domain.txt | 29 ++ drivers/soc/qcom/Kconfig | 3 + drivers/soc/qcom/Makefile | 1 + drivers/soc/qcom/early_dom.c | 359 ++++++++++++++++++ include/soc/qcom/early_domain.h | 98 +++++ 5 files changed, 490 insertions(+) create mode 100644 Documentation/devicetree/bindings/platform/msm/early_domain.txt create mode 100644 drivers/soc/qcom/early_dom.c create mode 100644 include/soc/qcom/early_domain.h diff --git a/Documentation/devicetree/bindings/platform/msm/early_domain.txt b/Documentation/devicetree/bindings/platform/msm/early_domain.txt new file mode 100644 index 000000000000..c213fb81dfa0 --- /dev/null +++ b/Documentation/devicetree/bindings/platform/msm/early_domain.txt @@ -0,0 +1,29 @@ +Early domain core driver. + +Early domain in bootloader runs multimedia services like display, +camera, audio by reserving cpu(s) and some memory in parallel with +kernel. + +These services may need to stop when they decide. Early domain +driver in kernel communicates with early services through a shared +memory block. This block is reserved during kernel boot. + +Early services running in bootloader update their status in the +shared memory block. 
Multimedia drivers in kernel can make use of +apis provided by early domain core driver to get the status of early +services and free reserved memory resource to system +when the early services end. + +Required properties +- compatible : should be "qcom,early_domain" +- status : should be "disabled" by default. + status should be made "ok" only + if earlydomain is active by + bootloaders. + +Example: + early_domain: early_domain { + compatible = "qcom,early_domain"; + status = "disabled"; + }; + diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 62b7d12629e4..d2ac10ad4c4c 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -392,6 +392,9 @@ config QCOM_SCM_QCPE bool "Para-Virtualized Secure Channel Manager (SCM) support over QCPE" default n +config QCOM_EARLY_DOMAIN + bool "Support for handoff of early domain services running in bootloader" + menuconfig QCOM_SCM_XPU bool "Qualcomm XPU configuration driver" depends on QCOM_SCM diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 7dff5d3b8d72..c8933e97901d 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -71,6 +71,7 @@ obj-$(CONFIG_QCOM_MEMORY_DUMP_V2) += memory_dump_v2.o obj-$(CONFIG_QCOM_MINIDUMP) += msm_minidump.o obj-$(CONFIG_QCOM_DCC) += dcc.o obj-$(CONFIG_QCOM_WATCHDOG_V2) += watchdog_v2.o +obj-$(CONFIG_QCOM_EARLY_DOMAIN) += early_dom.o obj-$(CONFIG_QCOM_COMMON_LOG) += common_log.o obj-$(CONFIG_QCOM_IRQ_HELPER) += irq-helper.o obj-$(CONFIG_TRACER_PKT) += tracer_pkt.o diff --git a/drivers/soc/qcom/early_dom.c b/drivers/soc/qcom/early_dom.c new file mode 100644 index 000000000000..6731284e7ecd --- /dev/null +++ b/drivers/soc/qcom/early_dom.c @@ -0,0 +1,359 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static struct early_domain_core *ed_core_data; +static bool early_domain_enabled; + +bool get_early_service_status(enum service_id sid) +{ + struct early_domain_core *domain; + unsigned long *status; + + if (!early_domain_enabled) + return false; + domain = ed_core_data; + status = &domain->pdata->early_domain_status; + cpu_relax(); + return test_bit(sid, status); +} +EXPORT_SYMBOL(get_early_service_status); + +static void active_early_services(void) +{ + enum service_id core; + int i; + bool active; + + core = EARLY_DOMAIN_CORE; + i = (int)core; + while (i < NUM_SERVICES) { + active = get_early_service_status((enum service_id)i); + if (active) + pr_info("Early service_id:%d active\n", i); + i++; + } +} + +void request_early_service_shutdown(enum service_id sid) +{ + struct early_domain_core *domain = ed_core_data; + unsigned long *request; + + request = &domain->pdata->early_domain_request; + set_bit(sid, request); +} +EXPORT_SYMBOL(request_early_service_shutdown); + +void *get_service_shared_mem_start(enum service_id sid) +{ + void *service_shared_mem_start; + void *early_domain_shm_start; + + if (!early_domain_enabled || sid < EARLY_DISPLAY || sid > NUM_SERVICES) + return NULL; + + early_domain_shm_start = ed_core_data->pdata; + service_shared_mem_start = early_domain_shm_start + + sizeof(struct early_domain_header) + + 
(SERVICE_SHARED_MEM_SIZE * (sid - 1)); + return service_shared_mem_start; +} +EXPORT_SYMBOL(get_service_shared_mem_start); + +static void free_reserved_lk_mem(phys_addr_t paddr, size_t size) +{ + unsigned long pfn_start, pfn_end, pfn_idx; + + memblock_free(paddr, size); + pfn_start = paddr >> PAGE_SHIFT; + pfn_end = (paddr + size) >> PAGE_SHIFT; + for (pfn_idx = pfn_start; pfn_idx < pfn_end; pfn_idx++) + free_reserved_page(pfn_to_page(pfn_idx)); +} + +static void early_domain_work(struct work_struct *work) +{ + struct early_domain_core *core_data; + struct device *cpu_device; + unsigned long *status; + int cpu; + int delay; + int max_delay; + + core_data = ed_core_data; + status = &core_data->pdata->early_domain_status; + delay = 40; + max_delay = 1000; + + /* Poll on status which will be updated by early-domain running in LK */ + while (*status) { + cpu_relax(); + msleep(delay); + delay = (delay > max_delay ? max_delay : delay + 20); + } + + /* An instruction barrier to ensure that execution pipeline is flushed + * and instructions ahead are not executed out of order, leading to + * an unwanted situation where we free resources with a non-zero status. + */ + + isb(); + + /* Once the status is zero, hot add reserved cpu cores and + * free reserved memory + */ + unregister_cpu_notifier(&core_data->ed_notifier); + + /* Take attempts to hot add the cpu(s) reserved for early domain + * with delays + */ + delay = 20; + max_delay = 200; + while (!cpumask_empty(&core_data->cpumask)) { + for_each_cpu(cpu, &core_data->cpumask) { + cpu_device = get_cpu_device(cpu); + if (!cpu_device) { + pr_err("cpu:%d absent in cpu_possible_mask\n" + , cpu); + cpumask_clear_cpu(cpu, &core_data->cpumask); + continue; + } + if (!device_online(cpu_device)) + cpumask_clear_cpu(cpu, &core_data->cpumask); + } + msleep(delay); + delay = (delay > max_delay ?
max_delay : delay + 20); + } + free_reserved_lk_mem(core_data->lk_pool_paddr, core_data->lk_pool_size); + free_reserved_lk_mem(core_data->early_domain_shm, + core_data->early_domain_shm_size); + + /* Remove qos and wake locks */ + + pm_qos_remove_request(&core_data->ed_qos_request); + __pm_relax(&core_data->ed_wake_lock); +} + +static int early_domain_cpu_notifier(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + struct early_domain_core *core_data = container_of(self, + struct early_domain_core, ed_notifier); + unsigned int notifier; + unsigned int cpu; + + notifier = NOTIFY_BAD; + cpu = (long)hcpu; + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_UP_PREPARE: + if (cpumask_test_cpu(cpu, &core_data->cpumask)) + pr_err("Early domain services are running on cpu%d\n" + , cpu); + break; + default: + notifier = NOTIFY_OK; + break; + } + return notifier; +} + +static int init_early_domain_data(struct early_domain_core *core_data) +{ + int cpu; + unsigned long cpumask; + int ret; + + cpumask_clear(&core_data->cpumask); + cpumask = (unsigned long)core_data->pdata->cpumask; + for_each_set_bit(cpu, &cpumask, sizeof(cpumask_t)) + cpumask_set_cpu(cpu, &core_data->cpumask); + + memset(&core_data->ed_qos_request, 0, + sizeof(core_data->ed_qos_request)); + core_data->ed_qos_request.type = PM_QOS_REQ_AFFINE_CORES; + core_data->ed_qos_request.cpus_affine = core_data->cpumask; + pm_qos_add_request(&core_data->ed_qos_request, + PM_QOS_CPU_DMA_LATENCY, PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE); + wakeup_source_init(&core_data->ed_wake_lock, "early_domain"); + __pm_stay_awake(&core_data->ed_wake_lock); + core_data->ed_notifier = (struct notifier_block) { + .notifier_call = early_domain_cpu_notifier, + }; + ret = register_cpu_notifier(&core_data->ed_notifier); + if (ret) { + dev_err(&core_data->pdev->dev, "Could not register cpu notifier\n"); + return ret; + } + INIT_WORK(&core_data->early_domain_work, early_domain_work); + if 
(!schedule_work(&core_data->early_domain_work)) { + dev_err(&core_data->pdev->dev, "Could not schedule work for handoff\n"); + unregister_cpu_notifier(&core_data->ed_notifier); + ret = -ENOMEM; + } + return ret; +} + +/* It is expected that probe is being called before the initialization of + * multimedia drivers like display/camera/audio, so they get correct status + * of any active early service. + */ + +static int early_domain_probe(struct platform_device *pdev) +{ + int ret; + struct early_domain_core *core_data; + struct resource res_shm, res_lk; + struct device_node *parent, *node; + + ret = 0; + parent = of_find_node_by_path("/reserved-memory"); + if (!parent) { + dev_err(&pdev->dev, "Could not find reserved-memory node\n"); + return -EINVAL; + } + /* early_domain_shm reserved memory node will be added by bootloader + * dynamically if earlydomain was enabled. + */ + + node = of_find_node_by_name(parent, "early_domain_shm"); + if (!node) { + dev_err(&pdev->dev, "Could not find early_domain_shm\n"); + of_node_put(parent); + return -EINVAL; + } + ret = of_address_to_resource(node, 0, &res_shm); + if (ret) { + dev_err(&pdev->dev, "No memory address assigned to the region\n"); + of_node_put(node); + of_node_put(parent); + return ret; + } + of_node_put(node); + /* lk_pool reserved memory node will be added by bootloader + * dynamically if earlydomain was enabled. 
+ */ + + node = of_find_node_by_name(parent, "lk_pool"); + if (!node) { + dev_err(&pdev->dev, "Could not find lk_pool\n"); + of_node_put(parent); + return -EINVAL; + } + of_node_put(parent); + ret = of_address_to_resource(node, 0, &res_lk); + if (ret) { + dev_err(&pdev->dev, "No memory address assigned to lk_pool\n"); + of_node_put(node); + return ret; + } + of_node_put(node); + core_data = kzalloc(sizeof(*core_data), GFP_KERNEL); + if (!core_data) + return -ENOMEM; + ed_core_data = core_data; + core_data->pdata = (struct early_domain_header *) + ioremap_nocache(res_shm.start, resource_size(&res_shm)); + if (!core_data->pdata) { + dev_err(&pdev->dev, "%s cannot map reserved early domain space\n" + , __func__); + ret = -ENOMEM; + goto err; + } + core_data->early_domain_shm = (dma_addr_t)res_shm.start; + core_data->early_domain_shm_size = res_shm.end - res_shm.start; + core_data->lk_pool_paddr = (dma_addr_t)res_lk.start; + core_data->lk_pool_size = res_lk.end - res_lk.start; + ret = memcmp(core_data->pdata->magic, EARLY_DOMAIN_MAGIC, + MAGIC_SIZE); + if (ret) { + dev_err(&pdev->dev, "Early domain reserved page has been corrupted\n"); + ret = -EINVAL; + goto err; + } + core_data->pdev = pdev; + ret = init_early_domain_data(core_data); + if (ret) + goto err; + platform_set_drvdata(pdev, core_data); + early_domain_enabled = true; + active_early_services(); + return ret; +err: + if (pm_qos_request_active(&core_data->ed_qos_request)) + pm_qos_remove_request(&core_data->ed_qos_request); + if (core_data->ed_wake_lock.active) + wakeup_source_trash(&core_data->ed_wake_lock); + kfree(core_data); + return ret; +} + +static int early_domain_remove(struct platform_device *pdev) +{ + struct early_domain_core *core_data; + + core_data = platform_get_drvdata(pdev); + pm_qos_remove_request(&core_data->ed_qos_request); + __pm_relax(&core_data->ed_wake_lock); + unregister_cpu_notifier(&core_data->ed_notifier); + kfree(core_data); + return 0; +} + +static const struct of_device_id 
early_domain_table[] = { + { .compatible = "qcom,early_domain" }, + {} +}; + +static struct platform_driver early_domain_driver = { + .probe = early_domain_probe, + .remove = early_domain_remove, + .driver = { + .name = "early_domain", + .owner = THIS_MODULE, + .of_match_table = early_domain_table, + }, +}; + +static int __init earlydom_init(void) +{ + return platform_driver_register(&early_domain_driver); +} + +fs_initcall(earlydom_init); + +static void __exit earlydom_exit(void) +{ + platform_driver_unregister(&early_domain_driver); +} + +module_exit(earlydom_exit); +MODULE_DESCRIPTION("QCOM EARLYDOMAIN DRIVER"); +MODULE_LICENSE("GPL v2"); +MODULE_DEVICE_TABLE(of, early_domain_table); diff --git a/include/soc/qcom/early_domain.h b/include/soc/qcom/early_domain.h new file mode 100644 index 000000000000..076f2d6fac2b --- /dev/null +++ b/include/soc/qcom/early_domain.h @@ -0,0 +1,98 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* Early domain services invoked in bootloaders run in parallel after + * kernel takes over. One page in memory is reserved to pass information + * between bootloader and kernel. This page has a header to capture status, + * request and cpumask described in structure early_domain_header. Early + * domain core driver in kernel reads this header to decide the status of + * services and takes necessary action. The rest of the page is intended to + * pass service specific information. 
Offsets for service specific areas are + * defined in macros, and it's the service specific driver's responsibility + * to operate in their defined areas to pass service specific information. + * * + * * + * * + * ***************************************** + * * Header * + * * * + * ***************************************** + * * * + * * Early display * + * * * + * * * + * ***************************************** + * * * + * * Early camera * + * * * + * * * + * ***************************************** + * * * + * * Early audio * + * * * + * * * + * ***************************************** + * * + */ + +enum service_id { + EARLY_DOMAIN_CORE = 0, + EARLY_DISPLAY, + EARLY_CAMERA, + EARLY_AUDIO, +}; + +#ifdef CONFIG_QCOM_EARLY_DOMAIN +#include +#include +#include +#include + +#define EARLY_DOMAIN_MAGIC "ERLYDOM" +#define MAGIC_SIZE 8 +#define NUM_SERVICES 3 +#define SERVICE_SHARED_MEM_SIZE ((PAGE_SIZE)/(NUM_SERVICES)) + +struct early_domain_header { + char magic[MAGIC_SIZE]; + unsigned long cpumask; + unsigned long early_domain_status; + unsigned long early_domain_request; +}; + +struct early_domain_core { + struct platform_device *pdev; + struct early_domain_header *pdata; + struct work_struct early_domain_work; + phys_addr_t lk_pool_paddr; + size_t lk_pool_size; + phys_addr_t early_domain_shm; + size_t early_domain_shm_size; + cpumask_t cpumask; + struct notifier_block ed_notifier; + struct pm_qos_request ed_qos_request; + struct wakeup_source ed_wake_lock; +}; + +void request_early_service_shutdown(enum service_id); +bool get_early_service_status(enum service_id sid); +void *get_service_shared_mem_start(enum service_id sid); + +#else + +static inline void request_early_service_shutdown(enum service_id sid) {} +static inline bool get_early_service_status(enum service_id sid) + { return false; } +static inline void *get_service_shared_mem_start(enum service_id sid) + { return NULL; } + +#endif