From 232738f76fd30de10a8102b43417b25e355b2e5b Mon Sep 17 00:00:00 2001
From: Amit Blay
Date: Mon, 10 Jul 2017 18:30:20 +0300
Subject: [PATCH] soc: qcom: SCM front-end over QCPE

This is an implementation of a para-virtualized SCM driver. This
driver is the front-end (FE); the back-end (BE) is QCPE, running in a
hypervisor. The FE driver forwards SCM calls to the BE over HAB.

Change-Id: I88c269e856b0a6cc20b9ab8bf10110842d90a382
Signed-off-by: Amit Blay
---
 drivers/soc/Makefile        |    1 +
 drivers/soc/qcom/Kconfig    |    4 +
 drivers/soc/qcom/Makefile   |    1 +
 drivers/soc/qcom/scm_qcpe.c | 1137 +++++++++++++++++++++++++++++++++++
 include/soc/qcom/scm.h      |    4 +-
 5 files changed, 1145 insertions(+), 2 deletions(-)
 create mode 100644 drivers/soc/qcom/scm_qcpe.c

diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 6358d1256bb1..fccbdf313e08 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_MACH_DOVE)		+= dove/
 obj-$(CONFIG_ARCH_MEDIATEK)	+= mediatek/
 obj-$(CONFIG_ARCH_QCOM)		+= qcom/
 obj-$(CONFIG_ARCH_ROCKCHIP)	+= rockchip/
+obj-$(CONFIG_QCOM_SCM_QCPE)	+= qcom/
 obj-$(CONFIG_ARCH_SUNXI)	+= sunxi/
 obj-$(CONFIG_ARCH_TEGRA)	+= tegra/
 obj-$(CONFIG_SOC_TI)		+= ti/
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 0c4414d27eeb..907960cfa9d5 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -379,6 +379,10 @@ config QCOM_SCM
 	bool "Secure Channel Manager (SCM) support"
 	default n
 
+config QCOM_SCM_QCPE
+	bool "Para-Virtualized Secure Channel Manager (SCM) support over QCPE"
+	default n
+
 menuconfig QCOM_SCM_XPU
 	bool "Qualcomm XPU configuration driver"
 	depends on QCOM_SCM
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 5eeede23333d..0bf54bedd6ea 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -62,6 +62,7 @@ CFLAGS_scm.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1)
 
 obj-$(CONFIG_QCOM_SCM_ERRATA)	+= scm-errata.o
 obj-$(CONFIG_QCOM_SCM)		+= scm.o scm-boot.o
+obj-$(CONFIG_QCOM_SCM_QCPE)	+= scm_qcpe.o
 obj-$(CONFIG_QCOM_SCM_XPU)	+= scm-xpu.o
 obj-$(CONFIG_QCOM_WATCHDOG_V2)	+= watchdog_v2.o
 obj-$(CONFIG_QCOM_MEMORY_DUMP)	+= memory_dump.o
diff --git a/drivers/soc/qcom/scm_qcpe.c b/drivers/soc/qcom/scm_qcpe.c
new file mode 100644
index 000000000000..54a978157bda
--- /dev/null
+++ b/drivers/soc/qcom/scm_qcpe.c
@@ -0,0 +1,1137 @@
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include <asm/cacheflush.h>
+#include <asm/compiler.h>
+
+#include <soc/qcom/scm.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/scm.h>
+
+#include <linux/habmm.h>
+
+#define SCM_ENOMEM		(-5)
+#define SCM_EOPNOTSUPP		(-4)
+#define SCM_EINVAL_ADDR		(-3)
+#define SCM_EINVAL_ARG		(-2)
+#define SCM_ERROR		(-1)
+#define SCM_INTERRUPTED		(1)
+#define SCM_EBUSY		(-55)
+#define SCM_V2_EBUSY		(-12)
+
+static DEFINE_MUTEX(scm_lock);
+
+/*
+ * MSM8996 V2 requires a lock to protect against
+ * concurrent accesses between the limits management
+ * driver and the clock controller
+ */
+DEFINE_MUTEX(scm_lmh_lock);
+
+#define SCM_EBUSY_WAIT_MS	30
+#define SCM_EBUSY_MAX_RETRY	67
+
+#define N_EXT_SCM_ARGS		7
+#define FIRST_EXT_ARG_IDX	3
+#define SMC_ATOMIC_SYSCALL	31
+#define N_REGISTER_ARGS		(MAX_SCM_ARGS - N_EXT_SCM_ARGS + 1)
+#define SMC64_MASK		0x40000000
+#define SMC_ATOMIC_MASK		0x80000000
+#define IS_CALL_AVAIL_CMD	1
+
+#define SCM_BUF_LEN(__cmd_size, __resp_size) ({ \
+	size_t x = __cmd_size + __resp_size; \
+	size_t y = sizeof(struct scm_command) + sizeof(struct scm_response); \
+	size_t result; \
+	if (x < __cmd_size || (x + y) < x) \
+		result = 0; \
+	else \
+		result = x + y; \
+	result; \
+	})
+/**
+ * struct scm_command - one SCM command buffer
+ * @len: total available memory for command and response
+ * @buf_offset: start of command buffer
+ * @resp_hdr_offset: start of response buffer
+ * @id: command to be executed
+ * @buf: buffer returned from scm_get_command_buffer()
+ *
+ * An SCM command is laid out in memory as follows:
+ *
+ *	------------------- <--- struct scm_command
+ *	| command header  |
+ *	------------------- <--- scm_get_command_buffer()
+ *	| command buffer  |
+ *	------------------- <--- struct scm_response and
+ *	| response header |      scm_command_to_response()
+ *	------------------- <--- scm_get_response_buffer()
+ *	| response buffer |
+ *	-------------------
+ *
+ * There can be arbitrary padding between the headers and buffers so
+ * you should always use the appropriate scm_get_*_buffer() routines
+ * to access the buffers in a safe manner.
+ */
+struct scm_command {
+	u32	len;
+	u32	buf_offset;
+	u32	resp_hdr_offset;
+	u32	id;
+	u32	buf[0];
+};
+
+/**
+ * struct scm_response - one SCM response buffer
+ * @len: total available memory for response
+ * @buf_offset: start of response data relative to start of scm_response
+ * @is_complete: indicates if the command has finished processing
+ */
+struct scm_response {
+	u32	len;
+	u32	buf_offset;
+	u32	is_complete;
+};
+
+#ifdef CONFIG_ARM64
+
+#define R0_STR "x0"
+#define R1_STR "x1"
+#define R2_STR "x2"
+#define R3_STR "x3"
+#define R4_STR "x4"
+#define R5_STR "x5"
+#define R6_STR "x6"
+
+/* Outer caches unsupported on ARM64 platforms */
+#define outer_inv_range(x, y)
+#define outer_flush_range(x, y)
+
+#define __cpuc_flush_dcache_area __flush_dcache_area
+
+#else
+
+#define R0_STR "r0"
+#define R1_STR "r1"
+#define R2_STR "r2"
+#define R3_STR "r3"
+#define R4_STR "r4"
+#define R5_STR "r5"
+#define R6_STR "r6"
+
+#endif
+
+/**
+ * scm_command_to_response() - Get a pointer to a scm_response
+ * @cmd: command
+ *
+ * Returns a pointer to a response for a command.
+ */
+static inline struct scm_response *scm_command_to_response(
+		const struct scm_command *cmd)
+{
+	return (void *)cmd + cmd->resp_hdr_offset;
+}
+
+/**
+ * scm_get_command_buffer() - Get a pointer to a command buffer
+ * @cmd: command
+ *
+ * Returns a pointer to the command buffer of a command.
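+ *
+ * As a worked example of the layout above (sizes illustrative only):
+ * for cmd_len = 16 and resp_len = 8, SCM_BUF_LEN() reserves
+ * sizeof(struct scm_command) + 16 + sizeof(struct scm_response) + 8
+ * bytes; scm_call_common() then sets buf_offset to
+ * offsetof(struct scm_command, buf) and resp_hdr_offset to
+ * buf_offset + 16, so this helper simply returns cmd->buf.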
+ */
+static inline void *scm_get_command_buffer(const struct scm_command *cmd)
+{
+	return (void *)cmd->buf;
+}
+
+/**
+ * scm_get_response_buffer() - Get a pointer to a response buffer
+ * @rsp: response
+ *
+ * Returns a pointer to a response buffer of a response.
+ */
+static inline void *scm_get_response_buffer(const struct scm_response *rsp)
+{
+	return (void *)rsp + rsp->buf_offset;
+}
+
+static int scm_remap_error(int err)
+{
+	switch (err) {
+	case SCM_ERROR:
+		return -EIO;
+	case SCM_EINVAL_ADDR:
+	case SCM_EINVAL_ARG:
+		return -EINVAL;
+	case SCM_EOPNOTSUPP:
+		return -EOPNOTSUPP;
+	case SCM_ENOMEM:
+		return -ENOMEM;
+	case SCM_EBUSY:
+		return SCM_EBUSY;
+	case SCM_V2_EBUSY:
+		return SCM_V2_EBUSY;
+	}
+	return -EINVAL;
+}
+
+static int scm_call_qcpe(u32 fn_id, struct scm_desc *desc)
+{
+	static bool opened;
+	static u32 handle;
+	u32 ret;
+	u32 size_bytes;
+
+	struct smc_params_s {
+		uint64_t x0;
+		uint64_t x1;
+		uint64_t x2;
+		uint64_t x3;
+		uint64_t x4;
+		uint64_t x5;
+		uint64_t sid;
+	} smc_params;
+
+	pr_info("scm_call_qcpe: IN: 0x%x, 0x%x, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx",
+		fn_id, desc->arginfo, desc->args[0], desc->args[1],
+		desc->args[2], desc->args[3], desc->args[4],
+		desc->args[5], desc->args[6]);
+
+	if (!opened) {
+		ret = habmm_socket_open(&handle, MM_QCPE_VM1, 0, 0);
+		if (ret != HAB_OK) {
+			pr_err("scm_call_qcpe: habmm_socket_open failed with ret = %d",
+				ret);
+			return ret;
+		}
+		opened = true;
+	}
+
+	smc_params.x0 = fn_id | SMC64_MASK;
+	smc_params.x1 = desc->arginfo;
+	smc_params.x2 = desc->args[0];
+	smc_params.x3 = desc->args[1];
+	smc_params.x4 = desc->args[2];
+	smc_params.x5 = desc->x5;
+	smc_params.sid = 0;
+
+	ret = habmm_socket_send(handle, &smc_params, sizeof(smc_params), 0);
+	if (ret != HAB_OK)
+		return ret;
+
+	size_bytes = sizeof(smc_params);
+
+	ret = habmm_socket_recv(handle, &smc_params, &size_bytes, 0, 0);
+	if (ret != HAB_OK)
+		return ret;
+
+	desc->ret[0] = smc_params.x1;
+	desc->ret[1] = smc_params.x2;
+	desc->ret[2] = smc_params.x3;
+
+	pr_info("scm_call_qcpe: OUT: 0x%llx, 0x%llx, 0x%llx",
+		desc->ret[0], desc->ret[1], desc->ret[2]);
+
+	return 0;
+}
+
+static u32 smc(u32 cmd_addr)
+{
+	int context_id;
+	int ret;
+	uint64_t x0;
+	struct scm_desc desc = {0};
+
+	register u32 r0 asm("r0") = 1;
+	register u32 r1 asm("r1") = (uintptr_t)&context_id;
+	register u32 r2 asm("r2") = cmd_addr;
+
+	x0 = r0;
+	desc.arginfo = r1;
+	desc.args[0] = r2;
+
+	ret = scm_call_qcpe(x0, &desc);
+
+	if (ret < 0)
+		return scm_remap_error(ret);
+
+	return 0;
+}
+
+static int __scm_call(const struct scm_command *cmd)
+{
+	int ret;
+	u32 cmd_addr = virt_to_phys(cmd);
+
+	/*
+	 * Flush the command buffer so that the secure world sees
+	 * the correct data.
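+	 *
+	 * On ARM64 builds the outer_flush_range() below compiles away
+	 * (outer caches are unsupported there, so it is stubbed out
+	 * above) and __cpuc_flush_dcache_area() is mapped to
+	 * __flush_dcache_area().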
+ */ + __cpuc_flush_dcache_area((void *)cmd, cmd->len); + outer_flush_range(cmd_addr, cmd_addr + cmd->len); + + ret = smc(cmd_addr); + if (ret < 0) { + if (ret != SCM_EBUSY) + pr_err("scm_call failed with error code %d\n", ret); + ret = scm_remap_error(ret); + } + return ret; +} + +#ifndef CONFIG_ARM64 +static void scm_inv_range(unsigned long start, unsigned long end) +{ + u32 cacheline_size, ctr; + + asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr)); + cacheline_size = 4 << ((ctr >> 16) & 0xf); + + start = round_down(start, cacheline_size); + end = round_up(end, cacheline_size); + outer_inv_range(start, end); + while (start < end) { + asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start) + : "memory"); + start += cacheline_size; + } + mb(); /* Make sure memory is visible to TZ */ + isb(); +} +#else + +static void scm_inv_range(unsigned long start, unsigned long end) +{ + dmac_inv_range((void *)start, (void *)end); +} +#endif + +/** + * scm_call_common() - Send an SCM command + * @svc_id: service identifier + * @cmd_id: command identifier + * @cmd_buf: command buffer + * @cmd_len: length of the command buffer + * @resp_buf: response buffer + * @resp_len: length of the response buffer + * @scm_buf: internal scm structure used for passing data + * @scm_buf_len: length of the internal scm structure + * + * Core function to scm call. Initializes the given cmd structure with + * appropriate values and makes the actual scm call. Validation of cmd + * pointer and length must occur in the calling function. + * + * Returns the appropriate error code from the scm call + */ + +static int scm_call_common(u32 svc_id, u32 cmd_id, const void *cmd_buf, + size_t cmd_len, void *resp_buf, size_t resp_len, + struct scm_command *scm_buf, + size_t scm_buf_length) +{ + int ret; + struct scm_response *rsp; + unsigned long start, end; + + scm_buf->len = scm_buf_length; + scm_buf->buf_offset = offsetof(struct scm_command, buf); + scm_buf->resp_hdr_offset = scm_buf->buf_offset + cmd_len; + scm_buf->id = (svc_id << 10) | cmd_id; + + if (cmd_buf) + memcpy(scm_get_command_buffer(scm_buf), cmd_buf, cmd_len); + + mutex_lock(&scm_lock); + ret = __scm_call(scm_buf); + mutex_unlock(&scm_lock); + if (ret) + return ret; + + rsp = scm_command_to_response(scm_buf); + start = (unsigned long)rsp; + + do { + scm_inv_range(start, start + sizeof(*rsp)); + } while (!rsp->is_complete); + + end = (unsigned long)scm_get_response_buffer(rsp) + resp_len; + scm_inv_range(start, end); + + if (resp_buf) + memcpy(resp_buf, scm_get_response_buffer(rsp), resp_len); + + return ret; +} + +/* + * Sometimes the secure world may be busy waiting for a particular resource. + * In those situations, it is expected that the secure world returns a special + * error code (SCM_EBUSY). Retry any scm_call that fails with this error code, + * but with a timeout in place. Also, don't move this into scm_call_common, + * since we want the first attempt to be the "fastpath". 
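+ *
+ * With SCM_EBUSY_WAIT_MS = 30 and SCM_EBUSY_MAX_RETRY = 67, the retry
+ * loop below backs off for roughly two seconds in total, and warns
+ * once 33 retries (about one second of waiting) have gone by.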
+ */
+static int _scm_call_retry(u32 svc_id, u32 cmd_id, const void *cmd_buf,
+				size_t cmd_len, void *resp_buf,
+				size_t resp_len, struct scm_command *cmd,
+				size_t len)
+{
+	int ret, retry_count = 0;
+
+	do {
+		ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len,
+					resp_buf, resp_len, cmd, len);
+		if (ret == SCM_EBUSY)
+			msleep(SCM_EBUSY_WAIT_MS);
+		if (retry_count == 33)
+			pr_warn("scm: secure world has been busy for 1 second!\n");
+	} while (ret == SCM_EBUSY && (retry_count++ < SCM_EBUSY_MAX_RETRY));
+
+	if (ret == SCM_EBUSY)
+		pr_err("scm: secure world busy (rc = SCM_EBUSY)\n");
+
+	return ret;
+}
+
+/**
+ * scm_call_noalloc - Send an SCM command
+ *
+ * Same as scm_call except clients pass in a buffer (@scm_buf) to be used for
+ * scm internal structures. The buffer should be allocated with
+ * DEFINE_SCM_BUFFER to account for the proper alignment and size.
+ */
+int scm_call_noalloc(u32 svc_id, u32 cmd_id, const void *cmd_buf,
+		size_t cmd_len, void *resp_buf, size_t resp_len,
+		void *scm_buf, size_t scm_buf_len)
+{
+	int ret;
+	size_t len = SCM_BUF_LEN(cmd_len, resp_len);
+
+	if (len == 0)
+		return -EINVAL;
+
+	if (!IS_ALIGNED((unsigned long)scm_buf, PAGE_SIZE))
+		return -EINVAL;
+
+	memset(scm_buf, 0, scm_buf_len);
+
+	ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf,
+				resp_len, scm_buf, len);
+	return ret;
+}
+
+struct scm_extra_arg {
+	union {
+		u32 args32[N_EXT_SCM_ARGS];
+		u64 args64[N_EXT_SCM_ARGS];
+	};
+};
+
+static enum scm_interface_version {
+	SCM_UNKNOWN,
+	SCM_LEGACY,
+	SCM_ARMV8_32,
+	SCM_ARMV8_64,
+} scm_version = SCM_UNKNOWN;
+
+/* This will be set to specify SMC32 or SMC64 */
+static u32 scm_version_mask;
+
+bool is_scm_armv8(void)
+{
+	int ret;
+	u64 ret1, x0;
+
+	struct scm_desc desc = {0};
+
+	if (likely(scm_version != SCM_UNKNOWN))
+		return (scm_version == SCM_ARMV8_32) ||
+			(scm_version == SCM_ARMV8_64);
+	/*
+	 * This is a one time check that runs on the first ever
+	 * invocation of is_scm_armv8. We might be called in atomic
+	 * context so no mutexes etc. Also, we can't use the scm_call2
+	 * or scm_call2_atomic APIs directly since they depend on this
+	 * init.
+	 */
+
+	/* First try a SMC64 call */
+	scm_version = SCM_ARMV8_64;
+	ret1 = 0;
+	x0 = SCM_SIP_FNID(SCM_SVC_INFO, IS_CALL_AVAIL_CMD) | SMC_ATOMIC_MASK;
+
+	desc.arginfo = SCM_ARGS(1);
+	desc.args[0] = x0;
+
+	ret = scm_call_qcpe(x0 | SMC64_MASK, &desc);
+
+	ret1 = desc.ret[0];
+
+	if (ret || !ret1) {
+		/* Try SMC32 call */
+		ret1 = 0;
+
+		desc.arginfo = SCM_ARGS(1);
+		desc.args[0] = x0;
+
+		ret = scm_call_qcpe(x0, &desc);
+
+		ret1 = desc.ret[0];
+
+		if (ret || !ret1)
+			scm_version = SCM_LEGACY;
+		else
+			scm_version = SCM_ARMV8_32;
+	} else {
+		scm_version_mask = SMC64_MASK;
+	}
+
+	pr_debug("scm_call: scm version is %x, mask is %x\n", scm_version,
+		 scm_version_mask);
+
+	return (scm_version == SCM_ARMV8_32) ||
+		(scm_version == SCM_ARMV8_64);
+}
+EXPORT_SYMBOL(is_scm_armv8);
+
+/*
+ * If there are more than N_REGISTER_ARGS, allocate a buffer and place
+ * the additional arguments in it. The extra argument buffer will be
+ * pointed to by X5.
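+ *
+ * For example (with MAX_SCM_ARGS defined as 10 in scm.h,
+ * N_REGISTER_ARGS works out to 4): a call with six arguments keeps
+ * args[0..2] in registers, while args[3..5] are copied into the
+ * kzalloc'ed extra-argument buffer, whose physical address is placed
+ * in X5 and flushed so the secure side can read it.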
+ */
+static int allocate_extra_arg_buffer(struct scm_desc *desc, gfp_t flags)
+{
+	int i, j;
+	struct scm_extra_arg *argbuf;
+	int arglen = desc->arginfo & 0xf;
+	size_t argbuflen = PAGE_ALIGN(sizeof(struct scm_extra_arg));
+
+	desc->x5 = desc->args[FIRST_EXT_ARG_IDX];
+
+	if (likely(arglen <= N_REGISTER_ARGS)) {
+		desc->extra_arg_buf = NULL;
+		return 0;
+	}
+
+	argbuf = kzalloc(argbuflen, flags);
+	if (!argbuf)
+		return -ENOMEM;
+
+	desc->extra_arg_buf = argbuf;
+
+	j = FIRST_EXT_ARG_IDX;
+	if (scm_version == SCM_ARMV8_64)
+		for (i = 0; i < N_EXT_SCM_ARGS; i++)
+			argbuf->args64[i] = desc->args[j++];
+	else
+		for (i = 0; i < N_EXT_SCM_ARGS; i++)
+			argbuf->args32[i] = desc->args[j++];
+	desc->x5 = virt_to_phys(argbuf);
+	__cpuc_flush_dcache_area(argbuf, argbuflen);
+	outer_flush_range(virt_to_phys(argbuf),
+			  virt_to_phys(argbuf) + argbuflen);
+
+	return 0;
+}
+
+/**
+ * scm_call2() - Invoke a syscall in the secure world
+ * @fn_id: The function ID for this syscall
+ * @desc: Descriptor structure containing arguments and return values
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This should *only* be called in pre-emptible context.
+ *
+ * A note on cache maintenance:
+ * Note that any buffers that are expected to be accessed by the secure world
+ * must be flushed before invoking scm_call and invalidated in the cache
+ * immediately after scm_call returns. An important point that must be noted
+ * is that on ARMV8 architectures, invalidation actually also causes a dirty
+ * cache line to be cleaned (flushed + unset-dirty-bit). Therefore it is of
+ * paramount importance that the buffer be flushed before invoking scm_call2,
+ * even if you don't care about the contents of that buffer.
+ *
+ * Note that cache maintenance on the argument buffer (desc->args) is taken
+ * care of by scm_call2; however, callers are responsible for any other
+ * cached buffers passed over to the secure world.
+ */
+int scm_call2(u32 fn_id, struct scm_desc *desc)
+{
+	int arglen = desc->arginfo & 0xf;
+	int ret;
+	u64 x0;
+
+	if (unlikely(!is_scm_armv8()))
+		return -ENODEV;
+
+	ret = allocate_extra_arg_buffer(desc, GFP_NOIO);
+	if (ret)
+		return ret;
+
+	x0 = fn_id | scm_version_mask;
+
+	mutex_lock(&scm_lock);
+
+	if (SCM_SVC_ID(fn_id) == SCM_SVC_LMH)
+		mutex_lock(&scm_lmh_lock);
+
+	desc->ret[0] = desc->ret[1] = desc->ret[2] = 0;
+
+	trace_scm_call_start(x0, desc);
+
+	ret = scm_call_qcpe(x0, desc);
+
+	trace_scm_call_end(desc);
+
+	if (SCM_SVC_ID(fn_id) == SCM_SVC_LMH)
+		mutex_unlock(&scm_lmh_lock);
+
+	mutex_unlock(&scm_lock);
+
+	if (ret < 0)
+		pr_err("scm_call failed: func id %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
+			x0, ret, desc->ret[0], desc->ret[1], desc->ret[2]);
+
+	if (arglen > N_REGISTER_ARGS)
+		kfree(desc->extra_arg_buf);
+	if (ret < 0)
+		return scm_remap_error(ret);
+	return 0;
+}
+EXPORT_SYMBOL(scm_call2);
+
+/**
+ * scm_call2_atomic() - Invoke a syscall in the secure world
+ *
+ * Similar to scm_call2 except that this can be invoked in atomic context.
+ * There is also no retry mechanism implemented. Please ensure that the
+ * secure world syscall can be executed in such a context and can complete
+ * in a timely manner.
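+ *
+ * Example (illustrative only; SOME_CMD is a placeholder command ID):
+ *
+ *	struct scm_desc desc = {0};
+ *
+ *	desc.args[0] = some_arg;
+ *	desc.arginfo = SCM_ARGS(1);
+ *	ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_IO, SOME_CMD), &desc);
+ *	if (!ret)
+ *		result = desc.ret[0];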
+ */ +int scm_call2_atomic(u32 fn_id, struct scm_desc *desc) +{ + int arglen = desc->arginfo & 0xf; + int ret; + u64 x0; + + if (unlikely(!is_scm_armv8())) + return -ENODEV; + + ret = allocate_extra_arg_buffer(desc, GFP_ATOMIC); + if (ret) + return ret; + + x0 = fn_id | BIT(SMC_ATOMIC_SYSCALL) | scm_version_mask; + + pr_debug("scm_call: func id %#llx, args: %#x, %#llx, %#llx, %#llx, %#llx\n", + x0, desc->arginfo, desc->args[0], desc->args[1], + desc->args[2], desc->x5); + + ret = scm_call_qcpe(x0, desc); + + if (ret < 0) + pr_err("scm_call failed: func id %#llx, arginfo: %#x, args: %#llx, %#llx, %#llx, %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n", + x0, desc->arginfo, desc->args[0], desc->args[1], + desc->args[2], desc->x5, ret, desc->ret[0], + desc->ret[1], desc->ret[2]); + + if (arglen > N_REGISTER_ARGS) + kfree(desc->extra_arg_buf); + if (ret < 0) + return scm_remap_error(ret); + return ret; +} + +/** + * scm_call() - Send an SCM command + * @svc_id: service identifier + * @cmd_id: command identifier + * @cmd_buf: command buffer + * @cmd_len: length of the command buffer + * @resp_buf: response buffer + * @resp_len: length of the response buffer + * + * Sends a command to the SCM and waits for the command to finish processing. + * + * A note on cache maintenance: + * Note that any buffers that are expected to be accessed by the secure world + * must be flushed before invoking scm_call and invalidated in the cache + * immediately after scm_call returns. Cache maintenance on the command and + * response buffers is taken care of by scm_call; however, callers are + * responsible for any other cached buffers passed over to the secure world. + */ +int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len, + void *resp_buf, size_t resp_len) +{ + struct scm_command *cmd; + int ret; + size_t len = SCM_BUF_LEN(cmd_len, resp_len); + + if (len == 0 || PAGE_ALIGN(len) < len) + return -EINVAL; + + cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf, + resp_len, cmd, len); + if (unlikely(ret == SCM_EBUSY)) + ret = _scm_call_retry(svc_id, cmd_id, cmd_buf, cmd_len, + resp_buf, resp_len, cmd, PAGE_ALIGN(len)); + kfree(cmd); + return ret; +} +EXPORT_SYMBOL(scm_call); + +#define SCM_CLASS_REGISTER (0x2 << 8) +#define SCM_MASK_IRQS BIT(5) +#define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \ + SCM_CLASS_REGISTER | \ + SCM_MASK_IRQS | \ + (n & 0xf)) + +/** + * scm_call_atomic1() - Send an atomic SCM command with one argument + * @svc_id: service identifier + * @cmd_id: command identifier + * @arg1: first argument + * + * This shall only be used with commands that are guaranteed to be + * uninterruptable, atomic and SMP safe. 
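+ *
+ * The legacy call ID is built with SCM_ATOMIC(): the service and
+ * command IDs end up in bits [31:12], SCM_CLASS_REGISTER and
+ * SCM_MASK_IRQS are set, and the argument count sits in bits [3:0].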
+ */
+s32 scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
+{
+	int context_id;
+	int ret;
+	uint64_t x0;
+	struct scm_desc desc = {0};
+
+	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
+	register u32 r1 asm("r1") = (uintptr_t)&context_id;
+	register u32 r2 asm("r2") = arg1;
+
+	x0 = r0;
+	desc.arginfo = r1;
+	desc.args[0] = r2;
+
+	ret = scm_call_qcpe(x0, &desc);
+
+	if (ret < 0)
+		return scm_remap_error(ret);
+
+	return 0;
+}
+EXPORT_SYMBOL(scm_call_atomic1);
+
+/**
+ * scm_call_atomic1_1() - SCM command with one argument and one return value
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @arg1: first argument
+ * @ret1: first return value
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptable, atomic and SMP safe.
+ */
+s32 scm_call_atomic1_1(u32 svc, u32 cmd, u32 arg1, u32 *ret1)
+{
+	int context_id;
+	int ret;
+	uint64_t x0;
+	struct scm_desc desc = {0};
+
+	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
+	register u32 r1 asm("r1") = (uintptr_t)&context_id;
+	register u32 r2 asm("r2") = arg1;
+
+	x0 = r0;
+	desc.arginfo = r1;
+	desc.args[0] = r2;
+
+	ret = scm_call_qcpe(x0, &desc);
+
+	if (ret < 0)
+		return scm_remap_error(ret);
+
+	*ret1 = desc.ret[0];
+
+	return 0;
+}
+EXPORT_SYMBOL(scm_call_atomic1_1);
+
+/**
+ * scm_call_atomic2() - Send an atomic SCM command with two arguments
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @arg1: first argument
+ * @arg2: second argument
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptable, atomic and SMP safe.
+ */
+s32 scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
+{
+	int context_id;
+	int ret;
+	uint64_t x0;
+	struct scm_desc desc = {0};
+
+	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 2);
+	register u32 r1 asm("r1") = (uintptr_t)&context_id;
+	register u32 r2 asm("r2") = arg1;
+	register u32 r3 asm("r3") = arg2;
+
+	x0 = r0;
+	desc.arginfo = r1;
+	desc.args[0] = r2;
+	desc.args[1] = r3;
+
+	ret = scm_call_qcpe(x0, &desc);
+
+	if (ret < 0)
+		return scm_remap_error(ret);
+
+	return 0;
+}
+EXPORT_SYMBOL(scm_call_atomic2);
+
+/**
+ * scm_call_atomic3() - Send an atomic SCM command with three arguments
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @arg1: first argument
+ * @arg2: second argument
+ * @arg3: third argument
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptable, atomic and SMP safe.
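+ *
+ * As with the other legacy wrappers here, the arguments are staged in
+ * r0..r4, repacked into a struct scm_desc, and forwarded over HAB by
+ * scm_call_qcpe(); any return values come back in desc.ret[].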
+ */
+s32 scm_call_atomic3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3)
+{
+	int context_id;
+	int ret;
+	uint64_t x0;
+	struct scm_desc desc = {0};
+
+	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 3);
+	register u32 r1 asm("r1") = (uintptr_t)&context_id;
+	register u32 r2 asm("r2") = arg1;
+	register u32 r3 asm("r3") = arg2;
+	register u32 r4 asm("r4") = arg3;
+
+	x0 = r0;
+	desc.arginfo = r1;
+	desc.args[0] = r2;
+	desc.args[1] = r3;
+	desc.args[2] = r4;
+
+	ret = scm_call_qcpe(x0, &desc);
+
+	if (ret < 0)
+		return scm_remap_error(ret);
+
+	return 0;
+}
+EXPORT_SYMBOL(scm_call_atomic3);
+
+s32 scm_call_atomic4_3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
+	u32 arg3, u32 arg4, u32 *ret1, u32 *ret2)
+{
+	int ret;
+	int context_id;
+	uint64_t x0;
+	struct scm_desc desc = {0};
+
+	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 4);
+	register u32 r1 asm("r1") = (uintptr_t)&context_id;
+	register u32 r2 asm("r2") = arg1;
+	register u32 r3 asm("r3") = arg2;
+	register u32 r4 asm("r4") = arg3;
+	register u32 r5 asm("r5") = arg4;
+
+	x0 = r0;
+	desc.arginfo = r1;
+	desc.args[0] = r2;
+	desc.args[1] = r3;
+	desc.args[2] = r4;
+	desc.args[3] = r5;
+
+	ret = scm_call_qcpe(x0, &desc);
+
+	if (ret < 0)
+		return scm_remap_error(ret);
+
+	*ret1 = desc.ret[0];
+	*ret2 = desc.ret[1];
+
+	return 0;
+}
+EXPORT_SYMBOL(scm_call_atomic4_3);
+
+/**
+ * scm_call_atomic5_3() - SCM command with five arguments and three
+ * return values
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @arg1: first argument
+ * @arg2: second argument
+ * @arg3: third argument
+ * @arg4: fourth argument
+ * @arg5: fifth argument
+ * @ret1: first return value
+ * @ret2: second return value
+ * @ret3: third return value
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptable, atomic and SMP safe.
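+ *
+ * Example (hypothetical IDs, for illustration only):
+ *
+ *	u32 out1, out2, out3;
+ *
+ *	ret = scm_call_atomic5_3(SCM_SVC_BOOT, SOME_CMD, a1, a2, a3, a4,
+ *				 a5, &out1, &out2, &out3);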
+ */
+s32 scm_call_atomic5_3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
+	u32 arg3, u32 arg4, u32 arg5, u32 *ret1, u32 *ret2, u32 *ret3)
+{
+	int ret;
+	int context_id;
+	uint64_t x0;
+	struct scm_desc desc = {0};
+
+	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 5);
+	register u32 r1 asm("r1") = (uintptr_t)&context_id;
+	register u32 r2 asm("r2") = arg1;
+	register u32 r3 asm("r3") = arg2;
+	register u32 r4 asm("r4") = arg3;
+	register u32 r5 asm("r5") = arg4;
+	register u32 r6 asm("r6") = arg5;
+
+	x0 = r0;
+	desc.arginfo = r1;
+	desc.args[0] = r2;
+	desc.args[1] = r3;
+	desc.args[2] = r4;
+	desc.args[3] = r5;
+	desc.args[4] = r6;
+
+	ret = scm_call_qcpe(x0, &desc);
+
+	if (ret < 0)
+		return scm_remap_error(ret);
+
+	*ret1 = desc.ret[0];
+	*ret2 = desc.ret[1];
+	*ret3 = desc.ret[2];
+
+	return 0;
+}
+EXPORT_SYMBOL(scm_call_atomic5_3);
+
+u32 scm_get_version(void)
+{
+	int context_id;
+	static u32 version = -1;
+	int ret;
+	uint64_t x0;
+	struct scm_desc desc = {0};
+
+	register u32 r0 asm("r0");
+	register u32 r1 asm("r1");
+
+	if (version != -1)
+		return version;
+
+	mutex_lock(&scm_lock);
+
+	r0 = 0x1 << 8;
+	r1 = (uintptr_t)&context_id;
+
+	x0 = r0;
+	desc.arginfo = r1;
+
+	ret = scm_call_qcpe(x0, &desc);
+
+	version = desc.ret[0];
+
+	mutex_unlock(&scm_lock);
+
+	if (ret < 0)
+		return scm_remap_error(ret);
+
+	return version;
+}
+EXPORT_SYMBOL(scm_get_version);
+
+#define SCM_IO_READ	0x1
+#define SCM_IO_WRITE	0x2
+
+u32 scm_io_read(phys_addr_t address)
+{
+	struct scm_desc desc = {
+		.args[0] = address,
+		.arginfo = SCM_ARGS(1),
+	};
+
+	if (!is_scm_armv8())
+		return scm_call_atomic1(SCM_SVC_IO, SCM_IO_READ, address);
+
+	scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_IO, SCM_IO_READ), &desc);
+	return desc.ret[0];
+}
+EXPORT_SYMBOL(scm_io_read);
+
+int scm_io_write(phys_addr_t address, u32 val)
+{
+	int ret;
+
+	if (!is_scm_armv8())
+		ret = scm_call_atomic2(SCM_SVC_IO, SCM_IO_WRITE, address, val);
+	else {
+		struct scm_desc desc = {
+			.args[0] = address,
+			.args[1] = val,
+			.arginfo = SCM_ARGS(2),
+		};
+		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_IO, SCM_IO_WRITE),
+				       &desc);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(scm_io_write);
+
+int scm_is_call_available(u32 svc_id, u32 cmd_id)
+{
+	int ret;
+	struct scm_desc desc = {0};
+
+	if (!is_scm_armv8()) {
+		u32 ret_val = 0;
+		u32 svc_cmd = (svc_id << 10) | cmd_id;
+
+		ret = scm_call(SCM_SVC_INFO, IS_CALL_AVAIL_CMD, &svc_cmd,
+				sizeof(svc_cmd), &ret_val, sizeof(ret_val));
+		if (ret)
+			return ret;
+
+		return ret_val;
+	}
+	desc.arginfo = SCM_ARGS(1);
+	desc.args[0] = SCM_SIP_FNID(svc_id, cmd_id);
+	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO, IS_CALL_AVAIL_CMD), &desc);
+	if (ret)
+		return ret;
+
+	return desc.ret[0];
+}
+EXPORT_SYMBOL(scm_is_call_available);
+
+#define GET_FEAT_VERSION_CMD	3
+int scm_get_feat_version(u32 feat)
+{
+	struct scm_desc desc = {0};
+	int ret;
+
+	if (!is_scm_armv8()) {
+		if (scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD)) {
+			u32 version;
+
+			if (!scm_call(SCM_SVC_INFO, GET_FEAT_VERSION_CMD, &feat,
+				      sizeof(feat), &version, sizeof(version)))
+				return version;
+		}
+		return 0;
+	}
+
+	ret = scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD);
+	if (ret <= 0)
+		return 0;
+
+	desc.args[0] = feat;
+	desc.arginfo = SCM_ARGS(1);
+	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO, GET_FEAT_VERSION_CMD),
+			&desc);
+	if (!ret)
+		return desc.ret[0];
+
+	return 0;
+}
+EXPORT_SYMBOL(scm_get_feat_version);
+
+#define RESTORE_SEC_CFG	2
+int scm_restore_sec_cfg(u32 device_id, u32 spare, int *scm_ret)
+{
+	struct scm_desc desc = {0};
+	int ret;
+	struct restore_sec_cfg {
+		u32 device_id;
+		u32 spare;
+	} cfg;
+
+	cfg.device_id = device_id;
+	cfg.spare = spare;
+
+	if (IS_ERR_OR_NULL(scm_ret))
+		return -EINVAL;
+
+	if (!is_scm_armv8())
+		return scm_call(SCM_SVC_MP, RESTORE_SEC_CFG, &cfg, sizeof(cfg),
+				scm_ret, sizeof(*scm_ret));
+
+	desc.args[0] = device_id;
+	desc.args[1] = spare;
+	desc.arginfo = SCM_ARGS(2);
+
+	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP, RESTORE_SEC_CFG), &desc);
+	if (ret)
+		return ret;
+
+	*scm_ret = desc.ret[0];
+	return 0;
+}
+EXPORT_SYMBOL(scm_restore_sec_cfg);
+
+/*
+ * SCM call command ID to check secure mode.
+ * Returns zero for a secure device.
+ * Returns one for a non-secure device, or for a secure device with
+ * debug enabled.
+ */
+#define TZ_INFO_GET_SECURE_STATE	0x4
+bool scm_is_secure_device(void)
+{
+	struct scm_desc desc = {0};
+	int ret = 0, resp;
+
+	desc.args[0] = 0;
+	desc.arginfo = 0;
+	if (!is_scm_armv8()) {
+		ret = scm_call(SCM_SVC_INFO, TZ_INFO_GET_SECURE_STATE, NULL,
+			0, &resp, sizeof(resp));
+	} else {
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO,
+				TZ_INFO_GET_SECURE_STATE),
+				&desc);
+		resp = desc.ret[0];
+	}
+
+	if (ret) {
+		pr_err("%s: SCM call failed\n", __func__);
+		return false;
+	}
+
+	if ((resp & BIT(0)) || (resp & BIT(2)))
+		return true;
+	else
+		return false;
+}
+EXPORT_SYMBOL(scm_is_secure_device);
diff --git a/include/soc/qcom/scm.h b/include/soc/qcom/scm.h
index ad57eda97f9d..af389305207f 100644
--- a/include/soc/qcom/scm.h
+++ b/include/soc/qcom/scm.h
@@ -95,7 +95,7 @@ struct scm_desc {
 	u64 x5;
 };
 
-#ifdef CONFIG_QCOM_SCM
+#if defined(CONFIG_QCOM_SCM) || defined(CONFIG_QCOM_SCM_QCPE)
 extern int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf,
 		size_t cmd_len, void *resp_buf, size_t resp_len);
 
@@ -230,7 +230,7 @@ static inline int scm_io_write(phys_addr_t address, u32 val)
 	return 0;
 }
 
-inline bool scm_is_secure_device(void)
+static inline bool scm_is_secure_device(void)
 {
 	return false;
 }
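
A minimal usage sketch (not part of the patch): with CONFIG_QCOM_SCM_QCPE=y,
existing SCM clients need no source changes; a call such as the one below is
marshalled into a struct scm_desc by scm_call2() and forwarded to the QCPE
back-end over HAB. The device ID value here is hypothetical.

	#include <soc/qcom/scm.h>

	static int example_restore_cfg(void)
	{
		int scm_ret = 0;
		int ret;

		/* Travels: scm_restore_sec_cfg() -> scm_call2() ->
		 * scm_call_qcpe() -> habmm_socket_send()/recv()
		 */
		ret = scm_restore_sec_cfg(0x1 /* hypothetical device ID */, 0,
					  &scm_ret);
		if (ret)
			return ret;

		return scm_ret;
	}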