Merge android-4.4.104 (8bc4213
) into msm-4.4
* refs/heads/tmp-8bc4213 Linux 4.4.104 nfsd: Fix another OPEN stateid race nfsd: Fix stateid races between OPEN and CLOSE nfsd: Make init_open_stateid() a bit more whole drm/i915: Prevent zero length "index" write drm/i915: Don't try indexed reads to alternate slave addresses NFS: revalidate "." etc correctly on "open". mtd: nand: Fix writing mtdoops to nand flash. drm/panel: simple: Add missing panel_simple_unprepare() calls drm/radeon: fix atombios on big endian Revert "drm/radeon: dont switch vt on suspend" bcache: Fix building error on MIPS eeprom: at24: check at24_read/write arguments mmc: core: Do not leave the block driver in a suspended state KVM: x86: inject exceptions produced by x86_decode_insn KVM: x86: Exit to user-mode on #UD intercept when emulator requires KVM: x86: pvclock: Handle first-time write to pvclock-page contains random junk btrfs: clear space cache inode generation always mm/madvise.c: fix madvise() infinite loop under special circumstances mm, thp: Do not make page table dirty unconditionally in touch_p[mu]d() x86/efi-bgrt: Replace early_memremap() with memremap() x86/efi-bgrt: Fix kernel panic when mapping BGRT data ARM: dts: omap3: logicpd-torpedo-37xx-devkit: Fix MMC1 cd-gpio x86/efi: Build our own page table structures x86/efi: Hoist page table switching code into efi_call_virt() x86/mm/pat: Ensure cpa->pfn only contains page frame numbers ipsec: Fix aborted xfrm policy dump crash netlink: add a start callback for starting a netlink dump Documentation: tee subsystem and op-tee driver tee: add OP-TEE driver tee: generic TEE subsystem dt/bindings: add bindings for optee kernel.h: add u64_to_user_ptr() ARM: 8481/2: drivers: psci: replace psci firmware calls ARM: 8480/2: arm64: add implementation for arm-smccc ARM: 8479/2: add implementation for arm-smccc ARM: 8478/2: arm/arm64: add arm-smccc UPSTREAM: net: xfrm: allow clearing socket xfrm policies. 
Conflicts: arch/arm64/kernel/arm64ksyms.c arch/arm64/kernel/asm-offsets.c arch/arm64/kvm/hyp/fpsimd.S drivers/Kconfig drivers/Makefile drivers/firmware/psci.c drivers/gpu/drm/msm/msm_gem_submit.c drivers/mmc/core/bus.c include/linux/arm-smccc.h Change-Id: Ib22051f60c49ecf3bb7e18f9940ba9a4ac5143af Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
This commit is contained in:
commit
80eb7ccae4
67 changed files with 5551 additions and 314 deletions
|
@ -437,6 +437,8 @@ sysrq.txt
|
|||
- info on the magic SysRq key.
|
||||
target/
|
||||
- directory with info on generating TCM v4 fabric .ko modules
|
||||
tee.txt
|
||||
- info on the TEE subsystem and drivers
|
||||
this_cpu_ops.txt
|
||||
- List rationale behind and the way to use this_cpu operations.
|
||||
thermal/
|
||||
|
|
|
@ -0,0 +1,31 @@
|
|||
OP-TEE Device Tree Bindings
|
||||
|
||||
OP-TEE is a piece of software using hardware features to provide a Trusted
|
||||
Execution Environment. The security can be provided with ARM TrustZone, but
|
||||
also by virtualization or a separate chip.
|
||||
|
||||
We're using "linaro" as the first part of the compatible property for
|
||||
the reference implementation maintained by Linaro.
|
||||
|
||||
* OP-TEE based on ARM TrustZone required properties:
|
||||
|
||||
- compatible : should contain "linaro,optee-tz"
|
||||
|
||||
- method : The method of calling the OP-TEE Trusted OS. Permitted
|
||||
values are:
|
||||
|
||||
"smc" : SMC #0, with the register assignments specified
|
||||
in drivers/tee/optee/optee_smc.h
|
||||
|
||||
"hvc" : HVC #0, with the register assignments specified
|
||||
in drivers/tee/optee/optee_smc.h
|
||||
|
||||
|
||||
|
||||
Example:
|
||||
firmware {
|
||||
optee {
|
||||
compatible = "linaro,optee-tz";
|
||||
method = "smc";
|
||||
};
|
||||
};
|
|
@ -133,6 +133,7 @@ lacie LaCie
|
|||
lantiq Lantiq Semiconductor
|
||||
lenovo Lenovo Group Ltd.
|
||||
lg LG Corporation
|
||||
linaro Linaro Limited
|
||||
linux Linux-specific binding
|
||||
lsi LSI Corp. (LSI Logic)
|
||||
lltc Linear Technology Corporation
|
||||
|
|
|
@ -307,6 +307,7 @@ Code Seq#(hex) Include File Comments
|
|||
0xA3 80-8F Port ACL in development:
|
||||
<mailto:tlewis@mindspring.com>
|
||||
0xA3 90-9F linux/dtlk.h
|
||||
0xA4 00-1F uapi/linux/tee.h Generic TEE subsystem
|
||||
0xAA 00-3F linux/uapi/linux/userfaultfd.h
|
||||
0xAB 00-1F linux/nbd.h
|
||||
0xAC 00-1F linux/raw.h
|
||||
|
|
118
Documentation/tee.txt
Normal file
118
Documentation/tee.txt
Normal file
|
@ -0,0 +1,118 @@
|
|||
TEE subsystem
|
||||
This document describes the TEE subsystem in Linux.
|
||||
|
||||
A TEE (Trusted Execution Environment) is a trusted OS running in some
|
||||
secure environment, for example, TrustZone on ARM CPUs, or a separate
|
||||
secure co-processor etc. A TEE driver handles the details needed to
|
||||
communicate with the TEE.
|
||||
|
||||
This subsystem deals with:
|
||||
|
||||
- Registration of TEE drivers
|
||||
|
||||
- Managing shared memory between Linux and the TEE
|
||||
|
||||
- Providing a generic API to the TEE
|
||||
|
||||
The TEE interface
|
||||
=================
|
||||
|
||||
include/uapi/linux/tee.h defines the generic interface to a TEE.
|
||||
|
||||
User space (the client) connects to the driver by opening /dev/tee[0-9]* or
|
||||
/dev/teepriv[0-9]*.
|
||||
|
||||
- TEE_IOC_SHM_ALLOC allocates shared memory and returns a file descriptor
|
||||
which user space can mmap. When user space doesn't need the file
|
||||
descriptor any more, it should be closed. When shared memory isn't needed
|
||||
any longer it should be unmapped with munmap() to allow the reuse of
|
||||
memory.
|
||||
|
||||
- TEE_IOC_VERSION lets user space know which TEE this driver handles and
|
||||
the its capabilities.
|
||||
|
||||
- TEE_IOC_OPEN_SESSION opens a new session to a Trusted Application.
|
||||
|
||||
- TEE_IOC_INVOKE invokes a function in a Trusted Application.
|
||||
|
||||
- TEE_IOC_CANCEL may cancel an ongoing TEE_IOC_OPEN_SESSION or TEE_IOC_INVOKE.
|
||||
|
||||
- TEE_IOC_CLOSE_SESSION closes a session to a Trusted Application.
|
||||
|
||||
There are two classes of clients, normal clients and supplicants. The latter is
|
||||
a helper process for the TEE to access resources in Linux, for example file
|
||||
system access. A normal client opens /dev/tee[0-9]* and a supplicant opens
|
||||
/dev/teepriv[0-9].
|
||||
|
||||
Much of the communication between clients and the TEE is opaque to the
|
||||
driver. The main job for the driver is to receive requests from the
|
||||
clients, forward them to the TEE and send back the results. In the case of
|
||||
supplicants the communication goes in the other direction, the TEE sends
|
||||
requests to the supplicant which then sends back the result.
|
||||
|
||||
OP-TEE driver
|
||||
=============
|
||||
|
||||
The OP-TEE driver handles OP-TEE [1] based TEEs. Currently it is only the ARM
|
||||
TrustZone based OP-TEE solution that is supported.
|
||||
|
||||
Lowest level of communication with OP-TEE builds on ARM SMC Calling
|
||||
Convention (SMCCC) [2], which is the foundation for OP-TEE's SMC interface
|
||||
[3] used internally by the driver. Stacked on top of that is OP-TEE Message
|
||||
Protocol [4].
|
||||
|
||||
OP-TEE SMC interface provides the basic functions required by SMCCC and some
|
||||
additional functions specific for OP-TEE. The most interesting functions are:
|
||||
|
||||
- OPTEE_SMC_FUNCID_CALLS_UID (part of SMCCC) returns the version information
|
||||
which is then returned by TEE_IOC_VERSION
|
||||
|
||||
- OPTEE_SMC_CALL_GET_OS_UUID returns the particular OP-TEE implementation, used
|
||||
to tell, for instance, a TrustZone OP-TEE apart from an OP-TEE running on a
|
||||
separate secure co-processor.
|
||||
|
||||
- OPTEE_SMC_CALL_WITH_ARG drives the OP-TEE message protocol
|
||||
|
||||
- OPTEE_SMC_GET_SHM_CONFIG lets the driver and OP-TEE agree on which memory
|
||||
range to used for shared memory between Linux and OP-TEE.
|
||||
|
||||
The GlobalPlatform TEE Client API [5] is implemented on top of the generic
|
||||
TEE API.
|
||||
|
||||
Picture of the relationship between the different components in the
|
||||
OP-TEE architecture.
|
||||
|
||||
User space Kernel Secure world
|
||||
~~~~~~~~~~ ~~~~~~ ~~~~~~~~~~~~
|
||||
+--------+ +-------------+
|
||||
| Client | | Trusted |
|
||||
+--------+ | Application |
|
||||
/\ +-------------+
|
||||
|| +----------+ /\
|
||||
|| |tee- | ||
|
||||
|| |supplicant| \/
|
||||
|| +----------+ +-------------+
|
||||
\/ /\ | TEE Internal|
|
||||
+-------+ || | API |
|
||||
+ TEE | || +--------+--------+ +-------------+
|
||||
| Client| || | TEE | OP-TEE | | OP-TEE |
|
||||
| API | \/ | subsys | driver | | Trusted OS |
|
||||
+-------+----------------+----+-------+----+-----------+-------------+
|
||||
| Generic TEE API | | OP-TEE MSG |
|
||||
| IOCTL (TEE_IOC_*) | | SMCCC (OPTEE_SMC_CALL_*) |
|
||||
+-----------------------------+ +------------------------------+
|
||||
|
||||
RPC (Remote Procedure Call) are requests from secure world to kernel driver
|
||||
or tee-supplicant. An RPC is identified by a special range of SMCCC return
|
||||
values from OPTEE_SMC_CALL_WITH_ARG. RPC messages which are intended for the
|
||||
kernel are handled by the kernel driver. Other RPC messages will be forwarded to
|
||||
tee-supplicant without further involvement of the driver, except switching
|
||||
shared memory buffer representation.
|
||||
|
||||
References:
|
||||
[1] https://github.com/OP-TEE/optee_os
|
||||
[2] http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
|
||||
[3] drivers/tee/optee/optee_smc.h
|
||||
[4] drivers/tee/optee/optee_msg.h
|
||||
[5] http://www.globalplatform.org/specificationsdevice.asp look for
|
||||
"TEE Client API Specification v1.0" and click download.
|
13
MAINTAINERS
13
MAINTAINERS
|
@ -7955,6 +7955,11 @@ F: arch/*/oprofile/
|
|||
F: drivers/oprofile/
|
||||
F: include/linux/oprofile.h
|
||||
|
||||
OP-TEE DRIVER
|
||||
M: Jens Wiklander <jens.wiklander@linaro.org>
|
||||
S: Maintained
|
||||
F: drivers/tee/optee/
|
||||
|
||||
ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
|
||||
M: Mark Fasheh <mfasheh@suse.com>
|
||||
M: Joel Becker <jlbec@evilplan.org>
|
||||
|
@ -9382,6 +9387,14 @@ F: drivers/hwtracing/stm/
|
|||
F: include/linux/stm.h
|
||||
F: include/uapi/linux/stm.h
|
||||
|
||||
TEE SUBSYSTEM
|
||||
M: Jens Wiklander <jens.wiklander@linaro.org>
|
||||
S: Maintained
|
||||
F: include/linux/tee_drv.h
|
||||
F: include/uapi/linux/tee.h
|
||||
F: drivers/tee/
|
||||
F: Documentation/tee.txt
|
||||
|
||||
THUNDERBOLT DRIVER
|
||||
M: Andreas Noever <andreas.noever@gmail.com>
|
||||
S: Maintained
|
||||
|
|
2
Makefile
2
Makefile
|
@ -1,6 +1,6 @@
|
|||
VERSION = 4
|
||||
PATCHLEVEL = 4
|
||||
SUBLEVEL = 103
|
||||
SUBLEVEL = 104
|
||||
EXTRAVERSION =
|
||||
NAME = Blurry Fish Butt
|
||||
|
||||
|
|
|
@ -88,7 +88,7 @@
|
|||
interrupts-extended = <&intc 83 &omap3_pmx_core 0x11a>;
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&mmc1_pins &mmc1_cd>;
|
||||
cd-gpios = <&gpio4 31 IRQ_TYPE_LEVEL_LOW>; /* gpio127 */
|
||||
cd-gpios = <&gpio4 31 GPIO_ACTIVE_LOW>; /* gpio127 */
|
||||
vmmc-supply = <&vmmc1>;
|
||||
bus-width = <4>;
|
||||
cap-power-off-card;
|
||||
|
|
|
@ -104,6 +104,7 @@ config ARM64
|
|||
select HAVE_CONTEXT_TRACKING
|
||||
select HAVE_ARM_SMCCC
|
||||
select THREAD_INFO_IN_TASK
|
||||
select HAVE_ARM_SMCCC
|
||||
help
|
||||
ARM 64-bit (AArch64) Linux support.
|
||||
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
|
||||
#include <asm/fpu/api.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/tlb.h>
|
||||
|
||||
/*
|
||||
* We map the EFI regions needed for runtime services non-contiguously,
|
||||
|
@ -66,6 +67,17 @@ extern u64 asmlinkage efi_call(void *fp, ...);
|
|||
|
||||
#define efi_call_phys(f, args...) efi_call((f), args)
|
||||
|
||||
/*
|
||||
* Scratch space used for switching the pagetable in the EFI stub
|
||||
*/
|
||||
struct efi_scratch {
|
||||
u64 r15;
|
||||
u64 prev_cr3;
|
||||
pgd_t *efi_pgt;
|
||||
bool use_pgd;
|
||||
u64 phys_stack;
|
||||
} __packed;
|
||||
|
||||
#define efi_call_virt(f, ...) \
|
||||
({ \
|
||||
efi_status_t __s; \
|
||||
|
@ -73,7 +85,20 @@ extern u64 asmlinkage efi_call(void *fp, ...);
|
|||
efi_sync_low_kernel_mappings(); \
|
||||
preempt_disable(); \
|
||||
__kernel_fpu_begin(); \
|
||||
\
|
||||
if (efi_scratch.use_pgd) { \
|
||||
efi_scratch.prev_cr3 = read_cr3(); \
|
||||
write_cr3((unsigned long)efi_scratch.efi_pgt); \
|
||||
__flush_tlb_all(); \
|
||||
} \
|
||||
\
|
||||
__s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__); \
|
||||
\
|
||||
if (efi_scratch.use_pgd) { \
|
||||
write_cr3(efi_scratch.prev_cr3); \
|
||||
__flush_tlb_all(); \
|
||||
} \
|
||||
\
|
||||
__kernel_fpu_end(); \
|
||||
preempt_enable(); \
|
||||
__s; \
|
||||
|
@ -113,6 +138,7 @@ extern void __init efi_memory_uc(u64 addr, unsigned long size);
|
|||
extern void __init efi_map_region(efi_memory_desc_t *md);
|
||||
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
|
||||
extern void efi_sync_low_kernel_mappings(void);
|
||||
extern int __init efi_alloc_page_tables(void);
|
||||
extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
|
||||
extern void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
|
||||
extern void __init old_map_region(efi_memory_desc_t *md);
|
||||
|
|
|
@ -1696,6 +1696,8 @@ static int ud_interception(struct vcpu_svm *svm)
|
|||
int er;
|
||||
|
||||
er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
|
||||
if (er == EMULATE_USER_EXIT)
|
||||
return 0;
|
||||
if (er != EMULATE_DONE)
|
||||
kvm_queue_exception(&svm->vcpu, UD_VECTOR);
|
||||
return 1;
|
||||
|
|
|
@ -5267,6 +5267,8 @@ static int handle_exception(struct kvm_vcpu *vcpu)
|
|||
return 1;
|
||||
}
|
||||
er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
|
||||
if (er == EMULATE_USER_EXIT)
|
||||
return 0;
|
||||
if (er != EMULATE_DONE)
|
||||
kvm_queue_exception(vcpu, UD_VECTOR);
|
||||
return 1;
|
||||
|
|
|
@ -1812,6 +1812,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
|
|||
*/
|
||||
BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
|
||||
|
||||
if (guest_hv_clock.version & 1)
|
||||
++guest_hv_clock.version; /* first time write, random junk */
|
||||
|
||||
vcpu->hv_clock.version = guest_hv_clock.version + 1;
|
||||
kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
|
||||
&vcpu->hv_clock,
|
||||
|
@ -5426,6 +5429,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
|
|||
if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
|
||||
emulation_type))
|
||||
return EMULATE_DONE;
|
||||
if (ctxt->have_exception && inject_emulated_exception(vcpu))
|
||||
return EMULATE_DONE;
|
||||
if (emulation_type & EMULTYPE_SKIP)
|
||||
return EMULATE_FAIL;
|
||||
return handle_emulation_failure(vcpu);
|
||||
|
|
|
@ -911,15 +911,10 @@ static void populate_pte(struct cpa_data *cpa,
|
|||
pte = pte_offset_kernel(pmd, start);
|
||||
|
||||
while (num_pages-- && start < end) {
|
||||
|
||||
/* deal with the NX bit */
|
||||
if (!(pgprot_val(pgprot) & _PAGE_NX))
|
||||
cpa->pfn &= ~_PAGE_NX;
|
||||
|
||||
set_pte(pte, pfn_pte(cpa->pfn >> PAGE_SHIFT, pgprot));
|
||||
set_pte(pte, pfn_pte(cpa->pfn, pgprot));
|
||||
|
||||
start += PAGE_SIZE;
|
||||
cpa->pfn += PAGE_SIZE;
|
||||
cpa->pfn++;
|
||||
pte++;
|
||||
}
|
||||
}
|
||||
|
@ -975,11 +970,11 @@ static int populate_pmd(struct cpa_data *cpa,
|
|||
|
||||
pmd = pmd_offset(pud, start);
|
||||
|
||||
set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE |
|
||||
set_pmd(pmd, __pmd(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
|
||||
massage_pgprot(pmd_pgprot)));
|
||||
|
||||
start += PMD_SIZE;
|
||||
cpa->pfn += PMD_SIZE;
|
||||
cpa->pfn += PMD_SIZE >> PAGE_SHIFT;
|
||||
cur_pages += PMD_SIZE >> PAGE_SHIFT;
|
||||
}
|
||||
|
||||
|
@ -1048,11 +1043,11 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
|
|||
* Map everything starting from the Gb boundary, possibly with 1G pages
|
||||
*/
|
||||
while (end - start >= PUD_SIZE) {
|
||||
set_pud(pud, __pud(cpa->pfn | _PAGE_PSE |
|
||||
set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
|
||||
massage_pgprot(pud_pgprot)));
|
||||
|
||||
start += PUD_SIZE;
|
||||
cpa->pfn += PUD_SIZE;
|
||||
cpa->pfn += PUD_SIZE >> PAGE_SHIFT;
|
||||
cur_pages += PUD_SIZE >> PAGE_SHIFT;
|
||||
pud++;
|
||||
}
|
||||
|
|
|
@ -28,8 +28,7 @@ struct bmp_header {
|
|||
void __init efi_bgrt_init(void)
|
||||
{
|
||||
acpi_status status;
|
||||
void __iomem *image;
|
||||
bool ioremapped = false;
|
||||
void *image;
|
||||
struct bmp_header bmp_header;
|
||||
|
||||
if (acpi_disabled)
|
||||
|
@ -70,20 +69,14 @@ void __init efi_bgrt_init(void)
|
|||
return;
|
||||
}
|
||||
|
||||
image = efi_lookup_mapped_addr(bgrt_tab->image_address);
|
||||
image = memremap(bgrt_tab->image_address, sizeof(bmp_header), MEMREMAP_WB);
|
||||
if (!image) {
|
||||
image = early_ioremap(bgrt_tab->image_address,
|
||||
sizeof(bmp_header));
|
||||
ioremapped = true;
|
||||
if (!image) {
|
||||
pr_err("Ignoring BGRT: failed to map image header memory\n");
|
||||
return;
|
||||
}
|
||||
pr_err("Ignoring BGRT: failed to map image header memory\n");
|
||||
return;
|
||||
}
|
||||
|
||||
memcpy_fromio(&bmp_header, image, sizeof(bmp_header));
|
||||
if (ioremapped)
|
||||
early_iounmap(image, sizeof(bmp_header));
|
||||
memcpy(&bmp_header, image, sizeof(bmp_header));
|
||||
memunmap(image);
|
||||
bgrt_image_size = bmp_header.size;
|
||||
|
||||
bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL | __GFP_NOWARN);
|
||||
|
@ -93,18 +86,14 @@ void __init efi_bgrt_init(void)
|
|||
return;
|
||||
}
|
||||
|
||||
if (ioremapped) {
|
||||
image = early_ioremap(bgrt_tab->image_address,
|
||||
bmp_header.size);
|
||||
if (!image) {
|
||||
pr_err("Ignoring BGRT: failed to map image memory\n");
|
||||
kfree(bgrt_image);
|
||||
bgrt_image = NULL;
|
||||
return;
|
||||
}
|
||||
image = memremap(bgrt_tab->image_address, bmp_header.size, MEMREMAP_WB);
|
||||
if (!image) {
|
||||
pr_err("Ignoring BGRT: failed to map image memory\n");
|
||||
kfree(bgrt_image);
|
||||
bgrt_image = NULL;
|
||||
return;
|
||||
}
|
||||
|
||||
memcpy_fromio(bgrt_image, image, bgrt_image_size);
|
||||
if (ioremapped)
|
||||
early_iounmap(image, bmp_header.size);
|
||||
memcpy(bgrt_image, image, bgrt_image_size);
|
||||
memunmap(image);
|
||||
}
|
||||
|
|
|
@ -869,7 +869,7 @@ static void __init kexec_enter_virtual_mode(void)
|
|||
* This function will switch the EFI runtime services to virtual mode.
|
||||
* Essentially, we look through the EFI memmap and map every region that
|
||||
* has the runtime attribute bit set in its memory descriptor into the
|
||||
* ->trampoline_pgd page table using a top-down VA allocation scheme.
|
||||
* efi_pgd page table.
|
||||
*
|
||||
* The old method which used to update that memory descriptor with the
|
||||
* virtual address obtained from ioremap() is still supported when the
|
||||
|
@ -879,8 +879,8 @@ static void __init kexec_enter_virtual_mode(void)
|
|||
*
|
||||
* The new method does a pagetable switch in a preemption-safe manner
|
||||
* so that we're in a different address space when calling a runtime
|
||||
* function. For function arguments passing we do copy the PGDs of the
|
||||
* kernel page table into ->trampoline_pgd prior to each call.
|
||||
* function. For function arguments passing we do copy the PUDs of the
|
||||
* kernel page table into efi_pgd prior to each call.
|
||||
*
|
||||
* Specially for kexec boot, efi runtime maps in previous kernel should
|
||||
* be passed in via setup_data. In that case runtime ranges will be mapped
|
||||
|
@ -895,6 +895,12 @@ static void __init __efi_enter_virtual_mode(void)
|
|||
|
||||
efi.systab = NULL;
|
||||
|
||||
if (efi_alloc_page_tables()) {
|
||||
pr_err("Failed to allocate EFI page tables\n");
|
||||
clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
|
||||
return;
|
||||
}
|
||||
|
||||
efi_merge_regions();
|
||||
new_memmap = efi_map_regions(&count, &pg_shift);
|
||||
if (!new_memmap) {
|
||||
|
@ -954,28 +960,11 @@ static void __init __efi_enter_virtual_mode(void)
|
|||
efi_runtime_mkexec();
|
||||
|
||||
/*
|
||||
* We mapped the descriptor array into the EFI pagetable above but we're
|
||||
* not unmapping it here. Here's why:
|
||||
*
|
||||
* We're copying select PGDs from the kernel page table to the EFI page
|
||||
* table and when we do so and make changes to those PGDs like unmapping
|
||||
* stuff from them, those changes appear in the kernel page table and we
|
||||
* go boom.
|
||||
*
|
||||
* From setup_real_mode():
|
||||
*
|
||||
* ...
|
||||
* trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
|
||||
*
|
||||
* In this particular case, our allocation is in PGD 0 of the EFI page
|
||||
* table but we've copied that PGD from PGD[272] of the EFI page table:
|
||||
*
|
||||
* pgd_index(__PAGE_OFFSET = 0xffff880000000000) = 272
|
||||
*
|
||||
* where the direct memory mapping in kernel space is.
|
||||
*
|
||||
* new_memmap's VA comes from that direct mapping and thus clearing it,
|
||||
* it would get cleared in the kernel page table too.
|
||||
* We mapped the descriptor array into the EFI pagetable above
|
||||
* but we're not unmapping it here because if we're running in
|
||||
* EFI mixed mode we need all of memory to be accessible when
|
||||
* we pass parameters to the EFI runtime services in the
|
||||
* thunking code.
|
||||
*
|
||||
* efi_cleanup_page_tables(__pa(new_memmap), 1 << pg_shift);
|
||||
*/
|
||||
|
|
|
@ -38,6 +38,11 @@
|
|||
* say 0 - 3G.
|
||||
*/
|
||||
|
||||
int __init efi_alloc_page_tables(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
void efi_sync_low_kernel_mappings(void) {}
|
||||
void __init efi_dump_pagetable(void) {}
|
||||
int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
|
||||
|
|
|
@ -40,6 +40,7 @@
|
|||
#include <asm/fixmap.h>
|
||||
#include <asm/realmode.h>
|
||||
#include <asm/time.h>
|
||||
#include <asm/pgalloc.h>
|
||||
|
||||
/*
|
||||
* We allocate runtime services regions bottom-up, starting from -4G, i.e.
|
||||
|
@ -47,16 +48,7 @@
|
|||
*/
|
||||
static u64 efi_va = EFI_VA_START;
|
||||
|
||||
/*
|
||||
* Scratch space used for switching the pagetable in the EFI stub
|
||||
*/
|
||||
struct efi_scratch {
|
||||
u64 r15;
|
||||
u64 prev_cr3;
|
||||
pgd_t *efi_pgt;
|
||||
bool use_pgd;
|
||||
u64 phys_stack;
|
||||
} __packed;
|
||||
struct efi_scratch efi_scratch;
|
||||
|
||||
static void __init early_code_mapping_set_exec(int executable)
|
||||
{
|
||||
|
@ -83,8 +75,11 @@ pgd_t * __init efi_call_phys_prolog(void)
|
|||
int pgd;
|
||||
int n_pgds;
|
||||
|
||||
if (!efi_enabled(EFI_OLD_MEMMAP))
|
||||
return NULL;
|
||||
if (!efi_enabled(EFI_OLD_MEMMAP)) {
|
||||
save_pgd = (pgd_t *)read_cr3();
|
||||
write_cr3((unsigned long)efi_scratch.efi_pgt);
|
||||
goto out;
|
||||
}
|
||||
|
||||
early_code_mapping_set_exec(1);
|
||||
|
||||
|
@ -96,6 +91,7 @@ pgd_t * __init efi_call_phys_prolog(void)
|
|||
vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
|
||||
set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
|
||||
}
|
||||
out:
|
||||
__flush_tlb_all();
|
||||
|
||||
return save_pgd;
|
||||
|
@ -109,8 +105,11 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
|
|||
int pgd_idx;
|
||||
int nr_pgds;
|
||||
|
||||
if (!save_pgd)
|
||||
if (!efi_enabled(EFI_OLD_MEMMAP)) {
|
||||
write_cr3((unsigned long)save_pgd);
|
||||
__flush_tlb_all();
|
||||
return;
|
||||
}
|
||||
|
||||
nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
|
||||
|
||||
|
@ -123,27 +122,97 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
|
|||
early_code_mapping_set_exec(0);
|
||||
}
|
||||
|
||||
static pgd_t *efi_pgd;
|
||||
|
||||
/*
|
||||
* We need our own copy of the higher levels of the page tables
|
||||
* because we want to avoid inserting EFI region mappings (EFI_VA_END
|
||||
* to EFI_VA_START) into the standard kernel page tables. Everything
|
||||
* else can be shared, see efi_sync_low_kernel_mappings().
|
||||
*/
|
||||
int __init efi_alloc_page_tables(void)
|
||||
{
|
||||
pgd_t *pgd;
|
||||
pud_t *pud;
|
||||
gfp_t gfp_mask;
|
||||
|
||||
if (efi_enabled(EFI_OLD_MEMMAP))
|
||||
return 0;
|
||||
|
||||
gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO;
|
||||
efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
|
||||
if (!efi_pgd)
|
||||
return -ENOMEM;
|
||||
|
||||
pgd = efi_pgd + pgd_index(EFI_VA_END);
|
||||
|
||||
pud = pud_alloc_one(NULL, 0);
|
||||
if (!pud) {
|
||||
free_page((unsigned long)efi_pgd);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
pgd_populate(NULL, pgd, pud);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Add low kernel mappings for passing arguments to EFI functions.
|
||||
*/
|
||||
void efi_sync_low_kernel_mappings(void)
|
||||
{
|
||||
unsigned num_pgds;
|
||||
pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
|
||||
unsigned num_entries;
|
||||
pgd_t *pgd_k, *pgd_efi;
|
||||
pud_t *pud_k, *pud_efi;
|
||||
|
||||
if (efi_enabled(EFI_OLD_MEMMAP))
|
||||
return;
|
||||
|
||||
num_pgds = pgd_index(MODULES_END - 1) - pgd_index(PAGE_OFFSET);
|
||||
/*
|
||||
* We can share all PGD entries apart from the one entry that
|
||||
* covers the EFI runtime mapping space.
|
||||
*
|
||||
* Make sure the EFI runtime region mappings are guaranteed to
|
||||
* only span a single PGD entry and that the entry also maps
|
||||
* other important kernel regions.
|
||||
*/
|
||||
BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END));
|
||||
BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) !=
|
||||
(EFI_VA_END & PGDIR_MASK));
|
||||
|
||||
memcpy(pgd + pgd_index(PAGE_OFFSET),
|
||||
init_mm.pgd + pgd_index(PAGE_OFFSET),
|
||||
sizeof(pgd_t) * num_pgds);
|
||||
pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
|
||||
pgd_k = pgd_offset_k(PAGE_OFFSET);
|
||||
|
||||
num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
|
||||
memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);
|
||||
|
||||
/*
|
||||
* We share all the PUD entries apart from those that map the
|
||||
* EFI regions. Copy around them.
|
||||
*/
|
||||
BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
|
||||
BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);
|
||||
|
||||
pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
|
||||
pud_efi = pud_offset(pgd_efi, 0);
|
||||
|
||||
pgd_k = pgd_offset_k(EFI_VA_END);
|
||||
pud_k = pud_offset(pgd_k, 0);
|
||||
|
||||
num_entries = pud_index(EFI_VA_END);
|
||||
memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
|
||||
|
||||
pud_efi = pud_offset(pgd_efi, EFI_VA_START);
|
||||
pud_k = pud_offset(pgd_k, EFI_VA_START);
|
||||
|
||||
num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
|
||||
memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
|
||||
}
|
||||
|
||||
int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
|
||||
{
|
||||
unsigned long text;
|
||||
unsigned long pfn, text;
|
||||
struct page *page;
|
||||
unsigned npages;
|
||||
pgd_t *pgd;
|
||||
|
@ -151,8 +220,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
|
|||
if (efi_enabled(EFI_OLD_MEMMAP))
|
||||
return 0;
|
||||
|
||||
efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
|
||||
pgd = __va(efi_scratch.efi_pgt);
|
||||
efi_scratch.efi_pgt = (pgd_t *)__pa(efi_pgd);
|
||||
pgd = efi_pgd;
|
||||
|
||||
/*
|
||||
* It can happen that the physical address of new_memmap lands in memory
|
||||
|
@ -160,7 +229,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
|
|||
* and ident-map those pages containing the map before calling
|
||||
* phys_efi_set_virtual_address_map().
|
||||
*/
|
||||
if (kernel_map_pages_in_pgd(pgd, pa_memmap, pa_memmap, num_pages, _PAGE_NX)) {
|
||||
pfn = pa_memmap >> PAGE_SHIFT;
|
||||
if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, _PAGE_NX)) {
|
||||
pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
|
||||
return 1;
|
||||
}
|
||||
|
@ -185,8 +255,9 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
|
|||
|
||||
npages = (_end - _text) >> PAGE_SHIFT;
|
||||
text = __pa(_text);
|
||||
pfn = text >> PAGE_SHIFT;
|
||||
|
||||
if (kernel_map_pages_in_pgd(pgd, text >> PAGE_SHIFT, text, npages, 0)) {
|
||||
if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, 0)) {
|
||||
pr_err("Failed to map kernel text 1:1\n");
|
||||
return 1;
|
||||
}
|
||||
|
@ -196,20 +267,20 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
|
|||
|
||||
void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
|
||||
{
|
||||
pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
|
||||
|
||||
kernel_unmap_pages_in_pgd(pgd, pa_memmap, num_pages);
|
||||
kernel_unmap_pages_in_pgd(efi_pgd, pa_memmap, num_pages);
|
||||
}
|
||||
|
||||
static void __init __map_region(efi_memory_desc_t *md, u64 va)
|
||||
{
|
||||
pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
|
||||
unsigned long pf = 0;
|
||||
unsigned long flags = 0;
|
||||
unsigned long pfn;
|
||||
pgd_t *pgd = efi_pgd;
|
||||
|
||||
if (!(md->attribute & EFI_MEMORY_WB))
|
||||
pf |= _PAGE_PCD;
|
||||
flags |= _PAGE_PCD;
|
||||
|
||||
if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf))
|
||||
pfn = md->phys_addr >> PAGE_SHIFT;
|
||||
if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
|
||||
pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
|
||||
md->phys_addr, va);
|
||||
}
|
||||
|
@ -312,9 +383,7 @@ void __init efi_runtime_mkexec(void)
|
|||
void __init efi_dump_pagetable(void)
|
||||
{
|
||||
#ifdef CONFIG_EFI_PGT_DUMP
|
||||
pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
|
||||
|
||||
ptdump_walk_pgd_level(NULL, pgd);
|
||||
ptdump_walk_pgd_level(NULL, efi_pgd);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
|
|
@ -38,41 +38,6 @@
|
|||
mov %rsi, %cr0; \
|
||||
mov (%rsp), %rsp
|
||||
|
||||
/* stolen from gcc */
|
||||
.macro FLUSH_TLB_ALL
|
||||
movq %r15, efi_scratch(%rip)
|
||||
movq %r14, efi_scratch+8(%rip)
|
||||
movq %cr4, %r15
|
||||
movq %r15, %r14
|
||||
andb $0x7f, %r14b
|
||||
movq %r14, %cr4
|
||||
movq %r15, %cr4
|
||||
movq efi_scratch+8(%rip), %r14
|
||||
movq efi_scratch(%rip), %r15
|
||||
.endm
|
||||
|
||||
.macro SWITCH_PGT
|
||||
cmpb $0, efi_scratch+24(%rip)
|
||||
je 1f
|
||||
movq %r15, efi_scratch(%rip) # r15
|
||||
# save previous CR3
|
||||
movq %cr3, %r15
|
||||
movq %r15, efi_scratch+8(%rip) # prev_cr3
|
||||
movq efi_scratch+16(%rip), %r15 # EFI pgt
|
||||
movq %r15, %cr3
|
||||
1:
|
||||
.endm
|
||||
|
||||
.macro RESTORE_PGT
|
||||
cmpb $0, efi_scratch+24(%rip)
|
||||
je 2f
|
||||
movq efi_scratch+8(%rip), %r15
|
||||
movq %r15, %cr3
|
||||
movq efi_scratch(%rip), %r15
|
||||
FLUSH_TLB_ALL
|
||||
2:
|
||||
.endm
|
||||
|
||||
ENTRY(efi_call)
|
||||
SAVE_XMM
|
||||
mov (%rsp), %rax
|
||||
|
@ -83,16 +48,8 @@ ENTRY(efi_call)
|
|||
mov %r8, %r9
|
||||
mov %rcx, %r8
|
||||
mov %rsi, %rcx
|
||||
SWITCH_PGT
|
||||
call *%rdi
|
||||
RESTORE_PGT
|
||||
addq $48, %rsp
|
||||
RESTORE_XMM
|
||||
ret
|
||||
ENDPROC(efi_call)
|
||||
|
||||
.data
|
||||
ENTRY(efi_scratch)
|
||||
.fill 3,8,0
|
||||
.byte 0
|
||||
.quad 0
|
||||
|
|
|
@ -212,4 +212,6 @@ source "drivers/bif/Kconfig"
|
|||
|
||||
source "drivers/sensors/Kconfig"
|
||||
|
||||
source "drivers/tee/Kconfig"
|
||||
|
||||
endmenu
|
||||
|
|
|
@ -182,3 +182,4 @@ obj-$(CONFIG_BIF) += bif/
|
|||
|
||||
obj-$(CONFIG_SENSORS_SSC) += sensors/
|
||||
obj-$(CONFIG_ESOC) += esoc/
|
||||
obj-$(CONFIG_TEE) += tee/
|
||||
|
|
|
@ -327,38 +327,6 @@ u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
|
|||
return end;
|
||||
}
|
||||
|
||||
/*
|
||||
* We can't ioremap data in EFI boot services RAM, because we've already mapped
|
||||
* it as RAM. So, look it up in the existing EFI memory map instead. Only
|
||||
* callable after efi_enter_virtual_mode and before efi_free_boot_services.
|
||||
*/
|
||||
void __iomem *efi_lookup_mapped_addr(u64 phys_addr)
|
||||
{
|
||||
struct efi_memory_map *map;
|
||||
void *p;
|
||||
map = efi.memmap;
|
||||
if (!map)
|
||||
return NULL;
|
||||
if (WARN_ON(!map->map))
|
||||
return NULL;
|
||||
for (p = map->map; p < map->map_end; p += map->desc_size) {
|
||||
efi_memory_desc_t *md = p;
|
||||
u64 size = md->num_pages << EFI_PAGE_SHIFT;
|
||||
u64 end = md->phys_addr + size;
|
||||
if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
|
||||
md->type != EFI_BOOT_SERVICES_CODE &&
|
||||
md->type != EFI_BOOT_SERVICES_DATA)
|
||||
continue;
|
||||
if (!md->virt_addr)
|
||||
continue;
|
||||
if (phys_addr >= md->phys_addr && phys_addr < end) {
|
||||
phys_addr += md->virt_addr - md->phys_addr;
|
||||
return (__force void __iomem *)(unsigned long)phys_addr;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static __initdata efi_config_table_type_t common_tables[] = {
|
||||
{ACPI_20_TABLE_GUID, "ACPI 2.0", &efi.acpi20},
|
||||
{ACPI_TABLE_GUID, "ACPI", &efi.acpi},
|
||||
|
|
|
@ -1575,34 +1575,32 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
|
|||
WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
|
||||
}
|
||||
|
||||
/* Atom needs data in little endian format
|
||||
* so swap as appropriate when copying data to
|
||||
* or from atom. Note that atom operates on
|
||||
* dw units.
|
||||
/* Atom needs data in little endian format so swap as appropriate when copying
|
||||
* data to or from atom. Note that atom operates on dw units.
|
||||
*
|
||||
* Use to_le=true when sending data to atom and provide at least
|
||||
* ALIGN(num_bytes,4) bytes in the dst buffer.
|
||||
*
|
||||
* Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
|
||||
* byes in the src buffer.
|
||||
*/
|
||||
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
|
||||
{
|
||||
#ifdef __BIG_ENDIAN
|
||||
u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
|
||||
u32 *dst32, *src32;
|
||||
u32 src_tmp[5], dst_tmp[5];
|
||||
int i;
|
||||
u8 align_num_bytes = ALIGN(num_bytes, 4);
|
||||
|
||||
memcpy(src_tmp, src, num_bytes);
|
||||
src32 = (u32 *)src_tmp;
|
||||
dst32 = (u32 *)dst_tmp;
|
||||
if (to_le) {
|
||||
for (i = 0; i < ((num_bytes + 3) / 4); i++)
|
||||
dst32[i] = cpu_to_le32(src32[i]);
|
||||
memcpy(dst, dst_tmp, num_bytes);
|
||||
memcpy(src_tmp, src, num_bytes);
|
||||
for (i = 0; i < align_num_bytes / 4; i++)
|
||||
dst_tmp[i] = cpu_to_le32(src_tmp[i]);
|
||||
memcpy(dst, dst_tmp, align_num_bytes);
|
||||
} else {
|
||||
u8 dws = num_bytes & ~3;
|
||||
for (i = 0; i < ((num_bytes + 3) / 4); i++)
|
||||
dst32[i] = le32_to_cpu(src32[i]);
|
||||
memcpy(dst, dst_tmp, dws);
|
||||
if (num_bytes % 4) {
|
||||
for (i = 0; i < (num_bytes % 4); i++)
|
||||
dst[dws+i] = dst_tmp[dws+i];
|
||||
}
|
||||
memcpy(src_tmp, src, align_num_bytes);
|
||||
for (i = 0; i < align_num_bytes / 4; i++)
|
||||
dst_tmp[i] = le32_to_cpu(src_tmp[i]);
|
||||
memcpy(dst, dst_tmp, num_bytes);
|
||||
}
|
||||
#else
|
||||
memcpy(dst, src, num_bytes);
|
||||
|
|
|
@ -3475,11 +3475,6 @@ static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
|
|||
return VGACNTRL;
|
||||
}
|
||||
|
||||
static inline void __user *to_user_ptr(u64 address)
|
||||
{
|
||||
return (void __user *)(uintptr_t)address;
|
||||
}
|
||||
|
||||
static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
|
||||
{
|
||||
unsigned long j = msecs_to_jiffies(m);
|
||||
|
|
|
@ -324,7 +324,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
|
|||
{
|
||||
struct drm_device *dev = obj->base.dev;
|
||||
void *vaddr = obj->phys_handle->vaddr + args->offset;
|
||||
char __user *user_data = to_user_ptr(args->data_ptr);
|
||||
char __user *user_data = u64_to_user_ptr(args->data_ptr);
|
||||
int ret = 0;
|
||||
|
||||
/* We manually control the domain here and pretend that it
|
||||
|
@ -605,7 +605,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
|
|||
int needs_clflush = 0;
|
||||
struct sg_page_iter sg_iter;
|
||||
|
||||
user_data = to_user_ptr(args->data_ptr);
|
||||
user_data = u64_to_user_ptr(args->data_ptr);
|
||||
remain = args->size;
|
||||
|
||||
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
|
||||
|
@ -692,7 +692,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
|
|||
return 0;
|
||||
|
||||
if (!access_ok(VERIFY_WRITE,
|
||||
to_user_ptr(args->data_ptr),
|
||||
u64_to_user_ptr(args->data_ptr),
|
||||
args->size))
|
||||
return -EFAULT;
|
||||
|
||||
|
@ -783,7 +783,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
|
|||
if (ret)
|
||||
goto out_unpin;
|
||||
|
||||
user_data = to_user_ptr(args->data_ptr);
|
||||
user_data = u64_to_user_ptr(args->data_ptr);
|
||||
remain = args->size;
|
||||
|
||||
offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
|
||||
|
@ -907,7 +907,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
|
|||
int needs_clflush_before = 0;
|
||||
struct sg_page_iter sg_iter;
|
||||
|
||||
user_data = to_user_ptr(args->data_ptr);
|
||||
user_data = u64_to_user_ptr(args->data_ptr);
|
||||
remain = args->size;
|
||||
|
||||
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
|
||||
|
@ -1036,12 +1036,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
|
|||
return 0;
|
||||
|
||||
if (!access_ok(VERIFY_READ,
|
||||
to_user_ptr(args->data_ptr),
|
||||
u64_to_user_ptr(args->data_ptr),
|
||||
args->size))
|
||||
return -EFAULT;
|
||||
|
||||
if (likely(!i915.prefault_disable)) {
|
||||
ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
|
||||
ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
|
||||
args->size);
|
||||
if (ret)
|
||||
return -EFAULT;
|
||||
|
|
|
@ -492,7 +492,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
|
|||
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
|
||||
int remain, ret;
|
||||
|
||||
user_relocs = to_user_ptr(entry->relocs_ptr);
|
||||
user_relocs = u64_to_user_ptr(entry->relocs_ptr);
|
||||
|
||||
remain = entry->relocation_count;
|
||||
while (remain) {
|
||||
|
@ -831,7 +831,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
|
|||
u64 invalid_offset = (u64)-1;
|
||||
int j;
|
||||
|
||||
user_relocs = to_user_ptr(exec[i].relocs_ptr);
|
||||
user_relocs = u64_to_user_ptr(exec[i].relocs_ptr);
|
||||
|
||||
if (copy_from_user(reloc+total, user_relocs,
|
||||
exec[i].relocation_count * sizeof(*reloc))) {
|
||||
|
@ -975,7 +975,7 @@ validate_exec_list(struct drm_device *dev,
|
|||
invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
|
||||
char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
|
||||
int length; /* limited by fault_in_pages_readable() */
|
||||
|
||||
if (exec[i].flags & invalid_flags)
|
||||
|
@ -1633,7 +1633,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
|
|||
return -ENOMEM;
|
||||
}
|
||||
ret = copy_from_user(exec_list,
|
||||
to_user_ptr(args->buffers_ptr),
|
||||
u64_to_user_ptr(args->buffers_ptr),
|
||||
sizeof(*exec_list) * args->buffer_count);
|
||||
if (ret != 0) {
|
||||
DRM_DEBUG("copy %d exec entries failed %d\n",
|
||||
|
@ -1669,7 +1669,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
|
|||
ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
|
||||
if (!ret) {
|
||||
struct drm_i915_gem_exec_object __user *user_exec_list =
|
||||
to_user_ptr(args->buffers_ptr);
|
||||
u64_to_user_ptr(args->buffers_ptr);
|
||||
|
||||
/* Copy the new buffer offsets back to the user's exec list. */
|
||||
for (i = 0; i < args->buffer_count; i++) {
|
||||
|
@ -1721,7 +1721,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
|
|||
return -ENOMEM;
|
||||
}
|
||||
ret = copy_from_user(exec2_list,
|
||||
to_user_ptr(args->buffers_ptr),
|
||||
u64_to_user_ptr(args->buffers_ptr),
|
||||
sizeof(*exec2_list) * args->buffer_count);
|
||||
if (ret != 0) {
|
||||
DRM_DEBUG("copy %d exec entries failed %d\n",
|
||||
|
@ -1734,7 +1734,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
|
|||
if (!ret) {
|
||||
/* Copy the new buffer offsets back to the user's exec list. */
|
||||
struct drm_i915_gem_exec_object2 __user *user_exec_list =
|
||||
to_user_ptr(args->buffers_ptr);
|
||||
u64_to_user_ptr(args->buffers_ptr);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < args->buffer_count; i++) {
|
||||
|
|
|
@ -440,7 +440,9 @@ static bool
|
|||
gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
|
||||
{
|
||||
return (i + 1 < num &&
|
||||
!(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
|
||||
msgs[i].addr == msgs[i + 1].addr &&
|
||||
!(msgs[i].flags & I2C_M_RD) &&
|
||||
(msgs[i].len == 1 || msgs[i].len == 2) &&
|
||||
(msgs[i + 1].flags & I2C_M_RD));
|
||||
}
|
||||
|
||||
|
|
|
@ -29,11 +29,6 @@
|
|||
#define BO_LOCKED 0x4000
|
||||
#define BO_PINNED 0x2000
|
||||
|
||||
static inline void __user *to_user_ptr(u64 address)
|
||||
{
|
||||
return (void __user *)(uintptr_t)address;
|
||||
}
|
||||
|
||||
static struct msm_gem_submit *submit_create(struct drm_device *dev,
|
||||
struct msm_gem_address_space *aspace,
|
||||
uint32_t nr_bos, uint32_t nr_cmds,
|
||||
|
@ -107,7 +102,7 @@ static int submit_lookup_objects(struct msm_gpu *gpu,
|
|||
struct drm_gem_object *obj;
|
||||
struct msm_gem_object *msm_obj;
|
||||
void __user *userptr =
|
||||
to_user_ptr(args->bos + (i * sizeof(submit_bo)));
|
||||
u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));
|
||||
|
||||
if (copy_from_user_inatomic(&submit_bo, userptr,
|
||||
sizeof(submit_bo))) {
|
||||
|
@ -362,7 +357,7 @@ static int submit_reloc(struct msm_gpu *gpu,
|
|||
for (i = 0; i < nr_relocs; i++) {
|
||||
struct drm_msm_gem_submit_reloc submit_reloc;
|
||||
void __user *userptr =
|
||||
to_user_ptr(relocs + (i * sizeof(submit_reloc)));
|
||||
u64_to_user_ptr(relocs + (i * sizeof(submit_reloc)));
|
||||
uint64_t iova;
|
||||
uint32_t off;
|
||||
bool valid;
|
||||
|
@ -473,7 +468,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
|
|||
for (i = 0; i < args->nr_cmds; i++) {
|
||||
struct drm_msm_gem_submit_cmd submit_cmd;
|
||||
void __user *userptr =
|
||||
to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
|
||||
u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
|
||||
struct msm_gem_object *msm_obj;
|
||||
uint64_t iova;
|
||||
size_t size;
|
||||
|
|
|
@ -352,6 +352,7 @@ static int panel_simple_remove(struct device *dev)
|
|||
drm_panel_remove(&panel->base);
|
||||
|
||||
panel_simple_disable(&panel->base);
|
||||
panel_simple_unprepare(&panel->base);
|
||||
|
||||
if (panel->ddc)
|
||||
put_device(&panel->ddc->dev);
|
||||
|
@ -367,6 +368,7 @@ static void panel_simple_shutdown(struct device *dev)
|
|||
struct panel_simple *panel = dev_get_drvdata(dev);
|
||||
|
||||
panel_simple_disable(&panel->base);
|
||||
panel_simple_unprepare(&panel->base);
|
||||
}
|
||||
|
||||
static const struct drm_display_mode ampire_am800480r3tmqwa1h_mode = {
|
||||
|
|
|
@ -45,34 +45,32 @@ static char *pre_emph_names[] = {
|
|||
|
||||
/***** radeon AUX functions *****/
|
||||
|
||||
/* Atom needs data in little endian format
|
||||
* so swap as appropriate when copying data to
|
||||
* or from atom. Note that atom operates on
|
||||
* dw units.
|
||||
/* Atom needs data in little endian format so swap as appropriate when copying
|
||||
* data to or from atom. Note that atom operates on dw units.
|
||||
*
|
||||
* Use to_le=true when sending data to atom and provide at least
|
||||
* ALIGN(num_bytes,4) bytes in the dst buffer.
|
||||
*
|
||||
* Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
|
||||
* byes in the src buffer.
|
||||
*/
|
||||
void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
|
||||
{
|
||||
#ifdef __BIG_ENDIAN
|
||||
u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
|
||||
u32 *dst32, *src32;
|
||||
u32 src_tmp[5], dst_tmp[5];
|
||||
int i;
|
||||
u8 align_num_bytes = ALIGN(num_bytes, 4);
|
||||
|
||||
memcpy(src_tmp, src, num_bytes);
|
||||
src32 = (u32 *)src_tmp;
|
||||
dst32 = (u32 *)dst_tmp;
|
||||
if (to_le) {
|
||||
for (i = 0; i < ((num_bytes + 3) / 4); i++)
|
||||
dst32[i] = cpu_to_le32(src32[i]);
|
||||
memcpy(dst, dst_tmp, num_bytes);
|
||||
memcpy(src_tmp, src, num_bytes);
|
||||
for (i = 0; i < align_num_bytes / 4; i++)
|
||||
dst_tmp[i] = cpu_to_le32(src_tmp[i]);
|
||||
memcpy(dst, dst_tmp, align_num_bytes);
|
||||
} else {
|
||||
u8 dws = num_bytes & ~3;
|
||||
for (i = 0; i < ((num_bytes + 3) / 4); i++)
|
||||
dst32[i] = le32_to_cpu(src32[i]);
|
||||
memcpy(dst, dst_tmp, dws);
|
||||
if (num_bytes % 4) {
|
||||
for (i = 0; i < (num_bytes % 4); i++)
|
||||
dst[dws+i] = dst_tmp[dws+i];
|
||||
}
|
||||
memcpy(src_tmp, src, align_num_bytes);
|
||||
for (i = 0; i < align_num_bytes / 4; i++)
|
||||
dst_tmp[i] = le32_to_cpu(src_tmp[i]);
|
||||
memcpy(dst, dst_tmp, num_bytes);
|
||||
}
|
||||
#else
|
||||
memcpy(dst, src, num_bytes);
|
||||
|
|
|
@ -226,7 +226,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
|
|||
}
|
||||
|
||||
info->par = rfbdev;
|
||||
info->skip_vt_switch = true;
|
||||
|
||||
ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
|
||||
if (ret) {
|
||||
|
|
|
@ -479,7 +479,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
|
|||
if (b == -1)
|
||||
goto err;
|
||||
|
||||
k->ptr[i] = PTR(ca->buckets[b].gen,
|
||||
k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
|
||||
bucket_to_sector(c, b),
|
||||
ca->sb.nr_this_dev);
|
||||
|
||||
|
|
|
@ -584,7 +584,7 @@ static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey
|
|||
return false;
|
||||
|
||||
for (i = 0; i < KEY_PTRS(l); i++)
|
||||
if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
|
||||
if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
|
||||
PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
|
||||
return false;
|
||||
|
||||
|
|
|
@ -508,7 +508,7 @@ static void journal_reclaim(struct cache_set *c)
|
|||
continue;
|
||||
|
||||
ja->cur_idx = next;
|
||||
k->ptr[n++] = PTR(0,
|
||||
k->ptr[n++] = MAKE_PTR(0,
|
||||
bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
|
||||
ca->sb.nr_this_dev);
|
||||
}
|
||||
|
|
|
@ -257,6 +257,9 @@ static ssize_t at24_read(struct at24_data *at24,
|
|||
if (unlikely(!count))
|
||||
return count;
|
||||
|
||||
if (off + count > at24->chip.byte_len)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* Read data from chip, protecting against concurrent updates
|
||||
* from this host, but not from other I2C masters.
|
||||
|
@ -311,6 +314,9 @@ static ssize_t at24_eeprom_write(struct at24_data *at24, const char *buf,
|
|||
unsigned long timeout, write_time;
|
||||
unsigned next_page;
|
||||
|
||||
if (offset + count > at24->chip.byte_len)
|
||||
return -EINVAL;
|
||||
|
||||
/* Get corresponding I2C address and adjust offset */
|
||||
client = at24_translate_offset(at24, &offset);
|
||||
|
||||
|
|
|
@ -2663,15 +2663,18 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
|
|||
size_t *retlen, const uint8_t *buf)
|
||||
{
|
||||
struct nand_chip *chip = mtd->priv;
|
||||
int chipnr = (int)(to >> chip->chip_shift);
|
||||
struct mtd_oob_ops ops;
|
||||
int ret;
|
||||
|
||||
/* Wait for the device to get ready */
|
||||
panic_nand_wait(mtd, chip, 400);
|
||||
|
||||
/* Grab the device */
|
||||
panic_nand_get_device(chip, mtd, FL_WRITING);
|
||||
|
||||
chip->select_chip(mtd, chipnr);
|
||||
|
||||
/* Wait for the device to get ready */
|
||||
panic_nand_wait(mtd, chip, 400);
|
||||
|
||||
memset(&ops, 0, sizeof(ops));
|
||||
ops.len = len;
|
||||
ops.datbuf = (uint8_t *)buf;
|
||||
|
|
18
drivers/tee/Kconfig
Normal file
18
drivers/tee/Kconfig
Normal file
|
@ -0,0 +1,18 @@
|
|||
# Generic Trusted Execution Environment Configuration
|
||||
config TEE
|
||||
tristate "Trusted Execution Environment support"
|
||||
select DMA_SHARED_BUFFER
|
||||
select GENERIC_ALLOCATOR
|
||||
help
|
||||
This implements a generic interface towards a Trusted Execution
|
||||
Environment (TEE).
|
||||
|
||||
if TEE
|
||||
|
||||
menu "TEE drivers"
|
||||
|
||||
source "drivers/tee/optee/Kconfig"
|
||||
|
||||
endmenu
|
||||
|
||||
endif
|
5
drivers/tee/Makefile
Normal file
5
drivers/tee/Makefile
Normal file
|
@ -0,0 +1,5 @@
|
|||
obj-$(CONFIG_TEE) += tee.o
|
||||
tee-objs += tee_core.o
|
||||
tee-objs += tee_shm.o
|
||||
tee-objs += tee_shm_pool.o
|
||||
obj-$(CONFIG_OPTEE) += optee/
|
7
drivers/tee/optee/Kconfig
Normal file
7
drivers/tee/optee/Kconfig
Normal file
|
@ -0,0 +1,7 @@
|
|||
# OP-TEE Trusted Execution Environment Configuration
|
||||
config OPTEE
|
||||
tristate "OP-TEE"
|
||||
depends on HAVE_ARM_SMCCC
|
||||
help
|
||||
This implements the OP-TEE Trusted Execution Environment (TEE)
|
||||
driver.
|
5
drivers/tee/optee/Makefile
Normal file
5
drivers/tee/optee/Makefile
Normal file
|
@ -0,0 +1,5 @@
|
|||
obj-$(CONFIG_OPTEE) += optee.o
|
||||
optee-objs += core.o
|
||||
optee-objs += call.o
|
||||
optee-objs += rpc.o
|
||||
optee-objs += supp.o
|
444
drivers/tee/optee/call.c
Normal file
444
drivers/tee/optee/call.c
Normal file
|
@ -0,0 +1,444 @@
|
|||
/*
|
||||
* Copyright (c) 2015, Linaro Limited
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
#include <linux/arm-smccc.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/tee_drv.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include "optee_private.h"
|
||||
#include "optee_smc.h"
|
||||
|
||||
struct optee_call_waiter {
|
||||
struct list_head list_node;
|
||||
struct completion c;
|
||||
};
|
||||
|
||||
static void optee_cq_wait_init(struct optee_call_queue *cq,
|
||||
struct optee_call_waiter *w)
|
||||
{
|
||||
/*
|
||||
* We're preparing to make a call to secure world. In case we can't
|
||||
* allocate a thread in secure world we'll end up waiting in
|
||||
* optee_cq_wait_for_completion().
|
||||
*
|
||||
* Normally if there's no contention in secure world the call will
|
||||
* complete and we can cleanup directly with optee_cq_wait_final().
|
||||
*/
|
||||
mutex_lock(&cq->mutex);
|
||||
|
||||
/*
|
||||
* We add ourselves to the queue, but we don't wait. This
|
||||
* guarantees that we don't lose a completion if secure world
|
||||
* returns busy and another thread just exited and try to complete
|
||||
* someone.
|
||||
*/
|
||||
init_completion(&w->c);
|
||||
list_add_tail(&w->list_node, &cq->waiters);
|
||||
|
||||
mutex_unlock(&cq->mutex);
|
||||
}
|
||||
|
||||
static void optee_cq_wait_for_completion(struct optee_call_queue *cq,
|
||||
struct optee_call_waiter *w)
|
||||
{
|
||||
wait_for_completion(&w->c);
|
||||
|
||||
mutex_lock(&cq->mutex);
|
||||
|
||||
/* Move to end of list to get out of the way for other waiters */
|
||||
list_del(&w->list_node);
|
||||
reinit_completion(&w->c);
|
||||
list_add_tail(&w->list_node, &cq->waiters);
|
||||
|
||||
mutex_unlock(&cq->mutex);
|
||||
}
|
||||
|
||||
static void optee_cq_complete_one(struct optee_call_queue *cq)
|
||||
{
|
||||
struct optee_call_waiter *w;
|
||||
|
||||
list_for_each_entry(w, &cq->waiters, list_node) {
|
||||
if (!completion_done(&w->c)) {
|
||||
complete(&w->c);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void optee_cq_wait_final(struct optee_call_queue *cq,
|
||||
struct optee_call_waiter *w)
|
||||
{
|
||||
/*
|
||||
* We're done with the call to secure world. The thread in secure
|
||||
* world that was used for this call is now available for some
|
||||
* other task to use.
|
||||
*/
|
||||
mutex_lock(&cq->mutex);
|
||||
|
||||
/* Get out of the list */
|
||||
list_del(&w->list_node);
|
||||
|
||||
/* Wake up one eventual waiting task */
|
||||
optee_cq_complete_one(cq);
|
||||
|
||||
/*
|
||||
* If we're completed we've got a completion from another task that
|
||||
* was just done with its call to secure world. Since yet another
|
||||
* thread now is available in secure world wake up another eventual
|
||||
* waiting task.
|
||||
*/
|
||||
if (completion_done(&w->c))
|
||||
optee_cq_complete_one(cq);
|
||||
|
||||
mutex_unlock(&cq->mutex);
|
||||
}
|
||||
|
||||
/* Requires the filpstate mutex to be held */
|
||||
static struct optee_session *find_session(struct optee_context_data *ctxdata,
|
||||
u32 session_id)
|
||||
{
|
||||
struct optee_session *sess;
|
||||
|
||||
list_for_each_entry(sess, &ctxdata->sess_list, list_node)
|
||||
if (sess->session_id == session_id)
|
||||
return sess;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world
|
||||
* @ctx: calling context
|
||||
* @parg: physical address of message to pass to secure world
|
||||
*
|
||||
* Does and SMC to OP-TEE in secure world and handles eventual resulting
|
||||
* Remote Procedure Calls (RPC) from OP-TEE.
|
||||
*
|
||||
* Returns return code from secure world, 0 is OK
|
||||
*/
|
||||
u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
|
||||
{
|
||||
struct optee *optee = tee_get_drvdata(ctx->teedev);
|
||||
struct optee_call_waiter w;
|
||||
struct optee_rpc_param param = { };
|
||||
u32 ret;
|
||||
|
||||
param.a0 = OPTEE_SMC_CALL_WITH_ARG;
|
||||
reg_pair_from_64(¶m.a1, ¶m.a2, parg);
|
||||
/* Initialize waiter */
|
||||
optee_cq_wait_init(&optee->call_queue, &w);
|
||||
while (true) {
|
||||
struct arm_smccc_res res;
|
||||
|
||||
optee->invoke_fn(param.a0, param.a1, param.a2, param.a3,
|
||||
param.a4, param.a5, param.a6, param.a7,
|
||||
&res);
|
||||
|
||||
if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
|
||||
/*
|
||||
* Out of threads in secure world, wait for a thread
|
||||
* become available.
|
||||
*/
|
||||
optee_cq_wait_for_completion(&optee->call_queue, &w);
|
||||
} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
|
||||
param.a0 = res.a0;
|
||||
param.a1 = res.a1;
|
||||
param.a2 = res.a2;
|
||||
param.a3 = res.a3;
|
||||
optee_handle_rpc(ctx, ¶m);
|
||||
} else {
|
||||
ret = res.a0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* We're done with our thread in secure world, if there's any
|
||||
* thread waiters wake up one.
|
||||
*/
|
||||
optee_cq_wait_final(&optee->call_queue, &w);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
|
||||
struct optee_msg_arg **msg_arg,
|
||||
phys_addr_t *msg_parg)
|
||||
{
|
||||
int rc;
|
||||
struct tee_shm *shm;
|
||||
struct optee_msg_arg *ma;
|
||||
|
||||
shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params),
|
||||
TEE_SHM_MAPPED);
|
||||
if (IS_ERR(shm))
|
||||
return shm;
|
||||
|
||||
ma = tee_shm_get_va(shm, 0);
|
||||
if (IS_ERR(ma)) {
|
||||
rc = PTR_ERR(ma);
|
||||
goto out;
|
||||
}
|
||||
|
||||
rc = tee_shm_get_pa(shm, 0, msg_parg);
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
|
||||
ma->num_params = num_params;
|
||||
*msg_arg = ma;
|
||||
out:
|
||||
if (rc) {
|
||||
tee_shm_free(shm);
|
||||
return ERR_PTR(rc);
|
||||
}
|
||||
|
||||
return shm;
|
||||
}
|
||||
|
||||
int optee_open_session(struct tee_context *ctx,
|
||||
struct tee_ioctl_open_session_arg *arg,
|
||||
struct tee_param *param)
|
||||
{
|
||||
struct optee_context_data *ctxdata = ctx->data;
|
||||
int rc;
|
||||
struct tee_shm *shm;
|
||||
struct optee_msg_arg *msg_arg;
|
||||
phys_addr_t msg_parg;
|
||||
struct optee_session *sess = NULL;
|
||||
|
||||
/* +2 for the meta parameters added below */
|
||||
shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
|
||||
if (IS_ERR(shm))
|
||||
return PTR_ERR(shm);
|
||||
|
||||
msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
|
||||
msg_arg->cancel_id = arg->cancel_id;
|
||||
|
||||
/*
|
||||
* Initialize and add the meta parameters needed when opening a
|
||||
* session.
|
||||
*/
|
||||
msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
|
||||
OPTEE_MSG_ATTR_META;
|
||||
msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
|
||||
OPTEE_MSG_ATTR_META;
|
||||
memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
|
||||
memcpy(&msg_arg->params[1].u.value, arg->uuid, sizeof(arg->clnt_uuid));
|
||||
msg_arg->params[1].u.value.c = arg->clnt_login;
|
||||
|
||||
rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
sess = kzalloc(sizeof(*sess), GFP_KERNEL);
|
||||
if (!sess) {
|
||||
rc = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (optee_do_call_with_arg(ctx, msg_parg)) {
|
||||
msg_arg->ret = TEEC_ERROR_COMMUNICATION;
|
||||
msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
|
||||
}
|
||||
|
||||
if (msg_arg->ret == TEEC_SUCCESS) {
|
||||
/* A new session has been created, add it to the list. */
|
||||
sess->session_id = msg_arg->session;
|
||||
mutex_lock(&ctxdata->mutex);
|
||||
list_add(&sess->list_node, &ctxdata->sess_list);
|
||||
mutex_unlock(&ctxdata->mutex);
|
||||
} else {
|
||||
kfree(sess);
|
||||
}
|
||||
|
||||
if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) {
|
||||
arg->ret = TEEC_ERROR_COMMUNICATION;
|
||||
arg->ret_origin = TEEC_ORIGIN_COMMS;
|
||||
/* Close session again to avoid leakage */
|
||||
optee_close_session(ctx, msg_arg->session);
|
||||
} else {
|
||||
arg->session = msg_arg->session;
|
||||
arg->ret = msg_arg->ret;
|
||||
arg->ret_origin = msg_arg->ret_origin;
|
||||
}
|
||||
out:
|
||||
tee_shm_free(shm);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
int optee_close_session(struct tee_context *ctx, u32 session)
|
||||
{
|
||||
struct optee_context_data *ctxdata = ctx->data;
|
||||
struct tee_shm *shm;
|
||||
struct optee_msg_arg *msg_arg;
|
||||
phys_addr_t msg_parg;
|
||||
struct optee_session *sess;
|
||||
|
||||
/* Check that the session is valid and remove it from the list */
|
||||
mutex_lock(&ctxdata->mutex);
|
||||
sess = find_session(ctxdata, session);
|
||||
if (sess)
|
||||
list_del(&sess->list_node);
|
||||
mutex_unlock(&ctxdata->mutex);
|
||||
if (!sess)
|
||||
return -EINVAL;
|
||||
kfree(sess);
|
||||
|
||||
shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
|
||||
if (IS_ERR(shm))
|
||||
return PTR_ERR(shm);
|
||||
|
||||
msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
|
||||
msg_arg->session = session;
|
||||
optee_do_call_with_arg(ctx, msg_parg);
|
||||
|
||||
tee_shm_free(shm);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
|
||||
struct tee_param *param)
|
||||
{
|
||||
struct optee_context_data *ctxdata = ctx->data;
|
||||
struct tee_shm *shm;
|
||||
struct optee_msg_arg *msg_arg;
|
||||
phys_addr_t msg_parg;
|
||||
struct optee_session *sess;
|
||||
int rc;
|
||||
|
||||
/* Check that the session is valid */
|
||||
mutex_lock(&ctxdata->mutex);
|
||||
sess = find_session(ctxdata, arg->session);
|
||||
mutex_unlock(&ctxdata->mutex);
|
||||
if (!sess)
|
||||
return -EINVAL;
|
||||
|
||||
shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg);
|
||||
if (IS_ERR(shm))
|
||||
return PTR_ERR(shm);
|
||||
msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
|
||||
msg_arg->func = arg->func;
|
||||
msg_arg->session = arg->session;
|
||||
msg_arg->cancel_id = arg->cancel_id;
|
||||
|
||||
rc = optee_to_msg_param(msg_arg->params, arg->num_params, param);
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
if (optee_do_call_with_arg(ctx, msg_parg)) {
|
||||
msg_arg->ret = TEEC_ERROR_COMMUNICATION;
|
||||
msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
|
||||
}
|
||||
|
||||
if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) {
|
||||
msg_arg->ret = TEEC_ERROR_COMMUNICATION;
|
||||
msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
|
||||
}
|
||||
|
||||
arg->ret = msg_arg->ret;
|
||||
arg->ret_origin = msg_arg->ret_origin;
|
||||
out:
|
||||
tee_shm_free(shm);
|
||||
return rc;
|
||||
}
|
||||
|
||||
int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
|
||||
{
|
||||
struct optee_context_data *ctxdata = ctx->data;
|
||||
struct tee_shm *shm;
|
||||
struct optee_msg_arg *msg_arg;
|
||||
phys_addr_t msg_parg;
|
||||
struct optee_session *sess;
|
||||
|
||||
/* Check that the session is valid */
|
||||
mutex_lock(&ctxdata->mutex);
|
||||
sess = find_session(ctxdata, session);
|
||||
mutex_unlock(&ctxdata->mutex);
|
||||
if (!sess)
|
||||
return -EINVAL;
|
||||
|
||||
shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
|
||||
if (IS_ERR(shm))
|
||||
return PTR_ERR(shm);
|
||||
|
||||
msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
|
||||
msg_arg->session = session;
|
||||
msg_arg->cancel_id = cancel_id;
|
||||
optee_do_call_with_arg(ctx, msg_parg);
|
||||
|
||||
tee_shm_free(shm);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* optee_enable_shm_cache() - Enables caching of some shared memory allocation
|
||||
* in OP-TEE
|
||||
* @optee: main service struct
|
||||
*/
|
||||
void optee_enable_shm_cache(struct optee *optee)
|
||||
{
|
||||
struct optee_call_waiter w;
|
||||
|
||||
/* We need to retry until secure world isn't busy. */
|
||||
optee_cq_wait_init(&optee->call_queue, &w);
|
||||
while (true) {
|
||||
struct arm_smccc_res res;
|
||||
|
||||
optee->invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
|
||||
0, &res);
|
||||
if (res.a0 == OPTEE_SMC_RETURN_OK)
|
||||
break;
|
||||
optee_cq_wait_for_completion(&optee->call_queue, &w);
|
||||
}
|
||||
optee_cq_wait_final(&optee->call_queue, &w);
|
||||
}
|
||||
|
||||
/**
|
||||
* optee_disable_shm_cache() - Disables caching of some shared memory allocation
|
||||
* in OP-TEE
|
||||
* @optee: main service struct
|
||||
*/
|
||||
void optee_disable_shm_cache(struct optee *optee)
|
||||
{
|
||||
struct optee_call_waiter w;
|
||||
|
||||
/* We need to retry until secure world isn't busy. */
|
||||
optee_cq_wait_init(&optee->call_queue, &w);
|
||||
while (true) {
|
||||
union {
|
||||
struct arm_smccc_res smccc;
|
||||
struct optee_smc_disable_shm_cache_result result;
|
||||
} res;
|
||||
|
||||
optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
|
||||
0, &res.smccc);
|
||||
if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
|
||||
break; /* All shm's freed */
|
||||
if (res.result.status == OPTEE_SMC_RETURN_OK) {
|
||||
struct tee_shm *shm;
|
||||
|
||||
shm = reg_pair_to_ptr(res.result.shm_upper32,
|
||||
res.result.shm_lower32);
|
||||
tee_shm_free(shm);
|
||||
} else {
|
||||
optee_cq_wait_for_completion(&optee->call_queue, &w);
|
||||
}
|
||||
}
|
||||
optee_cq_wait_final(&optee->call_queue, &w);
|
||||
}
|
622
drivers/tee/optee/core.c
Normal file
622
drivers/tee/optee/core.c
Normal file
|
@ -0,0 +1,622 @@
|
|||
/*
|
||||
* Copyright (c) 2015, Linaro Limited
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/arm-smccc.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/tee_drv.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include "optee_private.h"
|
||||
#include "optee_smc.h"
|
||||
|
||||
#define DRIVER_NAME "optee"
|
||||
|
||||
#define OPTEE_SHM_NUM_PRIV_PAGES 1
|
||||
|
||||
/**
|
||||
* optee_from_msg_param() - convert from OPTEE_MSG parameters to
|
||||
* struct tee_param
|
||||
* @params: subsystem internal parameter representation
|
||||
* @num_params: number of elements in the parameter arrays
|
||||
* @msg_params: OPTEE_MSG parameters
|
||||
* Returns 0 on success or <0 on failure
|
||||
*/
|
||||
int optee_from_msg_param(struct tee_param *params, size_t num_params,
|
||||
const struct optee_msg_param *msg_params)
|
||||
{
|
||||
int rc;
|
||||
size_t n;
|
||||
struct tee_shm *shm;
|
||||
phys_addr_t pa;
|
||||
|
||||
for (n = 0; n < num_params; n++) {
|
||||
struct tee_param *p = params + n;
|
||||
const struct optee_msg_param *mp = msg_params + n;
|
||||
u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
|
||||
|
||||
switch (attr) {
|
||||
case OPTEE_MSG_ATTR_TYPE_NONE:
|
||||
p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
|
||||
memset(&p->u, 0, sizeof(p->u));
|
||||
break;
|
||||
case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
|
||||
case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
|
||||
case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
|
||||
p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT +
|
||||
attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
|
||||
p->u.value.a = mp->u.value.a;
|
||||
p->u.value.b = mp->u.value.b;
|
||||
p->u.value.c = mp->u.value.c;
|
||||
break;
|
||||
case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
|
||||
case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
|
||||
case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
|
||||
p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
|
||||
attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
|
||||
p->u.memref.size = mp->u.tmem.size;
|
||||
shm = (struct tee_shm *)(unsigned long)
|
||||
mp->u.tmem.shm_ref;
|
||||
if (!shm) {
|
||||
p->u.memref.shm_offs = 0;
|
||||
p->u.memref.shm = NULL;
|
||||
break;
|
||||
}
|
||||
rc = tee_shm_get_pa(shm, 0, &pa);
|
||||
if (rc)
|
||||
return rc;
|
||||
p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
|
||||
p->u.memref.shm = shm;
|
||||
|
||||
/* Check that the memref is covered by the shm object */
|
||||
if (p->u.memref.size) {
|
||||
size_t o = p->u.memref.shm_offs +
|
||||
p->u.memref.size - 1;
|
||||
|
||||
rc = tee_shm_get_pa(shm, o, NULL);
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters
|
||||
* @msg_params: OPTEE_MSG parameters
|
||||
* @num_params: number of elements in the parameter arrays
|
||||
* @params: subsystem itnernal parameter representation
|
||||
* Returns 0 on success or <0 on failure
|
||||
*/
|
||||
int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
|
||||
const struct tee_param *params)
|
||||
{
|
||||
int rc;
|
||||
size_t n;
|
||||
phys_addr_t pa;
|
||||
|
||||
for (n = 0; n < num_params; n++) {
|
||||
const struct tee_param *p = params + n;
|
||||
struct optee_msg_param *mp = msg_params + n;
|
||||
|
||||
switch (p->attr) {
|
||||
case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
|
||||
mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
|
||||
memset(&mp->u, 0, sizeof(mp->u));
|
||||
break;
|
||||
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
|
||||
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
|
||||
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
|
||||
mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr -
|
||||
TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
|
||||
mp->u.value.a = p->u.value.a;
|
||||
mp->u.value.b = p->u.value.b;
|
||||
mp->u.value.c = p->u.value.c;
|
||||
break;
|
||||
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
|
||||
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
|
||||
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
|
||||
mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT +
|
||||
p->attr -
|
||||
TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
|
||||
mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
|
||||
mp->u.tmem.size = p->u.memref.size;
|
||||
if (!p->u.memref.shm) {
|
||||
mp->u.tmem.buf_ptr = 0;
|
||||
break;
|
||||
}
|
||||
rc = tee_shm_get_pa(p->u.memref.shm,
|
||||
p->u.memref.shm_offs, &pa);
|
||||
if (rc)
|
||||
return rc;
|
||||
mp->u.tmem.buf_ptr = pa;
|
||||
mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
|
||||
OPTEE_MSG_ATTR_CACHE_SHIFT;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void optee_get_version(struct tee_device *teedev,
|
||||
struct tee_ioctl_version_data *vers)
|
||||
{
|
||||
struct tee_ioctl_version_data v = {
|
||||
.impl_id = TEE_IMPL_ID_OPTEE,
|
||||
.impl_caps = TEE_OPTEE_CAP_TZ,
|
||||
.gen_caps = TEE_GEN_CAP_GP,
|
||||
};
|
||||
*vers = v;
|
||||
}
|
||||
|
||||
static int optee_open(struct tee_context *ctx)
|
||||
{
|
||||
struct optee_context_data *ctxdata;
|
||||
struct tee_device *teedev = ctx->teedev;
|
||||
struct optee *optee = tee_get_drvdata(teedev);
|
||||
|
||||
ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
|
||||
if (!ctxdata)
|
||||
return -ENOMEM;
|
||||
|
||||
if (teedev == optee->supp_teedev) {
|
||||
bool busy = true;
|
||||
|
||||
mutex_lock(&optee->supp.ctx_mutex);
|
||||
if (!optee->supp.ctx) {
|
||||
busy = false;
|
||||
optee->supp.ctx = ctx;
|
||||
}
|
||||
mutex_unlock(&optee->supp.ctx_mutex);
|
||||
if (busy) {
|
||||
kfree(ctxdata);
|
||||
return -EBUSY;
|
||||
}
|
||||
}
|
||||
|
||||
mutex_init(&ctxdata->mutex);
|
||||
INIT_LIST_HEAD(&ctxdata->sess_list);
|
||||
|
||||
ctx->data = ctxdata;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void optee_release(struct tee_context *ctx)
|
||||
{
|
||||
struct optee_context_data *ctxdata = ctx->data;
|
||||
struct tee_device *teedev = ctx->teedev;
|
||||
struct optee *optee = tee_get_drvdata(teedev);
|
||||
struct tee_shm *shm;
|
||||
struct optee_msg_arg *arg = NULL;
|
||||
phys_addr_t parg;
|
||||
struct optee_session *sess;
|
||||
struct optee_session *sess_tmp;
|
||||
|
||||
if (!ctxdata)
|
||||
return;
|
||||
|
||||
shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg), TEE_SHM_MAPPED);
|
||||
if (!IS_ERR(shm)) {
|
||||
arg = tee_shm_get_va(shm, 0);
|
||||
/*
|
||||
* If va2pa fails for some reason, we can't call
|
||||
* optee_close_session(), only free the memory. Secure OS
|
||||
* will leak sessions and finally refuse more sessions, but
|
||||
* we will at least let normal world reclaim its memory.
|
||||
*/
|
||||
if (!IS_ERR(arg))
|
||||
tee_shm_va2pa(shm, arg, &parg);
|
||||
}
|
||||
|
||||
list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list,
|
||||
list_node) {
|
||||
list_del(&sess->list_node);
|
||||
if (!IS_ERR_OR_NULL(arg)) {
|
||||
memset(arg, 0, sizeof(*arg));
|
||||
arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
|
||||
arg->session = sess->session_id;
|
||||
optee_do_call_with_arg(ctx, parg);
|
||||
}
|
||||
kfree(sess);
|
||||
}
|
||||
kfree(ctxdata);
|
||||
|
||||
if (!IS_ERR(shm))
|
||||
tee_shm_free(shm);
|
||||
|
||||
ctx->data = NULL;
|
||||
|
||||
if (teedev == optee->supp_teedev) {
|
||||
mutex_lock(&optee->supp.ctx_mutex);
|
||||
optee->supp.ctx = NULL;
|
||||
mutex_unlock(&optee->supp.ctx_mutex);
|
||||
}
|
||||
}
|
||||
|
||||
static struct tee_driver_ops optee_ops = {
|
||||
.get_version = optee_get_version,
|
||||
.open = optee_open,
|
||||
.release = optee_release,
|
||||
.open_session = optee_open_session,
|
||||
.close_session = optee_close_session,
|
||||
.invoke_func = optee_invoke_func,
|
||||
.cancel_req = optee_cancel_req,
|
||||
};
|
||||
|
||||
static struct tee_desc optee_desc = {
|
||||
.name = DRIVER_NAME "-clnt",
|
||||
.ops = &optee_ops,
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static struct tee_driver_ops optee_supp_ops = {
|
||||
.get_version = optee_get_version,
|
||||
.open = optee_open,
|
||||
.release = optee_release,
|
||||
.supp_recv = optee_supp_recv,
|
||||
.supp_send = optee_supp_send,
|
||||
};
|
||||
|
||||
static struct tee_desc optee_supp_desc = {
|
||||
.name = DRIVER_NAME "-supp",
|
||||
.ops = &optee_supp_ops,
|
||||
.owner = THIS_MODULE,
|
||||
.flags = TEE_DESC_PRIVILEGED,
|
||||
};
|
||||
|
||||
static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
|
||||
{
|
||||
struct arm_smccc_res res;
|
||||
|
||||
invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
|
||||
|
||||
if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
|
||||
res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
|
||||
{
|
||||
union {
|
||||
struct arm_smccc_res smccc;
|
||||
struct optee_smc_calls_revision_result result;
|
||||
} res;
|
||||
|
||||
invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
|
||||
|
||||
if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
|
||||
(int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
|
||||
u32 *sec_caps)
|
||||
{
|
||||
union {
|
||||
struct arm_smccc_res smccc;
|
||||
struct optee_smc_exchange_capabilities_result result;
|
||||
} res;
|
||||
u32 a1 = 0;
|
||||
|
||||
/*
|
||||
* TODO This isn't enough to tell if it's UP system (from kernel
|
||||
* point of view) or not, is_smp() returns the the information
|
||||
* needed, but can't be called directly from here.
|
||||
*/
|
||||
if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
|
||||
a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;
|
||||
|
||||
invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
|
||||
&res.smccc);
|
||||
|
||||
if (res.result.status != OPTEE_SMC_RETURN_OK)
|
||||
return false;
|
||||
|
||||
*sec_caps = res.result.capabilities;
|
||||
return true;
|
||||
}
|
||||
|
||||
static struct tee_shm_pool *
|
||||
optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
|
||||
{
|
||||
union {
|
||||
struct arm_smccc_res smccc;
|
||||
struct optee_smc_get_shm_config_result result;
|
||||
} res;
|
||||
struct tee_shm_pool *pool;
|
||||
unsigned long vaddr;
|
||||
phys_addr_t paddr;
|
||||
size_t size;
|
||||
phys_addr_t begin;
|
||||
phys_addr_t end;
|
||||
void *va;
|
||||
struct tee_shm_pool_mem_info priv_info;
|
||||
struct tee_shm_pool_mem_info dmabuf_info;
|
||||
|
||||
invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
|
||||
if (res.result.status != OPTEE_SMC_RETURN_OK) {
|
||||
pr_info("shm service not available\n");
|
||||
return ERR_PTR(-ENOENT);
|
||||
}
|
||||
|
||||
if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
|
||||
pr_err("only normal cached shared memory supported\n");
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
begin = roundup(res.result.start, PAGE_SIZE);
|
||||
end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
|
||||
paddr = begin;
|
||||
size = end - begin;
|
||||
|
||||
if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
|
||||
pr_err("too small shared memory area\n");
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
va = memremap(paddr, size, MEMREMAP_WB);
|
||||
if (!va) {
|
||||
pr_err("shared memory ioremap failed\n");
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
vaddr = (unsigned long)va;
|
||||
|
||||
priv_info.vaddr = vaddr;
|
||||
priv_info.paddr = paddr;
|
||||
priv_info.size = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
|
||||
dmabuf_info.vaddr = vaddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
|
||||
dmabuf_info.paddr = paddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
|
||||
dmabuf_info.size = size - OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
|
||||
|
||||
pool = tee_shm_pool_alloc_res_mem(&priv_info, &dmabuf_info);
|
||||
if (IS_ERR(pool)) {
|
||||
memunmap(va);
|
||||
goto out;
|
||||
}
|
||||
|
||||
*memremaped_shm = va;
|
||||
out:
|
||||
return pool;
|
||||
}
|
||||
|
||||
/* Simple wrapper functions to be able to use a function pointer */
|
||||
static void optee_smccc_smc(unsigned long a0, unsigned long a1,
|
||||
unsigned long a2, unsigned long a3,
|
||||
unsigned long a4, unsigned long a5,
|
||||
unsigned long a6, unsigned long a7,
|
||||
struct arm_smccc_res *res)
|
||||
{
|
||||
arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
|
||||
}
|
||||
|
||||
static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
|
||||
unsigned long a2, unsigned long a3,
|
||||
unsigned long a4, unsigned long a5,
|
||||
unsigned long a6, unsigned long a7,
|
||||
struct arm_smccc_res *res)
|
||||
{
|
||||
arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
|
||||
}
|
||||
|
||||
static optee_invoke_fn *get_invoke_func(struct device_node *np)
|
||||
{
|
||||
const char *method;
|
||||
|
||||
pr_info("probing for conduit method from DT.\n");
|
||||
|
||||
if (of_property_read_string(np, "method", &method)) {
|
||||
pr_warn("missing \"method\" property\n");
|
||||
return ERR_PTR(-ENXIO);
|
||||
}
|
||||
|
||||
if (!strcmp("hvc", method))
|
||||
return optee_smccc_hvc;
|
||||
else if (!strcmp("smc", method))
|
||||
return optee_smccc_smc;
|
||||
|
||||
pr_warn("invalid \"method\" property: %s\n", method);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
static struct optee *optee_probe(struct device_node *np)
|
||||
{
|
||||
optee_invoke_fn *invoke_fn;
|
||||
struct tee_shm_pool *pool;
|
||||
struct optee *optee = NULL;
|
||||
void *memremaped_shm = NULL;
|
||||
struct tee_device *teedev;
|
||||
u32 sec_caps;
|
||||
int rc;
|
||||
|
||||
invoke_fn = get_invoke_func(np);
|
||||
if (IS_ERR(invoke_fn))
|
||||
return (void *)invoke_fn;
|
||||
|
||||
if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
|
||||
pr_warn("api uid mismatch\n");
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
|
||||
pr_warn("api revision mismatch\n");
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
|
||||
pr_warn("capabilities mismatch\n");
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
/*
|
||||
* We have no other option for shared memory, if secure world
|
||||
* doesn't have any reserved memory we can use we can't continue.
|
||||
*/
|
||||
if (!(sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
|
||||
if (IS_ERR(pool))
|
||||
return (void *)pool;
|
||||
|
||||
optee = kzalloc(sizeof(*optee), GFP_KERNEL);
|
||||
if (!optee) {
|
||||
rc = -ENOMEM;
|
||||
goto err;
|
||||
}
|
||||
|
||||
optee->invoke_fn = invoke_fn;
|
||||
|
||||
teedev = tee_device_alloc(&optee_desc, NULL, pool, optee);
|
||||
if (IS_ERR(teedev)) {
|
||||
rc = PTR_ERR(teedev);
|
||||
goto err;
|
||||
}
|
||||
optee->teedev = teedev;
|
||||
|
||||
teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
|
||||
if (IS_ERR(teedev)) {
|
||||
rc = PTR_ERR(teedev);
|
||||
goto err;
|
||||
}
|
||||
optee->supp_teedev = teedev;
|
||||
|
||||
rc = tee_device_register(optee->teedev);
|
||||
if (rc)
|
||||
goto err;
|
||||
|
||||
rc = tee_device_register(optee->supp_teedev);
|
||||
if (rc)
|
||||
goto err;
|
||||
|
||||
mutex_init(&optee->call_queue.mutex);
|
||||
INIT_LIST_HEAD(&optee->call_queue.waiters);
|
||||
optee_wait_queue_init(&optee->wait_queue);
|
||||
optee_supp_init(&optee->supp);
|
||||
optee->memremaped_shm = memremaped_shm;
|
||||
optee->pool = pool;
|
||||
|
||||
optee_enable_shm_cache(optee);
|
||||
|
||||
pr_info("initialized driver\n");
|
||||
return optee;
|
||||
err:
|
||||
if (optee) {
|
||||
/*
|
||||
* tee_device_unregister() is safe to call even if the
|
||||
* devices hasn't been registered with
|
||||
* tee_device_register() yet.
|
||||
*/
|
||||
tee_device_unregister(optee->supp_teedev);
|
||||
tee_device_unregister(optee->teedev);
|
||||
kfree(optee);
|
||||
}
|
||||
if (pool)
|
||||
tee_shm_pool_free(pool);
|
||||
if (memremaped_shm)
|
||||
memunmap(memremaped_shm);
|
||||
return ERR_PTR(rc);
|
||||
}
|
||||
|
||||
static void optee_remove(struct optee *optee)
|
||||
{
|
||||
/*
|
||||
* Ask OP-TEE to free all cached shared memory objects to decrease
|
||||
* reference counters and also avoid wild pointers in secure world
|
||||
* into the old shared memory range.
|
||||
*/
|
||||
optee_disable_shm_cache(optee);
|
||||
|
||||
/*
|
||||
* The two devices has to be unregistered before we can free the
|
||||
* other resources.
|
||||
*/
|
||||
tee_device_unregister(optee->supp_teedev);
|
||||
tee_device_unregister(optee->teedev);
|
||||
|
||||
tee_shm_pool_free(optee->pool);
|
||||
if (optee->memremaped_shm)
|
||||
memunmap(optee->memremaped_shm);
|
||||
optee_wait_queue_exit(&optee->wait_queue);
|
||||
optee_supp_uninit(&optee->supp);
|
||||
mutex_destroy(&optee->call_queue.mutex);
|
||||
|
||||
kfree(optee);
|
||||
}
|
||||
|
||||
static const struct of_device_id optee_match[] = {
|
||||
{ .compatible = "linaro,optee-tz" },
|
||||
{},
|
||||
};
|
||||
|
||||
static struct optee *optee_svc;
|
||||
|
||||
static int __init optee_driver_init(void)
|
||||
{
|
||||
struct device_node *fw_np;
|
||||
struct device_node *np;
|
||||
struct optee *optee;
|
||||
|
||||
/* Node is supposed to be below /firmware */
|
||||
fw_np = of_find_node_by_name(NULL, "firmware");
|
||||
if (!fw_np)
|
||||
return -ENODEV;
|
||||
|
||||
np = of_find_matching_node(fw_np, optee_match);
|
||||
of_node_put(fw_np);
|
||||
if (!np)
|
||||
return -ENODEV;
|
||||
|
||||
optee = optee_probe(np);
|
||||
of_node_put(np);
|
||||
|
||||
if (IS_ERR(optee))
|
||||
return PTR_ERR(optee);
|
||||
|
||||
optee_svc = optee;
|
||||
|
||||
return 0;
|
||||
}
|
||||
module_init(optee_driver_init);
|
||||
|
||||
static void __exit optee_driver_exit(void)
|
||||
{
|
||||
struct optee *optee = optee_svc;
|
||||
|
||||
optee_svc = NULL;
|
||||
if (optee)
|
||||
optee_remove(optee);
|
||||
}
|
||||
module_exit(optee_driver_exit);
|
||||
|
||||
MODULE_AUTHOR("Linaro");
|
||||
MODULE_DESCRIPTION("OP-TEE driver");
|
||||
MODULE_SUPPORTED_DEVICE("");
|
||||
MODULE_VERSION("1.0");
|
||||
MODULE_LICENSE("GPL v2");
|
418
drivers/tee/optee/optee_msg.h
Normal file
418
drivers/tee/optee/optee_msg.h
Normal file
|
@ -0,0 +1,418 @@
|
|||
/*
|
||||
* Copyright (c) 2015-2016, Linaro Limited
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice,
|
||||
* this list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef _OPTEE_MSG_H
|
||||
#define _OPTEE_MSG_H
|
||||
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
/*
|
||||
* This file defines the OP-TEE message protocol used to communicate
|
||||
* with an instance of OP-TEE running in secure world.
|
||||
*
|
||||
* This file is divided into three sections.
|
||||
* 1. Formatting of messages.
|
||||
* 2. Requests from normal world
|
||||
* 3. Requests from secure world, Remote Procedure Call (RPC), handled by
|
||||
* tee-supplicant.
|
||||
*/
|
||||
|
||||
/*****************************************************************************
|
||||
* Part 1 - formatting of messages
|
||||
*****************************************************************************/
|
||||
|
||||
#define OPTEE_MSG_ATTR_TYPE_NONE 0x0
|
||||
#define OPTEE_MSG_ATTR_TYPE_VALUE_INPUT 0x1
|
||||
#define OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT 0x2
|
||||
#define OPTEE_MSG_ATTR_TYPE_VALUE_INOUT 0x3
|
||||
#define OPTEE_MSG_ATTR_TYPE_RMEM_INPUT 0x5
|
||||
#define OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT 0x6
|
||||
#define OPTEE_MSG_ATTR_TYPE_RMEM_INOUT 0x7
|
||||
#define OPTEE_MSG_ATTR_TYPE_TMEM_INPUT 0x9
|
||||
#define OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT 0xa
|
||||
#define OPTEE_MSG_ATTR_TYPE_TMEM_INOUT 0xb
|
||||
|
||||
#define OPTEE_MSG_ATTR_TYPE_MASK GENMASK(7, 0)
|
||||
|
||||
/*
|
||||
* Meta parameter to be absorbed by the Secure OS and not passed
|
||||
* to the Trusted Application.
|
||||
*
|
||||
* Currently only used with OPTEE_MSG_CMD_OPEN_SESSION.
|
||||
*/
|
||||
#define OPTEE_MSG_ATTR_META BIT(8)
|
||||
|
||||
/*
|
||||
* The temporary shared memory object is not physically contigous and this
|
||||
* temp memref is followed by another fragment until the last temp memref
|
||||
* that doesn't have this bit set.
|
||||
*/
|
||||
#define OPTEE_MSG_ATTR_FRAGMENT BIT(9)
|
||||
|
||||
/*
|
||||
* Memory attributes for caching passed with temp memrefs. The actual value
|
||||
* used is defined outside the message protocol with the exception of
|
||||
* OPTEE_MSG_ATTR_CACHE_PREDEFINED which means the attributes already
|
||||
* defined for the memory range should be used. If optee_smc.h is used as
|
||||
* bearer of this protocol OPTEE_SMC_SHM_* is used for values.
|
||||
*/
|
||||
#define OPTEE_MSG_ATTR_CACHE_SHIFT 16
|
||||
#define OPTEE_MSG_ATTR_CACHE_MASK GENMASK(2, 0)
|
||||
#define OPTEE_MSG_ATTR_CACHE_PREDEFINED 0
|
||||
|
||||
/*
|
||||
* Same values as TEE_LOGIN_* from TEE Internal API
|
||||
*/
|
||||
#define OPTEE_MSG_LOGIN_PUBLIC 0x00000000
|
||||
#define OPTEE_MSG_LOGIN_USER 0x00000001
|
||||
#define OPTEE_MSG_LOGIN_GROUP 0x00000002
|
||||
#define OPTEE_MSG_LOGIN_APPLICATION 0x00000004
|
||||
#define OPTEE_MSG_LOGIN_APPLICATION_USER 0x00000005
|
||||
#define OPTEE_MSG_LOGIN_APPLICATION_GROUP 0x00000006
|
||||
|
||||
/**
|
||||
* struct optee_msg_param_tmem - temporary memory reference parameter
|
||||
* @buf_ptr: Address of the buffer
|
||||
* @size: Size of the buffer
|
||||
* @shm_ref: Temporary shared memory reference, pointer to a struct tee_shm
|
||||
*
|
||||
* Secure and normal world communicates pointers as physical address
|
||||
* instead of the virtual address. This is because secure and normal world
|
||||
* have completely independent memory mapping. Normal world can even have a
|
||||
* hypervisor which need to translate the guest physical address (AKA IPA
|
||||
* in ARM documentation) to a real physical address before passing the
|
||||
* structure to secure world.
|
||||
*/
|
||||
struct optee_msg_param_tmem {
|
||||
u64 buf_ptr;
|
||||
u64 size;
|
||||
u64 shm_ref;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct optee_msg_param_rmem - registered memory reference parameter
|
||||
* @offs: Offset into shared memory reference
|
||||
* @size: Size of the buffer
|
||||
* @shm_ref: Shared memory reference, pointer to a struct tee_shm
|
||||
*/
|
||||
struct optee_msg_param_rmem {
|
||||
u64 offs;
|
||||
u64 size;
|
||||
u64 shm_ref;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct optee_msg_param_value - opaque value parameter
|
||||
*
|
||||
* Value parameters are passed unchecked between normal and secure world.
|
||||
*/
|
||||
struct optee_msg_param_value {
|
||||
u64 a;
|
||||
u64 b;
|
||||
u64 c;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct optee_msg_param - parameter used together with struct optee_msg_arg
|
||||
* @attr: attributes
|
||||
* @tmem: parameter by temporary memory reference
|
||||
* @rmem: parameter by registered memory reference
|
||||
* @value: parameter by opaque value
|
||||
*
|
||||
* @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in
|
||||
* the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value,
|
||||
* OPTEE_MSG_ATTR_TYPE_TMEM_* indicates tmem and
|
||||
* OPTEE_MSG_ATTR_TYPE_RMEM_* indicates rmem.
|
||||
* OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used.
|
||||
*/
|
||||
struct optee_msg_param {
|
||||
u64 attr;
|
||||
union {
|
||||
struct optee_msg_param_tmem tmem;
|
||||
struct optee_msg_param_rmem rmem;
|
||||
struct optee_msg_param_value value;
|
||||
} u;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct optee_msg_arg - call argument
|
||||
* @cmd: Command, one of OPTEE_MSG_CMD_* or OPTEE_MSG_RPC_CMD_*
|
||||
* @func: Trusted Application function, specific to the Trusted Application,
|
||||
* used if cmd == OPTEE_MSG_CMD_INVOKE_COMMAND
|
||||
* @session: In parameter for all OPTEE_MSG_CMD_* except
|
||||
* OPTEE_MSG_CMD_OPEN_SESSION where it's an output parameter instead
|
||||
* @cancel_id: Cancellation id, a unique value to identify this request
|
||||
* @ret: return value
|
||||
* @ret_origin: origin of the return value
|
||||
* @num_params: number of parameters supplied to the OS Command
|
||||
* @params: the parameters supplied to the OS Command
|
||||
*
|
||||
* All normal calls to Trusted OS uses this struct. If cmd requires further
|
||||
* information than what these field holds it can be passed as a parameter
|
||||
* tagged as meta (setting the OPTEE_MSG_ATTR_META bit in corresponding
|
||||
* attrs field). All parameters tagged as meta has to come first.
|
||||
*
|
||||
* Temp memref parameters can be fragmented if supported by the Trusted OS
|
||||
* (when optee_smc.h is bearer of this protocol this is indicated with
|
||||
* OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM). If a logical memref parameter is
|
||||
* fragmented then has all but the last fragment the
|
||||
* OPTEE_MSG_ATTR_FRAGMENT bit set in attrs. Even if a memref is fragmented
|
||||
* it will still be presented as a single logical memref to the Trusted
|
||||
* Application.
|
||||
*/
|
||||
struct optee_msg_arg {
|
||||
u32 cmd;
|
||||
u32 func;
|
||||
u32 session;
|
||||
u32 cancel_id;
|
||||
u32 pad;
|
||||
u32 ret;
|
||||
u32 ret_origin;
|
||||
u32 num_params;
|
||||
|
||||
/* num_params tells the actual number of element in params */
|
||||
struct optee_msg_param params[0];
|
||||
};
|
||||
|
||||
/**
|
||||
* OPTEE_MSG_GET_ARG_SIZE - return size of struct optee_msg_arg
|
||||
*
|
||||
* @num_params: Number of parameters embedded in the struct optee_msg_arg
|
||||
*
|
||||
* Returns the size of the struct optee_msg_arg together with the number
|
||||
* of embedded parameters.
|
||||
*/
|
||||
#define OPTEE_MSG_GET_ARG_SIZE(num_params) \
|
||||
(sizeof(struct optee_msg_arg) + \
|
||||
sizeof(struct optee_msg_param) * (num_params))
|
||||
|
||||
/*****************************************************************************
|
||||
* Part 2 - requests from normal world
|
||||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Return the following UID if using API specified in this file without
|
||||
* further extensions:
|
||||
* 384fb3e0-e7f8-11e3-af63-0002a5d5c51b.
|
||||
* Represented in 4 32-bit words in OPTEE_MSG_UID_0, OPTEE_MSG_UID_1,
|
||||
* OPTEE_MSG_UID_2, OPTEE_MSG_UID_3.
|
||||
*/
|
||||
#define OPTEE_MSG_UID_0 0x384fb3e0
|
||||
#define OPTEE_MSG_UID_1 0xe7f811e3
|
||||
#define OPTEE_MSG_UID_2 0xaf630002
|
||||
#define OPTEE_MSG_UID_3 0xa5d5c51b
|
||||
#define OPTEE_MSG_FUNCID_CALLS_UID 0xFF01
|
||||
|
||||
/*
|
||||
* Returns 2.0 if using API specified in this file without further
|
||||
* extensions. Represented in 2 32-bit words in OPTEE_MSG_REVISION_MAJOR
|
||||
* and OPTEE_MSG_REVISION_MINOR
|
||||
*/
|
||||
#define OPTEE_MSG_REVISION_MAJOR 2
|
||||
#define OPTEE_MSG_REVISION_MINOR 0
|
||||
#define OPTEE_MSG_FUNCID_CALLS_REVISION 0xFF03
|
||||
|
||||
/*
|
||||
* Get UUID of Trusted OS.
|
||||
*
|
||||
* Used by non-secure world to figure out which Trusted OS is installed.
|
||||
* Note that returned UUID is the UUID of the Trusted OS, not of the API.
|
||||
*
|
||||
* Returns UUID in 4 32-bit words in the same way as
|
||||
* OPTEE_MSG_FUNCID_CALLS_UID described above.
|
||||
*/
|
||||
#define OPTEE_MSG_OS_OPTEE_UUID_0 0x486178e0
|
||||
#define OPTEE_MSG_OS_OPTEE_UUID_1 0xe7f811e3
|
||||
#define OPTEE_MSG_OS_OPTEE_UUID_2 0xbc5e0002
|
||||
#define OPTEE_MSG_OS_OPTEE_UUID_3 0xa5d5c51b
|
||||
#define OPTEE_MSG_FUNCID_GET_OS_UUID 0x0000
|
||||
|
||||
/*
|
||||
* Get revision of Trusted OS.
|
||||
*
|
||||
* Used by non-secure world to figure out which version of the Trusted OS
|
||||
* is installed. Note that the returned revision is the revision of the
|
||||
* Trusted OS, not of the API.
|
||||
*
|
||||
* Returns revision in 2 32-bit words in the same way as
|
||||
* OPTEE_MSG_CALLS_REVISION described above.
|
||||
*/
|
||||
#define OPTEE_MSG_FUNCID_GET_OS_REVISION 0x0001
|
||||
|
||||
/*
|
||||
* Do a secure call with struct optee_msg_arg as argument
|
||||
* The OPTEE_MSG_CMD_* below defines what goes in struct optee_msg_arg::cmd
|
||||
*
|
||||
 * OPTEE_MSG_CMD_OPEN_SESSION opens a session to a Trusted Application.
 * The first two parameters are tagged as meta, holding two value
 * parameters to pass the following information:
 * param[0].u.value.a-b	uuid of Trusted Application
 * param[1].u.value.a-b	uuid of Client
 * param[1].u.value.c	Login class of client OPTEE_MSG_LOGIN_*
 *
 * OPTEE_MSG_CMD_INVOKE_COMMAND invokes a command in a previously opened
 * session to a Trusted Application. struct optee_msg_arg::func is Trusted
 * Application function, specific to the Trusted Application.
 *
 * OPTEE_MSG_CMD_CLOSE_SESSION closes a previously opened session to
 * Trusted Application.
 *
 * OPTEE_MSG_CMD_CANCEL cancels a currently invoked command.
 *
 * OPTEE_MSG_CMD_REGISTER_SHM registers a shared memory reference. The
 * information is passed as:
 * [in] param[0].attr			OPTEE_MSG_ATTR_TYPE_TMEM_INPUT
 *					[| OPTEE_MSG_ATTR_FRAGMENT]
 * [in] param[0].u.tmem.buf_ptr		physical address (of first fragment)
 * [in] param[0].u.tmem.size		size (of first fragment)
 * [in] param[0].u.tmem.shm_ref		holds shared memory reference
 * ...
 * The shared memory can optionally be fragmented, temp memrefs can follow
 * each other with all but the last with the OPTEE_MSG_ATTR_FRAGMENT bit set.
 *
 * OPTEE_MSG_CMD_UNREGISTER_SHM unregisters a previously registered shared
 * memory reference. The information is passed as:
 * [in] param[0].attr			OPTEE_MSG_ATTR_TYPE_RMEM_INPUT
 * [in] param[0].u.rmem.shm_ref		holds shared memory reference
 * [in] param[0].u.rmem.offs		0
 * [in] param[0].u.rmem.size		0
 */
#define OPTEE_MSG_CMD_OPEN_SESSION	0
#define OPTEE_MSG_CMD_INVOKE_COMMAND	1
#define OPTEE_MSG_CMD_CLOSE_SESSION	2
#define OPTEE_MSG_CMD_CANCEL		3
#define OPTEE_MSG_CMD_REGISTER_SHM	4
#define OPTEE_MSG_CMD_UNREGISTER_SHM	5
#define OPTEE_MSG_FUNCID_CALL_WITH_ARG	0x0004
|
||||
|
||||
/*****************************************************************************
 * Part 3 - Requests from secure world, RPC
 *****************************************************************************/

/*
 * All RPC is done with a struct optee_msg_arg as bearer of information,
 * struct optee_msg_arg::arg holds values defined by OPTEE_MSG_RPC_CMD_* below
 *
 * RPC communication with tee-supplicant is reversed compared to normal
 * client communication described above. The supplicant receives requests
 * and sends responses.
 */

/*
 * Load a TA into memory, defined in tee-supplicant
 */
#define OPTEE_MSG_RPC_CMD_LOAD_TA	0

/*
 * Reserved
 */
#define OPTEE_MSG_RPC_CMD_RPMB		1

/*
 * File system access, defined in tee-supplicant
 */
#define OPTEE_MSG_RPC_CMD_FS		2

/*
 * Get time
 *
 * Returns number of seconds and nano seconds since the Epoch,
 * 1970-01-01 00:00:00 +0000 (UTC).
 *
 * [out] param[0].u.value.a	Number of seconds
 * [out] param[0].u.value.b	Number of nano seconds.
 */
#define OPTEE_MSG_RPC_CMD_GET_TIME	3

/*
 * Wait queue primitive, helper for secure world to implement a wait queue.
 *
 * If secure world needs to wait for a secure world mutex it issues a sleep
 * request instead of spinning in secure world. Conversely a wakeup
 * request is issued when a secure world mutex with a waiting thread is
 * unlocked.
 *
 * Waiting on a key
 * [in] param[0].u.value.a	OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP
 * [in] param[0].u.value.b	wait key
 *
 * Waking up a key
 * [in] param[0].u.value.a	OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP
 * [in] param[0].u.value.b	wakeup key
 */
#define OPTEE_MSG_RPC_CMD_WAIT_QUEUE	4
#define OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP	0
#define OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP	1

/*
 * Suspend execution
 *
 * [in] param[0].value	.a number of milliseconds to suspend
 */
#define OPTEE_MSG_RPC_CMD_SUSPEND	5

/*
 * Allocate a piece of shared memory
 *
 * Shared memory can optionally be fragmented, to support that additional
 * spare param entries are allocated to make room for eventual fragments.
 * The spare param entries has .attr = OPTEE_MSG_ATTR_TYPE_NONE when
 * unused. All returned temp memrefs except the last should have the
 * OPTEE_MSG_ATTR_FRAGMENT bit set in the attr field.
 *
 * [in]  param[0].u.value.a		type of memory one of
 *					OPTEE_MSG_RPC_SHM_TYPE_* below
 * [in]  param[0].u.value.b		requested size
 * [in]  param[0].u.value.c		required alignment
 *
 * [out] param[0].u.tmem.buf_ptr	physical address (of first fragment)
 * [out] param[0].u.tmem.size		size (of first fragment)
 * [out] param[0].u.tmem.shm_ref	shared memory reference
 * ...
 * [out] param[n].u.tmem.buf_ptr	physical address
 * [out] param[n].u.tmem.size		size
 * [out] param[n].u.tmem.shm_ref	shared memory reference (same value
 *					as in param[n-1].u.tmem.shm_ref)
 */
#define OPTEE_MSG_RPC_CMD_SHM_ALLOC	6
/* Memory that can be shared with a non-secure user space application */
#define OPTEE_MSG_RPC_SHM_TYPE_APPL	0
/* Memory only shared with non-secure kernel */
#define OPTEE_MSG_RPC_SHM_TYPE_KERNEL	1

/*
 * Free shared memory previously allocated with OPTEE_MSG_RPC_CMD_SHM_ALLOC
 *
 * [in] param[0].u.value.a		type of memory one of
 *					OPTEE_MSG_RPC_SHM_TYPE_* above
 * [in] param[0].u.value.b		value of shared memory reference
 *					returned in param[0].u.tmem.shm_ref
 *					above
 */
#define OPTEE_MSG_RPC_CMD_SHM_FREE	7

#endif /* _OPTEE_MSG_H */
|
183
drivers/tee/optee/optee_private.h
Normal file
183
drivers/tee/optee/optee_private.h
Normal file
|
@ -0,0 +1,183 @@
|
|||
/*
|
||||
* Copyright (c) 2015, Linaro Limited
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef OPTEE_PRIVATE_H
#define OPTEE_PRIVATE_H

#include <linux/arm-smccc.h>
#include <linux/semaphore.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include "optee_msg.h"

#define OPTEE_MAX_ARG_SIZE	1024

/* Some Global Platform error codes used in this driver */
#define TEEC_SUCCESS			0x00000000
#define TEEC_ERROR_BAD_PARAMETERS	0xFFFF0006
#define TEEC_ERROR_COMMUNICATION	0xFFFF000E
#define TEEC_ERROR_OUT_OF_MEMORY	0xFFFF000C

/* Global Platform return origin: error originated in the communication layer */
#define TEEC_ORIGIN_COMMS		0x00000002
|
||||
|
||||
/*
 * Function used to enter secure world (see struct optee::invoke_fn: issues
 * an smc or hvc). The eight register arguments and result struct follow
 * the arm_smccc call signature.
 */
typedef void (optee_invoke_fn)(unsigned long, unsigned long, unsigned long,
			       unsigned long, unsigned long, unsigned long,
			       unsigned long, unsigned long,
			       struct arm_smccc_res *);
|
||||
|
||||
/* Queue of threads waiting to enter secure world (see struct optee kerneldoc) */
struct optee_call_queue {
	/* Serializes access to this struct */
	struct mutex mutex;
	struct list_head waiters;
};

/*
 * Wait-queue database used to service secure world sleep/wakeup RPC
 * requests (see wq_sleep()/wq_wakeup() in rpc.c).
 */
struct optee_wait_queue {
	/* Serializes access to this struct */
	struct mutex mu;
	struct list_head db;
};
|
||||
|
||||
/**
 * struct optee_supp - supplicant synchronization struct
 * @ctx:		the context of current connected supplicant.
 *			if !NULL the supplicant device is available for use,
 *			else busy
 * @ctx_mutex:		held while accessing @ctx
 * @func:		supplicant function id to call
 * @ret:		call return value
 * @num_params:		number of elements in @param
 * @param:		parameters for @func
 * @req_posted:		if true, a request has been posted to the supplicant
 * @supp_next_send:	if true, next step is for supplicant to send response
 * @thrd_mutex:		held by the thread doing a request to supplicant
 * @supp_mutex:		held by supplicant while operating on this struct
 * @data_to_supp:	supplicant is waiting on this for next request
 * @data_from_supp:	requesting thread is waiting on this to get the result
 */
struct optee_supp {
	struct tee_context *ctx;
	/* Serializes access of ctx */
	struct mutex ctx_mutex;

	u32 func;
	u32 ret;
	size_t num_params;
	struct tee_param *param;

	bool req_posted;
	bool supp_next_send;
	/* Serializes access to this struct for requesting thread */
	struct mutex thrd_mutex;
	/* Serializes access to this struct for supplicant threads */
	struct mutex supp_mutex;
	struct completion data_to_supp;
	struct completion data_from_supp;
};
|
||||
|
||||
/**
 * struct optee - main service struct
 * @supp_teedev:	supplicant device
 * @teedev:		client device
 * @invoke_fn:		function to issue smc or hvc
 * @call_queue:		queue of threads waiting to call @invoke_fn
 * @wait_queue:		queue of threads from secure world waiting for a
 *			secure world sync object
 * @supp:		supplicant synchronization struct for RPC to supplicant
 * @pool:		shared memory pool
 * @memremaped_shm:	virtual address of memory in shared memory pool
 */
struct optee {
	struct tee_device *supp_teedev;
	struct tee_device *teedev;
	optee_invoke_fn *invoke_fn;
	struct optee_call_queue call_queue;
	struct optee_wait_queue wait_queue;
	struct optee_supp supp;
	struct tee_shm_pool *pool;
	void *memremaped_shm;
};
|
||||
|
||||
/* One entry per open session; linked into struct optee_context_data::sess_list */
struct optee_session {
	struct list_head list_node;
	u32 session_id;
};

/* Per tee_context driver private data tracking the context's open sessions */
struct optee_context_data {
	/* Serializes access to this struct */
	struct mutex mutex;
	struct list_head sess_list;
};

/*
 * Shadow of the a0-a7 registers exchanged with secure world
 * (register naming per the comment block in optee_smc.h).
 */
struct optee_rpc_param {
	u32 a0;
	u32 a1;
	u32 a2;
	u32 a3;
	u32 a4;
	u32 a5;
	u32 a6;
	u32 a7;
};
|
||||
|
||||
void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param);

/* Wait queue helpers, defined in rpc.c */
void optee_wait_queue_init(struct optee_wait_queue *wq);
void optee_wait_queue_exit(struct optee_wait_queue *wq);

u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
			struct tee_param *param);

/* Supplicant communication helpers */
int optee_supp_read(struct tee_context *ctx, void __user *buf, size_t len);
int optee_supp_write(struct tee_context *ctx, void __user *buf, size_t len);
void optee_supp_init(struct optee_supp *supp);
void optee_supp_uninit(struct optee_supp *supp);

int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
		    struct tee_param *param);
int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
		    struct tee_param *param);

/* Client call path: enter secure world with a struct optee_msg_arg at @parg */
u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg);
int optee_open_session(struct tee_context *ctx,
		       struct tee_ioctl_open_session_arg *arg,
		       struct tee_param *param);
int optee_close_session(struct tee_context *ctx, u32 session);
int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
		      struct tee_param *param);
int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);

void optee_enable_shm_cache(struct optee *optee);
void optee_disable_shm_cache(struct optee *optee);

/* Convert between driver-level tee_param and message-level optee_msg_param */
int optee_from_msg_param(struct tee_param *params, size_t num_params,
			 const struct optee_msg_param *msg_params);
int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
		       const struct tee_param *params);
|
||||
|
||||
/*
|
||||
* Small helpers
|
||||
*/
|
||||
|
||||
static inline void *reg_pair_to_ptr(u32 reg0, u32 reg1)
|
||||
{
|
||||
return (void *)(unsigned long)(((u64)reg0 << 32) | reg1);
|
||||
}
|
||||
|
||||
/* Split the 64-bit @val into @reg0 (high word) and @reg1 (low word) */
static inline void reg_pair_from_64(u32 *reg0, u32 *reg1, u64 val)
{
	*reg0 = (u32)(val >> 32);
	*reg1 = (u32)(val & 0xffffffff);
}
|
||||
|
||||
#endif /*OPTEE_PRIVATE_H*/
|
450
drivers/tee/optee/optee_smc.h
Normal file
450
drivers/tee/optee/optee_smc.h
Normal file
|
@ -0,0 +1,450 @@
|
|||
/*
|
||||
* Copyright (c) 2015-2016, Linaro Limited
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice,
|
||||
* this list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef OPTEE_SMC_H
#define OPTEE_SMC_H

#include <linux/arm-smccc.h>
#include <linux/bitops.h>

/*
 * Build OP-TEE function IDs in the Trusted OS range of the SMC Calling
 * Convention: standard (yielding) calls vs fast calls.
 */
#define OPTEE_SMC_STD_CALL_VAL(func_num) \
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_32, \
			   ARM_SMCCC_OWNER_TRUSTED_OS, (func_num))
#define OPTEE_SMC_FAST_CALL_VAL(func_num) \
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
			   ARM_SMCCC_OWNER_TRUSTED_OS, (func_num))
|
||||
|
||||
/*
 * Function specified by SMC Calling convention.
 *
 * Note: the original expansion used OPTEE_SMC_FAST_CALL, SMCCC_SMC_32 and
 * SMCCC_OWNER_TRUSTED_OS_END, none of which is defined anywhere
 * (<linux/arm-smccc.h> only provides the ARM_SMCCC_* names); the macro was
 * unused so the error never surfaced at compile time. Fixed to the proper
 * ARM_SMCCC_* identifiers.
 */
#define OPTEE_SMC_FUNCID_CALLS_COUNT	0xFF00
#define OPTEE_SMC_CALLS_COUNT \
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
			   ARM_SMCCC_OWNER_TRUSTED_OS_END, \
			   OPTEE_SMC_FUNCID_CALLS_COUNT)
|
||||
|
||||
/*
 * Normal cached memory (write-back), shareable for SMP systems and not
 * shareable for UP systems.
 */
#define OPTEE_SMC_SHM_CACHED		1

/*
 * a0..a7 is used as register names in the descriptions below, on arm32
 * that translates to r0..r7 and on arm64 to w0..w7. In both cases it's
 * 32-bit registers.
 */

/*
 * Function specified by SMC Calling convention
 *
 * Return one of the following UIDs if using API specified in this file
 * without further extensions:
 * 65cb6b93-af0c-4617-8ed6-644a8d1140f8
 * see also OPTEE_MSG_UID_* in optee_msg.h
 */
#define OPTEE_SMC_FUNCID_CALLS_UID OPTEE_MSG_FUNCID_CALLS_UID
#define OPTEE_SMC_CALLS_UID \
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
			   ARM_SMCCC_OWNER_TRUSTED_OS_END, \
			   OPTEE_SMC_FUNCID_CALLS_UID)

/*
 * Function specified by SMC Calling convention
 *
 * Returns 2.0 if using API specified in this file without further extensions.
 * see also OPTEE_MSG_REVISION_* in optee_msg.h
 */
#define OPTEE_SMC_FUNCID_CALLS_REVISION OPTEE_MSG_FUNCID_CALLS_REVISION
#define OPTEE_SMC_CALLS_REVISION \
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
			   ARM_SMCCC_OWNER_TRUSTED_OS_END, \
			   OPTEE_SMC_FUNCID_CALLS_REVISION)

/* Result of OPTEE_SMC_CALLS_REVISION: major/minor in a0/a1 */
struct optee_smc_calls_revision_result {
	unsigned long major;
	unsigned long minor;
	unsigned long reserved0;
	unsigned long reserved1;
};

/*
 * Get UUID of Trusted OS.
 *
 * Used by non-secure world to figure out which Trusted OS is installed.
 * Note that returned UUID is the UUID of the Trusted OS, not of the API.
 *
 * Returns UUID in a0-4 in the same way as OPTEE_SMC_CALLS_UID
 * described above.
 */
#define OPTEE_SMC_FUNCID_GET_OS_UUID OPTEE_MSG_FUNCID_GET_OS_UUID
#define OPTEE_SMC_CALL_GET_OS_UUID \
	OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_UUID)

/*
 * Get revision of Trusted OS.
 *
 * Used by non-secure world to figure out which version of the Trusted OS
 * is installed. Note that the returned revision is the revision of the
 * Trusted OS, not of the API.
 *
 * Returns revision in a0-1 in the same way as OPTEE_SMC_CALLS_REVISION
 * described above.
 */
#define OPTEE_SMC_FUNCID_GET_OS_REVISION OPTEE_MSG_FUNCID_GET_OS_REVISION
#define OPTEE_SMC_CALL_GET_OS_REVISION \
	OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_REVISION)
|
||||
|
||||
/*
 * Call with struct optee_msg_arg as argument
 *
 * Call register usage:
 * a0	SMC Function ID, OPTEE_SMC*CALL_WITH_ARG
 * a1	Upper 32bit of a 64bit physical pointer to a struct optee_msg_arg
 * a2	Lower 32bit of a 64bit physical pointer to a struct optee_msg_arg
 * a3	Cache settings, not used if physical pointer is in a predefined shared
 *	memory area else per OPTEE_SMC_SHM_*
 * a4-6	Not used
 * a7	Hypervisor Client ID register
 *
 * Normal return register usage:
 * a0	Return value, OPTEE_SMC_RETURN_*
 * a1-3	Not used
 * a4-7	Preserved
 *
 * OPTEE_SMC_RETURN_ETHREAD_LIMIT return register usage:
 * a0	Return value, OPTEE_SMC_RETURN_ETHREAD_LIMIT
 * a1-3	Preserved
 * a4-7	Preserved
 *
 * RPC return register usage:
 * a0	Return value, OPTEE_SMC_RETURN_IS_RPC(val)
 * a1-2	RPC parameters
 * a3-7	Resume information, must be preserved
 *
 * Possible return values:
 * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION	Trusted OS does not recognize this
 *					function.
 * OPTEE_SMC_RETURN_OK			Call completed, result updated in
 *					the previously supplied struct
 *					optee_msg_arg.
 * OPTEE_SMC_RETURN_ETHREAD_LIMIT	Number of Trusted OS threads exceeded,
 *					try again later.
 * OPTEE_SMC_RETURN_EBADADDR		Bad physical pointer to struct
 *					optee_msg_arg.
 * OPTEE_SMC_RETURN_EBADCMD		Bad/unknown cmd in struct optee_msg_arg
 * OPTEE_SMC_RETURN_IS_RPC()		Call suspended by RPC call to normal
 *					world.
 */
#define OPTEE_SMC_FUNCID_CALL_WITH_ARG OPTEE_MSG_FUNCID_CALL_WITH_ARG
#define OPTEE_SMC_CALL_WITH_ARG \
	OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_ARG)

/*
 * Get Shared Memory Config
 *
 * Returns the Secure/Non-secure shared memory config.
 *
 * Call register usage:
 * a0	SMC Function ID, OPTEE_SMC_GET_SHM_CONFIG
 * a1-6	Not used
 * a7	Hypervisor Client ID register
 *
 * Have config return register usage:
 * a0	OPTEE_SMC_RETURN_OK
 * a1	Physical address of start of SHM
 * a2	Size of SHM
 * a3	Cache settings of memory, as defined by the
 *	OPTEE_SMC_SHM_* values above
 * a4-7	Preserved
 *
 * Not available register usage:
 * a0	OPTEE_SMC_RETURN_ENOTAVAIL
 * a1-3	Not used
 * a4-7	Preserved
 */
#define OPTEE_SMC_FUNCID_GET_SHM_CONFIG	7
#define OPTEE_SMC_GET_SHM_CONFIG \
	OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_SHM_CONFIG)

/* Result of OPTEE_SMC_GET_SHM_CONFIG mapped onto a0-a3 */
struct optee_smc_get_shm_config_result {
	unsigned long status;
	unsigned long start;
	unsigned long size;
	unsigned long settings;
};
|
||||
|
||||
/*
 * Exchanges capabilities between normal world and secure world
 *
 * Call register usage:
 * a0	SMC Function ID, OPTEE_SMC_EXCHANGE_CAPABILITIES
 * a1	bitfield of normal world capabilities OPTEE_SMC_NSEC_CAP_*
 * a2-6	Not used
 * a7	Hypervisor Client ID register
 *
 * Normal return register usage:
 * a0	OPTEE_SMC_RETURN_OK
 * a1	bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
 * a2-7	Preserved
 *
 * Error return register usage:
 * a0	OPTEE_SMC_RETURN_ENOTAVAIL, can't use the capabilities from normal world
 * a1	bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
 * a2-7	Preserved
 */
/* Normal world works as a uniprocessor system */
#define OPTEE_SMC_NSEC_CAP_UNIPROCESSOR		BIT(0)
/* Secure world has reserved shared memory for normal world to use */
#define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM	BIT(0)
/* Secure world can communicate via previously unregistered shared memory */
#define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM	BIT(1)
#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES	9
#define OPTEE_SMC_EXCHANGE_CAPABILITIES \
	OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES)

/* Result of OPTEE_SMC_EXCHANGE_CAPABILITIES mapped onto a0-a3 */
struct optee_smc_exchange_capabilities_result {
	unsigned long status;
	unsigned long capabilities;
	unsigned long reserved0;
	unsigned long reserved1;
};

/*
 * Disable and empties cache of shared memory objects
 *
 * Secure world can cache frequently used shared memory objects, for
 * example objects used as RPC arguments. When secure world is idle this
 * function returns one shared memory reference to free. To disable the
 * cache and free all cached objects this function has to be called until
 * it returns OPTEE_SMC_RETURN_ENOTAVAIL.
 *
 * Call register usage:
 * a0	SMC Function ID, OPTEE_SMC_DISABLE_SHM_CACHE
 * a1-6	Not used
 * a7	Hypervisor Client ID register
 *
 * Normal return register usage:
 * a0	OPTEE_SMC_RETURN_OK
 * a1	Upper 32bit of a 64bit Shared memory cookie
 * a2	Lower 32bit of a 64bit Shared memory cookie
 * a3-7	Preserved
 *
 * Cache empty return register usage:
 * a0	OPTEE_SMC_RETURN_ENOTAVAIL
 * a1-7	Preserved
 *
 * Not idle return register usage:
 * a0	OPTEE_SMC_RETURN_EBUSY
 * a1-7	Preserved
 */
#define OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE	10
#define OPTEE_SMC_DISABLE_SHM_CACHE \
	OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE)

/* Result of OPTEE_SMC_DISABLE_SHM_CACHE mapped onto a0-a3 */
struct optee_smc_disable_shm_cache_result {
	unsigned long status;
	unsigned long shm_upper32;
	unsigned long shm_lower32;
	unsigned long reserved0;
};
|
||||
|
||||
/*
 * Enable cache of shared memory objects
 *
 * Secure world can cache frequently used shared memory objects, for
 * example objects used as RPC arguments. When secure world is idle this
 * function returns OPTEE_SMC_RETURN_OK and the cache is enabled. If
 * secure world isn't idle OPTEE_SMC_RETURN_EBUSY is returned.
 *
 * Call register usage:
 * a0	SMC Function ID, OPTEE_SMC_ENABLE_SHM_CACHE
 * a1-6	Not used
 * a7	Hypervisor Client ID register
 *
 * Normal return register usage:
 * a0	OPTEE_SMC_RETURN_OK
 * a1-7	Preserved
 *
 * Not idle return register usage:
 * a0	OPTEE_SMC_RETURN_EBUSY
 * a1-7	Preserved
 */
#define OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE	11
#define OPTEE_SMC_ENABLE_SHM_CACHE \
	OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE)

/*
 * Resume from RPC (for example after processing an IRQ)
 *
 * Call register usage:
 * a0	SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC
 * a1-3	Value of a1-3 when OPTEE_SMC_CALL_WITH_ARG returned
 *	OPTEE_SMC_RETURN_RPC in a0
 *
 * Return register usage is the same as for OPTEE_SMC_*CALL_WITH_ARG above.
 *
 * Possible return values
 * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION	Trusted OS does not recognize this
 *					function.
 * OPTEE_SMC_RETURN_OK			Original call completed, result
 *					updated in the previously supplied.
 *					struct optee_msg_arg
 * OPTEE_SMC_RETURN_RPC			Call suspended by RPC call to normal
 *					world.
 * OPTEE_SMC_RETURN_ERESUME		Resume failed, the opaque resume
 *					information was corrupt.
 */
#define OPTEE_SMC_FUNCID_RETURN_FROM_RPC	3
#define OPTEE_SMC_CALL_RETURN_FROM_RPC \
	OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_RETURN_FROM_RPC)

/* RPC requests are flagged by this prefix in the a0 return value */
#define OPTEE_SMC_RETURN_RPC_PREFIX_MASK	0xFFFF0000
#define OPTEE_SMC_RETURN_RPC_PREFIX		0xFFFF0000
#define OPTEE_SMC_RETURN_RPC_FUNC_MASK		0x0000FFFF

/* Extract the OPTEE_SMC_RPC_FUNC_* number from an RPC return value */
#define OPTEE_SMC_RETURN_GET_RPC_FUNC(ret) \
	((ret) & OPTEE_SMC_RETURN_RPC_FUNC_MASK)

#define OPTEE_SMC_RPC_VAL(func)		((func) | OPTEE_SMC_RETURN_RPC_PREFIX)
|
||||
|
||||
/*
 * Allocate memory for RPC parameter passing. The memory is used to hold a
 * struct optee_msg_arg.
 *
 * "Call" register usage:
 * a0	This value, OPTEE_SMC_RETURN_RPC_ALLOC
 * a1	Size in bytes of required argument memory
 * a2	Not used
 * a3	Resume information, must be preserved
 * a4-5	Not used
 * a6-7	Resume information, must be preserved
 *
 * "Return" register usage:
 * a0	SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
 * a1	Upper 32bits of 64bit physical pointer to allocated
 *	memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't
 *	be allocated.
 * a2	Lower 32bits of 64bit physical pointer to allocated
 *	memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't
 *	be allocated
 * a3	Preserved
 * a4	Upper 32bits of 64bit Shared memory cookie used when freeing
 *	the memory or doing an RPC
 * a5	Lower 32bits of 64bit Shared memory cookie used when freeing
 *	the memory or doing an RPC
 * a6-7	Preserved
 */
#define OPTEE_SMC_RPC_FUNC_ALLOC	0
#define OPTEE_SMC_RETURN_RPC_ALLOC \
	OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_ALLOC)

/*
 * Free memory previously allocated by OPTEE_SMC_RETURN_RPC_ALLOC
 *
 * "Call" register usage:
 * a0	This value, OPTEE_SMC_RETURN_RPC_FREE
 * a1	Upper 32bits of 64bit shared memory cookie belonging to this
 *	argument memory
 * a2	Lower 32bits of 64bit shared memory cookie belonging to this
 *	argument memory
 * a3-7	Resume information, must be preserved
 *
 * "Return" register usage:
 * a0	SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
 * a1-2	Not used
 * a3-7	Preserved
 */
#define OPTEE_SMC_RPC_FUNC_FREE		2
#define OPTEE_SMC_RETURN_RPC_FREE \
	OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FREE)

/*
 * Deliver an IRQ in normal world.
 *
 * "Call" register usage:
 * a0	OPTEE_SMC_RETURN_RPC_IRQ
 * a1-7	Resume information, must be preserved
 *
 * "Return" register usage:
 * a0	SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
 * a1-7	Preserved
 */
#define OPTEE_SMC_RPC_FUNC_IRQ		4
#define OPTEE_SMC_RETURN_RPC_IRQ \
	OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_IRQ)

/*
 * Do an RPC request. The supplied struct optee_msg_arg tells which
 * request to do and the parameters for the request. The following fields
 * are used (the rest are unused):
 * - cmd		the Request ID
 * - ret		return value of the request, filled in by normal world
 * - num_params		number of parameters for the request
 * - params		the parameters
 * - param_attrs	attributes of the parameters
 *
 * "Call" register usage:
 * a0	OPTEE_SMC_RETURN_RPC_CMD
 * a1	Upper 32bit of a 64bit Shared memory cookie holding a
 *	struct optee_msg_arg, must be preserved, only the data should
 *	be updated
 * a2	Lower 32bit of a 64bit Shared memory cookie holding a
 *	struct optee_msg_arg, must be preserved, only the data should
 *	be updated
 * a3-7	Resume information, must be preserved
 *
 * "Return" register usage:
 * a0	SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
 * a1-2	Not used
 * a3-7	Preserved
 */
#define OPTEE_SMC_RPC_FUNC_CMD		5
#define OPTEE_SMC_RETURN_RPC_CMD \
	OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_CMD)
|
||||
|
||||
/* Returned in a0 */
#define OPTEE_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF

/* Returned in a0 only from Trusted OS functions */
#define OPTEE_SMC_RETURN_OK		0x0
#define OPTEE_SMC_RETURN_ETHREAD_LIMIT	0x1
#define OPTEE_SMC_RETURN_EBUSY		0x2
#define OPTEE_SMC_RETURN_ERESUME	0x3
#define OPTEE_SMC_RETURN_EBADADDR	0x4
#define OPTEE_SMC_RETURN_EBADCMD	0x5
#define OPTEE_SMC_RETURN_ENOMEM		0x6
#define OPTEE_SMC_RETURN_ENOTAVAIL	0x7
|
||||
#define OPTEE_SMC_RETURN_IS_RPC(ret) __optee_smc_return_is_rpc((ret))
|
||||
|
||||
static inline bool __optee_smc_return_is_rpc(u32 ret)
|
||||
{
|
||||
return ret != OPTEE_SMC_RETURN_UNKNOWN_FUNCTION &&
|
||||
(ret & OPTEE_SMC_RETURN_RPC_PREFIX_MASK) ==
|
||||
OPTEE_SMC_RETURN_RPC_PREFIX;
|
||||
}
|
||||
|
||||
#endif /* OPTEE_SMC_H */
|
396
drivers/tee/optee/rpc.c
Normal file
396
drivers/tee/optee/rpc.c
Normal file
|
@ -0,0 +1,396 @@
|
|||
/*
|
||||
* Copyright (c) 2015-2016, Linaro Limited
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/delay.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/tee_drv.h>
|
||||
#include "optee_private.h"
|
||||
#include "optee_smc.h"
|
||||
|
||||
/*
 * One entry per wait key: a sleeper blocks on @c until a wakeup request
 * for the same @key completes it (see wq_sleep()/wq_wakeup()).
 */
struct wq_entry {
	struct list_head link;
	struct completion c;
	u32 key;
};
|
||||
|
||||
void optee_wait_queue_init(struct optee_wait_queue *priv)
|
||||
{
|
||||
mutex_init(&priv->mu);
|
||||
INIT_LIST_HEAD(&priv->db);
|
||||
}
|
||||
|
||||
/* Tear down a wait queue set up by optee_wait_queue_init() */
void optee_wait_queue_exit(struct optee_wait_queue *priv)
{
	mutex_destroy(&priv->mu);
}
|
||||
|
||||
static void handle_rpc_func_cmd_get_time(struct optee_msg_arg *arg)
|
||||
{
|
||||
struct timespec64 ts;
|
||||
|
||||
if (arg->num_params != 1)
|
||||
goto bad;
|
||||
if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
|
||||
OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT)
|
||||
goto bad;
|
||||
|
||||
getnstimeofday64(&ts);
|
||||
arg->params[0].u.value.a = ts.tv_sec;
|
||||
arg->params[0].u.value.b = ts.tv_nsec;
|
||||
|
||||
arg->ret = TEEC_SUCCESS;
|
||||
return;
|
||||
bad:
|
||||
arg->ret = TEEC_ERROR_BAD_PARAMETERS;
|
||||
}
|
||||
|
||||
/*
 * Find the wait-queue entry for @key, creating one if none exists yet.
 * Returns NULL only on allocation failure. Called under no lock; takes
 * wq->mu internally.
 */
static struct wq_entry *wq_entry_get(struct optee_wait_queue *wq, u32 key)
{
	struct wq_entry *w;

	mutex_lock(&wq->mu);

	/* Reuse the entry if this key is already in the database */
	list_for_each_entry(w, &wq->db, link)
		if (w->key == key)
			goto out;

	w = kmalloc(sizeof(*w), GFP_KERNEL);
	if (w) {
		init_completion(&w->c);
		w->key = key;
		list_add_tail(&w->link, &wq->db);
	}
out:
	mutex_unlock(&wq->mu);
	return w;
}
|
||||
|
||||
/* Block until wq_wakeup() is called for the same @key, then free the entry */
static void wq_sleep(struct optee_wait_queue *wq, u32 key)
{
	struct wq_entry *w = wq_entry_get(wq, key);

	if (!w)
		return;

	wait_for_completion(&w->c);

	mutex_lock(&wq->mu);
	list_del(&w->link);
	mutex_unlock(&wq->mu);

	kfree(w);
}
|
||||
|
||||
/*
 * Complete the waiter for @key. wq_entry_get() creates the entry if it
 * does not exist yet, so a wakeup arriving before the matching sleep is
 * not lost.
 */
static void wq_wakeup(struct optee_wait_queue *wq, u32 key)
{
	struct wq_entry *w = wq_entry_get(wq, key);

	if (w)
		complete(&w->c);
}
|
||||
|
||||
static void handle_rpc_func_cmd_wq(struct optee *optee,
|
||||
struct optee_msg_arg *arg)
|
||||
{
|
||||
if (arg->num_params != 1)
|
||||
goto bad;
|
||||
|
||||
if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
|
||||
OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
|
||||
goto bad;
|
||||
|
||||
switch (arg->params[0].u.value.a) {
|
||||
case OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP:
|
||||
wq_sleep(&optee->wait_queue, arg->params[0].u.value.b);
|
||||
break;
|
||||
case OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP:
|
||||
wq_wakeup(&optee->wait_queue, arg->params[0].u.value.b);
|
||||
break;
|
||||
default:
|
||||
goto bad;
|
||||
}
|
||||
|
||||
arg->ret = TEEC_SUCCESS;
|
||||
return;
|
||||
bad:
|
||||
arg->ret = TEEC_ERROR_BAD_PARAMETERS;
|
||||
}
|
||||
|
||||
static void handle_rpc_func_cmd_wait(struct optee_msg_arg *arg)
|
||||
{
|
||||
u32 msec_to_wait;
|
||||
|
||||
if (arg->num_params != 1)
|
||||
goto bad;
|
||||
|
||||
if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
|
||||
OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
|
||||
goto bad;
|
||||
|
||||
msec_to_wait = arg->params[0].u.value.a;
|
||||
|
||||
/* set task's state to interruptible sleep */
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
|
||||
/* take a nap */
|
||||
msleep(msec_to_wait);
|
||||
|
||||
arg->ret = TEEC_SUCCESS;
|
||||
return;
|
||||
bad:
|
||||
arg->ret = TEEC_ERROR_BAD_PARAMETERS;
|
||||
}
|
||||
|
||||
/*
 * Forward an RPC that no in-kernel handler claims to the user-space
 * supplicant.  Parameters are converted from OP-TEE message format to
 * struct tee_param for the trip through optee_supp_thrd_req() and
 * converted back afterwards; the result lands in arg->ret.
 */
static void handle_rpc_supp_cmd(struct tee_context *ctx,
				struct optee_msg_arg *arg)
{
	struct tee_param *params;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	/* kmalloc_array() checks the num_params * size multiplication */
	params = kmalloc_array(arg->num_params, sizeof(struct tee_param),
			       GFP_KERNEL);
	if (!params) {
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		return;
	}

	if (optee_from_msg_param(params, arg->num_params, arg->params)) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		goto out;
	}

	arg->ret = optee_supp_thrd_req(ctx, arg->cmd, arg->num_params, params);

	/* Copy updated out/in-out parameters back into the message */
	if (optee_to_msg_param(arg->params, arg->num_params, params))
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
out:
	kfree(params);
}
|
||||
|
||||
/*
 * Ask the user-space supplicant to allocate @sz bytes of shared memory.
 * On success the supplicant returns a shm id in param.u.value.c which is
 * resolved (with an extra reference taken) against the supplicant's own
 * context.  Returns the shm or an ERR_PTR on failure.
 */
static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
{
	u32 ret;
	struct tee_param param;
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct tee_shm *shm;

	param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
	param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL;
	param.u.value.b = sz;
	param.u.value.c = 0;

	ret = optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_ALLOC, 1, &param);
	if (ret)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&optee->supp.ctx_mutex);
	/* Increases count as secure world doesn't have a reference */
	shm = tee_shm_get_from_id(optee->supp.ctx, param.u.value.c);
	mutex_unlock(&optee->supp.ctx_mutex);
	return shm;
}
|
||||
|
||||
/*
 * Handle OPTEE_MSG_RPC_CMD_SHM_ALLOC: allocate shared memory on behalf
 * of secure world, either from the supplicant (APPL) or from the kernel
 * (KERNEL) pool selected by params[0].u.value.a; the requested size is
 * in params[0].u.value.b.  Extra parameters must be TYPE_NONE.
 *
 * On success params[0] is rewritten as a TMEM_OUTPUT carrying the
 * physical address, size and a struct tee_shm pointer cookie that
 * handle_rpc_func_cmd_shm_free() later casts back.
 */
static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
					  struct optee_msg_arg *arg)
{
	phys_addr_t pa;
	struct tee_shm *shm;
	size_t sz;
	size_t n;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (!arg->num_params ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	/* All other parameters must be unused */
	for (n = 1; n < arg->num_params; n++) {
		if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
			arg->ret = TEEC_ERROR_BAD_PARAMETERS;
			return;
		}
	}

	sz = arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_MSG_RPC_SHM_TYPE_APPL:
		shm = cmd_alloc_suppl(ctx, sz);
		break;
	case OPTEE_MSG_RPC_SHM_TYPE_KERNEL:
		shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED);
		break;
	default:
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	if (IS_ERR(shm)) {
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		return;
	}

	if (tee_shm_get_pa(shm, 0, &pa)) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		goto bad;
	}

	arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
	arg->params[0].u.tmem.buf_ptr = pa;
	arg->params[0].u.tmem.size = sz;
	/* Pointer cookie handed to secure world, returned on SHM_FREE */
	arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
	arg->ret = TEEC_SUCCESS;
	return;
bad:
	tee_shm_free(shm);
}
|
||||
|
||||
/*
 * Ask the user-space supplicant to free shared memory it allocated
 * earlier via cmd_alloc_suppl().  The kernel-side reference taken at
 * allocation time is dropped before the request is sent (see comment
 * below for why that order is chosen).
 */
static void cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm)
{
	struct tee_param param;

	param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
	param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL;
	param.u.value.b = tee_shm_get_id(shm);
	param.u.value.c = 0;

	/*
	 * Match the tee_shm_get_from_id() in cmd_alloc_suppl() as secure
	 * world has released its reference.
	 *
	 * It's better to do this before sending the request to supplicant
	 * as we'd like to let the process doing the initial allocation to
	 * do release the last reference too in order to avoid stacking
	 * many pending fput() on the client process. This could otherwise
	 * happen if secure world does many allocate and free in a single
	 * invoke.
	 */
	tee_shm_put(shm);

	optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_FREE, 1, &param);
}
|
||||
|
||||
static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
|
||||
struct optee_msg_arg *arg)
|
||||
{
|
||||
struct tee_shm *shm;
|
||||
|
||||
arg->ret_origin = TEEC_ORIGIN_COMMS;
|
||||
|
||||
if (arg->num_params != 1 ||
|
||||
arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
|
||||
arg->ret = TEEC_ERROR_BAD_PARAMETERS;
|
||||
return;
|
||||
}
|
||||
|
||||
shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
|
||||
switch (arg->params[0].u.value.a) {
|
||||
case OPTEE_MSG_RPC_SHM_TYPE_APPL:
|
||||
cmd_free_suppl(ctx, shm);
|
||||
break;
|
||||
case OPTEE_MSG_RPC_SHM_TYPE_KERNEL:
|
||||
tee_shm_free(shm);
|
||||
break;
|
||||
default:
|
||||
arg->ret = TEEC_ERROR_BAD_PARAMETERS;
|
||||
}
|
||||
arg->ret = TEEC_SUCCESS;
|
||||
}
|
||||
|
||||
/*
 * Dispatch an OPTEE_SMC_RPC_FUNC_CMD request.  The command and its
 * parameters live in an optee_msg_arg structure inside @shm; commands
 * without an in-kernel handler are forwarded to the supplicant.
 */
static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
				struct tee_shm *shm)
{
	struct optee_msg_arg *arg;

	arg = tee_shm_get_va(shm, 0);
	if (IS_ERR(arg)) {
		pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm);
		return;
	}

	switch (arg->cmd) {
	case OPTEE_MSG_RPC_CMD_GET_TIME:
		handle_rpc_func_cmd_get_time(arg);
		break;
	case OPTEE_MSG_RPC_CMD_WAIT_QUEUE:
		handle_rpc_func_cmd_wq(optee, arg);
		break;
	case OPTEE_MSG_RPC_CMD_SUSPEND:
		handle_rpc_func_cmd_wait(arg);
		break;
	case OPTEE_MSG_RPC_CMD_SHM_ALLOC:
		handle_rpc_func_cmd_shm_alloc(ctx, arg);
		break;
	case OPTEE_MSG_RPC_CMD_SHM_FREE:
		handle_rpc_func_cmd_shm_free(ctx, arg);
		break;
	default:
		/* Unknown to the kernel: let the supplicant handle it */
		handle_rpc_supp_cmd(ctx, arg);
	}
}
|
||||
|
||||
/**
 * optee_handle_rpc() - handle RPC from secure world
 * @ctx:	context doing the RPC
 * @param:	value of registers for the RPC
 *
 * Result of RPC is written back into @param.
 */
void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct tee_shm *shm;
	phys_addr_t pa;

	switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
	case OPTEE_SMC_RPC_FUNC_ALLOC:
		/*
		 * Allocate a kernel-mapped shm of a1 bytes; return its
		 * physical address in a1/a2 and the shm pointer cookie in
		 * a4/a5, or zeros on failure.
		 */
		shm = tee_shm_alloc(ctx, param->a1, TEE_SHM_MAPPED);
		if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
			reg_pair_from_64(&param->a1, &param->a2, pa);
			reg_pair_from_64(&param->a4, &param->a5,
					 (unsigned long)shm);
		} else {
			param->a1 = 0;
			param->a2 = 0;
			param->a4 = 0;
			param->a5 = 0;
		}
		break;
	case OPTEE_SMC_RPC_FUNC_FREE:
		/* a1/a2 carry back the shm cookie from FUNC_ALLOC */
		shm = reg_pair_to_ptr(param->a1, param->a2);
		tee_shm_free(shm);
		break;
	case OPTEE_SMC_RPC_FUNC_IRQ:
		/*
		 * An IRQ was raised while secure world was executing,
		 * since all IRQs are handled in Linux a dummy RPC is
		 * performed to let Linux take the IRQ through the normal
		 * vector.
		 */
		break;
	case OPTEE_SMC_RPC_FUNC_CMD:
		shm = reg_pair_to_ptr(param->a1, param->a2);
		handle_rpc_func_cmd(ctx, optee, shm);
		break;
	default:
		pr_warn("Unknown RPC func 0x%x\n",
			(u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
		break;
	}

	param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
}
|
273
drivers/tee/optee/supp.c
Normal file
273
drivers/tee/optee/supp.c
Normal file
|
@ -0,0 +1,273 @@
|
|||
/*
|
||||
* Copyright (c) 2015, Linaro Limited
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
#include <linux/device.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include "optee_private.h"
|
||||
|
||||
/*
 * Initialize the supplicant bookkeeping: zero the structure and set up
 * the three mutexes and the two completions used by the request/response
 * handshake between optee_supp_thrd_req() and optee_supp_recv()/send().
 */
void optee_supp_init(struct optee_supp *supp)
{
	memset(supp, 0, sizeof(*supp));
	mutex_init(&supp->ctx_mutex);
	mutex_init(&supp->thrd_mutex);
	mutex_init(&supp->supp_mutex);
	init_completion(&supp->data_to_supp);
	init_completion(&supp->data_from_supp);
}
|
||||
|
||||
/* Tear down the mutexes created by optee_supp_init(). */
void optee_supp_uninit(struct optee_supp *supp)
{
	mutex_destroy(&supp->ctx_mutex);
	mutex_destroy(&supp->thrd_mutex);
	mutex_destroy(&supp->supp_mutex);
}
|
||||
|
||||
/**
 * optee_supp_thrd_req() - request service from supplicant
 * @ctx:	context doing the request
 * @func:	function requested
 * @num_params:	number of elements in @param array
 * @param:	parameters for function
 *
 * Serializes on supp->thrd_mutex so only one requesting thread talks to
 * the supplicant at a time, publishes the request, and blocks until the
 * supplicant answers via optee_supp_send().
 *
 * Returns result of operation to be passed to secure world
 */
u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
			struct tee_param *param)
{
	bool interruptable;
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_supp *supp = &optee->supp;
	u32 ret;

	/*
	 * Other threads blocks here until we've copied our answer from
	 * supplicant.
	 */
	while (mutex_lock_interruptible(&supp->thrd_mutex)) {
		/* See comment below on when the RPC can be interrupted. */
		mutex_lock(&supp->ctx_mutex);
		interruptable = !supp->ctx;
		mutex_unlock(&supp->ctx_mutex);
		if (interruptable)
			return TEEC_ERROR_COMMUNICATION;
	}

	/*
	 * We have exclusive access now since the supplicant at this
	 * point is either doing a
	 * wait_for_completion_interruptible(&supp->data_to_supp) or is in
	 * userspace still about to do the ioctl() to enter
	 * optee_supp_recv() below.
	 */

	supp->func = func;
	supp->num_params = num_params;
	supp->param = param;
	supp->req_posted = true;

	/* Let supplicant get the data */
	complete(&supp->data_to_supp);

	/*
	 * Wait for supplicant to process and return result, once we've
	 * returned from wait_for_completion(data_from_supp) we have
	 * exclusive access again.
	 */
	while (wait_for_completion_interruptible(&supp->data_from_supp)) {
		mutex_lock(&supp->ctx_mutex);
		interruptable = !supp->ctx;
		if (interruptable) {
			/*
			 * There's no supplicant available and since the
			 * supp->ctx_mutex currently is held none can
			 * become available until the mutex released
			 * again.
			 *
			 * Interrupting an RPC to supplicant is only
			 * allowed as a way of slightly improving the user
			 * experience in case the supplicant hasn't been
			 * started yet. During normal operation the supplicant
			 * will serve all requests in a timely manner and
			 * interrupting then wouldn't make sense.
			 */
			supp->ret = TEEC_ERROR_COMMUNICATION;
			/* Reset so the aborted request can't be received */
			init_completion(&supp->data_to_supp);
		}
		mutex_unlock(&supp->ctx_mutex);
		if (interruptable)
			break;
	}

	ret = supp->ret;
	supp->param = NULL;
	supp->req_posted = false;

	/* We're done, let someone else talk to the supplicant now. */
	mutex_unlock(&supp->thrd_mutex);

	return ret;
}
|
||||
|
||||
/**
 * optee_supp_recv() - receive request for supplicant
 * @ctx:	context receiving the request
 * @func:	requested function in supplicant
 * @num_params:	number of elements allocated in @param, updated with number
 *		used elements
 * @param:	space for parameters for @func
 *
 * Blocks (interruptibly) until a thread posts a request via
 * optee_supp_thrd_req(), then copies the request into @func/@num_params/
 * @param for user space.
 *
 * Returns 0 on success or <0 on failure
 */
int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
		    struct tee_param *param)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct optee_supp *supp = &optee->supp;
	int rc;

	/*
	 * In case two threads in one supplicant is calling this function
	 * simultaneously we need to protect the data with a mutex which
	 * we'll release before returning.
	 */
	mutex_lock(&supp->supp_mutex);

	if (supp->supp_next_send) {
		/*
		 * optee_supp_recv() has been called again without
		 * a optee_supp_send() in between. Supplicant has
		 * probably been restarted before it was able to
		 * write back last result. Abort last request and
		 * wait for a new.
		 */
		if (supp->req_posted) {
			supp->ret = TEEC_ERROR_COMMUNICATION;
			supp->supp_next_send = false;
			complete(&supp->data_from_supp);
		}
	}

	/*
	 * This is where supplicant will be hanging most of the
	 * time, let's make this interruptable so we can easily
	 * restart supplicant if needed.
	 */
	if (wait_for_completion_interruptible(&supp->data_to_supp)) {
		rc = -ERESTARTSYS;
		goto out;
	}

	/* We have exlusive access to the data */

	if (*num_params < supp->num_params) {
		/*
		 * Not enough room for parameters, tell supplicant
		 * it failed and abort last request.
		 */
		supp->ret = TEEC_ERROR_COMMUNICATION;
		rc = -EINVAL;
		complete(&supp->data_from_supp);
		goto out;
	}

	*func = supp->func;
	*num_params = supp->num_params;
	memcpy(param, supp->param,
	       sizeof(struct tee_param) * supp->num_params);

	/* Allow optee_supp_send() below to do its work */
	supp->supp_next_send = true;

	rc = 0;
out:
	mutex_unlock(&supp->supp_mutex);
	return rc;
}
|
||||
|
||||
/**
 * optee_supp_send() - send result of request from supplicant
 * @ctx:	context sending result
 * @ret:	return value of request
 * @num_params:	number of parameters returned
 * @param:	returned parameters
 *
 * Copies the out/in-out results from the supplicant back into the
 * requesting thread's parameter array and completes data_from_supp to
 * release the thread blocked in optee_supp_thrd_req().
 *
 * Returns 0 on success or <0 on failure.
 */
int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
		    struct tee_param *param)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct optee_supp *supp = &optee->supp;
	size_t n;
	int rc = 0;

	/*
	 * We still have exclusive access to the data since that's how we
	 * left it when returning from optee_supp_read().
	 */

	/* See comment on mutex in optee_supp_read() above */
	mutex_lock(&supp->supp_mutex);

	if (!supp->supp_next_send) {
		/*
		 * Something strange is going on, supplicant shouldn't
		 * enter optee_supp_send() in this state
		 */
		rc = -ENOENT;
		goto out;
	}

	if (num_params != supp->num_params) {
		/*
		 * Something is wrong, let supplicant restart. Next call to
		 * optee_supp_recv() will give an error to the requesting
		 * thread and release it.
		 */
		rc = -EINVAL;
		goto out;
	}

	/* Update out and in/out parameters */
	for (n = 0; n < num_params; n++) {
		struct tee_param *p = supp->param + n;

		switch (p->attr) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			p->u.value.a = param[n].u.value.a;
			p->u.value.b = param[n].u.value.b;
			p->u.value.c = param[n].u.value.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			/* Only the size can change for a memref */
			p->u.memref.size = param[n].u.memref.size;
			break;
		default:
			break;
		}
	}
	supp->ret = ret;

	/* Allow optee_supp_recv() above to do its work */
	supp->supp_next_send = false;

	/* Let the requesting thread continue */
	complete(&supp->data_from_supp);
out:
	mutex_unlock(&supp->supp_mutex);
	return rc;
}
|
893
drivers/tee/tee_core.c
Normal file
893
drivers/tee/tee_core.c
Normal file
|
@ -0,0 +1,893 @@
|
|||
/*
|
||||
* Copyright (c) 2015-2016, Linaro Limited
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "%s: " fmt, __func__
|
||||
|
||||
#include <linux/cdev.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/idr.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/tee_drv.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include "tee_private.h"
|
||||
|
||||
#define TEE_NUM_DEVICES 32
|
||||
|
||||
#define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x))
|
||||
|
||||
/*
|
||||
* Unprivileged devices in the lower half range and privileged devices in
|
||||
* the upper half range.
|
||||
*/
|
||||
static DECLARE_BITMAP(dev_mask, TEE_NUM_DEVICES);
|
||||
static DEFINE_SPINLOCK(driver_lock);
|
||||
|
||||
static struct class *tee_class;
|
||||
static dev_t tee_devt;
|
||||
|
||||
/*
 * File-operations open handler for a TEE device node: takes a reference
 * on the device, allocates a per-open tee_context and hands it to the
 * driver's own open() callback.  On any failure the context is freed and
 * the device reference dropped.
 */
static int tee_open(struct inode *inode, struct file *filp)
{
	int rc;
	struct tee_device *teedev;
	struct tee_context *ctx;

	teedev = container_of(inode->i_cdev, struct tee_device, cdev);
	if (!tee_device_get(teedev))
		return -EINVAL;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		rc = -ENOMEM;
		goto err;
	}

	ctx->teedev = teedev;
	INIT_LIST_HEAD(&ctx->list_shm);
	filp->private_data = ctx;
	rc = teedev->desc->ops->open(ctx);
	if (rc)
		goto err;

	return 0;
err:
	kfree(ctx);
	tee_device_put(teedev);
	return rc;
}
|
||||
|
||||
/*
 * File-operations release handler: lets the driver clean up, detaches
 * any still-live shared-memory objects from this context (they may
 * outlive it via their own file descriptors), then frees the context
 * and drops the device reference taken in tee_open().
 */
static int tee_release(struct inode *inode, struct file *filp)
{
	struct tee_context *ctx = filp->private_data;
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;

	ctx->teedev->desc->ops->release(ctx);
	mutex_lock(&ctx->teedev->mutex);
	list_for_each_entry(shm, &ctx->list_shm, link)
		shm->ctx = NULL;
	mutex_unlock(&ctx->teedev->mutex);
	kfree(ctx);
	tee_device_put(teedev);
	return 0;
}
|
||||
|
||||
/*
 * TEE_IOC_VERSION handler: ask the driver for its version/capability
 * data and copy it to user space.  Returns 0 or -EFAULT.
 */
static int tee_ioctl_version(struct tee_context *ctx,
			     struct tee_ioctl_version_data __user *uvers)
{
	struct tee_ioctl_version_data vers;

	ctx->teedev->desc->ops->get_version(ctx->teedev, &vers);
	if (copy_to_user(uvers, &vers, sizeof(vers)))
		return -EFAULT;
	return 0;
}
|
||||
|
||||
/*
 * TEE_IOC_SHM_ALLOC handler: allocate dma-buf backed shared memory of
 * the requested size, write the resulting id/flags/size back to user
 * space and return a file descriptor referencing the allocation (or a
 * negative errno).
 */
static int tee_ioctl_shm_alloc(struct tee_context *ctx,
			       struct tee_ioctl_shm_alloc_data __user *udata)
{
	long ret;
	struct tee_ioctl_shm_alloc_data data;
	struct tee_shm *shm;

	if (copy_from_user(&data, udata, sizeof(data)))
		return -EFAULT;

	/* Currently no input flags are supported */
	if (data.flags)
		return -EINVAL;

	data.id = -1;

	shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	data.id = shm->id;
	data.flags = shm->flags;
	data.size = shm->size;

	if (copy_to_user(udata, &data, sizeof(data)))
		ret = -EFAULT;
	else
		ret = tee_shm_get_fd(shm);

	/*
	 * When user space closes the file descriptor the shared memory
	 * should be freed or if tee_shm_get_fd() failed then it will
	 * be freed immediately.
	 */
	tee_shm_put(shm);
	return ret;
}
|
||||
|
||||
/*
 * Convert an ioctl parameter array from user space into kernel
 * struct tee_param entries, resolving memref shm ids into referenced
 * tee_shm pointers.
 *
 * On success every memref in @params holds a shm reference that the
 * caller must release with tee_shm_put().  Returns 0, -EFAULT, -EINVAL
 * or the error from tee_shm_get_from_id().
 */
static int params_from_user(struct tee_context *ctx, struct tee_param *params,
			    size_t num_params,
			    struct tee_ioctl_param __user *uparams)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_shm *shm;
		struct tee_ioctl_param ip;

		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
			return -EFAULT;

		/* All unused attribute bits has to be zero */
		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK)
			return -EINVAL;

		params[n].attr = ip.attr;
		switch (ip.attr) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
			/* Output-only: nothing to copy in */
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			params[n].u.value.a = ip.a;
			params[n].u.value.b = ip.b;
			params[n].u.value.c = ip.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			/*
			 * If we fail to get a pointer to a shared memory
			 * object (and increase the ref count) from an
			 * identifier we return an error. All pointers that
			 * has been added in params have an increased ref
			 * count. It's the callers responibility to do
			 * tee_shm_put() on all resolved pointers.
			 */
			shm = tee_shm_get_from_id(ctx, ip.c);
			if (IS_ERR(shm))
				return PTR_ERR(shm);

			params[n].u.memref.shm_offs = ip.a;
			params[n].u.memref.size = ip.b;
			params[n].u.memref.shm = shm;
			break;
		default:
			/* Unknown attribute */
			return -EINVAL;
		}
	}
	return 0;
}
|
||||
|
||||
/*
 * Copy the out/in-out results in @params back to the user-space
 * parameter array @uparams after a request has completed.
 *
 * Only value outputs and memref sizes are written back; input-only and
 * unused parameters are left untouched.  Returns 0 or -EFAULT.
 */
static int params_to_user(struct tee_ioctl_param __user *uparams,
			  size_t num_params, struct tee_param *params)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_ioctl_param __user *up = uparams + n;
		struct tee_param *p = params + n;

		switch (p->attr) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			if (put_user(p->u.value.a, &up->a) ||
			    put_user(p->u.value.b, &up->b) ||
			    put_user(p->u.value.c, &up->c))
				return -EFAULT;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			if (put_user((u64)p->u.memref.size, &up->b))
				return -EFAULT;
			/*
			 * Explicit break added: the original fell
			 * through into "default:" — harmless, but it
			 * defeats -Wimplicit-fallthrough auditing.
			 */
			break;
		default:
			break;
		}
	}
	return 0;
}
|
||||
|
||||
static bool param_is_memref(struct tee_param *param)
|
||||
{
|
||||
switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
|
||||
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
|
||||
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
|
||||
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * TEE_IOC_OPEN_SESSION handler: validate the variable-length argument
 * buffer, convert parameters from user space, call the driver's
 * open_session() and copy session id, return code and updated
 * parameters back.
 *
 * If the session opened but the results could not be written back, the
 * session is closed again so nothing leaks.  All shm references taken
 * by params_from_user() are dropped on every path.
 */
static int tee_ioctl_open_session(struct tee_context *ctx,
				  struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	size_t n;
	struct tee_ioctl_buf_data buf;
	struct tee_ioctl_open_session_arg __user *uarg;
	struct tee_ioctl_open_session_arg arg;
	struct tee_ioctl_param __user *uparams = NULL;
	struct tee_param *params = NULL;
	bool have_session = false;

	if (!ctx->teedev->desc->ops->open_session)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_ioctl_open_session_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	/* Buffer length must match the header plus the parameter array */
	if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
		return -EINVAL;

	if (arg.num_params) {
		params = kcalloc(arg.num_params, sizeof(struct tee_param),
				 GFP_KERNEL);
		if (!params)
			return -ENOMEM;
		uparams = uarg->params;
		rc = params_from_user(ctx, params, arg.num_params, uparams);
		if (rc)
			goto out;
	}

	rc = ctx->teedev->desc->ops->open_session(ctx, &arg, params);
	if (rc)
		goto out;
	have_session = true;

	if (put_user(arg.session, &uarg->session) ||
	    put_user(arg.ret, &uarg->ret) ||
	    put_user(arg.ret_origin, &uarg->ret_origin)) {
		rc = -EFAULT;
		goto out;
	}
	rc = params_to_user(uparams, arg.num_params, params);
out:
	/*
	 * If we've succeeded to open the session but failed to communicate
	 * it back to user space, close the session again to avoid leakage.
	 */
	if (rc && have_session && ctx->teedev->desc->ops->close_session)
		ctx->teedev->desc->ops->close_session(ctx, arg.session);

	if (params) {
		/* Decrease ref count for all valid shared memory pointers */
		for (n = 0; n < arg.num_params; n++)
			if (param_is_memref(params + n) &&
			    params[n].u.memref.shm)
				tee_shm_put(params[n].u.memref.shm);
		kfree(params);
	}

	return rc;
}
|
||||
|
||||
/*
 * TEE_IOC_INVOKE handler: validate the variable-length argument buffer,
 * convert parameters from user space, call the driver's invoke_func()
 * and copy return code and updated parameters back.  All shm references
 * taken by params_from_user() are dropped on every path.
 */
static int tee_ioctl_invoke(struct tee_context *ctx,
			    struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	size_t n;
	struct tee_ioctl_buf_data buf;
	struct tee_ioctl_invoke_arg __user *uarg;
	struct tee_ioctl_invoke_arg arg;
	struct tee_ioctl_param __user *uparams = NULL;
	struct tee_param *params = NULL;

	if (!ctx->teedev->desc->ops->invoke_func)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_ioctl_invoke_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	/* Buffer length must match the header plus the parameter array */
	if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
		return -EINVAL;

	if (arg.num_params) {
		params = kcalloc(arg.num_params, sizeof(struct tee_param),
				 GFP_KERNEL);
		if (!params)
			return -ENOMEM;
		uparams = uarg->params;
		rc = params_from_user(ctx, params, arg.num_params, uparams);
		if (rc)
			goto out;
	}

	rc = ctx->teedev->desc->ops->invoke_func(ctx, &arg, params);
	if (rc)
		goto out;

	if (put_user(arg.ret, &uarg->ret) ||
	    put_user(arg.ret_origin, &uarg->ret_origin)) {
		rc = -EFAULT;
		goto out;
	}
	rc = params_to_user(uparams, arg.num_params, params);
out:
	if (params) {
		/* Decrease ref count for all valid shared memory pointers */
		for (n = 0; n < arg.num_params; n++)
			if (param_is_memref(params + n) &&
			    params[n].u.memref.shm)
				tee_shm_put(params[n].u.memref.shm);
		kfree(params);
	}
	return rc;
}
|
||||
|
||||
/*
 * TEE_IOC_CANCEL handler: forward a cancellation request to the driver.
 * Returns -EINVAL if the driver has no cancel_req op, -EFAULT on a bad
 * user pointer, otherwise the driver's result.
 */
static int tee_ioctl_cancel(struct tee_context *ctx,
			    struct tee_ioctl_cancel_arg __user *uarg)
{
	struct tee_ioctl_cancel_arg arg;

	if (!ctx->teedev->desc->ops->cancel_req)
		return -EINVAL;

	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	return ctx->teedev->desc->ops->cancel_req(ctx, arg.cancel_id,
						  arg.session);
}
|
||||
|
||||
/*
 * TEE_IOC_CLOSE_SESSION handler: forward a session close to the driver.
 * Returns -EINVAL if the driver has no close_session op, -EFAULT on a
 * bad user pointer, otherwise the driver's result.
 */
static int
tee_ioctl_close_session(struct tee_context *ctx,
			struct tee_ioctl_close_session_arg __user *uarg)
{
	struct tee_ioctl_close_session_arg arg;

	if (!ctx->teedev->desc->ops->close_session)
		return -EINVAL;

	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	return ctx->teedev->desc->ops->close_session(ctx, arg.session);
}
|
||||
|
||||
/*
 * Copy a request's parameters out to the supplicant's user-space
 * parameter array.  Memrefs are translated back to (offset, size,
 * shm id) triples; a missing shm is encoded with id (u64)-1.
 * Returns 0 or -EFAULT.
 */
static int params_to_supp(struct tee_context *ctx,
			  struct tee_ioctl_param __user *uparams,
			  size_t num_params, struct tee_param *params)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_ioctl_param ip;
		struct tee_param *p = params + n;

		ip.attr = p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK;
		switch (p->attr) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			ip.a = p->u.value.a;
			ip.b = p->u.value.b;
			ip.c = p->u.value.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			ip.b = p->u.memref.size;
			if (!p->u.memref.shm) {
				ip.a = 0;
				ip.c = (u64)-1; /* invalid shm id */
				break;
			}
			ip.a = p->u.memref.shm_offs;
			ip.c = p->u.memref.shm->id;
			break;
		default:
			/* TYPE_NONE or unknown: report zeroed values */
			ip.a = 0;
			ip.b = 0;
			ip.c = 0;
			break;
		}

		if (copy_to_user(uparams + n, &ip, sizeof(ip)))
			return -EFAULT;
	}

	return 0;
}
|
||||
|
||||
/*
 * TEE_IOC_SUPPL_RECV handler: let the supplicant wait for the next
 * request.  The user buffer is validated against the num_params it
 * declares, the driver's supp_recv() blocks for a request, and the
 * function id plus parameters are copied back to user space.
 */
static int tee_ioctl_supp_recv(struct tee_context *ctx,
			       struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	struct tee_ioctl_buf_data buf;
	struct tee_iocl_supp_recv_arg __user *uarg;
	struct tee_param *params;
	u32 num_params;
	u32 func;

	if (!ctx->teedev->desc->ops->supp_recv)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_iocl_supp_recv_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (get_user(num_params, &uarg->num_params))
		return -EFAULT;

	/* Buffer length must match the header plus the parameter array */
	if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) != buf.buf_len)
		return -EINVAL;

	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	/* Blocks until a request is posted; may update num_params */
	rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params);
	if (rc)
		goto out;

	if (put_user(func, &uarg->func) ||
	    put_user(num_params, &uarg->num_params)) {
		rc = -EFAULT;
		goto out;
	}

	rc = params_to_supp(ctx, uarg->params, num_params, params);
out:
	kfree(params);
	return rc;
}
|
||||
|
||||
/*
 * Convert the supplicant's answer parameters from user space into
 * kernel struct tee_param entries.  Only out and in/out values, and
 * memref sizes, are accepted back; everything else is zeroed.
 * Returns 0, -EFAULT or -EINVAL.
 */
static int params_from_supp(struct tee_param *params, size_t num_params,
			    struct tee_ioctl_param __user *uparams)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_param *p = params + n;
		struct tee_ioctl_param ip;

		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
			return -EFAULT;

		/* All unused attribute bits has to be zero */
		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK)
			return -EINVAL;

		p->attr = ip.attr;
		switch (ip.attr) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			/* Only out and in/out values can be updated */
			p->u.value.a = ip.a;
			p->u.value.b = ip.b;
			p->u.value.c = ip.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			/*
			 * Only the size of the memref can be updated.
			 * Since we don't have access to the original
			 * parameters here, only store the supplied size.
			 * The driver will copy the updated size into the
			 * original parameters.
			 */
			p->u.memref.shm = NULL;
			p->u.memref.shm_offs = 0;
			p->u.memref.size = ip.b;
			break;
		default:
			memset(&p->u, 0, sizeof(p->u));
			break;
		}
	}
	return 0;
}
|
||||
|
||||
/*
 * TEE_IOC_SUPPL_SEND handler: the supplicant returns the result (ret code
 * plus updated parameters) of a request it previously received via
 * TEE_IOC_SUPPL_RECV.
 */
static int tee_ioctl_supp_send(struct tee_context *ctx,
			       struct tee_ioctl_buf_data __user *ubuf)
{
	long rc;
	struct tee_ioctl_buf_data buf;
	struct tee_iocl_supp_send_arg __user *uarg;
	struct tee_param *params;
	u32 num_params;
	u32 ret;

	/* Not valid for this driver */
	if (!ctx->teedev->desc->ops->supp_send)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	/* Buffer must hold at least the fixed header, at most the ABI max. */
	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_iocl_supp_send_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (get_user(ret, &uarg->ret) ||
	    get_user(num_params, &uarg->num_params))
		return -EFAULT;

	/*
	 * Unlike the recv path this is '>' not '!=': the buffer only has
	 * to be large enough for the claimed number of parameters.
	 */
	if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) > buf.buf_len)
		return -EINVAL;

	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	rc = params_from_supp(params, num_params, uarg->params);
	if (rc)
		goto out;

	rc = ctx->teedev->desc->ops->supp_send(ctx, ret, num_params, params);
out:
	kfree(params);
	return rc;
}
|
||||
|
||||
static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
struct tee_context *ctx = filp->private_data;
|
||||
void __user *uarg = (void __user *)arg;
|
||||
|
||||
switch (cmd) {
|
||||
case TEE_IOC_VERSION:
|
||||
return tee_ioctl_version(ctx, uarg);
|
||||
case TEE_IOC_SHM_ALLOC:
|
||||
return tee_ioctl_shm_alloc(ctx, uarg);
|
||||
case TEE_IOC_OPEN_SESSION:
|
||||
return tee_ioctl_open_session(ctx, uarg);
|
||||
case TEE_IOC_INVOKE:
|
||||
return tee_ioctl_invoke(ctx, uarg);
|
||||
case TEE_IOC_CANCEL:
|
||||
return tee_ioctl_cancel(ctx, uarg);
|
||||
case TEE_IOC_CLOSE_SESSION:
|
||||
return tee_ioctl_close_session(ctx, uarg);
|
||||
case TEE_IOC_SUPPL_RECV:
|
||||
return tee_ioctl_supp_recv(ctx, uarg);
|
||||
case TEE_IOC_SUPPL_SEND:
|
||||
return tee_ioctl_supp_send(ctx, uarg);
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * File operations for the /dev/tee* character devices. The same ioctl
 * handler serves .compat_ioctl: presumably the TEE ioctl ABI uses only
 * fixed-width fields so no compat translation is needed — verify against
 * the uapi header.
 */
static const struct file_operations tee_fops = {
	.owner = THIS_MODULE,
	.open = tee_open,
	.release = tee_release,
	.unlocked_ioctl = tee_ioctl,
	.compat_ioctl = tee_ioctl,
};
|
||||
|
||||
static void tee_release_device(struct device *dev)
|
||||
{
|
||||
struct tee_device *teedev = container_of(dev, struct tee_device, dev);
|
||||
|
||||
spin_lock(&driver_lock);
|
||||
clear_bit(teedev->id, dev_mask);
|
||||
spin_unlock(&driver_lock);
|
||||
mutex_destroy(&teedev->mutex);
|
||||
idr_destroy(&teedev->idr);
|
||||
kfree(teedev);
|
||||
}
|
||||
|
||||
/**
 * tee_device_alloc() - Allocate a new struct tee_device instance
 * @teedesc:	Descriptor for this driver
 * @dev:	Parent device for this device
 * @pool:	Shared memory pool, NULL if not used
 * @driver_data: Private driver data for this device
 *
 * Allocates a new struct tee_device instance. The device is
 * removed by tee_device_unregister().
 *
 * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
 */
struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
				    struct device *dev,
				    struct tee_shm_pool *pool,
				    void *driver_data)
{
	struct tee_device *teedev;
	void *ret;
	int rc;
	int offs = 0;

	/* The descriptor must provide the mandatory ops and a shm pool. */
	if (!teedesc || !teedesc->name || !teedesc->ops ||
	    !teedesc->ops->get_version || !teedesc->ops->open ||
	    !teedesc->ops->release || !pool)
		return ERR_PTR(-EINVAL);

	teedev = kzalloc(sizeof(*teedev), GFP_KERNEL);
	if (!teedev) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	/* Upper half of the id space is reserved for privileged devices. */
	if (teedesc->flags & TEE_DESC_PRIVILEGED)
		offs = TEE_NUM_DEVICES / 2;

	spin_lock(&driver_lock);
	teedev->id = find_next_zero_bit(dev_mask, TEE_NUM_DEVICES, offs);
	if (teedev->id < TEE_NUM_DEVICES)
		set_bit(teedev->id, dev_mask);
	spin_unlock(&driver_lock);

	/* No free id: all device slots are taken. */
	if (teedev->id >= TEE_NUM_DEVICES) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	/* Device node name: "tee<N>" or "teepriv<N>", N relative to offs. */
	snprintf(teedev->name, sizeof(teedev->name), "tee%s%d",
		 teedesc->flags & TEE_DESC_PRIVILEGED ? "priv" : "",
		 teedev->id - offs);

	teedev->dev.class = tee_class;
	teedev->dev.release = tee_release_device;
	teedev->dev.parent = dev;

	teedev->dev.devt = MKDEV(MAJOR(tee_devt), teedev->id);

	rc = dev_set_name(&teedev->dev, "%s", teedev->name);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_devt;
	}

	cdev_init(&teedev->cdev, &tee_fops);
	teedev->cdev.owner = teedesc->owner;
	teedev->cdev.kobj.parent = &teedev->dev.kobj;

	dev_set_drvdata(&teedev->dev, driver_data);
	device_initialize(&teedev->dev);

	/* 1 as tee_device_unregister() does one final tee_device_put() */
	teedev->num_users = 1;
	init_completion(&teedev->c_no_users);
	mutex_init(&teedev->mutex);
	idr_init(&teedev->idr);

	teedev->desc = teedesc;
	teedev->pool = pool;

	return teedev;
err_devt:
	unregister_chrdev_region(teedev->dev.devt, 1);
err:
	pr_err("could not register %s driver\n",
	       teedesc->flags & TEE_DESC_PRIVILEGED ? "privileged" : "client");
	/*
	 * Only clear the bitmap bit if it was actually allocated; teedev may
	 * also be NULL here when the kzalloc above failed (kfree(NULL) is a
	 * no-op).
	 */
	if (teedev && teedev->id < TEE_NUM_DEVICES) {
		spin_lock(&driver_lock);
		clear_bit(teedev->id, dev_mask);
		spin_unlock(&driver_lock);
	}
	kfree(teedev);
	return ret;
}
EXPORT_SYMBOL_GPL(tee_device_alloc);
|
||||
|
||||
static ssize_t implementation_id_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct tee_device *teedev = container_of(dev, struct tee_device, dev);
|
||||
struct tee_ioctl_version_data vers;
|
||||
|
||||
teedev->desc->ops->get_version(teedev, &vers);
|
||||
return scnprintf(buf, PAGE_SIZE, "%d\n", vers.impl_id);
|
||||
}
|
||||
static DEVICE_ATTR_RO(implementation_id);
|
||||
|
||||
/* sysfs attributes exposed by every TEE device node. */
static struct attribute *tee_dev_attrs[] = {
	&dev_attr_implementation_id.attr,
	NULL
};

static const struct attribute_group tee_dev_group = {
	.attrs = tee_dev_attrs,
};
|
||||
|
||||
/**
 * tee_device_register() - Registers a TEE device
 * @teedev:	Device to register
 *
 * tee_device_unregister() need to be called to remove the @teedev if
 * this function fails.
 *
 * @returns < 0 on failure
 */
int tee_device_register(struct tee_device *teedev)
{
	int rc;

	if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
		dev_err(&teedev->dev, "attempt to register twice\n");
		return -EINVAL;
	}

	/* Make the char device live before the sysfs/device entry. */
	rc = cdev_add(&teedev->cdev, teedev->dev.devt, 1);
	if (rc) {
		dev_err(&teedev->dev,
			"unable to cdev_add() %s, major %d, minor %d, err=%d\n",
			teedev->name, MAJOR(teedev->dev.devt),
			MINOR(teedev->dev.devt), rc);
		return rc;
	}

	rc = device_add(&teedev->dev);
	if (rc) {
		dev_err(&teedev->dev,
			"unable to device_add() %s, major %d, minor %d, err=%d\n",
			teedev->name, MAJOR(teedev->dev.devt),
			MINOR(teedev->dev.devt), rc);
		goto err_device_add;
	}

	rc = sysfs_create_group(&teedev->dev.kobj, &tee_dev_group);
	if (rc) {
		dev_err(&teedev->dev,
			"failed to create sysfs attributes, err=%d\n", rc);
		goto err_sysfs_create_group;
	}

	/* Marks the device for teardown in tee_device_unregister(). */
	teedev->flags |= TEE_DEVICE_FLAG_REGISTERED;
	return 0;

err_sysfs_create_group:
	device_del(&teedev->dev);
err_device_add:
	cdev_del(&teedev->cdev);
	return rc;
}
EXPORT_SYMBOL_GPL(tee_device_register);
|
||||
|
||||
void tee_device_put(struct tee_device *teedev)
|
||||
{
|
||||
mutex_lock(&teedev->mutex);
|
||||
/* Shouldn't put in this state */
|
||||
if (!WARN_ON(!teedev->desc)) {
|
||||
teedev->num_users--;
|
||||
if (!teedev->num_users) {
|
||||
teedev->desc = NULL;
|
||||
complete(&teedev->c_no_users);
|
||||
}
|
||||
}
|
||||
mutex_unlock(&teedev->mutex);
|
||||
}
|
||||
|
||||
bool tee_device_get(struct tee_device *teedev)
|
||||
{
|
||||
mutex_lock(&teedev->mutex);
|
||||
if (!teedev->desc) {
|
||||
mutex_unlock(&teedev->mutex);
|
||||
return false;
|
||||
}
|
||||
teedev->num_users++;
|
||||
mutex_unlock(&teedev->mutex);
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
 * tee_device_unregister() - Removes a TEE device
 * @teedev:	Device to unregister
 *
 * This function should be called to remove the @teedev even if
 * tee_device_register() hasn't been called yet. Does nothing if
 * @teedev is NULL.
 */
void tee_device_unregister(struct tee_device *teedev)
{
	if (!teedev)
		return;

	if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
		sysfs_remove_group(&teedev->dev.kobj, &tee_dev_group);
		cdev_del(&teedev->cdev);
		device_del(&teedev->dev);
	}

	/* Drop the initial reference taken in tee_device_alloc() ... */
	tee_device_put(teedev);
	/* ... and block until every other user has released theirs. */
	wait_for_completion(&teedev->c_no_users);

	/*
	 * No need to take a mutex any longer now since teedev->desc was
	 * set to NULL before teedev->c_no_users was completed.
	 */

	teedev->pool = NULL;

	/* Final device ref; tee_release_device() frees teedev. */
	put_device(&teedev->dev);
}
EXPORT_SYMBOL_GPL(tee_device_unregister);
|
||||
|
||||
/**
 * tee_get_drvdata() - Return driver_data pointer
 * @teedev:	Device containing the driver_data pointer
 * @returns the driver_data pointer supplied to tee_register().
 */
void *tee_get_drvdata(struct tee_device *teedev)
{
	/* Stored via dev_set_drvdata() in tee_device_alloc(). */
	return dev_get_drvdata(&teedev->dev);
}
EXPORT_SYMBOL_GPL(tee_get_drvdata);
|
||||
|
||||
/*
 * Subsystem init: create the "tee" device class and reserve the char
 * device number range shared by all TEE devices.
 */
static int __init tee_init(void)
{
	int rc;

	tee_class = class_create(THIS_MODULE, "tee");
	if (IS_ERR(tee_class)) {
		pr_err("couldn't create class\n");
		return PTR_ERR(tee_class);
	}

	rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee");
	if (rc) {
		pr_err("failed to allocate char dev region\n");
		class_destroy(tee_class);
		tee_class = NULL;
		return rc;
	}

	return 0;
}
|
||||
|
||||
/* Subsystem teardown: mirror of tee_init(). */
static void __exit tee_exit(void)
{
	class_destroy(tee_class);
	tee_class = NULL;
	unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
}
|
||||
|
||||
/*
 * Initialized at subsys level so the class and char-dev region exist
 * before TEE drivers (e.g. OP-TEE) probe.
 */
subsys_initcall(tee_init);
module_exit(tee_exit);

MODULE_AUTHOR("Linaro");
MODULE_DESCRIPTION("TEE Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");
|
129
drivers/tee/tee_private.h
Normal file
129
drivers/tee/tee_private.h
Normal file
|
@ -0,0 +1,129 @@
|
|||
/*
|
||||
* Copyright (c) 2015-2016, Linaro Limited
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
#ifndef TEE_PRIVATE_H
|
||||
#define TEE_PRIVATE_H
|
||||
|
||||
#include <linux/cdev.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/kref.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
struct tee_device;

/**
 * struct tee_shm - shared memory object
 * @teedev:	device used to allocate the object
 * @ctx:	context using the object, if NULL the context is gone
 * @link:	link element in the owning context's list_shm list
 * @paddr:	physical address of the shared memory
 * @kaddr:	virtual address of the shared memory
 * @size:	size of shared memory
 * @dmabuf:	dmabuf used to for exporting to user space
 * @flags:	defined by TEE_SHM_* in tee_drv.h
 * @id:		unique id of a shared memory object on this device
 */
struct tee_shm {
	struct tee_device *teedev;
	struct tee_context *ctx;
	struct list_head link;
	phys_addr_t paddr;
	void *kaddr;
	size_t size;
	struct dma_buf *dmabuf;
	u32 flags;
	int id;
};
|
||||
|
||||
struct tee_shm_pool_mgr;

/**
 * struct tee_shm_pool_mgr_ops - shared memory pool manager operations
 * @alloc:	called when allocating shared memory
 * @free:	called when freeing shared memory
 */
struct tee_shm_pool_mgr_ops {
	int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm,
		     size_t size);
	void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm);
};

/**
 * struct tee_shm_pool_mgr - shared memory manager
 * @ops:		operations
 * @private_data:	private data for the shared memory manager
 */
struct tee_shm_pool_mgr {
	const struct tee_shm_pool_mgr_ops *ops;
	void *private_data;
};
|
||||
|
||||
/**
 * struct tee_shm_pool - shared memory pool
 * @private_mgr:	pool manager for shared memory only between kernel
 *			and secure world
 * @dma_buf_mgr:	pool manager for shared memory exported to user space
 * @destroy:		called when destroying the pool
 * @private_data:	private data for the pool
 */
struct tee_shm_pool {
	struct tee_shm_pool_mgr private_mgr;
	struct tee_shm_pool_mgr dma_buf_mgr;
	void (*destroy)(struct tee_shm_pool *pool);
	void *private_data;
};
|
||||
|
||||
#define TEE_DEVICE_FLAG_REGISTERED	0x1
#define TEE_MAX_DEV_NAME_LEN		32

/**
 * struct tee_device - TEE Device representation
 * @name:	name of device
 * @desc:	description of device
 * @id:		unique id of device
 * @flags:	represented by TEE_DEVICE_FLAG_REGISTERED above
 * @dev:	embedded basic device structure
 * @cdev:	embedded cdev
 * @num_users:	number of active users of this device
 * @c_no_users:	completion used when unregistering the device
 * @mutex:	mutex protecting @num_users and @idr
 * @idr:	register of shared memory object allocated on this device
 * @pool:	shared memory pool
 */
struct tee_device {
	char name[TEE_MAX_DEV_NAME_LEN];
	const struct tee_desc *desc;
	int id;
	unsigned int flags;

	struct device dev;
	struct cdev cdev;

	size_t num_users;
	struct completion c_no_users;
	struct mutex mutex;	/* protects num_users and idr */

	struct idr idr;
	struct tee_shm_pool *pool;
};
|
||||
|
||||
/* Internal (non-exported) interfaces between the tee core source files. */
int tee_shm_init(void);

int tee_shm_get_fd(struct tee_shm *shm);

bool tee_device_get(struct tee_device *teedev);
void tee_device_put(struct tee_device *teedev);

#endif /*TEE_PRIVATE_H*/
#endif /*TEE_PRIVATE_H*/
|
358
drivers/tee/tee_shm.c
Normal file
358
drivers/tee/tee_shm.c
Normal file
|
@ -0,0 +1,358 @@
|
|||
/*
|
||||
* Copyright (c) 2015-2016, Linaro Limited
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
#include <linux/device.h>
|
||||
#include <linux/dma-buf.h>
|
||||
#include <linux/fdtable.h>
|
||||
#include <linux/idr.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/tee_drv.h>
|
||||
#include "tee_private.h"
|
||||
|
||||
/*
 * Final teardown of a shared memory object: remove it from the device's
 * id register and (if the context is still alive) the context's shm list,
 * return the backing memory to its pool and free the object.
 */
static void tee_shm_release(struct tee_shm *shm)
{
	struct tee_device *teedev = shm->teedev;
	struct tee_shm_pool_mgr *poolm;

	mutex_lock(&teedev->mutex);
	idr_remove(&teedev->idr, shm->id);
	if (shm->ctx)
		list_del(&shm->link);
	mutex_unlock(&teedev->mutex);

	/* Free into the same sub-pool the allocation came from. */
	if (shm->flags & TEE_SHM_DMA_BUF)
		poolm = &teedev->pool->dma_buf_mgr;
	else
		poolm = &teedev->pool->private_mgr;

	poolm->ops->free(poolm, shm);
	kfree(shm);

	/* Balances the tee_device_get() taken in tee_shm_alloc(). */
	tee_device_put(teedev);
}
|
||||
|
||||
/* Device attachments are not supported: no sg_table is ever produced. */
static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment
			*attach, enum dma_data_direction dir)
{
	return NULL;
}
|
||||
|
||||
/* Nothing to undo since tee_shm_op_map_dma_buf() never maps anything. */
static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach,
				     struct sg_table *table,
				     enum dma_data_direction dir)
{
}
|
||||
|
||||
/* Called by the dma-buf core when the last dmabuf reference is dropped. */
static void tee_shm_op_release(struct dma_buf *dmabuf)
{
	struct tee_shm *shm = dmabuf->priv;

	tee_shm_release(shm);
}
|
||||
|
||||
/* Kernel mapping of individual pages is not supported. */
static void *tee_shm_op_kmap_atomic(struct dma_buf *dmabuf, unsigned long pgnum)
{
	return NULL;
}
|
||||
|
||||
/* Kernel mapping of individual pages is not supported. */
static void *tee_shm_op_kmap(struct dma_buf *dmabuf, unsigned long pgnum)
{
	return NULL;
}
|
||||
|
||||
/*
 * mmap of the exported dma-buf: the backing memory is physically
 * contiguous, so a single remap_pfn_range() covers the whole vma.
 * NOTE(review): no local size check against shm->size — presumably the
 * dma-buf core has already bounded the request by dmabuf->size; confirm
 * for this kernel version.
 */
static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct tee_shm *shm = dmabuf->priv;
	size_t size = vma->vm_end - vma->vm_start;

	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}
|
||||
|
||||
/* dma-buf callbacks for shm objects exported to user space. */
static struct dma_buf_ops tee_shm_dma_buf_ops = {
	.map_dma_buf = tee_shm_op_map_dma_buf,
	.unmap_dma_buf = tee_shm_op_unmap_dma_buf,
	.release = tee_shm_op_release,
	.kmap_atomic = tee_shm_op_kmap_atomic,
	.kmap = tee_shm_op_kmap,
	.mmap = tee_shm_op_mmap,
};
|
||||
|
||||
/**
 * tee_shm_alloc() - Allocate shared memory
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 * @flags:	Flags setting properties for the requested shared memory.
 *
 * Memory allocated as global shared memory is automatically freed when the
 * TEE file pointer is closed. The @flags field uses the bits defined by
 * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
 * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and
 * associated with a dma-buf handle, else driver private memory.
 *
 * @returns a pointer to 'struct tee_shm' or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm_pool_mgr *poolm = NULL;
	struct tee_shm *shm;
	void *ret;
	int rc;

	if (!(flags & TEE_SHM_MAPPED)) {
		dev_err(teedev->dev.parent,
			"only mapped allocations supported\n");
		return ERR_PTR(-EINVAL);
	}

	if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) {
		dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
		return ERR_PTR(-EINVAL);
	}

	/* Hold a device reference for the lifetime of the shm object. */
	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->pool) {
		/* teedev has been detached from driver */
		ret = ERR_PTR(-EINVAL);
		goto err_dev_put;
	}

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_dev_put;
	}

	shm->flags = flags;
	shm->teedev = teedev;
	shm->ctx = ctx;
	/* dma-buf backed allocations come from a separate sub-pool. */
	if (flags & TEE_SHM_DMA_BUF)
		poolm = &teedev->pool->dma_buf_mgr;
	else
		poolm = &teedev->pool->private_mgr;

	rc = poolm->ops->alloc(poolm, shm, size);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_kfree;
	}

	/* Publish the object under a device-unique id (ids start at 1). */
	mutex_lock(&teedev->mutex);
	shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);
	if (shm->id < 0) {
		ret = ERR_PTR(shm->id);
		goto err_pool_free;
	}

	if (flags & TEE_SHM_DMA_BUF) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

		exp_info.ops = &tee_shm_dma_buf_ops;
		exp_info.size = shm->size;
		exp_info.flags = O_RDWR;
		exp_info.priv = shm;

		shm->dmabuf = dma_buf_export(&exp_info);
		if (IS_ERR(shm->dmabuf)) {
			ret = ERR_CAST(shm->dmabuf);
			goto err_rem;
		}
	}
	mutex_lock(&teedev->mutex);
	list_add_tail(&shm->link, &ctx->list_shm);
	mutex_unlock(&teedev->mutex);

	return shm;
err_rem:
	mutex_lock(&teedev->mutex);
	idr_remove(&teedev->idr, shm->id);
	mutex_unlock(&teedev->mutex);
err_pool_free:
	poolm->ops->free(poolm, shm);
err_kfree:
	kfree(shm);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}
EXPORT_SYMBOL_GPL(tee_shm_alloc);
|
||||
|
||||
/**
|
||||
* tee_shm_get_fd() - Increase reference count and return file descriptor
|
||||
* @shm: Shared memory handle
|
||||
* @returns user space file descriptor to shared memory
|
||||
*/
|
||||
int tee_shm_get_fd(struct tee_shm *shm)
|
||||
{
|
||||
u32 req_flags = TEE_SHM_MAPPED | TEE_SHM_DMA_BUF;
|
||||
int fd;
|
||||
|
||||
if ((shm->flags & req_flags) != req_flags)
|
||||
return -EINVAL;
|
||||
|
||||
fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
|
||||
if (fd >= 0)
|
||||
get_dma_buf(shm->dmabuf);
|
||||
return fd;
|
||||
}
|
||||
|
||||
/**
 * tee_shm_free() - Free shared memory
 * @shm:	Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
	/*
	 * dma_buf_put() decreases the dmabuf reference counter and will
	 * call tee_shm_release() when the last reference is gone.
	 *
	 * In the case of driver private memory we call tee_shm_release
	 * directly instead as it doesn't have a reference counter.
	 */
	if (shm->flags & TEE_SHM_DMA_BUF)
		dma_buf_put(shm->dmabuf);
	else
		tee_shm_release(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);
|
||||
|
||||
/**
|
||||
* tee_shm_va2pa() - Get physical address of a virtual address
|
||||
* @shm: Shared memory handle
|
||||
* @va: Virtual address to tranlsate
|
||||
* @pa: Returned physical address
|
||||
* @returns 0 on success and < 0 on failure
|
||||
*/
|
||||
int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
|
||||
{
|
||||
/* Check that we're in the range of the shm */
|
||||
if ((char *)va < (char *)shm->kaddr)
|
||||
return -EINVAL;
|
||||
if ((char *)va >= ((char *)shm->kaddr + shm->size))
|
||||
return -EINVAL;
|
||||
|
||||
return tee_shm_get_pa(
|
||||
shm, (unsigned long)va - (unsigned long)shm->kaddr, pa);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(tee_shm_va2pa);
|
||||
|
||||
/**
|
||||
* tee_shm_pa2va() - Get virtual address of a physical address
|
||||
* @shm: Shared memory handle
|
||||
* @pa: Physical address to tranlsate
|
||||
* @va: Returned virtual address
|
||||
* @returns 0 on success and < 0 on failure
|
||||
*/
|
||||
int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
|
||||
{
|
||||
/* Check that we're in the range of the shm */
|
||||
if (pa < shm->paddr)
|
||||
return -EINVAL;
|
||||
if (pa >= (shm->paddr + shm->size))
|
||||
return -EINVAL;
|
||||
|
||||
if (va) {
|
||||
void *v = tee_shm_get_va(shm, pa - shm->paddr);
|
||||
|
||||
if (IS_ERR(v))
|
||||
return PTR_ERR(v);
|
||||
*va = v;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(tee_shm_pa2va);
|
||||
|
||||
/**
|
||||
* tee_shm_get_va() - Get virtual address of a shared memory plus an offset
|
||||
* @shm: Shared memory handle
|
||||
* @offs: Offset from start of this shared memory
|
||||
* @returns virtual address of the shared memory + offs if offs is within
|
||||
* the bounds of this shared memory, else an ERR_PTR
|
||||
*/
|
||||
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
|
||||
{
|
||||
if (offs >= shm->size)
|
||||
return ERR_PTR(-EINVAL);
|
||||
return (char *)shm->kaddr + offs;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(tee_shm_get_va);
|
||||
|
||||
/**
|
||||
* tee_shm_get_pa() - Get physical address of a shared memory plus an offset
|
||||
* @shm: Shared memory handle
|
||||
* @offs: Offset from start of this shared memory
|
||||
* @pa: Physical address to return
|
||||
* @returns 0 if offs is within the bounds of this shared memory, else an
|
||||
* error code.
|
||||
*/
|
||||
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
|
||||
{
|
||||
if (offs >= shm->size)
|
||||
return -EINVAL;
|
||||
if (pa)
|
||||
*pa = shm->paddr + offs;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(tee_shm_get_pa);
|
||||
|
||||
/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
 * count
 * @ctx:	Context owning the shared memory
 * @id:		Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
	struct tee_device *teedev;
	struct tee_shm *shm;

	if (!ctx)
		return ERR_PTR(-EINVAL);

	teedev = ctx->teedev;
	mutex_lock(&teedev->mutex);
	shm = idr_find(&teedev->idr, id);
	/* A context may only look up shm objects it owns itself. */
	if (!shm || shm->ctx != ctx)
		shm = ERR_PTR(-EINVAL);
	else if (shm->flags & TEE_SHM_DMA_BUF)
		/* Reference taken under the mutex, so the shm can't vanish. */
		get_dma_buf(shm->dmabuf);
	mutex_unlock(&teedev->mutex);
	return shm;
}
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
|
||||
|
||||
/**
 * tee_shm_get_id() - Get id of a shared memory object
 * @shm:	Shared memory handle
 * @returns id
 */
int tee_shm_get_id(struct tee_shm *shm)
{
	return shm->id;
}
EXPORT_SYMBOL_GPL(tee_shm_get_id);
|
||||
|
||||
/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm:	Shared memory handle
 */
void tee_shm_put(struct tee_shm *shm)
{
	/* Only dma-buf backed shm objects are reference counted. */
	if (shm->flags & TEE_SHM_DMA_BUF)
		dma_buf_put(shm->dmabuf);
}
EXPORT_SYMBOL_GPL(tee_shm_put);
|
156
drivers/tee/tee_shm_pool.c
Normal file
156
drivers/tee/tee_shm_pool.c
Normal file
|
@ -0,0 +1,156 @@
|
|||
/*
|
||||
* Copyright (c) 2015, Linaro Limited
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
#include <linux/device.h>
|
||||
#include <linux/dma-buf.h>
|
||||
#include <linux/genalloc.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/tee_drv.h>
|
||||
#include "tee_private.h"
|
||||
|
||||
/*
 * Carve @size bytes (rounded up to the pool's minimum allocation unit)
 * out of the genalloc pool and record kaddr/paddr/size in @shm. The
 * memory is zeroed so no stale data is shared with the secure world.
 */
static int pool_op_gen_alloc(struct tee_shm_pool_mgr *poolm,
			     struct tee_shm *shm, size_t size)
{
	unsigned long va;
	struct gen_pool *genpool = poolm->private_data;
	size_t s = roundup(size, 1 << genpool->min_alloc_order);

	va = gen_pool_alloc(genpool, s);
	if (!va)
		return -ENOMEM;

	memset((void *)va, 0, s);
	shm->kaddr = (void *)va;
	shm->paddr = gen_pool_virt_to_phys(genpool, va);
	shm->size = s;
	return 0;
}
|
||||
|
||||
static void pool_op_gen_free(struct tee_shm_pool_mgr *poolm,
|
||||
struct tee_shm *shm)
|
||||
{
|
||||
gen_pool_free(poolm->private_data, (unsigned long)shm->kaddr,
|
||||
shm->size);
|
||||
shm->kaddr = NULL;
|
||||
}
|
||||
|
||||
/* genalloc-backed implementation of the pool manager operations. */
static const struct tee_shm_pool_mgr_ops pool_ops_generic = {
	.alloc = pool_op_gen_alloc,
	.free = pool_op_gen_free,
};
|
||||
|
||||
/* Tear down both genalloc sub-pools created by pool_res_mem_mgr_init(). */
static void pool_res_mem_destroy(struct tee_shm_pool *pool)
{
	gen_pool_destroy(pool->private_mgr.private_data);
	gen_pool_destroy(pool->dma_buf_mgr.private_data);
}
|
||||
|
||||
/*
 * Initialize one pool manager over a reserved-memory carveout described
 * by @info, using genalloc with the given minimum allocation order.
 * On success @mgr owns a new gen_pool; on failure nothing is left
 * allocated (mgr->private_data is only set on success).
 */
static int pool_res_mem_mgr_init(struct tee_shm_pool_mgr *mgr,
				 struct tee_shm_pool_mem_info *info,
				 int min_alloc_order)
{
	size_t page_mask = PAGE_SIZE - 1;
	struct gen_pool *genpool = NULL;
	int rc;

	/*
	 * Start and end must be page aligned
	 */
	if ((info->vaddr & page_mask) || (info->paddr & page_mask) ||
	    (info->size & page_mask))
		return -EINVAL;

	genpool = gen_pool_create(min_alloc_order, -1);
	if (!genpool)
		return -ENOMEM;

	/* Best-fit keeps fragmentation low in this small fixed carveout. */
	gen_pool_set_algo(genpool, gen_pool_best_fit, NULL);
	rc = gen_pool_add_virt(genpool, info->vaddr, info->paddr, info->size,
			       -1);
	if (rc) {
		gen_pool_destroy(genpool);
		return rc;
	}

	mgr->private_data = genpool;
	mgr->ops = &pool_ops_generic;
	return 0;
}
|
||||
|
||||
/**
 * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
 * memory range
 * @priv_info:	 Information for driver private shared memory pool
 * @dmabuf_info: Information for dma-buf shared memory pool
 *
 * Start and end of pools will must be page aligned.
 *
 * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied
 * in @dmabuf, others will use the range provided by @priv.
 *
 * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
 */
struct tee_shm_pool *
tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
			   struct tee_shm_pool_mem_info *dmabuf_info)
{
	struct tee_shm_pool *pool = NULL;
	int ret;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		ret = -ENOMEM;
		goto err;
	}

	/*
	 * Create the pool for driver private shared memory
	 */
	ret = pool_res_mem_mgr_init(&pool->private_mgr, priv_info,
				    3 /* 8 byte aligned */);
	if (ret)
		goto err;

	/*
	 * Create the pool for dma_buf shared memory
	 */
	ret = pool_res_mem_mgr_init(&pool->dma_buf_mgr, dmabuf_info,
				    PAGE_SHIFT);
	if (ret)
		goto err;

	pool->destroy = pool_res_mem_destroy;
	return pool;
err:
	if (ret == -ENOMEM)
		pr_err("%s: can't allocate memory for res_mem shared memory pool\n", __func__);
	/*
	 * Only the private sub-pool can be live here: the dma-buf init
	 * cleans up after itself on failure and private_data is set only
	 * on success.
	 */
	if (pool && pool->private_mgr.private_data)
		gen_pool_destroy(pool->private_mgr.private_data);
	kfree(pool);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
|
||||
|
||||
/**
|
||||
* tee_shm_pool_free() - Free a shared memory pool
|
||||
* @pool: The shared memory pool to free
|
||||
*
|
||||
* There must be no remaining shared memory allocated from this pool when
|
||||
* this function is called.
|
||||
*/
|
||||
void tee_shm_pool_free(struct tee_shm_pool *pool)
|
||||
{
|
||||
pool->destroy(pool);
|
||||
kfree(pool);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(tee_shm_pool_free);
|
|
@ -3361,13 +3361,6 @@ again:
|
|||
goto again;
|
||||
}
|
||||
|
||||
/* We've already setup this transaction, go ahead and exit */
|
||||
if (block_group->cache_generation == trans->transid &&
|
||||
i_size_read(inode)) {
|
||||
dcs = BTRFS_DC_SETUP;
|
||||
goto out_put;
|
||||
}
|
||||
|
||||
/*
|
||||
* We want to set the generation to 0, that way if anything goes wrong
|
||||
* from here on out we know not to trust this cache when we load up next
|
||||
|
@ -3391,6 +3384,13 @@ again:
|
|||
}
|
||||
WARN_ON(ret);
|
||||
|
||||
/* We've already setup this transaction, go ahead and exit */
|
||||
if (block_group->cache_generation == trans->transid &&
|
||||
i_size_read(inode)) {
|
||||
dcs = BTRFS_DC_SETUP;
|
||||
goto out_put;
|
||||
}
|
||||
|
||||
if (i_size_read(inode) > 0) {
|
||||
ret = btrfs_check_trunc_cache_free_space(root,
|
||||
&root->fs_info->global_block_rsv);
|
||||
|
|
|
@ -1260,7 +1260,7 @@ static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags)
|
|||
return 0;
|
||||
}
|
||||
|
||||
error = nfs_revalidate_inode(NFS_SERVER(inode), inode);
|
||||
error = nfs_lookup_verify_inode(inode, flags);
|
||||
dfprintk(LOOKUPCACHE, "NFS: %s: inode %lu is %s\n",
|
||||
__func__, inode->i_ino, error ? "invalid" : "valid");
|
||||
return !error;
|
||||
|
@ -1420,6 +1420,7 @@ static int nfs4_lookup_revalidate(struct dentry *, unsigned int);
|
|||
|
||||
const struct dentry_operations nfs4_dentry_operations = {
|
||||
.d_revalidate = nfs4_lookup_revalidate,
|
||||
.d_weak_revalidate = nfs_weak_revalidate,
|
||||
.d_delete = nfs_dentry_delete,
|
||||
.d_iput = nfs_dentry_iput,
|
||||
.d_automount = nfs_d_automount,
|
||||
|
|
|
@ -3379,7 +3379,9 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
|
|||
/* ignore lock owners */
|
||||
if (local->st_stateowner->so_is_open_owner == 0)
|
||||
continue;
|
||||
if (local->st_stateowner == &oo->oo_owner) {
|
||||
if (local->st_stateowner != &oo->oo_owner)
|
||||
continue;
|
||||
if (local->st_stid.sc_type == NFS4_OPEN_STID) {
|
||||
ret = local;
|
||||
atomic_inc(&ret->st_stid.sc_count);
|
||||
break;
|
||||
|
@ -3388,6 +3390,52 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static __be32
|
||||
nfsd4_verify_open_stid(struct nfs4_stid *s)
|
||||
{
|
||||
__be32 ret = nfs_ok;
|
||||
|
||||
switch (s->sc_type) {
|
||||
default:
|
||||
break;
|
||||
case NFS4_CLOSED_STID:
|
||||
case NFS4_CLOSED_DELEG_STID:
|
||||
ret = nfserr_bad_stateid;
|
||||
break;
|
||||
case NFS4_REVOKED_DELEG_STID:
|
||||
ret = nfserr_deleg_revoked;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Lock the stateid st_mutex, and deal with races with CLOSE */
|
||||
static __be32
|
||||
nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
|
||||
{
|
||||
__be32 ret;
|
||||
|
||||
mutex_lock(&stp->st_mutex);
|
||||
ret = nfsd4_verify_open_stid(&stp->st_stid);
|
||||
if (ret != nfs_ok)
|
||||
mutex_unlock(&stp->st_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct nfs4_ol_stateid *
|
||||
nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
|
||||
{
|
||||
struct nfs4_ol_stateid *stp;
|
||||
for (;;) {
|
||||
spin_lock(&fp->fi_lock);
|
||||
stp = nfsd4_find_existing_open(fp, open);
|
||||
spin_unlock(&fp->fi_lock);
|
||||
if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
|
||||
break;
|
||||
nfs4_put_stid(&stp->st_stid);
|
||||
}
|
||||
return stp;
|
||||
}
|
||||
|
||||
static struct nfs4_openowner *
|
||||
alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
|
||||
struct nfsd4_compound_state *cstate)
|
||||
|
@ -3420,23 +3468,27 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
|
|||
}
|
||||
|
||||
static struct nfs4_ol_stateid *
|
||||
init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
|
||||
struct nfsd4_open *open)
|
||||
init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
|
||||
{
|
||||
|
||||
struct nfs4_openowner *oo = open->op_openowner;
|
||||
struct nfs4_ol_stateid *retstp = NULL;
|
||||
struct nfs4_ol_stateid *stp;
|
||||
|
||||
stp = open->op_stp;
|
||||
/* We are moving these outside of the spinlocks to avoid the warnings */
|
||||
mutex_init(&stp->st_mutex);
|
||||
mutex_lock(&stp->st_mutex);
|
||||
|
||||
retry:
|
||||
spin_lock(&oo->oo_owner.so_client->cl_lock);
|
||||
spin_lock(&fp->fi_lock);
|
||||
|
||||
retstp = nfsd4_find_existing_open(fp, open);
|
||||
if (retstp)
|
||||
goto out_unlock;
|
||||
|
||||
open->op_stp = NULL;
|
||||
atomic_inc(&stp->st_stid.sc_count);
|
||||
stp->st_stid.sc_type = NFS4_OPEN_STID;
|
||||
INIT_LIST_HEAD(&stp->st_locks);
|
||||
|
@ -3453,11 +3505,16 @@ out_unlock:
|
|||
spin_unlock(&fp->fi_lock);
|
||||
spin_unlock(&oo->oo_owner.so_client->cl_lock);
|
||||
if (retstp) {
|
||||
mutex_lock(&retstp->st_mutex);
|
||||
/* Not that we need to, just for neatness */
|
||||
/* Handle races with CLOSE */
|
||||
if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
|
||||
nfs4_put_stid(&retstp->st_stid);
|
||||
goto retry;
|
||||
}
|
||||
/* To keep mutex tracking happy */
|
||||
mutex_unlock(&stp->st_mutex);
|
||||
stp = retstp;
|
||||
}
|
||||
return retstp;
|
||||
return stp;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -4260,9 +4317,9 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
|
|||
struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
|
||||
struct nfs4_file *fp = NULL;
|
||||
struct nfs4_ol_stateid *stp = NULL;
|
||||
struct nfs4_ol_stateid *swapstp = NULL;
|
||||
struct nfs4_delegation *dp = NULL;
|
||||
__be32 status;
|
||||
bool new_stp = false;
|
||||
|
||||
/*
|
||||
* Lookup file; if found, lookup stateid and check open request,
|
||||
|
@ -4274,9 +4331,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
|
|||
status = nfs4_check_deleg(cl, open, &dp);
|
||||
if (status)
|
||||
goto out;
|
||||
spin_lock(&fp->fi_lock);
|
||||
stp = nfsd4_find_existing_open(fp, open);
|
||||
spin_unlock(&fp->fi_lock);
|
||||
stp = nfsd4_find_and_lock_existing_open(fp, open);
|
||||
} else {
|
||||
open->op_file = NULL;
|
||||
status = nfserr_bad_stateid;
|
||||
|
@ -4284,41 +4339,31 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
|
|||
goto out;
|
||||
}
|
||||
|
||||
if (!stp) {
|
||||
stp = init_open_stateid(fp, open);
|
||||
if (!open->op_stp)
|
||||
new_stp = true;
|
||||
}
|
||||
|
||||
/*
|
||||
* OPEN the file, or upgrade an existing OPEN.
|
||||
* If truncate fails, the OPEN fails.
|
||||
*
|
||||
* stp is already locked.
|
||||
*/
|
||||
if (stp) {
|
||||
if (!new_stp) {
|
||||
/* Stateid was found, this is an OPEN upgrade */
|
||||
mutex_lock(&stp->st_mutex);
|
||||
status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
|
||||
if (status) {
|
||||
mutex_unlock(&stp->st_mutex);
|
||||
goto out;
|
||||
}
|
||||
} else {
|
||||
stp = open->op_stp;
|
||||
open->op_stp = NULL;
|
||||
/*
|
||||
* init_open_stateid() either returns a locked stateid
|
||||
* it found, or initializes and locks the new one we passed in
|
||||
*/
|
||||
swapstp = init_open_stateid(stp, fp, open);
|
||||
if (swapstp) {
|
||||
nfs4_put_stid(&stp->st_stid);
|
||||
stp = swapstp;
|
||||
status = nfs4_upgrade_open(rqstp, fp, current_fh,
|
||||
stp, open);
|
||||
if (status) {
|
||||
mutex_unlock(&stp->st_mutex);
|
||||
goto out;
|
||||
}
|
||||
goto upgrade_out;
|
||||
}
|
||||
status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
|
||||
if (status) {
|
||||
mutex_unlock(&stp->st_mutex);
|
||||
stp->st_stid.sc_type = NFS4_CLOSED_STID;
|
||||
release_open_stateid(stp);
|
||||
mutex_unlock(&stp->st_mutex);
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -4327,7 +4372,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
|
|||
if (stp->st_clnt_odstate == open->op_odstate)
|
||||
open->op_odstate = NULL;
|
||||
}
|
||||
upgrade_out:
|
||||
|
||||
nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
|
||||
mutex_unlock(&stp->st_mutex);
|
||||
|
||||
|
@ -5153,7 +5198,6 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
|
|||
bool unhashed;
|
||||
LIST_HEAD(reaplist);
|
||||
|
||||
s->st_stid.sc_type = NFS4_CLOSED_STID;
|
||||
spin_lock(&clp->cl_lock);
|
||||
unhashed = unhash_open_stateid(s, &reaplist);
|
||||
|
||||
|
@ -5192,10 +5236,12 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
|
|||
nfsd4_bump_seqid(cstate, status);
|
||||
if (status)
|
||||
goto out;
|
||||
|
||||
stp->st_stid.sc_type = NFS4_CLOSED_STID;
|
||||
nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
|
||||
mutex_unlock(&stp->st_mutex);
|
||||
|
||||
nfsd4_close_open_stateid(stp);
|
||||
mutex_unlock(&stp->st_mutex);
|
||||
|
||||
/* put reference from nfs4_preprocess_seqid_op */
|
||||
nfs4_put_stid(&stp->st_stid);
|
||||
|
|
|
@ -53,6 +53,13 @@
|
|||
|
||||
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
|
||||
|
||||
#define u64_to_user_ptr(x) ( \
|
||||
{ \
|
||||
typecheck(u64, x); \
|
||||
(void __user *)(uintptr_t)x; \
|
||||
} \
|
||||
)
|
||||
|
||||
/*
|
||||
* This looks more complex than it should be. But we need to
|
||||
* get the type for the ~ right in round_down (it needs to be
|
||||
|
|
|
@ -131,6 +131,7 @@ netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
|
|||
struct netlink_callback {
|
||||
struct sk_buff *skb;
|
||||
const struct nlmsghdr *nlh;
|
||||
int (*start)(struct netlink_callback *);
|
||||
int (*dump)(struct sk_buff * skb,
|
||||
struct netlink_callback *cb);
|
||||
int (*done)(struct netlink_callback *cb);
|
||||
|
@ -153,6 +154,7 @@ struct nlmsghdr *
|
|||
__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags);
|
||||
|
||||
struct netlink_dump_control {
|
||||
int (*start)(struct netlink_callback *);
|
||||
int (*dump)(struct sk_buff *skb, struct netlink_callback *);
|
||||
int (*done)(struct netlink_callback *);
|
||||
void *data;
|
||||
|
|
277
include/linux/tee_drv.h
Normal file
277
include/linux/tee_drv.h
Normal file
|
@ -0,0 +1,277 @@
|
|||
/*
|
||||
* Copyright (c) 2015-2016, Linaro Limited
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __TEE_DRV_H
|
||||
#define __TEE_DRV_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/idr.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/tee.h>
|
||||
|
||||
/*
|
||||
* The file describes the API provided by the generic TEE driver to the
|
||||
* specific TEE driver.
|
||||
*/
|
||||
|
||||
#define TEE_SHM_MAPPED 0x1 /* Memory mapped by the kernel */
|
||||
#define TEE_SHM_DMA_BUF 0x2 /* Memory with dma-buf handle */
|
||||
|
||||
struct tee_device;
|
||||
struct tee_shm;
|
||||
struct tee_shm_pool;
|
||||
|
||||
/**
|
||||
* struct tee_context - driver specific context on file pointer data
|
||||
* @teedev: pointer to this drivers struct tee_device
|
||||
* @list_shm: List of shared memory object owned by this context
|
||||
* @data: driver specific context data, managed by the driver
|
||||
*/
|
||||
struct tee_context {
|
||||
struct tee_device *teedev;
|
||||
struct list_head list_shm;
|
||||
void *data;
|
||||
};
|
||||
|
||||
struct tee_param_memref {
|
||||
size_t shm_offs;
|
||||
size_t size;
|
||||
struct tee_shm *shm;
|
||||
};
|
||||
|
||||
struct tee_param_value {
|
||||
u64 a;
|
||||
u64 b;
|
||||
u64 c;
|
||||
};
|
||||
|
||||
struct tee_param {
|
||||
u64 attr;
|
||||
union {
|
||||
struct tee_param_memref memref;
|
||||
struct tee_param_value value;
|
||||
} u;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct tee_driver_ops - driver operations vtable
|
||||
* @get_version: returns version of driver
|
||||
* @open: called when the device file is opened
|
||||
* @release: release this open file
|
||||
* @open_session: open a new session
|
||||
* @close_session: close a session
|
||||
* @invoke_func: invoke a trusted function
|
||||
* @cancel_req: request cancel of an ongoing invoke or open
|
||||
* @supp_revc: called for supplicant to get a command
|
||||
* @supp_send: called for supplicant to send a response
|
||||
*/
|
||||
struct tee_driver_ops {
|
||||
void (*get_version)(struct tee_device *teedev,
|
||||
struct tee_ioctl_version_data *vers);
|
||||
int (*open)(struct tee_context *ctx);
|
||||
void (*release)(struct tee_context *ctx);
|
||||
int (*open_session)(struct tee_context *ctx,
|
||||
struct tee_ioctl_open_session_arg *arg,
|
||||
struct tee_param *param);
|
||||
int (*close_session)(struct tee_context *ctx, u32 session);
|
||||
int (*invoke_func)(struct tee_context *ctx,
|
||||
struct tee_ioctl_invoke_arg *arg,
|
||||
struct tee_param *param);
|
||||
int (*cancel_req)(struct tee_context *ctx, u32 cancel_id, u32 session);
|
||||
int (*supp_recv)(struct tee_context *ctx, u32 *func, u32 *num_params,
|
||||
struct tee_param *param);
|
||||
int (*supp_send)(struct tee_context *ctx, u32 ret, u32 num_params,
|
||||
struct tee_param *param);
|
||||
};
|
||||
|
||||
/**
|
||||
* struct tee_desc - Describes the TEE driver to the subsystem
|
||||
* @name: name of driver
|
||||
* @ops: driver operations vtable
|
||||
* @owner: module providing the driver
|
||||
* @flags: Extra properties of driver, defined by TEE_DESC_* below
|
||||
*/
|
||||
#define TEE_DESC_PRIVILEGED 0x1
|
||||
struct tee_desc {
|
||||
const char *name;
|
||||
const struct tee_driver_ops *ops;
|
||||
struct module *owner;
|
||||
u32 flags;
|
||||
};
|
||||
|
||||
/**
|
||||
* tee_device_alloc() - Allocate a new struct tee_device instance
|
||||
* @teedesc: Descriptor for this driver
|
||||
* @dev: Parent device for this device
|
||||
* @pool: Shared memory pool, NULL if not used
|
||||
* @driver_data: Private driver data for this device
|
||||
*
|
||||
* Allocates a new struct tee_device instance. The device is
|
||||
* removed by tee_device_unregister().
|
||||
*
|
||||
* @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
|
||||
*/
|
||||
struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
|
||||
struct device *dev,
|
||||
struct tee_shm_pool *pool,
|
||||
void *driver_data);
|
||||
|
||||
/**
|
||||
* tee_device_register() - Registers a TEE device
|
||||
* @teedev: Device to register
|
||||
*
|
||||
* tee_device_unregister() need to be called to remove the @teedev if
|
||||
* this function fails.
|
||||
*
|
||||
* @returns < 0 on failure
|
||||
*/
|
||||
int tee_device_register(struct tee_device *teedev);
|
||||
|
||||
/**
|
||||
* tee_device_unregister() - Removes a TEE device
|
||||
* @teedev: Device to unregister
|
||||
*
|
||||
* This function should be called to remove the @teedev even if
|
||||
* tee_device_register() hasn't been called yet. Does nothing if
|
||||
* @teedev is NULL.
|
||||
*/
|
||||
void tee_device_unregister(struct tee_device *teedev);
|
||||
|
||||
/**
|
||||
* struct tee_shm_pool_mem_info - holds information needed to create a shared
|
||||
* memory pool
|
||||
* @vaddr: Virtual address of start of pool
|
||||
* @paddr: Physical address of start of pool
|
||||
* @size: Size in bytes of the pool
|
||||
*/
|
||||
struct tee_shm_pool_mem_info {
|
||||
unsigned long vaddr;
|
||||
phys_addr_t paddr;
|
||||
size_t size;
|
||||
};
|
||||
|
||||
/**
|
||||
* tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
|
||||
* memory range
|
||||
* @priv_info: Information for driver private shared memory pool
|
||||
* @dmabuf_info: Information for dma-buf shared memory pool
|
||||
*
|
||||
* Start and end of pools will must be page aligned.
|
||||
*
|
||||
* Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied
|
||||
* in @dmabuf, others will use the range provided by @priv.
|
||||
*
|
||||
* @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
|
||||
*/
|
||||
struct tee_shm_pool *
|
||||
tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
|
||||
struct tee_shm_pool_mem_info *dmabuf_info);
|
||||
|
||||
/**
|
||||
* tee_shm_pool_free() - Free a shared memory pool
|
||||
* @pool: The shared memory pool to free
|
||||
*
|
||||
* The must be no remaining shared memory allocated from this pool when
|
||||
* this function is called.
|
||||
*/
|
||||
void tee_shm_pool_free(struct tee_shm_pool *pool);
|
||||
|
||||
/**
|
||||
* tee_get_drvdata() - Return driver_data pointer
|
||||
* @returns the driver_data pointer supplied to tee_register().
|
||||
*/
|
||||
void *tee_get_drvdata(struct tee_device *teedev);
|
||||
|
||||
/**
|
||||
* tee_shm_alloc() - Allocate shared memory
|
||||
* @ctx: Context that allocates the shared memory
|
||||
* @size: Requested size of shared memory
|
||||
* @flags: Flags setting properties for the requested shared memory.
|
||||
*
|
||||
* Memory allocated as global shared memory is automatically freed when the
|
||||
* TEE file pointer is closed. The @flags field uses the bits defined by
|
||||
* TEE_SHM_* above. TEE_SHM_MAPPED must currently always be set. If
|
||||
* TEE_SHM_DMA_BUF global shared memory will be allocated and associated
|
||||
* with a dma-buf handle, else driver private memory.
|
||||
*
|
||||
* @returns a pointer to 'struct tee_shm'
|
||||
*/
|
||||
struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags);
|
||||
|
||||
/**
|
||||
* tee_shm_free() - Free shared memory
|
||||
* @shm: Handle to shared memory to free
|
||||
*/
|
||||
void tee_shm_free(struct tee_shm *shm);
|
||||
|
||||
/**
|
||||
* tee_shm_put() - Decrease reference count on a shared memory handle
|
||||
* @shm: Shared memory handle
|
||||
*/
|
||||
void tee_shm_put(struct tee_shm *shm);
|
||||
|
||||
/**
|
||||
* tee_shm_va2pa() - Get physical address of a virtual address
|
||||
* @shm: Shared memory handle
|
||||
* @va: Virtual address to tranlsate
|
||||
* @pa: Returned physical address
|
||||
* @returns 0 on success and < 0 on failure
|
||||
*/
|
||||
int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa);
|
||||
|
||||
/**
|
||||
* tee_shm_pa2va() - Get virtual address of a physical address
|
||||
* @shm: Shared memory handle
|
||||
* @pa: Physical address to tranlsate
|
||||
* @va: Returned virtual address
|
||||
* @returns 0 on success and < 0 on failure
|
||||
*/
|
||||
int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va);
|
||||
|
||||
/**
|
||||
* tee_shm_get_va() - Get virtual address of a shared memory plus an offset
|
||||
* @shm: Shared memory handle
|
||||
* @offs: Offset from start of this shared memory
|
||||
* @returns virtual address of the shared memory + offs if offs is within
|
||||
* the bounds of this shared memory, else an ERR_PTR
|
||||
*/
|
||||
void *tee_shm_get_va(struct tee_shm *shm, size_t offs);
|
||||
|
||||
/**
|
||||
* tee_shm_get_pa() - Get physical address of a shared memory plus an offset
|
||||
* @shm: Shared memory handle
|
||||
* @offs: Offset from start of this shared memory
|
||||
* @pa: Physical address to return
|
||||
* @returns 0 if offs is within the bounds of this shared memory, else an
|
||||
* error code.
|
||||
*/
|
||||
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa);
|
||||
|
||||
/**
|
||||
* tee_shm_get_id() - Get id of a shared memory object
|
||||
* @shm: Shared memory handle
|
||||
* @returns id
|
||||
*/
|
||||
int tee_shm_get_id(struct tee_shm *shm);
|
||||
|
||||
/**
|
||||
* tee_shm_get_from_id() - Find shared memory object and increase reference
|
||||
* count
|
||||
* @ctx: Context owning the shared memory
|
||||
* @id: Id of shared memory object
|
||||
* @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
|
||||
*/
|
||||
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id);
|
||||
|
||||
#endif /*__TEE_DRV_H*/
|
|
@ -114,6 +114,7 @@ static inline void genl_info_net_set(struct genl_info *info, struct net *net)
|
|||
* @flags: flags
|
||||
* @policy: attribute validation policy
|
||||
* @doit: standard command callback
|
||||
* @start: start callback for dumps
|
||||
* @dumpit: callback for dumpers
|
||||
* @done: completion callback for dumps
|
||||
* @ops_list: operations list
|
||||
|
@ -122,6 +123,7 @@ struct genl_ops {
|
|||
const struct nla_policy *policy;
|
||||
int (*doit)(struct sk_buff *skb,
|
||||
struct genl_info *info);
|
||||
int (*start)(struct netlink_callback *cb);
|
||||
int (*dumpit)(struct sk_buff *skb,
|
||||
struct netlink_callback *cb);
|
||||
int (*done)(struct netlink_callback *cb);
|
||||
|
|
|
@ -90,7 +90,7 @@ PTR_FIELD(PTR_GEN, 0, 8)
|
|||
|
||||
#define PTR_CHECK_DEV ((1 << PTR_DEV_BITS) - 1)
|
||||
|
||||
#define PTR(gen, offset, dev) \
|
||||
#define MAKE_PTR(gen, offset, dev) \
|
||||
((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
|
||||
|
||||
/* Bkey utility code */
|
||||
|
|
346
include/uapi/linux/tee.h
Normal file
346
include/uapi/linux/tee.h
Normal file
|
@ -0,0 +1,346 @@
|
|||
/*
|
||||
* Copyright (c) 2015-2016, Linaro Limited
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice,
|
||||
* this list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef __TEE_H
|
||||
#define __TEE_H
|
||||
|
||||
#include <linux/ioctl.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
/*
|
||||
* This file describes the API provided by a TEE driver to user space.
|
||||
*
|
||||
* Each TEE driver defines a TEE specific protocol which is used for the
|
||||
* data passed back and forth using TEE_IOC_CMD.
|
||||
*/
|
||||
|
||||
/* Helpers to make the ioctl defines */
|
||||
#define TEE_IOC_MAGIC 0xa4
|
||||
#define TEE_IOC_BASE 0
|
||||
|
||||
/* Flags relating to shared memory */
|
||||
#define TEE_IOCTL_SHM_MAPPED 0x1 /* memory mapped in normal world */
|
||||
#define TEE_IOCTL_SHM_DMA_BUF 0x2 /* dma-buf handle on shared memory */
|
||||
|
||||
#define TEE_MAX_ARG_SIZE 1024
|
||||
|
||||
#define TEE_GEN_CAP_GP (1 << 0)/* GlobalPlatform compliant TEE */
|
||||
|
||||
/*
|
||||
* TEE Implementation ID
|
||||
*/
|
||||
#define TEE_IMPL_ID_OPTEE 1
|
||||
|
||||
/*
|
||||
* OP-TEE specific capabilities
|
||||
*/
|
||||
#define TEE_OPTEE_CAP_TZ (1 << 0)
|
||||
|
||||
/**
|
||||
* struct tee_ioctl_version_data - TEE version
|
||||
* @impl_id: [out] TEE implementation id
|
||||
* @impl_caps: [out] Implementation specific capabilities
|
||||
* @gen_caps: [out] Generic capabilities, defined by TEE_GEN_CAPS_* above
|
||||
*
|
||||
* Identifies the TEE implementation, @impl_id is one of TEE_IMPL_ID_* above.
|
||||
* @impl_caps is implementation specific, for example TEE_OPTEE_CAP_*
|
||||
* is valid when @impl_id == TEE_IMPL_ID_OPTEE.
|
||||
*/
|
||||
struct tee_ioctl_version_data {
|
||||
__u32 impl_id;
|
||||
__u32 impl_caps;
|
||||
__u32 gen_caps;
|
||||
};
|
||||
|
||||
/**
|
||||
* TEE_IOC_VERSION - query version of TEE
|
||||
*
|
||||
* Takes a tee_ioctl_version_data struct and returns with the TEE version
|
||||
* data filled in.
|
||||
*/
|
||||
#define TEE_IOC_VERSION _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 0, \
|
||||
struct tee_ioctl_version_data)
|
||||
|
||||
/**
|
||||
* struct tee_ioctl_shm_alloc_data - Shared memory allocate argument
|
||||
* @size: [in/out] Size of shared memory to allocate
|
||||
* @flags: [in/out] Flags to/from allocation.
|
||||
* @id: [out] Identifier of the shared memory
|
||||
*
|
||||
* The flags field should currently be zero as input. Updated by the call
|
||||
* with actual flags as defined by TEE_IOCTL_SHM_* above.
|
||||
* This structure is used as argument for TEE_IOC_SHM_ALLOC below.
|
||||
*/
|
||||
struct tee_ioctl_shm_alloc_data {
|
||||
__u64 size;
|
||||
__u32 flags;
|
||||
__s32 id;
|
||||
};
|
||||
|
||||
/**
|
||||
* TEE_IOC_SHM_ALLOC - allocate shared memory
|
||||
*
|
||||
* Allocates shared memory between the user space process and secure OS.
|
||||
*
|
||||
* Returns a file descriptor on success or < 0 on failure
|
||||
*
|
||||
* The returned file descriptor is used to map the shared memory into user
|
||||
* space. The shared memory is freed when the descriptor is closed and the
|
||||
* memory is unmapped.
|
||||
*/
|
||||
#define TEE_IOC_SHM_ALLOC _IOWR(TEE_IOC_MAGIC, TEE_IOC_BASE + 1, \
|
||||
struct tee_ioctl_shm_alloc_data)
|
||||
|
||||
/**
|
||||
* struct tee_ioctl_buf_data - Variable sized buffer
|
||||
* @buf_ptr: [in] A __user pointer to a buffer
|
||||
* @buf_len: [in] Length of the buffer above
|
||||
*
|
||||
* Used as argument for TEE_IOC_OPEN_SESSION, TEE_IOC_INVOKE,
|
||||
* TEE_IOC_SUPPL_RECV, and TEE_IOC_SUPPL_SEND below.
|
||||
*/
|
||||
struct tee_ioctl_buf_data {
|
||||
__u64 buf_ptr;
|
||||
__u64 buf_len;
|
||||
};
|
||||
|
||||
/*
|
||||
* Attributes for struct tee_ioctl_param, selects field in the union
|
||||
*/
|
||||
#define TEE_IOCTL_PARAM_ATTR_TYPE_NONE 0 /* parameter not used */
|
||||
|
||||
/*
|
||||
* These defines value parameters (struct tee_ioctl_param_value)
|
||||
*/
|
||||
#define TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT 1
|
||||
#define TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT 2
|
||||
#define TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT 3 /* input and output */
|
||||
|
||||
/*
|
||||
* These defines shared memory reference parameters (struct
|
||||
* tee_ioctl_param_memref)
|
||||
*/
|
||||
#define TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT 5
|
||||
#define TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT 6
|
||||
#define TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT 7 /* input and output */
|
||||
|
||||
/*
|
||||
* Mask for the type part of the attribute, leaves room for more types
|
||||
*/
|
||||
#define TEE_IOCTL_PARAM_ATTR_TYPE_MASK 0xff
|
||||
|
||||
/*
|
||||
* Matches TEEC_LOGIN_* in GP TEE Client API
|
||||
* Are only defined for GP compliant TEEs
|
||||
*/
|
||||
#define TEE_IOCTL_LOGIN_PUBLIC 0
|
||||
#define TEE_IOCTL_LOGIN_USER 1
|
||||
#define TEE_IOCTL_LOGIN_GROUP 2
|
||||
#define TEE_IOCTL_LOGIN_APPLICATION 4
|
||||
#define TEE_IOCTL_LOGIN_USER_APPLICATION 5
|
||||
#define TEE_IOCTL_LOGIN_GROUP_APPLICATION 6
|
||||
|
||||
/**
|
||||
* struct tee_ioctl_param - parameter
|
||||
* @attr: attributes
|
||||
* @a: if a memref, offset into the shared memory object, else a value parameter
|
||||
* @b: if a memref, size of the buffer, else a value parameter
|
||||
* @c: if a memref, shared memory identifier, else a value parameter
|
||||
*
|
||||
* @attr & TEE_PARAM_ATTR_TYPE_MASK indicates if memref or value is used in
|
||||
* the union. TEE_PARAM_ATTR_TYPE_VALUE_* indicates value and
|
||||
* TEE_PARAM_ATTR_TYPE_MEMREF_* indicates memref. TEE_PARAM_ATTR_TYPE_NONE
|
||||
* indicates that none of the members are used.
|
||||
*
|
||||
* Shared memory is allocated with TEE_IOC_SHM_ALLOC which returns an
|
||||
* identifier representing the shared memory object. A memref can reference
|
||||
* a part of a shared memory by specifying an offset (@a) and size (@b) of
|
||||
* the object. To supply the entire shared memory object set the offset
|
||||
* (@a) to 0 and size (@b) to the previously returned size of the object.
|
||||
*/
|
||||
struct tee_ioctl_param {
|
||||
__u64 attr;
|
||||
__u64 a;
|
||||
__u64 b;
|
||||
__u64 c;
|
||||
};
|
||||
|
||||
#define TEE_IOCTL_UUID_LEN 16
|
||||
|
||||
/**
|
||||
* struct tee_ioctl_open_session_arg - Open session argument
|
||||
* @uuid: [in] UUID of the Trusted Application
|
||||
* @clnt_uuid: [in] UUID of client
|
||||
* @clnt_login: [in] Login class of client, TEE_IOCTL_LOGIN_* above
|
||||
* @cancel_id: [in] Cancellation id, a unique value to identify this request
|
||||
* @session: [out] Session id
|
||||
* @ret: [out] return value
|
||||
* @ret_origin [out] origin of the return value
|
||||
* @num_params [in] number of parameters following this struct
|
||||
*/
|
||||
struct tee_ioctl_open_session_arg {
|
||||
__u8 uuid[TEE_IOCTL_UUID_LEN];
|
||||
__u8 clnt_uuid[TEE_IOCTL_UUID_LEN];
|
||||
__u32 clnt_login;
|
||||
__u32 cancel_id;
|
||||
__u32 session;
|
||||
__u32 ret;
|
||||
__u32 ret_origin;
|
||||
__u32 num_params;
|
||||
/* num_params tells the actual number of element in params */
|
||||
struct tee_ioctl_param params[];
|
||||
};
|
||||
|
||||
/**
|
||||
* TEE_IOC_OPEN_SESSION - opens a session to a Trusted Application
|
||||
*
|
||||
* Takes a struct tee_ioctl_buf_data which contains a struct
|
||||
* tee_ioctl_open_session_arg followed by any array of struct
|
||||
* tee_ioctl_param
|
||||
*/
|
||||
#define TEE_IOC_OPEN_SESSION _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 2, \
|
||||
struct tee_ioctl_buf_data)
|
||||
|
||||
/**
|
||||
* struct tee_ioctl_invoke_func_arg - Invokes a function in a Trusted
|
||||
* Application
|
||||
* @func: [in] Trusted Application function, specific to the TA
|
||||
* @session: [in] Session id
|
||||
* @cancel_id: [in] Cancellation id, a unique value to identify this request
|
||||
* @ret: [out] return value
|
||||
* @ret_origin [out] origin of the return value
|
||||
* @num_params [in] number of parameters following this struct
|
||||
*/
|
||||
struct tee_ioctl_invoke_arg {
|
||||
__u32 func;
|
||||
__u32 session;
|
||||
__u32 cancel_id;
|
||||
__u32 ret;
|
||||
__u32 ret_origin;
|
||||
__u32 num_params;
|
||||
/* num_params tells the actual number of element in params */
|
||||
struct tee_ioctl_param params[];
|
||||
};
|
||||
|
||||
/**
|
||||
* TEE_IOC_INVOKE - Invokes a function in a Trusted Application
|
||||
*
|
||||
* Takes a struct tee_ioctl_buf_data which contains a struct
|
||||
* tee_invoke_func_arg followed by any array of struct tee_param
|
||||
*/
|
||||
#define TEE_IOC_INVOKE _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 3, \
|
||||
struct tee_ioctl_buf_data)
|
||||
|
||||
/**
|
||||
* struct tee_ioctl_cancel_arg - Cancels an open session or invoke ioctl
|
||||
* @cancel_id: [in] Cancellation id, a unique value to identify this request
|
||||
* @session: [in] Session id, if the session is opened, else set to 0
|
||||
*/
|
||||
struct tee_ioctl_cancel_arg {
|
||||
__u32 cancel_id;
|
||||
__u32 session;
|
||||
};
|
||||
|
||||
/**
|
||||
* TEE_IOC_CANCEL - Cancels an open session or invoke
|
||||
*/
|
||||
#define TEE_IOC_CANCEL _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 4, \
|
||||
struct tee_ioctl_cancel_arg)
|
||||
|
||||
/**
|
||||
* struct tee_ioctl_close_session_arg - Closes an open session
|
||||
* @session: [in] Session id
|
||||
*/
|
||||
struct tee_ioctl_close_session_arg {
|
||||
__u32 session;
|
||||
};
|
||||
|
||||
/**
|
||||
* TEE_IOC_CLOSE_SESSION - Closes a session
|
||||
*/
|
||||
#define TEE_IOC_CLOSE_SESSION _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 5, \
|
||||
struct tee_ioctl_close_session_arg)
|
||||
|
||||
/**
|
||||
* struct tee_iocl_supp_recv_arg - Receive a request for a supplicant function
|
||||
* @func: [in] supplicant function
|
||||
* @num_params [in/out] number of parameters following this struct
|
||||
*
|
||||
* @num_params is the number of params that tee-supplicant has room to
|
||||
* receive when input, @num_params is the number of actual params
|
||||
* tee-supplicant receives when output.
|
||||
*/
|
||||
struct tee_iocl_supp_recv_arg {
|
||||
__u32 func;
|
||||
__u32 num_params;
|
||||
/* num_params tells the actual number of element in params */
|
||||
struct tee_ioctl_param params[];
|
||||
};
|
||||
|
||||
/**
|
||||
* TEE_IOC_SUPPL_RECV - Receive a request for a supplicant function
|
||||
*
|
||||
* Takes a struct tee_ioctl_buf_data which contains a struct
|
||||
* tee_iocl_supp_recv_arg followed by any array of struct tee_param
|
||||
*/
|
||||
#define TEE_IOC_SUPPL_RECV _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 6, \
|
||||
struct tee_ioctl_buf_data)
|
||||
|
||||
/**
|
||||
* struct tee_iocl_supp_send_arg - Send a response to a received request
|
||||
* @ret: [out] return value
|
||||
* @num_params [in] number of parameters following this struct
|
||||
*/
|
||||
struct tee_iocl_supp_send_arg {
|
||||
__u32 ret;
|
||||
__u32 num_params;
|
||||
/* num_params tells the actual number of element in params */
|
||||
struct tee_ioctl_param params[];
|
||||
};
|
||||
|
||||
/**
|
||||
* TEE_IOC_SUPPL_SEND - Receive a request for a supplicant function
|
||||
*
|
||||
* Takes a struct tee_ioctl_buf_data which contains a struct
|
||||
* tee_iocl_supp_send_arg followed by any array of struct tee_param
|
||||
*/
|
||||
#define TEE_IOC_SUPPL_SEND _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 7, \
|
||||
struct tee_ioctl_buf_data)
|
||||
|
||||
/*
|
||||
* Five syscalls are used when communicating with the TEE driver.
|
||||
* open(): opens the device associated with the driver
|
||||
* ioctl(): as described above operating on the file descriptor from open()
|
||||
* close(): two cases
|
||||
* - closes the device file descriptor
|
||||
* - closes a file descriptor connected to allocated shared memory
|
||||
* mmap(): maps shared memory into user space using information from struct
|
||||
* tee_ioctl_shm_alloc_data
|
||||
* munmap(): unmaps previously shared memory
|
||||
*/
|
||||
|
||||
#endif /*__TEE_H*/
|
|
@ -1304,17 +1304,11 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
|
|||
VM_BUG_ON_PAGE(!PageHead(page), page);
|
||||
if (flags & FOLL_TOUCH) {
|
||||
pmd_t _pmd;
|
||||
/*
|
||||
* We should set the dirty bit only for FOLL_WRITE but
|
||||
* for now the dirty bit in the pmd is meaningless.
|
||||
* And if the dirty bit will become meaningful and
|
||||
* we'll only set it with FOLL_WRITE, an atomic
|
||||
* set_bit will be required on the pmd to set the
|
||||
* young bit, instead of the current set_pmd_at.
|
||||
*/
|
||||
_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
|
||||
_pmd = pmd_mkyoung(*pmd);
|
||||
if (flags & FOLL_WRITE)
|
||||
_pmd = pmd_mkdirty(_pmd);
|
||||
if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
|
||||
pmd, _pmd, 1))
|
||||
pmd, _pmd, flags & FOLL_WRITE))
|
||||
update_mmu_cache_pmd(vma, addr, pmd);
|
||||
}
|
||||
if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
|
||||
|
|
|
@ -223,15 +223,14 @@ static long madvise_willneed(struct vm_area_struct *vma,
|
|||
{
|
||||
struct file *file = vma->vm_file;
|
||||
|
||||
*prev = vma;
|
||||
#ifdef CONFIG_SWAP
|
||||
if (!file) {
|
||||
*prev = vma;
|
||||
force_swapin_readahead(vma, start, end);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (shmem_mapping(file->f_mapping)) {
|
||||
*prev = vma;
|
||||
force_shm_swapin_readahead(vma, start, end,
|
||||
file->f_mapping);
|
||||
return 0;
|
||||
|
@ -246,7 +245,6 @@ static long madvise_willneed(struct vm_area_struct *vma,
|
|||
return 0;
|
||||
}
|
||||
|
||||
*prev = vma;
|
||||
start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
|
||||
if (end > vma->vm_end)
|
||||
end = vma->vm_end;
|
||||
|
|
|
@ -2183,6 +2183,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
|
|||
|
||||
cb = &nlk->cb;
|
||||
memset(cb, 0, sizeof(*cb));
|
||||
cb->start = control->start;
|
||||
cb->dump = control->dump;
|
||||
cb->done = control->done;
|
||||
cb->nlh = nlh;
|
||||
|
@ -2196,6 +2197,9 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
|
|||
|
||||
mutex_unlock(nlk->cb_mutex);
|
||||
|
||||
if (cb->start)
|
||||
cb->start(cb);
|
||||
|
||||
ret = netlink_dump(sk);
|
||||
sock_put(sk);
|
||||
|
||||
|
|
|
@ -513,6 +513,20 @@ void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
|
|||
}
|
||||
EXPORT_SYMBOL(genlmsg_put);
|
||||
|
||||
static int genl_lock_start(struct netlink_callback *cb)
|
||||
{
|
||||
/* our ops are always const - netlink API doesn't propagate that */
|
||||
const struct genl_ops *ops = cb->data;
|
||||
int rc = 0;
|
||||
|
||||
if (ops->start) {
|
||||
genl_lock();
|
||||
rc = ops->start(cb);
|
||||
genl_unlock();
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
|
||||
{
|
||||
/* our ops are always const - netlink API doesn't propagate that */
|
||||
|
@ -577,6 +591,7 @@ static int genl_family_rcv_msg(struct genl_family *family,
|
|||
.module = family->module,
|
||||
/* we have const, but the netlink API doesn't */
|
||||
.data = (void *)ops,
|
||||
.start = genl_lock_start,
|
||||
.dump = genl_lock_dumpit,
|
||||
.done = genl_lock_done,
|
||||
};
|
||||
|
@ -588,6 +603,7 @@ static int genl_family_rcv_msg(struct genl_family *family,
|
|||
} else {
|
||||
struct netlink_dump_control c = {
|
||||
.module = family->module,
|
||||
.start = ops->start,
|
||||
.dump = ops->dumpit,
|
||||
.done = ops->done,
|
||||
};
|
||||
|
|
|
@ -1307,7 +1307,7 @@ EXPORT_SYMBOL(xfrm_policy_delete);
|
|||
|
||||
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
|
||||
{
|
||||
struct net *net = xp_net(pol);
|
||||
struct net *net = sock_net(sk);
|
||||
struct xfrm_policy *old_pol;
|
||||
|
||||
#ifdef CONFIG_XFRM_SUB_POLICY
|
||||
|
|
|
@ -1845,6 +1845,13 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
|
|||
struct xfrm_mgr *km;
|
||||
struct xfrm_policy *pol = NULL;
|
||||
|
||||
if (!optval && !optlen) {
|
||||
xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
|
||||
xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
|
||||
__sk_dst_reset(sk);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (optlen <= 0 || optlen > PAGE_SIZE)
|
||||
return -EMSGSIZE;
|
||||
|
||||
|
|
|
@ -1660,32 +1660,34 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
|
|||
|
||||
static int xfrm_dump_policy_done(struct netlink_callback *cb)
|
||||
{
|
||||
struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
|
||||
struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
|
||||
struct net *net = sock_net(cb->skb->sk);
|
||||
|
||||
xfrm_policy_walk_done(walk, net);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int xfrm_dump_policy_start(struct netlink_callback *cb)
|
||||
{
|
||||
struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
|
||||
|
||||
BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));
|
||||
|
||||
xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
|
||||
{
|
||||
struct net *net = sock_net(skb->sk);
|
||||
struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
|
||||
struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
|
||||
struct xfrm_dump_info info;
|
||||
|
||||
BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
|
||||
sizeof(cb->args) - sizeof(cb->args[0]));
|
||||
|
||||
info.in_skb = cb->skb;
|
||||
info.out_skb = skb;
|
||||
info.nlmsg_seq = cb->nlh->nlmsg_seq;
|
||||
info.nlmsg_flags = NLM_F_MULTI;
|
||||
|
||||
if (!cb->args[0]) {
|
||||
cb->args[0] = 1;
|
||||
xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
|
||||
}
|
||||
|
||||
(void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
|
||||
|
||||
return skb->len;
|
||||
|
@ -2437,6 +2439,7 @@ static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
|
|||
|
||||
static const struct xfrm_link {
|
||||
int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
|
||||
int (*start)(struct netlink_callback *);
|
||||
int (*dump)(struct sk_buff *, struct netlink_callback *);
|
||||
int (*done)(struct netlink_callback *);
|
||||
const struct nla_policy *nla_pol;
|
||||
|
@ -2450,6 +2453,7 @@ static const struct xfrm_link {
|
|||
[XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
|
||||
[XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
|
||||
[XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
|
||||
.start = xfrm_dump_policy_start,
|
||||
.dump = xfrm_dump_policy,
|
||||
.done = xfrm_dump_policy_done },
|
||||
[XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
|
||||
|
@ -2501,6 +2505,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
|
|||
|
||||
{
|
||||
struct netlink_dump_control c = {
|
||||
.start = link->start,
|
||||
.dump = link->dump,
|
||||
.done = link->done,
|
||||
};
|
||||
|
|
Loading…
Add table
Reference in a new issue