Merge "soc: qcom: Add hypervisor abstraction driver" into dev/msm-4.4-8996au
This commit is contained in:
commit
2f5c62e75e
24 changed files with 3597 additions and 0 deletions
19
Documentation/devicetree/bindings/soc/qcom/guest_shm.txt
Normal file
19
Documentation/devicetree/bindings/soc/qcom/guest_shm.txt
Normal file
|
@ -0,0 +1,19 @@
|
|||
QVM Guest Shared Memory
|
||||
|
||||
guest_shm is a device that enables Linux as a guest operating system
|
||||
to allocate shared memory between virtual machines and send notifications
|
||||
of updates to other virtual machines.
|
||||
|
||||
Required properties:
|
||||
- compatible: Must be "qvm,guest_shm".
|
||||
- interrupt-parent: Parent interrupt controller.
|
||||
- interrupts: Should contain QVM interrupt.
|
||||
- reg: Physical address of the guest factory and length.
|
||||
|
||||
Example:
|
||||
qvm,guest_shm {
|
||||
compatible = "qvm,guest_shm";
|
||||
interrupt-parent = <&gic>;
|
||||
interrupts = <6 4>;
|
||||
reg = <0x1c050000 0x1000>;
|
||||
};
|
|
@ -186,6 +186,7 @@ qcom Qualcomm Technologies, Inc
|
|||
qemu QEMU, a generic and open source machine emulator and virtualizer
|
||||
qi Qi Hardware
|
||||
qnap QNAP Systems, Inc.
|
||||
qvm BlackBerry Ltd
|
||||
radxa Radxa
|
||||
raidsonic RaidSonic Technology GmbH
|
||||
ralink Mediatek/Ralink Technology Corp.
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
#
|
||||
# QCOM Soc drivers
|
||||
#
|
||||
source "drivers/soc/qcom/hab/Kconfig"
|
||||
|
||||
config MSM_INRUSH_CURRENT_MITIGATION
|
||||
bool "Inrush-current mitigation Driver"
|
||||
help
|
||||
|
|
|
@ -107,3 +107,4 @@ obj-$(CONFIG_QCOM_SMCINVOKE) += smcinvoke.o
|
|||
obj-$(CONFIG_QCOM_EARLY_RANDOM) += early_random.o
|
||||
obj-$(CONFIG_QCOM_CX_IPEAK) += cx_ipeak.o
|
||||
obj-$(CONFIG_MSM_CACHE_M4M_ERP64) += cache_m4m_erp64.o
|
||||
obj-$(CONFIG_MSM_HAB) += hab/
|
||||
|
|
7
drivers/soc/qcom/hab/Kconfig
Normal file
7
drivers/soc/qcom/hab/Kconfig
Normal file
|
@ -0,0 +1,7 @@
|
|||
# Inter-VM communication layer used by multimedia drivers to reach the host OS.
config MSM_HAB
	bool "Enable Multimedia driver Hypervisor Abstraction Layer"
	help
	  Multimedia driver hypervisor abstraction layer.
	  Required for drivers to use the HAB API to communicate with the host
	  OS.
|
||||
|
14
drivers/soc/qcom/hab/Makefile
Normal file
14
drivers/soc/qcom/hab/Makefile
Normal file
|
@ -0,0 +1,14 @@
|
|||
# Objects composing the msm_hab module: core (hab.o/khab.o), messaging,
# virtual/physical channels, open handshake, memory import/export, pipe
# transport and the QVM hypervisor plugin.
msm_hab-objs = \
	khab.o \
	hab.o \
	hab_msg.o \
	hab_vchan.o \
	hab_pchan.o \
	hab_open.o \
	hab_mimex.o \
	hab_mem_linux.o \
	hab_pipe.o \
	qvm_comm.o \
	hab_qvm.o

obj-$(CONFIG_MSM_HAB) += msm_hab.o
|
726
drivers/soc/qcom/hab/hab.c
Normal file
726
drivers/soc/qcom/hab/hab.c
Normal file
|
@ -0,0 +1,726 @@
|
|||
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
#include "hab.h"
|
||||
|
||||
/*
 * Statically initialise one hab_device slot.  __num__ must equal the
 * slot's index in hab_devices[] so the embedded list heads and locks
 * self-reference the correct array entry.
 */
#define HAB_DEVICE_CNSTR(__name__, __id__, __num__) { \
	.name = __name__,\
	.id = __id__,\
	.pchannels = LIST_HEAD_INIT(hab_devices[__num__].pchannels),\
	.pchan_lock = __MUTEX_INITIALIZER(hab_devices[__num__].pchan_lock),\
	.openq_list = LIST_HEAD_INIT(hab_devices[__num__].openq_list),\
	.openlock = __SPIN_LOCK_UNLOCKED(&hab_devices[__num__].openlock)\
	}

/* the following has to match habmm definitions, order does not matter */
static struct hab_device hab_devices[] = {
	HAB_DEVICE_CNSTR(DEVICE_AUD1_NAME, MM_AUD_1, 0),
	HAB_DEVICE_CNSTR(DEVICE_AUD2_NAME, MM_AUD_2, 1),
	HAB_DEVICE_CNSTR(DEVICE_AUD3_NAME, MM_AUD_3, 2),
	HAB_DEVICE_CNSTR(DEVICE_AUD4_NAME, MM_AUD_4, 3),
	HAB_DEVICE_CNSTR(DEVICE_CAM_NAME, MM_CAM, 4),
	HAB_DEVICE_CNSTR(DEVICE_DISP1_NAME, MM_DISP_1, 5),
	HAB_DEVICE_CNSTR(DEVICE_DISP2_NAME, MM_DISP_2, 6),
	HAB_DEVICE_CNSTR(DEVICE_DISP3_NAME, MM_DISP_3, 7),
	HAB_DEVICE_CNSTR(DEVICE_DISP4_NAME, MM_DISP_4, 8),
	HAB_DEVICE_CNSTR(DEVICE_DISP5_NAME, MM_DISP_5, 9),
	HAB_DEVICE_CNSTR(DEVICE_GFX_NAME, MM_GFX, 10),
	HAB_DEVICE_CNSTR(DEVICE_VID_NAME, MM_VID, 11),
	HAB_DEVICE_CNSTR(DEVICE_MISC_NAME, MM_MISC, 12),
	HAB_DEVICE_CNSTR(DEVICE_QCPE1_NAME, MM_QCPE_VM1, 13),
	HAB_DEVICE_CNSTR(DEVICE_QCPE2_NAME, MM_QCPE_VM2, 14),
	HAB_DEVICE_CNSTR(DEVICE_QCPE3_NAME, MM_QCPE_VM3, 15),
	HAB_DEVICE_CNSTR(DEVICE_QCPE4_NAME, MM_QCPE_VM4, 16)
};

/* global driver state; ndevices/devp expose the static table above */
struct hab_driver hab_driver = {
	.ndevices = ARRAY_SIZE(hab_devices),
	.devp = hab_devices,
};
|
||||
|
||||
struct uhab_context *hab_ctx_alloc(int kernel)
|
||||
{
|
||||
struct uhab_context *ctx;
|
||||
|
||||
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
|
||||
if (!ctx)
|
||||
return NULL;
|
||||
|
||||
ctx->closing = 0;
|
||||
INIT_LIST_HEAD(&ctx->vchannels);
|
||||
INIT_LIST_HEAD(&ctx->exp_whse);
|
||||
INIT_LIST_HEAD(&ctx->imp_whse);
|
||||
|
||||
INIT_LIST_HEAD(&ctx->exp_rxq);
|
||||
init_waitqueue_head(&ctx->exp_wq);
|
||||
spin_lock_init(&ctx->expq_lock);
|
||||
|
||||
spin_lock_init(&ctx->imp_lock);
|
||||
rwlock_init(&ctx->exp_lock);
|
||||
rwlock_init(&ctx->ctx_lock);
|
||||
|
||||
kref_init(&ctx->refcount);
|
||||
ctx->import_ctx = habmem_imp_hyp_open();
|
||||
if (!ctx->import_ctx) {
|
||||
kfree(ctx);
|
||||
return NULL;
|
||||
}
|
||||
ctx->kernel = kernel;
|
||||
|
||||
return ctx;
|
||||
}
|
||||
|
||||
void hab_ctx_free(struct kref *ref)
|
||||
{
|
||||
struct uhab_context *ctx =
|
||||
container_of(ref, struct uhab_context, refcount);
|
||||
struct hab_export_ack_recvd *ack_recvd, *tmp;
|
||||
|
||||
habmem_imp_hyp_close(ctx->import_ctx, ctx->kernel);
|
||||
|
||||
list_for_each_entry_safe(ack_recvd, tmp, &ctx->exp_rxq, node) {
|
||||
list_del(&ack_recvd->node);
|
||||
kfree(ack_recvd);
|
||||
}
|
||||
|
||||
kfree(ctx);
|
||||
}
|
||||
|
||||
/*
 * Look up the virtual channel with id vcid in ctx's channel list.
 * On success a reference is taken (caller must hab_vchan_put()).
 * Returns NULL if no such channel exists.
 */
struct virtual_channel *hab_get_vchan_fromvcid(int32_t vcid,
		struct uhab_context *ctx)
{
	struct virtual_channel *found = NULL;
	struct virtual_channel *cur;

	read_lock(&ctx->ctx_lock);
	list_for_each_entry(cur, &ctx->vchannels, node) {
		if (cur->id != vcid)
			continue;
		/* pin the channel while the lock still guarantees it lives */
		kref_get(&cur->refcount);
		found = cur;
		break;
	}
	read_unlock(&ctx->ctx_lock);

	return found;
}
|
||||
|
||||
static struct hab_device *find_hab_device(unsigned int mm_id)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < hab_driver.ndevices; i++) {
|
||||
if (hab_driver.devp[i].id == HAB_MMID_GET_MAJOR(mm_id))
|
||||
return &hab_driver.devp[i];
|
||||
}
|
||||
|
||||
pr_err("find_hab_device failed: id=%d\n", mm_id);
|
||||
return NULL;
|
||||
}
|
||||
/*
 * open handshake in FE and BE
 *
 * frontend                 backend
 *   send(INIT)             wait(INIT)
 *   wait(INIT_ACK)         send(INIT_ACK)
 *   send(ACK)              wait(ACK)
 */
/*
 * Frontend half of the open handshake: allocate a local vchan on the
 * pchan reaching dom_id and run INIT / INIT_ACK / ACK with the backend
 * to exchange vchan ids.
 *
 * Returns the new vchan (pchan reference released), or ERR_PTR() with
 * -EINVAL (no device / no pchan) or -ENOMEM (vchan allocation).
 */
struct virtual_channel *frontend_open(struct uhab_context *ctx,
		unsigned int mm_id,
		int dom_id)
{
	int ret, open_id = 0;
	struct physical_channel *pchan = NULL;
	struct hab_device *dev;
	struct virtual_channel *vchan = NULL;
	/* pairs this open with the matching backend exchange */
	static atomic_t open_id_counter = ATOMIC_INIT(0);
	struct hab_open_request request;
	struct hab_open_request *recv_request;
	int sub_id = HAB_MMID_GET_MINOR(mm_id);

	dev = find_hab_device(mm_id);
	if (dev == NULL) {
		ret = -EINVAL;
		goto err;
	}

	/* takes a pchan reference; dropped on success and on error below */
	pchan = hab_pchan_find_domid(dev, dom_id);
	if (!pchan) {
		pr_err("hab_pchan_find_domid failed: dom_id=%d\n", dom_id);
		ret = -EINVAL;
		goto err;
	}

	vchan = hab_vchan_alloc(ctx, pchan);
	if (!vchan) {
		ret = -ENOMEM;
		goto err;
	}

	/* Send Init sequence */
	open_id = atomic_inc_return(&open_id_counter);
	hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT, pchan,
		vchan->id, sub_id, open_id);
	ret = hab_open_request_send(&request);
	if (ret) {
		pr_err("hab_open_request_send failed: %d\n", ret);
		goto err;
	}

	/* Wait for Init-Ack sequence (0 = wait without timeout) */
	hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_ACK, pchan,
		0, sub_id, open_id);
	ret = hab_open_listen(ctx, dev, &request, &recv_request, 0);
	if (ret || !recv_request) {
		pr_err("hab_open_listen failed: %d\n", ret);
		goto err;
	}

	/* the INIT_ACK carries the backend's vchan id */
	vchan->otherend_id = recv_request->vchan_id;
	hab_open_request_free(recv_request);

	/* Send Ack sequence */
	hab_open_request_init(&request, HAB_PAYLOAD_TYPE_ACK, pchan,
		0, sub_id, open_id);
	ret = hab_open_request_send(&request);
	if (ret)
		goto err;

	hab_pchan_put(pchan);

	return vchan;
err:
	if (vchan)
		hab_vchan_put(vchan);
	if (pchan)
		hab_pchan_put(pchan);

	return ERR_PTR(ret);
}
|
||||
|
||||
/*
 * Backend half of the open handshake: wait for a frontend INIT on any
 * pchan of the device selected by mm_id, answer with INIT_ACK, then
 * wait (with a 1s timeout) for the final ACK.  On ACK timeout the
 * half-open vchan/pchan are released and the whole exchange restarts.
 *
 * Returns the new vchan, or ERR_PTR() on device lookup, allocation or
 * handshake failure.
 */
struct virtual_channel *backend_listen(struct uhab_context *ctx,
		unsigned int mm_id)
{
	int ret;
	int open_id;
	int sub_id = HAB_MMID_GET_MINOR(mm_id);
	struct physical_channel *pchan = NULL;
	struct hab_device *dev;
	struct virtual_channel *vchan = NULL;
	struct hab_open_request request;
	struct hab_open_request *recv_request;
	uint32_t otherend_vchan_id;

	dev = find_hab_device(mm_id);
	if (dev == NULL) {
		ret = -EINVAL;
		goto err;
	}

	while (1) {
		/* Wait for Init sequence (pchan NULL: match any pchan) */
		hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT,
			NULL, 0, sub_id, 0);
		ret = hab_open_listen(ctx, dev, &request, &recv_request, 0);
		if (ret || !recv_request) {
			pr_err("hab_open_listen failed: %d\n", ret);
			goto err;
		}

		/* adopt the pchan the INIT arrived on */
		otherend_vchan_id = recv_request->vchan_id;
		open_id = recv_request->open_id;
		pchan = recv_request->pchan;
		hab_pchan_get(pchan);
		hab_open_request_free(recv_request);

		vchan = hab_vchan_alloc(ctx, pchan);
		if (!vchan) {
			ret = -ENOMEM;
			goto err;
		}

		vchan->otherend_id = otherend_vchan_id;

		/* Send Init-Ack sequence */
		hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_ACK,
			pchan, vchan->id, sub_id, open_id);
		ret = hab_open_request_send(&request);
		if (ret)
			goto err;

		/* Wait for Ack sequence, at most HZ jiffies */
		hab_open_request_init(&request, HAB_PAYLOAD_TYPE_ACK,
			pchan, 0, sub_id, open_id);
		ret = hab_open_listen(ctx, dev, &request, &recv_request, HZ);

		if (ret != -EAGAIN)
			break;

		/* ACK timed out: discard this attempt and listen again */
		hab_vchan_put(vchan);
		vchan = NULL;
		hab_pchan_put(pchan);
		pchan = NULL;
	}

	if (ret || !recv_request) {
		pr_err("backend_listen failed: %d\n", ret);
		ret = -EINVAL;
		goto err;
	}

	hab_open_request_free(recv_request);
	hab_pchan_put(pchan);
	return vchan;
err:
	if (vchan)
		hab_vchan_put(vchan);
	if (pchan)
		hab_pchan_put(pchan);
	return ERR_PTR(ret);
}
|
||||
|
||||
long hab_vchan_send(struct uhab_context *ctx,
|
||||
int vcid,
|
||||
size_t sizebytes,
|
||||
void *data,
|
||||
unsigned int flags)
|
||||
{
|
||||
struct virtual_channel *vchan;
|
||||
int ret;
|
||||
struct hab_header header = HAB_HEADER_INITIALIZER;
|
||||
int nonblocking_flag = flags & HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING;
|
||||
|
||||
if (sizebytes > HAB_MAX_MSG_SIZEBYTES) {
|
||||
pr_err("Message too large, %lu bytes\n", sizebytes);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
vchan = hab_get_vchan_fromvcid(vcid, ctx);
|
||||
if (!vchan || vchan->otherend_closed)
|
||||
return -ENODEV;
|
||||
|
||||
HAB_HEADER_SET_SIZE(header, sizebytes);
|
||||
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_MSG);
|
||||
HAB_HEADER_SET_ID(header, vchan->otherend_id);
|
||||
|
||||
while (1) {
|
||||
ret = physical_channel_send(vchan->pchan, &header, data);
|
||||
|
||||
if (vchan->otherend_closed || nonblocking_flag ||
|
||||
ret != -EAGAIN)
|
||||
break;
|
||||
|
||||
schedule();
|
||||
}
|
||||
|
||||
hab_vchan_put(vchan);
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct hab_message *hab_vchan_recv(struct uhab_context *ctx,
|
||||
int vcid,
|
||||
unsigned int flags)
|
||||
{
|
||||
struct virtual_channel *vchan;
|
||||
struct hab_message *message;
|
||||
int ret = 0;
|
||||
int nonblocking_flag = flags & HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING;
|
||||
|
||||
vchan = hab_get_vchan_fromvcid(vcid, ctx);
|
||||
if (!vchan || vchan->otherend_closed)
|
||||
return ERR_PTR(-ENODEV);
|
||||
|
||||
if (nonblocking_flag) {
|
||||
/*
|
||||
* Try to pull data from the ring in this context instead of
|
||||
* IRQ handler. Any available messages will be copied and queued
|
||||
* internally, then fetched by hab_msg_dequeue()
|
||||
*/
|
||||
physical_channel_rx_dispatch((unsigned long) vchan->pchan);
|
||||
}
|
||||
|
||||
message = hab_msg_dequeue(vchan, !nonblocking_flag);
|
||||
if (!message) {
|
||||
if (nonblocking_flag)
|
||||
ret = -EAGAIN;
|
||||
else
|
||||
ret = -EPIPE;
|
||||
}
|
||||
|
||||
hab_vchan_put(vchan);
|
||||
return ret ? ERR_PTR(ret) : message;
|
||||
}
|
||||
|
||||
bool hab_is_loopback(void)
|
||||
{
|
||||
return hab_driver.b_loopback;
|
||||
}
|
||||
|
||||
/*
 * Open a virtual channel on mmid for ctx and return its id in *vcid.
 *
 * Role selection: in loopback mode the two successive opens alternate
 * backend/frontend; otherwise b_server_dom decides which side this VM
 * plays.  Returns 0 on success or a negative errno from the handshake.
 */
int hab_vchan_open(struct uhab_context *ctx,
		unsigned int mmid,
		int32_t *vcid,
		uint32_t flags)
{
	struct virtual_channel *vchan;

	if (!vcid)
		return -EINVAL;

	if (hab_is_loopback()) {
		/*
		 * NOTE(review): loopback_num is toggled without any lock;
		 * two concurrent opens could take the same role — confirm
		 * loopback opens are serialized by callers.
		 */
		if (!hab_driver.loopback_num) {
			hab_driver.loopback_num = 1;
			vchan = backend_listen(ctx, mmid);
		} else {
			hab_driver.loopback_num = 0;
			vchan = frontend_open(ctx, mmid, LOOPBACK_DOM);
		}
	} else {
		if (hab_driver.b_server_dom)
			vchan = backend_listen(ctx, mmid);
		else
			vchan = frontend_open(ctx, mmid, 0);
	}

	if (IS_ERR(vchan))
		return PTR_ERR(vchan);

	/* publish the channel on the context's list */
	write_lock(&ctx->ctx_lock);
	list_add_tail(&vchan->node, &ctx->vchannels);
	write_unlock(&ctx->ctx_lock);

	*vcid = vchan->id;

	return 0;
}
|
||||
|
||||
void hab_send_close_msg(struct virtual_channel *vchan)
|
||||
{
|
||||
struct hab_header header;
|
||||
|
||||
if (vchan && !vchan->otherend_closed) {
|
||||
HAB_HEADER_SET_SIZE(header, 0);
|
||||
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_CLOSE);
|
||||
HAB_HEADER_SET_ID(header, vchan->otherend_id);
|
||||
physical_channel_send(vchan->pchan, &header, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * kref release callback for a vchan's usage count.  Invoked from
 * hab_vchan_close() with the owning context's ctx_lock held for
 * writing: unlinks the vchan from the context's list, notifies the
 * remote end, and drops the core reference.
 */
static void hab_vchan_close_impl(struct kref *ref)
{
	struct virtual_channel *vchan =
		container_of(ref, struct virtual_channel, usagecnt);

	list_del(&vchan->node);
	hab_vchan_stop_notify(vchan);
	hab_vchan_put(vchan);
}
|
||||
|
||||
|
||||
/*
 * Close the virtual channel vcid owned by ctx: drop its usage count,
 * which (on last put) unlinks and notifies via hab_vchan_close_impl().
 * Safe to call with a NULL context.
 */
void hab_vchan_close(struct uhab_context *ctx, int32_t vcid)
{
	struct virtual_channel *cur, *next;

	if (!ctx)
		return;

	write_lock(&ctx->ctx_lock);
	list_for_each_entry_safe(cur, next, &ctx->vchannels, node) {
		if (cur->id != vcid)
			continue;
		/* release callback may unlink cur; lock is held throughout */
		kref_put(&cur->usagecnt, hab_vchan_close_impl);
		break;
	}
	write_unlock(&ctx->ctx_lock);
}
|
||||
|
||||
static int hab_open(struct inode *inodep, struct file *filep)
|
||||
{
|
||||
int result = 0;
|
||||
struct uhab_context *ctx;
|
||||
|
||||
ctx = hab_ctx_alloc(0);
|
||||
|
||||
if (!ctx) {
|
||||
pr_err("hab_ctx_alloc failed\n");
|
||||
filep->private_data = NULL;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
filep->private_data = ctx;
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
 * release() handler for /dev/hab: tear down every virtual channel the
 * context still owns, then drop the context reference taken at open.
 */
static int hab_release(struct inode *inodep, struct file *filep)
{
	struct uhab_context *ctx = filep->private_data;
	struct virtual_channel *vchan, *tmp;

	if (!ctx)
		return 0;

	write_lock(&ctx->ctx_lock);

	list_for_each_entry_safe(vchan, tmp, &ctx->vchannels, node) {
		list_del(&vchan->node);
		/* tell the remote end, then drop the core reference */
		hab_vchan_stop_notify(vchan);
		hab_vchan_put(vchan);
	}

	write_unlock(&ctx->ctx_lock);

	hab_ctx_put(ctx);
	filep->private_data = NULL;

	return 0;
}
|
||||
|
||||
/*
 * ioctl dispatcher for /dev/hab.
 *
 * Arguments are staged through a 256-byte on-stack buffer: IOC_IN
 * commands are copied in up front, and (only on success) IOC_OUT
 * commands are copied back out at the end, so each case operates on a
 * kernel copy of the user struct.
 */
static long hab_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct uhab_context *ctx = (struct uhab_context *)filep->private_data;
	struct hab_open *open_param;
	struct hab_close *close_param;
	struct hab_recv *recv_param;
	struct hab_send *send_param;
	struct hab_message *msg;
	void *send_data;
	unsigned char data[256] = { 0 };
	long ret = 0;

	if (_IOC_SIZE(cmd) && (cmd & IOC_IN)) {
		/* reject commands whose payload exceeds the stack buffer */
		if (_IOC_SIZE(cmd) > sizeof(data))
			return -EINVAL;

		if (copy_from_user(data, (void __user *)arg, _IOC_SIZE(cmd))) {
			pr_err("copy_from_user failed cmd=%x size=%d\n",
				cmd, _IOC_SIZE(cmd));
			return -EFAULT;
		}
	}

	switch (cmd) {
	case IOCTL_HAB_VC_OPEN:
		open_param = (struct hab_open *)data;
		ret = hab_vchan_open(ctx, open_param->mmid,
			&open_param->vcid, open_param->flags);
		break;
	case IOCTL_HAB_VC_CLOSE:
		close_param = (struct hab_close *)data;
		hab_vchan_close(ctx, close_param->vcid);
		break;
	case IOCTL_HAB_SEND:
		send_param = (struct hab_send *)data;
		/* bound user-supplied size before allocating */
		if (send_param->sizebytes > HAB_MAX_MSG_SIZEBYTES) {
			ret = -EINVAL;
			break;
		}

		send_data = kzalloc(send_param->sizebytes, GFP_TEMPORARY);
		if (!send_data) {
			ret = -ENOMEM;
			break;
		}

		if (copy_from_user(send_data, (void __user *)send_param->data,
				send_param->sizebytes)) {
			ret = -EFAULT;
		} else {
			ret = hab_vchan_send(ctx, send_param->vcid,
						send_param->sizebytes,
						send_data,
						send_param->flags);
		}
		kfree(send_data);
		break;
	case IOCTL_HAB_RECV:
		recv_param = (struct hab_recv *)data;
		if (!recv_param->data) {
			ret = -EINVAL;
			break;
		}

		msg = hab_vchan_recv(ctx, recv_param->vcid, recv_param->flags);

		if (IS_ERR(msg)) {
			recv_param->sizebytes = 0;
			ret = PTR_ERR(msg);
			break;
		}

		/* user buffer must be large enough for the whole message */
		if (recv_param->sizebytes < msg->sizebytes) {
			recv_param->sizebytes = 0;
			ret = -EINVAL;
		} else if (copy_to_user((void __user *)recv_param->data,
				msg->data,
				msg->sizebytes)) {
			pr_err("copy_to_user failed: vc=%x size=%d\n",
				recv_param->vcid, (int)msg->sizebytes);
			recv_param->sizebytes = 0;
			ret = -EFAULT;
		} else {
			recv_param->sizebytes = msg->sizebytes;
		}

		/* message is consumed whether or not the copy succeeded */
		hab_msg_free(msg);
		break;
	case IOCTL_HAB_VC_EXPORT:
		ret = hab_mem_export(ctx, (struct hab_export *)data, 0);
		break;
	case IOCTL_HAB_VC_IMPORT:
		ret = hab_mem_import(ctx, (struct hab_import *)data, 0);
		break;
	case IOCTL_HAB_VC_UNEXPORT:
		ret = hab_mem_unexport(ctx, (struct hab_unexport *)data, 0);
		break;
	case IOCTL_HAB_VC_UNIMPORT:
		ret = hab_mem_unimport(ctx, (struct hab_unimport *)data, 0);
		break;
	default:
		ret = -ENOIOCTLCMD;
	}

	/* copy results back only when the command succeeded */
	if (ret == 0 && _IOC_SIZE(cmd) && (cmd & IOC_OUT))
		if (copy_to_user((void __user *) arg, data, _IOC_SIZE(cmd))) {
			pr_err("copy_to_user failed: cmd=%x\n", cmd);
			ret = -EFAULT;
		}

	return ret;
}
|
||||
|
||||
/* Character-device entry points for the "hab" device node. */
static const struct file_operations hab_fops = {
	.owner = THIS_MODULE,
	.open = hab_open,
	.release = hab_release,
	.mmap = habmem_imp_hyp_mmap,
	.unlocked_ioctl = hab_ioctl
};
|
||||
|
||||
/*
 * These map sg functions are pass through because the memory backing the
 * sg list is already accessible to the kernel as it comes from the
 * dedicated shared vm pool
 */

static int hab_map_sg(struct device *dev, struct scatterlist *sgl,
		int nelems, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	/* no mapping needed; report every element as mapped */
	return nelems;
}
|
||||
|
||||
/* Counterpart of hab_map_sg(): nothing was mapped, nothing to undo. */
static void hab_unmap_sg(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	/* Do nothing */
}
|
||||
|
||||
/* Pass-through DMA ops installed on the hab device (see hab_init()). */
static const struct dma_map_ops hab_dma_ops = {
	.map_sg = hab_map_sg,
	.unmap_sg = hab_unmap_sg,
};
|
||||
|
||||
/*
 * Module init: register the "hab" char device and its class/device
 * node, initialise each mmid device's open wait queue, register with
 * the hypervisor plugin, and allocate the kernel-side context.
 * On failure every resource acquired so far is rolled back.
 */
static int __init hab_init(void)
{
	int result;
	int i;
	dev_t dev;
	struct hab_device *device;

	result = alloc_chrdev_region(&hab_driver.major, 0, 1, "hab");

	if (result < 0) {
		pr_err("alloc_chrdev_region failed: %d\n", result);
		return result;
	}

	cdev_init(&hab_driver.cdev, &hab_fops);
	hab_driver.cdev.owner = THIS_MODULE;
	hab_driver.cdev.ops = &hab_fops;
	dev = MKDEV(MAJOR(hab_driver.major), 0);

	result = cdev_add(&hab_driver.cdev, dev, 1);

	if (result < 0) {
		unregister_chrdev_region(dev, 1);
		pr_err("cdev_add failed: %d\n", result);
		return result;
	}

	hab_driver.class = class_create(THIS_MODULE, "hab");

	if (IS_ERR(hab_driver.class)) {
		result = PTR_ERR(hab_driver.class);
		pr_err("class_create failed: %d\n", result);
		goto err;
	}

	hab_driver.dev = device_create(hab_driver.class, NULL,
					dev, &hab_driver, "hab");

	if (IS_ERR(hab_driver.dev)) {
		result = PTR_ERR(hab_driver.dev);
		pr_err("device_create failed: %d\n", result);
		goto err;
	}

	/* openq is waited on by hab_open_listen() for each mmid device */
	for (i = 0; i < hab_driver.ndevices; i++) {
		device = &hab_driver.devp[i];
		init_waitqueue_head(&device->openq);
	}

	hab_hypervisor_register();

	/* kernel=1: context used by in-kernel HAB clients */
	hab_driver.kctx = hab_ctx_alloc(1);
	if (!hab_driver.kctx) {
		pr_err("hab_ctx_alloc failed");
		result = -ENOMEM;
		hab_hypervisor_unregister();
		goto err;
	}

	/* install pass-through sg ops (memory is already kernel-visible) */
	set_dma_ops(hab_driver.dev, &hab_dma_ops);

	return result;

err:
	/* err is only reachable after cdev_add succeeded; unwind in order */
	if (!IS_ERR_OR_NULL(hab_driver.dev))
		device_destroy(hab_driver.class, dev);
	if (!IS_ERR_OR_NULL(hab_driver.class))
		class_destroy(hab_driver.class);
	cdev_del(&hab_driver.cdev);
	unregister_chrdev_region(dev, 1);

	return result;
}
|
||||
|
||||
/*
 * Module exit: unregister from the hypervisor plugin, drop the kernel
 * context, then tear down the char device in reverse order of init.
 */
static void __exit hab_exit(void)
{
	dev_t devno;

	hab_hypervisor_unregister();
	hab_ctx_put(hab_driver.kctx);

	devno = MKDEV(MAJOR(hab_driver.major), 0);
	device_destroy(hab_driver.class, devno);
	class_destroy(hab_driver.class);
	cdev_del(&hab_driver.cdev);
	unregister_chrdev_region(devno, 1);
}
|
||||
|
||||
subsys_initcall(hab_init);
|
||||
module_exit(hab_exit);
|
||||
|
||||
MODULE_DESCRIPTION("Hypervisor abstraction layer");
|
||||
MODULE_LICENSE("GPL v2");
|
415
drivers/soc/qcom/hab/hab.h
Normal file
415
drivers/soc/qcom/hab/hab.h
Normal file
|
@ -0,0 +1,415 @@
|
|||
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
#ifndef __HAB_H
|
||||
#define __HAB_H
|
||||
|
||||
#define pr_fmt(fmt) "hab: " fmt
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
#include <linux/habmm.h>
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/cdev.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/rbtree.h>
|
||||
#include <linux/idr.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/dma-direction.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
|
||||
/* Wire payload types carried in hab_header's TYPE field. */
enum hab_payload_type {
	HAB_PAYLOAD_TYPE_MSG = 0x0,	/* ordinary data message */
	HAB_PAYLOAD_TYPE_INIT,		/* open handshake: FE -> BE */
	HAB_PAYLOAD_TYPE_INIT_ACK,	/* open handshake: BE -> FE */
	HAB_PAYLOAD_TYPE_ACK,		/* open handshake: final FE -> BE */
	HAB_PAYLOAD_TYPE_EXPORT,	/* memory export descriptor */
	HAB_PAYLOAD_TYPE_EXPORT_ACK,	/* export acknowledgement */
	HAB_PAYLOAD_TYPE_PROFILE,
	HAB_PAYLOAD_TYPE_CLOSE,		/* remote side is closing the vchan */
};
/* pseudo domain id used when both handshake ends run in this VM */
#define LOOPBACK_DOM 0xFF

/*
 * Tuning required. If there are multiple clients, the aging of previous
 * "request" might be discarded
 */
#define Q_AGE_THRESHOLD 1000000

/* match the name to dtsi if for real HYP framework */
#define DEVICE_AUD1_NAME "hab_aud1"
#define DEVICE_AUD2_NAME "hab_aud2"
#define DEVICE_AUD3_NAME "hab_aud3"
#define DEVICE_AUD4_NAME "hab_aud4"
#define DEVICE_CAM_NAME "hab_cam"
#define DEVICE_DISP1_NAME "hab_disp1"
#define DEVICE_DISP2_NAME "hab_disp2"
#define DEVICE_DISP3_NAME "hab_disp3"
#define DEVICE_DISP4_NAME "hab_disp4"
#define DEVICE_DISP5_NAME "hab_disp5"
#define DEVICE_GFX_NAME "hab_ogles"
#define DEVICE_VID_NAME "hab_vid"
#define DEVICE_MISC_NAME "hab_misc"
#define DEVICE_QCPE1_NAME "hab_qcpe_vm1"
#define DEVICE_QCPE2_NAME "hab_qcpe_vm2"
#define DEVICE_QCPE3_NAME "hab_qcpe_vm3"
#define DEVICE_QCPE4_NAME "hab_qcpe_vm4"
|
||||
|
||||
/*
 * hab_header.info bit layout: [31:24] vchan id, [23:16] payload type,
 * [15:0] payload size in bytes.
 * "Size" of the HAB_HEADER_ID and HAB_VCID_ID must match.
 */
#define HAB_HEADER_SIZE_SHIFT 0
#define HAB_HEADER_TYPE_SHIFT 16
#define HAB_HEADER_ID_SHIFT 24
#define HAB_HEADER_SIZE_MASK 0x0000FFFF
#define HAB_HEADER_TYPE_MASK 0x00FF0000
#define HAB_HEADER_ID_MASK 0xFF000000
/* headers must start zeroed: the SET_* macros read-modify-write info */
#define HAB_HEADER_INITIALIZER {0}

/* mmid packs a 16-bit major (device) and 8-bit minor (sub) id */
#define HAB_MMID_GET_MAJOR(mmid) (mmid & 0xFFFF)
#define HAB_MMID_GET_MINOR(mmid) ((mmid>>16) & 0xFF)

/* vcid bit layout: [31:16] mmid, [15:8] domain id, [7:0] channel id */
#define HAB_VCID_ID_SHIFT 0
#define HAB_VCID_DOMID_SHIFT 8
#define HAB_VCID_MMID_SHIFT 16
#define HAB_VCID_ID_MASK 0x000000FF
#define HAB_VCID_DOMID_MASK 0x0000FF00
#define HAB_VCID_MMID_MASK 0xFFFF0000
#define HAB_VCID_GET_ID(vcid) \
	(((vcid) & HAB_VCID_ID_MASK) >> HAB_VCID_ID_SHIFT)

#define HAB_HEADER_SET_SIZE(header, size) \
	((header).info = (((header).info) & (~HAB_HEADER_SIZE_MASK)) | \
		(((size) << HAB_HEADER_SIZE_SHIFT) & HAB_HEADER_SIZE_MASK))

#define HAB_HEADER_SET_TYPE(header, type) \
	((header).info = (((header).info) & (~HAB_HEADER_TYPE_MASK)) | \
		(((type) << HAB_HEADER_TYPE_SHIFT) & HAB_HEADER_TYPE_MASK))

/* only the channel-id byte of a vcid is carried in the header */
#define HAB_HEADER_SET_ID(header, id) \
	((header).info = (((header).info) & (~HAB_HEADER_ID_MASK)) | \
		((HAB_VCID_GET_ID(id) << HAB_HEADER_ID_SHIFT) \
		& HAB_HEADER_ID_MASK))

#define HAB_HEADER_GET_SIZE(header) \
	((((header).info) & HAB_HEADER_SIZE_MASK) >> HAB_HEADER_SIZE_SHIFT)

#define HAB_HEADER_GET_TYPE(header) \
	((((header).info) & HAB_HEADER_TYPE_MASK) >> HAB_HEADER_TYPE_SHIFT)

#define HAB_HEADER_GET_ID(header) \
	(((((header).info) & HAB_HEADER_ID_MASK) >> \
	(HAB_HEADER_ID_SHIFT - HAB_VCID_ID_SHIFT)) & HAB_VCID_ID_MASK)

/* fixed 32-bit wire header preceding every payload */
struct hab_header {
	uint32_t info;
};
|
||||
|
||||
/* One transport instance toward a specific remote domain. */
struct physical_channel {
	struct kref refcount;
	struct hab_device *habdev;	/* owning mmid device */
	struct list_head node;		/* entry in habdev->pchannels */
	struct idr vchan_idr;		/* vchan-id allocation */
	spinlock_t vid_lock;

	struct idr expid_idr;		/* export-id allocation */
	spinlock_t expid_lock;

	void *hyp_data;			/* hypervisor-plugin private data */
	int dom_id;			/* remote domain this pchan reaches */
	int closed;

	spinlock_t rxbuf_lock;
};

/* wire payload of an open-handshake message */
struct hab_open_send_data {
	int vchan_id;
	int sub_id;
	int open_id;
};

/* in-memory form of a received/queued open-handshake request */
struct hab_open_request {
	int type;			/* enum hab_payload_type */
	struct physical_channel *pchan;	/* pchan the request arrived on */
	int vchan_id;
	int sub_id;
	int open_id;			/* pairs FE and BE exchanges */
};

/* queue node wrapping a pending open request (see hab_device.openq_list) */
struct hab_open_node {
	struct hab_open_request request;
	struct list_head node;
	int age;			/* see Q_AGE_THRESHOLD */
};

/* wire payload acknowledging a memory export */
struct hab_export_ack {
	uint32_t export_id;
	int32_t vcid_local;
	int32_t vcid_remote;
};

/* queue node for a received export ack (uhab_context.exp_rxq) */
struct hab_export_ack_recvd {
	struct hab_export_ack ack;
	struct list_head node;
	int age;
};

/* one received message; data[] is a flexible array of payload words */
struct hab_message {
	size_t sizebytes;
	struct list_head node;		/* entry in vchan->rx_list */
	uint32_t data[];
};

/* per-mmid device: groups the pchans and pending opens for one service */
struct hab_device {
	const char *name;
	unsigned int id;
	struct list_head pchannels;
	struct mutex pchan_lock;
	struct list_head openq_list;	/* queued hab_open_node entries */
	spinlock_t openlock;
	wait_queue_head_t openq;	/* waited on by open listeners */
};
|
||||
|
||||
/* Per-open (or kernel-side) client state. */
struct uhab_context {
	struct kref refcount;
	struct list_head vchannels;	/* vchans owned by this client */

	struct list_head exp_whse;	/* exported buffers warehouse */
	uint32_t export_total;

	wait_queue_head_t exp_wq;
	struct list_head exp_rxq;	/* received export acks */
	rwlock_t exp_lock;
	spinlock_t expq_lock;

	struct list_head imp_whse;	/* imported buffers warehouse */
	spinlock_t imp_lock;
	uint32_t import_total;

	void *import_ctx;		/* from habmem_imp_hyp_open() */

	rwlock_t ctx_lock;		/* protects vchannels */
	int closing;
	int kernel;			/* non-zero for in-kernel context */
};

/* Global driver singleton (char device, device table, kernel context). */
struct hab_driver {
	struct device *dev;
	struct cdev cdev;
	dev_t major;
	struct class *class;
	int irq;

	int ndevices;			/* entries in devp */
	struct hab_device *devp;	/* static mmid device table */
	struct uhab_context *kctx;	/* kernel-side context */
	int b_server_dom;		/* non-zero: this VM is the backend */
	int loopback_num;		/* alternates FE/BE role in loopback */
	int b_loopback;			/* loopback mode enabled */
};

struct virtual_channel {
	struct work_struct work;
	/*
	 * refcount is used to track the references from hab core to the virtual
	 * channel such as references from physical channels,
	 * i.e. references from the "other" side
	 */
	struct kref refcount;
	/*
	 * usagecnt is used to track the clients who are using this virtual
	 * channel such as local clients, client software etc,
	 * i.e. references from "this" side
	 */
	struct kref usagecnt;
	struct physical_channel *pchan;
	struct uhab_context *ctx;
	struct list_head node;		/* entry in ctx->vchannels */
	struct list_head rx_list;	/* queued hab_message entries */
	wait_queue_head_t rx_queue;
	spinlock_t rx_lock;
	int id;
	int otherend_id;		/* peer's vchan id (from handshake) */
	int otherend_closed;
};

/*
 * Struct shared between local and remote, contents are composed by exporter,
 * the importer only writes to pdata and local (exporter) domID
 */
struct export_desc {
	uint32_t export_id;
	int readonly;
	uint64_t import_index;

	struct virtual_channel *vchan;

	int32_t vcid_local;
	int32_t vcid_remote;
	int domid_local;
	int domid_remote;

	struct list_head node;
	void *kva;
	int payload_count;
	unsigned char payload[1];
};
|
||||
|
||||
int hab_vchan_open(struct uhab_context *ctx,
|
||||
unsigned int mmid, int32_t *vcid, uint32_t flags);
|
||||
void hab_vchan_close(struct uhab_context *ctx,
|
||||
int32_t vcid);
|
||||
long hab_vchan_send(struct uhab_context *ctx,
|
||||
int vcid,
|
||||
size_t sizebytes,
|
||||
void *data,
|
||||
unsigned int flags);
|
||||
struct hab_message *hab_vchan_recv(struct uhab_context *ctx,
|
||||
int vcid,
|
||||
unsigned int flags);
|
||||
void hab_vchan_stop(struct virtual_channel *vchan);
|
||||
void hab_vchan_stop_notify(struct virtual_channel *vchan);
|
||||
|
||||
int hab_mem_export(struct uhab_context *ctx,
|
||||
struct hab_export *param, int kernel);
|
||||
int hab_mem_import(struct uhab_context *ctx,
|
||||
struct hab_import *param, int kernel);
|
||||
int hab_mem_unexport(struct uhab_context *ctx,
|
||||
struct hab_unexport *param, int kernel);
|
||||
int hab_mem_unimport(struct uhab_context *ctx,
|
||||
struct hab_unimport *param, int kernel);
|
||||
|
||||
void habmem_remove_export(struct export_desc *exp);
|
||||
|
||||
/* memory hypervisor framework plugin I/F */
|
||||
void *habmm_hyp_allocate_grantable(int page_count,
|
||||
uint32_t *sizebytes);
|
||||
|
||||
int habmem_hyp_grant_user(unsigned long address,
|
||||
int page_count,
|
||||
int flags,
|
||||
int remotedom,
|
||||
void *ppdata);
|
||||
|
||||
int habmem_hyp_grant(unsigned long address,
|
||||
int page_count,
|
||||
int flags,
|
||||
int remotedom,
|
||||
void *ppdata);
|
||||
|
||||
int habmem_hyp_revoke(void *expdata, uint32_t count);
|
||||
|
||||
void *habmem_imp_hyp_open(void);
|
||||
void habmem_imp_hyp_close(void *priv, int kernel);
|
||||
|
||||
long habmem_imp_hyp_map(void *priv, void *impdata, uint32_t count,
|
||||
uint32_t remotedom,
|
||||
uint64_t *index,
|
||||
void **pkva,
|
||||
int kernel,
|
||||
uint32_t userflags);
|
||||
|
||||
long habmm_imp_hyp_unmap(void *priv, uint64_t index,
|
||||
uint32_t count,
|
||||
int kernel);
|
||||
|
||||
int habmem_imp_hyp_mmap(struct file *flip, struct vm_area_struct *vma);
|
||||
|
||||
|
||||
|
||||
void hab_msg_free(struct hab_message *message);
|
||||
struct hab_message *hab_msg_dequeue(struct virtual_channel *vchan,
|
||||
int wait_flag);
|
||||
|
||||
void hab_msg_recv(struct physical_channel *pchan,
|
||||
struct hab_header *header);
|
||||
|
||||
void hab_open_request_init(struct hab_open_request *request,
|
||||
int type,
|
||||
struct physical_channel *pchan,
|
||||
int vchan_id,
|
||||
int sub_id,
|
||||
int open_id);
|
||||
int hab_open_request_send(struct hab_open_request *request);
|
||||
int hab_open_request_add(struct physical_channel *pchan,
|
||||
struct hab_header *header);
|
||||
void hab_open_request_free(struct hab_open_request *request);
|
||||
int hab_open_listen(struct uhab_context *ctx,
|
||||
struct hab_device *dev,
|
||||
struct hab_open_request *listen,
|
||||
struct hab_open_request **recv_request,
|
||||
int ms_timeout);
|
||||
|
||||
struct virtual_channel *hab_vchan_alloc(struct uhab_context *ctx,
|
||||
struct physical_channel *pchan);
|
||||
struct virtual_channel *hab_vchan_get(struct physical_channel *pchan,
|
||||
uint32_t vchan_id);
|
||||
void hab_vchan_put(struct virtual_channel *vchan);
|
||||
|
||||
struct virtual_channel *hab_get_vchan_fromvcid(int32_t vcid,
|
||||
struct uhab_context *ctx);
|
||||
struct physical_channel *hab_pchan_alloc(struct hab_device *habdev,
|
||||
int otherend_id);
|
||||
struct physical_channel *hab_pchan_find_domid(struct hab_device *dev,
|
||||
int dom_id);
|
||||
int hab_vchan_find_domid(struct virtual_channel *vchan);
|
||||
|
||||
void hab_pchan_get(struct physical_channel *pchan);
|
||||
void hab_pchan_put(struct physical_channel *pchan);
|
||||
|
||||
struct uhab_context *hab_ctx_alloc(int kernel);
|
||||
|
||||
void hab_ctx_free(struct kref *ref);
|
||||
|
||||
static inline void hab_ctx_get(struct uhab_context *ctx)
|
||||
{
|
||||
if (ctx)
|
||||
kref_get(&ctx->refcount);
|
||||
}
|
||||
|
||||
static inline void hab_ctx_put(struct uhab_context *ctx)
|
||||
{
|
||||
if (ctx)
|
||||
kref_put(&ctx->refcount, hab_ctx_free);
|
||||
}
|
||||
|
||||
void hab_send_close_msg(struct virtual_channel *vchan);
|
||||
int hab_hypervisor_register(void);
|
||||
void hab_hypervisor_unregister(void);
|
||||
|
||||
int physical_channel_read(struct physical_channel *pchan,
|
||||
void *payload,
|
||||
size_t read_size);
|
||||
|
||||
int physical_channel_send(struct physical_channel *pchan,
|
||||
struct hab_header *header,
|
||||
void *payload);
|
||||
|
||||
void physical_channel_rx_dispatch(unsigned long physical_channel);
|
||||
|
||||
int loopback_pchan_create(char *dev_name);
|
||||
|
||||
bool hab_is_loopback(void);
|
||||
|
||||
/* Global singleton HAB instance */
|
||||
extern struct hab_driver hab_driver;
|
||||
|
||||
#endif /* __HAB_H */
|
29
drivers/soc/qcom/hab/hab_grantable.h
Normal file
29
drivers/soc/qcom/hab/hab_grantable.h
Normal file
|
@ -0,0 +1,29 @@
|
|||
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
#ifndef __HAB_GRANTABLE_H
|
||||
#define __HAB_GRANTABLE_H
|
||||
|
||||
/* Grantable should be common between exporter and importer */
struct grantable {
	unsigned long pfn;	/* page frame number of one exported page */
};
|
||||
|
||||
/*
 * Run-length-compressed pfn list sent from exporter to importer.
 * Starting at first_pfn, each region is `size` consecutive pfns followed by
 * a gap of `space` pfns before the next region begins.
 */
struct compressed_pfns {
	unsigned long first_pfn;
	int nregions;		/* number of entries in region[] */
	struct region {
		int size;	/* consecutive pages in this region */
		int space;	/* gap (in pages) to the next region */
	} region[];
};
|
||||
#endif /* __HAB_GRANTABLE_H */
|
451
drivers/soc/qcom/hab/hab_mem_linux.c
Normal file
451
drivers/soc/qcom/hab/hab_mem_linux.c
Normal file
|
@ -0,0 +1,451 @@
|
|||
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
#include "hab.h"
|
||||
#include <linux/fdtable.h>
|
||||
#include <linux/dma-buf.h>
|
||||
#include "hab_grantable.h"
|
||||
|
||||
|
||||
/* Importer-side bookkeeping for one imported buffer. */
struct pages_list {
	struct list_head list;		/* node in importer_context.imp_list */
	struct page **pages;		/* page array rebuilt from the pfn table */
	long npages;
	uint64_t index;			/* for mmap first call */
	int kernel;			/* mapped for a kernel client? */
	void *kva;			/* vmap()ed kernel address when kernel != 0 */
	void *uva;			/* user VA; not populated in this file */
	int refcntk;			/* kernel-side reference count */
	int refcntu;			/* user-side reference count */
	uint32_t userflags;		/* HABMM_IMPORT_FLAGS_* from the importer */
	struct file *filp_owner;	/* NOTE(review): never set in this file -- confirm use */
	struct file *filp_mapper;	/* NOTE(review): never set in this file -- confirm use */
};
|
||||
|
||||
/* Per-context importer state; created by habmem_imp_hyp_open(). */
struct importer_context {
	int cnt;		/* pages_list entries currently on imp_list */
	struct list_head imp_list;	/* imported buffers (struct pages_list) */
	struct file *filp;	/* NOTE(review): never set in this file -- confirm use */
};
|
||||
|
||||
void *habmm_hyp_allocate_grantable(int page_count,
|
||||
uint32_t *sizebytes)
|
||||
{
|
||||
if (!sizebytes || !page_count)
|
||||
return NULL;
|
||||
|
||||
*sizebytes = page_count * sizeof(struct grantable);
|
||||
return vmalloc(*sizebytes);
|
||||
}
|
||||
|
||||
/*
 * iterate_fd() callback: report the fd that refers to `file`.
 * iterate_fd() stops on the first non-zero return, and 0 is a valid fd,
 * so a match is reported as fd + 1 and the caller subtracts 1.
 */
static int match_file(const void *p, struct file *file, unsigned int fd)
{
	if (p != file)
		return 0;

	return fd + 1;
}
|
||||
|
||||
|
||||
static int habmem_get_dma_pages(unsigned long address,
|
||||
int page_count,
|
||||
struct page **pages)
|
||||
{
|
||||
struct vm_area_struct *vma;
|
||||
struct dma_buf *dmabuf = NULL;
|
||||
unsigned long offset;
|
||||
unsigned long page_offset;
|
||||
struct scatterlist *s;
|
||||
struct sg_table *sg_table = NULL;
|
||||
struct dma_buf_attachment *attach = NULL;
|
||||
struct page *page;
|
||||
int i, j, rc = 0;
|
||||
int fd;
|
||||
|
||||
vma = find_vma(current->mm, address);
|
||||
if (!vma || !vma->vm_file)
|
||||
goto err;
|
||||
|
||||
/* Look for the fd that matches this the vma file */
|
||||
fd = iterate_fd(current->files, 0, match_file, vma->vm_file);
|
||||
if (fd == 0) {
|
||||
pr_err("iterate_fd failed\n");
|
||||
goto err;
|
||||
}
|
||||
|
||||
offset = address - vma->vm_start;
|
||||
page_offset = offset/PAGE_SIZE;
|
||||
|
||||
dmabuf = dma_buf_get(fd - 1);
|
||||
|
||||
attach = dma_buf_attach(dmabuf, hab_driver.dev);
|
||||
if (IS_ERR_OR_NULL(attach)) {
|
||||
pr_err("dma_buf_attach failed\n");
|
||||
goto err;
|
||||
}
|
||||
|
||||
sg_table = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
|
||||
|
||||
if (IS_ERR_OR_NULL(sg_table)) {
|
||||
pr_err("dma_buf_map_attachment failed\n");
|
||||
goto err;
|
||||
}
|
||||
|
||||
for_each_sg(sg_table->sgl, s, sg_table->nents, i) {
|
||||
page = sg_page(s);
|
||||
|
||||
for (j = page_offset; j < (s->length >> PAGE_SHIFT); j++) {
|
||||
pages[rc] = nth_page(page, j);
|
||||
rc++;
|
||||
if (rc >= page_count)
|
||||
break;
|
||||
}
|
||||
if (rc >= page_count)
|
||||
break;
|
||||
|
||||
if (page_offset > (s->length >> PAGE_SHIFT)) {
|
||||
/* carry-over the remaining offset to next s list */
|
||||
page_offset = page_offset-(s->length >> PAGE_SHIFT);
|
||||
} else {
|
||||
/*
|
||||
* the page_offset is within this s list
|
||||
* there is no more offset for the next s list
|
||||
*/
|
||||
page_offset = 0;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
err:
|
||||
if (!IS_ERR_OR_NULL(sg_table))
|
||||
dma_buf_unmap_attachment(attach, sg_table, DMA_TO_DEVICE);
|
||||
if (!IS_ERR_OR_NULL(attach))
|
||||
dma_buf_detach(dmabuf, attach);
|
||||
if (!IS_ERR_OR_NULL(dmabuf))
|
||||
dma_buf_put(dmabuf);
|
||||
return rc;
|
||||
}
|
||||
|
||||
int habmem_hyp_grant_user(unsigned long address,
|
||||
int page_count,
|
||||
int flags,
|
||||
int remotedom,
|
||||
void *ppdata)
|
||||
{
|
||||
int i, ret = 0;
|
||||
struct grantable *item = (struct grantable *)ppdata;
|
||||
struct page **pages;
|
||||
|
||||
pages = vmalloc(page_count * sizeof(struct page *));
|
||||
if (!pages)
|
||||
return -ENOMEM;
|
||||
|
||||
down_read(¤t->mm->mmap_sem);
|
||||
|
||||
if (HABMM_EXP_MEM_TYPE_DMA & flags) {
|
||||
ret = habmem_get_dma_pages(address,
|
||||
page_count,
|
||||
pages);
|
||||
} else {
|
||||
ret = get_user_pages(current, current->mm,
|
||||
address,
|
||||
page_count,
|
||||
1,
|
||||
1,
|
||||
pages,
|
||||
NULL);
|
||||
}
|
||||
|
||||
if (ret > 0) {
|
||||
for (i = 0; i < page_count; i++)
|
||||
item[i].pfn = page_to_pfn(pages[i]);
|
||||
} else {
|
||||
pr_err("get %d user pages failed: %d\n", page_count, ret);
|
||||
}
|
||||
|
||||
vfree(pages);
|
||||
up_read(¤t->mm->mmap_sem);
|
||||
return ret;
|
||||
}
|
||||
/*
|
||||
* exporter - grant & revoke
|
||||
* generate shareable page list based on CPU friendly virtual "address".
|
||||
* The result as an array is stored in ppdata to return to caller
|
||||
* page size 4KB is assumed
|
||||
*/
|
||||
int habmem_hyp_grant(unsigned long address,
|
||||
int page_count,
|
||||
int flags,
|
||||
int remotedom,
|
||||
void *ppdata)
|
||||
{
|
||||
int i;
|
||||
struct grantable *item;
|
||||
void *kva = (void *)(uintptr_t)address;
|
||||
int is_vmalloc = is_vmalloc_addr(kva);
|
||||
|
||||
item = (struct grantable *)ppdata;
|
||||
|
||||
for (i = 0; i < page_count; i++) {
|
||||
kva = (void *)(uintptr_t)(address + i*PAGE_SIZE);
|
||||
if (is_vmalloc)
|
||||
item[i].pfn = page_to_pfn(vmalloc_to_page(kva));
|
||||
else
|
||||
item[i].pfn = page_to_pfn(virt_to_page(kva));
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Exporter-side revoke of previously granted pages.
 * Currently a stub: pages pinned by habmem_hyp_grant_user() are not
 * released here -- NOTE(review): confirm this is intentional on this
 * platform before relying on unexport to free memory.
 */
int habmem_hyp_revoke(void *expdata, uint32_t count)
{
	return 0;
}
|
||||
|
||||
void *habmem_imp_hyp_open(void)
|
||||
{
|
||||
struct importer_context *priv;
|
||||
|
||||
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
|
||||
if (!priv)
|
||||
return NULL;
|
||||
|
||||
INIT_LIST_HEAD(&priv->imp_list);
|
||||
|
||||
return priv;
|
||||
}
|
||||
|
||||
void habmem_imp_hyp_close(void *imp_ctx, int kernel)
|
||||
{
|
||||
struct importer_context *priv = imp_ctx;
|
||||
struct pages_list *pglist, *pglist_tmp;
|
||||
|
||||
if (!priv)
|
||||
return;
|
||||
|
||||
list_for_each_entry_safe(pglist, pglist_tmp, &priv->imp_list, list) {
|
||||
if (kernel && pglist->kva)
|
||||
vunmap(pglist->kva);
|
||||
|
||||
list_del(&pglist->list);
|
||||
priv->cnt--;
|
||||
|
||||
vfree(pglist->pages);
|
||||
kfree(pglist);
|
||||
}
|
||||
|
||||
kfree(priv);
|
||||
}
|
||||
|
||||
/*
|
||||
* setup pages, be ready for the following mmap call
|
||||
* index is output to refer to this imported buffer described by the import data
|
||||
*/
|
||||
long habmem_imp_hyp_map(void *imp_ctx,
|
||||
void *impdata,
|
||||
uint32_t count,
|
||||
uint32_t remotedom,
|
||||
uint64_t *index,
|
||||
void **pkva,
|
||||
int kernel,
|
||||
uint32_t userflags)
|
||||
{
|
||||
struct page **pages;
|
||||
struct compressed_pfns *pfn_table = impdata;
|
||||
struct pages_list *pglist;
|
||||
struct importer_context *priv = imp_ctx;
|
||||
unsigned long pfn;
|
||||
int i, j, k = 0;
|
||||
|
||||
if (!pfn_table || !priv)
|
||||
return -EINVAL;
|
||||
|
||||
pages = vmalloc(count * sizeof(struct page *));
|
||||
if (!pages)
|
||||
return -ENOMEM;
|
||||
|
||||
pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
|
||||
if (!pglist) {
|
||||
vfree(pages);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
pfn = pfn_table->first_pfn;
|
||||
for (i = 0; i < pfn_table->nregions; i++) {
|
||||
for (j = 0; j < pfn_table->region[i].size; j++) {
|
||||
pages[k] = pfn_to_page(pfn+j);
|
||||
k++;
|
||||
}
|
||||
pfn += pfn_table->region[i].size + pfn_table->region[i].space;
|
||||
}
|
||||
|
||||
pglist->pages = pages;
|
||||
pglist->npages = count;
|
||||
pglist->kernel = kernel;
|
||||
pglist->index = page_to_phys(pages[0]) >> PAGE_SHIFT;
|
||||
pglist->refcntk = pglist->refcntu = 0;
|
||||
pglist->userflags = userflags;
|
||||
|
||||
*index = pglist->index << PAGE_SHIFT;
|
||||
|
||||
if (kernel) {
|
||||
pgprot_t prot = PAGE_KERNEL;
|
||||
|
||||
if (!(userflags & HABMM_IMPORT_FLAGS_CACHED))
|
||||
prot = pgprot_writecombine(prot);
|
||||
|
||||
pglist->kva = vmap(pglist->pages, pglist->npages, VM_MAP, prot);
|
||||
if (pglist->kva == NULL) {
|
||||
vfree(pages);
|
||||
kfree(pglist);
|
||||
pr_err("%ld pages vmap failed\n", pglist->npages);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
pglist->uva = NULL;
|
||||
pglist->refcntk++;
|
||||
*pkva = pglist->kva;
|
||||
*index = (uint64_t)((uintptr_t)pglist->kva);
|
||||
} else {
|
||||
pglist->kva = NULL;
|
||||
}
|
||||
|
||||
list_add_tail(&pglist->list, &priv->imp_list);
|
||||
priv->cnt++;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* the input index is PHY address shifted for uhab, and kva for khab */
|
||||
long habmm_imp_hyp_unmap(void *imp_ctx,
|
||||
uint64_t index,
|
||||
uint32_t count,
|
||||
int kernel)
|
||||
{
|
||||
struct importer_context *priv = imp_ctx;
|
||||
struct pages_list *pglist;
|
||||
int found = 0;
|
||||
uint64_t pg_index = index >> PAGE_SHIFT;
|
||||
|
||||
list_for_each_entry(pglist, &priv->imp_list, list) {
|
||||
if (kernel) {
|
||||
if (pglist->kva == (void *)((uintptr_t)index))
|
||||
found = 1;
|
||||
} else {
|
||||
if (pglist->index == pg_index)
|
||||
found = 1;
|
||||
}
|
||||
|
||||
if (found) {
|
||||
list_del(&pglist->list);
|
||||
priv->cnt--;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!found) {
|
||||
pr_err("failed to find export id on index %llx\n", index);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (kernel)
|
||||
if (pglist->kva)
|
||||
vunmap(pglist->kva);
|
||||
|
||||
vfree(pglist->pages);
|
||||
kfree(pglist);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hab_map_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
||||
{
|
||||
struct page *page;
|
||||
struct pages_list *pglist;
|
||||
|
||||
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
|
||||
|
||||
/* PHY address */
|
||||
unsigned long fault_offset =
|
||||
(unsigned long)vmf->virtual_address - vma->vm_start + offset;
|
||||
unsigned long fault_index = fault_offset>>PAGE_SHIFT;
|
||||
int page_idx;
|
||||
|
||||
if (vma == NULL)
|
||||
return VM_FAULT_SIGBUS;
|
||||
|
||||
pglist = vma->vm_private_data;
|
||||
|
||||
page_idx = fault_index - pglist->index;
|
||||
if (page_idx < 0 || page_idx >= pglist->npages) {
|
||||
pr_err("Out of page array. page_idx %d, pg cnt %ld",
|
||||
page_idx, pglist->npages);
|
||||
return VM_FAULT_SIGBUS;
|
||||
}
|
||||
|
||||
page = pglist->pages[page_idx];
|
||||
get_page(page);
|
||||
vmf->page = page;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* VMA open/close hooks: no per-mapping state to manage. */
static void hab_map_open(struct vm_area_struct *vma)
{
}

static void hab_map_close(struct vm_area_struct *vma)
{
}

/* Fault-driven mapping: pages are inserted lazily by hab_map_fault(). */
static const struct vm_operations_struct habmem_vm_ops = {
	.fault = hab_map_fault,
	.open = hab_map_open,
	.close = hab_map_close,
};
|
||||
|
||||
int habmem_imp_hyp_mmap(struct file *filp, struct vm_area_struct *vma)
|
||||
{
|
||||
struct uhab_context *ctx = (struct uhab_context *) filp->private_data;
|
||||
struct importer_context *imp_ctx = ctx->import_ctx;
|
||||
long length = vma->vm_end - vma->vm_start;
|
||||
struct pages_list *pglist;
|
||||
int bfound = 0;
|
||||
|
||||
list_for_each_entry(pglist, &imp_ctx->imp_list, list) {
|
||||
if (pglist->index == vma->vm_pgoff) {
|
||||
bfound = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!bfound) {
|
||||
pr_err("Failed to find pglist vm_pgoff: %d\n", vma->vm_pgoff);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (length > pglist->npages * PAGE_SIZE) {
|
||||
pr_err("Error vma length %ld not matching page list %ld\n",
|
||||
length, pglist->npages * PAGE_SIZE);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
vma->vm_ops = &habmem_vm_ops;
|
||||
|
||||
vma->vm_private_data = pglist;
|
||||
|
||||
if (!(pglist->userflags & HABMM_IMPORT_FLAGS_CACHED))
|
||||
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
|
||||
|
||||
return 0;
|
||||
}
|
394
drivers/soc/qcom/hab/hab_mimex.c
Normal file
394
drivers/soc/qcom/hab/hab_mimex.c
Normal file
|
@ -0,0 +1,394 @@
|
|||
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
#include "hab.h"
|
||||
#include "hab_grantable.h"
|
||||
|
||||
/*
|
||||
* use physical channel to send export parcel
|
||||
|
||||
* local remote
|
||||
* send(export) --> IRQ store to export warehouse
|
||||
* wait(export ack) <-- send(export ack)
|
||||
|
||||
* the actual data consists the following 3 parts listed in order
|
||||
* 1. header (uint32_t) vcid|type|size
|
||||
* 2. export parcel (full struct)
|
||||
* 3. full contents in export->pdata
|
||||
*/
|
||||
|
||||
|
||||
static int hab_export_ack_find(struct uhab_context *ctx,
|
||||
struct hab_export_ack *expect_ack)
|
||||
{
|
||||
int ret = 0;
|
||||
struct hab_export_ack_recvd *ack_recvd;
|
||||
|
||||
spin_lock_bh(&ctx->expq_lock);
|
||||
|
||||
list_for_each_entry(ack_recvd, &ctx->exp_rxq, node) {
|
||||
if (ack_recvd->ack.export_id == expect_ack->export_id &&
|
||||
ack_recvd->ack.vcid_local == expect_ack->vcid_local &&
|
||||
ack_recvd->ack.vcid_remote == expect_ack->vcid_remote) {
|
||||
list_del(&ack_recvd->node);
|
||||
kfree(ack_recvd);
|
||||
ret = 1;
|
||||
break;
|
||||
}
|
||||
ack_recvd->age++;
|
||||
if (ack_recvd->age > Q_AGE_THRESHOLD) {
|
||||
list_del(&ack_recvd->node);
|
||||
kfree(ack_recvd);
|
||||
}
|
||||
}
|
||||
|
||||
spin_unlock_bh(&ctx->expq_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int hab_export_ack_wait(struct uhab_context *ctx,
|
||||
struct hab_export_ack *expect_ack)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = wait_event_interruptible_timeout(ctx->exp_wq,
|
||||
hab_export_ack_find(ctx, expect_ack),
|
||||
HZ);
|
||||
if (!ret || (ret == -ERESTARTSYS))
|
||||
ret = -EAGAIN;
|
||||
else if (ret > 0)
|
||||
ret = 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get id from free list first. if not available, new id is generated.
|
||||
* Once generated it will not be erased
|
||||
* assumptions: no handshake or memory map/unmap in this helper function
|
||||
*/
|
||||
static struct export_desc *habmem_add_export(struct virtual_channel *vchan,
|
||||
int sizebytes,
|
||||
uint32_t flags)
|
||||
{
|
||||
struct uhab_context *ctx;
|
||||
struct export_desc *exp;
|
||||
|
||||
if (!vchan || !sizebytes)
|
||||
return NULL;
|
||||
|
||||
exp = vmalloc(sizebytes);
|
||||
if (!exp)
|
||||
return NULL;
|
||||
|
||||
idr_preload(GFP_KERNEL);
|
||||
spin_lock(&vchan->pchan->expid_lock);
|
||||
exp->export_id =
|
||||
idr_alloc(&vchan->pchan->expid_idr, exp, 1, 0, GFP_NOWAIT);
|
||||
spin_unlock(&vchan->pchan->expid_lock);
|
||||
idr_preload_end();
|
||||
|
||||
exp->readonly = flags;
|
||||
exp->vchan = vchan;
|
||||
exp->vcid_local = vchan->id;
|
||||
exp->vcid_remote = vchan->otherend_id;
|
||||
exp->domid_local = -1; /* dom id, provided on the importer */
|
||||
exp->domid_remote = vchan->pchan->dom_id;
|
||||
|
||||
ctx = vchan->ctx;
|
||||
write_lock(&ctx->exp_lock);
|
||||
ctx->export_total++;
|
||||
list_add_tail(&exp->node, &ctx->exp_whse);
|
||||
write_unlock(&ctx->exp_lock);
|
||||
|
||||
return exp;
|
||||
}
|
||||
|
||||
void habmem_remove_export(struct export_desc *exp)
|
||||
{
|
||||
struct physical_channel *pchan;
|
||||
struct uhab_context *ctx;
|
||||
|
||||
if (!exp || !exp->vchan || !exp->vchan->ctx || !exp->vchan->pchan)
|
||||
return;
|
||||
|
||||
ctx = exp->vchan->ctx;
|
||||
ctx->export_total--;
|
||||
|
||||
pchan = exp->vchan->pchan;
|
||||
|
||||
spin_lock(&pchan->expid_lock);
|
||||
idr_remove(&pchan->expid_idr, exp->export_id);
|
||||
spin_unlock(&pchan->expid_lock);
|
||||
|
||||
vfree(exp);
|
||||
}
|
||||
|
||||
/*
 * Run-length compress a flat pfn array (struct grantable[]) into a
 * struct compressed_pfns table: each run of consecutive pfns collapses
 * into one {size, gap-to-next} region.  On success the input buffer in
 * *pfns is vfree'd and replaced by the new table, and *data_size is set
 * to the table's size in bytes.  Returns 0 or -ENOMEM (on failure the
 * input is left untouched).  Assumes npages >= 1 (item[0] is read).
 */
static int compress_pfns(void **pfns, int npages, unsigned int *data_size)
{
	int i, j = 0;
	struct grantable *item = (struct grantable *)*pfns;
	int region_size = 1;	/* length of the current run, in pages */
	/* worst case is npages single-page regions, hence this sizing */
	struct compressed_pfns *new_table =
		vmalloc(sizeof(struct compressed_pfns) +
			npages * sizeof(struct region));

	if (!new_table)
		return -ENOMEM;

	new_table->first_pfn = item[0].pfn;
	for (i = 1; i < npages; i++) {
		if (item[i].pfn-1 == item[i-1].pfn) {
			/* still contiguous: extend the current run */
			region_size++;
		} else {
			/* close the run and record the hole before the next */
			new_table->region[j].size = region_size;
			new_table->region[j].space = item[i].pfn -
				item[i-1].pfn - 1;
			j++;
			region_size = 1;
		}
	}
	/* the final run has no following gap */
	new_table->region[j].size = region_size;
	new_table->region[j].space = 0;
	new_table->nregions = j+1;
	vfree(*pfns);

	*data_size = sizeof(struct compressed_pfns) +
		sizeof(struct region)*new_table->nregions;
	*pfns = new_table;
	return 0;
}
|
||||
|
||||
/*
 * store the parcel to the warehouse, then send the parcel to remote side
 * both exporter composed export descriptor and the grantrefids are sent
 * as one msg to the importer side
 */
static int habmem_export_vchan(struct uhab_context *ctx,
		struct virtual_channel *vchan,
		void *pdata,
		int payload_size,
		int nunits,
		uint32_t flags,
		uint32_t *export_id) {
	int ret;
	struct export_desc *exp;
	uint32_t sizebytes = sizeof(*exp) + payload_size;
	struct hab_export_ack expected_ack = {0};
	struct hab_header header = HAB_HEADER_INITIALIZER;

	/* allocates the descriptor and links it into ctx->exp_whse */
	exp = habmem_add_export(vchan, sizebytes, flags);
	if (!exp)
		return -ENOMEM;

	/* append the pdata to the export descriptor */
	exp->payload_count = nunits;
	memcpy(exp->payload, pdata, payload_size);

	HAB_HEADER_SET_SIZE(header, sizebytes);
	HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_EXPORT);
	HAB_HEADER_SET_ID(header, vchan->otherend_id);
	ret = physical_channel_send(vchan->pchan, &header, exp);

	if (ret != 0) {
		/*
		 * NOTE(review): exp stays linked in exp_whse on this path;
		 * only hab_mem_unexport() removes it -- potential leak if the
		 * client never unexports after a send failure. Confirm.
		 */
		pr_err("failed to export payload to the remote %d\n", ret);
		return ret;
	}

	/* block (up to 1s) until the importer acks this specific export */
	expected_ack.export_id = exp->export_id;
	expected_ack.vcid_local = exp->vcid_local;
	expected_ack.vcid_remote = exp->vcid_remote;
	ret = hab_export_ack_wait(ctx, &expected_ack);

	*export_id = exp->export_id;

	return ret;
}
|
||||
|
||||
int hab_mem_export(struct uhab_context *ctx,
|
||||
struct hab_export *param,
|
||||
int kernel)
|
||||
{
|
||||
int ret = 0;
|
||||
void *pdata_exp = NULL;
|
||||
unsigned int pdata_size = 0;
|
||||
uint32_t export_id = 0;
|
||||
struct virtual_channel *vchan;
|
||||
int page_count;
|
||||
|
||||
if (!ctx || !param || param->sizebytes > HAB_MAX_EXPORT_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
vchan = hab_get_vchan_fromvcid(param->vcid, ctx);
|
||||
if (!vchan || !vchan->pchan) {
|
||||
ret = -ENODEV;
|
||||
goto err;
|
||||
}
|
||||
|
||||
page_count = param->sizebytes/PAGE_SIZE;
|
||||
pdata_exp = habmm_hyp_allocate_grantable(page_count, &pdata_size);
|
||||
if (!pdata_exp) {
|
||||
ret = -ENOMEM;
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (kernel) {
|
||||
ret = habmem_hyp_grant((unsigned long)param->buffer,
|
||||
page_count,
|
||||
param->flags,
|
||||
vchan->pchan->dom_id,
|
||||
pdata_exp);
|
||||
} else {
|
||||
ret = habmem_hyp_grant_user((unsigned long)param->buffer,
|
||||
page_count,
|
||||
param->flags,
|
||||
vchan->pchan->dom_id,
|
||||
pdata_exp);
|
||||
}
|
||||
if (ret < 0) {
|
||||
pr_err("habmem_hyp_grant failed size=%d ret=%d\n",
|
||||
pdata_size, ret);
|
||||
goto err;
|
||||
}
|
||||
|
||||
compress_pfns(&pdata_exp, page_count, &pdata_size);
|
||||
|
||||
ret = habmem_export_vchan(ctx,
|
||||
vchan,
|
||||
pdata_exp,
|
||||
pdata_size,
|
||||
page_count,
|
||||
param->flags,
|
||||
&export_id);
|
||||
|
||||
param->exportid = export_id;
|
||||
err:
|
||||
vfree(pdata_exp);
|
||||
if (vchan)
|
||||
hab_vchan_put(vchan);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int hab_mem_unexport(struct uhab_context *ctx,
|
||||
struct hab_unexport *param,
|
||||
int kernel)
|
||||
{
|
||||
int ret = 0, found = 0;
|
||||
struct export_desc *exp, *tmp;
|
||||
|
||||
if (!ctx || !param)
|
||||
return -EINVAL;
|
||||
|
||||
write_lock(&ctx->exp_lock);
|
||||
list_for_each_entry_safe(exp, tmp, &ctx->exp_whse, node) {
|
||||
if ((param->exportid == exp->export_id) &&
|
||||
(param->vcid == exp->vcid_local)) {
|
||||
list_del(&exp->node);
|
||||
found = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
write_unlock(&ctx->exp_lock);
|
||||
|
||||
if (!found)
|
||||
return -EINVAL;
|
||||
|
||||
ret = habmem_hyp_revoke(exp->payload, exp->payload_count);
|
||||
|
||||
habmem_remove_export(exp);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * Map an export (previously queued into this context's import warehouse
 * by the receive path) into the importer's address space.
 * Fills param->index (mapping cookie) and, for khab, param->kva.
 */
int hab_mem_import(struct uhab_context *ctx,
		struct hab_import *param,
		int kernel)
{
	int ret = 0, found = 0;
	struct export_desc *exp = NULL;

	if (!ctx || !param)
		return -EINVAL;

	/* look the (export id, remote vcid) pair up in the warehouse */
	spin_lock_bh(&ctx->imp_lock);
	list_for_each_entry(exp, &ctx->imp_whse, node) {
		if ((exp->export_id == param->exportid) &&
			(param->vcid == exp->vcid_remote)) {
			found = 1;
			break;
		}
	}
	spin_unlock_bh(&ctx->imp_lock);

	if (!found) {
		pr_err("Fail to get export descriptor from export id %d\n",
			param->exportid);
		ret = -ENODEV;
		return ret;
	}

	/*
	 * NOTE(review): exp is dereferenced after imp_lock is dropped;
	 * this relies on it not being unimported concurrently -- confirm.
	 */
	ret = habmem_imp_hyp_map(ctx->import_ctx,
		exp->payload,
		exp->payload_count,
		exp->domid_local,
		&exp->import_index,
		&exp->kva,
		kernel,
		param->flags);
	if (ret) {
		pr_err("Import fail ret:%d pcnt:%d rem:%d 1st_ref:0x%X\n",
			ret, exp->payload_count,
			exp->domid_local, *((uint32_t *)exp->payload));
		return ret;
	}

	param->index = exp->import_index;
	param->kva = (uint64_t)exp->kva;

	return ret;
}
|
||||
|
||||
/*
 * Reverse of hab_mem_import(): remove the export descriptor from the
 * import warehouse and unmap the buffer.  Returns -EINVAL when no
 * matching import is found.
 */
int hab_mem_unimport(struct uhab_context *ctx,
		struct hab_unimport *param,
		int kernel)
{
	int ret = 0, found = 0;
	struct export_desc *exp = NULL, *exp_tmp;

	if (!ctx || !param)
		return -EINVAL;

	spin_lock_bh(&ctx->imp_lock);
	list_for_each_entry_safe(exp, exp_tmp, &ctx->imp_whse, node) {
		if ((exp->export_id == param->exportid) &&
			(param->vcid == exp->vcid_remote)) {
			list_del(&exp->node);
			ctx->import_total--;
			found = 1;
			break;
		}
	}
	spin_unlock_bh(&ctx->imp_lock);

	if (!found)
		ret = -EINVAL;
	else {
		ret = habmm_imp_hyp_unmap(ctx->import_ctx,
			exp->import_index,
			exp->payload_count,
			kernel);

		param->kva = (uint64_t)exp->kva;
		/*
		 * NOTE(review): kfree here assumes the receive path kmalloc'd
		 * this importer-side descriptor (the exporter side vmalloc's
		 * its own copy in habmem_add_export) -- confirm the allocator
		 * in hab_msg_recv().
		 */
		kfree(exp);
	}

	return ret;
}
|
208
drivers/soc/qcom/hab/hab_msg.c
Normal file
208
drivers/soc/qcom/hab/hab_msg.c
Normal file
|
@ -0,0 +1,208 @@
|
|||
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
#include "hab.h"
|
||||
|
||||
static int hab_rx_queue_empty(struct virtual_channel *vchan)
|
||||
{
|
||||
int ret;
|
||||
|
||||
spin_lock_bh(&vchan->rx_lock);
|
||||
ret = list_empty(&vchan->rx_list);
|
||||
spin_unlock_bh(&vchan->rx_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct hab_message*
|
||||
hab_msg_alloc(struct physical_channel *pchan, size_t sizebytes)
|
||||
{
|
||||
struct hab_message *message;
|
||||
|
||||
message = kzalloc(sizeof(*message) + sizebytes, GFP_ATOMIC);
|
||||
if (!message)
|
||||
return NULL;
|
||||
|
||||
message->sizebytes =
|
||||
physical_channel_read(pchan, message->data, sizebytes);
|
||||
|
||||
return message;
|
||||
}
|
||||
|
||||
/* Release a message obtained from hab_msg_dequeue(). */
void hab_msg_free(struct hab_message *message)
{
	kfree(message);
}
|
||||
|
||||
/*
 * Pop the oldest message from vchan's receive list.
 * With wait_flag set, sleeps (interruptibly) until a message arrives or
 * the remote end closes.  Returns NULL when interrupted by a signal,
 * when the peer has closed, or when the list is empty.
 */
struct hab_message *
hab_msg_dequeue(struct virtual_channel *vchan, int wait_flag)
{
	struct hab_message *message = NULL;
	int ret = 0;

	if (wait_flag) {
		if (hab_rx_queue_empty(vchan))
			ret = wait_event_interruptible(vchan->rx_queue,
				!hab_rx_queue_empty(vchan) ||
				vchan->otherend_closed);
	}

	/* ret != 0 means the wait was interrupted by a signal */
	if (!ret && !vchan->otherend_closed) {
		spin_lock_bh(&vchan->rx_lock);
		if (!list_empty(&vchan->rx_list)) {
			message = list_first_entry(&vchan->rx_list,
				struct hab_message, node);
			list_del(&message->node);
		}
		spin_unlock_bh(&vchan->rx_lock);
	}

	return message;
}
|
||||
|
||||
/* Append an inbound message to the vchan's rx list and wake readers. */
static void hab_msg_queue(struct virtual_channel *vchan,
		struct hab_message *message)
{
	spin_lock_bh(&vchan->rx_lock);
	list_add_tail(&message->node, &vchan->rx_list);
	spin_unlock_bh(&vchan->rx_lock);

	wake_up_interruptible(&vchan->rx_queue);
}
|
||||
|
||||
/*
 * Park a received export descriptor in the owning context's import
 * warehouse, where a later import request will look it up.
 * Always succeeds (returns 0).
 */
static int hab_export_enqueue(struct virtual_channel *vchan,
		struct export_desc *exp)
{
	struct uhab_context *ctx = vchan->ctx;

	spin_lock_bh(&ctx->imp_lock);
	list_add_tail(&exp->node, &ctx->imp_whse);
	ctx->import_total++;
	spin_unlock_bh(&ctx->imp_lock);

	return 0;
}
|
||||
|
||||
/* Acknowledge a received export back to the exporting side. */
static int hab_send_export_ack(struct physical_channel *pchan,
		struct export_desc *exp)
{
	struct hab_export_ack exp_ack = {
		.export_id = exp->export_id,
		.vcid_local = exp->vcid_local,
		.vcid_remote = exp->vcid_remote
	};
	struct hab_header header = HAB_HEADER_INITIALIZER;

	HAB_HEADER_SET_SIZE(header, sizeof(exp_ack));
	HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_EXPORT_ACK);
	/* route the ack back using the exporter's local vcid */
	HAB_HEADER_SET_ID(header, exp->vcid_local);
	return physical_channel_send(pchan, &header, &exp_ack);
}
|
||||
|
||||
static int hab_receive_create_export_ack(struct physical_channel *pchan,
|
||||
struct uhab_context *ctx)
|
||||
{
|
||||
struct hab_export_ack_recvd *ack_recvd =
|
||||
kzalloc(sizeof(*ack_recvd), GFP_ATOMIC);
|
||||
|
||||
if (!ack_recvd)
|
||||
return -ENOMEM;
|
||||
|
||||
if (physical_channel_read(pchan,
|
||||
&ack_recvd->ack,
|
||||
sizeof(ack_recvd->ack)) != sizeof(ack_recvd->ack))
|
||||
return -EIO;
|
||||
|
||||
spin_lock_bh(&ctx->expq_lock);
|
||||
list_add_tail(&ack_recvd->node, &ctx->exp_rxq);
|
||||
spin_unlock_bh(&ctx->expq_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void hab_msg_recv(struct physical_channel *pchan,
|
||||
struct hab_header *header)
|
||||
{
|
||||
int ret;
|
||||
struct hab_message *message;
|
||||
struct hab_device *dev = pchan->habdev;
|
||||
size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
|
||||
uint32_t payload_type = HAB_HEADER_GET_TYPE(*header);
|
||||
uint32_t vchan_id = HAB_HEADER_GET_ID(*header);
|
||||
struct virtual_channel *vchan = NULL;
|
||||
struct export_desc *exp_desc;
|
||||
|
||||
/* get the local virtual channel if it isn't an open message */
|
||||
if (payload_type != HAB_PAYLOAD_TYPE_INIT &&
|
||||
payload_type != HAB_PAYLOAD_TYPE_INIT_ACK &&
|
||||
payload_type != HAB_PAYLOAD_TYPE_ACK) {
|
||||
vchan = hab_vchan_get(pchan, vchan_id);
|
||||
if (!vchan) {
|
||||
return;
|
||||
} else if (vchan->otherend_closed) {
|
||||
hab_vchan_put(vchan);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
switch (payload_type) {
|
||||
case HAB_PAYLOAD_TYPE_MSG:
|
||||
message = hab_msg_alloc(pchan, sizebytes);
|
||||
if (!message)
|
||||
break;
|
||||
|
||||
hab_msg_queue(vchan, message);
|
||||
break;
|
||||
|
||||
case HAB_PAYLOAD_TYPE_INIT:
|
||||
case HAB_PAYLOAD_TYPE_INIT_ACK:
|
||||
case HAB_PAYLOAD_TYPE_ACK:
|
||||
ret = hab_open_request_add(pchan, header);
|
||||
if (ret)
|
||||
break;
|
||||
wake_up_interruptible(&dev->openq);
|
||||
break;
|
||||
|
||||
case HAB_PAYLOAD_TYPE_EXPORT:
|
||||
exp_desc = kzalloc(sizebytes, GFP_ATOMIC);
|
||||
if (!exp_desc)
|
||||
break;
|
||||
|
||||
if (physical_channel_read(pchan, exp_desc, sizebytes) !=
|
||||
sizebytes) {
|
||||
vfree(exp_desc);
|
||||
break;
|
||||
}
|
||||
|
||||
exp_desc->domid_local = pchan->dom_id;
|
||||
|
||||
hab_export_enqueue(vchan, exp_desc);
|
||||
hab_send_export_ack(pchan, exp_desc);
|
||||
break;
|
||||
|
||||
case HAB_PAYLOAD_TYPE_EXPORT_ACK:
|
||||
ret = hab_receive_create_export_ack(pchan, vchan->ctx);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
wake_up_interruptible(&vchan->ctx->exp_wq);
|
||||
break;
|
||||
|
||||
case HAB_PAYLOAD_TYPE_CLOSE:
|
||||
hab_vchan_stop(vchan);
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
if (vchan)
|
||||
hab_vchan_put(vchan);
|
||||
}
|
154
drivers/soc/qcom/hab/hab_open.c
Normal file
154
drivers/soc/qcom/hab/hab_open.c
Normal file
|
@ -0,0 +1,154 @@
|
|||
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
#include "hab.h"
|
||||
|
||||
void hab_open_request_init(struct hab_open_request *request,
|
||||
int type,
|
||||
struct physical_channel *pchan,
|
||||
int vchan_id,
|
||||
int sub_id,
|
||||
int open_id)
|
||||
{
|
||||
request->type = type;
|
||||
request->pchan = pchan;
|
||||
request->vchan_id = vchan_id;
|
||||
request->sub_id = sub_id;
|
||||
request->open_id = open_id;
|
||||
}
|
||||
|
||||
/*
 * Serialize an open-handshake request and push it over its physical
 * channel.  Returns whatever physical_channel_send() returns.
 */
int hab_open_request_send(struct hab_open_request *request)
{
	struct hab_header header = HAB_HEADER_INITIALIZER;
	struct hab_open_send_data data;

	HAB_HEADER_SET_SIZE(header, sizeof(struct hab_open_send_data));
	HAB_HEADER_SET_TYPE(header, request->type);

	data.vchan_id = request->vchan_id;
	data.open_id = request->open_id;
	data.sub_id = request->sub_id;

	return physical_channel_send(request->pchan, &header, &data);
}
|
||||
|
||||
int hab_open_request_add(struct physical_channel *pchan,
|
||||
struct hab_header *header)
|
||||
{
|
||||
struct hab_open_node *node;
|
||||
struct hab_device *dev = pchan->habdev;
|
||||
struct hab_open_send_data data;
|
||||
struct hab_open_request *request;
|
||||
|
||||
node = kzalloc(sizeof(*node), GFP_ATOMIC);
|
||||
if (!node)
|
||||
return -ENOMEM;
|
||||
|
||||
if (physical_channel_read(pchan, &data, HAB_HEADER_GET_SIZE(*header)) !=
|
||||
HAB_HEADER_GET_SIZE(*header))
|
||||
return -EIO;
|
||||
|
||||
request = &node->request;
|
||||
request->type = HAB_HEADER_GET_TYPE(*header);
|
||||
request->pchan = pchan;
|
||||
request->vchan_id = data.vchan_id;
|
||||
request->sub_id = data.sub_id;
|
||||
request->open_id = data.open_id;
|
||||
node->age = 0;
|
||||
hab_pchan_get(pchan);
|
||||
|
||||
spin_lock_bh(&dev->openlock);
|
||||
list_add_tail(&node->node, &dev->openq_list);
|
||||
spin_unlock_bh(&dev->openlock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Scan the device's pending-open queue for a request matching the
 * listen criteria: type and sub_id must match; open_id and pchan act
 * as wildcards when zero/NULL.  On a hit the node is unlinked and
 * handed back via *recv_request; stale entries past Q_AGE_THRESHOLD
 * are reaped along the way.
 *
 * Returns 1 when a request was found, or when the context/pchan is
 * closing (then *recv_request is NULL); returns 0 otherwise.
 */
static int hab_open_request_find(struct uhab_context *ctx,
		struct hab_device *dev,
		struct hab_open_request *listen,
		struct hab_open_request **recv_request)
{
	struct hab_open_node *node, *tmp;
	struct hab_open_request *request;
	int ret = 0;

	if (ctx->closing ||
		(listen->pchan && listen->pchan->closed)) {
		*recv_request = NULL;
		return 1;
	}

	spin_lock_bh(&dev->openlock);
	if (list_empty(&dev->openq_list))
		goto done;

	list_for_each_entry_safe(node, tmp, &dev->openq_list, node) {
		/*
		 * NOTE(review): this cast assumes 'request' is the first
		 * member of struct hab_open_node -- confirm against the
		 * struct layout in hab.h.
		 */
		request = (struct hab_open_request *)node;
		if (request->type == listen->type &&
			(request->sub_id == listen->sub_id) &&
			(!listen->open_id ||
				request->open_id == listen->open_id) &&
			(!listen->pchan ||
				request->pchan == listen->pchan)) {
			list_del(&node->node);
			*recv_request = request;
			ret = 1;
			break;
		}
		node->age++;
		if (node->age > Q_AGE_THRESHOLD) {
			/* expired entry: drop it and its pchan reference */
			list_del(&node->node);
			hab_open_request_free(request);
		}
	}

done:
	spin_unlock_bh(&dev->openlock);
	return ret;
}
|
||||
|
||||
void hab_open_request_free(struct hab_open_request *request)
|
||||
{
|
||||
if (request) {
|
||||
hab_pchan_put(request->pchan);
|
||||
kfree(request);
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Block until an open request matching 'listen' appears on the device
 * queue, the context starts closing, the wait is interrupted, or it
 * times out.
 *
 * ms_timeout > 0: bounded wait; returns -EAGAIN on timeout or signal.
 * ms_timeout <= 0: unbounded interruptible wait (-ERESTARTSYS on
 * signal).  On success returns 0 with *recv_request set (NULL when
 * the context/pchan is shutting down).
 *
 * NOTE(review): ms_timeout is passed straight to
 * wait_event_interruptible_timeout(), which expects jiffies, not
 * milliseconds -- confirm callers convert with msecs_to_jiffies().
 */
int hab_open_listen(struct uhab_context *ctx,
		struct hab_device *dev,
		struct hab_open_request *listen,
		struct hab_open_request **recv_request,
		int ms_timeout)
{
	int ret = 0;

	if (!ctx || !listen || !recv_request)
		return -EINVAL;

	*recv_request = NULL;
	if (ms_timeout > 0) {
		ret = wait_event_interruptible_timeout(dev->openq,
			hab_open_request_find(ctx, dev, listen, recv_request),
			ms_timeout);
		if (!ret || (-ERESTARTSYS == ret))
			ret = -EAGAIN;
		else if (ret > 0)
			ret = 0;
	} else {
		ret = wait_event_interruptible(dev->openq,
			hab_open_request_find(ctx, dev, listen, recv_request));
	}

	return ret;
}
|
86
drivers/soc/qcom/hab/hab_pchan.c
Normal file
86
drivers/soc/qcom/hab/hab_pchan.c
Normal file
|
@ -0,0 +1,86 @@
|
|||
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
#include "hab.h"
|
||||
|
||||
/*
 * Allocate a physical channel to peer domain 'otherend_id' on the
 * given hab device and link it into the device's pchannels list.
 *
 * The channel starts out closed (closed = 1) with no hypervisor data;
 * the transport backend fills hyp_data and clears 'closed' once the
 * shared-memory pipe is up.  Returns NULL on allocation failure.
 */
struct physical_channel *
hab_pchan_alloc(struct hab_device *habdev, int otherend_id)
{
	struct physical_channel *pchan = kzalloc(sizeof(*pchan), GFP_KERNEL);

	if (!pchan)
		return NULL;

	idr_init(&pchan->vchan_idr);
	spin_lock_init(&pchan->vid_lock);
	idr_init(&pchan->expid_idr);
	spin_lock_init(&pchan->expid_lock);
	kref_init(&pchan->refcount);

	pchan->habdev = habdev;
	pchan->dom_id = otherend_id;
	pchan->closed = 1;
	pchan->hyp_data = NULL;

	spin_lock_init(&pchan->rxbuf_lock);

	mutex_lock(&habdev->pchan_lock);
	list_add_tail(&pchan->node, &habdev->pchannels);
	mutex_unlock(&habdev->pchan_lock);

	return pchan;
}
|
||||
|
||||
/*
 * kref release callback: unlink the channel from its device and free
 * it along with the backend-private hyp_data it owns.
 */
static void hab_pchan_free(struct kref *ref)
{
	struct physical_channel *pchan =
		container_of(ref, struct physical_channel, refcount);

	mutex_lock(&pchan->habdev->pchan_lock);
	list_del(&pchan->node);
	mutex_unlock(&pchan->habdev->pchan_lock);
	/* NOTE(review): assumes hyp_data was kmalloc'ed by the backend */
	kfree(pchan->hyp_data);
	kfree(pchan);
}
|
||||
|
||||
struct physical_channel *
|
||||
hab_pchan_find_domid(struct hab_device *dev, int dom_id)
|
||||
{
|
||||
struct physical_channel *pchan;
|
||||
|
||||
mutex_lock(&dev->pchan_lock);
|
||||
list_for_each_entry(pchan, &dev->pchannels, node)
|
||||
if (pchan->dom_id == dom_id)
|
||||
break;
|
||||
|
||||
if (pchan->dom_id != dom_id)
|
||||
pchan = NULL;
|
||||
|
||||
if (pchan && !kref_get_unless_zero(&pchan->refcount))
|
||||
pchan = NULL;
|
||||
|
||||
mutex_unlock(&dev->pchan_lock);
|
||||
|
||||
return pchan;
|
||||
}
|
||||
|
||||
/* Take an additional reference on the channel (NULL-safe). */
void hab_pchan_get(struct physical_channel *pchan)
{
	if (pchan)
		kref_get(&pchan->refcount);
}
|
||||
|
||||
/* Drop a reference; frees the channel when the last one goes (NULL-safe). */
void hab_pchan_put(struct physical_channel *pchan)
{
	if (pchan)
		kref_put(&pchan->refcount, hab_pchan_free);
}
|
131
drivers/soc/qcom/hab/hab_pipe.c
Normal file
131
drivers/soc/qcom/hab/hab_pipe.c
Normal file
|
@ -0,0 +1,131 @@
|
|||
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
#include "hab.h"
|
||||
#include "hab_pipe.h"
|
||||
|
||||
/* Total shared-memory footprint: pipe header plus two rings. */
size_t hab_pipe_calc_required_bytes(uint32_t shared_buf_size)
{
	size_t one_direction = sizeof(struct hab_shared_buf) + shared_buf_size;

	return sizeof(struct hab_pipe) + 2 * one_direction;
}
|
||||
|
||||
/*
 * Carve the two shared ring buffers out of pipe->buf_base and return
 * the endpoint for this side.  'top' selects which ring is TX vs RX so
 * the two sides pair up (top TX == bottom RX and vice versa).
 *
 * NOTE(review): only the bottom side zeroes the shared buffers and
 * records the debug bookkeeping -- presumably the bottom (backend)
 * runs first; confirm against the transport bring-up order.
 * Returns NULL if 'pipe' is NULL.
 */
struct hab_pipe_endpoint *hab_pipe_init(struct hab_pipe *pipe,
		uint32_t shared_buf_size, int top)
{
	struct hab_pipe_endpoint *ep = NULL;
	struct hab_shared_buf *buf_a;
	struct hab_shared_buf *buf_b;

	if (!pipe)
		return NULL;

	buf_a = (struct hab_shared_buf *) pipe->buf_base;
	buf_b = (struct hab_shared_buf *) (pipe->buf_base
		+ sizeof(struct hab_shared_buf) + shared_buf_size);

	if (top) {
		ep = &pipe->top;
		memset(ep, 0, sizeof(*ep));
		ep->tx_info.sh_buf = buf_a;
		ep->rx_info.sh_buf = buf_b;
	} else {
		ep = &pipe->bottom;
		memset(ep, 0, sizeof(*ep));
		ep->tx_info.sh_buf = buf_b;
		ep->rx_info.sh_buf = buf_a;
		memset(ep->tx_info.sh_buf, 0, sizeof(struct hab_shared_buf));
		memset(ep->rx_info.sh_buf, 0, sizeof(struct hab_shared_buf));
		ep->tx_info.sh_buf->size = shared_buf_size;
		ep->rx_info.sh_buf->size = shared_buf_size;

		pipe->buf_a = buf_a;
		pipe->buf_b = buf_b;
		pipe->total_size =
			hab_pipe_calc_required_bytes(shared_buf_size);
	}
	return ep;
}
|
||||
|
||||
/*
 * Copy num_bytes from 'p' into the TX ring, handling the wrap at the
 * end of the buffer.  All-or-nothing: returns 0 when the free space
 * (ring size minus bytes not yet consumed by the reader) is too small,
 * otherwise num_bytes.  The data only becomes visible to the reader
 * once hab_pipe_write_commit() publishes the write counter.
 */
uint32_t hab_pipe_write(struct hab_pipe_endpoint *ep,
		unsigned char *p, uint32_t num_bytes)
{
	struct hab_shared_buf *sh_buf = ep->tx_info.sh_buf;
	/* free-running counters: difference is bytes still unread */
	uint32_t space =
		(sh_buf->size - (ep->tx_info.wr_count - sh_buf->rd_count));
	uint32_t count1, count2;

	if (!p || num_bytes > space || num_bytes == 0)
		return 0;

	/* count1 fills to the end of the ring; count2 is the wrapped tail */
	count1 = (num_bytes <= (sh_buf->size - ep->tx_info.index)) ? num_bytes :
		(sh_buf->size - ep->tx_info.index);
	count2 = num_bytes - count1;

	if (count1 > 0) {
		memcpy(&sh_buf->data[ep->tx_info.index], p, count1);
		ep->tx_info.wr_count += count1;
		ep->tx_info.index += count1;
		if (ep->tx_info.index >= sh_buf->size)
			ep->tx_info.index = 0;
	}
	if (count2 > 0) {/* handle buffer wrapping */
		memcpy(&sh_buf->data[ep->tx_info.index], p + count1, count2);
		ep->tx_info.wr_count += count2;
		ep->tx_info.index += count2;
		if (ep->tx_info.index >= sh_buf->size)
			ep->tx_info.index = 0;
	}
	return num_bytes;
}
|
||||
|
||||
/* Updates the write index which is shared with the other VM */
void hab_pipe_write_commit(struct hab_pipe_endpoint *ep)
{
	struct hab_shared_buf *sh_buf = ep->tx_info.sh_buf;

	mb(); /* Must commit data before incrementing count */
	/* publish the local shadow counter so the reader sees the new data */
	sh_buf->wr_count = ep->tx_info.wr_count;
}
|
||||
|
||||
/*
 * Copy up to 'size' bytes out of the RX ring into 'p', handling wrap.
 * Consumes at most what the writer has published (wr_count - rd_count)
 * and advances the shared rd_count as it goes, with a barrier ordering
 * the data reads before the count update the writer observes.
 * Returns the number of bytes actually read (0 when empty).
 */
uint32_t hab_pipe_read(struct hab_pipe_endpoint *ep,
		unsigned char *p, uint32_t size)
{
	struct hab_shared_buf *sh_buf = ep->rx_info.sh_buf;
	uint32_t avail = sh_buf->wr_count - sh_buf->rd_count;
	uint32_t count1, count2, to_read;

	if (!p || avail == 0 || size == 0)
		return 0;

	to_read = (avail < size) ? avail : size;
	/* count1 reads to the end of the ring; count2 is the wrapped tail */
	count1 = (to_read <= (sh_buf->size - ep->rx_info.index)) ? to_read :
		(sh_buf->size - ep->rx_info.index);
	count2 = to_read - count1;

	if (count1 > 0) {
		memcpy(p, &sh_buf->data[ep->rx_info.index], count1);
		ep->rx_info.index += count1;
		if (ep->rx_info.index >= sh_buf->size)
			ep->rx_info.index = 0;
		mb(); /*Must commit data before incremeting count*/
		sh_buf->rd_count += count1;
	}
	if (count2 > 0) { /* handle buffer wrapping */
		memcpy(p + count1, &sh_buf->data[ep->rx_info.index], count2);
		ep->rx_info.index += count2;
		mb(); /*Must commit data before incremeting count*/
		sh_buf->rd_count += count2;
	}

	return to_read;
}
|
60
drivers/soc/qcom/hab/hab_pipe.h
Normal file
60
drivers/soc/qcom/hab/hab_pipe.h
Normal file
|
@ -0,0 +1,60 @@
|
|||
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
#ifndef HAB_PIPE_H
|
||||
#define HAB_PIPE_H
|
||||
|
||||
/*
 * One direction of the shared-memory ring.  rd_count and wr_count are
 * free-running byte counters (reader and writer each own one side);
 * 'data' is the ring storage of 'size' bytes, placed directly in the
 * shared region, hence the flexible array member.
 */
struct hab_shared_buf {
	uint32_t rd_count;
	uint32_t wr_count;
	uint32_t size;
	unsigned char data[];
};

/* Per-side view of the pipe: local cursors plus the two shared rings. */
struct hab_pipe_endpoint {
	struct {
		uint32_t wr_count; /* local shadow, published on commit */
		uint32_t index;    /* next write offset in sh_buf->data */
		struct hab_shared_buf *sh_buf;
	} tx_info;
	struct {
		uint32_t index;    /* next read offset in sh_buf->data */
		struct hab_shared_buf *sh_buf;
	} rx_info;
};

/* Full pipe layout as placed in shared memory: two endpoints + rings. */
struct hab_pipe {
	struct hab_pipe_endpoint top;
	struct hab_pipe_endpoint bottom;

	/* For debugging only */
	struct hab_shared_buf *buf_a; /* top TX, bottom RX */
	struct hab_shared_buf *buf_b; /* top RX, bottom TX */
	size_t total_size;

	unsigned char buf_base[];
};
|
||||
|
||||
size_t hab_pipe_calc_required_bytes(uint32_t shared_buf_size);
|
||||
|
||||
struct hab_pipe_endpoint *hab_pipe_init(struct hab_pipe *pipe,
|
||||
uint32_t shared_buf_size, int top);
|
||||
|
||||
uint32_t hab_pipe_write(struct hab_pipe_endpoint *ep,
|
||||
unsigned char *p, uint32_t num_bytes);
|
||||
|
||||
void hab_pipe_write_commit(struct hab_pipe_endpoint *ep);
|
||||
|
||||
uint32_t hab_pipe_read(struct hab_pipe_endpoint *ep,
|
||||
unsigned char *p, uint32_t size);
|
||||
|
||||
#endif /* HAB_PIPE_H */
|
251
drivers/soc/qcom/hab/hab_qvm.c
Normal file
251
drivers/soc/qcom/hab/hab_qvm.c
Normal file
|
@ -0,0 +1,251 @@
|
|||
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
#include "hab.h"
|
||||
#include "hab_qvm.h"
|
||||
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_platform.h>
|
||||
|
||||
#define DEFAULT_HAB_SHMEM_IRQ 7
|
||||
|
||||
#define SHMEM_PHYSICAL_ADDR 0x1c050000
|
||||
|
||||
/*
 * Shared-memory doorbell interrupt.  The line is shared across
 * channels, so only claim it when this channel's status bit
 * (dev->idx) is set; actual rx processing is deferred to the
 * per-channel tasklet.
 */
static irqreturn_t shm_irq_handler(int irq, void *_pchan)
{
	irqreturn_t rc = IRQ_NONE;
	struct physical_channel *pchan = _pchan;
	struct qvm_channel *dev =
		(struct qvm_channel *) (pchan ? pchan->hyp_data : NULL);

	if (dev && dev->guest_ctrl) {
		int status = dev->guest_ctrl->status;

		if (status & dev->idx) {
			rc = IRQ_HANDLED;
			tasklet_schedule(&dev->task);
		}
	}
	return rc;
}
|
||||
|
||||
/*
 * Map the guest_shm factory page, tag it with the channel name, and
 * ask the hypervisor to create/attach a shared-memory region of
 * 'pages' pages.
 *
 * Returns the physical address of the shared region, or 0 on failure.
 * On failure dev->guest_factory is reset to NULL (the original code
 * left a dangling pointer behind after iounmap, which callers could
 * then dereference).
 */
static uint64_t get_guest_factory_paddr(struct qvm_channel *dev,
		const char *name, uint32_t pages)
{
	int i;

	dev->guest_factory = ioremap(SHMEM_PHYSICAL_ADDR, PAGE_SIZE);

	if (!dev->guest_factory) {
		pr_err("Couldn't map guest_factory\n");
		return 0;
	}

	if (dev->guest_factory->signature != GUEST_SHM_SIGNATURE) {
		pr_err("shmem factory signature incorrect: %ld != %lu\n",
			GUEST_SHM_SIGNATURE, dev->guest_factory->signature);
		iounmap(dev->guest_factory);
		dev->guest_factory = NULL; /* don't leave a dangling mapping */
		return 0;
	}

	dev->guest_intr = dev->guest_factory->vector;

	/*
	 * Set the name field on the factory page to identify the shared memory
	 * region
	 */
	for (i = 0; i < strlen(name) && i < GUEST_SHM_MAX_NAME - 1; i++)
		dev->guest_factory->name[i] = name[i];
	dev->guest_factory->name[i] = (char) 0;

	guest_shm_create(dev->guest_factory, pages);

	/* See if we successfully created/attached to the region. */
	if (dev->guest_factory->status != GSS_OK) {
		pr_err("create failed: %d\n", dev->guest_factory->status);
		iounmap(dev->guest_factory);
		dev->guest_factory = NULL;
		return 0;
	}

	pr_debug("shm creation size %x\n", dev->guest_factory->size);

	return dev->guest_factory->shmem;
}
|
||||
|
||||
/*
 * Wire up receive handling for a channel: a tasklet to drain the pipe
 * and a shared IRQ for the guest_shm doorbell.
 * Returns 0 or the request_irq() error.
 */
static int create_dispatcher(struct physical_channel *pchan, int id)
{
	struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;
	int ret;

	tasklet_init(&dev->task, physical_channel_rx_dispatch,
		(unsigned long) pchan);

	/* IRQF_SHARED: all channels share the single doorbell line */
	ret = request_irq(hab_driver.irq, shm_irq_handler, IRQF_SHARED,
		hab_driver.devp[id].name, pchan);

	if (ret)
		pr_err("request_irq for %s failed: %d\n",
			hab_driver.devp[id].name, ret);

	return ret;
}
|
||||
|
||||
static struct physical_channel *habhyp_commdev_alloc(int id)
|
||||
{
|
||||
struct qvm_channel *dev;
|
||||
struct physical_channel *pchan = NULL;
|
||||
int ret = 0, channel = 0;
|
||||
char *shmdata;
|
||||
uint32_t pipe_alloc_size =
|
||||
hab_pipe_calc_required_bytes(PIPE_SHMEM_SIZE);
|
||||
uint32_t pipe_alloc_pages =
|
||||
(pipe_alloc_size + PAGE_SIZE - 1) / PAGE_SIZE;
|
||||
uint64_t paddr;
|
||||
int temp;
|
||||
int total_pages;
|
||||
struct page **pages;
|
||||
|
||||
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
|
||||
if (!dev)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
spin_lock_init(&dev->io_lock);
|
||||
|
||||
paddr = get_guest_factory_paddr(dev,
|
||||
hab_driver.devp[id].name,
|
||||
pipe_alloc_pages);
|
||||
|
||||
total_pages = dev->guest_factory->size + 1;
|
||||
pages = kmalloc_array(total_pages, sizeof(struct page *), GFP_KERNEL);
|
||||
if (!pages) {
|
||||
ret = -ENOMEM;
|
||||
goto err;
|
||||
}
|
||||
|
||||
for (temp = 0; temp < total_pages; temp++)
|
||||
pages[temp] = pfn_to_page((paddr / PAGE_SIZE) + temp);
|
||||
|
||||
dev->guest_ctrl = vmap(pages, total_pages, VM_MAP, PAGE_KERNEL);
|
||||
if (!dev->guest_ctrl) {
|
||||
ret = -ENOMEM;
|
||||
kfree(pages);
|
||||
goto err;
|
||||
}
|
||||
|
||||
shmdata = (char *)dev->guest_ctrl + PAGE_SIZE;
|
||||
dev->idx = dev->guest_ctrl->idx;
|
||||
|
||||
kfree(pages);
|
||||
|
||||
dev->pipe = (struct hab_pipe *) shmdata;
|
||||
dev->pipe_ep = hab_pipe_init(dev->pipe, PIPE_SHMEM_SIZE,
|
||||
dev->be ? 0 : 1);
|
||||
|
||||
pchan = hab_pchan_alloc(&hab_driver.devp[id], dev->be);
|
||||
if (!pchan) {
|
||||
ret = -ENOMEM;
|
||||
goto err;
|
||||
}
|
||||
|
||||
pchan->closed = 0;
|
||||
pchan->hyp_data = (void *)dev;
|
||||
|
||||
dev->channel = channel;
|
||||
|
||||
ret = create_dispatcher(pchan, id);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
|
||||
return pchan;
|
||||
|
||||
err:
|
||||
kfree(dev);
|
||||
|
||||
if (pchan)
|
||||
hab_pchan_put(pchan);
|
||||
pr_err("habhyp_commdev_alloc failed: %d\n", ret);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
/*
 * Bring up one qvm physical channel per hab device.  A failure on one
 * device does not abort the others; -EAGAIN is returned if any channel
 * could not be created so the caller may retry later.
 */
int hab_hypervisor_register(void)
{
	int ret = 0, i;

	/* NOTE(review): flag cleared here; semantics defined in hab.h */
	hab_driver.b_server_dom = 0;

	/*
	 * Can still attempt to instantiate more channels if one fails.
	 * Others can be retried later.
	 */
	for (i = 0; i < hab_driver.ndevices; i++) {
		if (IS_ERR(habhyp_commdev_alloc(i)))
			ret = -EAGAIN;
	}

	return ret;
}
|
||||
|
||||
/* No per-hypervisor teardown implemented for the qvm shmem backend. */
void hab_hypervisor_unregister(void)
{
}
|
||||
|
||||
static int hab_shmem_probe(struct platform_device *pdev)
|
||||
{
|
||||
int irq = platform_get_irq(pdev, 0);
|
||||
|
||||
if (irq > 0)
|
||||
hab_driver.irq = irq;
|
||||
else
|
||||
hab_driver.irq = DEFAULT_HAB_SHMEM_IRQ;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Nothing to tear down; the shared region lives for the guest's lifetime. */
static int hab_shmem_remove(struct platform_device *pdev)
{
	return 0;
}

/* Matches the qvm,guest_shm DT node (see bindings/soc/qcom/guest_shm.txt). */
static const struct of_device_id hab_shmem_match_table[] = {
	{.compatible = "qvm,guest_shm"},
	{},
};

static struct platform_driver hab_shmem_driver = {
	.probe = hab_shmem_probe,
	.remove = hab_shmem_remove,
	.driver = {
		.name = "hab_shmem",
		.of_match_table = of_match_ptr(hab_shmem_match_table),
	},
};

static int __init hab_shmem_init(void)
{
	return platform_driver_register(&hab_shmem_driver);
}

static void __exit hab_shmem_exit(void)
{
	platform_driver_unregister(&hab_shmem_driver);
}

/*
 * core_initcall so the DT-provided IRQ is discovered early --
 * presumably before channel creation needs it; confirm init ordering.
 */
core_initcall(hab_shmem_init);
module_exit(hab_shmem_exit);

MODULE_DESCRIPTION("Hypervisor shared memory driver");
MODULE_LICENSE("GPL v2");
|
47
drivers/soc/qcom/hab/hab_qvm.h
Normal file
47
drivers/soc/qcom/hab/hab_qvm.h
Normal file
|
@ -0,0 +1,47 @@
|
|||
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
#ifndef __HAB_QNX_H
#define __HAB_QNX_H
#include "hab.h"
#include "hab_pipe.h"

#include <guest_shm.h>
#include <linux/stddef.h>

#define PULSE_CODE_NOTIFY 0
#define PULSE_CODE_INPUT 1

/* Per-physical-channel state for the QVM guest_shm transport. */
struct qvm_channel {
	int be;				/* nonzero when acting as backend */

	struct hab_pipe *pipe;		/* pipe layout in shared memory */
	struct hab_pipe_endpoint *pipe_ep; /* our side of the pipe */
	spinlock_t io_lock;
	struct tasklet_struct task;	/* rx dispatch bottom half */
	struct guest_shm_factory *guest_factory; /* ioremapped factory page */
	struct guest_shm_control *guest_ctrl;	 /* vmapped control page */
	uint32_t idx;			/* our bit in guest_ctrl->status */

	int channel;
	int coid;

	unsigned int guest_intr;
	unsigned int guest_iid;
};

/* Shared mem size in each direction for communication pipe */
#define PIPE_SHMEM_SIZE (128 * 1024)

void *qnx_hyp_rx_dispatch(void *data);

#endif /* __HAB_QNX_H */
|
184
drivers/soc/qcom/hab/hab_vchan.c
Normal file
184
drivers/soc/qcom/hab/hab_vchan.c
Normal file
|
@ -0,0 +1,184 @@
|
|||
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
#include "hab.h"
|
||||
|
||||
/*
 * Create a virtual channel on 'pchan' owned by 'ctx'.
 *
 * Allocates a per-pchan IDR slot (1..255) and composes the public
 * vchan id from the slot, the device MMID and the peer domain id.
 * Takes references on both the pchan and the context; both are
 * dropped in the vchan's release path.  Returns NULL on failure.
 */
struct virtual_channel *
hab_vchan_alloc(struct uhab_context *ctx, struct physical_channel *pchan)
{
	int id;
	struct virtual_channel *vchan;

	if (!pchan || !ctx)
		return NULL;

	vchan = kzalloc(sizeof(*vchan), GFP_KERNEL);
	if (!vchan)
		return NULL;

	/* This should be the first thing we do in this function */
	idr_preload(GFP_KERNEL);
	spin_lock_bh(&pchan->vid_lock);
	id = idr_alloc(&pchan->vchan_idr, vchan, 1, 256, GFP_NOWAIT);
	spin_unlock_bh(&pchan->vid_lock);
	idr_preload_end();

	if (id < 0) {
		kfree(vchan);
		return NULL;
	}
	mb(); /* id must be generated done before pchan_get */

	hab_pchan_get(pchan);
	vchan->pchan = pchan;
	vchan->id = ((id << HAB_VCID_ID_SHIFT) & HAB_VCID_ID_MASK) |
		((pchan->habdev->id << HAB_VCID_MMID_SHIFT) &
		HAB_VCID_MMID_MASK) |
		((pchan->dom_id << HAB_VCID_DOMID_SHIFT) &
		HAB_VCID_DOMID_MASK);
	spin_lock_init(&vchan->rx_lock);
	INIT_LIST_HEAD(&vchan->rx_list);
	init_waitqueue_head(&vchan->rx_queue);

	kref_init(&vchan->refcount);
	kref_init(&vchan->usagecnt);
	/* inherit closed state so callers see a dead pchan immediately */
	vchan->otherend_closed = pchan->closed;

	hab_ctx_get(ctx);
	vchan->ctx = ctx;

	return vchan;
}
|
||||
|
||||
static void
|
||||
hab_vchan_free(struct kref *ref)
|
||||
{
|
||||
int found;
|
||||
struct virtual_channel *vchan =
|
||||
container_of(ref, struct virtual_channel, refcount);
|
||||
struct hab_message *message, *msg_tmp;
|
||||
struct export_desc *exp;
|
||||
struct physical_channel *pchan = vchan->pchan;
|
||||
struct uhab_context *ctx = vchan->ctx;
|
||||
|
||||
list_for_each_entry_safe(message, msg_tmp, &vchan->rx_list, node) {
|
||||
list_del(&message->node);
|
||||
hab_msg_free(message);
|
||||
}
|
||||
|
||||
do {
|
||||
found = 0;
|
||||
write_lock(&ctx->exp_lock);
|
||||
list_for_each_entry(exp, &ctx->exp_whse, node) {
|
||||
if (exp->vcid_local == vchan->id) {
|
||||
list_del(&exp->node);
|
||||
found = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
write_unlock(&ctx->exp_lock);
|
||||
if (found) {
|
||||
habmem_hyp_revoke(exp->payload, exp->payload_count);
|
||||
habmem_remove_export(exp);
|
||||
}
|
||||
} while (found);
|
||||
|
||||
do {
|
||||
found = 0;
|
||||
spin_lock_bh(&ctx->imp_lock);
|
||||
list_for_each_entry(exp, &ctx->imp_whse, node) {
|
||||
if (exp->vcid_remote == vchan->id) {
|
||||
list_del(&exp->node);
|
||||
found = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock_bh(&ctx->imp_lock);
|
||||
if (found) {
|
||||
habmm_imp_hyp_unmap(ctx->import_ctx,
|
||||
exp->import_index,
|
||||
exp->payload_count,
|
||||
ctx->kernel);
|
||||
ctx->import_total--;
|
||||
kfree(exp);
|
||||
}
|
||||
} while (found);
|
||||
|
||||
spin_lock_bh(&pchan->vid_lock);
|
||||
idr_remove(&pchan->vchan_idr, HAB_VCID_GET_ID(vchan->id));
|
||||
spin_unlock_bh(&pchan->vid_lock);
|
||||
|
||||
hab_pchan_put(pchan);
|
||||
hab_ctx_put(ctx);
|
||||
|
||||
kfree(vchan);
|
||||
}
|
||||
|
||||
struct virtual_channel*
|
||||
hab_vchan_get(struct physical_channel *pchan, uint32_t vchan_id)
|
||||
{
|
||||
struct virtual_channel *vchan;
|
||||
|
||||
spin_lock_bh(&pchan->vid_lock);
|
||||
vchan = idr_find(&pchan->vchan_idr, HAB_VCID_GET_ID(vchan_id));
|
||||
if (vchan)
|
||||
if (!kref_get_unless_zero(&vchan->refcount))
|
||||
vchan = NULL;
|
||||
spin_unlock_bh(&pchan->vid_lock);
|
||||
|
||||
return vchan;
|
||||
}
|
||||
|
||||
void hab_vchan_stop(struct virtual_channel *vchan)
|
||||
{
|
||||
if (vchan) {
|
||||
vchan->otherend_closed = 1;
|
||||
wake_up_interruptible(&vchan->rx_queue);
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Close the channel from this side: notify the remote end first (the
 * close message must go out while the channel is still usable), then
 * mark it closed locally and wake blocked receivers.
 */
void hab_vchan_stop_notify(struct virtual_channel *vchan)
{
	hab_send_close_msg(vchan);
	hab_vchan_stop(vchan);
}
|
||||
|
||||
|
||||
int hab_vchan_find_domid(struct virtual_channel *vchan)
|
||||
{
|
||||
return vchan ? vchan->pchan->dom_id : -1;
|
||||
}
|
||||
|
||||
static void
|
||||
hab_vchan_free_deferred(struct work_struct *work)
|
||||
{
|
||||
struct virtual_channel *vchan =
|
||||
container_of(work, struct virtual_channel, work);
|
||||
|
||||
hab_vchan_free(&vchan->refcount);
|
||||
}
|
||||
|
||||
/*
 * kref release callback: defer the real teardown to a workqueue.
 * NOTE(review): presumably because hab_vchan_put() can run in atomic
 * context while hab_vchan_free() performs potentially-sleeping work
 * (hypervisor revoke/unmap) — confirm against callers.
 */
static void
hab_vchan_schedule_free(struct kref *ref)
{
	struct virtual_channel *vchan =
		container_of(ref, struct virtual_channel, refcount);

	INIT_WORK(&vchan->work, hab_vchan_free_deferred);
	schedule_work(&vchan->work);
}
|
||||
|
||||
void hab_vchan_put(struct virtual_channel *vchan)
|
||||
{
|
||||
if (vchan)
|
||||
kref_put(&vchan->refcount, hab_vchan_schedule_free);
|
||||
}
|
140
drivers/soc/qcom/hab/khab.c
Normal file
140
drivers/soc/qcom/hab/khab.c
Normal file
|
@ -0,0 +1,140 @@
|
|||
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
#include <linux/module.h>
|
||||
#include "hab.h"
|
||||
|
||||
/*
 * Open a kernel-side HAB virtual channel for multimedia id @mm_ip_id.
 * On success *handle receives the new vcid.
 * NOTE(review): @timeout is currently ignored by this wrapper.
 */
int32_t habmm_socket_open(int32_t *handle, uint32_t mm_ip_id,
		uint32_t timeout, uint32_t flags)
{
	return hab_vchan_open(hab_driver.kctx, mm_ip_id, handle, flags);
}
EXPORT_SYMBOL(habmm_socket_open);
|
||||
|
||||
/* Close a kernel-side HAB virtual channel. Always returns 0. */
int32_t habmm_socket_close(int32_t handle)
{
	hab_vchan_close(hab_driver.kctx, handle);
	return 0;
}
EXPORT_SYMBOL(habmm_socket_close);
|
||||
|
||||
/*
 * Send @size_bytes from @src_buff over the kernel-side HAB channel
 * @handle. Returns 0 on success or a negative errno.
 *
 * Fix: the original built a local struct hab_send that was populated
 * and then never used (hab_vchan_send() is called with the raw
 * arguments); the dead structure has been removed.
 */
int32_t habmm_socket_send(int32_t handle, void *src_buff,
		uint32_t size_bytes, uint32_t flags)
{
	return hab_vchan_send(hab_driver.kctx, handle,
			size_bytes, src_buff, flags);
}
EXPORT_SYMBOL(habmm_socket_send);
|
||||
|
||||
/*
 * Receive one message from the kernel-side HAB channel @handle.
 *
 * On entry *size_bytes is the capacity of @dst_buff; on success it is
 * updated to the actual message size. Returns 0, or a negative errno.
 *
 * NOTE(review): if the pending message is larger than *size_bytes the
 * message is freed and its data is lost — the caller gets -EINVAL and
 * *size_bytes == 0 with no way to re-read it. @timeout is currently
 * ignored; blocking behavior is controlled only by @flags.
 */
int32_t habmm_socket_recv(int32_t handle, void *dst_buff, uint32_t *size_bytes,
		uint32_t timeout, uint32_t flags)
{
	int ret = 0;
	struct hab_message *msg;

	if (!size_bytes || !dst_buff)
		return -EINVAL;

	msg = hab_vchan_recv(hab_driver.kctx, handle, flags);

	if (IS_ERR(msg)) {
		*size_bytes = 0;
		return PTR_ERR(msg);
	}

	if (*size_bytes < msg->sizebytes) {
		/* caller's buffer too small: message is dropped below */
		*size_bytes = 0;
		ret = -EINVAL;
	} else {
		memcpy(dst_buff, msg->data, msg->sizebytes);
		*size_bytes = msg->sizebytes;
	}

	hab_msg_free(msg);
	return ret;
}
EXPORT_SYMBOL(habmm_socket_recv);
|
||||
|
||||
/*
 * Export @size_bytes at @buff_to_share over channel @handle so the
 * remote VM can import it. On success *export_id receives the new
 * export id. Returns 0 or a negative errno.
 *
 * Fix: the @flags argument was accepted but never copied into the
 * request (param.flags stayed 0), unlike habmm_import() which does
 * forward it. It is now passed through.
 */
int32_t habmm_export(int32_t handle, void *buff_to_share, uint32_t size_bytes,
		uint32_t *export_id, uint32_t flags)
{
	int ret;
	struct hab_export param = {0};

	if (!export_id)
		return -EINVAL;

	param.vcid = handle;
	param.buffer = (uint64_t)(uintptr_t)buff_to_share;
	param.sizebytes = size_bytes;
	param.flags = flags;	/* was silently dropped */

	ret = hab_mem_export(hab_driver.kctx, &param, 1);

	*export_id = param.exportid;
	return ret;
}
EXPORT_SYMBOL(habmm_export);
|
||||
|
||||
/*
 * Tear down a previous habmm_export() identified by @export_id on
 * channel @handle. Returns 0 or a negative errno.
 *
 * Fix: the @flags argument was accepted but never copied into the
 * request; it is now forwarded for consistency with the rest of the
 * habmm API.
 */
int32_t habmm_unexport(int32_t handle, uint32_t export_id, uint32_t flags)
{
	struct hab_unexport param = {0};

	param.vcid = handle;
	param.exportid = export_id;
	param.flags = flags;	/* was silently dropped */

	return hab_mem_unexport(hab_driver.kctx, &param, 1);
}
EXPORT_SYMBOL(habmm_unexport);
|
||||
|
||||
/*
 * Map a remote export (@export_id, @size_bytes) into this VM. On
 * success *buff_shared receives the kernel virtual address of the
 * mapping. Returns 0 or a negative errno.
 *
 * Fix: the original tested IS_ERR(ret) on a plain int return code.
 * IS_ERR() is defined for error-encoded pointers, not errno ints;
 * hab_mem_import() returns 0 on success or a negative errno, so test
 * the value directly.
 */
int32_t habmm_import(int32_t handle, void **buff_shared, uint32_t size_bytes,
		uint32_t export_id, uint32_t flags)
{
	int ret;
	struct hab_import param = {0};

	if (!buff_shared)
		return -EINVAL;

	param.vcid = handle;
	param.sizebytes = size_bytes;
	param.exportid = export_id;
	param.flags = flags;

	ret = hab_mem_import(hab_driver.kctx, &param, 1);
	if (!ret)
		*buff_shared = (void *)(uintptr_t)param.kva;

	return ret;
}
EXPORT_SYMBOL(habmm_import);
|
||||
|
||||
int32_t habmm_unimport(int32_t handle,
|
||||
uint32_t export_id,
|
||||
void *buff_shared,
|
||||
uint32_t flags)
|
||||
{
|
||||
struct hab_unimport param = {0};
|
||||
|
||||
param.vcid = handle;
|
||||
param.exportid = export_id;
|
||||
param.kva = (uint64_t)(uintptr_t)buff_shared;
|
||||
|
||||
return hab_mem_unimport(hab_driver.kctx, ¶m, 1);
|
||||
}
|
||||
EXPORT_SYMBOL(habmm_unimport);
|
95
drivers/soc/qcom/hab/qvm_comm.c
Normal file
95
drivers/soc/qcom/hab/qvm_comm.c
Normal file
|
@ -0,0 +1,95 @@
|
|||
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
#include "hab.h"
|
||||
#include "hab_qvm.h"
|
||||
|
||||
static inline void habhyp_notify(void *commdev)
|
||||
{
|
||||
struct qvm_channel *dev = (struct qvm_channel *)commdev;
|
||||
|
||||
if (dev && dev->guest_ctrl)
|
||||
dev->guest_ctrl->notify = ~0;
|
||||
}
|
||||
|
||||
int physical_channel_read(struct physical_channel *pchan,
|
||||
void *payload,
|
||||
size_t read_size)
|
||||
{
|
||||
struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;
|
||||
|
||||
if (dev)
|
||||
return hab_pipe_read(dev->pipe_ep, payload, read_size);
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Write one message (header plus optional payload) into the
 * shared-memory TX ring and notify the peer.
 *
 * Returns 0 on success, -EINVAL if the message can never fit in the
 * ring, -EAGAIN when there is currently not enough free space (the
 * caller may retry later), or -EIO on a partial pipe write.
 */
int physical_channel_send(struct physical_channel *pchan,
		struct hab_header *header,
		void *payload)
{
	int sizebytes = HAB_HEADER_GET_SIZE(*header);
	struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;
	int total_size = sizeof(*header) + sizebytes;

	if (total_size > dev->pipe_ep->tx_info.sh_buf->size)
		return -EINVAL; /* too much data for ring */

	spin_lock_bh(&dev->io_lock);

	/* free space = ring size - (bytes produced - bytes consumed) */
	if ((dev->pipe_ep->tx_info.sh_buf->size -
		(dev->pipe_ep->tx_info.wr_count -
		dev->pipe_ep->tx_info.sh_buf->rd_count)) < total_size) {
		spin_unlock_bh(&dev->io_lock);
		return -EAGAIN; /* not enough free space */
	}

	/* Header first; the receiver reads it to learn the payload size. */
	if (hab_pipe_write(dev->pipe_ep,
		(unsigned char *)header,
		sizeof(*header)) != sizeof(*header)) {
		spin_unlock_bh(&dev->io_lock);
		return -EIO;
	}

	if (sizebytes) {
		if (hab_pipe_write(dev->pipe_ep,
			(unsigned char *)payload,
			sizebytes) != sizebytes) {
			spin_unlock_bh(&dev->io_lock);
			return -EIO;
		}
	}

	/* Publish the new write index, then ring the peer's doorbell. */
	hab_pipe_write_commit(dev->pipe_ep);
	spin_unlock_bh(&dev->io_lock);
	habhyp_notify(dev);

	return 0;
}
|
||||
|
||||
/*
 * Drain every complete message currently in the RX pipe. @data is the
 * physical_channel pointer cast to unsigned long (the signature
 * suggests a tasklet callback — NOTE(review): confirm at the
 * registration site). Each iteration reads one header and hands it to
 * hab_msg_recv(), which consumes the associated payload; the loop
 * exits when no full header is available.
 */
void physical_channel_rx_dispatch(unsigned long data)
{
	struct hab_header header;
	struct physical_channel *pchan = (struct physical_channel *)data;
	struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;

	spin_lock_bh(&pchan->rxbuf_lock);
	while (1) {
		if (hab_pipe_read(dev->pipe_ep,
			(unsigned char *)&header,
			sizeof(header)) != sizeof(header))
			break; /* no data available */

		hab_msg_recv(pchan, &header);
	}
	spin_unlock_bh(&pchan->rxbuf_lock);
}
|
38
include/linux/habmm.h
Normal file
38
include/linux/habmm.h
Normal file
|
@ -0,0 +1,38 @@
|
|||
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
/* NOTE(review): this include sits outside the _HABMM_H guard below;
 * harmless (the uapi header has its own guard) but unconventional.
 */
#include <uapi/linux/habmm.h>

#ifndef _HABMM_H
#define _HABMM_H

/*
 * Kernel-side HAB (Hypervisor Abstraction) client API.
 * Handles are virtual-channel ids; flags/timeouts use the constants
 * from <uapi/linux/habmm.h>.
 */

/* Open/close a virtual channel for a multimedia id (MM_*). */
int32_t habmm_socket_open(int32_t *handle, uint32_t mm_ip_id,
		uint32_t timeout, uint32_t flags);
int32_t habmm_socket_close(int32_t handle);
/* Message send/receive over an open channel. */
int32_t habmm_socket_send(int32_t handle, void *src_buff, uint32_t size_bytes,
		uint32_t flags);
int32_t habmm_socket_recv(int32_t handle, void *dst_buff, uint32_t *size_bytes,
		uint32_t timeout, uint32_t flags);
/* Addressed variants for multi-endpoint channels. */
int32_t habmm_socket_sendto(int32_t handle, void *src_buff, uint32_t size_bytes,
		int32_t remote_handle, uint32_t flags);
int32_t habmm_socket_recvfrom(int32_t handle, void *dst_buff,
		uint32_t *size_bytes, uint32_t timeout,
		int32_t *remote_handle, uint32_t flags);
/* Cross-VM shared-memory export/import lifecycle. */
int32_t habmm_export(int32_t handle, void *buff_to_share, uint32_t size_bytes,
		uint32_t *export_id, uint32_t flags);
int32_t habmm_unexport(int32_t handle, uint32_t export_id, uint32_t flags);
int32_t habmm_import(int32_t handle, void **buff_shared, uint32_t size_bytes,
		uint32_t export_id, uint32_t flags);
int32_t habmm_unimport(int32_t handle, uint32_t export_id, void *buff_shared,
		uint32_t flags);

#endif
|
|
@ -528,3 +528,4 @@ header-y += ipa_qmi_service_v01.h
|
|||
header-y += rmnet_ipa_fd_ioctl.h
|
||||
header-y += msm_ipa.h
|
||||
header-y += smcinvoke.h
|
||||
header-y += habmm.h
|
||||
|
|
143
include/uapi/linux/habmm.h
Normal file
143
include/uapi/linux/habmm.h
Normal file
|
@ -0,0 +1,143 @@
|
|||
#ifndef HABMM_H
#define HABMM_H

#include <linux/types.h>

/* ioctl argument for IOCTL_HAB_SEND: user buffer, channel, size. */
struct hab_send {
	__u64 data;
	__s32 vcid;
	__u32 sizebytes;
	__u32 flags;
};

/* ioctl argument for IOCTL_HAB_RECV; sizebytes is in/out. */
struct hab_recv {
	__u64 data;
	__s32 vcid;
	__u32 sizebytes;
	__u32 flags;
};

/* ioctl argument for IOCTL_HAB_VC_OPEN; vcid is returned. */
struct hab_open {
	__s32 vcid;
	__u32 mmid;
	__u32 timeout;
	__u32 flags;
};

/* ioctl argument for IOCTL_HAB_VC_CLOSE. */
struct hab_close {
	__s32 vcid;
	__u32 flags;
};

/* ioctl argument for IOCTL_HAB_VC_EXPORT; exportid is returned. */
struct hab_export {
	__u64 buffer;
	__s32 vcid;
	__u32 sizebytes;
	__u32 exportid;
	__u32 flags;
};

/* ioctl argument for IOCTL_HAB_VC_IMPORT; index/kva are returned. */
struct hab_import {
	__u64 index;
	__u64 kva;
	__s32 vcid;
	__u32 sizebytes;
	__u32 exportid;
	__u32 flags;
};

/* ioctl argument for IOCTL_HAB_VC_UNEXPORT. */
struct hab_unexport {
	__s32 vcid;
	__u32 exportid;
	__u32 flags;
};

/* ioctl argument for IOCTL_HAB_VC_UNIMPORT; kva from the import. */
struct hab_unimport {
	__s32 vcid;
	__u32 exportid;
	__u64 kva;
	__u32 flags;
};

#define HAB_IOC_TYPE 0x0A
#define HAB_MAX_MSG_SIZEBYTES 0x1000
#define HAB_MAX_EXPORT_SIZE 0x8000000

/* Compose a multimedia id: low 16 bits major, next 8 bits minor. */
#define HAB_MMID_CREATE(major, minor) ((major&0xFFFF) | ((minor&0xFF)<<16))

/* Multimedia id ranges; each *_START/*_END pair brackets a service. */
#define MM_AUD_START 100
#define MM_AUD_1 101
#define MM_AUD_2 102
#define MM_AUD_3 103
#define MM_AUD_4 104
#define MM_AUD_END 105

#define MM_CAM_START 200
#define MM_CAM 201
#define MM_CAM_END 202

#define MM_DISP_START 300
#define MM_DISP_1 301
#define MM_DISP_2 302
#define MM_DISP_3 303
#define MM_DISP_4 304
#define MM_DISP_5 305
#define MM_DISP_END 306

#define MM_GFX_START 400
#define MM_GFX 401
#define MM_GFX_END 402

#define MM_VID_START 500
#define MM_VID 501
#define MM_VID_END 502

#define MM_MISC_START 600
#define MM_MISC 601
#define MM_MISC_END 602

#define MM_QCPE_START 700
#define MM_QCPE_VM1 701
#define MM_QCPE_VM2 702
#define MM_QCPE_VM3 703
#define MM_QCPE_VM4 704
#define MM_QCPE_END 705
#define MM_ID_MAX 706

/* habmm_socket_open() topology flags. */
#define HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_FE 0x00000000
#define HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_DOMU 0x00000001
#define HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_MULTI_DOMUS 0x00000002

#define HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING 0x00000001

#define HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING 0x00000001

#define HABMM_EXP_MEM_TYPE_DMA 0x00000001

#define HABMM_IMPORT_FLAGS_CACHED 0x00000001

/* ioctl command set; _IOW/_IOWR direction matches which structs
 * return data to userspace. */
#define IOCTL_HAB_SEND \
	_IOW(HAB_IOC_TYPE, 0x2, struct hab_send)

#define IOCTL_HAB_RECV \
	_IOWR(HAB_IOC_TYPE, 0x3, struct hab_recv)

#define IOCTL_HAB_VC_OPEN \
	_IOWR(HAB_IOC_TYPE, 0x4, struct hab_open)

#define IOCTL_HAB_VC_CLOSE \
	_IOW(HAB_IOC_TYPE, 0x5, struct hab_close)

#define IOCTL_HAB_VC_EXPORT \
	_IOWR(HAB_IOC_TYPE, 0x6, struct hab_export)

#define IOCTL_HAB_VC_IMPORT \
	_IOWR(HAB_IOC_TYPE, 0x7, struct hab_import)

#define IOCTL_HAB_VC_UNEXPORT \
	_IOW(HAB_IOC_TYPE, 0x8, struct hab_unexport)

#define IOCTL_HAB_VC_UNIMPORT \
	_IOW(HAB_IOC_TYPE, 0x9, struct hab_unimport)

#endif /* HABMM_H */
|
Loading…
Add table
Reference in a new issue