Merge "soc: qcom: hab: add some more logs"

This commit is contained in:
Linux Build Service Account 2018-01-09 15:40:00 -08:00 committed by Gerrit - the friendly Code Review server
commit fa18c55b4d
15 changed files with 944 additions and 159 deletions

View file

@ -9,6 +9,7 @@ msm_hab-objs = \
hab_mem_linux.o \
hab_pipe.o \
qvm_comm.o \
hab_qvm.o
hab_qvm.o \
hab_parser.o
obj-$(CONFIG_MSM_HAB) += msm_hab.o

View file

@ -21,25 +21,32 @@
.openlock = __SPIN_LOCK_UNLOCKED(&hab_devices[__num__].openlock)\
}
/* the following has to match habmm definitions, order does not matter */
/*
* The following has to match habmm definitions, order does not matter if
* hab config does not care either. When hab config is not present, the default
* is as guest VM all pchans are pchan opener (FE)
*/
static struct hab_device hab_devices[] = {
HAB_DEVICE_CNSTR(DEVICE_AUD1_NAME, MM_AUD_1, 0),
HAB_DEVICE_CNSTR(DEVICE_AUD2_NAME, MM_AUD_2, 1),
HAB_DEVICE_CNSTR(DEVICE_AUD3_NAME, MM_AUD_3, 2),
HAB_DEVICE_CNSTR(DEVICE_AUD4_NAME, MM_AUD_4, 3),
HAB_DEVICE_CNSTR(DEVICE_CAM_NAME, MM_CAM, 4),
HAB_DEVICE_CNSTR(DEVICE_DISP1_NAME, MM_DISP_1, 5),
HAB_DEVICE_CNSTR(DEVICE_DISP2_NAME, MM_DISP_2, 6),
HAB_DEVICE_CNSTR(DEVICE_DISP3_NAME, MM_DISP_3, 7),
HAB_DEVICE_CNSTR(DEVICE_DISP4_NAME, MM_DISP_4, 8),
HAB_DEVICE_CNSTR(DEVICE_DISP5_NAME, MM_DISP_5, 9),
HAB_DEVICE_CNSTR(DEVICE_GFX_NAME, MM_GFX, 10),
HAB_DEVICE_CNSTR(DEVICE_VID_NAME, MM_VID, 11),
HAB_DEVICE_CNSTR(DEVICE_MISC_NAME, MM_MISC, 12),
HAB_DEVICE_CNSTR(DEVICE_QCPE1_NAME, MM_QCPE_VM1, 13),
HAB_DEVICE_CNSTR(DEVICE_QCPE2_NAME, MM_QCPE_VM2, 14),
HAB_DEVICE_CNSTR(DEVICE_QCPE3_NAME, MM_QCPE_VM3, 15),
HAB_DEVICE_CNSTR(DEVICE_QCPE4_NAME, MM_QCPE_VM4, 16)
HAB_DEVICE_CNSTR(DEVICE_CAM1_NAME, MM_CAM_1, 4),
HAB_DEVICE_CNSTR(DEVICE_CAM2_NAME, MM_CAM_2, 5),
HAB_DEVICE_CNSTR(DEVICE_DISP1_NAME, MM_DISP_1, 6),
HAB_DEVICE_CNSTR(DEVICE_DISP2_NAME, MM_DISP_2, 7),
HAB_DEVICE_CNSTR(DEVICE_DISP3_NAME, MM_DISP_3, 8),
HAB_DEVICE_CNSTR(DEVICE_DISP4_NAME, MM_DISP_4, 9),
HAB_DEVICE_CNSTR(DEVICE_DISP5_NAME, MM_DISP_5, 10),
HAB_DEVICE_CNSTR(DEVICE_GFX_NAME, MM_GFX, 11),
HAB_DEVICE_CNSTR(DEVICE_VID_NAME, MM_VID, 12),
HAB_DEVICE_CNSTR(DEVICE_MISC_NAME, MM_MISC, 13),
HAB_DEVICE_CNSTR(DEVICE_QCPE1_NAME, MM_QCPE_VM1, 14),
HAB_DEVICE_CNSTR(DEVICE_QCPE2_NAME, MM_QCPE_VM2, 15),
HAB_DEVICE_CNSTR(DEVICE_QCPE3_NAME, MM_QCPE_VM3, 16),
HAB_DEVICE_CNSTR(DEVICE_QCPE4_NAME, MM_QCPE_VM4, 17),
HAB_DEVICE_CNSTR(DEVICE_CLK1_NAME, MM_CLK_VM1, 18),
HAB_DEVICE_CNSTR(DEVICE_CLK2_NAME, MM_CLK_VM2, 19),
};
struct hab_driver hab_driver = {
@ -71,6 +78,7 @@ struct uhab_context *hab_ctx_alloc(int kernel)
kref_init(&ctx->refcount);
ctx->import_ctx = habmem_imp_hyp_open();
if (!ctx->import_ctx) {
pr_err("habmem_imp_hyp_open failed\n");
kfree(ctx);
return NULL;
}
@ -148,6 +156,7 @@ struct virtual_channel *frontend_open(struct uhab_context *ctx,
dev = find_hab_device(mm_id);
if (dev == NULL) {
pr_err("HAB device %d is not initialized\n", mm_id);
ret = -EINVAL;
goto err;
}
@ -161,6 +170,7 @@ struct virtual_channel *frontend_open(struct uhab_context *ctx,
vchan = hab_vchan_alloc(ctx, pchan);
if (!vchan) {
pr_err("vchan alloc failed\n");
ret = -ENOMEM;
goto err;
}
@ -187,6 +197,9 @@ struct virtual_channel *frontend_open(struct uhab_context *ctx,
vchan->otherend_id = recv_request->vchan_id;
hab_open_request_free(recv_request);
vchan->session_id = open_id;
pr_debug("vchan->session_id:%d\n", vchan->session_id);
/* Send Ack sequence */
hab_open_request_init(&request, HAB_PAYLOAD_TYPE_ACK, pchan,
0, sub_id, open_id);
@ -221,6 +234,7 @@ struct virtual_channel *backend_listen(struct uhab_context *ctx,
dev = find_hab_device(mm_id);
if (dev == NULL) {
pr_err("failed to find dev based on id %d\n", mm_id);
ret = -EINVAL;
goto err;
}
@ -249,6 +263,9 @@ struct virtual_channel *backend_listen(struct uhab_context *ctx,
vchan->otherend_id = otherend_vchan_id;
vchan->session_id = open_id;
pr_debug("vchan->session_id:%d\n", vchan->session_id);
/* Send Init-Ack sequence */
hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_ACK,
pchan, vchan->id, sub_id, open_id);
@ -259,7 +276,7 @@ struct virtual_channel *backend_listen(struct uhab_context *ctx,
/* Wait for Ack sequence */
hab_open_request_init(&request, HAB_PAYLOAD_TYPE_ACK,
pchan, 0, sub_id, open_id);
ret = hab_open_listen(ctx, dev, &request, &recv_request, HZ);
ret = hab_open_listen(ctx, dev, &request, &recv_request, 0);
if (ret != -EAGAIN)
break;
@ -280,6 +297,7 @@ struct virtual_channel *backend_listen(struct uhab_context *ctx,
hab_pchan_put(pchan);
return vchan;
err:
pr_err("listen on mmid %d failed\n", mm_id);
if (vchan)
hab_vchan_put(vchan);
if (pchan)
@ -304,12 +322,19 @@ long hab_vchan_send(struct uhab_context *ctx,
}
vchan = hab_get_vchan_fromvcid(vcid, ctx);
if (!vchan || vchan->otherend_closed)
return -ENODEV;
if (!vchan || vchan->otherend_closed) {
ret = -ENODEV;
goto err;
}
HAB_HEADER_SET_SIZE(header, sizebytes);
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_MSG);
if (flags & HABMM_SOCKET_SEND_FLAGS_XING_VM_STAT)
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_PROFILE);
else
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_MSG);
HAB_HEADER_SET_ID(header, vchan->otherend_id);
HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);
while (1) {
ret = physical_channel_send(vchan->pchan, &header, data);
@ -321,7 +346,11 @@ long hab_vchan_send(struct uhab_context *ctx,
schedule();
}
hab_vchan_put(vchan);
err:
if (vchan)
hab_vchan_put(vchan);
return ret;
}
@ -335,7 +364,7 @@ struct hab_message *hab_vchan_recv(struct uhab_context *ctx,
int nonblocking_flag = flags & HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING;
vchan = hab_get_vchan_fromvcid(vcid, ctx);
if (!vchan || vchan->otherend_closed)
if (!vchan)
return ERR_PTR(-ENODEV);
if (nonblocking_flag) {
@ -351,6 +380,8 @@ struct hab_message *hab_vchan_recv(struct uhab_context *ctx,
if (!message) {
if (nonblocking_flag)
ret = -EAGAIN;
else if (vchan->otherend_closed)
ret = -ENODEV;
else
ret = -EPIPE;
}
@ -369,7 +400,11 @@ int hab_vchan_open(struct uhab_context *ctx,
int32_t *vcid,
uint32_t flags)
{
struct virtual_channel *vchan;
struct virtual_channel *vchan = NULL;
struct hab_device *dev;
pr_debug("Open mmid=%d, loopback mode=%d, loopback num=%d\n",
mmid, hab_driver.b_loopback, hab_driver.loopback_num);
if (!vcid)
return -EINVAL;
@ -383,14 +418,29 @@ int hab_vchan_open(struct uhab_context *ctx,
vchan = frontend_open(ctx, mmid, LOOPBACK_DOM);
}
} else {
if (hab_driver.b_server_dom)
vchan = backend_listen(ctx, mmid);
else
vchan = frontend_open(ctx, mmid, 0);
dev = find_hab_device(mmid);
if (dev) {
struct physical_channel *pchan =
hab_pchan_find_domid(dev, HABCFG_VMID_DONT_CARE);
if (pchan->is_be)
vchan = backend_listen(ctx, mmid);
else
vchan = frontend_open(ctx, mmid,
HABCFG_VMID_DONT_CARE);
} else {
pr_err("failed to find device, mmid %d\n", mmid);
}
}
if (IS_ERR(vchan))
if (IS_ERR(vchan)) {
pr_err("vchan open failed over mmid=%d\n", mmid);
return PTR_ERR(vchan);
}
pr_debug("vchan id %x, remote id %x\n",
vchan->id, vchan->otherend_id);
write_lock(&ctx->ctx_lock);
list_add_tail(&vchan->node, &ctx->vchannels);
@ -403,12 +453,13 @@ int hab_vchan_open(struct uhab_context *ctx,
void hab_send_close_msg(struct virtual_channel *vchan)
{
struct hab_header header;
struct hab_header header = {0};
if (vchan && !vchan->otherend_closed) {
HAB_HEADER_SET_SIZE(header, 0);
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_CLOSE);
HAB_HEADER_SET_ID(header, vchan->otherend_id);
HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);
physical_channel_send(vchan->pchan, &header, NULL);
}
}
@ -442,6 +493,220 @@ void hab_vchan_close(struct uhab_context *ctx, int32_t vcid)
write_unlock(&ctx->ctx_lock);
}
/*
* To name the pchan - the pchan has two ends, either FE or BE locally.
* if is_be is true, then this end is the listener for BE. The pchan name
* uses the remote FE's vmid from the table.
* if is_be is false, then local is FE as opener. pchan name use local FE's
* vmid (self)
*/
/*
 * Allocate one pchan on the given mmid device.
 *
 * The pchan has two ends, either FE or BE locally. Its name always encodes
 * the FE side's vmid: when the local end is the BE (listener), the remote
 * FE's vmid from the table is used; when the local end is the FE (opener),
 * the local vmid (self) is used.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int hab_initialize_pchan_entry(struct hab_device *mmid_device,
		int vmid_local, int vmid_remote, int is_be)
{
	char pchan_name[MAX_VMID_NAME_SIZE];
	struct physical_channel *pchan = NULL;
	int ret;
	int vmid = is_be ? vmid_remote : vmid_local; /* named after FE side */

	if (!mmid_device) {
		pr_err("habdev %pK, vmid local %d, remote %d, is be %d\n",
			mmid_device, vmid_local, vmid_remote, is_be);
		return -EINVAL;
	}

	/*
	 * One bounded format call instead of snprintf + strlcat; both
	 * truncate and NUL-terminate identically at MAX_VMID_NAME_SIZE.
	 */
	snprintf(pchan_name, MAX_VMID_NAME_SIZE, "vm%d-%s",
		vmid, mmid_device->name);

	ret = habhyp_commdev_alloc((void **)&pchan, is_be, pchan_name,
			vmid_remote, mmid_device);
	if (ret == 0) {
		pr_debug("pchan %s added, vmid local %d, remote %d, is_be %d, total %d\n",
			pchan_name, vmid_local, vmid_remote, is_be,
			mmid_device->pchan_cnt);
	} else {
		pr_err("failed %d to allocate pchan %s, vmid local %d, remote %d, is_be %d, total %d\n",
			ret, pchan_name, vmid_local, vmid_remote,
			is_be, mmid_device->pchan_cnt);
	}

	return ret;
}
/*
 * Create the pchans of one mmid area (audio, camera, display, ...) towards
 * the remote VM described by settings entry (i, j). Every area maps to a
 * contiguous device-id range [MM_xxx_START, MM_xxx_END]; each id strictly
 * inside that range gets its own pchan.
 */
static void hab_generate_pchan(struct local_vmid *settings, int i, int j)
{
	int k, ret = 0;
	int start, end;

	pr_debug("%d as mmid %d in vmid %d\n",
			HABCFG_GET_MMID(settings, i, j), j, i);

	/* map the configured mmid area to its device-id range */
	switch (HABCFG_GET_MMID(settings, i, j)) {
	case MM_AUD_START/100:
		start = MM_AUD_START;
		end = MM_AUD_END;
		break;
	case MM_CAM_START/100:
		start = MM_CAM_START;
		end = MM_CAM_END;
		break;
	case MM_DISP_START/100:
		start = MM_DISP_START;
		end = MM_DISP_END;
		break;
	case MM_GFX_START/100:
		start = MM_GFX_START;
		end = MM_GFX_END;
		break;
	case MM_VID_START/100:
		start = MM_VID_START;
		end = MM_VID_END;
		break;
	case MM_MISC_START/100:
		start = MM_MISC_START;
		end = MM_MISC_END;
		break;
	case MM_QCPE_START/100:
		start = MM_QCPE_START;
		end = MM_QCPE_END;
		break;
	case MM_CLK_START/100:
		start = MM_CLK_START;
		end = MM_CLK_END;
		break;
	default:
		pr_err("failed to find mmid %d, i %d, j %d\n",
			HABCFG_GET_MMID(settings, i, j), i, j);
		return;
	}

	/*
	 * if this local pchan end is BE, then use remote FE's vmid. If
	 * local end is FE, then use self vmid
	 */
	for (k = start + 1; k < end; k++)
		ret += hab_initialize_pchan_entry(
				find_hab_device(k),
				settings->self,
				HABCFG_GET_VMID(settings, i),
				HABCFG_GET_BE(settings, i, j));

	/* previously the accumulated status was silently dropped */
	if (ret)
		pr_err("pchan creation incomplete for mmid area %d, ret %d\n",
			HABCFG_GET_MMID(settings, i, j), ret);
}
/*
* generate pchan list based on hab settings table.
* return status 0: success, otherwise failure
*/
/*
 * Walk the hab settings table and create the pchans it describes.
 * Returns 0 on success, otherwise failure.
 */
static int hab_generate_pchan_list(struct local_vmid *settings)
{
	int vm_idx, area_idx;

	pr_debug("self vmid is %d\n", settings->self);

	/* scan by valid VMs, then mmid */
	for (vm_idx = 0; vm_idx < HABCFG_VMID_MAX; vm_idx++) {
		/* skip unconfigured slots and the local VM itself */
		if (HABCFG_GET_VMID(settings, vm_idx) == HABCFG_VMID_INVALID)
			continue;
		if (HABCFG_GET_VMID(settings, vm_idx) == settings->self)
			continue;

		pr_debug("create pchans for vm %d\n", vm_idx);

		for (area_idx = 1; area_idx <= HABCFG_MMID_AREA_MAX;
				area_idx++) {
			if (HABCFG_GET_MMID(settings, vm_idx, area_idx) !=
					HABCFG_VMID_INVALID)
				hab_generate_pchan(settings, vm_idx,
						area_idx);
		}
	}

	return 0;
}
/*
* This function checks hypervisor plug-in readiness, read in hab configs,
* and configure pchans
*/
/*
 * Check hypervisor plug-in readiness, read in the hab config, then
 * configure the pchans accordingly.
 * Returns 0 on success, otherwise the failing step's status.
 */
int do_hab_parse(void)
{
	int ret;
	int idx;
	int total_pchans = 0;

	/* the hypervisor plug-in must be up before any pchan work starts */
	ret = hab_hypervisor_register();
	if (ret) {
		pr_err("register HYP plug-in failed, ret %d\n", ret);
		return ret;
	}

	/* every device needs its open Q before the first pchan appears */
	for (idx = 0; idx < hab_driver.ndevices; idx++)
		init_waitqueue_head(&hab_driver.devp[idx].openq);

	/* read in hab config and create pchans */
	memset(&hab_driver.settings, HABCFG_VMID_INVALID,
		sizeof(hab_driver.settings));

	pr_debug("prepare default gvm 2 settings...\n");
	fill_default_gvm_settings(&hab_driver.settings, 2,
		MM_AUD_START, MM_ID_MAX);

	/* now generate hab pchan list */
	ret = hab_generate_pchan_list(&hab_driver.settings);
	if (ret) {
		pr_err("generate pchan list failed, ret %d\n", ret);
		return ret;
	}

	for (idx = 0; idx < hab_driver.ndevices; idx++)
		total_pchans += hab_driver.devp[idx].pchan_cnt;

	pr_debug("ret %d, total %d pchans added, ndevices %d\n",
		ret, total_pchans, hab_driver.ndevices);

	return ret;
}
static int hab_open(struct inode *inodep, struct file *filep)
{
int result = 0;
@ -468,6 +733,8 @@ static int hab_release(struct inode *inodep, struct file *filep)
if (!ctx)
return 0;
pr_debug("inode %pK, filep %pK\n", inodep, filep);
write_lock(&ctx->ctx_lock);
list_for_each_entry_safe(vchan, tmp, &ctx->vchannels, node) {
@ -635,9 +902,7 @@ static const struct dma_map_ops hab_dma_ops = {
static int __init hab_init(void)
{
int result;
int i;
dev_t dev;
struct hab_device *device;
result = alloc_chrdev_region(&hab_driver.major, 0, 1, "hab");
@ -676,25 +941,23 @@ static int __init hab_init(void)
goto err;
}
for (i = 0; i < hab_driver.ndevices; i++) {
device = &hab_driver.devp[i];
init_waitqueue_head(&device->openq);
/* read in hab config, then configure pchans */
result = do_hab_parse();
if (!result) {
hab_driver.kctx = hab_ctx_alloc(1);
if (!hab_driver.kctx) {
pr_err("hab_ctx_alloc failed");
result = -ENOMEM;
hab_hypervisor_unregister();
goto err;
}
set_dma_ops(hab_driver.dev, &hab_dma_ops);
return result;
}
hab_hypervisor_register();
hab_driver.kctx = hab_ctx_alloc(1);
if (!hab_driver.kctx) {
pr_err("hab_ctx_alloc failed");
result = -ENOMEM;
hab_hypervisor_unregister();
goto err;
}
set_dma_ops(hab_driver.dev, &hab_dma_ops);
return result;
err:
if (!IS_ERR_OR_NULL(hab_driver.dev))
device_destroy(hab_driver.class, dev);
@ -703,6 +966,7 @@ err:
cdev_del(&hab_driver.cdev);
unregister_chrdev_region(dev, 1);
pr_err("Error in hab init, result %d\n", result);
return result;
}

View file

@ -13,7 +13,7 @@
#ifndef __HAB_H
#define __HAB_H
#define pr_fmt(fmt) "hab: " fmt
#define pr_fmt(fmt) "|hab:%s:%d|" fmt, __func__, __LINE__
#include <linux/types.h>
@ -47,6 +47,7 @@ enum hab_payload_type {
HAB_PAYLOAD_TYPE_EXPORT_ACK,
HAB_PAYLOAD_TYPE_PROFILE,
HAB_PAYLOAD_TYPE_CLOSE,
HAB_PAYLOAD_TYPE_MAX,
};
#define LOOPBACK_DOM 0xFF
@ -61,7 +62,8 @@ enum hab_payload_type {
#define DEVICE_AUD2_NAME "hab_aud2"
#define DEVICE_AUD3_NAME "hab_aud3"
#define DEVICE_AUD4_NAME "hab_aud4"
#define DEVICE_CAM_NAME "hab_cam"
#define DEVICE_CAM1_NAME "hab_cam1"
#define DEVICE_CAM2_NAME "hab_cam2"
#define DEVICE_DISP1_NAME "hab_disp1"
#define DEVICE_DISP2_NAME "hab_disp2"
#define DEVICE_DISP3_NAME "hab_disp3"
@ -74,6 +76,48 @@ enum hab_payload_type {
#define DEVICE_QCPE2_NAME "hab_qcpe_vm2"
#define DEVICE_QCPE3_NAME "hab_qcpe_vm3"
#define DEVICE_QCPE4_NAME "hab_qcpe_vm4"
#define DEVICE_CLK1_NAME "hab_clock_vm1"
#define DEVICE_CLK2_NAME "hab_clock_vm2"
/* make sure the concatenated name is less than this value */
#define MAX_VMID_NAME_SIZE 30
#define HABCFG_FILE_SIZE_MAX 256
#define HABCFG_MMID_AREA_MAX (MM_ID_MAX/100)
#define HABCFG_VMID_MAX 16
#define HABCFG_VMID_INVALID (-1)
#define HABCFG_VMID_DONT_CARE (-2)
#define HABCFG_ID_LINE_LIMIT ","
#define HABCFG_ID_VMID "VMID="
#define HABCFG_ID_BE "BE="
#define HABCFG_ID_FE "FE="
#define HABCFG_ID_MMID "MMID="
#define HABCFG_ID_RANGE "-"
#define HABCFG_ID_DONTCARE "X"
#define HABCFG_FOUND_VMID 1
#define HABCFG_FOUND_FE_MMIDS 2
#define HABCFG_FOUND_BE_MMIDS 3
#define HABCFG_FOUND_NOTHING (-1)
#define HABCFG_BE_FALSE 0
#define HABCFG_BE_TRUE 1
#define HABCFG_GET_VMID(_local_cfg_, _vmid_) \
((settings)->vmid_mmid_list[_vmid_].vmid)
#define HABCFG_GET_MMID(_local_cfg_, _vmid_, _mmid_) \
((settings)->vmid_mmid_list[_vmid_].mmid[_mmid_])
#define HABCFG_GET_BE(_local_cfg_, _vmid_, _mmid_) \
((settings)->vmid_mmid_list[_vmid_].is_listener[_mmid_])
struct hab_header {
uint32_t id_type_size;
uint32_t session_id;
uint32_t signature;
uint32_t sequence;
} __packed;
/* "Size" of the HAB_HEADER_ID and HAB_VCID_ID must match */
#define HAB_HEADER_SIZE_SHIFT 0
@ -96,34 +140,44 @@ enum hab_payload_type {
#define HAB_VCID_GET_ID(vcid) \
(((vcid) & HAB_VCID_ID_MASK) >> HAB_VCID_ID_SHIFT)
#define HAB_HEADER_SET_SESSION_ID(header, sid) ((header).session_id = (sid))
#define HAB_HEADER_SET_SIZE(header, size) \
((header).info = (((header).info) & (~HAB_HEADER_SIZE_MASK)) | \
(((size) << HAB_HEADER_SIZE_SHIFT) & HAB_HEADER_SIZE_MASK))
((header).id_type_size = ((header).id_type_size & \
(~HAB_HEADER_SIZE_MASK)) | \
(((size) << HAB_HEADER_SIZE_SHIFT) & \
HAB_HEADER_SIZE_MASK))
#define HAB_HEADER_SET_TYPE(header, type) \
((header).info = (((header).info) & (~HAB_HEADER_TYPE_MASK)) | \
(((type) << HAB_HEADER_TYPE_SHIFT) & HAB_HEADER_TYPE_MASK))
((header).id_type_size = ((header).id_type_size & \
(~HAB_HEADER_TYPE_MASK)) | \
(((type) << HAB_HEADER_TYPE_SHIFT) & \
HAB_HEADER_TYPE_MASK))
#define HAB_HEADER_SET_ID(header, id) \
((header).info = (((header).info) & (~HAB_HEADER_ID_MASK)) | \
((HAB_VCID_GET_ID(id) << HAB_HEADER_ID_SHIFT) \
& HAB_HEADER_ID_MASK))
((header).id_type_size = ((header).id_type_size & \
(~HAB_HEADER_ID_MASK)) | \
((HAB_VCID_GET_ID(id) << HAB_HEADER_ID_SHIFT) & \
HAB_HEADER_ID_MASK))
#define HAB_HEADER_GET_SIZE(header) \
((((header).info) & HAB_HEADER_SIZE_MASK) >> HAB_HEADER_SIZE_SHIFT)
(((header).id_type_size & \
HAB_HEADER_SIZE_MASK) >> HAB_HEADER_SIZE_SHIFT)
#define HAB_HEADER_GET_TYPE(header) \
((((header).info) & HAB_HEADER_TYPE_MASK) >> HAB_HEADER_TYPE_SHIFT)
(((header).id_type_size & \
HAB_HEADER_TYPE_MASK) >> HAB_HEADER_TYPE_SHIFT)
#define HAB_HEADER_GET_ID(header) \
(((((header).info) & HAB_HEADER_ID_MASK) >> \
((((header).id_type_size & HAB_HEADER_ID_MASK) >> \
(HAB_HEADER_ID_SHIFT - HAB_VCID_ID_SHIFT)) & HAB_VCID_ID_MASK)
struct hab_header {
uint32_t info;
};
#define HAB_HEADER_GET_SESSION_ID(header) ((header).session_id)
struct physical_channel {
char name[MAX_VMID_NAME_SIZE];
int is_be;
struct kref refcount;
struct hab_device *habdev;
struct list_head node;
@ -138,6 +192,10 @@ struct physical_channel {
int closed;
spinlock_t rxbuf_lock;
/* vchans over this pchan */
struct list_head vchannels;
rwlock_t vchans_lock;
};
struct hab_open_send_data {
@ -179,9 +237,10 @@ struct hab_message {
};
struct hab_device {
const char *name;
char name[MAX_VMID_NAME_SIZE];
unsigned int id;
struct list_head pchannels;
int pchan_cnt;
struct mutex pchan_lock;
struct list_head openq_list;
spinlock_t openlock;
@ -211,19 +270,37 @@ struct uhab_context {
int kernel;
};
/*
* array to describe the VM and its MMID configuration as what is connected to
* so this is describing a pchan's remote side
*/
/* one remote-VM entry: which mmid areas connect to it, and our role */
struct vmid_mmid_desc {
	int vmid; /* remote vmid */
	int mmid[HABCFG_MMID_AREA_MAX+1]; /* selected or not */
	int is_listener[HABCFG_MMID_AREA_MAX+1]; /* yes (local BE) or no (FE) */
};

/* full parsed config: local vmid plus every configured remote VM */
struct local_vmid {
	int32_t self; /* only this field is for local */
	struct vmid_mmid_desc vmid_mmid_list[HABCFG_VMID_MAX];
};
struct hab_driver {
struct device *dev;
struct cdev cdev;
dev_t major;
struct class *class;
int irq;
int ndevices;
struct hab_device *devp;
struct uhab_context *kctx;
struct local_vmid settings; /* parser results */
int b_server_dom;
int loopback_num;
int b_loopback;
void *hyp_priv; /* hypervisor plug-in storage */
};
struct virtual_channel {
@ -243,12 +320,14 @@ struct virtual_channel {
struct physical_channel *pchan;
struct uhab_context *ctx;
struct list_head node;
struct list_head pnode;
struct list_head rx_list;
wait_queue_head_t rx_queue;
spinlock_t rx_lock;
int id;
int otherend_id;
int otherend_closed;
uint32_t session_id;
};
/*
@ -271,7 +350,7 @@ struct export_desc {
void *kva;
int payload_count;
unsigned char payload[1];
};
} __packed;
int hab_vchan_open(struct uhab_context *ctx,
unsigned int mmid, int32_t *vcid, uint32_t flags);
@ -286,6 +365,7 @@ struct hab_message *hab_vchan_recv(struct uhab_context *ctx,
int vcid,
unsigned int flags);
void hab_vchan_stop(struct virtual_channel *vchan);
void hab_vchans_stop(struct physical_channel *pchan);
void hab_vchan_stop_notify(struct virtual_channel *vchan);
int hab_mem_export(struct uhab_context *ctx,
@ -350,7 +430,7 @@ void hab_open_request_init(struct hab_open_request *request,
int open_id);
int hab_open_request_send(struct hab_open_request *request);
int hab_open_request_add(struct physical_channel *pchan,
struct hab_header *header);
size_t sizebytes, int request_type);
void hab_open_request_free(struct hab_open_request *request);
int hab_open_listen(struct uhab_context *ctx,
struct hab_device *dev,
@ -361,7 +441,7 @@ int hab_open_listen(struct uhab_context *ctx,
struct virtual_channel *hab_vchan_alloc(struct uhab_context *ctx,
struct physical_channel *pchan);
struct virtual_channel *hab_vchan_get(struct physical_channel *pchan,
uint32_t vchan_id);
struct hab_header *header);
void hab_vchan_put(struct virtual_channel *vchan);
struct virtual_channel *hab_get_vchan_fromvcid(int32_t vcid,
@ -394,6 +474,9 @@ static inline void hab_ctx_put(struct uhab_context *ctx)
void hab_send_close_msg(struct virtual_channel *vchan);
int hab_hypervisor_register(void);
void hab_hypervisor_unregister(void);
int habhyp_commdev_alloc(void **commdev, int is_be, char *name,
int vmid_remote, struct hab_device *mmid_device);
int habhyp_commdev_dealloc(void *commdev);
int physical_channel_read(struct physical_channel *pchan,
void *payload,
@ -407,6 +490,13 @@ void physical_channel_rx_dispatch(unsigned long physical_channel);
int loopback_pchan_create(char *dev_name);
int hab_parse(struct local_vmid *settings);
int do_hab_parse(void);
int fill_default_gvm_settings(struct local_vmid *settings,
int vmid_local, int mmid_start, int mmid_end);
bool hab_is_loopback(void);
/* Global singleton HAB instance */

View file

@ -35,6 +35,7 @@ struct importer_context {
int cnt; /* pages allocated for local file */
struct list_head imp_list;
struct file *filp;
rwlock_t implist_lock;
};
void *habmm_hyp_allocate_grantable(int page_count,
@ -73,8 +74,12 @@ static int habmem_get_dma_pages(unsigned long address,
int fd;
vma = find_vma(current->mm, address);
if (!vma || !vma->vm_file)
if (!vma || !vma->vm_file) {
pr_err("cannot find vma\n");
goto err;
}
pr_debug("vma flags %lx\n", vma->vm_flags);
/* Look for the fd that matches this the vma file */
fd = iterate_fd(current->files, 0, match_file, vma->vm_file);
@ -103,6 +108,7 @@ static int habmem_get_dma_pages(unsigned long address,
for_each_sg(sg_table->sgl, s, sg_table->nents, i) {
page = sg_page(s);
pr_debug("sgl length %d\n", s->length);
for (j = page_offset; j < (s->length >> PAGE_SHIFT); j++) {
pages[rc] = nth_page(page, j);
@ -136,6 +142,12 @@ err:
return rc;
}
/*
* exporter - grant & revoke
* generate the sharable page list based on the CPU-friendly virtual "address".
* The result as an array is stored in ppdata to return to caller
* page size 4KB is assumed
*/
int habmem_hyp_grant_user(unsigned long address,
int page_count,
int flags,
@ -220,6 +232,7 @@ void *habmem_imp_hyp_open(void)
if (!priv)
return NULL;
rwlock_init(&priv->implist_lock);
INIT_LIST_HEAD(&priv->imp_list);
return priv;
@ -261,7 +274,7 @@ long habmem_imp_hyp_map(void *imp_ctx,
uint32_t userflags)
{
struct page **pages;
struct compressed_pfns *pfn_table = impdata;
struct compressed_pfns *pfn_table = (struct compressed_pfns *)impdata;
struct pages_list *pglist;
struct importer_context *priv = imp_ctx;
unsigned long pfn;
@ -310,6 +323,9 @@ long habmem_imp_hyp_map(void *imp_ctx,
kfree(pglist);
pr_err("%ld pages vmap failed\n", pglist->npages);
return -ENOMEM;
} else {
pr_debug("%ld pages vmap pass, return %pK\n",
pglist->npages, pglist->kva);
}
pglist->uva = NULL;
@ -320,8 +336,11 @@ long habmem_imp_hyp_map(void *imp_ctx,
pglist->kva = NULL;
}
write_lock(&priv->implist_lock);
list_add_tail(&pglist->list, &priv->imp_list);
priv->cnt++;
write_unlock(&priv->implist_lock);
pr_debug("index returned %llx\n", *index);
return 0;
}
@ -333,11 +352,15 @@ long habmm_imp_hyp_unmap(void *imp_ctx,
int kernel)
{
struct importer_context *priv = imp_ctx;
struct pages_list *pglist;
struct pages_list *pglist, *tmp;
int found = 0;
uint64_t pg_index = index >> PAGE_SHIFT;
list_for_each_entry(pglist, &priv->imp_list, list) {
write_lock(&priv->implist_lock);
list_for_each_entry_safe(pglist, tmp, &priv->imp_list, list) {
pr_debug("node pglist %pK, kernel %d, pg_index %llx\n",
pglist, pglist->kernel, pg_index);
if (kernel) {
if (pglist->kva == (void *)((uintptr_t)index))
found = 1;
@ -353,11 +376,15 @@ long habmm_imp_hyp_unmap(void *imp_ctx,
}
}
write_unlock(&priv->implist_lock);
if (!found) {
pr_err("failed to find export id on index %llx\n", index);
return -EINVAL;
}
pr_debug("detach pglist %pK, index %llx, kernel %d, list cnt %d\n",
pglist, pglist->index, pglist->kernel, priv->cnt);
if (kernel)
if (pglist->kva)
vunmap(pglist->kva);
@ -393,6 +420,8 @@ static int hab_map_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
pr_debug("Fault page index %d\n", page_idx);
page = pglist->pages[page_idx];
get_page(page);
vmf->page = page;
@ -422,15 +451,20 @@ int habmem_imp_hyp_mmap(struct file *filp, struct vm_area_struct *vma)
struct pages_list *pglist;
int bfound = 0;
pr_debug("mmap request start %lX, len %ld, index %lX\n",
vma->vm_start, length, vma->vm_pgoff);
read_lock(&imp_ctx->implist_lock);
list_for_each_entry(pglist, &imp_ctx->imp_list, list) {
if (pglist->index == vma->vm_pgoff) {
bfound = 1;
break;
}
}
read_unlock(&imp_ctx->implist_lock);
if (!bfound) {
pr_err("Failed to find pglist vm_pgoff: %d\n", vma->vm_pgoff);
pr_err("Failed to find pglist vm_pgoff: %ld\n", vma->vm_pgoff);
return -EINVAL;
}

View file

@ -31,11 +31,11 @@ static int hab_export_ack_find(struct uhab_context *ctx,
struct hab_export_ack *expect_ack)
{
int ret = 0;
struct hab_export_ack_recvd *ack_recvd;
struct hab_export_ack_recvd *ack_recvd, *tmp;
spin_lock_bh(&ctx->expq_lock);
list_for_each_entry(ack_recvd, &ctx->exp_rxq, node) {
list_for_each_entry_safe(ack_recvd, tmp, &ctx->exp_rxq, node) {
if (ack_recvd->ack.export_id == expect_ack->export_id &&
ack_recvd->ack.vcid_local == expect_ack->vcid_local &&
ack_recvd->ack.vcid_remote == expect_ack->vcid_remote) {
@ -197,6 +197,7 @@ static int habmem_export_vchan(struct uhab_context *ctx,
HAB_HEADER_SET_SIZE(header, sizebytes);
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_EXPORT);
HAB_HEADER_SET_ID(header, vchan->otherend_id);
HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);
ret = physical_channel_send(vchan->pchan, &header, exp);
if (ret != 0) {
@ -228,6 +229,8 @@ int hab_mem_export(struct uhab_context *ctx,
if (!ctx || !param || param->sizebytes > HAB_MAX_EXPORT_SIZE)
return -EINVAL;
pr_debug("vc %X, mem size %d\n", param->vcid, param->sizebytes);
vchan = hab_get_vchan_fromvcid(param->vcid, ctx);
if (!vchan || !vchan->pchan) {
ret = -ENODEV;
@ -303,7 +306,10 @@ int hab_mem_unexport(struct uhab_context *ctx,
return -EINVAL;
ret = habmem_hyp_revoke(exp->payload, exp->payload_count);
if (ret) {
pr_err("Error found in revoke grant with ret %d", ret);
return ret;
}
habmem_remove_export(exp);
return ret;
}
@ -335,6 +341,10 @@ int hab_mem_import(struct uhab_context *ctx,
return ret;
}
pr_debug("call map id: %d pcnt %d remote_dom %d 1st_ref:0x%X\n",
exp->export_id, exp->payload_count, exp->domid_local,
*((uint32_t *)exp->payload));
ret = habmem_imp_hyp_map(ctx->import_ctx,
exp->payload,
exp->payload_count,
@ -349,6 +359,8 @@ int hab_mem_import(struct uhab_context *ctx,
exp->domid_local, *((uint32_t *)exp->payload));
return ret;
}
pr_debug("import index %llx, kva %llx, kernel %d\n",
exp->import_index, param->kva, kernel);
param->index = exp->import_index;
param->kva = (uint64_t)exp->kva;
@ -373,6 +385,9 @@ int hab_mem_unimport(struct uhab_context *ctx,
list_del(&exp->node);
ctx->import_total--;
found = 1;
pr_debug("found id:%d payload cnt:%d kernel:%d\n",
exp->export_id, exp->payload_count, kernel);
break;
}
}
@ -385,7 +400,10 @@ int hab_mem_unimport(struct uhab_context *ctx,
exp->import_index,
exp->payload_count,
kernel);
if (ret) {
pr_err("unmap fail id:%d pcnt:%d kernel:%d\n",
exp->export_id, exp->payload_count, kernel);
}
param->kva = (uint64_t)exp->kva;
kfree(exp);
}

View file

@ -55,13 +55,12 @@ hab_msg_dequeue(struct virtual_channel *vchan, int wait_flag)
vchan->otherend_closed);
}
if (!ret && !vchan->otherend_closed) {
/* return all the received messages before the remote close */
if (!ret && !hab_rx_queue_empty(vchan)) {
spin_lock_bh(&vchan->rx_lock);
if (!list_empty(&vchan->rx_list)) {
message = list_first_entry(&vchan->rx_list,
message = list_first_entry(&vchan->rx_list,
struct hab_message, node);
list_del(&message->node);
}
list_del(&message->node);
spin_unlock_bh(&vchan->rx_lock);
}
@ -91,8 +90,9 @@ static int hab_export_enqueue(struct virtual_channel *vchan,
return 0;
}
static int hab_send_export_ack(struct physical_channel *pchan,
struct export_desc *exp)
static int hab_send_export_ack(struct virtual_channel *vchan,
struct physical_channel *pchan,
struct export_desc *exp)
{
struct hab_export_ack exp_ack = {
.export_id = exp->export_id,
@ -104,11 +104,12 @@ static int hab_send_export_ack(struct physical_channel *pchan,
HAB_HEADER_SET_SIZE(header, sizeof(exp_ack));
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_EXPORT_ACK);
HAB_HEADER_SET_ID(header, exp->vcid_local);
HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);
return physical_channel_send(pchan, &header, &exp_ack);
}
static int hab_receive_create_export_ack(struct physical_channel *pchan,
struct uhab_context *ctx)
struct uhab_context *ctx, size_t sizebytes)
{
struct hab_export_ack_recvd *ack_recvd =
kzalloc(sizeof(*ack_recvd), GFP_ATOMIC);
@ -116,11 +117,20 @@ static int hab_receive_create_export_ack(struct physical_channel *pchan,
if (!ack_recvd)
return -ENOMEM;
if (sizeof(ack_recvd->ack) != sizebytes)
pr_err("exp ack size %lu is not as arrived %zu\n",
sizeof(ack_recvd->ack), sizebytes);
if (physical_channel_read(pchan,
&ack_recvd->ack,
sizeof(ack_recvd->ack)) != sizeof(ack_recvd->ack))
sizebytes) != sizebytes)
return -EIO;
pr_debug("receive export id %d, local vc %X, vd remote %X\n",
ack_recvd->ack.export_id,
ack_recvd->ack.vcid_local,
ack_recvd->ack.vcid_remote);
spin_lock_bh(&ctx->expq_lock);
list_add_tail(&ack_recvd->node, &ctx->exp_rxq);
spin_unlock_bh(&ctx->expq_lock);
@ -137,20 +147,48 @@ void hab_msg_recv(struct physical_channel *pchan,
size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
uint32_t payload_type = HAB_HEADER_GET_TYPE(*header);
uint32_t vchan_id = HAB_HEADER_GET_ID(*header);
uint32_t session_id = HAB_HEADER_GET_SESSION_ID(*header);
struct virtual_channel *vchan = NULL;
struct export_desc *exp_desc;
struct timeval tv;
/* get the local virtual channel if it isn't an open message */
if (payload_type != HAB_PAYLOAD_TYPE_INIT &&
payload_type != HAB_PAYLOAD_TYPE_INIT_ACK &&
payload_type != HAB_PAYLOAD_TYPE_ACK) {
vchan = hab_vchan_get(pchan, vchan_id);
/* sanity check the received message */
if (payload_type >= HAB_PAYLOAD_TYPE_MAX ||
vchan_id > (HAB_HEADER_ID_MASK >> HAB_HEADER_ID_SHIFT)
|| !vchan_id || !session_id) {
pr_err("Invalid message received, payload type %d, vchan id %x, sizebytes %zx, session %d\n",
payload_type, vchan_id, sizebytes, session_id);
}
vchan = hab_vchan_get(pchan, header);
if (!vchan) {
pr_debug("vchan is not found, payload type %d, vchan id %x, sizebytes %zx, session %d\n",
payload_type, vchan_id, sizebytes, session_id);
if (sizebytes)
pr_err("message is dropped\n");
return;
} else if (vchan->otherend_closed) {
hab_vchan_put(vchan);
pr_debug("vchan remote is closed, payload type %d, vchan id %x, sizebytes %zx, session %d\n",
payload_type, vchan_id, sizebytes, session_id);
if (sizebytes)
pr_err("message is dropped\n");
return;
}
} else {
if (sizebytes != sizeof(struct hab_open_send_data)) {
pr_err("Invalid open request received, payload type %d, vchan id %x, sizebytes %zx, session %d\n",
payload_type, vchan_id, sizebytes, session_id);
}
}
switch (payload_type) {
@ -165,9 +203,12 @@ void hab_msg_recv(struct physical_channel *pchan,
case HAB_PAYLOAD_TYPE_INIT:
case HAB_PAYLOAD_TYPE_INIT_ACK:
case HAB_PAYLOAD_TYPE_ACK:
ret = hab_open_request_add(pchan, header);
if (ret)
ret = hab_open_request_add(pchan, sizebytes, payload_type);
if (ret) {
pr_err("open request add failed, ret %d, payload type %d, sizebytes %zx\n",
ret, payload_type, sizebytes);
break;
}
wake_up_interruptible(&dev->openq);
break;
@ -185,22 +226,49 @@ void hab_msg_recv(struct physical_channel *pchan,
exp_desc->domid_local = pchan->dom_id;
hab_export_enqueue(vchan, exp_desc);
hab_send_export_ack(pchan, exp_desc);
hab_send_export_ack(vchan, pchan, exp_desc);
break;
case HAB_PAYLOAD_TYPE_EXPORT_ACK:
ret = hab_receive_create_export_ack(pchan, vchan->ctx);
if (ret)
ret = hab_receive_create_export_ack(pchan, vchan->ctx,
sizebytes);
if (ret) {
pr_err("failed to handled export ack %d\n", ret);
break;
}
wake_up_interruptible(&vchan->ctx->exp_wq);
break;
case HAB_PAYLOAD_TYPE_CLOSE:
/* remote request close */
pr_debug("remote side request close\n");
pr_debug(" vchan id %X, other end %X, session %d\n",
vchan->id, vchan->otherend_id, session_id);
hab_vchan_stop(vchan);
break;
case HAB_PAYLOAD_TYPE_PROFILE:
do_gettimeofday(&tv);
/* pull down the incoming data */
message = hab_msg_alloc(pchan, sizebytes);
if (!message) {
pr_err("msg alloc failed\n");
break;
}
((uint64_t *)message->data)[2] = tv.tv_sec;
((uint64_t *)message->data)[3] = tv.tv_usec;
hab_msg_queue(vchan, message);
break;
default:
pr_err("unknown msg is received\n");
pr_err("payload type %d, vchan id %x\n",
payload_type, vchan_id);
pr_err("sizebytes %zx, session %d\n",
sizebytes, session_id);
break;
}
if (vchan)

View file

@ -42,7 +42,7 @@ int hab_open_request_send(struct hab_open_request *request)
}
int hab_open_request_add(struct physical_channel *pchan,
struct hab_header *header)
size_t sizebytes, int request_type)
{
struct hab_open_node *node;
struct hab_device *dev = pchan->habdev;
@ -53,12 +53,11 @@ int hab_open_request_add(struct physical_channel *pchan,
if (!node)
return -ENOMEM;
if (physical_channel_read(pchan, &data, HAB_HEADER_GET_SIZE(*header)) !=
HAB_HEADER_GET_SIZE(*header))
if (physical_channel_read(pchan, &data, sizebytes) != sizebytes)
return -EIO;
request = &node->request;
request->type = HAB_HEADER_GET_TYPE(*header);
request->type = request_type;
request->pchan = pchan;
request->vchan_id = data.vchan_id;
request->sub_id = data.sub_id;

View file

@ -0,0 +1,65 @@
/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include "hab.h"
/*
 * Set a valid mmid value in tbl to mark the entry as usable. All inputs here
 * are normalized to 1-based integers.
*/
static int fill_vmid_mmid_tbl(struct vmid_mmid_desc *tbl, int32_t vm_start,
int32_t vm_range, int32_t mmid_start,
int32_t mmid_range, int32_t be)
{
int ret = 0;
int i, j;
for (i = vm_start; i < vm_start+vm_range; i++) {
tbl[i].vmid = i; /* set valid vmid value to make it usable */
for (j = mmid_start; j < mmid_start + mmid_range; j++) {
/* sanity check */
if (tbl[i].mmid[j] != HABCFG_VMID_INVALID) {
pr_err("overwrite previous setting, i %d, j %d, be %d\n",
i, j, tbl[i].is_listener[j]);
}
tbl[i].mmid[j] = j;
tbl[i].is_listener[j] = be; /* BE IS listen */
}
}
return ret;
}
void dump_settings(struct local_vmid *settings)
{
int i, j;
pr_debug("self vmid is %d\n", settings->self);
for (i = 0; i < HABCFG_VMID_MAX; i++) {
pr_debug("remote vmid %d\n",
settings->vmid_mmid_list[i].vmid);
for (j = 0; j <= HABCFG_MMID_AREA_MAX; j++) {
pr_debug("mmid %d, is_be %d\n",
settings->vmid_mmid_list[i].mmid[j],
settings->vmid_mmid_list[i].is_listener[j]);
}
}
}
/*
 * Populate default settings for a guest VM: every mmid family in the
 * requested range talks to the host (vm0) as the pchan opener (FE).
 */
int fill_default_gvm_settings(struct local_vmid *settings, int vmid_local,
		int mmid_start, int mmid_end)
{
	/* mmids are grouped in families of 100 (e.g. 200..299 is CAM) */
	int32_t first_family = mmid_start / 100;
	int32_t family_cnt = (mmid_end - mmid_start) / 100 + 1;

	settings->self = vmid_local;

	/* default gvm always talks to host as vm0 */
	return fill_vmid_mmid_tbl(settings->vmid_mmid_list, 0, 1,
			first_family, family_cnt, HABCFG_BE_FALSE);
}

View file

@ -31,10 +31,13 @@ hab_pchan_alloc(struct hab_device *habdev, int otherend_id)
pchan->closed = 1;
pchan->hyp_data = NULL;
INIT_LIST_HEAD(&pchan->vchannels);
rwlock_init(&pchan->vchans_lock);
spin_lock_init(&pchan->rxbuf_lock);
mutex_lock(&habdev->pchan_lock);
list_add_tail(&pchan->node, &habdev->pchannels);
habdev->pchan_cnt++;
mutex_unlock(&habdev->pchan_lock);
return pchan;
@ -47,6 +50,7 @@ static void hab_pchan_free(struct kref *ref)
mutex_lock(&pchan->habdev->pchan_lock);
list_del(&pchan->node);
pchan->habdev->pchan_cnt--;
mutex_unlock(&pchan->habdev->pchan_lock);
kfree(pchan->hyp_data);
kfree(pchan);
@ -59,11 +63,14 @@ hab_pchan_find_domid(struct hab_device *dev, int dom_id)
mutex_lock(&dev->pchan_lock);
list_for_each_entry(pchan, &dev->pchannels, node)
if (pchan->dom_id == dom_id)
if (pchan->dom_id == dom_id || dom_id == HABCFG_VMID_DONT_CARE)
break;
if (pchan->dom_id != dom_id)
if (pchan->dom_id != dom_id && dom_id != HABCFG_VMID_DONT_CARE) {
pr_err("dom_id mismatch requested %d, existing %d\n",
dom_id, pchan->dom_id);
pchan = NULL;
}
if (pchan && !kref_get_unless_zero(&pchan->refcount))
pchan = NULL;

View file

@ -21,9 +21,51 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#define DEFAULT_HAB_SHMEM_IRQ 7
#define SHMEM_PHYSICAL_ADDR 0x1c050000
struct shmem_irq_config {
unsigned long factory_addr; /* from gvm settings when provided */
int irq; /* from gvm settings when provided */
};
/*
 * This is for platforms that do not provide probe features. The size should
 * match the hab device side (all mmids).
*/
static struct shmem_irq_config pchan_factory_settings[] = {
{0x1b000000, 7},
{0x1b001000, 8},
{0x1b002000, 9},
{0x1b003000, 10},
{0x1b004000, 11},
{0x1b005000, 12},
{0x1b006000, 13},
{0x1b007000, 14},
{0x1b008000, 15},
{0x1b009000, 16},
{0x1b00a000, 17},
{0x1b00b000, 18},
{0x1b00c000, 19},
{0x1b00d000, 20},
{0x1b00e000, 21},
{0x1b00f000, 22},
{0x1b010000, 23},
{0x1b011000, 24},
{0x1b012000, 25},
{0x1b013000, 26},
};
static struct qvm_plugin_info {
struct shmem_irq_config *pchan_settings;
int setting_size;
int curr;
int probe_cnt;
} qvm_priv_info = {
pchan_factory_settings,
ARRAY_SIZE(pchan_factory_settings),
0,
ARRAY_SIZE(pchan_factory_settings)
};
static irqreturn_t shm_irq_handler(int irq, void *_pchan)
{
@ -43,22 +85,22 @@ static irqreturn_t shm_irq_handler(int irq, void *_pchan)
return rc;
}
/*
* this is only for guest
*/
static uint64_t get_guest_factory_paddr(struct qvm_channel *dev,
const char *name, uint32_t pages)
unsigned long factory_addr, int irq, const char *name, uint32_t pages)
{
int i;
dev->guest_factory = ioremap(SHMEM_PHYSICAL_ADDR, PAGE_SIZE);
if (!dev->guest_factory) {
pr_err("Couldn't map guest_factory\n");
return 0;
}
pr_debug("name = %s, factory paddr = 0x%lx, irq %d, pages %d\n",
name, factory_addr, irq, pages);
dev->guest_factory = (struct guest_shm_factory *)factory_addr;
if (dev->guest_factory->signature != GUEST_SHM_SIGNATURE) {
pr_err("shmem factory signature incorrect: %ld != %lu\n",
GUEST_SHM_SIGNATURE, dev->guest_factory->signature);
iounmap(dev->guest_factory);
pr_err("signature error: %ld != %llu, factory addr %lx\n",
GUEST_SHM_SIGNATURE, dev->guest_factory->signature,
factory_addr);
return 0;
}
@ -77,16 +119,22 @@ static uint64_t get_guest_factory_paddr(struct qvm_channel *dev,
/* See if we successfully created/attached to the region. */
if (dev->guest_factory->status != GSS_OK) {
pr_err("create failed: %d\n", dev->guest_factory->status);
iounmap(dev->guest_factory);
return 0;
}
pr_debug("shm creation size %x\n", dev->guest_factory->size);
pr_debug("shm creation size %x, paddr=%llx, vector %d, dev %pK\n",
dev->guest_factory->size,
dev->guest_factory->shmem,
dev->guest_intr,
dev);
dev->factory_addr = factory_addr;
dev->irq = irq;
return dev->guest_factory->shmem;
}
static int create_dispatcher(struct physical_channel *pchan, int id)
static int create_dispatcher(struct physical_channel *pchan)
{
struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;
int ret;
@ -94,21 +142,45 @@ static int create_dispatcher(struct physical_channel *pchan, int id)
tasklet_init(&dev->task, physical_channel_rx_dispatch,
(unsigned long) pchan);
ret = request_irq(hab_driver.irq, shm_irq_handler, IRQF_SHARED,
hab_driver.devp[id].name, pchan);
pr_debug("request_irq: irq = %d, pchan name = %s",
dev->irq, pchan->name);
ret = request_irq(dev->irq, shm_irq_handler, IRQF_SHARED,
pchan->name, pchan);
if (ret)
pr_err("request_irq for %s failed: %d\n",
hab_driver.devp[id].name, ret);
pchan->name, ret);
return ret;
}
static struct physical_channel *habhyp_commdev_alloc(int id)
void hab_pipe_reset(struct physical_channel *pchan)
{
struct qvm_channel *dev;
struct physical_channel *pchan = NULL;
int ret = 0, channel = 0;
struct hab_pipe_endpoint *pipe_ep;
struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;
pipe_ep = hab_pipe_init(dev->pipe, PIPE_SHMEM_SIZE,
pchan->is_be ? 0 : 1);
if (dev->pipe_ep != pipe_ep)
pr_warn("The pipe endpoint must not change\n");
}
/*
* allocate hypervisor plug-in specific resource for pchan, and call hab pchan
* alloc common function. hab driver struct is directly accessed.
* commdev: pointer to store the pchan address
* id: index to hab_device (mmids)
* is_be: pchan local endpoint role
* name: pchan name
* return: status 0: success, otherwise: failures
*/
int habhyp_commdev_alloc(void **commdev, int is_be, char *name,
int vmid_remote, struct hab_device *mmid_device)
{
struct qvm_channel *dev = NULL;
struct qvm_plugin_info *qvm_priv = hab_driver.hyp_priv;
struct physical_channel **pchan = (struct physical_channel **)commdev;
int ret = 0, coid = 0, channel = 0;
char *shmdata;
uint32_t pipe_alloc_size =
hab_pipe_calc_required_bytes(PIPE_SHMEM_SIZE);
@ -119,15 +191,27 @@ static struct physical_channel *habhyp_commdev_alloc(int id)
int total_pages;
struct page **pages;
pr_debug("habhyp_commdev_alloc: pipe_alloc_size is %d\n",
pipe_alloc_size);
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return ERR_PTR(-ENOMEM);
return -ENOMEM;
spin_lock_init(&dev->io_lock);
paddr = get_guest_factory_paddr(dev,
hab_driver.devp[id].name,
qvm_priv->pchan_settings[qvm_priv->curr].factory_addr,
qvm_priv->pchan_settings[qvm_priv->curr].irq,
name,
pipe_alloc_pages);
qvm_priv->curr++;
if (qvm_priv->curr > qvm_priv->probe_cnt) {
pr_err("factory setting %d overflow probed cnt %d\n",
qvm_priv->curr, qvm_priv->probe_cnt);
ret = -1;
goto err;
}
total_pages = dev->guest_factory->size + 1;
pages = kmalloc_array(total_pages, sizeof(struct page *), GFP_KERNEL);
@ -147,72 +231,138 @@ static struct physical_channel *habhyp_commdev_alloc(int id)
}
shmdata = (char *)dev->guest_ctrl + PAGE_SIZE;
pr_debug("ctrl page 0x%llx mapped at 0x%pK, idx %d\n",
paddr, dev->guest_ctrl, dev->guest_ctrl->idx);
pr_debug("data buffer mapped at 0x%pK\n", shmdata);
dev->idx = dev->guest_ctrl->idx;
kfree(pages);
dev->pipe = (struct hab_pipe *) shmdata;
pr_debug("\"%s\": pipesize %d, addr 0x%pK, be %d\n", name,
pipe_alloc_size, dev->pipe, is_be);
dev->pipe_ep = hab_pipe_init(dev->pipe, PIPE_SHMEM_SIZE,
dev->be ? 0 : 1);
pchan = hab_pchan_alloc(&hab_driver.devp[id], dev->be);
if (!pchan) {
is_be ? 0 : 1);
/* newly created pchan is added to mmid device list */
*pchan = hab_pchan_alloc(mmid_device, vmid_remote);
if (!(*pchan)) {
ret = -ENOMEM;
goto err;
}
pchan->closed = 0;
pchan->hyp_data = (void *)dev;
(*pchan)->closed = 0;
(*pchan)->hyp_data = (void *)dev;
strlcpy((*pchan)->name, name, MAX_VMID_NAME_SIZE);
(*pchan)->is_be = is_be;
dev->channel = channel;
dev->coid = coid;
ret = create_dispatcher(pchan, id);
if (ret < 0)
ret = create_dispatcher(*pchan);
if (ret)
goto err;
return pchan;
return ret;
err:
kfree(dev);
if (pchan)
hab_pchan_put(pchan);
if (*pchan)
hab_pchan_put(*pchan);
pr_err("habhyp_commdev_alloc failed: %d\n", ret);
return ERR_PTR(ret);
return ret;
}
int habhyp_commdev_dealloc(void *commdev)
{
struct physical_channel *pchan = (struct physical_channel *)commdev;
struct qvm_channel *dev = pchan->hyp_data;
kfree(dev);
hab_pchan_put(pchan);
return 0;
}
int hab_hypervisor_register(void)
{
int ret = 0, i;
int ret = 0;
hab_driver.b_server_dom = 0;
/*
* Can still attempt to instantiate more channels if one fails.
* Others can be retried later.
*/
for (i = 0; i < hab_driver.ndevices; i++) {
if (IS_ERR(habhyp_commdev_alloc(i)))
ret = -EAGAIN;
}
pr_info("initializing for %s VM\n", hab_driver.b_server_dom ?
"host" : "guest");
hab_driver.hyp_priv = &qvm_priv_info;
return ret;
}
/*
 * Tear down every pchan on every mmid device and reset the qvm plug-in
 * probe bookkeeping so the driver can be registered again.
 */
void hab_hypervisor_unregister(void)
{
	int status, i;

	for (i = 0; i < hab_driver.ndevices; i++) {
		struct hab_device *dev = &hab_driver.devp[i];
		struct physical_channel *pchan, *tmp;

		/*
		 * habhyp_commdev_dealloc() may drop the last reference and
		 * free the pchan (hab_pchan_free() unlinks it from this
		 * list), so the walk must use the _safe variant.
		 */
		list_for_each_entry_safe(pchan, tmp, &dev->pchannels, node) {
			status = habhyp_commdev_dealloc(pchan);
			if (status) {
				pr_err("failed to free pchan %pK, i %d, ret %d\n",
					pchan, i, status);
			}
		}
	}

	qvm_priv_info.probe_cnt = 0;
	qvm_priv_info.curr = 0;
}
static int hab_shmem_probe(struct platform_device *pdev)
{
int irq = platform_get_irq(pdev, 0);
int irq = 0;
struct resource *mem;
void *shmem_base = NULL;
int ret = 0;
if (irq > 0)
hab_driver.irq = irq;
else
hab_driver.irq = DEFAULT_HAB_SHMEM_IRQ;
/* hab in one GVM will not have pchans more than one VM could allowed */
if (qvm_priv_info.probe_cnt >= hab_driver.ndevices) {
pr_err("no more channel, current %d, maximum %d\n",
qvm_priv_info.probe_cnt, hab_driver.ndevices);
return -ENODEV;
}
return 0;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
pr_err("no interrupt for the channel %d, error %d\n",
qvm_priv_info.probe_cnt, irq);
return irq;
}
qvm_priv_info.pchan_settings[qvm_priv_info.probe_cnt].irq = irq;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
pr_err("can not get io mem resource for channel %d\n",
qvm_priv_info.probe_cnt);
return -EINVAL;
}
shmem_base = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(shmem_base)) {
pr_err("ioremap failed for channel %d, mem %pK\n",
qvm_priv_info.probe_cnt, mem);
return -EINVAL;
}
qvm_priv_info.pchan_settings[qvm_priv_info.probe_cnt].factory_addr
= (unsigned long)((uintptr_t)shmem_base);
pr_debug("pchan idx %d, hab irq=%d shmem_base=%pK, mem %pK\n",
qvm_priv_info.probe_cnt, irq, shmem_base, mem);
qvm_priv_info.probe_cnt++;
return ret;
}
static int hab_shmem_remove(struct platform_device *pdev)
@ -220,6 +370,23 @@ static int hab_shmem_remove(struct platform_device *pdev)
return 0;
}
static void hab_shmem_shutdown(struct platform_device *pdev)
{
int i;
struct qvm_channel *dev;
struct physical_channel *pchan;
struct hab_device hab_dev;
for (i = 0; i < hab_driver.ndevices; i++) {
hab_dev = hab_driver.devp[i];
pr_debug("detaching %s\n", hab_dev.name);
list_for_each_entry(pchan, &hab_dev.pchannels, node) {
dev = (struct qvm_channel *)pchan->hyp_data;
dev->guest_ctrl->detach = 0;
}
}
}
static const struct of_device_id hab_shmem_match_table[] = {
{.compatible = "qvm,guest_shm"},
{},
@ -228,6 +395,7 @@ static const struct of_device_id hab_shmem_match_table[] = {
static struct platform_driver hab_shmem_driver = {
.probe = hab_shmem_probe,
.remove = hab_shmem_remove,
.shutdown = hab_shmem_shutdown,
.driver = {
.name = "hab_shmem",
.of_match_table = of_match_ptr(hab_shmem_match_table),
@ -236,12 +404,14 @@ static struct platform_driver hab_shmem_driver = {
/* Module init: reset probe bookkeeping, then register the shmem driver. */
static int __init hab_shmem_init(void)
{
	int ret;

	/* start counting probed shmem channels from scratch */
	qvm_priv_info.probe_cnt = 0;

	ret = platform_driver_register(&hab_shmem_driver);

	return ret;
}
/* Module exit: unregister the shmem driver and clear probe bookkeeping. */
static void __exit hab_shmem_exit(void)
{
	platform_driver_unregister(&hab_shmem_driver);

	/* leave the counter clean for a possible re-init */
	qvm_priv_info.probe_cnt = 0;
}
core_initcall(hab_shmem_init);

View file

@ -30,6 +30,7 @@ struct qvm_channel {
struct tasklet_struct task;
struct guest_shm_factory *guest_factory;
struct guest_shm_control *guest_ctrl;
/* cached guest ctrl idx value to prevent trap when accessed */
uint32_t idx;
int channel;
@ -37,11 +38,15 @@ struct qvm_channel {
unsigned int guest_intr;
unsigned int guest_iid;
unsigned int factory_addr;
unsigned int irq;
};
/* Shared mem size in each direction for communication pipe */
#define PIPE_SHMEM_SIZE (128 * 1024)
void *qnx_hyp_rx_dispatch(void *data);
void hab_pipe_reset(struct physical_channel *pchan);
#endif /* __HAB_QNX_H */

View file

@ -40,6 +40,9 @@ hab_vchan_alloc(struct uhab_context *ctx, struct physical_channel *pchan)
hab_pchan_get(pchan);
vchan->pchan = pchan;
write_lock(&pchan->vchans_lock);
list_add_tail(&vchan->pnode, &pchan->vchannels);
write_unlock(&pchan->vchans_lock);
vchan->id = ((id << HAB_VCID_ID_SHIFT) & HAB_VCID_ID_MASK) |
((pchan->habdev->id << HAB_VCID_MMID_SHIFT) &
HAB_VCID_MMID_MASK) |
@ -66,19 +69,22 @@ hab_vchan_free(struct kref *ref)
struct virtual_channel *vchan =
container_of(ref, struct virtual_channel, refcount);
struct hab_message *message, *msg_tmp;
struct export_desc *exp;
struct export_desc *exp, *exp_tmp;
struct physical_channel *pchan = vchan->pchan;
struct uhab_context *ctx = vchan->ctx;
struct virtual_channel *vc, *vc_tmp;
spin_lock_bh(&vchan->rx_lock);
list_for_each_entry_safe(message, msg_tmp, &vchan->rx_list, node) {
list_del(&message->node);
hab_msg_free(message);
}
spin_unlock_bh(&vchan->rx_lock);
do {
found = 0;
write_lock(&ctx->exp_lock);
list_for_each_entry(exp, &ctx->exp_whse, node) {
list_for_each_entry_safe(exp, exp_tmp, &ctx->exp_whse, node) {
if (exp->vcid_local == vchan->id) {
list_del(&exp->node);
found = 1;
@ -95,7 +101,7 @@ hab_vchan_free(struct kref *ref)
do {
found = 0;
spin_lock_bh(&ctx->imp_lock);
list_for_each_entry(exp, &ctx->imp_whse, node) {
list_for_each_entry_safe(exp, exp_tmp, &ctx->imp_whse, node) {
if (exp->vcid_remote == vchan->id) {
list_del(&exp->node);
found = 1;
@ -117,6 +123,15 @@ hab_vchan_free(struct kref *ref)
idr_remove(&pchan->vchan_idr, HAB_VCID_GET_ID(vchan->id));
spin_unlock_bh(&pchan->vid_lock);
write_lock(&pchan->vchans_lock);
list_for_each_entry_safe(vc, vc_tmp, &pchan->vchannels, pnode) {
if (vchan == vc) {
list_del(&vc->pnode);
break;
}
}
write_unlock(&pchan->vchans_lock);
hab_pchan_put(pchan);
hab_ctx_put(ctx);
@ -124,14 +139,17 @@ hab_vchan_free(struct kref *ref)
}
struct virtual_channel*
hab_vchan_get(struct physical_channel *pchan, uint32_t vchan_id)
hab_vchan_get(struct physical_channel *pchan, struct hab_header *header)
{
struct virtual_channel *vchan;
uint32_t vchan_id = HAB_HEADER_GET_ID(*header);
uint32_t session_id = HAB_HEADER_GET_SESSION_ID(*header);
spin_lock_bh(&pchan->vid_lock);
vchan = idr_find(&pchan->vchan_idr, HAB_VCID_GET_ID(vchan_id));
if (vchan)
if (!kref_get_unless_zero(&vchan->refcount))
if ((vchan->session_id != session_id) ||
(!kref_get_unless_zero(&vchan->refcount)))
vchan = NULL;
spin_unlock_bh(&pchan->vid_lock);
@ -146,6 +164,17 @@ void hab_vchan_stop(struct virtual_channel *vchan)
}
}
void hab_vchans_stop(struct physical_channel *pchan)
{
struct virtual_channel *vchan, *tmp;
read_lock(&pchan->vchans_lock);
list_for_each_entry_safe(vchan, tmp, &pchan->vchannels, pnode) {
hab_vchan_stop(vchan);
}
read_unlock(&pchan->vchans_lock);
}
void hab_vchan_stop_notify(struct virtual_channel *vchan)
{
hab_send_close_msg(vchan);

View file

@ -117,7 +117,7 @@ int32_t habmm_import(int32_t handle, void **buff_shared, uint32_t size_bytes,
param.flags = flags;
ret = hab_mem_import(hab_driver.kctx, &param, 1);
if (!IS_ERR(ret))
if (!ret)
*buff_shared = (void *)(uintptr_t)param.kva;
return ret;

View file

@ -21,6 +21,7 @@ static inline void habhyp_notify(void *commdev)
dev->guest_ctrl->notify = ~0;
}
/* this is only used to read payload, never the head! */
int physical_channel_read(struct physical_channel *pchan,
void *payload,
size_t read_size)
@ -33,6 +34,8 @@ int physical_channel_read(struct physical_channel *pchan,
return 0;
}
#define HAB_HEAD_SIGNATURE 0xBEE1BEE1
int physical_channel_send(struct physical_channel *pchan,
struct hab_header *header,
void *payload)
@ -40,6 +43,7 @@ int physical_channel_send(struct physical_channel *pchan,
int sizebytes = HAB_HEADER_GET_SIZE(*header);
struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;
int total_size = sizeof(*header) + sizebytes;
struct timeval tv;
if (total_size > dev->pipe_ep->tx_info.sh_buf->size)
return -EINVAL; /* too much data for ring */
@ -53,6 +57,8 @@ int physical_channel_send(struct physical_channel *pchan,
return -EAGAIN; /* not enough free space */
}
header->signature = HAB_HEAD_SIGNATURE;
if (hab_pipe_write(dev->pipe_ep,
(unsigned char *)header,
sizeof(*header)) != sizeof(*header)) {
@ -60,6 +66,12 @@ int physical_channel_send(struct physical_channel *pchan,
return -EIO;
}
if (HAB_HEADER_GET_TYPE(*header) == HAB_PAYLOAD_TYPE_PROFILE) {
do_gettimeofday(&tv);
((uint64_t *)payload)[0] = tv.tv_sec;
((uint64_t *)payload)[1] = tv.tv_usec;
}
if (sizebytes) {
if (hab_pipe_write(dev->pipe_ep,
(unsigned char *)payload,
@ -89,6 +101,14 @@ void physical_channel_rx_dispatch(unsigned long data)
sizeof(header)) != sizeof(header))
break; /* no data available */
if (header.signature != HAB_HEAD_SIGNATURE) {
pr_err("HAB signature mismatch, expect %X, received %X, id_type_size %X, session %X, sequence %X\n",
HAB_HEAD_SIGNATURE, header.signature,
header.id_type_size,
header.session_id,
header.sequence);
}
hab_msg_recv(pchan, &header);
}
spin_unlock_bh(&pchan->rxbuf_lock);

View file

@ -73,8 +73,9 @@ struct hab_unimport {
#define MM_AUD_END 105
#define MM_CAM_START 200
#define MM_CAM 201
#define MM_CAM_END 202
#define MM_CAM_1 201
#define MM_CAM_2 202
#define MM_CAM_END 203
#define MM_DISP_START 300
#define MM_DISP_1 301
@ -102,7 +103,13 @@ struct hab_unimport {
#define MM_QCPE_VM3 703
#define MM_QCPE_VM4 704
#define MM_QCPE_END 705
#define MM_ID_MAX 706
#define MM_CLK_START 800
#define MM_CLK_VM1 801
#define MM_CLK_VM2 802
#define MM_CLK_END 803
#define MM_ID_MAX 804
#define HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_FE 0x00000000
#define HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_DOMU 0x00000001
@ -110,6 +117,14 @@ struct hab_unimport {
#define HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING 0x00000001
/*
* Collect cross-VM stats: client provides stat-buffer large enough to allow 2
sets of a 2-uint64_t pair to collect seconds and nano-seconds at the
* beginning of the stat-buffer. Stats are collected when the stat-buffer leaves
* VM1, then enters VM2
*/
#define HABMM_SOCKET_SEND_FLAGS_XING_VM_STAT 0x00000002
#define HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING 0x00000001
#define HABMM_EXP_MEM_TYPE_DMA 0x00000001