soc: qcom: hab: add support for multiple hypervisors

HAB (Hypervisor ABstraction) is used for message transactions
and buffer sharing among different virtual machines, with the
support of the underlying hypervisor. This change adds HAB
support for Linux running on multiple hypervisors.

Change-Id: I0747b2fa0f16c7cc0ab662369d45467ac6ba62e5
Signed-off-by: Yong Ding <yongding@codeaurora.org>
Author: Yong Ding <yongding@codeaurora.org>
Date: 2018-06-07 10:00:07 +08:00
Committed by: Shiju Mathew
parent 7bb3a011f8
commit 0f3634fa2f
19 changed files with 1493 additions and 418 deletions
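
For orientation, the ioctl surface this patch extends can be driven as in the following minimal userspace sketch. IOCTL_HAB_VC_OPEN/IOCTL_HAB_VC_CLOSE and the struct hab_open fields (mmid, vcid, timeout, flags) match the handlers in the diff below; the /dev/hab node name, the uapi header path, the struct hab_close layout, and the example mmid value are illustrative assumptions, not taken from this patch.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/habmm.h>	/* assumed uapi header exposing the HAB ioctls */

#define EXAMPLE_MMID 101	/* placeholder; real IDs come from the habmm definitions */

int main(void)
{
	struct hab_open open_param = {
		.mmid = EXAMPLE_MMID,
		.timeout = 0,	/* 0 appears to mean wait forever (see hab_open_listen) */
		.flags = 0,
	};
	int fd = open("/dev/hab", O_RDWR);	/* node name assumed */

	if (fd < 0)
		return 1;
	/* open a virtual channel; the driver fills in vcid on success */
	if (!ioctl(fd, IOCTL_HAB_VC_OPEN, &open_param)) {
		struct hab_close close_param = {
			.vcid = open_param.vcid,	/* layout assumed */
		};

		printf("vcid %x\n", open_param.vcid);
		/* ... send/recv/export traffic on this vcid ... */
		ioctl(fd, IOCTL_HAB_VC_CLOSE, &close_param);
	}
	close(fd);
	return 0;
}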

View file

@ -5,3 +5,7 @@ config MSM_HAB
Required for drivers to use the HAB API to communicate with the host
OS.
config MSM_AGL
bool "Enable built-in hab config"
help
Use the built-in configuration to set up the hab driver.

View file

@ -8,9 +8,24 @@ msm_hab-objs = \
hab_mimex.o \
hab_mem_linux.o \
hab_pipe.o \
qvm_comm.o \
hab_qvm.o \
hab_parser.o \
khab_test.o
obj-$(CONFIG_MSM_HAB) += msm_hab.o
ifdef CONFIG_GHS_VMM
msm_hab_hyp-objs = \
ghs_comm.o \
hab_ghs.o
ifndef CONFIG_MSM_AGL
ccflags-y += -DHABMM_HC_VMID
endif
else
ifdef CONFIG_MSM_GVM_QUIN
msm_hab_hyp-objs = \
qvm_comm.o \
hab_qvm.o
endif
endif
obj-$(CONFIG_MSM_HAB) += msm_hab.o msm_hab_hyp.o

View file

@ -0,0 +1,141 @@
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include "hab.h"
#include "hab_ghs.h"
int physical_channel_read(struct physical_channel *pchan,
void *payload,
size_t read_size)
{
struct ghs_vdev *dev = (struct ghs_vdev *)pchan->hyp_data;
/* size in header is only for payload excluding the header itself */
if (dev->read_size < read_size + sizeof(struct hab_header)) {
pr_warn("read %zd is less than requested %zd plus header %zd\n",
dev->read_size, read_size, sizeof(struct hab_header));
read_size = dev->read_size;
}
/* always skip the header */
memcpy(payload, (unsigned char *)dev->read_data +
sizeof(struct hab_header) + dev->read_offset, read_size);
dev->read_offset += read_size;
return read_size;
}
int physical_channel_send(struct physical_channel *pchan,
struct hab_header *header,
void *payload)
{
int sizebytes = HAB_HEADER_GET_SIZE(*header);
struct ghs_vdev *dev = (struct ghs_vdev *)pchan->hyp_data;
GIPC_Result result;
uint8_t *msg;
spin_lock_bh(&dev->io_lock);
result = GIPC_PrepareMessage(dev->endpoint, sizebytes+sizeof(*header),
(void **)&msg);
if (result == GIPC_Full) {
spin_unlock_bh(&dev->io_lock);
/* need to wait for space! */
pr_err("failed to reserve send msg for %zd bytes\n",
sizebytes+sizeof(*header));
return -EBUSY;
} else if (result != GIPC_Success) {
spin_unlock_bh(&dev->io_lock);
pr_err("failed to send due to error %d\n", result);
return -ENOMEM;
}
if (HAB_HEADER_GET_TYPE(*header) == HAB_PAYLOAD_TYPE_PROFILE) {
struct timeval tv;
struct habmm_xing_vm_stat *pstat =
(struct habmm_xing_vm_stat *)payload;
do_gettimeofday(&tv);
pstat->tx_sec = tv.tv_sec;
pstat->tx_usec = tv.tv_usec;
}
memcpy(msg, header, sizeof(*header));
if (sizebytes)
memcpy(msg+sizeof(*header), payload, sizebytes);
result = GIPC_IssueMessage(dev->endpoint, sizebytes+sizeof(*header),
header->id_type_size);
spin_unlock_bh(&dev->io_lock);
if (result != GIPC_Success) {
pr_err("send error %d, sz %zd, prot %x\n",
result, sizebytes+sizeof(*header),
header->id_type_size);
return -EAGAIN;
}
return 0;
}
void physical_channel_rx_dispatch(unsigned long physical_channel)
{
struct hab_header header;
struct physical_channel *pchan =
(struct physical_channel *)physical_channel;
struct ghs_vdev *dev = (struct ghs_vdev *)pchan->hyp_data;
GIPC_Result result;
uint32_t events;
unsigned long flags;
spin_lock_irqsave(&pchan->rxbuf_lock, flags);
events = kgipc_dequeue_events(dev->endpoint);
spin_unlock_irqrestore(&pchan->rxbuf_lock, flags);
if (events & (GIPC_EVENT_RESET))
pr_err("hab gipc %s remote vmid %d RESET\n",
dev->name, pchan->vmid_remote);
if (events & (GIPC_EVENT_RESETINPROGRESS))
pr_err("hab gipc %s remote vmid %d RESETINPROGRESS\n",
dev->name, pchan->vmid_remote);
if (events & (GIPC_EVENT_RECEIVEREADY)) {
spin_lock_bh(&pchan->rxbuf_lock);
while (1) {
dev->read_size = 0;
dev->read_offset = 0;
result = GIPC_ReceiveMessage(dev->endpoint,
dev->read_data,
GIPC_RECV_BUFF_SIZE_BYTES,
&dev->read_size,
&header.id_type_size);
if (result == GIPC_Success || dev->read_size > 0) {
/* handle corrupted msg? */
hab_msg_recv(pchan, dev->read_data);
continue;
} else if (result == GIPC_Empty) {
/* no more pending msg */
break;
}
pr_err("recv unhandled result %d, size %zd\n",
result, dev->read_size);
break;
}
spin_unlock_bh(&pchan->rxbuf_lock);
}
if (events & (GIPC_EVENT_SENDREADY))
pr_debug("kgipc send ready\n");
}

View file

@ -16,11 +16,13 @@
.name = __name__,\
.id = __id__,\
.pchannels = LIST_HEAD_INIT(hab_devices[__num__].pchannels),\
.pchan_lock = __MUTEX_INITIALIZER(hab_devices[__num__].pchan_lock),\
.pchan_lock = __SPIN_LOCK_UNLOCKED(hab_devices[__num__].pchan_lock),\
.openq_list = LIST_HEAD_INIT(hab_devices[__num__].openq_list),\
.openlock = __SPIN_LOCK_UNLOCKED(&hab_devices[__num__].openlock)\
}
static const char hab_info_str[] = "Change: 16239527 Revision: #65";
/*
* The following has to match habmm definitions, order does not matter if
* hab config does not care either. When hab config is not present, the default
@ -54,6 +56,8 @@ static struct hab_device hab_devices[] = {
struct hab_driver hab_driver = {
.ndevices = ARRAY_SIZE(hab_devices),
.devp = hab_devices,
.uctx_list = LIST_HEAD_INIT(hab_driver.uctx_list),
.drvlock = __SPIN_LOCK_UNLOCKED(hab_driver.drvlock),
};
struct uhab_context *hab_ctx_alloc(int kernel)
@ -77,6 +81,7 @@ struct uhab_context *hab_ctx_alloc(int kernel)
rwlock_init(&ctx->exp_lock);
rwlock_init(&ctx->ctx_lock);
INIT_LIST_HEAD(&ctx->pending_open);
kref_init(&ctx->refcount);
ctx->import_ctx = habmem_imp_hyp_open();
if (!ctx->import_ctx) {
@ -86,14 +91,53 @@ struct uhab_context *hab_ctx_alloc(int kernel)
}
ctx->kernel = kernel;
spin_lock_bh(&hab_driver.drvlock);
list_add_tail(&ctx->node, &hab_driver.uctx_list);
hab_driver.ctx_cnt++;
ctx->lb_be = hab_driver.b_loopback_be; /* loopback only */
hab_driver.b_loopback_be = ~hab_driver.b_loopback_be; /* loopback only*/
spin_unlock_bh(&hab_driver.drvlock);
pr_debug("ctx %pK live %d loopback be %d\n",
ctx, hab_driver.ctx_cnt, ctx->lb_be);
return ctx;
}
/* ctx can only be freed when all the vchans release their refcnt */
void hab_ctx_free(struct kref *ref)
{
struct uhab_context *ctx =
container_of(ref, struct uhab_context, refcount);
struct hab_export_ack_recvd *ack_recvd, *tmp;
struct virtual_channel *vchan;
struct physical_channel *pchan;
int i;
struct uhab_context *ctxdel, *ctxtmp;
struct hab_open_node *node;
struct export_desc *exp, *exp_tmp;
/* garbage-collect exp/imp buffers */
write_lock(&ctx->exp_lock);
list_for_each_entry_safe(exp, exp_tmp, &ctx->exp_whse, node) {
list_del(&exp->node);
pr_err("potential leak exp %d vcid %X recovered\n",
exp->export_id, exp->vcid_local);
habmem_hyp_revoke(exp->payload, exp->payload_count);
habmem_remove_export(exp);
}
write_unlock(&ctx->exp_lock);
spin_lock_bh(&ctx->imp_lock);
list_for_each_entry_safe(exp, exp_tmp, &ctx->imp_whse, node) {
list_del(&exp->node);
ctx->import_total--;
pr_warn("leaked imp %d vcid %X for ctx is collected total %d\n",
exp->export_id, exp->vcid_local,
ctx->import_total);
habmm_imp_hyp_unmap(ctx->import_ctx, exp, ctx->kernel);
kfree(exp);
}
spin_unlock_bh(&ctx->imp_lock);
habmem_imp_hyp_close(ctx->import_ctx, ctx->kernel);
@ -102,9 +146,70 @@ void hab_ctx_free(struct kref *ref)
kfree(ack_recvd);
}
/* walk vchan list to find the leakage */
spin_lock_bh(&hab_driver.drvlock);
hab_driver.ctx_cnt--;
list_for_each_entry_safe(ctxdel, ctxtmp, &hab_driver.uctx_list, node) {
if (ctxdel == ctx)
list_del(&ctxdel->node);
}
spin_unlock_bh(&hab_driver.drvlock);
pr_debug("live ctx %d refcnt %d kernel %d close %d owner %d\n",
hab_driver.ctx_cnt, get_refcnt(ctx->refcount),
ctx->kernel, ctx->closing, ctx->owner);
/* check vchans in this ctx */
write_lock(&ctx->ctx_lock);
list_for_each_entry(vchan, &ctx->vchannels, node) {
pr_warn("leak vchan id %X cnt %X remote %d in ctx\n",
vchan->id, get_refcnt(vchan->refcount),
vchan->otherend_id);
}
write_unlock(&ctx->ctx_lock);
/* check pending open */
if (ctx->pending_cnt)
pr_warn("potential leak of pendin_open nodes %d\n",
ctx->pending_cnt);
write_lock(&ctx->ctx_lock);
list_for_each_entry(node, &ctx->pending_open, node) {
pr_warn("leak pending open vcid %X type %d subid %d openid %d\n",
node->request.xdata.vchan_id, node->request.type,
node->request.xdata.sub_id,
node->request.xdata.open_id);
}
write_unlock(&ctx->ctx_lock);
/* check vchans belong to this ctx in all hab/mmid devices */
for (i = 0; i < hab_driver.ndevices; i++) {
struct hab_device *habdev = &hab_driver.devp[i];
spin_lock_bh(&habdev->pchan_lock);
list_for_each_entry(pchan, &habdev->pchannels, node) {
/* check vchan ctx owner */
write_lock(&pchan->vchans_lock);
list_for_each_entry(vchan, &pchan->vchannels, pnode) {
if (vchan->ctx == ctx) {
pr_warn("leak vcid %X cnt %d pchan %s local %d remote %d\n",
vchan->id,
get_refcnt(vchan->refcount),
pchan->name, pchan->vmid_local,
pchan->vmid_remote);
}
}
write_unlock(&pchan->vchans_lock);
}
spin_unlock_bh(&habdev->pchan_lock);
}
kfree(ctx);
}
/*
* caller needs to call vchan_put() afterwards. this is used to refcnt
* the local ioctl access based on ctx
*/
struct virtual_channel *hab_get_vchan_fromvcid(int32_t vcid,
struct uhab_context *ctx)
{
@ -140,14 +245,14 @@ struct hab_device *find_hab_device(unsigned int mm_id)
* frontend backend
* send(INIT) wait(INIT)
* wait(INIT_ACK) send(INIT_ACK)
* send(ACK) wait(ACK)
* send(INIT_DONE) wait(INIT_DONE)
*/
struct virtual_channel *frontend_open(struct uhab_context *ctx,
unsigned int mm_id,
int dom_id)
{
int ret, open_id = 0;
int ret, ret2, open_id = 0;
struct physical_channel *pchan = NULL;
struct hab_device *dev;
struct virtual_channel *vchan = NULL;
@ -155,6 +260,7 @@ struct virtual_channel *frontend_open(struct uhab_context *ctx,
struct hab_open_request request;
struct hab_open_request *recv_request;
int sub_id = HAB_MMID_GET_MINOR(mm_id);
struct hab_open_node pending_open = { { 0 } };
dev = find_hab_device(mm_id);
if (dev == NULL) {
@ -163,6 +269,7 @@ struct virtual_channel *frontend_open(struct uhab_context *ctx,
goto err;
}
/* guest can find its own id */
pchan = hab_pchan_find_domid(dev, dom_id);
if (!pchan) {
pr_err("hab_pchan_find_domid failed: dom_id=%d\n", dom_id);
@ -170,44 +277,82 @@ struct virtual_channel *frontend_open(struct uhab_context *ctx,
goto err;
}
vchan = hab_vchan_alloc(ctx, pchan);
open_id = atomic_inc_return(&open_id_counter);
vchan = hab_vchan_alloc(ctx, pchan, open_id);
if (!vchan) {
pr_err("vchan alloc failed\n");
ret = -ENOMEM;
goto err;
}
} else
/* Send Init sequence */
open_id = atomic_inc_return(&open_id_counter);
hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT, pchan,
vchan->id, sub_id, open_id);
request.xdata.ver_fe = HAB_API_VER;
ret = hab_open_request_send(&request);
if (ret) {
pr_err("hab_open_request_send failed: %d\n", ret);
goto err;
}
pending_open.request = request;
/* during wait app could be terminated */
hab_open_pending_enter(ctx, pchan, &pending_open);
/* Wait for Init-Ack sequence */
hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_ACK, pchan,
0, sub_id, open_id);
/* wait forever */
ret = hab_open_listen(ctx, dev, &request, &recv_request, 0);
if (ret || !recv_request) {
pr_err("hab_open_listen failed: %d\n", ret);
if (!ret && recv_request && ((recv_request->xdata.ver_fe & 0xFFFF0000)
!= (recv_request->xdata.ver_be & 0xFFFF0000))) {
/* version check */
pr_err("hab major version mismatch fe %X be %X on mmid %d\n",
recv_request->xdata.ver_fe,
recv_request->xdata.ver_be, mm_id);
hab_open_pending_exit(ctx, pchan, &pending_open);
ret = -EPROTO;
goto err;
} else if (ret || !recv_request) {
pr_err("hab_open_listen failed: %d, send cancel vcid %x subid %d openid %d\n",
ret, vchan->id,
sub_id, open_id);
/* send cancel to BE due to FE's local close */
hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_CANCEL,
pchan, vchan->id, sub_id, open_id);
request.xdata.ver_fe = HAB_API_VER;
ret2 = hab_open_request_send(&request);
if (ret2)
pr_err("send init_cancel failed %d on vcid %x\n", ret2,
vchan->id);
hab_open_pending_exit(ctx, pchan, &pending_open);
ret = -EINVAL;
goto err;
}
vchan->otherend_id = recv_request->vchan_id;
/* remove pending open locally after good pairing */
hab_open_pending_exit(ctx, pchan, &pending_open);
pr_debug("hab version match fe %X be %X on mmid %d\n",
recv_request->xdata.ver_fe, recv_request->xdata.ver_be,
mm_id);
vchan->otherend_id = recv_request->xdata.vchan_id;
hab_open_request_free(recv_request);
vchan->session_id = open_id;
pr_debug("vchan->session_id:%d\n", vchan->session_id);
/* Send Ack sequence */
hab_open_request_init(&request, HAB_PAYLOAD_TYPE_ACK, pchan,
hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_DONE, pchan,
0, sub_id, open_id);
request.xdata.ver_fe = HAB_API_VER;
ret = hab_open_request_send(&request);
if (ret)
if (ret) {
pr_err("failed to send init-done vcid %x remote %x openid %d\n",
vchan->id, vchan->otherend_id, vchan->session_id);
goto err;
}
hab_pchan_put(pchan);
@ -222,10 +367,10 @@ err:
}
struct virtual_channel *backend_listen(struct uhab_context *ctx,
unsigned int mm_id)
unsigned int mm_id, int timeout)
{
int ret;
int open_id;
int ret, ret2;
int open_id, ver_fe;
int sub_id = HAB_MMID_GET_MINOR(mm_id);
struct physical_channel *pchan = NULL;
struct hab_device *dev;
@ -233,6 +378,7 @@ struct virtual_channel *backend_listen(struct uhab_context *ctx,
struct hab_open_request request;
struct hab_open_request *recv_request;
uint32_t otherend_vchan_id;
struct hab_open_node pending_open = { { 0 } };
dev = find_hab_device(mm_id);
if (dev == NULL) {
@ -245,19 +391,50 @@ struct virtual_channel *backend_listen(struct uhab_context *ctx,
/* Wait for Init sequence */
hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT,
NULL, 0, sub_id, 0);
ret = hab_open_listen(ctx, dev, &request, &recv_request, 0);
/* cancel should not happen at this moment */
ret = hab_open_listen(ctx, dev, &request, &recv_request,
timeout);
if (ret || !recv_request) {
pr_err("hab_open_listen failed: %d\n", ret);
if (!ret && !recv_request)
ret = -EINVAL;
if (-EAGAIN == ret) {
ret = -ETIMEDOUT;
} else {
/* device is closed */
pr_err("open request wait failed ctx closing %d\n",
ctx->closing);
}
goto err;
} else if (!ret && recv_request &&
((recv_request->xdata.ver_fe & 0xFFFF0000) !=
(HAB_API_VER & 0xFFFF0000))) {
int ret2;
/* version check */
pr_err("version mismatch fe %X be %X on mmid %d\n",
recv_request->xdata.ver_fe, HAB_API_VER, mm_id);
hab_open_request_init(&request,
HAB_PAYLOAD_TYPE_INIT_ACK,
NULL, 0, sub_id, recv_request->xdata.open_id);
request.xdata.ver_be = HAB_API_VER;
/* reply to allow FE to bail out */
ret2 = hab_open_request_send(&request);
if (ret2)
pr_err("send FE version mismatch failed mmid %d sub %d\n",
mm_id, sub_id);
ret = -EPROTO;
goto err;
}
otherend_vchan_id = recv_request->vchan_id;
open_id = recv_request->open_id;
/* guest id from guest */
otherend_vchan_id = recv_request->xdata.vchan_id;
open_id = recv_request->xdata.open_id;
ver_fe = recv_request->xdata.ver_fe;
pchan = recv_request->pchan;
hab_pchan_get(pchan);
hab_open_request_free(recv_request);
recv_request = NULL;
vchan = hab_vchan_alloc(ctx, pchan);
vchan = hab_vchan_alloc(ctx, pchan, open_id);
if (!vchan) {
ret = -ENOMEM;
goto err;
@ -265,23 +442,64 @@ struct virtual_channel *backend_listen(struct uhab_context *ctx,
vchan->otherend_id = otherend_vchan_id;
vchan->session_id = open_id;
pr_debug("vchan->session_id:%d\n", vchan->session_id);
/* Send Init-Ack sequence */
hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_ACK,
pchan, vchan->id, sub_id, open_id);
request.xdata.ver_fe = ver_fe; /* carry over */
request.xdata.ver_be = HAB_API_VER;
ret = hab_open_request_send(&request);
if (ret)
goto err;
/* Wait for Ack sequence */
hab_open_request_init(&request, HAB_PAYLOAD_TYPE_ACK,
pchan, 0, sub_id, open_id);
ret = hab_open_listen(ctx, dev, &request, &recv_request, 0);
pending_open.request = request;
/* wait only after init-ack is sent */
hab_open_pending_enter(ctx, pchan, &pending_open);
if (ret != -EAGAIN)
/* Wait for Ack sequence */
hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_DONE,
pchan, 0, sub_id, open_id);
ret = hab_open_listen(ctx, dev, &request, &recv_request,
HAB_HS_TIMEOUT);
hab_open_pending_exit(ctx, pchan, &pending_open);
if (ret && recv_request &&
recv_request->type == HAB_PAYLOAD_TYPE_INIT_CANCEL) {
pr_err("listen cancelled vcid %x subid %d openid %d ret %d\n",
request.xdata.vchan_id, request.xdata.sub_id,
request.xdata.open_id, ret);
/* FE cancels this session.
* So BE has to cancel its too
*/
hab_open_request_init(&request,
HAB_PAYLOAD_TYPE_INIT_CANCEL, pchan,
vchan->id, sub_id, open_id);
ret2 = hab_open_request_send(&request);
if (ret2)
pr_err("send init_ack failed %d on vcid %x\n",
ret2, vchan->id);
hab_open_pending_exit(ctx, pchan, &pending_open);
ret = -ENODEV; /* open request cancelled remotely */
break;
} else if (ret != -EAGAIN) {
hab_open_pending_exit(ctx, pchan, &pending_open);
break; /* received something. good case! */
}
/* stay in the loop retry */
pr_warn("retry open ret %d vcid %X remote %X sub %d open %d\n",
ret, vchan->id, vchan->otherend_id, sub_id, open_id);
/* retry path starting here. free previous vchan */
hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_CANCEL,
pchan, vchan->id, sub_id, open_id);
request.xdata.ver_fe = ver_fe;
request.xdata.ver_be = HAB_API_VER;
ret2 = hab_open_request_send(&request);
if (ret2)
pr_err("send init_ack failed %d on vcid %x\n", ret2,
vchan->id);
hab_open_pending_exit(ctx, pchan, &pending_open);
hab_vchan_put(vchan);
vchan = NULL;
@ -290,7 +508,7 @@ struct virtual_channel *backend_listen(struct uhab_context *ctx,
}
if (ret || !recv_request) {
pr_err("backend_listen failed: %d\n", ret);
pr_err("backend mmid %d listen error %d\n", mm_id, ret);
ret = -EINVAL;
goto err;
}
@ -299,7 +517,8 @@ struct virtual_channel *backend_listen(struct uhab_context *ctx,
hab_pchan_put(pchan);
return vchan;
err:
pr_err("listen on mmid %d failed\n", mm_id);
if (ret != -ETIMEDOUT)
pr_err("listen on mmid %d failed\n", mm_id);
if (vchan)
hab_vchan_put(vchan);
if (pchan)
@ -318,8 +537,9 @@ long hab_vchan_send(struct uhab_context *ctx,
struct hab_header header = HAB_HEADER_INITIALIZER;
int nonblocking_flag = flags & HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING;
if (sizebytes > HAB_MAX_MSG_SIZEBYTES) {
pr_err("Message too large, %lu bytes\n", sizebytes);
if (sizebytes > HAB_HEADER_SIZE_MASK) {
pr_err("Message too large, %lu bytes, max is %d\n",
sizebytes, HAB_HEADER_SIZE_MASK);
return -EINVAL;
}
@ -330,11 +550,17 @@ long hab_vchan_send(struct uhab_context *ctx,
}
HAB_HEADER_SET_SIZE(header, sizebytes);
if (flags & HABMM_SOCKET_SEND_FLAGS_XING_VM_STAT)
if (flags & HABMM_SOCKET_SEND_FLAGS_XING_VM_STAT) {
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_PROFILE);
else
if (sizebytes < sizeof(struct habmm_xing_vm_stat)) {
pr_err("wrong profiling buffer size %zd, expect %zd\n",
sizebytes,
sizeof(struct habmm_xing_vm_stat));
return -EINVAL;
}
} else {
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_MSG);
}
HAB_HEADER_SET_ID(header, vchan->otherend_id);
HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);
@ -347,8 +573,6 @@ long hab_vchan_send(struct uhab_context *ctx,
schedule();
}
err:
if (vchan)
hab_vchan_put(vchan);
@ -403,23 +627,22 @@ bool hab_is_loopback(void)
int hab_vchan_open(struct uhab_context *ctx,
unsigned int mmid,
int32_t *vcid,
int32_t timeout,
uint32_t flags)
{
struct virtual_channel *vchan = NULL;
struct hab_device *dev;
pr_debug("Open mmid=%d, loopback mode=%d, loopback num=%d\n",
mmid, hab_driver.b_loopback, hab_driver.loopback_num);
pr_debug("Open mmid=%d, loopback mode=%d, loopback be ctx %d\n",
mmid, hab_driver.b_loopback, ctx->lb_be);
if (!vcid)
return -EINVAL;
if (hab_is_loopback()) {
if (!hab_driver.loopback_num) {
hab_driver.loopback_num = 1;
vchan = backend_listen(ctx, mmid);
if (ctx->lb_be) {
vchan = backend_listen(ctx, mmid, timeout);
} else {
hab_driver.loopback_num = 0;
vchan = frontend_open(ctx, mmid, LOOPBACK_DOM);
}
} else {
@ -427,28 +650,37 @@ int hab_vchan_open(struct uhab_context *ctx,
if (dev) {
struct physical_channel *pchan =
hab_pchan_find_domid(dev, HABCFG_VMID_DONT_CARE);
if (pchan->is_be)
vchan = backend_listen(ctx, mmid);
else
vchan = frontend_open(ctx, mmid,
HABCFG_VMID_DONT_CARE);
hab_pchan_find_domid(dev,
HABCFG_VMID_DONT_CARE);
if (pchan) {
if (pchan->is_be)
vchan = backend_listen(ctx, mmid,
timeout);
else
vchan = frontend_open(ctx, mmid,
HABCFG_VMID_DONT_CARE);
} else {
pr_err("open on nonexistent pchan (mmid %x)",
mmid);
return -ENODEV;
}
} else {
pr_err("failed to find device, mmid %d\n", mmid);
}
}
if (IS_ERR(vchan)) {
pr_err("vchan open failed over mmid=%d\n", mmid);
if (-ETIMEDOUT != PTR_ERR(vchan) && -EAGAIN != PTR_ERR(vchan))
pr_err("vchan open failed mmid=%d\n", mmid);
return PTR_ERR(vchan);
}
pr_debug("vchan id %x, remote id %x\n",
vchan->id, vchan->otherend_id);
pr_debug("vchan id %x remote id %x session %d\n", vchan->id,
vchan->otherend_id, vchan->session_id);
write_lock(&ctx->ctx_lock);
list_add_tail(&vchan->node, &ctx->vchannels);
ctx->vcnt++;
write_unlock(&ctx->ctx_lock);
*vcid = vchan->id;
@ -469,17 +701,6 @@ void hab_send_close_msg(struct virtual_channel *vchan)
}
}
static void hab_vchan_close_impl(struct kref *ref)
{
struct virtual_channel *vchan =
container_of(ref, struct virtual_channel, usagecnt);
list_del(&vchan->node);
hab_vchan_stop_notify(vchan);
hab_vchan_put(vchan);
}
void hab_vchan_close(struct uhab_context *ctx, int32_t vcid)
{
struct virtual_channel *vchan, *tmp;
@ -490,11 +711,29 @@ void hab_vchan_close(struct uhab_context *ctx, int32_t vcid)
write_lock(&ctx->ctx_lock);
list_for_each_entry_safe(vchan, tmp, &ctx->vchannels, node) {
if (vchan->id == vcid) {
kref_put(&vchan->usagecnt, hab_vchan_close_impl);
write_unlock(&ctx->ctx_lock);
pr_debug("vcid %x remote %x session %d refcnt %d\n",
vchan->id, vchan->otherend_id,
vchan->session_id, get_refcnt(vchan->refcount));
/*
* only set when vchan close() is called locally by the user
* explicitly. Used to block remote msgs. If forked once
* before, this local close is skipped due to child
* usage. If forked but not closed locally, the local
* context could NOT be closed, and the vchan can be
* prolonged by arriving remote msgs
*/
if (vchan->forked)
vchan->forked = 0;
else {
vchan->closed = 1;
hab_vchan_stop_notify(vchan);
}
hab_vchan_put(vchan); /* there is a lock inside */
write_lock(&ctx->ctx_lock);
break;
}
}
write_unlock(&ctx->ctx_lock);
}
@ -511,7 +750,7 @@ static int hab_initialize_pchan_entry(struct hab_device *mmid_device,
char pchan_name[MAX_VMID_NAME_SIZE];
struct physical_channel *pchan = NULL;
int ret;
int vmid = is_be ? vmid_remote : vmid_local;
int vmid = is_be ? vmid_remote : vmid_local; /* used for naming only */
if (!mmid_device) {
pr_err("habdev %pK, vmid local %d, remote %d, is be %d\n",
@ -541,7 +780,11 @@ static int hab_initialize_pchan_entry(struct hab_device *mmid_device,
return ret;
}
static void hab_generate_pchan(struct local_vmid *settings, int i, int j)
/*
* generate pchan list based on hab settings table.
* return status 0: success, otherwise failure
*/
static int hab_generate_pchan(struct local_vmid *settings, int i, int j)
{
int k, ret = 0;
@ -657,6 +900,7 @@ static void hab_generate_pchan(struct local_vmid *settings, int i, int j)
break;
}
return ret;
}
/*
@ -665,7 +909,7 @@ static void hab_generate_pchan(struct local_vmid *settings, int i, int j)
*/
static int hab_generate_pchan_list(struct local_vmid *settings)
{
int i, j;
int i, j, ret = 0;
/* scan by valid VMs, then mmid */
pr_debug("self vmid is %d\n", settings->self);
@ -677,24 +921,34 @@ static int hab_generate_pchan_list(struct local_vmid *settings)
for (j = 1; j <= HABCFG_MMID_AREA_MAX; j++) {
if (HABCFG_GET_MMID(settings, i, j)
!= HABCFG_VMID_INVALID)
hab_generate_pchan(settings, i, j);
ret = hab_generate_pchan(settings,
i, j);
}
}
}
return 0;
return ret;
}
/*
* This function checks hypervisor plug-in readiness, read in hab configs,
* and configure pchans
*/
#ifdef HABMM_HC_VMID
#define DEFAULT_GVMID 3
#else
#define DEFAULT_GVMID 2
#endif
int do_hab_parse(void)
{
int result;
int i;
struct hab_device *device;
int pchan_total = 0;
/* single GVM is 2, multigvm is 2 or 3. GHS LV-GVM 2, LA-GVM 3 */
int default_gvmid = DEFAULT_GVMID;
pr_debug("hab parse starts for %s\n", hab_info_str);
/* first check if hypervisor plug-in is ready */
result = hab_hypervisor_register();
@ -703,7 +957,10 @@ int do_hab_parse(void)
return result;
}
/* Initialize open Q before first pchan starts */
/*
* Initialize open Q before first pchan starts.
* Each is for one pchan list
*/
for (i = 0; i < hab_driver.ndevices; i++) {
device = &hab_driver.devp[i];
init_waitqueue_head(&device->openq);
@ -712,12 +969,12 @@ int do_hab_parse(void)
/* read in hab config and create pchans*/
memset(&hab_driver.settings, HABCFG_VMID_INVALID,
sizeof(hab_driver.settings));
result = hab_parse(&hab_driver.settings);
if (result) {
pr_warn("hab_parse failed and use the default settings\n");
fill_default_gvm_settings(&hab_driver.settings, 2,
MM_AUD_START, MM_ID_MAX);
pr_err("hab config open failed, prepare default gvm %d settings\n",
default_gvmid);
fill_default_gvm_settings(&hab_driver.settings, default_gvmid,
MM_AUD_START, MM_ID_MAX);
}
/* now generate hab pchan list */
@ -725,6 +982,7 @@ int do_hab_parse(void)
if (result) {
pr_err("generate pchan list failed, ret %d\n", result);
} else {
int pchan_total = 0;
for (i = 0; i < hab_driver.ndevices; i++) {
device = &hab_driver.devp[i];
pchan_total += device->pchan_cnt;
@ -736,6 +994,48 @@ int do_hab_parse(void)
return result;
}
int get_refcnt(struct kref ref)
{
return ref.refcount.counter;
}
void hab_hypervisor_unregister_common(void)
{
int status, i;
struct uhab_context *ctx;
struct virtual_channel *vchan;
for (i = 0; i < hab_driver.ndevices; i++) {
struct hab_device *habdev = &hab_driver.devp[i];
struct physical_channel *pchan, *pchan_tmp;
list_for_each_entry_safe(pchan, pchan_tmp,
&habdev->pchannels, node) {
status = habhyp_commdev_dealloc(pchan);
if (status) {
pr_err("failed to free pchan %pK, i %d, ret %d\n",
pchan, i, status);
}
}
}
/* detect leaking uctx */
spin_lock_bh(&hab_driver.drvlock);
list_for_each_entry(ctx, &hab_driver.uctx_list, node) {
pr_warn("leaking ctx owner %d refcnt %d kernel %d\n",
ctx->owner, get_refcnt(ctx->refcount), ctx->kernel);
/* further check vchan leak */
read_lock(&ctx->ctx_lock);
list_for_each_entry(vchan, &ctx->vchannels, node) {
pr_warn("leaking vchan id %X remote %X refcnt %d\n",
vchan->id, vchan->otherend_id,
get_refcnt(vchan->refcount));
}
read_unlock(&ctx->ctx_lock);
}
spin_unlock_bh(&hab_driver.drvlock);
}
static int hab_open(struct inode *inodep, struct file *filep)
{
int result = 0;
@ -749,7 +1049,10 @@ static int hab_open(struct inode *inodep, struct file *filep)
return -ENOMEM;
}
ctx->owner = task_pid_nr(current);
filep->private_data = ctx;
pr_debug("ctx owner %d refcnt %d\n", ctx->owner,
get_refcnt(ctx->refcount));
return result;
}
@ -758,25 +1061,50 @@ static int hab_release(struct inode *inodep, struct file *filep)
{
struct uhab_context *ctx = filep->private_data;
struct virtual_channel *vchan, *tmp;
struct hab_open_node *node;
if (!ctx)
return 0;
pr_debug("inode %pK, filep %pK\n", inodep, filep);
pr_debug("inode %pK, filep %pK ctx %pK\n", inodep, filep, ctx);
write_lock(&ctx->ctx_lock);
/* notify remote side on vchan closing */
list_for_each_entry_safe(vchan, tmp, &ctx->vchannels, node) {
list_del(&vchan->node);
list_del(&vchan->node); /* vchan is not in this ctx anymore */
hab_vchan_stop_notify(vchan);
hab_vchan_put(vchan);
write_unlock(&ctx->ctx_lock);
if (!vchan->closed) {
pr_warn("potential leak vc %pK %x remote %x session %d refcnt %d\n",
vchan, vchan->id, vchan->otherend_id,
vchan->session_id,
get_refcnt(vchan->refcount));
hab_vchan_put(vchan); /* there is a lock inside */
}
write_lock(&ctx->ctx_lock);
}
/* notify remote side on pending open */
list_for_each_entry(node, &ctx->pending_open, node) {
/* do not touch the list itself; it is allocated on the stack */
if (hab_open_cancel_notify(&node->request))
pr_err("failed to send open cancel vcid %x subid %d openid %d pchan %s\n",
node->request.xdata.vchan_id,
node->request.xdata.sub_id,
node->request.xdata.open_id,
node->request.pchan->habdev->name);
}
write_unlock(&ctx->ctx_lock);
hab_ctx_put(ctx);
filep->private_data = NULL;
/* ctx leak check */
if (get_refcnt(ctx->refcount))
pr_warn("pending ctx release owner %d refcnt %d total %d\n",
ctx->owner, get_refcnt(ctx->refcount),
hab_driver.ctx_cnt);
return 0;
}
@ -809,7 +1137,9 @@ static long hab_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
case IOCTL_HAB_VC_OPEN:
open_param = (struct hab_open *)data;
ret = hab_vchan_open(ctx, open_param->mmid,
&open_param->vcid, open_param->flags);
&open_param->vcid,
open_param->timeout,
open_param->flags);
break;
case IOCTL_HAB_VC_CLOSE:
close_param = (struct hab_close *)data;
@ -858,6 +1188,9 @@ static long hab_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
recv_param->sizebytes = 0;
ret = -EFAULT;
}
} else if (ret && msg) {
pr_warn("vcid %X recv failed %d and msg is still of %zd bytes\n",
recv_param->vcid, (int)ret, msg->sizebytes);
}
if (msg)
@ -879,22 +1212,22 @@ static long hab_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
info_param = (struct hab_info *)data;
if (!info_param->names || !info_param->namesize ||
info_param->namesize > sizeof(names)) {
pr_err("wrong vm info vcid %X, names %llX, sz %d\n",
info_param->vcid, info_param->names,
info_param->namesize);
pr_err("wrong param for vm info vcid %X, names %llX, sz %d\n",
info_param->vcid, info_param->names,
info_param->namesize);
ret = -EINVAL;
break;
}
ret = hab_vchan_query(ctx, info_param->vcid,
(uint64_t *)&info_param->ids,
names, info_param->namesize, 0);
names, info_param->namesize, 0);
if (!ret) {
if (copy_to_user((void __user *)info_param->names,
names,
info_param->namesize)) {
pr_err("copy_to_user failed: vc=%x size=%d\n",
info_param->vcid,
info_param->namesize*2);
info_param->vcid,
info_param->namesize*2);
info_param->namesize = 0;
ret = -EFAULT;
}
@ -904,7 +1237,7 @@ static long hab_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
ret = -ENOIOCTLCMD;
}
if (ret == 0 && _IOC_SIZE(cmd) && (cmd & IOC_OUT))
if (_IOC_SIZE(cmd) && (cmd & IOC_OUT))
if (copy_to_user((void __user *) arg, data, _IOC_SIZE(cmd))) {
pr_err("copy_to_user failed: cmd=%x\n", cmd);
ret = -EFAULT;
@ -955,6 +1288,26 @@ static const struct dma_map_ops hab_dma_ops = {
.unmap_sg = hab_unmap_sg,
};
static int hab_power_down_callback(
struct notifier_block *nfb, unsigned long action, void *data)
{
switch (action) {
case SYS_DOWN:
case SYS_HALT:
case SYS_POWER_OFF:
pr_debug("reboot called %ld\n", action);
hab_hypervisor_unregister(); /* only for single VM guest */
break;
}
pr_debug("reboot called %ld done\n", action);
return NOTIFY_DONE;
}
static struct notifier_block hab_reboot_notifier = {
.notifier_call = hab_power_down_callback,
};
static int __init hab_init(void)
{
int result;
@ -997,6 +1350,10 @@ static int __init hab_init(void)
goto err;
}
result = register_reboot_notifier(&hab_reboot_notifier);
if (result)
pr_err("failed to register reboot notifier %d\n", result);
/* read in hab config, then configure pchans */
result = do_hab_parse();
@ -1007,12 +1364,10 @@ static int __init hab_init(void)
result = -ENOMEM;
hab_hypervisor_unregister();
goto err;
}
set_dma_ops(hab_driver.dev, &hab_dma_ops);
return result;
} else
set_dma_ops(hab_driver.dev, &hab_dma_ops);
}
return result;
err:
if (!IS_ERR_OR_NULL(hab_driver.dev))
@ -1037,6 +1392,8 @@ static void __exit hab_exit(void)
class_destroy(hab_driver.class);
cdev_del(&hab_driver.cdev);
unregister_chrdev_region(dev, 1);
unregister_reboot_notifier(&hab_reboot_notifier);
pr_debug("hab exit called\n");
}
subsys_initcall(hab_init);

View file

@ -16,7 +16,7 @@
#ifdef pr_fmt
#undef pr_fmt
#endif
#define pr_fmt(fmt) "|hab:%s:%d|" fmt, __func__, __LINE__
#define pr_fmt(fmt) "hab:%s:%d " fmt, __func__, __LINE__
#include <linux/types.h>
@ -41,16 +41,19 @@
#include <linux/uaccess.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/reboot.h>
enum hab_payload_type {
HAB_PAYLOAD_TYPE_MSG = 0x0,
HAB_PAYLOAD_TYPE_INIT,
HAB_PAYLOAD_TYPE_INIT_ACK,
HAB_PAYLOAD_TYPE_ACK,
HAB_PAYLOAD_TYPE_INIT_DONE,
HAB_PAYLOAD_TYPE_EXPORT,
HAB_PAYLOAD_TYPE_EXPORT_ACK,
HAB_PAYLOAD_TYPE_PROFILE,
HAB_PAYLOAD_TYPE_CLOSE,
HAB_PAYLOAD_TYPE_INIT_CANCEL,
HAB_PAYLOAD_TYPE_MAX,
};
#define LOOPBACK_DOM 0xFF
@ -128,21 +131,21 @@ struct hab_header {
/* "Size" of the HAB_HEADER_ID and HAB_VCID_ID must match */
#define HAB_HEADER_SIZE_SHIFT 0
#define HAB_HEADER_TYPE_SHIFT 16
#define HAB_HEADER_ID_SHIFT 24
#define HAB_HEADER_ID_SHIFT 20
#define HAB_HEADER_SIZE_MASK 0x0000FFFF
#define HAB_HEADER_TYPE_MASK 0x00FF0000
#define HAB_HEADER_ID_MASK 0xFF000000
#define HAB_HEADER_TYPE_MASK 0x000F0000
#define HAB_HEADER_ID_MASK 0xFFF00000
#define HAB_HEADER_INITIALIZER {0}
#define HAB_MMID_GET_MAJOR(mmid) (mmid & 0xFFFF)
#define HAB_MMID_GET_MINOR(mmid) ((mmid>>16) & 0xFF)
#define HAB_VCID_ID_SHIFT 0
#define HAB_VCID_DOMID_SHIFT 8
#define HAB_VCID_MMID_SHIFT 16
#define HAB_VCID_ID_MASK 0x000000FF
#define HAB_VCID_DOMID_MASK 0x0000FF00
#define HAB_VCID_MMID_MASK 0xFFFF0000
#define HAB_VCID_DOMID_SHIFT 12
#define HAB_VCID_MMID_SHIFT 20
#define HAB_VCID_ID_MASK 0x00000FFF
#define HAB_VCID_DOMID_MASK 0x000FF000
#define HAB_VCID_MMID_MASK 0xFFF00000
#define HAB_VCID_GET_ID(vcid) \
(((vcid) & HAB_VCID_ID_MASK) >> HAB_VCID_ID_SHIFT)
@ -182,12 +185,14 @@ struct hab_header {
#define HAB_HEADER_GET_SESSION_ID(header) ((header).session_id)
#define HAB_HS_TIMEOUT (10*1000*1000)
struct physical_channel {
struct list_head node;
char name[MAX_VMID_NAME_SIZE];
int is_be;
struct kref refcount;
struct hab_device *habdev;
struct list_head node;
struct idr vchan_idr;
spinlock_t vid_lock;
@ -195,38 +200,44 @@ struct physical_channel {
spinlock_t expid_lock;
void *hyp_data;
int dom_id;
int vmid_local;
int dom_id; /* BE role: remote vmid; FE role: don't care */
int vmid_local; /* from DT or hab_config */
int vmid_remote;
char vmname_local[12];
char vmname_local[12]; /* from DT */
char vmname_remote[12];
int closed;
spinlock_t rxbuf_lock;
/* vchans over this pchan */
/* debug only */
uint32_t sequence_tx;
uint32_t sequence_rx;
/* vchans on this pchan */
struct list_head vchannels;
int vcnt;
rwlock_t vchans_lock;
};
/* this payload has to be used together with type */
struct hab_open_send_data {
int vchan_id;
int sub_id;
int open_id;
int ver_fe;
int ver_be;
int reserved;
};
struct hab_open_request {
int type;
struct physical_channel *pchan;
int vchan_id;
int sub_id;
int open_id;
struct hab_open_send_data xdata;
};
struct hab_open_node {
struct hab_open_request request;
struct list_head node;
int age;
int64_t age; /* sec */
};
struct hab_export_ack {
@ -247,20 +258,25 @@ struct hab_message {
uint32_t data[];
};
/* for all the pchans of same kind */
struct hab_device {
char name[MAX_VMID_NAME_SIZE];
unsigned int id;
uint32_t id;
struct list_head pchannels;
int pchan_cnt;
struct mutex pchan_lock;
struct list_head openq_list;
spinlock_t pchan_lock;
struct list_head openq_list; /* received */
spinlock_t openlock;
wait_queue_head_t openq;
int openq_cnt;
};
struct uhab_context {
struct list_head node; /* managed by the driver */
struct kref refcount;
struct list_head vchannels;
int vcnt;
struct list_head exp_whse;
uint32_t export_total;
@ -276,9 +292,15 @@ struct uhab_context {
void *import_ctx;
struct list_head pending_open; /* sent to remote */
int pending_cnt;
rwlock_t ctx_lock;
int closing;
int kernel;
int owner;
int lb_be; /* loopback only */
};
/*
@ -297,7 +319,7 @@ struct local_vmid {
};
struct hab_driver {
struct device *dev;
struct device *dev; /* mmid dev list */
struct cdev cdev;
dev_t major;
struct class *class;
@ -305,33 +327,30 @@ struct hab_driver {
struct hab_device *devp;
struct uhab_context *kctx;
struct list_head uctx_list;
int ctx_cnt;
spinlock_t drvlock;
struct local_vmid settings; /* parser results */
int b_server_dom;
int loopback_num;
int b_loopback_be; /* only allow 2 apps simultaneously 1 fe 1 be */
int b_loopback;
void *hyp_priv; /* hypervisor plug-in storage */
};
struct virtual_channel {
struct work_struct work;
/*
* refcount is used to track the references from hab core to the virtual
* channel such as references from physical channels,
* i.e. references from the "other" side
*/
struct kref refcount;
/*
* usagecnt is used to track the clients who are using this virtual
* channel such as local clients, client software etc,
* i.e. references from "this" side
*/
struct kref usagecnt;
struct physical_channel *pchan;
struct uhab_context *ctx;
struct list_head node;
struct list_head pnode;
struct list_head node; /* for ctx */
struct list_head pnode; /* for pchan */
struct list_head rx_list;
wait_queue_head_t rx_queue;
spinlock_t rx_lock;
@ -339,6 +358,14 @@ struct virtual_channel {
int otherend_id;
int otherend_closed;
uint32_t session_id;
/*
* set when local close() is called explicitly. vchan could be
* used in the hab-recv-msg() path (2) and then close() is called (1).
* this is the same case as when close is not called and there is no msg path
*/
int closed;
int forked; /* if fork is detected and assume only once */
};
/*
@ -351,12 +378,15 @@ struct export_desc {
int readonly;
uint64_t import_index;
struct virtual_channel *vchan;
struct virtual_channel *vchan; /* vchan could be freed earlier */
struct uhab_context *ctx;
struct physical_channel *pchan;
int32_t vcid_local;
int32_t vcid_remote;
int domid_local;
int domid_remote;
int flags;
struct list_head node;
void *kva;
@ -365,7 +395,8 @@ struct export_desc {
} __packed;
int hab_vchan_open(struct uhab_context *ctx,
unsigned int mmid, int32_t *vcid, uint32_t flags);
unsigned int mmid, int32_t *vcid,
int32_t timeout, uint32_t flags);
void hab_vchan_close(struct uhab_context *ctx,
int32_t vcid);
long hab_vchan_send(struct uhab_context *ctx,
@ -401,13 +432,17 @@ int habmem_hyp_grant_user(unsigned long address,
int page_count,
int flags,
int remotedom,
void *ppdata);
void *ppdata,
int *compressed,
int *compressed_size);
int habmem_hyp_grant(unsigned long address,
int page_count,
int flags,
int remotedom,
void *ppdata);
void *ppdata,
int *compressed,
int *compressed_size);
int habmem_hyp_revoke(void *expdata, uint32_t count);
@ -417,7 +452,7 @@ void habmem_imp_hyp_close(void *priv, int kernel);
int habmem_imp_hyp_map(void *imp_ctx, struct hab_import *param,
struct export_desc *exp, int kernel);
int habmm_imp_hyp_unmap(void *imp_ctx, struct export_desc *exp);
int habmm_imp_hyp_unmap(void *imp_ctx, struct export_desc *exp, int kernel);
int habmem_imp_hyp_mmap(struct file *flip, struct vm_area_struct *vma);
@ -427,7 +462,7 @@ void hab_msg_free(struct hab_message *message);
int hab_msg_dequeue(struct virtual_channel *vchan,
struct hab_message **msg, int *rsize, unsigned int flags);
void hab_msg_recv(struct physical_channel *pchan,
int hab_msg_recv(struct physical_channel *pchan,
struct hab_header *header);
void hab_open_request_init(struct hab_open_request *request,
@ -447,7 +482,7 @@ int hab_open_listen(struct uhab_context *ctx,
int ms_timeout);
struct virtual_channel *hab_vchan_alloc(struct uhab_context *ctx,
struct physical_channel *pchan);
struct physical_channel *pchan, int openid);
struct virtual_channel *hab_vchan_get(struct physical_channel *pchan,
struct hab_header *header);
void hab_vchan_put(struct virtual_channel *vchan);
@ -482,6 +517,7 @@ static inline void hab_ctx_put(struct uhab_context *ctx)
void hab_send_close_msg(struct virtual_channel *vchan);
int hab_hypervisor_register(void);
void hab_hypervisor_unregister(void);
void hab_hypervisor_unregister_common(void);
int habhyp_commdev_alloc(void **commdev, int is_be, char *name,
int vmid_remote, struct hab_device *mmid_device);
int habhyp_commdev_dealloc(void *commdev);
@ -496,7 +532,7 @@ int physical_channel_send(struct physical_channel *pchan,
void physical_channel_rx_dispatch(unsigned long physical_channel);
int loopback_pchan_create(char *dev_name);
int loopback_pchan_create(struct hab_device *dev, char *pchan_name);
int hab_parse(struct local_vmid *settings);
@ -512,6 +548,21 @@ int hab_vchan_query(struct uhab_context *ctx, int32_t vcid, uint64_t *ids,
struct hab_device *find_hab_device(unsigned int mm_id);
int get_refcnt(struct kref ref);
int hab_open_pending_enter(struct uhab_context *ctx,
struct physical_channel *pchan,
struct hab_open_node *pending);
int hab_open_pending_exit(struct uhab_context *ctx,
struct physical_channel *pchan,
struct hab_open_node *pending);
int hab_open_cancel_notify(struct hab_open_request *request);
int hab_open_receive_cancel(struct physical_channel *pchan,
size_t sizebytes);
/* Global singleton HAB instance */
extern struct hab_driver hab_driver;

View file

@ -0,0 +1,217 @@
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include "hab.h"
#include "hab_ghs.h"
static const char * const dt_gipc_path_name[] = {
"testgipc1",
"testgipc2",
"testgipc3",
"testgipc4",
"testgipc5",
"testgipc6",
"testgipc7",
"testgipc8",
"testgipc9",
"testgipc10",
"testgipc11",
"testgipc12",
"testgipc13",
"testgipc14",
"testgipc15",
"testgipc16",
"testgipc17",
"testgipc18",
"testgipc19",
"testgipc20",
"testgipc21",
"testgipc22",
};
static struct ghs_vmm_plugin_info_s {
const char **dt_name;
int curr;
int probe_cnt;
} ghs_vmm_plugin_info = {
dt_gipc_path_name,
0,
ARRAY_SIZE(dt_gipc_path_name),
};
static void ghs_irq_handler(void *cookie)
{
struct physical_channel *pchan = cookie;
struct ghs_vdev *dev =
(struct ghs_vdev *) (pchan ? pchan->hyp_data : NULL);
if (dev)
tasklet_schedule(&dev->task);
}
/* static struct physical_channel *habhyp_commdev_alloc(int id) */
int habhyp_commdev_alloc(void **commdev, int is_be, char *name, int vmid_remote,
struct hab_device *mmid_device)
{
struct ghs_vdev *dev = NULL;
struct physical_channel *pchan = NULL;
struct physical_channel **ppchan = (struct physical_channel **)commdev;
int ret = 0;
if (ghs_vmm_plugin_info.curr > ghs_vmm_plugin_info.probe_cnt) {
pr_err("too many commdev alloc %d, supported is %d\n",
ghs_vmm_plugin_info.curr,
ghs_vmm_plugin_info.probe_cnt);
ret = -ENOENT;
goto err;
}
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
ret = -ENOMEM;
pr_err("allocate struct ghs_vdev failed %zu bytes on pchan %s\n",
sizeof(*dev), name);
goto err;
}
memset(dev, 0, sizeof(*dev));
spin_lock_init(&dev->io_lock);
/*
* TODO: ExtractEndpoint is in ghs_comm.c because it blocks.
* Extract and Request should be in roughly the same spot
*/
if (is_be) {
/* role is backend */
dev->be = 1;
} else {
/* role is FE */
struct device_node *gvh_dn;
gvh_dn = of_find_node_by_path("/aliases");
if (gvh_dn) {
const char *ep_path = NULL;
struct device_node *ep_dn;
ret = of_property_read_string(gvh_dn,
ghs_vmm_plugin_info.dt_name[ghs_vmm_plugin_info.curr],
&ep_path);
if (ret)
pr_err("failed to read endpoint string ret %d\n",
ret);
of_node_put(gvh_dn);
ep_dn = of_find_node_by_path(ep_path);
if (ep_dn) {
dev->endpoint = kgipc_endpoint_alloc(ep_dn);
of_node_put(ep_dn);
if (IS_ERR(dev->endpoint)) {
ret = PTR_ERR(dev->endpoint);
pr_err("KGIPC alloc failed id: %d, ret: %d\n",
ghs_vmm_plugin_info.curr, ret);
goto err;
} else {
pr_debug("gipc ep found for %d\n",
ghs_vmm_plugin_info.curr);
}
} else {
pr_err("of_parse_phandle failed id: %d\n",
ghs_vmm_plugin_info.curr);
ret = -ENOENT;
goto err;
}
} else {
pr_err("of_find_compatible_node failed id: %d\n",
ghs_vmm_plugin_info.curr);
ret = -ENOENT;
goto err;
}
}
/* add pchan into the mmid_device list */
pchan = hab_pchan_alloc(mmid_device, vmid_remote);
if (!pchan) {
pr_err("hab_pchan_alloc failed for %s, cnt %d\n",
mmid_device->name, mmid_device->pchan_cnt);
ret = -ENOMEM;
goto err;
}
pchan->closed = 0;
pchan->hyp_data = (void *)dev;
pchan->is_be = is_be;
strlcpy(dev->name, name, sizeof(dev->name));
*ppchan = pchan;
dev->read_data = kmalloc(GIPC_RECV_BUFF_SIZE_BYTES, GFP_KERNEL);
if (!dev->read_data) {
ret = -ENOMEM;
goto err;
}
tasklet_init(&dev->task, physical_channel_rx_dispatch,
(unsigned long) pchan);
ret = kgipc_endpoint_start_with_irq_callback(dev->endpoint,
ghs_irq_handler,
pchan);
if (ret) {
pr_err("irq alloc failed id: %d %s, ret: %d\n",
ghs_vmm_plugin_info.curr, name, ret);
goto err;
} else
pr_debug("ep irq handler started for %d %s, ret %d\n",
ghs_vmm_plugin_info.curr, name, ret);
/* this value could be more than devp total */
ghs_vmm_plugin_info.curr++;
return 0;
err:
hab_pchan_put(pchan);
kfree(dev);
return ret;
}
int habhyp_commdev_dealloc(void *commdev)
{
struct physical_channel *pchan = (struct physical_channel *)commdev;
struct ghs_vdev *dev = pchan->hyp_data;
kgipc_endpoint_free(dev->endpoint);
spin_lock_destroy(&dev->io_lock);
kfree(dev->read_data);
kfree(dev);
if (get_refcnt(pchan->refcount) > 1) {
pr_warn("potential leak pchan %s vchans %d refcnt %d\n",
pchan->name, pchan->vcnt, get_refcnt(pchan->refcount));
}
hab_pchan_put(pchan);
return 0;
}
void hab_hypervisor_unregister(void)
{
pr_debug("total %d\n", hab_driver.ndevices);
hab_hypervisor_unregister_common();
ghs_vmm_plugin_info.curr = 0;
}
int hab_hypervisor_register(void)
{
int ret = 0;
hab_driver.b_server_dom = 0;
return ret;
}

View file

@ -0,0 +1,30 @@
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __HAB_GHS_H
#define __HAB_GHS_H
#include <ghs_vmm/kgipc.h>
#define GIPC_RECV_BUFF_SIZE_BYTES (32*1024)
struct ghs_vdev {
int be;
void *read_data; /* buffer to receive from gipc */
size_t read_size;
int read_offset;
GIPC_Endpoint endpoint;
spinlock_t io_lock;
char name[32];
struct tasklet_struct task;
};
#endif /* __HAB_GHS_H */

View file

@ -82,8 +82,6 @@ static int habmem_get_dma_pages_from_va(unsigned long address,
goto err;
}
pr_debug("vma flags %lx\n", vma->vm_flags);
/* Look for the fd that matches this the vma file */
fd = iterate_fd(current->files, 0, match_file, vma->vm_file);
if (fd == 0) {
@ -111,7 +109,6 @@ static int habmem_get_dma_pages_from_va(unsigned long address,
for_each_sg(sg_table->sgl, s, sg_table->nents, i) {
page = sg_page(s);
pr_debug("sgl length %d\n", s->length);
for (j = page_offset; j < (s->length >> PAGE_SHIFT); j++) {
pages[rc] = nth_page(page, j);
@ -205,7 +202,9 @@ int habmem_hyp_grant_user(unsigned long address,
int page_count,
int flags,
int remotedom,
void *ppdata)
void *ppdata,
int *compressed,
int *compressed_size)
{
int i, ret = 0;
struct grantable *item = (struct grantable *)ppdata;
@ -239,7 +238,8 @@ int habmem_hyp_grant_user(unsigned long address,
for (i = 0; i < page_count; i++)
item[i].pfn = page_to_pfn(pages[i]);
} else {
pr_err("get %d user pages failed: %d\n", page_count, ret);
pr_err("get %d user pages failed %d flags %d\n",
page_count, ret, flags);
}
vfree(pages);
@ -256,7 +256,9 @@ int habmem_hyp_grant(unsigned long address,
int page_count,
int flags,
int remotedom,
void *ppdata)
void *ppdata,
int *compressed,
int *compressed_size)
{
int i;
struct grantable *item;
@ -310,7 +312,7 @@ void habmem_imp_hyp_close(void *imp_ctx, int kernel)
list_del(&pglist->list);
priv->cnt--;
vfree(pglist->pages);
kfree(pglist->pages);
kfree(pglist);
}
@ -460,19 +462,19 @@ static int habmem_imp_hyp_map_fd(void *imp_ctx,
unsigned long pfn;
int i, j, k = 0;
pgprot_t prot = PAGE_KERNEL;
int32_t fd;
int32_t fd, size;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
if (!pfn_table || !priv)
return -EINVAL;
pages = vmalloc(exp->payload_count * sizeof(struct page *));
size = exp->payload_count * sizeof(struct page *);
pages = kmalloc(size, GFP_KERNEL);
if (!pages)
return -ENOMEM;
pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
if (!pglist) {
vfree(pages);
kfree(pages);
return -ENOMEM;
}
@ -503,7 +505,7 @@ static int habmem_imp_hyp_map_fd(void *imp_ctx,
exp_info.priv = pglist;
pglist->dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(pglist->dmabuf)) {
vfree(pages);
kfree(pages);
kfree(pglist);
return PTR_ERR(pglist->dmabuf);
}
@ -511,7 +513,7 @@ static int habmem_imp_hyp_map_fd(void *imp_ctx,
fd = dma_buf_fd(pglist->dmabuf, O_CLOEXEC);
if (fd < 0) {
dma_buf_put(pglist->dmabuf);
vfree(pages);
kfree(pages);
kfree(pglist);
return -EINVAL;
}
@ -539,17 +541,18 @@ static int habmem_imp_hyp_map_kva(void *imp_ctx,
struct pages_list *pglist;
struct importer_context *priv = imp_ctx;
unsigned long pfn;
int i, j, k = 0;
int i, j, k = 0, size;
pgprot_t prot = PAGE_KERNEL;
if (!pfn_table || !priv)
return -EINVAL;
pages = vmalloc(exp->payload_count * sizeof(struct page *));
size = exp->payload_count * sizeof(struct page *);
pages = kmalloc(size, GFP_KERNEL);
if (!pages)
return -ENOMEM;
pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
if (!pglist) {
vfree(pages);
kfree(pages);
return -ENOMEM;
}
@ -575,7 +578,7 @@ static int habmem_imp_hyp_map_kva(void *imp_ctx,
pglist->kva = vmap(pglist->pages, pglist->npages, VM_MAP, prot);
if (pglist->kva == NULL) {
vfree(pages);
kfree(pages);
kfree(pglist);
pr_err("%ld pages vmap failed\n", pglist->npages);
return -ENOMEM;
@ -607,18 +610,18 @@ static int habmem_imp_hyp_map_uva(void *imp_ctx,
struct pages_list *pglist;
struct importer_context *priv = imp_ctx;
unsigned long pfn;
int i, j, k = 0;
int i, j, k = 0, size;
if (!pfn_table || !priv)
return -EINVAL;
pages = vmalloc(exp->payload_count * sizeof(struct page *));
size = exp->payload_count * sizeof(struct page *);
pages = kmalloc(size, GFP_KERNEL);
if (!pages)
return -ENOMEM;
pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
if (!pglist) {
vfree(pages);
kfree(pages);
return -ENOMEM;
}
@ -670,7 +673,7 @@ int habmem_imp_hyp_map(void *imp_ctx, struct hab_import *param,
return ret;
}
int habmm_imp_hyp_unmap(void *imp_ctx, struct export_desc *exp)
int habmm_imp_hyp_unmap(void *imp_ctx, struct export_desc *exp, int kernel)
{
struct importer_context *priv = imp_ctx;
struct pages_list *pglist, *tmp;
@ -679,11 +682,8 @@ int habmm_imp_hyp_unmap(void *imp_ctx, struct export_desc *exp)
write_lock(&priv->implist_lock);
list_for_each_entry_safe(pglist, tmp, &priv->imp_list, list) {
if (pglist->export_id == exp->export_id &&
pglist->vcid == exp->vcid_remote) {
pglist->vcid == exp->vcid_remote) {
found = 1;
}
if (found) {
list_del(&pglist->list);
priv->cnt--;
break;
@ -705,7 +705,7 @@ int habmm_imp_hyp_unmap(void *imp_ctx, struct export_desc *exp)
if (pglist->dmabuf)
dma_buf_put(pglist->dmabuf);
vfree(pglist->pages);
kfree(pglist->pages);
kfree(pglist);
return 0;
@ -719,9 +719,6 @@ int habmem_imp_hyp_mmap(struct file *filp, struct vm_area_struct *vma)
struct pages_list *pglist;
int bfound = 0;
pr_debug("mmap request start %lX, len %ld, index %lX\n",
vma->vm_start, length, vma->vm_pgoff);
read_lock(&imp_ctx->implist_lock);
list_for_each_entry(pglist, &imp_ctx->imp_list, list) {
if (pglist->index == vma->vm_pgoff) {

View file

@ -28,7 +28,7 @@
static int hab_export_ack_find(struct uhab_context *ctx,
struct hab_export_ack *expect_ack)
struct hab_export_ack *expect_ack, struct virtual_channel *vchan)
{
int ret = 0;
struct hab_export_ack_recvd *ack_recvd, *tmp;
@ -36,9 +36,10 @@ static int hab_export_ack_find(struct uhab_context *ctx,
spin_lock_bh(&ctx->expq_lock);
list_for_each_entry_safe(ack_recvd, tmp, &ctx->exp_rxq, node) {
if (ack_recvd->ack.export_id == expect_ack->export_id &&
if ((ack_recvd->ack.export_id == expect_ack->export_id &&
ack_recvd->ack.vcid_local == expect_ack->vcid_local &&
ack_recvd->ack.vcid_remote == expect_ack->vcid_remote) {
ack_recvd->ack.vcid_remote == expect_ack->vcid_remote)
|| vchan->otherend_closed) {
list_del(&ack_recvd->node);
kfree(ack_recvd);
ret = 1;
@ -57,15 +58,17 @@ static int hab_export_ack_find(struct uhab_context *ctx,
}
static int hab_export_ack_wait(struct uhab_context *ctx,
struct hab_export_ack *expect_ack)
struct hab_export_ack *expect_ack, struct virtual_channel *vchan)
{
int ret;
ret = wait_event_interruptible_timeout(ctx->exp_wq,
hab_export_ack_find(ctx, expect_ack),
HZ);
hab_export_ack_find(ctx, expect_ack, vchan),
HAB_HS_TIMEOUT);
if (!ret || (ret == -ERESTARTSYS))
ret = -EAGAIN;
else if (vchan->otherend_closed)
ret = -ENODEV;
else if (ret > 0)
ret = 0;
return ret;
@ -86,7 +89,7 @@ static struct export_desc *habmem_add_export(struct virtual_channel *vchan,
if (!vchan || !sizebytes)
return NULL;
exp = vmalloc(sizebytes);
exp = kzalloc(sizebytes, GFP_KERNEL);
if (!exp)
return NULL;
@ -103,6 +106,8 @@ static struct export_desc *habmem_add_export(struct virtual_channel *vchan,
exp->vcid_remote = vchan->otherend_id;
exp->domid_local = -1; /* dom id, provided on the importer */
exp->domid_remote = vchan->pchan->dom_id;
exp->ctx = vchan->ctx;
exp->pchan = vchan->pchan;
ctx = vchan->ctx;
write_lock(&ctx->exp_lock);
@ -118,19 +123,22 @@ void habmem_remove_export(struct export_desc *exp)
struct physical_channel *pchan;
struct uhab_context *ctx;
if (!exp || !exp->vchan || !exp->vchan->ctx || !exp->vchan->pchan)
if (!exp || !exp->ctx || !exp->pchan) {
pr_err("failed to find valid info in exp %pK ctx %pK pchan %pK\n",
exp, exp->ctx, exp->pchan);
return;
}
ctx = exp->vchan->ctx;
ctx = exp->ctx;
ctx->export_total--;
pchan = exp->vchan->pchan;
pchan = exp->pchan;
spin_lock(&pchan->expid_lock);
idr_remove(&pchan->expid_idr, exp->export_id);
spin_unlock(&pchan->expid_lock);
vfree(exp);
kfree(exp);
}
static int compress_pfns(void **pfns, int npages, unsigned int *data_size)
@ -148,7 +156,7 @@ static int compress_pfns(void **pfns, int npages, unsigned int *data_size)
new_table->first_pfn = item[0].pfn;
for (i = 1; i < npages; i++) {
if (item[i].pfn-1 == item[i-1].pfn) {
region_size++;
region_size++; /* continuous pfn */
} else {
new_table->region[j].size = region_size;
new_table->region[j].space = item[i].pfn -
@ -208,7 +216,12 @@ static int habmem_export_vchan(struct uhab_context *ctx,
expected_ack.export_id = exp->export_id;
expected_ack.vcid_local = exp->vcid_local;
expected_ack.vcid_remote = exp->vcid_remote;
ret = hab_export_ack_wait(ctx, &expected_ack);
ret = hab_export_ack_wait(ctx, &expected_ack, vchan);
if (ret != 0) {
pr_err("failed to receive remote export ack %d on vc %x\n",
ret, vchan->id);
return ret;
}
*export_id = exp->export_id;
@ -225,12 +238,11 @@ int hab_mem_export(struct uhab_context *ctx,
uint32_t export_id = 0;
struct virtual_channel *vchan;
int page_count;
int compressed = 0;
if (!ctx || !param || param->sizebytes > HAB_MAX_EXPORT_SIZE)
if (!ctx || !param)
return -EINVAL;
pr_debug("vc %X, mem size %d\n", param->vcid, param->sizebytes);
vchan = hab_get_vchan_fromvcid(param->vcid, ctx);
if (!vchan || !vchan->pchan) {
ret = -ENODEV;
@ -249,13 +261,17 @@ int hab_mem_export(struct uhab_context *ctx,
page_count,
param->flags,
vchan->pchan->dom_id,
pdata_exp);
pdata_exp,
&compressed,
&pdata_size);
} else {
ret = habmem_hyp_grant_user((unsigned long)param->buffer,
page_count,
param->flags,
vchan->pchan->dom_id,
pdata_exp);
pdata_exp,
&compressed,
&pdata_size);
}
if (ret < 0) {
pr_err("habmem_hyp_grant failed size=%d ret=%d\n",
@ -263,7 +279,8 @@ int hab_mem_export(struct uhab_context *ctx,
goto err;
}
compress_pfns(&pdata_exp, page_count, &pdata_size);
if (!compressed)
compress_pfns(&pdata_exp, page_count, &pdata_size);
ret = habmem_export_vchan(ctx,
vchan,
@ -287,14 +304,23 @@ int hab_mem_unexport(struct uhab_context *ctx,
{
int ret = 0, found = 0;
struct export_desc *exp, *tmp;
struct virtual_channel *vchan;
if (!ctx || !param)
return -EINVAL;
/* refcnt on the access */
vchan = hab_get_vchan_fromvcid(param->vcid, ctx);
if (!vchan || !vchan->pchan) {
ret = -ENODEV;
goto err_novchan;
}
write_lock(&ctx->exp_lock);
list_for_each_entry_safe(exp, tmp, &ctx->exp_whse, node) {
if ((param->exportid == exp->export_id) &&
(param->vcid == exp->vcid_local)) {
if (param->exportid == exp->export_id &&
param->vcid == exp->vcid_local) {
/* same vchan guarantees the pchan for idr */
list_del(&exp->node);
found = 1;
break;
@ -302,15 +328,22 @@ int hab_mem_unexport(struct uhab_context *ctx,
}
write_unlock(&ctx->exp_lock);
if (!found)
return -EINVAL;
if (!found) {
ret = -EINVAL;
goto err_novchan;
}
ret = habmem_hyp_revoke(exp->payload, exp->payload_count);
if (ret) {
pr_err("Error found in revoke grant with ret %d", ret);
return ret;
goto err_novchan;
}
habmem_remove_export(exp);
err_novchan:
if (vchan)
hab_vchan_put(vchan);
return ret;
}
@ -320,14 +353,24 @@ int hab_mem_import(struct uhab_context *ctx,
{
int ret = 0, found = 0;
struct export_desc *exp = NULL;
struct virtual_channel *vchan;
if (!ctx || !param)
return -EINVAL;
vchan = hab_get_vchan_fromvcid(param->vcid, ctx);
if (!vchan || !vchan->pchan) {
ret = -ENODEV;
goto err_imp;
}
spin_lock_bh(&ctx->imp_lock);
list_for_each_entry(exp, &ctx->imp_whse, node) {
if ((exp->export_id == param->exportid) &&
(param->vcid == exp->vcid_remote)) {
/* only allow import on the vchan received from
* remote
*/
found = 1;
break;
}
@ -338,27 +381,24 @@ int hab_mem_import(struct uhab_context *ctx,
pr_err("Fail to get export descriptor from export id %d\n",
param->exportid);
ret = -ENODEV;
return ret;
goto err_imp;
}
pr_debug("call map id: %d pcnt %d remote_dom %d 1st_ref:0x%X\n",
exp->export_id, exp->payload_count, exp->domid_local,
*((uint32_t *)exp->payload));
ret = habmem_imp_hyp_map(ctx->import_ctx, param, exp, kernel);
if (ret) {
pr_err("Import fail ret:%d pcnt:%d rem:%d 1st_ref:0x%X\n",
ret, exp->payload_count,
exp->domid_local, *((uint32_t *)exp->payload));
return ret;
goto err_imp;
}
exp->import_index = param->index;
exp->kva = kernel ? (void *)param->kva : NULL;
pr_debug("import index %llx, kva or fd %llx, kernel %d\n",
exp->import_index, param->kva, kernel);
err_imp:
if (vchan)
hab_vchan_put(vchan);
return ret;
}
@ -369,20 +409,26 @@ int hab_mem_unimport(struct uhab_context *ctx,
{
int ret = 0, found = 0;
struct export_desc *exp = NULL, *exp_tmp;
struct virtual_channel *vchan;
if (!ctx || !param)
return -EINVAL;
vchan = hab_get_vchan_fromvcid(param->vcid, ctx);
if (!vchan || !vchan->pchan) {
if (vchan)
hab_vchan_put(vchan);
return -ENODEV;
}
spin_lock_bh(&ctx->imp_lock);
list_for_each_entry_safe(exp, exp_tmp, &ctx->imp_whse, node) {
if ((exp->export_id == param->exportid) &&
(param->vcid == exp->vcid_remote)) {
if (exp->export_id == param->exportid &&
param->vcid == exp->vcid_remote) {
/* same vchan is expected here */
list_del(&exp->node);
ctx->import_total--;
found = 1;
pr_debug("found id:%d payload cnt:%d kernel:%d\n",
exp->export_id, exp->payload_count, kernel);
break;
}
}
@ -391,7 +437,7 @@ int hab_mem_unimport(struct uhab_context *ctx,
if (!found)
ret = -EINVAL;
else {
ret = habmm_imp_hyp_unmap(ctx->import_ctx, exp);
ret = habmm_imp_hyp_unmap(ctx->import_ctx, exp, kernel);
if (ret) {
pr_err("unmap fail id:%d pcnt:%d vcid:%d\n",
exp->export_id, exp->payload_count, exp->vcid_remote);
@ -400,5 +446,8 @@ int hab_mem_unimport(struct uhab_context *ctx,
kfree(exp);
}
if (vchan)
hab_vchan_put(vchan);
return ret;
}

View file

@ -78,13 +78,14 @@ hab_msg_dequeue(struct virtual_channel *vchan, struct hab_message **msg,
} else {
pr_err("rcv buffer too small %d < %zd\n",
*rsize, message->sizebytes);
*rsize = 0;
*rsize = message->sizebytes;
message = NULL;
ret = -EINVAL;
ret = -EOVERFLOW; /* come back again */
}
}
spin_unlock_bh(&vchan->rx_lock);
} else
/* no message received, retain the original status */
*rsize = 0;
*msg = message;
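The -EOVERFLOW change turns a short receive into a recoverable condition: instead of zeroing the out-parameter, the dequeue now reports the message's real size so the caller can retry with a bigger buffer. A hedged sketch of the retry loop this enables at the habmm level, assuming the error code propagates up to habmm_socket_recv() and that recv's trailing timeout/flags arguments are as declared in habmm.h:

#include <linux/slab.h>
#include <habmm.h>

/* Sketch under stated assumptions; not taken verbatim from this patch. */
static int recv_with_grow(int32_t handle, void **out, uint32_t *size)
{
	void *buf = kmalloc(*size, GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = habmm_socket_recv(handle, buf, size, 0, 0);
	if (ret == -EOVERFLOW) {
		/* *size now holds the sender's sizebytes: grow and retry */
		kfree(buf);
		buf = kmalloc(*size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		ret = habmm_socket_recv(handle, buf, size, 0, 0);
	}

	if (ret) {
		kfree(buf);
		return ret;
	}
	*out = buf;
	return 0;
}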
@ -142,7 +143,7 @@ static int hab_receive_create_export_ack(struct physical_channel *pchan,
return -ENOMEM;
if (sizeof(ack_recvd->ack) != sizebytes)
pr_err("exp ack size %lu is not as arrived %zu\n",
pr_err("exp ack size %zu is not as arrived %zu\n",
sizeof(ack_recvd->ack), sizebytes);
if (physical_channel_read(pchan,
@ -150,11 +151,6 @@ static int hab_receive_create_export_ack(struct physical_channel *pchan,
sizebytes) != sizebytes)
return -EIO;
pr_debug("receive export id %d, local vc %X, vd remote %X\n",
ack_recvd->ack.export_id,
ack_recvd->ack.vcid_local,
ack_recvd->ack.vcid_remote);
spin_lock_bh(&ctx->expq_lock);
list_add_tail(&ack_recvd->node, &ctx->exp_rxq);
spin_unlock_bh(&ctx->expq_lock);
@ -162,10 +158,21 @@ static int hab_receive_create_export_ack(struct physical_channel *pchan,
return 0;
}
void hab_msg_recv(struct physical_channel *pchan,
static void hab_msg_drop(struct physical_channel *pchan, size_t sizebytes)
{
uint8_t *data = NULL;
data = kmalloc(sizebytes, GFP_ATOMIC);
if (data == NULL)
return;
physical_channel_read(pchan, data, sizebytes);
kfree(data);
}
int hab_msg_recv(struct physical_channel *pchan,
struct hab_header *header)
{
int ret;
int ret = 0;
struct hab_message *message;
struct hab_device *dev = pchan->habdev;
size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
@ -179,7 +186,8 @@ void hab_msg_recv(struct physical_channel *pchan,
/* get the local virtual channel if it isn't an open message */
if (payload_type != HAB_PAYLOAD_TYPE_INIT &&
payload_type != HAB_PAYLOAD_TYPE_INIT_ACK &&
payload_type != HAB_PAYLOAD_TYPE_ACK) {
payload_type != HAB_PAYLOAD_TYPE_INIT_DONE &&
payload_type != HAB_PAYLOAD_TYPE_INIT_CANCEL) {
/* sanity check the received message */
if (payload_type >= HAB_PAYLOAD_TYPE_MAX ||
@ -189,29 +197,42 @@ void hab_msg_recv(struct physical_channel *pchan,
payload_type, vchan_id, sizebytes, session_id);
}
/*
* need both vcid and session_id to be accurate.
* this is from pchan instead of ctx
*/
vchan = hab_vchan_get(pchan, header);
if (!vchan) {
pr_debug("vchan is not found, payload type %d, vchan id %x, sizebytes %zx, session %d\n",
pr_info("vchan is not found, payload type %d, vchan id %x, sizebytes %zx, session %d\n",
payload_type, vchan_id, sizebytes, session_id);
if (sizebytes)
pr_err("message is dropped\n");
return;
if (sizebytes) {
hab_msg_drop(pchan, sizebytes);
pr_err("message %d dropped no vchan, session id %d\n",
payload_type, session_id);
}
return -EINVAL;
} else if (vchan->otherend_closed) {
hab_vchan_put(vchan);
pr_debug("vchan remote is closed, payload type %d, vchan id %x, sizebytes %zx, session %d\n",
pr_info("vchan remote is closed payload type %d, vchan id %x, sizebytes %zx, session %d\n",
payload_type, vchan_id, sizebytes, session_id);
if (sizebytes)
pr_err("message is dropped\n");
return;
if (sizebytes) {
hab_msg_drop(pchan, sizebytes);
pr_err("message %d dropped remote close, session id %d\n",
payload_type, session_id);
}
return -ENODEV;
}
} else {
if (sizebytes != sizeof(struct hab_open_send_data)) {
pr_err("Invalid open request received, payload type %d, vchan id %x, sizebytes %zx, session %d\n",
pr_err("Invalid open request received type %d, vcid %x, szbytes %zx, session %d\n",
payload_type, vchan_id, sizebytes, session_id);
if (sizebytes) {
hab_msg_drop(pchan, sizebytes);
pr_err("message %d dropped unknown reason, session id %d\n",
payload_type, session_id);
}
return -ENODEV;
}
}
@ -226,7 +247,7 @@ void hab_msg_recv(struct physical_channel *pchan,
case HAB_PAYLOAD_TYPE_INIT:
case HAB_PAYLOAD_TYPE_INIT_ACK:
case HAB_PAYLOAD_TYPE_ACK:
case HAB_PAYLOAD_TYPE_INIT_DONE:
ret = hab_open_request_add(pchan, sizebytes, payload_type);
if (ret) {
pr_err("open request add failed, ret %d, payload type %d, sizebytes %zx\n",
@ -236,6 +257,16 @@ void hab_msg_recv(struct physical_channel *pchan,
wake_up_interruptible(&dev->openq);
break;
case HAB_PAYLOAD_TYPE_INIT_CANCEL:
pr_info("remote open cancel header vcid %X session %d local %d remote %d\n",
vchan_id, session_id, pchan->vmid_local,
pchan->vmid_remote);
ret = hab_open_receive_cancel(pchan, sizebytes);
if (ret)
pr_err("open cancel handling failed ret %d vcid %X session %d\n",
ret, vchan_id, session_id);
break;
case HAB_PAYLOAD_TYPE_EXPORT:
exp_desc = kzalloc(sizebytes, GFP_ATOMIC);
if (!exp_desc)
@ -243,7 +274,10 @@ void hab_msg_recv(struct physical_channel *pchan,
if (physical_channel_read(pchan, exp_desc, sizebytes) !=
sizebytes) {
vfree(exp_desc);
pr_err("corrupted exp expect %zd bytes vcid %X remote %X open %d!\n",
sizebytes, vchan->id,
vchan->otherend_id, vchan->session_id);
kfree(exp_desc);
break;
}
@ -265,36 +299,33 @@ void hab_msg_recv(struct physical_channel *pchan,
case HAB_PAYLOAD_TYPE_CLOSE:
/* remote request close */
pr_debug("remote side request close\n");
pr_debug(" vchan id %X, other end %X, session %d\n",
vchan->id, vchan->otherend_id, session_id);
pr_info("remote request close vcid %pK %X other id %X session %d refcnt %d\n",
vchan, vchan->id, vchan->otherend_id,
session_id, get_refcnt(vchan->refcount));
hab_vchan_stop(vchan);
break;
case HAB_PAYLOAD_TYPE_PROFILE:
do_gettimeofday(&tv);
/* pull down the incoming data */
message = hab_msg_alloc(pchan, sizebytes);
if (!message) {
pr_err("msg alloc failed\n");
break;
if (!message)
pr_err("failed to allocate msg Arrived msg will be lost\n");
else {
struct habmm_xing_vm_stat *pstat =
(struct habmm_xing_vm_stat *)message->data;
pstat->rx_sec = tv.tv_sec;
pstat->rx_usec = tv.tv_usec;
hab_msg_queue(vchan, message);
}
((uint64_t *)message->data)[2] = tv.tv_sec;
((uint64_t *)message->data)[3] = tv.tv_usec;
hab_msg_queue(vchan, message);
break;
default:
pr_err("unknown msg is received\n");
pr_err("payload type %d, vchan id %x\n",
payload_type, vchan_id);
pr_err("sizebytes %zx, session %d\n",
sizebytes, session_id);
pr_err("unknown msg received, payload type %d, vchan id %x, sizebytes %zx, session %d\n",
payload_type, vchan_id, sizebytes, session_id);
break;
}
if (vchan)
hab_vchan_put(vchan);
return ret;
}

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -12,6 +12,8 @@
*/
#include "hab.h"
#define HAB_OPEN_REQ_EXPIRE_TIME_S (3600*10)
void hab_open_request_init(struct hab_open_request *request,
int type,
struct physical_channel *pchan,
@ -21,57 +23,55 @@ void hab_open_request_init(struct hab_open_request *request,
{
request->type = type;
request->pchan = pchan;
request->vchan_id = vchan_id;
request->sub_id = sub_id;
request->open_id = open_id;
request->xdata.vchan_id = vchan_id;
request->xdata.sub_id = sub_id;
request->xdata.open_id = open_id;
}
int hab_open_request_send(struct hab_open_request *request)
{
struct hab_header header = HAB_HEADER_INITIALIZER;
struct hab_open_send_data data;
HAB_HEADER_SET_SIZE(header, sizeof(struct hab_open_send_data));
HAB_HEADER_SET_TYPE(header, request->type);
data.vchan_id = request->vchan_id;
data.open_id = request->open_id;
data.sub_id = request->sub_id;
return physical_channel_send(request->pchan, &header, &data);
return physical_channel_send(request->pchan, &header, &request->xdata);
}
/* called when remote sends in open-request */
int hab_open_request_add(struct physical_channel *pchan,
size_t sizebytes, int request_type)
{
struct hab_open_node *node;
struct hab_device *dev = pchan->habdev;
struct hab_open_send_data data;
struct hab_open_request *request;
struct timeval tv;
node = kzalloc(sizeof(*node), GFP_ATOMIC);
if (!node)
return -ENOMEM;
if (physical_channel_read(pchan, &data, sizebytes) != sizebytes)
request = &node->request;
if (physical_channel_read(pchan, &request->xdata, sizebytes)
!= sizebytes)
return -EIO;
request = &node->request;
request->type = request_type;
request->pchan = pchan;
request->vchan_id = data.vchan_id;
request->sub_id = data.sub_id;
request->open_id = data.open_id;
node->age = 0;
request->type = request_type;
request->pchan = pchan;
do_gettimeofday(&tv);
node->age = tv.tv_sec + HAB_OPEN_REQ_EXPIRE_TIME_S +
tv.tv_usec/1000000;
hab_pchan_get(pchan);
spin_lock_bh(&dev->openlock);
list_add_tail(&node->node, &dev->openq_list);
dev->openq_cnt++;
spin_unlock_bh(&dev->openlock);
return 0;
}
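node->age thus changes meaning here: it is no longer a poll counter bumped on every scan, but an absolute expiry stamp set once at enqueue time and later compared against the wall clock in hab_open_request_find(). The pattern, reduced to its core (plain C, time_t standing in for struct timeval):

#include <time.h>

struct stamped_node {
	long long deadline;	/* absolute expiry, in seconds */
};

static void stamp_on_enqueue(struct stamped_node *n, long long expire_s)
{
	n->deadline = (long long)time(NULL) + expire_s;
}

static int has_expired(const struct stamped_node *n)
{
	return n->deadline < (long long)time(NULL);
}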
/* local only */
static int hab_open_request_find(struct uhab_context *ctx,
struct hab_device *dev,
struct hab_open_request *listen,
@ -79,6 +79,7 @@ static int hab_open_request_find(struct uhab_context *ctx,
{
struct hab_open_node *node, *tmp;
struct hab_open_request *request;
struct timeval tv;
int ret = 0;
if (ctx->closing ||
@ -91,21 +92,27 @@ static int hab_open_request_find(struct uhab_context *ctx,
if (list_empty(&dev->openq_list))
goto done;
do_gettimeofday(&tv);
list_for_each_entry_safe(node, tmp, &dev->openq_list, node) {
request = (struct hab_open_request *)node;
if (request->type == listen->type &&
(request->sub_id == listen->sub_id) &&
(!listen->open_id ||
request->open_id == listen->open_id) &&
if ((request->type == listen->type ||
request->type == HAB_PAYLOAD_TYPE_INIT_CANCEL) &&
(request->xdata.sub_id == listen->xdata.sub_id) &&
(!listen->xdata.open_id ||
request->xdata.open_id == listen->xdata.open_id) &&
(!listen->pchan ||
request->pchan == listen->pchan)) {
list_del(&node->node);
dev->openq_cnt--;
*recv_request = request;
ret = 1;
break;
}
node->age++;
if (node->age > Q_AGE_THRESHOLD) {
if (node->age < (int64_t)tv.tv_sec + tv.tv_usec/1000000) {
pr_warn("open request type %d sub %d open %d\n",
request->type, request->xdata.sub_id,
request->xdata.sub_id);
list_del(&node->node);
hab_open_request_free(request);
}
@ -121,7 +128,8 @@ void hab_open_request_free(struct hab_open_request *request)
if (request) {
hab_pchan_put(request->pchan);
kfree(request);
}
} else
pr_err("empty request found\n");
}
int hab_open_listen(struct uhab_context *ctx,
@ -132,22 +140,153 @@ int hab_open_listen(struct uhab_context *ctx,
{
int ret = 0;
if (!ctx || !listen || !recv_request)
if (!ctx || !listen || !recv_request) {
pr_err("listen failed ctx %pK listen %pK request %pK\n",
ctx, listen, recv_request);
return -EINVAL;
}
*recv_request = NULL;
if (ms_timeout > 0) {
if (ms_timeout > 0) { /* BE case */
ms_timeout = msecs_to_jiffies(ms_timeout);
ret = wait_event_interruptible_timeout(dev->openq,
hab_open_request_find(ctx, dev, listen, recv_request),
ms_timeout);
if (!ret || (-ERESTARTSYS == ret))
ret = -EAGAIN;
else if (ret > 0)
ret = 0;
} else {
if (!ret || (-ERESTARTSYS == ret)) {
pr_warn("open listen timed out or interrupted ret %d\n",
ret);
ret = -EAGAIN; /* condition not met */
} else if (ret > 0)
ret = 0; /* condition met */
} else { /* FE case */
ret = wait_event_interruptible(dev->openq,
hab_open_request_find(ctx, dev, listen, recv_request));
if (ctx->closing) {
pr_warn("local closing during open ret %d\n", ret);
ret = -ENODEV;
} else if (-ERESTARTSYS == ret) {
pr_warn("local interrupted during open ret %d\n", ret);
ret = -EAGAIN;
}
}
return ret;
}
/* called upon receiving the remote's cancel of a pending init (from FE) or init-ack (from BE) */
int hab_open_receive_cancel(struct physical_channel *pchan,
size_t sizebytes)
{
struct hab_device *dev = pchan->habdev;
struct hab_open_send_data data;
struct hab_open_request *request;
struct hab_open_node *node, *tmp;
int bfound = 0;
struct timeval tv;
if (physical_channel_read(pchan, &data, sizebytes) != sizebytes)
return -EIO;
spin_lock_bh(&dev->openlock);
list_for_each_entry_safe(node, tmp, &dev->openq_list, node) {
request = &node->request;
/* check if open request has been serviced or not */
if ((request->type == HAB_PAYLOAD_TYPE_INIT ||
request->type == HAB_PAYLOAD_TYPE_INIT_ACK) &&
(request->xdata.sub_id == data.sub_id) &&
(request->xdata.open_id == data.open_id) &&
(request->xdata.vchan_id == data.vchan_id)) {
list_del(&node->node);
dev->openq_cnt--;
pr_info("open cancelled on pchan %s vcid %x subid %d openid %d\n",
pchan->name, data.vchan_id,
data.sub_id, data.open_id);
/* found un-serviced open request, delete it */
bfound = 1;
break;
}
}
spin_unlock_bh(&dev->openlock);
if (!bfound) {
pr_info("init waiting is in-flight. vcid %x sub %d open %d\n",
data.vchan_id, data.sub_id, data.open_id);
/* add cancel to the openq to let the waiting open bail out */
node = kzalloc(sizeof(*node), GFP_ATOMIC);
if (!node)
return -ENOMEM;
request = &node->request;
request->type = HAB_PAYLOAD_TYPE_INIT_CANCEL;
request->pchan = pchan;
request->xdata.vchan_id = data.vchan_id;
request->xdata.sub_id = data.sub_id;
request->xdata.open_id = data.open_id;
do_gettimeofday(&tv);
node->age = tv.tv_sec + HAB_OPEN_REQ_EXPIRE_TIME_S +
tv.tv_usec/1000000;
/* put when this node is handled in open path */
hab_pchan_get(pchan);
spin_lock_bh(&dev->openlock);
list_add_tail(&node->node, &dev->openq_list);
dev->openq_cnt++;
spin_unlock_bh(&dev->openlock);
wake_up_interruptible(&dev->openq);
}
return 0;
}
/* called locally to ask the remote to cancel the pending open */
int hab_open_cancel_notify(struct hab_open_request *request)
{
struct hab_header header = HAB_HEADER_INITIALIZER;
HAB_HEADER_SET_SIZE(header, sizeof(struct hab_open_send_data));
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_INIT_CANCEL);
return physical_channel_send(request->pchan, &header, &request->xdata);
}
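Taken together, the cancel path works in pairs: a local opener that gives up calls hab_open_cancel_notify() to push HAB_PAYLOAD_TYPE_INIT_CANCEL across the pchan, and the remote's hab_msg_recv() routes that message into hab_open_receive_cancel(), which either deletes the not-yet-serviced request or queues a cancel node to wake the in-flight listener. A sketch of the caller side; the parameter order of hab_open_listen() and the surrounding open logic are assumptions, not shown in full in this diff:

#include "hab.h"

/* Sketch only: a timed-out open triggering the cancel notification. */
static int open_or_cancel(struct uhab_context *ctx, struct hab_device *dev,
		struct hab_open_request *listen, int ms_timeout)
{
	struct hab_open_request *recv = NULL;
	int ret = hab_open_listen(ctx, dev, listen, &recv, ms_timeout);

	if (ret == -EAGAIN && !recv) {
		/* remote never answered: ask it to drop our pending init */
		hab_open_cancel_notify(listen);
	}
	return ret;
}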
int hab_open_pending_enter(struct uhab_context *ctx,
struct physical_channel *pchan,
struct hab_open_node *pending)
{
write_lock(&ctx->ctx_lock);
list_add_tail(&pending->node, &ctx->pending_open);
ctx->pending_cnt++;
write_unlock(&ctx->ctx_lock);
return 0;
}
int hab_open_pending_exit(struct uhab_context *ctx,
struct physical_channel *pchan,
struct hab_open_node *pending)
{
struct hab_open_node *node, *tmp;
int ret = -ENOENT;
write_lock(&ctx->ctx_lock);
list_for_each_entry_safe(node, tmp, &ctx->pending_open, node) {
if ((node->request.type == pending->request.type) &&
(node->request.pchan
== pending->request.pchan) &&
(node->request.xdata.vchan_id
== pending->request.xdata.vchan_id) &&
(node->request.xdata.sub_id
== pending->request.xdata.sub_id) &&
(node->request.xdata.open_id
== pending->request.xdata.open_id)) {
list_del(&node->node);
ctx->pending_cnt--;
ret = 0;
}
}
write_unlock(&ctx->ctx_lock);
return ret;
}

View file

@ -30,7 +30,7 @@ static int fill_vmid_mmid_tbl(struct vmid_mmid_desc *tbl, int32_t vm_start,
for (j = mmid_start; j < mmid_start + mmid_range; j++) {
/* sanity check */
if (tbl[i].mmid[j] != HABCFG_VMID_INVALID) {
pr_err("overwrite previous setting, i %d, j %d, be %d\n",
pr_err("overwrite previous setting vmid %d, mmid %d, be %d\n",
i, j, tbl[i].is_listener[j]);
}
tbl[i].mmid[j] = j;
@ -43,28 +43,23 @@ static int fill_vmid_mmid_tbl(struct vmid_mmid_desc *tbl, int32_t vm_start,
void dump_settings(struct local_vmid *settings)
{
int i, j;
pr_debug("self vmid is %d\n", settings->self);
for (i = 0; i < HABCFG_VMID_MAX; i++) {
pr_debug("remote vmid %d\n",
settings->vmid_mmid_list[i].vmid);
for (j = 0; j <= HABCFG_MMID_AREA_MAX; j++) {
pr_debug("mmid %d, is_be %d\n",
settings->vmid_mmid_list[i].mmid[j],
settings->vmid_mmid_list[i].is_listener[j]);
}
}
}
int fill_default_gvm_settings(struct local_vmid *settings, int vmid_local,
int mmid_start, int mmid_end) {
int mmid_start, int mmid_end)
{
int32_t be = HABCFG_BE_FALSE;
int32_t range = 1;
int32_t vmremote = 0; /* default to host[0] as local is guest[2] */
settings->self = vmid_local;
/* default gvm always talks to host as vm0 */
return fill_vmid_mmid_tbl(settings->vmid_mmid_list, 0, 1,
mmid_start/100, (mmid_end-mmid_start)/100+1, HABCFG_BE_FALSE);
return fill_vmid_mmid_tbl(settings->vmid_mmid_list, vmremote, range,
mmid_start/100, (mmid_end-mmid_start)/100+1, be);
}
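The /100 arithmetic groups MMIDs by hundreds, so the table is indexed by MMID group rather than by individual id. A worked example of the two derived values, using purely hypothetical ids:

#include <stdio.h>

int main(void)
{
	/* hypothetical ids for illustration only */
	int mmid_start = 401, mmid_end = 499;

	int group = mmid_start / 100;			/* -> 4 */
	int range = (mmid_end - mmid_start) / 100 + 1;	/* -> 1 group */

	printf("group %d, range %d\n", group, range);
	return 0;
}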
/* device tree based parser */
static int hab_parse_dt(struct local_vmid *settings)
{
int result, i;
@ -151,6 +146,10 @@ static int hab_parse_dt(struct local_vmid *settings)
return 0;
}
/*
* 0: successful
* negative: various failure codes
*/
int hab_parse(struct local_vmid *settings)
{
int ret;

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -35,10 +35,10 @@ hab_pchan_alloc(struct hab_device *habdev, int otherend_id)
rwlock_init(&pchan->vchans_lock);
spin_lock_init(&pchan->rxbuf_lock);
mutex_lock(&habdev->pchan_lock);
spin_lock_bh(&habdev->pchan_lock);
list_add_tail(&pchan->node, &habdev->pchannels);
habdev->pchan_cnt++;
mutex_unlock(&habdev->pchan_lock);
spin_unlock_bh(&habdev->pchan_lock);
return pchan;
}
@ -47,11 +47,26 @@ static void hab_pchan_free(struct kref *ref)
{
struct physical_channel *pchan =
container_of(ref, struct physical_channel, refcount);
struct virtual_channel *vchan;
mutex_lock(&pchan->habdev->pchan_lock);
pr_debug("pchan %s refcnt %d\n", pchan->name,
get_refcnt(pchan->refcount));
spin_lock_bh(&pchan->habdev->pchan_lock);
list_del(&pchan->node);
pchan->habdev->pchan_cnt--;
mutex_unlock(&pchan->habdev->pchan_lock);
spin_unlock_bh(&pchan->habdev->pchan_lock);
/* check vchan leaking */
read_lock(&pchan->vchans_lock);
list_for_each_entry(vchan, &pchan->vchannels, pnode) {
/* no logging on the owner. it might have been gone */
pr_warn("leaking vchan id %X remote %X refcnt %d\n",
vchan->id, vchan->otherend_id,
get_refcnt(vchan->refcount));
}
read_unlock(&pchan->vchans_lock);
kfree(pchan->hyp_data);
kfree(pchan);
}
@ -61,7 +76,7 @@ hab_pchan_find_domid(struct hab_device *dev, int dom_id)
{
struct physical_channel *pchan;
mutex_lock(&dev->pchan_lock);
spin_lock_bh(&dev->pchan_lock);
list_for_each_entry(pchan, &dev->pchannels, node)
if (pchan->dom_id == dom_id || dom_id == HABCFG_VMID_DONT_CARE)
break;
@ -75,7 +90,7 @@ hab_pchan_find_domid(struct hab_device *dev, int dom_id)
if (pchan && !kref_get_unless_zero(&pchan->refcount))
pchan = NULL;
mutex_unlock(&dev->pchan_lock);
spin_unlock_bh(&dev->pchan_lock);
return pchan;
}

View file

@ -71,14 +71,14 @@ static struct qvm_plugin_info {
static irqreturn_t shm_irq_handler(int irq, void *_pchan)
{
irqreturn_t rc = IRQ_NONE;
struct physical_channel *pchan = _pchan;
struct physical_channel *pchan = (struct physical_channel *) _pchan;
struct qvm_channel *dev =
(struct qvm_channel *) (pchan ? pchan->hyp_data : NULL);
if (dev && dev->guest_ctrl) {
int status = dev->guest_ctrl->status;
if (status & dev->idx) {
if (status & 0xffff) {/*source bitmask indicator*/
rc = IRQ_HANDLED;
tasklet_schedule(&dev->task);
}
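The interrupt path stays split in the usual top/bottom-half shape: the hard-irq handler only tests the source bitmask and defers the actual ring drain to the tasklet (which ends up in physical_channel_rx_dispatch()). The generic pattern, sketched with made-up names:

#include <linux/interrupt.h>

static void rx_bottom_half(unsigned long data)
{
	/* drain the shared-memory ring here, out of hard-irq context */
}

static irqreturn_t rx_top_half(int irq, void *cookie)
{
	struct tasklet_struct *t = cookie;

	tasklet_schedule(t);	/* defer the heavy lifting */
	return IRQ_HANDLED;
}

/* setup, assumed elsewhere:
 *	tasklet_init(&task, rx_bottom_half, 0);
 *	request_irq(irq, rx_top_half, 0, "hab-shm", &task);
 */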
@ -95,13 +95,14 @@ static uint64_t get_guest_factory_paddr(struct qvm_channel *dev,
int i;
pr_debug("name = %s, factory paddr = 0x%lx, irq %d, pages %d\n",
name, factory_addr, irq, pages);
name, factory_addr, irq, pages);
dev->guest_factory = (struct guest_shm_factory *)factory_addr;
if (dev->guest_factory->signature != GUEST_SHM_SIGNATURE) {
pr_err("signature error: %ld != %llu, factory addr %lx\n",
GUEST_SHM_SIGNATURE, dev->guest_factory->signature,
factory_addr);
iounmap(dev->guest_factory);
return 0;
}
@ -120,6 +121,7 @@ static uint64_t get_guest_factory_paddr(struct qvm_channel *dev,
/* See if we successfully created/attached to the region. */
if (dev->guest_factory->status != GSS_OK) {
pr_err("create failed: %d\n", dev->guest_factory->status);
iounmap(dev->guest_factory);
return 0;
}
@ -180,6 +182,7 @@ int habhyp_commdev_alloc(void **commdev, int is_be, char *name,
{
struct qvm_channel *dev = NULL;
struct qvm_plugin_info *qvm_priv = hab_driver.hyp_priv;
uint64_t paddr;
struct physical_channel **pchan = (struct physical_channel **)commdev;
int ret = 0, coid = 0, channel = 0;
char *shmdata;
@ -187,7 +190,6 @@ int habhyp_commdev_alloc(void **commdev, int is_be, char *name,
hab_pipe_calc_required_bytes(PIPE_SHMEM_SIZE);
uint32_t pipe_alloc_pages =
(pipe_alloc_size + PAGE_SIZE - 1) / PAGE_SIZE;
uint64_t paddr;
int temp;
int total_pages;
struct page **pages;
@ -196,8 +198,10 @@ int habhyp_commdev_alloc(void **commdev, int is_be, char *name,
pipe_alloc_size);
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
if (!dev) {
ret = -ENOMEM;
goto err;
}
spin_lock_init(&dev->io_lock);
@ -208,7 +212,7 @@ int habhyp_commdev_alloc(void **commdev, int is_be, char *name,
pipe_alloc_pages);
qvm_priv->curr++;
if (qvm_priv->curr > qvm_priv->probe_cnt) {
pr_err("factory setting %d overflow probed cnt %d\n",
pr_err("pchan guest factory setting %d overflow probed cnt %d\n",
qvm_priv->curr, qvm_priv->probe_cnt);
ret = -1;
goto err;
@ -261,17 +265,18 @@ int habhyp_commdev_alloc(void **commdev, int is_be, char *name,
dev->coid = coid;
ret = create_dispatcher(*pchan);
if (ret)
if (ret < 0)
goto err;
return ret;
err:
pr_err("habhyp_commdev_alloc failed\n");
kfree(dev);
if (*pchan)
hab_pchan_put(*pchan);
pr_err("habhyp_commdev_alloc failed: %d\n", ret);
return ret;
}
@ -280,6 +285,13 @@ int habhyp_commdev_dealloc(void *commdev)
struct physical_channel *pchan = (struct physical_channel *)commdev;
struct qvm_channel *dev = pchan->hyp_data;
dev->guest_ctrl->detach = 0;
if (get_refcnt(pchan->refcount) > 1) {
pr_warn("potential leak pchan %s vchans %d refcnt %d\n",
pchan->name, pchan->vcnt,
get_refcnt(pchan->refcount));
}
kfree(dev);
hab_pchan_put(pchan);
@ -302,25 +314,13 @@ int hab_hypervisor_register(void)
void hab_hypervisor_unregister(void)
{
int status, i;
for (i = 0; i < hab_driver.ndevices; i++) {
struct hab_device *dev = &hab_driver.devp[i];
struct physical_channel *pchan;
list_for_each_entry(pchan, &dev->pchannels, node) {
status = habhyp_commdev_dealloc(pchan);
if (status) {
pr_err("failed to free pchan %pK, i %d, ret %d\n",
pchan, i, status);
}
}
}
hab_hypervisor_unregister_common();
qvm_priv_info.probe_cnt = 0;
qvm_priv_info.curr = 0;
}
/* this happens before hypervisor register */
static int hab_shmem_probe(struct platform_device *pdev)
{
int irq = 0;
@ -373,19 +373,6 @@ static int hab_shmem_remove(struct platform_device *pdev)
static void hab_shmem_shutdown(struct platform_device *pdev)
{
int i;
struct qvm_channel *dev;
struct physical_channel *pchan;
struct hab_device *hab_dev;
for (i = 0; i < hab_driver.ndevices; i++) {
hab_dev = &hab_driver.devp[i];
pr_debug("detaching %s\n", hab_dev->name);
list_for_each_entry(pchan, &hab_dev->pchannels, node) {
dev = (struct qvm_channel *)pchan->hyp_data;
dev->guest_ctrl->detach = 0;
}
}
}
static const struct of_device_id hab_shmem_match_table[] = {

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -36,6 +36,7 @@ struct qvm_channel {
int channel;
int coid;
/* Guest VM */
unsigned int guest_intr;
unsigned int guest_iid;
unsigned int factory_addr;

View file

@ -13,7 +13,8 @@
#include "hab.h"
struct virtual_channel *
hab_vchan_alloc(struct uhab_context *ctx, struct physical_channel *pchan)
hab_vchan_alloc(struct uhab_context *ctx, struct physical_channel *pchan,
int openid)
{
int id;
struct virtual_channel *vchan;
@ -28,11 +29,13 @@ hab_vchan_alloc(struct uhab_context *ctx, struct physical_channel *pchan)
/* This should be the first thing we do in this function */
idr_preload(GFP_KERNEL);
spin_lock_bh(&pchan->vid_lock);
id = idr_alloc(&pchan->vchan_idr, vchan, 1, 256, GFP_NOWAIT);
id = idr_alloc(&pchan->vchan_idr, vchan, 1,
(HAB_VCID_ID_MASK >> HAB_VCID_ID_SHIFT) + 1, GFP_NOWAIT);
spin_unlock_bh(&pchan->vid_lock);
idr_preload_end();
if (id < 0) {
if (id <= 0) {
pr_err("idr failed %d\n", id);
kfree(vchan);
return NULL;
}
@ -40,8 +43,11 @@ hab_vchan_alloc(struct uhab_context *ctx, struct physical_channel *pchan)
hab_pchan_get(pchan);
vchan->pchan = pchan;
/* vchan needs both vcid and openid to be properly located */
vchan->session_id = openid;
write_lock(&pchan->vchans_lock);
list_add_tail(&vchan->pnode, &pchan->vchannels);
pchan->vcnt++;
write_unlock(&pchan->vchans_lock);
vchan->id = ((id << HAB_VCID_ID_SHIFT) & HAB_VCID_ID_MASK) |
((pchan->habdev->id << HAB_VCID_MMID_SHIFT) &
@ -53,7 +59,7 @@ hab_vchan_alloc(struct uhab_context *ctx, struct physical_channel *pchan)
init_waitqueue_head(&vchan->rx_queue);
kref_init(&vchan->refcount);
kref_init(&vchan->usagecnt);
vchan->otherend_closed = pchan->closed;
hab_ctx_get(ctx);
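The vcid visible to clients is therefore a packed field: the idr-allocated id, the device MMID, and (per the masks in hab.h) the domain each occupy a fixed bit range. A sketch of the encode/decode with made-up widths; the real shifts and masks live in hab.h:

#include <stdint.h>

/* made-up field widths for illustration only */
#define VCID_ID_SHIFT	0
#define VCID_ID_MASK	0x000000ff
#define VCID_MMID_SHIFT	8
#define VCID_MMID_MASK	0x0000ff00

static uint32_t vcid_encode(uint32_t id, uint32_t mmid)
{
	return ((id << VCID_ID_SHIFT) & VCID_ID_MASK) |
	       ((mmid << VCID_MMID_SHIFT) & VCID_MMID_MASK);
}

static uint32_t vcid_to_id(uint32_t vcid)
{
	return (vcid & VCID_ID_MASK) >> VCID_ID_SHIFT;
}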
@ -65,11 +71,9 @@ hab_vchan_alloc(struct uhab_context *ctx, struct physical_channel *pchan)
static void
hab_vchan_free(struct kref *ref)
{
int found;
struct virtual_channel *vchan =
container_of(ref, struct virtual_channel, refcount);
struct hab_message *message, *msg_tmp;
struct export_desc *exp, *exp_tmp;
struct physical_channel *pchan = vchan->pchan;
struct uhab_context *ctx = vchan->ctx;
struct virtual_channel *vc, *vc_tmp;
@ -81,73 +85,84 @@ hab_vchan_free(struct kref *ref)
}
spin_unlock_bh(&vchan->rx_lock);
do {
found = 0;
write_lock(&ctx->exp_lock);
list_for_each_entry_safe(exp, exp_tmp, &ctx->exp_whse, node) {
if (exp->vcid_local == vchan->id) {
list_del(&exp->node);
found = 1;
break;
}
}
write_unlock(&ctx->exp_lock);
if (found) {
habmem_hyp_revoke(exp->payload, exp->payload_count);
habmem_remove_export(exp);
}
} while (found);
do {
found = 0;
spin_lock_bh(&ctx->imp_lock);
list_for_each_entry_safe(exp, exp_tmp, &ctx->imp_whse, node) {
if (exp->vcid_remote == vchan->id) {
list_del(&exp->node);
found = 1;
break;
}
}
spin_unlock_bh(&ctx->imp_lock);
if (found) {
habmm_imp_hyp_unmap(ctx->import_ctx, exp);
ctx->import_total--;
kfree(exp);
}
} while (found);
spin_lock_bh(&pchan->vid_lock);
idr_remove(&pchan->vchan_idr, HAB_VCID_GET_ID(vchan->id));
spin_unlock_bh(&pchan->vid_lock);
/* releasing the vchan from its ctx was done earlier in vchan close() */
hab_ctx_put(ctx); /* ctx is no longer needed from this vchan's view */
vchan->ctx = NULL;
/* release vchan from pchan. no more msg for this vchan */
write_lock(&pchan->vchans_lock);
list_for_each_entry_safe(vc, vc_tmp, &pchan->vchannels, pnode) {
if (vchan == vc) {
list_del(&vc->pnode);
/* the ref is held in case of pchan is freed */
pchan->vcnt--;
break;
}
}
write_unlock(&pchan->vchans_lock);
hab_pchan_put(pchan);
hab_ctx_put(ctx);
/* release the idr entry last so the same id is not reused early */
spin_lock_bh(&pchan->vid_lock);
idr_remove(&pchan->vchan_idr, HAB_VCID_GET_ID(vchan->id));
spin_unlock_bh(&pchan->vid_lock);
hab_pchan_put(pchan); /* no more need for pchan from this vchan */
kfree(vchan);
}
/*
* only for msg recv path to retrieve vchan from vcid and openid based on
* pchan's vchan list
*/
struct virtual_channel*
hab_vchan_get(struct physical_channel *pchan, struct hab_header *header)
{
struct virtual_channel *vchan;
uint32_t vchan_id = HAB_HEADER_GET_ID(*header);
uint32_t session_id = HAB_HEADER_GET_SESSION_ID(*header);
size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
uint32_t payload_type = HAB_HEADER_GET_TYPE(*header);
spin_lock_bh(&pchan->vid_lock);
vchan = idr_find(&pchan->vchan_idr, HAB_VCID_GET_ID(vchan_id));
if (vchan)
if ((vchan->session_id != session_id) ||
(!kref_get_unless_zero(&vchan->refcount)))
if (vchan) {
if (vchan->session_id != session_id)
/*
* skipped if the session differs even when the vcid
* is the same
*/
vchan = NULL;
else if (!vchan->otherend_id /*&& !vchan->session_id*/) {
/*
* an unpaired vchan can be fetched right after it is
* alloc'ed, so it has to be skipped when searching
* on behalf of a remote msg
*/
pr_warn("vcid %x is not paired yet session %d refcnt %d type %d sz %zd\n",
vchan->id, vchan->otherend_id,
get_refcnt(vchan->refcount),
payload_type, sizebytes);
vchan = NULL;
} else if (!kref_get_unless_zero(&vchan->refcount)) {
/*
* this happens when refcnt is already zero
* (put from other thread) or there is an actual error
*/
pr_err("failed to inc vcid %pK %x remote %x session %d refcnt %d header %x session %d type %d sz %zd\n",
vchan, vchan->id, vchan->otherend_id,
vchan->session_id, get_refcnt(vchan->refcount),
vchan_id, session_id, payload_type, sizebytes);
vchan = NULL;
} else if (vchan->otherend_closed || vchan->closed) {
pr_err("closed already remote %d local %d vcid %x remote %x session %d refcnt %d header %x session %d type %d sz %zd\n",
vchan->otherend_closed, vchan->closed,
vchan->id, vchan->otherend_id,
vchan->session_id, get_refcnt(vchan->refcount),
vchan_id, session_id, payload_type, sizebytes);
vchan = NULL;
}
}
spin_unlock_bh(&pchan->vid_lock);
return vchan;
@ -158,6 +173,7 @@ void hab_vchan_stop(struct virtual_channel *vchan)
if (vchan) {
vchan->otherend_closed = 1;
wake_up(&vchan->rx_queue);
wake_up_interruptible(&vchan->ctx->exp_wq);
}
}
@ -184,23 +200,36 @@ int hab_vchan_find_domid(struct virtual_channel *vchan)
return vchan ? vchan->pchan->dom_id : -1;
}
static void
hab_vchan_free_deferred(struct work_struct *work)
/* this should only be called once, after the refcnt drops to zero */
static void hab_vchan_schedule_free(struct kref *ref)
{
struct virtual_channel *vchan =
container_of(work, struct virtual_channel, work);
hab_vchan_free(&vchan->refcount);
}
static void
hab_vchan_schedule_free(struct kref *ref)
{
struct virtual_channel *vchan =
struct virtual_channel *vchanin =
container_of(ref, struct virtual_channel, refcount);
struct uhab_context *ctx = vchanin->ctx;
struct virtual_channel *vchan, *tmp;
int bnotify = 0;
INIT_WORK(&vchan->work, hab_vchan_free_deferred);
schedule_work(&vchan->work);
/*
* similar logic is in ctx free. if ctx free runs first,
* this is skipped
*/
write_lock(&ctx->ctx_lock);
list_for_each_entry_safe(vchan, tmp, &ctx->vchannels, node) {
if (vchan == vchanin) {
pr_debug("vchan free refcnt = %d\n",
get_refcnt(vchan->refcount));
ctx->vcnt--;
list_del(&vchan->node);
bnotify = 1;
break;
}
}
write_unlock(&ctx->ctx_lock);
if (bnotify)
hab_vchan_stop_notify(vchan);
hab_vchan_free(ref);
}
void hab_vchan_put(struct virtual_channel *vchan)
@ -210,17 +239,23 @@ void hab_vchan_put(struct virtual_channel *vchan)
}
int hab_vchan_query(struct uhab_context *ctx, int32_t vcid, uint64_t *ids,
char *names, size_t name_size, uint32_t flags)
char *names, size_t name_size, uint32_t flags)
{
struct virtual_channel *vchan = hab_get_vchan_fromvcid(vcid, ctx);
if (!vchan)
return -EINVAL;
if (!vchan || vchan->otherend_closed)
if (vchan->otherend_closed) {
hab_vchan_put(vchan);
return -ENODEV;
}
*ids = vchan->pchan->vmid_local |
((uint64_t)vchan->pchan->vmid_remote) << 32;
names[0] = 0;
names[name_size/2] = 0;
hab_vchan_put(vchan);
return 0;
}

View file

@ -10,13 +10,14 @@
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include "hab.h"
#include <linux/module.h>
int32_t habmm_socket_open(int32_t *handle, uint32_t mm_ip_id,
uint32_t timeout, uint32_t flags)
{
return hab_vchan_open(hab_driver.kctx, mm_ip_id, handle, flags);
return hab_vchan_open(hab_driver.kctx, mm_ip_id, handle,
timeout, flags);
}
EXPORT_SYMBOL(habmm_socket_open);
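For kernel clients the exported habmm calls compose into a plain open/send/recv/close sequence. A hedged usage sketch: the MM id value and the exact send/recv/close signatures are taken from habmm.h as assumptions, not from this hunk:

#include <habmm.h>

static int hab_echo_once(uint32_t mmid)
{
	int32_t handle = 0;
	char buf[64] = "ping";
	uint32_t rsize = sizeof(buf);
	int32_t ret;

	ret = habmm_socket_open(&handle, mmid, 0, 0);	/* 0: default timeout/flags */
	if (ret)
		return ret;

	ret = habmm_socket_send(handle, buf, sizeof(buf), 0);
	if (!ret)
		ret = habmm_socket_recv(handle, buf, &rsize, 0, 0);

	habmm_socket_close(handle);
	return ret;
}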
@ -55,6 +56,9 @@ int32_t habmm_socket_recv(int32_t handle, void *dst_buff, uint32_t *size_bytes,
if (ret == 0 && msg)
memcpy(dst_buff, msg->data, msg->sizebytes);
else if (ret && msg)
pr_warn("vcid %X recv failed %d but msg is still received %zd bytes\n",
handle, ret, msg->sizebytes);
if (msg)
hab_msg_free(msg);

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -43,7 +43,6 @@ int physical_channel_send(struct physical_channel *pchan,
int sizebytes = HAB_HEADER_GET_SIZE(*header);
struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;
int total_size = sizeof(*header) + sizebytes;
struct timeval tv;
if (total_size > dev->pipe_ep->tx_info.sh_buf->size)
return -EINVAL; /* too much data for ring */
@ -67,9 +66,13 @@ int physical_channel_send(struct physical_channel *pchan,
}
if (HAB_HEADER_GET_TYPE(*header) == HAB_PAYLOAD_TYPE_PROFILE) {
struct timeval tv;
struct habmm_xing_vm_stat *pstat =
(struct habmm_xing_vm_stat *)payload;
do_gettimeofday(&tv);
((uint64_t *)payload)[0] = tv.tv_sec;
((uint64_t *)payload)[1] = tv.tv_usec;
pstat->tx_sec = tv.tv_sec;
pstat->tx_usec = tv.tv_usec;
}
if (sizebytes) {
@ -102,7 +105,7 @@ void physical_channel_rx_dispatch(unsigned long data)
break; /* no data available */
if (header.signature != HAB_HEAD_SIGNATURE) {
pr_err("HAB signature mismatch, expect %X, received %X, id_type_size %X, session %X, sequence %X\n",
pr_err("HAB signature mismatch expect %X received %X, id_type_size %X session %X sequence %X\n",
HAB_HEAD_SIGNATURE, header.signature,
header.id_type_size,
header.session_id,

View file

@ -14,7 +14,7 @@
#ifndef HABMM_H
#define HABMM_H
#include <uapi/linux/habmmid.h>
#include "linux/habmmid.h"
#define HAB_API_VER_DEF(_MAJOR_, _MINOR_) \
((_MAJOR_&0xFF)<<16 | (_MINOR_&0xFFF))