Merge "soc: qcom: hab: Add open local-cancel and compiler warning fixes"

Linux Build Service Account, 2019-06-05 07:33:29 -07:00, committed by Gerrit - the friendly Code Review server
commit ced9c94ab1
13 changed files with 356 additions and 196 deletions

View file

@ -20,19 +20,28 @@ int physical_channel_read(struct physical_channel *pchan,
{
struct ghs_vdev *dev = (struct ghs_vdev *)pchan->hyp_data;
if (!payload || !dev->read_data) {
pr_err("invalid parameters %pK %pK offset %d read %zd\n",
payload, dev->read_data, dev->read_offset, read_size);
return 0;
}
/* size in header is only for payload excluding the header itself */
if (dev->read_size < read_size + sizeof(struct hab_header)) {
pr_warn("read %zd is less than requested %zd plus header %zd\n",
dev->read_size, read_size, sizeof(struct hab_header));
read_size = dev->read_size;
if (dev->read_size < read_size + sizeof(struct hab_header) +
dev->read_offset) {
pr_warn("read %zd is less than requested %zd header %zd offset %d\n",
dev->read_size, read_size,
sizeof(struct hab_header), dev->read_offset);
read_size = dev->read_size - dev->read_offset -
sizeof(struct hab_header);
}
/* always skip the header */
memcpy(payload, (unsigned char *)dev->read_data +
sizeof(struct hab_header) + dev->read_offset, read_size);
dev->read_offset += read_size;
dev->read_offset += (int)read_size;
return read_size;
return (int)read_size;
}
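
Note on the new bound check above: the clamp now includes dev->read_offset, so repeated partial reads of the same received message cannot run past the end of the receive buffer. A minimal standalone sketch of that arithmetic (HDR_BYTES stands in for sizeof(struct hab_header); all values are illustrative):

    #include <stddef.h>

    #define HDR_BYTES 12u  /* placeholder for sizeof(struct hab_header) */

    /* buf_bytes: total bytes sitting in the receive buffer (header included)
     * consumed:  payload bytes already handed out on earlier calls
     * want:      bytes requested by the current call
     */
    static size_t clamp_read(size_t buf_bytes, size_t consumed, size_t want)
    {
            if (buf_bytes < want + HDR_BYTES + consumed)
                    want = buf_bytes - consumed - HDR_BYTES;
            return want;
    }

For example, with buf_bytes = 64, consumed = 40 and want = 32, the request is clamped to 64 - 40 - 12 = 12 bytes, exactly what remains after the header and the already-consumed payload.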
int physical_channel_send(struct physical_channel *pchan,
@ -41,8 +50,8 @@ int physical_channel_send(struct physical_channel *pchan,
{
size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
struct ghs_vdev *dev = (struct ghs_vdev *)pchan->hyp_data;
GIPC_Result result;
uint8_t *msg;
GIPC_Result result = GIPC_Success;
uint8_t *msg = NULL;
spin_lock_bh(&dev->io_lock);
@ -61,7 +70,7 @@ int physical_channel_send(struct physical_channel *pchan,
}
if (HAB_HEADER_GET_TYPE(*header) == HAB_PAYLOAD_TYPE_PROFILE) {
struct timeval tv;
struct timeval tv = {0};
struct habmm_xing_vm_stat *pstat =
(struct habmm_xing_vm_stat *)payload;
@ -90,11 +99,11 @@ int physical_channel_send(struct physical_channel *pchan,
void physical_channel_rx_dispatch(unsigned long physical_channel)
{
struct hab_header header;
struct hab_header header = {0};
struct physical_channel *pchan =
(struct physical_channel *)physical_channel;
struct ghs_vdev *dev = (struct ghs_vdev *)pchan->hyp_data;
GIPC_Result result;
GIPC_Result result = GIPC_Success;
uint32_t events;
unsigned long flags;
@ -119,7 +128,7 @@ void physical_channel_rx_dispatch(unsigned long physical_channel)
dev->read_data,
GIPC_RECV_BUFF_SIZE_BYTES,
&dev->read_size,
&header.id_type_size);
(uint32_t *)&header.id_type_size);
if (result == GIPC_Success || dev->read_size > 0) {
/* handle corrupted msg? */

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -21,7 +21,7 @@
.openlock = __SPIN_LOCK_UNLOCKED(&hab_devices[__num__].openlock)\
}
static const char hab_info_str[] = "Change: 17280941 Revision: #81";
static const char hab_info_str[] = "Change: 19231400 Revision: #95";
/*
* The following has to match habmm definitions, order does not matter if
@ -60,7 +60,7 @@ struct hab_driver hab_driver = {
struct uhab_context *hab_ctx_alloc(int kernel)
{
struct uhab_context *ctx;
struct uhab_context *ctx = NULL;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@ -79,6 +79,9 @@ struct uhab_context *hab_ctx_alloc(int kernel)
rwlock_init(&ctx->exp_lock);
rwlock_init(&ctx->ctx_lock);
INIT_LIST_HEAD(&ctx->forbidden_chans);
spin_lock_init(&ctx->forbidden_lock);
INIT_LIST_HEAD(&ctx->pending_open);
kref_init(&ctx->refcount);
ctx->import_ctx = habmem_imp_hyp_open();
@ -106,18 +109,29 @@ void hab_ctx_free(struct kref *ref)
{
struct uhab_context *ctx =
container_of(ref, struct uhab_context, refcount);
struct hab_export_ack_recvd *ack_recvd, *tmp;
struct virtual_channel *vchan;
struct physical_channel *pchan;
int i;
struct uhab_context *ctxdel, *ctxtmp;
struct hab_open_node *node;
struct export_desc *exp, *exp_tmp;
struct hab_export_ack_recvd *ack_recvd = NULL, *tmp = NULL;
struct virtual_channel *vchan = NULL;
struct physical_channel *pchan = NULL;
int i = 0;
struct uhab_context *ctxdel = NULL, *ctxtmp = NULL;
struct hab_open_node *node = NULL;
struct export_desc *exp = NULL, *exp_tmp = NULL;
struct hab_forbidden_node *forbidden = NULL, *forbidden_tmp = NULL;
spin_lock_bh(&ctx->forbidden_lock);
list_for_each_entry_safe(forbidden, forbidden_tmp,
&ctx->forbidden_chans, node) {
list_del(&forbidden->node);
pr_debug("Remove mmid 0x%x from forbidden list, ctx %p\n",
forbidden->mmid, ctx);
kfree(forbidden);
}
spin_unlock_bh(&ctx->forbidden_lock);
/* garbage-collect exp/imp buffers */
write_lock_bh(&ctx->exp_lock);
list_for_each_entry_safe(exp, exp_tmp, &ctx->exp_whse, node) {
list_del(&exp->node);
list_del((struct list_head *)&exp->node);
pr_debug("potential leak exp %d vcid %X recovered\n",
exp->export_id, exp->vcid_local);
habmem_hyp_revoke(exp->payload, exp->payload_count);
@ -127,7 +141,7 @@ void hab_ctx_free(struct kref *ref)
spin_lock_bh(&ctx->imp_lock);
list_for_each_entry_safe(exp, exp_tmp, &ctx->imp_whse, node) {
list_del(&exp->node);
list_del((struct list_head *)&exp->node);
ctx->import_total--;
pr_debug("leaked imp %d vcid %X for ctx is collected total %d\n",
exp->export_id, exp->vcid_local,
@ -211,7 +225,7 @@ void hab_ctx_free(struct kref *ref)
struct virtual_channel *hab_get_vchan_fromvcid(int32_t vcid,
struct uhab_context *ctx, int ignore_remote)
{
struct virtual_channel *vchan;
struct virtual_channel *vchan = NULL;
read_lock(&ctx->ctx_lock);
list_for_each_entry(vchan, &ctx->vchannels, node) {
@ -236,7 +250,7 @@ struct virtual_channel *hab_get_vchan_fromvcid(int32_t vcid,
struct hab_device *find_hab_device(unsigned int mm_id)
{
int i;
int i = 0;
for (i = 0; i < hab_driver.ndevices; i++) {
if (hab_driver.devp[i].id == HAB_MMID_GET_MAJOR(mm_id))
@ -259,13 +273,13 @@ struct virtual_channel *frontend_open(struct uhab_context *ctx,
unsigned int mm_id,
int dom_id)
{
int ret, ret2, open_id = 0;
int ret = 0, ret2 = 0, open_id = 0;
struct physical_channel *pchan = NULL;
struct hab_device *dev;
struct hab_device *dev = NULL;
struct virtual_channel *vchan = NULL;
static atomic_t open_id_counter = ATOMIC_INIT(0);
struct hab_open_request request;
struct hab_open_request *recv_request;
struct hab_open_request request = {0};
struct hab_open_request *recv_request = NULL;
int sub_id = HAB_MMID_GET_MINOR(mm_id);
struct hab_open_node pending_open = { { 0 } };
@ -284,7 +298,7 @@ struct virtual_channel *frontend_open(struct uhab_context *ctx,
goto err;
}
open_id = atomic_inc_return(&open_id_counter);
open_id = (int)atomic_inc_return(&open_id_counter);
vchan = hab_vchan_alloc(ctx, pchan, open_id);
if (!vchan) {
pr_err("vchan alloc failed\n");
@ -336,7 +350,7 @@ struct virtual_channel *frontend_open(struct uhab_context *ctx,
vchan->id);
hab_open_pending_exit(ctx, pchan, &pending_open);
if (ret != -EINTR)
if (ret != -EINTR && ret != -ENXIO)
ret = -EINVAL;
goto err;
}
@ -377,15 +391,15 @@ err:
struct virtual_channel *backend_listen(struct uhab_context *ctx,
unsigned int mm_id, int timeout)
{
int ret, ret2;
int open_id, ver_fe;
int ret = 0, ret2 = 0;
int open_id = 0, ver_fe = 0;
int sub_id = HAB_MMID_GET_MINOR(mm_id);
struct physical_channel *pchan = NULL;
struct hab_device *dev;
struct hab_device *dev = NULL;
struct virtual_channel *vchan = NULL;
struct hab_open_request request;
struct hab_open_request *recv_request;
uint32_t otherend_vchan_id;
struct hab_open_request request = {0};
struct hab_open_request *recv_request = NULL;
uint32_t otherend_vchan_id = 0;
struct hab_open_node pending_open = { { 0 } };
dev = find_hab_device(mm_id);
@ -407,6 +421,8 @@ struct virtual_channel *backend_listen(struct uhab_context *ctx,
ret = -EINVAL;
if (-EAGAIN == ret) {
ret = -ETIMEDOUT;
} else if (-ENXIO == ret) {
pr_warn("open request canceling\n");
} else {
/* device is closed */
pr_err("open request wait failed ctx closing %d\n",
@ -416,7 +432,7 @@ struct virtual_channel *backend_listen(struct uhab_context *ctx,
} else if (!ret && recv_request &&
((recv_request->xdata.ver_fe & 0xFFFF0000) !=
(HAB_API_VER & 0xFFFF0000))) {
int ret2;
int ret2 = 0;
/* version check */
pr_err("version mismatch fe %X be %X on mmid %d\n",
recv_request->xdata.ver_fe, HAB_API_VER, mm_id);
@ -467,11 +483,11 @@ struct virtual_channel *backend_listen(struct uhab_context *ctx,
hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_DONE,
pchan, 0, sub_id, open_id);
ret = hab_open_listen(ctx, dev, &request, &recv_request,
HAB_HS_TIMEOUT);
HAB_HS_INIT_DONE_TIMEOUT);
hab_open_pending_exit(ctx, pchan, &pending_open);
if (ret && recv_request &&
if (!ret && recv_request &&
recv_request->type == HAB_PAYLOAD_TYPE_INIT_CANCEL) {
pr_err("listen cancelled vcid %x subid %d openid %d ret %d\n",
pr_warn("open rmt cancelled vcid %x subid %d openid %d ret %d\n",
request.xdata.vchan_id, request.xdata.sub_id,
request.xdata.open_id, ret);
@ -487,9 +503,12 @@ struct virtual_channel *backend_listen(struct uhab_context *ctx,
ret2, vchan->id);
hab_open_pending_exit(ctx, pchan, &pending_open);
ret = -ENODEV; /* open request cancelled remotely */
ret = -ENODEV; /* open request FE cancelled remotely */
break;
} else if (ret != -EAGAIN) {
} else if (-ENXIO == ret) {
pr_warn("backend mmid %d listen canceling\n", mm_id);
goto err;
} else if (ret != -EAGAIN && ret != -EINTR) {
hab_open_pending_exit(ctx, pchan, &pending_open);
break; /* received something. good case! */
}
@ -540,8 +559,8 @@ long hab_vchan_send(struct uhab_context *ctx,
void *data,
unsigned int flags)
{
struct virtual_channel *vchan;
int ret;
struct virtual_channel *vchan = NULL;
int ret = 0;
struct hab_header header = HAB_HEADER_INITIALIZER;
int nonblocking_flag = flags & HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING;
@ -557,7 +576,7 @@ long hab_vchan_send(struct uhab_context *ctx,
goto err;
}
HAB_HEADER_SET_SIZE(header, sizebytes);
HAB_HEADER_SET_SIZE(header, (uint32_t)sizebytes);
if (flags & HABMM_SOCKET_SEND_FLAGS_XING_VM_STAT) {
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_PROFILE);
if (sizebytes < sizeof(struct habmm_xing_vm_stat)) {
@ -602,11 +621,11 @@ int hab_vchan_recv(struct uhab_context *ctx,
int *rsize,
unsigned int flags)
{
struct virtual_channel *vchan;
struct virtual_channel *vchan = NULL;
int ret = 0;
int nonblocking_flag = flags & HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING;
vchan = hab_get_vchan_fromvcid(vcid, ctx, 1);
vchan = hab_get_vchan_fromvcid(vcid, ctx, 1); /* to drain local q */
if (!vchan) {
pr_err("vcid %X vchan 0x%pK ctx %pK\n", vcid, vchan, ctx);
return -ENODEV;
@ -640,6 +659,68 @@ bool hab_is_loopback(void)
return hab_driver.b_loopback;
}
static int hab_stop(struct uhab_context *ctx, unsigned int mmid)
{
struct hab_forbidden_node *node = NULL, *tmp = NULL;
struct hab_device *dev = NULL;
dev = find_hab_device(mmid);
if (dev == NULL) {
pr_err("failed to find dev based on id 0x%x\n", mmid);
return -EINVAL;
}
spin_lock_bh(&ctx->forbidden_lock);
list_for_each_entry_safe(node, tmp, &ctx->forbidden_chans, node) {
if (node->mmid == mmid) {
pr_info("mmid 0x%x has been in forbidden list, ctx %p\n",
mmid, ctx);
spin_unlock_bh(&ctx->forbidden_lock);
return 0;
}
}
pr_info("Add mmid 0x%x into forbidden list, ctx %p\n",
mmid, ctx);
node = kzalloc(sizeof(*node), GFP_ATOMIC);
if (!node) {
spin_unlock_bh(&ctx->forbidden_lock);
return -ENOMEM;
}
node->mmid = mmid;
list_add_tail(&node->node, &ctx->forbidden_chans);
spin_unlock_bh(&ctx->forbidden_lock);
wake_up_interruptible(&dev->openq);
return 0;
}
int hab_is_forbidden(struct uhab_context *ctx,
struct hab_device *dev,
uint32_t sub_id)
{
struct hab_forbidden_node *node = NULL, *tmp = NULL;
if (!dev)
return 0;
spin_lock_bh(&ctx->forbidden_lock);
list_for_each_entry_safe(node, tmp, &ctx->forbidden_chans, node) {
if ((HAB_MMID_GET_MAJOR(node->mmid) == dev->id) &&
(HAB_MMID_GET_MINOR(node->mmid) == sub_id)) {
spin_unlock_bh(&ctx->forbidden_lock);
return 1;
}
}
spin_unlock_bh(&ctx->forbidden_lock);
return 0;
}
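
hab_stop() and hab_is_forbidden() above are the core of the open local-cancel feature: closing an id that never produced a virtual channel records its mmid on the context's forbidden list and wakes the device's open wait queue, so the open/listen paths can notice the cancellation and return -ENXIO instead of waiting for a timeout. A rough sketch of the flow, pieced together from the hunks in this commit (the call shapes are simplified, not the exact code):

    /* 1. hab_vchan_close(ctx, id) finds no matching vchan
     *      -> hab_stop(ctx, id): allocate a hab_forbidden_node, add the mmid
     *         to ctx->forbidden_chans, wake_up_interruptible(&dev->openq)
     *
     * 2. a waiter blocked in hab_open_listen() wakes up, because
     *    hab_open_request_find() now also returns 1 when the mmid is forbidden
     *
     * 3. the listen path turns that into a distinct error:
     */
    if (hab_is_forbidden(ctx, dev, listen->xdata.sub_id)) {
            pr_warn("local open cancelled ret %d\n", ret);
            ret = -ENXIO;  /* frontend_open()/backend_listen() pass this through */
    }

hab_vchan_open() additionally rejects a forbidden mmid up front with -ENXIO, so a cancelled id stays cancelled for that context until hab_ctx_free() drains the forbidden list.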
int hab_vchan_open(struct uhab_context *ctx,
unsigned int mmid,
int32_t *vcid,
@ -647,8 +728,9 @@ int hab_vchan_open(struct uhab_context *ctx,
uint32_t flags)
{
struct virtual_channel *vchan = NULL;
struct hab_device *dev;
struct hab_device *dev = NULL;
(void)flags;
pr_debug("Open mmid=%d, loopback mode=%d, loopback be ctx %d\n",
mmid, hab_driver.b_loopback, ctx->lb_be);
@ -664,6 +746,13 @@ int hab_vchan_open(struct uhab_context *ctx,
} else {
dev = find_hab_device(mmid);
if (hab_is_forbidden(ctx,
dev, HAB_MMID_GET_MINOR(mmid))) {
pr_err("mmid 0x%x has been forbidden",
mmid);
return -ENXIO;
}
if (dev) {
struct physical_channel *pchan =
hab_pchan_find_domid(dev,
@ -689,7 +778,7 @@ int hab_vchan_open(struct uhab_context *ctx,
if (IS_ERR(vchan)) {
if (-ETIMEDOUT != PTR_ERR(vchan) && -EAGAIN != PTR_ERR(vchan))
pr_err("vchan open failed mmid=%d\n", mmid);
return PTR_ERR(vchan);
return (int)PTR_ERR(vchan);
}
pr_debug("vchan id %x remote id %x session %d\n", vchan->id,
@ -720,7 +809,8 @@ void hab_send_close_msg(struct virtual_channel *vchan)
void hab_vchan_close(struct uhab_context *ctx, int32_t vcid)
{
struct virtual_channel *vchan, *tmp;
struct virtual_channel *vchan = NULL, *tmp = NULL;
int vchan_found = 0;
if (!ctx)
return;
@ -744,10 +834,14 @@ void hab_vchan_close(struct uhab_context *ctx, int32_t vcid)
hab_vchan_stop_notify(vchan);
hab_vchan_put(vchan); /* there is a lock inside */
write_lock(&ctx->ctx_lock);
vchan_found = 1;
break;
}
}
write_unlock(&ctx->ctx_lock);
if (!vchan_found)
hab_stop(ctx, vcid);
}
/*
@ -760,9 +854,9 @@ void hab_vchan_close(struct uhab_context *ctx, int32_t vcid)
static int hab_initialize_pchan_entry(struct hab_device *mmid_device,
int vmid_local, int vmid_remote, int is_be)
{
char pchan_name[MAX_VMID_NAME_SIZE];
char pchan_name[MAX_VMID_NAME_SIZE] = {0};
struct physical_channel *pchan = NULL;
int ret;
int ret = 0;
int vmid = is_be ? vmid_remote : vmid_local; /* used for naming only */
if (!mmid_device) {
@ -799,7 +893,7 @@ static int hab_initialize_pchan_entry(struct hab_device *mmid_device,
*/
static int hab_generate_pchan(struct local_vmid *settings, int i, int j)
{
int k, ret = 0;
int k = 0, ret = 0;
pr_debug("%d as mmid %d in vmid %d\n",
HABCFG_GET_MMID(settings, i, j), j, i);
@ -922,13 +1016,13 @@ static int hab_generate_pchan(struct local_vmid *settings, int i, int j)
*/
static int hab_generate_pchan_list(struct local_vmid *settings)
{
int i, j, ret = 0;
int i = 0, j = 0, ret = 0;
/* scan by valid VMs, then mmid */
pr_debug("self vmid is %d\n", settings->self);
for (i = 0; i < HABCFG_VMID_MAX; i++) {
if (HABCFG_GET_VMID(settings, i) != HABCFG_VMID_INVALID &&
HABCFG_GET_VMID(settings, i) != settings->self) {
if (HABCFG_GET_VMID(settings, i) != HABCFG_VMID_INVALID
&& HABCFG_GET_VMID(settings, i) != settings->self) {
pr_debug("create pchans for vm %d\n", i);
for (j = 1; j <= HABCFG_MMID_AREA_MAX; j++) {
@ -954,9 +1048,9 @@ static int hab_generate_pchan_list(struct local_vmid *settings)
int do_hab_parse(void)
{
int result;
int i;
struct hab_device *device;
int result = 0;
int i = 0;
struct hab_device *device = NULL;
/* single GVM is 2, multigvm is 2 or 3. GHS LV-GVM 2, LA-GVM 3 */
int default_gvmid = DEFAULT_GVMID;
@ -966,7 +1060,8 @@ int do_hab_parse(void)
/* first check if hypervisor plug-in is ready */
result = hab_hypervisor_register();
if (result) {
pr_err("register HYP plug-in failed, ret %d\n", result);
pr_err("register HYP plug-in failed, ret %d driver version %s\n",
result, hab_info_str);
return result;
}
@ -1014,13 +1109,13 @@ int get_refcnt(struct kref ref)
void hab_hypervisor_unregister_common(void)
{
int status, i;
struct uhab_context *ctx;
struct virtual_channel *vchan;
int status = 0, i = 0;
struct uhab_context *ctx = NULL;
struct virtual_channel *vchan = NULL;
for (i = 0; i < hab_driver.ndevices; i++) {
struct hab_device *habdev = &hab_driver.devp[i];
struct physical_channel *pchan, *pchan_tmp;
struct physical_channel *pchan = NULL, *pchan_tmp = NULL;
list_for_each_entry_safe(pchan, pchan_tmp,
&habdev->pchannels, node) {
@ -1194,13 +1289,11 @@ static long hab_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
recv_param->sizebytes = 0;
ret = -EFAULT;
}
} else if (ret && msg) {
pr_warn("vcid %X recv failed %d and msg is still of %zd bytes\n",
recv_param->vcid, (int)ret, msg->sizebytes);
}
if (msg)
hab_msg_free(msg);
} else
pr_warn("vcid %X recv failed %d buf size %d\n",
recv_param->vcid, (int)ret,
recv_param->sizebytes);
break;
case IOCTL_HAB_VC_EXPORT:
ret = hab_mem_export(ctx, (struct hab_export *)data, 0);

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -46,6 +46,7 @@
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/version.h>
#include <soc/qcom/boot_stats.h>
enum hab_payload_type {
@ -161,13 +162,13 @@ struct hab_header {
#define HAB_HEADER_SET_SIZE(header, size) \
((header).id_type_size = ((header).id_type_size & \
(~HAB_HEADER_SIZE_MASK)) | \
(uint32_t)(~HAB_HEADER_SIZE_MASK)) | \
(((size) << HAB_HEADER_SIZE_SHIFT) & \
HAB_HEADER_SIZE_MASK))
#define HAB_HEADER_SET_TYPE(header, type) \
((header).id_type_size = ((header).id_type_size & \
(~HAB_HEADER_TYPE_MASK)) | \
(uint32_t)(~HAB_HEADER_TYPE_MASK)) | \
(((type) << HAB_HEADER_TYPE_SHIFT) & \
HAB_HEADER_TYPE_MASK))
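
The only functional change in the two macros above is the (uint32_t) cast on the inverted mask, presumably to keep the clear-and-set expression fully unsigned and silence a signed/unsigned conversion warning. A toy model of the same packing pattern; the mask and shift values below are invented for illustration and are not the driver's real bit layout:

    #include <stdint.h>

    #define SIZE_SHIFT 0
    #define SIZE_MASK  0x000007FF   /* plain int on purpose, like a typical define */

    static inline void set_size(uint32_t *id_type_size, uint32_t size)
    {
            /* ~SIZE_MASK is a negative int; casting it to uint32_t before the
             * & / | keeps every operand unsigned, matching the change above */
            *id_type_size = (*id_type_size & (uint32_t)(~SIZE_MASK)) |
                            ((size << SIZE_SHIFT) & SIZE_MASK);
    }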
@ -192,6 +193,7 @@ struct hab_header {
#define HAB_HEADER_GET_SESSION_ID(header) ((header).session_id)
#define HAB_HS_TIMEOUT (10*1000*1000)
#define HAB_HS_INIT_DONE_TIMEOUT (3*1000)
struct physical_channel {
struct list_head node;
@ -265,6 +267,11 @@ struct hab_message {
uint32_t data[];
};
struct hab_forbidden_node {
struct list_head node;
uint32_t mmid;
};
/* for all the pchans of same kind */
struct hab_device {
char name[MAX_VMID_NAME_SIZE];
@ -302,6 +309,9 @@ struct uhab_context {
struct list_head pending_open; /* sent to remote */
int pending_cnt;
struct list_head forbidden_chans;
spinlock_t forbidden_lock;
rwlock_t ctx_lock;
int closing;
int kernel;
@ -403,6 +413,9 @@ struct export_desc {
unsigned char payload[1];
} __packed;
int hab_is_forbidden(struct uhab_context *ctx,
struct hab_device *dev,
uint32_t sub_id);
int hab_vchan_open(struct uhab_context *ctx,
unsigned int mmid, int32_t *vcid,
int32_t timeout, uint32_t flags);
@ -521,7 +534,7 @@ static inline void hab_ctx_get(struct uhab_context *ctx)
static inline void hab_ctx_put(struct uhab_context *ctx)
{
if (ctx)
kref_put(&ctx->refcount, hab_ctx_free);
kref_put(&ctx->refcount, &hab_ctx_free);
}
void hab_send_close_msg(struct virtual_channel *vchan);

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -93,7 +93,7 @@ static int get_dt_name_idx(int vmid_base, int mmid,
struct ghs_vmm_plugin_info_s *plugin_info)
{
int idx = -1;
int i;
int i = 0;
if (vmid_base < 0 || vmid_base > plugin_info->probe_cnt /
GIPC_VM_SET_CNT) {

View file

@ -598,6 +598,9 @@ static int hab_mem_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
vma->vm_private_data = pglist;
vma->vm_flags |= VM_MIXEDMAP;
if (!(pglist->userflags & HABMM_IMPORT_FLAGS_CACHED))
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
return 0;
}

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -31,15 +31,14 @@ static int hab_export_ack_find(struct uhab_context *ctx,
struct hab_export_ack *expect_ack, struct virtual_channel *vchan)
{
int ret = 0;
struct hab_export_ack_recvd *ack_recvd, *tmp;
struct hab_export_ack_recvd *ack_recvd = NULL, *tmp = NULL;
spin_lock_bh(&ctx->expq_lock);
list_for_each_entry_safe(ack_recvd, tmp, &ctx->exp_rxq, node) {
if ((ack_recvd->ack.export_id == expect_ack->export_id &&
if (ack_recvd->ack.export_id == expect_ack->export_id &&
ack_recvd->ack.vcid_local == expect_ack->vcid_local &&
ack_recvd->ack.vcid_remote == expect_ack->vcid_remote)
|| vchan->otherend_closed) {
ack_recvd->ack.vcid_remote == expect_ack->vcid_remote) {
list_del(&ack_recvd->node);
kfree(ack_recvd);
ret = 1;
@ -52,6 +51,12 @@ static int hab_export_ack_find(struct uhab_context *ctx,
}
}
if (!ret && vchan->otherend_closed) {
pr_info("no expected ack, but vchan %x is remotely closed\n",
vchan->id);
ret = 1;
}
spin_unlock_bh(&ctx->expq_lock);
return ret;
@ -60,7 +65,7 @@ static int hab_export_ack_find(struct uhab_context *ctx,
static int hab_export_ack_wait(struct uhab_context *ctx,
struct hab_export_ack *expect_ack, struct virtual_channel *vchan)
{
int ret;
int ret = 0;
ret = wait_event_interruptible_timeout(ctx->exp_wq,
hab_export_ack_find(ctx, expect_ack, vchan),
@ -83,8 +88,8 @@ static struct export_desc *habmem_add_export(struct virtual_channel *vchan,
int sizebytes,
uint32_t flags)
{
struct uhab_context *ctx;
struct export_desc *exp;
struct uhab_context *ctx = NULL;
struct export_desc *exp = NULL;
if (!vchan || !sizebytes)
return NULL;
@ -112,7 +117,7 @@ static struct export_desc *habmem_add_export(struct virtual_channel *vchan,
ctx = vchan->ctx;
write_lock(&ctx->exp_lock);
ctx->export_total++;
list_add_tail(&exp->node, &ctx->exp_whse);
list_add_tail((struct list_head *)&exp->node, &ctx->exp_whse);
write_unlock(&ctx->exp_lock);
return exp;
@ -120,8 +125,8 @@ static struct export_desc *habmem_add_export(struct virtual_channel *vchan,
void habmem_remove_export(struct export_desc *exp)
{
struct physical_channel *pchan;
struct uhab_context *ctx;
struct physical_channel *pchan = NULL;
struct uhab_context *ctx = NULL;
if (!exp || !exp->ctx || !exp->pchan) {
if (exp)
@ -146,7 +151,7 @@ void habmem_remove_export(struct export_desc *exp)
static int compress_pfns(void **pfns, int npages, unsigned int *data_size)
{
int i, j = 0;
int i = 0, j = 0;
struct grantable *item = (struct grantable *)*pfns;
int region_size = 1;
struct compressed_pfns *new_table =
@ -162,8 +167,8 @@ static int compress_pfns(void **pfns, int npages, unsigned int *data_size)
region_size++; /* continuous pfn */
} else {
new_table->region[j].size = region_size;
new_table->region[j].space = item[i].pfn -
item[i-1].pfn - 1;
new_table->region[j].space = (int)(item[i].pfn -
item[i-1].pfn - 1);
j++;
region_size = 1;
}
@ -173,8 +178,8 @@ static int compress_pfns(void **pfns, int npages, unsigned int *data_size)
new_table->nregions = j+1;
vfree(*pfns);
*data_size = sizeof(struct compressed_pfns) +
sizeof(struct region)*new_table->nregions;
*data_size = (int)(sizeof(struct compressed_pfns) +
sizeof(struct region)*new_table->nregions);
*pfns = new_table;
return 0;
}
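
For reference, compress_pfns() run-length encodes a flat pfn array into the struct compressed_pfns table that is shipped to the other VM: each region records how many consecutive pfns it covers (size) and, with this change cast explicitly to int, the gap to the next region (space). A hedged worked example with made-up pfns (the table header that anchors the first pfn is outside this hunk):

    /* input pfns: 100 101 102 200 201          (npages = 5)
     *
     * region[0] = { .size = 3, .space = 200 - 102 - 1 = 97 }   gap after the run
     * region[1] = { .size = 2, .space = whatever the final pass leaves }
     * nregions  = 2
     *
     * data_size = sizeof(struct compressed_pfns) + sizeof(struct region) * 2
     */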
@ -191,9 +196,9 @@ static int habmem_export_vchan(struct uhab_context *ctx,
int nunits,
uint32_t flags,
uint32_t *export_id) {
int ret;
struct export_desc *exp;
uint32_t sizebytes = sizeof(*exp) + payload_size;
int ret = 0;
struct export_desc *exp = NULL;
uint32_t sizebytes = (uint32_t)(sizeof(*exp) + payload_size);
struct hab_export_ack expected_ack = {0};
struct hab_header header = HAB_HEADER_INITIALIZER;
@ -239,8 +244,8 @@ int hab_mem_export(struct uhab_context *ctx,
void *pdata_exp = NULL;
unsigned int pdata_size = 0;
uint32_t export_id = 0;
struct virtual_channel *vchan;
int page_count;
struct virtual_channel *vchan = NULL;
int page_count = 0;
int compressed = 0;
if (!ctx || !param || !param->buffer)
@ -266,7 +271,7 @@ int hab_mem_export(struct uhab_context *ctx,
vchan->pchan->dom_id,
pdata_exp,
&compressed,
&pdata_size);
(int *)&pdata_size);
} else {
ret = habmem_hyp_grant_user((unsigned long)param->buffer,
page_count,
@ -274,7 +279,7 @@ int hab_mem_export(struct uhab_context *ctx,
vchan->pchan->dom_id,
pdata_exp,
&compressed,
&pdata_size);
(int *)&pdata_size);
}
if (ret < 0) {
pr_err("habmem_hyp_grant vc %x failed size=%d ret=%d\n",
@ -306,9 +311,10 @@ int hab_mem_unexport(struct uhab_context *ctx,
int kernel)
{
int ret = 0, found = 0;
struct export_desc *exp, *tmp;
struct virtual_channel *vchan;
struct export_desc *exp = NULL, *tmp = NULL;
struct virtual_channel *vchan = NULL;
(void)kernel;
if (!ctx || !param)
return -EINVAL;
@ -322,8 +328,9 @@ int hab_mem_unexport(struct uhab_context *ctx,
write_lock(&ctx->exp_lock);
list_for_each_entry_safe(exp, tmp, &ctx->exp_whse, node) {
if (param->exportid == exp->export_id &&
vchan->pchan == exp->pchan) {
list_del(&exp->node);
vchan->pchan == exp->pchan &&
param->vcid == exp->vcid_local) {
list_del((struct list_head *)&exp->node);
found = 1;
break;
}
@ -355,7 +362,7 @@ int hab_mem_import(struct uhab_context *ctx,
{
int ret = 0, found = 0;
struct export_desc *exp = NULL;
struct virtual_channel *vchan;
struct virtual_channel *vchan = NULL;
if (!ctx || !param)
return -EINVAL;
@ -368,8 +375,9 @@ int hab_mem_import(struct uhab_context *ctx,
spin_lock_bh(&ctx->imp_lock);
list_for_each_entry(exp, &ctx->imp_whse, node) {
if ((exp->export_id == param->exportid) &&
(exp->pchan == vchan->pchan)) {
if (exp->export_id == param->exportid &&
exp->pchan == vchan->pchan &&
param->vcid == exp->vcid_local) {
found = 1;
break;
}
@ -414,8 +422,8 @@ int hab_mem_unimport(struct uhab_context *ctx,
int kernel)
{
int ret = 0, found = 0;
struct export_desc *exp = NULL, *exp_tmp;
struct virtual_channel *vchan;
struct export_desc *exp = NULL, *exp_tmp = NULL;
struct virtual_channel *vchan = NULL;
if (!ctx || !param)
return -EINVAL;
@ -430,9 +438,10 @@ int hab_mem_unimport(struct uhab_context *ctx,
spin_lock_bh(&ctx->imp_lock);
list_for_each_entry_safe(exp, exp_tmp, &ctx->imp_whse, node) {
if (exp->export_id == param->exportid &&
exp->pchan == vchan->pchan) {
exp->pchan == vchan->pchan &&
param->vcid == exp->vcid_local) {
/* same pchan is expected here */
list_del(&exp->node);
list_del((struct list_head *)&exp->node);
ctx->import_total--;
found = 1;
break;

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -14,7 +14,7 @@
static int hab_rx_queue_empty(struct virtual_channel *vchan)
{
int ret;
int ret = 0;
spin_lock_bh(&vchan->rx_lock);
ret = list_empty(&vchan->rx_list);
@ -25,11 +25,11 @@ static int hab_rx_queue_empty(struct virtual_channel *vchan)
static struct hab_message*
hab_msg_alloc(struct physical_channel *pchan, size_t sizebytes)
{
struct hab_message *message;
struct hab_message *message = NULL;
if (sizebytes > HAB_HEADER_SIZE_MASK) {
pr_err("pchan %s send size too large %zd\n",
pchan->name, sizebytes);
pr_err("pchan %s send size too large %zd header %zd\n",
pchan->name, sizebytes, sizeof(*message));
return NULL;
}
@ -81,16 +81,16 @@ hab_msg_dequeue(struct virtual_channel *vchan, struct hab_message **msg,
message = list_first_entry(&vchan->rx_list,
struct hab_message, node);
if (message) {
if (*rsize >= message->sizebytes) {
if (*rsize >= (int)message->sizebytes) {
/* msg can be safely retrieved in full */
list_del(&message->node);
ret = 0;
*rsize = message->sizebytes;
*rsize = (int)message->sizebytes;
} else {
pr_err("vcid %x rcv buf too small %d < %zd\n",
vchan->id, *rsize,
message->sizebytes);
*rsize = message->sizebytes;
*rsize = (int)message->sizebytes;
message = NULL;
ret = -EOVERFLOW; /* come back again */
}
@ -121,7 +121,7 @@ static int hab_export_enqueue(struct virtual_channel *vchan,
struct uhab_context *ctx = vchan->ctx;
spin_lock_bh(&ctx->imp_lock);
list_add_tail(&exp->node, &ctx->imp_whse);
list_add_tail((struct list_head *)&exp->node, &ctx->imp_whse);
ctx->import_total++;
spin_unlock_bh(&ctx->imp_lock);
@ -139,7 +139,7 @@ static int hab_send_export_ack(struct virtual_channel *vchan,
};
struct hab_header header = HAB_HEADER_INITIALIZER;
HAB_HEADER_SET_SIZE(header, sizeof(exp_ack));
HAB_HEADER_SET_SIZE(header, (uint32_t)sizeof(exp_ack));
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_EXPORT_ACK);
HAB_HEADER_SET_ID(header, exp->vcid_local);
HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);
@ -159,9 +159,9 @@ static int hab_receive_create_export_ack(struct physical_channel *pchan,
pr_err("exp ack size %zu is not as arrived %zu\n",
sizeof(ack_recvd->ack), sizebytes);
if (sizebytes > HAB_HEADER_SIZE_MASK) {
pr_err("pchan %s read size too large %zd\n",
pchan->name, sizebytes);
if (sizebytes > sizeof(ack_recvd->ack)) {
pr_err("pchan %s read size too large %zd %zd\n",
pchan->name, sizebytes, sizeof(ack_recvd->ack));
return -EINVAL;
}
@ -197,16 +197,15 @@ int hab_msg_recv(struct physical_channel *pchan,
struct hab_header *header)
{
int ret = 0;
struct hab_message *message;
struct hab_message *message = NULL;
struct hab_device *dev = pchan->habdev;
size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
uint32_t payload_type = HAB_HEADER_GET_TYPE(*header);
uint32_t vchan_id = HAB_HEADER_GET_ID(*header);
uint32_t session_id = HAB_HEADER_GET_SESSION_ID(*header);
struct virtual_channel *vchan = NULL;
struct export_desc *exp_desc;
struct timeval tv;
unsigned long long rx_mpm_tv;
struct export_desc *exp_desc = NULL, exp_ack = {0};
struct timeval tv = {0};
/* get the local virtual channel if it isn't an open message */
if (payload_type != HAB_PAYLOAD_TYPE_INIT &&
@ -296,8 +295,8 @@ int hab_msg_recv(struct physical_channel *pchan,
case HAB_PAYLOAD_TYPE_EXPORT:
if (sizebytes > HAB_HEADER_SIZE_MASK) {
pr_err("%s exp size too large %zd\n",
pchan->name, sizebytes);
pr_err("%s exp size too large %zd header %zd\n",
pchan->name, sizebytes, sizeof(*exp_desc));
break;
}
@ -322,9 +321,12 @@ int hab_msg_recv(struct physical_channel *pchan,
exp_desc->domid_remote = pchan->vmid_remote;
exp_desc->domid_local = pchan->vmid_local;
exp_desc->pchan = pchan;
exp_ack = *exp_desc; /* preserve exporter's info for ack */
exp_desc->vcid_remote = exp_desc->vcid_local;
exp_desc->vcid_local = vchan->id;
hab_export_enqueue(vchan, exp_desc);
hab_send_export_ack(vchan, pchan, exp_desc);
hab_export_enqueue(vchan, exp_desc); /* for local use */
hab_send_export_ack(vchan, pchan, &exp_ack); /* ack exporter */
break;
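
The EXPORT handler now keeps two views of the arriving descriptor: exp_ack preserves the exporter's original export_id/vcid pair so the EXPORT_ACK can be matched on the exporting side, while the copy queued locally swaps vcid_local/vcid_remote so the import/unimport lookups (which in this commit also compare param->vcid against exp->vcid_local) resolve against the importer's own vchan. A small before/after sketch with invented ids:

    /* as received (exporter's view):
     *   exp_desc->export_id  = 7
     *   exp_desc->vcid_local = 0x1001        exporter's vchan id
     *
     * exp_ack = *exp_desc;                   the ack still reports 0x1001
     *
     * as queued on ctx->imp_whse (importer's view):
     *   exp_desc->vcid_remote = 0x1001
     *   exp_desc->vcid_local  = vchan->id    e.g. 0x2002, the local vchan
     */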
case HAB_PAYLOAD_TYPE_EXPORT_ACK:

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -32,7 +32,8 @@ int hab_open_request_send(struct hab_open_request *request)
{
struct hab_header header = HAB_HEADER_INITIALIZER;
HAB_HEADER_SET_SIZE(header, sizeof(struct hab_open_send_data));
HAB_HEADER_SET_SIZE(header,
(uint32_t)sizeof(struct hab_open_send_data));
HAB_HEADER_SET_TYPE(header, request->type);
return physical_channel_send(request->pchan, &header, &request->xdata);
@ -42,12 +43,12 @@ int hab_open_request_send(struct hab_open_request *request)
int hab_open_request_add(struct physical_channel *pchan,
size_t sizebytes, int request_type)
{
struct hab_open_node *node;
struct hab_open_node *node = NULL;
struct hab_device *dev = pchan->habdev;
struct hab_open_request *request;
struct timeval tv;
struct hab_open_request *request = NULL;
struct timeval tv = {0};
if (sizebytes > HAB_HEADER_SIZE_MASK) {
if (sizebytes > sizeof(request->xdata)) {
pr_err("pchan %s request size too large %zd\n",
pchan->name, sizebytes);
return -EINVAL;
@ -83,9 +84,9 @@ static int hab_open_request_find(struct uhab_context *ctx,
struct hab_open_request *listen,
struct hab_open_request **recv_request)
{
struct hab_open_node *node, *tmp;
struct hab_open_request *request;
struct timeval tv;
struct hab_open_node *node = NULL, *tmp = NULL;
struct hab_open_request *request = NULL;
struct timeval tv = {0};
int ret = 0;
if (ctx->closing ||
@ -126,6 +127,10 @@ static int hab_open_request_find(struct uhab_context *ctx,
done:
spin_unlock_bh(&dev->openlock);
if (hab_is_forbidden(ctx, dev, listen->xdata.sub_id))
ret = 1;
return ret;
}
@ -165,6 +170,9 @@ int hab_open_listen(struct uhab_context *ctx,
pr_warn("something failed in open listen ret %d\n",
ret);
ret = -EINTR; /* condition not met */
} else if (hab_is_forbidden(ctx, dev, listen->xdata.sub_id)) {
pr_warn("local open cancelled ret %d\n", ret);
ret = -ENXIO;
} else if (ret > 0)
ret = 0; /* condition met */
} else { /* fe case */
@ -176,6 +184,9 @@ int hab_open_listen(struct uhab_context *ctx,
} else if (-ERESTARTSYS == ret) {
pr_warn("local interrupted ret %d\n", ret);
ret = -EINTR;
} else if (hab_is_forbidden(ctx, dev, listen->xdata.sub_id)) {
pr_warn("local open cancelled ret %d\n", ret);
ret = -ENXIO;
}
}
@ -187,15 +198,15 @@ int hab_open_receive_cancel(struct physical_channel *pchan,
size_t sizebytes)
{
struct hab_device *dev = pchan->habdev;
struct hab_open_send_data data;
struct hab_open_request *request;
struct hab_open_node *node, *tmp;
struct hab_open_send_data data = {0};
struct hab_open_request *request = NULL;
struct hab_open_node *node = NULL, *tmp = NULL;
int bfound = 0;
struct timeval tv;
struct timeval tv = {0};
if (sizebytes > HAB_HEADER_SIZE_MASK) {
pr_err("pchan %s cancel size too large %zd\n",
pchan->name, sizebytes);
if (sizebytes > sizeof(data)) {
pr_err("pchan %s cancel size too large %zd header %zd\n",
pchan->name, sizebytes, sizeof(data));
return -EINVAL;
}
@ -260,7 +271,8 @@ int hab_open_cancel_notify(struct hab_open_request *request)
{
struct hab_header header = HAB_HEADER_INITIALIZER;
HAB_HEADER_SET_SIZE(header, sizeof(struct hab_open_send_data));
HAB_HEADER_SET_SIZE(header,
(uint32_t)sizeof(struct hab_open_send_data));
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_INIT_CANCEL);
return physical_channel_send(request->pchan, &header, &request->xdata);
@ -270,6 +282,7 @@ int hab_open_pending_enter(struct uhab_context *ctx,
struct physical_channel *pchan,
struct hab_open_node *pending)
{
(void)pchan;
write_lock(&ctx->ctx_lock);
list_add_tail(&pending->node, &ctx->pending_open);
ctx->pending_cnt++;
@ -282,9 +295,10 @@ int hab_open_pending_exit(struct uhab_context *ctx,
struct physical_channel *pchan,
struct hab_open_node *pending)
{
struct hab_open_node *node, *tmp;
struct hab_open_node *node = NULL, *tmp = NULL;
int ret = -ENOENT;
(void)pchan;
write_lock(&ctx->ctx_lock);
list_for_each_entry_safe(node, tmp, &ctx->pending_open, node) {
if ((node->request.type == pending->request.type) &&

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -23,7 +23,7 @@ static int fill_vmid_mmid_tbl(struct vmid_mmid_desc *tbl, int32_t vm_start,
int32_t mmid_range, int32_t be)
{
int ret = 0;
int i, j;
int i = 0, j = 0;
for (i = vm_start; i < vm_start+vm_range; i++) {
tbl[i].vmid = i; /* set valid vmid value to make it usable */
@ -43,6 +43,7 @@ static int fill_vmid_mmid_tbl(struct vmid_mmid_desc *tbl, int32_t vm_start,
void dump_settings(struct local_vmid *settings)
{
(void)settings;
pr_debug("self vmid is %d\n", settings->self);
}
@ -152,7 +153,7 @@ static int hab_parse_dt(struct local_vmid *settings)
*/
int hab_parse(struct local_vmid *settings)
{
int ret;
int ret = 0;
ret = hab_parse_dt(settings);

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -47,7 +47,7 @@ static void hab_pchan_free(struct kref *ref)
{
struct physical_channel *pchan =
container_of(ref, struct physical_channel, refcount);
struct virtual_channel *vchan;
struct virtual_channel *vchan = NULL;
pr_debug("pchan %s refcnt %d\n", pchan->name,
get_refcnt(pchan->refcount));
@ -67,14 +67,13 @@ static void hab_pchan_free(struct kref *ref)
}
read_unlock(&pchan->vchans_lock);
kfree(pchan->hyp_data);
kfree(pchan);
}
struct physical_channel *
hab_pchan_find_domid(struct hab_device *dev, int dom_id)
{
struct physical_channel *pchan;
struct physical_channel *pchan = NULL;
spin_lock_bh(&dev->pchan_lock);
list_for_each_entry(pchan, &dev->pchannels, node)
@ -104,5 +103,5 @@ void hab_pchan_get(struct physical_channel *pchan)
void hab_pchan_put(struct physical_channel *pchan)
{
if (pchan)
kref_put(&pchan->refcount, hab_pchan_free);
kref_put(&pchan->refcount, &hab_pchan_free);
}

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -32,28 +32,28 @@ int hab_stat_deinit(struct hab_driver *driver)
static int hab_stat_buffer_print(char *dest,
int dest_size, const char *fmt, ...)
{
va_list args;
char line[MAX_LINE_SIZE];
int ret;
va_list args = {0};
char line[MAX_LINE_SIZE] = {0};
int ret = 0;
va_start(args, fmt);
ret = vsnprintf(line, sizeof(line), fmt, args);
va_end(args);
if (ret > 0)
ret = strlcat(dest, line, dest_size);
ret = (int)strlcat(dest, line, dest_size);
return ret;
}
int hab_stat_show_vchan(struct hab_driver *driver,
char *buf, int size)
{
int i, ret = 0;
int i = 0, ret = 0;
ret = strlcpy(buf, "", size);
ret = (int)strlcpy(buf, "", size);
for (i = 0; i < driver->ndevices; i++) {
struct hab_device *dev = &driver->devp[i];
struct physical_channel *pchan;
struct virtual_channel *vc;
struct physical_channel *pchan = NULL;
struct virtual_channel *vc = NULL;
spin_lock_bh(&dev->pchan_lock);
list_for_each_entry(pchan, &dev->pchannels, node) {
@ -86,9 +86,9 @@ int hab_stat_show_ctx(struct hab_driver *driver,
char *buf, int size)
{
int ret = 0;
struct uhab_context *ctx;
struct uhab_context *ctx = NULL;
ret = strlcpy(buf, "", size);
ret = (int)strlcpy(buf, "", size);
spin_lock_bh(&hab_driver.drvlock);
ret = hab_stat_buffer_print(buf, size,
@ -96,10 +96,11 @@ int hab_stat_show_ctx(struct hab_driver *driver,
driver->ctx_cnt);
list_for_each_entry(ctx, &hab_driver.uctx_list, node) {
ret = hab_stat_buffer_print(buf, size,
"ctx %d K %d close %d vc %d exp %d imp %d open %d\n",
"ctx %d K %d close %d vc %d exp %d imp %d open %d ref %d\n",
ctx->owner, ctx->kernel, ctx->closing,
ctx->vcnt, ctx->export_total,
ctx->import_total, ctx->pending_cnt);
ctx->import_total, ctx->pending_cnt,
get_refcnt(ctx->refcount));
}
spin_unlock_bh(&hab_driver.drvlock);
@ -108,7 +109,7 @@ int hab_stat_show_ctx(struct hab_driver *driver,
static int get_pft_tbl_total_size(struct compressed_pfns *pfn_table)
{
int i, total_size = 0;
int i = 0, total_size = 0;
for (i = 0; i < pfn_table->nregions; i++)
total_size += pfn_table->region[i].size * PAGE_SIZE;
@ -119,27 +120,40 @@ static int get_pft_tbl_total_size(struct compressed_pfns *pfn_table)
static int print_ctx_total_expimp(struct uhab_context *ctx,
char *buf, int size)
{
struct compressed_pfns *pfn_table;
struct compressed_pfns *pfn_table = NULL;
int exp_total = 0, imp_total = 0;
int exp_cnt = 0, imp_cnt = 0;
struct export_desc *exp;
struct export_desc *exp = NULL;
int exim_size = 0;
read_lock(&ctx->exp_lock);
hab_stat_buffer_print(buf, size, "export[expid:vcid:size]: ");
list_for_each_entry(exp, &ctx->exp_whse, node) {
pfn_table = (struct compressed_pfns *)exp->payload;
exp_total += get_pft_tbl_total_size(pfn_table);
exim_size = get_pft_tbl_total_size(pfn_table);
exp_total += exim_size;
exp_cnt++;
hab_stat_buffer_print(buf, size,
"[%d:%x:%d] ", exp->export_id,
exp->vcid_local, exim_size);
}
hab_stat_buffer_print(buf, size, "\n");
read_unlock(&ctx->exp_lock);
spin_lock_bh(&ctx->imp_lock);
hab_stat_buffer_print(buf, size, "import[expid:vcid:size]: ");
list_for_each_entry(exp, &ctx->imp_whse, node) {
if (habmm_imp_hyp_map_check(ctx->import_ctx, exp)) {
pfn_table = (struct compressed_pfns *)exp->payload;
imp_total += get_pft_tbl_total_size(pfn_table);
exim_size = get_pft_tbl_total_size(pfn_table);
imp_total += exim_size;
imp_cnt++;
hab_stat_buffer_print(buf, size,
"[%d:%x:%d] ", exp->export_id,
exp->vcid_local, exim_size);
}
}
hab_stat_buffer_print(buf, size, "\n");
spin_unlock_bh(&ctx->imp_lock);
if (exp_cnt || exp_total || imp_cnt || imp_total)
@ -154,10 +168,11 @@ static int print_ctx_total_expimp(struct uhab_context *ctx,
int hab_stat_show_expimp(struct hab_driver *driver,
int pid, char *buf, int size)
{
struct uhab_context *ctx;
int ret;
struct uhab_context *ctx = NULL;
int ret = 0;
ret = strlcpy(buf, "", size);
(void)driver;
ret = (int)strlcpy(buf, "", size);
spin_lock_bh(&hab_driver.drvlock);
list_for_each_entry(ctx, &hab_driver.uctx_list, node) {

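With the extra "[%d:%x:%d]" entries, the per-context export/import summary in the stat buffer now lists every buffer individually instead of only the totals. Under the format strings shown above, and with entirely invented ids and sizes, a context with two exports and one mapped import would print roughly:

    export[expid:vcid:size]: [12:103000:8192] [13:103000:65536]
    import[expid:vcid:size]: [40:205000:4096]

followed by the totals line guarded by the exp_cnt/imp_cnt check, whose body falls outside the hunk shown here.
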
View file

@ -1,4 +1,4 @@
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -16,8 +16,8 @@ struct virtual_channel *
hab_vchan_alloc(struct uhab_context *ctx, struct physical_channel *pchan,
int openid)
{
int id;
struct virtual_channel *vchan;
int id = 0;
struct virtual_channel *vchan = NULL;
if (!pchan || !ctx)
return NULL;
@ -73,10 +73,10 @@ hab_vchan_free(struct kref *ref)
{
struct virtual_channel *vchan =
container_of(ref, struct virtual_channel, refcount);
struct hab_message *message, *msg_tmp;
struct hab_message *message = NULL, *msg_tmp = NULL;
struct physical_channel *pchan = vchan->pchan;
struct uhab_context *ctx = vchan->ctx;
struct virtual_channel *vc, *vc_tmp;
struct virtual_channel *vc = NULL, *vc_tmp = NULL;
spin_lock_bh(&vchan->rx_lock);
list_for_each_entry_safe(message, msg_tmp, &vchan->rx_list, node) {
@ -117,7 +117,7 @@ hab_vchan_free(struct kref *ref)
struct virtual_channel*
hab_vchan_get(struct physical_channel *pchan, struct hab_header *header)
{
struct virtual_channel *vchan;
struct virtual_channel *vchan = NULL;
uint32_t vchan_id = HAB_HEADER_GET_ID(*header);
uint32_t session_id = HAB_HEADER_GET_SESSION_ID(*header);
size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
@ -182,7 +182,7 @@ void hab_vchan_stop(struct virtual_channel *vchan)
void hab_vchans_stop(struct physical_channel *pchan)
{
struct virtual_channel *vchan, *tmp;
struct virtual_channel *vchan = NULL, *tmp = NULL;
read_lock(&pchan->vchans_lock);
list_for_each_entry_safe(vchan, tmp, &pchan->vchannels, pnode) {
@ -200,12 +200,12 @@ void hab_vchan_stop_notify(struct virtual_channel *vchan)
static int hab_vchans_per_pchan_empty(struct physical_channel *pchan)
{
int empty;
int empty = 0;
read_lock(&pchan->vchans_lock);
empty = list_empty(&pchan->vchannels);
if (!empty) {
struct virtual_channel *vchan;
struct virtual_channel *vchan = NULL;
int vcnt = pchan->vcnt;
list_for_each_entry(vchan, &pchan->vchannels, pnode) {
@ -230,9 +230,9 @@ static int hab_vchans_per_pchan_empty(struct physical_channel *pchan)
static int hab_vchans_empty(int vmid)
{
int i, empty = 1;
struct physical_channel *pchan;
struct hab_device *hab_dev;
int i = 0, empty = 1;
struct physical_channel *pchan = NULL;
struct hab_device *hab_dev = NULL;
for (i = 0; i < hab_driver.ndevices; i++) {
hab_dev = &hab_driver.devp[i];
@ -277,13 +277,15 @@ int hab_vchan_find_domid(struct virtual_channel *vchan)
void hab_vchan_put(struct virtual_channel *vchan)
{
if (vchan)
kref_put(&vchan->refcount, hab_vchan_free);
kref_put(&vchan->refcount, &hab_vchan_free);
}
int hab_vchan_query(struct uhab_context *ctx, int32_t vcid, uint64_t *ids,
char *names, size_t name_size, uint32_t flags)
{
struct virtual_channel *vchan;
struct virtual_channel *vchan = NULL;
(void)flags;
vchan = hab_get_vchan_fromvcid(vcid, ctx, 1);
if (!vchan)

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -47,7 +47,7 @@ int32_t habmm_socket_recv(int32_t handle, void *dst_buff, uint32_t *size_bytes,
uint32_t timeout, uint32_t flags)
{
int ret = 0;
struct hab_message *msg;
struct hab_message *msg = NULL;
if (!size_bytes || !dst_buff)
return -EINVAL;