hab: import/export between remote buffer and dmafd

Currently HAB only supports importing a remote buffer to a CPU virtual
address, which cannot be shared with other processes. Therefore, add
dma_buf import/export support to HAB.

Change-Id: I156c925d7c0cefef5bf146ad8cff38de9c4b3bee
Signed-off-by: Yajun Li <yajunl@codeaurora.org>
Yajun Li 2018-01-15 10:36:45 +08:00
parent 502257f3e6
commit 0866bef74d
5 changed files with 425 additions and 167 deletions
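
For context, a minimal usage sketch of the new flag (not part of this change). The habmm_export()/habmm_import() signatures are assumed from the usual habmm.h userspace wrappers, and handle, dmabuf_fd and size are placeholder names supplied by the caller:

    /*
     * Illustrative sketch only -- assumes the usual habmm_export()/habmm_import()
     * userspace wrappers; handle, dmabuf_fd and size are caller-supplied.
     */
    #include <stdint.h>
    #include <sys/mman.h>
    #include "habmm.h"

    /* Exporter: share an existing dma_buf fd instead of a CPU address */
    static int32_t share_dmabuf(int32_t handle, int32_t dmabuf_fd, uint32_t size,
                                uint32_t *export_id)
    {
        return habmm_export(handle, (void *)(intptr_t)dmabuf_fd, size,
                            export_id, HABMM_EXPIMP_FLAGS_FD);
    }

    /* Importer: receive export_id over the vchan, get back a fresh dma_buf fd */
    static void *map_imported(int32_t handle, uint32_t export_id, uint32_t size)
    {
        void *out = NULL;
        int32_t fd;

        if (habmm_import(handle, &out, size, export_id, HABMM_EXPIMP_FLAGS_FD))
            return MAP_FAILED;
        fd = (int32_t)(intptr_t)out;
        /* the imported fd can now be mmap'ed or handed to another driver */
        return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    }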


@@ -147,7 +147,8 @@ struct hab_header {
    (((vcid) & HAB_VCID_ID_MASK) >> HAB_VCID_ID_SHIFT)
-#define HAB_HEADER_SET_SESSION_ID(header, sid) ((header).session_id = (sid))
+#define HAB_HEADER_SET_SESSION_ID(header, sid) \
+   ((header).session_id = (sid))
#define HAB_HEADER_SET_SIZE(header, size) \
    ((header).id_type_size = ((header).id_type_size & \
@@ -281,8 +282,8 @@ struct uhab_context {
};

/*
- * array to describe the VM and its MMID configuration as what is connected to
- * so this is describing a pchan's remote side
+ * array to describe the VM and its MMID configuration as
+ * what is connected to so this is describing a pchan's remote side
 */
struct vmid_mmid_desc {
    int vmid; /* remote vmid */
@@ -341,8 +342,9 @@ struct virtual_channel {
};

/*
- * Struct shared between local and remote, contents are composed by exporter,
- * the importer only writes to pdata and local (exporter) domID
+ * Struct shared between local and remote, contents
+ * are composed by exporter, the importer only writes
+ * to pdata and local (exporter) domID
 */
struct export_desc {
    uint32_t export_id;
@@ -410,16 +412,10 @@ int habmem_hyp_revoke(void *expdata, uint32_t count);
void *habmem_imp_hyp_open(void);
void habmem_imp_hyp_close(void *priv, int kernel);

-long habmem_imp_hyp_map(void *priv, void *impdata, uint32_t count,
-       uint32_t remotedom,
-       uint64_t *index,
-       void **pkva,
-       int kernel,
-       uint32_t userflags);
+int habmem_imp_hyp_map(void *imp_ctx, struct hab_import *param,
+       struct export_desc *exp, int kernel);

-long habmm_imp_hyp_unmap(void *priv, uint64_t index,
-       uint32_t count,
-       int kernel);
+int habmm_imp_hyp_unmap(void *imp_ctx, struct export_desc *exp);

int habmem_imp_hyp_mmap(struct file *flip, struct vm_area_struct *vma);
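
The narrowed prototypes fold the old argument list into the import request and the export descriptor. As a condensed orientation sketch (this mirrors the habmem_imp_hyp_map() dispatch added to the importer file further down; it is not a separate implementation):

    /* Orientation sketch -- mirrors the dispatch added later in this change */
    int habmem_imp_hyp_map(void *imp_ctx, struct hab_import *param,
            struct export_desc *exp, int kernel)
    {
        if (kernel)                                  /* khab: vmap to a kernel VA */
            return habmem_imp_hyp_map_kva(imp_ctx, exp, param->flags,
                    (void **)&param->kva);
        if (param->flags & HABMM_EXPIMP_FLAGS_FD)    /* uhab: wrap pages in a dma_buf fd */
            return habmem_imp_hyp_map_fd(imp_ctx, exp, param->flags,
                    (int32_t *)&param->kva);
        return habmem_imp_hyp_map_uva(imp_ctx, exp,  /* uhab: legacy index + mmap path */
                param->flags, &param->index);
    }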


@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -29,6 +29,9 @@ struct pages_list {
    uint32_t userflags;
    struct file *filp_owner;
    struct file *filp_mapper;
+   struct dma_buf *dmabuf;
+   int32_t export_id;
+   int32_t vcid;
};

struct importer_context {
@@ -58,7 +61,7 @@ static int match_file(const void *p, struct file *file, unsigned int fd)
}

-static int habmem_get_dma_pages(unsigned long address,
+static int habmem_get_dma_pages_from_va(unsigned long address,
            int page_count,
            struct page **pages)
{
@@ -142,6 +145,56 @@ err:
    return rc;
}

static int habmem_get_dma_pages_from_fd(int32_t fd,
int page_count,
struct page **pages)
{
struct dma_buf *dmabuf = NULL;
struct scatterlist *s;
struct sg_table *sg_table = NULL;
struct dma_buf_attachment *attach = NULL;
struct page *page;
int i, j, rc = 0;
dmabuf = dma_buf_get(fd);
if (IS_ERR(dmabuf))
return PTR_ERR(dmabuf);
attach = dma_buf_attach(dmabuf, hab_driver.dev);
if (IS_ERR_OR_NULL(attach)) {
pr_err("dma_buf_attach failed\n");
goto err;
}
sg_table = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
if (IS_ERR_OR_NULL(sg_table)) {
pr_err("dma_buf_map_attachment failed\n");
goto err;
}
for_each_sg(sg_table->sgl, s, sg_table->nents, i) {
page = sg_page(s);
pr_debug("sgl length %d\n", s->length);
for (j = 0; j < (s->length >> PAGE_SHIFT); j++) {
pages[rc] = nth_page(page, j);
rc++;
if (WARN_ON(rc >= page_count))
break;
}
}
err:
if (!IS_ERR_OR_NULL(sg_table))
dma_buf_unmap_attachment(attach, sg_table, DMA_TO_DEVICE);
if (!IS_ERR_OR_NULL(attach))
dma_buf_detach(dmabuf, attach);
if (!IS_ERR_OR_NULL(dmabuf))
dma_buf_put(dmabuf);
return rc;
}

/*
 * exporter - grant & revoke
 * degenerate sharabled page list based on CPU friendly virtual "address".
@@ -165,7 +218,11 @@ int habmem_hyp_grant_user(unsigned long address,
    down_read(&current->mm->mmap_sem);

    if (HABMM_EXP_MEM_TYPE_DMA & flags) {
-       ret = habmem_get_dma_pages(address,
+       ret = habmem_get_dma_pages_from_va(address,
+               page_count,
+               pages);
+   } else if (HABMM_EXPIMP_FLAGS_FD & flags) {
+       ret = habmem_get_dma_pages_from_fd(address,
                page_count,
                pages);
    } else {
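
On the export side, when HABMM_EXPIMP_FLAGS_FD is set the existing address argument carries the dma_buf fd rather than a user virtual address. A caller sizing the page array before handing it to habmem_get_dma_pages_from_fd() could, for example, derive page_count from the dma_buf itself. Sketch only, using standard dma-buf calls; fd and pages are assumed caller-side names:

    /* Sketch: size the page array from the dma_buf before pinning its pages */
    struct dma_buf *dmabuf = dma_buf_get(fd);
    int page_count;
    struct page **pages;

    if (IS_ERR(dmabuf))
        return PTR_ERR(dmabuf);
    page_count = DIV_ROUND_UP(dmabuf->size, PAGE_SIZE);
    dma_buf_put(dmabuf);

    pages = vmalloc(page_count * sizeof(struct page *));
    if (!pages)
        return -ENOMEM;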
@@ -260,139 +317,42 @@ void habmem_imp_hyp_close(void *imp_ctx, int kernel)
    kfree(priv);
}

-/*
- * setup pages, be ready for the following mmap call
- * index is output to refer to this imported buffer described by the import data
- */
-long habmem_imp_hyp_map(void *imp_ctx,
-       void *impdata,
-       uint32_t count,
-       uint32_t remotedom,
-       uint64_t *index,
-       void **pkva,
-       int kernel,
-       uint32_t userflags)
-{
-   struct page **pages;
-   struct compressed_pfns *pfn_table = (struct compressed_pfns *)impdata;
-   struct pages_list *pglist;
-   struct importer_context *priv = imp_ctx;
-   unsigned long pfn;
-   int i, j, k = 0;
-
-   if (!pfn_table || !priv)
-       return -EINVAL;
-
-   pages = vmalloc(count * sizeof(struct page *));
-   if (!pages)
-       return -ENOMEM;
-
-   pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
-   if (!pglist) {
-       vfree(pages);
-       return -ENOMEM;
-   }
-
-   pfn = pfn_table->first_pfn;
-   for (i = 0; i < pfn_table->nregions; i++) {
-       for (j = 0; j < pfn_table->region[i].size; j++) {
-           pages[k] = pfn_to_page(pfn+j);
-           k++;
-       }
-       pfn += pfn_table->region[i].size + pfn_table->region[i].space;
-   }
-
-   pglist->pages = pages;
-   pglist->npages = count;
-   pglist->kernel = kernel;
-   pglist->index = page_to_phys(pages[0]) >> PAGE_SHIFT;
-   pglist->refcntk = pglist->refcntu = 0;
-   pglist->userflags = userflags;
-
-   *index = pglist->index << PAGE_SHIFT;
-
-   if (kernel) {
-       pgprot_t prot = PAGE_KERNEL;
-
-       if (!(userflags & HABMM_IMPORT_FLAGS_CACHED))
-           prot = pgprot_writecombine(prot);
-
-       pglist->kva = vmap(pglist->pages, pglist->npages, VM_MAP, prot);
-       if (pglist->kva == NULL) {
-           vfree(pages);
-           kfree(pglist);
-           pr_err("%ld pages vmap failed\n", pglist->npages);
-           return -ENOMEM;
-       } else {
-           pr_debug("%ld pages vmap pass, return %pK\n",
-               pglist->npages, pglist->kva);
-       }
-
-       pglist->uva = NULL;
-       pglist->refcntk++;
-       *pkva = pglist->kva;
-       *index = (uint64_t)((uintptr_t)pglist->kva);
-   } else {
-       pglist->kva = NULL;
-   }
-
-   write_lock(&priv->implist_lock);
-   list_add_tail(&pglist->list, &priv->imp_list);
-   priv->cnt++;
-   write_unlock(&priv->implist_lock);
-
-   pr_debug("index returned %llx\n", *index);
-   return 0;
-}
-
-/* the input index is PHY address shifted for uhab, and kva for khab */
-long habmm_imp_hyp_unmap(void *imp_ctx,
-       uint64_t index,
-       uint32_t count,
-       int kernel)
-{
-   struct importer_context *priv = imp_ctx;
-   struct pages_list *pglist, *tmp;
-   int found = 0;
-   uint64_t pg_index = index >> PAGE_SHIFT;
-
-   write_lock(&priv->implist_lock);
-   list_for_each_entry_safe(pglist, tmp, &priv->imp_list, list) {
-       pr_debug("node pglist %pK, kernel %d, pg_index %llx\n",
-           pglist, pglist->kernel, pg_index);
-
-       if (kernel) {
-           if (pglist->kva == (void *)((uintptr_t)index))
-               found = 1;
-       } else {
-           if (pglist->index == pg_index)
-               found = 1;
-       }
-
-       if (found) {
-           list_del(&pglist->list);
-           priv->cnt--;
-           break;
-       }
-   }
-   write_unlock(&priv->implist_lock);
-
-   if (!found) {
-       pr_err("failed to find export id on index %llx\n", index);
-       return -EINVAL;
-   }
-
-   pr_debug("detach pglist %pK, index %llx, kernel %d, list cnt %d\n",
-       pglist, pglist->index, pglist->kernel, priv->cnt);
-
-   if (kernel)
-       if (pglist->kva)
-           vunmap(pglist->kva);
-
-   vfree(pglist->pages);
-   kfree(pglist);
-   return 0;
-}
+static struct sg_table *hab_mem_map_dma_buf(
+   struct dma_buf_attachment *attachment,
+   enum dma_data_direction direction)
+{
+   struct dma_buf *dmabuf = attachment->dmabuf;
+   struct pages_list *pglist = dmabuf->priv;
+   struct sg_table *sgt;
+   struct scatterlist *sg;
+   int i;
+   int ret = 0;
+   struct page **pages = pglist->pages;
+
+   sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+   if (!sgt)
+       return ERR_PTR(-ENOMEM);
+
+   ret = sg_alloc_table(sgt, pglist->npages, GFP_KERNEL);
+   if (ret) {
+       kfree(sgt);
+       return ERR_PTR(-ENOMEM);
+   }
+
+   for_each_sg(sgt->sgl, sg, pglist->npages, i) {
+       sg_set_page(sg, pages[i], PAGE_SIZE, 0);
+   }
+
+   return sgt;
+}
+
+static void hab_mem_unmap_dma_buf(struct dma_buf_attachment *attachment,
+   struct sg_table *sgt,
+   enum dma_data_direction direction)
+{
+   sg_free_table(sgt);
+   kfree(sgt);
+}

static int hab_map_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -415,13 +375,11 @@ static int hab_map_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
    page_idx = fault_index - pglist->index;
    if (page_idx < 0 || page_idx >= pglist->npages) {
-       pr_err("Out of page array. page_idx %d, pg cnt %ld",
+       pr_err("Out of page array! page_idx %d, pg cnt %ld",
            page_idx, pglist->npages);
        return VM_FAULT_SIGBUS;
    }

-   pr_debug("Fault page index %d\n", page_idx);
-
    page = pglist->pages[page_idx];
    get_page(page);
    vmf->page = page;
@@ -437,12 +395,322 @@ static void hab_map_close(struct vm_area_struct *vma)
}

static const struct vm_operations_struct habmem_vm_ops = {
    .fault = hab_map_fault,
    .open = hab_map_open,
    .close = hab_map_close,
};

static int hab_mem_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
struct pages_list *pglist = dmabuf->priv;
uint32_t obj_size = pglist->npages << PAGE_SHIFT;
if (vma == NULL)
return VM_FAULT_SIGBUS;
/* Check for valid size. */
if (obj_size < vma->vm_end - vma->vm_start)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = &habmem_vm_ops;
vma->vm_private_data = pglist;
vma->vm_flags |= VM_MIXEDMAP;
return 0;
}
static void hab_mem_dma_buf_release(struct dma_buf *dmabuf)
{
}
static void *hab_mem_dma_buf_kmap(struct dma_buf *dmabuf,
unsigned long offset)
{
return NULL;
}
static void hab_mem_dma_buf_kunmap(struct dma_buf *dmabuf,
unsigned long offset,
void *ptr)
{
}
static struct dma_buf_ops dma_buf_ops = {
.map_dma_buf = hab_mem_map_dma_buf,
.unmap_dma_buf = hab_mem_unmap_dma_buf,
.mmap = hab_mem_mmap,
.release = hab_mem_dma_buf_release,
.kmap_atomic = hab_mem_dma_buf_kmap,
.kunmap_atomic = hab_mem_dma_buf_kunmap,
.kmap = hab_mem_dma_buf_kmap,
.kunmap = hab_mem_dma_buf_kunmap,
};
static int habmem_imp_hyp_map_fd(void *imp_ctx,
struct export_desc *exp,
uint32_t userflags,
int32_t *pfd)
{
struct page **pages;
struct compressed_pfns *pfn_table =
(struct compressed_pfns *)exp->payload;
struct pages_list *pglist;
struct importer_context *priv = imp_ctx;
unsigned long pfn;
int i, j, k = 0;
pgprot_t prot = PAGE_KERNEL;
int32_t fd;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
if (!pfn_table || !priv)
return -EINVAL;
pages = vmalloc(exp->payload_count * sizeof(struct page *));
if (!pages)
return -ENOMEM;
pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
if (!pglist) {
vfree(pages);
return -ENOMEM;
}
pfn = pfn_table->first_pfn;
for (i = 0; i < pfn_table->nregions; i++) {
for (j = 0; j < pfn_table->region[i].size; j++) {
pages[k] = pfn_to_page(pfn+j);
k++;
}
pfn += pfn_table->region[i].size + pfn_table->region[i].space;
}
pglist->pages = pages;
pglist->npages = exp->payload_count;
pglist->kernel = 0;
pglist->index = 0;
pglist->refcntk = pglist->refcntu = 0;
pglist->userflags = userflags;
pglist->export_id = exp->export_id;
pglist->vcid = exp->vcid_remote;
if (!(userflags & HABMM_IMPORT_FLAGS_CACHED))
prot = pgprot_writecombine(prot);
exp_info.ops = &dma_buf_ops;
exp_info.size = exp->payload_count << PAGE_SHIFT;
exp_info.flags = O_RDWR;
exp_info.priv = pglist;
pglist->dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(pglist->dmabuf)) {
vfree(pages);
kfree(pglist);
return PTR_ERR(pglist->dmabuf);
}
fd = dma_buf_fd(pglist->dmabuf, O_CLOEXEC);
if (fd < 0) {
dma_buf_put(pglist->dmabuf);
vfree(pages);
kfree(pglist);
return -EINVAL;
}
pglist->refcntk++;
write_lock(&priv->implist_lock);
list_add_tail(&pglist->list, &priv->imp_list);
priv->cnt++;
write_unlock(&priv->implist_lock);
*pfd = fd;
return 0;
}
static int habmem_imp_hyp_map_kva(void *imp_ctx,
struct export_desc *exp,
uint32_t userflags,
void **pkva)
{
struct page **pages;
struct compressed_pfns *pfn_table =
(struct compressed_pfns *)exp->payload;
struct pages_list *pglist;
struct importer_context *priv = imp_ctx;
unsigned long pfn;
int i, j, k = 0;
pgprot_t prot = PAGE_KERNEL;
if (!pfn_table || !priv)
return -EINVAL;
pages = vmalloc(exp->payload_count * sizeof(struct page *));
if (!pages)
return -ENOMEM;
pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
if (!pglist) {
vfree(pages);
return -ENOMEM;
}
pfn = pfn_table->first_pfn;
for (i = 0; i < pfn_table->nregions; i++) {
for (j = 0; j < pfn_table->region[i].size; j++) {
pages[k] = pfn_to_page(pfn+j);
k++;
}
pfn += pfn_table->region[i].size + pfn_table->region[i].space;
}
pglist->pages = pages;
pglist->npages = exp->payload_count;
pglist->kernel = 1;
pglist->refcntk = pglist->refcntu = 0;
pglist->userflags = userflags;
pglist->export_id = exp->export_id;
pglist->vcid = exp->vcid_remote;
if (!(userflags & HABMM_IMPORT_FLAGS_CACHED))
prot = pgprot_writecombine(prot);
pglist->kva = vmap(pglist->pages, pglist->npages, VM_MAP, prot);
if (pglist->kva == NULL) {
vfree(pages);
kfree(pglist);
pr_err("%ld pages vmap failed\n", pglist->npages);
return -ENOMEM;
}
pr_debug("%ld pages vmap pass, return %p\n",
pglist->npages, pglist->kva);
pglist->refcntk++;
write_lock(&priv->implist_lock);
list_add_tail(&pglist->list, &priv->imp_list);
priv->cnt++;
write_unlock(&priv->implist_lock);
*pkva = pglist->kva;
return 0;
}
static int habmem_imp_hyp_map_uva(void *imp_ctx,
struct export_desc *exp,
uint32_t userflags,
uint64_t *index)
{
struct page **pages;
struct compressed_pfns *pfn_table =
(struct compressed_pfns *)exp->payload;
struct pages_list *pglist;
struct importer_context *priv = imp_ctx;
unsigned long pfn;
int i, j, k = 0;
if (!pfn_table || !priv)
return -EINVAL;
pages = vmalloc(exp->payload_count * sizeof(struct page *));
if (!pages)
return -ENOMEM;
pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
if (!pglist) {
vfree(pages);
return -ENOMEM;
}
pfn = pfn_table->first_pfn;
for (i = 0; i < pfn_table->nregions; i++) {
for (j = 0; j < pfn_table->region[i].size; j++) {
pages[k] = pfn_to_page(pfn+j);
k++;
}
pfn += pfn_table->region[i].size + pfn_table->region[i].space;
}
pglist->pages = pages;
pglist->npages = exp->payload_count;
pglist->index = page_to_phys(pages[0]) >> PAGE_SHIFT;
pglist->refcntk = pglist->refcntu = 0;
pglist->userflags = userflags;
pglist->export_id = exp->export_id;
pglist->vcid = exp->vcid_remote;
write_lock(&priv->implist_lock);
list_add_tail(&pglist->list, &priv->imp_list);
priv->cnt++;
write_unlock(&priv->implist_lock);
*index = pglist->index << PAGE_SHIFT;
return 0;
}
int habmem_imp_hyp_map(void *imp_ctx, struct hab_import *param,
struct export_desc *exp, int kernel)
{
int ret = 0;
if (kernel)
ret = habmem_imp_hyp_map_kva(imp_ctx, exp,
param->flags,
(void **)&param->kva);
else if (param->flags & HABMM_EXPIMP_FLAGS_FD)
ret = habmem_imp_hyp_map_fd(imp_ctx, exp,
param->flags,
(int32_t *)&param->kva);
else
ret = habmem_imp_hyp_map_uva(imp_ctx, exp,
param->flags,
&param->index);
return ret;
}
int habmm_imp_hyp_unmap(void *imp_ctx, struct export_desc *exp)
{
struct importer_context *priv = imp_ctx;
struct pages_list *pglist, *tmp;
int found = 0;
write_lock(&priv->implist_lock);
list_for_each_entry_safe(pglist, tmp, &priv->imp_list, list) {
if (pglist->export_id == exp->export_id &&
pglist->vcid == exp->vcid_remote) {
found = 1;
}
if (found) {
list_del(&pglist->list);
priv->cnt--;
break;
}
}
write_unlock(&priv->implist_lock);
if (!found) {
pr_err("failed to find export id %u\n", exp->export_id);
return -EINVAL;
}
pr_debug("detach pglist %p, kernel %d, list cnt %d\n",
pglist, pglist->kernel, priv->cnt);
if (pglist->kva)
vunmap(pglist->kva);
if (pglist->dmabuf)
dma_buf_put(pglist->dmabuf);
vfree(pglist->pages);
kfree(pglist);
return 0;
}

int habmem_imp_hyp_mmap(struct file *filp, struct vm_area_struct *vma)
{
    struct uhab_context *ctx = (struct uhab_context *) filp->private_data;
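
Because the import path now produces a genuine dma_buf (with map_dma_buf/unmap_dma_buf and mmap ops wired up above), a consumer can attach to the imported fd with the stock dma-buf API. Minimal kernel-side sketch; dev and imported_fd are assumed to come from the consumer driver:

    /* Consumer sketch -- only standard dma-buf calls are used */
    struct dma_buf *buf = dma_buf_get(imported_fd);
    struct dma_buf_attachment *att;
    struct sg_table *sgt;

    if (IS_ERR(buf))
        return PTR_ERR(buf);

    att = dma_buf_attach(buf, dev);
    if (IS_ERR(att)) {
        dma_buf_put(buf);
        return PTR_ERR(att);
    }

    sgt = dma_buf_map_attachment(att, DMA_BIDIRECTIONAL);
    if (IS_ERR(sgt)) {
        dma_buf_detach(buf, att);
        dma_buf_put(buf);
        return PTR_ERR(sgt);
    }

    /* ... walk sgt->sgl: one PAGE_SIZE entry per imported page ... */

    dma_buf_unmap_attachment(att, sgt, DMA_BIDIRECTIONAL);
    dma_buf_detach(buf, att);
    dma_buf_put(buf);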


@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -345,25 +345,20 @@ int hab_mem_import(struct uhab_context *ctx,
        exp->export_id, exp->payload_count, exp->domid_local,
        *((uint32_t *)exp->payload));

-   ret = habmem_imp_hyp_map(ctx->import_ctx,
-           exp->payload,
-           exp->payload_count,
-           exp->domid_local,
-           &exp->import_index,
-           &exp->kva,
-           kernel,
-           param->flags);
+   ret = habmem_imp_hyp_map(ctx->import_ctx, param, exp, kernel);
    if (ret) {
        pr_err("Import fail ret:%d pcnt:%d rem:%d 1st_ref:0x%X\n",
            ret, exp->payload_count,
            exp->domid_local, *((uint32_t *)exp->payload));
        return ret;
    }

-   pr_debug("import index %llx, kva %llx, kernel %d\n",
-       exp->import_index, param->kva, kernel);
-
-   param->index = exp->import_index;
-   param->kva = (uint64_t)exp->kva;
+   exp->import_index = param->index;
+   exp->kva = kernel ? (void *)param->kva : NULL;
+
+   pr_debug("import index %llx, kva or fd %llx, kernel %d\n",
+       exp->import_index, param->kva, kernel);

    return ret;
}
@@ -396,13 +391,10 @@ int hab_mem_unimport(struct uhab_context *ctx,
    if (!found)
        ret = -EINVAL;
    else {
-       ret = habmm_imp_hyp_unmap(ctx->import_ctx,
-               exp->import_index,
-               exp->payload_count,
-               kernel);
+       ret = habmm_imp_hyp_unmap(ctx->import_ctx, exp);
        if (ret) {
-           pr_err("unmap fail id:%d pcnt:%d kernel:%d\n",
-               exp->export_id, exp->payload_count, kernel);
+           pr_err("unmap fail id:%d pcnt:%d vcid:%d\n",
+               exp->export_id, exp->payload_count, exp->vcid_remote);
        }
        param->kva = (uint64_t)exp->kva;
        kfree(exp);
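
Note that the same 64-bit param->kva field now carries either a kernel VA (khab) or the new dma_buf fd (uhab with the FD flag). A sketch of how a caller of the import ioctl might read the result back, using the field names from the hunk above:

    /* Sketch: interpreting the hab_import structure returned by the import path */
    if (param.flags & HABMM_EXPIMP_FLAGS_FD) {
        int32_t imported_fd = (int32_t)param.kva;   /* new dma_buf fd */
        /* ... mmap() the fd or pass it to another driver ... */
    } else {
        uint64_t map_index = param.index;   /* legacy uhab path: offset for a
                                             * later mmap() on the hab device */
        /* ... */
    }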


@@ -110,10 +110,7 @@ hab_vchan_free(struct kref *ref)
        }
        spin_unlock_bh(&ctx->imp_lock);
        if (found) {
-           habmm_imp_hyp_unmap(ctx->import_ctx,
-               exp->import_index,
-               exp->payload_count,
-               ctx->kernel);
+           habmm_imp_hyp_unmap(ctx->import_ctx, exp);
            ctx->import_total--;
            kfree(exp);
        }


@@ -214,6 +214,11 @@ int32_t habmm_socket_recvfrom(int32_t handle, void *dst_buff,
 */
#define HABMM_EXP_MEM_TYPE_DMA 0x00000001

+/*
+ * this flag is used for export from dma_buf fd or import to dma_buf fd
+ */
+#define HABMM_EXPIMP_FLAGS_FD 0x00010000
+
#define HAB_MAX_EXPORT_SIZE 0x8000000

/*