Merge "msm: adsprpc: allocate all remote memory in kernel"
commit 0fb3a0e5fc
3 changed files with 374 additions and 108 deletions
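For orientation, the user-visible piece of this change is the new FASTRPC_IOCTL_CONTROL request: before relying on kernel-side allocation (and on ADSP_MMAP_ADD_PAGES mappings, which now require vaddrin to be 0), a client can ask the driver whether it supports kernel allocation via FASTRPC_CONTROL_KALLOC. A minimal userspace sketch follows; the device node path and the standalone header definitions are assumptions for illustration only — the ioctl number and structure fields are copied from this patch.

/* Hypothetical userspace probe for kernel-side remote allocation.
 * Assumes the fastrpc device node is /dev/adsprpc-smd and that the
 * FASTRPC_IOCTL_CONTROL definitions below match this patch.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define FASTRPC_CONTROL_KALLOC 3

struct fastrpc_ctrl_kalloc {
    uint32_t kalloc_support;    /* remote memory allocated in kernel */
};

struct fastrpc_ioctl_control {
    uint32_t req;
    union {
        struct fastrpc_ctrl_kalloc kalloc;
    };
};

#define FASTRPC_IOCTL_CONTROL _IOWR('R', 12, struct fastrpc_ioctl_control)

int main(void)
{
    struct fastrpc_ioctl_control cp = { .req = FASTRPC_CONTROL_KALLOC };
    int fd = open("/dev/adsprpc-smd", O_RDWR);  /* assumed node name */

    if (fd < 0)
        return 1;
    if (ioctl(fd, FASTRPC_IOCTL_CONTROL, &cp) == 0)
        printf("kalloc_support = %u\n", cp.kalloc.kalloc_support);
    close(fd);
    return 0;
}

A process that reads back kalloc_support == 1 can then issue FASTRPC_IOCTL_MMAP with flags set to ADSP_MMAP_ADD_PAGES and vaddrin left at 0; the driver allocates the pages itself and returns the remote address in vaddrout.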
@@ -55,6 +55,8 @@
 #define TZ_PIL_AUTH_QDSP6_PROC 1
 #define ADSP_MMAP_HEAP_ADDR 4
 #define ADSP_MMAP_REMOTE_HEAP_ADDR 8
+#define ADSP_MMAP_ADD_PAGES 0x1000
+
 #define FASTRPC_ENOSUCH 39
 #define VMID_SSC_Q6 38
 #define VMID_ADSP_Q6 6
@@ -126,6 +128,7 @@ static inline uint64_t buf_num_pages(uint64_t buf, size_t len)
 static inline uint64_t buf_page_size(uint32_t size)
 {
     uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
     return sz > PAGE_SIZE ? sz : PAGE_SIZE;
 }
 
@@ -145,10 +148,15 @@ struct fastrpc_file;
 
 struct fastrpc_buf {
     struct hlist_node hn;
+    struct hlist_node hn_rem;
     struct fastrpc_file *fl;
     void *virt;
     uint64_t phys;
     size_t size;
+    struct dma_attrs attrs;
+    uintptr_t raddr;
+    uint32_t flags;
+    int remote;
 };
 
 struct fastrpc_ctx_lst;
@@ -292,9 +300,11 @@ struct fastrpc_file {
     struct hlist_node hn;
     spinlock_t hlock;
     struct hlist_head maps;
-    struct hlist_head bufs;
+    struct hlist_head cached_bufs;
+    struct hlist_head remote_bufs;
     struct fastrpc_ctx_lst clst;
     struct fastrpc_session_ctx *sctx;
+    struct fastrpc_buf *init_mem;
     struct fastrpc_session_ctx *secsctx;
     uint32_t mode;
     uint32_t profile;
@@ -363,10 +373,17 @@ static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
         return;
     if (cache) {
         spin_lock(&fl->hlock);
-        hlist_add_head(&buf->hn, &fl->bufs);
+        hlist_add_head(&buf->hn, &fl->cached_bufs);
         spin_unlock(&fl->hlock);
         return;
     }
+    if (buf->remote) {
+        spin_lock(&fl->hlock);
+        hlist_del_init(&buf->hn_rem);
+        spin_unlock(&fl->hlock);
+        buf->remote = 0;
+        buf->raddr = 0;
+    }
     if (!IS_ERR_OR_NULL(buf->virt)) {
         int destVM[1] = {VMID_HLOS};
         int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
@@ -380,21 +397,22 @@ static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
             hyp_assign_phys(buf->phys, buf_page_size(buf->size),
                 srcVM, 2, destVM, destVMperm, 1);
         }
-        dma_free_coherent(fl->sctx->smmu.dev, buf->size, buf->virt,
-                    buf->phys);
+        dma_free_attrs(fl->sctx->smmu.dev, buf->size, buf->virt,
+                    buf->phys, (struct dma_attrs *)&buf->attrs);
     }
     kfree(buf);
 }
 
-static void fastrpc_buf_list_free(struct fastrpc_file *fl)
+static void fastrpc_cached_buf_list_free(struct fastrpc_file *fl)
 {
     struct fastrpc_buf *buf, *free;
 
     do {
         struct hlist_node *n;
 
         free = NULL;
         spin_lock(&fl->hlock);
-        hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
+        hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
             hlist_del_init(&buf->hn);
             free = buf;
             break;
@@ -405,6 +423,25 @@ static void fastrpc_buf_list_free(struct fastrpc_file *fl)
     } while (free);
 }
 
+static void fastrpc_remote_buf_list_free(struct fastrpc_file *fl)
+{
+    struct fastrpc_buf *buf, *free;
+
+    do {
+        struct hlist_node *n;
+
+        free = NULL;
+        spin_lock(&fl->hlock);
+        hlist_for_each_entry_safe(buf, n, &fl->remote_bufs, hn_rem) {
+            free = buf;
+            break;
+        }
+        spin_unlock(&fl->hlock);
+        if (free)
+            fastrpc_buf_free(free, 0);
+    } while (free);
+}
+
 static void fastrpc_mmap_add(struct fastrpc_mmap *map)
 {
     if (map->flags == ADSP_MMAP_HEAP_ADDR ||
@@ -465,23 +502,21 @@ static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, uintptr_t va,
     return -ENOTTY;
 }
 
-static int dma_alloc_memory(phys_addr_t *region_start, size_t size)
+static int dma_alloc_memory(dma_addr_t *region_start, void **vaddr, size_t size,
+            struct dma_attrs *attrs)
 {
     struct fastrpc_apps *me = &gfa;
-    void *vaddr = NULL;
-    DEFINE_DMA_ATTRS(attrs);
 
     if (me->dev == NULL) {
         pr_err("device adsprpc-mem is not initialized\n");
         return -ENODEV;
     }
-    dma_set_attr(DMA_ATTR_SKIP_ZEROING, &attrs);
-    dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
-    vaddr = dma_alloc_attrs(me->dev, size, region_start, GFP_KERNEL,
-                &attrs);
-    if (!vaddr) {
-        pr_err("ADSPRPC: Failed to allocate %x remote heap memory\n",
-                (unsigned int)size);
+    *vaddr = dma_alloc_attrs(me->dev, size, region_start,
+                GFP_KERNEL, attrs);
+    if (IS_ERR_OR_NULL(*vaddr)) {
+        pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx, returned %pK\n",
+                current->comm, __func__, size, (*vaddr));
         return -ENOMEM;
     }
     return 0;
@@ -617,7 +652,8 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, unsigned attr,
     struct fastrpc_channel_ctx *chan = &apps->channel[cid];
     struct fastrpc_mmap *map = NULL;
     struct dma_attrs attrs;
-    phys_addr_t region_start = 0;
+    dma_addr_t region_start = 0;
+    void *region_vaddr = NULL;
     unsigned long flags;
     int err = 0, vmid;
 
@@ -635,14 +671,20 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, unsigned attr,
     map->attr = attr;
     if (mflags == ADSP_MMAP_HEAP_ADDR ||
                 mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+        DEFINE_DMA_ATTRS(rh_attrs);
+
+        dma_set_attr(DMA_ATTR_SKIP_ZEROING, &rh_attrs);
+        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rh_attrs);
+
         map->apps = me;
         map->fl = NULL;
-        VERIFY(err, !dma_alloc_memory(&region_start, len));
+        VERIFY(err, !dma_alloc_memory(&region_start,
+                &region_vaddr, len, &rh_attrs));
         if (err)
             goto bail;
         map->phys = (uintptr_t)region_start;
         map->size = len;
-        map->va = (uintptr_t)map->phys;
+        map->va = (uintptr_t)region_vaddr;
     } else {
         VERIFY(err, !IS_ERR_OR_NULL(map->handle =
             ion_import_dma_buf(fl->apps->client, fd)));
@@ -741,7 +783,8 @@ bail:
 }
 
 static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
-            struct fastrpc_buf **obuf)
+            struct dma_attrs attr, uint32_t rflags,
+            int remote, struct fastrpc_buf **obuf)
 {
     int err = 0, vmid;
     struct fastrpc_buf *buf = NULL, *fr = NULL;
@@ -751,9 +794,10 @@ static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
     if (err)
         goto bail;
 
+    if (!remote) {
     /* find the smallest buffer that fits in the cache */
     spin_lock(&fl->hlock);
-    hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
+    hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
         if (buf->size >= size && (!fr || fr->size > buf->size))
             fr = buf;
     }
@@ -764,6 +808,7 @@ static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
         *obuf = fr;
         return 0;
     }
+    }
     buf = NULL;
     VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
     if (err)
@@ -773,17 +818,29 @@ static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
     buf->virt = NULL;
     buf->phys = 0;
     buf->size = size;
-    buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
-                (void *)&buf->phys, GFP_KERNEL);
+    memcpy(&buf->attrs, &attr, sizeof(struct dma_attrs));
+    buf->flags = rflags;
+    buf->raddr = 0;
+    buf->remote = 0;
+    buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size,
+                (dma_addr_t *)&buf->phys,
                GFP_KERNEL,
+                (struct dma_attrs *)&buf->attrs);
     if (IS_ERR_OR_NULL(buf->virt)) {
         /* free cache and retry */
-        fastrpc_buf_list_free(fl);
-        buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
-                    (void *)&buf->phys, GFP_KERNEL);
+        fastrpc_cached_buf_list_free(fl);
+        buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size,
+                    (dma_addr_t *)&buf->phys,
+                    GFP_KERNEL,
+                    (struct dma_attrs *)&buf->attrs);
         VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
     }
-    if (err)
+    if (err) {
+        err = -ENOMEM;
+        pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx\n",
                current->comm, __func__, size);
         goto bail;
+    }
     if (fl->sctx->smmu.cb)
         buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
     vmid = fl->apps->channel[fl->cid].vmid;
@@ -799,6 +856,13 @@ static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
         goto bail;
     }
 
+    if (remote) {
+        INIT_HLIST_NODE(&buf->hn_rem);
+        spin_lock(&fl->hlock);
+        hlist_add_head(&buf->hn_rem, &fl->remote_bufs);
+        spin_unlock(&fl->hlock);
+        buf->remote = remote;
+    }
     *obuf = buf;
 bail:
     if (err && buf)
@@ -806,7 +870,6 @@ static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
     return err;
 }
 
-
 static int context_restore_interrupted(struct fastrpc_file *fl,
             struct fastrpc_ioctl_invoke_attrs *inv,
             struct smq_invoke_ctx **po)
@@ -815,6 +878,7 @@ static int context_restore_interrupted(struct fastrpc_file *fl,
     struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
     struct hlist_node *n;
     struct fastrpc_ioctl_invoke *invoke = &inv->inv;
+
     spin_lock(&fl->hlock);
     hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
         if (ictx->pid == current->pid) {
@@ -854,6 +918,7 @@ static int context_build_overlap(struct smq_invoke_ctx *ctx)
     int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
     int nbufs = inbufs + outbufs;
     struct overlap max;
+
     for (i = 0; i < nbufs; ++i) {
         ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
         ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
@@ -1003,12 +1068,13 @@ bail:
 static void context_save_interrupted(struct smq_invoke_ctx *ctx)
 {
     struct fastrpc_ctx_lst *clst = &ctx->fl->clst;
+
     spin_lock(&ctx->fl->hlock);
     hlist_del_init(&ctx->hn);
     hlist_add_head(&ctx->hn, &clst->interrupted);
     spin_unlock(&ctx->fl->hlock);
     /* free the cache on power collapse */
-    fastrpc_buf_list_free(ctx->fl);
+    fastrpc_cached_buf_list_free(ctx->fl);
 }
 
 static void context_free(struct smq_invoke_ctx *ctx)
@@ -1044,11 +1110,11 @@ static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
     complete(&ctx->work);
 }
 
-
 static void fastrpc_notify_users(struct fastrpc_file *me)
 {
     struct smq_invoke_ctx *ictx;
     struct hlist_node *n;
+
     spin_lock(&me->hlock);
     hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
         complete(&ictx->work);
@@ -1064,6 +1130,7 @@ static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
 {
     struct fastrpc_file *fl;
     struct hlist_node *n;
+
     spin_lock(&me->hlock);
     hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
         if (fl->cid == cid)
@@ -1072,6 +1139,7 @@ static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
     spin_unlock(&me->hlock);
 
 }
+
 static void context_list_ctor(struct fastrpc_ctx_lst *me)
 {
     INIT_HLIST_HEAD(&me->interrupted);
@@ -1083,6 +1151,7 @@ static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
     struct fastrpc_ctx_lst *clst = &fl->clst;
     struct smq_invoke_ctx *ictx = NULL, *ctxfree;
     struct hlist_node *n;
+
     do {
         ctxfree = NULL;
         spin_lock(&fl->hlock);
@@ -1110,10 +1179,12 @@ static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
 }
 
 static int fastrpc_file_free(struct fastrpc_file *fl);
+
 static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
 {
     struct fastrpc_file *fl, *free;
     struct hlist_node *n;
+
     do {
         free = NULL;
         spin_lock(&me->hlock);
@@ -1187,7 +1258,10 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 
     /* allocate new buffer */
     if (copylen) {
-        VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
+        DEFINE_DMA_ATTRS(ctx_attrs);
+
+        err = fastrpc_buf_alloc(ctx->fl, copylen, ctx_attrs,
+                    0, 0, &ctx->buf);
         if (err)
             goto bail;
     }
@@ -1206,6 +1280,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
     args = (uintptr_t)ctx->buf->virt + metalen;
     for (i = 0; i < bufs; ++i) {
         size_t len = lpra[i].buf.len;
+
         list[i].num = 0;
         list[i].pgidx = 0;
         if (!len)
@@ -1220,6 +1295,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
         struct fastrpc_mmap *map = ctx->maps[i];
         uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
         size_t len = lpra[i].buf.len;
+
         rpra[i].buf.pv = 0;
         rpra[i].buf.len = len;
         if (!len)
@@ -1561,6 +1637,7 @@ static void smd_event_handler(void *priv, unsigned event)
 static void fastrpc_init(struct fastrpc_apps *me)
 {
     int i;
+
     INIT_HLIST_HEAD(&me->drivers);
     INIT_HLIST_HEAD(&me->maps);
     spin_lock_init(&me->hlock);
@@ -1684,6 +1761,7 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
     struct fastrpc_ioctl_init *init = &uproc->init;
     struct smq_phy_page pages[1];
     struct fastrpc_mmap *file = NULL, *mem = NULL;
+    struct fastrpc_buf *imem = NULL;
     char *proc_name = NULL;
     int srcVM[1] = {VMID_HLOS};
     int destVM[1] = {gcinfo[0].heap_vmid};
@@ -1696,6 +1774,7 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
     if (init->flags == FASTRPC_INIT_ATTACH) {
         remote_arg_t ra[1];
         int tgid = current->tgid;
+
         ra[0].buf.pv = (void *)&tgid;
         ra[0].buf.len = sizeof(tgid);
         ioctl.inv.handle = 1;
@@ -1712,6 +1791,8 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
         remote_arg_t ra[6];
         int fds[6];
         int mflags = 0;
+        int memlen;
+        DEFINE_DMA_ATTRS(imem_dma_attr);
         struct {
             int pgid;
             unsigned int namelen;
@@ -1734,14 +1815,27 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
             if (err)
                 goto bail;
         }
-        if (!access_ok(1, (void const __user *)init->mem,
-            init->memlen))
-            goto bail;
         inbuf.pageslen = 1;
-        VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
-            init->mem, init->memlen, mflags, &mem));
+        VERIFY(err, !init->mem);
+        if (err) {
+            err = -EINVAL;
+            pr_err("adsprpc: %s: %s: ERROR: donated memory allocated in userspace\n",
+                current->comm, __func__);
+            goto bail;
+        }
+        memlen = ALIGN(max(1024*1024*3, (int)init->filelen * 4),
+                        1024*1024);
+
+        dma_set_attr(DMA_ATTR_EXEC_MAPPING, &imem_dma_attr);
+        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &imem_dma_attr);
+        dma_set_attr(DMA_ATTR_FORCE_NON_COHERENT, &imem_dma_attr);
+
+        err = fastrpc_buf_alloc(fl, memlen, imem_dma_attr, 0, 0, &imem);
         if (err)
             goto bail;
+        fl->init_mem = imem;
+
         inbuf.pageslen = 1;
         ra[0].buf.pv = (void *)&inbuf;
         ra[0].buf.len = sizeof(inbuf);
@@ -1755,8 +1849,8 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
         ra[2].buf.len = inbuf.filelen;
         fds[2] = init->filefd;
 
-        pages[0].addr = mem->phys;
-        pages[0].size = mem->size;
+        pages[0].addr = imem->phys;
+        pages[0].size = imem->size;
         ra[3].buf.pv = (void *)pages;
         ra[3].buf.len = 1 * sizeof(*pages);
         fds[3] = 0;
@@ -1896,7 +1990,8 @@ bail:
 }
 
 static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
-                struct fastrpc_mmap *map)
+                uintptr_t va, uint64_t phys,
+                size_t size, uintptr_t *raddr)
 {
     struct fastrpc_ioctl_invoke_attrs ioctl;
     struct smq_phy_page page;
@@ -1913,14 +2008,15 @@ static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
     struct {
         uintptr_t vaddrout;
     } routargs;
 
     inargs.pid = current->tgid;
-    inargs.vaddrin = (uintptr_t)map->va;
+    inargs.vaddrin = (uintptr_t)va;
     inargs.flags = flags;
     inargs.num = fl->apps->compat ? num * sizeof(page) : num;
     ra[0].buf.pv = (void *)&inargs;
     ra[0].buf.len = sizeof(inargs);
-    page.addr = map->phys;
-    page.size = map->size;
+    page.addr = phys;
+    page.size = size;
     ra[1].buf.pv = (void *)&page;
     ra[1].buf.len = num * sizeof(page);
+
@@ -1937,15 +2033,15 @@ static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
     ioctl.attrs = NULL;
     VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
         FASTRPC_MODE_PARALLEL, 1, &ioctl)));
-    map->raddr = (uintptr_t)routargs.vaddrout;
+    *raddr = (uintptr_t)routargs.vaddrout;
     if (err)
         goto bail;
     if (flags == ADSP_MMAP_HEAP_ADDR) {
         struct scm_desc desc = {0};
 
         desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
-        desc.args[1] = map->phys;
-        desc.args[2] = map->size;
+        desc.args[1] = phys;
+        desc.args[2] = size;
         desc.arginfo = SCM_ARGS(3);
         err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
             TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
@@ -1955,7 +2051,7 @@ static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
         int destVM[1] = {gcinfo[0].heap_vmid};
         int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
 
-        VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
+        VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
             srcVM, 1, destVM, destVMperm, 1));
         if (err)
             goto bail;
@@ -1964,16 +2060,17 @@ bail:
     return err;
 }
 
-static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
-                struct fastrpc_mmap *map)
+static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl, uint64_t phys,
+                size_t size, uint32_t flags)
 {
     int err = 0;
     int srcVM[1] = {gcinfo[0].heap_vmid};
     int destVM[1] = {VMID_HLOS};
     int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
 
-    if (map->flags == ADSP_MMAP_HEAP_ADDR) {
+    if (flags == ADSP_MMAP_HEAP_ADDR) {
         struct fastrpc_ioctl_invoke_attrs ioctl;
+
         struct scm_desc desc = {0};
         remote_arg_t ra[1];
         int err = 0;
@@ -1997,14 +2094,14 @@ static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
         if (err)
             goto bail;
         desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
-        desc.args[1] = map->phys;
-        desc.args[2] = map->size;
+        desc.args[1] = phys;
+        desc.args[2] = size;
         desc.args[3] = routargs.skey;
         desc.arginfo = SCM_ARGS(4);
         err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
             TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
-    } else if (map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
-        VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
+    } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+        VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
             srcVM, 1, destVM, destVMperm, 1));
         if (err)
             goto bail;
@@ -2014,8 +2111,8 @@ bail:
     return err;
 }
 
-static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
-                struct fastrpc_mmap *map)
+static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl, uintptr_t raddr,
+                uint64_t phys, size_t size, uint32_t flags)
 {
     struct fastrpc_ioctl_invoke_attrs ioctl;
     remote_arg_t ra[1];
@@ -2027,8 +2124,8 @@ static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
     } inargs;
 
     inargs.pid = current->tgid;
-    inargs.size = map->size;
-    inargs.vaddrout = map->raddr;
+    inargs.size = size;
+    inargs.vaddrout = raddr;
     ra[0].buf.pv = (void *)&inargs;
     ra[0].buf.len = sizeof(inargs);
 
@@ -2044,9 +2141,9 @@ static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
         FASTRPC_MODE_PARALLEL, 1, &ioctl)));
     if (err)
         goto bail;
-    if (map->flags == ADSP_MMAP_HEAP_ADDR ||
-                map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
-        VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, map));
+    if (flags == ADSP_MMAP_HEAP_ADDR ||
+                flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+        VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, phys, size, flags));
         if (err)
             goto bail;
     }
@@ -2073,7 +2170,8 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
     spin_unlock(&me->hlock);
 
     if (match) {
-        VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match));
+        VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match->phys,
                        match->size, match->flags));
         if (err)
             goto bail;
         if (me->channel[0].ramdumpenabled) {
@@ -2110,12 +2208,37 @@ static int fastrpc_internal_munmap(struct fastrpc_file *fl,
 {
     int err = 0;
     struct fastrpc_mmap *map = NULL;
+    struct fastrpc_buf *rbuf = NULL, *free = NULL;
+    struct hlist_node *n;
+
     mutex_lock(&fl->map_mutex);
+    spin_lock(&fl->hlock);
+    hlist_for_each_entry_safe(rbuf, n, &fl->remote_bufs, hn_rem) {
+        if (rbuf->raddr && (rbuf->flags == ADSP_MMAP_ADD_PAGES)) {
+            if ((rbuf->raddr == ud->vaddrout) &&
+                (rbuf->size == ud->size)) {
+                free = rbuf;
+                break;
+            }
+        }
+    }
+    spin_unlock(&fl->hlock);
+
+    if (free) {
+        VERIFY(err, !fastrpc_munmap_on_dsp(fl, free->raddr,
            free->phys, free->size, free->flags));
+        if (err)
+            goto bail;
+        fastrpc_buf_free(rbuf, 0);
+        mutex_unlock(&fl->map_mutex);
+        return err;
+    }
+
     VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
     if (err)
         goto bail;
-    VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
+    VERIFY(err, !fastrpc_munmap_on_dsp(fl, map->raddr,
                map->phys, map->size, map->flags));
     if (err)
         goto bail;
     fastrpc_mmap_free(map);
@@ -2131,22 +2254,55 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl,
 {
 
     struct fastrpc_mmap *map = NULL;
+    struct fastrpc_buf *rbuf = NULL;
+    uintptr_t raddr = 0;
     int err = 0;
 
     mutex_lock(&fl->map_mutex);
-    if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin, ud->size,
-            ud->flags, &map)){
-        mutex_unlock(&fl->map_mutex);
-        return 0;
+    if (ud->flags == ADSP_MMAP_ADD_PAGES) {
+        DEFINE_DMA_ATTRS(dma_attr);
+
+        if (ud->vaddrin) {
+            err = -EINVAL;
+            pr_err("adsprpc: %s: %s: ERROR: adding user allocated pages is not supported\n",
+                    current->comm, __func__);
+            goto bail;
     }
+        dma_set_attr(DMA_ATTR_EXEC_MAPPING, &dma_attr);
+        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &dma_attr);
+        dma_set_attr(DMA_ATTR_FORCE_NON_COHERENT, &dma_attr);
+
+        err = fastrpc_buf_alloc(fl, ud->size, dma_attr, ud->flags,
                                1, &rbuf);
+        if (err)
+            goto bail;
+        err = fastrpc_mmap_on_dsp(fl, ud->flags, 0,
                rbuf->phys, rbuf->size, &raddr);
+        if (err)
+            goto bail;
+        rbuf->raddr = raddr;
+    } else {
+        uintptr_t va_to_dsp;
+
     VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
-        (uintptr_t)ud->vaddrin, ud->size, ud->flags, &map));
+        (uintptr_t)ud->vaddrin, ud->size,
            ud->flags, &map));
     if (err)
         goto bail;
-    VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
+
+    if (ud->flags == ADSP_MMAP_HEAP_ADDR ||
            ud->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
+        va_to_dsp = 0;
+    else
+        va_to_dsp = (uintptr_t)map->va;
+    VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, va_to_dsp,
            map->phys, map->size, &raddr));
     if (err)
         goto bail;
-    ud->vaddrout = map->raddr;
+    map->raddr = raddr;
+    }
+    ud->vaddrout = raddr;
 bail:
     if (err && map)
         fastrpc_mmap_free(map);
@@ -2321,15 +2477,16 @@ static int fastrpc_file_free(struct fastrpc_file *fl)
     spin_unlock(&fl->apps->hlock);
     kfree(fl->debug_buf);
 
-    if (!fl->sctx) {
+    if (!fl->sctx)
         goto bail;
-    }
 
     spin_lock(&fl->hlock);
     fl->file_close = 1;
     spin_unlock(&fl->hlock);
+    if (!IS_ERR_OR_NULL(fl->init_mem))
+        fastrpc_buf_free(fl->init_mem, 0);
     fastrpc_context_list_dtor(fl);
-    fastrpc_buf_list_free(fl);
+    fastrpc_cached_buf_list_free(fl);
     hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
         fastrpc_mmap_free(map);
     }
@@ -2341,6 +2498,7 @@ static int fastrpc_file_free(struct fastrpc_file *fl)
     if (fl->secsctx)
         fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
 bail:
+    fastrpc_remote_buf_list_free(fl);
     mutex_destroy(&fl->map_mutex);
     kfree(fl);
     return 0;
@@ -2480,7 +2638,6 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
     struct fastrpc_apps *me = &gfa;
     struct fastrpc_file *fl = filp->private_data;
     struct hlist_node *n;
-    struct fastrpc_buf *buf = NULL;
     struct fastrpc_mmap *map = NULL;
     struct fastrpc_mmap *gmaps = NULL;
     struct smq_invoke_ctx *ictx = NULL;
@@ -2640,22 +2797,6 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
                "%-20d|0x%-20lX\n\n",
                map->secure, map->attr);
         }
-        len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
-            "\n======%s %s %s======\n", title,
-            " LIST OF BUFS ", title);
-        spin_lock(&fl->hlock);
-        len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
-            "%-19s|%-19s|%-19s\n",
-            "virt", "phys", "size");
-        len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
-            "%s%s%s%s%s\n", single_line, single_line,
-            single_line, single_line, single_line);
-        hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
-            len += scnprintf(fileinfo + len,
-                    DEBUGFS_SIZE - len,
-                    "0x%-17p|0x%-17llX|%-19zu\n",
-                    buf->virt, (uint64_t)buf->phys, buf->size);
-        }
         len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
             "\n%s %s %s\n", title,
             " LIST OF PENDING SMQCONTEXTS ", title);
@@ -2800,12 +2941,15 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
     context_list_ctor(&fl->clst);
     spin_lock_init(&fl->hlock);
     INIT_HLIST_HEAD(&fl->maps);
-    INIT_HLIST_HEAD(&fl->bufs);
+    INIT_HLIST_HEAD(&fl->cached_bufs);
+    INIT_HLIST_HEAD(&fl->remote_bufs);
    INIT_HLIST_NODE(&fl->hn);
     fl->tgid = current->tgid;
     fl->apps = me;
     fl->mode = FASTRPC_MODE_SERIAL;
     fl->cid = -1;
+    fl->init_mem = NULL;
+
     if (debugfs_file != NULL)
         fl->debugfs_file = debugfs_file;
     memset(&fl->perf, 0, sizeof(fl->perf));
@@ -2843,6 +2987,30 @@ bail:
     return err;
 }
 
+static int fastrpc_internal_control(struct fastrpc_file *fl,
+                    struct fastrpc_ioctl_control *cp)
+{
+    int err = 0;
+
+    VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
+    if (err)
+        goto bail;
+    VERIFY(err, !IS_ERR_OR_NULL(cp));
+    if (err)
+        goto bail;
+
+    switch (cp->req) {
+    case FASTRPC_CONTROL_KALLOC:
+        cp->kalloc.kalloc_support = 1;
+        break;
+    default:
+        err = -ENOTTY;
+        break;
+    }
+bail:
+    return err;
+}
+
 static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
                 unsigned long ioctl_param)
 {
@@ -2852,6 +3020,7 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
         struct fastrpc_ioctl_munmap munmap;
         struct fastrpc_ioctl_init_attrs init;
         struct fastrpc_ioctl_perf perf;
+        struct fastrpc_ioctl_control cp;
     } p;
     void *param = (char *)ioctl_param;
     struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
@@ -2967,6 +3136,20 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
         if (err)
             goto bail;
         break;
+    case FASTRPC_IOCTL_CONTROL:
+        K_COPY_FROM_USER(err, 0, &p.cp, param,
                sizeof(p.cp));
+        if (err)
+            goto bail;
+        VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
+        if (err)
+            goto bail;
+        if (p.cp.req == FASTRPC_CONTROL_KALLOC) {
+            K_COPY_TO_USER(err, 0, param, &p.cp, sizeof(p.cp));
+            if (err)
+                goto bail;
+        }
+        break;
     case FASTRPC_IOCTL_GETINFO:
         K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
         if (err)
@@ -3283,6 +3466,7 @@ static void fastrpc_deinit(void)
         }
         for (j = 0; j < NUM_SESSIONS; j++) {
             struct fastrpc_session_ctx *sess = &chan->session[j];
+
             if (sess->smmu.dev) {
                 arm_iommu_detach_device(sess->smmu.dev);
                 sess->smmu.dev = NULL;
@@ -36,6 +36,8 @@
     _IOWR('R', 9, struct compat_fastrpc_ioctl_perf)
 #define COMPAT_FASTRPC_IOCTL_INIT_ATTRS \
     _IOWR('R', 10, struct compat_fastrpc_ioctl_init_attrs)
+#define COMPAT_FASTRPC_IOCTL_CONTROL \
+    _IOWR('R', 12, struct compat_fastrpc_ioctl_control)
 #define COMPAT_FASTRPC_IOCTL_MMAP_64 \
     _IOWR('R', 14, struct compat_fastrpc_ioctl_mmap_64)
 #define COMPAT_FASTRPC_IOCTL_MUNMAP_64 \
@@ -117,6 +119,18 @@ struct compat_fastrpc_ioctl_perf { /* kernel performance data */
     compat_uptr_t keys;
 };
 
+#define FASTRPC_CONTROL_KALLOC (3)
+struct compat_fastrpc_ctrl_kalloc {
+    compat_uint_t kalloc_support;  /* Remote memory allocation from kernel */
+};
+
+struct compat_fastrpc_ioctl_control {
+    compat_uint_t req;
+    union {
+        struct compat_fastrpc_ctrl_kalloc kalloc;
+    };
+};
+
 static int compat_get_fastrpc_ioctl_invoke(
             struct compat_fastrpc_ioctl_invoke_attrs __user *inv32,
             struct fastrpc_ioctl_invoke_attrs __user **inva,
@@ -322,6 +336,19 @@ static int compat_get_fastrpc_ioctl_perf(
     return err;
 }
 
+static int compat_get_fastrpc_ioctl_control(
+            struct compat_fastrpc_ioctl_control __user *ctrl32,
+            struct fastrpc_ioctl_control __user *ctrl)
+{
+    compat_uptr_t p;
+    int err;
+
+    err = get_user(p, &ctrl32->req);
+    err |= put_user(p, &ctrl->req);
+
+    return err;
+}
+
 static int compat_get_fastrpc_ioctl_init(
             struct compat_fastrpc_ioctl_init_attrs __user *init32,
             struct fastrpc_ioctl_init_attrs __user *init,
@@ -513,6 +540,34 @@ long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd,
     case FASTRPC_IOCTL_SETMODE:
         return filp->f_op->unlocked_ioctl(filp, cmd,
                        (unsigned long)compat_ptr(arg));
+    case COMPAT_FASTRPC_IOCTL_CONTROL:
+    {
+        struct compat_fastrpc_ioctl_control __user *ctrl32;
+        struct fastrpc_ioctl_control __user *ctrl;
+        compat_uptr_t p;
+
+        ctrl32 = compat_ptr(arg);
+        VERIFY(err, NULL != (ctrl = compat_alloc_user_space(
                            sizeof(*ctrl))));
+        if (err)
+            return -EFAULT;
+        VERIFY(err, 0 == compat_get_fastrpc_ioctl_control(ctrl32,
                            ctrl));
+        if (err)
+            return err;
+        err = filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_CONTROL,
                        (unsigned long)ctrl);
+        if (err)
+            return err;
+        err = get_user(p, &ctrl32->req);
+        if (err)
+            return err;
+        if (p == FASTRPC_CONTROL_KALLOC) {
+            err = get_user(p, &ctrl->kalloc.kalloc_support);
+            err |= put_user(p, &ctrl32->kalloc.kalloc_support);
+        }
+        return err;
+    }
     case COMPAT_FASTRPC_IOCTL_GETPERF:
     {
         struct compat_fastrpc_ioctl_perf __user *perf32;
@@ -29,6 +29,7 @@
 #define FASTRPC_IOCTL_GETINFO _IOWR('R', 8, uint32_t)
 #define FASTRPC_IOCTL_GETPERF _IOWR('R', 9, struct fastrpc_ioctl_perf)
 #define FASTRPC_IOCTL_INIT_ATTRS _IOWR('R', 10, struct fastrpc_ioctl_init_attrs)
+#define FASTRPC_IOCTL_CONTROL _IOWR('R', 12, struct fastrpc_ioctl_control)
 
 #define FASTRPC_GLINK_GUID "fastrpcglink-apps-dsp"
 #define FASTRPC_SMD_GUID "fastrpcsmd-apps-dsp"
@@ -201,6 +202,31 @@ struct fastrpc_ioctl_perf { /* kernel performance data */
     uintptr_t keys;
 };
 
+#define FASTRPC_CONTROL_LATENCY (1)
+struct fastrpc_ctrl_latency {
+    uint32_t enable;    /* latency control enable */
+    uint32_t level;     /* level of control */
+};
+
+#define FASTRPC_CONTROL_SMMU (2)
+struct fastrpc_ctrl_smmu {
+    uint32_t sharedcb;
+};
+
+#define FASTRPC_CONTROL_KALLOC (3)
+struct fastrpc_ctrl_kalloc {
+    uint32_t kalloc_support;  /* Remote memory allocation from kernel */
+};
+
+struct fastrpc_ioctl_control {
+    uint32_t req;
+    union {
+        struct fastrpc_ctrl_latency lp;
+        struct fastrpc_ctrl_smmu smmu;
+        struct fastrpc_ctrl_kalloc kalloc;
+    };
+};
+
 struct smq_null_invoke {
     uint64_t ctx;        /* invoke caller context */
     uint32_t handle;     /* handle to invoke */
@@ -245,6 +271,7 @@ static inline struct smq_phy_page *smq_phy_page_start(uint32_t sc,
         struct smq_invoke_buf *buf)
 {
     uint64_t nTotal = REMOTE_SCALARS_INBUFS(sc)+REMOTE_SCALARS_OUTBUFS(sc);
+
     return (struct smq_phy_page *)(&buf[nTotal]);
 }
 
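Taken together, the three files converge on a single allocation path: every remote buffer — cached context buffers, the process init memory, ADSP_MMAP_ADD_PAGES regions and the remote heap — now comes from dma_alloc_attrs() with an explicit struct dma_attrs, and is released through dma_free_attrs() with the same attributes. A condensed sketch of that pre-4.8 dma_attrs pattern is shown below; the helper names and the bare dev pointer are placeholders (the driver uses fl->sctx->smmu.dev or the adsprpc-mem device), not code from the patch.

/* Sketch of the dma_attrs allocation pattern this patch standardizes on
 * (pre-4.8 DMA-mapping API). "dev" stands in for the session SMMU device
 * or the adsprpc-mem device; error handling is reduced for brevity.
 */
#include <linux/device.h>
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gfp.h>

static void *remote_buf_alloc(struct device *dev, size_t size,
                              dma_addr_t *phys)
{
    void *va;
    DEFINE_DMA_ATTRS(attrs);

    /* remote-heap style attributes: skip zeroing, no kernel mapping */
    dma_set_attr(DMA_ATTR_SKIP_ZEROING, &attrs);
    dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

    va = dma_alloc_attrs(dev, size, phys, GFP_KERNEL, &attrs);
    if (IS_ERR_OR_NULL(va))
        return NULL;
    return va;
}

static void remote_buf_free(struct device *dev, size_t size,
                            void *va, dma_addr_t phys)
{
    DEFINE_DMA_ATTRS(attrs);

    /* free with the same attributes that were used for the allocation */
    dma_set_attr(DMA_ATTR_SKIP_ZEROING, &attrs);
    dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
    dma_free_attrs(dev, size, va, phys, &attrs);
}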