commit 6861312cbe

Merge tag 'LA.UM.7.4.r1-04700-8x98.0' into auto

"LA.UM.7.4.r1-04700-8x98.0"

Change-Id: I777ab5e199fb463581fbfaf4750c8358d6f9e56f

39 changed files with 1329 additions and 654 deletions
@@ -300,7 +300,9 @@ dtb-$(CONFIG_ARCH_SDM630) += sdm630-rumi.dtb \
 		sdm630-pm660a-headset-jacktype-no-rcm.dtb \
 		sdm455-mtp.dtb \
 		sdm455-qrd.dtb \
-		sdm455-cdp.dtb
+		sdm455-cdp.dtb \
+		sdm455-rcm.dtb \
+		sdm455-internal-codec-rcm.dtb

 ifeq ($(CONFIG_ARM64),y)
 always		:= $(dtb-y)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017,2019 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -81,6 +81,7 @@
 	qcom,gpu-qdss-stm = <0x081c0000 0x40000>; // base addr, size

+	qcom,tsens-name = "tsens_tz_sensor14";
 	/* Trace bus */
 	coresight-id = <300>;
 	coresight-name = "coresight-gfx";

arch/arm/boot/dts/qcom/sdm455-internal-codec-rcm.dts (new file, 24 lines)
@@ -0,0 +1,24 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm455.dtsi"
+#include "sdm455-rcm.dtsi"
+#include "sdm660-internal-codec.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM455 Int. Audio Codec RCM";
+	compatible = "qcom,sdm630-cdp", "qcom,sdm630", "qcom,cdp";
+	qcom,board-id = <21 1>;
+};
arch/arm/boot/dts/qcom/sdm455-rcm.dts (new file, 34 lines)
@@ -0,0 +1,34 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm455.dtsi"
+#include "sdm455-rcm.dtsi"
+#include "sdm660-external-codec.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM 630 PM660 + PM660L RCM";
+	compatible = "qcom,sdm630-cdp", "qcom,sdm630", "qcom,cdp";
+	qcom,board-id = <21 0>;
+};
+
+&tavil_snd {
+	qcom,msm-mbhc-hphl-swh = <0>;
+	qcom,msm-mbhc-gnd-swh = <0>;
+};
+
+&tasha_snd {
+	qcom,msm-mbhc-hphl-swh = <0>;
+	qcom,msm-mbhc-gnd-swh = <0>;
+};
arch/arm/boot/dts/qcom/sdm455-rcm.dtsi (new file, 13 lines)
@@ -0,0 +1,13 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdm630-cdp.dtsi"
@ -55,6 +55,8 @@
|
|||
#define TZ_PIL_AUTH_QDSP6_PROC 1
|
||||
#define ADSP_MMAP_HEAP_ADDR 4
|
||||
#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
|
||||
#define ADSP_MMAP_ADD_PAGES 0x1000
|
||||
|
||||
#define FASTRPC_ENOSUCH 39
|
||||
#define VMID_SSC_Q6 38
|
||||
#define VMID_ADSP_Q6 6
|
||||
|
@ -126,6 +128,7 @@ static inline uint64_t buf_num_pages(uint64_t buf, size_t len)
|
|||
static inline uint64_t buf_page_size(uint32_t size)
|
||||
{
|
||||
uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
|
||||
|
||||
return sz > PAGE_SIZE ? sz : PAGE_SIZE;
|
||||
}
|
||||
|
||||
|
@ -145,10 +148,15 @@ struct fastrpc_file;
|
|||
|
||||
struct fastrpc_buf {
|
||||
struct hlist_node hn;
|
||||
struct hlist_node hn_rem;
|
||||
struct fastrpc_file *fl;
|
||||
void *virt;
|
||||
uint64_t phys;
|
||||
size_t size;
|
||||
struct dma_attrs attrs;
|
||||
uintptr_t raddr;
|
||||
uint32_t flags;
|
||||
int remote;
|
||||
};
|
||||
|
||||
struct fastrpc_ctx_lst;
|
||||
|
@ -292,9 +300,11 @@ struct fastrpc_file {
|
|||
struct hlist_node hn;
|
||||
spinlock_t hlock;
|
||||
struct hlist_head maps;
|
||||
struct hlist_head bufs;
|
||||
struct hlist_head cached_bufs;
|
||||
struct hlist_head remote_bufs;
|
||||
struct fastrpc_ctx_lst clst;
|
||||
struct fastrpc_session_ctx *sctx;
|
||||
struct fastrpc_buf *init_mem;
|
||||
struct fastrpc_session_ctx *secsctx;
|
||||
uint32_t mode;
|
||||
uint32_t profile;
|
||||
|
@ -363,10 +373,17 @@ static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
|
|||
return;
|
||||
if (cache) {
|
||||
spin_lock(&fl->hlock);
|
||||
hlist_add_head(&buf->hn, &fl->bufs);
|
||||
hlist_add_head(&buf->hn, &fl->cached_bufs);
|
||||
spin_unlock(&fl->hlock);
|
||||
return;
|
||||
}
|
||||
if (buf->remote) {
|
||||
spin_lock(&fl->hlock);
|
||||
hlist_del_init(&buf->hn_rem);
|
||||
spin_unlock(&fl->hlock);
|
||||
buf->remote = 0;
|
||||
buf->raddr = 0;
|
||||
}
|
||||
if (!IS_ERR_OR_NULL(buf->virt)) {
|
||||
int destVM[1] = {VMID_HLOS};
|
||||
int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
|
||||
|
@ -380,21 +397,22 @@ static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
|
|||
hyp_assign_phys(buf->phys, buf_page_size(buf->size),
|
||||
srcVM, 2, destVM, destVMperm, 1);
|
||||
}
|
||||
dma_free_coherent(fl->sctx->smmu.dev, buf->size, buf->virt,
|
||||
buf->phys);
|
||||
dma_free_attrs(fl->sctx->smmu.dev, buf->size, buf->virt,
|
||||
buf->phys, (struct dma_attrs *)&buf->attrs);
|
||||
}
|
||||
kfree(buf);
|
||||
}
|
||||
|
||||
static void fastrpc_buf_list_free(struct fastrpc_file *fl)
|
||||
static void fastrpc_cached_buf_list_free(struct fastrpc_file *fl)
|
||||
{
|
||||
struct fastrpc_buf *buf, *free;
|
||||
|
||||
do {
|
||||
struct hlist_node *n;
|
||||
|
||||
free = NULL;
|
||||
spin_lock(&fl->hlock);
|
||||
hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
|
||||
hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
|
||||
hlist_del_init(&buf->hn);
|
||||
free = buf;
|
||||
break;
|
||||
|
@ -405,6 +423,25 @@ static void fastrpc_buf_list_free(struct fastrpc_file *fl)
|
|||
} while (free);
|
||||
}
|
||||
|
||||
static void fastrpc_remote_buf_list_free(struct fastrpc_file *fl)
|
||||
{
|
||||
struct fastrpc_buf *buf, *free;
|
||||
|
||||
do {
|
||||
struct hlist_node *n;
|
||||
|
||||
free = NULL;
|
||||
spin_lock(&fl->hlock);
|
||||
hlist_for_each_entry_safe(buf, n, &fl->remote_bufs, hn_rem) {
|
||||
free = buf;
|
||||
break;
|
||||
}
|
||||
spin_unlock(&fl->hlock);
|
||||
if (free)
|
||||
fastrpc_buf_free(free, 0);
|
||||
} while (free);
|
||||
}
|
||||
|
||||
static void fastrpc_mmap_add(struct fastrpc_mmap *map)
|
||||
{
|
||||
if (map->flags == ADSP_MMAP_HEAP_ADDR ||
|
||||
|
@ -465,23 +502,21 @@ static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, uintptr_t va,
|
|||
return -ENOTTY;
|
||||
}
|
||||
|
||||
static int dma_alloc_memory(phys_addr_t *region_start, size_t size)
|
||||
static int dma_alloc_memory(dma_addr_t *region_start, void **vaddr, size_t size,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
struct fastrpc_apps *me = &gfa;
|
||||
void *vaddr = NULL;
|
||||
DEFINE_DMA_ATTRS(attrs);
|
||||
|
||||
if (me->dev == NULL) {
|
||||
pr_err("device adsprpc-mem is not initialized\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
dma_set_attr(DMA_ATTR_SKIP_ZEROING, &attrs);
|
||||
dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
|
||||
vaddr = dma_alloc_attrs(me->dev, size, region_start, GFP_KERNEL,
|
||||
&attrs);
|
||||
if (!vaddr) {
|
||||
pr_err("ADSPRPC: Failed to allocate %x remote heap memory\n",
|
||||
(unsigned int)size);
|
||||
|
||||
*vaddr = dma_alloc_attrs(me->dev, size, region_start,
|
||||
GFP_KERNEL, attrs);
|
||||
if (IS_ERR_OR_NULL(*vaddr)) {
|
||||
pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx, returned %pK\n",
|
||||
current->comm, __func__, size, (*vaddr));
|
||||
return -ENOMEM;
|
||||
}
|
||||
return 0;
|
||||
|
@ -617,7 +652,8 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, unsigned attr,
|
|||
struct fastrpc_channel_ctx *chan = &apps->channel[cid];
|
||||
struct fastrpc_mmap *map = NULL;
|
||||
struct dma_attrs attrs;
|
||||
phys_addr_t region_start = 0;
|
||||
dma_addr_t region_start = 0;
|
||||
void *region_vaddr = NULL;
|
||||
unsigned long flags;
|
||||
int err = 0, vmid;
|
||||
|
||||
|
@ -635,14 +671,20 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, unsigned attr,
|
|||
map->attr = attr;
|
||||
if (mflags == ADSP_MMAP_HEAP_ADDR ||
|
||||
mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
|
||||
DEFINE_DMA_ATTRS(rh_attrs);
|
||||
|
||||
dma_set_attr(DMA_ATTR_SKIP_ZEROING, &rh_attrs);
|
||||
dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rh_attrs);
|
||||
|
||||
map->apps = me;
|
||||
map->fl = NULL;
|
||||
VERIFY(err, !dma_alloc_memory(®ion_start, len));
|
||||
VERIFY(err, !dma_alloc_memory(®ion_start,
|
||||
®ion_vaddr, len, &rh_attrs));
|
||||
if (err)
|
||||
goto bail;
|
||||
map->phys = (uintptr_t)region_start;
|
||||
map->size = len;
|
||||
map->va = (uintptr_t)map->phys;
|
||||
map->va = (uintptr_t)region_vaddr;
|
||||
} else {
|
||||
VERIFY(err, !IS_ERR_OR_NULL(map->handle =
|
||||
ion_import_dma_buf(fl->apps->client, fd)));
|
||||
|
@ -741,7 +783,8 @@ bail:
|
|||
}
|
||||
|
||||
static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
|
||||
struct fastrpc_buf **obuf)
|
||||
struct dma_attrs attr, uint32_t rflags,
|
||||
int remote, struct fastrpc_buf **obuf)
|
||||
{
|
||||
int err = 0, vmid;
|
||||
struct fastrpc_buf *buf = NULL, *fr = NULL;
|
||||
|
@ -751,18 +794,20 @@ static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
|
|||
if (err)
|
||||
goto bail;
|
||||
|
||||
/* find the smallest buffer that fits in the cache */
|
||||
spin_lock(&fl->hlock);
|
||||
hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
|
||||
if (buf->size >= size && (!fr || fr->size > buf->size))
|
||||
fr = buf;
|
||||
}
|
||||
if (fr)
|
||||
hlist_del_init(&fr->hn);
|
||||
spin_unlock(&fl->hlock);
|
||||
if (fr) {
|
||||
*obuf = fr;
|
||||
return 0;
|
||||
if (!remote) {
|
||||
/* find the smallest buffer that fits in the cache */
|
||||
spin_lock(&fl->hlock);
|
||||
hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
|
||||
if (buf->size >= size && (!fr || fr->size > buf->size))
|
||||
fr = buf;
|
||||
}
|
||||
if (fr)
|
||||
hlist_del_init(&fr->hn);
|
||||
spin_unlock(&fl->hlock);
|
||||
if (fr) {
|
||||
*obuf = fr;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
buf = NULL;
|
||||
VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
|
||||
|
@ -773,17 +818,29 @@ static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
|
|||
buf->virt = NULL;
|
||||
buf->phys = 0;
|
||||
buf->size = size;
|
||||
buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
|
||||
(void *)&buf->phys, GFP_KERNEL);
|
||||
memcpy(&buf->attrs, &attr, sizeof(struct dma_attrs));
|
||||
buf->flags = rflags;
|
||||
buf->raddr = 0;
|
||||
buf->remote = 0;
|
||||
buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size,
|
||||
(dma_addr_t *)&buf->phys,
|
||||
GFP_KERNEL,
|
||||
(struct dma_attrs *)&buf->attrs);
|
||||
if (IS_ERR_OR_NULL(buf->virt)) {
|
||||
/* free cache and retry */
|
||||
fastrpc_buf_list_free(fl);
|
||||
buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
|
||||
(void *)&buf->phys, GFP_KERNEL);
|
||||
fastrpc_cached_buf_list_free(fl);
|
||||
buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size,
|
||||
(dma_addr_t *)&buf->phys,
|
||||
GFP_KERNEL,
|
||||
(struct dma_attrs *)&buf->attrs);
|
||||
VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
|
||||
}
|
||||
if (err)
|
||||
if (err) {
|
||||
err = -ENOMEM;
|
||||
pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx\n",
|
||||
current->comm, __func__, size);
|
||||
goto bail;
|
||||
}
|
||||
if (fl->sctx->smmu.cb)
|
||||
buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
|
||||
vmid = fl->apps->channel[fl->cid].vmid;
|
||||
|
@ -799,6 +856,13 @@ static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
|
|||
goto bail;
|
||||
}
|
||||
|
||||
if (remote) {
|
||||
INIT_HLIST_NODE(&buf->hn_rem);
|
||||
spin_lock(&fl->hlock);
|
||||
hlist_add_head(&buf->hn_rem, &fl->remote_bufs);
|
||||
spin_unlock(&fl->hlock);
|
||||
buf->remote = remote;
|
||||
}
|
||||
*obuf = buf;
|
||||
bail:
|
||||
if (err && buf)
|
||||
|
@ -806,7 +870,6 @@ static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
|
|||
return err;
|
||||
}
|
||||
|
||||
|
||||
static int context_restore_interrupted(struct fastrpc_file *fl,
|
||||
struct fastrpc_ioctl_invoke_attrs *inv,
|
||||
struct smq_invoke_ctx **po)
|
||||
|
@ -815,6 +878,7 @@ static int context_restore_interrupted(struct fastrpc_file *fl,
|
|||
struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
|
||||
struct hlist_node *n;
|
||||
struct fastrpc_ioctl_invoke *invoke = &inv->inv;
|
||||
|
||||
spin_lock(&fl->hlock);
|
||||
hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
|
||||
if (ictx->pid == current->pid) {
|
||||
|
@ -854,6 +918,7 @@ static int context_build_overlap(struct smq_invoke_ctx *ctx)
|
|||
int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
|
||||
int nbufs = inbufs + outbufs;
|
||||
struct overlap max;
|
||||
|
||||
for (i = 0; i < nbufs; ++i) {
|
||||
ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
|
||||
ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
|
||||
|
@ -1003,12 +1068,13 @@ bail:
|
|||
static void context_save_interrupted(struct smq_invoke_ctx *ctx)
|
||||
{
|
||||
struct fastrpc_ctx_lst *clst = &ctx->fl->clst;
|
||||
|
||||
spin_lock(&ctx->fl->hlock);
|
||||
hlist_del_init(&ctx->hn);
|
||||
hlist_add_head(&ctx->hn, &clst->interrupted);
|
||||
spin_unlock(&ctx->fl->hlock);
|
||||
/* free the cache on power collapse */
|
||||
fastrpc_buf_list_free(ctx->fl);
|
||||
fastrpc_cached_buf_list_free(ctx->fl);
|
||||
}
|
||||
|
||||
static void context_free(struct smq_invoke_ctx *ctx)
|
||||
|
@ -1044,11 +1110,11 @@ static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
|
|||
complete(&ctx->work);
|
||||
}
|
||||
|
||||
|
||||
static void fastrpc_notify_users(struct fastrpc_file *me)
|
||||
{
|
||||
struct smq_invoke_ctx *ictx;
|
||||
struct hlist_node *n;
|
||||
|
||||
spin_lock(&me->hlock);
|
||||
hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
|
||||
complete(&ictx->work);
|
||||
|
@ -1064,6 +1130,7 @@ static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
|
|||
{
|
||||
struct fastrpc_file *fl;
|
||||
struct hlist_node *n;
|
||||
|
||||
spin_lock(&me->hlock);
|
||||
hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
|
||||
if (fl->cid == cid)
|
||||
|
@ -1072,6 +1139,7 @@ static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
|
|||
spin_unlock(&me->hlock);
|
||||
|
||||
}
|
||||
|
||||
static void context_list_ctor(struct fastrpc_ctx_lst *me)
|
||||
{
|
||||
INIT_HLIST_HEAD(&me->interrupted);
|
||||
|
@ -1083,6 +1151,7 @@ static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
|
|||
struct fastrpc_ctx_lst *clst = &fl->clst;
|
||||
struct smq_invoke_ctx *ictx = NULL, *ctxfree;
|
||||
struct hlist_node *n;
|
||||
|
||||
do {
|
||||
ctxfree = NULL;
|
||||
spin_lock(&fl->hlock);
|
||||
|
@ -1110,10 +1179,12 @@ static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
|
|||
}
|
||||
|
||||
static int fastrpc_file_free(struct fastrpc_file *fl);
|
||||
|
||||
static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
|
||||
{
|
||||
struct fastrpc_file *fl, *free;
|
||||
struct hlist_node *n;
|
||||
|
||||
do {
|
||||
free = NULL;
|
||||
spin_lock(&me->hlock);
|
||||
|
@ -1187,7 +1258,10 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
|
|||
|
||||
/* allocate new buffer */
|
||||
if (copylen) {
|
||||
VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
|
||||
DEFINE_DMA_ATTRS(ctx_attrs);
|
||||
|
||||
err = fastrpc_buf_alloc(ctx->fl, copylen, ctx_attrs,
|
||||
0, 0, &ctx->buf);
|
||||
if (err)
|
||||
goto bail;
|
||||
}
|
||||
|
@ -1206,6 +1280,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
|
|||
args = (uintptr_t)ctx->buf->virt + metalen;
|
||||
for (i = 0; i < bufs; ++i) {
|
||||
size_t len = lpra[i].buf.len;
|
||||
|
||||
list[i].num = 0;
|
||||
list[i].pgidx = 0;
|
||||
if (!len)
|
||||
|
@ -1220,6 +1295,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
|
|||
struct fastrpc_mmap *map = ctx->maps[i];
|
||||
uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
|
||||
size_t len = lpra[i].buf.len;
|
||||
|
||||
rpra[i].buf.pv = 0;
|
||||
rpra[i].buf.len = len;
|
||||
if (!len)
|
||||
|
@ -1561,6 +1637,7 @@ static void smd_event_handler(void *priv, unsigned event)
|
|||
static void fastrpc_init(struct fastrpc_apps *me)
|
||||
{
|
||||
int i;
|
||||
|
||||
INIT_HLIST_HEAD(&me->drivers);
|
||||
INIT_HLIST_HEAD(&me->maps);
|
||||
spin_lock_init(&me->hlock);
|
||||
|
@ -1684,6 +1761,7 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
|
|||
struct fastrpc_ioctl_init *init = &uproc->init;
|
||||
struct smq_phy_page pages[1];
|
||||
struct fastrpc_mmap *file = NULL, *mem = NULL;
|
||||
struct fastrpc_buf *imem = NULL;
|
||||
char *proc_name = NULL;
|
||||
int srcVM[1] = {VMID_HLOS};
|
||||
int destVM[1] = {gcinfo[0].heap_vmid};
|
||||
|
@ -1696,6 +1774,7 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
|
|||
if (init->flags == FASTRPC_INIT_ATTACH) {
|
||||
remote_arg_t ra[1];
|
||||
int tgid = current->tgid;
|
||||
|
||||
ra[0].buf.pv = (void *)&tgid;
|
||||
ra[0].buf.len = sizeof(tgid);
|
||||
ioctl.inv.handle = 1;
|
||||
|
@ -1712,6 +1791,8 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
|
|||
remote_arg_t ra[6];
|
||||
int fds[6];
|
||||
int mflags = 0;
|
||||
int memlen;
|
||||
DEFINE_DMA_ATTRS(imem_dma_attr);
|
||||
struct {
|
||||
int pgid;
|
||||
unsigned int namelen;
|
||||
|
@ -1734,14 +1815,27 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
|
|||
if (err)
|
||||
goto bail;
|
||||
}
|
||||
if (!access_ok(1, (void const __user *)init->mem,
|
||||
init->memlen))
|
||||
goto bail;
|
||||
inbuf.pageslen = 1;
|
||||
VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
|
||||
init->mem, init->memlen, mflags, &mem));
|
||||
|
||||
VERIFY(err, !init->mem);
|
||||
if (err) {
|
||||
err = -EINVAL;
|
||||
pr_err("adsprpc: %s: %s: ERROR: donated memory allocated in userspace\n",
|
||||
current->comm, __func__);
|
||||
goto bail;
|
||||
}
|
||||
memlen = ALIGN(max(1024*1024*3, (int)init->filelen * 4),
|
||||
1024*1024);
|
||||
|
||||
dma_set_attr(DMA_ATTR_EXEC_MAPPING, &imem_dma_attr);
|
||||
dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &imem_dma_attr);
|
||||
dma_set_attr(DMA_ATTR_FORCE_NON_COHERENT, &imem_dma_attr);
|
||||
|
||||
err = fastrpc_buf_alloc(fl, memlen, imem_dma_attr, 0, 0, &imem);
|
||||
if (err)
|
||||
goto bail;
|
||||
fl->init_mem = imem;
|
||||
|
||||
inbuf.pageslen = 1;
|
||||
ra[0].buf.pv = (void *)&inbuf;
|
||||
ra[0].buf.len = sizeof(inbuf);
|
||||
|
@ -1755,8 +1849,8 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
|
|||
ra[2].buf.len = inbuf.filelen;
|
||||
fds[2] = init->filefd;
|
||||
|
||||
pages[0].addr = mem->phys;
|
||||
pages[0].size = mem->size;
|
||||
pages[0].addr = imem->phys;
|
||||
pages[0].size = imem->size;
|
||||
ra[3].buf.pv = (void *)pages;
|
||||
ra[3].buf.len = 1 * sizeof(*pages);
|
||||
fds[3] = 0;
|
||||
|
@ -1896,7 +1990,8 @@ bail:
|
|||
}
|
||||
|
||||
static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
|
||||
struct fastrpc_mmap *map)
|
||||
uintptr_t va, uint64_t phys,
|
||||
size_t size, uintptr_t *raddr)
|
||||
{
|
||||
struct fastrpc_ioctl_invoke_attrs ioctl;
|
||||
struct smq_phy_page page;
|
||||
|
@ -1913,14 +2008,15 @@ static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
|
|||
struct {
|
||||
uintptr_t vaddrout;
|
||||
} routargs;
|
||||
|
||||
inargs.pid = current->tgid;
|
||||
inargs.vaddrin = (uintptr_t)map->va;
|
||||
inargs.vaddrin = (uintptr_t)va;
|
||||
inargs.flags = flags;
|
||||
inargs.num = fl->apps->compat ? num * sizeof(page) : num;
|
||||
ra[0].buf.pv = (void *)&inargs;
|
||||
ra[0].buf.len = sizeof(inargs);
|
||||
page.addr = map->phys;
|
||||
page.size = map->size;
|
||||
page.addr = phys;
|
||||
page.size = size;
|
||||
ra[1].buf.pv = (void *)&page;
|
||||
ra[1].buf.len = num * sizeof(page);
|
||||
|
||||
|
@ -1937,15 +2033,15 @@ static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
|
|||
ioctl.attrs = NULL;
|
||||
VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
|
||||
FASTRPC_MODE_PARALLEL, 1, &ioctl)));
|
||||
map->raddr = (uintptr_t)routargs.vaddrout;
|
||||
*raddr = (uintptr_t)routargs.vaddrout;
|
||||
if (err)
|
||||
goto bail;
|
||||
if (flags == ADSP_MMAP_HEAP_ADDR) {
|
||||
struct scm_desc desc = {0};
|
||||
|
||||
desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
|
||||
desc.args[1] = map->phys;
|
||||
desc.args[2] = map->size;
|
||||
desc.args[1] = phys;
|
||||
desc.args[2] = size;
|
||||
desc.arginfo = SCM_ARGS(3);
|
||||
err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
|
||||
TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
|
||||
|
@ -1955,7 +2051,7 @@ static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
|
|||
int destVM[1] = {gcinfo[0].heap_vmid};
|
||||
int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
|
||||
|
||||
VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
|
||||
VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
|
||||
srcVM, 1, destVM, destVMperm, 1));
|
||||
if (err)
|
||||
goto bail;
|
||||
|
@ -1964,16 +2060,17 @@ bail:
|
|||
return err;
|
||||
}
|
||||
|
||||
static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
|
||||
struct fastrpc_mmap *map)
|
||||
static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl, uint64_t phys,
|
||||
size_t size, uint32_t flags)
|
||||
{
|
||||
int err = 0;
|
||||
int srcVM[1] = {gcinfo[0].heap_vmid};
|
||||
int destVM[1] = {VMID_HLOS};
|
||||
int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
|
||||
|
||||
if (map->flags == ADSP_MMAP_HEAP_ADDR) {
|
||||
if (flags == ADSP_MMAP_HEAP_ADDR) {
|
||||
struct fastrpc_ioctl_invoke_attrs ioctl;
|
||||
|
||||
struct scm_desc desc = {0};
|
||||
remote_arg_t ra[1];
|
||||
int err = 0;
|
||||
|
@ -1997,14 +2094,14 @@ static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
|
|||
if (err)
|
||||
goto bail;
|
||||
desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
|
||||
desc.args[1] = map->phys;
|
||||
desc.args[2] = map->size;
|
||||
desc.args[1] = phys;
|
||||
desc.args[2] = size;
|
||||
desc.args[3] = routargs.skey;
|
||||
desc.arginfo = SCM_ARGS(4);
|
||||
err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
|
||||
TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
|
||||
} else if (map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
|
||||
VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
|
||||
} else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
|
||||
VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
|
||||
srcVM, 1, destVM, destVMperm, 1));
|
||||
if (err)
|
||||
goto bail;
|
||||
|
@ -2014,8 +2111,8 @@ bail:
|
|||
return err;
|
||||
}
|
||||
|
||||
static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
|
||||
struct fastrpc_mmap *map)
|
||||
static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl, uintptr_t raddr,
|
||||
uint64_t phys, size_t size, uint32_t flags)
|
||||
{
|
||||
struct fastrpc_ioctl_invoke_attrs ioctl;
|
||||
remote_arg_t ra[1];
|
||||
|
@ -2027,8 +2124,8 @@ static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
|
|||
} inargs;
|
||||
|
||||
inargs.pid = current->tgid;
|
||||
inargs.size = map->size;
|
||||
inargs.vaddrout = map->raddr;
|
||||
inargs.size = size;
|
||||
inargs.vaddrout = raddr;
|
||||
ra[0].buf.pv = (void *)&inargs;
|
||||
ra[0].buf.len = sizeof(inargs);
|
||||
|
||||
|
@ -2044,9 +2141,9 @@ static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
|
|||
FASTRPC_MODE_PARALLEL, 1, &ioctl)));
|
||||
if (err)
|
||||
goto bail;
|
||||
if (map->flags == ADSP_MMAP_HEAP_ADDR ||
|
||||
map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
|
||||
VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, map));
|
||||
if (flags == ADSP_MMAP_HEAP_ADDR ||
|
||||
flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
|
||||
VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, phys, size, flags));
|
||||
if (err)
|
||||
goto bail;
|
||||
}
|
||||
|
@ -2073,7 +2170,8 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
|
|||
spin_unlock(&me->hlock);
|
||||
|
||||
if (match) {
|
||||
VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match));
|
||||
VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match->phys,
|
||||
match->size, match->flags));
|
||||
if (err)
|
||||
goto bail;
|
||||
if (me->channel[0].ramdumpenabled) {
|
||||
|
@ -2110,12 +2208,37 @@ static int fastrpc_internal_munmap(struct fastrpc_file *fl,
|
|||
{
|
||||
int err = 0;
|
||||
struct fastrpc_mmap *map = NULL;
|
||||
struct fastrpc_buf *rbuf = NULL, *free = NULL;
|
||||
struct hlist_node *n;
|
||||
|
||||
mutex_lock(&fl->map_mutex);
|
||||
spin_lock(&fl->hlock);
|
||||
hlist_for_each_entry_safe(rbuf, n, &fl->remote_bufs, hn_rem) {
|
||||
if (rbuf->raddr && (rbuf->flags == ADSP_MMAP_ADD_PAGES)) {
|
||||
if ((rbuf->raddr == ud->vaddrout) &&
|
||||
(rbuf->size == ud->size)) {
|
||||
free = rbuf;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
spin_unlock(&fl->hlock);
|
||||
|
||||
if (free) {
|
||||
VERIFY(err, !fastrpc_munmap_on_dsp(fl, free->raddr,
|
||||
free->phys, free->size, free->flags));
|
||||
if (err)
|
||||
goto bail;
|
||||
fastrpc_buf_free(rbuf, 0);
|
||||
mutex_unlock(&fl->map_mutex);
|
||||
return err;
|
||||
}
|
||||
|
||||
VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
|
||||
if (err)
|
||||
goto bail;
|
||||
VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
|
||||
VERIFY(err, !fastrpc_munmap_on_dsp(fl, map->raddr,
|
||||
map->phys, map->size, map->flags));
|
||||
if (err)
|
||||
goto bail;
|
||||
fastrpc_mmap_free(map);
|
||||
|
@ -2131,22 +2254,55 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl,
|
|||
{
|
||||
|
||||
struct fastrpc_mmap *map = NULL;
|
||||
struct fastrpc_buf *rbuf = NULL;
|
||||
uintptr_t raddr = 0;
|
||||
int err = 0;
|
||||
|
||||
mutex_lock(&fl->map_mutex);
|
||||
if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin, ud->size,
|
||||
ud->flags, &map)){
|
||||
mutex_unlock(&fl->map_mutex);
|
||||
return 0;
|
||||
|
||||
if (ud->flags == ADSP_MMAP_ADD_PAGES) {
|
||||
DEFINE_DMA_ATTRS(dma_attr);
|
||||
|
||||
if (ud->vaddrin) {
|
||||
err = -EINVAL;
|
||||
pr_err("adsprpc: %s: %s: ERROR: adding user allocated pages is not supported\n",
|
||||
current->comm, __func__);
|
||||
goto bail;
|
||||
}
|
||||
dma_set_attr(DMA_ATTR_EXEC_MAPPING, &dma_attr);
|
||||
dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &dma_attr);
|
||||
dma_set_attr(DMA_ATTR_FORCE_NON_COHERENT, &dma_attr);
|
||||
|
||||
err = fastrpc_buf_alloc(fl, ud->size, dma_attr, ud->flags,
|
||||
1, &rbuf);
|
||||
if (err)
|
||||
goto bail;
|
||||
err = fastrpc_mmap_on_dsp(fl, ud->flags, 0,
|
||||
rbuf->phys, rbuf->size, &raddr);
|
||||
if (err)
|
||||
goto bail;
|
||||
rbuf->raddr = raddr;
|
||||
} else {
|
||||
uintptr_t va_to_dsp;
|
||||
|
||||
VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
|
||||
(uintptr_t)ud->vaddrin, ud->size,
|
||||
ud->flags, &map));
|
||||
if (err)
|
||||
goto bail;
|
||||
|
||||
if (ud->flags == ADSP_MMAP_HEAP_ADDR ||
|
||||
ud->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
|
||||
va_to_dsp = 0;
|
||||
else
|
||||
va_to_dsp = (uintptr_t)map->va;
|
||||
VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, va_to_dsp,
|
||||
map->phys, map->size, &raddr));
|
||||
if (err)
|
||||
goto bail;
|
||||
map->raddr = raddr;
|
||||
}
|
||||
VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
|
||||
(uintptr_t)ud->vaddrin, ud->size, ud->flags, &map));
|
||||
if (err)
|
||||
goto bail;
|
||||
VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
|
||||
if (err)
|
||||
goto bail;
|
||||
ud->vaddrout = map->raddr;
|
||||
ud->vaddrout = raddr;
|
||||
bail:
|
||||
if (err && map)
|
||||
fastrpc_mmap_free(map);
|
||||
|
@ -2321,15 +2477,16 @@ static int fastrpc_file_free(struct fastrpc_file *fl)
|
|||
spin_unlock(&fl->apps->hlock);
|
||||
kfree(fl->debug_buf);
|
||||
|
||||
if (!fl->sctx) {
|
||||
if (!fl->sctx)
|
||||
goto bail;
|
||||
}
|
||||
|
||||
spin_lock(&fl->hlock);
|
||||
fl->file_close = 1;
|
||||
spin_unlock(&fl->hlock);
|
||||
if (!IS_ERR_OR_NULL(fl->init_mem))
|
||||
fastrpc_buf_free(fl->init_mem, 0);
|
||||
fastrpc_context_list_dtor(fl);
|
||||
fastrpc_buf_list_free(fl);
|
||||
fastrpc_cached_buf_list_free(fl);
|
||||
hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
|
||||
fastrpc_mmap_free(map);
|
||||
}
|
||||
|
@ -2341,6 +2498,7 @@ static int fastrpc_file_free(struct fastrpc_file *fl)
|
|||
if (fl->secsctx)
|
||||
fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
|
||||
bail:
|
||||
fastrpc_remote_buf_list_free(fl);
|
||||
mutex_destroy(&fl->map_mutex);
|
||||
kfree(fl);
|
||||
return 0;
|
||||
|
@ -2642,7 +2800,7 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
|
|||
}
|
||||
len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
|
||||
"\n======%s %s %s======\n", title,
|
||||
" LIST OF BUFS ", title);
|
||||
" LIST OF CACHED BUFS ", title);
|
||||
spin_lock(&fl->hlock);
|
||||
len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
|
||||
"%-19s|%-19s|%-19s\n",
|
||||
|
@ -2650,7 +2808,7 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
|
|||
len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
|
||||
"%s%s%s%s%s\n", single_line, single_line,
|
||||
single_line, single_line, single_line);
|
||||
hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
|
||||
hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
|
||||
len += scnprintf(fileinfo + len,
|
||||
DEBUGFS_SIZE - len,
|
||||
"0x%-17p|0x%-17llX|%-19zu\n",
|
||||
|
@ -2790,7 +2948,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
|
|||
if (err)
|
||||
return err;
|
||||
snprintf(strpid, PID_SIZE, "%d", current->pid);
|
||||
buf_size = strlen(current->comm) + strlen(strpid) + 1;
|
||||
buf_size = strlen(current->comm) + strlen("_") + strlen(strpid) + 1;
|
||||
fl->debug_buf = kzalloc(buf_size, GFP_KERNEL);
|
||||
snprintf(fl->debug_buf, UL_SIZE, "%.10s%s%d",
|
||||
current->comm, "_", current->pid);
|
||||
|
@ -2800,12 +2958,15 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
|
|||
context_list_ctor(&fl->clst);
|
||||
spin_lock_init(&fl->hlock);
|
||||
INIT_HLIST_HEAD(&fl->maps);
|
||||
INIT_HLIST_HEAD(&fl->bufs);
|
||||
INIT_HLIST_HEAD(&fl->cached_bufs);
|
||||
INIT_HLIST_HEAD(&fl->remote_bufs);
|
||||
INIT_HLIST_NODE(&fl->hn);
|
||||
fl->tgid = current->tgid;
|
||||
fl->apps = me;
|
||||
fl->mode = FASTRPC_MODE_SERIAL;
|
||||
fl->cid = -1;
|
||||
fl->init_mem = NULL;
|
||||
|
||||
if (debugfs_file != NULL)
|
||||
fl->debugfs_file = debugfs_file;
|
||||
memset(&fl->perf, 0, sizeof(fl->perf));
|
||||
|
@ -2843,6 +3004,30 @@ bail:
|
|||
return err;
|
||||
}
|
||||
|
||||
static int fastrpc_internal_control(struct fastrpc_file *fl,
|
||||
struct fastrpc_ioctl_control *cp)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
|
||||
if (err)
|
||||
goto bail;
|
||||
VERIFY(err, !IS_ERR_OR_NULL(cp));
|
||||
if (err)
|
||||
goto bail;
|
||||
|
||||
switch (cp->req) {
|
||||
case FASTRPC_CONTROL_KALLOC:
|
||||
cp->kalloc.kalloc_support = 1;
|
||||
break;
|
||||
default:
|
||||
err = -ENOTTY;
|
||||
break;
|
||||
}
|
||||
bail:
|
||||
return err;
|
||||
}
|
||||
|
||||
static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
|
||||
unsigned long ioctl_param)
|
||||
{
|
||||
|
@ -2852,6 +3037,7 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
|
|||
struct fastrpc_ioctl_munmap munmap;
|
||||
struct fastrpc_ioctl_init_attrs init;
|
||||
struct fastrpc_ioctl_perf perf;
|
||||
struct fastrpc_ioctl_control cp;
|
||||
} p;
|
||||
void *param = (char *)ioctl_param;
|
||||
struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
|
||||
|
@ -2967,8 +3153,22 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
|
|||
if (err)
|
||||
goto bail;
|
||||
break;
|
||||
case FASTRPC_IOCTL_CONTROL:
|
||||
K_COPY_FROM_USER(err, 0, &p.cp, param,
|
||||
sizeof(p.cp));
|
||||
if (err)
|
||||
goto bail;
|
||||
VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
|
||||
if (err)
|
||||
goto bail;
|
||||
if (p.cp.req == FASTRPC_CONTROL_KALLOC) {
|
||||
K_COPY_TO_USER(err, 0, param, &p.cp, sizeof(p.cp));
|
||||
if (err)
|
||||
goto bail;
|
||||
}
|
||||
break;
|
||||
case FASTRPC_IOCTL_GETINFO:
|
||||
K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
|
||||
K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
|
||||
if (err)
|
||||
goto bail;
|
||||
VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
|
||||
|
@ -3283,6 +3483,7 @@ static void fastrpc_deinit(void)
|
|||
}
|
||||
for (j = 0; j < NUM_SESSIONS; j++) {
|
||||
struct fastrpc_session_ctx *sess = &chan->session[j];
|
||||
|
||||
if (sess->smmu.dev) {
|
||||
arm_iommu_detach_device(sess->smmu.dev);
|
||||
sess->smmu.dev = NULL;
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/*
|
||||
/*
|
||||
* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
|
@ -36,6 +36,8 @@
|
|||
_IOWR('R', 9, struct compat_fastrpc_ioctl_perf)
|
||||
#define COMPAT_FASTRPC_IOCTL_INIT_ATTRS \
|
||||
_IOWR('R', 10, struct compat_fastrpc_ioctl_init_attrs)
|
||||
#define COMPAT_FASTRPC_IOCTL_CONTROL \
|
||||
_IOWR('R', 12, struct compat_fastrpc_ioctl_control)
|
||||
#define COMPAT_FASTRPC_IOCTL_MMAP_64 \
|
||||
_IOWR('R', 14, struct compat_fastrpc_ioctl_mmap_64)
|
||||
#define COMPAT_FASTRPC_IOCTL_MUNMAP_64 \
|
||||
|
@ -117,6 +119,31 @@ struct compat_fastrpc_ioctl_perf { /* kernel performance data */
|
|||
compat_uptr_t keys;
|
||||
};
|
||||
|
||||
#define FASTRPC_CONTROL_LATENCY (1)
|
||||
struct compat_fastrpc_ctrl_latency {
|
||||
compat_uint_t enable; /* latency control enable */
|
||||
compat_uint_t level; /* level of control */
|
||||
};
|
||||
|
||||
#define FASTRPC_CONTROL_SMMU (2)
|
||||
struct compat_fastrpc_ctrl_smmu {
|
||||
compat_uint_t sharedcb;
|
||||
};
|
||||
|
||||
#define FASTRPC_CONTROL_KALLOC (3)
|
||||
struct compat_fastrpc_ctrl_kalloc {
|
||||
compat_uint_t kalloc_support; /* Remote memory allocation from kernel */
|
||||
};
|
||||
|
||||
struct compat_fastrpc_ioctl_control {
|
||||
compat_uint_t req;
|
||||
union {
|
||||
struct compat_fastrpc_ctrl_latency lp;
|
||||
struct compat_fastrpc_ctrl_smmu smmu;
|
||||
struct compat_fastrpc_ctrl_kalloc kalloc;
|
||||
};
|
||||
};
|
||||
|
||||
static int compat_get_fastrpc_ioctl_invoke(
|
||||
struct compat_fastrpc_ioctl_invoke_attrs __user *inv32,
|
||||
struct fastrpc_ioctl_invoke_attrs __user **inva,
|
||||
|
@ -322,6 +349,19 @@ static int compat_get_fastrpc_ioctl_perf(
|
|||
return err;
|
||||
}
|
||||
|
||||
static int compat_get_fastrpc_ioctl_control(
|
||||
struct compat_fastrpc_ioctl_control __user *ctrl32,
|
||||
struct fastrpc_ioctl_control __user *ctrl)
|
||||
{
|
||||
compat_uptr_t p;
|
||||
int err;
|
||||
|
||||
err = get_user(p, &ctrl32->req);
|
||||
err |= put_user(p, &ctrl->req);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int compat_get_fastrpc_ioctl_init(
|
||||
struct compat_fastrpc_ioctl_init_attrs __user *init32,
|
||||
struct fastrpc_ioctl_init_attrs __user *init,
|
||||
|
@ -513,6 +553,34 @@ long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd,
|
|||
case FASTRPC_IOCTL_SETMODE:
|
||||
return filp->f_op->unlocked_ioctl(filp, cmd,
|
||||
(unsigned long)compat_ptr(arg));
|
||||
case COMPAT_FASTRPC_IOCTL_CONTROL:
|
||||
{
|
||||
struct compat_fastrpc_ioctl_control __user *ctrl32;
|
||||
struct fastrpc_ioctl_control __user *ctrl;
|
||||
compat_uptr_t p;
|
||||
|
||||
ctrl32 = compat_ptr(arg);
|
||||
VERIFY(err, NULL != (ctrl = compat_alloc_user_space(
|
||||
sizeof(*ctrl))));
|
||||
if (err)
|
||||
return -EFAULT;
|
||||
VERIFY(err, 0 == compat_get_fastrpc_ioctl_control(ctrl32,
|
||||
ctrl));
|
||||
if (err)
|
||||
return err;
|
||||
err = filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_CONTROL,
|
||||
(unsigned long)ctrl);
|
||||
if (err)
|
||||
return err;
|
||||
err = get_user(p, &ctrl32->req);
|
||||
if (err)
|
||||
return err;
|
||||
if (p == FASTRPC_CONTROL_KALLOC) {
|
||||
err = get_user(p, &ctrl->kalloc.kalloc_support);
|
||||
err |= put_user(p, &ctrl32->kalloc.kalloc_support);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
case COMPAT_FASTRPC_IOCTL_GETPERF:
|
||||
{
|
||||
struct compat_fastrpc_ioctl_perf __user *perf32;
|
||||
|
|
|
@ -29,6 +29,7 @@
|
|||
#define FASTRPC_IOCTL_GETINFO _IOWR('R', 8, uint32_t)
|
||||
#define FASTRPC_IOCTL_GETPERF _IOWR('R', 9, struct fastrpc_ioctl_perf)
|
||||
#define FASTRPC_IOCTL_INIT_ATTRS _IOWR('R', 10, struct fastrpc_ioctl_init_attrs)
|
||||
#define FASTRPC_IOCTL_CONTROL _IOWR('R', 12, struct fastrpc_ioctl_control)
|
||||
|
||||
#define FASTRPC_GLINK_GUID "fastrpcglink-apps-dsp"
|
||||
#define FASTRPC_SMD_GUID "fastrpcsmd-apps-dsp"
|
||||
|
@ -188,7 +189,7 @@ struct fastrpc_ioctl_mmap {
|
|||
|
||||
|
||||
struct fastrpc_ioctl_mmap_64 {
|
||||
int fd; /* ion fd */
|
||||
int fd; /* ion fd */
|
||||
uint32_t flags; /* flags for dsp to map with */
|
||||
uint64_t vaddrin; /* optional virtual address */
|
||||
size_t size; /* size */
|
||||
|
@ -201,6 +202,31 @@ struct fastrpc_ioctl_perf { /* kernel performance data */
|
|||
uintptr_t keys;
|
||||
};
|
||||
|
||||
#define FASTRPC_CONTROL_LATENCY (1)
|
||||
struct fastrpc_ctrl_latency {
|
||||
uint32_t enable; /* latency control enable */
|
||||
uint32_t level; /* level of control */
|
||||
};
|
||||
|
||||
#define FASTRPC_CONTROL_SMMU (2)
|
||||
struct fastrpc_ctrl_smmu {
|
||||
uint32_t sharedcb;
|
||||
};
|
||||
|
||||
#define FASTRPC_CONTROL_KALLOC (3)
|
||||
struct fastrpc_ctrl_kalloc {
|
||||
uint32_t kalloc_support; /* Remote memory allocation from kernel */
|
||||
};
|
||||
|
||||
struct fastrpc_ioctl_control {
|
||||
uint32_t req;
|
||||
union {
|
||||
struct fastrpc_ctrl_latency lp;
|
||||
struct fastrpc_ctrl_smmu smmu;
|
||||
struct fastrpc_ctrl_kalloc kalloc;
|
||||
};
|
||||
};
|
||||
|
||||
struct smq_null_invoke {
|
||||
uint64_t ctx; /* invoke caller context */
|
||||
uint32_t handle; /* handle to invoke */
|
||||
|
@ -245,6 +271,7 @@ static inline struct smq_phy_page *smq_phy_page_start(uint32_t sc,
|
|||
struct smq_invoke_buf *buf)
|
||||
{
|
||||
uint64_t nTotal = REMOTE_SCALARS_INBUFS(sc)+REMOTE_SCALARS_OUTBUFS(sc);
|
||||
|
||||
return (struct smq_phy_page *)(&buf[nTotal]);
|
||||
}
|
||||
|
||||
|
|
|
@@ -1748,6 +1748,7 @@ static int __diag_mask_init(struct diag_mask_info *mask_info, int mask_len,
 	mask_info->update_buf = kzalloc(update_buf_len, GFP_KERNEL);
 	if (!mask_info->update_buf) {
 		kfree(mask_info->ptr);
+		mask_info->ptr = NULL;
 		return -ENOMEM;
 	}
 	kmemleak_not_leak(mask_info->update_buf);
@@ -1173,8 +1173,7 @@ static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
 				goto exit;
 			}

-			k_align_dst += creq->vbuf.dst[dst_i].len +
-						byteoffset;
+			k_align_dst += creq->vbuf.dst[dst_i].len;
 			creq->data_len -= creq->vbuf.dst[dst_i].len;
 			dst_i++;
 		} else {
@@ -1897,6 +1897,37 @@ static int sde_dbg_reg_base_release(struct inode *inode, struct file *file)
 	return 0;
 }

+/**
+ * sde_dbg_reg_base_is_valid_range - verify if requested memory range is valid
+ * @off: address offset in bytes
+ * @cnt: memory size in bytes
+ * Return: true if valid; false otherwise
+ */
+static bool sde_dbg_reg_base_is_valid_range(u32 off, u32 cnt)
+{
+	static struct sde_dbg_base *dbg_base = &sde_dbg_base;
+	struct sde_dbg_reg_range *node;
+	struct sde_dbg_reg_base *base;
+
+	pr_debug("check offset=0x%x cnt=0x%x\n", off, cnt);
+
+	list_for_each_entry(base, &dbg_base->reg_base_list, reg_base_head) {
+		list_for_each_entry(node, &base->sub_range_list, head) {
+			pr_debug("%s: start=0x%x end=0x%x\n", node->range_name,
+					node->offset.start, node->offset.end);
+
+			if (node->offset.start <= off
+					&& off <= node->offset.end
+					&& off + cnt <= node->offset.end) {
+				pr_debug("valid range requested\n");
+				return true;
+			}
+		}
+	}
+
+	pr_err("invalid range requested\n");
+	return false;
+}
+
 /**
  * sde_dbg_reg_base_offset_write - set new offset and len to debugfs reg base
@@ -1951,8 +1982,15 @@ static ssize_t sde_dbg_reg_base_offset_write(struct file *file,
 		goto exit;
 	}

-	if (cnt == 0)
-		return -EINVAL;
+	if (cnt == 0) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (!sde_dbg_reg_base_is_valid_range(off, cnt)) {
+		rc = -EINVAL;
+		goto exit;
+	}

 	dbg->off = off;
 	dbg->cnt = cnt;
@ -2196,6 +2196,42 @@ static struct attribute *coresight_etmv4_attrs[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
struct etmv4_reg {
|
||||
void __iomem *addr;
|
||||
u32 data;
|
||||
};
|
||||
|
||||
static void do_smp_cross_read(void *data)
|
||||
{
|
||||
struct etmv4_reg *reg = data;
|
||||
|
||||
reg->data = readl_relaxed(reg->addr);
|
||||
}
|
||||
|
||||
static u32 etmv4_cross_read(const struct device *dev, u32 offset)
|
||||
{
|
||||
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
|
||||
struct etmv4_reg reg;
|
||||
|
||||
reg.addr = drvdata->base + offset;
|
||||
|
||||
smp_call_function_single(drvdata->cpu, do_smp_cross_read, ®, 1);
|
||||
return reg.data;
|
||||
}
|
||||
#define coresight_cross_read(name, offset) \
|
||||
static ssize_t name##_show(struct device *_dev, \
|
||||
struct device_attribute *attr, char *buf) \
|
||||
{ \
|
||||
u32 val; \
|
||||
pm_runtime_get_sync(_dev->parent); \
|
||||
\
|
||||
val = etmv4_cross_read(_dev->parent, offset); \
|
||||
\
|
||||
pm_runtime_put_sync(_dev->parent); \
|
||||
return scnprintf(buf, PAGE_SIZE, "0x%x\n", val); \
|
||||
} \
|
||||
static DEVICE_ATTR_RO(name)
|
||||
|
||||
#define coresight_simple_func(name, offset) \
|
||||
static ssize_t name##_show(struct device *_dev, \
|
||||
struct device_attribute *attr, char *buf) \
|
||||
|
@ -2206,17 +2242,17 @@ static ssize_t name##_show(struct device *_dev, \
|
|||
} \
|
||||
DEVICE_ATTR_RO(name)
|
||||
|
||||
coresight_simple_func(trcoslsr, TRCOSLSR);
|
||||
coresight_simple_func(trcpdcr, TRCPDCR);
|
||||
coresight_simple_func(trcpdsr, TRCPDSR);
|
||||
coresight_simple_func(trclsr, TRCLSR);
|
||||
coresight_simple_func(trcauthstatus, TRCAUTHSTATUS);
|
||||
coresight_simple_func(trcdevid, TRCDEVID);
|
||||
coresight_simple_func(trcdevtype, TRCDEVTYPE);
|
||||
coresight_simple_func(trcpidr0, TRCPIDR0);
|
||||
coresight_simple_func(trcpidr1, TRCPIDR1);
|
||||
coresight_simple_func(trcpidr2, TRCPIDR2);
|
||||
coresight_simple_func(trcpidr3, TRCPIDR3);
|
||||
coresight_cross_read(trcoslsr, TRCOSLSR);
|
||||
coresight_cross_read(trcpdcr, TRCPDCR);
|
||||
coresight_cross_read(trcpdsr, TRCPDSR);
|
||||
coresight_cross_read(trclsr, TRCLSR);
|
||||
coresight_cross_read(trcauthstatus, TRCAUTHSTATUS);
|
||||
coresight_cross_read(trcdevid, TRCDEVID);
|
||||
coresight_cross_read(trcdevtype, TRCDEVTYPE);
|
||||
coresight_cross_read(trcpidr0, TRCPIDR0);
|
||||
coresight_cross_read(trcpidr1, TRCPIDR1);
|
||||
coresight_cross_read(trcpidr2, TRCPIDR2);
|
||||
coresight_cross_read(trcpidr3, TRCPIDR3);
|
||||
|
||||
static struct attribute *coresight_etmv4_mgmt_attrs[] = {
|
||||
&dev_attr_trcoslsr.attr,
|
||||
|
@ -2233,19 +2269,19 @@ static struct attribute *coresight_etmv4_mgmt_attrs[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
coresight_simple_func(trcidr0, TRCIDR0);
|
||||
coresight_simple_func(trcidr1, TRCIDR1);
|
||||
coresight_simple_func(trcidr2, TRCIDR2);
|
||||
coresight_simple_func(trcidr3, TRCIDR3);
|
||||
coresight_simple_func(trcidr4, TRCIDR4);
|
||||
coresight_simple_func(trcidr5, TRCIDR5);
|
||||
coresight_cross_read(trcidr0, TRCIDR0);
|
||||
coresight_cross_read(trcidr1, TRCIDR1);
|
||||
coresight_cross_read(trcidr2, TRCIDR2);
|
||||
coresight_cross_read(trcidr3, TRCIDR3);
|
||||
coresight_cross_read(trcidr4, TRCIDR4);
|
||||
coresight_cross_read(trcidr5, TRCIDR5);
|
||||
/* trcidr[6,7] are reserved */
|
||||
coresight_simple_func(trcidr8, TRCIDR8);
|
||||
coresight_simple_func(trcidr9, TRCIDR9);
|
||||
coresight_simple_func(trcidr10, TRCIDR10);
|
||||
coresight_simple_func(trcidr11, TRCIDR11);
|
||||
coresight_simple_func(trcidr12, TRCIDR12);
|
||||
coresight_simple_func(trcidr13, TRCIDR13);
|
||||
coresight_cross_read(trcidr8, TRCIDR8);
|
||||
coresight_cross_read(trcidr9, TRCIDR9);
|
||||
coresight_cross_read(trcidr10, TRCIDR10);
|
||||
coresight_cross_read(trcidr11, TRCIDR11);
|
||||
coresight_cross_read(trcidr12, TRCIDR12);
|
||||
coresight_cross_read(trcidr13, TRCIDR13);
|
||||
|
||||
static struct attribute *coresight_etmv4_trcidr_attrs[] = {
|
||||
&dev_attr_trcidr0.attr,
|
||||
|
|
|
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@ -1359,6 +1359,7 @@ static ssize_t iommu_debug_dma_attach_read(struct file *file, char __user *ubuf,
|
|||
struct iommu_debug_device *ddev = file->private_data;
|
||||
struct device *dev = ddev->dev;
|
||||
char c[2];
|
||||
size_t buflen = sizeof(c);
|
||||
|
||||
if (*offset)
|
||||
return 0;
|
||||
|
@ -1369,13 +1370,14 @@ static ssize_t iommu_debug_dma_attach_read(struct file *file, char __user *ubuf,
|
|||
c[0] = dev->archdata.mapping->domain ? '1' : '0';
|
||||
|
||||
c[1] = '\n';
|
||||
if (copy_to_user(ubuf, &c, 2)) {
|
||||
buflen = min(count, buflen);
|
||||
if (copy_to_user(ubuf, &c, buflen)) {
|
||||
pr_err("copy_to_user failed\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
*offset = 1; /* non-zero means we're done */
|
||||
|
||||
return 2;
|
||||
return buflen;
|
||||
}
|
||||
|
||||
static const struct file_operations iommu_debug_dma_attach_fops = {
|
||||
|
@ -1401,7 +1403,7 @@ static ssize_t iommu_debug_virt_addr_read(struct file *file, char __user *ubuf,
|
|||
else
|
||||
snprintf(buf, 100, "0x%pK\n", virt_addr);
|
||||
|
||||
buflen = strlen(buf);
|
||||
buflen = min(count, strlen(buf)+1);
|
||||
if (copy_to_user(ubuf, buf, buflen)) {
|
||||
pr_err("Couldn't copy_to_user\n");
|
||||
retval = -EFAULT;
|
||||
|
@ -1432,19 +1434,21 @@ static ssize_t iommu_debug_attach_read(struct file *file, char __user *ubuf,
|
|||
{
|
||||
struct iommu_debug_device *ddev = file->private_data;
|
||||
char c[2];
|
||||
size_t buflen = sizeof(c);
|
||||
|
||||
if (*offset)
|
||||
return 0;
|
||||
|
||||
c[0] = ddev->domain ? '1' : '0';
|
||||
c[1] = '\n';
|
||||
if (copy_to_user(ubuf, &c, 2)) {
|
||||
buflen = min(count, buflen);
|
||||
if (copy_to_user(ubuf, &c, buflen)) {
|
||||
pr_err("copy_to_user failed\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
*offset = 1; /* non-zero means we're done */
|
||||
|
||||
return 2;
|
||||
return buflen;
|
||||
}
|
||||
|
||||
static const struct file_operations iommu_debug_attach_fops = {
|
||||
|
@ -1523,7 +1527,7 @@ static ssize_t iommu_debug_pte_read(struct file *file, char __user *ubuf,
|
|||
else
|
||||
snprintf(buf, 100, "pte=%016llx\n", pte);
|
||||
|
||||
buflen = strlen(buf);
|
||||
buflen = min(count, strlen(buf)+1);
|
||||
if (copy_to_user(ubuf, buf, buflen)) {
|
||||
pr_err("Couldn't copy_to_user\n");
|
||||
retval = -EFAULT;
|
||||
|
@ -1592,7 +1596,7 @@ static ssize_t iommu_debug_atos_read(struct file *file, char __user *ubuf,
|
|||
snprintf(buf, 100, "%pa\n", &phys);
|
||||
}
|
||||
|
||||
buflen = strlen(buf);
|
||||
buflen = min(count, strlen(buf)+1);
|
||||
if (copy_to_user(ubuf, buf, buflen)) {
|
||||
pr_err("Couldn't copy_to_user\n");
|
||||
retval = -EFAULT;
|
||||
|
@ -1645,7 +1649,7 @@ static ssize_t iommu_debug_dma_atos_read(struct file *file, char __user *ubuf,
|
|||
else
|
||||
snprintf(buf, 100, "%pa\n", &phys);
|
||||
|
||||
buflen = strlen(buf);
|
||||
buflen = min(count, strlen(buf)+1);
|
||||
if (copy_to_user(ubuf, buf, buflen)) {
|
||||
pr_err("Couldn't copy_to_user\n");
|
||||
retval = -EFAULT;
|
||||
|
@ -1876,7 +1880,7 @@ static ssize_t iommu_debug_dma_map_read(struct file *file, char __user *ubuf,
|
|||
iova = ddev->iova;
|
||||
snprintf(buf, 100, "%pa\n", &iova);
|
||||
|
||||
buflen = strlen(buf);
|
||||
buflen = min(count, strlen(buf)+1);
|
||||
if (copy_to_user(ubuf, buf, buflen)) {
|
||||
pr_err("Couldn't copy_to_user\n");
|
||||
retval = -EFAULT;
|
||||
|
|
|
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@ -215,32 +215,35 @@ static long msm_ispif_cmd_ext(struct v4l2_subdev *sd,
|
|||
long rc = 0;
|
||||
struct ispif_device *ispif =
|
||||
(struct ispif_device *)v4l2_get_subdevdata(sd);
|
||||
struct ispif_cfg_data_ext pcdata;
|
||||
struct ispif_cfg_data_ext pcdata = {0};
|
||||
struct msm_ispif_param_data_ext *params = NULL;
|
||||
|
||||
if (is_compat_task()) {
|
||||
#ifdef CONFIG_COMPAT
|
||||
struct ispif_cfg_data_ext_32 *pcdata32 =
|
||||
(struct ispif_cfg_data_ext_32 *)arg;
|
||||
struct ispif_cfg_data_ext_32 *pcdata32 =
|
||||
(struct ispif_cfg_data_ext_32 *)arg;
|
||||
|
||||
if (pcdata32 == NULL) {
|
||||
pr_err("Invalid params passed from user\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
pcdata.cfg_type = pcdata32->cfg_type;
|
||||
pcdata.size = pcdata32->size;
|
||||
pcdata.data = compat_ptr(pcdata32->data);
|
||||
if (pcdata32 == NULL) {
|
||||
pr_err("Invalid params passed from user\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
pcdata.cfg_type = pcdata32->cfg_type;
|
||||
pcdata.size = pcdata32->size;
|
||||
pcdata.data = compat_ptr(pcdata32->data);
|
||||
|
||||
#else
|
||||
struct ispif_cfg_data_ext *pcdata64 =
|
||||
(struct ispif_cfg_data_ext *)arg;
|
||||
|
||||
if (pcdata64 == NULL) {
|
||||
pr_err("Invalid params passed from user\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
pcdata.cfg_type = pcdata64->cfg_type;
|
||||
pcdata.size = pcdata64->size;
|
||||
pcdata.data = pcdata64->data;
|
||||
#endif
|
||||
} else {
|
||||
struct ispif_cfg_data_ext *pcdata64 =
|
||||
(struct ispif_cfg_data_ext *)arg;
|
||||
|
||||
if (pcdata64 == NULL) {
|
||||
pr_err("Invalid params passed from user\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
pcdata.cfg_type = pcdata64->cfg_type;
|
||||
pcdata.size = pcdata64->size;
|
||||
pcdata.data = pcdata64->data;
|
||||
}
|
||||
if (pcdata.size != sizeof(struct msm_ispif_param_data_ext)) {
|
||||
pr_err("%s: payload size mismatch\n", __func__);
|
||||
return -EINVAL;
|
||||
|
|
|
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -312,10 +312,6 @@ static long msm_ir_led_subdev_do_ioctl(
 			(struct msm_ir_led_cfg_data_t32 *)arg;
 	struct msm_ir_led_cfg_data_t ir_led_data;

-	ir_led_data.cfg_type = u32->cfg_type;
-	ir_led_data.pwm_duty_on_ns = u32->pwm_duty_on_ns;
-	ir_led_data.pwm_period_ns = u32->pwm_period_ns;
-
 	switch (cmd) {
 	case VIDIOC_MSM_IR_LED_CFG32:
 		cmd = VIDIOC_MSM_IR_LED_CFG;
@@ -324,6 +320,10 @@ static long msm_ir_led_subdev_do_ioctl(
 		return msm_ir_led_subdev_ioctl(sd, cmd, arg);
 	}

+	ir_led_data.cfg_type = u32->cfg_type;
+	ir_led_data.pwm_duty_on_ns = u32->pwm_duty_on_ns;
+	ir_led_data.pwm_period_ns = u32->pwm_period_ns;
+
 	rc = msm_ir_led_subdev_ioctl(sd, cmd, &ir_led_data);

 	return rc;
@@ -451,7 +451,7 @@ static int isp_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct page *page;
 	struct vfe_device *vfe_dev = vma->vm_private_data;
-	struct isp_proc *isp_page = NULL;
+	struct isp_kstate *isp_page = NULL;

 	isp_page = vfe_dev->isp_page;

@@ -732,7 +732,7 @@ int vfe_hw_probe(struct platform_device *pdev)
 	vfe_dev->buf_mgr->init_done = 1;
 	vfe_dev->vfe_open_cnt = 0;
 	/*Allocate a page in kernel and map it to camera user process*/
-	vfe_dev->isp_page = (struct isp_proc *)get_zeroed_page(GFP_KERNEL);
+	vfe_dev->isp_page = (struct isp_kstate *)get_zeroed_page(GFP_KERNEL);
 	if (vfe_dev->isp_page == NULL) {
 		pr_err("%s: no enough memory\n", __func__);
 		rc = -ENOMEM;
@@ -759,11 +759,6 @@ struct msm_vfe_common_subdev {
 	struct msm_vfe_common_dev_data *common_data;
 };

-struct isp_proc {
-	uint32_t kernel_sofid;
-	uint32_t vfeid;
-};
-
 struct vfe_device {
 	/* Driver private data */
 	struct platform_device *pdev;
@@ -847,7 +842,7 @@ struct vfe_device {
 	uint32_t recovery_irq1_mask;
 	/* total bandwidth per vfe */
 	uint64_t total_bandwidth;
-	struct isp_proc *isp_page;
+	struct isp_kstate *isp_page;
 };

 struct vfe_parent_device {
@@ -733,13 +733,13 @@ static void msm_vfe40_process_epoch_irq(struct vfe_device *vfe_dev,
 		return;

 	if (irq_status0 & BIT(2)) {
-		msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
 		ISP_DBG("%s: EPOCH0 IRQ\n", __func__);
 		msm_isp_process_reg_upd_epoch_irq(vfe_dev, VFE_PIX_0,
 					MSM_ISP_COMP_IRQ_EPOCH, ts);
 		msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
 					MSM_ISP_COMP_IRQ_EPOCH);
 		msm_isp_update_error_frame_count(vfe_dev);
+		msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
 		if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
 			&& vfe_dev->axi_data.src_info[VFE_PIX_0].
 				stream_count == 0) {
@ -172,6 +172,8 @@ static void msm_isp_axi_destroy_stream(
|
|||
stream_info->bufq_handle[k] = 0;
|
||||
stream_info->vfe_mask = 0;
|
||||
stream_info->state = AVAILABLE;
|
||||
memset(&stream_info->request_queue_cmd,
|
||||
0, sizeof(stream_info->request_queue_cmd));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3446,6 +3448,14 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev,
|
|||
frame_src = SRC_TO_INTF(stream_info->stream_src);
|
||||
pingpong_status = vfe_dev->hw_info->
|
||||
vfe_ops.axi_ops.get_pingpong_status(vfe_dev);
|
||||
|
||||
/* As MCT is still processing it, need to drop the additional requests*/
|
||||
if (vfe_dev->isp_page->drop_reconfig) {
|
||||
pr_err("%s: MCT has not yet delayed %d drop request %d\n",
|
||||
__func__, vfe_dev->isp_page->drop_reconfig, frame_id);
|
||||
goto error;
|
||||
}
|
||||
|
||||
/*
|
||||
* If PIX stream is active then RDI path uses SOF frame ID of PIX
|
||||
* In case of standalone RDI streaming, SOF are used from
|
||||
|
@ -3459,9 +3469,18 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev,
|
|||
vfe_dev->axi_data.src_info[frame_src].accept_frame == false) {
|
||||
pr_debug("%s:%d invalid time to request frame %d\n",
|
||||
__func__, __LINE__, frame_id);
|
||||
goto error;
|
||||
}
|
||||
if ((vfe_dev->axi_data.src_info[frame_src].active && (frame_id !=
|
||||
vfe_dev->isp_page->drop_reconfig = 1;
|
||||
} else if ((vfe_dev->axi_data.src_info[frame_src].active) &&
|
||||
(frame_id ==
|
||||
vfe_dev->axi_data.src_info[frame_src].frame_id) &&
|
||||
(stream_info->undelivered_request_cnt <=
|
||||
MAX_BUFFERS_IN_HW)) {
|
||||
vfe_dev->isp_page->drop_reconfig = 1;
|
||||
pr_debug("%s: vfe_%d request_frame %d cur frame id %d pix %d\n",
|
||||
__func__, vfe_dev->pdev->id, frame_id,
|
||||
vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id,
|
||||
vfe_dev->axi_data.src_info[VFE_PIX_0].active);
|
||||
} else if ((vfe_dev->axi_data.src_info[frame_src].active && (frame_id !=
|
||||
vfe_dev->axi_data.src_info[frame_src].frame_id + vfe_dev->
|
||||
axi_data.src_info[frame_src].sof_counter_step)) ||
|
||||
((!vfe_dev->axi_data.src_info[frame_src].active))) {
|
||||
|
@ -3566,6 +3585,9 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev,
|
|||
stream_info->undelivered_request_cnt--;
|
||||
pr_err_ratelimited("%s:%d fail to cfg HAL buffer\n",
|
||||
__func__, __LINE__);
|
||||
queue_req->cmd_used = 0;
|
||||
list_del(&queue_req->list);
|
||||
stream_info->request_q_cnt--;
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
@ -3604,6 +3626,9 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev,
|
|||
flags);
|
||||
pr_err_ratelimited("%s:%d fail to cfg HAL buffer\n",
|
||||
__func__, __LINE__);
|
||||
queue_req->cmd_used = 0;
|
||||
list_del(&queue_req->list);
|
||||
stream_info->request_q_cnt--;
|
||||
return rc;
|
||||
}
|
||||
} else {
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
|
||||
/* Copyright (c) 2013-2017, 2019 The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
|
@ -230,32 +230,34 @@ static long msm_ispif_cmd_ext(struct v4l2_subdev *sd,
|
|||
long rc = 0;
|
||||
struct ispif_device *ispif =
|
||||
(struct ispif_device *)v4l2_get_subdevdata(sd);
|
||||
struct ispif_cfg_data_ext pcdata;
|
||||
struct ispif_cfg_data_ext pcdata = {0};
|
||||
struct msm_ispif_param_data_ext *params = NULL;
|
||||
|
||||
if (is_compat_task()) {
|
||||
#ifdef CONFIG_COMPAT
|
||||
struct ispif_cfg_data_ext_32 *pcdata32 =
|
||||
(struct ispif_cfg_data_ext_32 *)arg;
|
||||
struct ispif_cfg_data_ext_32 *pcdata32 =
|
||||
(struct ispif_cfg_data_ext_32 *)arg;
|
||||
|
||||
if (pcdata32 == NULL) {
|
||||
pr_err("Invalid params passed from user\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
pcdata.cfg_type = pcdata32->cfg_type;
|
||||
pcdata.size = pcdata32->size;
|
||||
pcdata.data = compat_ptr(pcdata32->data);
|
||||
|
||||
#else
|
||||
struct ispif_cfg_data_ext *pcdata64 =
|
||||
if (pcdata32 == NULL) {
|
||||
pr_err("Invalid params passed from user\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
pcdata.cfg_type = pcdata32->cfg_type;
|
||||
pcdata.size = pcdata32->size;
|
||||
pcdata.data = compat_ptr(pcdata32->data);
|
||||
#endif
|
||||
} else {
|
||||
struct ispif_cfg_data_ext *pcdata64 =
|
||||
(struct ispif_cfg_data_ext *)arg;
|
||||
|
||||
if (pcdata64 == NULL) {
|
||||
pr_err("Invalid params passed from user\n");
|
||||
return -EINVAL;
|
||||
if (pcdata64 == NULL) {
|
||||
pr_err("Invalid params passed from user\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
pcdata.cfg_type = pcdata64->cfg_type;
|
||||
pcdata.size = pcdata64->size;
|
||||
pcdata.data = pcdata64->data;
|
||||
}
|
||||
pcdata.cfg_type = pcdata64->cfg_type;
|
||||
pcdata.size = pcdata64->size;
|
||||
pcdata.data = pcdata64->data;
|
||||
#endif
|
||||
if (pcdata.size != sizeof(struct msm_ispif_param_data_ext)) {
|
||||
pr_err("%s: payload size mismatch\n", __func__);
|
||||
return -EINVAL;
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
|
||||
/* Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
|
@ -324,10 +324,6 @@ static long msm_ir_led_subdev_do_ioctl(
|
|||
struct msm_ir_led_cfg_data_t ir_led_data;
|
||||
|
||||
CDBG("Enter\n");
|
||||
ir_led_data.cfg_type = u32->cfg_type;
|
||||
ir_led_data.pwm_duty_on_ns = u32->pwm_duty_on_ns;
|
||||
ir_led_data.pwm_period_ns = u32->pwm_period_ns;
|
||||
|
||||
switch (cmd) {
|
||||
case VIDIOC_MSM_IR_LED_CFG32:
|
||||
cmd = VIDIOC_MSM_IR_LED_CFG;
|
||||
|
@ -336,6 +332,10 @@ static long msm_ir_led_subdev_do_ioctl(
|
|||
return msm_ir_led_subdev_ioctl(sd, cmd, arg);
|
||||
}
|
||||
|
||||
ir_led_data.cfg_type = u32->cfg_type;
|
||||
ir_led_data.pwm_duty_on_ns = u32->pwm_duty_on_ns;
|
||||
ir_led_data.pwm_period_ns = u32->pwm_period_ns;
|
||||
|
||||
rc = msm_ir_led_subdev_ioctl(sd, cmd, &ir_led_data);
|
||||
|
||||
CDBG("Exit\n");
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
|
||||
/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
|
@ -70,18 +70,15 @@ static struct msm_camera_i2c_fn_t msm_sensor_cci_func_tbl = {
|
|||
.i2c_util = msm_sensor_cci_i2c_util,
|
||||
.i2c_poll = msm_camera_cci_i2c_poll,
|
||||
};
|
||||
#ifdef CONFIG_COMPAT
|
||||
|
||||
static int32_t msm_laser_led_init(
|
||||
struct msm_laser_led_ctrl_t *laser_led_ctrl,
|
||||
struct msm_laser_led_cfg_data_t32 __user *laser_led_data)
|
||||
#else
|
||||
static int32_t msm_laser_led_init(
|
||||
struct msm_laser_led_ctrl_t *laser_led_ctrl,
|
||||
struct msm_laser_led_cfg_data_t __user *laser_led_data)
|
||||
#endif
|
||||
void __user *argp)
|
||||
{
|
||||
int32_t rc = -EFAULT;
|
||||
struct msm_camera_cci_client *cci_client = NULL;
|
||||
struct msm_laser_led_cfg_data_t __user *laser_led_data =
|
||||
(struct msm_laser_led_cfg_data_t __user *) argp;
|
||||
|
||||
CDBG("Enter\n");
|
||||
|
||||
|
@ -263,6 +260,53 @@ static int32_t msm_laser_led_control32(
|
|||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int32_t msm_laser_led_init32(
|
||||
struct msm_laser_led_ctrl_t *laser_led_ctrl,
|
||||
void __user *argp)
|
||||
{
|
||||
int32_t rc = -EFAULT;
|
||||
struct msm_laser_led_cfg_data_t32 __user *laser_led_data =
|
||||
(struct msm_laser_led_cfg_data_t32 __user *) argp;
|
||||
struct msm_camera_cci_client *cci_client = NULL;
|
||||
|
||||
CDBG("Enter\n");
|
||||
|
||||
if (laser_led_ctrl->laser_led_state == MSM_CAMERA_LASER_LED_INIT) {
|
||||
pr_err("Invalid laser_led state = %d\n",
|
||||
laser_led_ctrl->laser_led_state);
|
||||
return 0;
|
||||
}
|
||||
|
||||
rc = laser_led_ctrl->i2c_client.i2c_func_tbl->i2c_util(
|
||||
&laser_led_ctrl->i2c_client, MSM_CCI_INIT);
|
||||
if (rc < 0)
|
||||
pr_err("cci_init failed\n");
|
||||
|
||||
cci_client = laser_led_ctrl->i2c_client.cci_client;
|
||||
|
||||
if (copy_from_user(&(cci_client->sid),
|
||||
&(laser_led_data->i2c_addr),
|
||||
sizeof(uint16_t))) {
|
||||
pr_err("%s:%d failed\n", __func__, __LINE__);
|
||||
return -EFAULT;
|
||||
}
|
||||
cci_client->sid = cci_client->sid >> 1;
|
||||
cci_client->retries = 3;
|
||||
cci_client->id_map = 0;
|
||||
|
||||
if (copy_from_user(&(cci_client->i2c_freq_mode),
|
||||
&(laser_led_data->i2c_freq_mode),
|
||||
sizeof(enum i2c_freq_mode_t))) {
|
||||
pr_err("%s:%d failed\n", __func__, __LINE__);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
laser_led_ctrl->laser_led_state = MSM_CAMERA_LASER_LED_INIT;
|
||||
|
||||
CDBG("Exit\n");
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
static int32_t msm_laser_led_control(
|
||||
|
@ -381,7 +425,12 @@ static int32_t msm_laser_led_config(struct msm_laser_led_ctrl_t *laser_led_ctrl,
|
|||
|
||||
switch (cfg_type) {
|
||||
case CFG_LASER_LED_INIT:
|
||||
rc = msm_laser_led_init(laser_led_ctrl, laser_led_data);
|
||||
#ifdef CONFIG_COMPAT
|
||||
if (is_compat_task())
|
||||
rc = msm_laser_led_init32(laser_led_ctrl, argp);
|
||||
else
|
||||
#endif
|
||||
rc = msm_laser_led_init(laser_led_ctrl, argp);
|
||||
break;
|
||||
case CFG_LASER_LED_CONTROL:
|
||||
#ifdef CONFIG_COMPAT
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
|
||||
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
|
@ -449,6 +449,11 @@ struct rndis_pkt_hdr rndis_template_hdr = {
|
|||
.zeroes = {0},
|
||||
};
|
||||
|
||||
static void rndis_ipa_msg_free_cb(void *buff, u32 len, u32 type)
|
||||
{
|
||||
kfree(buff);
|
||||
}
|
||||
|
||||
/**
|
||||
* rndis_ipa_init() - create network device and initialize internal
|
||||
* data structures
|
||||
|
@ -648,6 +653,8 @@ int rndis_ipa_pipe_connect_notify(u32 usb_to_ipa_hdl,
|
|||
int next_state;
|
||||
int result;
|
||||
unsigned long flags;
|
||||
struct ipa_ecm_msg *rndis_msg;
|
||||
struct ipa_msg_meta msg_meta;
|
||||
|
||||
RNDIS_IPA_LOG_ENTRY();
|
||||
|
||||
|
@ -718,6 +725,26 @@ int rndis_ipa_pipe_connect_notify(u32 usb_to_ipa_hdl,
|
|||
}
|
||||
RNDIS_IPA_DEBUG("netif_carrier_on() was called\n");
|
||||
|
||||
rndis_msg = kzalloc(sizeof(*rndis_msg), GFP_KERNEL);
|
||||
if (!rndis_msg) {
|
||||
result = -ENOMEM;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
|
||||
msg_meta.msg_type = ECM_CONNECT;
|
||||
msg_meta.msg_len = sizeof(struct ipa_ecm_msg);
|
||||
strlcpy(rndis_msg->name, rndis_ipa_ctx->net->name,
|
||||
IPA_RESOURCE_NAME_MAX);
|
||||
rndis_msg->ifindex = rndis_ipa_ctx->net->ifindex;
|
||||
|
||||
result = ipa_send_msg(&msg_meta, rndis_msg, rndis_ipa_msg_free_cb);
|
||||
if (result) {
|
||||
RNDIS_IPA_ERROR("fail to send ECM_CONNECT for rndis\n");
|
||||
kfree(rndis_msg);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags);
|
||||
next_state = rndis_ipa_next_state(rndis_ipa_ctx->state,
|
||||
RNDIS_IPA_CONNECT);
|
||||
|
@ -1165,6 +1192,8 @@ int rndis_ipa_pipe_disconnect_notify(void *private)
|
|||
int outstanding_dropped_pkts;
|
||||
int retval;
|
||||
unsigned long flags;
|
||||
struct ipa_ecm_msg *rndis_msg;
|
||||
struct ipa_msg_meta msg_meta;
|
||||
|
||||
RNDIS_IPA_LOG_ENTRY();
|
||||
|
||||
|
@ -1192,6 +1221,23 @@ int rndis_ipa_pipe_disconnect_notify(void *private)
|
|||
netif_carrier_off(rndis_ipa_ctx->net);
|
||||
RNDIS_IPA_DEBUG("carrier_off notification was sent\n");
|
||||
|
||||
rndis_msg = kzalloc(sizeof(*rndis_msg), GFP_KERNEL);
|
||||
if (!rndis_msg)
|
||||
return -ENOMEM;
|
||||
|
||||
memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
|
||||
msg_meta.msg_type = ECM_DISCONNECT;
|
||||
msg_meta.msg_len = sizeof(struct ipa_ecm_msg);
|
||||
strlcpy(rndis_msg->name, rndis_ipa_ctx->net->name,
|
||||
IPA_RESOURCE_NAME_MAX);
|
||||
rndis_msg->ifindex = rndis_ipa_ctx->net->ifindex;
|
||||
|
||||
retval = ipa_send_msg(&msg_meta, rndis_msg, rndis_ipa_msg_free_cb);
|
||||
if (retval) {
|
||||
RNDIS_IPA_ERROR("fail to send ECM_DISCONNECT for rndis\n");
|
||||
kfree(rndis_msg);
|
||||
return -EPERM;
|
||||
}
|
||||
netif_stop_queue(rndis_ipa_ctx->net);
|
||||
RNDIS_IPA_DEBUG("queue stopped\n");
|
||||
|
||||
|
|
|
@ -2162,6 +2162,12 @@ static void ipa3_q6_avoid_holb(void)
|
|||
if (ep_idx == -1)
|
||||
continue;
|
||||
|
||||
/* from IPA 4.0 pipe suspend is not supported */
|
||||
if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
|
||||
ipahal_write_reg_n_fields(
|
||||
IPA_ENDP_INIT_CTRL_n,
|
||||
ep_idx, &ep_suspend);
|
||||
|
||||
/*
|
||||
* ipa3_cfg_ep_holb is not used here because we are
|
||||
* setting HOLB on Q6 pipes, and from APPS perspective
|
||||
|
@ -2174,10 +2180,6 @@ static void ipa3_q6_avoid_holb(void)
|
|||
ipahal_write_reg_n_fields(
|
||||
IPA_ENDP_INIT_HOL_BLOCK_EN_n,
|
||||
ep_idx, &ep_holb);
|
||||
|
||||
ipahal_write_reg_n_fields(
|
||||
IPA_ENDP_INIT_CTRL_n,
|
||||
ep_idx, &ep_suspend);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -3458,7 +3458,6 @@ int icnss_trigger_recovery(struct device *dev)
|
|||
goto out;
|
||||
}
|
||||
|
||||
WARN_ON(1);
|
||||
icnss_pr_warn("Initiate PD restart at WLAN FW, state: 0x%lx\n",
|
||||
priv->state);
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
|
||||
/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
|
@ -635,28 +635,7 @@ static int allocate_extra_arg_buffer(struct scm_desc *desc, gfp_t flags)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* scm_call2() - Invoke a syscall in the secure world
|
||||
* @fn_id: The function ID for this syscall
|
||||
* @desc: Descriptor structure containing arguments and return values
|
||||
*
|
||||
* Sends a command to the SCM and waits for the command to finish processing.
|
||||
* This should *only* be called in pre-emptible context.
|
||||
*
|
||||
* A note on cache maintenance:
|
||||
* Note that any buffers that are expected to be accessed by the secure world
|
||||
* must be flushed before invoking scm_call and invalidated in the cache
|
||||
* immediately after scm_call returns. An important point that must be noted
|
||||
* is that on ARMV8 architectures, invalidation actually also causes a dirty
|
||||
* cache line to be cleaned (flushed + unset-dirty-bit). Therefore it is of
|
||||
* paramount importance that the buffer be flushed before invoking scm_call2,
|
||||
* even if you don't care about the contents of that buffer.
|
||||
*
|
||||
* Note that cache maintenance on the argument buffer (desc->args) is taken care
|
||||
* of by scm_call2; however, callers are responsible for any other cached
|
||||
* buffers passed over to the secure world.
|
||||
*/
|
||||
int scm_call2(u32 fn_id, struct scm_desc *desc)
|
||||
static int __scm_call2(u32 fn_id, struct scm_desc *desc, bool retry)
|
||||
{
|
||||
int arglen = desc->arginfo & 0xf;
|
||||
int ret, retry_count = 0;
|
||||
|
@ -670,7 +649,6 @@ int scm_call2(u32 fn_id, struct scm_desc *desc)
|
|||
return ret;
|
||||
|
||||
x0 = fn_id | scm_version_mask;
|
||||
|
||||
do {
|
||||
mutex_lock(&scm_lock);
|
||||
|
||||
|
@ -700,13 +678,15 @@ int scm_call2(u32 fn_id, struct scm_desc *desc)
|
|||
mutex_unlock(&scm_lmh_lock);
|
||||
|
||||
mutex_unlock(&scm_lock);
|
||||
if (!retry)
|
||||
goto out;
|
||||
|
||||
if (ret == SCM_V2_EBUSY)
|
||||
msleep(SCM_EBUSY_WAIT_MS);
|
||||
if (retry_count == 33)
|
||||
pr_warn("scm: secure world has been busy for 1 second!\n");
|
||||
} while (ret == SCM_V2_EBUSY && (retry_count++ < SCM_EBUSY_MAX_RETRY));
|
||||
|
||||
} while (ret == SCM_V2_EBUSY && (retry_count++ < SCM_EBUSY_MAX_RETRY));
|
||||
out:
|
||||
if (ret < 0)
|
||||
pr_err("scm_call failed: func id %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
|
||||
x0, ret, desc->ret[0], desc->ret[1], desc->ret[2]);
|
||||
|
@ -717,8 +697,46 @@ int scm_call2(u32 fn_id, struct scm_desc *desc)
|
|||
return scm_remap_error(ret);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* scm_call2() - Invoke a syscall in the secure world
|
||||
* @fn_id: The function ID for this syscall
|
||||
* @desc: Descriptor structure containing arguments and return values
|
||||
*
|
||||
* Sends a command to the SCM and waits for the command to finish processing.
|
||||
* This should *only* be called in pre-emptible context.
|
||||
*
|
||||
* A note on cache maintenance:
|
||||
* Note that any buffers that are expected to be accessed by the secure world
|
||||
* must be flushed before invoking scm_call and invalidated in the cache
|
||||
* immediately after scm_call returns. An important point that must be noted
|
||||
* is that on ARMV8 architectures, invalidation actually also causes a dirty
|
||||
* cache line to be cleaned (flushed + unset-dirty-bit). Therefore it is of
|
||||
* paramount importance that the buffer be flushed before invoking scm_call2,
|
||||
* even if you don't care about the contents of that buffer.
|
||||
*
|
||||
* Note that cache maintenance on the argument buffer (desc->args) is taken care
|
||||
* of by scm_call2; however, callers are responsible for any other cached
|
||||
* buffers passed over to the secure world.
|
||||
*/
|
||||
int scm_call2(u32 fn_id, struct scm_desc *desc)
|
||||
{
|
||||
return __scm_call2(fn_id, desc, true);
|
||||
}
|
||||
EXPORT_SYMBOL(scm_call2);
|
||||
|
||||
/**
|
||||
* scm_call2_noretry() - Invoke a syscall in the secure world
|
||||
*
|
||||
* Similar to scm_call2 except that there is no retry mechanism
|
||||
* implemented.
|
||||
*/
|
||||
int scm_call2_noretry(u32 fn_id, struct scm_desc *desc)
|
||||
{
|
||||
return __scm_call2(fn_id, desc, false);
|
||||
}
|
||||
EXPORT_SYMBOL(scm_call2_noretry);
|
||||
|
||||
/**
|
||||
* scm_call2_atomic() - Invoke a syscall in the secure world
|
||||
*
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
|
||||
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
|
@ -89,6 +89,9 @@ struct wdsp_glink_ch {
|
|||
/* Wait for ch connect state before sending any command */
|
||||
wait_queue_head_t ch_connect_wait;
|
||||
|
||||
/* Wait for ch local and remote disconnect before channel free */
|
||||
wait_queue_head_t ch_free_wait;
|
||||
|
||||
/*
|
||||
* Glink channel configuration. This has to be the last
|
||||
* member of the strucuture as it has variable size
|
||||
|
@ -338,7 +341,7 @@ static void wdsp_glink_notify_state(void *handle, const void *priv,
|
|||
mutex_lock(&ch->mutex);
|
||||
ch->channel_state = event;
|
||||
if (event == GLINK_CONNECTED) {
|
||||
dev_dbg(wpriv->dev, "%s: glink channel: %s connected\n",
|
||||
dev_info(wpriv->dev, "%s: glink channel: %s connected\n",
|
||||
__func__, ch->ch_cfg.name);
|
||||
|
||||
for (i = 0; i < ch->ch_cfg.no_of_intents; i++) {
|
||||
|
@ -360,31 +363,29 @@ static void wdsp_glink_notify_state(void *handle, const void *priv,
|
|||
ch->ch_cfg.name);
|
||||
|
||||
wake_up(&ch->ch_connect_wait);
|
||||
mutex_unlock(&ch->mutex);
|
||||
} else if (event == GLINK_LOCAL_DISCONNECTED) {
|
||||
/*
|
||||
* Don't use dev_dbg here as dev may not be valid if channel
|
||||
* closed from driver close.
|
||||
*/
|
||||
pr_debug("%s: channel: %s disconnected locally\n",
|
||||
pr_info("%s: channel: %s disconnected locally\n",
|
||||
__func__, ch->ch_cfg.name);
|
||||
mutex_unlock(&ch->mutex);
|
||||
|
||||
if (ch->free_mem) {
|
||||
kfree(ch);
|
||||
ch = NULL;
|
||||
}
|
||||
ch->free_mem = true;
|
||||
wake_up(&ch->ch_free_wait);
|
||||
return;
|
||||
} else if (event == GLINK_REMOTE_DISCONNECTED) {
|
||||
dev_dbg(wpriv->dev, "%s: remote channel: %s disconnected remotely\n",
|
||||
pr_info("%s: remote channel: %s disconnected remotely\n",
|
||||
__func__, ch->ch_cfg.name);
|
||||
mutex_unlock(&ch->mutex);
|
||||
/*
|
||||
* If remote disconnect happens, local side also has
|
||||
* to close the channel as per glink design in a
|
||||
* separate work_queue.
|
||||
*/
|
||||
queue_work(wpriv->work_queue, &ch->lcl_ch_cls_wrk);
|
||||
if (wpriv && wpriv->work_queue != NULL)
|
||||
queue_work(wpriv->work_queue, &ch->lcl_ch_cls_wrk);
|
||||
}
|
||||
mutex_unlock(&ch->mutex);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -399,11 +400,11 @@ static int wdsp_glink_close_ch(struct wdsp_glink_ch *ch)
|
|||
mutex_lock(&wpriv->glink_mutex);
|
||||
if (ch->handle) {
|
||||
ret = glink_close(ch->handle);
|
||||
ch->handle = NULL;
|
||||
if (IS_ERR_VALUE(ret)) {
|
||||
dev_err(wpriv->dev, "%s: glink_close is failed, ret = %d\n",
|
||||
__func__, ret);
|
||||
} else {
|
||||
ch->handle = NULL;
|
||||
dev_dbg(wpriv->dev, "%s: ch %s is closed\n", __func__,
|
||||
ch->ch_cfg.name);
|
||||
}
|
||||
|
@ -451,6 +452,7 @@ static int wdsp_glink_open_ch(struct wdsp_glink_ch *ch)
|
|||
ch->handle = NULL;
|
||||
ret = -EINVAL;
|
||||
}
|
||||
ch->free_mem = false;
|
||||
} else {
|
||||
dev_err(wpriv->dev, "%s: ch %s is already opened\n", __func__,
|
||||
ch->ch_cfg.name);
|
||||
|
@ -492,7 +494,7 @@ static int wdsp_glink_open_all_ch(struct wdsp_glink_priv *wpriv)
|
|||
|
||||
err_open:
|
||||
for (j = 0; j < i; j++)
|
||||
if (wpriv->ch[i])
|
||||
if (wpriv->ch[j])
|
||||
wdsp_glink_close_ch(wpriv->ch[j]);
|
||||
|
||||
done:
|
||||
|
@ -631,6 +633,7 @@ static int wdsp_glink_ch_info_init(struct wdsp_glink_priv *wpriv,
|
|||
goto err_ch_mem;
|
||||
}
|
||||
ch[i]->channel_state = GLINK_LOCAL_DISCONNECTED;
|
||||
ch[i]->free_mem = true;
|
||||
memcpy(&ch[i]->ch_cfg, payload, ch_cfg_size);
|
||||
payload += ch_cfg_size;
|
||||
|
||||
|
@ -654,6 +657,7 @@ static int wdsp_glink_ch_info_init(struct wdsp_glink_priv *wpriv,
|
|||
INIT_WORK(&ch[i]->lcl_ch_open_wrk, wdsp_glink_lcl_ch_open_wrk);
|
||||
INIT_WORK(&ch[i]->lcl_ch_cls_wrk, wdsp_glink_lcl_ch_cls_wrk);
|
||||
init_waitqueue_head(&ch[i]->ch_connect_wait);
|
||||
init_waitqueue_head(&ch[i]->ch_free_wait);
|
||||
}
|
||||
|
||||
INIT_WORK(&wpriv->ch_open_cls_wrk, wdsp_glink_ch_open_cls_wrk);
|
||||
|
@ -1060,37 +1064,49 @@ static int wdsp_glink_release(struct inode *inode, struct file *file)
|
|||
goto done;
|
||||
}
|
||||
|
||||
dev_info(wpriv->dev, "%s: closing wdsp_glink driver\n", __func__);
|
||||
if (wpriv->glink_state.handle)
|
||||
glink_unregister_link_state_cb(wpriv->glink_state.handle);
|
||||
|
||||
flush_workqueue(wpriv->work_queue);
|
||||
destroy_workqueue(wpriv->work_queue);
|
||||
|
||||
/*
|
||||
* Clean up glink channel memory in channel state
|
||||
* callback only if close channels are called from here.
|
||||
* Wait for channel local and remote disconnect state notifications
|
||||
* before freeing channel memory.
|
||||
*/
|
||||
if (wpriv->ch) {
|
||||
for (i = 0; i < wpriv->no_of_channels; i++) {
|
||||
if (wpriv->ch[i]) {
|
||||
wpriv->ch[i]->free_mem = true;
|
||||
/*
|
||||
* Channel handle NULL means channel is already
|
||||
* closed. Free the channel memory here itself.
|
||||
*/
|
||||
if (!wpriv->ch[i]->handle) {
|
||||
kfree(wpriv->ch[i]);
|
||||
wpriv->ch[i] = NULL;
|
||||
} else {
|
||||
wdsp_glink_close_ch(wpriv->ch[i]);
|
||||
}
|
||||
for (i = 0; i < wpriv->no_of_channels; i++) {
|
||||
if (wpriv->ch && wpriv->ch[i]) {
|
||||
/*
|
||||
* Only close glink channel from here if REMOTE has
|
||||
* not already disconnected it
|
||||
*/
|
||||
wdsp_glink_close_ch(wpriv->ch[i]);
|
||||
|
||||
ret = wait_event_timeout(wpriv->ch[i]->ch_free_wait,
|
||||
(wpriv->ch[i]->free_mem == true),
|
||||
msecs_to_jiffies(TIMEOUT_MS));
|
||||
if (!ret) {
|
||||
pr_err("%s: glink ch %s failed to notify states properly %d\n",
|
||||
__func__, wpriv->ch[i]->ch_cfg.name,
|
||||
wpriv->ch[i]->channel_state);
|
||||
ret = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
|
||||
kfree(wpriv->ch);
|
||||
wpriv->ch = NULL;
|
||||
}
|
||||
|
||||
flush_workqueue(wpriv->work_queue);
|
||||
destroy_workqueue(wpriv->work_queue);
|
||||
wpriv->work_queue = NULL;
|
||||
|
||||
for (i = 0; i < wpriv->no_of_channels; i++) {
|
||||
if (wpriv->ch && wpriv->ch[i]) {
|
||||
kfree(wpriv->ch[i]);
|
||||
wpriv->ch[i] = NULL;
|
||||
}
|
||||
}
|
||||
kfree(wpriv->ch);
|
||||
wpriv->ch = NULL;
|
||||
|
||||
mutex_destroy(&wpriv->glink_mutex);
|
||||
mutex_destroy(&wpriv->rsp_mutex);
|
||||
kfree(wpriv);
|
||||
|
|
|
@ -653,6 +653,10 @@ static u8 get_inactive_bank_num(struct swr_mstr_ctrl *swrm)
|
|||
static void enable_bank_switch(struct swr_mstr_ctrl *swrm, u8 bank,
|
||||
u8 row, u8 col)
|
||||
{
|
||||
/* apply div2 setting for inactive bank before bank switch */
|
||||
swrm_cmd_fifo_wr_cmd(swrm, 0x01, 0xF, 0x00,
|
||||
SWRS_SCP_HOST_CLK_DIV2_CTL_BANK(bank));
|
||||
|
||||
swrm_cmd_fifo_wr_cmd(swrm, ((row << 3) | col), 0xF, 0xF,
|
||||
SWRS_SCP_FRAME_CTRL_BANK(bank));
|
||||
}
|
||||
|
@ -891,9 +895,6 @@ static void swrm_apply_port_config(struct swr_master *master)
|
|||
__func__, bank, master->num_port);
|
||||
|
||||
|
||||
swrm_cmd_fifo_wr_cmd(swrm, 0x01, 0xF, 0x00,
|
||||
SWRS_SCP_HOST_CLK_DIV2_CTL_BANK(bank));
|
||||
|
||||
swrm_copy_data_port_config(master, bank);
|
||||
}
|
||||
|
||||
|
|
|
@ -240,6 +240,8 @@ struct dwc3_msm {
|
|||
struct delayed_work sdp_check;
|
||||
bool usb_compliance_mode;
|
||||
struct mutex suspend_resume_mutex;
|
||||
|
||||
enum usb_device_speed override_usb_speed;
|
||||
};
|
||||
|
||||
#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
|
||||
|
@ -1570,8 +1572,17 @@ static void dwc3_restart_usb_work(struct work_struct *w)
|
|||
|
||||
mdwc->in_restart = false;
|
||||
/* Force reconnect only if cable is still connected */
|
||||
if (mdwc->vbus_active)
|
||||
if (mdwc->vbus_active) {
|
||||
if (mdwc->override_usb_speed) {
|
||||
dwc->maximum_speed = mdwc->override_usb_speed;
|
||||
dwc->gadget.max_speed = dwc->maximum_speed;
|
||||
dbg_event(0xFF, "override_usb_speed",
|
||||
mdwc->override_usb_speed);
|
||||
mdwc->override_usb_speed = 0;
|
||||
}
|
||||
|
||||
dwc3_resume_work(&mdwc->resume_work);
|
||||
}
|
||||
|
||||
dwc->err_evt_seen = false;
|
||||
flush_delayed_work(&mdwc->sm_work);
|
||||
|
@ -2652,6 +2663,13 @@ static int dwc3_msm_id_notifier(struct notifier_block *nb,
|
|||
if (dwc->maximum_speed > dwc->max_hw_supp_speed)
|
||||
dwc->maximum_speed = dwc->max_hw_supp_speed;
|
||||
|
||||
if (!id && mdwc->override_usb_speed) {
|
||||
dwc->maximum_speed = mdwc->override_usb_speed;
|
||||
dbg_event(0xFF, "override_usb_speed",
|
||||
mdwc->override_usb_speed);
|
||||
mdwc->override_usb_speed = 0;
|
||||
}
|
||||
|
||||
if (mdwc->id_state != id) {
|
||||
mdwc->id_state = id;
|
||||
dbg_event(0xFF, "id_state", mdwc->id_state);
|
||||
|
@ -2836,14 +2854,19 @@ static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
|
|||
|
||||
static DEVICE_ATTR_RW(mode);
|
||||
|
||||
/* This node only shows max speed supported dwc3 and it should be
|
||||
* same as what is reported in udc/core.c max_speed node. For current
|
||||
* operating gadget speed, query current_speed node which is implemented
|
||||
* by udc/core.c
|
||||
*/
|
||||
static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct dwc3_msm *mdwc = dev_get_drvdata(dev);
|
||||
struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%s",
|
||||
usb_speed_string(dwc->max_hw_supp_speed));
|
||||
return snprintf(buf, PAGE_SIZE, "%s\n",
|
||||
usb_speed_string(dwc->maximum_speed));
|
||||
}
|
||||
|
||||
static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
|
||||
|
@ -2853,14 +2876,25 @@ static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
|
|||
struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
|
||||
enum usb_device_speed req_speed = USB_SPEED_UNKNOWN;
|
||||
|
||||
if (sysfs_streq(buf, "high"))
|
||||
/* DEVSPD can only have values SS(0x4), HS(0x0) and FS(0x1).
|
||||
* per 3.20a data book. Allow only these settings. Note that,
|
||||
* xhci does not support full-speed only mode.
|
||||
*/
|
||||
if (sysfs_streq(buf, "full"))
|
||||
req_speed = USB_SPEED_FULL;
|
||||
else if (sysfs_streq(buf, "high"))
|
||||
req_speed = USB_SPEED_HIGH;
|
||||
else if (sysfs_streq(buf, "super"))
|
||||
req_speed = USB_SPEED_SUPER;
|
||||
else
|
||||
return -EINVAL;
|
||||
|
||||
if (req_speed != USB_SPEED_UNKNOWN &&
|
||||
req_speed != dwc->max_hw_supp_speed) {
|
||||
dwc->maximum_speed = dwc->max_hw_supp_speed = req_speed;
|
||||
/* restart usb only works for device mode. Perform manual cable
|
||||
* plug in/out for host mode restart.
|
||||
*/
|
||||
if (req_speed != dwc->maximum_speed &&
|
||||
req_speed <= dwc->max_hw_supp_speed) {
|
||||
mdwc->override_usb_speed = req_speed;
|
||||
schedule_work(&mdwc->restart_usb_work);
|
||||
}
|
||||
|
||||
|
|
|
@ -2843,16 +2843,13 @@ static void gsi_unbind(struct usb_configuration *c, struct usb_function *f)
|
|||
if (gsi->prot_id == IPA_USB_MBIM)
|
||||
mbim_gsi_ext_config_desc.function.subCompatibleID[0] = 0;
|
||||
|
||||
if (gadget_is_superspeed(c->cdev->gadget)) {
|
||||
if (gadget_is_superspeed(c->cdev->gadget))
|
||||
usb_free_descriptors(f->ss_descriptors);
|
||||
f->ss_descriptors = NULL;
|
||||
}
|
||||
if (gadget_is_dualspeed(c->cdev->gadget)) {
|
||||
|
||||
if (gadget_is_dualspeed(c->cdev->gadget))
|
||||
usb_free_descriptors(f->hs_descriptors);
|
||||
f->hs_descriptors = NULL;
|
||||
}
|
||||
|
||||
usb_free_descriptors(f->fs_descriptors);
|
||||
f->fs_descriptors = NULL;
|
||||
|
||||
if (gsi->c_port.notify) {
|
||||
kfree(gsi->c_port.notify_req->buf);
|
||||
|
|
|
@ -1114,8 +1114,6 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
|
|||
case PE_SRC_NEGOTIATE_CAPABILITY:
|
||||
if (PD_RDO_OBJ_POS(pd->rdo) != 1 ||
|
||||
PD_RDO_FIXED_CURR(pd->rdo) >
|
||||
PD_SRC_PDO_FIXED_MAX_CURR(*default_src_caps) ||
|
||||
PD_RDO_FIXED_CURR_MINMAX(pd->rdo) >
|
||||
PD_SRC_PDO_FIXED_MAX_CURR(*default_src_caps)) {
|
||||
/* send Reject */
|
||||
ret = pd_send_msg(pd, MSG_REJECT, NULL, 0, SOP_MSG);
|
||||
|
|
|
@ -2402,6 +2402,49 @@ end_update:
|
|||
return rc;
|
||||
}
|
||||
|
||||
static int mdss_dsi_dynamic_bitclk_config(struct mdss_panel_data *pdata)
|
||||
{
|
||||
int rc = 0;
|
||||
struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
|
||||
struct mdss_panel_info *pinfo;
|
||||
|
||||
pr_debug("%s+:\n", __func__);
|
||||
|
||||
if (pdata == NULL) {
|
||||
pr_err("%s: Invalid input data\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
|
||||
panel_data);
|
||||
|
||||
if (!ctrl_pdata->panel_data.panel_info.dynamic_bitclk) {
|
||||
pr_err("Dynamic bitclk not enabled for this panel\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
pinfo = &pdata->panel_info;
|
||||
|
||||
if (!pinfo->new_clk_rate || (pinfo->clk_rate == pinfo->new_clk_rate)) {
|
||||
pr_debug("Bit clock update is not needed\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
rc = __mdss_dsi_dynamic_clock_switch(&ctrl_pdata->panel_data,
|
||||
pinfo->new_clk_rate);
|
||||
if (!rc && mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data)) {
|
||||
struct mdss_dsi_ctrl_pdata *octrl =
|
||||
mdss_dsi_get_other_ctrl(ctrl_pdata);
|
||||
rc = __mdss_dsi_dynamic_clock_switch(&octrl->panel_data,
|
||||
pinfo->new_clk_rate);
|
||||
if (rc)
|
||||
pr_err("failed to switch DSI bitclk for sctrl\n");
|
||||
} else if (rc) {
|
||||
pr_err("failed to switch DSI bitclk\n");
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int mdss_dsi_dfps_config(struct mdss_panel_data *pdata, int new_fps)
|
||||
{
|
||||
int rc = 0;
|
||||
|
@ -2855,19 +2898,14 @@ static ssize_t dynamic_bitclk_sysfs_wta(struct device *dev,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
rc = __mdss_dsi_dynamic_clock_switch(&ctrl_pdata->panel_data,
|
||||
clk_rate);
|
||||
if (!rc && mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data)) {
|
||||
pinfo->new_clk_rate = clk_rate;
|
||||
if (mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data)) {
|
||||
struct mdss_dsi_ctrl_pdata *octrl =
|
||||
mdss_dsi_get_other_ctrl(ctrl_pdata);
|
||||
rc = __mdss_dsi_dynamic_clock_switch(&octrl->panel_data,
|
||||
clk_rate);
|
||||
if (rc)
|
||||
pr_err("failed to switch DSI bitclk for sctrl\n");
|
||||
} else if (rc) {
|
||||
pr_err("failed to switch DSI bitclk\n");
|
||||
}
|
||||
struct mdss_panel_info *opinfo = &octrl->panel_data.panel_info;
|
||||
|
||||
opinfo->new_clk_rate = clk_rate;
|
||||
}
|
||||
return count;
|
||||
} /* dynamic_bitclk_sysfs_wta */
|
||||
|
||||
|
@ -3152,6 +3190,14 @@ static int mdss_dsi_event_handler(struct mdss_panel_data *pdata,
|
|||
case MDSS_EVENT_AVR_MODE:
|
||||
mdss_dsi_avr_config(ctrl_pdata, (int)(unsigned long) arg);
|
||||
break;
|
||||
case MDSS_EVENT_DSI_DYNAMIC_BITCLK:
|
||||
if (ctrl_pdata->panel_data.panel_info.dynamic_bitclk) {
|
||||
rc = mdss_dsi_dynamic_bitclk_config(pdata);
|
||||
if (rc)
|
||||
pr_err("unable to change bitclk error-%d\n",
|
||||
rc);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
pr_debug("%s: unhandled event=%d\n", __func__, event);
|
||||
break;
|
||||
|
|
|
@ -2659,6 +2659,13 @@ int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd,
|
|||
goto commit_fail;
|
||||
}
|
||||
|
||||
ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_DSI_DYNAMIC_BITCLK,
|
||||
NULL, CTL_INTF_EVENT_FLAG_SKIP_BROADCAST);
|
||||
if (IS_ERR_VALUE(ret)) {
|
||||
pr_err("failed to update dynamic bit clk!\n");
|
||||
goto commit_fail;
|
||||
}
|
||||
|
||||
mutex_lock(&mdp5_data->ov_lock);
|
||||
|
||||
/* Disable secure display/camera for video mode panels */
|
||||
|
|
|
@ -321,6 +321,7 @@ enum mdss_intf_events {
|
|||
MDSS_EVENT_DSI_TIMING_DB_CTRL,
|
||||
MDSS_EVENT_AVR_MODE,
|
||||
MDSS_EVENT_REGISTER_CLAMP_HANDLER,
|
||||
MDSS_EVENT_DSI_DYNAMIC_BITCLK,
|
||||
MDSS_EVENT_MAX,
|
||||
};
|
||||
|
||||
|
@ -827,6 +828,8 @@ struct mdss_panel_info {
|
|||
bool esd_check_enabled;
|
||||
bool allow_phy_power_off;
|
||||
char dfps_update;
|
||||
/* new requested bitclk before it is updated in hw */
|
||||
int new_clk_rate;
|
||||
/* new requested fps before it is updated in hw */
|
||||
int new_fps;
|
||||
/* stores initial fps after boot */
|
||||
|
|
|
@ -454,10 +454,10 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
|
|||
}
|
||||
bio_set_op_attrs(bio, fio->op, fio->op_flags);
|
||||
|
||||
__submit_bio(fio->sbi, bio, fio->type);
|
||||
|
||||
if (!is_read_io(fio->op))
|
||||
inc_page_count(fio->sbi, WB_DATA_TYPE(fio->page));
|
||||
|
||||
__submit_bio(fio->sbi, bio, fio->type);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -1457,6 +1457,7 @@ struct usb_descriptor_header **usb_copy_descriptors(
|
|||
static inline void usb_free_descriptors(struct usb_descriptor_header **v)
|
||||
{
|
||||
kfree(v);
|
||||
v = NULL;
|
||||
}
|
||||
|
||||
struct usb_function;
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
|
||||
/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
|
@ -101,6 +101,8 @@ extern int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
|
|||
|
||||
extern int scm_call2(u32 cmd_id, struct scm_desc *desc);
|
||||
|
||||
extern int scm_call2_noretry(u32 cmd_id, struct scm_desc *desc);
|
||||
|
||||
extern int scm_call2_atomic(u32 cmd_id, struct scm_desc *desc);
|
||||
|
||||
extern int scm_call_noalloc(u32 svc_id, u32 cmd_id, const void *cmd_buf,
|
||||
|
@ -150,6 +152,11 @@ static inline int scm_call2(u32 cmd_id, struct scm_desc *desc)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static inline int scm_call2_noretry(u32 cmd_id, struct scm_desc *desc)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int scm_call2_atomic(u32 cmd_id, struct scm_desc *desc)
|
||||
{
|
||||
return 0;
|
||||
|
|
|
@ -25,9 +25,16 @@
|
|||
#define ISP_STATS_STREAM_BIT 0x80000000
|
||||
|
||||
#define VFE_HW_LIMIT 1
|
||||
#define ISP_KERNEL_STATE 1
|
||||
|
||||
struct msm_vfe_cfg_cmd_list;
|
||||
|
||||
struct isp_kstate {
|
||||
uint32_t kernel_sofid;
|
||||
uint32_t drop_reconfig;
|
||||
uint32_t vfeid;
|
||||
};
|
||||
|
||||
enum ISP_START_PIXEL_PATTERN {
|
||||
ISP_BAYER_RGRGRG,
|
||||
ISP_BAYER_GRGRGR,
|
||||
|
|
File diff suppressed because it is too large
Load diff
Loading…
Add table
Reference in a new issue