msm: ADSPRPC: Use msm_ion_do_cache_op to flush userspace buffers

Flush userspace buffers with msm_ion_do_cache_op() on the buffer's ION
handle instead of calling dmac_flush_range() on the raw user address range.

Change-Id: Ice73eafac840bd1cabee0a2bfc8a641832a7d0c8
Acked-by: Bharath Kumar <bkumar@qti.qualcomm.com>
Signed-off-by: Tharun Kumar Merugu <mtharu@codeaurora.org>
Author: Tharun Kumar Merugu
Date:   2017-08-08 18:56:03 +05:30
Commit: 0cdcf0409b
Parent: 1b6a7f7ebb
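
In rough terms, the flush path introduced below works as follows: when the
fastrpc mapping carries an ION handle, cache maintenance is requested through
msm_ion_do_cache_op() with ION_IOC_CLEAN_INV_CACHES, so ION cleans and
invalidates the buffer via its own handle; only when no handle is available
does the code fall back to dmac_flush_range() on the raw virtual range. A
minimal sketch of that pattern is shown here; the helper name flush_user_buf()
and its parameters are illustrative only, not part of the driver.

#include <linux/msm_ion.h>	/* msm_ion_do_cache_op(), ION_IOC_CLEAN_INV_CACHES */
#include <asm/cacheflush.h>	/* dmac_flush_range() */

/* Illustrative only: flush one userspace buffer the way the diff below does. */
static void flush_user_buf(struct ion_client *client,
			   struct ion_handle *handle,
			   void *va, unsigned long len)
{
	if (handle)
		/* ION knows the backing pages, so clean+invalidate via the handle. */
		msm_ion_do_cache_op(client, handle, va, len,
				    ION_IOC_CLEAN_INV_CACHES);
	else
		/* No ION handle: fall back to flushing the raw address range. */
		dmac_flush_range(va, (char *)va + len);
}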


@@ -1254,9 +1254,18 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
 			continue;
-		if (rpra[i].buf.len && ctx->overps[oix]->mstart)
-			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
-			uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
+		if (rpra[i].buf.len && ctx->overps[oix]->mstart) {
+			if (map && map->handle)
+				msm_ion_do_cache_op(ctx->fl->apps->client,
+					map->handle,
+					uint64_to_ptr(rpra[i].buf.pv),
+					rpra[i].buf.len,
+					ION_IOC_CLEAN_INV_CACHES);
+			else
+				dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
+					uint64_to_ptr(rpra[i].buf.pv
+						+ rpra[i].buf.len));
+		}
 	}
 	PERF_END);
@@ -1267,11 +1276,6 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 		rpra[inh + i].h = ctx->lpra[inh + i].h;
 	}
-	if (!ctx->fl->sctx->smmu.coherent) {
-		PERF(ctx->fl->profile, ctx->fl->perf.flush,
-		dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
-		PERF_END);
-	}
 bail:
 	return err;
 }
@@ -1335,14 +1339,33 @@ static void inv_args_pre(struct smq_invoke_ctx *ctx)
 		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
 				buf_page_start(rpra[i].buf.pv))
 			continue;
-		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
-			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
-				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
+		if (!IS_CACHE_ALIGNED((uintptr_t)
+				uint64_to_ptr(rpra[i].buf.pv))) {
+			if (map && map->handle)
+				msm_ion_do_cache_op(ctx->fl->apps->client,
+					map->handle,
+					uint64_to_ptr(rpra[i].buf.pv),
+					sizeof(uintptr_t),
+					ION_IOC_CLEAN_INV_CACHES);
+			else
+				dmac_flush_range(
+					uint64_to_ptr(rpra[i].buf.pv), (char *)
+					uint64_to_ptr(rpra[i].buf.pv + 1));
+		}
 		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
 						rpra[i].buf.len);
-		if (!IS_CACHE_ALIGNED(end))
-			dmac_flush_range((char *)end,
-				(char *)end + 1);
+		if (!IS_CACHE_ALIGNED(end)) {
+			if (map && map->handle)
+				msm_ion_do_cache_op(ctx->fl->apps->client,
+					map->handle,
+					uint64_to_ptr(end),
+					sizeof(uintptr_t),
+					ION_IOC_CLEAN_INV_CACHES);
+			else
+				dmac_flush_range((char *)end,
+					(char *)end + 1);
+		}
 	}
 }
@@ -1351,7 +1374,6 @@ static void inv_args(struct smq_invoke_ctx *ctx)
 	int i, inbufs, outbufs;
 	uint32_t sc = ctx->sc;
 	remote_arg64_t *rpra = ctx->rpra;
-	int used = ctx->used;
 	int inv = 0;

 	inbufs = REMOTE_SCALARS_INBUFS(sc);
@@ -1384,8 +1406,6 @@ static void inv_args(struct smq_invoke_ctx *ctx)
 				+ rpra[i].buf.len));
 	}
-	if (inv || REMOTE_SCALARS_OUTHANDLES(sc))
-		dmac_inv_range(rpra, (char *)rpra + used);
 }

 static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,