Merge tag 'LA.UM.7.4.r1-05400-8x98.0' into lineage-16.0

"LA.UM.7.4.r1-05400-8x98.0"

Change-Id: Iaa6db184c519b1a6f8de9b989ba402f156bec25c
This commit is contained in:
codeworkx 2019-08-10 14:13:49 +02:00
commit f6334e63fa
25 changed files with 2326 additions and 408 deletions

View file

@ -178,10 +178,12 @@ struct smq_invoke_ctx {
int tgid;
remote_arg_t *lpra;
remote_arg64_t *rpra;
remote_arg64_t *lrpra; /* Local copy of rpra for put_args */
int *fds;
unsigned *attrs;
struct fastrpc_mmap **maps;
struct fastrpc_buf *buf;
struct fastrpc_buf *lbuf;
size_t used;
struct fastrpc_file *fl;
uint32_t sc;
@ -1089,6 +1091,7 @@ static void context_free(struct smq_invoke_ctx *ctx)
for (i = 0; i < nbufs; ++i)
fastrpc_mmap_free(ctx->maps[i]);
fastrpc_buf_free(ctx->buf, 1);
fastrpc_buf_free(ctx->lbuf, 1);
ctx->magic = 0;
ctx->ctxid = 0;
@ -1201,7 +1204,7 @@ static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
{
remote_arg64_t *rpra;
remote_arg64_t *rpra, *lrpra;
remote_arg_t *lpra = ctx->lpra;
struct smq_invoke_buf *list;
struct smq_phy_page *pages, *ipage;
@ -1210,10 +1213,11 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
int bufs = inbufs + outbufs;
uintptr_t args;
size_t rlen = 0, copylen = 0, metalen = 0;
size_t rlen = 0, copylen = 0, metalen = 0, lrpralen = 0;
int i, inh, oix;
int err = 0;
int mflags = 0;
DEFINE_DMA_ATTRS(ctx_attrs);
/* calculate size of the metadata */
rpra = NULL;
@ -1232,7 +1236,22 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
ipage += 1;
}
metalen = copylen = (size_t)&ipage[0];
/* calculate len requreed for copying */
/* allocate new local rpra buffer */
lrpralen = (size_t)&list[0];
if (lrpralen) {
err = fastrpc_buf_alloc(ctx->fl, lrpralen, ctx_attrs,
0, 0, &ctx->lbuf);
if (err)
goto bail;
}
if (ctx->lbuf->virt)
memset(ctx->lbuf->virt, 0, lrpralen);
lrpra = ctx->lbuf->virt;
ctx->lrpra = lrpra;
/* calculate len required for copying */
for (oix = 0; oix < inbufs + outbufs; ++oix) {
int i = ctx->overps[oix]->raix;
uintptr_t mstart, mend;
@ -1258,8 +1277,6 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
/* allocate new buffer */
if (copylen) {
DEFINE_DMA_ATTRS(ctx_attrs);
err = fastrpc_buf_alloc(ctx->fl, copylen, ctx_attrs,
0, 0, &ctx->buf);
if (err)
@ -1291,13 +1308,13 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
}
/* map ion buffers */
PERF(ctx->fl->profile, ctx->fl->perf.map,
for (i = 0; i < inbufs + outbufs; ++i) {
for (i = 0; rpra && lrpra && i < inbufs + outbufs; ++i) {
struct fastrpc_mmap *map = ctx->maps[i];
uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
size_t len = lpra[i].buf.len;
rpra[i].buf.pv = 0;
rpra[i].buf.len = len;
rpra[i].buf.pv = lrpra[i].buf.pv = 0;
rpra[i].buf.len = lrpra[i].buf.len = len;
if (!len)
continue;
if (map) {
@ -1325,14 +1342,14 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
pages[idx].addr = map->phys + offset;
pages[idx].size = num << PAGE_SHIFT;
}
rpra[i].buf.pv = buf;
rpra[i].buf.pv = lrpra[i].buf.pv = buf;
}
PERF_END);
/* copy non ion buffers */
PERF(ctx->fl->profile, ctx->fl->perf.copy,
rlen = copylen - metalen;
for (oix = 0; rpra && oix < inbufs + outbufs; ++oix) {
for (oix = 0; rpra && lrpra && oix < inbufs + outbufs; ++oix) {
int i = ctx->overps[oix]->raix;
struct fastrpc_mmap *map = ctx->maps[i];
size_t mlen;
@ -1351,7 +1368,8 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
VERIFY(err, rlen >= mlen);
if (err)
goto bail;
rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
rpra[i].buf.pv = lrpra[i].buf.pv =
(args - ctx->overps[oix]->offset);
pages[list[i].pgidx].addr = ctx->buf->phys -
ctx->overps[oix]->offset +
(copylen - rlen);
@ -1383,7 +1401,8 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
if (map && (map->attr & FASTRPC_ATTR_COHERENT))
continue;
if (rpra && rpra[i].buf.len && ctx->overps[oix]->mstart) {
if (rpra && lrpra && rpra[i].buf.len &&
ctx->overps[oix]->mstart) {
if (map && map->handle)
msm_ion_do_cache_op(ctx->fl->apps->client,
map->handle,
@ -1399,10 +1418,12 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
PERF_END);
inh = inbufs + outbufs;
for (i = 0; rpra && i < REMOTE_SCALARS_INHANDLES(sc); i++) {
rpra[inh + i].buf.pv = ptr_to_uint64(ctx->lpra[inh + i].buf.pv);
rpra[inh + i].buf.len = ctx->lpra[inh + i].buf.len;
rpra[inh + i].h = ctx->lpra[inh + i].h;
for (i = 0; rpra && lrpra && i < REMOTE_SCALARS_INHANDLES(sc); i++) {
rpra[inh + i].buf.pv = lrpra[inh + i].buf.pv =
ptr_to_uint64(ctx->lpra[inh + i].buf.pv);
rpra[inh + i].buf.len = lrpra[inh + i].buf.len =
ctx->lpra[inh + i].buf.len;
rpra[inh + i].h = lrpra[inh + i].h = ctx->lpra[inh + i].h;
}
bail:
@ -1413,7 +1434,7 @@ static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
remote_arg_t *upra)
{
uint32_t sc = ctx->sc;
remote_arg64_t *rpra = ctx->rpra;
remote_arg64_t *rpra = ctx->lrpra;
int i, inbufs, outbufs, outh, size;
int err = 0;
@ -1502,7 +1523,7 @@ static void inv_args(struct smq_invoke_ctx *ctx)
{
int i, inbufs, outbufs;
uint32_t sc = ctx->sc;
remote_arg64_t *rpra = ctx->rpra;
remote_arg64_t *rpra = ctx->lrpra;
int inv = 0;
inbufs = REMOTE_SCALARS_INBUFS(sc);

View file

@ -643,7 +643,8 @@ static int diag_cmd_get_build_mask(unsigned char *src_buf, int src_len,
struct diag_build_mask_req_t *req = NULL;
struct diag_msg_build_mask_t rsp;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
if (!src_buf || !dest_buf || dest_len <= 0 ||
src_len < sizeof(struct diag_build_mask_req_t)) {
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d\n",
__func__, src_buf, src_len, dest_buf, dest_len);
return -EINVAL;
@ -704,7 +705,7 @@ static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len,
info = diag_md_session_get_pid(pid);
mask_info = (!info) ? &msg_mask : info->msg_mask;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
if (!src_buf || !dest_buf || dest_len <= 0 ||
!mask_info || (src_len < sizeof(struct diag_build_mask_req_t))) {
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
__func__, src_buf, src_len, dest_buf, dest_len,
@ -786,8 +787,8 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
info = diag_md_session_get_pid(pid);
mask_info = (!info) ? &msg_mask : info->msg_mask;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
!mask_info) {
if (!src_buf || !dest_buf || dest_len <= 0 || !mask_info ||
(src_len < sizeof(struct diag_msg_build_mask_t))) {
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
__func__, src_buf, src_len, dest_buf, dest_len,
mask_info);
@ -871,7 +872,9 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
break;
}
mask_size = mask_size * sizeof(uint32_t);
memcpy(mask->ptr + offset, src_buf + header_len, mask_size);
if (mask_size && src_len >= header_len + mask_size)
memcpy(mask->ptr + offset, src_buf + header_len,
mask_size);
mutex_unlock(&mask->lock);
mask_info->status = DIAG_CTRL_MASK_VALID;
break;
@ -928,8 +931,8 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
info = diag_md_session_get_pid(pid);
mask_info = (!info) ? &msg_mask : info->msg_mask;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
!mask_info) {
if (!src_buf || !dest_buf || dest_len <= 0 || !mask_info ||
(src_len < sizeof(struct diag_msg_config_rsp_t))) {
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
__func__, src_buf, src_len, dest_buf, dest_len,
mask_info);
@ -1048,8 +1051,8 @@ static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len,
mutex_lock(&driver->md_session_lock);
info = diag_md_session_get_pid(pid);
mask_info = (!info) ? &event_mask : info->event_mask;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
!mask_info) {
if (!src_buf || !dest_buf || dest_len <= 0 || !mask_info ||
src_len < sizeof(struct diag_event_mask_config_t)) {
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
__func__, src_buf, src_len, dest_buf, dest_len,
mask_info);
@ -1072,7 +1075,8 @@ static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len,
}
mutex_lock(&mask_info->lock);
memcpy(mask_info->ptr, src_buf + header_len, mask_len);
if (src_len >= header_len + mask_len)
memcpy(mask_info->ptr, src_buf + header_len, mask_len);
mask_info->status = DIAG_CTRL_MASK_VALID;
mutex_unlock(&mask_info->lock);
mutex_unlock(&driver->md_session_lock);
@ -1116,8 +1120,8 @@ static int diag_cmd_toggle_events(unsigned char *src_buf, int src_len,
mutex_lock(&driver->md_session_lock);
info = diag_md_session_get_pid(pid);
mask_info = (!info) ? &event_mask : info->event_mask;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
!mask_info) {
if (!src_buf || !dest_buf || src_len <= sizeof(uint8_t) ||
dest_len <= 0 || !mask_info) {
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
__func__, src_buf, src_len, dest_buf, dest_len,
mask_info);
@ -1184,8 +1188,8 @@ static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len,
info = diag_md_session_get_pid(pid);
mask_info = (!info) ? &log_mask : info->log_mask;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
!mask_info) {
if (!src_buf || !dest_buf || dest_len <= 0 || !mask_info ||
src_len < sizeof(struct diag_log_config_req_t)) {
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
__func__, src_buf, src_len, dest_buf, dest_len,
mask_info);
@ -1328,8 +1332,8 @@ static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len,
info = diag_md_session_get_pid(pid);
mask_info = (!info) ? &log_mask : info->log_mask;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
!mask_info) {
if (!src_buf || !dest_buf || dest_len <= 0 || !mask_info ||
src_len < sizeof(struct diag_log_config_req_t)) {
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
__func__, src_buf, src_len, dest_buf, dest_len,
mask_info);
@ -1403,7 +1407,7 @@ static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len,
mask->range_tools = mask_size;
}
req->num_items = mask->num_items_tools;
if (mask_size > 0)
if (mask_size > 0 && src_len >= read_len + mask_size)
memcpy(mask->ptr, src_buf + read_len, mask_size);
DIAG_LOG(DIAG_DEBUG_MASKS,
"copying log mask, e %d num %d range %d size %d\n",

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2008-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -1187,15 +1187,19 @@ static int diag_process_userspace_remote(int proc, void *buf, int len)
}
#endif
static int mask_request_validate(unsigned char mask_buf[])
static int mask_request_validate(unsigned char mask_buf[], int len)
{
uint8_t packet_id;
uint8_t subsys_id;
uint16_t ss_cmd;
if (len <= 0)
return 0;
packet_id = mask_buf[0];
if (packet_id == DIAG_CMD_DIAG_SUBSYS_DELAY) {
if (len < 2*sizeof(uint8_t) + sizeof(uint16_t))
return 0;
subsys_id = mask_buf[1];
ss_cmd = *(uint16_t *)(mask_buf + 2);
switch (subsys_id) {
@ -1211,6 +1215,8 @@ static int mask_request_validate(unsigned char mask_buf[])
return 0;
}
} else if (packet_id == 0x4B) {
if (len < 2*sizeof(uint8_t) + sizeof(uint16_t))
return 0;
subsys_id = mask_buf[1];
ss_cmd = *(uint16_t *)(mask_buf + 2);
/* Packets with SSID which are allowed */
@ -2892,7 +2898,8 @@ static int diag_user_process_raw_data(const char __user *buf, int len)
}
/* Check for proc_type */
remote_proc = diag_get_remote(*(int *)user_space_data);
if (len >= sizeof(int))
remote_proc = diag_get_remote(*(int *)user_space_data);
if (remote_proc) {
token_offset = sizeof(int);
if (len <= MIN_SIZ_ALLOW) {
@ -2906,7 +2913,7 @@ static int diag_user_process_raw_data(const char __user *buf, int len)
}
if (driver->mask_check) {
if (!mask_request_validate(user_space_data +
token_offset)) {
token_offset, len)) {
pr_alert("diag: mask request Invalid\n");
diagmem_free(driver, user_space_data, mempool);
user_space_data = NULL;
@ -2984,7 +2991,7 @@ static int diag_user_process_userspace_data(const char __user *buf, int len)
/* Check masks for On-Device logging */
if (driver->mask_check) {
if (!mask_request_validate(driver->user_space_data_buf +
token_offset)) {
token_offset, len)) {
pr_alert("diag: mask request Invalid\n");
return -EFAULT;
}

View file

@ -588,7 +588,8 @@ static int msm_isp_composite_irq(struct vfe_device *vfe_dev,
*
* Returns void
*/
static void msm_isp_update_framedrop_reg(struct msm_vfe_axi_stream *stream_info)
static void msm_isp_update_framedrop_reg(struct msm_vfe_axi_stream *stream_info,
uint32_t drop_reconfig)
{
if (stream_info->stream_type == BURST_STREAM) {
if (stream_info->runtime_num_burst_capture == 0 ||
@ -598,7 +599,8 @@ static void msm_isp_update_framedrop_reg(struct msm_vfe_axi_stream *stream_info)
MSM_VFE_STREAM_STOP_PERIOD;
}
if (stream_info->undelivered_request_cnt > 0)
if (stream_info->undelivered_request_cnt > 0 &&
drop_reconfig != 1)
stream_info->current_framedrop_period =
MSM_VFE_STREAM_STOP_PERIOD;
@ -653,7 +655,8 @@ void msm_isp_process_reg_upd_epoch_irq(struct vfe_device *vfe_dev,
break;
case MSM_ISP_COMP_IRQ_EPOCH:
if (stream_info->state == ACTIVE)
msm_isp_update_framedrop_reg(stream_info);
msm_isp_update_framedrop_reg(stream_info,
vfe_dev->isp_page->drop_reconfig);
break;
default:
WARN(1, "Invalid irq %d\n", irq);

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
/* Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -761,6 +761,9 @@ static int32_t msm_actuator_bivcm_move_focus(
a_ctrl->curr_step_pos, dest_step_pos, curr_lens_pos);
while (a_ctrl->curr_step_pos != dest_step_pos) {
if (a_ctrl->curr_region_index >= a_ctrl->region_size)
break;
step_boundary =
a_ctrl->region_params[a_ctrl->curr_region_index].
step_bound[dir];

View file

@ -824,10 +824,11 @@ int ipa3_qmi_ul_filter_request_send(
if (req->firewall_rules_list[i].ip_type !=
QMI_IPA_IP_TYPE_V4_V01 &&
req->firewall_rules_list[i].ip_type !=
QMI_IPA_IP_TYPE_V6_V01)
QMI_IPA_IP_TYPE_V6_V01) {
IPAWANERR("Invalid IP type %d\n",
req->firewall_rules_list[i].ip_type);
return -EINVAL;
return -EINVAL;
}
}
req_desc.max_msg_len =

View file

@ -3597,6 +3597,15 @@ int rmnet_ipa3_send_lan_client_msg(
IPAWANERR("Can't allocate memory for tether_info\n");
return -ENOMEM;
}
if (data->client_event != IPA_PER_CLIENT_STATS_CONNECT_EVENT &&
data->client_event != IPA_PER_CLIENT_STATS_DISCONNECT_EVENT) {
IPAWANERR("Wrong event given. Event:- %d\n",
data->client_event);
kfree(lan_client);
return -EINVAL;
}
data->lan_client.lanIface[IPA_RESOURCE_NAME_MAX-1] = '\0';
memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
memcpy(lan_client, &data->lan_client,
sizeof(struct ipa_lan_client_msg));

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -181,6 +181,20 @@ static struct restart_notifier_block restart_notifiers[] = {
static int init_smem_remote_spinlock(void);
/**
* smem_get_toc() - Used for getting partitions TOC
*
* @return - Base address of partitions TOC
*
* Helper function to get the base address of the partition TOC,
* which is present in the top 4K of the first smem region.
*/
static struct smem_toc __iomem *smem_get_toc(void)
{
return smem_areas[0].virt_addr +
smem_areas[0].size - 4 * 1024;
}
/**
* is_probe_done() - Did the probe function successfully complete
*
@ -315,6 +329,7 @@ static void *__smem_get_entry_nonsecure(unsigned id, unsigned *size,
int use_spinlocks = spinlocks_initialized && use_rspinlock;
void *ret = 0;
unsigned long flags = 0;
uint32_t e_size;
int rc;
if (!skip_init_check && !smem_initialized_check())
@ -333,7 +348,11 @@ static void *__smem_get_entry_nonsecure(unsigned id, unsigned *size,
if (toc[id].allocated) {
phys_addr_t phys_base;
*size = toc[id].size;
e_size = toc[id].size;
if (e_size > smem_ram_size)
return ret;
*size = e_size;
barrier();
phys_base = toc[id].reserved & BASE_ADDR_MASK;
@ -368,12 +387,19 @@ static void *__smem_get_entry_secure(unsigned id,
bool skip_init_check,
bool use_rspinlock)
{
struct smem_partition_header *hdr;
unsigned long lflags = 0;
void *item = NULL;
struct smem_partition_allocation_header *alloc_hdr;
struct smem_partition_header *hdr;
uint32_t offset_free_uncached;
struct smem_toc __iomem *toc;
uint32_t offset_free_cached;
unsigned long lflags = 0;
uint32_t partition_size;
uint32_t partition_num;
uint32_t padding_data;
uint32_t padding_hdr;
uint32_t a_hdr_size;
uint32_t item_size;
void *item = NULL;
int rc;
SMEM_DBG("%s(%u, %u, %u, %d, %d)\n", __func__, id, to_proc,
@ -393,9 +419,13 @@ static void *__smem_get_entry_secure(unsigned id,
return NULL;
}
toc = smem_get_toc();
if (flags & SMEM_ANY_HOST_FLAG || !partitions[to_proc].offset) {
if (use_comm_partition) {
partition_num = comm_partition.partition_num;
partition_size =
readl_relaxed(&toc->entry[partition_num].size);
hdr = smem_areas[0].virt_addr + comm_partition.offset;
} else {
return __smem_get_entry_nonsecure(id, size,
@ -403,6 +433,7 @@ static void *__smem_get_entry_secure(unsigned id,
}
} else {
partition_num = partitions[to_proc].partition_num;
partition_size = readl_relaxed(&toc->entry[partition_num].size);
hdr = smem_areas[0].virt_addr + partitions[to_proc].offset;
}
if (unlikely(!spinlocks_initialized)) {
@ -433,11 +464,20 @@ static void *__smem_get_entry_secure(unsigned id,
if (flags & SMEM_ITEM_CACHED_FLAG) {
a_hdr_size = ALIGN(sizeof(*alloc_hdr),
partitions[to_proc].size_cacheline);
for (alloc_hdr = (void *)(hdr) + hdr->size - a_hdr_size;
offset_free_cached = hdr->offset_free_cached;
if (WARN_ON(offset_free_cached > partition_size))
return NULL;
for (alloc_hdr = (void *)(hdr) + partition_size - a_hdr_size;
(void *)(alloc_hdr) > (void *)(hdr) +
hdr->offset_free_cached;
offset_free_cached;
alloc_hdr = (void *)(alloc_hdr) -
alloc_hdr->size - a_hdr_size) {
item_size - a_hdr_size) {
item_size = alloc_hdr->size;
padding_data = alloc_hdr->padding_data;
if (WARN_ON(padding_data > item_size
|| item_size > partition_size))
return NULL;
if (alloc_hdr->canary != SMEM_ALLOCATION_CANARY) {
LOG_ERR(
"%s: SMEM corruption detected. Partition %d to %d at %p\n",
@ -450,20 +490,30 @@ static void *__smem_get_entry_secure(unsigned id,
}
if (alloc_hdr->smem_type == id) {
/* 8 byte alignment to match legacy */
*size = ALIGN(alloc_hdr->size -
alloc_hdr->padding_data, 8);
item = (void *)(alloc_hdr) - alloc_hdr->size;
*size = ALIGN(item_size - padding_data, 8);
item = (void *)(alloc_hdr) - item_size;
break;
}
}
} else {
offset_free_uncached = hdr->offset_free_uncached;
if (WARN_ON(offset_free_uncached > partition_size))
return NULL;
for (alloc_hdr = (void *)(hdr) + sizeof(*hdr);
(void *)(alloc_hdr) < (void *)(hdr) +
hdr->offset_free_uncached;
offset_free_uncached;
alloc_hdr = (void *)(alloc_hdr) +
sizeof(*alloc_hdr) +
alloc_hdr->padding_hdr +
alloc_hdr->size) {
padding_hdr +
item_size) {
padding_hdr = alloc_hdr->padding_hdr;
padding_data = alloc_hdr->padding_data;
item_size = alloc_hdr->size;
if (WARN_ON(padding_hdr > partition_size
|| item_size > partition_size
|| padding_data > item_size))
return NULL;
if (alloc_hdr->canary != SMEM_ALLOCATION_CANARY) {
LOG_ERR(
"%s: SMEM corruption detected. Partition %d to %d at %p\n",
@ -476,11 +526,10 @@ static void *__smem_get_entry_secure(unsigned id,
}
if (alloc_hdr->smem_type == id) {
/* 8 byte alignment to match legacy */
*size = ALIGN(alloc_hdr->size -
alloc_hdr->padding_data, 8);
*size = ALIGN(item_size - padding_data, 8);
item = (void *)(alloc_hdr) +
sizeof(*alloc_hdr) +
alloc_hdr->padding_hdr;
padding_hdr;
break;
}
}
@ -572,10 +621,17 @@ static void *alloc_item_nonsecure(unsigned id, unsigned size_in)
void *smem_base = smem_ram_base;
struct smem_shared *shared = smem_base;
struct smem_heap_entry *toc = shared->heap_toc;
uint32_t free_offset, heap_remaining;
void *ret = NULL;
if (shared->heap_info.heap_remaining >= size_in) {
toc[id].offset = shared->heap_info.free_offset;
heap_remaining = shared->heap_info.heap_remaining;
free_offset = shared->heap_info.free_offset;
if (WARN_ON(heap_remaining > smem_ram_size
|| free_offset > smem_ram_size))
return NULL;
if (heap_remaining >= size_in) {
toc[id].offset = free_offset;
toc[id].size = size_in;
/*
* wmb() is necessary to ensure the allocation data is
@ -587,7 +643,7 @@ static void *alloc_item_nonsecure(unsigned id, unsigned size_in)
shared->heap_info.free_offset += size_in;
shared->heap_info.heap_remaining -= size_in;
ret = smem_base + toc[id].offset;
ret = smem_base + free_offset;
/*
* wmb() is necessary to ensure the heap data is consistent
* before continuing to prevent race conditions with remote
@ -623,11 +679,15 @@ static void *alloc_item_secure(unsigned id, unsigned size_in, unsigned to_proc,
void *smem_base = smem_ram_base;
struct smem_partition_header *hdr;
struct smem_partition_allocation_header *alloc_hdr;
uint32_t offset_free_uncached;
struct smem_toc __iomem *toc;
uint32_t offset_free_cached;
uint32_t partition_size;
uint32_t partition_num;
uint32_t a_hdr_size;
uint32_t a_data_size;
uint32_t size_cacheline;
uint32_t free_space;
uint32_t partition_num;
void *ret = NULL;
if (to_proc == SMEM_COMM_HOST) {
@ -654,27 +714,35 @@ static void *alloc_item_secure(unsigned id, unsigned size_in, unsigned to_proc,
BUG();
}
free_space = hdr->offset_free_cached -
hdr->offset_free_uncached;
toc = smem_get_toc();
partition_size = readl_relaxed(&toc->entry[partition_num].size);
offset_free_cached = hdr->offset_free_cached;
offset_free_uncached = hdr->offset_free_uncached;
if (WARN_ON(offset_free_uncached > offset_free_cached
|| offset_free_cached > partition_size))
return NULL;
free_space = offset_free_cached - offset_free_uncached;
if (flags & SMEM_ITEM_CACHED_FLAG) {
a_hdr_size = ALIGN(sizeof(*alloc_hdr), size_cacheline);
a_data_size = ALIGN(size_in, size_cacheline);
if (free_space < a_hdr_size + a_data_size) {
if (free_space < a_hdr_size + a_data_size
|| free_space < size_in) {
SMEM_INFO(
"%s: id %u not enough memory %u (required %u)\n",
__func__, id, free_space,
a_hdr_size + a_data_size);
"%s: id %u not enough memory %u (required %u), (size_in %u)\n",
__func__, id, free_space,
a_hdr_size + a_data_size, size_in);
return ret;
}
alloc_hdr = (void *)(hdr) + hdr->offset_free_cached -
a_hdr_size;
alloc_hdr = (void *)(hdr) + offset_free_cached - a_hdr_size;
alloc_hdr->canary = SMEM_ALLOCATION_CANARY;
alloc_hdr->smem_type = id;
alloc_hdr->size = a_data_size;
alloc_hdr->padding_data = a_data_size - size_in;
alloc_hdr->padding_hdr = a_hdr_size - sizeof(*alloc_hdr);
hdr->offset_free_cached = hdr->offset_free_cached -
hdr->offset_free_cached = offset_free_cached -
a_hdr_size - a_data_size;
ret = (void *)(alloc_hdr) - a_data_size;
/*
@ -689,20 +757,21 @@ static void *alloc_item_secure(unsigned id, unsigned size_in, unsigned to_proc,
} else {
a_hdr_size = sizeof(*alloc_hdr);
a_data_size = ALIGN(size_in, 8);
if (free_space < a_hdr_size + a_data_size) {
if (free_space < a_hdr_size + a_data_size
|| free_space < size_in) {
SMEM_INFO(
"%s: id %u not enough memory %u (required %u)\n",
__func__, id, free_space,
a_hdr_size + a_data_size);
"%s: id %u not enough memory %u (required %u) (size_in %u)\n",
__func__, id, free_space,
a_hdr_size + a_data_size, size_in);
return ret;
}
alloc_hdr = (void *)(hdr) + hdr->offset_free_uncached;
alloc_hdr = (void *)(hdr) + offset_free_uncached;
alloc_hdr->canary = SMEM_ALLOCATION_CANARY;
alloc_hdr->smem_type = id;
alloc_hdr->size = a_data_size;
alloc_hdr->padding_data = a_data_size - size_in;
alloc_hdr->padding_hdr = a_hdr_size - sizeof(*alloc_hdr);
hdr->offset_free_uncached = hdr->offset_free_uncached +
hdr->offset_free_uncached = offset_free_uncached +
a_hdr_size + a_data_size;
ret = alloc_hdr + 1;
}
@ -894,6 +963,12 @@ unsigned smem_get_free_space(unsigned to_proc)
{
struct smem_partition_header *hdr;
struct smem_shared *shared;
uint32_t offset_free_uncached;
struct smem_toc __iomem *toc;
uint32_t offset_free_cached;
uint32_t heap_remaining;
uint32_t p_size;
uint32_t p_num;
if (to_proc >= NUM_SMEM_SUBSYSTEMS) {
pr_err("%s: invalid to_proc:%d\n", __func__, to_proc);
@ -908,11 +983,24 @@ unsigned smem_get_free_space(unsigned to_proc)
return UINT_MAX;
}
hdr = smem_areas[0].virt_addr + partitions[to_proc].offset;
return hdr->offset_free_cached - hdr->offset_free_uncached;
} else {
shared = smem_ram_base;
return shared->heap_info.heap_remaining;
offset_free_cached = hdr->offset_free_cached;
offset_free_uncached = hdr->offset_free_uncached;
toc = smem_get_toc();
p_num = partitions[to_proc].partition_num;
p_size = readl_relaxed(&toc->entry[p_num].size);
if (WARN_ON(offset_free_uncached > offset_free_cached
|| offset_free_cached > p_size))
return -EINVAL;
return offset_free_cached - offset_free_uncached;
}
shared = smem_ram_base;
heap_remaining = shared->heap_info.heap_remaining;
if (WARN_ON(heap_remaining > smem_ram_size))
return -EINVAL;
return heap_remaining;
}
EXPORT_SYMBOL(smem_get_free_space);
@ -1217,8 +1305,8 @@ static void smem_init_security_partition(struct smem_toc_entry *entry,
LOG_ERR("Smem partition %d hdr magic is bad\n", num);
BUG();
}
if (!hdr->size) {
LOG_ERR("Smem partition %d size is 0\n", num);
if (hdr->size != entry->size) {
LOG_ERR("Smem partition %d size is invalid\n", num);
BUG();
}
if (hdr->offset_free_uncached > hdr->size) {

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
/* Copyright (c) 2016-2017,2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -302,7 +302,7 @@ static int marshal_in(const struct smcinvoke_cmd_req *req,
const union smcinvoke_arg *args_buf, uint32_t tzhandle,
uint8_t *buf, size_t buf_size, struct file **arr_filp)
{
int ret = -EINVAL, i = 0;
int ret = -EINVAL, i = 0, j = 0;
union smcinvoke_tz_args *tz_args = NULL;
struct smcinvoke_msg_hdr msg_hdr = {tzhandle, req->op, req->counts};
uint32_t offset = sizeof(struct smcinvoke_msg_hdr) +
@ -347,7 +347,7 @@ static int marshal_in(const struct smcinvoke_cmd_req *req,
}
FOR_ARGS(i, req->counts, OI) {
if (get_tzhandle_from_fd(args_buf[i].o.fd,
&arr_filp[i], &(tz_args->tzhandle)))
&arr_filp[j++], &(tz_args->tzhandle)))
goto out;
tz_args++;
}

View file

@ -1,6 +1,6 @@
/*
* Copyright (c) 2015, Sony Mobile Communications AB.
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
* Copyright (c) 2012-2013, 2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -228,7 +228,7 @@ struct smem_region {
* struct qcom_smem - device data for the smem device
* @dev: device pointer
* @hwlock: reference to a hwspinlock
* @partitions: list of pointers to partitions affecting the current
* @ptable_entries: list of pointers to partition table entries of the current
* processor/host
* @num_regions: number of @regions
* @regions: list of the memory regions defining the shared memory
@ -238,12 +238,24 @@ struct qcom_smem {
struct hwspinlock *hwlock;
struct smem_partition_header *partitions[SMEM_HOST_COUNT];
struct smem_ptable_entry *ptable_entries[SMEM_HOST_COUNT];
unsigned num_regions;
struct smem_region regions[0];
};
/* Pointer to the one and only smem handle */
static struct qcom_smem *__smem;
/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT 1000
static struct smem_partition_header *
ptable_entry_to_phdr(struct smem_ptable_entry *entry)
{
return __smem->regions[0].virt_base + le32_to_cpu(entry->offset);
}
static struct smem_private_entry *
phdr_to_last_private_entry(struct smem_partition_header *phdr)
{
@ -283,32 +295,33 @@ static void *entry_to_item(struct smem_private_entry *e)
return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
}
/* Pointer to the one and only smem handle */
static struct qcom_smem *__smem;
/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT 1000
static int qcom_smem_alloc_private(struct qcom_smem *smem,
unsigned host,
struct smem_ptable_entry *entry,
unsigned item,
size_t size)
{
struct smem_partition_header *phdr;
struct smem_private_entry *hdr, *end;
struct smem_partition_header *phdr;
size_t alloc_size;
void *cached;
void *p_end;
phdr = ptable_entry_to_phdr(entry);
p_end = (void *)phdr + le32_to_cpu(entry->size);
phdr = smem->partitions[host];
hdr = phdr_to_first_private_entry(phdr);
end = phdr_to_last_private_entry(phdr);
cached = phdr_to_first_cached_entry(phdr);
if (WARN_ON((void *)end > p_end || (void *)cached > p_end))
return -EINVAL;
while (hdr < end) {
if (hdr->canary != SMEM_PRIVATE_CANARY) {
dev_err(smem->dev,
"Found invalid canary in host %d partition\n",
host);
"Found invalid canary in host %d:%d partition\n",
phdr->host0, phdr->host1);
return -EINVAL;
}
@ -317,6 +330,8 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
hdr = private_entry_next(hdr);
}
if (WARN_ON((void *)hdr > p_end))
return -EINVAL;
/* Check that we don't grow into the cached region */
alloc_size = sizeof(*hdr) + ALIGN(size, 8);
@ -389,6 +404,7 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
*/
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
struct smem_ptable_entry *entry;
unsigned long flags;
int ret;
@ -407,10 +423,12 @@ int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
if (ret)
return ret;
if (host < SMEM_HOST_COUNT && __smem->partitions[host])
ret = qcom_smem_alloc_private(__smem, host, item, size);
else
if (host < SMEM_HOST_COUNT && __smem->ptable_entries[host]) {
entry = __smem->ptable_entries[host];
ret = qcom_smem_alloc_private(__smem, entry, item, size);
} else {
ret = qcom_smem_alloc_global(__smem, item, size);
}
hwspin_unlock_irqrestore(__smem->hwlock, &flags);
@ -422,9 +440,11 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
unsigned item,
size_t *size)
{
struct smem_global_entry *entry;
struct smem_header *header;
struct smem_region *area;
struct smem_global_entry *entry;
u64 entry_offset;
u32 e_size;
u32 aux_base;
unsigned i;
@ -442,9 +462,16 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
area = &smem->regions[i];
if (area->aux_base == aux_base || !aux_base) {
e_size = le32_to_cpu(entry->size);
entry_offset = le32_to_cpu(entry->offset);
if (WARN_ON(e_size + entry_offset > area->size))
return ERR_PTR(-EINVAL);
if (size != NULL)
*size = le32_to_cpu(entry->size);
return area->virt_base + le32_to_cpu(entry->offset);
*size = e_size;
return area->virt_base + entry_offset;
}
}
@ -452,35 +479,58 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
}
static void *qcom_smem_get_private(struct qcom_smem *smem,
unsigned host,
struct smem_ptable_entry *entry,
unsigned item,
size_t *size)
{
struct smem_partition_header *phdr;
struct smem_private_entry *e, *end;
void *item_ptr, *p_end;
u32 partition_size;
u32 padding_data;
u32 e_size;
phdr = ptable_entry_to_phdr(entry);
partition_size = le32_to_cpu(entry->size);
p_end = (void *)phdr + partition_size;
phdr = smem->partitions[host];
e = phdr_to_first_private_entry(phdr);
end = phdr_to_last_private_entry(phdr);
if (WARN_ON((void *)end > p_end))
return ERR_PTR(-EINVAL);
while (e < end) {
if (e->canary != SMEM_PRIVATE_CANARY) {
dev_err(smem->dev,
"Found invalid canary in host %d partition\n",
host);
"Found invalid canary in host %d:%d partition\n",
phdr->host0, phdr->host1);
return ERR_PTR(-EINVAL);
}
if (le16_to_cpu(e->item) == item) {
if (size != NULL)
*size = le32_to_cpu(e->size) -
le16_to_cpu(e->padding_data);
if (size != NULL) {
e_size = le32_to_cpu(e->size);
padding_data = le16_to_cpu(e->padding_data);
return entry_to_item(e);
if (e_size < partition_size
&& padding_data < e_size)
*size = e_size - padding_data;
else
return ERR_PTR(-EINVAL);
}
item_ptr = entry_to_item(e);
if (WARN_ON(item_ptr > p_end))
return ERR_PTR(-EINVAL);
return item_ptr;
}
e = private_entry_next(e);
}
if (WARN_ON((void *)e > p_end))
return ERR_PTR(-EINVAL);
return ERR_PTR(-ENOENT);
}
@ -496,6 +546,7 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
*/
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
struct smem_ptable_entry *entry;
unsigned long flags;
int ret;
void *ptr = ERR_PTR(-EPROBE_DEFER);
@ -509,11 +560,12 @@ void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
if (ret)
return ERR_PTR(ret);
if (host < SMEM_HOST_COUNT && __smem->partitions[host])
ptr = qcom_smem_get_private(__smem, host, item, size);
else
if (host < SMEM_HOST_COUNT && __smem->ptable_entries[host]) {
entry = __smem->ptable_entries[host];
ptr = qcom_smem_get_private(__smem, entry, item, size);
} else {
ptr = qcom_smem_get_global(__smem, item, size);
}
hwspin_unlock_irqrestore(__smem->hwlock, &flags);
return ptr;
@ -531,19 +583,28 @@ EXPORT_SYMBOL(qcom_smem_get);
int qcom_smem_get_free_space(unsigned host)
{
struct smem_partition_header *phdr;
struct smem_ptable_entry *entry;
struct smem_header *header;
unsigned ret;
if (!__smem)
return -EPROBE_DEFER;
if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
phdr = __smem->partitions[host];
if (host < SMEM_HOST_COUNT && __smem->ptable_entries[host]) {
entry = __smem->ptable_entries[host];
phdr = ptable_entry_to_phdr(entry);
ret = le32_to_cpu(phdr->offset_free_cached) -
le32_to_cpu(phdr->offset_free_uncached);
if (ret > le32_to_cpu(entry->size))
return -EINVAL;
} else {
header = __smem->regions[0].virt_base;
ret = le32_to_cpu(header->available);
if (ret > __smem->regions[0].size)
return -EINVAL;
}
return ret;
@ -616,7 +677,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
return -EINVAL;
}
if (smem->partitions[remote_host]) {
if (smem->ptable_entries[remote_host]) {
dev_err(smem->dev,
"Already found a partition for host %d\n",
remote_host);
@ -658,7 +719,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
return -EINVAL;
}
smem->partitions[remote_host] = header;
smem->ptable_entries[remote_host] = entry;
}
return 0;

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -131,6 +131,34 @@ struct dwc3_msm_req_complete {
struct usb_request *req);
};
enum dwc3_drd_state {
DRD_STATE_UNDEFINED = 0,
DRD_STATE_IDLE,
DRD_STATE_PERIPHERAL,
DRD_STATE_PERIPHERAL_SUSPEND,
DRD_STATE_HOST_IDLE,
DRD_STATE_HOST,
};
static const char *const state_names[] = {
[DRD_STATE_UNDEFINED] = "undefined",
[DRD_STATE_IDLE] = "idle",
[DRD_STATE_PERIPHERAL] = "peripheral",
[DRD_STATE_PERIPHERAL_SUSPEND] = "peripheral_suspend",
[DRD_STATE_HOST_IDLE] = "host_idle",
[DRD_STATE_HOST] = "host",
};
static const char *dwc3_drd_state_string(enum dwc3_drd_state state)
{
if (state < 0 || state >= ARRAY_SIZE(state_names))
return "UNKNOWN";
return state_names[state];
}
enum dwc3_id_state {
DWC3_ID_GROUND = 0,
DWC3_ID_FLOAT,
@ -148,6 +176,7 @@ enum plug_orientation {
#define ID 0
#define B_SESS_VLD 1
#define B_SUSPEND 2
#define WAIT_FOR_LPM 3
#define PM_QOS_SAMPLE_SEC 2
#define PM_QOS_THRESHOLD 400
@ -194,7 +223,7 @@ struct dwc3_msm {
unsigned long inputs;
unsigned max_power;
bool charging_disabled;
enum usb_otg_state otg_state;
enum dwc3_drd_state drd_state;
enum usb_chg_state chg_state;
struct work_struct bus_vote_w;
unsigned int bus_vote;
@ -1929,7 +1958,7 @@ static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc)
atomic_set(&mdwc->in_p3, val == DWC3_LINK_STATE_U3);
dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);
if (mdwc->otg_state == OTG_STATE_A_HOST) {
if (mdwc->drd_state == DRD_STATE_HOST) {
dev_dbg(mdwc->dev, "%s: set the core in host mode\n",
__func__);
dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
@ -2064,7 +2093,7 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc, bool hibernation)
}
if (!mdwc->vbus_active && dwc->is_drd &&
mdwc->otg_state == OTG_STATE_B_PERIPHERAL) {
mdwc->drd_state == DRD_STATE_PERIPHERAL) {
/*
* In some cases, the pm_runtime_suspend may be called by
* usb_bam when there is pending lpm flag. However, if this is
@ -2086,7 +2115,7 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc, bool hibernation)
* then check controller state of L2 and break
* LPM sequence. Check this for device bus suspend case.
*/
if ((dwc->is_drd && mdwc->otg_state == OTG_STATE_B_SUSPEND) &&
if ((dwc->is_drd && mdwc->drd_state == DRD_STATE_PERIPHERAL_SUSPEND) &&
(dwc->gadget.state != USB_STATE_CONFIGURED)) {
pr_err("%s(): Trying to go in LPM with state:%d\n",
__func__, dwc->gadget.state);
@ -2199,7 +2228,13 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc, bool hibernation)
}
dev_info(mdwc->dev, "DWC3 in low power mode\n");
/* kick_sm if it is waiting for lpm sequence to finish */
if (test_and_clear_bit(WAIT_FOR_LPM, &mdwc->inputs))
schedule_delayed_work(&mdwc->sm_work, 0);
mutex_unlock(&mdwc->suspend_resume_mutex);
return 0;
}
@ -3710,6 +3745,9 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
if (!mdwc->host_only_mode)
dwc3_post_host_reset_core_init(dwc);
/* wait for LPM, to ensure h/w is reset after stop_host */
set_bit(WAIT_FOR_LPM, &mdwc->inputs);
pm_runtime_mark_last_busy(mdwc->dev);
pm_runtime_put_sync_autosuspend(mdwc->dev);
dbg_event(0xFF, "StopHost psync",
@ -3790,6 +3828,9 @@ static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
dwc3_override_vbus_status(mdwc, false);
dwc3_usb3_phy_suspend(dwc, false);
/* wait for LPM, to ensure h/w is reset after stop_peripheral */
set_bit(WAIT_FOR_LPM, &mdwc->inputs);
}
pm_runtime_put_sync(mdwc->dev);
@ -3859,7 +3900,7 @@ set_prop:
*
* @w: Pointer to the dwc3 otg workqueue
*
* NOTE: After any change in otg_state, we must reschdule the state machine.
* NOTE: After any change in drd_state, we must reschdule the state machine.
*/
static void dwc3_otg_sm_work(struct work_struct *w)
{
@ -3878,13 +3919,13 @@ static void dwc3_otg_sm_work(struct work_struct *w)
return;
}
state = usb_otg_state_string(mdwc->otg_state);
state = dwc3_drd_state_string(mdwc->drd_state);
dev_dbg(mdwc->dev, "%s state\n", state);
dbg_event(0xFF, state, 0);
/* Check OTG state */
switch (mdwc->otg_state) {
case OTG_STATE_UNDEFINED:
switch (mdwc->drd_state) {
case DRD_STATE_UNDEFINED:
/* put controller and phy in suspend if no cable connected */
if (test_bit(ID, &mdwc->inputs) &&
!test_bit(B_SESS_VLD, &mdwc->inputs)) {
@ -3896,19 +3937,24 @@ static void dwc3_otg_sm_work(struct work_struct *w)
pm_runtime_put_sync(mdwc->dev);
dbg_event(0xFF, "Undef NoUSB",
atomic_read(&mdwc->dev->power.usage_count));
mdwc->otg_state = OTG_STATE_B_IDLE;
mdwc->drd_state = DRD_STATE_IDLE;
break;
}
dbg_event(0xFF, "Exit UNDEF", 0);
mdwc->otg_state = OTG_STATE_B_IDLE;
mdwc->drd_state = DRD_STATE_IDLE;
pm_runtime_set_suspended(mdwc->dev);
pm_runtime_enable(mdwc->dev);
/* fall-through */
case OTG_STATE_B_IDLE:
case DRD_STATE_IDLE:
if (test_bit(WAIT_FOR_LPM, &mdwc->inputs)) {
dev_dbg(mdwc->dev, "still not in lpm, wait.\n");
break;
}
if (!test_bit(ID, &mdwc->inputs)) {
dev_dbg(mdwc->dev, "!id\n");
mdwc->otg_state = OTG_STATE_A_IDLE;
mdwc->drd_state = DRD_STATE_HOST_IDLE;
work = 1;
} else if (test_bit(B_SESS_VLD, &mdwc->inputs)) {
dev_dbg(mdwc->dev, "b_sess_vld\n");
@ -3918,14 +3964,14 @@ static void dwc3_otg_sm_work(struct work_struct *w)
msecs_to_jiffies(SDP_CONNETION_CHECK_TIME));
/*
* Increment pm usage count upon cable connect. Count
* is decremented in OTG_STATE_B_PERIPHERAL state on
* is decremented in DRD_STATE_PERIPHERAL state on
* cable disconnect or in bus suspend.
*/
pm_runtime_get_sync(mdwc->dev);
dbg_event(0xFF, "BIDLE gsync",
atomic_read(&mdwc->dev->power.usage_count));
dwc3_otg_start_peripheral(mdwc, 1);
mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
mdwc->drd_state = DRD_STATE_PERIPHERAL;
work = 1;
break;
} else {
@ -3935,17 +3981,17 @@ static void dwc3_otg_sm_work(struct work_struct *w)
}
break;
case OTG_STATE_B_PERIPHERAL:
case DRD_STATE_PERIPHERAL:
if (!test_bit(B_SESS_VLD, &mdwc->inputs) ||
!test_bit(ID, &mdwc->inputs)) {
dev_dbg(mdwc->dev, "!id || !bsv\n");
mdwc->otg_state = OTG_STATE_B_IDLE;
mdwc->drd_state = DRD_STATE_IDLE;
cancel_delayed_work_sync(&mdwc->sdp_check);
dwc3_otg_start_peripheral(mdwc, 0);
/*
* Decrement pm usage count upon cable disconnect
* which was incremented upon cable connect in
* OTG_STATE_B_IDLE state
* DRD_STATE_IDLE state
*/
pm_runtime_put_sync(mdwc->dev);
dbg_event(0xFF, "!BSV psync",
@ -3954,13 +4000,13 @@ static void dwc3_otg_sm_work(struct work_struct *w)
} else if (test_bit(B_SUSPEND, &mdwc->inputs) &&
test_bit(B_SESS_VLD, &mdwc->inputs)) {
dev_dbg(mdwc->dev, "BPER bsv && susp\n");
mdwc->otg_state = OTG_STATE_B_SUSPEND;
mdwc->drd_state = DRD_STATE_PERIPHERAL_SUSPEND;
/*
* Decrement pm usage count upon bus suspend.
* Count was incremented either upon cable
* connect in OTG_STATE_B_IDLE or host
* connect in DRD_STATE_IDLE or host
* initiated resume after bus suspend in
* OTG_STATE_B_SUSPEND state
* DRD_STATE_PERIPHERAL_SUSPEND state
*/
pm_runtime_mark_last_busy(mdwc->dev);
pm_runtime_put_autosuspend(mdwc->dev);
@ -3969,20 +4015,20 @@ static void dwc3_otg_sm_work(struct work_struct *w)
}
break;
case OTG_STATE_B_SUSPEND:
case DRD_STATE_PERIPHERAL_SUSPEND:
if (!test_bit(B_SESS_VLD, &mdwc->inputs)) {
dev_dbg(mdwc->dev, "BSUSP: !bsv\n");
mdwc->otg_state = OTG_STATE_B_IDLE;
mdwc->drd_state = DRD_STATE_IDLE;
cancel_delayed_work_sync(&mdwc->sdp_check);
dwc3_otg_start_peripheral(mdwc, 0);
} else if (!test_bit(B_SUSPEND, &mdwc->inputs)) {
dev_dbg(mdwc->dev, "BSUSP !susp\n");
mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
mdwc->drd_state = DRD_STATE_PERIPHERAL;
/*
* Increment pm usage count upon host
* initiated resume. Count was decremented
* upon bus suspend in
* OTG_STATE_B_PERIPHERAL state.
* DRD_STATE_PERIPHERAL state.
*/
pm_runtime_get_sync(mdwc->dev);
dbg_event(0xFF, "!SUSP gsync",
@ -3990,15 +4036,15 @@ static void dwc3_otg_sm_work(struct work_struct *w)
}
break;
case OTG_STATE_A_IDLE:
case DRD_STATE_HOST_IDLE:
/* Switch to A-Device*/
if (test_bit(ID, &mdwc->inputs)) {
dev_dbg(mdwc->dev, "id\n");
mdwc->otg_state = OTG_STATE_B_IDLE;
mdwc->drd_state = DRD_STATE_IDLE;
mdwc->vbus_retry_count = 0;
work = 1;
} else {
mdwc->otg_state = OTG_STATE_A_HOST;
mdwc->drd_state = DRD_STATE_HOST;
ret = dwc3_otg_start_host(mdwc, 1);
if ((ret == -EPROBE_DEFER) &&
mdwc->vbus_retry_count < 3) {
@ -4006,14 +4052,14 @@ static void dwc3_otg_sm_work(struct work_struct *w)
* Get regulator failed as regulator driver is
* not up yet. Will try to start host after 1sec
*/
mdwc->otg_state = OTG_STATE_A_IDLE;
mdwc->drd_state = DRD_STATE_HOST_IDLE;
dev_dbg(mdwc->dev, "Unable to get vbus regulator. Retrying...\n");
delay = VBUS_REG_CHECK_DELAY;
work = 1;
mdwc->vbus_retry_count++;
} else if (ret) {
dev_err(mdwc->dev, "unable to start host\n");
mdwc->otg_state = OTG_STATE_A_IDLE;
mdwc->drd_state = DRD_STATE_HOST_IDLE;
goto ret;
}
if (mdwc->no_wakeup_src_in_hostmode) {
@ -4023,12 +4069,12 @@ static void dwc3_otg_sm_work(struct work_struct *w)
}
break;
case OTG_STATE_A_HOST:
case DRD_STATE_HOST:
if (test_bit(ID, &mdwc->inputs) || mdwc->hc_died) {
dbg_event(0xFF, "id || hc_died", 0);
dev_dbg(mdwc->dev, "%s state id || hc_died\n", state);
dwc3_otg_start_host(mdwc, 0);
mdwc->otg_state = OTG_STATE_B_IDLE;
mdwc->drd_state = DRD_STATE_IDLE;
mdwc->vbus_retry_count = 0;
mdwc->hc_died = false;
work = 1;
@ -4175,7 +4221,7 @@ static int dwc3_msm_pm_restore(struct device *dev)
pm_runtime_enable(dev);
/* Restore PHY flags if hibernated in host mode */
if (mdwc->otg_state == OTG_STATE_A_HOST) {
if (mdwc->drd_state == DRD_STATE_HOST) {
mdwc->hs_phy->flags |= PHY_HOST_MODE;
if (mdwc->ss_phy) {
mdwc->ss_phy->flags |= PHY_HOST_MODE;

View file

@ -2290,6 +2290,7 @@ int hdmi_edid_parser(void *input)
u16 ieee_reg_id;
int status = 0;
u32 i = 0;
u32 cea_idx = 1;
struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
if (!edid_ctrl) {
@ -2316,7 +2317,7 @@ int hdmi_edid_parser(void *input)
/* EDID_CEA_EXTENSION_FLAG[0x7E] - CEC extension byte */
num_of_cea_blocks = edid_buf[EDID_BLOCK_SIZE - 2];
DEV_DBG("%s: No. of CEA blocks is [%u]\n", __func__,
DEV_DBG("%s: No. of CEA/Extended EDID blocks is [%u]\n", __func__,
num_of_cea_blocks);
/* Find out any CEA extension blocks following block 0 */
@ -2335,30 +2336,40 @@ int hdmi_edid_parser(void *input)
num_of_cea_blocks = MAX_EDID_BLOCKS - 1;
}
/* check for valid CEA block */
if (edid_buf[EDID_BLOCK_SIZE] != 2) {
DEV_ERR("%s: Invalid CEA block\n", __func__);
num_of_cea_blocks = 0;
goto bail;
if (edid_buf[EDID_BLOCK_SIZE] == 0xF0) {
DEV_DBG("%s: Extended EDID Block Map found\n", __func__);
edid_buf += EDID_BLOCK_SIZE;
cea_idx++;
}
/* goto to CEA extension edid block */
edid_buf += EDID_BLOCK_SIZE;
for (i = cea_idx; i <= num_of_cea_blocks; i++) {
ieee_reg_id = hdmi_edid_extract_ieee_reg_id(edid_ctrl, edid_buf);
DEV_DBG("%s: ieee_reg_id = 0x%08x\n", __func__, ieee_reg_id);
if (ieee_reg_id == EDID_IEEE_REG_ID)
edid_ctrl->sink_mode = SINK_MODE_HDMI;
else
edid_ctrl->sink_mode = SINK_MODE_DVI;
/* check for valid CEA block */
if (edid_buf[EDID_BLOCK_SIZE] != 2) {
DEV_ERR("%s: Not a CEA block\n", __func__);
edid_buf += EDID_BLOCK_SIZE;
continue;
}
hdmi_edid_extract_sink_caps(edid_ctrl, edid_buf);
hdmi_edid_extract_latency_fields(edid_ctrl, edid_buf);
hdmi_edid_extract_dc(edid_ctrl, edid_buf);
hdmi_edid_extract_speaker_allocation_data(edid_ctrl, edid_buf);
hdmi_edid_extract_audio_data_blocks(edid_ctrl, edid_buf);
hdmi_edid_extract_3d_present(edid_ctrl, edid_buf);
hdmi_edid_extract_extended_data_blocks(edid_ctrl, edid_buf);
/* goto to CEA extension edid block */
edid_buf += EDID_BLOCK_SIZE;
ieee_reg_id = hdmi_edid_extract_ieee_reg_id(edid_ctrl,
edid_buf);
DEV_DBG("%s: ieee_reg_id = 0x%08x\n", __func__, ieee_reg_id);
if (ieee_reg_id == EDID_IEEE_REG_ID)
edid_ctrl->sink_mode = SINK_MODE_HDMI;
else
edid_ctrl->sink_mode = SINK_MODE_DVI;
hdmi_edid_extract_sink_caps(edid_ctrl, edid_buf);
hdmi_edid_extract_latency_fields(edid_ctrl, edid_buf);
hdmi_edid_extract_dc(edid_ctrl, edid_buf);
hdmi_edid_extract_speaker_allocation_data(edid_ctrl, edid_buf);
hdmi_edid_extract_audio_data_blocks(edid_ctrl, edid_buf);
hdmi_edid_extract_3d_present(edid_ctrl, edid_buf);
hdmi_edid_extract_extended_data_blocks(edid_ctrl, edid_buf);
}
bail:
for (i = 1; i <= num_of_cea_blocks; i++) {

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
/* Copyright (c) 2010-2017, 2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -790,11 +790,13 @@ static void hdmi_ddc_trigger(struct hdmi_tx_ddc_ctrl *ddc_ctrl,
if (mode == TRIGGER_READ && seg) {
DSS_REG_W_ND(io, HDMI_DDC_DATA, BIT(31) | (seg_addr << 8));
DSS_REG_W_ND(io, HDMI_DDC_DATA, seg_num << 8);
DSS_REG_W_ND(io, HDMI_DDC_DATA, (ddc_data->dev_addr << 8));
} else {
/* handle portion #1 */
DSS_REG_W_ND(io, HDMI_DDC_DATA,
BIT(31) | (ddc_data->dev_addr << 8));
}
/* handle portion #1 */
DSS_REG_W_ND(io, HDMI_DDC_DATA, BIT(31) | (ddc_data->dev_addr << 8));
/* handle portion #2 */
DSS_REG_W_ND(io, HDMI_DDC_DATA, ddc_data->offset << 8);

View file

@ -3710,6 +3710,9 @@ struct cfg80211_cached_keys;
* @conn: (private) cfg80211 software SME connection state machine data
* @connect_keys: (private) keys to set after connection is established
* @conn_bss_type: connecting/connected BSS type
* @conn_owner_nlportid: (private) connection owner socket port ID
* @disconnect_wk: (private) auto-disconnect work
* @disconnect_bssid: (private) the BSSID to use for auto-disconnect
* @ibss_fixed: (private) IBSS is using fixed BSSID
* @ibss_dfs_possible: (private) IBSS may change to a DFS channel
* @event_list: (private) list for internal event processing
@ -3741,6 +3744,10 @@ struct wireless_dev {
struct cfg80211_conn *conn;
struct cfg80211_cached_keys *connect_keys;
enum ieee80211_bss_type conn_bss_type;
u32 conn_owner_nlportid;
struct work_struct disconnect_wk;
u8 disconnect_bssid[ETH_ALEN];
struct list_head event_list;
spinlock_t event_lock;

View file

@ -1833,6 +1833,8 @@ enum nl80211_commands {
* regulatory indoor configuration would be owned by the netlink socket
* that configured the indoor setting, and the indoor operation would be
* cleared when the socket is closed.
* If set during %NL80211_CMD_ASSOCIATE or %NL80211_CMD_CONNECT the
* station will deauthenticate when the socket is closed.
*
* @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is
* the TDLS link initiator.

View file

@ -1081,6 +1081,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
wdev->iftype == NL80211_IFTYPE_P2P_CLIENT ||
wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr)
dev->priv_flags |= IFF_DONT_BRIDGE;
INIT_WORK(&wdev->disconnect_wk, cfg80211_autodisconnect_wk);
break;
case NETDEV_GOING_DOWN:
cfg80211_leave(rdev, wdev);
@ -1166,6 +1167,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
#ifdef CONFIG_CFG80211_WEXT
kzfree(wdev->wext.keys);
#endif
flush_work(&wdev->disconnect_wk);
}
/*
* synchronise (so that we won't find this netdev

View file

@ -378,6 +378,7 @@ void __cfg80211_roamed(struct wireless_dev *wdev,
const u8 *resp_ie, size_t resp_ie_len);
int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev);
void cfg80211_autodisconnect_wk(struct work_struct *work);
/* SME implementation */
void cfg80211_conn_work(struct work_struct *work);

View file

@ -350,6 +350,11 @@ int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
!ether_addr_equal(wdev->current_bss->pub.bssid, bssid)))
return 0;
if (ether_addr_equal(wdev->disconnect_bssid, bssid) ||
(wdev->current_bss &&
ether_addr_equal(wdev->current_bss->pub.bssid, bssid)))
wdev->conn_owner_nlportid = 0;
return rdev_deauth(rdev, dev, &req);
}

View file

@ -7833,8 +7833,17 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
err = nl80211_crypto_settings(rdev, info, &req.crypto, 1);
if (!err) {
wdev_lock(dev->ieee80211_ptr);
err = cfg80211_mlme_assoc(rdev, dev, chan, bssid,
ssid, ssid_len, &req);
if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER]) {
dev->ieee80211_ptr->conn_owner_nlportid =
info->snd_portid;
memcpy(dev->ieee80211_ptr->disconnect_bssid,
bssid, ETH_ALEN);
}
wdev_unlock(dev->ieee80211_ptr);
}
@ -8580,9 +8589,21 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
wdev_lock(dev->ieee80211_ptr);
err = cfg80211_connect(rdev, dev, &connect, connkeys, NULL);
wdev_unlock(dev->ieee80211_ptr);
if (err)
kzfree(connkeys);
if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER]) {
dev->ieee80211_ptr->conn_owner_nlportid = info->snd_portid;
if (connect.bssid)
memcpy(dev->ieee80211_ptr->disconnect_bssid,
connect.bssid, ETH_ALEN);
else
memset(dev->ieee80211_ptr->disconnect_bssid,
0, ETH_ALEN);
}
wdev_unlock(dev->ieee80211_ptr);
return err;
}
@ -13754,6 +13775,8 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
if (wdev->owner_nlportid == notify->portid)
schedule_destroy_work = true;
else if (wdev->conn_owner_nlportid == notify->portid)
schedule_work(&wdev->disconnect_wk);
}
spin_lock_bh(&rdev->beacon_registrations_lock);

View file

@ -757,6 +757,7 @@ void __cfg80211_connect_result(struct net_device *dev,
kzfree(wdev->connect_keys);
wdev->connect_keys = NULL;
wdev->ssid_len = 0;
wdev->conn_owner_nlportid = 0;
if (cr->bss) {
cfg80211_unhold_bss(bss_from_pub(cr->bss));
cfg80211_put_bss(wdev->wiphy, cr->bss);
@ -1017,6 +1018,7 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
wdev->current_bss = NULL;
wdev->ssid_len = 0;
wdev->conn_owner_nlportid = 0;
nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap);
@ -1148,6 +1150,8 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
kzfree(wdev->connect_keys);
wdev->connect_keys = NULL;
wdev->conn_owner_nlportid = 0;
if (wdev->conn)
err = cfg80211_sme_disconnect(wdev, reason);
else if (!rdev->ops->disconnect)
@ -1157,3 +1161,32 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
return err;
}
/*
* Used to clean up after the connection / connection attempt owner socket
* disconnects
*/
void cfg80211_autodisconnect_wk(struct work_struct *work)
{
struct wireless_dev *wdev =
container_of(work, struct wireless_dev, disconnect_wk);
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
wdev_lock(wdev);
if (wdev->conn_owner_nlportid) {
/*
* Use disconnect_bssid if still connecting and ops->disconnect
* not implemented. Otherwise we can use cfg80211_disconnect.
*/
if (rdev->ops->disconnect || wdev->current_bss)
cfg80211_disconnect(rdev, wdev->netdev,
WLAN_REASON_DEAUTH_LEAVING, true);
else
cfg80211_mlme_deauth(rdev, wdev->netdev,
wdev->disconnect_bssid, NULL, 0,
WLAN_REASON_DEAUTH_LEAVING, false);
}
wdev_unlock(wdev);
}

File diff suppressed because it is too large Load diff

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2017, 2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -11,6 +11,7 @@
*/
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <sound/apr_audio-v2.h>
#include <sound/q6asm-v2.h>
#include <sound/compress_params.h>
@ -23,7 +24,8 @@
#define GET_NEXT(ptr, upper_limit, rc) \
({ \
if (((ptr) + 1) > (upper_limit)) { \
pr_err("%s: param list out of boundary\n", __func__); \
pr_err_ratelimited("%s: param list out of boundary\n", \
__func__); \
(rc) = -EINVAL; \
} \
((rc) == 0) ? *(ptr)++ : -EINVAL; \
@ -32,7 +34,8 @@
#define CHECK_PARAM_LEN(len, max_len, tag, rc) \
do { \
if ((len) > (max_len)) { \
pr_err("%s: params length overflows\n", (tag)); \
pr_err_ratelimited("%s: params length overflows\n", \
(tag)); \
(rc) = -EINVAL; \
} \
} while (0)
@ -234,7 +237,8 @@ int msm_audio_effects_virtualizer_handler(struct audio_client *ac,
param_data = (u8 *) &virtualizer->gain_adjust;
break;
default:
pr_err("%s: Invalid command to set config\n", __func__);
pr_err_ratelimited("%s: Invalid command to set config\n",
__func__);
continue;
}
if (rc)
@ -656,7 +660,8 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
param_data = (u8 *) &reverb->density;
break;
default:
pr_err("%s: Invalid command to set config\n", __func__);
pr_err_ratelimited("%s: Invalid command to set config\n",
__func__);
continue;
}
if (rc)
@ -797,7 +802,8 @@ int msm_audio_effects_bass_boost_handler(struct audio_client *ac,
param_data = (u8 *) &bass_boost->strength;
break;
default:
pr_err("%s: Invalid command to set config\n", __func__);
pr_err_ratelimited("%s: Invalid command to set config\n",
__func__);
continue;
}
if (rc)
@ -910,7 +916,8 @@ int msm_audio_effects_pbe_handler(struct audio_client *ac,
param_data = (u8 *) values;
break;
default:
pr_err("%s: Invalid command to set config\n", __func__);
pr_err_ratelimited("%s: Invalid command to set config\n",
__func__);
continue;
}
if (rc)
@ -1151,7 +1158,8 @@ int msm_audio_effects_popless_eq_handler(struct audio_client *ac,
param_data = (u8 *) &eq->freq_millihertz;
break;
default:
pr_err("%s: Invalid command to set config\n", __func__);
pr_err_ratelimited("%s: Invalid command to set config\n",
__func__);
continue;
}
if (rc)
@ -1270,7 +1278,7 @@ static int __msm_audio_effects_volume_handler(struct audio_client *ac,
"VOLUME/VOLUME2_GAIN_MASTER", rc);
break;
default:
pr_err("%s: Invalid command id: %d to set config\n",
pr_err_ratelimited("%s: Invalid command id: %d to set config\n",
__func__, command_id);
continue;
}

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -1358,12 +1358,18 @@ static int msm_dai_q6_prepare(struct snd_pcm_substream *substream,
if (dai_data->enc_config.format != ENC_FMT_NONE) {
int bitwidth = 0;
if (dai_data->afe_in_bitformat ==
SNDRV_PCM_FORMAT_S24_LE)
switch (dai_data->afe_in_bitformat) {
case SNDRV_PCM_FORMAT_S32_LE:
bitwidth = 32;
break;
case SNDRV_PCM_FORMAT_S24_LE:
bitwidth = 24;
else if (dai_data->afe_in_bitformat ==
SNDRV_PCM_FORMAT_S16_LE)
break;
case SNDRV_PCM_FORMAT_S16_LE:
default:
bitwidth = 16;
break;
}
pr_debug("%s: calling AFE_PORT_START_V2 with enc_format: %d\n",
__func__, dai_data->enc_config.format);
rc = afe_port_start_v2(dai->id, &dai_data->port_config,
@ -2185,6 +2191,9 @@ static int msm_dai_q6_afe_input_bit_format_get(
}
switch (dai_data->afe_in_bitformat) {
case SNDRV_PCM_FORMAT_S32_LE:
ucontrol->value.integer.value[0] = 2;
break;
case SNDRV_PCM_FORMAT_S24_LE:
ucontrol->value.integer.value[0] = 1;
break;
@ -2210,6 +2219,9 @@ static int msm_dai_q6_afe_input_bit_format_put(
return -EINVAL;
}
switch (ucontrol->value.integer.value[0]) {
case 2:
dai_data->afe_in_bitformat = SNDRV_PCM_FORMAT_S32_LE;
break;
case 1:
dai_data->afe_in_bitformat = SNDRV_PCM_FORMAT_S24_LE;
break;
@ -6079,11 +6091,17 @@ static int msm_dai_q6_tdm_set_tdm_slot(struct snd_soc_dai *dai,
return -EINVAL;
}
/* HW only supports 16 and 8 slots configuration */
/* HW supports 1-32 slots configuration. Typical: 1, 2, 4, 8, 16, 32 */
switch (slots) {
case 1:
cap_mask = 0x01;
break;
case 2:
cap_mask = 0x03;
break;
case 4:
cap_mask = 0x0F;
break;
case 8:
cap_mask = 0xFF;
break;
@ -6187,27 +6205,13 @@ static int msm_dai_q6_tdm_set_sysclk(struct snd_soc_dai *dai,
struct msm_dai_q6_tdm_dai_data *dai_data =
dev_get_drvdata(dai->dev);
switch (dai->id) {
case AFE_PORT_ID_PRIMARY_TDM_RX:
case AFE_PORT_ID_PRIMARY_TDM_RX_1:
case AFE_PORT_ID_PRIMARY_TDM_RX_2:
case AFE_PORT_ID_PRIMARY_TDM_RX_3:
case AFE_PORT_ID_PRIMARY_TDM_RX_4:
case AFE_PORT_ID_PRIMARY_TDM_RX_5:
case AFE_PORT_ID_PRIMARY_TDM_RX_6:
case AFE_PORT_ID_PRIMARY_TDM_RX_7:
case AFE_PORT_ID_PRIMARY_TDM_TX:
case AFE_PORT_ID_PRIMARY_TDM_TX_1:
case AFE_PORT_ID_PRIMARY_TDM_TX_2:
case AFE_PORT_ID_PRIMARY_TDM_TX_3:
case AFE_PORT_ID_PRIMARY_TDM_TX_4:
case AFE_PORT_ID_PRIMARY_TDM_TX_5:
case AFE_PORT_ID_PRIMARY_TDM_TX_6:
case AFE_PORT_ID_PRIMARY_TDM_TX_7:
if ((dai->id >= AFE_PORT_ID_PRIMARY_TDM_RX) &&
(dai->id <= AFE_PORT_ID_QUATERNARY_TDM_TX_7)) {
dai_data->clk_set.clk_freq_in_hz = freq;
break;
default:
return 0;
} else {
dev_err(dai->dev, "%s: invalid dai id 0x%x\n",
__func__, dai->id);
return -EINVAL;
}
dev_dbg(dai->dev, "%s: dai id = 0x%x group clk_freq %d\n",
@ -6771,7 +6775,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -6791,7 +6795,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -6811,7 +6815,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -6831,7 +6835,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -6851,7 +6855,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -6871,7 +6875,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -6891,7 +6895,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -6911,7 +6915,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -6931,7 +6935,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -6951,7 +6955,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -6971,7 +6975,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -6991,7 +6995,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7011,7 +7015,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7031,7 +7035,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7051,7 +7055,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7071,7 +7075,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7091,7 +7095,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7111,7 +7115,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7131,7 +7135,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7151,7 +7155,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7171,7 +7175,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7191,7 +7195,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7211,7 +7215,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7231,7 +7235,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7251,7 +7255,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7271,7 +7275,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7291,7 +7295,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7311,7 +7315,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7331,7 +7335,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7351,7 +7355,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7371,7 +7375,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7391,7 +7395,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7411,7 +7415,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7431,7 +7435,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7451,7 +7455,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7471,7 +7475,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7491,7 +7495,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7511,7 +7515,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7531,7 +7535,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7551,7 +7555,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7571,7 +7575,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7591,7 +7595,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7611,7 +7615,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7631,7 +7635,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7651,7 +7655,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7671,7 +7675,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7691,7 +7695,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7711,7 +7715,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7731,7 +7735,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7751,7 +7755,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7771,7 +7775,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7791,7 +7795,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7811,7 +7815,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7831,7 +7835,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7851,7 +7855,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7871,7 +7875,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7891,7 +7895,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7911,7 +7915,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7931,7 +7935,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7951,7 +7955,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7971,7 +7975,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -7991,7 +7995,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -8011,7 +8015,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},
@ -8031,7 +8035,7 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.channels_max = 16,
.rate_min = 8000,
.rate_max = 352800,
},

View file

@ -1528,7 +1528,7 @@ static int msm_pcm_chmap_ctl_put(struct snd_kcontrol *kcontrol,
prtd = substream->runtime->private_data;
if (prtd) {
prtd->set_channel_map = true;
for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++)
prtd->channel_map[i] =
(char)(ucontrol->value.integer.value[i]);
}
@ -1556,11 +1556,11 @@ static int msm_pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
prtd = substream->runtime->private_data;
if (prtd && prtd->set_channel_map == true) {
for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++)
ucontrol->value.integer.value[i] =
(int)prtd->channel_map[i];
} else {
for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++)
ucontrol->value.integer.value[i] = 0;
}
@ -1578,7 +1578,7 @@ static int msm_pcm_add_chmap_controls(struct snd_soc_pcm_runtime *rtd)
pr_debug("%s, Channel map cntrl add\n", __func__);
ret = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
snd_pcm_std_chmaps,
PCM_FORMAT_MAX_NUM_CHANNEL, 0,
PCM_FORMAT_MAX_NUM_CHANNEL_V2, 0,
&chmap_info);
if (ret < 0) {
pr_err("%s, channel map cntrl add failed\n", __func__);
@ -2443,7 +2443,7 @@ static int msm_pcm_channel_mixer_output_map_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 32;
uinfo->count = PCM_FORMAT_MAX_NUM_CHANNEL_V2;
uinfo->value.integer.min = 1;
uinfo->value.integer.max = 64;
return 0;
@ -2545,7 +2545,7 @@ static int msm_pcm_channel_mixer_input_map_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 32;
uinfo->count = PCM_FORMAT_MAX_NUM_CHANNEL_V2;
uinfo->value.integer.min = 1;
uinfo->value.integer.max = 64;
return 0;
@ -2760,7 +2760,7 @@ static int msm_pcm_channel_mixer_weight_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 32;
uinfo->count = PCM_FORMAT_MAX_NUM_CHANNEL_V2;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 0x4000;
return 0;

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -82,7 +82,7 @@ struct generic_get_data_ {
};
static struct generic_get_data_ *generic_get_data;
static int parse_fwk_version_info(uint32_t *payload)
static int parse_fwk_version_info(uint32_t *payload, uint16_t payload_size)
{
size_t ver_size;
int num_services;
@ -95,6 +95,11 @@ static int parse_fwk_version_info(uint32_t *payload)
* Based on this info, we copy the payload into core
* avcs version info structure.
*/
if (payload_size < 5 * sizeof(uint32_t)) {
pr_err("%s: payload has invalid size %d\n",
__func__, payload_size);
return -EINVAL;
}
num_services = payload[4];
if (num_services > VSS_MAX_AVCS_NUM_SERVICES) {
pr_err("%s: num_services: %d greater than max services: %d\n",
@ -109,6 +114,11 @@ static int parse_fwk_version_info(uint32_t *payload)
ver_size = sizeof(struct avcs_get_fwk_version) +
num_services * sizeof(struct avs_svc_api_info);
if (payload_size < ver_size) {
pr_err("%s: payload has invalid size %d, expected size %zu\n",
__func__, payload_size, ver_size);
return -EINVAL;
}
q6core_lcl.q6core_avcs_ver_info.ver_info =
kzalloc(ver_size, GFP_ATOMIC);
if (q6core_lcl.q6core_avcs_ver_info.ver_info == NULL)
@ -145,6 +155,12 @@ static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv)
payload1 = data->payload;
if (data->payload_size < 2 * sizeof(uint32_t)) {
pr_err("%s: payload has invalid size %d\n",
__func__, data->payload_size);
return -EINVAL;
}
switch (payload1[0]) {
case AVCS_CMD_SHARED_MEM_UNMAP_REGIONS:
@ -213,6 +229,11 @@ static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv)
break;
}
case AVCS_CMDRSP_SHARED_MEM_MAP_REGIONS:
if (data->payload_size < sizeof(uint32_t)) {
pr_err("%s: payload has invalid size %d\n",
__func__, data->payload_size);
return -EINVAL;
}
payload1 = data->payload;
pr_debug("%s: AVCS_CMDRSP_SHARED_MEM_MAP_REGIONS handle %d\n",
__func__, payload1[0]);
@ -221,6 +242,11 @@ static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv)
wake_up(&q6core_lcl.bus_bw_req_wait);
break;
case AVCS_CMDRSP_ADSP_EVENT_GET_STATE:
if (data->payload_size < sizeof(uint32_t)) {
pr_err("%s: payload has invalid size %d\n",
__func__, data->payload_size);
return -EINVAL;
}
payload1 = data->payload;
q6core_lcl.param = payload1[0];
pr_debug("%s: Received ADSP get state response 0x%x\n",
@ -231,6 +257,11 @@ static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv)
wake_up(&q6core_lcl.bus_bw_req_wait);
break;
case AVCS_CMDRSP_GET_LICENSE_VALIDATION_RESULT:
if (data->payload_size < sizeof(uint32_t)) {
pr_err("%s: payload has invalid size %d\n",
__func__, data->payload_size);
return -EINVAL;
}
payload1 = data->payload;
pr_debug("%s: cmd = LICENSE_VALIDATION_RESULT, result = 0x%x\n",
__func__, payload1[0]);
@ -243,7 +274,7 @@ static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv)
pr_debug("%s: Received AVCS_CMDRSP_GET_FWK_VERSION\n",
__func__);
payload1 = data->payload;
ret = parse_fwk_version_info(payload1);
ret = parse_fwk_version_info(payload1, data->payload_size);
if (ret < 0) {
q6core_lcl.adsp_status = ret;
pr_err("%s: Failed to parse payload:%d\n",