qseecom: support SG lists with more than 512 entries

qseecom_send_modfd_cmd_64 converts a non-contiguous ION memory fd
into a scatter-gather (SG) physical address list, and currently
supports at most 512 SG list entries. 512 entries may not be enough
when a client wants to send large data on a fragmented system. With
this change, if the SG list has more than 512 entries, we allocate a
new contiguous kernel buffer, save all SG entries into that buffer,
and leave only a small header describing it in the original command
buffer.

Change-Id: Id90eefb98b63bf16db755dae8e1482e448090f47
Signed-off-by: Zhen Kong <zkong@codeaurora.org>
Author:    Zhen Kong <zkong@codeaurora.org>
Date:      2015-12-02 23:40:16 -08:00
Committer: David Keitel
parent 894269ca16
commit deb62ed21a
2 changed files with 94 additions and 9 deletions
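
For reference, a minimal userspace sketch of the two message formats this
patch distinguishes. The two structs mirror the ones added by the patch
below; PAGE_SIZE, page_align(), the entry count, and the main() driver are
illustrative assumptions, not part of the patch.

#include <stdint.h>
#include <stdio.h>

#define QSEECOM_MAX_SG_ENTRY 512
#define PAGE_SIZE 4096UL

struct qseecom_sg_entry_64bit {
    uint64_t phys_addr;
    uint32_t len;
} __attribute__ ((packed));             /* 12 bytes: SG_ENTRY_SZ_64BIT */

struct qseecom_sg_list_buf_hdr_64bit {
    struct qseecom_sg_entry_64bit blank_entry;  /* must be all 0 */
    uint32_t version;                   /* sg list buf format version */
    uint64_t new_buf_phys_addr;         /* PA of the new side buffer */
    uint32_t nents_total;               /* total number of SG entries */
} __attribute__ ((packed));             /* 28 bytes */

/* Round up to a whole page (the PAGE_ALIGN() idiom). */
static size_t page_align(size_t sz)
{
    return (sz + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

int main(void)
{
    size_t nents = 2000;        /* example: a badly fragmented buffer */

    if (nents <= QSEECOM_MAX_SG_ENTRY) {
        /* Legacy format: entries sit inline in the command buffer. */
        printf("inline list: %zu bytes\n",
               nents * sizeof(struct qseecom_sg_entry_64bit));
    } else {
        /* Version 2: only the header stays in the command buffer;
         * all entries move to one contiguous side buffer. */
        printf("header: %zu bytes, side buffer: %zu bytes\n",
               sizeof(struct qseecom_sg_list_buf_hdr_64bit),
               page_align(nents *
                          sizeof(struct qseecom_sg_entry_64bit)));
    }
    return 0;
}

Note that the patch itself rounds with (size + PAGE_SIZE) & PAGE_MASK,
which allocates one extra page when size is already a whole number of
pages; page_align() above is the exact round-up.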


@@ -66,7 +66,10 @@
 #define QSEE_CE_CLK_100MHZ 100000000
 #define CE_CLK_DIV 1000000
 
 #define QSEECOM_MAX_SG_ENTRY 512
+#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
+        (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
+
 #define QSEECOM_INVALID_KEY_ID 0xff
 
 /* Save partition image hash for authentication check */
@@ -2926,6 +2929,53 @@ err:
     return -ENOMEM;
 }
 
+static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
+        char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
+{
+    struct scatterlist *sg = sg_ptr->sgl;
+    struct qseecom_sg_entry_64bit *sg_entry;
+    struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
+    void *buf;
+    uint i;
+    size_t size;
+    dma_addr_t coh_pmem;
+
+    if (fd_idx >= MAX_ION_FD) {
+        pr_err("fd_idx [%d] is invalid\n", fd_idx);
+        return -ENOMEM;
+    }
+    buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
+    memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
+    /* Allocate a contiguous kernel buffer */
+    size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
+    size = (size + PAGE_SIZE) & PAGE_MASK;
+    buf = dma_alloc_coherent(qseecom.pdev,
+            size, &coh_pmem, GFP_KERNEL);
+    if (buf == NULL) {
+        pr_err("failed to alloc memory for sg buf\n");
+        return -ENOMEM;
+    }
+    /* update qseecom_sg_list_buf_hdr_64bit */
+    buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
+    buf_hdr->new_buf_phys_addr = coh_pmem;
+    buf_hdr->nents_total = sg_ptr->nents;
+    /* save all SG entries into the newly allocated buf */
+    sg_entry = (struct qseecom_sg_entry_64bit *)buf;
+    for (i = 0; i < sg_ptr->nents; i++) {
+        sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
+        sg_entry->len = sg->length;
+        sg_entry++;
+        sg = sg_next(sg);
+    }
+    data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
+    data->client.sec_buf_fd[fd_idx].vbase = buf;
+    data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
+    data->client.sec_buf_fd[fd_idx].size = size;
+    return 0;
+}
+
 static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
         struct qseecom_dev_handle *data)
 {
@@ -2994,10 +3044,26 @@ static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
             goto err;
         }
         if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
-            pr_err("Num of scattered entries");
-            pr_err(" (%d) is greater than max supported %d\n",
+            pr_warn("Num of scattered entries");
+            pr_warn(" (%d) is greater than %d\n",
                 sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
-            goto err;
+            if (cleanup) {
+                if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
+                    data->client.sec_buf_fd[i].vbase)
+                    dma_free_coherent(qseecom.pdev,
+                        data->client.sec_buf_fd[i].size,
+                        data->client.sec_buf_fd[i].vbase,
+                        data->client.sec_buf_fd[i].pbase);
+            } else {
+                ret = __qseecom_allocate_sg_list_buffer(data,
+                        field, i, sg_ptr);
+                if (ret) {
+                    pr_err("Failed to allocate sg list buffer\n");
+                    goto err;
+                }
+            }
+            len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
+            goto cleanup;
         }
         sg = sg_ptr->sgl;
         if (sg_ptr->nents == 1) {
@@ -3017,10 +3083,10 @@ static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
             (req->ifd_data[i].fd > 0)) {
             if ((req->cmd_req_len <
-                SG_ENTRY_SZ * sg_ptr->nents) ||
+                SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
                 (req->ifd_data[i].cmd_buf_offset >
-                (req->cmd_req_len -
-                SG_ENTRY_SZ * sg_ptr->nents))) {
+                (req->cmd_req_len -
+                SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
                 pr_err("Invalid offset = 0x%x\n",
                     req->ifd_data[i].cmd_buf_offset);
                 goto err;
@@ -3030,10 +3096,10 @@ static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
             (lstnr_resp->ifd_data[i].fd > 0)) {
             if ((lstnr_resp->resp_len <
-                SG_ENTRY_SZ * sg_ptr->nents) ||
+                SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
                 (lstnr_resp->ifd_data[i].cmd_buf_offset >
                 (lstnr_resp->resp_len -
-                SG_ENTRY_SZ * sg_ptr->nents))) {
+                SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
                 goto err;
             }
         }
@@ -3049,6 +3115,7 @@ static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
             sg = sg_next(sg);
         }
     }
+cleanup:
     if (cleanup)
         msm_ion_do_cache_op(qseecom.ion_clnt,
             ihandle, NULL, len,


@@ -233,6 +233,24 @@ struct qseecom_sg_entry_64bit {
     uint32_t len;
 } __attribute__ ((packed));
 
+/*
+ * sg list buf format version
+ * 1: Legacy format that supports only up to 512 SG list entries
+ * 2: New format that supports more than 512 entries
+ */
+#define QSEECOM_SG_LIST_BUF_FORMAT_VERSION_1 1
+#define QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2 2
+
+struct qseecom_sg_list_buf_hdr_64bit {
+    struct qseecom_sg_entry_64bit blank_entry;  /* must be all 0 */
+    uint32_t version;           /* sg list buf format version */
+    uint64_t new_buf_phys_addr; /* PA of new buffer */
+    uint32_t nents_total;       /* Total number of SG entries */
+} __attribute__ ((packed));
+
+#define QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT \
+            sizeof(struct qseecom_sg_list_buf_hdr_64bit)
+
 #define MAX_CE_PIPE_PAIR_PER_UNIT 3
 #define INVALID_CE_INFO_UNIT_NUM 0xffffffff
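
How the receiving side tells the two formats apart is not part of this
patch, but the header layout suggests the mechanism: a version-2 message
starts with the all-zero blank_entry, which a real first SG entry (nonzero
phys_addr and len) never matches, followed by the version word. A minimal
sketch of such a check, with sg_list_is_v2() as an assumed helper name:

#include <stdint.h>
#include <string.h>

struct qseecom_sg_entry_64bit {
    uint64_t phys_addr;
    uint32_t len;
} __attribute__ ((packed));

struct qseecom_sg_list_buf_hdr_64bit {
    struct qseecom_sg_entry_64bit blank_entry;  /* must be all 0 */
    uint32_t version;
    uint64_t new_buf_phys_addr;
    uint32_t nents_total;
} __attribute__ ((packed));

#define QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2 2

/* Return 1 if buf starts with a version-2 header, 0 for a legacy
 * inline entry list. buf must hold at least one header. */
static int sg_list_is_v2(const void *buf)
{
    static const struct qseecom_sg_entry_64bit zero;
    const struct qseecom_sg_list_buf_hdr_64bit *hdr = buf;

    return memcmp(&hdr->blank_entry, &zero, sizeof(zero)) == 0 &&
           hdr->version == QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
}

int main(void)
{
    struct qseecom_sg_list_buf_hdr_64bit hdr;

    memset(&hdr, 0, sizeof(hdr));
    hdr.version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
    hdr.nents_total = 2000;
    return sg_list_is_v2(&hdr) ? 0 : 1;     /* exit 0: detected v2 */
}

This is only a plausible reconstruction; the actual secure-world parser
may key off the version field differently. The design point stands
either way: the blank entry plus the version word make the format
self-identifying without changing the legacy inline layout.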