msm: secure_buffer: Ensure shared buffers are cache-aligned
Buffers shared with the hypervisor might undergo cache maintenance on the hypervisor side. Currently, we pass a buffer that might not be cache-aligned (source_vm_list), so when the hypervisor performs cache maintenance on it, it can corrupt adjacent data. Fix this by ensuring that all buffers shared with the hypervisor are cache-aligned: kmalloc and memcpy the source_vm_list into a copy before sending it across.

Change-Id: I661f8ca852ade159e3432904086b060be8bfb806
Signed-off-by: Mitchel Humpherys <mitchelh@codeaurora.org>
parent 8a9d07bc98
commit 51adc0956b
1 changed file with 19 additions and 4 deletions
@@ -275,6 +275,7 @@ int hyp_assign_table(struct sg_table *table,
 	struct info_list *info_list = NULL;
 	struct dest_info_list *dest_info_list = NULL;
 	struct scm_desc desc = {0};
+	u32 *source_vm_copy;

 	info_list = get_info_list_from_table(table);
 	if (!info_list)
@@ -287,10 +288,23 @@ int hyp_assign_table(struct sg_table *table,
 		goto err1;
 	}

+	/*
+	 * We can only pass cache-aligned sizes to hypervisor, so we need
+	 * to kmalloc and memcpy the source_vm_list here.
+	 */
+	source_vm_copy = kmalloc_array(
+		source_nelems, sizeof(*source_vm_copy), GFP_KERNEL);
+	if (!source_vm_copy) {
+		ret = -ENOMEM;
+		goto err2;
+	}
+	memcpy(source_vm_copy, source_vm_list,
+	       sizeof(*source_vm_list) * source_nelems);
+
 	desc.args[0] = virt_to_phys(info_list->list_head);
 	desc.args[1] = info_list->list_size;
-	desc.args[2] = virt_to_phys(source_vm_list);
-	desc.args[3] = sizeof(*source_vm_list) * source_nelems;
+	desc.args[2] = virt_to_phys(source_vm_copy);
+	desc.args[3] = sizeof(*source_vm_copy) * source_nelems;
 	desc.args[4] = virt_to_phys(dest_info_list->dest_info);
 	desc.args[5] = dest_info_list->list_size;
 	desc.args[6] = 0;
@@ -298,7 +312,7 @@ int hyp_assign_table(struct sg_table *table,
 	desc.arginfo = SCM_ARGS(7, SCM_RO, SCM_VAL, SCM_RO, SCM_VAL, SCM_RO,
 				SCM_VAL, SCM_VAL);

-	dmac_flush_range(source_vm_list, source_vm_list + source_nelems);
+	dmac_flush_range(source_vm_copy, source_vm_copy + source_nelems);
 	dmac_flush_range(info_list->list_head, info_list->list_head +
 			(info_list->list_size / sizeof(*info_list->list_head)));
 	dmac_flush_range(dest_info_list->dest_info, dest_info_list->dest_info +
@@ -311,8 +325,9 @@ int hyp_assign_table(struct sg_table *table,
 		pr_info("%s: Failed to assign memory protection, ret = %d\n",
 			__func__, ret);

+	kfree(source_vm_copy);
+err2:
 	destroy_dest_info_list(dest_info_list);
-
 err1:
 	destroy_info_list(info_list);
 	return ret;
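For readers who want the fix in isolation: below is a minimal, self-contained sketch of the pattern the diff above applies, not the actual secure_buffer.c code. The helper name share_vm_list_with_hyp and its parameters are illustrative; the underlying fact is that kmalloc'd memory is aligned to at least the kernel's minimum DMA/cache-line alignment, so cache maintenance on the copy cannot spill onto data adjacent to the caller's original array.

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

/*
 * Illustrative helper (hypothetical name, not part of the driver):
 * copy a possibly unaligned caller-supplied VM list into a kmalloc'd
 * buffer before doing any cache maintenance on it.
 */
static int share_vm_list_with_hyp(u32 *source_vm_list, int source_nelems)
{
	u32 *source_vm_copy;

	source_vm_copy = kmalloc_array(source_nelems,
				       sizeof(*source_vm_copy), GFP_KERNEL);
	if (!source_vm_copy)
		return -ENOMEM;

	memcpy(source_vm_copy, source_vm_list,
	       sizeof(*source_vm_copy) * source_nelems);

	/* Flush the cache-aligned copy, never the caller's buffer. */
	dmac_flush_range(source_vm_copy, source_vm_copy + source_nelems);

	/*
	 * ...here the real function passes virt_to_phys(source_vm_copy)
	 * and its byte size to the hypervisor via the scm_desc arguments
	 * shown in the diff above...
	 */

	kfree(source_vm_copy);
	return 0;
}

In the actual change the temporary copy is also freed on the error path (the kfree() before the new err2 label in the last hunk), so the buffer never outlives the hyp_assign_table() call.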