Merge "soc: qcom: smem: validate fields of shared structures"

commit b061f9ea06
Author: Linux Build Service Account
Date:   2019-06-27 11:34:46 -07:00
Committed by: Gerrit - the friendly Code Review server


@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2015, Sony Mobile Communications AB.
- * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, 2019 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -228,7 +228,7 @@ struct smem_region {
  * struct qcom_smem - device data for the smem device
  * @dev:	device pointer
  * @hwlock:	reference to a hwspinlock
- * @partitions:	list of pointers to partitions affecting the current
+ * @ptable_entries: list of pointers to partitions table entry of current
  *		processor/host
  * @num_regions: number of @regions
  * @regions:	list of the memory regions defining the shared memory
@@ -238,12 +238,24 @@ struct qcom_smem {
 	struct hwspinlock *hwlock;
 
-	struct smem_partition_header *partitions[SMEM_HOST_COUNT];
+	struct smem_ptable_entry *ptable_entries[SMEM_HOST_COUNT];
 
 	unsigned num_regions;
 	struct smem_region regions[0];
 };
 
+/* Pointer to the one and only smem handle */
+static struct qcom_smem *__smem;
+
+/* Timeout (ms) for the trylock of remote spinlocks */
+#define HWSPINLOCK_TIMEOUT 1000
+
+static struct smem_partition_header *
+ptable_entry_to_phdr(struct smem_ptable_entry *entry)
+{
+	return __smem->regions[0].virt_base + le32_to_cpu(entry->offset);
+}
+
 static struct smem_private_entry *
 phdr_to_last_private_entry(struct smem_partition_header *phdr)
 {
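
The new ptable_entry_to_phdr() helper resolves a partition table entry to the header of the partition it describes; entry->offset is relative to the first (main) SMEM region. For reference, a sketch of the two structures involved, assuming their layouts match the definitions found elsewhere in smem.c at this time:

	/* Entry in the read-only partition table: its bounds can be trusted. */
	struct smem_ptable_entry {
		__le32 offset;		/* partition start, relative to region 0 */
		__le32 size;		/* partition size in bytes */
		__le32 flags;
		__le16 host0;		/* first host owning the partition */
		__le16 host1;		/* second host owning the partition */
		__le32 reserved[8];
	};

	/* Header at the start of the partition itself: writable by the
	 * remote host, so none of these fields can be trusted. */
	struct smem_partition_header {
		u8 magic[4];
		__le16 host0;
		__le16 host1;
		__le32 size;
		__le32 offset_free_uncached;
		__le32 offset_free_cached;
		__le32 reserved[3];
	};

This split is the crux of the patch: the driver now caches the trusted smem_ptable_entry per host rather than a pointer into the partition itself, so every value derived from remote-writable header fields can be checked against entry->size.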
@@ -283,32 +295,33 @@ static void *entry_to_item(struct smem_private_entry *e)
 	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
 }
 
-/* Pointer to the one and only smem handle */
-static struct qcom_smem *__smem;
-
-/* Timeout (ms) for the trylock of remote spinlocks */
-#define HWSPINLOCK_TIMEOUT 1000
-
 static int qcom_smem_alloc_private(struct qcom_smem *smem,
-				   unsigned host,
+				   struct smem_ptable_entry *entry,
 				   unsigned item,
 				   size_t size)
 {
-	struct smem_partition_header *phdr;
 	struct smem_private_entry *hdr, *end;
+	struct smem_partition_header *phdr;
 	size_t alloc_size;
 	void *cached;
+	void *p_end;
+
+	phdr = ptable_entry_to_phdr(entry);
+	p_end = (void *)phdr + le32_to_cpu(entry->size);
 
-	phdr = smem->partitions[host];
 	hdr = phdr_to_first_private_entry(phdr);
 	end = phdr_to_last_private_entry(phdr);
 	cached = phdr_to_first_cached_entry(phdr);
 
+	if (WARN_ON((void *)end > p_end || (void *)cached > p_end))
+		return -EINVAL;
+
 	while (hdr < end) {
 		if (hdr->canary != SMEM_PRIVATE_CANARY) {
 			dev_err(smem->dev,
-				"Found invalid canary in host %d partition\n",
-				host);
+				"Found invalid canary in host %d:%d partition\n",
+				phdr->host0, phdr->host1);
 			return -EINVAL;
 		}
@@ -317,6 +330,8 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
 		hdr = private_entry_next(hdr);
 	}
 
+	if (WARN_ON((void *)hdr > p_end))
+		return -EINVAL;
+
 	/* Check that we don't grow into the cached region */
 	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
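
The WARN_ON checks above establish the validation pattern used throughout the patch: p_end marks the first byte past the partition, computed from the trusted entry->size, and every pointer derived from remote-writable header fields (offset_free_uncached, offset_free_cached, per-entry sizes) is compared against it before use. A minimal kernel-style sketch of that invariant, with hypothetical names:

	#include <linux/types.h>	/* bool, u32 */

	/*
	 * Hypothetical illustration: a pointer derived from untrusted
	 * offsets is usable only while it stays inside
	 * [base, base + trusted_size]. This mirrors the
	 * WARN_ON((void *)x > p_end) checks added in this patch.
	 */
	static bool in_partition_bounds(const void *base,
					unsigned long trusted_size,
					const void *derived)
	{
		const void *p_end = (const char *)base + trusted_size;

		return derived >= base && derived <= p_end;
	}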
@@ -389,6 +404,7 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
  */
 int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
 {
+	struct smem_ptable_entry *entry;
 	unsigned long flags;
 	int ret;
@@ -407,10 +423,12 @@ int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
 	if (ret)
 		return ret;
 
-	if (host < SMEM_HOST_COUNT && __smem->partitions[host])
-		ret = qcom_smem_alloc_private(__smem, host, item, size);
-	else
+	if (host < SMEM_HOST_COUNT && __smem->ptable_entries[host]) {
+		entry = __smem->ptable_entries[host];
+		ret = qcom_smem_alloc_private(__smem, entry, item, size);
+	} else {
 		ret = qcom_smem_alloc_global(__smem, item, size);
+	}
 
 	hwspin_unlock_irqrestore(__smem->hwlock, &flags);
@@ -422,9 +440,11 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
 				  unsigned item,
 				  size_t *size)
 {
-	struct smem_global_entry *entry;
 	struct smem_header *header;
 	struct smem_region *area;
+	struct smem_global_entry *entry;
+	u64 entry_offset;
+	u32 e_size;
 	u32 aux_base;
 	unsigned i;
@@ -442,9 +462,16 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
 		area = &smem->regions[i];
 
 		if (area->aux_base == aux_base || !aux_base) {
+			e_size = le32_to_cpu(entry->size);
+			entry_offset = le32_to_cpu(entry->offset);
+
+			if (WARN_ON(e_size + entry_offset > area->size))
+				return ERR_PTR(-EINVAL);
+
 			if (size != NULL)
-				*size = le32_to_cpu(entry->size);
-			return area->virt_base + le32_to_cpu(entry->offset);
+				*size = e_size;
+
+			return area->virt_base + entry_offset;
 		}
 	}
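
The type choice feeding this check matters: entry_offset is declared u64 while e_size is u32, so e_size + entry_offset is evaluated in 64 bits and a corrupted table-of-contents entry cannot wrap the sum back below area->size. A worked example with made-up values (types as in <linux/types.h>):

	u32 e_size      = 0xffff0000;	/* hypothetical corrupted entry->size */
	u32 entry_off32 = 0x00020000;	/* hypothetical entry->offset */

	/*
	 * A 32-bit sum wraps to 0x00010000 and would sneak past the
	 * '> area->size' test; the 64-bit sum 0x100010000 is rejected.
	 */
	u64 sum = (u64)e_size + entry_off32;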
@@ -452,35 +479,58 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
 }
 
 static void *qcom_smem_get_private(struct qcom_smem *smem,
-				   unsigned host,
+				   struct smem_ptable_entry *entry,
 				   unsigned item,
 				   size_t *size)
 {
 	struct smem_partition_header *phdr;
 	struct smem_private_entry *e, *end;
+	void *item_ptr, *p_end;
+	u32 partition_size;
+	u32 padding_data;
+	u32 e_size;
+
+	phdr = ptable_entry_to_phdr(entry);
+	partition_size = le32_to_cpu(entry->size);
+	p_end = (void *)phdr + partition_size;
 
-	phdr = smem->partitions[host];
 	e = phdr_to_first_private_entry(phdr);
 	end = phdr_to_last_private_entry(phdr);
 
+	if (WARN_ON((void *)end > p_end))
+		return ERR_PTR(-EINVAL);
+
 	while (e < end) {
 		if (e->canary != SMEM_PRIVATE_CANARY) {
 			dev_err(smem->dev,
-				"Found invalid canary in host %d partition\n",
-				host);
+				"Found invalid canary in host %d:%d partition\n",
+				phdr->host0, phdr->host1);
 			return ERR_PTR(-EINVAL);
 		}
 
 		if (le16_to_cpu(e->item) == item) {
-			if (size != NULL)
-				*size = le32_to_cpu(e->size) -
-					le16_to_cpu(e->padding_data);
+			if (size != NULL) {
+				e_size = le32_to_cpu(e->size);
+				padding_data = le16_to_cpu(e->padding_data);
 
-			return entry_to_item(e);
+				if (e_size < partition_size
+				    && padding_data < e_size)
+					*size = e_size - padding_data;
+				else
+					return ERR_PTR(-EINVAL);
+			}
+
+			item_ptr = entry_to_item(e);
+			if (WARN_ON(item_ptr > p_end))
+				return ERR_PTR(-EINVAL);
+
+			return item_ptr;
 		}
 
 		e = private_entry_next(e);
 	}
 
+	if (WARN_ON((void *)e > p_end))
+		return ERR_PTR(-EINVAL);
+
 	return ERR_PTR(-ENOENT);
 }
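
Beyond the pointer checks, the reported item size is now sanity-checked before the subtraction: e->size and e->padding_data both live in shared memory, so without the padding_data < e_size guard a corrupted entry would make the subtraction wrap. Illustrative values (types as in <linux/types.h>):

	u32 e_size       = 0x10;	/* hypothetical entry size */
	u32 padding_data = 0x18;	/* corrupted: padding exceeds entry size */

	/* Without the guard, e_size - padding_data wraps to 0xfffffff8,
	 * a bogus size that every caller would then trust. */

The e_size < partition_size half of the condition likewise rejects any entry claiming to be larger than the partition that contains it.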
@@ -496,6 +546,7 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
  */
 void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
 {
+	struct smem_ptable_entry *entry;
 	unsigned long flags;
 	int ret;
 	void *ptr = ERR_PTR(-EPROBE_DEFER);
@@ -509,11 +560,12 @@ void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
 	if (ret)
 		return ERR_PTR(ret);
 
-	if (host < SMEM_HOST_COUNT && __smem->partitions[host])
-		ptr = qcom_smem_get_private(__smem, host, item, size);
-	else
+	if (host < SMEM_HOST_COUNT && __smem->ptable_entries[host]) {
+		entry = __smem->ptable_entries[host];
+		ptr = qcom_smem_get_private(__smem, entry, item, size);
+	} else {
 		ptr = qcom_smem_get_global(__smem, item, size);
+	}
 
 	hwspin_unlock_irqrestore(__smem->hwlock, &flags);
 
 	return ptr;
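
For context, a typical in-kernel caller of this API looks roughly like the sketch below; the host and item IDs are illustrative placeholders, since real values are platform-specific constants:

	size_t size;
	void *ptr;

	/* SMEM_APPS and SMEM_ITEM_EXAMPLE are hypothetical IDs. */
	ptr = qcom_smem_get(SMEM_APPS, SMEM_ITEM_EXAMPLE, &size);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	/* On success, both ptr and size have been bounds-checked. */

With this patch applied, such callers can no longer be handed a pointer or size that strays outside the partition, even if a remote processor corrupts the shared header.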
@@ -531,19 +583,28 @@ EXPORT_SYMBOL(qcom_smem_get);
 int qcom_smem_get_free_space(unsigned host)
 {
 	struct smem_partition_header *phdr;
+	struct smem_ptable_entry *entry;
 	struct smem_header *header;
 	unsigned ret;
 
 	if (!__smem)
 		return -EPROBE_DEFER;
 
-	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
-		phdr = __smem->partitions[host];
+	if (host < SMEM_HOST_COUNT && __smem->ptable_entries[host]) {
+		entry = __smem->ptable_entries[host];
+		phdr = ptable_entry_to_phdr(entry);
+
 		ret = le32_to_cpu(phdr->offset_free_cached) -
 		      le32_to_cpu(phdr->offset_free_uncached);
+
+		if (ret > le32_to_cpu(entry->size))
+			return -EINVAL;
 	} else {
 		header = __smem->regions[0].virt_base;
 		ret = le32_to_cpu(header->available);
+
+		if (ret > __smem->regions[0].size)
+			return -EINVAL;
 	}
 
 	return ret;
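
The same principle applies to the free-space query: both offset_free_* fields are remote-writable, so their difference is clamped to the trusted entry->size, and the global path bounds header->available by the region size. A hedged sketch of the clamp in isolation:

	#include <linux/types.h>	/* u32 */

	/* Hypothetical illustration of the free-space clamp added above. */
	static int validated_free_space(u32 free_cached, u32 free_uncached,
					u32 trusted_size)
	{
		u32 ret = free_cached - free_uncached;	/* both untrusted */

		if (ret > trusted_size)
			return -EINVAL;		/* corrupted header */

		return ret;
	}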
@@ -616,7 +677,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
 		return -EINVAL;
 	}
 
-	if (smem->partitions[remote_host]) {
+	if (smem->ptable_entries[remote_host]) {
 		dev_err(smem->dev,
 			"Already found a partition for host %d\n",
 			remote_host);
@@ -658,7 +719,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
 			return -EINVAL;
 		}
 
-		smem->partitions[remote_host] = header;
+		smem->ptable_entries[remote_host] = entry;
 	}
 
 	return 0;