Merge "msm: kgsl: Add sparse memory support"

Linux Build Service Account authored 2016-08-16 16:34:48 -07:00, committed by Gerrit - the friendly Code Review server
commit f4686dcc3a
12 changed files with 1156 additions and 90 deletions

View file: drivers/gpu/msm/kgsl.c

@@ -78,6 +78,16 @@ struct kgsl_dma_buf_meta {
struct sg_table *table;
};
static inline struct kgsl_pagetable *_get_memdesc_pagetable(
struct kgsl_pagetable *pt, struct kgsl_mem_entry *entry)
{
/* if a secured buffer, map it to secure global pagetable */
if (kgsl_memdesc_is_secured(&entry->memdesc))
return pt->mmu->securepagetable;
return pt;
}
static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry);
static const struct file_operations kgsl_fops;
@@ -445,14 +455,17 @@ kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
/* map the memory after unlocking if gpuaddr has been assigned */
if (entry->memdesc.gpuaddr) {
/* if a secured buffer map it to secure global pagetable */
+ pagetable = process->pagetable;
if (kgsl_memdesc_is_secured(&entry->memdesc))
- pagetable = process->pagetable->mmu->securepagetable;
- else
- pagetable = process->pagetable;
+ pagetable = pagetable->mmu->securepagetable;
entry->memdesc.pagetable = pagetable;
- ret = kgsl_mmu_map(pagetable, &entry->memdesc);
+ if (entry->memdesc.flags & KGSL_MEMFLAGS_SPARSE_VIRT)
+ ret = kgsl_mmu_sparse_dummy_map(pagetable,
+ &entry->memdesc, 0, entry->memdesc.size);
+ else if (entry->memdesc.gpuaddr)
+ ret = kgsl_mmu_map(pagetable, &entry->memdesc);
if (ret)
kgsl_mem_entry_detach_process(entry);
}
@@ -1270,6 +1283,24 @@ kgsl_sharedmem_find(struct kgsl_process_private *private, uint64_t gpuaddr)
}
EXPORT_SYMBOL(kgsl_sharedmem_find);
struct kgsl_mem_entry * __must_check
kgsl_sharedmem_find_id_flags(struct kgsl_process_private *process,
unsigned int id, uint64_t flags)
{
int count = 0;
struct kgsl_mem_entry *entry;
spin_lock(&process->mem_lock);
entry = idr_find(&process->mem_idr, id);
if (entry)
if (!entry->pending_free &&
(flags & entry->memdesc.flags) == flags)
count = kgsl_mem_entry_get(entry);
spin_unlock(&process->mem_lock);
return (count == 0) ? NULL : entry;
}
/**
* kgsl_sharedmem_find_id() - find a memory entry by id
* @process: the owning process
@@ -1283,19 +1314,7 @@ EXPORT_SYMBOL(kgsl_sharedmem_find);
struct kgsl_mem_entry * __must_check
kgsl_sharedmem_find_id(struct kgsl_process_private *process, unsigned int id)
{
- int result;
- struct kgsl_mem_entry *entry;
- drain_workqueue(kgsl_driver.mem_workqueue);
- spin_lock(&process->mem_lock);
- entry = idr_find(&process->mem_idr, id);
- result = kgsl_mem_entry_get(entry);
- spin_unlock(&process->mem_lock);
- if (result == 0)
- return NULL;
- return entry;
+ return kgsl_sharedmem_find_id_flags(process, id, 0);
}
/**
@@ -3121,6 +3140,546 @@ long kgsl_ioctl_gpumem_get_info(struct kgsl_device_private *dev_priv,
return result;
}
static inline int _sparse_alloc_param_sanity_check(uint64_t size,
uint64_t pagesize)
{
if (size == 0 || pagesize == 0)
return -EINVAL;
if (pagesize != PAGE_SIZE && pagesize != SZ_64K)
return -EINVAL;
if (pagesize > size || !IS_ALIGNED(size, pagesize))
return -EINVAL;
return 0;
}
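
As a quick illustration of the check above (values invented here, and assuming a 4K PAGE_SIZE):

/* Illustrative only, not part of the patch: */
_sparse_alloc_param_sanity_check(SZ_1M, PAGE_SIZE);       /* 0: 4K pages, aligned */
_sparse_alloc_param_sanity_check(SZ_1M, SZ_64K);          /* 0: 64K pages, aligned */
_sparse_alloc_param_sanity_check(SZ_4K, SZ_64K);          /* -EINVAL: pagesize > size */
_sparse_alloc_param_sanity_check(SZ_1M + SZ_4K, SZ_64K);  /* -EINVAL: size not pagesize-aligned */
_sparse_alloc_param_sanity_check(SZ_1M, SZ_8K);           /* -EINVAL: pagesize must be PAGE_SIZE or SZ_64K */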
long kgsl_ioctl_sparse_phys_alloc(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
struct kgsl_process_private *process = dev_priv->process_priv;
struct kgsl_sparse_phys_alloc *param = data;
struct kgsl_mem_entry *entry;
int ret;
int id;
ret = _sparse_alloc_param_sanity_check(param->size, param->pagesize);
if (ret)
return ret;
entry = kgsl_mem_entry_create();
if (entry == NULL)
return -ENOMEM;
ret = kgsl_process_private_get(process);
if (!ret) {
ret = -EBADF;
goto err_free_entry;
}
idr_preload(GFP_KERNEL);
spin_lock(&process->mem_lock);
/* Allocate the ID but don't attach the pointer just yet */
id = idr_alloc(&process->mem_idr, NULL, 1, 0, GFP_NOWAIT);
spin_unlock(&process->mem_lock);
idr_preload_end();
if (id < 0) {
ret = id;
goto err_put_proc_priv;
}
entry->id = id;
entry->priv = process;
entry->memdesc.flags = KGSL_MEMFLAGS_SPARSE_PHYS;
kgsl_memdesc_set_align(&entry->memdesc, ilog2(param->pagesize));
ret = kgsl_allocate_user(dev_priv->device, &entry->memdesc,
process->pagetable, param->size, entry->memdesc.flags);
if (ret)
goto err_remove_idr;
/* Sanity check to verify we got correct pagesize */
if (param->pagesize != PAGE_SIZE && entry->memdesc.sgt != NULL) {
struct scatterlist *s;
int i;
for_each_sg(entry->memdesc.sgt->sgl, s,
entry->memdesc.sgt->nents, i) {
if (!IS_ALIGNED(s->length, param->pagesize))
goto err_invalid_pages;
}
}
param->id = entry->id;
param->flags = entry->memdesc.flags;
trace_sparse_phys_alloc(entry->id, param->size, param->pagesize);
kgsl_mem_entry_commit_process(entry);
return 0;
err_invalid_pages:
kgsl_sharedmem_free(&entry->memdesc);
err_remove_idr:
spin_lock(&process->mem_lock);
idr_remove(&process->mem_idr, entry->id);
spin_unlock(&process->mem_lock);
err_put_proc_priv:
kgsl_process_private_put(process);
err_free_entry:
kfree(entry);
return ret;
}
long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
struct kgsl_process_private *process = dev_priv->process_priv;
struct kgsl_sparse_phys_free *param = data;
struct kgsl_mem_entry *entry;
entry = kgsl_sharedmem_find_id_flags(process, param->id,
KGSL_MEMFLAGS_SPARSE_PHYS);
if (entry == NULL)
return -EINVAL;
if (entry->memdesc.cur_bindings != 0) {
kgsl_mem_entry_put(entry);
return -EINVAL;
}
trace_sparse_phys_free(entry->id);
/* One put for find_id(), one put for the kgsl_mem_entry_create() */
kgsl_mem_entry_put(entry);
kgsl_mem_entry_put(entry);
return 0;
}
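
The double put above balances the two references that are live at this point; roughly (illustrative reference walk, not code from the patch):

/*
 * kgsl_mem_entry_create()          refcount = 1 (kref_init)
 * kgsl_sharedmem_find_id_flags()   refcount = 2 (get in find)
 * kgsl_mem_entry_put()             refcount = 1 (drops the find reference)
 * kgsl_mem_entry_put()             refcount = 0 (entry is destroyed)
 */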
long kgsl_ioctl_sparse_virt_alloc(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
struct kgsl_sparse_virt_alloc *param = data;
struct kgsl_mem_entry *entry;
int ret;
ret = _sparse_alloc_param_sanity_check(param->size, param->pagesize);
if (ret)
return ret;
entry = kgsl_mem_entry_create();
if (entry == NULL)
return -ENOMEM;
entry->memdesc.flags = KGSL_MEMFLAGS_SPARSE_VIRT;
entry->memdesc.size = param->size;
entry->memdesc.cur_bindings = 0;
kgsl_memdesc_set_align(&entry->memdesc, ilog2(param->pagesize));
spin_lock_init(&entry->bind_lock);
entry->bind_tree = RB_ROOT;
ret = kgsl_mem_entry_attach_process(entry, dev_priv);
if (ret) {
kfree(entry);
return ret;
}
param->id = entry->id;
param->gpuaddr = entry->memdesc.gpuaddr;
param->flags = entry->memdesc.flags;
trace_sparse_virt_alloc(entry->id, param->size, param->pagesize);
kgsl_mem_entry_commit_process(entry);
return 0;
}
long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
struct kgsl_process_private *process = dev_priv->process_priv;
struct kgsl_sparse_virt_free *param = data;
struct kgsl_mem_entry *entry = NULL;
entry = kgsl_sharedmem_find_id_flags(process, param->id,
KGSL_MEMFLAGS_SPARSE_VIRT);
if (entry == NULL)
return -EINVAL;
if (entry->bind_tree.rb_node != NULL) {
kgsl_mem_entry_put(entry);
return -EINVAL;
}
trace_sparse_virt_free(entry->id);
/* One put for find_id(), one put for the kgsl_mem_entry_create() */
kgsl_mem_entry_put(entry);
kgsl_mem_entry_put(entry);
return 0;
}
static int _sparse_add_to_bind_tree(struct kgsl_mem_entry *entry,
uint64_t v_offset,
struct kgsl_memdesc *memdesc,
uint64_t p_offset,
uint64_t size,
uint64_t flags)
{
struct sparse_bind_object *new;
struct rb_node **node, *parent = NULL;
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (new == NULL)
return -ENOMEM;
new->v_off = v_offset;
new->p_off = p_offset;
new->p_memdesc = memdesc;
new->size = size;
new->flags = flags;
node = &entry->bind_tree.rb_node;
while (*node != NULL) {
struct sparse_bind_object *this;
parent = *node;
this = rb_entry(parent, struct sparse_bind_object, node);
if (new->v_off < this->v_off)
node = &parent->rb_left;
else if (new->v_off > this->v_off)
node = &parent->rb_right;
}
rb_link_node(&new->node, parent, node);
rb_insert_color(&new->node, &entry->bind_tree);
return 0;
}
static int _sparse_rm_from_bind_tree(struct kgsl_mem_entry *entry,
struct sparse_bind_object *obj,
uint64_t v_offset, uint64_t size)
{
spin_lock(&entry->bind_lock);
if (v_offset == obj->v_off && size >= obj->size) {
/*
* We are all encompassing, remove the entry and free
* things up
*/
rb_erase(&obj->node, &entry->bind_tree);
kfree(obj);
} else if (v_offset == obj->v_off) {
/*
* We are the front of the node, adjust the front of
* the node
*/
obj->v_off += size;
obj->p_off += size;
obj->size -= size;
} else if ((v_offset + size) == (obj->v_off + obj->size)) {
/*
* We are at the end of the obj, adjust the beginning
* points
*/
obj->size -= size;
} else {
/*
* We are in the middle of a node, split it up and
* create a new mini node. Adjust this node's bounds
* and add the new node to the list.
*/
uint64_t tmp_size = obj->size;
int ret;
obj->size = v_offset - obj->v_off;
spin_unlock(&entry->bind_lock);
ret = _sparse_add_to_bind_tree(entry, v_offset + size,
obj->p_memdesc,
obj->p_off + (v_offset - obj->v_off) + size,
tmp_size - (v_offset - obj->v_off) - size,
obj->flags);
return ret;
}
spin_unlock(&entry->bind_lock);
return 0;
}
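
The four branches above handle full, front, tail, and middle overlap. A worked example with invented offsets, for a bind object spanning [0x10000, 0x50000) (v_off = 0x10000, size = 0x40000):

/*
 * 1) unbind(0x10000, 0x40000): covers the whole object, so it is
 *    erased from the tree and freed.
 * 2) unbind(0x10000, 0x10000): front trim, object becomes
 *    [0x20000, 0x50000) and p_off advances by 0x10000.
 * 3) unbind(0x40000, 0x10000): tail trim, object becomes
 *    [0x10000, 0x40000).
 * 4) unbind(0x20000, 0x10000): middle split, object shrinks to
 *    [0x10000, 0x20000) and a new node is inserted for
 *    [0x30000, 0x50000).
 */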
static struct sparse_bind_object *_find_containing_bind_obj(
struct kgsl_mem_entry *entry,
uint64_t offset, uint64_t size)
{
struct sparse_bind_object *obj = NULL;
struct rb_node *node = entry->bind_tree.rb_node;
spin_lock(&entry->bind_lock);
while (node != NULL) {
obj = rb_entry(node, struct sparse_bind_object, node);
if (offset == obj->v_off) {
break;
} else if (offset < obj->v_off) {
if (offset + size > obj->v_off)
break;
node = node->rb_left;
obj = NULL;
} else if (offset > obj->v_off) {
if (offset < obj->v_off + obj->size)
break;
node = node->rb_right;
obj = NULL;
}
}
spin_unlock(&entry->bind_lock);
return obj;
}
static int _sparse_unbind(struct kgsl_mem_entry *entry,
struct sparse_bind_object *bind_obj,
uint64_t offset, uint64_t size)
{
struct kgsl_memdesc *memdesc = bind_obj->p_memdesc;
struct kgsl_pagetable *pt = memdesc->pagetable;
int ret;
if (memdesc->cur_bindings < (size / PAGE_SIZE))
return -EINVAL;
memdesc->cur_bindings -= size / PAGE_SIZE;
ret = kgsl_mmu_unmap_offset(pt, memdesc,
entry->memdesc.gpuaddr, offset, size);
if (ret)
return ret;
ret = kgsl_mmu_sparse_dummy_map(pt, &entry->memdesc, offset, size);
if (ret)
return ret;
ret = _sparse_rm_from_bind_tree(entry, bind_obj, offset, size);
if (ret == 0) {
atomic_long_sub(size, &kgsl_driver.stats.mapped);
trace_sparse_unbind(entry->id, offset, size);
}
return ret;
}
static long sparse_unbind_range(struct kgsl_sparse_binding_object *obj,
struct kgsl_mem_entry *virt_entry)
{
struct sparse_bind_object *bind_obj;
int ret = 0;
uint64_t size = obj->size;
uint64_t tmp_size = obj->size;
uint64_t offset = obj->virtoffset;
while (size > 0 && ret == 0) {
tmp_size = size;
bind_obj = _find_containing_bind_obj(virt_entry, offset, size);
if (bind_obj == NULL)
return 0;
if (bind_obj->v_off > offset) {
tmp_size = size - bind_obj->v_off - offset;
if (tmp_size > bind_obj->size)
tmp_size = bind_obj->size;
offset = bind_obj->v_off;
} else if (bind_obj->v_off < offset) {
uint64_t diff = offset - bind_obj->v_off;
if (diff + size > bind_obj->size)
tmp_size = bind_obj->size - diff;
} else {
if (tmp_size > bind_obj->size)
tmp_size = bind_obj->size;
}
ret = _sparse_unbind(virt_entry, bind_obj, offset, tmp_size);
if (ret == 0) {
offset += tmp_size;
size -= tmp_size;
}
}
return ret;
}
static inline bool _is_phys_bindable(struct kgsl_mem_entry *phys_entry,
uint64_t offset, uint64_t size, uint64_t flags)
{
struct kgsl_memdesc *memdesc = &phys_entry->memdesc;
if (!IS_ALIGNED(offset | size, kgsl_memdesc_get_pagesize(memdesc)))
return false;
if (!(flags & KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS) &&
offset + size > memdesc->size)
return false;
return true;
}
static int _sparse_bind(struct kgsl_process_private *process,
struct kgsl_mem_entry *virt_entry, uint64_t v_offset,
struct kgsl_mem_entry *phys_entry, uint64_t p_offset,
uint64_t size, uint64_t flags)
{
int ret;
struct kgsl_pagetable *pagetable;
struct kgsl_memdesc *memdesc = &phys_entry->memdesc;
/* map the memory after unlocking if gpuaddr has been assigned */
if (memdesc->gpuaddr)
return -EINVAL;
if (memdesc->useraddr != 0)
return -EINVAL;
pagetable = memdesc->pagetable;
/* Clear out any mappings */
ret = kgsl_mmu_unmap_offset(pagetable, &virt_entry->memdesc,
virt_entry->memdesc.gpuaddr, v_offset, size);
if (ret)
return ret;
ret = kgsl_mmu_map_offset(pagetable, virt_entry->memdesc.gpuaddr,
v_offset, memdesc, p_offset, size, flags);
if (ret) {
/* Try to clean up, but not the end of the world */
kgsl_mmu_sparse_dummy_map(pagetable, &virt_entry->memdesc,
v_offset, size);
return ret;
}
ret = _sparse_add_to_bind_tree(virt_entry, v_offset, memdesc,
p_offset, size, flags);
if (ret == 0)
memdesc->cur_bindings += size / PAGE_SIZE;
return ret;
}
static long sparse_bind_range(struct kgsl_process_private *private,
struct kgsl_sparse_binding_object *obj,
struct kgsl_mem_entry *virt_entry)
{
struct kgsl_mem_entry *phys_entry;
int ret;
phys_entry = kgsl_sharedmem_find_id_flags(private, obj->id,
KGSL_MEMFLAGS_SPARSE_PHYS);
if (phys_entry == NULL)
return -EINVAL;
if (!_is_phys_bindable(phys_entry, obj->physoffset, obj->size,
obj->flags)) {
kgsl_mem_entry_put(phys_entry);
return -EINVAL;
}
if (kgsl_memdesc_get_align(&virt_entry->memdesc) !=
kgsl_memdesc_get_align(&phys_entry->memdesc)) {
kgsl_mem_entry_put(phys_entry);
return -EINVAL;
}
ret = sparse_unbind_range(obj, virt_entry);
if (ret) {
kgsl_mem_entry_put(phys_entry);
return -EINVAL;
}
ret = _sparse_bind(private, virt_entry, obj->virtoffset,
phys_entry, obj->physoffset, obj->size,
obj->flags & KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS);
if (ret == 0) {
KGSL_STATS_ADD(obj->size, &kgsl_driver.stats.mapped,
&kgsl_driver.stats.mapped_max);
trace_sparse_bind(virt_entry->id, obj->virtoffset,
phys_entry->id, obj->physoffset,
obj->size, obj->flags);
}
kgsl_mem_entry_put(phys_entry);
return ret;
}
long kgsl_ioctl_sparse_bind(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
struct kgsl_process_private *private = dev_priv->process_priv;
struct kgsl_sparse_bind *param = data;
struct kgsl_sparse_binding_object obj;
struct kgsl_mem_entry *virt_entry;
int pg_sz;
void __user *ptr;
int ret = 0;
int i = 0;
ptr = (void __user *) (uintptr_t) param->list;
if (param->size > sizeof(struct kgsl_sparse_binding_object) ||
param->count == 0 || ptr == NULL)
return -EINVAL;
virt_entry = kgsl_sharedmem_find_id_flags(private, param->id,
KGSL_MEMFLAGS_SPARSE_VIRT);
if (virt_entry == NULL)
return -EINVAL;
pg_sz = kgsl_memdesc_get_pagesize(&virt_entry->memdesc);
for (i = 0; i < param->count; i++) {
memset(&obj, 0, sizeof(obj));
ret = _copy_from_user(&obj, ptr, sizeof(obj), param->size);
if (ret)
break;
/* Sanity check initial range */
if (obj.size == 0 ||
obj.virtoffset + obj.size > virt_entry->memdesc.size ||
!(IS_ALIGNED(obj.virtoffset | obj.size, pg_sz))) {
ret = -EINVAL;
break;
}
if (obj.flags & KGSL_SPARSE_BIND)
ret = sparse_bind_range(private, &obj, virt_entry);
else if (obj.flags & KGSL_SPARSE_UNBIND)
ret = sparse_unbind_range(&obj, virt_entry);
else
ret = -EINVAL;
if (ret)
break;
ptr += sizeof(obj);
}
kgsl_mem_entry_put(virt_entry);
return ret;
}
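
For context, this is roughly how a caller would package a single bind operation for the loop above (hypothetical userspace sketch; fd setup and error checks omitted):

/* Hypothetical userspace sketch, not part of this patch. */
struct kgsl_sparse_binding_object obj = {
    .virtoffset = 0,               /* offset into the virtual allocation */
    .physoffset = 0,               /* offset into the physical allocation */
    .size       = 64 * 1024,       /* must be pagesize-aligned */
    .flags      = KGSL_SPARSE_BIND,
    .id         = phys_id,         /* from IOCTL_KGSL_SPARSE_PHYS_ALLOC */
};
struct kgsl_sparse_bind bind = {
    .list  = (uint64_t) (uintptr_t) &obj,
    .id    = virt_id,              /* from IOCTL_KGSL_SPARSE_VIRT_ALLOC */
    .size  = sizeof(obj),
    .count = 1,
};

ioctl(fd, IOCTL_KGSL_SPARSE_BIND, &bind);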
long kgsl_ioctl_gpuobj_info(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
@@ -3356,6 +3915,13 @@ get_mmap_entry(struct kgsl_process_private *private,
goto err_put;
}
if (entry->memdesc.flags & KGSL_MEMFLAGS_SPARSE_PHYS) {
if (len != entry->memdesc.size) {
ret = -EINVAL;
goto err_put;
}
}
if (entry->memdesc.useraddr != 0) {
ret = -EBUSY;
goto err_put;

View file: drivers/gpu/msm/kgsl.h

@@ -184,6 +184,7 @@ struct kgsl_memdesc_ops {
* @attrs: dma attributes for this memory
* @pages: An array of pointers to allocated pages
* @page_count: Total number of pages allocated
* @cur_bindings: Number of sparse pages actively bound
*/
struct kgsl_memdesc {
struct kgsl_pagetable *pagetable;
@@ -202,6 +203,7 @@ struct kgsl_memdesc {
struct dma_attrs attrs;
struct page **pages;
unsigned int page_count;
unsigned int cur_bindings;
};
/*
@@ -235,6 +237,8 @@ struct kgsl_memdesc {
* @dev_priv: back pointer to the device file that created this entry.
* @metadata: String containing user specified metadata for the entry
* @work: Work struct used to schedule a kgsl_mem_entry_put in atomic contexts
* @bind_lock: Lock for sparse memory bindings
* @bind_tree: RB Tree for sparse memory bindings
*/
struct kgsl_mem_entry {
struct kref refcount;
@@ -246,6 +250,8 @@ struct kgsl_mem_entry {
int pending_free;
char metadata[KGSL_GPUOBJ_ALLOC_METADATA_MAX + 1];
struct work_struct work;
spinlock_t bind_lock;
struct rb_root bind_tree;
};
struct kgsl_device_private;
@@ -315,6 +321,24 @@ struct kgsl_protected_registers {
int range;
};
/**
* struct sparse_bind_object - Bind metadata
* @node: Node for the rb tree
* @p_memdesc: Physical memdesc bound to
* @v_off: Offset of bind in the virtual entry
* @p_off: Offset of bind in the physical memdesc
* @size: Size of the bind
* @flags: Flags for the bind
*/
struct sparse_bind_object {
struct rb_node node;
struct kgsl_memdesc *p_memdesc;
uint64_t v_off;
uint64_t p_off;
uint64_t size;
uint64_t flags;
};
long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data);
long kgsl_ioctl_device_setproperty(struct kgsl_device_private *dev_priv,
@@ -377,6 +401,19 @@ long kgsl_ioctl_gpu_command(struct kgsl_device_private *dev_priv,
long kgsl_ioctl_gpuobj_set_info(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data);
long kgsl_ioctl_sparse_phys_alloc(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data);
long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data);
long kgsl_ioctl_sparse_virt_alloc(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data);
long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data);
long kgsl_ioctl_sparse_bind(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data);
long kgsl_ioctl_sparse_unbind(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data);
void kgsl_mem_entry_destroy(struct kref *kref);
struct kgsl_mem_entry * __must_check

View file: drivers/gpu/msm/kgsl_compat.c

@@ -1,4 +1,4 @@
- /* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ /* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -372,6 +372,16 @@ static const struct kgsl_ioctl kgsl_compat_ioctl_funcs[] = {
kgsl_ioctl_gpu_command),
KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUOBJ_SET_INFO,
kgsl_ioctl_gpuobj_set_info),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SPARSE_PHYS_ALLOC,
kgsl_ioctl_sparse_phys_alloc),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SPARSE_PHYS_FREE,
kgsl_ioctl_sparse_phys_free),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SPARSE_VIRT_ALLOC,
kgsl_ioctl_sparse_virt_alloc),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SPARSE_VIRT_FREE,
kgsl_ioctl_sparse_virt_free),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SPARSE_BIND,
kgsl_ioctl_sparse_bind),
};
long kgsl_compat_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)

View file: drivers/gpu/msm/kgsl_debugfs.c

@@ -129,10 +129,13 @@ static int print_mem_entry(int id, void *ptr, void *data)
{
struct seq_file *s = data;
struct kgsl_mem_entry *entry = ptr;
- char flags[9];
+ char flags[10];
char usage[16];
struct kgsl_memdesc *m = &entry->memdesc;
+ if (m->flags & KGSL_MEMFLAGS_SPARSE_VIRT)
+ return 0;
flags[0] = kgsl_memdesc_is_global(m) ? 'g' : '-';
flags[1] = '-';
flags[2] = !(m->flags & KGSL_MEMFLAGS_GPUREADONLY) ? 'w' : '-';
@@ -141,7 +144,8 @@ static int print_mem_entry(int id, void *ptr, void *data)
flags[5] = kgsl_memdesc_use_cpu_map(m) ? 'p' : '-';
flags[6] = (m->useraddr) ? 'Y' : 'N';
flags[7] = kgsl_memdesc_is_secured(m) ? 's' : '-';
- flags[8] = '\0';
+ flags[8] = m->flags & KGSL_MEMFLAGS_SPARSE_PHYS ? 'P' : '-';
+ flags[9] = '\0';
kgsl_get_memory_usage(usage, sizeof(usage), m->flags);
@@ -211,6 +215,70 @@ static const struct file_operations process_mem_fops = {
.release = process_mem_release,
};
static int print_sparse_mem_entry(int id, void *ptr, void *data)
{
struct seq_file *s = data;
struct kgsl_mem_entry *entry = ptr;
struct kgsl_memdesc *m = &entry->memdesc;
struct rb_node *node;
if (!(m->flags & KGSL_MEMFLAGS_SPARSE_VIRT))
return 0;
node = rb_first(&entry->bind_tree);
while (node != NULL) {
struct sparse_bind_object *obj = rb_entry(node,
struct sparse_bind_object, node);
seq_printf(s, "%5d %16llx %16llx %16llx %16llx\n",
entry->id, entry->memdesc.gpuaddr,
obj->v_off, obj->size, obj->p_off);
node = rb_next(node);
}
seq_putc(s, '\n');
return 0;
}
static int process_sparse_mem_print(struct seq_file *s, void *unused)
{
struct kgsl_process_private *private = s->private;
seq_printf(s, "%5s %16s %16s %16s %16s\n",
"v_id", "gpuaddr", "v_offset", "v_size", "p_offset");
spin_lock(&private->mem_lock);
idr_for_each(&private->mem_idr, print_sparse_mem_entry, s);
spin_unlock(&private->mem_lock);
return 0;
}
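
With two bindings in place, the resulting debugfs file would read roughly as follows (invented values, formatted by the seq_printf calls above):

 v_id          gpuaddr         v_offset           v_size         p_offset
    3       4000000000            10000            40000                0
    3       4000000000            60000            20000            40000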
static int process_sparse_mem_open(struct inode *inode, struct file *file)
{
int ret;
pid_t pid = (pid_t) (unsigned long) inode->i_private;
struct kgsl_process_private *private = NULL;
private = kgsl_process_private_find(pid);
if (!private)
return -ENODEV;
ret = single_open(file, process_sparse_mem_print, private);
if (ret)
kgsl_process_private_put(private);
return ret;
}
static const struct file_operations process_sparse_mem_fops = {
.open = process_sparse_mem_open,
.read = seq_read,
.llseek = seq_lseek,
.release = process_mem_release,
};
/**
* kgsl_process_init_debugfs() - Initialize debugfs for a process
@@ -251,6 +319,15 @@ void kgsl_process_init_debugfs(struct kgsl_process_private *private)
if (IS_ERR_OR_NULL(dentry))
WARN((dentry == NULL),
"Unable to create 'mem' file for %s\n", name);
dentry = debugfs_create_file("sparse_mem", 0444, private->debug_root,
(void *) ((unsigned long) private->pid),
&process_sparse_mem_fops);
if (IS_ERR_OR_NULL(dentry))
WARN((dentry == NULL),
"Unable to create 'sparse_mem' file for %s\n", name);
}
void kgsl_core_debugfs_init(void)

View file: drivers/gpu/msm/kgsl_ioctl.c

@@ -1,4 +1,4 @@
- /* Copyright (c) 2008-2015, The Linux Foundation. All rights reserved.
+ /* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -90,6 +90,16 @@ static const struct kgsl_ioctl kgsl_ioctl_funcs[] = {
kgsl_ioctl_gpu_command),
KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUOBJ_SET_INFO,
kgsl_ioctl_gpuobj_set_info),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SPARSE_PHYS_ALLOC,
kgsl_ioctl_sparse_phys_alloc),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SPARSE_PHYS_FREE,
kgsl_ioctl_sparse_phys_free),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SPARSE_VIRT_ALLOC,
kgsl_ioctl_sparse_virt_alloc),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SPARSE_VIRT_FREE,
kgsl_ioctl_sparse_virt_free),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SPARSE_BIND,
kgsl_ioctl_sparse_bind),
};
long kgsl_ioctl_copy_in(unsigned int kernel_cmd, unsigned int user_cmd,

View file: drivers/gpu/msm/kgsl_iommu.c

@@ -323,8 +323,8 @@ static int _iommu_map_sync_pc(struct kgsl_pagetable *pt,
_unlock_if_secure_mmu(memdesc, pt->mmu);
if (ret) {
- KGSL_CORE_ERR("map err: %p, 0x%016llX, 0x%llx, 0x%x, %d\n",
- iommu_pt->domain, gpuaddr, size, flags, ret);
+ KGSL_CORE_ERR("map err: 0x%016llX, 0x%llx, 0x%x, %d\n",
+ gpuaddr, size, flags, ret);
return -ENODEV;
}
@@ -351,8 +351,8 @@ static int _iommu_unmap_sync_pc(struct kgsl_pagetable *pt,
_unlock_if_secure_mmu(memdesc, pt->mmu);
if (unmapped != size) {
- KGSL_CORE_ERR("unmap err: %p, 0x%016llx, 0x%llx, %zd\n",
- iommu_pt->domain, addr, size, unmapped);
+ KGSL_CORE_ERR("unmap err: 0x%016llx, 0x%llx, %zd\n",
+ addr, size, unmapped);
return -ENODEV;
}
@@ -421,8 +421,9 @@ static int _iommu_map_sg_offset_sync_pc(struct kgsl_pagetable *pt,
if (size != 0) {
/* Cleanup on error */
_iommu_unmap_sync_pc(pt, memdesc, addr, mapped);
- KGSL_CORE_ERR("map err: %p, 0x%016llX, %d, %x, %zd\n",
- iommu_pt->domain, addr, nents, flags, mapped);
+ KGSL_CORE_ERR(
+ "map sg offset err: 0x%016llX, %d, %x, %zd\n",
+ addr, nents, flags, mapped);
return -ENODEV;
}
@@ -451,8 +452,8 @@ static int _iommu_map_sg_sync_pc(struct kgsl_pagetable *pt,
_unlock_if_secure_mmu(memdesc, pt->mmu);
if (mapped == 0) {
- KGSL_CORE_ERR("map err: %p, 0x%016llX, %d, %x, %zd\n",
- iommu_pt->domain, addr, nents, flags, mapped);
+ KGSL_CORE_ERR("map sg err: 0x%016llX, %d, %x, %zd\n",
+ addr, nents, flags, mapped);
return -ENODEV;
}
@@ -467,6 +468,13 @@ static int _iommu_map_sg_sync_pc(struct kgsl_pagetable *pt,
static struct page *kgsl_guard_page;
static struct kgsl_memdesc kgsl_secure_guard_page_memdesc;
/*
* The dummy page is a placeholder/extra page to be used for sparse mappings.
* This page will be mapped to all virtual sparse bindings that are not
* physically backed.
*/
static struct page *kgsl_dummy_page;
/* These functions help find the nearest allocated memory entries on either side
* of a faulting address. If we know the nearby allocations memory we can
* get a better determination of what we think should have been located in the
@@ -1309,6 +1317,11 @@ static void kgsl_iommu_close(struct kgsl_mmu *mmu)
kgsl_guard_page = NULL;
}
if (kgsl_dummy_page != NULL) {
__free_page(kgsl_dummy_page);
kgsl_dummy_page = NULL;
}
kgsl_iommu_remove_global(mmu, &iommu->setstate);
kgsl_sharedmem_free(&iommu->setstate);
kgsl_cleanup_qdss_desc(mmu);
@@ -1523,6 +1536,8 @@
struct kgsl_memdesc *memdesc, uint64_t addr,
uint64_t offset, uint64_t size)
{
if (size == 0 || (size + offset) > kgsl_memdesc_footprint(memdesc))
return -EINVAL;
/*
* All GPU addresses as assigned are page aligned, but some
* functions perturb the gpuaddr with an offset, so apply the
@@ -1530,9 +1545,8 @@
*/
addr = PAGE_ALIGN(addr);
- if (size == 0 || addr == 0)
- return 0;
+ if (addr == 0)
+ return -EINVAL;
return _iommu_unmap_sync_pc(pt, memdesc, addr + offset, size);
}
@@ -1540,13 +1554,11 @@ kgsl_iommu_unmap_offset(struct kgsl_pagetable *pt,
static int
kgsl_iommu_unmap(struct kgsl_pagetable *pt, struct kgsl_memdesc *memdesc)
{
- uint64_t size = memdesc->size;
- if (kgsl_memdesc_has_guard_page(memdesc))
- size += kgsl_memdesc_guard_page_size(pt->mmu, memdesc);
+ if (memdesc->size == 0 || memdesc->gpuaddr == 0)
+ return -EINVAL;
return kgsl_iommu_unmap_offset(pt, memdesc, memdesc->gpuaddr, 0,
- size);
+ kgsl_memdesc_footprint(memdesc));
}
/**
@@ -1593,7 +1605,7 @@ static int _iommu_map_guard_page(struct kgsl_pagetable *pt,
} else {
if (kgsl_guard_page == NULL) {
kgsl_guard_page = alloc_page(GFP_KERNEL | __GFP_ZERO |
- __GFP_HIGHMEM);
+ __GFP_NORETRY | __GFP_HIGHMEM);
if (kgsl_guard_page == NULL)
return -ENOMEM;
}
@@ -1602,7 +1614,7 @@
}
return _iommu_map_sync_pc(pt, memdesc, gpuaddr, physaddr,
- kgsl_memdesc_guard_page_size(pt->mmu, memdesc),
+ kgsl_memdesc_guard_page_size(memdesc),
protflags & ~IOMMU_WRITE);
}
@@ -1658,6 +1670,100 @@ done:
return ret;
}
static int kgsl_iommu_sparse_dummy_map(struct kgsl_pagetable *pt,
struct kgsl_memdesc *memdesc, uint64_t offset, uint64_t size)
{
int ret = 0, i;
struct page **pages = NULL;
struct sg_table sgt;
int count = size >> PAGE_SHIFT;
/* verify the offset is within our range */
if (size + offset > memdesc->size)
return -EINVAL;
if (kgsl_dummy_page == NULL) {
kgsl_dummy_page = alloc_page(GFP_KERNEL | __GFP_ZERO |
__GFP_HIGHMEM);
if (kgsl_dummy_page == NULL)
return -ENOMEM;
}
pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
if (pages == NULL)
return -ENOMEM;
for (i = 0; i < count; i++)
pages[i] = kgsl_dummy_page;
ret = sg_alloc_table_from_pages(&sgt, pages, count,
0, size, GFP_KERNEL);
if (ret == 0) {
ret = _iommu_map_sg_sync_pc(pt, memdesc->gpuaddr + offset,
memdesc, sgt.sgl, sgt.nents,
IOMMU_READ | IOMMU_NOEXEC);
sg_free_table(&sgt);
}
kfree(pages);
return ret;
}
static int _map_to_one_page(struct kgsl_pagetable *pt, uint64_t addr,
struct kgsl_memdesc *memdesc, uint64_t physoffset,
uint64_t size, unsigned int map_flags)
{
int ret = 0, i;
int pg_sz = kgsl_memdesc_get_pagesize(memdesc);
int count = size >> PAGE_SHIFT;
struct page *page = NULL;
struct page **pages = NULL;
struct sg_page_iter sg_iter;
struct sg_table sgt;
/* Find our physaddr offset addr */
if (memdesc->pages != NULL)
page = memdesc->pages[physoffset >> PAGE_SHIFT];
else {
for_each_sg_page(memdesc->sgt->sgl, &sg_iter,
memdesc->sgt->nents, physoffset >> PAGE_SHIFT) {
page = sg_page_iter_page(&sg_iter);
break;
}
}
if (page == NULL)
return -EINVAL;
pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
if (pages == NULL)
return -ENOMEM;
for (i = 0; i < count; i++) {
if (pg_sz != PAGE_SIZE) {
struct page *tmp_page = page;
int j;
for (j = 0; j < 16; j++, tmp_page += PAGE_SIZE)
pages[i++] = tmp_page;
} else
pages[i] = page;
}
ret = sg_alloc_table_from_pages(&sgt, pages, count,
0, size, GFP_KERNEL);
if (ret == 0) {
ret = _iommu_map_sg_sync_pc(pt, addr, memdesc, sgt.sgl,
sgt.nents, map_flags);
sg_free_table(&sgt);
}
kfree(pages);
return ret;
}
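
To make the counting above concrete (illustrative numbers, assuming a 4K PAGE_SIZE):

/*
 * count = size >> PAGE_SHIFT, so binding size == SZ_128K fills 32
 * page-array slots.  With pg_sz == SZ_64K the inner loop emits the
 * 16 4K subpage entries of one 64K chunk per pass; with
 * pg_sz == PAGE_SIZE every slot simply repeats the same single page.
 */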
static int kgsl_iommu_map_offset(struct kgsl_pagetable *pt,
uint64_t virtaddr, uint64_t virtoffset,
struct kgsl_memdesc *memdesc, uint64_t physoffset,
@@ -1668,13 +1774,17 @@ static int kgsl_iommu_map_offset(struct kgsl_pagetable *pt,
int ret;
struct sg_table *sgt = NULL;
- pg_sz = (1 << kgsl_memdesc_get_align(memdesc));
+ pg_sz = kgsl_memdesc_get_pagesize(memdesc);
if (!IS_ALIGNED(virtaddr | virtoffset | physoffset | size, pg_sz))
return -EINVAL;
if (size == 0)
return -EINVAL;
+ if (!(feature_flag & KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS) &&
+ size + physoffset > kgsl_memdesc_footprint(memdesc))
+ return -EINVAL;
/*
* For paged memory allocated through kgsl, memdesc->pages is not NULL.
* Allocate sgt here just for its map operation. Contiguous memory
@@ -1688,9 +1798,13 @@ static int kgsl_iommu_map_offset(struct kgsl_pagetable *pt,
if (IS_ERR(sgt))
return PTR_ERR(sgt);
- ret = _iommu_map_sg_offset_sync_pc(pt, virtaddr + virtoffset,
- memdesc, sgt->sgl, sgt->nents,
- physoffset, size, protflags);
+ if (feature_flag & KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS)
+ ret = _map_to_one_page(pt, virtaddr + virtoffset,
+ memdesc, physoffset, size, protflags);
+ else
+ ret = _iommu_map_sg_offset_sync_pc(pt, virtaddr + virtoffset,
+ memdesc, sgt->sgl, sgt->nents,
+ physoffset, size, protflags);
if (memdesc->pages != NULL)
kgsl_free_sgt(sgt);
@@ -2152,8 +2266,7 @@ static int kgsl_iommu_get_gpuaddr(struct kgsl_pagetable *pagetable,
{
struct kgsl_iommu_pt *pt = pagetable->priv;
int ret = 0;
- uint64_t addr, start, end;
- uint64_t size = memdesc->size;
+ uint64_t addr, start, end, size;
unsigned int align;
BUG_ON(kgsl_memdesc_use_cpu_map(memdesc));
@@ -2162,8 +2275,7 @@ static int kgsl_iommu_get_gpuaddr(struct kgsl_pagetable *pagetable,
pagetable->name != KGSL_MMU_SECURE_PT)
return -EINVAL;
- if (kgsl_memdesc_has_guard_page(memdesc))
- size += kgsl_memdesc_guard_page_size(pagetable->mmu, memdesc);
+ size = kgsl_memdesc_footprint(memdesc);
align = 1 << kgsl_memdesc_get_align(memdesc);
@@ -2445,4 +2557,5 @@ static struct kgsl_mmu_pt_ops iommu_pt_ops = {
.addr_in_range = kgsl_iommu_addr_in_range,
.mmu_map_offset = kgsl_iommu_map_offset,
.mmu_unmap_offset = kgsl_iommu_unmap_offset,
.mmu_sparse_dummy_map = kgsl_iommu_sparse_dummy_map,
};

View file: drivers/gpu/msm/kgsl_mmu.c

@@ -386,29 +386,24 @@ int
kgsl_mmu_map(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc)
{
- int ret = 0;
int size;
if (!memdesc->gpuaddr)
return -EINVAL;
/* Only global mappings should be mapped multiple times */
if (!kgsl_memdesc_is_global(memdesc) &&
(KGSL_MEMDESC_MAPPED & memdesc->priv))
return -EINVAL;
size = kgsl_memdesc_footprint(memdesc);
- if (PT_OP_VALID(pagetable, mmu_map))
+ if (PT_OP_VALID(pagetable, mmu_map)) {
+ int ret;
ret = pagetable->pt_ops->mmu_map(pagetable, memdesc);
- if (ret)
- return ret;
+ if (ret)
+ return ret;
- atomic_inc(&pagetable->stats.entries);
- KGSL_STATS_ADD(size, &pagetable->stats.mapped,
- &pagetable->stats.max_mapped);
- memdesc->priv |= KGSL_MEMDESC_MAPPED;
+ atomic_inc(&pagetable->stats.entries);
+ KGSL_STATS_ADD(size, &pagetable->stats.mapped,
+ &pagetable->stats.max_mapped);
+ }
return 0;
}
@@ -455,22 +450,22 @@ int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc)
{
- uint64_t size;
- if (memdesc->size == 0 || memdesc->gpuaddr == 0 ||
- !(KGSL_MEMDESC_MAPPED & memdesc->priv))
+ if (memdesc->size == 0)
return -EINVAL;
- size = kgsl_memdesc_footprint(memdesc);
+ if (PT_OP_VALID(pagetable, mmu_unmap)) {
+ int ret;
+ uint64_t size;
- if (PT_OP_VALID(pagetable, mmu_unmap))
- pagetable->pt_ops->mmu_unmap(pagetable, memdesc);
+ size = kgsl_memdesc_footprint(memdesc);
- atomic_dec(&pagetable->stats.entries);
- atomic_long_sub(size, &pagetable->stats.mapped);
+ ret = pagetable->pt_ops->mmu_unmap(pagetable, memdesc);
+ if (ret)
+ return ret;
- if (!kgsl_memdesc_is_global(memdesc))
- memdesc->priv &= ~KGSL_MEMDESC_MAPPED;
+ atomic_dec(&pagetable->stats.entries);
+ atomic_long_sub(size, &pagetable->stats.mapped);
+ }
return 0;
}
@@ -481,11 +476,20 @@ int kgsl_mmu_map_offset(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc, uint64_t physoffset,
uint64_t size, uint64_t flags)
{
- if (PT_OP_VALID(pagetable, mmu_map_offset))
- return pagetable->pt_ops->mmu_map_offset(pagetable, virtaddr,
- virtoffset, memdesc, physoffset, size, flags);
+ if (PT_OP_VALID(pagetable, mmu_map_offset)) {
+ int ret;
- return -EINVAL;
+ ret = pagetable->pt_ops->mmu_map_offset(pagetable, virtaddr,
+ virtoffset, memdesc, physoffset, size, flags);
+ if (ret)
+ return ret;
+ atomic_inc(&pagetable->stats.entries);
+ KGSL_STATS_ADD(size, &pagetable->stats.mapped,
+ &pagetable->stats.max_mapped);
+ }
+ return 0;
}
EXPORT_SYMBOL(kgsl_mmu_map_offset);
@@ -493,14 +497,41 @@ int kgsl_mmu_unmap_offset(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc, uint64_t addr, uint64_t offset,
uint64_t size)
{
- if (PT_OP_VALID(pagetable, mmu_unmap_offset))
- return pagetable->pt_ops->mmu_unmap_offset(pagetable, memdesc,
- addr, offset, size);
+ if (PT_OP_VALID(pagetable, mmu_unmap_offset)) {
+ int ret;
- return -EINVAL;
+ ret = pagetable->pt_ops->mmu_unmap_offset(pagetable, memdesc,
+ addr, offset, size);
+ if (ret)
+ return ret;
+ atomic_dec(&pagetable->stats.entries);
+ atomic_long_sub(size, &pagetable->stats.mapped);
+ }
+ return 0;
}
EXPORT_SYMBOL(kgsl_mmu_unmap_offset);
int kgsl_mmu_sparse_dummy_map(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc, uint64_t offset, uint64_t size)
{
if (PT_OP_VALID(pagetable, mmu_sparse_dummy_map)) {
int ret;
ret = pagetable->pt_ops->mmu_sparse_dummy_map(pagetable,
memdesc, offset, size);
if (ret)
return ret;
atomic_dec(&pagetable->stats.entries);
atomic_long_sub(size, &pagetable->stats.mapped);
}
return 0;
}
EXPORT_SYMBOL(kgsl_mmu_sparse_dummy_map);
void kgsl_mmu_remove_global(struct kgsl_device *device,
struct kgsl_memdesc *memdesc)
{

View file: drivers/gpu/msm/kgsl_mmu.h

@@ -106,6 +106,9 @@ struct kgsl_mmu_pt_ops {
int (*mmu_unmap_offset)(struct kgsl_pagetable *pt,
struct kgsl_memdesc *memdesc, uint64_t addr,
uint64_t offset, uint64_t size);
int (*mmu_sparse_dummy_map)(struct kgsl_pagetable *pt,
struct kgsl_memdesc *memdesc, uint64_t offset,
uint64_t size);
};
/*
@@ -230,6 +233,9 @@ int kgsl_mmu_unmap_offset(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *kgsl_mmu_get_qdss_global_entry(struct kgsl_device *device);
int kgsl_mmu_sparse_dummy_map(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc, uint64_t offset, uint64_t size);
/*
* Static inline functions of MMU that simply call the SMMU specific
* function using a function pointer. These functions can be thought

View file: drivers/gpu/msm/kgsl_sharedmem.h

@@ -91,6 +91,18 @@ kgsl_memdesc_get_align(const struct kgsl_memdesc *memdesc)
KGSL_MEMALIGN_SHIFT);
}
/*
* kgsl_memdesc_get_pagesize - Get pagesize based on alignment
* @memdesc - the memdesc
*
* Returns the pagesize based on memdesc alignment
*/
static inline int
kgsl_memdesc_get_pagesize(const struct kgsl_memdesc *memdesc)
{
return (1 << kgsl_memdesc_get_align(memdesc));
}
/*
* kgsl_memdesc_get_cachemode - Get cache mode of a memdesc
* @memdesc: the memdesc
@@ -211,12 +223,19 @@ kgsl_memdesc_has_guard_page(const struct kgsl_memdesc *memdesc)
*
* Returns guard page size
*/
- static inline int
- kgsl_memdesc_guard_page_size(const struct kgsl_mmu *mmu,
- const struct kgsl_memdesc *memdesc)
+ static inline uint64_t
+ kgsl_memdesc_guard_page_size(const struct kgsl_memdesc *memdesc)
{
- return kgsl_memdesc_is_secured(memdesc) ? mmu->secure_align_mask + 1 :
- PAGE_SIZE;
+ if (!kgsl_memdesc_has_guard_page(memdesc))
+ return 0;
+ if (kgsl_memdesc_is_secured(memdesc)) {
+ if (memdesc->pagetable != NULL &&
+ memdesc->pagetable->mmu != NULL)
+ return memdesc->pagetable->mmu->secure_align_mask + 1;
+ }
+ return PAGE_SIZE;
}
/*
@@ -241,10 +260,7 @@ kgsl_memdesc_use_cpu_map(const struct kgsl_memdesc *memdesc)
static inline uint64_t
kgsl_memdesc_footprint(const struct kgsl_memdesc *memdesc)
{
- uint64_t size = memdesc->size;
- if (kgsl_memdesc_has_guard_page(memdesc))
- size += SZ_4K;
- return size;
+ return memdesc->size + kgsl_memdesc_guard_page_size(memdesc);
}
/*

View file: drivers/gpu/msm/kgsl_snapshot.c

@@ -313,6 +313,13 @@ int kgsl_snapshot_get_object(struct kgsl_snapshot *snapshot,
goto err_put;
}
/* Do not save sparse memory */
if (entry->memdesc.flags & KGSL_MEMFLAGS_SPARSE_VIRT ||
entry->memdesc.flags & KGSL_MEMFLAGS_SPARSE_PHYS) {
ret = 0;
goto err_put;
}
/*
* size indicates the number of bytes in the region to save. This might
* not always be the entire size of the region because some buffers are

View file: drivers/gpu/msm/kgsl_trace.h

@@ -1102,6 +1102,100 @@ TRACE_EVENT(kgsl_msg,
)
);
DECLARE_EVENT_CLASS(sparse_alloc_template,
TP_PROTO(unsigned int id, uint64_t size, unsigned int pagesize),
TP_ARGS(id, size, pagesize),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(uint64_t, size)
__field(unsigned int, pagesize)
),
TP_fast_assign(
__entry->id = id;
__entry->size = size;
__entry->pagesize = pagesize;
),
TP_printk("id=%d size=0x%llX pagesize=0x%X",
__entry->id, __entry->size, __entry->pagesize)
);
DEFINE_EVENT(sparse_alloc_template, sparse_phys_alloc,
TP_PROTO(unsigned int id, uint64_t size, unsigned int pagesize),
TP_ARGS(id, size, pagesize)
);
DEFINE_EVENT(sparse_alloc_template, sparse_virt_alloc,
TP_PROTO(unsigned int id, uint64_t size, unsigned int pagesize),
TP_ARGS(id, size, pagesize)
);
DECLARE_EVENT_CLASS(sparse_free_template,
TP_PROTO(unsigned int id),
TP_ARGS(id),
TP_STRUCT__entry(
__field(unsigned int, id)
),
TP_fast_assign(
__entry->id = id;
),
TP_printk("id=%d", __entry->id)
);
DEFINE_EVENT(sparse_free_template, sparse_phys_free,
TP_PROTO(unsigned int id),
TP_ARGS(id)
);
DEFINE_EVENT(sparse_free_template, sparse_virt_free,
TP_PROTO(unsigned int id),
TP_ARGS(id)
);
TRACE_EVENT(sparse_bind,
TP_PROTO(unsigned int v_id, uint64_t v_off,
unsigned int p_id, uint64_t p_off,
uint64_t size, uint64_t flags),
TP_ARGS(v_id, v_off, p_id, p_off, size, flags),
TP_STRUCT__entry(
__field(unsigned int, v_id)
__field(uint64_t, v_off)
__field(unsigned int, p_id)
__field(uint64_t, p_off)
__field(uint64_t, size)
__field(uint64_t, flags)
),
TP_fast_assign(
__entry->v_id = v_id;
__entry->v_off = v_off;
__entry->p_id = p_id;
__entry->p_off = p_off;
__entry->size = size;
__entry->flags = flags;
),
TP_printk(
"v_id=%d v_off=0x%llX p_id=%d p_off=0x%llX size=0x%llX flags=0x%llX",
__entry->v_id, __entry->v_off,
__entry->p_id, __entry->p_off,
__entry->size, __entry->flags)
);
TRACE_EVENT(sparse_unbind,
TP_PROTO(unsigned int v_id, uint64_t v_off, uint64_t size),
TP_ARGS(v_id, v_off, size),
TP_STRUCT__entry(
__field(unsigned int, v_id)
__field(uint64_t, v_off)
__field(uint64_t, size)
),
TP_fast_assign(
__entry->v_id = v_id;
__entry->v_off = v_off;
__entry->size = size;
),
TP_printk("v_id=%d v_off=0x%llX size=0x%llX",
__entry->v_id, __entry->v_off, __entry->size)
);
#endif /* _KGSL_TRACE_H */

View file: include/uapi/linux/msm_kgsl.h

@@ -121,6 +121,11 @@
#define KGSL_MEMFLAGS_GPUWRITEONLY 0x02000000U
#define KGSL_MEMFLAGS_FORCE_32BIT 0x100000000ULL
/* Flag for binding all the virt range to single phys data */
#define KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS 0x400000000ULL
#define KGSL_SPARSE_BIND 0x1ULL
#define KGSL_SPARSE_UNBIND 0x2ULL
/* Memory caching hints */
#define KGSL_CACHEMODE_MASK 0x0C000000U
#define KGSL_CACHEMODE_SHIFT 26
@@ -131,6 +136,8 @@
#define KGSL_CACHEMODE_WRITEBACK 3
#define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000ULL
#define KGSL_MEMFLAGS_SPARSE_PHYS 0x20000000ULL
#define KGSL_MEMFLAGS_SPARSE_VIRT 0x40000000ULL
/* Memory types for which allocations are made */
#define KGSL_MEMTYPE_MASK 0x0000FF00
@@ -1457,4 +1464,96 @@ struct kgsl_gpuobj_set_info {
#define IOCTL_KGSL_GPUOBJ_SET_INFO \
_IOW(KGSL_IOC_TYPE, 0x4C, struct kgsl_gpuobj_set_info)
/**
* struct kgsl_sparse_phys_alloc - Argument for IOCTL_KGSL_SPARSE_PHYS_ALLOC
* @size: Size in bytes to back
* @pagesize: Pagesize alignment required
* @flags: Flags for this allocation
* @id: Returned ID for this allocation
*/
struct kgsl_sparse_phys_alloc {
uint64_t size;
uint64_t pagesize;
uint64_t flags;
unsigned int id;
};
#define IOCTL_KGSL_SPARSE_PHYS_ALLOC \
_IOWR(KGSL_IOC_TYPE, 0x50, struct kgsl_sparse_phys_alloc)
/**
* struct kgsl_sparse_phys_free - Argument for IOCTL_KGSL_SPARSE_PHYS_FREE
* @id: ID to free
*/
struct kgsl_sparse_phys_free {
unsigned int id;
};
#define IOCTL_KGSL_SPARSE_PHYS_FREE \
_IOW(KGSL_IOC_TYPE, 0x51, struct kgsl_sparse_phys_free)
/**
* struct kgsl_sparse_virt_alloc - Argument for IOCTL_KGSL_SPARSE_VIRT_ALLOC
* @size: Size in bytes to reserve
* @pagesize: Pagesize alignment required
* @flags: Flags for this allocation
* @id: Returned ID for this allocation
* @gpuaddr: Returned GPU address for this allocation
*/
struct kgsl_sparse_virt_alloc {
uint64_t size;
uint64_t pagesize;
uint64_t flags;
uint64_t gpuaddr;
unsigned int id;
};
#define IOCTL_KGSL_SPARSE_VIRT_ALLOC \
_IOWR(KGSL_IOC_TYPE, 0x52, struct kgsl_sparse_virt_alloc)
/**
* struct kgsl_sparse_virt_free - Argument for IOCTL_KGSL_SPARSE_VIRT_FREE
* @id: ID to free
*/
struct kgsl_sparse_virt_free {
unsigned int id;
};
#define IOCTL_KGSL_SPARSE_VIRT_FREE \
_IOW(KGSL_IOC_TYPE, 0x53, struct kgsl_sparse_virt_free)
/**
* struct kgsl_sparse_binding_object - Argument for kgsl_sparse_bind
* @virtoffset: Offset into the virtual ID
* @physoffset: Offset into the physical ID (bind only)
* @size: Size in bytes to reserve
* @flags: Flags for this kgsl_sparse_binding_object
* @id: Physical ID to bind (bind only)
*/
struct kgsl_sparse_binding_object {
uint64_t virtoffset;
uint64_t physoffset;
uint64_t size;
uint64_t flags;
unsigned int id;
};
/**
* struct kgsl_sparse_bind - Argument for IOCTL_KGSL_SPARSE_BIND
* @list: List of kgsl_sparse_bind_objects to bind/unbind
* @id: Virtual ID to bind/unbind
* @size: Size of kgsl_sparse_bind_object
* @count: Number of elements in list
*
*/
struct kgsl_sparse_bind {
uint64_t __user list;
unsigned int id;
unsigned int size;
unsigned int count;
};
#define IOCTL_KGSL_SPARSE_BIND \
_IOW(KGSL_IOC_TYPE, 0x54, struct kgsl_sparse_bind)
#endif /* _UAPI_MSM_KGSL_H */
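
End to end, the new interface is driven roughly like this (hypothetical userspace sketch; assumes an open KGSL fd and omits error handling):

/* Hypothetical usage sketch, not part of this patch. */
struct kgsl_sparse_phys_alloc phys = {
    .size     = 1 << 20,       /* 1MB of physical backing */
    .pagesize = 64 * 1024,     /* PAGE_SIZE or 64K */
};
struct kgsl_sparse_virt_alloc virt = {
    .size     = 16 << 20,      /* 16MB of virtual range */
    .pagesize = 64 * 1024,
};
struct kgsl_sparse_virt_free vfree;
struct kgsl_sparse_phys_free pfree;

ioctl(fd, IOCTL_KGSL_SPARSE_PHYS_ALLOC, &phys);  /* fills phys.id */
ioctl(fd, IOCTL_KGSL_SPARSE_VIRT_ALLOC, &virt);  /* fills virt.id and virt.gpuaddr */

/* bind and unbind pieces of the range with IOCTL_KGSL_SPARSE_BIND, as sketched earlier */

/* every binding must be released before the ids can be freed */
vfree.id = virt.id;
pfree.id = phys.id;
ioctl(fd, IOCTL_KGSL_SPARSE_VIRT_FREE, &vfree);
ioctl(fd, IOCTL_KGSL_SPARSE_PHYS_FREE, &pfree);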