Merge "msm: kgsl: Make sure USE_CPU_MAP + MAP_USER_MEM work together"
commit 2f088241d7
1 changed file with 65 additions and 80 deletions
@@ -176,9 +176,10 @@ int kgsl_memfree_find_entry(pid_t ptname, uint64_t *gpuaddr,
     return 0;
 }
 
-static void kgsl_memfree_purge(pid_t ptname, uint64_t gpuaddr,
-        uint64_t size)
+static void kgsl_memfree_purge(struct kgsl_pagetable *pagetable,
+        uint64_t gpuaddr, uint64_t size)
 {
+    pid_t ptname = pagetable ? pagetable->name : 0;
     int i;
 
     if (memfree.list == NULL)
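Note on the hunk above: kgsl_memfree_purge() now takes the pagetable pointer itself and derives the process name internally, with a NULL pagetable standing in for name 0. A minimal standalone sketch of that calling convention, using mock types and hypothetical names (mock_pagetable, mock_memfree_purge), not the real kgsl structures:

/* Standalone illustration with mock types -- not the kgsl driver code. */
#include <stdio.h>
#include <stdint.h>
#include <sys/types.h>

struct mock_pagetable {
    pid_t name;    /* stands in for kgsl_pagetable->name */
};

/* Mirrors the new convention: the helper derives the name itself and
 * treats a NULL pagetable as name 0. */
static void mock_memfree_purge(struct mock_pagetable *pagetable,
        uint64_t gpuaddr, uint64_t size)
{
    pid_t ptname = pagetable ? pagetable->name : 0;

    printf("purge ptname=%d gpuaddr=0x%llx size=%llu\n",
        (int) ptname, (unsigned long long) gpuaddr,
        (unsigned long long) size);
}

int main(void)
{
    struct mock_pagetable pt = { .name = 1234 };

    mock_memfree_purge(&pt, 0x100000, 4096);    /* per-process entry */
    mock_memfree_purge(NULL, 0x200000, 4096);   /* no pagetable */
    return 0;
}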
@@ -342,40 +343,22 @@ kgsl_mem_entry_destroy(struct kref *kref)
 }
 EXPORT_SYMBOL(kgsl_mem_entry_destroy);
 
-/**
- * kgsl_mem_entry_track_gpuaddr - Insert a mem_entry in the address tree and
- * assign it with a gpu address space before insertion
- * @process: the process that owns the memory
- * @entry: the memory entry
- *
- * @returns - 0 on succcess else error code
- *
- * Insert the kgsl_mem_entry in to the rb_tree for searching by GPU address.
- * The assignment of gpu address and insertion into list needs to
- * happen with the memory lock held to avoid race conditions between
- * gpu address being selected and some other thread looking through the
- * rb list in search of memory based on gpuaddr
- * This function should be called with processes memory spinlock held
- */
-static int
-kgsl_mem_entry_track_gpuaddr(struct kgsl_process_private *process,
-                struct kgsl_mem_entry *entry)
+/* Allocate a IOVA for memory objects that don't use SVM */
+static int kgsl_mem_entry_track_gpuaddr(struct kgsl_device *device,
+        struct kgsl_process_private *process,
+        struct kgsl_mem_entry *entry)
 {
-    struct kgsl_pagetable *pagetable = process->pagetable;
+    struct kgsl_pagetable *pagetable;
 
     /*
-     * If cpu=gpu map is used then caller needs to set the
-     * gpu address
+     * If SVM is enabled for this object then the address needs to be
+     * assigned elsewhere
      */
-    if (kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
-        if (!entry->memdesc.gpuaddr)
-            return 0;
-    } else if (entry->memdesc.gpuaddr) {
-        WARN_ONCE(1, "gpuaddr assigned w/o holding memory lock\n");
-        return -EINVAL;
-    }
-    if (kgsl_memdesc_is_secured(&entry->memdesc))
-        pagetable = pagetable->mmu->securepagetable;
+    if (kgsl_memdesc_use_cpu_map(&entry->memdesc))
+        return 0;
+
+    pagetable = kgsl_memdesc_is_secured(&entry->memdesc) ?
+        device->mmu.securepagetable : process->pagetable;
 
     return kgsl_mmu_get_gpuaddr(pagetable, &entry->memdesc);
 }
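The rewritten kgsl_mem_entry_track_gpuaddr() above reduces to a simple policy: skip SVM (cpu_map) objects, whose address is assigned elsewhere, and pick the secure or per-process pagetable for everything else. A standalone userspace sketch of that decision, with mock types rather than the driver's:

/* Standalone sketch of the new allocation policy -- mock types, not kgsl. */
#include <stdbool.h>
#include <stdio.h>

struct mock_memdesc {
    bool use_cpu_map;   /* SVM object: CPU and GPU share the address */
    bool secured;       /* must live in the secure pagetable */
};

enum which_pagetable { PT_NONE, PT_SECURE, PT_PROCESS };

/* SVM objects are skipped here; everything else picks the secure or
 * per-process pagetable, mirroring the ternary in the new code. */
static enum which_pagetable pick_pagetable(const struct mock_memdesc *m)
{
    if (m->use_cpu_map)
        return PT_NONE;

    return m->secured ? PT_SECURE : PT_PROCESS;
}

int main(void)
{
    struct mock_memdesc svm = { .use_cpu_map = true };
    struct mock_memdesc secure = { .secured = true };
    struct mock_memdesc normal = { false, false };

    printf("svm=%d secure=%d normal=%d\n", pick_pagetable(&svm),
        pick_pagetable(&secure), pick_pagetable(&normal));
    return 0;
}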
@@ -391,33 +374,25 @@ static void kgsl_mem_entry_commit_process(struct kgsl_mem_entry *entry)
     spin_unlock(&entry->priv->mem_lock);
 }
 
-/**
- * kgsl_mem_entry_attach_process - Attach a mem_entry to its owner process
- * @entry: the memory entry
- * @process: the owner process
- *
- * Attach a newly created mem_entry to its owner process so that
- * it can be found later. The mem_entry will be added to mem_idr and have
- * its 'id' field assigned.
- *
- * @returns - 0 on success or error code on failure.
+/*
+ * Attach the memory object to a process by (possibly) getting a GPU address and
+ * (possibly) mapping it
  */
-int
-kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
-                struct kgsl_device_private *dev_priv)
+static int kgsl_mem_entry_attach_process(struct kgsl_device *device,
+        struct kgsl_process_private *process,
+        struct kgsl_mem_entry *entry)
 {
-    int id;
-    int ret;
-    struct kgsl_process_private *process = dev_priv->process_priv;
-    struct kgsl_pagetable *pagetable = NULL;
+    int id, ret;
 
     ret = kgsl_process_private_get(process);
     if (!ret)
         return -EBADF;
 
-    ret = kgsl_mem_entry_track_gpuaddr(process, entry);
-    if (ret)
-        goto err_put_proc_priv;
+    ret = kgsl_mem_entry_track_gpuaddr(device, process, entry);
+    if (ret) {
+        kgsl_process_private_put(process);
+        return ret;
+    }
 
     idr_preload(GFP_KERNEL);
     spin_lock(&process->mem_lock);
@@ -427,43 +402,40 @@ kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
     idr_preload_end();
 
     if (id < 0) {
-        ret = id;
-        kgsl_mmu_put_gpuaddr(&entry->memdesc);
-        goto err_put_proc_priv;
+        if (!kgsl_memdesc_use_cpu_map(&entry->memdesc))
+            kgsl_mmu_put_gpuaddr(&entry->memdesc);
+        kgsl_process_private_put(process);
+        return id;
     }
 
     entry->id = id;
     entry->priv = process;
 
-    /* map the memory after unlocking if gpuaddr has been assigned */
+    /*
+     * Map the memory if a GPU address is already assigned, either through
+     * kgsl_mem_entry_track_gpuaddr() or via some other SVM process
+     */
     if (entry->memdesc.gpuaddr) {
-        pagetable = process->pagetable;
-        if (kgsl_memdesc_is_secured(&entry->memdesc))
-            pagetable = pagetable->mmu->securepagetable;
-
-        entry->memdesc.pagetable = pagetable;
-
         if (entry->memdesc.flags & KGSL_MEMFLAGS_SPARSE_VIRT)
-            ret = kgsl_mmu_sparse_dummy_map(pagetable,
-                &entry->memdesc, 0, entry->memdesc.size);
+            ret = kgsl_mmu_sparse_dummy_map(
+                entry->memdesc.pagetable,
+                &entry->memdesc, 0,
+                entry->memdesc.size);
         else if (entry->memdesc.gpuaddr)
-            ret = kgsl_mmu_map(pagetable, &entry->memdesc);
+            ret = kgsl_mmu_map(entry->memdesc.pagetable,
+                &entry->memdesc);
 
         if (ret)
            kgsl_mem_entry_detach_process(entry);
     }
 
-    kgsl_memfree_purge(pagetable ? pagetable->name : 0,
-        entry->memdesc.gpuaddr, entry->memdesc.size);
+    kgsl_memfree_purge(entry->memdesc.pagetable, entry->memdesc.gpuaddr,
+        entry->memdesc.size);
 
     return ret;
-
-err_put_proc_priv:
-    kgsl_process_private_put(process);
-    return ret;
 }
-
+/* Detach a memory entry from a process and unmap it from the MMU */
 
 static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry)
 {
     unsigned int type;
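One detail of the hunk above worth calling out: on idr_alloc() failure, the entry only releases its GPU address when it does not use cpu_map, because SVM objects never took an IOVA from kgsl_mem_entry_track_gpuaddr(). A hypothetical standalone sketch of that cleanup rule, with mock helpers rather than kgsl calls:

/* Sketch of the idr-failure cleanup rule -- mock functions, not kgsl. */
#include <stdbool.h>
#include <stdio.h>

static void mock_put_gpuaddr(void)
{
    puts("released the IOVA");
}

static void cleanup_on_idr_failure(bool use_cpu_map)
{
    /* SVM (cpu_map) objects never got an IOVA from the tracker,
     * so there is nothing to release for them. */
    if (!use_cpu_map)
        mock_put_gpuaddr();
    puts("dropped the process reference");
}

int main(void)
{
    cleanup_on_idr_failure(false);  /* regular allocation */
    cleanup_on_idr_failure(true);   /* SVM / cpu_map mapping */
    return 0;
}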
@@ -2114,10 +2086,21 @@ static int kgsl_setup_anon_useraddr(struct kgsl_pagetable *pagetable,
     entry->memdesc.pagetable = pagetable;
     entry->memdesc.size = (uint64_t) size;
     entry->memdesc.useraddr = hostptr;
-    if (kgsl_memdesc_use_cpu_map(&entry->memdesc))
-        entry->memdesc.gpuaddr = (uint64_t) entry->memdesc.useraddr;
     entry->memdesc.flags |= KGSL_MEMFLAGS_USERMEM_ADDR;
+
+    if (kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
+        int ret;
+
+        /* Register the address in the database */
+        ret = kgsl_mmu_set_svm_region(pagetable,
+            (uint64_t) entry->memdesc.useraddr, (uint64_t) size);
+
+        if (ret)
+            return ret;
+
+        entry->memdesc.gpuaddr = (uint64_t) entry->memdesc.useraddr;
+    }
 
     return memdesc_sg_virt(&entry->memdesc, NULL);
 }
 
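This hunk carries the core of the fix named in the commit title: with USE_CPU_MAP, kgsl_setup_anon_useraddr() now calls kgsl_mmu_set_svm_region() to register the user address range first and only then mirrors it into gpuaddr, presumably so the CPU address is claimed in the GPU address space before it is reused as the GPU address. A standalone sketch of that ordering under mock names (mock_set_svm_region is a stand-in, not the real API):

/* Userspace sketch of the register-then-adopt ordering -- mock SVM API. */
#include <stdint.h>
#include <stdio.h>

/* Stand-in for kgsl_mmu_set_svm_region(): reserve [addr, addr + size). */
static int mock_set_svm_region(uint64_t addr, uint64_t size)
{
    printf("reserve SVM region 0x%llx + %llu\n",
        (unsigned long long) addr, (unsigned long long) size);
    return 0;
}

static int mock_setup_useraddr(uint64_t hostptr, uint64_t size,
        uint64_t *gpuaddr)
{
    int ret;

    /* Claim the CPU range in the GPU address space first; only adopt
     * it as the GPU address once the reservation succeeds. */
    ret = mock_set_svm_region(hostptr, size);
    if (ret)
        return ret;

    *gpuaddr = hostptr;
    return 0;
}

int main(void)
{
    uint64_t gpuaddr = 0;

    if (mock_setup_useraddr(0x7f0000000000ULL, 4096, &gpuaddr) == 0)
        printf("gpuaddr = 0x%llx\n", (unsigned long long) gpuaddr);
    return 0;
}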
@@ -2367,7 +2350,7 @@ long kgsl_ioctl_gpuobj_import(struct kgsl_device_private *dev_priv,
 
     param->flags = entry->memdesc.flags;
 
-    ret = kgsl_mem_entry_attach_process(entry, dev_priv);
+    ret = kgsl_mem_entry_attach_process(dev_priv->device, private, entry);
     if (ret)
         goto unmap;
 
@@ -2671,7 +2654,8 @@ long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
     /* echo back flags */
     param->flags = (unsigned int) entry->memdesc.flags;
 
-    result = kgsl_mem_entry_attach_process(entry, dev_priv);
+    result = kgsl_mem_entry_attach_process(dev_priv->device, private,
+            entry);
     if (result)
         goto error_attach;
 
@@ -3068,7 +3052,7 @@ static struct kgsl_mem_entry *gpumem_alloc_entry(
     if (ret != 0)
         goto err;
 
-    ret = kgsl_mem_entry_attach_process(entry, dev_priv);
+    ret = kgsl_mem_entry_attach_process(dev_priv->device, private, entry);
     if (ret != 0) {
         kgsl_sharedmem_free(&entry->memdesc);
         goto err;
@@ -3334,6 +3318,7 @@ long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv,
 long kgsl_ioctl_sparse_virt_alloc(struct kgsl_device_private *dev_priv,
     unsigned int cmd, void *data)
 {
+    struct kgsl_process_private *private = dev_priv->process_priv;
     struct kgsl_sparse_virt_alloc *param = data;
     struct kgsl_mem_entry *entry;
     int ret;
@@ -3354,7 +3339,7 @@ long kgsl_ioctl_sparse_virt_alloc(struct kgsl_device_private *dev_priv,
     spin_lock_init(&entry->bind_lock);
     entry->bind_tree = RB_ROOT;
 
-    ret = kgsl_mem_entry_attach_process(entry, dev_priv);
+    ret = kgsl_mem_entry_attach_process(dev_priv->device, private, entry);
     if (ret) {
         kfree(entry);
         return ret;
@@ -4040,8 +4025,8 @@ static unsigned long _gpu_set_svm_region(struct kgsl_process_private *private,
         return ret;
     }
 
-    kgsl_memfree_purge(private->pagetable ? private->pagetable->name : 0,
-        entry->memdesc.gpuaddr, entry->memdesc.size);
+    kgsl_memfree_purge(private->pagetable, entry->memdesc.gpuaddr,
+        entry->memdesc.size);
 
     return addr;
 }