drm/msm: Separate locking of buffer resources from struct_mutex
Buffer object specific resources like pages, domains, and the sg list
need not be protected with struct_mutex. They can be protected with a
buffer object level lock. This simplifies locking and makes it easier
to avoid potential recursive locking scenarios for SVM involving
mmap_sem and struct_mutex. This also removes unnecessary serialization
when creating buffer objects, and between buffer object creation and
GPU command submission.

Change-Id: I40cb437d0186c3d9aac365c9baba0aa4792f0aa1
Signed-off-by: Sushmita Susheelendra <ssusheel@codeaurora.org>
parent 9f47a21e56
commit 9c0d1dc8c0
18 changed files with 150 additions and 129 deletions
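The change, reduced to its core: per-buffer state moves under a mutex embedded in struct msm_gem_object, while dev->struct_mutex is kept only for genuinely shared device state such as priv->inactive_list. Below is a minimal kernel-style C sketch of the resulting pattern, with hypothetical names (the real code is in msm_gem_new_impl(), msm_gem_vaddr(), and msm_gem_get_iova() in the hunks that follow):

#include <linux/mutex.h>
#include <linux/slab.h>

struct bo_sketch {
	struct mutex lock;	/* protects pages, sgt, domains, vaddr */
	void *vaddr;
};

static struct bo_sketch *bo_sketch_new(void)
{
	struct bo_sketch *bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (bo)
		mutex_init(&bo->lock);	/* per-BO lock, set up at creation */
	return bo;
}

static void *bo_sketch_vaddr(struct bo_sketch *bo)
{
	void *ptr;

	mutex_lock(&bo->lock);	/* BO-level lock, not dev->struct_mutex */
	ptr = bo->vaddr;	/* first use would get_pages() + vmap() here */
	mutex_unlock(&bo->lock);

	return ptr;
}

Two threads can now fault, pin, or vmap different buffer objects concurrently. Callers that still hold struct_mutex when allocating (crashdump_init() below) get the new msm_gem_new_locked() variant, which only changes how the fresh object is added to the struct_mutex-protected inactive list.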
@@ -481,10 +481,8 @@ static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
 	struct drm_gem_object *bo;
 	void *ptr;
 
-	mutex_lock(&drm->struct_mutex);
 	bo = msm_gem_new(drm, fw->size - 4,
 		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY);
-	mutex_unlock(&drm->struct_mutex);
 
 	if (IS_ERR(bo))
 		return bo;
@@ -458,10 +458,8 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
 	 */
 	bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
 
-	mutex_lock(&drm->struct_mutex);
 	a5xx_gpu->gpmu_bo = msm_gem_new(drm, bosize,
 		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY);
-	mutex_unlock(&drm->struct_mutex);
 
 	if (IS_ERR(a5xx_gpu->gpmu_bo))
 		goto err;
@@ -24,9 +24,7 @@ static void *alloc_kernel_bo(struct drm_device *drm, struct msm_gpu *gpu,
 	void *ptr;
 	int ret;
 
-	mutex_lock(&drm->struct_mutex);
 	_bo = msm_gem_new(drm, size, flags);
-	mutex_unlock(&drm->struct_mutex);
 
 	if (IS_ERR(_bo))
 		return _bo;
@@ -217,18 +217,19 @@ static int crashdump_init(struct msm_gpu *gpu, struct crashdump *crashdump)
 	struct drm_device *drm = gpu->dev;
 	int ret = -ENOMEM;
 
-	crashdump->bo = msm_gem_new(drm, CRASHDUMP_BO_SIZE, MSM_BO_UNCACHED);
+	crashdump->bo = msm_gem_new_locked(drm, CRASHDUMP_BO_SIZE,
+			MSM_BO_UNCACHED);
 	if (IS_ERR(crashdump->bo)) {
 		ret = PTR_ERR(crashdump->bo);
 		crashdump->bo = NULL;
 		return ret;
 	}
 
-	crashdump->ptr = msm_gem_vaddr_locked(crashdump->bo);
+	crashdump->ptr = msm_gem_vaddr(crashdump->bo);
 	if (!crashdump->ptr)
 		goto out;
 
-	ret = msm_gem_get_iova_locked(crashdump->bo, gpu->aspace,
+	ret = msm_gem_get_iova(crashdump->bo, gpu->aspace,
 		&crashdump->iova);
 
 out:
@@ -563,10 +563,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		}
 	}
 
-	mutex_lock(&drm->struct_mutex);
 	adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
 		MSM_BO_UNCACHED);
-	mutex_unlock(&drm->struct_mutex);
 	if (IS_ERR(adreno_gpu->memptrs_bo)) {
 		ret = PTR_ERR(adreno_gpu->memptrs_bo);
 		adreno_gpu->memptrs_bo = NULL;
@@ -838,22 +838,19 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
 	int ret;
 	u64 iova;
 
-	mutex_lock(&dev->struct_mutex);
 	msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
 	if (IS_ERR(msm_host->tx_gem_obj)) {
 		ret = PTR_ERR(msm_host->tx_gem_obj);
 		pr_err("%s: failed to allocate gem, %d\n", __func__, ret);
 		msm_host->tx_gem_obj = NULL;
-		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
 
-	ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova);
+	ret = msm_gem_get_iova(msm_host->tx_gem_obj, NULL, &iova);
 	if (ret) {
 		pr_err("%s: failed to get iova, %d\n", __func__, ret);
 		return ret;
 	}
-	mutex_unlock(&dev->struct_mutex);
 
 	if (iova & 0x07) {
 		pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
@@ -392,7 +392,7 @@ static void update_cursor(struct drm_crtc *crtc)
 		if (next_bo) {
 			/* take a obj ref + iova ref when we start scanning out: */
 			drm_gem_object_reference(next_bo);
-			msm_gem_get_iova_locked(next_bo, mdp4_kms->aspace,
+			msm_gem_get_iova(next_bo, mdp4_kms->aspace,
 				&iova);
 
 			/* enable cursor: */
@@ -536,9 +536,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 		goto fail;
 	}
 
-	mutex_lock(&dev->struct_mutex);
 	mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
-	mutex_unlock(&dev->struct_mutex);
 	if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
 		ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
 		dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
@@ -322,6 +322,7 @@ static int msm_init_vram(struct drm_device *dev)
 		priv->vram.size = size;
 
 		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
+		spin_lock_init(&priv->vram.lock);
 
 		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
 		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
@@ -631,12 +632,10 @@ static void msm_postclose(struct drm_device *dev, struct drm_file *file)
 	if (priv->gpu)
 		msm_gpu_cleanup_counters(priv->gpu, ctx);
 
-	mutex_lock(&dev->struct_mutex);
 	if (ctx && ctx->aspace && ctx->aspace != priv->gpu->aspace) {
 		ctx->aspace->mmu->funcs->detach(ctx->aspace->mmu);
 		msm_gem_address_space_put(ctx->aspace);
 	}
-	mutex_unlock(&dev->struct_mutex);
 
 	kfree(ctx);
 }
@@ -341,6 +341,7 @@ struct msm_drm_private {
 		 * and position mm_node->start is in # of pages:
 		 */
 		struct drm_mm mm;
+		spinlock_t lock; /* Protects drm_mm node allocation/removal */
 	} vram;
 
 	struct msm_vblank_ctrl vblank_ctrl;
@@ -431,8 +432,6 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
-int msm_gem_get_iova_locked(struct drm_gem_object *obj,
-		struct msm_gem_address_space *aspace, uint64_t *iova);
 int msm_gem_get_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova);
 uint64_t msm_gem_iova(struct drm_gem_object *obj,
@@ -453,7 +452,6 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
 		struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);
 void msm_gem_prime_unpin(struct drm_gem_object *obj);
-void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
 void *msm_gem_vaddr(struct drm_gem_object *obj);
 int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
 		struct msm_fence_cb *cb);
@@ -468,6 +466,8 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 		uint32_t size, uint32_t flags, uint32_t *handle);
 struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 		uint32_t size, uint32_t flags);
+struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
+		uint32_t size, uint32_t flags);
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		uint32_t size, struct sg_table *sgt, u32 flags);
 void msm_gem_sync(struct drm_gem_object *obj, u32 op);
@@ -104,10 +104,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 	/* allocate backing bo */
 	size = mode_cmd.pitches[0] * mode_cmd.height;
 	DBG("allocating %d bytes for fb %d", size, dev->primary->index);
-	mutex_lock(&dev->struct_mutex);
 	fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT |
 			MSM_BO_WC | MSM_BO_STOLEN);
-	mutex_unlock(&dev->struct_mutex);
 	if (IS_ERR(fbdev->bo)) {
 		ret = PTR_ERR(fbdev->bo);
 		fbdev->bo = NULL;
@@ -133,7 +131,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 	 * in panic (ie. lock-safe, etc) we could avoid pinning the
 	 * buffer now:
 	 */
-	ret = msm_gem_get_iova_locked(fbdev->bo, 0, &paddr);
+	ret = msm_gem_get_iova(fbdev->bo, 0, &paddr);
 	if (ret) {
 		dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
 		goto fail_unlock;
@@ -163,7 +161,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 	/* FIXME: Verify paddr < 32 bits? */
 	dev->mode_config.fb_base = lower_32_bits(paddr);
 
-	fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
+	fbi->screen_base = msm_gem_vaddr(fbdev->bo);
 	fbi->screen_size = fbdev->bo->size;
 	fbi->fix.smem_start = lower_32_bits(paddr);
 	fbi->fix.smem_len = fbdev->bo->size;
@@ -63,8 +63,7 @@ static bool use_pages(struct drm_gem_object *obj)
 }
 
 /* allocate pages from VRAM carveout, used when no IOMMU: */
-static struct page **get_pages_vram(struct drm_gem_object *obj,
-		int npages)
+static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_drm_private *priv = obj->dev->dev_private;
@@ -76,8 +75,10 @@ static struct page **get_pages_vram(struct drm_gem_object *obj,
 	if (!p)
 		return ERR_PTR(-ENOMEM);
 
+	spin_lock(&priv->vram.lock);
 	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
 			npages, 0, DRM_MM_SEARCH_DEFAULT);
+	spin_unlock(&priv->vram.lock);
 	if (ret) {
 		drm_free_large(p);
 		return ERR_PTR(ret);
@@ -92,7 +93,6 @@ static struct page **get_pages_vram(struct drm_gem_object *obj,
 	return p;
 }
 
-/* called with dev->struct_mutex held */
 static struct page **get_pages(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -147,6 +147,18 @@ static struct page **get_pages(struct drm_gem_object *obj)
 	return msm_obj->pages;
 }
 
+static void put_pages_vram(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_drm_private *priv = obj->dev->dev_private;
+
+	spin_lock(&priv->vram.lock);
+	drm_mm_remove_node(msm_obj->vram_node);
+	spin_unlock(&priv->vram.lock);
+
+	drm_free_large(msm_obj->pages);
+}
+
 static void put_pages(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -160,12 +172,10 @@ static void put_pages(struct drm_gem_object *obj)
 			sg_free_table(msm_obj->sgt);
 		kfree(msm_obj->sgt);
 
-		if (use_pages(obj)) {
+		if (use_pages(obj))
 			drm_gem_put_pages(obj, msm_obj->pages, true, false);
-		} else {
-			drm_mm_remove_node(msm_obj->vram_node);
-			drm_free_large(msm_obj->pages);
-		}
+		else
+			put_pages_vram(obj);
 
 		msm_obj->pages = NULL;
 	}
@@ -173,11 +183,12 @@ static void put_pages(struct drm_gem_object *obj)
 
 struct page **msm_gem_get_pages(struct drm_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct page **p;
-	mutex_lock(&dev->struct_mutex);
+
+	mutex_lock(&msm_obj->lock);
 	p = get_pages(obj);
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&msm_obj->lock);
 	return p;
 }
 
@@ -237,16 +248,17 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
-	struct drm_device *dev = obj->dev;
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct page **pages;
 	unsigned long pfn;
 	pgoff_t pgoff;
 	int ret;
 
-	/* Make sure we don't parallel update on a fault, nor move or remove
-	 * something from beneath our feet
+	/*
+	 * vm_ops.open and close get and put a reference on obj.
+	 * So, we dont need to hold one here.
 	 */
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	ret = mutex_lock_interruptible(&msm_obj->lock);
 	if (ret)
 		goto out;
 
@@ -269,7 +281,7 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
 
 out_unlock:
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&msm_obj->lock);
 out:
 	switch (ret) {
 	case -EAGAIN:
@@ -293,9 +305,10 @@ out:
 static uint64_t mmap_offset(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret;
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
 	/* Make it mmapable */
 	ret = drm_gem_create_mmap_offset(obj);
@@ -311,9 +324,11 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
 {
 	uint64_t offset;
-	mutex_lock(&obj->dev->struct_mutex);
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	mutex_lock(&msm_obj->lock);
 	offset = mmap_offset(obj);
-	mutex_unlock(&obj->dev->struct_mutex);
+	mutex_unlock(&msm_obj->lock);
 	return offset;
 }
 
@@ -325,14 +340,14 @@ static void obj_remove_domain(struct msm_gem_vma *domain)
 	}
 }
 
+/* Called with msm_obj->lock locked */
 static void
 put_iova(struct drm_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *domain, *tmp;
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
 	list_for_each_entry_safe(domain, tmp, &msm_obj->domains, list) {
 		if (iommu_present(&platform_bus_type)) {
@@ -378,14 +393,8 @@ static struct msm_gem_vma *obj_get_domain(struct drm_gem_object *obj,
 #define IOMMU_PRIV 0
 #endif
 
-/* should be called under struct_mutex.. although it can be called
- * from atomic context without struct_mutex to acquire an extra
- * iova ref if you know one is already held.
- *
- * That means when I do eventually need to add support for unpinning
- * the refcnt counter needs to be atomic_t.
- */
-int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+/* A reference to obj must be held before calling this function. */
+int msm_gem_get_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -393,13 +402,18 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj,
 	struct msm_gem_vma *domain;
 	int ret = 0;
 
+	mutex_lock(&msm_obj->lock);
+
 	if (!iommu_present(&platform_bus_type)) {
 		pages = get_pages(obj);
 
-		if (IS_ERR(pages))
+		if (IS_ERR(pages)) {
+			mutex_unlock(&msm_obj->lock);
 			return PTR_ERR(pages);
+		}
 
 		*iova = (uint64_t) physaddr(obj);
+		mutex_unlock(&msm_obj->lock);
 		return 0;
 	}
 
@@ -407,12 +421,15 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj,
 
 	if (!domain) {
 		domain = obj_add_domain(obj, aspace);
-		if (IS_ERR(domain))
+		if (IS_ERR(domain)) {
+			mutex_unlock(&msm_obj->lock);
 			return PTR_ERR(domain);
+		}
 
 		pages = get_pages(obj);
 		if (IS_ERR(pages)) {
 			obj_remove_domain(domain);
+			mutex_unlock(&msm_obj->lock);
 			return PTR_ERR(pages);
 		}
 
@@ -425,26 +442,8 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj,
 	else
 		obj_remove_domain(domain);
 
-	return ret;
-}
-
-/* get iova, taking a reference. Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj,
-		struct msm_gem_address_space *aspace, uint64_t *iova)
-{
-	struct msm_gem_vma *domain;
-	int ret;
-
-	domain = obj_get_domain(obj, aspace);
-	if (domain) {
-		*iova = domain->iova;
-		return 0;
-	}
-
-	mutex_lock(&obj->dev->struct_mutex);
-	ret = msm_gem_get_iova_locked(obj, aspace, iova);
-	mutex_unlock(&obj->dev->struct_mutex);
-	return ret;
-}
+	mutex_unlock(&msm_obj->lock);
+	return 0;
+}
 
 /* get iova without taking a reference, used in places where you have
@@ -453,11 +452,17 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
 uint64_t msm_gem_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace)
 {
-	struct msm_gem_vma *domain = obj_get_domain(obj, aspace);
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_gem_vma *domain;
+	uint64_t iova;
 
+	mutex_lock(&msm_obj->lock);
+	domain = obj_get_domain(obj, aspace);
 	WARN_ON(!domain);
+	iova = domain ? domain->iova : 0;
+	mutex_unlock(&msm_obj->lock);
 
-	return domain ? domain->iova : 0;
+	return iova;
 }
 
 void msm_gem_put_iova(struct drm_gem_object *obj,
@@ -501,27 +506,23 @@ fail:
 	return ret;
 }
 
-void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
+void *msm_gem_vaddr(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
+	mutex_lock(&msm_obj->lock);
 	if (!msm_obj->vaddr) {
 		struct page **pages = get_pages(obj);
-		if (IS_ERR(pages))
+		if (IS_ERR(pages)) {
+			mutex_unlock(&msm_obj->lock);
 			return ERR_CAST(pages);
+		}
 		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
 				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
 	}
-	return msm_obj->vaddr;
-}
-
-void *msm_gem_vaddr(struct drm_gem_object *obj)
-{
-	void *ret;
-	mutex_lock(&obj->dev->struct_mutex);
-	ret = msm_gem_vaddr_locked(obj);
-	mutex_unlock(&obj->dev->struct_mutex);
-	return ret;
+	mutex_unlock(&msm_obj->lock);
+
+	return msm_obj->vaddr;
 }
 
 /* setup callback for when bo is no longer busy..
@@ -659,16 +660,18 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 
 	/* object should not be on active list: */
 	WARN_ON(is_active(msm_obj));
 
 	list_del(&msm_obj->mm_list);
 
+	mutex_lock(&msm_obj->lock);
+
 	put_iova(obj);
 
 	if (obj->import_attach) {
 		if (msm_obj->vaddr)
 			dma_buf_vunmap(obj->import_attach->dmabuf,
 				msm_obj->vaddr);
 
 		/* Don't drop the pages for imported dmabuf, as they are not
 		 * ours, just free the array we allocated:
 		 */
@@ -685,6 +686,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 		reservation_object_fini(msm_obj->resv);
 
 	drm_gem_object_release(obj);
+	mutex_unlock(&msm_obj->lock);
 
 	kfree(msm_obj);
 }
@@ -696,14 +698,8 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 	struct drm_gem_object *obj;
 	int ret;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
 	obj = msm_gem_new(dev, size, flags);
 
-	mutex_unlock(&dev->struct_mutex);
-
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
@@ -715,9 +711,23 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 	return ret;
 }
 
+static inline void msm_gem_add_to_inactive_list(struct msm_gem_object *msm_obj,
+		struct drm_device *dev, bool struct_mutex_locked)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+
+	if (struct_mutex_locked) {
+		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+	} else {
+		mutex_lock(&dev->struct_mutex);
+		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+		mutex_unlock(&dev->struct_mutex);
+	}
+}
+
 static int msm_gem_new_impl(struct drm_device *dev,
-		uint32_t size, uint32_t flags,
-		struct drm_gem_object **obj)
+		uint32_t size, uint32_t flags, struct drm_gem_object **obj,
+		bool struct_mutex_locked)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gem_object *msm_obj;
|
||||||
if (!msm_obj)
|
if (!msm_obj)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
|
mutex_init(&msm_obj->lock);
|
||||||
|
|
||||||
if (use_vram) {
|
if (use_vram) {
|
||||||
struct msm_gem_vma *domain = obj_add_domain(&msm_obj->base, 0);
|
struct msm_gem_vma *domain = obj_add_domain(&msm_obj->base, 0);
|
||||||
|
|
||||||
|
@@ -761,21 +773,19 @@ static int msm_gem_new_impl(struct drm_device *dev,
 	INIT_LIST_HEAD(&msm_obj->submit_entry);
 	INIT_LIST_HEAD(&msm_obj->domains);
 
-	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+	msm_gem_add_to_inactive_list(msm_obj, dev, struct_mutex_locked);
 
 	*obj = &msm_obj->base;
 
 	return 0;
 }
 
-struct drm_gem_object *msm_gem_new(struct drm_device *dev,
-		uint32_t size, uint32_t flags)
+static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
+		uint32_t size, uint32_t flags, bool struct_mutex_locked)
 {
 	struct drm_gem_object *obj = NULL;
 	int ret;
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
 	size = PAGE_ALIGN(size);
 
 	/*
|
||||||
if (!size)
|
if (!size)
|
||||||
return ERR_PTR(-EINVAL);
|
return ERR_PTR(-EINVAL);
|
||||||
|
|
||||||
ret = msm_gem_new_impl(dev, size, flags, &obj);
|
ret = msm_gem_new_impl(dev, size, flags, &obj, struct_mutex_locked);
|
||||||
if (ret)
|
if (ret)
|
||||||
goto fail;
|
goto fail;
|
||||||
|
|
||||||
|
@ -801,11 +811,23 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
|
||||||
|
|
||||||
fail:
|
fail:
|
||||||
if (obj)
|
if (obj)
|
||||||
drm_gem_object_unreference(obj);
|
drm_gem_object_unreference_unlocked(obj);
|
||||||
|
|
||||||
return ERR_PTR(ret);
|
return ERR_PTR(ret);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
|
||||||
|
uint32_t size, uint32_t flags)
|
||||||
|
{
|
||||||
|
return _msm_gem_new(dev, size, flags, true);
|
||||||
|
}
|
||||||
|
|
||||||
|
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
|
||||||
|
uint32_t size, uint32_t flags)
|
||||||
|
{
|
||||||
|
return _msm_gem_new(dev, size, flags, false);
|
||||||
|
}
|
||||||
|
|
||||||
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
|
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
|
||||||
uint32_t size, struct sg_table *sgt, u32 flags)
|
uint32_t size, struct sg_table *sgt, u32 flags)
|
||||||
{
|
{
|
||||||
|
@@ -821,9 +843,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 
 	size = PAGE_ALIGN(size);
 
-	mutex_lock(&dev->struct_mutex);
-	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
-	mutex_unlock(&dev->struct_mutex);
+	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj, false);
 
 	if (ret)
 		goto fail;
@@ -833,9 +853,11 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 	npages = size / PAGE_SIZE;
 
 	msm_obj = to_msm_bo(obj);
+	mutex_lock(&msm_obj->lock);
 	msm_obj->sgt = sgt;
 	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
 	if (!msm_obj->pages) {
+		mutex_unlock(&msm_obj->lock);
 		ret = -ENOMEM;
 		goto fail;
 	}
@@ -844,8 +866,12 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 	msm_obj->flags |= flags;
 
 	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
-	if (ret)
+	if (ret) {
+		mutex_unlock(&msm_obj->lock);
 		goto fail;
+	}
+
+	mutex_unlock(&msm_obj->lock);
 
 	return obj;
 
@@ -31,6 +31,7 @@ struct msm_gem_address_space {
 	struct msm_mmu *mmu;
 	struct kref kref;
 	struct drm_mm mm;
+	spinlock_t lock; /* Protects drm_mm node allocation/removal */
 	u64 va_len;
 };
 
@@ -80,6 +81,7 @@ struct msm_gem_object {
 	 * an IOMMU. Also used for stolen/splashscreen buffer.
 	 */
 	struct drm_mm_node *vram_node;
+	struct mutex lock; /* Protects resources associated with bo */
 };
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
 
@@ -215,7 +215,7 @@ retry:
 
 
 	/* if locking succeeded, pin bo: */
-	ret = msm_gem_get_iova_locked(&msm_obj->base, aspace, &iova);
+	ret = msm_gem_get_iova(&msm_obj->base, aspace, &iova);
 
 	/* this would break the logic in the fail path.. there is no
 	 * reason for this to happen, but just to be on the safe side
@@ -303,7 +303,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
 	/* For now, just map the entire thing. Eventually we probably
 	 * to do it page-by-page, w/ kmap() if not vmap()d..
 	 */
-	ptr = msm_gem_vaddr_locked(&obj->base);
+	ptr = msm_gem_vaddr(&obj->base);
 
 	if (IS_ERR(ptr)) {
 		ret = PTR_ERR(ptr);
@@ -466,7 +466,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		if (submit_cmd.type == MSM_SUBMIT_CMD_PROFILE_BUF) {
 			submit->profile_buf_iova = submit->cmd[i].iova;
 			submit->profile_buf_vaddr =
-				msm_gem_vaddr_locked(&msm_obj->base);
+				msm_gem_vaddr(&msm_obj->base);
 		}
 
 		if (submit->valid)
@@ -52,6 +52,7 @@ msm_gem_address_space_new(struct msm_mmu *mmu, const char *name,
 	if (!aspace)
 		return ERR_PTR(-ENOMEM);
 
+	spin_lock_init(&aspace->lock);
 	aspace->name = name;
 	aspace->mmu = mmu;
 
@@ -77,14 +78,19 @@ static int allocate_iova(struct msm_gem_address_space *aspace,
 	if (!aspace->va_len)
 		return 0;
 
-	if (WARN_ON(drm_mm_node_allocated(&vma->node)))
-		return 0;
-
 	for_each_sg(sgt->sgl, sg, sgt->nents, i)
 		size += sg->length + sg->offset;
 
-	ret = drm_mm_insert_node(&aspace->mm, &vma->node, size >> PAGE_SHIFT,
-			0, DRM_MM_SEARCH_DEFAULT);
+	spin_lock(&aspace->lock);
+	if (WARN_ON(drm_mm_node_allocated(&vma->node))) {
+		spin_unlock(&aspace->lock);
+		return 0;
+	}
+	ret = drm_mm_insert_node(&aspace->mm, &vma->node,
+			size >> PAGE_SHIFT, 0, DRM_MM_SEARCH_DEFAULT);
+
+	spin_unlock(&aspace->lock);
 
 	if (!ret && iova)
 		*iova = vma->node.start << PAGE_SHIFT;
@@ -110,8 +116,10 @@ int msm_gem_map_vma(struct msm_gem_address_space *aspace,
 			flags, priv);
 
 	if (ret) {
+		spin_lock(&aspace->lock);
 		if (drm_mm_node_allocated(&vma->node))
 			drm_mm_remove_node(&vma->node);
+		spin_unlock(&aspace->lock);
 
 		return ret;
 	}
@@ -130,8 +138,10 @@ void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 
 	aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, priv);
 
+	spin_lock(&aspace->lock);
 	if (drm_mm_node_allocated(&vma->node))
 		drm_mm_remove_node(&vma->node);
+	spin_unlock(&aspace->lock);
 
 	vma->iova = 0;
 
@@ -580,8 +580,7 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 
 		/* ring takes a reference to the bo and iova: */
 		drm_gem_object_reference(&msm_obj->base);
-		msm_gem_get_iova_locked(&msm_obj->base,
-			aspace, &iova);
+		msm_gem_get_iova(&msm_obj->base, aspace, &iova);
 	}
 
 	if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
@@ -890,10 +889,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 
 	/* Create ringbuffer(s): */
 	for (i = 0; i < nr_rings; i++) {
-		mutex_lock(&drm->struct_mutex);
-		gpu->rb[i] = msm_ringbuffer_new(gpu, i);
-		mutex_unlock(&drm->struct_mutex);
 
+		gpu->rb[i] = msm_ringbuffer_new(gpu, i);
 		if (IS_ERR(gpu->rb[i])) {
 			ret = PTR_ERR(gpu->rb[i]);
 			gpu->rb[i] = NULL;
@@ -310,7 +310,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
 			uint64_t iova = submit->cmd[i].iova;
 			uint32_t szd  = submit->cmd[i].size; /* in dwords */
 			struct msm_gem_object *obj = submit->bos[idx].obj;
-			const char *buf = msm_gem_vaddr_locked(&obj->base);
+			const char *buf = msm_gem_vaddr(&obj->base);
 
 			buf += iova - submit->bos[idx].iova;
 
@@ -34,14 +34,15 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id)
 
 	ring->gpu = gpu;
 	ring->id = id;
-	ring->bo = msm_gem_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ, MSM_BO_WC);
+	ring->bo = msm_gem_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
+		MSM_BO_WC);
 	if (IS_ERR(ring->bo)) {
 		ret = PTR_ERR(ring->bo);
 		ring->bo = NULL;
 		goto fail;
 	}
 
-	ring->start = msm_gem_vaddr_locked(ring->bo);
+	ring->start = msm_gem_vaddr(ring->bo);
 	ring->end   = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
 	ring->next  = ring->start;
 	ring->cur   = ring->start;