drm/msm: Pass the MMU domain index in struct msm_file_private
Pass the index of the MMU domain in struct msm_file_private instead of
assuming gpu->id throughout the submit path.

Change-Id: Ic0dedbad3761b0f72ad6b1789f69458896214239
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
parent 663d4c0a64
commit 231c57eeaf

9 changed files with 26 additions and 47 deletions
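In short, the per-file context now owns a pointer to the GPU address space, and each submit carries that pointer instead of reaching back through the msm_gpu pointer. Below is a minimal, self-contained sketch (mock types and function names, not the kernel code itself) of that ownership flow: the address space is captured once when the file is opened and then travels with every submit.

#include <stdio.h>
#include <stdlib.h>

/* Mock types standing in for the kernel structures touched by this patch. */
struct msm_gem_address_space { const char *name; };

struct msm_gpu {
	struct msm_gem_address_space *aspace;	/* default GPU address space */
};

/* Per-file context: now carries the address space directly (was "int dummy"). */
struct msm_file_private {
	struct msm_gem_address_space *aspace;
};

/* Per-submit state: carries the aspace instead of a msm_gpu pointer. */
struct msm_gem_submit {
	struct msm_gem_address_space *aspace;
};

/* open(): the file-private context snapshots the GPU's address space. */
static void mock_open(struct msm_file_private *ctx, struct msm_gpu *gpu)
{
	ctx->aspace = gpu->aspace;
}

/* submit_create(): the submit takes the aspace handed in by the caller. */
static struct msm_gem_submit *mock_submit_create(struct msm_gem_address_space *aspace)
{
	struct msm_gem_submit *submit = calloc(1, sizeof(*submit));

	if (submit)
		submit->aspace = aspace;
	return submit;
}

int main(void)
{
	struct msm_gem_address_space gpu_as = { .name = "gpu-default" };
	struct msm_gpu gpu = { .aspace = &gpu_as };
	struct msm_file_private ctx;

	mock_open(&ctx, &gpu);
	struct msm_gem_submit *submit = mock_submit_create(ctx.aspace);

	/* The pin/unpin path now consults submit->aspace, not submit->gpu->aspace. */
	printf("submit uses aspace: %s\n", submit->aspace->name);
	free(submit);
	return 0;
}

With the address space carried by the submit itself, the pin/unpin and submit helpers no longer need the msm_file_private argument at all, which is exactly what the hunks below remove.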
@@ -42,13 +42,10 @@ static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 	gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
 }
 
-static int a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-		struct msm_file_private *ctx)
+static int a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
-	struct msm_drm_private *priv = gpu->dev->dev_private;
 	struct msm_ringbuffer *ring = gpu->rb[submit->ring];
 	unsigned int i, ibs = 0;
 
@@ -81,9 +78,6 @@ static int a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 		switch (submit->cmd[i].type) {
 		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
 			break;
-		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-			if (priv->lastctx == ctx)
-				break;
 		case MSM_SUBMIT_CMD_BUF:
 			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
 			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
@@ -170,11 +170,9 @@ void adreno_recover(struct msm_gpu *gpu)
 	enable_irq(gpu->irq);
 }
 
-int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-		struct msm_file_private *ctx)
+int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-	struct msm_drm_private *priv = gpu->dev->dev_private;
 	struct msm_ringbuffer *ring = gpu->rb[submit->ring];
 	unsigned i, ibs = 0;
 
@@ -184,8 +182,6 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 			/* ignore IB-targets */
 			break;
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
 			/* ignore if there has not been a ctx switch: */
-			if (priv->lastctx == ctx)
-				break;
 		case MSM_SUBMIT_CMD_BUF:
 			OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
@@ -215,8 +215,7 @@ uint32_t adreno_last_fence(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
 uint32_t adreno_submitted_fence(struct msm_gpu *gpu,
 		struct msm_ringbuffer *ring);
 void adreno_recover(struct msm_gpu *gpu);
-int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-		struct msm_file_private *ctx);
+int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);
 void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
 bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
 #ifdef CONFIG_DEBUG_FS
@@ -565,16 +565,20 @@ static int msm_open(struct drm_device *dev, struct drm_file *file)
 	if (!ctx)
 		return -ENOMEM;
 
-	file->driver_priv = ctx;
-
 	if (dev && dev->dev_private) {
 		struct msm_drm_private *priv = dev->dev_private;
 		struct msm_kms *kms;
 
-		kms = priv->kms;
-		if (kms && kms->funcs && kms->funcs->postopen)
-			kms->funcs->postopen(kms, file);
+		if (priv) {
+			ctx->aspace = priv->gpu->aspace;
+			kms = priv->kms;
+
+			if (kms && kms->funcs && kms->funcs->postopen)
+				kms->funcs->postopen(kms, file);
+		}
 	}
 
+	file->driver_priv = ctx;
+
 	return 0;
 }
@@ -596,11 +600,6 @@ static void msm_postclose(struct drm_device *dev, struct drm_file *file)
 	if (kms && kms->funcs && kms->funcs->postclose)
 		kms->funcs->postclose(kms, file);
 
-	mutex_lock(&dev->struct_mutex);
-	if (ctx == priv->lastctx)
-		priv->lastctx = NULL;
-	mutex_unlock(&dev->struct_mutex);
-
 	kfree(ctx);
 }
 
@@ -74,11 +74,7 @@ struct msm_gem_vma;
 #define MAX_CONNECTORS 8
 
 struct msm_file_private {
-	/* currently we don't do anything useful with this.. but when
-	 * per-context address spaces are supported we'd keep track of
-	 * the context's page-tables here.
-	 */
-	int dummy;
+	struct msm_gem_address_space *aspace;
 };
 
 enum msm_mdp_plane_property {
@@ -278,7 +274,6 @@ struct msm_drm_private {
 
 	/* when we have more than one 'msm_gpu' these need to be an array: */
 	struct msm_gpu *gpu;
-	struct msm_file_private *lastctx;
 
 	struct drm_fb_helper *fbdev;
 
@@ -116,7 +116,7 @@ static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
  */
 struct msm_gem_submit {
 	struct drm_device *dev;
-	struct msm_gpu *gpu;
+	struct msm_gem_address_space *aspace;
 	struct list_head node;   /* node in gpu submit_list */
 	struct list_head bo_list;
 	struct ww_acquire_ctx ticket;
@@ -34,7 +34,7 @@ static inline void __user *to_user_ptr(u64 address)
 }
 
 static struct msm_gem_submit *submit_create(struct drm_device *dev,
-		struct msm_gpu *gpu, int nr)
+		struct msm_gem_address_space *aspace, int nr)
 {
 	struct msm_gem_submit *submit;
 	int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));
@@ -42,7 +42,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
 	submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
 	if (submit) {
 		submit->dev = dev;
-		submit->gpu = gpu;
+		submit->aspace = aspace;
 
 		/* initially, until copy_from_user() and bo lookup succeeds: */
 		submit->nr_bos = 0;
@@ -142,7 +142,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
 	struct msm_gem_object *msm_obj = submit->bos[i].obj;
 
 	if (submit->bos[i].flags & BO_PINNED)
-		msm_gem_put_iova(&msm_obj->base, submit->gpu->aspace);
+		msm_gem_put_iova(&msm_obj->base, submit->aspace);
 
 	if (submit->bos[i].flags & BO_LOCKED)
 		ww_mutex_unlock(&msm_obj->resv->lock);
@@ -181,7 +181,7 @@ retry:
 
 		/* if locking succeeded, pin bo: */
 		ret = msm_gem_get_iova_locked(&msm_obj->base,
-				submit->gpu->aspace, &iova);
+				submit->aspace, &iova);
 
 		/* this would break the logic in the fail path.. there is no
 		 * reason for this to happen, but just to be on the safe side
@@ -361,7 +361,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 
 	mutex_lock(&dev->struct_mutex);
 
-	submit = submit_create(dev, gpu, args->nr_bos);
+	submit = submit_create(dev, ctx->aspace, args->nr_bos);
 	if (!submit) {
 		ret = -ENOMEM;
 		goto out;
@@ -440,7 +440,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		(args->flags & MSM_SUBMIT_RING_MASK) >> MSM_SUBMIT_RING_SHIFT,
 		0, gpu->nr_rings - 1);
 
-	ret = msm_gpu_submit(gpu, submit, ctx);
+	ret = msm_gpu_submit(gpu, submit);
 
 	args->fence = submit->fence;
 
@@ -301,7 +301,7 @@ static void recover_worker(struct work_struct *work)
 
 		/* replay the remaining submits for all rings: */
 		list_for_each_entry(submit, &gpu->submit_list, node) {
-			gpu->funcs->submit(gpu, submit, NULL);
+			gpu->funcs->submit(gpu, submit);
 		}
 	}
 	mutex_unlock(&dev->struct_mutex);
@@ -525,8 +525,7 @@ void msm_gpu_retire(struct msm_gpu *gpu)
 }
 
 /* add bo's to gpu's ring, and kick gpu: */
-int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-		struct msm_file_private *ctx)
+int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
 	struct drm_device *dev = gpu->dev;
 	struct msm_drm_private *priv = dev->dev_private;
@@ -561,7 +560,7 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 			/* ring takes a reference to the bo and iova: */
 			drm_gem_object_reference(&msm_obj->base);
 			msm_gem_get_iova_locked(&msm_obj->base,
-					submit->gpu->aspace, &iova);
+					submit->aspace, &iova);
 		}
 
 		if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
@@ -571,8 +570,7 @@
 		msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
 	}
 
-	ret = gpu->funcs->submit(gpu, submit, ctx);
-	priv->lastctx = ctx;
+	ret = gpu->funcs->submit(gpu, submit);
 
 	hangcheck_timer_reset(gpu);
 
@@ -55,8 +55,7 @@ struct msm_gpu_funcs {
 	int (*hw_init)(struct msm_gpu *gpu);
 	int (*pm_suspend)(struct msm_gpu *gpu);
 	int (*pm_resume)(struct msm_gpu *gpu);
-	int (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-			struct msm_file_private *ctx);
+	int (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
 	void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
 	irqreturn_t (*irq)(struct msm_gpu *irq);
 	uint32_t (*last_fence)(struct msm_gpu *gpu,
@@ -241,8 +240,7 @@ int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
 		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);
 
 void msm_gpu_retire(struct msm_gpu *gpu);
-int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-		struct msm_file_private *ctx);
+int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);
 
 int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,