Merge "drm/msm: Separate locking of buffer resources from struct_mutex"
commit 6ce38a9691
21 changed files with 201 additions and 147 deletions
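The theme running through the hunks below: per-buffer state (backing pages, kernel vaddr, per-address-space iova domains) moves from the device-wide dev->struct_mutex to a new per-object mutex (msm_obj->lock), while the VRAM carveout and address-space drm_mm allocators get their own spinlocks. A minimal sketch of the before/after accessor pattern follows; the demo_* names are illustrative only and not part of the driver.

```c
#include <linux/mutex.h>

/* Illustrative device/object shapes, not msm structs. */
struct demo_device {
    struct mutex struct_mutex;      /* old: one big lock for everything */
};

struct demo_gem_object {
    struct mutex lock;              /* new: guards this bo's pages/vaddr/domains */
    struct page **pages;
    void *vaddr;
};

/* Before: every accessor serialized on the device-wide struct_mutex. */
static struct page **demo_get_pages_old(struct demo_device *dev,
                                        struct demo_gem_object *obj)
{
    struct page **p;

    mutex_lock(&dev->struct_mutex);
    p = obj->pages;                 /* ...allocate lazily if needed... */
    mutex_unlock(&dev->struct_mutex);
    return p;
}

/* After: only this object's lock is taken, so unrelated buffers (and the
 * submit/retire paths) no longer contend on a single global lock. */
static struct page **demo_get_pages_new(struct demo_gem_object *obj)
{
    struct page **p;

    mutex_lock(&obj->lock);
    p = obj->pages;                 /* ...allocate lazily if needed... */
    mutex_unlock(&obj->lock);
    return p;
}
```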
@@ -481,10 +481,8 @@ static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
    struct drm_gem_object *bo;
    void *ptr;

    mutex_lock(&drm->struct_mutex);
    bo = msm_gem_new(drm, fw->size - 4,
        MSM_BO_UNCACHED | MSM_BO_GPU_READONLY);
    mutex_unlock(&drm->struct_mutex);

    if (IS_ERR(bo))
        return bo;

@@ -1408,8 +1406,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
     * Set the user domain range to fall into the TTBR1 region for global
     * objects
     */
    a5xx_config.va_start = 0x800000000;
    a5xx_config.va_end = 0x8ffffffff;
    a5xx_config.va_start = 0xfffffff000000000ULL;
    a5xx_config.va_end = 0xffffffffffffffffULL;

    a5xx_config.secure_va_start = SECURE_VA_START;
    a5xx_config.secure_va_end = SECURE_VA_START + SECURE_VA_SIZE - 1;

@@ -458,10 +458,8 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
     */
    bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;

    mutex_lock(&drm->struct_mutex);
    a5xx_gpu->gpmu_bo = msm_gem_new(drm, bosize,
        MSM_BO_UNCACHED | MSM_BO_GPU_READONLY);
    mutex_unlock(&drm->struct_mutex);

    if (IS_ERR(a5xx_gpu->gpmu_bo))
        goto err;

@@ -24,9 +24,7 @@ static void *alloc_kernel_bo(struct drm_device *drm, struct msm_gpu *gpu,
    void *ptr;
    int ret;

    mutex_lock(&drm->struct_mutex);
    _bo = msm_gem_new(drm, size, flags);
    mutex_unlock(&drm->struct_mutex);

    if (IS_ERR(_bo))
        return _bo;

@@ -217,18 +217,19 @@ static int crashdump_init(struct msm_gpu *gpu, struct crashdump *crashdump)
    struct drm_device *drm = gpu->dev;
    int ret = -ENOMEM;

    crashdump->bo = msm_gem_new(drm, CRASHDUMP_BO_SIZE, MSM_BO_UNCACHED);
    crashdump->bo = msm_gem_new_locked(drm, CRASHDUMP_BO_SIZE,
        MSM_BO_UNCACHED);
    if (IS_ERR(crashdump->bo)) {
        ret = PTR_ERR(crashdump->bo);
        crashdump->bo = NULL;
        return ret;
    }

    crashdump->ptr = msm_gem_vaddr_locked(crashdump->bo);
    crashdump->ptr = msm_gem_vaddr(crashdump->bo);
    if (!crashdump->ptr)
        goto out;

    ret = msm_gem_get_iova_locked(crashdump->bo, gpu->aspace,
    ret = msm_gem_get_iova(crashdump->bo, gpu->aspace,
        &crashdump->iova);

out:
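Note how the crashdump hunk goes the other way from the rest: it switches msm_gem_new() to the new msm_gem_new_locked(). The _locked constructor is for call sites that already hold dev->struct_mutex (the crash/recovery path typically runs under it), since the unlocked constructor now takes struct_mutex itself when it links the object onto the inactive list. A hedged caller-side sketch; demo_alloc_bo() is illustrative, and the msm prototypes are assumed to come from the driver's own header:

```c
#include <linux/lockdep.h>
#include "msm_drv.h"    /* assumed: msm_gem_new() / msm_gem_new_locked() prototypes */

/* Pick the constructor that matches the locking context of the caller. */
static struct drm_gem_object *demo_alloc_bo(struct drm_device *dev,
                                            uint32_t size, uint32_t flags,
                                            bool struct_mutex_held)
{
    if (struct_mutex_held) {
        /* e.g. GPU recovery path, already under dev->struct_mutex */
        lockdep_assert_held(&dev->struct_mutex);
        return msm_gem_new_locked(dev, size, flags);
    }

    /* normal path: the constructor takes struct_mutex itself when it
     * puts the new object on the inactive list */
    return msm_gem_new(dev, size, flags);
}
```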
@@ -563,10 +563,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
        }
    }

    mutex_lock(&drm->struct_mutex);
    adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
        MSM_BO_UNCACHED);
    mutex_unlock(&drm->struct_mutex);
    if (IS_ERR(adreno_gpu->memptrs_bo)) {
        ret = PTR_ERR(adreno_gpu->memptrs_bo);
        adreno_gpu->memptrs_bo = NULL;

@@ -838,22 +838,19 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
    int ret;
    u64 iova;

    mutex_lock(&dev->struct_mutex);
    msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
    if (IS_ERR(msm_host->tx_gem_obj)) {
        ret = PTR_ERR(msm_host->tx_gem_obj);
        pr_err("%s: failed to allocate gem, %d\n", __func__, ret);
        msm_host->tx_gem_obj = NULL;
        mutex_unlock(&dev->struct_mutex);
        return ret;
    }

    ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova);
    ret = msm_gem_get_iova(msm_host->tx_gem_obj, NULL, &iova);
    if (ret) {
        pr_err("%s: failed to get iova, %d\n", __func__, ret);
        return ret;
    }
    mutex_unlock(&dev->struct_mutex);

    if (iova & 0x07) {
        pr_err("%s: buf NOT 8 bytes aligned\n", __func__);

@@ -392,7 +392,7 @@ static void update_cursor(struct drm_crtc *crtc)
        if (next_bo) {
            /* take a obj ref + iova ref when we start scanning out: */
            drm_gem_object_reference(next_bo);
            msm_gem_get_iova_locked(next_bo, mdp4_kms->aspace,
            msm_gem_get_iova(next_bo, mdp4_kms->aspace,
                &iova);

            /* enable cursor: */

@@ -536,9 +536,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
        goto fail;
    }

    mutex_lock(&dev->struct_mutex);
    mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
    mutex_unlock(&dev->struct_mutex);
    if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
        ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
        dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);

@@ -322,6 +322,7 @@ static int msm_init_vram(struct drm_device *dev)
        priv->vram.size = size;

        drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
        spin_lock_init(&priv->vram.lock);

        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
        dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);

@@ -631,12 +632,10 @@ static void msm_postclose(struct drm_device *dev, struct drm_file *file)
    if (priv->gpu)
        msm_gpu_cleanup_counters(priv->gpu, ctx);

    mutex_lock(&dev->struct_mutex);
    if (ctx && ctx->aspace && ctx->aspace != priv->gpu->aspace) {
        ctx->aspace->mmu->funcs->detach(ctx->aspace->mmu);
        msm_gem_address_space_put(ctx->aspace);
    }
    mutex_unlock(&dev->struct_mutex);

    kfree(ctx);
}

@@ -341,6 +341,7 @@ struct msm_drm_private {
         * and position mm_node->start is in # of pages:
         */
        struct drm_mm mm;
        spinlock_t lock; /* Protects drm_mm node allocation/removal */
    } vram;

    struct msm_vblank_ctrl vblank_ctrl;

@@ -431,8 +432,6 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova_locked(struct drm_gem_object *obj,
        struct msm_gem_address_space *aspace, uint64_t *iova);
int msm_gem_get_iova(struct drm_gem_object *obj,
        struct msm_gem_address_space *aspace, uint64_t *iova);
uint64_t msm_gem_iova(struct drm_gem_object *obj,

@@ -453,7 +452,6 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
        struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj);
void msm_gem_prime_unpin(struct drm_gem_object *obj);
void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
void *msm_gem_vaddr(struct drm_gem_object *obj);
int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
        struct msm_fence_cb *cb);

@@ -468,6 +466,8 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
        uint32_t size, uint32_t flags, uint32_t *handle);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
        uint32_t size, uint32_t flags);
struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
        uint32_t size, uint32_t flags);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
        uint32_t size, struct sg_table *sgt, u32 flags);
void msm_gem_sync(struct drm_gem_object *obj, u32 op);
@@ -104,10 +104,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
    /* allocate backing bo */
    size = mode_cmd.pitches[0] * mode_cmd.height;
    DBG("allocating %d bytes for fb %d", size, dev->primary->index);
    mutex_lock(&dev->struct_mutex);
    fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT |
            MSM_BO_WC | MSM_BO_STOLEN);
    mutex_unlock(&dev->struct_mutex);
    if (IS_ERR(fbdev->bo)) {
        ret = PTR_ERR(fbdev->bo);
        fbdev->bo = NULL;

@@ -133,7 +131,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
     * in panic (ie. lock-safe, etc) we could avoid pinning the
     * buffer now:
     */
    ret = msm_gem_get_iova_locked(fbdev->bo, 0, &paddr);
    ret = msm_gem_get_iova(fbdev->bo, 0, &paddr);
    if (ret) {
        dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
        goto fail_unlock;

@@ -163,7 +161,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
    /* FIXME: Verify paddr < 32 bits? */
    dev->mode_config.fb_base = lower_32_bits(paddr);

    fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
    fbi->screen_base = msm_gem_vaddr(fbdev->bo);
    fbi->screen_size = fbdev->bo->size;
    fbi->fix.smem_start = lower_32_bits(paddr);
    fbi->fix.smem_len = fbdev->bo->size;

@@ -63,8 +63,7 @@ static bool use_pages(struct drm_gem_object *obj)
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
        int npages)
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
    struct msm_gem_object *msm_obj = to_msm_bo(obj);
    struct msm_drm_private *priv = obj->dev->dev_private;

@@ -76,8 +75,10 @@ static struct page **get_pages_vram(struct drm_gem_object *obj,
    if (!p)
        return ERR_PTR(-ENOMEM);

    spin_lock(&priv->vram.lock);
    ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
            npages, 0, DRM_MM_SEARCH_DEFAULT);
    spin_unlock(&priv->vram.lock);
    if (ret) {
        drm_free_large(p);
        return ERR_PTR(ret);

@@ -92,7 +93,6 @@ static struct page **get_pages_vram(struct drm_gem_object *obj,
    return p;
}

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
    struct msm_gem_object *msm_obj = to_msm_bo(obj);

@@ -149,6 +149,18 @@ static struct page **get_pages(struct drm_gem_object *obj)
    return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
    struct msm_gem_object *msm_obj = to_msm_bo(obj);
    struct msm_drm_private *priv = obj->dev->dev_private;

    spin_lock(&priv->vram.lock);
    drm_mm_remove_node(msm_obj->vram_node);
    spin_unlock(&priv->vram.lock);

    drm_free_large(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
    struct msm_gem_object *msm_obj = to_msm_bo(obj);

@@ -163,12 +175,10 @@ static void put_pages(struct drm_gem_object *obj)
        sg_free_table(msm_obj->sgt);
        kfree(msm_obj->sgt);

        if (use_pages(obj)) {
        if (use_pages(obj))
            drm_gem_put_pages(obj, msm_obj->pages, true, false);
        } else {
            drm_mm_remove_node(msm_obj->vram_node);
            drm_free_large(msm_obj->pages);
        }
        else
            put_pages_vram(obj);

        msm_obj->pages = NULL;
    }
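The hunks above give the VRAM carveout its own lock: priv->vram.lock now guards drm_mm node insertion (get_pages_vram) and removal (the new put_pages_vram), since drm_mm does no locking of its own and the carveout can now be touched without struct_mutex. A hedged sketch of that wrapper shape; the demo_* names are placeholders, and drm_mm_insert_node() is used with the pre-4.11 signature that this tree uses:

```c
#include <linux/spinlock.h>
#include <drm/drm_mm.h>

/* Illustrative carveout wrapper: every insert/remove happens under one
 * spinlock; a spinlock suffices because the critical sections only touch
 * the allocator and never sleep. */
struct demo_carveout {
    struct drm_mm mm;
    spinlock_t lock;
};

static void demo_carveout_init(struct demo_carveout *c, u64 npages)
{
    drm_mm_init(&c->mm, 0, npages - 1);     /* mirrors the driver's usage */
    spin_lock_init(&c->lock);
}

static int demo_carveout_alloc(struct demo_carveout *c,
                               struct drm_mm_node *node, u64 npages)
{
    int ret;

    spin_lock(&c->lock);
    ret = drm_mm_insert_node(&c->mm, node, npages, 0,
            DRM_MM_SEARCH_DEFAULT);
    spin_unlock(&c->lock);

    return ret;
}

static void demo_carveout_free(struct demo_carveout *c,
                               struct drm_mm_node *node)
{
    spin_lock(&c->lock);
    drm_mm_remove_node(node);
    spin_unlock(&c->lock);
}
```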
@@ -176,11 +186,12 @@ static void put_pages(struct drm_gem_object *obj)

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
    struct drm_device *dev = obj->dev;
    struct msm_gem_object *msm_obj = to_msm_bo(obj);
    struct page **p;
    mutex_lock(&dev->struct_mutex);

    mutex_lock(&msm_obj->lock);
    p = get_pages(obj);
    mutex_unlock(&dev->struct_mutex);
    mutex_unlock(&msm_obj->lock);
    return p;
}

@@ -240,16 +251,17 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    struct drm_gem_object *obj = vma->vm_private_data;
    struct drm_device *dev = obj->dev;
    struct msm_gem_object *msm_obj = to_msm_bo(obj);
    struct page **pages;
    unsigned long pfn;
    pgoff_t pgoff;
    int ret;

    /* Make sure we don't parallel update on a fault, nor move or remove
     * something from beneath our feet
    /*
     * vm_ops.open and close get and put a reference on obj.
     * So, we dont need to hold one here.
     */
    ret = mutex_lock_interruptible(&dev->struct_mutex);
    ret = mutex_lock_interruptible(&msm_obj->lock);
    if (ret)
        goto out;

@@ -272,7 +284,7 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
    ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out_unlock:
    mutex_unlock(&dev->struct_mutex);
    mutex_unlock(&msm_obj->lock);
out:
    switch (ret) {
    case -EAGAIN:
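In the fault path the handler now serializes only against other users of the same object, via mutex_lock_interruptible(&msm_obj->lock), and the lock-acquisition error falls through to the usual switch that converts errno values into VM_FAULT_* codes. A hedged, generic sketch of that shape (demo_* names are not driver code; the errno-to-fault mapping mirrors what drivers of this era commonly did):

```c
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/errno.h>

struct demo_gem_object {
    struct mutex lock;      /* per-object lock, as in the sketches above */
};

/* Fault-handler skeleton: take only the per-object lock, interruptibly,
 * then translate the result into a fault code. */
static int demo_gem_fault(struct demo_gem_object *obj)
{
    int ret;

    ret = mutex_lock_interruptible(&obj->lock);
    if (ret)
        goto out;

    /* ... look up the backing page and vm_insert_mixed() it ... */

    mutex_unlock(&obj->lock);
out:
    switch (ret) {
    case 0:
    case -ERESTARTSYS:
    case -EINTR:
    case -EBUSY:
        return VM_FAULT_NOPAGE;
    case -ENOMEM:
        return VM_FAULT_OOM;
    default:
        return VM_FAULT_SIGBUS;
    }
}
```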
@@ -296,9 +308,10 @@ out:
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
    struct drm_device *dev = obj->dev;
    struct msm_gem_object *msm_obj = to_msm_bo(obj);
    int ret;

    WARN_ON(!mutex_is_locked(&dev->struct_mutex));
    WARN_ON(!mutex_is_locked(&msm_obj->lock));

    /* Make it mmapable */
    ret = drm_gem_create_mmap_offset(obj);

@@ -314,9 +327,11 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
    uint64_t offset;
    mutex_lock(&obj->dev->struct_mutex);
    struct msm_gem_object *msm_obj = to_msm_bo(obj);

    mutex_lock(&msm_obj->lock);
    offset = mmap_offset(obj);
    mutex_unlock(&obj->dev->struct_mutex);
    mutex_unlock(&msm_obj->lock);
    return offset;
}

@@ -328,14 +343,14 @@ static void obj_remove_domain(struct msm_gem_vma *domain)
    }
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
    struct drm_device *dev = obj->dev;
    struct msm_gem_object *msm_obj = to_msm_bo(obj);
    struct msm_gem_vma *domain, *tmp;

    WARN_ON(!mutex_is_locked(&dev->struct_mutex));
    WARN_ON(!mutex_is_locked(&msm_obj->lock));

    list_for_each_entry_safe(domain, tmp, &msm_obj->domains, list) {
        if (iommu_present(&platform_bus_type)) {

@@ -381,14 +396,8 @@ static struct msm_gem_vma *obj_get_domain(struct drm_gem_object *obj,
#define IOMMU_PRIV 0
#endif

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj,
/* A reference to obj must be held before calling this function. */
int msm_gem_get_iova(struct drm_gem_object *obj,
        struct msm_gem_address_space *aspace, uint64_t *iova)
{
    struct msm_gem_object *msm_obj = to_msm_bo(obj);

@@ -396,13 +405,18 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj,
    struct msm_gem_vma *domain;
    int ret = 0;

    mutex_lock(&msm_obj->lock);

    if (!iommu_present(&platform_bus_type)) {
        pages = get_pages(obj);

        if (IS_ERR(pages))
        if (IS_ERR(pages)) {
            mutex_unlock(&msm_obj->lock);
            return PTR_ERR(pages);
        }

        *iova = (uint64_t) physaddr(obj);
        mutex_unlock(&msm_obj->lock);
        return 0;
    }

@@ -410,12 +424,15 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj,

    if (!domain) {
        domain = obj_add_domain(obj, aspace);
        if (IS_ERR(domain))
        if (IS_ERR(domain)) {
            mutex_unlock(&msm_obj->lock);
            return PTR_ERR(domain);
        }

        pages = get_pages(obj);
        if (IS_ERR(pages)) {
            obj_remove_domain(domain);
            mutex_unlock(&msm_obj->lock);
            return PTR_ERR(pages);
        }

@@ -428,26 +445,8 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj,
    else
        obj_remove_domain(domain);

    return ret;
}

/* get iova, taking a reference. Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj,
        struct msm_gem_address_space *aspace, uint64_t *iova)
{
    struct msm_gem_vma *domain;
    int ret;

    domain = obj_get_domain(obj, aspace);
    if (domain) {
        *iova = domain->iova;
        return 0;
    }

    mutex_lock(&obj->dev->struct_mutex);
    ret = msm_gem_get_iova_locked(obj, aspace, iova);
    mutex_unlock(&obj->dev->struct_mutex);
    return ret;
    mutex_unlock(&msm_obj->lock);
    return 0;
}

/* get iova without taking a reference, used in places where you have
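msm_gem_get_iova_locked() and its old unlocked wrapper collapse into a single msm_gem_get_iova() that takes msm_obj->lock itself and drops it on every early-exit path. A hedged sketch of that self-locking lookup-or-map shape; demo_gem_object, demo_lookup_mapping() and demo_map_pages() are stand-ins for msm internals, not real functions:

```c
#include <linux/mutex.h>
#include <linux/types.h>

struct demo_gem_object {
    struct mutex lock;
};

/* stub: pretend there is no cached mapping yet */
static bool demo_lookup_mapping(struct demo_gem_object *obj, void *aspace,
                                u64 *iova)
{
    return false;
}

/* stub: pin pages and create the mapping */
static int demo_map_pages(struct demo_gem_object *obj, void *aspace, u64 *iova)
{
    *iova = 0;
    return 0;
}

static int demo_gem_get_iova(struct demo_gem_object *obj, void *aspace,
                             u64 *iova)
{
    int ret;

    mutex_lock(&obj->lock);

    /* fast path: a mapping already exists for this address space */
    if (demo_lookup_mapping(obj, aspace, iova)) {
        mutex_unlock(&obj->lock);
        return 0;
    }

    /* slow path: every error return must drop the per-object lock first */
    ret = demo_map_pages(obj, aspace, iova);

    mutex_unlock(&obj->lock);
    return ret;
}
```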
@@ -456,11 +455,17 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
uint64_t msm_gem_iova(struct drm_gem_object *obj,
        struct msm_gem_address_space *aspace)
{
    struct msm_gem_vma *domain = obj_get_domain(obj, aspace);
    struct msm_gem_object *msm_obj = to_msm_bo(obj);
    struct msm_gem_vma *domain;
    uint64_t iova;

    mutex_lock(&msm_obj->lock);
    domain = obj_get_domain(obj, aspace);
    WARN_ON(!domain);
    iova = domain ? domain->iova : 0;
    mutex_unlock(&msm_obj->lock);

    return domain ? domain->iova : 0;
    return iova;
}

void msm_gem_put_iova(struct drm_gem_object *obj,

@@ -504,27 +509,23 @@ fail:
    return ret;
}

void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
void *msm_gem_vaddr(struct drm_gem_object *obj)
{
    struct msm_gem_object *msm_obj = to_msm_bo(obj);
    WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

    mutex_lock(&msm_obj->lock);
    if (!msm_obj->vaddr) {
        struct page **pages = get_pages(obj);
        if (IS_ERR(pages))
        if (IS_ERR(pages)) {
            mutex_unlock(&msm_obj->lock);
            return ERR_CAST(pages);
        }
        msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
                VM_MAP, pgprot_writecombine(PAGE_KERNEL));
    }
    return msm_obj->vaddr;
}
    mutex_unlock(&msm_obj->lock);

void *msm_gem_vaddr(struct drm_gem_object *obj)
{
    void *ret;
    mutex_lock(&obj->dev->struct_mutex);
    ret = msm_gem_vaddr_locked(obj);
    mutex_unlock(&obj->dev->struct_mutex);
    return ret;
    return msm_obj->vaddr;
}

/* setup callback for when bo is no longer busy..
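With the _locked/unlocked pair gone, msm_gem_vaddr() vmaps the backing pages lazily, entirely under msm_obj->lock, so two callers racing on first use still end up with a single mapping. A small hedged sketch of that lazy-init-under-a-lock idiom; demo_* names are placeholders and the writecombine pgprot just mirrors what the driver uses for WC/uncached buffers:

```c
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/err.h>

struct demo_gem_object {
    struct mutex lock;
    struct page **pages;
    unsigned int npages;
    void *vaddr;
};

/* Kernel mapping created at most once, guarded by the per-object lock. */
static void *demo_gem_vaddr(struct demo_gem_object *obj)
{
    mutex_lock(&obj->lock);
    if (!obj->vaddr) {
        if (!obj->pages) {
            /* sketch: pages would normally be populated here */
            mutex_unlock(&obj->lock);
            return ERR_PTR(-ENOMEM);
        }
        obj->vaddr = vmap(obj->pages, obj->npages, VM_MAP,
                          pgprot_writecombine(PAGE_KERNEL));
    }
    mutex_unlock(&obj->lock);

    return obj->vaddr;
}
```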
@@ -662,16 +663,16 @@ void msm_gem_free_object(struct drm_gem_object *obj)

    /* object should not be on active list: */
    WARN_ON(is_active(msm_obj));

    list_del(&msm_obj->mm_list);

    mutex_lock(&msm_obj->lock);

    put_iova(obj);

    if (obj->import_attach) {
        if (msm_obj->vaddr)
            dma_buf_vunmap(obj->import_attach->dmabuf,
                    msm_obj->vaddr);

        /* Don't drop the pages for imported dmabuf, as they are not
         * ours, just free the array we allocated:
         */

@@ -688,6 +689,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
        reservation_object_fini(msm_obj->resv);

    drm_gem_object_release(obj);
    mutex_unlock(&msm_obj->lock);

    kfree(msm_obj);
}

@@ -699,14 +701,8 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
    struct drm_gem_object *obj;
    int ret;

    ret = mutex_lock_interruptible(&dev->struct_mutex);
    if (ret)
        return ret;

    obj = msm_gem_new(dev, size, flags);

    mutex_unlock(&dev->struct_mutex);

    if (IS_ERR(obj))
        return PTR_ERR(obj);

@@ -718,9 +714,23 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
    return ret;
}

static inline void msm_gem_add_to_inactive_list(struct msm_gem_object *msm_obj,
        struct drm_device *dev, bool struct_mutex_locked)
{
    struct msm_drm_private *priv = dev->dev_private;

    if (struct_mutex_locked) {
        list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
    } else {
        mutex_lock(&dev->struct_mutex);
        list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
        mutex_unlock(&dev->struct_mutex);
    }
}

static int msm_gem_new_impl(struct drm_device *dev,
        uint32_t size, uint32_t flags,
        struct drm_gem_object **obj)
        uint32_t size, uint32_t flags, struct drm_gem_object **obj,
        bool struct_mutex_locked)
{
    struct msm_drm_private *priv = dev->dev_private;
    struct msm_gem_object *msm_obj;
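The constructor still needs dev->struct_mutex for exactly one thing, linking the new object onto priv->inactive_list, so msm_gem_new_impl() grows a struct_mutex_locked flag and the public entry points become thin wrappers over it (see the _msm_gem_new/msm_gem_new_locked hunks below). A hedged sketch of that wrapper pattern with generic names (demo_*), not the driver's actual types:

```c
#include <linux/mutex.h>
#include <linux/list.h>

struct demo_device {
    struct mutex struct_mutex;
    struct list_head inactive_list;
};

struct demo_object {
    struct list_head mm_list;
};

/* The only step that still needs the device-wide lock: callers that
 * already hold it say so via the flag, everyone else lets the helper
 * take it. */
static void demo_add_to_inactive_list(struct demo_device *dev,
                                      struct demo_object *obj,
                                      bool struct_mutex_locked)
{
    if (struct_mutex_locked) {
        list_add_tail(&obj->mm_list, &dev->inactive_list);
    } else {
        mutex_lock(&dev->struct_mutex);
        list_add_tail(&obj->mm_list, &dev->inactive_list);
        mutex_unlock(&dev->struct_mutex);
    }
}

static int demo_new_impl(struct demo_device *dev, struct demo_object *obj,
                         bool struct_mutex_locked)
{
    INIT_LIST_HEAD(&obj->mm_list);
    demo_add_to_inactive_list(dev, obj, struct_mutex_locked);
    return 0;
}

/* public wrappers, mirroring msm_gem_new()/msm_gem_new_locked() */
static int demo_new(struct demo_device *dev, struct demo_object *obj)
{
    return demo_new_impl(dev, obj, false);
}

static int demo_new_locked(struct demo_device *dev, struct demo_object *obj)
{
    return demo_new_impl(dev, obj, true);
}
```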
@@ -749,6 +759,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
    if (!msm_obj)
        return -ENOMEM;

    mutex_init(&msm_obj->lock);

    if (use_vram) {
        struct msm_gem_vma *domain = obj_add_domain(&msm_obj->base, 0);

@@ -764,21 +776,19 @@ static int msm_gem_new_impl(struct drm_device *dev,
    INIT_LIST_HEAD(&msm_obj->submit_entry);
    INIT_LIST_HEAD(&msm_obj->domains);

    list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
    msm_gem_add_to_inactive_list(msm_obj, dev, struct_mutex_locked);

    *obj = &msm_obj->base;

    return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
        uint32_t size, uint32_t flags)
static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
        uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
    struct drm_gem_object *obj = NULL;
    int ret;

    WARN_ON(!mutex_is_locked(&dev->struct_mutex));

    size = PAGE_ALIGN(size);

    /*

@@ -788,7 +798,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
    if (!size)
        return ERR_PTR(-EINVAL);

    ret = msm_gem_new_impl(dev, size, flags, &obj);
    ret = msm_gem_new_impl(dev, size, flags, &obj, struct_mutex_locked);
    if (ret)
        goto fail;

@@ -804,11 +814,23 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,

fail:
    if (obj)
        drm_gem_object_unreference(obj);
        drm_gem_object_unreference_unlocked(obj);

    return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
        uint32_t size, uint32_t flags)
{
    return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
        uint32_t size, uint32_t flags)
{
    return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
        uint32_t size, struct sg_table *sgt, u32 flags)
{

@@ -824,9 +846,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,

    size = PAGE_ALIGN(size);

    mutex_lock(&dev->struct_mutex);
    ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
    mutex_unlock(&dev->struct_mutex);
    ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj, false);

    if (ret)
        goto fail;

@@ -836,9 +856,11 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
    npages = size / PAGE_SIZE;

    msm_obj = to_msm_bo(obj);
    mutex_lock(&msm_obj->lock);
    msm_obj->sgt = sgt;
    msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
    if (!msm_obj->pages) {
        mutex_unlock(&msm_obj->lock);
        ret = -ENOMEM;
        goto fail;
    }

@@ -847,8 +869,12 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
    msm_obj->flags |= flags;

    ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
    if (ret)
    if (ret) {
        mutex_unlock(&msm_obj->lock);
        goto fail;
    }

    mutex_unlock(&msm_obj->lock);

    return obj;

@@ -31,6 +31,7 @@ struct msm_gem_address_space {
    struct msm_mmu *mmu;
    struct kref kref;
    struct drm_mm mm;
    spinlock_t lock; /* Protects drm_mm node allocation/removal */
    u64 va_len;
};

@@ -80,6 +81,7 @@ struct msm_gem_object {
     * an IOMMU. Also used for stolen/splashscreen buffer.
     */
    struct drm_mm_node *vram_node;
    struct mutex lock; /* Protects resources associated with bo */
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
@@ -219,7 +219,7 @@ retry:


        /* if locking succeeded, pin bo: */
        ret = msm_gem_get_iova_locked(&msm_obj->base, aspace, &iova);
        ret = msm_gem_get_iova(&msm_obj->base, aspace, &iova);

        /* this would break the logic in the fail path.. there is no
         * reason for this to happen, but just to be on the safe side

@@ -307,7 +307,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
    /* For now, just map the entire thing. Eventually we probably
     * to do it page-by-page, w/ kmap() if not vmap()d..
     */
    ptr = msm_gem_vaddr_locked(&obj->base);
    ptr = msm_gem_vaddr(&obj->base);

    if (IS_ERR(ptr)) {
        ret = PTR_ERR(ptr);

@@ -470,7 +470,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        if (submit_cmd.type == MSM_SUBMIT_CMD_PROFILE_BUF) {
            submit->profile_buf_iova = submit->cmd[i].iova;
            submit->profile_buf_vaddr =
                msm_gem_vaddr_locked(&msm_obj->base);
                msm_gem_vaddr(&msm_obj->base);
        }

        if (submit->valid)

@@ -52,6 +52,7 @@ msm_gem_address_space_new(struct msm_mmu *mmu, const char *name,
    if (!aspace)
        return ERR_PTR(-ENOMEM);

    spin_lock_init(&aspace->lock);
    aspace->name = name;
    aspace->mmu = mmu;

@@ -77,14 +78,19 @@ static int allocate_iova(struct msm_gem_address_space *aspace,
    if (!aspace->va_len)
        return 0;

    if (WARN_ON(drm_mm_node_allocated(&vma->node)))
        return 0;

    for_each_sg(sgt->sgl, sg, sgt->nents, i)
        size += sg->length + sg->offset;

    ret = drm_mm_insert_node(&aspace->mm, &vma->node, size >> PAGE_SHIFT,
            0, DRM_MM_SEARCH_DEFAULT);
    spin_lock(&aspace->lock);

    if (WARN_ON(drm_mm_node_allocated(&vma->node))) {
        spin_unlock(&aspace->lock);
        return 0;
    }
    ret = drm_mm_insert_node(&aspace->mm, &vma->node,
            size >> PAGE_SHIFT, 0, DRM_MM_SEARCH_DEFAULT);

    spin_unlock(&aspace->lock);

    if (!ret && iova)
        *iova = vma->node.start << PAGE_SHIFT;

@@ -110,8 +116,10 @@ int msm_gem_map_vma(struct msm_gem_address_space *aspace,
            flags, priv);

    if (ret) {
        spin_lock(&aspace->lock);
        if (drm_mm_node_allocated(&vma->node))
            drm_mm_remove_node(&vma->node);
        spin_unlock(&aspace->lock);

        return ret;
    }

@@ -130,8 +138,10 @@ void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,

    aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, priv);

    spin_lock(&aspace->lock);
    if (drm_mm_node_allocated(&vma->node))
        drm_mm_remove_node(&vma->node);
    spin_unlock(&aspace->lock);

    vma->iova = 0;
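The GPU address space gets the same treatment as the VRAM carveout: aspace->lock (a spinlock) now covers drm_mm node allocation and removal in allocate_iova(), msm_gem_map_vma() and msm_gem_unmap_vma(), and the "already allocated" check moves inside the critical section so two racing mappers cannot both insert a node for the same vma. A hedged sketch of that check-under-the-lock ordering (demo_* names are placeholders; drm_mm_insert_node() uses the pre-4.11 signature of this tree):

```c
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <drm/drm_mm.h>

struct demo_aspace {
    struct drm_mm mm;
    spinlock_t lock;
};

static int demo_allocate_iova(struct demo_aspace *as, struct drm_mm_node *node,
                              u64 npages, u64 *iova)
{
    int ret;

    spin_lock(&as->lock);

    /* re-check under the lock: a racing caller may have mapped it first */
    if (drm_mm_node_allocated(node)) {
        spin_unlock(&as->lock);
        return 0;
    }

    ret = drm_mm_insert_node(&as->mm, node, npages, 0,
            DRM_MM_SEARCH_DEFAULT);

    spin_unlock(&as->lock);

    if (!ret && iova)
        *iova = node->start << PAGE_SHIFT;   /* node units are pages */

    return ret;
}
```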
@@ -580,8 +580,7 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)

        /* ring takes a reference to the bo and iova: */
        drm_gem_object_reference(&msm_obj->base);
        msm_gem_get_iova_locked(&msm_obj->base,
                aspace, &iova);
        msm_gem_get_iova(&msm_obj->base, aspace, &iova);
    }

    if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)

@@ -890,10 +889,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,

    /* Create ringbuffer(s): */
    for (i = 0; i < nr_rings; i++) {
        mutex_lock(&drm->struct_mutex);
        gpu->rb[i] = msm_ringbuffer_new(gpu, i);
        mutex_unlock(&drm->struct_mutex);

        gpu->rb[i] = msm_ringbuffer_new(gpu, i);
        if (IS_ERR(gpu->rb[i])) {
            ret = PTR_ERR(gpu->rb[i]);
            gpu->rb[i] = NULL;

@@ -310,7 +310,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
        uint64_t iova = submit->cmd[i].iova;
        uint32_t szd = submit->cmd[i].size; /* in dwords */
        struct msm_gem_object *obj = submit->bos[idx].obj;
        const char *buf = msm_gem_vaddr_locked(&obj->base);
        const char *buf = msm_gem_vaddr(&obj->base);

        buf += iova - submit->bos[idx].iova;

@@ -34,14 +34,15 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id)

    ring->gpu = gpu;
    ring->id = id;
    ring->bo = msm_gem_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ, MSM_BO_WC);
    ring->bo = msm_gem_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
        MSM_BO_WC);
    if (IS_ERR(ring->bo)) {
        ret = PTR_ERR(ring->bo);
        ring->bo = NULL;
        goto fail;
    }

    ring->start = msm_gem_vaddr_locked(ring->bo);
    ring->start = msm_gem_vaddr(ring->bo);
    ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
    ring->next = ring->start;
    ring->cur = ring->start;
@@ -367,6 +367,8 @@ struct arm_smmu_device {
    u32 num_mapping_groups;
    DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

    u32 ubs;

    unsigned long va_size;
    unsigned long ipa_size;
    unsigned long pa_size;

@@ -1756,6 +1758,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
{
    int irq, start, ret = 0;
    unsigned long ias, oas;
    int sep = 0;
    struct io_pgtable_ops *pgtbl_ops;
    enum io_pgtable_fmt fmt;
    struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

@@ -1797,9 +1800,27 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
        start = smmu->num_s2_context_banks;
        ias = smmu->va_size;
        oas = smmu->ipa_size;
        if (IS_ENABLED(CONFIG_64BIT))
        if (IS_ENABLED(CONFIG_64BIT)) {
            fmt = ARM_64_LPAE_S1;
        else

            if (quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) {

                /*
                 * When the UBS id is 5 we know that the bus
                 * size is 49 bits and that bit 48 is the fixed
                 * sign extension bit. For any other bus size
                 * we need to specify the sign extension bit
                 * and adjust the input size accordingly
                 */

                if (smmu->ubs == 5) {
                    sep = 48;
                } else {
                    sep = ias - 1;
                    ias--;
                }
            }
        } else
            fmt = ARM_32_LPAE_S1;
        break;
    case ARM_SMMU_DOMAIN_NESTED:
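The TTBR1 quirk has to tell the page-table code where the sign-extension bit sits: if the SMMU reports UBS == 5 the upstream bus is 49 bits wide and bit 48 is the fixed sign bit; otherwise the top bit of the input range is sacrificed as the sign bit and the usable IAS shrinks by one. A small hedged sketch of that calculation as a standalone helper (not the driver function):

```c
#include <linux/types.h>

struct ttbr1_geom {
    unsigned long ias;  /* usable input address size */
    int sep;            /* sign extension bit position */
};

/* Mirrors the sep/ias logic in the hunk above; illustrative only. */
static struct ttbr1_geom pick_sign_extension_bit(unsigned long ias, u32 ubs)
{
    struct ttbr1_geom g = { .ias = ias, .sep = 0 };

    if (ubs == 5) {
        /* 49-bit upstream bus: bit 48 is architecturally the sign bit */
        g.sep = 48;
    } else {
        /* otherwise steal the top input bit as the sign bit */
        g.sep = ias - 1;
        g.ias = ias - 1;
    }

    return g;
}

/* e.g. ias = 48, ubs = 5  -> sep = 48, ias stays 48
 *      ias = 39, ubs != 5 -> sep = 38, ias becomes 38 */
```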
@@ -1861,6 +1882,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
        .pgsize_bitmap = arm_smmu_ops.pgsize_bitmap,
        .ias = ias,
        .oas = oas,
        .sep = sep,
        .tlb = &arm_smmu_gather_ops,
        .iommu_dev = smmu->dev,
        .iova_base = domain->geometry.aperture_start,

@@ -3896,12 +3918,13 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
        smmu->va_size = smmu->ipa_size;
        size = SZ_4K | SZ_2M | SZ_1G;
    } else {
        size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
        smmu->va_size = arm_smmu_id_size_to_bits(size);
        smmu->ubs = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;

        smmu->va_size = arm_smmu_id_size_to_bits(smmu->ubs);
#ifndef CONFIG_64BIT
        smmu->va_size = min(32UL, smmu->va_size);
#endif
        smmu->va_size = min(36UL, smmu->va_size);
        smmu->va_size = min(39UL, smmu->va_size);
        size = 0;
        if (id & ID2_PTFS_4K)
            size |= SZ_4K | SZ_2M | SZ_1G;

@@ -550,9 +550,18 @@ static inline arm_lpae_iopte *arm_lpae_get_table(
{
    struct io_pgtable_cfg *cfg = &data->iop.cfg;

    return ((cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) &&
        (iova & (1UL << (cfg->ias - 1)))) ?
        data->pgd[1] : data->pgd[0];
    /*
     * iovas for TTBR1 will have all the bits set between the input address
     * region and the sign extension bit
     */
    if (unlikely(cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)) {
        unsigned long mask = GENMASK(cfg->sep, cfg->ias);

        if ((iova & mask) == mask)
            return data->pgd[1];
    }

    return data->pgd[0];
}

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
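Table selection changes from testing a single bit (ias - 1) to requiring every bit between the usable input size and the sign-extension bit to be set, which is what a sign-extended TTBR1 address looks like. A hedged, self-contained illustration of the mask test; the values in the comment are examples, not taken from the driver:

```c
#include <linux/bitops.h>
#include <linux/types.h>

/*
 * With ias = 39 and sep = 48, GENMASK_ULL(48, 39) covers bits 39..48
 * inclusive; a kernel-side (TTBR1) iova must have all of them set.
 */
static bool demo_iova_uses_ttbr1(u64 iova, unsigned int ias, unsigned int sep)
{
    u64 mask = GENMASK_ULL(sep, ias);

    return (iova & mask) == mask;
}

/*
 * Example: iova = 0xffffff8000001000, ias = 39, sep = 48
 *   mask  = 0x0001ff8000000000 -> bits 39..48 are all set in the iova,
 *   so the walk starts from pgd[1] (TTBR1); a user-side iova such as
 *   0x0000000080001000 fails the test and stays on pgd[0].
 */
```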
@@ -1089,26 +1098,26 @@ static u64 arm64_lpae_setup_ttbr1(struct io_pgtable_cfg *cfg,
    /* Set T1SZ */
    reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T1SZ_SHIFT;

    /* Set the SEP bit based on the size */
    switch (cfg->ias) {
    case 32:
    switch (cfg->sep) {
    case 31:
        reg |= (ARM_LPAE_TCR_SEP_31 << ARM_LPAE_TCR_SEP_SHIFT);
        break;
    case 36:
    case 35:
        reg |= (ARM_LPAE_TCR_SEP_35 << ARM_LPAE_TCR_SEP_SHIFT);
        break;
    case 40:
    case 39:
        reg |= (ARM_LPAE_TCR_SEP_39 << ARM_LPAE_TCR_SEP_SHIFT);
        break;
    case 42:
    case 41:
        reg |= (ARM_LPAE_TCR_SEP_41 << ARM_LPAE_TCR_SEP_SHIFT);
        break;
    case 44:
    case 43:
        reg |= (ARM_LPAE_TCR_SEP_43 << ARM_LPAE_TCR_SEP_SHIFT);
        break;
    case 48:
    case 47:
        reg |= (ARM_LPAE_TCR_SEP_47 << ARM_LPAE_TCR_SEP_SHIFT);
        break;
    case 48:
    default:
        reg |= (ARM_LPAE_TCR_SEP_UPSTREAM << ARM_LPAE_TCR_SEP_SHIFT);
        break;

@@ -67,6 +67,7 @@ struct io_pgtable_cfg {
    unsigned long pgsize_bitmap;
    unsigned int ias;
    unsigned int oas;
    int sep;
    const struct iommu_gather_ops *tlb;
    struct device *iommu_dev;
    dma_addr_t iova_base;