Merge "dma-mapping: add i/o coherency support for 32bit"
commit e56d3280a5
1 changed file with 138 additions and 42 deletions
@@ -67,7 +67,8 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
 
 static void __dma_free_remap(void *cpu_addr, size_t size, bool no_warn);
 
-static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot);
+static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
+					bool coherent);
 
 static void *arm_dma_remap(struct device *dev, void *cpu_addr,
 			   dma_addr_t handle, size_t size,
@@ -76,6 +77,33 @@ static void *arm_dma_remap(struct device *dev, void *cpu_addr,
 static void arm_dma_unremap(struct device *dev, void *remapped_addr,
 			    size_t size);
 
+static bool is_dma_coherent(struct device *dev, struct dma_attrs *attrs)
+{
+	bool is_coherent;
+
+	if (dma_get_attr(DMA_ATTR_FORCE_COHERENT, attrs))
+		is_coherent = true;
+	else if (dma_get_attr(DMA_ATTR_FORCE_NON_COHERENT, attrs))
+		is_coherent = false;
+	else if (is_device_dma_coherent(dev))
+		is_coherent = true;
+	else
+		is_coherent = false;
+
+	return is_coherent;
+}
+
+static int __get_iommu_pgprot(struct dma_attrs *attrs, int prot,
+			      bool coherent)
+{
+	if (!dma_get_attr(DMA_ATTR_EXEC_MAPPING, attrs))
+		prot |= IOMMU_NOEXEC;
+	if (coherent)
+		prot |= IOMMU_CACHE;
+
+	return prot;
+}
+
 /**
  * arm_dma_map_page - map a portion of a page for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
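
Note on the two helpers added above: an explicit DMA_ATTR_FORCE_COHERENT or DMA_ATTR_FORCE_NON_COHERENT attribute takes precedence over what is_device_dma_coherent() reports, so a driver can override the bus/DT coherency setting per allocation. A minimal caller-side sketch, assuming this tree's struct dma_attrs API (DEFINE_DMA_ATTRS/dma_set_attr, as in pre-4.8 kernels); the helper name is hypothetical:

    #include <linux/dma-mapping.h>
    #include <linux/dma-attrs.h>

    /* Hypothetical helper: force a coherent (cacheable) allocation for one
     * buffer, regardless of the device's dma-coherent DT property. */
    static void *alloc_forced_coherent(struct device *dev, size_t size,
                                       dma_addr_t *handle)
    {
            DEFINE_DMA_ATTRS(attrs);

            dma_set_attr(DMA_ATTR_FORCE_COHERENT, &attrs);
            /* is_dma_coherent(dev, &attrs) now evaluates to true */
            return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
    }
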
@@ -245,7 +273,7 @@ static u64 get_coherent_dma_mask(struct device *dev)
 }
 
 static void __dma_clear_buffer(struct page *page, size_t size,
-				struct dma_attrs *attrs)
+				struct dma_attrs *attrs, bool is_coherent)
 {
 	/*
 	 * Ensure that the allocated pages are zeroed, and that any data
@@ -258,18 +286,22 @@ static void __dma_clear_buffer(struct page *page, size_t size,
 			void *ptr = kmap_atomic(page);
 			if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs))
 				memset(ptr, 0, PAGE_SIZE);
-			dmac_flush_range(ptr, ptr + PAGE_SIZE);
+			if (!is_coherent)
+				dmac_flush_range(ptr, ptr + PAGE_SIZE);
 			kunmap_atomic(ptr);
 			page++;
 			size -= PAGE_SIZE;
 		}
-		outer_flush_range(base, end);
+		if (!is_coherent)
+			outer_flush_range(base, end);
 	} else {
 		void *ptr = page_address(page);
 		if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs))
 			memset(ptr, 0, size);
-		dmac_flush_range(ptr, ptr + size);
-		outer_flush_range(__pa(ptr), __pa(ptr) + size);
+		if (!is_coherent) {
+			dmac_flush_range(ptr, ptr + size);
+			outer_flush_range(__pa(ptr), __pa(ptr) + size);
+		}
 	}
 }
 
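
The hunk above gates CPU cache maintenance on coherency: when the device snoops the CPU caches, the freshly zeroed buffer is already visible to it, so both the inner flush (dmac_flush_range) and the outer/L2 flush (outer_flush_range) can be skipped. For reference, a sketch of the maintenance that still runs in the non-coherent case; the wrapper name is hypothetical, the two helpers are the same arch-internal ones the hunk touches:

    /* Non-coherent case: push the zeroed lines out of the CPU caches so
     * the device reads zeros rather than stale memory contents. */
    static void clean_buffer_for_device(void *ptr, size_t size)
    {
            dmac_flush_range(ptr, ptr + size);              /* inner caches */
            outer_flush_range(__pa(ptr), __pa(ptr) + size); /* outer (L2) cache */
    }
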
@@ -277,7 +309,8 @@ static void __dma_clear_buffer(struct page *page, size_t size,
  * Allocate a DMA buffer for 'dev' of size 'size' using the
  * specified gfp mask. Note that 'size' must be page aligned.
  */
-static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
+				       gfp_t gfp, bool coherent)
 {
 	unsigned long order = get_order(size);
 	struct page *page, *p, *e;
@@ -293,7 +326,7 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
 	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
 		__free_page(p);
 
-	__dma_clear_buffer(page, size, NULL);
+	__dma_clear_buffer(page, size, NULL, coherent);
 
 	return page;
 }
@@ -527,7 +560,7 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
 {
 	struct page *page;
 	void *ptr = NULL;
-	page = __dma_alloc_buffer(dev, size, gfp);
+	page = __dma_alloc_buffer(dev, size, gfp, false);
 	if (!page)
 		return NULL;
 	if (!want_vaddr)
@@ -602,7 +635,7 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
 	 */
 	if (!(dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs) &&
 	      dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs)))
-		__dma_clear_buffer(page, size, attrs);
+		__dma_clear_buffer(page, size, attrs, false);
 
 	if (PageHighMem(page)) {
 		if (!want_vaddr) {
@@ -643,15 +676,13 @@ static void __free_from_contiguous(struct device *dev, struct page *page,
 	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 }
 
-static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
+static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
+					bool coherent)
 {
-	if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
-		prot = pgprot_writecombine(prot);
-	else if (dma_get_attr(DMA_ATTR_STRONGLY_ORDERED, attrs))
+	if (dma_get_attr(DMA_ATTR_STRONGLY_ORDERED, attrs))
 		prot = pgprot_stronglyordered(prot);
-	/* if non-consistent just pass back what was given */
-	else if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
-		prot = pgprot_dmacoherent(prot);
+	else if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+		prot = pgprot_writecombine(prot);
 
 	return prot;
 }
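
The rework above changes the kernel-mapping policy: DMA_ATTR_STRONGLY_ORDERED still wins outright, but otherwise a non-coherent device (or an explicit DMA_ATTR_WRITE_COMBINE request) gets a write-combining, non-cacheable mapping, while a coherent device keeps the cacheable protection it was passed; the old pgprot_dmacoherent/DMA_ATTR_NON_CONSISTENT branch is gone. Restated as a standalone sketch for readability (hypothetical name, same logic as the reworked function):

    static pgprot_t pick_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
                                    bool coherent)
    {
            if (dma_get_attr(DMA_ATTR_STRONGLY_ORDERED, attrs))
                    return pgprot_stronglyordered(prot); /* device-like ordering */
            if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
                    return pgprot_writecombine(prot);    /* normal, non-cacheable */
            return prot;                                 /* coherent: stay cacheable */
    }
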
@@ -673,10 +704,10 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
 #endif	/* CONFIG_MMU */
 
 static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
-				   struct page **ret_page)
+				   struct page **ret_page, bool coherent)
 {
 	struct page *page;
-	page = __dma_alloc_buffer(dev, size, gfp);
+	page = __dma_alloc_buffer(dev, size, gfp, coherent);
 	if (!page)
 		return NULL;
 
@@ -724,12 +755,14 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
 
 	if (nommu())
-		addr = __alloc_simple_buffer(dev, size, gfp, &page);
+		addr = __alloc_simple_buffer(dev, size, gfp, &page,
+					     is_coherent);
 	else if (dev_get_cma_area(dev) && (gfp & __GFP_DIRECT_RECLAIM))
 		addr = __alloc_from_contiguous(dev, size, prot, &page,
 					       caller, attrs);
 	else if (is_coherent)
-		addr = __alloc_simple_buffer(dev, size, gfp, &page);
+		addr = __alloc_simple_buffer(dev, size, gfp, &page,
+					     is_coherent);
 	else if (!gfpflags_allow_blocking(gfp))
 		addr = __alloc_from_pool(size, &page);
 	else
@@ -749,7 +782,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 		    gfp_t gfp, struct dma_attrs *attrs)
 {
-	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
+	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);
 
 	return __dma_alloc(dev, size, handle, gfp, prot, false,
 			   attrs, __builtin_return_address(0));
@@ -792,8 +825,9 @@ static void *arm_dma_remap(struct device *dev, void *cpu_addr,
 			   struct dma_attrs *attrs)
 {
 	void *ptr;
+	bool is_coherent = is_dma_coherent(dev, attrs);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
-	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
+	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, is_coherent);
 	unsigned long offset = handle & ~PAGE_MASK;
 
 	size = PAGE_ALIGN(size + offset);
@@ -837,7 +871,8 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		 struct dma_attrs *attrs)
 {
 #ifdef CONFIG_MMU
-	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+					     false);
 #endif	/* CONFIG_MMU */
 	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
 }
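
Both arm_dma_alloc() and arm_dma_mmap() above pass coherent = false, keeping the legacy non-IOMMU paths on non-cacheable mappings. A sketch of a driver mmap handler that ends up in this path; the context struct and its fields are hypothetical:

    /* Hypothetical driver state; cpu_addr/handle come from dma_alloc_attrs(). */
    struct my_dev_ctx {
            struct device *dev;
            void *cpu_addr;
            dma_addr_t handle;
            size_t size;
    };

    static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
    {
            struct my_dev_ctx *ctx = file->private_data;

            /* Lands in arm_dma_mmap(), i.e. __get_dma_pgprot(..., false). */
            return dma_mmap_attrs(ctx->dev, vma, ctx->cpu_addr, ctx->handle,
                                  ctx->size, NULL);
    }
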
@@ -1238,6 +1273,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
 	size_t count = size >> PAGE_SHIFT;
 	size_t array_size = count * sizeof(struct page *);
 	int i = 0;
+	bool is_coherent = is_dma_coherent(dev, attrs);
 
 	if (array_size <= PAGE_SIZE)
 		pages = kzalloc(array_size, GFP_KERNEL);
@@ -1255,7 +1291,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
 		if (!page)
 			goto error;
 
-		__dma_clear_buffer(page, size, NULL);
+		__dma_clear_buffer(page, size, attrs, is_coherent);
 
 		for (i = 0; i < count; i++)
 			pages[i] = page + i;
@@ -1299,7 +1335,8 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
 				pages[i + j] = pages[i] + j;
 		}
 
-		__dma_clear_buffer(pages[i], PAGE_SIZE << order, NULL);
+		__dma_clear_buffer(pages[i], PAGE_SIZE << order, attrs,
+				   is_coherent);
 		i += 1 << order;
 		count -= 1 << order;
 	}
@@ -1353,16 +1390,20 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
  * Create a mapping in device IO address space for specified pages
  */
 static dma_addr_t
-__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
+__iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
+		       struct dma_attrs *attrs)
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	dma_addr_t dma_addr, iova;
 	int i;
+	int prot = IOMMU_READ | IOMMU_WRITE;
 
 	dma_addr = __alloc_iova(mapping, size);
 	if (dma_addr == DMA_ERROR_CODE)
 		return dma_addr;
+	prot = __get_iommu_pgprot(attrs, prot,
+				  is_dma_coherent(dev, attrs));
 
 	iova = dma_addr;
 	for (i = 0; i < count; ) {
@@ -1377,8 +1418,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
 			break;
 
 		len = (j - i) << PAGE_SHIFT;
-		ret = iommu_map(mapping->domain, iova, phys, len,
-				IOMMU_READ|IOMMU_WRITE);
+		ret = iommu_map(mapping->domain, iova, phys, len, prot);
 		if (ret < 0)
 			goto fail;
 		iova += len;
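
With prot now threaded through, a coherent master's page-table entries carry IOMMU_CACHE (and, unless DMA_ATTR_EXEC_MAPPING is set, IOMMU_NOEXEC). A minimal sketch of the same composition against the generic IOMMU API; domain, iova and paddr are placeholder values:

    #include <linux/iommu.h>

    static int map_one_page(struct iommu_domain *domain, unsigned long iova,
                            phys_addr_t paddr, bool coherent)
    {
            int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_NOEXEC;

            if (coherent)
                    prot |= IOMMU_CACHE; /* request coherent/cacheable transactions */

            return iommu_map(domain, iova, paddr, PAGE_SIZE, prot);
    }
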
@@ -1435,23 +1475,52 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
 }
 
 static void *__iommu_alloc_atomic(struct device *dev, size_t size,
-				  dma_addr_t *handle)
+				  dma_addr_t *handle, gfp_t gfp,
+				  struct dma_attrs *attrs)
 {
 	struct page *page;
+	struct page **pages;
+	size_t count = size >> PAGE_SHIFT;
+	size_t array_size = count * sizeof(struct page *);
 	void *addr;
+	int i;
+	bool coherent = is_dma_coherent(dev, attrs);
 
-	addr = __alloc_from_pool(size, &page);
-	if (!addr)
+	if (array_size <= PAGE_SIZE)
+		pages = kzalloc(array_size, gfp);
+	else
+		pages = vzalloc(array_size);
+
+	if (!pages)
 		return NULL;
 
-	*handle = __iommu_create_mapping(dev, &page, size);
+	if (coherent) {
+		page = alloc_pages(gfp, get_order(size));
+		addr = page ? page_address(page) : NULL;
+	} else {
+		addr = __alloc_from_pool(size, &page);
+	}
+
+	if (!addr)
+		goto err_free;
+
+	for (i = 0; i < count ; i++)
+		pages[i] = page + i;
+
+	*handle = __iommu_create_mapping(dev, pages, size, attrs);
 	if (*handle == DMA_ERROR_CODE)
 		goto err_mapping;
 
+	kvfree(pages);
 	return addr;
 
 err_mapping:
-	__free_from_pool(addr, size);
+	if (coherent)
+		__free_pages(page, get_order(size));
+	else
+		__free_from_pool(addr, size);
+err_free:
+	kvfree(pages);
 	return NULL;
 }
 
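
The atomic-path rework above matters because the atomic pool is mapped non-cacheable: a coherent master would otherwise be handed a non-cacheable buffer for no reason. Coherent allocations now come straight from alloc_pages() (plain cacheable memory is safe when the device snoops), and only non-coherent ones fall back to the pool. A caller-side sketch of an allocation that exercises this path from non-blocking context, for a device attached to an ARM IOMMU mapping; the helper name is hypothetical:

    static void *atomic_coherent_alloc(struct device *dev, size_t size,
                                       dma_addr_t *handle)
    {
            DEFINE_DMA_ATTRS(attrs);

            dma_set_attr(DMA_ATTR_FORCE_COHERENT, &attrs);
            /* GFP_ATOMIC -> !gfpflags_allow_blocking() -> __iommu_alloc_atomic() */
            return dma_alloc_attrs(dev, size, handle, GFP_ATOMIC, &attrs);
    }
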
@@ -1465,7 +1534,9 @@ static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
-	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
+	bool coherent = is_dma_coherent(dev, attrs);
+	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
 	struct page **pages;
 	void *addr = NULL;
 
@@ -1473,7 +1544,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	size = PAGE_ALIGN(size);
 
 	if (!gfpflags_allow_blocking(gfp))
-		return __iommu_alloc_atomic(dev, size, handle);
+		return __iommu_alloc_atomic(dev, size, handle, gfp, attrs);
 
 	/*
 	 * Following is a work-around (a.k.a. hack) to prevent pages
@@ -1488,7 +1559,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	if (!pages)
 		return NULL;
 
-	*handle = __iommu_create_mapping(dev, pages, size);
+	*handle = __iommu_create_mapping(dev, pages, size, attrs);
 	if (*handle == DMA_ERROR_CODE)
 		goto err_buffer;
 
@@ -1518,8 +1589,10 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
 	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	unsigned long off = vma->vm_pgoff;
+	bool coherent = is_dma_coherent(dev, attrs);
 
-	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+					     coherent);
 
 	if (!pages)
 		return -ENXIO;
@@ -1750,6 +1823,8 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
 		dev_err(dev, "Couldn't allocate iova for sg %p\n", sg);
 		return 0;
 	}
+	prot = __get_iommu_pgprot(attrs, prot,
+				  is_dma_coherent(dev, attrs));
 
 	ret = iommu_map_sg(mapping->domain, iova, sg, nents, prot);
 	if (ret != total_length) {
@@ -1836,6 +1911,13 @@ void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 {
 	struct scatterlist *s;
 	int i;
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = sg_dma_address(sg);
+	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, iova);
+
+	if (iova_coherent)
+		return;
+
 
 	for_each_sg(sg, s, nents, i)
 		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
@@ -1854,6 +1936,12 @@ void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 {
 	struct scatterlist *s;
 	int i;
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = sg_dma_address(sg);
+	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, iova);
+
+	if (iova_coherent)
+		return;
 
 	for_each_sg(sg, s, nents, i)
 		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
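
These two sync_sg hooks now short-circuit when the iova is mapped coherent, making dma_sync_sg_*() effectively free for coherent masters. The usual streaming pattern that reaches them, sketched with placeholder names:

    static void reread_dma_buffer(struct device *dev, struct scatterlist *sgl,
                                  int nents)
    {
            /* Take CPU ownership before reading device-written data ... */
            dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);
            /* ... CPU inspects the scatterlist pages here ... */
            /* ... then hand ownership back to the device. */
            dma_sync_sg_for_device(dev, sgl, nents, DMA_FROM_DEVICE);
    }
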
@@ -1887,6 +1975,8 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
 		return dma_addr;
 
 	prot = __dma_direction_to_prot(dir);
+	prot = __get_iommu_pgprot(attrs, prot,
+				  is_dma_coherent(dev, attrs));
 
 	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page) +
 			start_offset, len, prot);
@@ -1913,7 +2003,8 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     struct dma_attrs *attrs)
 {
-	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+	if (!is_dma_coherent(dev, attrs) &&
+	    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		__dma_page_cpu_to_dev(page, offset, size, dir);
 
 	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
@@ -1960,7 +2051,8 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 	int offset = handle & ~PAGE_MASK;
 	int len = PAGE_ALIGN(size + offset);
 
-	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+	if (!(is_dma_coherent(dev, attrs) ||
+	      dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)))
 		__dma_page_dev_to_cpu(page, offset, size, dir);
 
 	iommu_unmap(mapping->domain, iova, len);
@@ -1974,8 +2066,10 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
 	dma_addr_t iova = handle & PAGE_MASK;
 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 	unsigned int offset = handle & ~PAGE_MASK;
+	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, handle);
 
-	__dma_page_dev_to_cpu(page, offset, size, dir);
+	if (!iova_coherent)
+		__dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
 static void arm_iommu_sync_single_for_device(struct device *dev,
@@ -1985,8 +2079,10 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
 	dma_addr_t iova = handle & PAGE_MASK;
 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 	unsigned int offset = handle & ~PAGE_MASK;
+	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, handle);
 
-	__dma_page_cpu_to_dev(page, offset, size, dir);
+	if (!iova_coherent)
+		__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
 const struct dma_map_ops iommu_ops = {
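
The single-page sync hooks get the same treatment, so for a coherent master the whole streaming map/sync/unmap cycle reduces to iova management. The canonical cycle these ops back, sketched with a hypothetical helper:

    static int stream_one_page(struct device *dev, struct page *page, size_t len)
    {
            dma_addr_t handle;

            handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, handle))
                    return -ENOMEM;

            /* ... device reads from 'handle' here ... */

            dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
            return 0;
    }
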