xtensa: support DMA to high memory
- don't bugcheck if high memory page is passed to xtensa_map_page;
- turn empty dcache flush macros into functions so that they could be
  passed as function parameters;
- use kmap_atomic to map high memory pages for cache invalidation/flushing
  performed by xtensa_sync_single_for_{cpu,device}.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
parent e2b31f7540
commit c7ca9fe17b

2 changed files with 39 additions and 17 deletions
arch/xtensa/include/asm/cacheflush.h

@@ -55,8 +55,13 @@ extern void __flush_dcache_range(unsigned long, unsigned long);
 extern void __flush_invalidate_dcache_page(unsigned long);
 extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
 #else
-# define __flush_dcache_range(p,s)	do { } while(0)
-# define __flush_dcache_page(p)	do { } while(0)
+static inline void __flush_dcache_page(unsigned long va)
+{
+}
+static inline void __flush_dcache_range(unsigned long va, unsigned long sz)
+{
+}
+# define __flush_invalidate_dcache_all()	__invalidate_dcache_all()
 # define __flush_invalidate_dcache_page(p)	__invalidate_dcache_page(p)
 # define __flush_invalidate_dcache_range(p,s)	__invalidate_dcache_range(p,s)
 #endif
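The header change above matters because do_cache_op() (added below) takes the cache primitive as a function pointer: a statement macro like do { } while (0) expands at the call site and has no address, while a static inline function does. A minimal userspace sketch of the distinction -- not part of the commit, all names here are invented for illustration:

#include <stdio.h>

/* A statement macro expands to code at the call site; it is not a
 * value, so it cannot be passed as a function argument. */
#define flush_range_macro(p, s)	do { } while (0)

/* A static inline function has a real function type and an address,
 * so it can be handed around as a pointer, like the converted
 * __flush_dcache_range() above. */
static inline void flush_range_fn(unsigned long va, unsigned long sz)
{
}

static void apply_op(unsigned long va, unsigned long sz,
		     void (*fn)(unsigned long, unsigned long))
{
	fn(va, sz);	/* calls whichever primitive the caller picked */
}

int main(void)
{
	/* apply_op(0, 0, flush_range_macro);  -- would not compile:
	 * the macro name is not a value. */
	apply_op(0, 0, flush_range_fn);		/* fine */
	puts("ok");
	return 0;
}

The __flush_invalidate_dcache_* aliases can stay macros because nothing in this commit passes them as parameters.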
arch/xtensa/kernel/pci-dma.c

@@ -15,14 +15,15 @@
  * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
  */

-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/pci.h>
 #include <linux/gfp.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
 #include <linux/module.h>
-#include <asm/io.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/types.h>
 #include <asm/cacheflush.h>
+#include <asm/io.h>

 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		    enum dma_data_direction dir)
@@ -47,17 +48,36 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 }
 EXPORT_SYMBOL(dma_cache_sync);

+static void do_cache_op(dma_addr_t dma_handle, size_t size,
+			void (*fn)(unsigned long, unsigned long))
+{
+	unsigned long off = dma_handle & (PAGE_SIZE - 1);
+	unsigned long pfn = PFN_DOWN(dma_handle);
+	struct page *page = pfn_to_page(pfn);
+
+	if (!PageHighMem(page))
+		fn((unsigned long)bus_to_virt(dma_handle), size);
+	else
+		while (size > 0) {
+			size_t sz = min_t(size_t, size, PAGE_SIZE - off);
+			void *vaddr = kmap_atomic(page);
+
+			fn((unsigned long)vaddr + off, sz);
+			kunmap_atomic(vaddr);
+			off = 0;
+			++page;
+			size -= sz;
+		}
+}
+
 static void xtensa_sync_single_for_cpu(struct device *dev,
 				       dma_addr_t dma_handle, size_t size,
 				       enum dma_data_direction dir)
 {
-	void *vaddr;
-
 	switch (dir) {
 	case DMA_BIDIRECTIONAL:
 	case DMA_FROM_DEVICE:
-		vaddr = bus_to_virt(dma_handle);
-		__invalidate_dcache_range((unsigned long)vaddr, size);
+		do_cache_op(dma_handle, size, __invalidate_dcache_range);
 		break;

 	case DMA_NONE:
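do_cache_op() splits the request at page boundaries: the first chunk is clamped to PAGE_SIZE - off, every later chunk starts at offset 0, and each high memory page is mapped with kmap_atomic() only for the duration of the cache call. The chunking arithmetic in isolation, as a runnable userspace sketch (hypothetical names, fixed 4 KiB pages assumed, kernel APIs replaced by a printing callback):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

static void chunked_op(unsigned long addr, size_t size,
		       void (*fn)(unsigned long, unsigned long))
{
	unsigned long off = addr & (PAGE_SIZE - 1);

	while (size > 0) {
		/* Clamp each step to the end of the current page, as
		 * do_cache_op() does before kmap_atomic(). */
		size_t sz = (size < PAGE_SIZE - off) ? size
						     : PAGE_SIZE - off;

		fn(addr, sz);
		addr += sz;	/* next chunk starts on the next page */
		off = 0;	/* only the first chunk can be misaligned */
		size -= sz;
	}
}

static void show(unsigned long va, unsigned long sz)
{
	printf("op on [%#lx, %#lx)\n", va, va + sz);
}

int main(void)
{
	/* A transfer that starts mid-page and touches four pages. */
	chunked_op(0x1f80, 0x2100, show);
	return 0;
}

Run on a transfer that starts mid-page, this prints one clamped head chunk, two full pages, and a tail chunk, mirroring how do_cache_op() walks consecutive struct page entries.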
@@ -73,13 +93,11 @@ static void xtensa_sync_single_for_device(struct device *dev,
 				       dma_addr_t dma_handle, size_t size,
 				       enum dma_data_direction dir)
 {
-	void *vaddr;
-
 	switch (dir) {
 	case DMA_BIDIRECTIONAL:
 	case DMA_TO_DEVICE:
-		vaddr = bus_to_virt(dma_handle);
 		if (XCHAL_DCACHE_IS_WRITEBACK)
-			__flush_dcache_range((unsigned long)vaddr, size);
+			do_cache_op(dma_handle, size, __flush_dcache_range);
 		break;

 	case DMA_NONE:
@@ -171,7 +189,6 @@ static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
 {
 	dma_addr_t dma_handle = page_to_phys(page) + offset;

-	BUG_ON(PageHighMem(page));
 	xtensa_sync_single_for_device(dev, dma_handle, size, dir);
 	return dma_handle;
 }
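With the BUG_ON() removed, a high memory page can be handed straight to the streaming DMA API on xtensa; the sync hooks above perform the cache maintenance through do_cache_op(). A hypothetical driver-side fragment showing the call pattern -- not from this commit, example_send_page is an invented name:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Map a high memory page for a device-bound transfer. Before this
 * change, the BUG_ON() in xtensa_map_page() made this path fatal. */
static int example_send_page(struct device *dev)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
	dma_addr_t dma;

	if (!page)
		return -ENOMEM;

	/* xtensa_map_page() calls xtensa_sync_single_for_device(),
	 * which now flushes via do_cache_op()/kmap_atomic(). */
	dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		__free_page(page);
		return -ENOMEM;
	}

	/* ... hand dma to the device, wait for the transfer ... */

	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
	__free_page(page);
	return 0;
}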