Merge "defconfig: msm: add fastmap config on sdm660"
commit b9fe2c19b4
12 changed files with 95 additions and 55 deletions
@@ -498,6 +498,7 @@ CONFIG_MSM_MMCC_660=y
 CONFIG_CLOCK_CPU_OSM=y
 CONFIG_QCOM_MDSS_PLL=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
 CONFIG_ARM_SMMU=y
 CONFIG_IOMMU_DEBUG=y
 CONFIG_IOMMU_DEBUG_TRACKING=y
@@ -499,6 +499,8 @@ CONFIG_MSM_MMCC_660=y
 CONFIG_CLOCK_CPU_OSM=y
 CONFIG_QCOM_MDSS_PLL=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_IOMMU_IO_PGTABLE_FAST_SELFTEST=y
 CONFIG_ARM_SMMU=y
 CONFIG_IOMMU_DEBUG=y
 CONFIG_IOMMU_DEBUG_TRACKING=y
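Reviewer note: CONFIG_IOMMU_IO_PGTABLE_FAST turns on the "fastmap" io-pgtable format exercised by the driver changes below, and CONFIG_IOMMU_IO_PGTABLE_FAST_SELFTEST additionally runs av8l_fast_positive_testing() at init. A minimal sketch of how a client would opt into fastmap once these configs are set (DOMAIN_ATTR_FAST is the downstream attribute this series keys off; error handling trimmed, function name hypothetical):

    #include <linux/iommu.h>
    #include <linux/platform_device.h>
    #include <linux/sizes.h>
    #include <asm/dma-iommu.h>

    static int example_enable_fastmap(struct device *dev)
    {
            struct dma_iommu_mapping *mapping;
            int fast = 1;

            /* 4GB IOVA window, matching the tests in this series */
            mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
                                               SZ_1G * 4ULL - 1);
            if (!mapping)
                    return -ENOMEM;

            /* must happen before attach: arm_iommu_attach_device()
             * checks this attribute and diverts to the fastmap path */
            iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST,
                                  &fast);

            return arm_iommu_attach_device(dev, mapping);
    }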
@@ -178,6 +178,9 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
 extern void __dma_map_area(const void *, size_t, int);
 extern void __dma_unmap_area(const void *, size_t, int);
 
+extern void dmac_inv_range(const void *, const void *);
+extern void dmac_clean_range(const void *, const void *);
+extern void dmac_flush_range(const void *, const void *);
@@ -7,7 +7,7 @@
 #define ASMARM_DEVICE_H
 
 struct dev_archdata {
-	struct dma_map_ops	*dma_ops;
+	const struct dma_map_ops *dma_ops;
 #ifdef CONFIG_DMABOUNCE
 	struct dmabounce_device_info *dmabounce;
 #endif
@@ -8,6 +8,7 @@
 #include <linux/dma-debug.h>
 #include <linux/kmemcheck.h>
 #include <linux/kref.h>
+#include <linux/dma-mapping-fast.h>
 
 struct dma_iommu_mapping {
 	/* iommu specific data */
@@ -22,6 +23,8 @@ struct dma_iommu_mapping {
 
 	spinlock_t		lock;
 	struct kref		kref;
+
+	struct dma_fast_smmu_mapping *fast;
 };
 
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
@@ -17,14 +17,14 @@
 extern struct dma_map_ops arm_dma_ops;
 extern struct dma_map_ops arm_coherent_dma_ops;
 
-static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
 	if (dev && dev->archdata.dma_ops)
 		return dev->archdata.dma_ops;
 	return &arm_dma_ops;
 }
 
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	if (xen_initial_domain())
 		return xen_dma_ops;
@@ -32,7 +32,8 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return __generic_dma_ops(dev);
 }
 
-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+static inline void set_dma_ops(struct device *dev,
+			       const struct dma_map_ops *ops)
 {
 	BUG_ON(!dev);
 	dev->archdata.dma_ops = ops;
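Reviewer note: the constification hunks in this series are one change threaded through: once dev_archdata.dma_ops, the getters, and set_dma_ops() take const pointers, every ops table can be declared const and the linker places it in .rodata, so a stray runtime write can no longer silently redirect DMA callbacks. A sketch of what this permits (iommu_ops is the table made const later in this diff):

    extern const struct dma_map_ops iommu_ops;

    static void example_setup(struct device *dev)
    {
            /* compiles only because set_dma_ops() now accepts
             * a const struct dma_map_ops * */
            set_dma_ops(dev, &iommu_ops);
    }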
@@ -161,6 +161,12 @@ static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
 #define dmac_flush_range	__glue(_CACHE,_dma_flush_range)
+#define dmac_inv_range		__glue(_CACHE, _dma_inv_range)
+#define dmac_clean_range	__glue(_CACHE, _dma_clean_range)
+#define dmac_map_area		__glue(_CACHE, _dma_map_area)
+#define dmac_unmap_area		__glue(_CACHE, _dma_unmap_area)
 
+#define __dma_map_area		dmac_map_area
+#define __dma_unmap_area	dmac_unmap_area
 #define __dma_flush_range	dmac_flush_range
 #endif
 
 #endif
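Reviewer note: the dmac_* names are resolved per CPU by token pasting, so with _CACHE defined as v7 the new dmac_inv_range macro resolves to v7_dma_inv_range, and the matching assembly routines must exist for every cache type built in. The mechanism, as defined in asm/glue.h:

    #define ____glue(name, fn)      name##fn
    #define __glue(name, fn)        ____glue(name, fn)

    /* hence:
     *   dmac_inv_range -> __glue(v7, _dma_inv_range) -> v7_dma_inv_range
     */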
@@ -29,6 +29,7 @@
 #include <linux/sizes.h>
 #include <linux/cma.h>
 #include <linux/msm_dma_iommu_mapping.h>
+#include <linux/dma-mapping-fast.h>
 
 #include <asm/memory.h>
 #include <asm/highmem.h>
@@ -1019,7 +1020,7 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		enum dma_data_direction dir, struct dma_attrs *attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
 	int i, j;
 
@@ -1053,7 +1054,7 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 		enum dma_data_direction dir, struct dma_attrs *attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
 
 	int i;
@@ -1072,7 +1073,7 @@ void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 			int nents, enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
 	int i;
 
@@ -1091,7 +1092,7 @@ void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 			int nents, enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
 	int i;
 
@@ -1988,7 +1989,7 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
-struct dma_map_ops iommu_ops = {
+const struct dma_map_ops iommu_ops = {
 	.alloc		= arm_iommu_alloc_attrs,
 	.free		= arm_iommu_free_attrs,
 	.mmap		= arm_iommu_mmap_attrs,
@@ -2007,7 +2008,7 @@ struct dma_map_ops iommu_ops = {
 	.set_dma_mask	= arm_dma_set_mask,
 };
 
-struct dma_map_ops iommu_coherent_ops = {
+const struct dma_map_ops iommu_coherent_ops = {
 	.alloc		= arm_iommu_alloc_attrs,
 	.free		= arm_iommu_free_attrs,
 	.mmap		= arm_iommu_mmap_attrs,
@@ -2166,6 +2167,11 @@ int arm_iommu_attach_device(struct device *dev,
 {
 	int err;
 	int s1_bypass = 0;
+	int is_fast = 0;
+
+	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
+	if (is_fast)
+		return fast_smmu_attach_device(dev, mapping);
 
 	err = __arm_iommu_attach_device(dev, mapping);
 	if (err)
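Reviewer note: with this hunk, a domain created with DOMAIN_ATTR_FAST never reaches the regular IOMMU attach; fast_smmu_attach_device() builds the preallocated fastmap page table and installs its own dma_map_ops instead. Note the attribute is read into a plain int that is deliberately pre-initialized, since iommu_domain_get_attr() may fail without writing it. The pattern, extracted as a sketch (helper name hypothetical):

    static bool example_domain_is_fast(struct iommu_domain *domain)
    {
            int is_fast = 0;        /* keep: get_attr can fail silently */

            iommu_domain_get_attr(domain, DOMAIN_ATTR_FAST, &is_fast);
            return is_fast;
    }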
@@ -2182,6 +2188,7 @@ EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
 static void __arm_iommu_detach_device(struct device *dev)
 {
 	struct dma_iommu_mapping *mapping;
+	int is_fast;
 
 	mapping = to_dma_iommu_mapping(dev);
 	if (!mapping) {
@@ -2191,6 +2198,9 @@ static void __arm_iommu_detach_device(struct device *dev)
 
 	if (msm_dma_unmap_all_for_dev(dev))
 		dev_warn(dev, "IOMMU detach with outstanding mappings\n");
+	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
+	if (is_fast)
+		return fast_smmu_detach_device(dev, mapping);
 
 	iommu_detach_device(mapping->domain, dev);
 	kref_put(&mapping->kref, release_iommu_mapping);
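Reviewer nit: unlike the attach path, the detach path declares is_fast without an initializer (previous hunk); if iommu_domain_get_attr() returns an error without writing it, the branch above reads an uninitialized value. Suggested:

    int is_fast = 0;        /* mirror the attach path */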
@@ -2226,7 +2236,7 @@ void arm_iommu_detach_device(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
 
-static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
+static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
 {
 	return coherent ? &iommu_coherent_ops : &iommu_ops;
 }
@@ -2289,7 +2299,7 @@ static struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			struct iommu_ops *iommu, bool coherent)
 {
-	struct dma_map_ops *dma_ops;
+	const struct dma_map_ops *dma_ops;
 
 	dev->archdata.dma_coherent = coherent;
 	if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
@@ -4,9 +4,6 @@
 #include <asm/glue-cache.h>
 
 #ifndef MULTI_CACHE
-#define dmac_map_area		__glue(_CACHE,_dma_map_area)
-#define dmac_unmap_area		__glue(_CACHE,_dma_unmap_area)
-
 /*
  * These are private to the dma-mapping API.  Do not use directly.
  * Their sole purpose is to ensure that data held in the cache
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,7 +17,8 @@
-#include <linux/vmalloc.h>
 #include <asm/cacheflush.h>
 #include <asm/dma-iommu.h>
 
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
 
 /* some redundant definitions... :( TODO: move to io-pgtable-fast.h */
 #define FAST_PAGE_SHIFT		12
@@ -632,7 +633,7 @@ static void __fast_smmu_mapped_over_stale(struct dma_fast_smmu_mapping *fast,
 	dev_err(fast->dev, "Mapped over stale tlb at %pa\n", &iova);
 	dev_err(fast->dev, "bitmap (failure at idx %lu):\n", bitmap_idx);
 	dev_err(fast->dev, "ptep: %p pmds: %p diff: %lu\n", ptep,
-		fast->pgtbl_pmds, ptep - fast->pgtbl_pmds);
+		fast->pgtbl_pmds, bitmap_idx);
 	print_hex_dump(KERN_ERR, "bmap: ", DUMP_PREFIX_ADDRESS,
 		       32, 8, fast->bitmap, fast->bitmap_size, false);
 }
@@ -682,7 +683,7 @@ static const struct dma_map_ops fast_smmu_dma_ops = {
  * fast_smmu_attach_device function.
  */
 static struct dma_fast_smmu_mapping *__fast_smmu_create_mapping_sized(
-	dma_addr_t base, size_t size)
+	dma_addr_t base, u64 size)
 {
 	struct dma_fast_smmu_mapping *fast;
 
@@ -725,7 +726,7 @@ int fast_smmu_attach_device(struct device *dev,
 	int atomic_domain = 1;
 	struct iommu_domain *domain = mapping->domain;
 	struct iommu_pgtbl_info info;
-	size_t size = mapping->bits << PAGE_SHIFT;
+	u64 size = (u64)mapping->bits << PAGE_SHIFT;
 
 	if (mapping->base + size > (SZ_1G * 4ULL))
 		return -EINVAL;
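Reviewer note: the size_t -> u64 changes in this file are 32-bit correctness fixes, not style. On 32-bit ARM sizeof(size_t) == 4, so a full 4GB span truncates to 0; and without the (u64) cast the shift would still be done in 32-bit arithmetic before widening. Illustration, assuming a 4GB window (mapping->bits == 0x100000, PAGE_SHIFT == 12) on a 32-bit build:

    size_t bad   = mapping->bits << PAGE_SHIFT;      /* wraps to 0          */
    u64 also_bad = mapping->bits << PAGE_SHIFT;      /* shift done in 32 bits,
                                                        then widened: still 0 */
    u64 good     = (u64)mapping->bits << PAGE_SHIFT; /* 0x100000000         */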
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,7 @@
 #include <linux/types.h>
 #include <linux/io-pgtable-fast.h>
 #include <asm/cacheflush.h>
+#include <linux/vmalloc.h>
 
 #include "io-pgtable.h"
 
@@ -263,11 +264,18 @@ static size_t av8l_fast_unmap(struct io_pgtable_ops *ops, unsigned long iova,
 	return size;
 }
 
+#if defined(CONFIG_ARM64)
+#define FAST_PGDNDX(va) (((va) & 0x7fc0000000) >> 27)
+#elif defined(CONFIG_ARM)
+#define FAST_PGDNDX(va) (((va) & 0xc0000000) >> 27)
+#endif
+
 static phys_addr_t av8l_fast_iova_to_phys(struct io_pgtable_ops *ops,
 					  unsigned long iova)
 {
 	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
 	av8l_fast_iopte pte, *pgdp, *pudp, *pmdp;
+	unsigned long pgd;
 	phys_addr_t phys;
 	const unsigned long pts = AV8L_FAST_PTE_TYPE_SHIFT;
 	const unsigned long ptm = AV8L_FAST_PTE_TYPE_MASK;
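Reviewer note: the shift by 27 rather than 30 folds the entry-size multiply into the mask. The PGD index is VA bits [38:30] on ARM64 (512 entries) or [31:30] on ARM LPAE (4 entries), and each entry is an 8-byte av8l_fast_iopte, so masking and shifting right by 27 yields index * 8, a byte offset that can be OR'd straight onto the table base. Equivalent spelling for the ARM64 case (hypothetical helper, for illustration only):

    #define FAST_PGDNDX_CHECK(va) \
            ((((va) >> 30) & 0x1ffULL) << 3)    /* index * sizeof(pte) */

    /* FAST_PGDNDX(va) == FAST_PGDNDX_CHECK(va) for any va */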
@@ -277,8 +285,9 @@ static phys_addr_t av8l_fast_iova_to_phys(struct io_pgtable_ops *ops,
 
 	/* TODO: clean up some of these magic numbers... */
 
-	pgdp = (av8l_fast_iopte *)
-		(((unsigned long)data->pgd) | ((iova & 0x7fc0000000) >> 27));
+	pgd = (unsigned long)data->pgd | FAST_PGDNDX(iova);
+	pgdp = (av8l_fast_iopte *)pgd;
+
 	pte = *pgdp;
 	if (((pte >> pts) & ptm) != ptt)
 		return 0;
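Reviewer note: besides readability, the old expression could not work on 32-bit — the constant 0x7fc0000000 exceeds a 32-bit unsigned long, and ARM LPAE only has the 2-bit PGD index. FAST_PGDNDX now picks a per-arch mask, and the intermediate pgd variable keeps the arithmetic in the native word size before the pointer cast. ARM case, worked through:

    /* ARM LPAE: only VA bits [31:30] index the 4-entry PGD, e.g.
     * FAST_PGDNDX(0xec000000) == (0xc0000000 >> 27) == 0x18,
     * i.e. entry 3 * 8 bytes */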
@@ -464,6 +473,9 @@ av8l_fast_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
 
 	reg |= (64ULL - cfg->ias) << AV8L_FAST_TCR_T0SZ_SHIFT;
 	reg |= AV8L_FAST_TCR_EPD1_FAULT << AV8L_FAST_TCR_EPD1_SHIFT;
+#if defined(CONFIG_ARM)
+	reg |= ARM_32_LPAE_TCR_EAE;
+#endif
 	cfg->av8l_fast_cfg.tcr = reg;
 
 	/* MAIRs */
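Reviewer note: the av8l ("ARMv8 long-descriptor") format this driver emits requires the long-descriptor translation scheme on 32-bit, which is what the EAE bit selects; on arm64 the long-descriptor format is the only one, so no flag is needed, hence the CONFIG_ARM-only guard. For reference, the constant as defined in the io-pgtable code:

    #define ARM_32_LPAE_TCR_EAE     (1 << 31)   /* TTBCR.EAE */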
@@ -549,7 +561,7 @@ static bool av8l_fast_range_has_specific_mapping(struct io_pgtable_ops *ops,
 						 const phys_addr_t phys_start,
 						 const size_t size)
 {
-	unsigned long iova = iova_start;
+	u64 iova = iova_start;
 	phys_addr_t phys = phys_start;
 
 	while (iova < (iova_start + size)) {
@@ -565,11 +577,12 @@ static bool av8l_fast_range_has_specific_mapping(struct io_pgtable_ops *ops,
 static int __init av8l_fast_positive_testing(void)
 {
 	int failed = 0;
-	unsigned long iova;
+	u64 iova;
 	struct io_pgtable_ops *ops;
 	struct io_pgtable_cfg cfg;
 	struct av8l_fast_io_pgtable *data;
 	av8l_fast_iopte *pmds;
+	u64 max = SZ_1G * 4ULL - 1;
 
 	cfg = (struct io_pgtable_cfg) {
 		.quirks = 0,
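Reviewer note: the unsigned long -> u64 moves in this selftest are the heart of the 32-bit fix. With a 32-bit unsigned long, SZ_1G * 4UL wraps to 0, so every "iova < SZ_1G * 4UL" loop would run zero times and the selftest would trivially pass. The new bound is inclusive, and still covers the whole space (the last 4K iteration starts at 0xfffff000, which is below max):

    unsigned long bad = SZ_1G * 4UL;    /* on 32-bit: 0x100000000 wraps to 0 */
    u64 max = SZ_1G * 4ULL - 1;         /* 0xffffffff, always representable  */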
@@ -589,19 +602,18 @@ static int __init av8l_fast_positive_testing(void)
 	pmds = data->pmds;
 
 	/* map the entire 4GB VA space with 4K map calls */
-	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_4K) {
+	for (iova = 0; iova < max; iova += SZ_4K) {
 		if (WARN_ON(ops->map(ops, iova, iova, SZ_4K, IOMMU_READ))) {
 			failed++;
 			continue;
 		}
 	}
-
 	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
-							  SZ_1G * 4UL)))
+							  max)))
 		failed++;
 
 	/* unmap it all */
-	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_4K) {
+	for (iova = 0; iova < max; iova += SZ_4K) {
 		if (WARN_ON(ops->unmap(ops, iova, SZ_4K) != SZ_4K))
 			failed++;
 	}
|
|||
av8l_fast_clear_stale_ptes(pmds, false);
|
||||
|
||||
/* map the entire 4GB VA space with 8K map calls */
|
||||
for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_8K) {
|
||||
for (iova = 0; iova < max; iova += SZ_8K) {
|
||||
if (WARN_ON(ops->map(ops, iova, iova, SZ_8K, IOMMU_READ))) {
|
||||
failed++;
|
||||
continue;
|
||||
|
@ -618,11 +630,11 @@ static int __init av8l_fast_positive_testing(void)
|
|||
}
|
||||
|
||||
if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
|
||||
SZ_1G * 4UL)))
|
||||
max)))
|
||||
failed++;
|
||||
|
||||
/* unmap it all with 8K unmap calls */
|
||||
for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_8K) {
|
||||
for (iova = 0; iova < max; iova += SZ_8K) {
|
||||
if (WARN_ON(ops->unmap(ops, iova, SZ_8K) != SZ_8K))
|
||||
failed++;
|
||||
}
|
||||
|
@@ -631,7 +643,7 @@ static int __init av8l_fast_positive_testing(void)
 	av8l_fast_clear_stale_ptes(pmds, false);
 
 	/* map the entire 4GB VA space with 16K map calls */
-	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_16K) {
+	for (iova = 0; iova < max; iova += SZ_16K) {
 		if (WARN_ON(ops->map(ops, iova, iova, SZ_16K, IOMMU_READ))) {
 			failed++;
 			continue;
@@ -639,11 +651,11 @@ static int __init av8l_fast_positive_testing(void)
 	}
 
 	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
-							  SZ_1G * 4UL)))
+							  max)))
 		failed++;
 
 	/* unmap it all */
-	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_16K) {
+	for (iova = 0; iova < max; iova += SZ_16K) {
 		if (WARN_ON(ops->unmap(ops, iova, SZ_16K) != SZ_16K))
 			failed++;
 	}
@@ -652,7 +664,7 @@ static int __init av8l_fast_positive_testing(void)
 	av8l_fast_clear_stale_ptes(pmds, false);
 
 	/* map the entire 4GB VA space with 64K map calls */
-	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_64K) {
+	for (iova = 0; iova < max; iova += SZ_64K) {
 		if (WARN_ON(ops->map(ops, iova, iova, SZ_64K, IOMMU_READ))) {
 			failed++;
 			continue;
@@ -660,11 +672,11 @@ static int __init av8l_fast_positive_testing(void)
 	}
 
 	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
-							  SZ_1G * 4UL)))
+							  max)))
 		failed++;
 
 	/* unmap it all at once */
-	if (WARN_ON(ops->unmap(ops, 0, SZ_1G * 4UL) != SZ_1G * 4UL))
+	if (WARN_ON(ops->unmap(ops, 0, max) != max))
 		failed++;
 
 	free_io_pgtable_ops(ops);
@@ -839,7 +839,7 @@ static int iommu_debug_profiling_fast_dma_api_show(struct seq_file *s,
 	if (!virt)
 		goto out;
 
-	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4UL);
+	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4ULL);
 	if (!mapping) {
 		seq_puts(s, "fast_smmu_create_mapping failed\n");
 		goto out_kfree;
|
|||
static int __tlb_stress_sweep(struct device *dev, struct seq_file *s)
|
||||
{
|
||||
int i, ret = 0;
|
||||
unsigned long iova;
|
||||
const unsigned long max = SZ_1G * 4UL;
|
||||
u64 iova;
|
||||
const u64 max = SZ_1G * 4ULL - 1;
|
||||
void *virt;
|
||||
phys_addr_t phys;
|
||||
dma_addr_t dma_addr;
|
||||
|
@ -1012,8 +1012,8 @@ static int __tlb_stress_sweep(struct device *dev, struct seq_file *s)
|
|||
}
|
||||
|
||||
/* we're all full again. unmap everything. */
|
||||
for (dma_addr = 0; dma_addr < max; dma_addr += SZ_8K)
|
||||
dma_unmap_single(dev, dma_addr, SZ_8K, DMA_TO_DEVICE);
|
||||
for (iova = 0; iova < max; iova += SZ_8K)
|
||||
dma_unmap_single(dev, (dma_addr_t)iova, SZ_8K, DMA_TO_DEVICE);
|
||||
|
||||
out:
|
||||
free_pages((unsigned long)virt, get_order(SZ_8K));
|
||||
|
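Reviewer note: switching the loop counter from dma_addr_t to u64 matters once max becomes 0xffffffff. On 32-bit (non-LPAE) dma_addr_t is 32 bits, so the old counter would wrap at the top of the space (0xffffe000 + SZ_8K truncates back to 0) and the loop would never terminate; a u64 counter reaches max and exits, with the cast applied only at the call:

    for (iova = 0; iova < max; iova += SZ_8K)
            dma_unmap_single(dev, (dma_addr_t)iova, SZ_8K, DMA_TO_DEVICE);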
@@ -1046,7 +1046,7 @@ static int __rand_va_sweep(struct device *dev, struct seq_file *s,
 			   const size_t size)
 {
 	u64 iova;
-	const unsigned long max = SZ_1G * 4UL;
+	const u64 max = SZ_1G * 4ULL - 1;
 	int i, remapped, unmapped, ret = 0;
 	void *virt;
 	dma_addr_t dma_addr, dma_addr2;
@@ -1078,9 +1078,9 @@ static int __rand_va_sweep(struct device *dev, struct seq_file *s,
 	fib_init(&fib);
 	for (iova = get_next_fib(&fib) * size;
 	     iova < max - size;
-	     iova = get_next_fib(&fib) * size) {
-		dma_addr = iova;
-		dma_addr2 = max - size - iova;
+	     iova = (u64)get_next_fib(&fib) * size) {
+		dma_addr = (dma_addr_t)(iova);
+		dma_addr2 = (dma_addr_t)((max + 1) - size - iova);
 		if (dma_addr == dma_addr2) {
 			WARN(1,
 			     "%s test needs update! The random number sequence is folding in on itself and should be changed.\n",
|
|||
ret = -EINVAL;
|
||||
}
|
||||
|
||||
for (dma_addr = 0; dma_addr < max; dma_addr += size)
|
||||
dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
|
||||
for (iova = 0; iova < max; iova += size)
|
||||
dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);
|
||||
|
||||
out:
|
||||
free_pages((unsigned long)virt, get_order(size));
|
||||
|
@@ -1135,10 +1135,11 @@ static int __check_mapping(struct device *dev, struct iommu_domain *domain,
 static int __full_va_sweep(struct device *dev, struct seq_file *s,
 			   const size_t size, struct iommu_domain *domain)
 {
-	unsigned long iova;
+	u64 iova;
 	dma_addr_t dma_addr;
 	void *virt;
 	phys_addr_t phys;
+	const u64 max = SZ_1G * 4ULL - 1;
 	int ret = 0, i;
 
 	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
|
|||
}
|
||||
phys = virt_to_phys(virt);
|
||||
|
||||
for (iova = 0, i = 0; iova < SZ_1G * 4UL; iova += size, ++i) {
|
||||
for (iova = 0, i = 0; iova < max; iova += size, ++i) {
|
||||
unsigned long expected = iova;
|
||||
|
||||
dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
|
||||
|
@ -1201,8 +1202,8 @@ static int __full_va_sweep(struct device *dev, struct seq_file *s,
|
|||
}
|
||||
|
||||
out:
|
||||
for (dma_addr = 0; dma_addr < SZ_1G * 4UL; dma_addr += size)
|
||||
dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
|
||||
for (iova = 0; iova < max; iova += size)
|
||||
dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);
|
||||
|
||||
free_pages((unsigned long)virt, get_order(size));
|
||||
return ret;
|
||||
|
@@ -1391,7 +1392,8 @@ static int __apply_to_new_mapping(struct seq_file *s,
 	int ret = -EINVAL, fast = 1;
 	phys_addr_t pt_phys;
 
-	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4UL);
+	mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
+					   (SZ_1G * 4ULL));
 	if (!mapping)
 		goto out;
@@ -1460,7 +1462,9 @@ static int iommu_debug_functional_arm_dma_api_show(struct seq_file *s,
 	size_t sizes[] = {SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12, 0};
 	int ret = -EINVAL;
 
-	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4UL);
+	/* Make the size equal to MAX_ULONG */
+	mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
+					   (SZ_1G * 4ULL - 1));
 	if (!mapping)
 		goto out;
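Reviewer note on the new comment: "MAX_ULONG" here means 0xffffffff, the largest value an unsigned long holds on a 32-bit build:

    /* on 32-bit:
     *   (unsigned long)(SZ_1G * 4ULL)     == 0           (truncated)
     *   (unsigned long)(SZ_1G * 4ULL - 1) == 0xffffffff  (ULONG_MAX)
     */

Worth confirming whether the two earlier call sites in this file that pass a full SZ_1G * 4ULL (without the -1) are equally safe on 32-bit, or should take the same inclusive bound.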