Merge branch 'core-iommu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-iommu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (63 commits)
  x86, Calgary IOMMU quirk: Find nearest matching Calgary while walking up the PCI tree
  x86/amd-iommu: Remove amd_iommu_pd_table
  x86/amd-iommu: Move reset_iommu_command_buffer out of locked code
  x86/amd-iommu: Cleanup DTE flushing code
  x86/amd-iommu: Introduce iommu_flush_device() function
  x86/amd-iommu: Cleanup attach/detach_device code
  x86/amd-iommu: Keep devices per domain in a list
  x86/amd-iommu: Add device bind reference counting
  x86/amd-iommu: Use dev->arch->iommu to store iommu related information
  x86/amd-iommu: Remove support for domain sharing
  x86/amd-iommu: Rearrange dma_ops related functions
  x86/amd-iommu: Move some pte allocation functions in the right section
  x86/amd-iommu: Remove iommu parameter from dma_ops_domain_alloc
  x86/amd-iommu: Use get_device_id and check_device where appropriate
  x86/amd-iommu: Move find_protection_domain to helper functions
  x86/amd-iommu: Simplify get_device_resources()
  x86/amd-iommu: Let domain_for_device handle aliases
  x86/amd-iommu: Remove iommu specific handling from dma_ops path
  x86/amd-iommu: Remove iommu parameter from __(un)map_single
  x86/amd-iommu: Make alloc_new_range aware of multiple IOMMUs
  ...
commit 7b626acb8f
33 changed files with 1073 additions and 888 deletions
@@ -4,8 +4,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/swiotlb.h>
 
-extern int swiotlb_force;
-
 #ifdef CONFIG_SWIOTLB
 extern int swiotlb;
 extern void pci_swiotlb_init(void);
@@ -41,7 +41,7 @@ struct dma_map_ops swiotlb_dma_ops = {
 void __init swiotlb_dma_init(void)
 {
 	dma_ops = &swiotlb_dma_ops;
-	swiotlb_init();
+	swiotlb_init(1);
 }
 
 void __init pci_swiotlb_init(void)
@@ -51,7 +51,7 @@ void __init pci_swiotlb_init(void)
 		swiotlb = 1;
 		printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
 		machvec_init("dig");
-		swiotlb_init();
+		swiotlb_init(1);
 		dma_ops = &swiotlb_dma_ops;
 #else
 		panic("Unable to find Intel IOMMU");
@@ -345,7 +345,7 @@ void __init setup_arch(char **cmdline_p)
 
 #ifdef CONFIG_SWIOTLB
 	if (ppc_swiotlb_enable)
-		swiotlb_init();
+		swiotlb_init(1);
 #endif
 
 	paging_init();
@@ -550,7 +550,7 @@ void __init setup_arch(char **cmdline_p)
 
 #ifdef CONFIG_SWIOTLB
 	if (ppc_swiotlb_enable)
-		swiotlb_init();
+		swiotlb_init(1);
 #endif
 
 	paging_init();
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
+ * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
  * Author: Joerg Roedel <joerg.roedel@amd.com>
  *         Leo Duran <leo.duran@amd.com>
  *
@@ -23,19 +23,13 @@
 #include <linux/irqreturn.h>
 
 #ifdef CONFIG_AMD_IOMMU
-extern int amd_iommu_init(void);
-extern int amd_iommu_init_dma_ops(void);
-extern int amd_iommu_init_passthrough(void);
+
 extern void amd_iommu_detect(void);
-extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
-extern void amd_iommu_flush_all_domains(void);
-extern void amd_iommu_flush_all_devices(void);
-extern void amd_iommu_shutdown(void);
-extern void amd_iommu_apply_erratum_63(u16 devid);
+
 #else
-static inline int amd_iommu_init(void) { return -ENODEV; }
+
 static inline void amd_iommu_detect(void) { }
-static inline void amd_iommu_shutdown(void) { }
+
 #endif
 
 #endif /* _ASM_X86_AMD_IOMMU_H */
arch/x86/include/asm/amd_iommu_proto.h (new file, 38 lines)
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2009 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_X86_AMD_IOMMU_PROTO_H
+#define _ASM_X86_AMD_IOMMU_PROTO_H
+
+struct amd_iommu;
+
+extern int amd_iommu_init_dma_ops(void);
+extern int amd_iommu_init_passthrough(void);
+extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
+extern void amd_iommu_flush_all_domains(void);
+extern void amd_iommu_flush_all_devices(void);
+extern void amd_iommu_apply_erratum_63(u16 devid);
+extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
+
+#ifndef CONFIG_AMD_IOMMU_STATS
+
+static inline void amd_iommu_stats_init(void) { }
+
+#endif /* !CONFIG_AMD_IOMMU_STATS */
+
+#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
+ * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
  * Author: Joerg Roedel <joerg.roedel@amd.com>
  *         Leo Duran <leo.duran@amd.com>
  *
@@ -24,6 +24,11 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 
+/*
+ * Maximum number of IOMMUs supported
+ */
+#define MAX_IOMMUS	32
+
 /*
  * some size calculation constants
  */
@@ -206,6 +211,9 @@ extern bool amd_iommu_dump;
 		printk(KERN_INFO "AMD-Vi: " format, ## arg);	\
 	} while(0);
 
+/* global flag if IOMMUs cache non-present entries */
+extern bool amd_iommu_np_cache;
+
 /*
  * Make iterating over all IOMMUs easier
  */
@@ -226,6 +234,8 @@ extern bool amd_iommu_dump;
  * independent of their use.
  */
 struct protection_domain {
+	struct list_head list;  /* for list of all protection domains */
+	struct list_head dev_list; /* List of all devices in this domain */
 	spinlock_t lock;	/* mostly used to lock the page table*/
 	u16 id;			/* the domain id written to the device table */
 	int mode;		/* paging mode (0-6 levels) */
@@ -233,7 +243,20 @@ struct protection_domain {
 	unsigned long flags;	/* flags to find out type of domain */
 	bool updated;		/* complete domain flush required */
 	unsigned dev_cnt;	/* devices assigned to this domain */
+	unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
 	void *priv;		/* private data */
+
+};
+
+/*
+ * This struct contains device specific data for the IOMMU
+ */
+struct iommu_dev_data {
+	struct list_head list;		  /* For domain->dev_list */
+	struct device *dev;		  /* Device this data belong to */
+	struct device *alias;		  /* The Alias Device */
+	struct protection_domain *domain; /* Domain the device is bound to */
+	atomic_t bind;			  /* Domain attach reverent count */
 };
 
 /*
@@ -291,6 +314,9 @@ struct dma_ops_domain {
 struct amd_iommu {
 	struct list_head list;
 
+	/* Index within the IOMMU array */
+	int index;
+
 	/* locks the accesses to the hardware */
 	spinlock_t lock;
 
@@ -356,6 +382,21 @@ struct amd_iommu {
  */
 extern struct list_head amd_iommu_list;
 
+/*
+ * Array with pointers to each IOMMU struct
+ * The indices are referenced in the protection domains
+ */
+extern struct amd_iommu *amd_iommus[MAX_IOMMUS];
+
+/* Number of IOMMUs present in the system */
+extern int amd_iommus_present;
+
+/*
+ * Declarations for the global list of all protection domains
+ */
+extern spinlock_t amd_iommu_pd_lock;
+extern struct list_head amd_iommu_pd_list;
+
 /*
  * Structure defining one entry in the device table
  */
@@ -416,15 +457,9 @@ extern unsigned amd_iommu_aperture_order;
 /* largest PCI device id we expect translation requests for */
 extern u16 amd_iommu_last_bdf;
 
-/* data structures for protection domain handling */
-extern struct protection_domain **amd_iommu_pd_table;
-
 /* allocation bitmap for domain ids */
 extern unsigned long *amd_iommu_pd_alloc_bitmap;
 
-/* will be 1 if device isolation is enabled */
-extern bool amd_iommu_isolate;
-
 /*
  * If true, the addresses will be flushed on unmap time, not when
  * they are reused
@@ -462,11 +497,6 @@ struct __iommu_counter {
 #define ADD_STATS_COUNTER(name, x)
 #define SUB_STATS_COUNTER(name, x)
 
-static inline void amd_iommu_stats_init(void) { }
-
 #endif /* CONFIG_AMD_IOMMU_STATS */
 
-/* some function prototypes */
-extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
-
 #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
@@ -62,10 +62,8 @@ struct cal_chipset_ops {
 extern int use_calgary;
 
 #ifdef CONFIG_CALGARY_IOMMU
-extern int calgary_iommu_init(void);
 extern void detect_calgary(void);
 #else
-static inline int calgary_iommu_init(void) { return 1; }
 static inline void detect_calgary(void) { return; }
 #endif
 
@@ -8,7 +8,7 @@ struct dev_archdata {
 #ifdef CONFIG_X86_64
 	struct dma_map_ops *dma_ops;
 #endif
-#ifdef CONFIG_DMAR
+#if defined(CONFIG_DMAR) || defined(CONFIG_AMD_IOMMU)
 	void *iommu; /* hook for IOMMU specific extension */
 #endif
 };
@@ -20,7 +20,8 @@
 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
 #endif
 
-extern dma_addr_t bad_dma_address;
+#define DMA_ERROR_CODE	0
+
 extern int iommu_merge;
 extern struct device x86_dma_fallback_dev;
 extern int panic_on_overflow;
@@ -48,7 +49,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	if (ops->mapping_error)
 		return ops->mapping_error(dev, dma_addr);
 
-	return (dma_addr == bad_dma_address);
+	return (dma_addr == DMA_ERROR_CODE);
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
@@ -35,8 +35,7 @@ extern int gart_iommu_aperture_allowed;
 extern int gart_iommu_aperture_disabled;
 
 extern void early_gart_iommu_check(void);
-extern void gart_iommu_init(void);
-extern void gart_iommu_shutdown(void);
+extern int gart_iommu_init(void);
 extern void __init gart_parse_options(char *);
 extern void gart_iommu_hole_init(void);
 
@@ -48,12 +47,6 @@ extern void gart_iommu_hole_init(void);
 static inline void early_gart_iommu_check(void)
 {
 }
-static inline void gart_iommu_init(void)
-{
-}
-static inline void gart_iommu_shutdown(void)
-{
-}
 static inline void gart_parse_options(char *options)
 {
 }
@@ -1,8 +1,6 @@
 #ifndef _ASM_X86_IOMMU_H
 #define _ASM_X86_IOMMU_H
 
-extern void pci_iommu_shutdown(void);
-extern void no_iommu_init(void);
 extern struct dma_map_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
@@ -3,17 +3,14 @@
 
 #include <linux/swiotlb.h>
 
-/* SWIOTLB interface */
-
-extern int swiotlb_force;
-
 #ifdef CONFIG_SWIOTLB
 extern int swiotlb;
-extern void pci_swiotlb_init(void);
+extern int pci_swiotlb_init(void);
 #else
 #define swiotlb 0
-static inline void pci_swiotlb_init(void)
+static inline int pci_swiotlb_init(void)
 {
+	return 0;
 }
 #endif
 
@@ -90,6 +90,14 @@ struct x86_init_timers {
 	void (*timer_init)(void);
 };
 
+/**
+ * struct x86_init_iommu - platform specific iommu setup
+ * @iommu_init:			platform specific iommu setup
+ */
+struct x86_init_iommu {
+	int (*iommu_init)(void);
+};
+
 /**
  * struct x86_init_ops - functions for platform specific setup
  *
@@ -101,6 +109,7 @@ struct x86_init_ops {
 	struct x86_init_oem		oem;
 	struct x86_init_paging		paging;
 	struct x86_init_timers		timers;
+	struct x86_init_iommu		iommu;
 };
 
 /**
@@ -121,6 +130,7 @@ struct x86_platform_ops {
 	unsigned long (*calibrate_tsc)(void);
 	unsigned long (*get_wallclock)(void);
 	int (*set_wallclock)(unsigned long nowtime);
+	void (*iommu_shutdown)(void);
 };
 
 extern struct x86_init_ops x86_init;
(File diff suppressed because it is too large)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
+ * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
  * Author: Joerg Roedel <joerg.roedel@amd.com>
  *         Leo Duran <leo.duran@amd.com>
  *
@@ -25,10 +25,12 @@
 #include <linux/interrupt.h>
 #include <linux/msi.h>
 #include <asm/pci-direct.h>
+#include <asm/amd_iommu_proto.h>
 #include <asm/amd_iommu_types.h>
 #include <asm/amd_iommu.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
+#include <asm/x86_init.h>
 
 /*
  * definitions for the ACPI scanning code
@@ -123,18 +125,24 @@ u16 amd_iommu_last_bdf;			/* largest PCI device id we have
 					   to handle */
 LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
 					   we find in ACPI */
-#ifdef CONFIG_IOMMU_STRESS
-bool amd_iommu_isolate = false;
-#else
-bool amd_iommu_isolate = true;		/* if true, device isolation is
-					   enabled */
-#endif
 
 bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */
 
 LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
 					   system */
 
+/* Array to assign indices to IOMMUs*/
+struct amd_iommu *amd_iommus[MAX_IOMMUS];
+int amd_iommus_present;
+
+/* IOMMUs have a non-present cache? */
+bool amd_iommu_np_cache __read_mostly;
+
+/*
+ * List of protection domains - used during resume
+ */
+LIST_HEAD(amd_iommu_pd_list);
+spinlock_t amd_iommu_pd_lock;
+
 /*
  * Pointer to the device table which is shared by all AMD IOMMUs
  * it is indexed by the PCI device id or the HT unit id and contains
@@ -156,12 +164,6 @@ u16 *amd_iommu_alias_table;
  */
 struct amd_iommu **amd_iommu_rlookup_table;
 
-/*
- * The pd table (protection domain table) is used to find the protection domain
- * data structure a device belongs to. Indexed with the PCI device id too.
- */
-struct protection_domain **amd_iommu_pd_table;
-
 /*
  * AMD IOMMU allows up to 2^16 differend protection domains. This is a bitmap
  * to know which ones are already in use.
@@ -838,7 +840,18 @@ static void __init free_iommu_all(void)
 static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 {
 	spin_lock_init(&iommu->lock);
+
+	/* Add IOMMU to internal data structures */
 	list_add_tail(&iommu->list, &amd_iommu_list);
+	iommu->index = amd_iommus_present++;
+
+	if (unlikely(iommu->index >= MAX_IOMMUS)) {
+		WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
+		return -ENOSYS;
+	}
+
+	/* Index is fine - add IOMMU to the array */
+	amd_iommus[iommu->index] = iommu;
 
 	/*
 	 * Copy data from ACPI table entry to the iommu struct
@@ -868,6 +881,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 	init_iommu_from_acpi(iommu, h);
 	init_iommu_devices(iommu);
 
+	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
+		amd_iommu_np_cache = true;
+
 	return pci_enable_device(iommu->dev);
 }
 
@@ -925,7 +941,7 @@ static int __init init_iommu_all(struct acpi_table_header *table)
  *
  ****************************************************************************/
 
-static int __init iommu_setup_msi(struct amd_iommu *iommu)
+static int iommu_setup_msi(struct amd_iommu *iommu)
 {
 	int r;
 
@@ -1176,19 +1192,10 @@ static struct sys_device device_amd_iommu = {
  * functions. Finally it prints some information about AMD IOMMUs and
  * the driver state and enables the hardware.
  */
-int __init amd_iommu_init(void)
+static int __init amd_iommu_init(void)
 {
 	int i, ret = 0;
 
-	if (no_iommu) {
-		printk(KERN_INFO "AMD-Vi disabled by kernel command line\n");
-		return 0;
-	}
-
-	if (!amd_iommu_detected)
-		return -ENODEV;
-
 	/*
 	 * First parse ACPI tables to find the largest Bus/Dev/Func
 	 * we need to handle. Upon this information the shared data
@@ -1225,15 +1232,6 @@ int __init amd_iommu_init(void)
 	if (amd_iommu_rlookup_table == NULL)
 		goto free;
 
-	/*
-	 * Protection Domain table - maps devices to protection domains
-	 * This table has the same size as the rlookup_table
-	 */
-	amd_iommu_pd_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-						      get_order(rlookup_table_size));
-	if (amd_iommu_pd_table == NULL)
-		goto free;
-
 	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
 					    GFP_KERNEL | __GFP_ZERO,
 					    get_order(MAX_DOMAIN_ID/8));
@@ -1255,6 +1253,8 @@ int __init amd_iommu_init(void)
 	 */
 	amd_iommu_pd_alloc_bitmap[0] = 1;
 
+	spin_lock_init(&amd_iommu_pd_lock);
+
 	/*
 	 * now the data structures are allocated and basically initialized
 	 * start the real acpi table scan
@@ -1286,17 +1286,12 @@
 	if (iommu_pass_through)
 		goto out;
 
-	printk(KERN_INFO "AMD-Vi: device isolation ");
-	if (amd_iommu_isolate)
-		printk("enabled\n");
-	else
-		printk("disabled\n");
-
 	if (amd_iommu_unmap_flush)
 		printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n");
 	else
 		printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
 
+	x86_platform.iommu_shutdown = disable_iommus;
 out:
 	return ret;
 
@@ -1304,9 +1299,6 @@ free:
 	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
 		   get_order(MAX_DOMAIN_ID/8));
 
-	free_pages((unsigned long)amd_iommu_pd_table,
-		   get_order(rlookup_table_size));
-
 	free_pages((unsigned long)amd_iommu_rlookup_table,
 		   get_order(rlookup_table_size));
 
@@ -1323,11 +1315,6 @@ free:
 	goto out;
 }
 
-void amd_iommu_shutdown(void)
-{
-	disable_iommus();
-}
-
 /****************************************************************************
  *
  * Early detect code. This code runs at IOMMU detection time in the DMA
|
||||||
|
|
||||||
void __init amd_iommu_detect(void)
|
void __init amd_iommu_detect(void)
|
||||||
{
|
{
|
||||||
if (swiotlb || no_iommu || (iommu_detected && !gart_iommu_aperture))
|
if (no_iommu || (iommu_detected && !gart_iommu_aperture))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
|
if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
|
||||||
iommu_detected = 1;
|
iommu_detected = 1;
|
||||||
amd_iommu_detected = 1;
|
amd_iommu_detected = 1;
|
||||||
#ifdef CONFIG_GART_IOMMU
|
x86_init.iommu.iommu_init = amd_iommu_init;
|
||||||
gart_iommu_aperture_disabled = 1;
|
|
||||||
gart_iommu_aperture = 0;
|
|
||||||
#endif
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1372,10 +1356,6 @@ static int __init parse_amd_iommu_dump(char *str)
|
||||||
static int __init parse_amd_iommu_options(char *str)
|
static int __init parse_amd_iommu_options(char *str)
|
||||||
{
|
{
|
||||||
for (; *str; ++str) {
|
for (; *str; ++str) {
|
||||||
if (strncmp(str, "isolate", 7) == 0)
|
|
||||||
amd_iommu_isolate = true;
|
|
||||||
if (strncmp(str, "share", 5) == 0)
|
|
||||||
amd_iommu_isolate = false;
|
|
||||||
if (strncmp(str, "fullflush", 9) == 0)
|
if (strncmp(str, "fullflush", 9) == 0)
|
||||||
amd_iommu_unmap_flush = true;
|
amd_iommu_unmap_flush = true;
|
||||||
}
|
}
|
||||||
|
|
|
@@ -28,6 +28,7 @@
 #include <asm/pci-direct.h>
 #include <asm/dma.h>
 #include <asm/k8.h>
+#include <asm/x86_init.h>
 
 int gart_iommu_aperture;
 int gart_iommu_aperture_disabled __initdata;
@@ -400,6 +401,7 @@ void __init gart_iommu_hole_init(void)
 
 		iommu_detected = 1;
 		gart_iommu_aperture = 1;
+		x86_init.iommu.iommu_init = gart_iommu_init;
 
 		aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7;
 		aper_size = (32 * 1024 * 1024) << aper_order;
@@ -456,7 +458,7 @@ out:
 
 	if (aper_alloc) {
 		/* Got the aperture from the AGP bridge */
-	} else if (swiotlb && !valid_agp) {
+	} else if (!valid_agp) {
 		/* Do nothing */
 	} else if ((!no_iommu && max_pfn > MAX_DMA32_PFN) ||
 		   force_iommu ||
@@ -27,8 +27,7 @@
 #include <asm/cpu.h>
 #include <asm/reboot.h>
 #include <asm/virtext.h>
-#include <asm/iommu.h>
-
+#include <asm/x86_init.h>
 
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
 
@@ -106,7 +105,7 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
 #endif
 
 #ifdef CONFIG_X86_64
-	pci_iommu_shutdown();
+	x86_platform.iommu_shutdown();
 #endif
 
 	crash_save_cpu(regs, safe_smp_processor_id());
@@ -46,6 +46,7 @@
 #include <asm/dma.h>
 #include <asm/rio.h>
 #include <asm/bios_ebda.h>
+#include <asm/x86_init.h>
 
 #ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT
 int use_calgary __read_mostly = 1;
@@ -244,7 +245,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
 		if (panic_on_overflow)
 			panic("Calgary: fix the allocator.\n");
 		else
-			return bad_dma_address;
+			return DMA_ERROR_CODE;
 	}
 }
 
@@ -260,12 +261,15 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 			      void *vaddr, unsigned int npages, int direction)
 {
 	unsigned long entry;
-	dma_addr_t ret = bad_dma_address;
+	dma_addr_t ret;
 
 	entry = iommu_range_alloc(dev, tbl, npages);
 
-	if (unlikely(entry == bad_dma_address))
-		goto error;
+	if (unlikely(entry == DMA_ERROR_CODE)) {
+		printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
+		       "iommu %p\n", npages, tbl);
+		return DMA_ERROR_CODE;
+	}
 
 	/* set the return dma address */
 	ret = (entry << PAGE_SHIFT) | ((unsigned long)vaddr & ~PAGE_MASK);
@@ -273,13 +277,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 	/* put the TCEs in the HW table */
 	tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK,
 		  direction);
-
 	return ret;
-
-error:
-	printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
-	       "iommu %p\n", npages, tbl);
-	return bad_dma_address;
 }
 
 static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
@@ -290,8 +288,8 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 	unsigned long flags;
 
 	/* were we called with bad_dma_address? */
-	badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE);
-	if (unlikely((dma_addr >= bad_dma_address) && (dma_addr < badend))) {
+	badend = DMA_ERROR_CODE + (EMERGENCY_PAGES * PAGE_SIZE);
+	if (unlikely((dma_addr >= DMA_ERROR_CODE) && (dma_addr < badend))) {
 		WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
 		       "address 0x%Lx\n", dma_addr);
 		return;
@@ -318,13 +316,15 @@ static inline struct iommu_table *find_iommu_table(struct device *dev)
 
 	pdev = to_pci_dev(dev);
 
+	/* search up the device tree for an iommu */
 	pbus = pdev->bus;
-
-	/* is the device behind a bridge? Look for the root bus */
-	while (pbus->parent)
+	do {
+		tbl = pci_iommu(pbus);
+		if (tbl && tbl->it_busno == pbus->number)
+			break;
+		tbl = NULL;
 		pbus = pbus->parent;
-
-	tbl = pci_iommu(pbus);
+	} while (pbus);
 
 	BUG_ON(tbl && (tbl->it_busno != pbus->number));
 
@@ -373,7 +373,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
 		npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE);
 
 		entry = iommu_range_alloc(dev, tbl, npages);
-		if (entry == bad_dma_address) {
+		if (entry == DMA_ERROR_CODE) {
 			/* makes sure unmap knows to stop */
 			s->dma_length = 0;
 			goto error;
@@ -391,7 +391,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
 error:
 	calgary_unmap_sg(dev, sg, nelems, dir, NULL);
 	for_each_sg(sg, s, nelems, i) {
-		sg->dma_address = bad_dma_address;
+		sg->dma_address = DMA_ERROR_CODE;
 		sg->dma_length = 0;
 	}
 	return 0;
@@ -446,7 +446,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,
 
 	/* set up tces to cover the allocated range */
 	mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
-	if (mapping == bad_dma_address)
+	if (mapping == DMA_ERROR_CODE)
 		goto free;
 	*dma_handle = mapping;
 	return ret;
@@ -727,7 +727,7 @@ static void __init calgary_reserve_regions(struct pci_dev *dev)
 	struct iommu_table *tbl = pci_iommu(dev->bus);
 
 	/* reserve EMERGENCY_PAGES from bad_dma_address and up */
-	iommu_range_reserve(tbl, bad_dma_address, EMERGENCY_PAGES);
+	iommu_range_reserve(tbl, DMA_ERROR_CODE, EMERGENCY_PAGES);
 
 	/* avoid the BIOS/VGA first 640KB-1MB region */
 	/* for CalIOC2 - avoid the entire first MB */
@@ -1344,6 +1344,23 @@ static void __init get_tce_space_from_tar(void)
 	return;
 }
 
+static int __init calgary_iommu_init(void)
+{
+	int ret;
+
+	/* ok, we're trying to use Calgary - let's roll */
+	printk(KERN_INFO "PCI-DMA: Using Calgary IOMMU\n");
+
+	ret = calgary_init();
+	if (ret) {
+		printk(KERN_ERR "PCI-DMA: Calgary init failed %d, "
+		       "falling back to no_iommu\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
 void __init detect_calgary(void)
 {
 	int bus;
@@ -1357,7 +1374,7 @@ void __init detect_calgary(void)
 	 * if the user specified iommu=off or iommu=soft or we found
 	 * another HW IOMMU already, bail out.
 	 */
-	if (swiotlb || no_iommu || iommu_detected)
+	if (no_iommu || iommu_detected)
 		return;
 
 	if (!use_calgary)
@@ -1442,9 +1459,7 @@ void __init detect_calgary(void)
 		printk(KERN_INFO "PCI-DMA: Calgary TCE table spec is %d\n",
 		       specified_table_size);
 
-		/* swiotlb for devices that aren't behind the Calgary. */
-		if (max_pfn > MAX_DMA32_PFN)
-			swiotlb = 1;
+		x86_init.iommu.iommu_init = calgary_iommu_init;
 	}
 	return;
 
@@ -1457,35 +1472,6 @@ cleanup:
 	}
 }
 
-int __init calgary_iommu_init(void)
-{
-	int ret;
-
-	if (no_iommu || (swiotlb && !calgary_detected))
-		return -ENODEV;
-
-	if (!calgary_detected)
-		return -ENODEV;
-
-	/* ok, we're trying to use Calgary - let's roll */
-	printk(KERN_INFO "PCI-DMA: Using Calgary IOMMU\n");
-
-	ret = calgary_init();
-	if (ret) {
-		printk(KERN_ERR "PCI-DMA: Calgary init failed %d, "
-		       "falling back to no_iommu\n", ret);
-		return ret;
-	}
-
-	force_iommu = 1;
-	bad_dma_address = 0x0;
-	/* dma_ops is set to swiotlb or nommu */
-	if (!dma_ops)
-		dma_ops = &nommu_dma_ops;
-
-	return 0;
-}
-
 static int __init calgary_parse_options(char *p)
 {
 	unsigned int bridge;
@@ -11,10 +11,11 @@
 #include <asm/gart.h>
 #include <asm/calgary.h>
 #include <asm/amd_iommu.h>
+#include <asm/x86_init.h>
 
 static int forbid_dac __read_mostly;
 
-struct dma_map_ops *dma_ops;
+struct dma_map_ops *dma_ops = &nommu_dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
 static int iommu_sac_force __read_mostly;
@@ -42,9 +43,6 @@ int iommu_detected __read_mostly = 0;
  */
 int iommu_pass_through __read_mostly;
 
-dma_addr_t bad_dma_address __read_mostly = 0;
-EXPORT_SYMBOL(bad_dma_address);
-
 /* Dummy device used for NULL arguments (normally ISA). */
 struct device x86_dma_fallback_dev = {
 	.init_name = "fallback device",
@@ -126,20 +124,17 @@ void __init pci_iommu_alloc(void)
 	/* free the range so iommu could get some range less than 4G */
 	dma32_free_bootmem();
 #endif
+	if (pci_swiotlb_init())
+		return;
 
-	/*
-	 * The order of these functions is important for
-	 * fall-back/fail-over reasons
-	 */
 	gart_iommu_hole_init();
 
 	detect_calgary();
 
 	detect_intel_iommu();
 
+	/* needs to be called after gart_iommu_hole_init */
 	amd_iommu_detect();
-
-	pci_swiotlb_init();
 }
 
 void *dma_generic_alloc_coherent(struct device *dev, size_t size,
@@ -214,7 +209,7 @@ static __init int iommu_setup(char *p)
 		if (!strncmp(p, "allowdac", 8))
 			forbid_dac = 0;
 		if (!strncmp(p, "nodac", 5))
-			forbid_dac = -1;
+			forbid_dac = 1;
 		if (!strncmp(p, "usedac", 6)) {
 			forbid_dac = -1;
 			return 1;
@@ -289,25 +284,17 @@ static int __init pci_iommu_init(void)
 #ifdef CONFIG_PCI
 	dma_debug_add_bus(&pci_bus_type);
 #endif
+	x86_init.iommu.iommu_init();
 
-	calgary_iommu_init();
-
-	intel_iommu_init();
+	if (swiotlb) {
+		printk(KERN_INFO "PCI-DMA: "
+			"Using software bounce buffering for IO (SWIOTLB)\n");
+		swiotlb_print_info();
+	} else
+		swiotlb_free();
 
-	amd_iommu_init();
-
-	gart_iommu_init();
-
-	no_iommu_init();
 	return 0;
 }
 
-void pci_iommu_shutdown(void)
-{
-	gart_iommu_shutdown();
-
-	amd_iommu_shutdown();
-}
 /* Must execute after PCI subsystem */
 rootfs_initcall(pci_iommu_init);
@@ -39,6 +39,7 @@
 #include <asm/swiotlb.h>
 #include <asm/dma.h>
 #include <asm/k8.h>
+#include <asm/x86_init.h>
 
 static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
 static unsigned long iommu_size;	/* size of remapping area bytes */
@@ -46,6 +47,8 @@ static unsigned long iommu_pages;	/* .. and in pages */
 
 static u32 *iommu_gatt_base;		/* Remapping table */
 
+static dma_addr_t bad_dma_addr;
+
 /*
  * If this is disabled the IOMMU will use an optimized flushing strategy
  * of only flushing when an mapping is reused. With it true the GART is
@@ -92,7 +95,7 @@ static unsigned long alloc_iommu(struct device *dev, int size,
 
 	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
 			   PAGE_SIZE) >> PAGE_SHIFT;
-	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
+	boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1,
 			      PAGE_SIZE) >> PAGE_SHIFT;
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
@@ -216,7 +219,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 		if (panic_on_overflow)
 			panic("dma_map_area overflow %lu bytes\n", size);
 		iommu_full(dev, size, dir);
-		return bad_dma_address;
+		return bad_dma_addr;
 	}
 
 	for (i = 0; i < npages; i++) {
@@ -294,7 +297,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 	int i;
 
 #ifdef CONFIG_IOMMU_DEBUG
-	printk(KERN_DEBUG "dma_map_sg overflow\n");
+	pr_debug("dma_map_sg overflow\n");
 #endif
 
 	for_each_sg(sg, s, nents, i) {
@@ -302,7 +305,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 
 		if (nonforced_iommu(dev, addr, s->length)) {
 			addr = dma_map_area(dev, addr, s->length, dir, 0);
-			if (addr == bad_dma_address) {
+			if (addr == bad_dma_addr) {
 				if (i > 0)
 					gart_unmap_sg(dev, sg, i, dir, NULL);
 				nents = 0;
@@ -389,12 +392,14 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	if (!dev)
 		dev = &x86_dma_fallback_dev;
 
-	out = 0;
-	start = 0;
-	start_sg = sgmap = sg;
-	seg_size = 0;
-	max_seg_size = dma_get_max_seg_size(dev);
-	ps = NULL; /* shut up gcc */
+	out		= 0;
+	start		= 0;
+	start_sg	= sg;
+	sgmap		= sg;
+	seg_size	= 0;
+	max_seg_size	= dma_get_max_seg_size(dev);
+	ps		= NULL; /* shut up gcc */
+
 	for_each_sg(sg, s, nents, i) {
 		dma_addr_t addr = sg_phys(s);
 
@@ -417,11 +422,12 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 					  sgmap, pages, need) < 0)
 				goto error;
 			out++;
-			seg_size = 0;
-			sgmap = sg_next(sgmap);
-			pages = 0;
-			start = i;
-			start_sg = s;
+
+			seg_size	= 0;
+			sgmap		= sg_next(sgmap);
+			pages		= 0;
+			start		= i;
+			start_sg	= s;
 		}
 	}
 
@@ -455,7 +461,7 @@ error:
 
 	iommu_full(dev, pages << PAGE_SHIFT, dir);
 	for_each_sg(sg, s, nents, i)
-		s->dma_address = bad_dma_address;
+		s->dma_address = bad_dma_addr;
 	return 0;
 }
 
@@ -479,7 +485,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
 				     DMA_BIDIRECTIONAL, align_mask);
 
 	flush_gart();
-	if (paddr != bad_dma_address) {
+	if (paddr != bad_dma_addr) {
 		*dma_addr = paddr;
 		return page_address(page);
 	}
@@ -499,6 +505,11 @@ gart_free_coherent(struct device *dev, size_t size, void *vaddr,
 	free_pages((unsigned long)vaddr, get_order(size));
 }
 
+static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return (dma_addr == bad_dma_addr);
+}
+
 static int no_agp;
 
 static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
@@ -515,7 +526,7 @@ static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
 	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;
 
 	if (iommu_size < 64*1024*1024) {
-		printk(KERN_WARNING
+		pr_warning(
 			"PCI-DMA: Warning: Small IOMMU %luMB."
 			" Consider increasing the AGP aperture in BIOS\n",
 				iommu_size >> 20);
@@ -570,28 +581,32 @@ void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
 	aperture_alloc = aper_alloc;
 }
 
+static void gart_fixup_northbridges(struct sys_device *dev)
+{
+	int i;
+
+	if (!fix_up_north_bridges)
+		return;
+
+	pr_info("PCI-DMA: Restoring GART aperture settings\n");
+
+	for (i = 0; i < num_k8_northbridges; i++) {
+		struct pci_dev *dev = k8_northbridges[i];
+
+		/*
+		 * Don't enable translations just yet. That is the next
+		 * step. Restore the pre-suspend aperture settings.
+		 */
+		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, aperture_order << 1);
+		pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
+	}
+}
+
 static int gart_resume(struct sys_device *dev)
 {
-	printk(KERN_INFO "PCI-DMA: Resuming GART IOMMU\n");
-
-	if (fix_up_north_bridges) {
-		int i;
-
-		printk(KERN_INFO "PCI-DMA: Restoring GART aperture settings\n");
-
-		for (i = 0; i < num_k8_northbridges; i++) {
-			struct pci_dev *dev = k8_northbridges[i];
-
-			/*
-			 * Don't enable translations just yet. That is the next
-			 * step. Restore the pre-suspend aperture settings.
-			 */
-			pci_write_config_dword(dev, AMD64_GARTAPERTURECTL,
-						aperture_order << 1);
-			pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
-						aperture_alloc >> 25);
-		}
-	}
+	pr_info("PCI-DMA: Resuming GART IOMMU\n");
+
+	gart_fixup_northbridges(dev);
 
 	enable_gart_translations();
 
@@ -604,15 +619,14 @@ static int gart_suspend(struct sys_device *dev, pm_message_t state)
 }
 
 static struct sysdev_class gart_sysdev_class = {
 	.name		= "gart",
 	.suspend	= gart_suspend,
 	.resume		= gart_resume,
 
 };
 
 static struct sys_device device_gart = {
-	.id	= 0,
 	.cls	= &gart_sysdev_class,
 };
 
 /*
@@ -627,7 +641,8 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 	void *gatt;
 	int i, error;
 
-	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
+	pr_info("PCI-DMA: Disabling AGP.\n");
+
 	aper_size = aper_base = info->aper_size = 0;
 	dev = NULL;
 	for (i = 0; i < num_k8_northbridges; i++) {
@@ -645,6 +660,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 	}
 	if (!aper_base)
 		goto nommu;
+
 	info->aper_base = aper_base;
 	info->aper_size = aper_size >> 20;
 
@@ -667,14 +683,14 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 
 	flush_gart();
 
-	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
+	pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
 	       aper_base, aper_size>>10);
 
 	return 0;
 
  nommu:
 	/* Should not happen anymore */
-	printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
+	pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU\n"
 	       "falling back to iommu=soft.\n");
 	return -1;
 }
@@ -686,14 +702,15 @@ static struct dma_map_ops gart_dma_ops = {
 	.unmap_page			= gart_unmap_page,
 	.alloc_coherent			= gart_alloc_coherent,
 	.free_coherent			= gart_free_coherent,
+	.mapping_error			= gart_mapping_error,
 };
 
-void gart_iommu_shutdown(void)
+static void gart_iommu_shutdown(void)
 {
 	struct pci_dev *dev;
 	int i;
 
-	if (no_agp && (dma_ops != &gart_dma_ops))
+	if (no_agp)
 		return;
 
 	for (i = 0; i < num_k8_northbridges; i++) {
@@ -708,7 +725,7 @@ void gart_iommu_shutdown(void)
 	}
 }
 
-void __init gart_iommu_init(void)
+int __init gart_iommu_init(void)
 {
 	struct agp_kern_info info;
 	unsigned long iommu_start;
@@ -718,7 +735,7 @@ void __init gart_iommu_init(void)
 	long i;
 
 	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
-		return;
+		return 0;
 
 #ifndef CONFIG_AGP_AMD64
 	no_agp = 1;
@@ -730,35 +747,28 @@ void __init gart_iommu_init(void)
 		(agp_copy_info(agp_bridge, &info) < 0);
 #endif
 
-	if (swiotlb)
-		return;
-
-	/* Did we detect a different HW IOMMU? */
-	if (iommu_detected && !gart_iommu_aperture)
-		return;
-
 	if (no_iommu ||
 	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
 	    !gart_iommu_aperture ||
 	    (no_agp && init_k8_gatt(&info) < 0)) {
 		if (max_pfn > MAX_DMA32_PFN) {
-			printk(KERN_WARNING "More than 4GB of memory "
-			       "but GART IOMMU not available.\n");
-			printk(KERN_WARNING "falling back to iommu=soft.\n");
+			pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
+			pr_warning("falling back to iommu=soft.\n");
 		}
-		return;
+		return 0;
 	}
 
 	/* need to map that range */
 	aper_size = info.aper_size << 20;
 	aper_base = info.aper_base;
 	end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
 
 	if (end_pfn > max_low_pfn_mapped) {
 		start_pfn = (aper_base>>PAGE_SHIFT);
 		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
 	}
 
-	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
+	pr_info("PCI-DMA: using GART IOMMU.\n");
 	iommu_size = check_iommu_size(info.aper_base, aper_size);
 	iommu_pages = iommu_size >> PAGE_SHIFT;
 
@@ -773,8 +783,7 @@ void __init gart_iommu_init(void)
 
 	ret = dma_debug_resize_entries(iommu_pages);
 	if (ret)
-		printk(KERN_DEBUG
-		       "PCI-DMA: Cannot trace all the entries\n");
+		pr_debug("PCI-DMA: Cannot trace all the entries\n");
 	}
 #endif
 
@@ -784,15 +793,14 @@ void __init gart_iommu_init(void)
 	 */
 	iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
 
-	agp_memory_reserved = iommu_size;
-	printk(KERN_INFO
-	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
+	pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
 	       iommu_size >> 20);
 
-	iommu_start = aper_size - iommu_size;
-	iommu_bus_base = info.aper_base + iommu_start;
-	bad_dma_address = iommu_bus_base;
-	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);
+	agp_memory_reserved = iommu_size;
+	iommu_start = aper_size - iommu_size;
+	iommu_bus_base = info.aper_base + iommu_start;
+	bad_dma_addr = iommu_bus_base;
+	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);
 
 	/*
 	 * Unmap the IOMMU part of the GART. The alias of the page is
@@ -814,7 +822,7 @@ void __init gart_iommu_init(void)
 	 * the pages as Not-Present:
 	 */
 	wbinvd();
-	
+
 	/*
 	 * Now all caches are flushed and we can safely enable
 	 * GART hardware. Doing it early leaves the possibility
@@ -838,6 +846,10 @@ void __init gart_iommu_init(void)
 
 	flush_gart();
 	dma_ops = &gart_dma_ops;
+	x86_platform.iommu_shutdown = gart_iommu_shutdown;
+	swiotlb = 0;
+
+	return 0;
 }
 
 void __init gart_parse_options(char *p)
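These assignments assume new hook slots on the x86 ops structures; the header side is outside this excerpt. A sketch of the declarations this series adds to arch/x86/include/asm/x86_init.h (field order illustrative):

	/* Sketch of the header counterpart assumed by these hunks. */
	struct x86_init_iommu {
		int (*iommu_init)(void);
	};

	struct x86_platform_ops {
		unsigned long (*calibrate_tsc)(void);
		unsigned long (*get_wallclock)(void);
		int (*set_wallclock)(unsigned long nowtime);
		void (*iommu_shutdown)(void);
	};

Making gart_iommu_shutdown() static works because nothing calls it by name anymore: it is only reachable through x86_platform.iommu_shutdown, set above and invoked from the reboot path later in this diff.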
@@ -856,7 +868,7 @@ void __init gart_parse_options(char *p)
 #endif
 	if (isdigit(*p) && get_option(&p, &arg))
 		iommu_size = arg;
-	if (!strncmp(p, "fullflush", 8))
+	if (!strncmp(p, "fullflush", 9))
 		iommu_fullflush = 1;
 	if (!strncmp(p, "nofullflush", 11))
 		iommu_fullflush = 0;
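The old length of 8 was one short of the literal's 9 characters, so the final 'h' was never compared and a string such as "fullflusX" was accepted. Deriving the length from the literal avoids hand-counted sizes; a sketch of that idiom (not what the patch does, which simply corrects the count):

	if (!strncmp(p, "fullflush", sizeof("fullflush") - 1))
		iommu_fullflush = 1;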
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -33,7 +33,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
 	dma_addr_t bus = page_to_phys(page) + offset;
 	WARN_ON(size == 0);
 	if (!check_addr("map_single", dev, bus, size))
-		return bad_dma_address;
+		return DMA_ERROR_CODE;
 	flush_write_buffers();
 	return bus;
 }
@@ -103,12 +103,3 @@ struct dma_map_ops nommu_dma_ops = {
 	.sync_sg_for_device = nommu_sync_sg_for_device,
 	.is_phys = 1,
 };
-
-void __init no_iommu_init(void)
-{
-	if (dma_ops)
-		return;
-
-	force_iommu = 0; /* no HW IOMMU */
-	dma_ops = &nommu_dma_ops;
-}
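bad_dma_address was an x86-wide sentinel every dma_map_ops backend had to agree on; DMA_ERROR_CODE is the arch-defined constant that the DMA API's error check understands directly. From a driver's point of view nothing changes: failures are still detected with dma_mapping_error(). A sketch of that consumer side, using only generic DMA API calls:

	dma_addr_t handle;

	handle = dma_map_page(dev, page, 0, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -EIO;	/* nommu_map_page() handed back DMA_ERROR_CODE */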
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -42,18 +42,28 @@ static struct dma_map_ops swiotlb_dma_ops = {
 	.dma_supported = NULL,
 };
 
-void __init pci_swiotlb_init(void)
+/*
+ * pci_swiotlb_init - initialize swiotlb if necessary
+ *
+ * This returns non-zero if we are forced to use swiotlb (by the boot
+ * option).
+ */
+int __init pci_swiotlb_init(void)
 {
+	int use_swiotlb = swiotlb | swiotlb_force;
+
 	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
 #ifdef CONFIG_X86_64
-	if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN))
+	if (!no_iommu && max_pfn > MAX_DMA32_PFN)
 		swiotlb = 1;
 #endif
 	if (swiotlb_force)
 		swiotlb = 1;
+
 	if (swiotlb) {
-		printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
-		swiotlb_init();
+		swiotlb_init(0);
 		dma_ops = &swiotlb_dma_ops;
 	}
+
+	return use_swiotlb;
 }
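Returning swiotlb | swiotlb_force tells the caller whether bounce buffering is already settled, so hardware IOMMU detection can be skipped entirely. A sketch of the intended call site, assuming the pci_iommu_alloc() shape this series moves x86 toward (detection calls abbreviated to the ones visible in this merge):

	void __init pci_iommu_alloc(void)
	{
		if (pci_swiotlb_init())
			return;		/* swiotlb chosen or forced: done */

		gart_iommu_hole_init();
		detect_calgary();
		detect_intel_iommu();
		amd_iommu_detect();
	}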
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -23,7 +23,7 @@
 # include <linux/ctype.h>
 # include <linux/mc146818rtc.h>
 #else
-# include <asm/iommu.h>
+# include <asm/x86_init.h>
 #endif
 
 /*
|
@ -622,7 +622,7 @@ void native_machine_shutdown(void)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#ifdef CONFIG_X86_64
|
#ifdef CONFIG_X86_64
|
||||||
pci_iommu_shutdown();
|
x86_platform.iommu_shutdown();
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -14,10 +14,13 @@
 #include <asm/time.h>
 #include <asm/irq.h>
 #include <asm/tsc.h>
+#include <asm/iommu.h>
 
 void __cpuinit x86_init_noop(void) { }
 void __init x86_init_uint_noop(unsigned int unused) { }
 void __init x86_init_pgd_noop(pgd_t *unused) { }
+int __init iommu_init_noop(void) { return 0; }
+void iommu_shutdown_noop(void) { }
 
 /*
  * The platform setup functions are preset with the default functions
@@ -62,6 +65,10 @@ struct x86_init_ops x86_init __initdata = {
 		.tsc_pre_init = x86_init_noop,
 		.timer_init = hpet_time_init,
 	},
+
+	.iommu = {
+		.iommu_init = iommu_init_noop,
+	},
 };
 
 struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
@@ -72,4 +79,5 @@ struct x86_platform_ops x86_platform = {
 	.calibrate_tsc = native_calibrate_tsc,
 	.get_wallclock = mach_get_cmos_time,
 	.set_wallclock = mach_set_rtc_mmss,
+	.iommu_shutdown = iommu_shutdown_noop,
 };
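With iommu_init preset to a noop that returns 0 and iommu_shutdown to an empty function, callers can invoke both hooks unconditionally. Detection code then only overrides the slot it owns, as the drivers/pci/dmar.c hunk below does for VT-d. A generic sketch of the pattern (the my_iommu_* names are illustrative, not from this patch):

	void __init detect_my_iommu(void)
	{
		if (!my_iommu_probe())		/* cheap early probe, illustrative */
			return;
		x86_init.iommu.iommu_init = my_iommu_init;	/* full init runs later */
	}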
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -56,9 +56,8 @@ config AGP_AMD
 	  X on AMD Irongate, 761, and 762 chipsets.
 
 config AGP_AMD64
-	tristate "AMD Opteron/Athlon64 on-CPU GART support" if !GART_IOMMU
+	tristate "AMD Opteron/Athlon64 on-CPU GART support"
 	depends on AGP && X86
-	default y if GART_IOMMU
 	help
 	  This option gives you AGP support for the GLX component of
 	  X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -645,9 +645,12 @@ void __init detect_intel_iommu(void)
 			       "x2apic and Intr-remapping.\n");
 #endif
 #ifdef CONFIG_DMAR
-		if (ret && !no_iommu && !iommu_detected && !swiotlb &&
-		    !dmar_disabled)
+		if (ret && !no_iommu && !iommu_detected && !dmar_disabled)
 			iommu_detected = 1;
+#endif
+#ifdef CONFIG_X86
+		if (ret)
+			x86_init.iommu.iommu_init = intel_iommu_init;
 #endif
 	}
 	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -3266,7 +3266,7 @@ int __init intel_iommu_init(void)
 	 * Check the need for DMA-remapping initialization now.
 	 * Above initialization will also be used by Interrupt-remapping.
 	 */
-	if (no_iommu || swiotlb || dmar_disabled)
+	if (no_iommu || dmar_disabled)
 		return -ENODEV;
 
 	iommu_init_mempool();
@@ -3287,7 +3287,9 @@ int __init intel_iommu_init(void)
 	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
 
 	init_timer(&unmap_timer);
-	force_iommu = 1;
+#ifdef CONFIG_SWIOTLB
+	swiotlb = 0;
+#endif
 	dma_ops = &intel_dma_ops;
 
 	init_iommu_sysfs();
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -53,6 +53,7 @@ extern void free_bootmem_node(pg_data_t *pgdat,
 				 unsigned long addr,
 				 unsigned long size);
 extern void free_bootmem(unsigned long addr, unsigned long size);
+extern void free_bootmem_late(unsigned long addr, unsigned long size);
 
 /*
  * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE,
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -208,16 +208,9 @@ struct dmar_atsr_unit {
 	u8 include_all:1;		/* include all ports */
 };
 
-/* Intel DMAR initialization functions */
 extern int intel_iommu_init(void);
-#else
-static inline int intel_iommu_init(void)
-{
-#ifdef CONFIG_INTR_REMAP
-	return dmar_dev_scope_init();
-#else
-	return -ENODEV;
-#endif
-}
-#endif /* !CONFIG_DMAR */
+#else /* !CONFIG_DMAR: */
+static inline int intel_iommu_init(void) { return -ENODEV; }
+#endif /* CONFIG_DMAR */
+
 #endif /* __DMAR_H__ */
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -7,6 +7,8 @@ struct device;
 struct dma_attrs;
 struct scatterlist;
 
+extern int swiotlb_force;
+
 /*
  * Maximum allowable number of contiguous slabs to map,
  * must be a power of 2. What is the appropriate value ?
@@ -20,8 +22,7 @@ struct scatterlist;
 */
 #define IO_TLB_SHIFT 11
 
-extern void
-swiotlb_init(void);
+extern void swiotlb_init(int verbose);
 
 extern void
 *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
@@ -88,4 +89,11 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
 extern int
 swiotlb_dma_supported(struct device *hwdev, u64 mask);
 
+#ifdef CONFIG_SWIOTLB
+extern void __init swiotlb_free(void);
+#else
+static inline void swiotlb_free(void) { }
+#endif
+
+extern void swiotlb_print_info(void);
 #endif /* __LINUX_SWIOTLB_H */
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -97,6 +97,8 @@ static phys_addr_t *io_tlb_orig_addr;
 */
 static DEFINE_SPINLOCK(io_tlb_lock);
 
+static int late_alloc;
+
 static int __init
 setup_io_tlb_npages(char *str)
 {
@@ -109,6 +111,7 @@ setup_io_tlb_npages(char *str)
 		++str;
 	if (!strcmp(str, "force"))
 		swiotlb_force = 1;
+
 	return 1;
 }
 __setup("swiotlb=", setup_io_tlb_npages);
@@ -121,8 +124,9 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
 	return phys_to_dma(hwdev, virt_to_phys(address));
 }
 
-static void swiotlb_print_info(unsigned long bytes)
+void swiotlb_print_info(void)
 {
+	unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 	phys_addr_t pstart, pend;
 
 	pstart = virt_to_phys(io_tlb_start);
@@ -140,7 +144,7 @@ static void swiotlb_print_info(unsigned long bytes)
 * structures for the software IO TLB used to implement the DMA API.
 */
 void __init
-swiotlb_init_with_default_size(size_t default_size)
+swiotlb_init_with_default_size(size_t default_size, int verbose)
 {
 	unsigned long i, bytes;
 
@@ -176,14 +180,14 @@ swiotlb_init_with_default_size(size_t default_size)
 	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
 	if (!io_tlb_overflow_buffer)
 		panic("Cannot allocate SWIOTLB overflow buffer!\n");
-
-	swiotlb_print_info(bytes);
+	if (verbose)
+		swiotlb_print_info();
 }
 
 void __init
-swiotlb_init(void)
+swiotlb_init(int verbose)
 {
-	swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
+	swiotlb_init_with_default_size(64 * (1<<20), verbose);	/* default to 64MB */
 }
 
 /*
@@ -260,7 +264,9 @@ swiotlb_late_init_with_default_size(size_t default_size)
 	if (!io_tlb_overflow_buffer)
 		goto cleanup4;
 
-	swiotlb_print_info(bytes);
+	swiotlb_print_info();
+
+	late_alloc = 1;
 
 	return 0;
 
@@ -281,6 +287,32 @@ cleanup1:
 	return -ENOMEM;
 }
 
+void __init swiotlb_free(void)
+{
+	if (!io_tlb_overflow_buffer)
+		return;
+
+	if (late_alloc) {
+		free_pages((unsigned long)io_tlb_overflow_buffer,
+			   get_order(io_tlb_overflow));
+		free_pages((unsigned long)io_tlb_orig_addr,
+			   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
+		free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
+								 sizeof(int)));
+		free_pages((unsigned long)io_tlb_start,
+			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
+	} else {
+		free_bootmem_late(__pa(io_tlb_overflow_buffer),
+				  io_tlb_overflow);
+		free_bootmem_late(__pa(io_tlb_orig_addr),
+				  io_tlb_nslabs * sizeof(phys_addr_t));
+		free_bootmem_late(__pa(io_tlb_list),
+				  io_tlb_nslabs * sizeof(int));
+		free_bootmem_late(__pa(io_tlb_start),
+				  io_tlb_nslabs << IO_TLB_SHIFT);
+	}
+}
+
 static int is_swiotlb_buffer(phys_addr_t paddr)
 {
 	return paddr >= virt_to_phys(io_tlb_start) &&
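swiotlb_init(0) plus the new swiotlb_free() let x86 set up bounce buffers speculatively: if a hardware IOMMU initializes later, the 64MB bootmem allocation is handed back; otherwise the banner is finally printed. A sketch of the late decision point, assuming the pci_iommu_init() form this series converges on:

	static int __init pci_iommu_init(void)
	{
		x86_init.iommu.iommu_init();	/* GART, VT-d, ... or the noop */

		if (swiotlb) {
			printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
			swiotlb_print_info();
		} else
			swiotlb_free();		/* a HW IOMMU won; release the buffer */

		return 0;
	}
	/* must run after the PCI subsystem */
	rootfs_initcall(pci_iommu_init);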
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -143,6 +143,30 @@ unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
 	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
 }
 
+/*
+ * free_bootmem_late - free bootmem pages directly to page allocator
+ * @addr: starting address of the range
+ * @size: size of the range in bytes
+ *
+ * This is only useful when the bootmem allocator has already been torn
+ * down, but we are still initializing the system. Pages are given directly
+ * to the page allocator, no bootmem metadata is updated because it is gone.
+ */
+void __init free_bootmem_late(unsigned long addr, unsigned long size)
+{
+	unsigned long cursor, end;
+
+	kmemleak_free_part(__va(addr), size);
+
+	cursor = PFN_UP(addr);
+	end = PFN_DOWN(addr + size);
+
+	for (; cursor < end; cursor++) {
+		__free_pages_bootmem(pfn_to_page(cursor), 0);
+		totalram_pages++;
+	}
+}
+
 static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 {
 	int aligned;
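free_bootmem_late() is the primitive that makes the swiotlb_free() path above possible: the bounce buffer was reserved through bootmem, but by the time it turns out to be unnecessary, bootmem itself has been retired and plain free_bootmem() would touch freed metadata. A minimal usage sketch (the buffer and its size are illustrative):

	void *buf = alloc_bootmem_low(1 << 20);	/* early: bootmem still up */

	/* later, once mem_init() has torn bootmem down: */
	free_bootmem_late(__pa(buf), 1 << 20);	/* pages go straight to the page allocator */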