iommu: Sync files for kernel upgrade
Reset the following files:
arm-smmu.c
iommu.c
include/linux/iommu.h
include/trace/events/iommu.h
to the git merge base between 3.18 and 4.4:
b2776bf714
Change-Id: I9aa58e08125e3a72d1e9742e26d4a6fab34b0ed5
Signed-off-by: Patrick Daly <pdaly@codeaurora.org>
parent 0ff886d8f4
commit 86a828d820
4 changed files with 794 additions and 1126 deletions
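For context, a reset like this is typically produced by checking the files out from the merge-base commit rather than by editing them by hand. A minimal sketch of that workflow, assuming mainline tag names and the usual in-tree paths for the four files (neither is spelled out in the commit itself):

    # Resolve the common ancestor of the two releases; for mainline
    # v3.18 and v4.4 this is b2776bf714 ("Linux 3.18"), matching the
    # commit message above.
    base=$(git merge-base v3.18 v4.4)

    # Check the four files out of that commit; this stages the
    # reset content in the index, ready to commit.
    git checkout "$base" -- \
        drivers/iommu/arm-smmu.c \
        drivers/iommu/iommu.c \
        include/linux/iommu.h \
        include/trace/events/iommu.h

    git commit -s -m "iommu: Sync files for kernel upgrade"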
arm-smmu.c
File diff suppressed because it is too large

iommu.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
* Author: Joerg Roedel <jroedel@suse.de>
* Author: Joerg Roedel <joerg.roedel@amd.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -16,7 +16,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/

#define pr_fmt(fmt) "iommu: " fmt
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/device.h>
#include <linux/kernel.h>
@@ -51,8 +51,6 @@ struct iommu_group {
void (*iommu_data_release)(void *iommu_data);
char *name;
int id;
struct iommu_domain *default_domain;
struct iommu_domain *domain;
};

struct iommu_device {
@@ -77,15 +75,6 @@ struct iommu_group_attribute iommu_group_attr_##_name = \
#define to_iommu_group(_kobj) \
container_of(_kobj, struct iommu_group, kobj)

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
struct iommu_group *group);

static ssize_t iommu_group_attr_show(struct kobject *kobj,
struct attribute *__attr, char *buf)
{
@@ -139,8 +128,6 @@ static void iommu_group_release(struct kobject *kobj)
{
struct iommu_group *group = to_iommu_group(kobj);

pr_debug("Releasing group %d\n", group->id);

if (group->iommu_data_release)
group->iommu_data_release(group->iommu_data);

@@ -148,9 +135,6 @@ static void iommu_group_release(struct kobject *kobj)
ida_remove(&iommu_group_ida, group->id);
mutex_unlock(&iommu_group_mutex);

if (group->default_domain)
iommu_domain_free(group->default_domain);

kfree(group->name);
kfree(group);
}
@@ -223,8 +207,6 @@ again:
*/
kobject_put(&group->kobj);

pr_debug("Allocated group %d\n", group->id);

return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
@@ -325,52 +307,6 @@ int iommu_group_set_name(struct iommu_group *group, const char *name)
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_group_create_direct_mappings(struct iommu_group *group,
struct device *dev)
{
struct iommu_domain *domain = group->default_domain;
struct iommu_dm_region *entry;
struct list_head mappings;
unsigned long pg_size;
int ret = 0;

if (!domain || domain->type != IOMMU_DOMAIN_DMA)
return 0;

BUG_ON(!domain->ops->pgsize_bitmap);

pg_size = 1UL << __ffs(domain->ops->pgsize_bitmap);
INIT_LIST_HEAD(&mappings);

iommu_get_dm_regions(dev, &mappings);

/* We need to consider overlapping regions for different devices */
list_for_each_entry(entry, &mappings, list) {
dma_addr_t start, end, addr;

start = ALIGN(entry->start, pg_size);
end = ALIGN(entry->start + entry->length, pg_size);

for (addr = start; addr < end; addr += pg_size) {
phys_addr_t phys_addr;

phys_addr = iommu_iova_to_phys(domain, addr);
if (phys_addr)
continue;

ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
if (ret)
goto out;
}

}

out:
iommu_put_dm_regions(dev, &mappings);

return ret;
}

/**
* iommu_group_add_device - add a device to an iommu group
* @group: the group into which to add the device (reference should be held)
@@ -427,12 +363,8 @@ rename:

dev->iommu_group = group;

iommu_group_create_direct_mappings(group, dev);

mutex_lock(&group->mutex);
list_add_tail(&device->list, &group->devices);
if (group->domain)
__iommu_attach_device(group->domain, dev);
mutex_unlock(&group->mutex);

/* Notify any listeners about change to group. */
@@ -440,9 +372,6 @@ rename:
IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

trace_add_device_to_group(group->id, dev);

pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);

return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);
@@ -459,8 +388,6 @@ void iommu_group_remove_device(struct device *dev)
struct iommu_group *group = dev->iommu_group;
struct iommu_device *tmp_device, *device = NULL;

pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);

/* Pre-notify listeners that a device is being removed. */
blocking_notifier_call_chain(&group->notifier,
IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
@@ -490,17 +417,6 @@ void iommu_group_remove_device(struct device *dev)
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
struct iommu_device *entry;
int ret = 0;

list_for_each_entry(entry, &group->devices, list)
ret++;

return ret;
}

/**
* iommu_group_for_each_dev - iterate over each device in the group
* @group: the group
@@ -512,30 +428,19 @@ static int iommu_group_device_count(struct iommu_group *group)
* The group->mutex is held across callbacks, which will block calls to
* iommu_group_add/remove_device.
*/
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
int (*fn)(struct device *, void *))
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
int (*fn)(struct device *, void *))
{
struct iommu_device *device;
int ret = 0;

mutex_lock(&group->mutex);
list_for_each_entry(device, &group->devices, list) {
ret = fn(device->dev, data);
if (ret)
break;
}
return ret;
}


int iommu_group_for_each_dev(struct iommu_group *group, void *data,
int (*fn)(struct device *, void *))
{
int ret;

mutex_lock(&group->mutex);
ret = __iommu_group_for_each_dev(group, data, fn);
mutex_unlock(&group->mutex);

return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
@@ -727,36 +632,17 @@ static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
return data->group != NULL;
}

/*
* Generic device_group call-back function. It just allocates one
* iommu-group per device.
*/
struct iommu_group *generic_device_group(struct device *dev)
{
struct iommu_group *group;

group = iommu_group_alloc();
if (IS_ERR(group))
return NULL;

return group;
}

/*
* Use standard PCI bus topology, isolation features, and DMA alias quirks
* to find or create an IOMMU group for a device.
*/
struct iommu_group *pci_device_group(struct device *dev)
static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct group_for_pci_data data;
struct pci_bus *bus;
struct iommu_group *group = NULL;
u64 devfns[4] = { 0 };

if (WARN_ON(!dev_is_pci(dev)))
return ERR_PTR(-EINVAL);

/*
* Find the upstream DMA alias for the device. A device must not
* be aliased due to topology in order to have its own IOMMU group.
@@ -806,11 +692,7 @@ struct iommu_group *pci_device_group(struct device *dev)
return group;

/* No shared group found, allocate new */
group = iommu_group_alloc();
if (IS_ERR(group))
return NULL;

return group;
return iommu_group_alloc();
}

/**
@@ -825,7 +707,6 @@ struct iommu_group *pci_device_group(struct device *dev)
*/
struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
struct iommu_group *group;
int ret;

@@ -833,24 +714,14 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
if (group)
return group;

group = ERR_PTR(-EINVAL);
if (!dev_is_pci(dev))
return ERR_PTR(-EINVAL);

if (ops && ops->device_group)
group = ops->device_group(dev);
group = iommu_group_get_for_pci_dev(to_pci_dev(dev));

if (IS_ERR(group))
return group;

/*
* Try to allocate a default domain - needs support from the
* IOMMU driver.
*/
if (!group->default_domain) {
group->default_domain = __iommu_domain_alloc(dev->bus,
IOMMU_DOMAIN_DMA);
group->domain = group->default_domain;
}

ret = iommu_group_add_device(group, dev);
if (ret) {
iommu_group_put(group);
@@ -860,42 +731,17 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
return group;
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
return group->default_domain;
}

static int add_iommu_group(struct device *dev, void *data)
{
struct iommu_callback_data *cb = data;
const struct iommu_ops *ops = cb->ops;
int ret;

if (!ops->add_device)
return 0;
return -ENODEV;

WARN_ON(dev->iommu_group);

ret = ops->add_device(dev);

/*
* We ignore -ENODEV errors for now, as they just mean that the
* device is not translated by an IOMMU. We still care about
* other errors and fail to initialize when they happen.
*/
if (ret == -ENODEV)
ret = 0;

return ret;
}

static int remove_iommu_group(struct device *dev, void *data)
{
struct iommu_callback_data *cb = data;
const struct iommu_ops *ops = cb->ops;

if (ops->remove_device && dev->iommu_group)
ops->remove_device(dev);
ops->add_device(dev);

return 0;
}
@@ -915,7 +761,7 @@ static int iommu_bus_notifier(struct notifier_block *nb,
if (action == BUS_NOTIFY_ADD_DEVICE) {
if (ops->add_device)
return ops->add_device(dev);
} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
} else if (action == BUS_NOTIFY_DEL_DEVICE) {
if (ops->remove_device && dev->iommu_group) {
ops->remove_device(dev);
return 0;
@@ -968,25 +814,11 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
nb->notifier_call = iommu_bus_notifier;

err = bus_register_notifier(bus, nb);
if (err)
goto out_free;

err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
if (err)
goto out_err;


return 0;

out_err:
/* Clean up */
bus_for_each_dev(bus, NULL, &cb, remove_iommu_group);
bus_unregister_notifier(bus, nb);

out_free:
kfree(nb);

return err;
if (err) {
kfree(nb);
return err;
}
return bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
}

/**
@@ -1004,19 +836,13 @@ out_free:
*/
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
int err;

if (bus->iommu_ops != NULL)
return -EBUSY;

bus->iommu_ops = ops;

/* Do IOMMU specific setup for this bus-type */
err = iommu_bus_init(bus, ops);
if (err)
bus->iommu_ops = NULL;

return err;
return iommu_bus_init(bus, ops);
}
EXPORT_SYMBOL_GPL(bus_set_iommu);

@@ -1058,38 +884,43 @@ void iommu_set_fault_handler(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
unsigned type)
struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
struct iommu_domain *domain;
int ret;

if (bus == NULL || bus->iommu_ops == NULL)
return NULL;

domain = bus->iommu_ops->domain_alloc(type);
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return NULL;

domain->ops = bus->iommu_ops;
domain->type = type;
domain->ops = bus->iommu_ops;

ret = domain->ops->domain_init(domain);
if (ret)
goto out_free;

return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
out_free:
kfree(domain);

return NULL;
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
domain->ops->domain_free(domain);
if (likely(domain->ops->domain_destroy != NULL))
domain->ops->domain_destroy(domain);

kfree(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

static int __iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
int ret;
if (unlikely(domain->ops->attach_dev == NULL))
@@ -1100,38 +931,9 @@ static int __iommu_attach_device(struct iommu_domain *domain,
trace_attach_device_to_domain(dev);
return ret;
}

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
struct iommu_group *group;
int ret;

group = iommu_group_get(dev);
/* FIXME: Remove this when groups a mandatory for iommu drivers */
if (group == NULL)
return __iommu_attach_device(domain, dev);

/*
* We have a group - lock it to make sure the device-count doesn't
* change while we are attaching
*/
mutex_lock(&group->mutex);
ret = -EINVAL;
if (iommu_group_device_count(group) != 1)
goto out_unlock;

ret = __iommu_attach_group(domain, group);

out_unlock:
mutex_unlock(&group->mutex);
iommu_group_put(group);

return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

static void __iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
if (unlikely(domain->ops->detach_dev == NULL))
return;
@@ -1139,48 +941,8 @@ static void __iommu_detach_device(struct iommu_domain *domain,
domain->ops->detach_dev(domain, dev);
trace_detach_device_from_domain(dev);
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
struct iommu_group *group;

group = iommu_group_get(dev);
/* FIXME: Remove this when groups a mandatory for iommu drivers */
if (group == NULL)
return __iommu_detach_device(domain, dev);

mutex_lock(&group->mutex);
if (iommu_group_device_count(group) != 1) {
WARN_ON(1);
goto out_unlock;
}

__iommu_detach_group(domain, group);

out_unlock:
mutex_unlock(&group->mutex);
iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
struct iommu_domain *domain;
struct iommu_group *group;

group = iommu_group_get(dev);
/* FIXME: Remove this when groups a mandatory for iommu drivers */
if (group == NULL)
return NULL;

domain = group->domain;

iommu_group_put(group);

return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);

/*
* IOMMU groups are really the natrual working unit of the IOMMU, but
* the IOMMU API works on domains and devices. Bridge that gap by
@@ -1195,34 +957,13 @@ static int iommu_group_do_attach_device(struct device *dev, void *data)
{
struct iommu_domain *domain = data;

return __iommu_attach_device(domain, dev);
}

static int __iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group)
{
int ret;

if (group->default_domain && group->domain != group->default_domain)
return -EBUSY;

ret = __iommu_group_for_each_dev(group, domain,
iommu_group_do_attach_device);
if (ret == 0)
group->domain = domain;

return ret;
return iommu_attach_device(domain, dev);
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
int ret;

mutex_lock(&group->mutex);
ret = __iommu_attach_group(domain, group);
mutex_unlock(&group->mutex);

return ret;
return iommu_group_for_each_dev(group, domain,
iommu_group_do_attach_device);
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

@@ -1230,40 +971,14 @@ static int iommu_group_do_detach_device(struct device *dev, void *data)
{
struct iommu_domain *domain = data;

__iommu_detach_device(domain, dev);
iommu_detach_device(domain, dev);

return 0;
}

static void __iommu_detach_group(struct iommu_domain *domain,
struct iommu_group *group)
{
int ret;

if (!group->default_domain) {
__iommu_group_for_each_dev(group, domain,
iommu_group_do_detach_device);
group->domain = NULL;
return;
}

if (group->domain == group->default_domain)
return;

/* Detach by re-attaching to the default domain */
ret = __iommu_group_for_each_dev(group, group->default_domain,
iommu_group_do_attach_device);
if (ret != 0)
WARN_ON(1);
else
group->domain = group->default_domain;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
mutex_lock(&group->mutex);
__iommu_detach_group(domain, group);
mutex_unlock(&group->mutex);
iommu_group_for_each_dev(group, domain, iommu_group_do_detach_device);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

@@ -1320,9 +1035,6 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
domain->ops->pgsize_bitmap == 0UL))
return -ENODEV;

if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
return -EINVAL;

/* find out the minimum page size supported */
min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

@@ -1358,7 +1070,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
if (ret)
iommu_unmap(domain, orig_iova, orig_size - size);
else
trace_map(orig_iova, paddr, orig_size);
trace_map(iova, paddr, size);

return ret;
}
@@ -1368,15 +1080,11 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
size_t unmapped_page, unmapped = 0;
unsigned int min_pagesz;
unsigned long orig_iova = iova;

if (unlikely(domain->ops->unmap == NULL ||
domain->ops->pgsize_bitmap == 0UL))
return -ENODEV;

if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
return -EINVAL;

/* find out the minimum page size supported */
min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

@@ -1411,53 +1119,11 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
unmapped += unmapped_page;
}

trace_unmap(orig_iova, size, unmapped);
trace_unmap(iova, 0, size);
return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot)
{
struct scatterlist *s;
size_t mapped = 0;
unsigned int i, min_pagesz;
int ret;

if (unlikely(domain->ops->pgsize_bitmap == 0UL))
return 0;

min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

for_each_sg(sg, s, nents, i) {
phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;

/*
* We are mapping on IOMMU page boundaries, so offset within
* the page must be 0. However, the IOMMU may support pages
* smaller than PAGE_SIZE, so s->offset may still represent
* an offset of that boundary within the CPU page.
*/
if (!IS_ALIGNED(s->offset, min_pagesz))
goto out_err;

ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
if (ret)
goto out_err;

mapped += s->length;
}

return mapped;

out_err:
/* undo mappings already done */
iommu_unmap(domain, iova, mapped);

return 0;

}
EXPORT_SYMBOL_GPL(default_iommu_map_sg);

int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot)
@@ -1490,7 +1156,7 @@ static int __init iommu_init(void)

return 0;
}
core_initcall(iommu_init);
arch_initcall(iommu_init);

int iommu_domain_get_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
@@ -1556,72 +1222,3 @@ int iommu_domain_set_attr(struct iommu_domain *domain,
return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);

void iommu_get_dm_regions(struct device *dev, struct list_head *list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;

if (ops && ops->get_dm_regions)
ops->get_dm_regions(dev, list);
}

void iommu_put_dm_regions(struct device *dev, struct list_head *list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;

if (ops && ops->put_dm_regions)
ops->put_dm_regions(dev, list);
}

/* Request that a device is direct mapped by the IOMMU */
int iommu_request_dm_for_dev(struct device *dev)
{
struct iommu_domain *dm_domain;
struct iommu_group *group;
int ret;

/* Device must already be in a group before calling this function */
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
return PTR_ERR(group);

mutex_lock(&group->mutex);

/* Check if the default domain is already direct mapped */
ret = 0;
if (group->default_domain &&
group->default_domain->type == IOMMU_DOMAIN_IDENTITY)
goto out;

/* Don't change mappings of existing devices */
ret = -EBUSY;
if (iommu_group_device_count(group) != 1)
goto out;

/* Allocate a direct mapped domain */
ret = -ENOMEM;
dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY);
if (!dm_domain)
goto out;

/* Attach the device to the domain */
ret = __iommu_attach_group(dm_domain, group);
if (ret) {
iommu_domain_free(dm_domain);
goto out;
}

/* Make the direct mapped domain the default for this group */
if (group->default_domain)
iommu_domain_free(group->default_domain);
group->default_domain = dm_domain;

pr_info("Using direct mapping for device %s\n", dev_name(dev));

ret = 0;
out:
mutex_unlock(&group->mutex);
iommu_group_put(group);

return ret;
}

include/linux/iommu.h
@@ -21,15 +21,13 @@

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <trace/events/iommu.h>

#define IOMMU_READ (1 << 0)
#define IOMMU_WRITE (1 << 1)
#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC (1 << 3)
#define IOMMU_EXEC (1 << 3)

struct iommu_ops;
struct iommu_group;
@@ -51,44 +49,18 @@ struct iommu_domain_geometry {
bool force_aperture; /* DMA only allowed in mappable range? */
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING (1U << 0) /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API
implementation */
#define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */

/*
* This are the possible domain-types
*
* IOMMU_DOMAIN_BLOCKED - All DMA is blocked, can be used to isolate
* devices
* IOMMU_DOMAIN_IDENTITY - DMA addresses are system physical addresses
* IOMMU_DOMAIN_UNMANAGED - DMA mappings managed by IOMMU-API user, used
* for VMs
* IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations.
* This flag allows IOMMU drivers to implement
* certain optimizations for these domains
*/
#define IOMMU_DOMAIN_BLOCKED (0U)
#define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \
__IOMMU_DOMAIN_DMA_API)

struct iommu_domain {
unsigned type;
const struct iommu_ops *ops;
void *priv;
iommu_fault_handler_t handler;
void *handler_token;
struct iommu_domain_geometry geometry;
void *iova_cookie;
};

enum iommu_cap {
IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA
transactions */
IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */
IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */
};

/*
@@ -115,20 +87,6 @@ enum iommu_attr {
DOMAIN_ATTR_MAX,
};

/**
* struct iommu_dm_region - descriptor for a direct mapped memory region
* @list: Linked list pointers
* @start: System physical start address of the region
* @length: Length of the region in bytes
* @prot: IOMMU Protection flags (READ/WRITE/...)
*/
struct iommu_dm_region {
struct list_head list;
phys_addr_t start;
size_t length;
int prot;
};

#ifdef CONFIG_IOMMU_API

/**
@@ -139,45 +97,32 @@ struct iommu_dm_region {
* @detach_dev: detach device from an iommu domain
* @map: map a physically contiguous memory region to an iommu domain
* @unmap: unmap a physically contiguous memory region from an iommu domain
* @map_sg: map a scatter-gather list of physically contiguous memory chunks
* to an iommu domain
* @iova_to_phys: translate iova to physical address
* @add_device: add device to iommu grouping
* @remove_device: remove device from iommu grouping
* @domain_get_attr: Query domain attributes
* @domain_set_attr: Change domain attributes
* @of_xlate: add OF master IDs to iommu grouping
* @pgsize_bitmap: bitmap of supported page sizes
* @priv: per-instance data private to the iommu driver
*/
struct iommu_ops {
bool (*capable)(enum iommu_cap);

/* Domain allocation and freeing by the iommu driver */
struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
void (*domain_free)(struct iommu_domain *);

int (*domain_init)(struct iommu_domain *domain);
void (*domain_destroy)(struct iommu_domain *domain);
int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
int (*map)(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
size_t size);
size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot);
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
int (*add_device)(struct device *dev);
void (*remove_device)(struct device *dev);
struct iommu_group *(*device_group)(struct device *dev);
int (*device_group)(struct device *dev, unsigned int *groupid);
int (*domain_get_attr)(struct iommu_domain *domain,
enum iommu_attr attr, void *data);
int (*domain_set_attr)(struct iommu_domain *domain,
enum iommu_attr attr, void *data);

/* Request/Free a list of direct mapping requirements for a device */
void (*get_dm_regions)(struct device *dev, struct list_head *list);
void (*put_dm_regions)(struct device *dev, struct list_head *list);

/* Window handling functions */
int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot);
@@ -187,12 +132,7 @@ struct iommu_ops {
/* Get the numer of window per domain */
u32 (*domain_get_windows)(struct iommu_domain *domain);

#ifdef CONFIG_OF_IOMMU
int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
#endif

unsigned long pgsize_bitmap;
void *priv;
};

#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
@@ -212,22 +152,14 @@ extern int iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
struct device *dev);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size);
extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg,unsigned int nents,
int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);

extern void iommu_get_dm_regions(struct device *dev, struct list_head *list);
extern void iommu_put_dm_regions(struct device *dev, struct list_head *list);
extern int iommu_request_dm_for_dev(struct device *dev);

extern int iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
@@ -251,7 +183,6 @@ extern int iommu_group_unregister_notifier(struct iommu_group *group,
struct notifier_block *nb);
extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
void *data);
@@ -259,7 +190,7 @@ extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
void *data);
struct device *iommu_device_create(struct device *parent, void *drvdata,
const struct attribute_group **groups,
const char *fmt, ...) __printf(4, 5);
const char *fmt, ...);
void iommu_device_destroy(struct device *dev);
int iommu_device_link(struct device *dev, struct device *link);
void iommu_device_unlink(struct device *dev, struct device *link);
@@ -310,18 +241,6 @@ static inline int report_iommu_fault(struct iommu_domain *domain,
return ret;
}

static inline size_t iommu_map_sg(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,
unsigned int nents, int prot)
{
return domain->ops->map_sg(domain, iova, sg, nents, prot);
}

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);

#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
@@ -362,11 +281,6 @@ static inline void iommu_detach_device(struct iommu_domain *domain,
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
return NULL;
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, int gfp_order, int prot)
{
@@ -379,13 +293,6 @@ static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
return -ENODEV;
}

static inline size_t iommu_map_sg(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,
unsigned int nents, int prot)
{
return -ENODEV;
}

static inline int iommu_domain_window_enable(struct iommu_domain *domain,
u32 wnd_nr, phys_addr_t paddr,
u64 size, int prot)
@@ -408,21 +315,6 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
{
}

static inline void iommu_get_dm_regions(struct device *dev,
struct list_head *list)
{
}

static inline void iommu_put_dm_regions(struct device *dev,
struct list_head *list)
{
}

static inline int iommu_request_dm_for_dev(struct device *dev)
{
return -ENODEV;
}

static inline int iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group)
{

include/trace/events/iommu.h
@@ -83,7 +83,7 @@ DEFINE_EVENT(iommu_device_event, detach_device_from_domain,
TP_ARGS(dev)
);

TRACE_EVENT(map,
DECLARE_EVENT_CLASS(iommu_map_unmap,

TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),

@@ -92,7 +92,7 @@ TRACE_EVENT(map,
TP_STRUCT__entry(
__field(u64, iova)
__field(u64, paddr)
__field(size_t, size)
__field(int, size)
),

TP_fast_assign(
@@ -101,31 +101,26 @@ TRACE_EVENT(map,
__entry->size = size;
),

TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=%zu",
TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=0x%x",
__entry->iova, __entry->paddr, __entry->size
)
);

TRACE_EVENT(unmap,
DEFINE_EVENT(iommu_map_unmap, map,

TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size),
TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),

TP_ARGS(iova, size, unmapped_size),
TP_ARGS(iova, paddr, size)
);

TP_STRUCT__entry(
__field(u64, iova)
__field(size_t, size)
__field(size_t, unmapped_size)
),
DEFINE_EVENT_PRINT(iommu_map_unmap, unmap,

TP_fast_assign(
__entry->iova = iova;
__entry->size = size;
__entry->unmapped_size = unmapped_size;
),
TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),

TP_printk("IOMMU: iova=0x%016llx size=%zu unmapped_size=%zu",
__entry->iova, __entry->size, __entry->unmapped_size
TP_ARGS(iova, paddr, size),

TP_printk("IOMMU: iova=0x%016llx size=0x%x",
__entry->iova, __entry->size
)
);