#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
}
}
-static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
-{
- struct arm_smmu_domain *smmu_domain = cookie;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
- unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
-
- /* Ensure new page tables are visible to the hardware walker */
- if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
- dsb(ishst);
- } else {
- /*
- * If the SMMU can't walk tables in the CPU caches, treat them
- * like non-coherent DMA since we need to flush the new entries
- * all the way out to memory. There's no possibility of
- * recursion here as the SMMU table walker will not be wired
- * through another SMMU.
- */
- dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
- DMA_TO_DEVICE);
- }
-}
-
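
[Editor's note] The removed hook is not lost functionality: table-walk cache maintenance moves into the io-pgtable core, which is why ->flush_pgtable disappears from arm_smmu_gather_ops below and the pgtbl_cfg later gains .iommu_dev = smmu->dev. The core treats a non-coherent walker like any other non-coherent DMA master. A minimal sketch of that idea (illustrative names, not the actual io-pgtable internals), assuming the table memory has been mapped with the DMA API:

	#include <linux/dma-mapping.h>

	/*
	 * Illustrative only: publish an updated PTE to a possibly
	 * non-coherent table walker. On coherent systems the sync is
	 * essentially a no-op; on non-coherent ones it cleans the line
	 * out to memory, replacing the driver's old dsb()-versus-
	 * dma_map_page() decision above.
	 */
	static void example_sync_pte(struct device *iommu_dev,
				     dma_addr_t table_dma, u64 *ptep,
				     u64 pte, unsigned long offset)
	{
		WRITE_ONCE(*ptep, pte);
		dma_sync_single_for_device(iommu_dev, table_dma + offset,
					   sizeof(pte), DMA_TO_DEVICE);
	}
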
static struct iommu_gather_ops arm_smmu_gather_ops = {
.tlb_flush_all = arm_smmu_tlb_inv_context,
.tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
.tlb_sync = arm_smmu_tlb_sync,
- .flush_pgtable = arm_smmu_flush_pgtable,
};
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
.ias = ias,
.oas = oas,
.tlb = &arm_smmu_gather_ops,
+ .iommu_dev = smmu->dev,
};
smmu_domain->smmu = smmu;
kfree(data);
}
-static int arm_smmu_add_pci_device(struct pci_dev *pdev)
+static int arm_smmu_init_pci_device(struct pci_dev *pdev,
+ struct iommu_group *group)
{
- int i, ret;
- u16 sid;
- struct iommu_group *group;
struct arm_smmu_master_cfg *cfg;
-
- group = iommu_group_get_for_dev(&pdev->dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
+ u16 sid;
+ int i;
cfg = iommu_group_get_iommudata(group);
if (!cfg) {
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
- if (!cfg) {
- ret = -ENOMEM;
- goto out_put_group;
- }
+ if (!cfg)
+ return -ENOMEM;
iommu_group_set_iommudata(group, cfg,
__arm_smmu_release_pci_iommudata);
}
- if (cfg->num_streamids >= MAX_MASTER_STREAMIDS) {
- ret = -ENOSPC;
- goto out_put_group;
- }
+ if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
+ return -ENOSPC;
/*
 * Assume Stream ID == Requester ID for now.
 * We need a way to describe the ID mappings in FDT.
 */
pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
for (i = 0; i < cfg->num_streamids; ++i)
	if (cfg->streamids[i] == sid)
		break;

/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
if (i == cfg->num_streamids)
	cfg->streamids[cfg->num_streamids++] = sid;
return 0;
-out_put_group:
- iommu_group_put(group);
- return ret;
}
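
[Editor's note] For context on the "Stream ID == Requester ID" assumption: pci_for_each_dma_alias() invokes a callback for every requester ID the device may present upstream, including bridge aliases. The driver's existing helper is essentially the following; because it always returns 0 the walk continues, so sid ends up holding the last alias seen:

	/* Record each alias; returning 0 keeps the walk going. */
	static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias,
					  void *data)
	{
		*((u16 *)data) = alias;
		return 0;
	}
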
-static int arm_smmu_add_platform_device(struct device *dev)
+static int arm_smmu_init_platform_device(struct device *dev,
+ struct iommu_group *group)
{
- struct iommu_group *group;
- struct arm_smmu_master *master;
struct arm_smmu_device *smmu = find_smmu_for_device(dev);
+ struct arm_smmu_master *master;
if (!smmu)
	return -ENODEV;

master = find_smmu_master(smmu, dev->of_node);
if (!master)
	return -ENODEV;
- /* No automatic group creation for platform devices */
- group = iommu_group_alloc();
- if (IS_ERR(group))
- return PTR_ERR(group);
-
iommu_group_set_iommudata(group, &master->cfg, NULL);
- return iommu_group_add_device(group, dev);
+
+ return 0;
}
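
[Editor's note] find_smmu_master() is the existing lookup backing this function: masters registered from the SMMU's "mmu-masters" DT property are kept in a per-instance rb-tree keyed by device_node pointer. For reference, the helper is essentially:

	static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
							struct device_node *dev_node)
	{
		struct rb_node *node = smmu->masters.rb_node;

		while (node) {
			struct arm_smmu_master *master =
				container_of(node, struct arm_smmu_master, node);

			if (dev_node < master->of_node)
				node = node->rb_left;
			else if (dev_node > master->of_node)
				node = node->rb_right;
			else
				return master;
		}
		return NULL;
	}
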
static int arm_smmu_add_device(struct device *dev)
{
- if (dev_is_pci(dev))
- return arm_smmu_add_pci_device(to_pci_dev(dev));
+ struct iommu_group *group;
- return arm_smmu_add_platform_device(dev);
+ group = iommu_group_get_for_dev(dev);
+ if (IS_ERR(group))
+ return PTR_ERR(group);
+
+ return 0;
}
static void arm_smmu_remove_device(struct device *dev)
iommu_group_remove_device(dev);
}
+static struct iommu_group *arm_smmu_device_group(struct device *dev)
+{
+ struct iommu_group *group;
+ int ret;
+
+ if (dev_is_pci(dev))
+ group = pci_device_group(dev);
+ else
+ group = generic_device_group(dev);
+
+ if (IS_ERR(group))
+ return group;
+
+ if (dev_is_pci(dev))
+ ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
+ else
+ ret = arm_smmu_init_platform_device(dev, group);
+
+ if (ret) {
+ iommu_group_put(group);
+ group = ERR_PTR(ret);
+ }
+
+ return group;
+}
+
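[Editor's note] arm_smmu_device_group() plugs into the IOMMU core via the new .device_group callback: iommu_group_get_for_dev(), now called directly from arm_smmu_add_device(), resolves or creates the group and adds the device itself, so the driver-side PCI/platform split reduces to the per-type init helpers above. The core path is roughly the following (simplified, not verbatim from drivers/iommu/iommu.c):

	struct iommu_group *iommu_group_get_for_dev(struct device *dev)
	{
		const struct iommu_ops *ops = dev->bus->iommu_ops;
		struct iommu_group *group;
		int ret;

		/* Reuse an existing group if the device already has one */
		group = iommu_group_get(dev);
		if (group)
			return group;

		/* Otherwise ask the driver, e.g. arm_smmu_device_group() */
		group = ops->device_group(dev);
		if (IS_ERR(group))
			return group;

		ret = iommu_group_add_device(group, dev);
		if (ret) {
			iommu_group_put(group);
			return ERR_PTR(ret);
		}
		return group;
	}
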
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
.iova_to_phys = arm_smmu_iova_to_phys,
.add_device = arm_smmu_add_device,
.remove_device = arm_smmu_remove_device,
+ .device_group = arm_smmu_device_group,
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
unsigned long size;
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
u32 id;
+ bool cttw_dt, cttw_reg;
dev_notice(smmu->dev, "probing hardware configuration...\n");
dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);
dev_notice(smmu->dev, "\taddress translation ops\n");
}
- if (id & ID0_CTTW) {
+ /*
+ * In order for DMA API calls to work properly, we must defer to what
+ * the DT says about coherency, regardless of what the hardware claims.
+ * Fortunately, this also opens up a workaround for systems where the
+ * ID register value has ended up configured incorrectly.
+ */
+ cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
+ cttw_reg = !!(id & ID0_CTTW);
+ if (cttw_dt)
smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
- dev_notice(smmu->dev, "\tcoherent table walk\n");
- }
+ if (cttw_dt || cttw_reg)
+ dev_notice(smmu->dev, "\t%scoherent table walk\n",
+ cttw_dt ? "" : "non-");
+ if (cttw_dt != cttw_reg)
+ dev_notice(smmu->dev,
+ "\t(IDR0.CTTW overridden by dma-coherent property)\n");
if (id & ID0_SMS) {
u32 smr, sid, mask;
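
[Editor's note] The CTTW override above matters because the DMA API, not the SMMU driver, now performs table maintenance, and the DMA API keys its coherency decision off the "dma-coherent" DT property via of_dma_is_coherent() (hence the new of_address.h include). That helper is roughly the following walk up the tree (simplified sketch):

	/* Simplified: true if the node or any ancestor says "dma-coherent" */
	static bool example_of_dma_is_coherent(struct device_node *np)
	{
		struct device_node *node = of_node_get(np);

		while (node) {
			if (of_property_read_bool(node, "dma-coherent")) {
				of_node_put(node);
				return true;
			}
			/* Drops the ref on node, takes one on its parent */
			node = of_get_next_parent(node);
		}
		return false;
	}
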