git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - drivers/iommu/intel-iommu.c
Merge branches 'x86/vt-d', 'arm/omap', 'arm/smmu', 's390', 'core' and 'x86/amd' into...
[karo-tx-linux.git] / drivers / iommu / intel-iommu.c
index 884d9133c63697d48baf8b5b3668419b939131f3..16b243ead874149d53848d1db0eee5a94cc938ed 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/mempool.h>
 #include <linux/memory.h>
 #include <linux/timer.h>
+#include <linux/io.h>
 #include <linux/iova.h>
 #include <linux/iommu.h>
 #include <linux/intel-iommu.h>
@@ -2115,15 +2116,19 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                                return -ENOMEM;
                        /* It is large page*/
                        if (largepage_lvl > 1) {
+                               unsigned long nr_superpages, end_pfn;
+
                                pteval |= DMA_PTE_LARGE_PAGE;
                                lvl_pages = lvl_to_nr_pages(largepage_lvl);
+
+                               nr_superpages = sg_res / lvl_pages;
+                               end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
+
                                /*
                                 * Ensure that old small page tables are
-                                * removed to make room for superpage,
-                                * if they exist.
+                                * removed to make room for superpage(s).
                                 */
-                               dma_pte_free_pagetable(domain, iov_pfn,
-                                                      iov_pfn + lvl_pages - 1);
+                               dma_pte_free_pagetable(domain, iov_pfn, end_pfn);
                        } else {
                                pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
                        }
@@ -2301,6 +2306,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
 
        if (ret) {
                spin_unlock_irqrestore(&device_domain_lock, flags);
+               free_devinfo_mem(info);
                return NULL;
        }
 
@@ -2429,17 +2435,11 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
                                  DMA_PTE_READ|DMA_PTE_WRITE);
 }
 
-static int iommu_prepare_identity_map(struct device *dev,
-                                     unsigned long long start,
-                                     unsigned long long end)
+static int domain_prepare_identity_map(struct device *dev,
+                                      struct dmar_domain *domain,
+                                      unsigned long long start,
+                                      unsigned long long end)
 {
-       struct dmar_domain *domain;
-       int ret;
-
-       domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
-       if (!domain)
-               return -ENOMEM;
-
        /* For _hardware_ passthrough, don't bother. But for software
           passthrough, we do it anyway -- it may indicate a memory
           range which is reserved in E820, so which didn't get set
@@ -2459,8 +2459,7 @@ static int iommu_prepare_identity_map(struct device *dev,
                        dmi_get_system_info(DMI_BIOS_VENDOR),
                        dmi_get_system_info(DMI_BIOS_VERSION),
                     dmi_get_system_info(DMI_PRODUCT_VERSION));
-               ret = -EIO;
-               goto error;
+               return -EIO;
        }
 
        if (end >> agaw_to_width(domain->agaw)) {
@@ -2470,18 +2469,27 @@ static int iommu_prepare_identity_map(struct device *dev,
                     dmi_get_system_info(DMI_BIOS_VENDOR),
                     dmi_get_system_info(DMI_BIOS_VERSION),
                     dmi_get_system_info(DMI_PRODUCT_VERSION));
-               ret = -EIO;
-               goto error;
+               return -EIO;
        }
 
-       ret = iommu_domain_identity_map(domain, start, end);
-       if (ret)
-               goto error;
+       return iommu_domain_identity_map(domain, start, end);
+}
 
-       return 0;
+static int iommu_prepare_identity_map(struct device *dev,
+                                     unsigned long long start,
+                                     unsigned long long end)
+{
+       struct dmar_domain *domain;
+       int ret;
+
+       domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+       if (!domain)
+               return -ENOMEM;
+
+       ret = domain_prepare_identity_map(dev, domain, start, end);
+       if (ret)
+               domain_exit(domain);
 
- error:
-       domain_exit(domain);
        return ret;
 }
 
@@ -2807,18 +2815,18 @@ static void intel_iommu_init_qi(struct intel_iommu *iommu)
 }
 
 static int copy_context_table(struct intel_iommu *iommu,
-                             struct root_entry __iomem *old_re,
+                             struct root_entry *old_re,
                              struct context_entry **tbl,
                              int bus, bool ext)
 {
        int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
-       struct context_entry __iomem *old_ce = NULL;
        struct context_entry *new_ce = NULL, ce;
+       struct context_entry *old_ce = NULL;
        struct root_entry re;
        phys_addr_t old_ce_phys;
 
        tbl_idx = ext ? bus * 2 : bus;
-       memcpy_fromio(&re, old_re, sizeof(re));
+       memcpy(&re, old_re, sizeof(re));
 
        for (devfn = 0; devfn < 256; devfn++) {
                /* First calculate the correct index */
@@ -2853,7 +2861,8 @@ static int copy_context_table(struct intel_iommu *iommu,
                        }
 
                        ret = -ENOMEM;
-                       old_ce = ioremap_cache(old_ce_phys, PAGE_SIZE);
+                       old_ce = memremap(old_ce_phys, PAGE_SIZE,
+                                       MEMREMAP_WB);
                        if (!old_ce)
                                goto out;
 
@@ -2865,7 +2874,7 @@ static int copy_context_table(struct intel_iommu *iommu,
                }
 
                /* Now copy the context entry */
-               memcpy_fromio(&ce, old_ce + idx, sizeof(ce));
+               memcpy(&ce, old_ce + idx, sizeof(ce));
 
                if (!__context_present(&ce))
                        continue;
@@ -2901,7 +2910,7 @@ static int copy_context_table(struct intel_iommu *iommu,
        __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
 
 out_unmap:
-       iounmap(old_ce);
+       memunmap(old_ce);
 
 out:
        return ret;
@@ -2909,8 +2918,8 @@ out:
 
 static int copy_translation_tables(struct intel_iommu *iommu)
 {
-       struct root_entry __iomem *old_rt;
        struct context_entry **ctxt_tbls;
+       struct root_entry *old_rt;
        phys_addr_t old_rt_phys;
        int ctxt_table_entries;
        unsigned long flags;
@@ -2935,7 +2944,7 @@ static int copy_translation_tables(struct intel_iommu *iommu)
        if (!old_rt_phys)
                return -EINVAL;
 
-       old_rt = ioremap_cache(old_rt_phys, PAGE_SIZE);
+       old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
        if (!old_rt)
                return -ENOMEM;
 
@@ -2984,7 +2993,7 @@ static int copy_translation_tables(struct intel_iommu *iommu)
        ret = 0;
 
 out_unmap:
-       iounmap(old_rt);
+       memunmap(old_rt);
 
        return ret;
 }
@@ -3241,7 +3250,10 @@ static struct iova *intel_alloc_iova(struct device *dev,
 
 static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
 {
+       struct dmar_rmrr_unit *rmrr;
        struct dmar_domain *domain;
+       struct device *i_dev;
+       int i, ret;
 
        domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
        if (!domain) {
@@ -3250,6 +3262,23 @@ static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
                return NULL;
        }
 
+       /* We have a new domain - setup possible RMRRs for the device */
+       rcu_read_lock();
+       for_each_rmrr_units(rmrr) {
+               for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
+                                         i, i_dev) {
+                       if (i_dev != dev)
+                               continue;
+
+                       ret = domain_prepare_identity_map(dev, domain,
+                                                         rmrr->base_address,
+                                                         rmrr->end_address);
+                       if (ret)
+                               dev_err(dev, "Mapping reserved region failed\n");
+               }
+       }
+       rcu_read_unlock();
+
        return domain;
 }