Merge tag 'for-linus-20151021' of git://git.infradead.org/intel-iommu

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 2d7349a3ee1496408f051b4da8accebc8dd02ec1..d65cf42399e8e5aa0f856a1ab6869c041adc0c48 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2115,15 +2115,19 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                                return -ENOMEM;
                        /* It is large page*/
                        if (largepage_lvl > 1) {
+                               unsigned long nr_superpages, end_pfn;
+
                                pteval |= DMA_PTE_LARGE_PAGE;
                                lvl_pages = lvl_to_nr_pages(largepage_lvl);
+
+                               nr_superpages = sg_res / lvl_pages;
+                               end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
+
                                /*
                                 * Ensure that old small page tables are
-                                * removed to make room for superpage,
-                                * if they exist.
+                                * removed to make room for superpage(s).
                                 */
-                               dma_pte_free_pagetable(domain, iov_pfn,
-                                                      iov_pfn + lvl_pages - 1);
+                               dma_pte_free_pagetable(domain, iov_pfn, end_pfn);
                        } else {
                                pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
                        }
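
The first hunk widens the range handed to dma_pte_free_pagetable(): the old code cleared only the first superpage's worth of PFNs, leaving stale small-page tables under any further superpages mapped in the same pass. A standalone sketch of the arithmetic, with hypothetical values chosen to mimic a 2MiB superpage level (not taken from a real mapping):

#include <stdio.h>

int main(void)
{
	/* Hypothetical values: 512 4KiB pages per superpage, and a
	 * scatterlist entry with 1536 pages still to map. */
	unsigned long iov_pfn = 0x1000;
	unsigned long sg_res = 1536;
	unsigned long lvl_pages = 512;

	unsigned long nr_superpages = sg_res / lvl_pages;		/* 3 */
	unsigned long end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;

	/* The old code freed only iov_pfn..iov_pfn + lvl_pages - 1, i.e.
	 * the first superpage; the fix frees the whole range so no stale
	 * small-page tables remain under superpages two and three. */
	printf("free PFNs 0x%lx..0x%lx (%lu superpages)\n",
	       iov_pfn, end_pfn, nr_superpages);
	return 0;
}
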
@@ -2301,6 +2305,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
 
        if (ret) {
                spin_unlock_irqrestore(&device_domain_lock, flags);
+               free_devinfo_mem(info);
                return NULL;
        }
 
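
The second hunk plugs a leak in dmar_insert_one_dev_info(): info is allocated earlier in the function with alloc_devinfo_mem(), and the failure path dropped the lock and returned NULL without releasing it. A minimal userspace analogue of the corrected error path (malloc/free and a pthread mutex stand in for the kernel helpers; do_setup() is a hypothetical placeholder for the work done under the lock):

#include <stdlib.h>
#include <pthread.h>

static pthread_mutex_t device_domain_lock = PTHREAD_MUTEX_INITIALIZER;

struct devinfo { int bus, devfn; };

/* Hypothetical stand-in for the setup done under the lock; it fails
 * here so the error path below is exercised. */
static int do_setup(struct devinfo *info)
{
	(void)info;
	return -1;
}

static struct devinfo *insert_one_dev_info(void)
{
	struct devinfo *info = malloc(sizeof(*info));	/* alloc_devinfo_mem() */

	if (!info)
		return NULL;

	pthread_mutex_lock(&device_domain_lock);
	if (do_setup(info)) {
		pthread_mutex_unlock(&device_domain_lock);
		free(info);	/* the fix: release info on failure */
		return NULL;
	}
	pthread_mutex_unlock(&device_domain_lock);
	return info;
}

int main(void)
{
	/* do_setup() always fails here, so this returns NULL without leaking. */
	return insert_one_dev_info() ? 1 : 0;
}
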
@@ -3215,6 +3220,8 @@ static struct iova *intel_alloc_iova(struct device *dev,
 
        /* Restrict dma_mask to the width that the iommu can handle */
        dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
+       /* Ensure we reserve the whole size-aligned region */
+       nrpages = __roundup_pow_of_two(nrpages);
 
        if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
                /*
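
The third hunk rounds nrpages up before the IOVA is allocated: per the in-diff comment, the allocator aligns the region to the next power of two of the requested size, so rounding the request itself up makes the reservation cover that entire aligned region rather than only its first few pages. A worked example with an illustrative 3-page request (the open-coded roundup_pow_of_two() is a userspace stand-in for the kernel's __roundup_pow_of_two()):

#include <stdio.h>

/* Userspace stand-in for the kernel's __roundup_pow_of_two(). */
static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned long nrpages = 3;	/* illustrative request size in pages */

	/* A size-aligned allocator places this region on a 4-page boundary;
	 * reserving 4 pages instead of 3 keeps the whole aligned region. */
	nrpages = roundup_pow_of_two(nrpages);
	printf("reserve %lu pages\n", nrpages);	/* prints: reserve 4 pages */
	return 0;
}
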
@@ -3711,7 +3718,7 @@ static inline int iommu_devinfo_cache_init(void)
 static int __init iommu_init_mempool(void)
 {
        int ret;
-       ret = iommu_iova_cache_init();
+       ret = iova_cache_get();
        if (ret)
                return ret;
 
@@ -3725,7 +3732,7 @@ static int __init iommu_init_mempool(void)
 
        kmem_cache_destroy(iommu_domain_cache);
 domain_error:
-       iommu_iova_cache_destroy();
+       iova_cache_put();
 
        return -ENOMEM;
 }
@@ -3734,7 +3741,7 @@ static void __init iommu_exit_mempool(void)
 {
        kmem_cache_destroy(iommu_devinfo_cache);
        kmem_cache_destroy(iommu_domain_cache);
-       iommu_iova_cache_destroy();
+       iova_cache_put();
 }
 
 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
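
The remaining hunks switch intel-iommu from its private iommu_iova_cache_init()/iommu_iova_cache_destroy() helpers to the shared, reference-counted iova_cache_get()/iova_cache_put() API, so IOMMU drivers share one kmem cache for struct iova. A minimal userspace sketch of that get/put pattern (malloc stands in for the kmem cache, and the kernel serializes get/put with a mutex, omitted here):

#include <stdio.h>
#include <stdlib.h>

static unsigned int iova_cache_users;	/* holders of a reference */
static void *iova_cache;		/* stands in for the kmem_cache */

static int cache_get(void)
{
	if (iova_cache_users++ == 0) {
		iova_cache = malloc(4096);	/* created by the first user */
		if (!iova_cache) {
			iova_cache_users = 0;
			return -1;
		}
	}
	return 0;
}

static void cache_put(void)
{
	if (--iova_cache_users == 0) {	/* last user destroys the cache */
		free(iova_cache);
		iova_cache = NULL;
	}
}

int main(void)
{
	if (cache_get())	/* first driver: cache is created */
		return 1;
	cache_get();		/* second driver: refcount bump only */
	cache_put();
	cache_put();		/* cache freed with the last put */
	printf("users left: %u\n", iova_cache_users);
	return 0;
}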