iommu/tegra-smmu: Convert to use DMA API
author    Russell King <rmk+kernel@arm.linux.org.uk>
          Mon, 27 Jul 2015 12:29:52 +0000 (13:29 +0100)
committer Thierry Reding <treding@nvidia.com>
          Thu, 13 Aug 2015 14:06:42 +0000 (16:06 +0200)
Use the DMA API instead of calling architecture-internal cache maintenance
functions in the Tegra SMMU driver.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Thierry Reding <treding@nvidia.com>
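
The pattern applied throughout the patch is sketched below. This is a
minimal illustration, not code from the patch: the names (my_table,
my_table_alloc, and so on) are hypothetical and the error handling is
simplified. Each page directory or page table page is mapped once with
dma_map_page(), entries written by the CPU are pushed out with
dma_sync_single_range_for_device(), and the mapping is torn down with
dma_unmap_page(). The dma_addr_t returned by the map replaces
page_to_phys() wherever a bus address is programmed into the hardware.

/*
 * Minimal sketch of the conversion pattern, under the same assumptions
 * the patch makes: the CPU writes table entries, the device only reads
 * them (DMA_TO_DEVICE).  Names are illustrative, not from the driver.
 */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/types.h>

struct my_table {
        struct page *page;
        dma_addr_t dma;         /* bus address handed to the device */
};

static int my_table_alloc(struct device *dev, struct my_table *tbl)
{
        tbl->page = alloc_page(GFP_KERNEL | __GFP_DMA);
        if (!tbl->page)
                return -ENOMEM;

        /*
         * Mapping the page performs the initial CPU cache maintenance,
         * replacing __cpuc_flush_dcache_area()/outer_flush_range() on
         * ARM and __flush_dcache_area() on ARM64.
         */
        tbl->dma = dma_map_page(dev, tbl->page, 0, PAGE_SIZE,
                                DMA_TO_DEVICE);
        if (dma_mapping_error(dev, tbl->dma)) {
                __free_page(tbl->page);
                return -ENOMEM;
        }

        return 0;
}

static void my_table_set(struct device *dev, struct my_table *tbl,
                         unsigned int index, u32 val)
{
        u32 *entries = page_address(tbl->page);

        entries[index] = val;

        /* Make just the modified entry visible to the device. */
        dma_sync_single_range_for_device(dev, tbl->dma,
                                         index * sizeof(*entries),
                                         sizeof(*entries), DMA_TO_DEVICE);
}

static void my_table_free(struct device *dev, struct my_table *tbl)
{
        dma_unmap_page(dev, tbl->dma, PAGE_SIZE, DMA_TO_DEVICE);
        __free_page(tbl->page);
}

DMA_TO_DEVICE matches the SMMU's access pattern: the hardware only ever
reads the tables, so no dma_sync_single_range_for_cpu() calls are needed,
and the driver still issues its own PTC/TLB flushes afterwards to
invalidate the SMMU's internal caches.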
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index f420d871853565e6311a50bc4276dafebd14030f..43b69c8cbe460a48da8431b11c8345c2e0ca5956 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -16,8 +16,6 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
-#include <asm/cacheflush.h>
-
 #include <soc/tegra/ahb.h>
 #include <soc/tegra/mc.h>
 
@@ -45,6 +43,7 @@ struct tegra_smmu_as {
        u32 *count;
        struct page **pts;
        struct page *pd;
+       dma_addr_t pd_dma;
        unsigned id;
        u32 attr;
 };
@@ -82,9 +81,9 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
 #define  SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)
 
 #define SMMU_PTB_DATA 0x020
-#define  SMMU_PTB_DATA_VALUE(page, attr) (page_to_phys(page) >> 12 | (attr))
+#define  SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))
 
-#define SMMU_MK_PDE(page, attr) (page_to_phys(page) >> SMMU_PTE_SHIFT | (attr))
+#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))
 
 #define SMMU_TLB_FLUSH 0x030
 #define  SMMU_TLB_FLUSH_VA_MATCH_ALL     (0 << 0)
@@ -147,22 +146,15 @@ static unsigned int iova_pt_index(unsigned long iova)
        return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
 }
 
-static void smmu_flush_dcache(struct page *page, unsigned long offset,
-                             size_t size)
+static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
 {
-#ifdef CONFIG_ARM
-       phys_addr_t phys = page_to_phys(page) + offset;
-#endif
-       void *virt = page_address(page) + offset;
-
-#ifdef CONFIG_ARM
-       __cpuc_flush_dcache_area(virt, size);
-       outer_flush_range(phys, phys + size);
-#endif
+       addr >>= 12;
+       return (addr & smmu->pfn_mask) == addr;
+}
 
-#ifdef CONFIG_ARM64
-       __flush_dcache_area(virt, size);
-#endif
+static dma_addr_t smmu_pde_to_dma(u32 pde)
+{
+       return pde << 12;
 }
 
 static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
@@ -170,7 +162,7 @@ static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
        smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
 }
 
-static inline void smmu_flush_ptc(struct tegra_smmu *smmu, phys_addr_t phys,
+static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
                                  unsigned long offset)
 {
        u32 value;
@@ -178,15 +170,15 @@ static inline void smmu_flush_ptc(struct tegra_smmu *smmu, phys_addr_t phys,
        offset &= ~(smmu->mc->soc->atom_size - 1);
 
        if (smmu->mc->soc->num_address_bits > 32) {
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
-               value = (phys >> 32) & SMMU_PTC_FLUSH_HI_MASK;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+               value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
 #else
                value = 0;
 #endif
                smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
        }
 
-       value = (phys + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
+       value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
        smmu_writel(smmu, value, SMMU_PTC_FLUSH);
 }
 
@@ -407,16 +399,26 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
                return 0;
        }
 
+       as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
+                                 DMA_TO_DEVICE);
+       if (dma_mapping_error(smmu->dev, as->pd_dma))
+               return -ENOMEM;
+
+       /* We can't handle 64-bit DMA addresses */
+       if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
+               err = -ENOMEM;
+               goto err_unmap;
+       }
+
        err = tegra_smmu_alloc_asid(smmu, &as->id);
        if (err < 0)
-               return err;
+               goto err_unmap;
 
-       smmu_flush_dcache(as->pd, 0, SMMU_SIZE_PD);
-       smmu_flush_ptc(smmu, page_to_phys(as->pd), 0);
+       smmu_flush_ptc(smmu, as->pd_dma, 0);
        smmu_flush_tlb_asid(smmu, as->id);
 
        smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
-       value = SMMU_PTB_DATA_VALUE(as->pd, as->attr);
+       value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
        smmu_writel(smmu, value, SMMU_PTB_DATA);
        smmu_flush(smmu);
 
@@ -424,6 +426,10 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
        as->use_count++;
 
        return 0;
+
+err_unmap:
+       dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+       return err;
 }
 
 static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
@@ -433,6 +439,9 @@ static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
                return;
 
        tegra_smmu_free_asid(smmu, as->id);
+
+       dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+
        as->smmu = NULL;
 }
 
@@ -504,63 +513,81 @@ static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
 }
 
 static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
-                                 struct page **pagep)
+                                 dma_addr_t *dmap)
 {
        unsigned int pd_index = iova_pd_index(iova);
        struct page *pt_page;
+       u32 *pd;
 
        pt_page = as->pts[pd_index];
        if (!pt_page)
                return NULL;
 
-       *pagep = pt_page;
+       pd = page_address(as->pd);
+       *dmap = smmu_pde_to_dma(pd[pd_index]);
 
        return tegra_smmu_pte_offset(pt_page, iova);
 }
 
 static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
-                      struct page **pagep)
+                      dma_addr_t *dmap)
 {
        u32 *pd = page_address(as->pd), *pt;
        unsigned int pde = iova_pd_index(iova);
        struct tegra_smmu *smmu = as->smmu;
-       struct page *page;
        unsigned int i;
 
        if (!as->pts[pde]) {
+               struct page *page;
+               dma_addr_t dma;
+
                page = alloc_page(GFP_KERNEL | __GFP_DMA);
                if (!page)
                        return NULL;
 
                pt = page_address(page);
-               SetPageReserved(page);
 
                for (i = 0; i < SMMU_NUM_PTE; i++)
                        pt[i] = 0;
 
+               dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
+                                  DMA_TO_DEVICE);
+               if (dma_mapping_error(smmu->dev, dma)) {
+                       __free_page(page);
+                       return NULL;
+               }
+
+               if (!smmu_dma_addr_valid(smmu, dma)) {
+                       dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
+                                      DMA_TO_DEVICE);
+                       __free_page(page);
+                       return NULL;
+               }
+
                as->pts[pde] = page;
 
-               smmu_flush_dcache(page, 0, SMMU_SIZE_PT);
+               SetPageReserved(page);
 
-               pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT);
+               pd[pde] = SMMU_MK_PDE(dma, SMMU_PDE_ATTR | SMMU_PDE_NEXT);
 
-               smmu_flush_dcache(as->pd, pde << 2, 4);
-               smmu_flush_ptc(smmu, page_to_phys(as->pd), pde << 2);
+               dma_sync_single_range_for_device(smmu->dev, as->pd_dma,
+                                                pde << 2, 4, DMA_TO_DEVICE);
+               smmu_flush_ptc(smmu, as->pd_dma, pde << 2);
                smmu_flush_tlb_section(smmu, as->id, iova);
                smmu_flush(smmu);
+
+               *dmap = dma;
        } else {
-               page = as->pts[pde];
+               *dmap = smmu_pde_to_dma(pd[pde]);
        }
 
-       *pagep = page;
-
-       pt = page_address(page);
+       pt = tegra_smmu_pte_offset(as->pts[pde], iova);
 
        /* Keep track of entries in this page table. */
-       if (pt[iova_pt_index(iova)] == 0)
+       if (*pt == 0)
                as->count[pde]++;
 
-       return tegra_smmu_pte_offset(page, iova);
+       return pt;
 }
 
 static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
@@ -576,17 +603,20 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
         */
        if (--as->count[pde] == 0) {
                unsigned int offset = pde * sizeof(*pd);
+               dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]);
 
                /* Clear the page directory entry first */
                pd[pde] = 0;
 
                /* Flush the page directory entry */
-               smmu_flush_dcache(as->pd, offset, sizeof(*pd));
-               smmu_flush_ptc(smmu, page_to_phys(as->pd), offset);
+               dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
+                                                sizeof(*pd), DMA_TO_DEVICE);
+               smmu_flush_ptc(smmu, as->pd_dma, offset);
                smmu_flush_tlb_section(smmu, as->id, iova);
                smmu_flush(smmu);
 
                /* Finally, free the page */
+               dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
                ClearPageReserved(page);
                __free_page(page);
                as->pts[pde] = NULL;
@@ -594,15 +624,16 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
 }
 
 static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
-                              u32 *pte, struct page *pte_page, u32 val)
+                              u32 *pte, dma_addr_t pte_dma, u32 val)
 {
        struct tegra_smmu *smmu = as->smmu;
        unsigned long offset = offset_in_page(pte);
 
        *pte = val;
 
-       smmu_flush_dcache(pte_page, offset, 4);
-       smmu_flush_ptc(smmu, page_to_phys(pte_page), offset);
+       dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
+                                        4, DMA_TO_DEVICE);
+       smmu_flush_ptc(smmu, pte_dma, offset);
        smmu_flush_tlb_group(smmu, as->id, iova);
        smmu_flush(smmu);
 }
@@ -611,14 +642,14 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
                          phys_addr_t paddr, size_t size, int prot)
 {
        struct tegra_smmu_as *as = to_smmu_as(domain);
-       struct page *page;
+       dma_addr_t pte_dma;
        u32 *pte;
 
-       pte = as_get_pte(as, iova, &page);
+       pte = as_get_pte(as, iova, &pte_dma);
        if (!pte)
                return -ENOMEM;
 
-       tegra_smmu_set_pte(as, iova, pte, page,
+       tegra_smmu_set_pte(as, iova, pte, pte_dma,
                           __phys_to_pfn(paddr) | SMMU_PTE_ATTR);
 
        return 0;
@@ -628,14 +659,14 @@ static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
                               size_t size)
 {
        struct tegra_smmu_as *as = to_smmu_as(domain);
-       struct page *pte_page;
+       dma_addr_t pte_dma;
        u32 *pte;
 
-       pte = tegra_smmu_pte_lookup(as, iova, &pte_page);
+       pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
        if (!pte || !*pte)
                return 0;
 
-       tegra_smmu_set_pte(as, iova, pte, pte_page, 0);
+       tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
        tegra_smmu_pte_put_use(as, iova);
 
        return size;
@@ -645,11 +676,11 @@ static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
                                           dma_addr_t iova)
 {
        struct tegra_smmu_as *as = to_smmu_as(domain);
-       struct page *pte_page;
        unsigned long pfn;
+       dma_addr_t pte_dma;
        u32 *pte;
 
-       pte = tegra_smmu_pte_lookup(as, iova, &pte_page);
+       pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
        if (!pte || !*pte)
                return 0;