arm64: Implement custom mmap functions for dma mapping
author    Laura Abbott <lauraa@codeaurora.org>
          Fri, 14 Mar 2014 19:52:23 +0000 (19:52 +0000)
committer Catalin Marinas <catalin.marinas@arm.com>
          Mon, 24 Mar 2014 10:01:17 +0000 (10:01 +0000)
The current dma_ops do not specify an mmap function, so mapping
falls back to the default implementation. There are at least
two issues with using the default implementation:

1) The pgprot is always pgprot_noncached (strongly ordered)
memory, even for coherent operations.
2) dma_common_mmap calls virt_to_page on the remapped non-coherent
address, which leads to invalid memory being mapped.

Fix both of these issues by implementing a custom mmap function which
correctly accounts for remapped addresses and sets vm_page_prot
appropriately.

Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
[catalin.marinas@arm.com: replaced "arm64_" with "__" prefix for consistency]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
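
For context, a minimal driver-side sketch of how these hooks are reached (not part of this patch; the foo_* names are hypothetical): a driver that allocates a buffer with dma_alloc_coherent() can forward its mmap file operation to the generic dma_mmap_coherent() helper, which dispatches through the device's dma_map_ops to the __swiotlb_mmap_* functions added here.

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical per-device state; names are illustrative only. */
struct foo_dev {
	struct device *dev;
	void *cpu_addr;		/* from dma_alloc_coherent() */
	dma_addr_t dma_handle;
	size_t size;
};

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct foo_dev *foo = file->private_data;

	/*
	 * Dispatches to get_dma_ops(dev)->mmap, i.e. on arm64 the
	 * __swiotlb_mmap_coherent()/__swiotlb_mmap_noncoherent()
	 * functions added by this patch.
	 */
	return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
				 foo->dma_handle, foo->size);
}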
arch/arm64/mm/dma-mapping.c

index 88fbc5e5bae7e545b208abd8d5d70531dc3d9c85..81eea3d3249f3d7b2ad28214c780a75225ef9d3e 100644
@@ -221,9 +221,52 @@ static void __swiotlb_sync_sg_for_device(struct device *dev,
                               sg->length, dir);
 }
 
+/* vma->vm_page_prot must be set appropriately before calling this function */
+static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+                            void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+       int ret = -ENXIO;
+       unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
+                                       PAGE_SHIFT;
+       unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
+       unsigned long off = vma->vm_pgoff;
+
+       if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+               return ret;
+
+       if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
+               ret = remap_pfn_range(vma, vma->vm_start,
+                                     pfn + off,
+                                     vma->vm_end - vma->vm_start,
+                                     vma->vm_page_prot);
+       }
+
+       return ret;
+}
+
+static int __swiotlb_mmap_noncoherent(struct device *dev,
+               struct vm_area_struct *vma,
+               void *cpu_addr, dma_addr_t dma_addr, size_t size,
+               struct dma_attrs *attrs)
+{
+       vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
+       return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+static int __swiotlb_mmap_coherent(struct device *dev,
+               struct vm_area_struct *vma,
+               void *cpu_addr, dma_addr_t dma_addr, size_t size,
+               struct dma_attrs *attrs)
+{
+       /* Just use whatever page_prot attributes were specified */
+       return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
 struct dma_map_ops noncoherent_swiotlb_dma_ops = {
        .alloc = __dma_alloc_noncoherent,
        .free = __dma_free_noncoherent,
+       .mmap = __swiotlb_mmap_noncoherent,
        .map_page = __swiotlb_map_page,
        .unmap_page = __swiotlb_unmap_page,
        .map_sg = __swiotlb_map_sg_attrs,
@@ -240,6 +283,7 @@ EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);
 struct dma_map_ops coherent_swiotlb_dma_ops = {
        .alloc = __dma_alloc_coherent,
        .free = __dma_free_coherent,
+       .mmap = __swiotlb_mmap_coherent,
        .map_page = swiotlb_map_page,
        .unmap_page = swiotlb_unmap_page,
        .map_sg = swiotlb_map_sg_attrs,
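
To illustrate the bounds check in __dma_common_mmap(), a hedged userspace sketch (assuming a hypothetical /dev/foo whose driver forwards mmap() to dma_mmap_coherent() as above, with a 16 KiB coherent buffer and 4 KiB pages): mapping 8 KiB at an offset of two pages yields off = 2 and nr_vma_pages = 2 against nr_pages = 4, so both off < nr_pages and nr_vma_pages <= (nr_pages - off) hold and remap_pfn_range() maps starting at pfn + 2; requesting 12 KiB at the same offset would instead fail with -ENXIO.

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

#define PAGE_SZ 4096	/* assumed page size for this sketch */

int main(void)
{
	/* Hypothetical device node backed by the driver sketch above. */
	int fd = open("/dev/foo", O_RDWR);
	if (fd < 0)
		return 1;

	/* Map 8 KiB starting two pages into the 16 KiB DMA buffer. */
	void *p = mmap(NULL, 2 * PAGE_SZ, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 2 * PAGE_SZ);
	if (p == MAP_FAILED) {
		close(fd);
		return 1;
	}

	/* ... use the mapping ... */
	munmap(p, 2 * PAGE_SZ);
	close(fd);
	return 0;
}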