#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>

#include <asm/memory.h>

#include <xen/xen.h>
#include <asm/xen/hypervisor.h>

#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
extern const struct dma_map_ops arm_dma_ops;
extern const struct dma_map_ops arm_coherent_dma_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
        return &arm_dma_ops;
}

#define HAVE_ARCH_DMA_SUPPORTED 1
extern int dma_supported(struct device *dev, u64 mask);
#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
        if (dev)
                pfn -= dev->dma_pfn_offset;
        return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
        unsigned long pfn = __bus_to_pfn(addr);

        if (dev)
                pfn += dev->dma_pfn_offset;

        return pfn;
}
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        if (dev) {
                unsigned long pfn = dma_to_pfn(dev, addr);

                return phys_to_virt(__pfn_to_phys(pfn));
        }

        return (void *)__bus_to_virt((unsigned long)addr);
}
static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        if (dev)
                return pfn_to_dma(dev, virt_to_pfn(addr));

        return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}

#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
        return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return __arch_virt_to_dma(dev, addr);
}
#endif
/* The ARM override for dma_max_pfn() */
static inline unsigned long dma_max_pfn(struct device *dev)
{
        return dma_to_pfn(dev, *dev->dma_mask);
}
#define dma_max_pfn(dev) dma_max_pfn(dev)
#define arch_setup_dma_ops arch_setup_dma_ops
extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                               const struct iommu_ops *iommu, bool coherent);

#define arch_teardown_dma_ops arch_teardown_dma_ops
extern void arch_teardown_dma_ops(struct device *dev);
/* do not use this function in a driver */
static inline bool is_device_dma_coherent(struct device *dev)
{
        return dev->archdata.dma_coherent;
}
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
        unsigned int offset = paddr & ~PAGE_MASK;
        return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
        unsigned int offset = dev_addr & ~PAGE_MASK;
        return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
}
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
        u64 limit, mask;

        if (!dev->dma_mask)
                return 0;

        mask = *dev->dma_mask;

        /* for a contiguous mask this is the window size; 0 means unlimited */
        limit = (mask + 1) & ~mask;
        if (limit && size > limit)
                return 0;

        /* both the first and the last byte must be addressable under the mask */
        if ((addr | (addr + size - 1)) & ~mask)
                return 0;

        return 1;
}

static inline void dma_mark_clean(void *addr, size_t size) { }
/**
 * arm_dma_alloc - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 * @attrs: optional attributes that specify mapping properties
 *
 * Allocate some memory for a device for performing DMA. This function
 * allocates pages, and will return the CPU-viewed address, and sets @handle
 * to be the device-viewed address.
 */
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                           gfp_t gfp, unsigned long attrs);
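/*
 * Illustrative sketch only (not taken from in-tree code): a driver normally
 * reaches arm_dma_alloc() through the generic dma_alloc_coherent() wrapper
 * rather than calling it directly. The example_* names and the 4KiB size
 * below are hypothetical.
 *
 *	#include <linux/dma-mapping.h>
 *
 *	static void *example_buf;		// CPU-viewed address
 *	static dma_addr_t example_handle;	// device-viewed address
 *
 *	static int example_alloc(struct device *dev)
 *	{
 *		// dispatches to the .alloc method of the device's dma_map_ops,
 *		// i.e. arm_dma_alloc() for devices using arm_dma_ops
 *		example_buf = dma_alloc_coherent(dev, SZ_4K, &example_handle,
 *						 GFP_KERNEL);
 *		return example_buf ? 0 : -ENOMEM;
 *	}
 */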
/**
 * arm_dma_free - free memory allocated by arm_dma_alloc
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * arm_dma_alloc().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * are illegal during and after this call executes.
 */
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
                         dma_addr_t handle, unsigned long attrs);
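/*
 * Illustrative sketch only: the matching release path, again through the
 * generic dma_free_coherent() wrapper, reusing the hypothetical names from
 * the allocation sketch above.
 *
 *	static void example_free(struct device *dev)
 *	{
 *		// size, CPU address and DMA handle must match the allocation
 *		dma_free_coherent(dev, SZ_4K, example_buf, example_handle);
 *	}
 */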
/**
 * arm_dma_mmap - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                        void *cpu_addr, dma_addr_t dma_addr, size_t size,
                        unsigned long attrs);
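/*
 * Illustrative sketch only: user space usually reaches arm_dma_mmap() via
 * dma_mmap_coherent() from a driver's mmap() file operation. The example_*
 * names are hypothetical; example_dev is the struct device that was used for
 * the allocation sketch above.
 *
 *	static int example_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		// map the coherent buffer allocated earlier into user space;
 *		// the buffer must stay allocated until the mapping is gone
 *		return dma_mmap_coherent(example_dev, vma, example_buf,
 *					 example_handle, SZ_4K);
 *	}
 */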
/*
 * This can be called during early boot to increase the size of the atomic
 * coherent DMA pool above the default value of 256KiB. It must be called
 * before postcore_initcall.
 */
extern void __init init_dma_coherent_pool_size(unsigned long size);
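/*
 * Illustrative sketch only: a platform needing a larger atomic pool could
 * call this from its machine_desc .init_early() hook, which runs well before
 * postcore_initcall. The machine name and the 512KiB size are hypothetical.
 *
 *	#include <asm/mach/arch.h>
 *
 *	static void __init example_init_early(void)
 *	{
 *		// double the default 256KiB atomic coherent pool
 *		init_dma_coherent_pool_size(SZ_512K);
 *	}
 *
 *	MACHINE_START(EXAMPLE, "Example board")
 *		.init_early	= example_init_early,
 *	MACHINE_END
 */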
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 *
 */
/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
                unsigned long, int (*)(struct device *, dma_addr_t, size_t));
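/*
 * Illustrative sketch only: platform code registering a device whose DMA
 * window covers the low 64MB only, so any buffer ending above that line is
 * bounced. The names, pool size and 64MB limit are hypothetical.
 *
 *	static int example_needs_bounce(struct device *dev, dma_addr_t addr,
 *					size_t size)
 *	{
 *		// bounce anything that extends beyond the 64MB window
 *		return (addr + size) > SZ_64M;
 *	}
 *
 *	static int example_register(struct device *dev)
 *	{
 *		// 2KiB small-buffer pool, no large-buffer pool
 *		return dmabounce_register_dev(dev, 2048, 0,
 *					      example_needs_bounce);
 *	}
 */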
/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 *
 */
extern void dmabounce_unregister_dev(struct device *);
/*
 * The scatter list versions of the above methods.
 */
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction, unsigned long attrs);
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction, unsigned long attrs);
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);
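/*
 * Illustrative sketch only: drivers reach the arm_dma_*_sg() routines through
 * the generic dma_map_sg()/dma_unmap_sg() wrappers. The function name and the
 * DMA_TO_DEVICE direction are hypothetical.
 *
 *	static int example_map_table(struct device *dev, struct sg_table *sgt)
 *	{
 *		int nents;
 *
 *		// dispatches to the device's .map_sg method, e.g. arm_dma_map_sg()
 *		nents = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *		if (!nents)
 *			return -ENOMEM;
 *
 *		// ... run the transfer, then tear the mapping down ...
 *		dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *		return 0;
 *	}
 */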
#endif /* __KERNEL__ */
#endif