#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>

#include <asm/memory.h>

#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
extern struct dma_map_ops arm_dma_ops;
extern struct dma_map_ops arm_coherent_dma_ops;
static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dma_ops)
		return dev->archdata.dma_ops;
	return &arm_dma_ops;
}

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (xen_initial_domain())
		return xen_dma_ops;
	else
		return __generic_dma_ops(dev);
}

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	BUG_ON(!dev);
	dev->archdata.dma_ops = ops;
}
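/*
 * Example (illustrative, not part of the original header): bus or platform
 * code that knows a device is fully cache-coherent could install the
 * coherent ops before the device is handed to its driver:
 *
 *	set_dma_ops(dev, &arm_coherent_dma_ops);
 */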
#include <asm-generic/dma-mapping-common.h>

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return get_dma_ops(dev)->set_dma_mask(dev, mask);
}
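/*
 * Example (illustrative): a driver's probe() typically constrains DMA to
 * 32 bits before creating any mappings; "pdev" here is a hypothetical
 * platform/PCI device, not something defined by this header:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */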
#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif
/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	if (dev)
		pfn -= dev->dma_pfn_offset;
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	unsigned long pfn = __bus_to_pfn(addr);

	if (dev)
		pfn += dev->dma_pfn_offset;

	return pfn;
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	if (dev) {
		unsigned long pfn = dma_to_pfn(dev, addr);

		return phys_to_virt(__pfn_to_phys(pfn));
	}

	return (void *)__bus_to_virt((unsigned long)addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	if (dev)
		return pfn_to_dma(dev, virt_to_pfn(addr));

	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
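/*
 * Worked example (hypothetical platform, not taken from the original
 * header): if RAM starts at CPU PFN 0x80000 but the device's bus sees that
 * same memory starting at bus PFN 0, dev->dma_pfn_offset is 0x80000, so
 * pfn_to_dma(dev, 0x80123) produces the bus address of bus PFN 0x123, and
 * dma_to_pfn() applies the opposite correction.
 */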
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif
/* The ARM override for dma_max_pfn() */
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);
}
#define dma_max_pfn(dev) dma_max_pfn(dev)
#define arch_setup_dma_ops arch_setup_dma_ops
extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			       struct iommu_ops *iommu, bool coherent);

#define arch_teardown_dma_ops arch_teardown_dma_ops
extern void arch_teardown_dma_ops(struct device *dev);
/* do not use this function in a driver */
static inline bool is_device_dma_coherent(struct device *dev)
{
	return dev->archdata.dma_coherent;
}
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	unsigned int offset = paddr & ~PAGE_MASK;
	return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
	unsigned int offset = dev_addr & ~PAGE_MASK;
	return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
}
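/*
 * Worked example (assuming 4KiB pages; illustrative only): for
 * paddr 0x80001234, the sub-page offset is 0x234 and the PFN is 0x80001,
 * so phys_to_dma() translates the PFN through pfn_to_dma() and then
 * re-adds the 0x234 offset; dma_to_phys() is the inverse.
 */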
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	u64 limit, mask;

	if (!dev->dma_mask)
		return 0;

	mask = *dev->dma_mask;

	/* for a mask of 2^n - 1, limit is the size of the DMA window */
	limit = (mask + 1) & ~mask;
	if (limit && size > limit)
		return 0;

	if ((addr | (addr + size - 1)) & ~mask)
		return 0;

	return 1;
}
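/*
 * Worked example (illustrative): with a 24-bit mask (*dev->dma_mask ==
 * 0x00ffffff), limit is 0x01000000, so buffers larger than 16MiB are
 * rejected; a 4KiB buffer at bus address 0x00fff000 passes (its last
 * byte is 0x00ffffff), while one starting at 0x00fffc00 fails because
 * its last byte lies beyond the mask.
 */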
static inline void dma_mark_clean(void *addr, size_t size) { }

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);
	return dma_addr == DMA_ERROR_CODE;
}
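/*
 * Example (illustrative): streaming-DMA users are expected to check every
 * mapping they create before handing the address to hardware:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 */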
/*
 * Dummy noncoherent implementation. We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}
extern int dma_supported(struct device *dev, u64 mask);

extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);
/**
 * arm_dma_alloc - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 * @attrs: optional attributes that specify mapping properties
 *
 * Allocate some memory for a device for performing DMA. This function
 * allocates pages, and will return the CPU-viewed address, and sets @handle
 * to be the device-viewed address.
 */
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			   gfp_t gfp, struct dma_attrs *attrs);
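/*
 * Example (illustrative): drivers do not call arm_dma_alloc() directly;
 * they use dma_alloc_coherent(), which dispatches to these ops. A driver
 * allocating a small descriptor ring might do:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, SZ_4K, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, ring, ring_dma);
 */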
/**
 * arm_dma_free - free memory allocated by arm_dma_alloc
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Free (and unmap) a DMA buffer previously allocated by arm_dma_alloc().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call executing are illegal.
 */
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			 dma_addr_t handle, struct dma_attrs *attrs);
/**
 * arm_dma_mmap - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			struct dma_attrs *attrs);
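/*
 * Example (illustrative sketch): a character driver exposing its coherent
 * buffer to user space would normally go through the generic
 * dma_mmap_coherent() wrapper, which ends up here. "foo_dev" and its
 * fields are hypothetical driver state, not part of this header:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *fd = file->private_data;
 *
 *		return dma_mmap_coherent(fd->dev, vma, fd->cpu_addr,
 *					 fd->dma_handle, fd->size);
 *	}
 */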
/*
 * This can be called during early boot to increase the size of the atomic
 * coherent DMA pool above the default value of 256KiB. It must be called
 * before postcore_initcall.
 */
extern void __init init_dma_coherent_pool_size(unsigned long size);
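/*
 * Example (illustrative): a platform's early init or reserve hook that
 * knows it needs more atomic coherent memory might bump the pool, e.g.:
 *
 *	init_dma_coherent_pool_size(SZ_1M);
 */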
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 */
/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
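/*
 * Example (illustrative sketch): platform code for a device whose inbound
 * window covers only the first 64MB might register bouncing as below.
 * The name "foo_needs_bounce", the pool sizes and the 64MB limit are
 * assumptions for the example, not requirements of this header:
 *
 *	static int foo_needs_bounce(struct device *dev, dma_addr_t addr,
 *				    size_t size)
 *	{
 *		return (addr + size) > SZ_64M;
 *	}
 *
 *	dmabounce_register_dev(dev, 2048, 4096, foo_needs_bounce);
 */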
/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);
/*
 * The scatter list versions of the above methods.
 */
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		struct dma_attrs *attrs);
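/*
 * Example (illustrative): drivers use the generic scatterlist helpers,
 * which dispatch to the arm_dma_*_sg implementations above:
 *
 *	int count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */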
#endif /* __KERNEL__ */
#endif