/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device DMA API for powerpc,
 * covering the PCI and VIO busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__
#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <asm/swiotlb.h>
#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
/* Some dma direct funcs must be visible for use in other dma_ops */
extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag);
extern void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle);

extern unsigned long get_dma_direct_offset(struct device *dev);
#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);
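
/*
 * Illustrative sketch (hypothetical driver code, not part of this API):
 * as the comment above notes, a driver on a non-snooping core may also
 * allocate memory "normally" and keep it consistent by hand with the
 * sync routines; "buf" and "len" are made up for the example.
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *
 *	__dma_sync(buf, len, DMA_TO_DEVICE);	flush before the device reads
 *	... device DMAs from buf ...
 *	__dma_sync(buf, len, DMA_FROM_DEVICE);	invalidate before the CPU reads
 */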
#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(dev, gfp, size, handle)	NULL
#define __dma_free_coherent(size, addr)			((void)0)
#define __dma_sync(addr, size, rw)			((void)0)
#define __dma_sync_page(pg, off, sz, rw)		((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */
static inline unsigned long device_to_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;

	/* Assume devices without mask can take 32 bit addresses */
	return 0xfffffffful;
}
/*
 * Available generic sets of operations
 */
#ifdef CONFIG_PPC64
extern struct dma_map_ops dma_iommu_ops;
#endif
extern struct dma_map_ops dma_direct_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out of line call but it is not needed for now. The
	 * only ISA DMA device we support is the floppy and we have a hack
	 * in the floppy driver directly to get a device for us.
	 */
	if (unlikely(dev == NULL))
		return NULL;

	return dev->archdata.dma_ops;
}
static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	dev->archdata.dma_ops = ops;
}
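
/*
 * Illustrative sketch (hypothetical platform code): bus setup code is
 * expected to pick an ops set per device, e.g. direct ops for devices
 * with no IOMMU in their path; "behind_iommu" is an assumed predicate.
 *
 *	if (behind_iommu(dev))
 *		set_dma_ops(dev, &dma_iommu_ops);
 *	else
 *		set_dma_ops(dev, &dma_direct_ops);
 */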
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;
	if (dma_ops->dma_supported == NULL)
		return 1;
	return dma_ops->dma_supported(dev, mask);
}
/* We have our own implementation of pci_set_dma_mask() */
#define HAVE_ARCH_PCI_SET_DMA_MASK
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return -EIO;
	if (dma_ops->set_dma_mask != NULL)
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}
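
/*
 * Illustrative sketch (hypothetical driver code): a probe routine that
 * prefers 64-bit DMA addressing and falls back to 32-bit:
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;	neither mask is usable
 */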
/*
 * map_/unmap_single actually call through to map/unmap_page now that all the
 * dma_map_ops have been converted over. We just have to get the page and
 * offset to pass through to map_page.
 */
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      void *cpu_addr,
					      size_t size,
					      enum dma_data_direction direction,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
				 (unsigned long)cpu_addr % PAGE_SIZE, size,
				 direction, attrs);
}
static inline void dma_unmap_single_attrs(struct device *dev,
					  dma_addr_t dma_addr,
					  size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
}
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    unsigned long offset, size_t size,
					    enum dma_data_direction direction,
					    struct dma_attrs *attrs)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_page(dev, page, offset, size, direction, attrs);
}
static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t dma_address,
					size_t size,
					enum dma_data_direction direction,
					struct dma_attrs *attrs)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
}
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction direction,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_sg(dev, sg, nents, direction, attrs);
}
static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sg,
				      int nhwentries,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs);
}
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}
static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}
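
/*
 * Illustrative sketch (hypothetical driver code): a coherent allocation
 * for a descriptor ring; "ring" and "ring_dma" are made up names.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... program ring_dma into the device, touch "ring" from the CPU ...
 *	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 */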
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	return dma_map_single_attrs(dev, cpu_addr, size, direction, NULL);
}
static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	dma_unmap_single_attrs(dev, dma_addr, size, direction, NULL);
}
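
/*
 * Illustrative sketch (hypothetical driver code): streaming DMA of a
 * kmalloc'ed buffer, including the mandatory error check:
 *
 *	dma_addr_t busaddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, busaddr))
 *		return -EIO;
 *	... hand busaddr to the device and wait for completion ...
 *	dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
 */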
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	return dma_map_page_attrs(dev, page, offset, size, direction, NULL);
}
static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	dma_unmap_page_attrs(dev, dma_address, size, direction, NULL);
}
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	return dma_map_sg_attrs(dev, sg, nents, direction, NULL);
}
static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
}
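
/*
 * Illustrative sketch (hypothetical driver code): dma_map_sg() may
 * coalesce entries, so the device must be programmed with the returned
 * count, while dma_unmap_sg() takes the original nents:
 *
 *	int i, count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	struct scatterlist *sg;
 *
 *	if (count == 0)
 *		return -EIO;
 *	for_each_sg(sgl, sg, count, i)
 *		... program sg_dma_address(sg) and sg_dma_len(sg) ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */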
#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_single_range_for_cpu)
		dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
						   size, direction);
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_single_range_for_device)
		dma_ops->sync_single_range_for_device(dev, dma_handle, 0,
						      size, direction);
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_sg_for_cpu)
		dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_sg_for_device)
		dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
}
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_single_range_for_cpu)
		dma_ops->sync_single_range_for_cpu(dev, dma_handle,
						   offset, size, direction);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_single_range_for_device)
		dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
						      size, direction);
}
#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
}
#endif /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
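
/*
 * Illustrative sketch (hypothetical driver code): a long-lived streaming
 * mapping can be handed back and forth with the sync calls instead of
 * being unmapped and remapped for every transfer:
 *
 *	dma_sync_single_for_cpu(dev, busaddr, len, DMA_FROM_DEVICE);
 *	... CPU examines the data the device wrote into the buffer ...
 *	dma_sync_single_for_device(dev, busaddr, len, DMA_FROM_DEVICE);
 *	... device may DMA into the buffer again ...
 */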
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
#ifdef CONFIG_SWIOTLB
	struct dev_archdata *sd = &dev->archdata;

	if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
		return false;
#endif

	if (!dev->dma_mask)
		return false;

	return addr + size <= *dev->dma_mask;
}
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr + get_dma_direct_offset(dev);
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr - get_dma_direct_offset(dev);
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h)	(0)
#else
#define dma_is_consistent(d, h)	(1)
#endif
static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}
#endif /* __KERNEL__ */
#endif	/* _ASM_DMA_MAPPING_H */