#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>

/*
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a
 * way that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN	(1UL << 8)
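
/*
 * Illustrative only (not part of the API above): the attributes form a
 * bitmask, so a hypothetical driver that manages CPU cache state itself
 * could OR several of them together in one mapping call:
 *
 *	dma_addr_t handle = dma_map_single_attrs(dev, buf, len,
 *						 DMA_TO_DEVICE,
 *						 DMA_ATTR_SKIP_CPU_SYNC |
 *						 DMA_ATTR_NO_WARN);
 *
 * (dev, buf and len are placeholders; dma_map_single_attrs() is defined
 * later in this header.)
 */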

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
			void *vaddr, dma_addr_t dma_handle,
			unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t,
			unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
				   size_t size, enum dma_data_direction dir,
				   unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
	int is_phys;
};

extern const struct dma_map_ops dma_noop_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL
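
/*
 * For reference: DMA_BIT_MASK(n) is the mask with the low n bits set,
 * e.g. DMA_BIT_MASK(24) == 0x00ffffffULL, and the n == 64 special case
 * exists because shifting a 64-bit value by 64 is undefined behaviour
 * in C. A typical (illustrative) use is dma_set_mask(dev, DMA_BIT_MASK(32)).
 */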

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, size_t size, int *ret);
#else
#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
#define dma_release_from_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else
/*
 * Define the dma api to allow compilation but not linking of
 * dma dependent code. Code that depends on the dma-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig.
 */
extern const struct dma_map_ops bad_dma_ops;
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &bad_dma_ops;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
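
/*
 * Illustrative sketch of the usual streaming-DMA life cycle (buf and len
 * are placeholders owned by a hypothetical driver):
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... start the transfer, using handle on the device side ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */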

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
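
/*
 * Illustrative sketch: the value returned by dma_map_sg() may be smaller
 * than nents because adjacent entries can be merged, so the device must
 * be programmed with the returned count, while dma_unmap_sg() takes the
 * original nents (program_hw_entry() is a placeholder):
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, count, i)
 *		program_hw_entry(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */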

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    size_t offset, size_t size,
					    enum dma_data_direction dir,
					    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t addr, size_t size,
					enum dma_data_direction dir,
					unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
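
/*
 * Illustrative sketch: dma_map_resource() is for physical resources such
 * as another device's MMIO window (e.g. for peer-to-peer DMA), never for
 * RAM, as the pfn_valid() check above enforces. Assuming a hypothetical
 * struct resource *res:
 *
 *	dma_addr_t dma = dma_map_resource(dev, res->start,
 *					  resource_size(res),
 *					  DMA_BIDIRECTIONAL, 0);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 */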

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
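
/*
 * Illustrative sketch: a driver that reuses one streaming mapping must
 * pass ownership back and forth explicitly. Assuming a hypothetical RX
 * buffer already mapped with DMA_FROM_DEVICE:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the received data ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	... the device may now write to the buffer again ...
 */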

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
				  unsigned long vm_flags,
				  pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			     unsigned long vm_flags, pgprot_t prot,
			     const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @handle: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
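
/*
 * Illustrative sketch: a driver would typically call dma_mmap_coherent()
 * from its mmap file operation, with the address pair saved at allocation
 * time (struct my_dev and its fields are placeholders):
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *md = file->private_data;
 *
 *		return dma_mmap_coherent(md->dev, vma, md->cpu_addr,
 *					 md->handle, md->size);
 *	}
 */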

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size,
		      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
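
/*
 * Illustrative sketch: exporters (e.g. dma-buf) use dma_get_sgtable() to
 * describe a coherent allocation as a scatterlist for another device:
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = dma_get_sgtable(dev, &sgt, cpu_addr, handle, size);
 *	if (ret < 0)
 *		return ret;
 *	... hand &sgt to the importer; sg_free_table(&sgt) when done ...
 */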

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
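
/*
 * Illustrative sketch: a coherent buffer is typically used for structures
 * the device and CPU share for the device's lifetime, e.g. a descriptor
 * ring (ring_bytes is a placeholder):
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, ring_bytes, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... program ring_dma into the device, access ring from the CPU ...
 *	dma_free_coherent(dev, ring_bytes, ring, ring_dma);
 */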

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			       DMA_ATTR_NON_CONSISTENT);
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_attrs(dev, size, cpu_addr, dma_handle,
		       DMA_ATTR_NON_CONSISTENT);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (get_dma_ops(dev)->mapping_error)
		return get_dma_ops(dev)->mapping_error(dev, dma_addr);

#ifdef DMA_ERROR_CODE
	return dma_addr == DMA_ERROR_CODE;
#else
	return 0;
#endif
}

#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
#endif

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
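
/*
 * Illustrative sketch of the usual probe-time negotiation: prefer 64-bit
 * addressing and fall back to 32-bit (a non-zero return means failure):
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */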

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
						unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}
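
/*
 * Illustrative sketch: a driver whose hardware limits each scatterlist
 * segment to 64 KiB and cannot cross a 4 GiB boundary could advertise
 * that as follows (dev->dma_parms must already point at storage):
 *
 *	dma_set_max_seg_size(dev, SZ_64K);
 *	dma_set_seg_boundary(dev, DMA_BIT_MASK(32));
 */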

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

/* flags for the coherent memory api */
#define DMA_MEMORY_MAP			0x01
#define DMA_MEMORY_IO			0x02
#define DMA_MEMORY_INCLUDES_CHILDREN	0x04
#define DMA_MEMORY_EXCLUSIVE		0x08

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return 0;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
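
/*
 * Illustrative sketch: a device with its own coherent memory (e.g. SRAM
 * behind a BAR) can route coherent allocations there; phys, devaddr and
 * SRAM_SIZE are placeholders. Note that at this point in the API's
 * history a zero return means failure:
 *
 *	if (!dma_declare_coherent_memory(dev, phys, devaddr, SRAM_SIZE,
 *					 DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE))
 *		return -ENODEV;
 */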

/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
				  dma_addr_t dma_handle);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, gfp_t gfp)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_addr, gfp,
			       DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif
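
/*
 * Illustrative sketch: frame buffers are the classic write-combined case,
 * letting the CPU fill the buffer quickly while the display engine scans
 * it out (fb_bytes is a placeholder):
 *
 *	dma_addr_t fb_dma;
 *	void *fb;
 *
 *	fb = dma_alloc_wc(dev, fb_bytes, &fb_dma, GFP_KERNEL);
 *	if (!fb)
 *		return -ENOMEM;
 *	...
 *	dma_free_wc(dev, fb_bytes, fb, fb_dma);
 */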

#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)          ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)            ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)          (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)            (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   do { } while (0)
#endif
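
/*
 * Illustrative sketch: these macros let a driver keep unmap bookkeeping
 * only in configurations that need it, e.g. in a hypothetical TX ring
 * slot (slot is a pointer to one such struct):
 *
 *	struct my_tx_slot {
 *		struct sk_buff *skb;
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(slot, addr, mapping);
 *	dma_unmap_len_set(slot, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(slot, addr),
 *			 dma_unmap_len(slot, len), DMA_TO_DEVICE);
 */

#endif /* _LINUX_DMA_MAPPING_H */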