/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
        struct device           *dev;
};

struct vb2_dc_buf {
        struct device                   *dev;
        void                            *vaddr;
        unsigned long                   size;
        dma_addr_t                      dma_addr;
        enum dma_data_direction         dma_dir;
        struct sg_table                 *dma_sgt;
        struct frame_vector             *vec;

        /* MMAP related */
        struct vb2_vmarea_handler       handler;
        atomic_t                        refcount;
        struct sg_table                 *sgt_base;

        /* DMABUF related */
        struct dma_buf_attachment       *db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
        struct scatterlist *s;
        dma_addr_t expected = sg_dma_address(sgt->sgl);
        unsigned int i;
        unsigned long size = 0;

        for_each_sg(sgt->sgl, s, sgt->nents, i) {
                if (sg_dma_address(s) != expected)
                        break;
                expected = sg_dma_address(s) + sg_dma_len(s);
                size += sg_dma_len(s);
        }
        return size;
}
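
/*
 * Worked example (illustrative, not from the original source): for a
 * mapped table holding DMA chunks {0x10000000, len 0x1000} and
 * {0x10001000, len 0x1000} followed by {0x20000000, len 0x1000}, the
 * first two chunks form a contiguous run and the third breaks it, so
 * the function returns 0x2000.
 */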

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return &buf->dma_addr;
}
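
/*
 * Usage sketch (illustrative; not part of this file): drivers using
 * these memops normally fetch the bus address of a plane through the
 * helper in <media/videobuf2-dma-contig.h>, which simply dereferences
 * the dma_addr_t pointer returned by vb2_dc_cookie():
 *
 *      dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
 */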

static void *vb2_dc_vaddr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!buf->vaddr && buf->db_attach)
                buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

        return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return atomic_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
                               buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}
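
/*
 * The vb2 core calls prepare() before a buffer is handed to the device
 * and finish() before it is returned to userspace, so the two syncs
 * above bracket each DMA transfer with the cache maintenance implied by
 * buf->dma_dir.
 */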

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!atomic_dec_and_test(&buf->refcount))
                return;

        if (buf->sgt_base) {
                sg_free_table(buf->sgt_base);
                kfree(buf->sgt_base);
        }
        dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
        put_device(buf->dev);
        kfree(buf);
}

static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size,
                          enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct device *dev = conf->dev;
        struct vb2_dc_buf *buf;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr,
                                                GFP_KERNEL | gfp_flags);
        if (!buf->vaddr) {
                dev_err(dev, "dma_alloc_coherent of size %lu failed\n", size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        /* Prevent the device from being released while the buffer is used */
        buf->dev = get_device(dev);
        buf->size = size;
        buf->dma_dir = dma_dir;

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dc_put;
        buf->handler.arg = buf;

        atomic_inc(&buf->refcount);

        return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                printk(KERN_ERR "No buffer to map\n");
                return -EINVAL;
        }

        /*
         * dma_mmap_* uses vm_pgoff as an in-buffer offset, but we want to
         * map the whole buffer.
         */
        vma->vm_pgoff = 0;

        ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
                buf->dma_addr, buf->size);

        if (ret) {
                pr_err("Remapping memory failed, error: %d\n", ret);
                return ret;
        }

        vma->vm_flags           |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data    = &buf->handler;
        vma->vm_ops             = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n",
                __func__, (unsigned long)buf->dma_addr, vma->vm_start,
                buf->size);

        return 0;
}
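
/*
 * Note: vb2_common_vm_ops (from videobuf2-memops) takes an extra
 * reference on buf->refcount in its ->open() handler and drops it via
 * buf->handler.put (i.e. vb2_dc_put) in ->close(), so the coherent
 * allocation stays alive until the last munmap().
 */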

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
        struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_dc_attachment *attach;
        unsigned int i;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt;
        struct vb2_dc_buf *buf = dbuf->priv;
        int ret;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        /* Copy the buf->sgt_base scatter list to the attachment, as we can't
         * map the same scatter list to multiple attachments at the same time.
         */
        ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return -ENOMEM;
        }

        rd = buf->sgt_base->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        attach->dma_dir = DMA_NONE;
        dbuf_attach->priv = attach;

        return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
        struct dma_buf_attachment *db_attach)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dma_dir);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dma_dir == dma_dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dma_dir);
                attach->dma_dir = DMA_NONE;
        }

        /* mapping to the client with new direction */
        sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                                dma_dir);
        if (!sgt->nents) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dma_dir = dma_dir;

        mutex_unlock(lock);

        return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
        struct sg_table *sgt, enum dma_data_direction dma_dir)
{
        /* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_dc_get_dmabuf */
        vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
{
        return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
        .attach = vb2_dc_dmabuf_ops_attach,
        .detach = vb2_dc_dmabuf_ops_detach,
        .map_dma_buf = vb2_dc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
        .kmap = vb2_dc_dmabuf_ops_kmap,
        .kmap_atomic = vb2_dc_dmabuf_ops_kmap,
        .vmap = vb2_dc_dmabuf_ops_vmap,
        .mmap = vb2_dc_dmabuf_ops_mmap,
        .release = vb2_dc_dmabuf_ops_release,
};
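
/*
 * Importer-side view (a sketch; the calls below are made by other
 * drivers, not here): a device importing a buffer exported with these
 * ops does
 *
 *      attach = dma_buf_attach(dbuf, dev);
 *      sgt = dma_buf_map_attachment(attach, DMA_FROM_DEVICE);
 *
 * which lands in vb2_dc_dmabuf_ops_attach() and vb2_dc_dmabuf_ops_map()
 * above. This allocator acts as an importer itself in
 * vb2_dc_attach_dmabuf()/vb2_dc_map_dmabuf() further down.
 */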

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
        int ret;
        struct sg_table *sgt;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                dev_err(buf->dev, "failed to alloc sg table\n");
                return NULL;
        }

        ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
                buf->size);
        if (ret < 0) {
                dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
                kfree(sgt);
                return NULL;
        }

        return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct dma_buf *dbuf;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &vb2_dc_dmabuf_ops;
        exp_info.size = buf->size;
        exp_info.flags = flags;
        exp_info.priv = buf;

        if (!buf->sgt_base)
                buf->sgt_base = vb2_dc_get_base_sgt(buf);

        if (WARN_ON(!buf->sgt_base))
                return NULL;

        dbuf = dma_buf_export(&exp_info);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        atomic_inc(&buf->refcount);

        return dbuf;
}
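
/*
 * vb2_dc_get_dmabuf() is reached through the VIDIOC_EXPBUF ioctl path;
 * the reference taken above is dropped in vb2_dc_dmabuf_ops_release()
 * once the last file descriptor for the exported dma-buf is closed.
 */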

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;
        int i;
        struct page **pages;

        if (sgt) {
                DEFINE_DMA_ATTRS(attrs);

                dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
                /*
                 * No need to sync to CPU, it's already synced to the CPU
                 * since the finish() memop will have been called before this.
                 */
                dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                                   buf->dma_dir, &attrs);
                pages = frame_vector_pages(buf->vec);
                /* sgt should exist only if vector contains pages... */
                BUG_ON(IS_ERR(pages));
                for (i = 0; i < frame_vector_count(buf->vec); i++)
                        set_page_dirty_lock(pages[i]);
                sg_free_table(sgt);
                kfree(sgt);
        }
        vb2_destroy_framevec(buf->vec);
        kfree(buf);
}

/*
 * For some kinds of reserved memory there might be no struct page
 * available, so all that can be done to support such 'pages' is to try
 * to convert the pfn to a dma address, or as a last resort assume that
 * dma address == physical address (as earlier versions of
 * videobuf2-dma-contig did).
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        /* really, we cannot do anything better at this point */
        return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif
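
/*
 * Example of the fallback case (illustrative): with 4 KiB pages
 * (PAGE_SHIFT == 12), pfn 0x12345 yields dma address 0x12345000, i.e.
 * the physical address, which is only correct on systems without an
 * IOMMU or other bus-address translation.
 */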

static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
        unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        struct frame_vector *vec;
        unsigned long offset;
        int n_pages, i;
        int ret = 0;
        struct sg_table *sgt;
        unsigned long contig_size;
        unsigned long dma_align = dma_get_cache_alignment();
        DEFINE_DMA_ATTRS(attrs);

        dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

        /* Only cache aligned DMA transfers are reliable */
        if (!IS_ALIGNED(vaddr | size, dma_align)) {
                pr_debug("user data must be aligned to %lu bytes\n", dma_align);
                return ERR_PTR(-EINVAL);
        }

        if (!size) {
                pr_debug("size is zero\n");
                return ERR_PTR(-EINVAL);
        }

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = conf->dev;
        buf->dma_dir = dma_dir;

        offset = vaddr & ~PAGE_MASK;
        vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
        if (IS_ERR(vec)) {
                ret = PTR_ERR(vec);
                goto fail_buf;
        }
        buf->vec = vec;
        n_pages = frame_vector_count(vec);
        ret = frame_vector_to_pages(vec);
        if (ret < 0) {
                unsigned long *nums = frame_vector_pfns(vec);

                /*
                 * Failed to convert to pages... Check that the memory is
                 * physically contiguous and use a direct mapping.
                 */
                for (i = 1; i < n_pages; i++)
                        if (nums[i-1] + 1 != nums[i])
                                goto fail_pfnvec;
                buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
                goto out;
        }

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                pr_err("failed to allocate sg table\n");
                ret = -ENOMEM;
                goto fail_pfnvec;
        }

        ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
                offset, size, GFP_KERNEL);
        if (ret) {
                pr_err("failed to initialize sg table\n");
                goto fail_sgt;
        }

        /*
         * No need to sync to the device, this will happen later when the
         * prepare() memop is called.
         */
        sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                                      buf->dma_dir, &attrs);
        if (sgt->nents <= 0) {
                pr_err("failed to map scatterlist\n");
                ret = -EIO;
                goto fail_sgt_init;
        }

        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < size) {
                pr_err("contiguous mapping is too small %lu/%lu\n",
                        contig_size, size);
                ret = -EFAULT;
                goto fail_map_sg;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;
out:
        buf->size = size;

        return buf;

fail_map_sg:
        dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                           buf->dma_dir, &attrs);

fail_sgt_init:
        sg_free_table(sgt);

fail_sgt:
        kfree(sgt);

fail_pfnvec:
        vb2_destroy_framevec(vec);

fail_buf:
        kfree(buf);

        return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt;
        unsigned long contig_size;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to pin a non-attached buffer\n");
                return -EINVAL;
        }

        if (WARN_ON(buf->dma_sgt)) {
                pr_err("dmabuf buffer is already pinned\n");
                return 0;
        }

        /* get the associated scatterlist for this buffer */
        sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
        if (IS_ERR(sgt)) {
                pr_err("Error getting dmabuf scatterlist\n");
                return -EINVAL;
        }

        /* check that the dmabuf is big enough to store a contiguous chunk */
        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < buf->size) {
                pr_err("contiguous chunk is too small %lu/%lu b\n",
                        contig_size, buf->size);
                dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
                return -EFAULT;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;
        buf->vaddr = NULL;

        return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a non-attached buffer\n");
                return;
        }

        if (WARN_ON(!sgt)) {
                pr_err("dmabuf buffer is already unpinned\n");
                return;
        }

        if (buf->vaddr) {
                dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
                buf->vaddr = NULL;
        }
        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

        buf->dma_addr = 0;
        buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;

        /* if vb2 works correctly you should never detach a mapped buffer */
        if (WARN_ON(buf->dma_addr))
                vb2_dc_unmap_dmabuf(buf);

        /* detach this attachment */
        dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
        kfree(buf);
}

static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
        unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        struct dma_buf_attachment *dba;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = conf->dev;
        /* create attachment for the dmabuf with the user device */
        dba = dma_buf_attach(dbuf, buf->dev);
        if (IS_ERR(dba)) {
                pr_err("failed to attach dmabuf\n");
                kfree(buf);
                return dba;
        }

        buf->dma_dir = dma_dir;
        buf->size = size;
        buf->db_attach = dba;

        return buf;
}
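
/*
 * Importer lifecycle (as driven by the vb2 core, sketched here for
 * orientation): attach_dmabuf() and map_dmabuf() run when userspace
 * queues a new DMABUF fd; the mapping is then kept across subsequent
 * queues of the same fd and torn down via unmap_dmabuf() and
 * detach_dmabuf() when the buffer is released or the fd changes.
 */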

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
        .alloc          = vb2_dc_alloc,
        .put            = vb2_dc_put,
        .get_dmabuf     = vb2_dc_get_dmabuf,
        .cookie         = vb2_dc_cookie,
        .vaddr          = vb2_dc_vaddr,
        .mmap           = vb2_dc_mmap,
        .get_userptr    = vb2_dc_get_userptr,
        .put_userptr    = vb2_dc_put_userptr,
        .prepare        = vb2_dc_prepare,
        .finish         = vb2_dc_finish,
        .map_dmabuf     = vb2_dc_map_dmabuf,
        .unmap_dmabuf   = vb2_dc_unmap_dmabuf,
        .attach_dmabuf  = vb2_dc_attach_dmabuf,
        .detach_dmabuf  = vb2_dc_detach_dmabuf,
        .num_users      = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

void *vb2_dma_contig_init_ctx(struct device *dev)
{
        struct vb2_dc_conf *conf;

        conf = kzalloc(sizeof *conf, GFP_KERNEL);
        if (!conf)
                return ERR_PTR(-ENOMEM);

        conf->dev = dev;

        return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
        if (!IS_ERR_OR_NULL(alloc_ctx))
                kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);
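
/*
 * Typical driver setup (a sketch; names and error handling are
 * illustrative, not taken from this file):
 *
 *      priv->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
 *      if (IS_ERR(priv->alloc_ctx))
 *              return PTR_ERR(priv->alloc_ctx);
 *
 *      q->mem_ops = &vb2_dma_contig_memops;
 *      ...
 *      vb2_dma_contig_cleanup_ctx(priv->alloc_ctx);
 */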

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");