git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - drivers/base/dma-buf.c
Merge remote-tracking branch 'dma-buf/for-next'
[karo-tx-linux.git] / drivers / base / dma-buf.c
index 840c7fa80983de8337e0f401f3441552059615d0..f3014c448e1ef5fb24804945af9dbdca7950b4c5 100644 (file)
 #include <linux/fs.h>
 #include <linux/slab.h>
 #include <linux/dma-buf.h>
+#include <linux/fence.h>
 #include <linux/anon_inodes.h>
 #include <linux/export.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
+#include <linux/poll.h>
+#include <linux/reservation.h>
 
 static inline int is_dma_buf_file(struct file *);
 
@@ -50,12 +53,25 @@ static int dma_buf_release(struct inode *inode, struct file *file)
 
        BUG_ON(dmabuf->vmapping_counter);
 
+       /*
+        * Any fences that a dma-buf poll can wait on should be signaled
+        * before releasing dma-buf. This is the responsibility of each
+        * driver that uses the reservation objects.
+        *
+        * If you hit this BUG() it means someone dropped their ref to the
+        * dma-buf while still having pending operation to the buffer.
+        */
+       BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
+
        dmabuf->ops->release(dmabuf);
 
        mutex_lock(&db_list.lock);
        list_del(&dmabuf->list_node);
        mutex_unlock(&db_list.lock);
 
+       if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
+               reservation_object_fini(dmabuf->resv);
+
        kfree(dmabuf);
        return 0;
 }
@@ -103,10 +119,141 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
        return base + offset;
 }
 
+static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb)
+{
+       struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dcb->poll->lock, flags);
+       wake_up_locked_poll(dcb->poll, dcb->active);
+       dcb->active = 0;
+       spin_unlock_irqrestore(&dcb->poll->lock, flags);
+}
+
+static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
+{
+       struct dma_buf *dmabuf;
+       struct reservation_object *resv;
+       struct reservation_object_list *fobj;
+       struct fence *fence_excl;
+       unsigned long events;
+       unsigned shared_count, seq;
+
+       dmabuf = file->private_data;
+       if (!dmabuf || !dmabuf->resv)
+               return POLLERR;
+
+       resv = dmabuf->resv;
+
+       poll_wait(file, &dmabuf->poll, poll);
+
+       events = poll_requested_events(poll) & (POLLIN | POLLOUT);
+       if (!events)
+               return 0;
+
+retry:
+       seq = read_seqcount_begin(&resv->seq);
+       rcu_read_lock();
+
+       fobj = rcu_dereference(resv->fence);
+       if (fobj)
+               shared_count = fobj->shared_count;
+       else
+               shared_count = 0;
+       fence_excl = rcu_dereference(resv->fence_excl);
+       if (read_seqcount_retry(&resv->seq, seq)) {
+               rcu_read_unlock();
+               goto retry;
+       }
+
+       if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) {
+               struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
+               unsigned long pevents = POLLIN;
+
+               if (shared_count == 0)
+                       pevents |= POLLOUT;
+
+               spin_lock_irq(&dmabuf->poll.lock);
+               if (dcb->active) {
+                       dcb->active |= pevents;
+                       events &= ~pevents;
+               } else
+                       dcb->active = pevents;
+               spin_unlock_irq(&dmabuf->poll.lock);
+
+               if (events & pevents) {
+                       if (!fence_get_rcu(fence_excl)) {
+                               /* force a recheck */
+                               events &= ~pevents;
+                               dma_buf_poll_cb(NULL, &dcb->cb);
+                       } else if (!fence_add_callback(fence_excl, &dcb->cb,
+                                                      dma_buf_poll_cb)) {
+                               events &= ~pevents;
+                               fence_put(fence_excl);
+                       } else {
+                               /*
+                                * No callback queued, wake up any additional
+                                * waiters.
+                                */
+                               fence_put(fence_excl);
+                               dma_buf_poll_cb(NULL, &dcb->cb);
+                       }
+               }
+       }
+
+       if ((events & POLLOUT) && shared_count > 0) {
+               struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
+               int i;
+
+               /* Only queue a new callback if no event has fired yet */
+               spin_lock_irq(&dmabuf->poll.lock);
+               if (dcb->active)
+                       events &= ~POLLOUT;
+               else
+                       dcb->active = POLLOUT;
+               spin_unlock_irq(&dmabuf->poll.lock);
+
+               if (!(events & POLLOUT))
+                       goto out;
+
+               for (i = 0; i < shared_count; ++i) {
+                       struct fence *fence = rcu_dereference(fobj->shared[i]);
+
+                       if (!fence_get_rcu(fence)) {
+                               /*
+                                * fence refcount dropped to zero, this means
+                                * that fobj has been freed
+                                *
+                                * call dma_buf_poll_cb and force a recheck!
+                                */
+                               events &= ~POLLOUT;
+                               dma_buf_poll_cb(NULL, &dcb->cb);
+                               break;
+                       }
+                       if (!fence_add_callback(fence, &dcb->cb,
+                                               dma_buf_poll_cb)) {
+                               fence_put(fence);
+                               events &= ~POLLOUT;
+                               break;
+                       }
+                       fence_put(fence);
+               }
+
+               /* No callback queued, wake up any additional waiters. */
+               if (i == shared_count)
+                       dma_buf_poll_cb(NULL, &dcb->cb);
+       }
+
+out:
+       rcu_read_unlock();
+       return events;
+}
+
 static const struct file_operations dma_buf_fops = {
        .release        = dma_buf_release,
        .mmap           = dma_buf_mmap_internal,
        .llseek         = dma_buf_llseek,
+       .poll           = dma_buf_poll,
 };
 
 /*
@@ -128,6 +275,7 @@ static inline int is_dma_buf_file(struct file *file)
  * @size:      [in]    Size of the buffer
  * @flags:     [in]    mode flags for the file.
  * @exp_name:  [in]    name of the exporting module - useful for debugging.
+ * @resv:      [in]    reservation-object, NULL to allocate default one.
  *
  * Returns, on success, a newly created dma_buf object, which wraps the
  * supplied private data and operations for dma_buf_ops. On either missing
@@ -135,10 +283,17 @@ static inline int is_dma_buf_file(struct file *file)
  *
  */
 struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
-                               size_t size, int flags, const char *exp_name)
+                               size_t size, int flags, const char *exp_name,
+                               struct reservation_object *resv)
 {
        struct dma_buf *dmabuf;
        struct file *file;
+       size_t alloc_size = sizeof(struct dma_buf);
+       if (!resv)
+               alloc_size += sizeof(struct reservation_object);
+       else
+               /* prevent &dma_buf[1] == dma_buf->resv */
+               alloc_size += 1;
 
        if (WARN_ON(!priv || !ops
                          || !ops->map_dma_buf
@@ -150,7 +305,7 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
                return ERR_PTR(-EINVAL);
        }
 
-       dmabuf = kzalloc(sizeof(struct dma_buf), GFP_KERNEL);
+       dmabuf = kzalloc(alloc_size, GFP_KERNEL);
        if (dmabuf == NULL)
                return ERR_PTR(-ENOMEM);
 
@@ -158,6 +313,15 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
        dmabuf->ops = ops;
        dmabuf->size = size;
        dmabuf->exp_name = exp_name;
+       init_waitqueue_head(&dmabuf->poll);
+       dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
+       dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
+
+       if (!resv) {
+               resv = (struct reservation_object *)&dmabuf[1];
+               reservation_object_init(resv);
+       }
+       dmabuf->resv = resv;
 
        file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);
        if (IS_ERR(file)) {