/*
 * drivers/staging/android/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>

#include "sync.h"

#define CREATE_TRACE_POINTS
#include "trace/sync.h"

static const struct fence_ops android_fence_ops;
static const struct file_operations sync_fence_fops;

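/*
 * Create a new sync_timeline.  @size must be at least
 * sizeof(struct sync_timeline) so that drivers can embed the timeline
 * at the start of a larger, driver-private structure.  The timeline
 * starts with one reference and a freshly allocated fence context.
 */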
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
                                           int size, const char *name)
{
        struct sync_timeline *obj;

        if (size < sizeof(struct sync_timeline))
                return NULL;

        obj = kzalloc(size, GFP_KERNEL);
        if (obj == NULL)
                return NULL;

        kref_init(&obj->kref);
        obj->ops = ops;
        obj->context = fence_context_alloc(1);
        strlcpy(obj->name, name, sizeof(obj->name));

        INIT_LIST_HEAD(&obj->child_list_head);
        INIT_LIST_HEAD(&obj->active_list_head);
        spin_lock_init(&obj->child_list_lock);

        sync_timeline_debug_add(obj);

        return obj;
}
EXPORT_SYMBOL(sync_timeline_create);

static void sync_timeline_free(struct kref *kref)
{
        struct sync_timeline *obj =
                container_of(kref, struct sync_timeline, kref);

        sync_timeline_debug_remove(obj);

        if (obj->ops->release_obj)
                obj->ops->release_obj(obj);

        kfree(obj);
}

static void sync_timeline_get(struct sync_timeline *obj)
{
        kref_get(&obj->kref);
}

static void sync_timeline_put(struct sync_timeline *obj)
{
        kref_put(&obj->kref, sync_timeline_free);
}

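/*
 * Flag the timeline as destroyed (the smp_wmb() orders the flag write
 * ahead of the signal below), notify any remaining children, and drop
 * the creator's reference.  The object itself is freed only once every
 * child sync_pt has dropped its timeline reference.
 */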
void sync_timeline_destroy(struct sync_timeline *obj)
{
        obj->destroyed = true;
        smp_wmb();

        /*
         * Signal any children that their parent is going away.
         */
        sync_timeline_signal(obj);
        sync_timeline_put(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);

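/*
 * Called by drivers after they advance the timeline: walk the active
 * list and unlink every point that now reports itself signaled.  Fence
 * callbacks fire from __fence_is_signaled() with child_list_lock held.
 */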
void sync_timeline_signal(struct sync_timeline *obj)
{
        unsigned long flags;
        LIST_HEAD(signaled_pts);
        struct sync_pt *pt, *next;

        trace_sync_timeline(obj);

        spin_lock_irqsave(&obj->child_list_lock, flags);

        list_for_each_entry_safe(pt, next, &obj->active_list_head,
                                 active_list) {
                if (__fence_is_signaled(&pt->base))
                        list_del(&pt->active_list);
        }

        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
EXPORT_SYMBOL(sync_timeline_signal);

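/*
 * Allocate a new sync_pt on @obj.  As with sync_timeline_create(),
 * @size may exceed sizeof(struct sync_pt) to leave room for driver
 * data.  The point takes a reference on its parent timeline and is
 * initialized with the next sequence number on the timeline's fence
 * context.
 */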
struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size)
{
        unsigned long flags;
        struct sync_pt *pt;

        if (size < sizeof(struct sync_pt))
                return NULL;

        pt = kzalloc(size, GFP_KERNEL);
        if (pt == NULL)
                return NULL;

        spin_lock_irqsave(&obj->child_list_lock, flags);
        sync_timeline_get(obj);
        __fence_init(&pt->base, &android_fence_ops, &obj->child_list_lock,
                     obj->context, ++obj->value);
        list_add_tail(&pt->child_list, &obj->child_list_head);
        INIT_LIST_HEAD(&pt->active_list);
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
        return pt;
}
EXPORT_SYMBOL(sync_pt_create);

void sync_pt_free(struct sync_pt *pt)
{
        fence_put(&pt->base);
}
EXPORT_SYMBOL(sync_pt_free);

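/*
 * Allocate a sync_fence together with the anonymous inode file that
 * backs it.  Userspace lifetime is tied to the file's reference count,
 * not to fence->kref; sync_fence_put() drops the file reference.
 */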
static struct sync_fence *sync_fence_alloc(int size, const char *name)
{
        struct sync_fence *fence;

        fence = kzalloc(size, GFP_KERNEL);
        if (fence == NULL)
                return NULL;

        fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
                                         fence, 0);
        if (IS_ERR(fence->file))
                goto err;

        kref_init(&fence->kref);
        strlcpy(fence->name, name, sizeof(fence->name));

        init_waitqueue_head(&fence->wq);

        return fence;

err:
        kfree(fence);
        return NULL;
}

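/*
 * Per-point completion callback.  fence->status counts the points that
 * have not signaled yet; when the count hits zero the whole sync_fence
 * is done and any waiters are woken.
 */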
static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
{
        struct sync_fence_cb *check;
        struct sync_fence *fence;

        check = container_of(cb, struct sync_fence_cb, cb);
        fence = check->fence;

        if (atomic_dec_and_test(&fence->status))
                wake_up_all(&fence->wq);
}

/* TODO: implement a create which takes more than one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
        struct sync_fence *fence;

        fence = sync_fence_alloc(offsetof(struct sync_fence, cbs[1]), name);
        if (fence == NULL)
                return NULL;

        fence->num_fences = 1;
        atomic_set(&fence->status, 1);

        fence_get(&pt->base);
        fence->cbs[0].sync_pt = &pt->base;
        fence->cbs[0].fence = fence;
        if (fence_add_callback(&pt->base, &fence->cbs[0].cb,
                               fence_check_cb_func))
                atomic_dec(&fence->status);

        sync_fence_debug_add(fence);

        return fence;
}
EXPORT_SYMBOL(sync_fence_create);

struct sync_fence *sync_fence_fdget(int fd)
{
        struct file *file = fget(fd);

        if (file == NULL)
                return NULL;

        if (file->f_op != &sync_fence_fops)
                goto err;

        return file->private_data;

err:
        fput(file);
        return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

void sync_fence_put(struct sync_fence *fence)
{
        fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

void sync_fence_install(struct sync_fence *fence, int fd)
{
        fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);

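/*
 * Append @pt to the merged fence under construction.  The point is
 * only counted (and referenced) if the callback could be installed,
 * i.e. if the point has not already signaled; @i advances accordingly.
 */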
static void sync_fence_add_pt(struct sync_fence *fence,
                              int *i, struct fence *pt)
{
        fence->cbs[*i].sync_pt = pt;
        fence->cbs[*i].fence = fence;

        if (!fence_add_callback(pt, &fence->cbs[*i].cb, fence_check_cb_func)) {
                fence_get(pt);
                (*i)++;
        }
}

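/*
 * Merge two fences into a new one that signals once every point of
 * both inputs has signaled.  Points are merge-sorted by fence context;
 * when both inputs carry a point on the same context, only the later
 * of the two sequence numbers is kept.
 */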
struct sync_fence *sync_fence_merge(const char *name,
                                    struct sync_fence *a, struct sync_fence *b)
{
        int num_fences = a->num_fences + b->num_fences;
        struct sync_fence *fence;
        int i, i_a, i_b;
        unsigned long size = offsetof(struct sync_fence, cbs[num_fences]);

        fence = sync_fence_alloc(size, name);
        if (fence == NULL)
                return NULL;

        atomic_set(&fence->status, num_fences);

        /*
         * Assume sync_fence a and b are both ordered and have no
         * duplicates with the same context.
         *
         * If a sync_fence can only be created with sync_fence_merge
         * and sync_fence_create, this is a reasonable assumption.
         */
        for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
                struct fence *pt_a = a->cbs[i_a].sync_pt;
                struct fence *pt_b = b->cbs[i_b].sync_pt;

                if (pt_a->context < pt_b->context) {
                        sync_fence_add_pt(fence, &i, pt_a);

                        i_a++;
                } else if (pt_a->context > pt_b->context) {
                        sync_fence_add_pt(fence, &i, pt_b);

                        i_b++;
                } else {
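                        /*
                         * Same context: keep the later point.  Unsigned
                         * subtraction keeps the comparison correct
                         * across u32 seqno wraparound.
                         */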
                        if (pt_a->seqno - pt_b->seqno <= INT_MAX)
                                sync_fence_add_pt(fence, &i, pt_a);
                        else
                                sync_fence_add_pt(fence, &i, pt_b);

                        i_a++;
                        i_b++;
                }
        }

        for (; i_a < a->num_fences; i_a++)
                sync_fence_add_pt(fence, &i, a->cbs[i_a].sync_pt);

        for (; i_b < b->num_fences; i_b++)
                sync_fence_add_pt(fence, &i, b->cbs[i_b].sync_pt);

        if (num_fences > i)
                atomic_sub(num_fences - i, &fence->status);
        fence->num_fences = i;

        sync_fence_debug_add(fence);
        return fence;
}
EXPORT_SYMBOL(sync_fence_merge);

int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
                                 int wake_flags, void *key)
{
        struct sync_fence_waiter *wait;

        wait = container_of(curr, struct sync_fence_waiter, work);
        list_del_init(&wait->work.task_list);

        wait->callback(wait->work.private, wait);
        return 1;
}

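/*
 * Register an async waiter.  Returns 1 without queueing if the fence
 * has already signaled, 0 if @waiter was queued and its callback will
 * run on completion, or a negative error if the fence is in an error
 * state.
 */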
int sync_fence_wait_async(struct sync_fence *fence,
                          struct sync_fence_waiter *waiter)
{
        int err = atomic_read(&fence->status);
        unsigned long flags;

        if (err < 0)
                return err;

        if (!err)
                return 1;

        init_waitqueue_func_entry(&waiter->work, sync_fence_wake_up_wq);
        waiter->work.private = fence;

        spin_lock_irqsave(&fence->wq.lock, flags);
        err = atomic_read(&fence->status);
        if (err > 0)
                __add_wait_queue_tail(&fence->wq, &waiter->work);
        spin_unlock_irqrestore(&fence->wq.lock, flags);

        if (err < 0)
                return err;

        return !err;
}
EXPORT_SYMBOL(sync_fence_wait_async);

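/*
 * Dequeue an async waiter before it runs.  Returns 0 on success or
 * -ENOENT if the waiter was never queued or has already fired.
 */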
int sync_fence_cancel_async(struct sync_fence *fence,
                             struct sync_fence_waiter *waiter)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&fence->wq.lock, flags);
        if (!list_empty(&waiter->work.task_list))
                list_del_init(&waiter->work.task_list);
        else
                ret = -ENOENT;
        spin_unlock_irqrestore(&fence->wq.lock, flags);
        return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);

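/*
 * Wait for the fence to signal.  @timeout is in milliseconds; a
 * negative value means wait forever.  Returns 0 once signaled, -ETIME
 * on timeout, -ERESTARTSYS if interrupted, or the fence's own negative
 * error status.
 */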
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
        long ret;
        int i;

        if (timeout < 0)
                timeout = MAX_SCHEDULE_TIMEOUT;
        else
                timeout = msecs_to_jiffies(timeout);

        trace_sync_wait(fence, 1);
        for (i = 0; i < fence->num_fences; ++i)
                trace_sync_pt(fence->cbs[i].sync_pt);
        ret = wait_event_interruptible_timeout(fence->wq,
                                               atomic_read(&fence->status) <= 0,
                                               timeout);
        trace_sync_wait(fence, 0);

        if (ret < 0)
                return ret;
        else if (ret == 0) {
                if (timeout) {
                        pr_info("fence timeout on [%p] after %dms\n", fence,
                                jiffies_to_msecs(timeout));
                        sync_dump();
                }
                return -ETIME;
        }

        ret = atomic_read(&fence->status);
        if (ret) {
                pr_info("fence error %ld on [%p]\n", ret, fence);
                sync_dump();
        }
        return ret;
}
EXPORT_SYMBOL(sync_fence_wait);

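/*
 * struct fence_ops glue: everything below adapts the core fence API to
 * the sync_timeline_ops the driver registered for the point's parent
 * timeline.
 */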
static const char *android_fence_get_driver_name(struct fence *fence)
{
        struct sync_pt *pt = container_of(fence, struct sync_pt, base);
        struct sync_timeline *parent = sync_pt_parent(pt);

        return parent->ops->driver_name;
}

static const char *android_fence_get_timeline_name(struct fence *fence)
{
        struct sync_pt *pt = container_of(fence, struct sync_pt, base);
        struct sync_timeline *parent = sync_pt_parent(pt);

        return parent->name;
}

static void android_fence_release(struct fence *fence)
{
        struct sync_pt *pt = container_of(fence, struct sync_pt, base);
        struct sync_timeline *parent = sync_pt_parent(pt);
        unsigned long flags;

        spin_lock_irqsave(fence->lock, flags);
        list_del(&pt->child_list);
        if (WARN_ON_ONCE(!list_empty(&pt->active_list)))
                list_del(&pt->active_list);
        spin_unlock_irqrestore(fence->lock, flags);

        if (parent->ops->free_pt)
                parent->ops->free_pt(pt);

        sync_timeline_put(parent);
        kfree(pt);
}

static bool android_fence_signaled(struct fence *fence)
{
        struct sync_pt *pt = container_of(fence, struct sync_pt, base);
        struct sync_timeline *parent = sync_pt_parent(pt);
        int ret;

        ret = parent->ops->has_signaled(pt);
        if (ret < 0)
                fence->status = ret;
        return ret;
}

static bool android_fence_enable_signaling(struct fence *fence)
{
        struct sync_pt *pt = container_of(fence, struct sync_pt, base);
        struct sync_timeline *parent = sync_pt_parent(pt);

        if (android_fence_signaled(fence))
                return false;

        list_add_tail(&pt->active_list, &parent->active_list_head);
        return true;
}

static int android_fence_fill_driver_data(struct fence *fence,
                                          void *data, int size)
{
        struct sync_pt *pt = container_of(fence, struct sync_pt, base);
        struct sync_timeline *parent = sync_pt_parent(pt);

        if (!parent->ops->fill_driver_data)
                return 0;
        return parent->ops->fill_driver_data(pt, data, size);
}

static void android_fence_value_str(struct fence *fence,
                                    char *str, int size)
{
        struct sync_pt *pt = container_of(fence, struct sync_pt, base);
        struct sync_timeline *parent = sync_pt_parent(pt);

        if (!parent->ops->pt_value_str) {
                if (size)
                        *str = 0;
                return;
        }
        parent->ops->pt_value_str(pt, str, size);
}

static void android_fence_timeline_value_str(struct fence *fence,
                                             char *str, int size)
{
        struct sync_pt *pt = container_of(fence, struct sync_pt, base);
        struct sync_timeline *parent = sync_pt_parent(pt);

        if (!parent->ops->timeline_value_str) {
                if (size)
                        *str = 0;
                return;
        }
        parent->ops->timeline_value_str(parent, str, size);
}

static const struct fence_ops android_fence_ops = {
        .get_driver_name = android_fence_get_driver_name,
        .get_timeline_name = android_fence_get_timeline_name,
        .enable_signaling = android_fence_enable_signaling,
        .signaled = android_fence_signaled,
        .wait = fence_default_wait,
        .release = android_fence_release,
        .fill_driver_data = android_fence_fill_driver_data,
        .fence_value_str = android_fence_value_str,
        .timeline_value_str = android_fence_timeline_value_str,
};

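/*
 * Final kref release.  If the fence never signaled (status != 0) the
 * per-point callbacks may still be registered, so remove them before
 * dropping the point references; fence_remove_callback() is harmless
 * for callbacks that have already fired.
 */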
static void sync_fence_free(struct kref *kref)
{
        struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
        int i, status = atomic_read(&fence->status);

        for (i = 0; i < fence->num_fences; ++i) {
                if (status)
                        fence_remove_callback(fence->cbs[i].sync_pt,
                                              &fence->cbs[i].cb);
                fence_put(fence->cbs[i].sync_pt);
        }

        kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
        struct sync_fence *fence = file->private_data;

        sync_fence_debug_remove(fence);

        kref_put(&fence->kref, sync_fence_free);
        return 0;
}

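/*
 * poll() reports POLLIN once every point has signaled and POLLERR if
 * the fence entered an error state; a positive status means points
 * are still pending.
 */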
static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
        struct sync_fence *fence = file->private_data;
        int status;

        poll_wait(file, &fence->wq, wait);

        status = atomic_read(&fence->status);

        if (!status)
                return POLLIN;
        else if (status < 0)
                return POLLERR;
        else
                return 0;
}

static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
        __s32 value;

        if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
                return -EFAULT;

        return sync_fence_wait(fence, value);
}

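/*
 * SYNC_IOC_MERGE: merge the ioctl target with the fence behind
 * data.fd2 and install the result on a new O_CLOEXEC fd, which is
 * returned to userspace in data.fence.
 */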
static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
        int fd = get_unused_fd_flags(O_CLOEXEC);
        int err;
        struct sync_fence *fence2, *fence3;
        struct sync_merge_data data;

        if (fd < 0)
                return fd;

        if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
                err = -EFAULT;
                goto err_put_fd;
        }

        fence2 = sync_fence_fdget(data.fd2);
        if (fence2 == NULL) {
                err = -ENOENT;
                goto err_put_fd;
        }

        data.name[sizeof(data.name) - 1] = '\0';
        fence3 = sync_fence_merge(data.name, fence, fence2);
        if (fence3 == NULL) {
                err = -ENOMEM;
                goto err_put_fence2;
        }

        data.fence = fd;
        if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
                err = -EFAULT;
                goto err_put_fence3;
        }

        sync_fence_install(fence3, fd);
        sync_fence_put(fence2);
        return 0;

err_put_fence3:
        sync_fence_put(fence3);

err_put_fence2:
        sync_fence_put(fence2);

err_put_fd:
        put_unused_fd(fd);
        return err;
}

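/*
 * Serialize one point into the info blob handed back by
 * SYNC_IOC_FENCE_INFO.  Returns the number of bytes written (the fixed
 * header plus any driver data) or a negative error if @size cannot
 * hold even the header.
 */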
static int sync_fill_pt_info(struct fence *fence, void *data, int size)
{
        struct sync_pt_info *info = data;
        int ret;

        if (size < sizeof(struct sync_pt_info))
                return -ENOMEM;

        info->len = sizeof(struct sync_pt_info);

        if (fence->ops->fill_driver_data) {
                ret = fence->ops->fill_driver_data(fence, info->driver_data,
                                                   size - sizeof(*info));
                if (ret < 0)
                        return ret;

                info->len += ret;
        }

        strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
                sizeof(info->obj_name));
        strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
                sizeof(info->driver_name));
        if (fence_is_signaled(fence))
                info->status = fence->status >= 0 ? 1 : fence->status;
        else
                info->status = 0;
        info->timestamp_ns = ktime_to_ns(fence->timestamp);

        return info->len;
}

static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
                                        unsigned long arg)
{
        struct sync_fence_info_data *data;
        __u32 size;
        __u32 len = 0;
        int ret, i;

        if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
                return -EFAULT;

        if (size < sizeof(struct sync_fence_info_data))
                return -EINVAL;

        if (size > 4096)
                size = 4096;

        data = kzalloc(size, GFP_KERNEL);
        if (data == NULL)
                return -ENOMEM;

        strlcpy(data->name, fence->name, sizeof(data->name));
        data->status = atomic_read(&fence->status);
        if (data->status >= 0)
                data->status = !data->status;

        len = sizeof(struct sync_fence_info_data);

        for (i = 0; i < fence->num_fences; ++i) {
                struct fence *pt = fence->cbs[i].sync_pt;

                ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

                if (ret < 0)
                        goto out;

                len += ret;
        }

        data->len = len;

        if (copy_to_user((void __user *)arg, data, len))
                ret = -EFAULT;
        else
                ret = 0;

out:
        kfree(data);

        return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
{
        struct sync_fence *fence = file->private_data;

        switch (cmd) {
        case SYNC_IOC_WAIT:
                return sync_fence_ioctl_wait(fence, arg);

        case SYNC_IOC_MERGE:
                return sync_fence_ioctl_merge(fence, arg);

        case SYNC_IOC_FENCE_INFO:
                return sync_fence_ioctl_fence_info(fence, arg);

        default:
                return -ENOTTY;
        }
}

static const struct file_operations sync_fence_fops = {
        .release = sync_fence_release,
        .poll = sync_fence_poll,
        .unlocked_ioctl = sync_fence_ioctl,
        .compat_ioctl = sync_fence_ioctl,
};