]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/staging/android/sync.c
Merge tag 'iio-for-3.16b' of git://git.kernel.org/pub/scm/linux/kernel/git/jic23...
[karo-tx-linux.git] / drivers / staging / android / sync.c
1 /*
2  * drivers/base/sync.c
3  *
4  * Copyright (C) 2012 Google, Inc.
5  *
6  * This software is licensed under the terms of the GNU General Public
7  * License version 2, as published by the Free Software Foundation, and
8  * may be copied, distributed, and modified under those terms.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  */
16
17 #include <linux/debugfs.h>
18 #include <linux/export.h>
19 #include <linux/file.h>
20 #include <linux/fs.h>
21 #include <linux/kernel.h>
22 #include <linux/poll.h>
23 #include <linux/sched.h>
24 #include <linux/seq_file.h>
25 #include <linux/slab.h>
26 #include <linux/uaccess.h>
27 #include <linux/anon_inodes.h>
28
29 #include "sync.h"
30
31 #define CREATE_TRACE_POINTS
32 #include "trace/sync.h"
33
34 static void sync_fence_signal_pt(struct sync_pt *pt);
35 static int _sync_pt_has_signaled(struct sync_pt *pt);
36 static void sync_fence_free(struct kref *kref);
37 static void sync_dump(void);
38
39 static LIST_HEAD(sync_timeline_list_head);
40 static DEFINE_SPINLOCK(sync_timeline_list_lock);
41
42 static LIST_HEAD(sync_fence_list_head);
43 static DEFINE_SPINLOCK(sync_fence_list_lock);
44
45 struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
46                                            int size, const char *name)
47 {
48         struct sync_timeline *obj;
49         unsigned long flags;
50
51         if (size < sizeof(struct sync_timeline))
52                 return NULL;
53
54         obj = kzalloc(size, GFP_KERNEL);
55         if (obj == NULL)
56                 return NULL;
57
58         kref_init(&obj->kref);
59         obj->ops = ops;
60         strlcpy(obj->name, name, sizeof(obj->name));
61
62         INIT_LIST_HEAD(&obj->child_list_head);
63         spin_lock_init(&obj->child_list_lock);
64
65         INIT_LIST_HEAD(&obj->active_list_head);
66         spin_lock_init(&obj->active_list_lock);
67
68         spin_lock_irqsave(&sync_timeline_list_lock, flags);
69         list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
70         spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
71
72         return obj;
73 }
74 EXPORT_SYMBOL(sync_timeline_create);
75
/*
 * kref release callback: unlink the timeline from the global debugfs list,
 * give the driver a chance to release its private data, and free it.
 */
static void sync_timeline_free(struct kref *kref)
{
        struct sync_timeline *obj =
                container_of(kref, struct sync_timeline, kref);
        unsigned long flags;

        /* remove from the global list before the memory goes away */
        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_del(&obj->sync_timeline_list);
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        /* driver-specific teardown, if provided */
        if (obj->ops->release_obj)
                obj->ops->release_obj(obj);

        kfree(obj);
}
91
/**
 * sync_timeline_destroy() - destroys a sync object
 * @obj:	sync_timeline to destroy
 *
 * Marks @obj destroyed and signals it so pending pts error out with
 * -ENOENT, then drops the creation reference.  The memory is not freed
 * until every child pt has released its own reference on the timeline.
 */
void sync_timeline_destroy(struct sync_timeline *obj)
{
        obj->destroyed = true;
        /* make ->destroyed visible before the signal pass re-checks pts */
        smp_wmb();

        /*
         * signal any children that their parent is going away.
         */
        sync_timeline_signal(obj);

        kref_put(&obj->kref, sync_timeline_free);
}
EXPORT_SYMBOL(sync_timeline_destroy);
105
106 static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
107 {
108         unsigned long flags;
109
110         pt->parent = obj;
111
112         spin_lock_irqsave(&obj->child_list_lock, flags);
113         list_add_tail(&pt->child_list, &obj->child_list_head);
114         spin_unlock_irqrestore(&obj->child_list_lock, flags);
115 }
116
117 static void sync_timeline_remove_pt(struct sync_pt *pt)
118 {
119         struct sync_timeline *obj = pt->parent;
120         unsigned long flags;
121
122         spin_lock_irqsave(&obj->active_list_lock, flags);
123         if (!list_empty(&pt->active_list))
124                 list_del_init(&pt->active_list);
125         spin_unlock_irqrestore(&obj->active_list_lock, flags);
126
127         spin_lock_irqsave(&obj->child_list_lock, flags);
128         if (!list_empty(&pt->child_list))
129                 list_del_init(&pt->child_list);
130
131         spin_unlock_irqrestore(&obj->child_list_lock, flags);
132 }
133
/**
 * sync_timeline_signal() - signal a status change on a sync_timeline
 * @obj:	sync_timeline to signal
 *
 * Re-evaluates every pt on @obj's active list; pts that have now
 * signaled are moved to a private list under the lock, then their
 * fences are notified with the lock dropped.
 */
void sync_timeline_signal(struct sync_timeline *obj)
{
        unsigned long flags;
        LIST_HEAD(signaled_pts);
        struct list_head *pos, *n;

        trace_sync_timeline(obj);

        spin_lock_irqsave(&obj->active_list_lock, flags);

        /* phase 1: collect signaled pts while holding active_list_lock */
        list_for_each_safe(pos, n, &obj->active_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, active_list);

                if (_sync_pt_has_signaled(pt)) {
                        list_del_init(pos);
                        list_add(&pt->signaled_list, &signaled_pts);
                        /* pin the fence so it can't be freed under us */
                        kref_get(&pt->fence->kref);
                }
        }

        spin_unlock_irqrestore(&obj->active_list_lock, flags);

        /* phase 2: run fence notifications without the lock held */
        list_for_each_safe(pos, n, &signaled_pts) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, signaled_list);

                list_del_init(pos);
                sync_fence_signal_pt(pt);
                kref_put(&pt->fence->kref, sync_fence_free);
        }
}
EXPORT_SYMBOL(sync_timeline_signal);
167
168 struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
169 {
170         struct sync_pt *pt;
171
172         if (size < sizeof(struct sync_pt))
173                 return NULL;
174
175         pt = kzalloc(size, GFP_KERNEL);
176         if (pt == NULL)
177                 return NULL;
178
179         INIT_LIST_HEAD(&pt->active_list);
180         kref_get(&parent->kref);
181         sync_timeline_add_pt(parent, pt);
182
183         return pt;
184 }
185 EXPORT_SYMBOL(sync_pt_create);
186
/**
 * sync_pt_free() - frees a sync pt
 * @pt:		sync_pt to free
 *
 * Releases driver data, detaches @pt from its timeline's lists, drops
 * the reference the pt held on its parent, and frees the memory.
 */
void sync_pt_free(struct sync_pt *pt)
{
        /* driver-specific cleanup first, while pt->parent is still valid */
        if (pt->parent->ops->free_pt)
                pt->parent->ops->free_pt(pt);

        sync_timeline_remove_pt(pt);

        /* may free the timeline if this was the last reference */
        kref_put(&pt->parent->kref, sync_timeline_free);

        kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);
199
/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
        int old_status = pt->status;

        /* only poll the driver while the pt is still pending */
        if (!pt->status)
                pt->status = pt->parent->ops->has_signaled(pt);

        /* a destroyed timeline forces still-pending pts into an error */
        if (!pt->status && pt->parent->destroyed)
                pt->status = -ENOENT;

        /* timestamp the pending -> signaled/error transition */
        if (pt->status != old_status)
                pt->timestamp = ktime_get();

        return pt->status;
}
216
217 static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
218 {
219         return pt->parent->ops->dup(pt);
220 }
221
222 /* Adds a sync pt to the active queue.  Called when added to a fence */
223 static void sync_pt_activate(struct sync_pt *pt)
224 {
225         struct sync_timeline *obj = pt->parent;
226         unsigned long flags;
227         int err;
228
229         spin_lock_irqsave(&obj->active_list_lock, flags);
230
231         err = _sync_pt_has_signaled(pt);
232         if (err != 0)
233                 goto out;
234
235         list_add_tail(&pt->active_list, &obj->active_list_head);
236
237 out:
238         spin_unlock_irqrestore(&obj->active_list_lock, flags);
239 }
240
241 static int sync_fence_release(struct inode *inode, struct file *file);
242 static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
243 static long sync_fence_ioctl(struct file *file, unsigned int cmd,
244                              unsigned long arg);
245
246
/* file_operations backing the anon inode handed to userspace per fence */
static const struct file_operations sync_fence_fops = {
        .release = sync_fence_release,
        .poll = sync_fence_poll,
        .unlocked_ioctl = sync_fence_ioctl,
        .compat_ioctl = sync_fence_ioctl,
};
253
/*
 * Allocate a fence plus its backing anonymous file and link it on the
 * global fence list.  Returns NULL on allocation failure.
 */
static struct sync_fence *sync_fence_alloc(const char *name)
{
        struct sync_fence *fence;
        unsigned long flags;

        fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
        if (fence == NULL)
                return NULL;

        /* the file's private_data points back at the fence */
        fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
                                         fence, 0);
        if (IS_ERR(fence->file))
                goto err;

        kref_init(&fence->kref);
        strlcpy(fence->name, name, sizeof(fence->name));

        INIT_LIST_HEAD(&fence->pt_list_head);
        INIT_LIST_HEAD(&fence->waiter_list_head);
        spin_lock_init(&fence->waiter_list_lock);

        init_waitqueue_head(&fence->wq);

        /* expose the fence to the debugfs dump */
        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);

        return fence;

err:
        kfree(fence);
        return NULL;
}
287
/* TODO: implement a create which takes more that one sync_pt */
/**
 * sync_fence_create() - creates a sync fence
 * @name:	name of fence to create
 * @pt:		sync_pt to add to the fence; must not already belong to one
 *
 * Creates a fence containing @pt.  Once this is called, @pt is owned by
 * the fence and freed with it.  Returns the fence or NULL on error.
 */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
        struct sync_fence *fence;

        if (pt->fence)
                return NULL;

        fence = sync_fence_alloc(name);
        if (fence == NULL)
                return NULL;

        pt->fence = fence;
        list_add(&pt->pt_list, &fence->pt_list_head);
        sync_pt_activate(pt);

        /*
         * signal the fence in case pt was activated before
         * sync_pt_activate(pt) was called
         */
        sync_fence_signal_pt(pt);

        return fence;
}
EXPORT_SYMBOL(sync_fence_create);
313
314 static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
315 {
316         struct list_head *pos;
317
318         list_for_each(pos, &src->pt_list_head) {
319                 struct sync_pt *orig_pt =
320                         container_of(pos, struct sync_pt, pt_list);
321                 struct sync_pt *new_pt = sync_pt_dup(orig_pt);
322
323                 if (new_pt == NULL)
324                         return -ENOMEM;
325
326                 new_pt->fence = dst;
327                 list_add(&new_pt->pt_list, &dst->pt_list_head);
328         }
329
330         return 0;
331 }
332
/*
 * Merge the pts of @src into @dst.  Pts on a timeline @dst already
 * contains are collapsed into a single pt signaling at the later of the
 * two; pts on new timelines are duplicated and added.  Returns 0 or
 * -ENOMEM (possibly leaving @dst partially merged; caller cleans up).
 */
static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
        struct list_head *src_pos, *dst_pos, *n;

        list_for_each(src_pos, &src->pt_list_head) {
                struct sync_pt *src_pt =
                        container_of(src_pos, struct sync_pt, pt_list);
                bool collapsed = false;

                list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
                        struct sync_pt *dst_pt =
                                container_of(dst_pos, struct sync_pt, pt_list);
                        /* collapse two sync_pts on the same timeline
                         * to a single sync_pt that will signal at
                         * the later of the two
                         */
                        if (dst_pt->parent == src_pt->parent) {
                                /* compare() == -1 means src_pt is later */
                                if (dst_pt->parent->ops->compare(dst_pt, src_pt)
                                                 == -1) {
                                        struct sync_pt *new_pt =
                                                sync_pt_dup(src_pt);
                                        if (new_pt == NULL)
                                                return -ENOMEM;

                                        new_pt->fence = dst;
                                        list_replace(&dst_pt->pt_list,
                                                     &new_pt->pt_list);
                                        sync_pt_free(dst_pt);
                                }
                                collapsed = true;
                                break;
                        }
                }

                /* timeline not represented in dst yet: copy the pt over */
                if (!collapsed) {
                        struct sync_pt *new_pt = sync_pt_dup(src_pt);

                        if (new_pt == NULL)
                                return -ENOMEM;

                        new_pt->fence = dst;
                        list_add(&new_pt->pt_list, &dst->pt_list_head);
                }
        }

        return 0;
}
380
381 static void sync_fence_detach_pts(struct sync_fence *fence)
382 {
383         struct list_head *pos, *n;
384
385         list_for_each_safe(pos, n, &fence->pt_list_head) {
386                 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
387
388                 sync_timeline_remove_pt(pt);
389         }
390 }
391
392 static void sync_fence_free_pts(struct sync_fence *fence)
393 {
394         struct list_head *pos, *n;
395
396         list_for_each_safe(pos, n, &fence->pt_list_head) {
397                 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
398
399                 sync_pt_free(pt);
400         }
401 }
402
403 struct sync_fence *sync_fence_fdget(int fd)
404 {
405         struct file *file = fget(fd);
406
407         if (file == NULL)
408                 return NULL;
409
410         if (file->f_op != &sync_fence_fops)
411                 goto err;
412
413         return file->private_data;
414
415 err:
416         fput(file);
417         return NULL;
418 }
419 EXPORT_SYMBOL(sync_fence_fdget);
420
/**
 * sync_fence_put() - puts a reference of a sync fence
 * @fence:	fence to put
 *
 * Drops the file reference; if it was the last one, the release handler
 * tears the fence (and its pts) down.
 */
void sync_fence_put(struct sync_fence *fence)
{
        fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);
426
/**
 * sync_fence_install() - installs a fence into a file descriptor
 * @fence:	fence to install
 * @fd:		file descriptor in which to install the fence
 *
 * Installs @fence's file into @fd, transferring the file reference to
 * the fd table; @fd should come from get_unused_fd_flags().
 */
void sync_fence_install(struct sync_fence *fence, int fd)
{
        fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);
432
433 static int sync_fence_get_status(struct sync_fence *fence)
434 {
435         struct list_head *pos;
436         int status = 1;
437
438         list_for_each(pos, &fence->pt_list_head) {
439                 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
440                 int pt_status = pt->status;
441
442                 if (pt_status < 0) {
443                         status = pt_status;
444                         break;
445                 } else if (status == 1) {
446                         status = pt_status;
447                 }
448         }
449
450         return status;
451 }
452
453 struct sync_fence *sync_fence_merge(const char *name,
454                                     struct sync_fence *a, struct sync_fence *b)
455 {
456         struct sync_fence *fence;
457         struct list_head *pos;
458         int err;
459
460         fence = sync_fence_alloc(name);
461         if (fence == NULL)
462                 return NULL;
463
464         err = sync_fence_copy_pts(fence, a);
465         if (err < 0)
466                 goto err;
467
468         err = sync_fence_merge_pts(fence, b);
469         if (err < 0)
470                 goto err;
471
472         list_for_each(pos, &fence->pt_list_head) {
473                 struct sync_pt *pt =
474                         container_of(pos, struct sync_pt, pt_list);
475                 sync_pt_activate(pt);
476         }
477
478         /*
479          * signal the fence in case one of it's pts were activated before
480          * they were activated
481          */
482         sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
483                                               struct sync_pt,
484                                               pt_list));
485
486         return fence;
487 err:
488         sync_fence_free_pts(fence);
489         kfree(fence);
490         return NULL;
491 }
492 EXPORT_SYMBOL(sync_fence_merge);
493
/*
 * Called when @pt (or a sibling) may have signaled: recompute the
 * fence's aggregate status and, on the 0 -> signaled/error transition,
 * fire all registered async waiters and wake synchronous waiters.
 */
static void sync_fence_signal_pt(struct sync_pt *pt)
{
        LIST_HEAD(signaled_waiters);
        struct sync_fence *fence = pt->fence;
        struct list_head *pos;
        struct list_head *n;
        unsigned long flags;
        int status;

        status = sync_fence_get_status(fence);

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        /*
         * this should protect against two threads racing on the signaled
         * false -> true transition
         */
        if (status && !fence->status) {
                /* we own the transition: steal the waiter list */
                list_for_each_safe(pos, n, &fence->waiter_list_head)
                        list_move(pos, &signaled_waiters);

                fence->status = status;
        } else {
                /* someone else signaled, or still pending: nothing to do */
                status = 0;
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

        if (status) {
                /* run callbacks and wake waiters outside the lock */
                list_for_each_safe(pos, n, &signaled_waiters) {
                        struct sync_fence_waiter *waiter =
                                container_of(pos, struct sync_fence_waiter,
                                             waiter_list);

                        list_del(pos);
                        waiter->callback(fence, waiter);
                }
                wake_up(&fence->wq);
        }
}
532
533 int sync_fence_wait_async(struct sync_fence *fence,
534                           struct sync_fence_waiter *waiter)
535 {
536         unsigned long flags;
537         int err = 0;
538
539         spin_lock_irqsave(&fence->waiter_list_lock, flags);
540
541         if (fence->status) {
542                 err = fence->status;
543                 goto out;
544         }
545
546         list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
547 out:
548         spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
549
550         return err;
551 }
552 EXPORT_SYMBOL(sync_fence_wait_async);
553
554 int sync_fence_cancel_async(struct sync_fence *fence,
555                              struct sync_fence_waiter *waiter)
556 {
557         struct list_head *pos;
558         struct list_head *n;
559         unsigned long flags;
560         int ret = -ENOENT;
561
562         spin_lock_irqsave(&fence->waiter_list_lock, flags);
563         /*
564          * Make sure waiter is still in waiter_list because it is possible for
565          * the waiter to be removed from the list while the callback is still
566          * pending.
567          */
568         list_for_each_safe(pos, n, &fence->waiter_list_head) {
569                 struct sync_fence_waiter *list_waiter =
570                         container_of(pos, struct sync_fence_waiter,
571                                      waiter_list);
572                 if (list_waiter == waiter) {
573                         list_del(pos);
574                         ret = 0;
575                         break;
576                 }
577         }
578         spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
579         return ret;
580 }
581 EXPORT_SYMBOL(sync_fence_cancel_async);
582
/* Wait-queue condition: true once the fence signaled or errored. */
static bool sync_fence_check(struct sync_fence *fence)
{
        /*
         * Make sure that reads to fence->status are ordered with the
         * wait queue event triggering
         */
        smp_rmb();
        return fence->status != 0;
}
592
/**
 * sync_fence_wait() - wait on fence
 * @fence:	fence to wait on
 * @timeout:	timeout in ms: < 0 waits indefinitely, 0 just polls
 *
 * Returns 0 once the fence signals, -ETIME if it is still pending after
 * the timeout, or a negative error (interruption, or the fence's own
 * error status).
 */
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
        int err = 0;
        struct sync_pt *pt;

        trace_sync_wait(fence, 1);
        list_for_each_entry(pt, &fence->pt_list_head, pt_list)
                trace_sync_pt(pt);

        if (timeout > 0) {
                /* NOTE: timeout holds jiffies, not ms, from here on */
                timeout = msecs_to_jiffies(timeout);
                err = wait_event_interruptible_timeout(fence->wq,
                                                       sync_fence_check(fence),
                                                       timeout);
        } else if (timeout < 0) {
                err = wait_event_interruptible(fence->wq,
                                               sync_fence_check(fence));
        }
        trace_sync_wait(fence, 0);

        /* interrupted by a signal */
        if (err < 0)
                return err;

        if (fence->status < 0) {
                pr_info("fence error %d on [%p]\n", fence->status, fence);
                sync_dump();
                return fence->status;
        }

        /* still pending: a timeout for waits, plain -ETIME for polls */
        if (fence->status == 0) {
                if (timeout > 0) {
                        pr_info("fence timeout on [%p] after %dms\n", fence,
                                jiffies_to_msecs(timeout));
                        sync_dump();
                }
                return -ETIME;
        }

        return 0;
}
EXPORT_SYMBOL(sync_fence_wait);
634
/* kref release callback: free the fence's pts and the fence itself. */
static void sync_fence_free(struct kref *kref)
{
        struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

        sync_fence_free_pts(fence);

        kfree(fence);
}
643
/* ->release for the fence's anon file: runs on the final fput(). */
static int sync_fence_release(struct inode *inode, struct file *file)
{
        struct sync_fence *fence = file->private_data;
        unsigned long flags;

        /*
         * We need to remove all ways to access this fence before dropping
         * our ref.
         *
         * start with its membership in the global fence list
         */
        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_del(&fence->sync_fence_list);
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);

        /*
         * remove its pts from their parents so that sync_timeline_signal()
         * can't reference the fence.
         */
        sync_fence_detach_pts(fence);

        kref_put(&fence->kref, sync_fence_free);

        return 0;
}
669
670 static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
671 {
672         struct sync_fence *fence = file->private_data;
673
674         poll_wait(file, &fence->wq, wait);
675
676         /*
677          * Make sure that reads to fence->status are ordered with the
678          * wait queue event triggering
679          */
680         smp_rmb();
681
682         if (fence->status == 1)
683                 return POLLIN;
684         else if (fence->status < 0)
685                 return POLLERR;
686         else
687                 return 0;
688 }
689
/* SYNC_IOC_WAIT: read the ms timeout from userspace and wait. */
static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
        __s32 value;

        if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
                return -EFAULT;

        return sync_fence_wait(fence, value);
}
699
/*
 * SYNC_IOC_MERGE: merge @fence with the fence referenced by data.fd2
 * into a new fence, installed in a fresh fd returned via data.fence.
 */
static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
        int fd = get_unused_fd_flags(O_CLOEXEC);
        int err;
        struct sync_fence *fence2, *fence3;
        struct sync_merge_data data;

        if (fd < 0)
                return fd;

        if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
                err = -EFAULT;
                goto err_put_fd;
        }

        fence2 = sync_fence_fdget(data.fd2);
        if (fence2 == NULL) {
                err = -ENOENT;
                goto err_put_fd;
        }

        /* userspace may not have NUL-terminated the name */
        data.name[sizeof(data.name) - 1] = '\0';
        fence3 = sync_fence_merge(data.name, fence, fence2);
        if (fence3 == NULL) {
                err = -ENOMEM;
                goto err_put_fence2;
        }

        data.fence = fd;
        if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
                err = -EFAULT;
                goto err_put_fence3;
        }

        /* publish the fd only after nothing else can fail */
        sync_fence_install(fence3, fd);
        sync_fence_put(fence2);
        return 0;

err_put_fence3:
        sync_fence_put(fence3);

err_put_fence2:
        sync_fence_put(fence2);

err_put_fd:
        put_unused_fd(fd);
        return err;
}
748
/*
 * Serialize one pt into @data as a struct sync_pt_info (plus optional
 * driver payload).  Returns bytes written, or a negative errno if
 * @size cannot hold the fixed header.
 */
static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
        struct sync_pt_info *info = data;
        int ret;

        if (size < sizeof(struct sync_pt_info))
                return -ENOMEM;

        info->len = sizeof(struct sync_pt_info);

        /* let the driver append its own payload after the fixed header */
        if (pt->parent->ops->fill_driver_data) {
                ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
                                                        size - sizeof(*info));
                if (ret < 0)
                        return ret;

                info->len += ret;
        }

        strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
        strlcpy(info->driver_name, pt->parent->ops->driver_name,
                sizeof(info->driver_name));
        info->status = pt->status;
        info->timestamp_ns = ktime_to_ns(pt->timestamp);

        return info->len;
}
776
/*
 * SYNC_IOC_FENCE_INFO: fill the user's buffer with a
 * struct sync_fence_info_data header followed by one sync_pt_info
 * record per pt.  The reply is capped at 4096 bytes.
 */
static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
                                        unsigned long arg)
{
        struct sync_fence_info_data *data;
        struct list_head *pos;
        __u32 size;
        __u32 len = 0;
        int ret;

        /* the first word of the user buffer is its total size */
        if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
                return -EFAULT;

        if (size < sizeof(struct sync_fence_info_data))
                return -EINVAL;

        if (size > 4096)
                size = 4096;

        data = kzalloc(size, GFP_KERNEL);
        if (data == NULL)
                return -ENOMEM;

        strlcpy(data->name, fence->name, sizeof(data->name));
        data->status = fence->status;
        len = sizeof(struct sync_fence_info_data);

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);

                /* append this pt's record right after what we have so far */
                ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

                if (ret < 0)
                        goto out;

                len += ret;
        }

        data->len = len;

        if (copy_to_user((void __user *)arg, data, len))
                ret = -EFAULT;
        else
                ret = 0;

out:
        kfree(data);

        return ret;
}
827
/* Dispatch the sync fence ioctls (also used as compat_ioctl). */
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
{
        struct sync_fence *fence = file->private_data;

        switch (cmd) {
        case SYNC_IOC_WAIT:
                return sync_fence_ioctl_wait(fence, arg);

        case SYNC_IOC_MERGE:
                return sync_fence_ioctl_merge(fence, arg);

        case SYNC_IOC_FENCE_INFO:
                return sync_fence_ioctl_fence_info(fence, arg);

        default:
                return -ENOTTY;
        }
}
847
848 #ifdef CONFIG_DEBUG_FS
/* Human-readable form of a pt/fence status for debugfs output. */
static const char *sync_status_str(int status)
{
        if (status < 0)
                return "error";
        if (status > 0)
                return "signaled";
        return "active";
}
858
/*
 * Print one pt for debugfs.  @fence selects the format: true prefixes
 * the parent timeline's name and appends the timeline's current value.
 */
static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
        int status = pt->status;

        seq_printf(s, "  %s%spt %s",
                   fence ? pt->parent->name : "",
                   fence ? "_" : "",
                   sync_status_str(status));
        if (pt->status) {
                /* signaled or errored: show when that happened */
                struct timeval tv = ktime_to_timeval(pt->timestamp);

                seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
        }

        if (pt->parent->ops->timeline_value_str &&
            pt->parent->ops->pt_value_str) {
                char value[64];

                pt->parent->ops->pt_value_str(pt, value, sizeof(value));
                seq_printf(s, ": %s", value);
                if (fence) {
                        pt->parent->ops->timeline_value_str(pt->parent, value,
                                                    sizeof(value));
                        seq_printf(s, " / %s", value);
                }
        } else if (pt->parent->ops->print_pt) {
                /* legacy fallback when the string ops aren't provided */
                seq_puts(s, ": ");
                pt->parent->ops->print_pt(s, pt);
        }

        seq_puts(s, "\n");
}
891
/* Print one timeline and all of its child pts for debugfs. */
static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
        struct list_head *pos;
        unsigned long flags;

        seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

        if (obj->ops->timeline_value_str) {
                char value[64];

                obj->ops->timeline_value_str(obj, value, sizeof(value));
                seq_printf(s, ": %s", value);
        } else if (obj->ops->print_obj) {
                /* legacy fallback when timeline_value_str isn't provided */
                seq_puts(s, ": ");
                obj->ops->print_obj(s, obj);
        }

        seq_puts(s, "\n");

        spin_lock_irqsave(&obj->child_list_lock, flags);
        list_for_each(pos, &obj->child_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, child_list);
                sync_print_pt(s, pt, false);
        }
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
919
/* Print one fence, its pts, and any pending async waiters for debugfs. */
static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
        struct list_head *pos;
        unsigned long flags;

        seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
                   sync_status_str(fence->status));

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);
                sync_print_pt(s, pt, true);
        }

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        list_for_each(pos, &fence->waiter_list_head) {
                struct sync_fence_waiter *waiter =
                        container_of(pos, struct sync_fence_waiter,
                                     waiter_list);

                seq_printf(s, "waiter %pF\n", waiter->callback);
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}
944
/* seq_file show: dump every timeline, then every fence (also sync_dump) */
static int sync_debugfs_show(struct seq_file *s, void *unused)
{
        unsigned long flags;
        struct list_head *pos;

        seq_puts(s, "objs:\n--------------\n");

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_for_each(pos, &sync_timeline_list_head) {
                struct sync_timeline *obj =
                        container_of(pos, struct sync_timeline,
                                     sync_timeline_list);

                sync_print_obj(s, obj);
                seq_puts(s, "\n");
        }
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        seq_puts(s, "fences:\n--------------\n");

        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_for_each(pos, &sync_fence_list_head) {
                struct sync_fence *fence =
                        container_of(pos, struct sync_fence, sync_fence_list);

                sync_print_fence(s, fence);
                seq_puts(s, "\n");
        }
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);
        return 0;
}
976
/* ->open for /sys/kernel/debug/sync: standard single_open boilerplate */
static int sync_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, sync_debugfs_show, inode->i_private);
}
981
/* file_operations for the debugfs "sync" entry (read-only seq_file) */
static const struct file_operations sync_debugfs_fops = {
        .open           = sync_debugfs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
988
/* create /sys/kernel/debug/sync; failure is non-fatal and ignored */
static __init int sync_debugfs_init(void)
{
        debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
        return 0;
}
late_initcall(sync_debugfs_init);
995
#define DUMP_CHUNK 256
static char sync_dump_buf[64 * 1024];
/*
 * Dump the debugfs state to the kernel log in DUMP_CHUNK-sized pieces.
 * Uses an on-stack seq_file aimed at a static buffer, so concurrent
 * callers would race on the buffer; it is only a best-effort debug aid
 * on the fence error/timeout paths.
 */
static void sync_dump(void)
{
        struct seq_file s = {
                .buf = sync_dump_buf,
                .size = sizeof(sync_dump_buf) - 1,
        };
        int i;

        sync_debugfs_show(&s, NULL);

        for (i = 0; i < s.count; i += DUMP_CHUNK) {
                if ((s.count - i) > DUMP_CHUNK) {
                        /* temporarily NUL-terminate this chunk in place */
                        char c = s.buf[i + DUMP_CHUNK];

                        s.buf[i + DUMP_CHUNK] = 0;
                        pr_cont("%s", s.buf + i);
                        s.buf[i + DUMP_CHUNK] = c;
                } else {
                        s.buf[s.count] = 0;
                        pr_cont("%s", s.buf + i);
                }
        }
}
1021 #else
/* no-op stub when CONFIG_DEBUG_FS is disabled */
static void sync_dump(void)
{
}
1025 #endif