]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/staging/gma500/psb_ttm_fence.c
staging: gma500: Intel GMA500 staging driver
[karo-tx-linux.git] / drivers / staging / gma500 / psb_ttm_fence.c
1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
4  * All Rights Reserved.
5  * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
6  * All Rights Reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms and conditions of the GNU General Public License,
10  * version 2, as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License along with
18  * this program; if not, write to the Free Software Foundation, Inc.,
19  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20  *
21  **************************************************************************/
22 /*
23  * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
24  */
25
26 #include "psb_ttm_fence_api.h"
27 #include "psb_ttm_fence_driver.h"
28 #include <linux/wait.h>
29 #include <linux/sched.h>
30
31 #include <drm/drmP.h>
32
33 /*
34  * Simple implementation for now.
35  */
36
37 static void ttm_fence_lockup(struct ttm_fence_object *fence, uint32_t mask)
38 {
39         struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
40
41         printk(KERN_ERR "GPU lockup dectected on engine %u "
42                "fence type 0x%08x\n",
43                (unsigned int)fence->fence_class, (unsigned int)mask);
44         /*
45          * Give engines some time to idle?
46          */
47
48         write_lock(&fc->lock);
49         ttm_fence_handler(fence->fdev, fence->fence_class,
50                           fence->sequence, mask, -EBUSY);
51         write_unlock(&fc->lock);
52 }
53
/*
 * Convenience function to be called by fence::wait methods that
 * need polling.
 *
 * Polls until all fence types in @mask have signaled on @fence.  When the
 * fence's timeout_jiffies deadline passes, the driver's ->lockup handler
 * (or the generic ttm_fence_lockup()) is invoked and polling continues.
 * Returns 0 on success, or -ERESTART when @interruptible and a signal is
 * pending.
 */

int ttm_fence_wait_polling(struct ttm_fence_object *fence, bool lazy,
			   bool interruptible, uint32_t mask)
{
	struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
	const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
	uint32_t count = 0;
	int ret;
	unsigned long end_jiffies = fence->timeout_jiffies;

	DECLARE_WAITQUEUE(entry, current);
	add_wait_queue(&fc->fence_queue, &entry);

	ret = 0;

	for (;;) {
		/*
		 * Task state is set before the signaled check so a wakeup
		 * between check and sleep is not lost.
		 */
		__set_current_state((interruptible) ?
				    TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (ttm_fence_object_signaled(fence, mask))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			/* Deadline passed: report lockup, keep polling. */
			if (driver->lockup)
				driver->lockup(fence, mask);
			else
				ttm_fence_lockup(fence, mask);
			continue;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			/* Busy-poll mode: yield the CPU every 16 spins. */
			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			/* NOTE(review): legacy -ERESTART, not -ERESTARTSYS. */
			ret = -ERESTART;
			break;
		}
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->fence_queue, &entry);
	return ret;
}
103
/*
 * Typically called by the IRQ handler.
 *
 * Marks the fence types in @type as signaled on every fence in
 * @fence_class whose sequence number is covered by @sequence (modulo
 * sequence wrap).  Fully-signaled fences are retired from the ring, and
 * waiters are woken when a waited-for type newly signals.  A non-zero
 * @error force-signals the newest covered fence with that error.
 * Called with fc->lock write-held (see ttm_fence_lockup()).
 */

void ttm_fence_handler(struct ttm_fence_device *fdev, uint32_t fence_class,
		       uint32_t sequence, uint32_t type, uint32_t error)
{
	int wake = 0;
	uint32_t diff;
	uint32_t relevant_type;
	uint32_t new_type;
	struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
	const struct ttm_fence_driver *driver = ttm_fence_driver_from_dev(fdev);
	struct list_head *head;
	struct ttm_fence_object *fence, *next;
	bool found = false;

	if (list_empty(&fc->ring))
		return;

	/*
	 * Find the first fence NOT covered by @sequence (wrap-safe
	 * comparison); everything before it in ring order has passed.
	 */
	list_for_each_entry(fence, &fc->ring, ring) {
		diff = (sequence - fence->sequence) & fc->sequence_mask;
		if (diff > fc->wrap_diff) {
			found = true;
			break;
		}
	}

	fc->waiting_types &= ~type;
	head = (found) ? &fence->ring : &fc->ring;

	/* Walk the covered fences oldest-first. */
	list_for_each_entry_safe_reverse(fence, next, head, ring) {
		if (&fence->ring == &fc->ring)
			break;

		DRM_DEBUG("Fence 0x%08lx, sequence 0x%08x, type 0x%08x\n",
			  (unsigned long)fence, fence->sequence,
			  fence->fence_type);

		if (error) {
			/* Error path: force-signal all types and stop. */
			fence->info.error = error;
			fence->info.signaled_types = fence->fence_type;
			list_del_init(&fence->ring);
			wake = 1;
			break;
		}

		/* Types of this fence that newly signal with this call. */
		relevant_type = type & fence->fence_type;
		new_type = (fence->info.signaled_types | relevant_type) ^
		    fence->info.signaled_types;

		if (new_type) {
			fence->info.signaled_types |= new_type;
			DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
				  (unsigned long)fence,
				  fence->info.signaled_types);

			if (unlikely(driver->signaled))
				driver->signaled(fence);

			if (driver->needed_flush)
				fc->pending_flush |=
				    driver->needed_flush(fence);

			if (new_type & fence->waiting_types)
				wake = 1;
		}

		/* Keep tracking the types still being waited for. */
		fc->waiting_types |=
		    fence->waiting_types & ~fence->info.signaled_types;

		if (!(fence->fence_type & ~fence->info.signaled_types)) {
			/* Every type signaled: retire the fence. */
			DRM_DEBUG("Fence completely signaled 0x%08lx\n",
				  (unsigned long)fence);
			list_del_init(&fence->ring);
		}
	}

	/*
	 * Reinstate lost waiting types.
	 */

	if ((fc->waiting_types & type) != type) {
		head = head->prev;
		list_for_each_entry(fence, head, ring) {
			if (&fence->ring == &fc->ring)
				break;
			diff =
			    (fc->highest_waiting_sequence -
			     fence->sequence) & fc->sequence_mask;
			if (diff > fc->wrap_diff)
				break;

			fc->waiting_types |=
			    fence->waiting_types & ~fence->info.signaled_types;
		}
	}

	if (wake)
		wake_up_all(&fc->fence_queue);
}
205
206 static void ttm_fence_unring(struct ttm_fence_object *fence)
207 {
208         struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
209         unsigned long irq_flags;
210
211         write_lock_irqsave(&fc->lock, irq_flags);
212         list_del_init(&fence->ring);
213         write_unlock_irqrestore(&fc->lock, irq_flags);
214 }
215
216 bool ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask)
217 {
218         unsigned long flags;
219         bool signaled;
220         const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
221         struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
222
223         mask &= fence->fence_type;
224         read_lock_irqsave(&fc->lock, flags);
225         signaled = (mask & fence->info.signaled_types) == mask;
226         read_unlock_irqrestore(&fc->lock, flags);
227         if (!signaled && driver->poll) {
228                 write_lock_irqsave(&fc->lock, flags);
229                 driver->poll(fence->fdev, fence->fence_class, mask);
230                 signaled = (mask & fence->info.signaled_types) == mask;
231                 write_unlock_irqrestore(&fc->lock, flags);
232         }
233         return signaled;
234 }
235
/*
 * Register that the caller is waiting on the fence types in @type and,
 * if the driver indicates a hardware flush is needed to get them
 * signaled, initiate that flush.  @type must be a subset of the fence's
 * fence_type; otherwise -EINVAL.  Returns 0 on success.
 */
int ttm_fence_object_flush(struct ttm_fence_object *fence, uint32_t type)
{
	const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
	struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
	unsigned long irq_flags;
	uint32_t saved_pending_flush;
	uint32_t diff;
	bool call_flush;

	if (type & ~fence->fence_type) {
		DRM_ERROR("Flush trying to extend fence type, "
			  "0x%x, 0x%x\n", type, fence->fence_type);
		return -EINVAL;
	}

	write_lock_irqsave(&fc->lock, irq_flags);
	fence->waiting_types |= type;
	fc->waiting_types |= fence->waiting_types;
	/* Wrap-safe check: advance highest_waiting_sequence if newer. */
	diff = (fence->sequence - fc->highest_waiting_sequence) &
	    fc->sequence_mask;

	if (diff < fc->wrap_diff)
		fc->highest_waiting_sequence = fence->sequence;

	/*
	 * fence->waiting_types has changed. Determine whether
	 * we need to initiate some kind of flush as a result of this.
	 */

	saved_pending_flush = fc->pending_flush;
	if (driver->needed_flush)
		fc->pending_flush |= driver->needed_flush(fence);

	if (driver->poll)
		driver->poll(fence->fdev, fence->fence_class,
			     fence->waiting_types);

	call_flush = (fc->pending_flush != 0);
	write_unlock_irqrestore(&fc->lock, irq_flags);

	/* The driver flush callback runs without fc->lock held. */
	if (call_flush && driver->flush)
		driver->flush(fence->fdev, fence->fence_class);

	return 0;
}
281
282 /*
283  * Make sure old fence objects are signaled before their fence sequences are
284  * wrapped around and reused.
285  */
286
287 void ttm_fence_flush_old(struct ttm_fence_device *fdev,
288                          uint32_t fence_class, uint32_t sequence)
289 {
290         struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
291         struct ttm_fence_object *fence;
292         unsigned long irq_flags;
293         const struct ttm_fence_driver *driver = fdev->driver;
294         bool call_flush;
295
296         uint32_t diff;
297
298         write_lock_irqsave(&fc->lock, irq_flags);
299
300         list_for_each_entry_reverse(fence, &fc->ring, ring) {
301                 diff = (sequence - fence->sequence) & fc->sequence_mask;
302                 if (diff <= fc->flush_diff)
303                         break;
304
305                 fence->waiting_types = fence->fence_type;
306                 fc->waiting_types |= fence->fence_type;
307
308                 if (driver->needed_flush)
309                         fc->pending_flush |= driver->needed_flush(fence);
310         }
311
312         if (driver->poll)
313                 driver->poll(fdev, fence_class, fc->waiting_types);
314
315         call_flush = (fc->pending_flush != 0);
316         write_unlock_irqrestore(&fc->lock, irq_flags);
317
318         if (call_flush && driver->flush)
319                 driver->flush(fdev, fence->fence_class);
320
321         /*
322          * FIXME: Shold we implement a wait here for really old fences?
323          */
324
325 }
326
/*
 * Wait for the fence types in @mask to signal on @fence.
 *
 * Delegates to the driver's ->wait if provided.  Otherwise flushes the
 * requested types and, when irq-driven signaling is available, sleeps on
 * the class wait queue until signaled or the fence's timeout expires
 * (in which case the lockup handler runs and the wait is retried).
 * Falls back to ttm_fence_wait_polling() when no irq support exists.
 * Returns 0 on success, -ERESTART on signal, -EINVAL on a bad @mask.
 */
int ttm_fence_object_wait(struct ttm_fence_object *fence,
			  bool lazy, bool interruptible, uint32_t mask)
{
	const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
	struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
	int ret = 0;
	unsigned long timeout;
	unsigned long cur_jiffies;
	unsigned long to_jiffies;

	if (mask & ~fence->fence_type) {
		DRM_ERROR("Wait trying to extend fence type"
			  " 0x%08x 0x%08x\n", mask, fence->fence_type);
		/* NOTE(review): BUG() makes the return below unreachable. */
		BUG();
		return -EINVAL;
	}

	if (driver->wait)
		return driver->wait(fence, lazy, interruptible, mask);

	ttm_fence_object_flush(fence, mask);
retry:
	if (!driver->has_irq ||
	    driver->has_irq(fence->fdev, fence->fence_class, mask)) {

		/* Wait at least 1 jiffy even if the deadline has passed. */
		cur_jiffies = jiffies;
		to_jiffies = fence->timeout_jiffies;

		timeout = (time_after(to_jiffies, cur_jiffies)) ?
		    to_jiffies - cur_jiffies : 1;

		if (interruptible)
			ret = wait_event_interruptible_timeout
			    (fc->fence_queue,
			     ttm_fence_object_signaled(fence, mask), timeout);
		else
			ret = wait_event_timeout
			    (fc->fence_queue,
			     ttm_fence_object_signaled(fence, mask), timeout);

		if (unlikely(ret == -ERESTARTSYS))
			return -ERESTART;

		if (unlikely(ret == 0)) {
			/* Timed out: report lockup, then wait again. */
			if (driver->lockup)
				driver->lockup(fence, mask);
			else
				ttm_fence_lockup(fence, mask);
			goto retry;
		}

		return 0;
	}

	return ttm_fence_wait_polling(fence, lazy, interruptible, mask);
}
383
384 int ttm_fence_object_emit(struct ttm_fence_object *fence, uint32_t fence_flags,
385                           uint32_t fence_class, uint32_t type)
386 {
387         const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
388         struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
389         unsigned long flags;
390         uint32_t sequence;
391         unsigned long timeout;
392         int ret;
393
394         ttm_fence_unring(fence);
395         ret = driver->emit(fence->fdev,
396                            fence_class, fence_flags, &sequence, &timeout);
397         if (ret)
398                 return ret;
399
400         write_lock_irqsave(&fc->lock, flags);
401         fence->fence_class = fence_class;
402         fence->fence_type = type;
403         fence->waiting_types = 0;
404         fence->info.signaled_types = 0;
405         fence->info.error = 0;
406         fence->sequence = sequence;
407         fence->timeout_jiffies = timeout;
408         if (list_empty(&fc->ring))
409                 fc->highest_waiting_sequence = sequence - 1;
410         list_add_tail(&fence->ring, &fc->ring);
411         fc->latest_queued_sequence = sequence;
412         write_unlock_irqrestore(&fc->lock, flags);
413         return 0;
414 }
415
416 int ttm_fence_object_init(struct ttm_fence_device *fdev,
417                           uint32_t fence_class,
418                           uint32_t type,
419                           uint32_t create_flags,
420                           void (*destroy) (struct ttm_fence_object *),
421                           struct ttm_fence_object *fence)
422 {
423         int ret = 0;
424
425         kref_init(&fence->kref);
426         fence->fence_class = fence_class;
427         fence->fence_type = type;
428         fence->info.signaled_types = 0;
429         fence->waiting_types = 0;
430         fence->sequence = 0;
431         fence->info.error = 0;
432         fence->fdev = fdev;
433         fence->destroy = destroy;
434         INIT_LIST_HEAD(&fence->ring);
435         atomic_inc(&fdev->count);
436
437         if (create_flags & TTM_FENCE_FLAG_EMIT) {
438                 ret = ttm_fence_object_emit(fence, create_flags,
439                                             fence->fence_class, type);
440         }
441
442         return ret;
443 }
444
445 int ttm_fence_object_create(struct ttm_fence_device *fdev,
446                             uint32_t fence_class,
447                             uint32_t type,
448                             uint32_t create_flags,
449                             struct ttm_fence_object **c_fence)
450 {
451         struct ttm_fence_object *fence;
452         int ret;
453
454         ret = ttm_mem_global_alloc(fdev->mem_glob,
455                                    sizeof(*fence),
456                                    false,
457                                    false);
458         if (unlikely(ret != 0)) {
459                 printk(KERN_ERR "Out of memory creating fence object\n");
460                 return ret;
461         }
462
463         fence = kmalloc(sizeof(*fence), GFP_KERNEL);
464         if (!fence) {
465                 printk(KERN_ERR "Out of memory creating fence object\n");
466                 ttm_mem_global_free(fdev->mem_glob, sizeof(*fence));
467                 return -ENOMEM;
468         }
469
470         ret = ttm_fence_object_init(fdev, fence_class, type,
471                                     create_flags, NULL, fence);
472         if (ret) {
473                 ttm_fence_object_unref(&fence);
474                 return ret;
475         }
476         *c_fence = fence;
477
478         return 0;
479 }
480
481 static void ttm_fence_object_destroy(struct kref *kref)
482 {
483         struct ttm_fence_object *fence =
484             container_of(kref, struct ttm_fence_object, kref);
485         struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
486         unsigned long irq_flags;
487
488         write_lock_irqsave(&fc->lock, irq_flags);
489         list_del_init(&fence->ring);
490         write_unlock_irqrestore(&fc->lock, irq_flags);
491
492         atomic_dec(&fence->fdev->count);
493         if (fence->destroy)
494                 fence->destroy(fence);
495         else {
496                 ttm_mem_global_free(fence->fdev->mem_glob,
497                                     sizeof(*fence));
498                 kfree(fence);
499         }
500 }
501
/*
 * Free the per-class manager array allocated by ttm_fence_device_init().
 */
void ttm_fence_device_release(struct ttm_fence_device *fdev)
{
	kfree(fdev->fence_class);
}
506
507 int
508 ttm_fence_device_init(int num_classes,
509                       struct ttm_mem_global *mem_glob,
510                       struct ttm_fence_device *fdev,
511                       const struct ttm_fence_class_init *init,
512                       bool replicate_init,
513                       const struct ttm_fence_driver *driver)
514 {
515         struct ttm_fence_class_manager *fc;
516         const struct ttm_fence_class_init *fci;
517         int i;
518
519         fdev->mem_glob = mem_glob;
520         fdev->fence_class = kzalloc(num_classes *
521                                     sizeof(*fdev->fence_class), GFP_KERNEL);
522
523         if (unlikely(!fdev->fence_class))
524                 return -ENOMEM;
525
526         fdev->num_classes = num_classes;
527         atomic_set(&fdev->count, 0);
528         fdev->driver = driver;
529
530         for (i = 0; i < fdev->num_classes; ++i) {
531                 fc = &fdev->fence_class[i];
532                 fci = &init[(replicate_init) ? 0 : i];
533
534                 fc->wrap_diff = fci->wrap_diff;
535                 fc->flush_diff = fci->flush_diff;
536                 fc->sequence_mask = fci->sequence_mask;
537
538                 rwlock_init(&fc->lock);
539                 INIT_LIST_HEAD(&fc->ring);
540                 init_waitqueue_head(&fc->fence_queue);
541         }
542
543         return 0;
544 }
545
546 struct ttm_fence_info ttm_fence_get_info(struct ttm_fence_object *fence)
547 {
548         struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
549         struct ttm_fence_info tmp;
550         unsigned long irq_flags;
551
552         read_lock_irqsave(&fc->lock, irq_flags);
553         tmp = fence->info;
554         read_unlock_irqrestore(&fc->lock, irq_flags);
555
556         return tmp;
557 }
558
559 void ttm_fence_object_unref(struct ttm_fence_object **p_fence)
560 {
561         struct ttm_fence_object *fence = *p_fence;
562
563         *p_fence = NULL;
564         (void)kref_put(&fence->kref, &ttm_fence_object_destroy);
565 }
566
567 /*
568  * Placement / BO sync object glue.
569  */
570
571 bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg)
572 {
573         struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
574         uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
575
576         return ttm_fence_object_signaled(fence, fence_types);
577 }
578
579 int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg,
580                             bool lazy, bool interruptible)
581 {
582         struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
583         uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
584
585         return ttm_fence_object_wait(fence, lazy, interruptible, fence_types);
586 }
587
588 int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg)
589 {
590         struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
591         uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
592
593         return ttm_fence_object_flush(fence, fence_types);
594 }
595
/*
 * Sync-object glue: drop a reference; *sync_obj is cleared to NULL by
 * ttm_fence_object_unref().
 */
void ttm_fence_sync_obj_unref(void **sync_obj)
{
	ttm_fence_object_unref((struct ttm_fence_object **)sync_obj);
}
600
/*
 * Sync-object glue: take a reference on the fence behind @sync_obj and
 * return it as an opaque pointer.
 */
void *ttm_fence_sync_obj_ref(void *sync_obj)
{
	struct ttm_fence_object *fence = sync_obj;

	return (void *)ttm_fence_object_ref(fence);
}