drivers/gpu/drm/ttm/ttm_execbuf_util.c
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

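/*
 * Walk the validation list backwards, starting from the entry just before
 * @entry, and drop the reservation of every buffer visited.  Used to undo
 * the reservations already taken when reserving @entry itself failed.
 */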
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
                                               struct ttm_validate_buffer *entry)
{
        list_for_each_entry_continue_reverse(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                __ttm_bo_unreserve(bo);
        }
}

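/*
 * Remove every buffer on the validation list from the LRU lists and drop
 * the list references it held.  Caller must hold the global LRU lock.
 */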
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
        struct ttm_validate_buffer *entry;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
                unsigned put_count = ttm_bo_del_from_lru(bo);

                ttm_bo_list_ref_sub(bo, put_count, true);
        }
}

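/**
 * ttm_eu_backoff_reservation - release a list of reserved buffers
 *
 * @ticket:  ww_acquire_ctx used for the reservation, or NULL
 * @list:    list of struct ttm_validate_buffer
 *
 * Puts every buffer on @list back on the LRU, drops its reservation and,
 * if a ticket was used, finishes the ww_acquire context.
 */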
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
                                struct list_head *list)
{
        struct ttm_validate_buffer *entry;
        struct ttm_bo_global *glob;

        if (list_empty(list))
                return;

        entry = list_first_entry(list, struct ttm_validate_buffer, head);
        glob = entry->bo->glob;

        spin_lock(&glob->lru_lock);
        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                ttm_bo_add_to_lru(bo);
                __ttm_bo_unreserve(bo);
        }
        spin_unlock(&glob->lru_lock);

        if (ticket)
                ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                           struct list_head *list, bool intr)
{
        struct ttm_bo_global *glob;
        struct ttm_validate_buffer *entry;
        int ret;

        if (list_empty(list))
                return 0;

        entry = list_first_entry(list, struct ttm_validate_buffer, head);
        glob = entry->bo->glob;

        if (ticket)
                ww_acquire_init(ticket, &reservation_ww_class);

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true,
                                       ticket);
                if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
                        __ttm_bo_unreserve(bo);

                        ret = -EBUSY;
                }

                if (!ret) {
                        if (!entry->shared)
                                continue;

                        ret = reservation_object_reserve_shared(bo->resv);
                        if (!ret)
                                continue;
                }

                /* uh oh, we lost out, drop every reservation and try
                 * to only reserve this buffer, then start over if
                 * this succeeds.
                 */
                ttm_eu_backoff_reservation_reverse(list, entry);

                if (ret == -EDEADLK && intr) {
                        ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
                                                               ticket);
                } else if (ret == -EDEADLK) {
                        ww_mutex_lock_slow(&bo->resv->lock, ticket);
                        ret = 0;
                }

                if (!ret && entry->shared)
                        ret = reservation_object_reserve_shared(bo->resv);

                if (unlikely(ret != 0)) {
                        if (ret == -EINTR)
                                ret = -ERESTARTSYS;
                        if (ticket) {
                                ww_acquire_done(ticket);
                                ww_acquire_fini(ticket);
                        }
                        return ret;
                }

                /* move this item to the front of the list,
                 * forces correct iteration of the loop without keeping track
                 */
                list_del(&entry->head);
                list_add(&entry->head, list);
        }

        if (ticket)
                ww_acquire_done(ticket);
        spin_lock(&glob->lru_lock);
        ttm_eu_del_from_lru_locked(list);
        spin_unlock(&glob->lru_lock);
        return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

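/**
 * ttm_eu_fence_buffer_objects - attach a fence to a list of buffers and unreserve them
 *
 * @ticket:  ww_acquire_ctx used when the buffers were reserved, or NULL
 * @list:    list of struct ttm_validate_buffer
 * @fence:   fence for the just-submitted work
 *
 * Adds @fence to the reservation object of every buffer on @list, as a
 * shared or an exclusive fence depending on each entry's @shared flag,
 * then puts the buffers back on the LRU, unreserves them and finishes
 * the ww_acquire context if one was used.
 */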
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
                                 struct list_head *list, struct fence *fence)
{
        struct ttm_validate_buffer *entry;
        struct ttm_buffer_object *bo;
        struct ttm_bo_global *glob;
        struct ttm_bo_device *bdev;
        struct ttm_bo_driver *driver;

        if (list_empty(list))
                return;

        bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
        bdev = bo->bdev;
        driver = bdev->driver;
        glob = bo->glob;

        spin_lock(&glob->lru_lock);

        list_for_each_entry(entry, list, head) {
                bo = entry->bo;
                if (entry->shared)
                        reservation_object_add_shared_fence(bo->resv, fence);
                else
                        reservation_object_add_excl_fence(bo->resv, fence);
                ttm_bo_add_to_lru(bo);
                __ttm_bo_unreserve(bo);
        }
        spin_unlock(&glob->lru_lock);
        if (ticket)
                ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
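
/*
 * Illustrative sketch (not compiled as part of this file): one way a
 * driver might combine the helpers above for a command submission.
 * my_driver_validate() and my_driver_submit() are hypothetical stand-ins
 * for driver-specific placement validation and command-stream submission;
 * the caller is assumed to have filled @validate_list with
 * struct ttm_validate_buffer entries (bo and shared flag set).
 *
 *	static int my_driver_execbuf(struct list_head *validate_list,
 *				     struct fence *fence)
 *	{
 *		struct ww_acquire_ctx ticket;
 *		struct ttm_validate_buffer *entry;
 *		int ret;
 *
 *		// Reserve every buffer; contention with other submitters is
 *		// resolved through the ww_acquire ticket.
 *		ret = ttm_eu_reserve_buffers(&ticket, validate_list, true);
 *		if (ret)
 *			return ret;
 *
 *		list_for_each_entry(entry, validate_list, head) {
 *			ret = my_driver_validate(entry->bo);
 *			if (ret)
 *				goto err_backoff;
 *		}
 *
 *		ret = my_driver_submit(validate_list, fence);
 *		if (ret)
 *			goto err_backoff;
 *
 *		// Attach the fence and drop all reservations in one go.
 *		ttm_eu_fence_buffer_objects(&ticket, validate_list, fence);
 *		return 0;
 *
 *	err_backoff:
 *		// Nothing was fenced; just release the reservations.
 *		ttm_eu_backoff_reservation(&ticket, validate_list);
 *		return ret;
 *	}
 */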