karo-tx-linux.git: drivers/gpu/drm/ttm/ttm_execbuf_util.c
reservation: cross-device reservation support, v4
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

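/*
 * Undo the reservations taken so far: put any buffer that was taken off
 * the LRU lists back on them, clear the reserved flag and wake up anyone
 * waiting on the buffer.  Caller must hold the lru_lock.
 */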
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (entry->removed) {
			ttm_bo_add_to_lru(bo);
			entry->removed = false;
		}
		entry->reserved = false;
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
	}
}

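/*
 * Take every reserved buffer off its LRU lists, recording in
 * entry->put_count how many list references need to be dropped later.
 * Caller must hold the lru_lock.
 */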
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}

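/*
 * Drop the LRU list references recorded in entry->put_count by
 * ttm_eu_del_from_lru_locked().  Called after the lru_lock has been
 * dropped.
 */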
static void ttm_eu_list_ref_sub(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->put_count) {
			ttm_bo_list_ref_sub(bo, entry->put_count, true);
			entry->put_count = 0;
		}
	}
}

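/*
 * ttm_eu_backoff_reservation - release all reservations previously taken
 * with ttm_eu_reserve_buffers(), putting the buffers back on their LRU
 * lists.
 */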
void ttm_eu_backoff_reservation(struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, all reservations are
 * backed off and -EBUSY is returned; the caller can then wait for the
 * buffer to become free for GPU access and retry.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

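/*
 * Illustrative usage sketch (not part of the original file): a typical
 * driver command-submission path built on these helpers.  The list setup,
 * my_validate_buffers() and my_fence are hypothetical placeholders; only
 * the ttm_eu_*() calls are provided by this file.
 *
 *	struct list_head val_list;	// list of struct ttm_validate_buffer
 *	int ret;
 *
 *	ret = ttm_eu_reserve_buffers(&val_list);
 *	if (unlikely(ret != 0))
 *		return ret;	// nothing is left reserved on failure
 *
 *	ret = my_validate_buffers(&val_list);	// e.g. ttm_bo_validate() per BO
 *	if (unlikely(ret != 0)) {
 *		ttm_eu_backoff_reservation(&val_list);
 *		return ret;
 *	}
 *
 *	// submit the command stream, then fence and unreserve in one step
 *	ttm_eu_fence_buffer_objects(&val_list, my_fence);
 */
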
int ttm_eu_reserve_buffers(struct list_head *list)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;
	uint32_t val_seq;

	if (list_empty(list))
		return 0;

	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	spin_lock(&glob->lru_lock);
	val_seq = entry->bo->bdev->val_seq++;

retry:
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		/* already slowpath reserved? */
		if (entry->reserved)
			continue;

		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			/*
			 * Another thread holds this buffer's reservation:
			 * take the buffers we already own off the LRU lists,
			 * drop the lru_lock and retry with a blocking reserve.
			 */
			ttm_eu_del_from_lru_locked(list);
			spin_unlock(&glob->lru_lock);
			ret = ttm_bo_reserve_nolru(bo, true, false,
						   true, val_seq);
			spin_lock(&glob->lru_lock);
			if (!ret)
				break;

			if (unlikely(ret != -EAGAIN))
				goto err;

			/* fallthrough */
		case -EAGAIN:
			/*
			 * Reserving this buffer could deadlock against the
			 * other validator: release everything we hold, wait
			 * for this buffer in the slow path, then start over.
			 */
			ttm_eu_backoff_reservation_locked(list);

			/*
			 * temporarily increase sequence number every retry,
			 * to prevent us from seeing our old reservation
			 * sequence when someone else reserved the buffer,
			 * but hasn't updated the seq_valid/seqno members yet.
			 */
			val_seq = entry->bo->bdev->val_seq++;

			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
			if (unlikely(ret != 0))
				return ret;
			spin_lock(&glob->lru_lock);
			entry->reserved = true;
			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
				ret = -EBUSY;
				goto err;
			}
			goto retry;
		default:
			goto err;
		}

		entry->reserved = true;
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			ret = -EBUSY;
			goto err;
		}
	}

	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);

	return 0;

err:
	ttm_eu_backoff_reservation_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);
	return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

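/*
 * ttm_eu_fence_buffer_objects - attach a new sync object to every buffer
 * in the list and unreserve the buffers.  Each buffer's previous sync
 * object is released after the fence_lock and lru_lock have been dropped.
 */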
void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);
	spin_lock(&bdev->fence_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		entry->old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		ttm_bo_unreserve_locked(bo);
		entry->reserved = false;
	}
	spin_unlock(&bdev->fence_lock);
	spin_unlock(&glob->lru_lock);

	list_for_each_entry(entry, list, head) {
		if (entry->old_sync_obj)
			driver->sync_obj_unref(&entry->old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);