/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "qxl_drv.h"
#include "qxl_object.h"
#include <trace/events/fence.h>

/*
 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
 * into 256 byte chunks for now - gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */
/* manage releasables */
/* stack them 16 high for now - a drawable object is 191 bytes */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)

static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };

static const char *qxl_get_driver_name(struct fence *fence)
{
	return "qxl";
}

static const char *qxl_get_timeline_name(struct fence *fence)
{
	return "release";
}

static bool qxl_nop_signaling(struct fence *fence)
{
	/* fences are always automatically signaled, so just pretend we did this. */
	return true;
}

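/*
 * Wait for a release fence by polling: nudge the host with an
 * out-of-memory notify, pump the garbage collector (which is what ends
 * up signaling release fences via qxl_release_free()), and re-check the
 * fence, sleeping briefly between rounds once the spin count grows.
 * Gives up after a few spins unless drawable releases are outstanding.
 */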
static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
{
	struct qxl_device *qdev;
	struct qxl_release *release;
	int count = 0, sc = 0;
	bool have_drawable_releases;
	unsigned long cur, end = jiffies + timeout;

	qdev = container_of(fence->lock, struct qxl_device, release_lock);
	release = container_of(fence, struct qxl_release, base);
	have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;

retry:
	sc++;

	if (fence_is_signaled(fence))
		goto signaled;

	qxl_io_notify_oom(qdev);

	for (count = 0; count < 11; count++) {
		if (!qxl_queue_garbage_collect(qdev, true))
			break;

		if (fence_is_signaled(fence))
			goto signaled;
	}

	if (fence_is_signaled(fence))
		goto signaled;

	if (have_drawable_releases || sc < 4) {
		if (sc > 2)
			/* back off */
			usleep_range(500, 1000);

		if (time_after(jiffies, end))
			return 0;

		if (have_drawable_releases && sc > 300) {
			FENCE_WARN(fence, "failed to wait on release %d "
					  "after spincount %d\n",
					  fence->context & ~0xf0000000, sc);
			goto signaled;
		}
		goto retry;
	}
	/*
	 * yeah, original sync_obj_wait gave up after 3 spins when
	 * have_drawable_releases is not set.
	 */

signaled:
	cur = jiffies;
	if (time_after(cur, end))
		return 0;
	return end - cur;
}

static const struct fence_ops qxl_fence_ops = {
	.get_driver_name = qxl_get_driver_name,
	.get_timeline_name = qxl_get_timeline_name,
	.enable_signaling = qxl_nop_signaling,
	.wait = qxl_fence_wait,
};

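/*
 * Allocate a release object and a handle for it in the device's
 * release IDR.  Returns the (positive) handle on success or a negative
 * errno; *ret is only valid on success.
 */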
static int
qxl_release_alloc(struct qxl_device *qdev, int type,
		  struct qxl_release **ret)
{
	struct qxl_release *release;
	int handle;
	size_t size = sizeof(*release);

	release = kmalloc(size, GFP_KERNEL);
	if (!release) {
		DRM_ERROR("Out of memory\n");
		*ret = NULL;
		return -ENOMEM;
	}
	release->base.ops = NULL;
	release->type = type;
	release->release_offset = 0;
	release->surface_release_id = 0;
	INIT_LIST_HEAD(&release->bos);

	idr_preload(GFP_KERNEL);
	spin_lock(&qdev->release_idr_lock);
	handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
	release->base.seqno = ++qdev->release_seqno;
	spin_unlock(&qdev->release_idr_lock);
	idr_preload_end();
	if (handle < 0) {
		kfree(release);
		*ret = NULL;
		return handle;
	}
	*ret = release;
	QXL_INFO(qdev, "allocated release %d\n", handle);
	release->id = handle;
	return handle;
}

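/*
 * Drop the reference each list entry holds on its buffer object and
 * free the entries themselves.
 */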
static void
qxl_release_free_list(struct qxl_release *release)
{
	while (!list_empty(&release->bos)) {
		struct qxl_bo_list *entry;
		struct qxl_bo *bo;

		entry = container_of(release->bos.next,
				     struct qxl_bo_list, tv.head);
		bo = to_qxl_bo(entry->tv.bo);
		qxl_bo_unref(&bo);
		list_del(&entry->tv.head);
		kfree(entry);
	}
}

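/*
 * Tear down a release: return any surface id, remove the handle from
 * the release IDR, and unreference the buffer list.  If the fence was
 * initialized (base.ops set), it is signaled and its reference dropped;
 * otherwise the release is freed directly.
 */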
void
qxl_release_free(struct qxl_device *qdev,
		 struct qxl_release *release)
{
	QXL_INFO(qdev, "release %d, type %d\n", release->id,
		 release->type);

	if (release->surface_release_id)
		qxl_surface_id_dealloc(qdev, release->surface_release_id);

	spin_lock(&qdev->release_idr_lock);
	idr_remove(&qdev->release_idr, release->id);
	spin_unlock(&qdev->release_idr_lock);

	if (release->base.ops) {
		WARN_ON(list_empty(&release->bos));
		qxl_release_free_list(release);

		fence_signal(&release->base);
		fence_put(&release->base);
	} else {
		qxl_release_free_list(release);
		kfree(release);
	}
}

static int qxl_release_bo_alloc(struct qxl_device *qdev,
				struct qxl_bo **bo)
{
	int ret;
	/* pin release BOs - they are too messy to evict */
	ret = qxl_bo_create(qdev, PAGE_SIZE, false, true,
			    QXL_GEM_DOMAIN_VRAM, NULL,
			    bo);
	return ret;
}

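/*
 * Add a buffer object to the release's validation list, taking a
 * reference on it.  Adding the same BO twice is a no-op.
 */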
int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
	struct qxl_bo_list *entry;

	list_for_each_entry(entry, &release->bos, tv.head) {
		if (entry->tv.bo == &bo->tbo)
			return 0;
	}

	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	qxl_bo_ref(bo);
	entry->tv.bo = &bo->tbo;
	entry->tv.shared = false;
	list_add_tail(&entry->tv.head, &release->bos);
	return 0;
}

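/*
 * Make sure a BO is ready to be fenced: validate unpinned BOs into
 * their placement, reserve a shared-fence slot on the reservation
 * object, and allocate a surface id if the BO needs one.
 */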
static int qxl_release_validate_bo(struct qxl_bo *bo)
{
	int ret;

	if (!bo->pin_count) {
		qxl_ttm_placement_from_domain(bo, bo->type, false);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement,
				      true, false);
		if (ret)
			return ret;
	}

	ret = reservation_object_reserve_shared(bo->tbo.resv);
	if (ret)
		return ret;

	/* allocate a surface for reserved + validated buffers */
	ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
	if (ret)
		return ret;
	return 0;
}

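/*
 * Reserve and validate every BO on the release's list under a single
 * ww_acquire ticket.  On validation failure the reservation is backed
 * off before the error is returned.
 */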
int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
	int ret;
	struct qxl_bo_list *entry;

	/* if there is only one object on the release it is the release itself;
	   since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos))
		return 0;

	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
				     !no_intr, NULL);
	if (ret)
		return ret;

	list_for_each_entry(entry, &release->bos, tv.head) {
		struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

		ret = qxl_release_validate_bo(bo);
		if (ret) {
			ttm_eu_backoff_reservation(&release->ticket, &release->bos);
			return ret;
		}
	}
	return 0;
}

void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
	/* if there is only one object on the release it is the release itself;
	   since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos))
		return;

	ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}

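/*
 * For QXL_SURFACE_CMD_DESTROY with an existing create release, the
 * destroy release is stashed in the same BO right after the create
 * command (create_rel->release_offset + 64) so both share the surface
 * command storage; otherwise a fresh surface-cmd release is allocated.
 */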
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
				       enum qxl_surface_cmd_type surface_cmd_type,
				       struct qxl_release *create_rel,
				       struct qxl_release **release)
{
	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
		int idr_ret;
		struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
		struct qxl_bo *bo;
		union qxl_release_info *info;

		/* stash the release after the create command */
		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
		if (idr_ret < 0)
			return idr_ret;
		bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo));

		(*release)->release_offset = create_rel->release_offset + 64;

		qxl_release_list_add(*release, bo);

		info = qxl_release_map(qdev, *release);
		info->id = idr_ret;
		qxl_release_unmap(qdev, *release, info);

		qxl_bo_unref(&bo);
		return 0;
	}

	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
					  QXL_RELEASE_SURFACE_CMD, release, NULL);
}

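/*
 * Allocate a release of the given type and suballocate its command
 * storage out of the per-type "current" release BO, moving on to a new
 * BO once the current one is full.  On success the release id is
 * written into the mapped info block and, if rbo is non-NULL, *rbo
 * points at the backing BO.
 */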
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
			       int type, struct qxl_release **release,
			       struct qxl_bo **rbo)
{
	struct qxl_bo *bo;
	int idr_ret;
	int ret = 0;
	union qxl_release_info *info;
	int cur_idx;

	if (type == QXL_RELEASE_DRAWABLE)
		cur_idx = 0;
	else if (type == QXL_RELEASE_SURFACE_CMD)
		cur_idx = 1;
	else if (type == QXL_RELEASE_CURSOR_CMD)
		cur_idx = 2;
	else {
		DRM_ERROR("got illegal type: %d\n", type);
		return -EINVAL;
	}

	idr_ret = qxl_release_alloc(qdev, type, release);
	if (idr_ret < 0) {
		if (rbo)
			*rbo = NULL;
		return idr_ret;
	}

	mutex_lock(&qdev->release_mutex);
	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
		qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
		qdev->current_release_bo_offset[cur_idx] = 0;
		qdev->current_release_bo[cur_idx] = NULL;
	}
	if (!qdev->current_release_bo[cur_idx]) {
		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
		if (ret) {
			mutex_unlock(&qdev->release_mutex);
			qxl_release_free(qdev, *release);
			return ret;
		}
	}

	bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
	qdev->current_release_bo_offset[cur_idx]++;

	if (rbo)
		*rbo = bo;

	mutex_unlock(&qdev->release_mutex);

	ret = qxl_release_list_add(*release, bo);
	qxl_bo_unref(&bo);
	if (ret) {
		qxl_release_free(qdev, *release);
		return ret;
	}

	info = qxl_release_map(qdev, *release);
	info->id = idr_ret;
	qxl_release_unmap(qdev, *release, info);

	return ret;
}

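/*
 * Look up a release by its IDR handle.  Only the lookup itself is
 * protected by release_idr_lock; the caller is responsible for keeping
 * the release alive while using the returned pointer.
 */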
struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
					       uint64_t id)
{
	struct qxl_release *release;

	spin_lock(&qdev->release_idr_lock);
	release = idr_find(&qdev->release_idr, id);
	spin_unlock(&qdev->release_idr_lock);
	if (!release) {
		DRM_ERROR("failed to find id in release_idr\n");
		return NULL;
	}

	return release;
}

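/*
 * Map the page of the first BO on the release's list that holds this
 * release's info block and return a pointer to it.  Must be paired
 * with qxl_release_unmap(); the mapping is atomic, so the caller must
 * not sleep while it is held.
 */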
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
					struct qxl_release *release)
{
	void *ptr;
	union qxl_release_info *info;
	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
	struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
	if (!ptr)
		return NULL;
	info = ptr + (release->release_offset & ~PAGE_SIZE);
	return info;
}

void qxl_release_unmap(struct qxl_device *qdev,
		       struct qxl_release *release,
		       union qxl_release_info *info)
{
	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
	struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
	void *ptr;

	ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}

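/*
 * Initialize the release's fence, attach it to every reserved BO as a
 * shared fence, and unreserve the buffers.  The fence context encodes
 * the release id with the high bits set, since no real fence context
 * was ever allocated (see the in-function comment below).
 */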
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;
	struct qxl_bo *qbo;
	struct ttm_validate_buffer *entry;
	struct qxl_device *qdev;

	/* if there is only one object on the release it is the release itself;
	   since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos) || list_empty(&release->bos))
		return;

	bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	qdev = container_of(bdev, struct qxl_device, mman.bdev);

	/*
	 * Since we never really allocated a context and we don't want to conflict,
	 * set the highest bits. This will break if we really allow exporting of dma-bufs.
	 */
	fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
		   release->id | 0xf0000000, release->base.seqno);
	trace_fence_emit(&release->base);

	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, &release->bos, head) {
		bo = entry->bo;
		qbo = to_qxl_bo(bo);

		reservation_object_add_shared_fence(bo->resv, &release->base);
		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
	}
	spin_unlock(&glob->lru_lock);
	ww_acquire_fini(&release->ticket);
}