/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);
int radeon_debugfs_ring_init(struct radeon_device *rdev);

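/**
 * radeon_get_ib_value - read one dword from the IB chunk being parsed
 * @p: parser structure holding the command stream chunks
 * @idx: dword index into the IB
 *
 * The chunk is mapped two pages at a time (kpage[0]/kpage[1]); when
 * @idx falls outside both cached pages, radeon_cs_update_pages()
 * swaps the required page in.  On a paging error, p->parser_error is
 * set and 0 is returned.
 */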
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
        struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
        u32 pg_idx, pg_offset;
        u32 idx_value = 0;
        int new_page;

        pg_idx = (idx * 4) / PAGE_SIZE;
        pg_offset = (idx * 4) % PAGE_SIZE;

        if (ibc->kpage_idx[0] == pg_idx)
                return ibc->kpage[0][pg_offset/4];
        if (ibc->kpage_idx[1] == pg_idx)
                return ibc->kpage[1][pg_offset/4];

        new_page = radeon_cs_update_pages(p, pg_idx);
        if (new_page < 0) {
                p->parser_error = new_page;
                return 0;
        }

        idx_value = ibc->kpage[new_page][pg_offset/4];
        return idx_value;
}

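/**
 * radeon_ring_write - copy one dword into the ring buffer
 * @ring: ring to write to
 * @v: dword to write
 *
 * Stores @v at the current write pointer and advances it, wrapping
 * through ptr_mask.  Space must have been reserved beforehand with
 * radeon_ring_lock() or radeon_ring_alloc().
 */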
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
        if (ring->count_dw <= 0) {
                DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
        }
#endif
        ring->ring[ring->wptr++] = v;
        ring->wptr &= ring->ptr_mask;
        ring->count_dw--;
        ring->ring_free_dw--;
}

/*
 * IB.
 */
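/**
 * radeon_ib_try_free - reclaim an IB whose fence has signaled
 * @rdev: radeon device
 * @ib: IB to try to reclaim
 *
 * Returns true when the IB had been emitted and its fence signaled,
 * so the fence and the backing sub-allocation could be released.
 */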
bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
        bool done = false;

        /* only free IBs which have been emitted */
        if (ib->fence && ib->fence->emitted) {
                if (radeon_fence_signaled(ib->fence)) {
                        radeon_fence_unref(&ib->fence);
                        radeon_sa_bo_free(rdev, &ib->sa_bo);
                        done = true;
                }
        }
        return done;
}

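/**
 * radeon_ib_get - allocate an IB out of the pool
 * @rdev: radeon device
 * @ring: ring the IB will be submitted to
 * @ib: filled with the allocated IB on success
 * @size: requested size in bytes (aligned up to 256 bytes)
 *
 * Scans the pool starting at head_id, reclaiming entries whose fences
 * have signaled.  If no entry is free, waits on the next emitted
 * fence and retries, giving up with -ENOMEM after 5 attempts.
 */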
int radeon_ib_get(struct radeon_device *rdev, int ring,
                  struct radeon_ib **ib, unsigned size)
{
        struct radeon_fence *fence;
        unsigned cretry = 0;
        int r = 0, i, idx;

        *ib = NULL;
        /* align size on 256 bytes */
        size = ALIGN(size, 256);

        r = radeon_fence_create(rdev, &fence, ring);
        if (r) {
                dev_err(rdev->dev, "failed to create fence for new IB\n");
                return r;
        }

        radeon_mutex_lock(&rdev->ib_pool.mutex);
        idx = rdev->ib_pool.head_id;
retry:
        if (cretry > 5) {
                dev_err(rdev->dev, "failed to get an ib after 5 retries\n");
                radeon_mutex_unlock(&rdev->ib_pool.mutex);
                radeon_fence_unref(&fence);
                return -ENOMEM;
        }
        cretry++;
        for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
                radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
                if (rdev->ib_pool.ibs[idx].fence == NULL) {
                        r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
                                             &rdev->ib_pool.ibs[idx].sa_bo,
                                             size, 256);
                        if (!r) {
                                *ib = &rdev->ib_pool.ibs[idx];
                                (*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
                                (*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
                                (*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
                                (*ib)->gpu_addr += (*ib)->sa_bo.offset;
                                (*ib)->fence = fence;
                                (*ib)->vm_id = 0;
                                /* IBs are most likely allocated in a ring fashion,
                                 * thus rdev->ib_pool.head_id should be the id of
                                 * the oldest IB.
                                 */
                                rdev->ib_pool.head_id = (1 + idx);
                                rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
                                radeon_mutex_unlock(&rdev->ib_pool.mutex);
                                return 0;
                        }
                }
                idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
        }
        /* This should be a rare event, i.e. all IBs are scheduled but
         * none has signaled yet.
         */
        for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
                if (rdev->ib_pool.ibs[idx].fence && rdev->ib_pool.ibs[idx].fence->emitted) {
                        r = radeon_fence_wait(rdev->ib_pool.ibs[idx].fence, false);
                        if (!r) {
                                goto retry;
                        }
                        /* an error happened */
                        break;
                }
                idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
        }
        radeon_mutex_unlock(&rdev->ib_pool.mutex);
        radeon_fence_unref(&fence);
        return r;
}

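/**
 * radeon_ib_free - return an IB to the pool
 * @rdev: radeon device
 * @ib: IB to free; cleared to NULL on return
 *
 * Only IBs that were never emitted are released immediately; emitted
 * ones are reclaimed later by radeon_ib_try_free() once their fence
 * signals.
 */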
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
        struct radeon_ib *tmp = *ib;

        *ib = NULL;
        if (tmp == NULL) {
                return;
        }
        radeon_mutex_lock(&rdev->ib_pool.mutex);
        if (tmp->fence && !tmp->fence->emitted) {
                radeon_sa_bo_free(rdev, &tmp->sa_bo);
                radeon_fence_unref(&tmp->fence);
        }
        radeon_mutex_unlock(&rdev->ib_pool.mutex);
}

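/**
 * radeon_ib_schedule - submit an IB to its ring
 * @rdev: radeon device
 * @ib: IB to schedule
 *
 * Reserves 64 dwords on the ring (enough for the IB execute packet
 * and the fence), emits both, and commits the ring.
 */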
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
        struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
        int r = 0;

        if (!ib->length_dw || !ring->ready) {
                /* TODO: nothing is in the IB; we should report this. */
                DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
                return -EINVAL;
        }

        /* 64 dwords should be enough for the fence too */
        r = radeon_ring_lock(rdev, ring, 64);
        if (r) {
                DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
                return r;
        }
        radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
        radeon_fence_emit(rdev, ib->fence);
        radeon_ring_unlock_commit(rdev, ring);
        return 0;
}

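/**
 * radeon_ib_pool_init - initialize the IB pool and its sub-allocator
 * @rdev: radeon device
 *
 * The sub-allocator manager is built in a temporary before taking
 * ib_pool.mutex; if the pool is already ready the temporary is torn
 * down again and the existing manager is kept.
 */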
int radeon_ib_pool_init(struct radeon_device *rdev)
{
        struct radeon_sa_manager tmp;
        int i, r;

        r = radeon_sa_bo_manager_init(rdev, &tmp,
                                      RADEON_IB_POOL_SIZE*64*1024,
                                      RADEON_GEM_DOMAIN_GTT);
        if (r) {
                return r;
        }

        radeon_mutex_lock(&rdev->ib_pool.mutex);
        if (rdev->ib_pool.ready) {
                radeon_mutex_unlock(&rdev->ib_pool.mutex);
                radeon_sa_bo_manager_fini(rdev, &tmp);
                return 0;
        }

        rdev->ib_pool.sa_manager = tmp;
        INIT_LIST_HEAD(&rdev->ib_pool.sa_manager.sa_bo);
        for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
                rdev->ib_pool.ibs[i].fence = NULL;
                rdev->ib_pool.ibs[i].idx = i;
                rdev->ib_pool.ibs[i].length_dw = 0;
                INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].sa_bo.list);
        }
        rdev->ib_pool.head_id = 0;
        rdev->ib_pool.ready = true;
        DRM_INFO("radeon: ib pool ready.\n");

        if (radeon_debugfs_ib_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for IB!\n");
        }
        if (radeon_debugfs_ring_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for rings!\n");
        }
        radeon_mutex_unlock(&rdev->ib_pool.mutex);
        return 0;
}

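/**
 * radeon_ib_pool_fini - tear down the IB pool
 * @rdev: radeon device
 *
 * Frees every pool slot's sub-allocation and fence, then shuts the
 * sub-allocator manager down.
 */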
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
        unsigned i;

        radeon_mutex_lock(&rdev->ib_pool.mutex);
        if (rdev->ib_pool.ready) {
                for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
                        radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
                        radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
                }
                radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
                rdev->ib_pool.ready = false;
        }
        radeon_mutex_unlock(&rdev->ib_pool.mutex);
}

int radeon_ib_pool_start(struct radeon_device *rdev)
{
        return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
}

int radeon_ib_pool_suspend(struct radeon_device *rdev)
{
        return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
}

/*
 * Ring.
 */
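/**
 * radeon_ring_index - map a ring pointer to its index in rdev->ring
 * @rdev: radeon device
 * @ring: ring to look up
 *
 * Chips before R600 only have the GFX (CP) ring; Cayman and newer
 * additionally expose the CP1/CP2 rings.
 */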
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
        /* r1xx-r5xx only has CP ring */
        if (rdev->family < CHIP_R600)
                return RADEON_RING_TYPE_GFX_INDEX;

        if (rdev->family >= CHIP_CAYMAN) {
                if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
                        return CAYMAN_RING_TYPE_CP1_INDEX;
                else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
                        return CAYMAN_RING_TYPE_CP2_INDEX;
        }
        return RADEON_RING_TYPE_GFX_INDEX;
}

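/**
 * radeon_ring_free_size - recompute the number of free dwords
 * @rdev: radeon device
 * @ring: ring to update
 *
 * Refreshes ring->rptr from the writeback page (or from the register
 * when writeback is disabled) and recomputes ring_free_dw from the
 * distance between rptr and wptr.  A completely empty ring would
 * compute as 0 free dwords, so that case is fixed up to the full
 * ring size.
 */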
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
        u32 rptr;

        if (rdev->wb.enabled)
                rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
        else
                rptr = RREG32(ring->rptr_reg);
        ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
        /* This works because ring_size is a power of 2 */
        ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
        ring->ring_free_dw -= ring->wptr;
        ring->ring_free_dw &= ring->ptr_mask;
        if (!ring->ring_free_dw) {
                ring->ring_free_dw = ring->ring_size / 4;
        }
}

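/**
 * radeon_ring_alloc - reserve space on a ring
 * @rdev: radeon device
 * @ring: ring to allocate from
 * @ndw: number of dwords requested
 *
 * Rounds @ndw up to the ring's alignment; while the ring is too full,
 * waits for the next fence to free up space.  wptr_old is saved so
 * radeon_ring_unlock_undo() can roll the reservation back.
 */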
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
        int r;

        /* Align requested size with padding so unlock_commit can
         * pad safely */
        ndw = (ndw + ring->align_mask) & ~ring->align_mask;
        while (ndw > (ring->ring_free_dw - 1)) {
                radeon_ring_free_size(rdev, ring);
                if (ndw < ring->ring_free_dw) {
                        break;
                }
                r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
                if (r)
                        return r;
        }
        ring->count_dw = ndw;
        ring->wptr_old = ring->wptr;
        return 0;
}

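/**
 * radeon_ring_lock - take the ring mutex and reserve space
 * @rdev: radeon device
 * @ring: ring to lock
 * @ndw: number of dwords to reserve
 *
 * Typical caller pattern (illustrative sketch only, not code from
 * this file; `header'/`payload' are placeholder dwords):
 *
 *	r = radeon_ring_lock(rdev, ring, 2);
 *	if (r)
 *		return r;
 *	radeon_ring_write(ring, header);
 *	radeon_ring_write(ring, payload);
 *	radeon_ring_unlock_commit(rdev, ring);
 */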
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
        int r;

        mutex_lock(&ring->mutex);
        r = radeon_ring_alloc(rdev, ring, ndw);
        if (r) {
                mutex_unlock(&ring->mutex);
                return r;
        }
        return 0;
}

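/**
 * radeon_ring_commit - pad the ring and publish the write pointer
 * @rdev: radeon device
 * @ring: ring to commit
 *
 * Pads with NOP dwords up to the fetch-size alignment, issues a
 * memory barrier, writes the new wptr to the hardware register and
 * reads it back to post the write.
 */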
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
        unsigned count_dw_pad;
        unsigned i;

        /* We pad to match fetch size */
        count_dw_pad = (ring->align_mask + 1) -
                       (ring->wptr & ring->align_mask);
        for (i = 0; i < count_dw_pad; i++) {
                radeon_ring_write(ring, ring->nop);
        }
        DRM_MEMORYBARRIER();
        WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
        (void)RREG32(ring->wptr_reg);
}

void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
        radeon_ring_commit(rdev, ring);
        mutex_unlock(&ring->mutex);
}

void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
        ring->wptr = ring->wptr_old;
        mutex_unlock(&ring->mutex);
}

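/**
 * radeon_ring_init - allocate, pin and map the ring buffer
 * @rdev: radeon device
 * @ring: ring to initialize
 * @ring_size: size of the ring in bytes (must be a power of two)
 * @rptr_offs: writeback offset of the read pointer
 * @rptr_reg: read pointer register
 * @wptr_reg: write pointer register
 * @ptr_reg_shift: shift applied to the hardware ring pointers
 * @ptr_reg_mask: mask applied to the hardware ring pointers
 * @nop: NOP packet value used for padding
 *
 * Creates the ring buffer object in GTT, pins it and maps it for CPU
 * access, then derives ptr_mask from the ring size in dwords.
 */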
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
                     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
                     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
        int r;

        ring->ring_size = ring_size;
        ring->rptr_offs = rptr_offs;
        ring->rptr_reg = rptr_reg;
        ring->wptr_reg = wptr_reg;
        ring->ptr_reg_shift = ptr_reg_shift;
        ring->ptr_reg_mask = ptr_reg_mask;
        ring->nop = nop;
        /* Allocate ring buffer */
        if (ring->ring_obj == NULL) {
                r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
                                     RADEON_GEM_DOMAIN_GTT,
                                     &ring->ring_obj);
                if (r) {
                        dev_err(rdev->dev, "(%d) ring create failed\n", r);
                        return r;
                }
                r = radeon_bo_reserve(ring->ring_obj, false);
                if (unlikely(r != 0))
                        return r;
                r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
                                  &ring->gpu_addr);
                if (r) {
                        radeon_bo_unreserve(ring->ring_obj);
                        dev_err(rdev->dev, "(%d) ring pin failed\n", r);
                        return r;
                }
                r = radeon_bo_kmap(ring->ring_obj,
                                   (void **)&ring->ring);
                radeon_bo_unreserve(ring->ring_obj);
                if (r) {
                        dev_err(rdev->dev, "(%d) ring map failed\n", r);
                        return r;
                }
        }
        ring->ptr_mask = (ring->ring_size / 4) - 1;
        ring->ring_free_dw = ring->ring_size / 4;
        return 0;
}

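/**
 * radeon_ring_fini - unmap and free the ring buffer
 * @rdev: radeon device
 * @ring: ring to tear down
 *
 * Detaches the buffer object from the ring under the mutex so no
 * writer can race with the teardown, then unmaps, unpins and drops
 * the last reference.
 */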
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
        int r;
        struct radeon_bo *ring_obj;

        mutex_lock(&ring->mutex);
        ring_obj = ring->ring_obj;
        ring->ring = NULL;
        ring->ring_obj = NULL;
        mutex_unlock(&ring->mutex);

        if (ring_obj) {
                r = radeon_bo_reserve(ring_obj, false);
                if (likely(r == 0)) {
                        radeon_bo_kunmap(ring_obj);
                        radeon_bo_unpin(ring_obj);
                        radeon_bo_unreserve(ring_obj);
                }
                radeon_bo_unref(&ring_obj);
        }
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

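/* Dump a ring's hardware registers, the driver-side pointer copies
 * and the dwords currently queued between rptr and wptr.
 */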
static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        int ridx = *(int*)node->info_ent->data;
        struct radeon_ring *ring = &rdev->ring[ridx];
        unsigned count, i, j;

        radeon_ring_free_size(rdev, ring);
        count = (ring->ring_size / 4) - ring->ring_free_dw;
        seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
        seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
        seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
        seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
        seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
        seq_printf(m, "%u dwords in ring\n", count);
        i = ring->rptr;
        for (j = 0; j <= count; j++) {
                seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
                i = (i + 1) & ring->ptr_mask;
        }
        return 0;
}

static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;

static struct drm_info_list radeon_debugfs_ring_info_list[] = {
        {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
        {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
        {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};

static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct radeon_ib *ib = node->info_ent->data;
        unsigned i;

        if (ib == NULL) {
                return 0;
        }
        seq_printf(m, "IB %04u\n", ib->idx);
        seq_printf(m, "IB fence %p\n", ib->fence);
        seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
        for (i = 0; i < ib->length_dw; i++) {
                seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
        }
        return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
#endif

int radeon_debugfs_ring_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list,
                                        ARRAY_SIZE(radeon_debugfs_ring_info_list));
#else
        return 0;
#endif
}

int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        unsigned i;

        for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
                sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
                radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
                radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
                radeon_debugfs_ib_list[i].driver_features = 0;
                radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
        }
        return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
                                        RADEON_IB_POOL_SIZE);
#else
        return 0;
#endif
}