drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
/*
 * Copyright 2009 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Michel Dänzer
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"

/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
static void amdgpu_do_test_moves(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
        struct amdgpu_bo *vram_obj = NULL;
        struct amdgpu_bo **gtt_obj = NULL;
        uint64_t gtt_addr, vram_addr;
        unsigned n, size;
        int i, r;

        size = 1024 * 1024;

        /* Number of tests =
         * (Total GTT - IB pool - writeback page - ring buffers) / test size
         */
        n = adev->mc.gtt_size - AMDGPU_IB_POOL_SIZE*64*1024;
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                if (adev->rings[i])
                        n -= adev->rings[i]->ring_size;
        if (adev->wb.wb_obj)
                n -= AMDGPU_GPU_PAGE_SIZE;
        if (adev->irq.ih.ring_obj)
                n -= adev->irq.ih.ring_size;
        n /= size;

        gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
        if (!gtt_obj) {
                DRM_ERROR("Failed to allocate %d pointers\n", n);
                r = 1;
                goto out_cleanup;
        }

        r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
                             NULL, &vram_obj);
        if (r) {
                DRM_ERROR("Failed to create VRAM object\n");
                goto out_cleanup;
        }
        r = amdgpu_bo_reserve(vram_obj, false);
        if (unlikely(r != 0))
                goto out_unref;
        r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM, &vram_addr);
        if (r) {
                DRM_ERROR("Failed to pin VRAM object\n");
                goto out_unres;
        }
        for (i = 0; i < n; i++) {
                void *gtt_map, *vram_map;
                void **gtt_start, **gtt_end;
                void **vram_start, **vram_end;
                struct fence *fence = NULL;

                r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
                                     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
                if (r) {
                        DRM_ERROR("Failed to create GTT object %d\n", i);
                        goto out_lclean;
                }

                r = amdgpu_bo_reserve(gtt_obj[i], false);
                if (unlikely(r != 0))
                        goto out_lclean_unref;
                r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gtt_addr);
                if (r) {
                        DRM_ERROR("Failed to pin GTT object %d\n", i);
                        goto out_lclean_unres;
                }

                r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
                if (r) {
                        DRM_ERROR("Failed to map GTT object %d\n", i);
                        goto out_lclean_unpin;
                }

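                /* Fill the GTT BO with a recognizable pattern: every
                 * pointer-sized slot is written with its own CPU mapping
                 * address.
                 */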
                for (gtt_start = gtt_map, gtt_end = gtt_map + size;
                     gtt_start < gtt_end;
                     gtt_start++)
                        *gtt_start = gtt_start;

                amdgpu_bo_kunmap(gtt_obj[i]);

                r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
                                       size, NULL, &fence);

                if (r) {
                        DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
                        goto out_lclean_unpin;
                }

                r = fence_wait(fence, false);
                if (r) {
                        DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
                        goto out_lclean_unpin;
                }

                fence_put(fence);

                r = amdgpu_bo_kmap(vram_obj, &vram_map);
                if (r) {
                        DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
                        goto out_lclean_unpin;
                }

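                /* Verify the GTT->VRAM copy, then rewrite each slot with its
                 * VRAM-side address for the return trip.
                 */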
                for (gtt_start = gtt_map, gtt_end = gtt_map + size,
                     vram_start = vram_map, vram_end = vram_map + size;
                     vram_start < vram_end;
                     gtt_start++, vram_start++) {
                        if (*vram_start != gtt_start) {
                                DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
                                          "expected 0x%p (GTT/VRAM offset "
                                          "0x%16llx/0x%16llx)\n",
                                          i, *vram_start, gtt_start,
                                          (unsigned long long)
                                          (gtt_addr - adev->mc.gtt_start +
                                           (void*)gtt_start - gtt_map),
                                          (unsigned long long)
                                          (vram_addr - adev->mc.vram_start +
                                           (void*)gtt_start - gtt_map));
                                amdgpu_bo_kunmap(vram_obj);
                                goto out_lclean_unpin;
                        }
                        *vram_start = vram_start;
                }

                amdgpu_bo_kunmap(vram_obj);

                r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
                                       size, NULL, &fence);

                if (r) {
                        DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
                        goto out_lclean_unpin;
                }

                r = fence_wait(fence, false);
                if (r) {
                        DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
                        goto out_lclean_unpin;
                }

                fence_put(fence);

                r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
                if (r) {
                        DRM_ERROR("Failed to map GTT object after copy %d\n", i);
                        goto out_lclean_unpin;
                }

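                /* Verify that the pattern written to VRAM survived the
                 * VRAM->GTT copy back.
                 */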
                for (gtt_start = gtt_map, gtt_end = gtt_map + size,
                     vram_start = vram_map, vram_end = vram_map + size;
                     gtt_start < gtt_end;
                     gtt_start++, vram_start++) {
                        if (*gtt_start != vram_start) {
                                DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
                                          "expected 0x%p (VRAM/GTT offset "
                                          "0x%16llx/0x%16llx)\n",
                                          i, *gtt_start, vram_start,
                                          (unsigned long long)
                                          (vram_addr - adev->mc.vram_start +
                                           (void*)vram_start - vram_map),
                                          (unsigned long long)
                                          (gtt_addr - adev->mc.gtt_start +
                                           (void*)vram_start - vram_map));
                                amdgpu_bo_kunmap(gtt_obj[i]);
                                goto out_lclean_unpin;
                        }
                }

                amdgpu_bo_kunmap(gtt_obj[i]);

                DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
                         gtt_addr - adev->mc.gtt_start);
                continue;

out_lclean_unpin:
                amdgpu_bo_unpin(gtt_obj[i]);
out_lclean_unres:
                amdgpu_bo_unreserve(gtt_obj[i]);
out_lclean_unref:
                amdgpu_bo_unref(&gtt_obj[i]);
out_lclean:
                for (--i; i >= 0; --i) {
                        amdgpu_bo_unpin(gtt_obj[i]);
                        amdgpu_bo_unreserve(gtt_obj[i]);
                        amdgpu_bo_unref(&gtt_obj[i]);
                }
                if (fence)
                        fence_put(fence);
                break;
        }

        amdgpu_bo_unpin(vram_obj);
out_unres:
        amdgpu_bo_unreserve(vram_obj);
out_unref:
        amdgpu_bo_unref(&vram_obj);
out_cleanup:
        kfree(gtt_obj);
        if (r) {
                printk(KERN_WARNING "Error while testing BO move.\n");
        }
}

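/* Only run the BO move test if a copy engine (buffer_funcs) is available */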
void amdgpu_test_moves(struct amdgpu_device *adev)
{
        if (adev->mman.buffer_funcs)
                amdgpu_do_test_moves(adev);
}

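/* Emit a fence on the given ring and return it.  UVD and VCE rings need a
 * dummy create/destroy message pair to produce a fence; all other rings emit
 * a plain fence directly.
 */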
static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev,
                                             struct amdgpu_ring *ring,
                                             struct fence **fence)
{
        uint32_t handle = ring->idx ^ 0xdeafbeef;
        int r;

        if (ring == &adev->uvd.ring) {
                r = amdgpu_uvd_get_create_msg(ring, handle, NULL);
                if (r) {
                        DRM_ERROR("Failed to get dummy create msg\n");
                        return r;
                }

                r = amdgpu_uvd_get_destroy_msg(ring, handle, fence);
                if (r) {
                        DRM_ERROR("Failed to get dummy destroy msg\n");
                        return r;
                }

        } else if (ring == &adev->vce.ring[0] ||
                   ring == &adev->vce.ring[1]) {
                r = amdgpu_vce_get_create_msg(ring, handle, NULL);
                if (r) {
                        DRM_ERROR("Failed to get dummy create msg\n");
                        return r;
                }

                r = amdgpu_vce_get_destroy_msg(ring, handle, fence);
                if (r) {
                        DRM_ERROR("Failed to get dummy destroy msg\n");
                        return r;
                }
        } else {
                struct amdgpu_fence *a_fence = NULL;
                r = amdgpu_ring_lock(ring, 64);
                if (r) {
                        DRM_ERROR("Failed to lock ring A %d\n", ring->idx);
                        return r;
                }
                amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, &a_fence);
                amdgpu_ring_unlock_commit(ring);
                *fence = &a_fence->base;
        }
        return 0;
}

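/* Test semaphore synchronization between two rings: ringA waits on a
 * semaphore twice, emitting a fence after each wait, while ringB signals the
 * semaphore.  Each fence must only signal after the matching semaphore signal.
 */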
void amdgpu_test_ring_sync(struct amdgpu_device *adev,
                           struct amdgpu_ring *ringA,
                           struct amdgpu_ring *ringB)
{
        struct fence *fence1 = NULL, *fence2 = NULL;
        struct amdgpu_semaphore *semaphore = NULL;
        int r;

        r = amdgpu_semaphore_create(adev, &semaphore);
        if (r) {
                DRM_ERROR("Failed to create semaphore\n");
                goto out_cleanup;
        }

        r = amdgpu_ring_lock(ringA, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
                goto out_cleanup;
        }
        amdgpu_semaphore_emit_wait(ringA, semaphore);
        amdgpu_ring_unlock_commit(ringA);

        r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence1);
        if (r)
                goto out_cleanup;

        r = amdgpu_ring_lock(ringA, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
                goto out_cleanup;
        }
        amdgpu_semaphore_emit_wait(ringA, semaphore);
        amdgpu_ring_unlock_commit(ringA);

        r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence2);
        if (r)
                goto out_cleanup;

        mdelay(1000);

        if (fence_is_signaled(fence1)) {
                DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
                goto out_cleanup;
        }

        r = amdgpu_ring_lock(ringB, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring B %p\n", ringB);
                goto out_cleanup;
        }
        amdgpu_semaphore_emit_signal(ringB, semaphore);
        amdgpu_ring_unlock_commit(ringB);

        r = fence_wait(fence1, false);
        if (r) {
                DRM_ERROR("Failed to wait for sync fence 1\n");
                goto out_cleanup;
        }

        mdelay(1000);

        if (fence_is_signaled(fence2)) {
                DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
                goto out_cleanup;
        }

        r = amdgpu_ring_lock(ringB, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring B %p\n", ringB);
                goto out_cleanup;
        }
        amdgpu_semaphore_emit_signal(ringB, semaphore);
        amdgpu_ring_unlock_commit(ringB);

        r = fence_wait(fence2, false);
        if (r) {
                DRM_ERROR("Failed to wait for sync fence 2\n");
                goto out_cleanup;
        }

out_cleanup:
        amdgpu_semaphore_free(adev, &semaphore, NULL);

        if (fence1)
                fence_put(fence1);

        if (fence2)
                fence_put(fence2);

        if (r)
                printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}

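/* Three-ring variant: ringA and ringB both wait on the same semaphore and emit
 * a fence, while ringC signals it twice.  After the first signal exactly one
 * of the two fences should be signaled; after the second both must complete.
 */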
static void amdgpu_test_ring_sync2(struct amdgpu_device *adev,
                            struct amdgpu_ring *ringA,
                            struct amdgpu_ring *ringB,
                            struct amdgpu_ring *ringC)
{
        struct fence *fenceA = NULL, *fenceB = NULL;
        struct amdgpu_semaphore *semaphore = NULL;
        bool sigA, sigB;
        int i, r;

        r = amdgpu_semaphore_create(adev, &semaphore);
        if (r) {
                DRM_ERROR("Failed to create semaphore\n");
                goto out_cleanup;
        }

        r = amdgpu_ring_lock(ringA, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
                goto out_cleanup;
        }
        amdgpu_semaphore_emit_wait(ringA, semaphore);
        amdgpu_ring_unlock_commit(ringA);

        r = amdgpu_test_create_and_emit_fence(adev, ringA, &fenceA);
        if (r)
                goto out_cleanup;

        r = amdgpu_ring_lock(ringB, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
                goto out_cleanup;
        }
        amdgpu_semaphore_emit_wait(ringB, semaphore);
        amdgpu_ring_unlock_commit(ringB);
        r = amdgpu_test_create_and_emit_fence(adev, ringB, &fenceB);
        if (r)
                goto out_cleanup;

        mdelay(1000);

        if (fence_is_signaled(fenceA)) {
                DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
                goto out_cleanup;
        }
        if (fence_is_signaled(fenceB)) {
                DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
                goto out_cleanup;
        }

        r = amdgpu_ring_lock(ringC, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring C %p\n", ringC);
                goto out_cleanup;
        }
        amdgpu_semaphore_emit_signal(ringC, semaphore);
        amdgpu_ring_unlock_commit(ringC);

        for (i = 0; i < 30; ++i) {
                mdelay(100);
                sigA = fence_is_signaled(fenceA);
                sigB = fence_is_signaled(fenceB);
                if (sigA || sigB)
                        break;
        }

        if (!sigA && !sigB) {
                DRM_ERROR("Neither fence A nor B has been signaled\n");
                goto out_cleanup;
        } else if (sigA && sigB) {
                DRM_ERROR("Both fences A and B have been signaled\n");
                goto out_cleanup;
        }

        DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');

        r = amdgpu_ring_lock(ringC, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring C %p\n", ringC);
                goto out_cleanup;
        }
        amdgpu_semaphore_emit_signal(ringC, semaphore);
        amdgpu_ring_unlock_commit(ringC);

        mdelay(1000);

        r = fence_wait(fenceA, false);
        if (r) {
                DRM_ERROR("Failed to wait for sync fence A\n");
                goto out_cleanup;
        }
        r = fence_wait(fenceB, false);
        if (r) {
                DRM_ERROR("Failed to wait for sync fence B\n");
                goto out_cleanup;
        }

out_cleanup:
        amdgpu_semaphore_free(adev, &semaphore, NULL);

        if (fenceA)
                fence_put(fenceA);

        if (fenceB)
                fence_put(fenceB);

        if (r)
                printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}

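/* Returns false for the pair of VCE rings, which is excluded from the
 * semaphore sync tests.
 */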
static bool amdgpu_test_sync_possible(struct amdgpu_ring *ringA,
                                      struct amdgpu_ring *ringB)
{
        if (ringA == &ringA->adev->vce.ring[0] &&
            ringB == &ringB->adev->vce.ring[1])
                return false;

        return true;
}

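/* Run the semaphore sync tests on every pair and triple of rings that are
 * ready, covering all distinct orderings.
 */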
void amdgpu_test_syncing(struct amdgpu_device *adev)
{
        int i, j, k;

        for (i = 1; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ringA = adev->rings[i];
                if (!ringA || !ringA->ready)
                        continue;

                for (j = 0; j < i; ++j) {
                        struct amdgpu_ring *ringB = adev->rings[j];
                        if (!ringB || !ringB->ready)
                                continue;

                        if (!amdgpu_test_sync_possible(ringA, ringB))
                                continue;

                        DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
                        amdgpu_test_ring_sync(adev, ringA, ringB);

                        DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
                        amdgpu_test_ring_sync(adev, ringB, ringA);

                        for (k = 0; k < j; ++k) {
                                struct amdgpu_ring *ringC = adev->rings[k];
                                if (!ringC || !ringC->ready)
                                        continue;

                                if (!amdgpu_test_sync_possible(ringA, ringC))
                                        continue;

                                if (!amdgpu_test_sync_possible(ringB, ringC))
                                        continue;

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
                                amdgpu_test_ring_sync2(adev, ringA, ringB, ringC);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
                                amdgpu_test_ring_sync2(adev, ringA, ringC, ringB);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
                                amdgpu_test_ring_sync2(adev, ringB, ringA, ringC);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
                                amdgpu_test_ring_sync2(adev, ringB, ringC, ringA);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
                                amdgpu_test_ring_sync2(adev, ringC, ringA, ringB);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
                                amdgpu_test_ring_sync2(adev, ringC, ringB, ringA);
                        }
                }
        }
}