/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <subdev/fb.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"

int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
	return 0;
}

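/*
 * GEM free callback: drops any pin still held on the buffer, tears down a
 * PRIME import attachment if present, and releases the underlying TTM
 * buffer object before freeing the GEM object itself.
 */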
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo;

	if (!nvbo)
		return;

	bo = &nvbo->bo;
	nvbo->gem = NULL;

	if (unlikely(nvbo->pin_refcnt)) {
		nvbo->pin_refcnt = 1;
		nouveau_bo_unpin(nvbo);
	}

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	ttm_bo_unref(&bo);

	drm_gem_object_release(gem);
	kfree(gem);
}

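/*
 * Called when a client opens a handle to the object: on chips with a
 * per-client VM, look up (or create) this buffer's mapping in the client's
 * address space and take a reference on it.
 */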
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->base.vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma);
		if (ret) {
			kfree(vma);
			goto out;
		}
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

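/*
 * Called when a client closes its handle: drop the client's reference on the
 * per-VM mapping and unmap the buffer from that VM once the last reference
 * is gone.
 */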
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->base.vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
	if (vma) {
		if (--vma->refcount == 0) {
			nouveau_bo_vma_del(nvbo, vma);
			kfree(vma);
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}

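/*
 * Allocate a new buffer object with the requested placement and tiling, and
 * wrap it in a GEM object so it can be handed out to userspace.
 */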
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, NULL, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;
	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (nv_device(drm->device)->card_type >= NV_50)
		nvbo->valid_domains &= domain;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}

static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->offset = nvbo->bo.offset;
	if (cli->base.vm) {
		vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = nvbo->bo.addr_space_offset;
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

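/*
 * DRM_NOUVEAU_GEM_NEW ioctl: validate the requested tile flags, allocate the
 * buffer, create a handle for the caller and fill in the info block
 * describing it.
 */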
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;

	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
		NV_ERROR(drm, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(nvbo->gem);
	return ret;
}

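/*
 * Work out TTM placement flags for a buffer from the domains userspace says
 * it may live in, preferring to leave the buffer where it currently resides.
 */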
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

struct validate_op {
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;
};

static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);

		nouveau_bo_fence(nvbo, fence);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_unreference_unlocked(nvbo->gem);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_list(&op->vram_list, fence);
	validate_fini_list(&op->gart_list, fence);
	validate_fini_list(&op->both_list, fence);
}

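/*
 * First validation pass: look up and reserve every buffer on the pushbuf's
 * list, sorting each one onto the VRAM, GART or "either" list according to
 * its valid domains.  A reservation that would deadlock is retried through
 * ttm_bo_reserve_slowpath() and the list is rebuilt from scratch.
 */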
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct drm_device *dev = chan->drm->dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	uint32_t sequence;
	int trycnt = 0;
	int ret, i;
	struct nouveau_bo *res_bo = NULL;

	sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
retry:
	if (++trycnt > 100000) {
		NV_ERROR(drm, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_ERROR(drm, "Unknown handle 0x%08x\n", b->handle);
			validate_fini(op, NULL);
			return -ENOENT;
		}
		nvbo = gem->driver_private;
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_unreference_unlocked(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_ERROR(drm, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_unreference_unlocked(gem);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
		if (ret) {
			validate_fini(op, NULL);
			if (unlikely(ret == -EAGAIN)) {
				sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      sequence);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				drm_gem_object_unreference_unlocked(gem);
				if (ret != -ERESTARTSYS)
					NV_ERROR(drm, "fail reserve\n");
				return ret;
			}
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_ERROR(drm, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &op->both_list);
			validate_fini(op, NULL);
			return -EINVAL;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	return 0;
}

static int
validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
{
	struct nouveau_fence *fence = NULL;
	int ret = 0;

	spin_lock(&nvbo->bo.bdev->fence_lock);
	if (nvbo->bo.sync_obj)
		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	if (fence) {
		ret = nouveau_fence_sync(fence, chan);
		nouveau_fence_unref(&fence);
	}

	return ret;
}

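/*
 * Second validation pass: for each reserved buffer, sync against its current
 * fence, move it into an allowed placement, and (pre-NV50) copy updated
 * presumed offsets/domains back to userspace.  Returns the number of buffers
 * whose presumed state no longer matches, telling the caller that
 * relocations must be applied.
 */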
static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
	struct nouveau_drm *drm = chan->drm;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(drm, "fail pre-validate sync\n");
			return ret;
		}

		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_ERROR(drm, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_ERROR(drm, "fail ttm_validate\n");
			return ret;
		}

		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(drm, "fail post-validate sync\n");
			return ret;
		}

		if (nv_device(drm->device)->card_type < NV_50) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;

			if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
					     &b->presumed, sizeof(b->presumed)))
				return -EFAULT;
		}
	}

	return relocs;
}

static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct nouveau_drm *drm = chan->drm;
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(drm, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(drm, "validate vram_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(drm, "validate gart_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(drm, "validate both_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}

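/* Copy a userspace array into a freshly allocated kernel buffer. */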
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	mem = kmalloc(nmemb * size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
		kfree(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

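/*
 * Patch relocations into the push buffers: for every reloc entry, compute the
 * value from the target buffer's presumed offset and domain, wait for the
 * containing buffer to go idle, and write the value through a kernel mapping.
 */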
static int
nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_ERROR(drm, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_ERROR(drm, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_ERROR(drm, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_ERROR(drm, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		spin_lock(&nvbo->bo.bdev->fence_lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
		if (ret) {
			NV_ERROR(drm, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	kfree(reloc);
	return ret;
}

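/*
 * DRM_NOUVEAU_GEM_PUSHBUF ioctl: the main command submission path.  Copies
 * the push and buffer arrays from userspace, validates and fences every
 * buffer, applies relocations if needed, then submits the pushes to the
 * channel using whichever method the chipset supports (IB ring, call, or
 * jump).
 */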
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->handle == (NVDRM_CHAN | req->channel)) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_ERROR(drm, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_ERROR(drm, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_ERROR(drm, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		kfree(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_ERROR(drm, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(drm, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
		if (ret) {
			NV_ERROR(drm, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_ERROR(drm, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (nv_device(drm->device)->chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_ERROR(drm, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_ERROR(drm, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, 0x20000000 |
				      (nvbo->bo.offset + push[i].offset));
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, &fence);
	if (ret) {
		NV_ERROR(drm, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence);
	nouveau_fence_unref(&fence);

out_prevalid:
	kfree(bo);
	kfree(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (nv_device(drm->device)->chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}

static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;

	return flags;
}

int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	int ret = -EINVAL;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
	spin_unlock(&nvbo->bo.bdev->fence_lock);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}