/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"

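/*
 * GEM object destructor: called when the final reference to the object
 * is dropped.  The backing TTM buffer object is released here; a
 * runtime-PM reference is held across teardown, as releasing the buffer
 * may need the device to be awake.
 */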
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        struct device *dev = drm->dev->dev;
        int ret;

        ret = pm_runtime_get_sync(dev);
        if (WARN_ON(ret < 0 && ret != -EACCES))
                return;

        if (gem->import_attach)
                drm_prime_gem_destroy(gem, nvbo->bo.sg);

        drm_gem_object_release(gem);

        /* reset filp so nouveau_bo_del_ttm() can test for it */
        gem->filp = NULL;
        ttm_bo_unref(&bo);

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
}

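/*
 * Called for each open file that obtains a handle to this object.  On
 * chipsets with per-client virtual memory (cli->vm set), ensure the
 * buffer is mapped into the client's VM, bumping the mapping's refcount
 * if it already exists.
 */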
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct nvkm_vma *vma;
        struct device *dev = drm->dev->dev;
        int ret;

        if (!cli->vm)
                return 0;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
        if (ret)
                return ret;

        vma = nouveau_bo_vma_find(nvbo, cli->vm);
        if (!vma) {
                vma = kzalloc(sizeof(*vma), GFP_KERNEL);
                if (!vma) {
                        ret = -ENOMEM;
                        goto out;
                }

                ret = pm_runtime_get_sync(dev);
                if (ret < 0 && ret != -EACCES) {
                        kfree(vma);
                        goto out;
                }

                ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
                if (ret)
                        kfree(vma);

                pm_runtime_mark_last_busy(dev);
                pm_runtime_put_autosuspend(dev);
        } else {
                vma->refcount++;
        }

out:
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}

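/*
 * Deferred VMA teardown, run from nouveau_fence_work() once the fence
 * protecting the mapping has signalled.
 */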
static void
nouveau_gem_object_delete(void *data)
{
        struct nvkm_vma *vma = data;
        nvkm_vm_unmap(vma);
        nvkm_vm_put(vma);
        kfree(vma);
}

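/*
 * Tear down a client VMA.  If the buffer is still mapped and fenced,
 * the unmap is queued behind the relevant fence via nouveau_fence_work();
 * otherwise it is performed immediately.
 */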
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
{
        const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
        struct reservation_object *resv = nvbo->bo.resv;
        struct reservation_object_list *fobj;
        struct fence *fence = NULL;

        fobj = reservation_object_get_list(resv);

        list_del(&vma->head);

        if (fobj && fobj->shared_count > 1)
                ttm_bo_wait(&nvbo->bo, true, false, false);
        else if (fobj && fobj->shared_count == 1)
                fence = rcu_dereference_protected(fobj->shared[0],
                                                reservation_object_held(resv));
        else
                fence = reservation_object_get_excl(nvbo->bo.resv);

        if (fence && mapped) {
                nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
        } else {
                if (mapped)
                        nvkm_vm_unmap(vma);
                nvkm_vm_put(vma);
                kfree(vma);
        }
}

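/*
 * Called when an open file drops its handle to this object.  Drops the
 * client's VMA reference and, on the last reference, unmaps the buffer
 * from the client's VM (waking the device if necessary).
 */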
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct device *dev = drm->dev->dev;
        struct nvkm_vma *vma;
        int ret;

        if (!cli->vm)
                return;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
        if (ret)
                return;

        vma = nouveau_bo_vma_find(nvbo, cli->vm);
        if (vma) {
                if (--vma->refcount == 0) {
                        ret = pm_runtime_get_sync(dev);
                        if (!WARN_ON(ret < 0 && ret != -EACCES)) {
                                nouveau_gem_object_unmap(nvbo, vma);
                                pm_runtime_mark_last_busy(dev);
                                pm_runtime_put_autosuspend(dev);
                        }
                }
        }
        ttm_bo_unreserve(&nvbo->bo);
}

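/*
 * Allocate a new buffer object and initialise its embedded GEM object.
 * The NOUVEAU_GEM_DOMAIN_* flags requested by userspace are translated
 * into TTM placement flags before the BO is created.
 */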
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
                uint32_t tile_mode, uint32_t tile_flags,
                struct nouveau_bo **pnvbo)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_bo *nvbo;
        u32 flags = 0;
        int ret;

        if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
                flags |= TTM_PL_FLAG_VRAM;
        if (domain & NOUVEAU_GEM_DOMAIN_GART)
                flags |= TTM_PL_FLAG_TT;
        if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
                flags |= TTM_PL_FLAG_SYSTEM;

        if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
                flags |= TTM_PL_FLAG_UNCACHED;

        ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
                             tile_flags, NULL, NULL, pnvbo);
        if (ret)
                return ret;
        nvbo = *pnvbo;

        /* we restrict allowed domains on nv50+ to only the types
         * that were requested at creation time.  not possible on
         * earlier chips without busting the ABI.
         */
        nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
                              NOUVEAU_GEM_DOMAIN_GART;
        if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
                nvbo->valid_domains &= domain;

        /* Initialize the embedded gem-object. We return a single gem-reference
         * to the caller, instead of a normal nouveau_bo ttm reference. */
        ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
        if (ret) {
                nouveau_bo_ref(NULL, pnvbo);
                return -ENOMEM;
        }

        nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
        return 0;
}

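/*
 * Fill in the drm_nouveau_gem_info reply for a buffer: domain, size, map
 * handle and offset.  For clients with a VM, the offset reported is the
 * buffer's address inside that VM rather than its physical placement.
 */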
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
                 struct drm_nouveau_gem_info *rep)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nvkm_vma *vma;

        if (is_power_of_2(nvbo->valid_domains))
                rep->domain = nvbo->valid_domains;
        else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
                rep->domain = NOUVEAU_GEM_DOMAIN_GART;
        else
                rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
        rep->offset = nvbo->bo.offset;
        if (cli->vm) {
                vma = nouveau_bo_vma_find(nvbo, cli->vm);
                if (!vma)
                        return -EINVAL;

                rep->offset = vma->offset;
        }

        rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
        rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
        rep->tile_mode = nvbo->tile_mode;
        rep->tile_flags = nvbo->tile_flags;
        return 0;
}

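/*
 * DRM_NOUVEAU_GEM_NEW: allocate a buffer object and return a handle plus
 * the usual info block to userspace.  On failure after handle creation,
 * the handle is deleted again so no half-initialised object leaks out.
 */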
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nvkm_fb *fb = nvxx_fb(&drm->device);
        struct drm_nouveau_gem_new *req = data;
        struct nouveau_bo *nvbo = NULL;
        int ret = 0;

        if (!nvkm_fb_memtype_valid(fb, req->info.tile_flags)) {
                NV_PRINTK(err, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
                return -EINVAL;
        }

        ret = nouveau_gem_new(dev, req->info.size, req->align,
                              req->info.domain, req->info.tile_mode,
                              req->info.tile_flags, &nvbo);
        if (ret)
                return ret;

        ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
        if (ret == 0) {
                ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
                if (ret)
                        drm_gem_handle_delete(file_priv, req->info.handle);
        }

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(&nvbo->gem);
        return ret;
}

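/*
 * Work out the TTM placement for a buffer from the domains userspace
 * says are valid for this submission.  Write domains take priority over
 * read domains, and the buffer's current placement is preferred when it
 * is already somewhere acceptable, to avoid needless migration.
 */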
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
                       uint32_t write_domains, uint32_t valid_domains)
{
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct ttm_buffer_object *bo = &nvbo->bo;
        uint32_t domains = valid_domains & nvbo->valid_domains &
                (write_domains ? write_domains : read_domains);
        uint32_t pref_flags = 0, valid_flags = 0;

        if (!domains)
                return -EINVAL;

        if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
                valid_flags |= TTM_PL_FLAG_VRAM;

        if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
                valid_flags |= TTM_PL_FLAG_TT;

        if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
            bo->mem.mem_type == TTM_PL_VRAM)
                pref_flags |= TTM_PL_FLAG_VRAM;

        else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
                 bo->mem.mem_type == TTM_PL_TT)
                pref_flags |= TTM_PL_FLAG_TT;

        else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
                pref_flags |= TTM_PL_FLAG_VRAM;

        else
                pref_flags |= TTM_PL_FLAG_TT;

        nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

        return 0;
}

struct validate_op {
        struct list_head list;
        struct ww_acquire_ctx ticket;
};

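/*
 * Undo a (possibly partial) validation: fence each buffer if a fence is
 * supplied, drop any kmap left over from reloc patching, and unreserve.
 * The _no_ticket variant leaves the ww_acquire ctx alive so the caller
 * can retry the reservation loop.
 */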
static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
                        struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
        struct nouveau_bo *nvbo;
        struct drm_nouveau_gem_pushbuf_bo *b;

        while (!list_empty(&op->list)) {
                nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
                b = &pbbo[nvbo->pbbo_index];

                if (likely(fence))
                        nouveau_bo_fence(nvbo, fence, !!b->write_domains);

                if (unlikely(nvbo->validate_mapped)) {
                        ttm_bo_kunmap(&nvbo->kmap);
                        nvbo->validate_mapped = false;
                }

                list_del(&nvbo->entry);
                nvbo->reserved_by = NULL;
                ttm_bo_unreserve_ticket(&nvbo->bo, &op->ticket);
                drm_gem_object_unreference_unlocked(&nvbo->gem);
        }
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence,
              struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
        validate_fini_no_ticket(op, fence, pbbo);
        ww_acquire_fini(&op->ticket);
}

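/*
 * Look up and reserve every buffer on the pushbuf's validation list,
 * using a ww_mutex acquire context so that a -EDEADLK from
 * ttm_bo_reserve() can be resolved by backing off all reservations and
 * retrying via the slowpath.  Buffers are sorted into VRAM-only,
 * GART-only and either-is-fine lists, which are then spliced together
 * onto op->list.
 */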
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
              struct drm_nouveau_gem_pushbuf_bo *pbbo,
              int nr_buffers, struct validate_op *op)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct drm_device *dev = chan->drm->dev;
        int trycnt = 0;
        int ret = -EINVAL, i;
        struct nouveau_bo *res_bo = NULL;
        LIST_HEAD(gart_list);
        LIST_HEAD(vram_list);
        LIST_HEAD(both_list);

        ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
        if (++trycnt > 100000) {
                NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
                return -EINVAL;
        }

        for (i = 0; i < nr_buffers; i++) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
                struct drm_gem_object *gem;
                struct nouveau_bo *nvbo;

                gem = drm_gem_object_lookup(dev, file_priv, b->handle);
                if (!gem) {
                        NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
                        ret = -ENOENT;
                        break;
                }
                nvbo = nouveau_gem_object(gem);
                if (nvbo == res_bo) {
                        res_bo = NULL;
                        drm_gem_object_unreference_unlocked(gem);
                        continue;
                }

                if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
                        NV_PRINTK(err, cli, "multiple instances of buffer %d on "
                                      "validation list\n", b->handle);
                        drm_gem_object_unreference_unlocked(gem);
                        ret = -EINVAL;
                        break;
                }

                ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
                if (ret) {
                        list_splice_tail_init(&vram_list, &op->list);
                        list_splice_tail_init(&gart_list, &op->list);
                        list_splice_tail_init(&both_list, &op->list);
                        validate_fini_no_ticket(op, NULL, NULL);
                        if (unlikely(ret == -EDEADLK)) {
                                ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
                                                              &op->ticket);
                                if (!ret)
                                        res_bo = nvbo;
                        }
                        if (unlikely(ret)) {
                                if (ret != -ERESTARTSYS)
                                        NV_PRINTK(err, cli, "fail reserve\n");
                                break;
                        }
                }

                b->user_priv = (uint64_t)(unsigned long)nvbo;
                nvbo->reserved_by = file_priv;
                nvbo->pbbo_index = i;
                if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
                    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
                        list_add_tail(&nvbo->entry, &both_list);
                else
                if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
                        list_add_tail(&nvbo->entry, &vram_list);
                else
                if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
                        list_add_tail(&nvbo->entry, &gart_list);
                else {
                        NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
                                 b->valid_domains);
                        list_add_tail(&nvbo->entry, &both_list);
                        ret = -EINVAL;
                        break;
                }
                if (nvbo == res_bo)
                        goto retry;
        }

        ww_acquire_done(&op->ticket);
        list_splice_tail(&vram_list, &op->list);
        list_splice_tail(&gart_list, &op->list);
        list_splice_tail(&both_list, &op->list);
        if (ret)
                validate_fini(op, NULL, NULL);
        return ret;
}

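/*
 * Validate each reserved buffer into an acceptable placement and sync it
 * against the channel.  On pre-Tesla chips, any buffer whose actual
 * offset or domain no longer matches what userspace presumed has its
 * presumed state rewritten and copied back, and the number of such
 * buffers is returned so the caller knows relocations must be applied.
 */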
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
              struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
              uint64_t user_pbbo_ptr)
{
        struct nouveau_drm *drm = chan->drm;
        struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
                                (void __force __user *)(uintptr_t)user_pbbo_ptr;
        struct nouveau_bo *nvbo;
        int ret, relocs = 0;

        list_for_each_entry(nvbo, list, entry) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

                ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
                                             b->write_domains,
                                             b->valid_domains);
                if (unlikely(ret)) {
                        NV_PRINTK(err, cli, "fail set_domain\n");
                        return ret;
                }

                ret = nouveau_bo_validate(nvbo, true, false);
                if (unlikely(ret)) {
                        if (ret != -ERESTARTSYS)
                                NV_PRINTK(err, cli, "fail ttm_validate\n");
                        return ret;
                }

                ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
                if (unlikely(ret)) {
                        if (ret != -ERESTARTSYS)
                                NV_PRINTK(err, cli, "fail post-validate sync\n");
                        return ret;
                }

                if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
                        if (nvbo->bo.offset == b->presumed.offset &&
                            ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
                              b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
                             (nvbo->bo.mem.mem_type == TTM_PL_TT &&
                              b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
                                continue;

                        if (nvbo->bo.mem.mem_type == TTM_PL_TT)
                                b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
                        else
                                b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
                        b->presumed.offset = nvbo->bo.offset;
                        b->presumed.valid = 0;
                        relocs++;

                        if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
                                             &b->presumed, sizeof(b->presumed)))
                                return -EFAULT;
                }
        }

        return relocs;
}

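/*
 * Reserve and validate the full buffer list for a pushbuf submission.
 * On success, *apply_relocs holds the number of buffers whose presumed
 * addresses were stale (always 0 on Tesla and newer).
 */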
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
                             struct drm_file *file_priv,
                             struct drm_nouveau_gem_pushbuf_bo *pbbo,
                             uint64_t user_buffers, int nr_buffers,
                             struct validate_op *op, int *apply_relocs)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        int ret;

        INIT_LIST_HEAD(&op->list);

        if (nr_buffers == 0)
                return 0;

        ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
        if (unlikely(ret)) {
                if (ret != -ERESTARTSYS)
                        NV_PRINTK(err, cli, "validate_init\n");
                return ret;
        }

        ret = validate_list(chan, cli, &op->list, pbbo, user_buffers);
        if (unlikely(ret < 0)) {
                if (ret != -ERESTARTSYS)
                        NV_PRINTK(err, cli, "validating bo list\n");
                validate_fini(op, NULL, NULL);
                return ret;
        }
        *apply_relocs = ret;
        return 0;
}

static inline void
u_free(void *addr)
{
        kvfree(addr);
}

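/*
 * Copy a userspace array into a kernel allocation, falling back from
 * kmalloc to vmalloc for large arrays.  All callers bound nmemb against
 * the NOUVEAU_GEM_MAX_* limits first, which keeps the size computation
 * from overflowing.
 */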
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
        void *mem;
        void __user *userptr = (void __force __user *)(uintptr_t)user;

        size *= nmemb;

        mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
        if (!mem)
                mem = vmalloc(size);
        if (!mem)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(mem, userptr, size)) {
                u_free(mem);
                return ERR_PTR(-EFAULT);
        }

        return mem;
}

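/*
 * Patch relocations into pushbuf-referenced buffers whose presumed
 * addresses turned out to be stale: each reloc entry names a target
 * buffer, an offset within a containing buffer, and how to combine the
 * target's real offset with the reloc data before writing it back.
 */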
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
                                struct drm_nouveau_gem_pushbuf *req,
                                struct drm_nouveau_gem_pushbuf_bo *bo)
{
        struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
        int ret = 0;
        unsigned i;

        reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
        if (IS_ERR(reloc))
                return PTR_ERR(reloc);

        for (i = 0; i < req->nr_relocs; i++) {
                struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
                struct drm_nouveau_gem_pushbuf_bo *b;
                struct nouveau_bo *nvbo;
                uint32_t data;

                if (unlikely(r->bo_index >= req->nr_buffers)) {
                        NV_PRINTK(err, cli, "reloc bo index invalid\n");
                        ret = -EINVAL;
                        break;
                }

                b = &bo[r->bo_index];
                if (b->presumed.valid)
                        continue;

                if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
                        NV_PRINTK(err, cli, "reloc container bo index invalid\n");
                        ret = -EINVAL;
                        break;
                }
                nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

                if (unlikely(r->reloc_bo_offset + 4 >
                             nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
                        NV_PRINTK(err, cli, "reloc outside of bo\n");
                        ret = -EINVAL;
                        break;
                }

                if (!nvbo->kmap.virtual) {
                        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
                                          &nvbo->kmap);
                        if (ret) {
                                NV_PRINTK(err, cli, "failed kmap for reloc\n");
                                break;
                        }
                        nvbo->validate_mapped = true;
                }

                if (r->flags & NOUVEAU_GEM_RELOC_LOW)
                        data = b->presumed.offset + r->data;
                else
                if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
                        data = (b->presumed.offset + r->data) >> 32;
                else
                        data = r->data;

                if (r->flags & NOUVEAU_GEM_RELOC_OR) {
                        if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
                                data |= r->tor;
                        else
                                data |= r->vor;
                }

                ret = ttm_bo_wait(&nvbo->bo, true, false, false);
                if (ret) {
                        NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
                        break;
                }

                nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
        }

        u_free(reloc);
        return ret;
}

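/*
 * DRM_NOUVEAU_GEM_PUSHBUF: the main command submission ioctl.  Copies in
 * the push, buffer and reloc arrays, validates every referenced buffer,
 * applies relocations if needed, submits the pushes to the channel, and
 * fences the whole buffer list before cleaning up.
 */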
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_abi16_chan *temp;
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct drm_nouveau_gem_pushbuf *req = data;
        struct drm_nouveau_gem_pushbuf_push *push;
        struct drm_nouveau_gem_pushbuf_bo *bo;
        struct nouveau_channel *chan = NULL;
        struct validate_op op;
        struct nouveau_fence *fence = NULL;
        int i, j, ret = 0, do_reloc = 0;

        if (unlikely(!abi16))
                return -ENOMEM;

        list_for_each_entry(temp, &abi16->channels, head) {
                if (temp->chan->user.handle == (NVDRM_CHAN | req->channel)) {
                        chan = temp->chan;
                        break;
                }
        }

        if (!chan)
                return nouveau_abi16_put(abi16, -ENOENT);

        req->vram_available = drm->gem.vram_available;
        req->gart_available = drm->gem.gart_available;
        if (unlikely(req->nr_push == 0))
                goto out_next;

        if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
                NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
                         req->nr_push, NOUVEAU_GEM_MAX_PUSH);
                return nouveau_abi16_put(abi16, -EINVAL);
        }

        if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
                NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
                         req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
                return nouveau_abi16_put(abi16, -EINVAL);
        }

        if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
                NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
                         req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
                return nouveau_abi16_put(abi16, -EINVAL);
        }

        push = u_memcpya(req->push, req->nr_push, sizeof(*push));
        if (IS_ERR(push))
                return nouveau_abi16_put(abi16, PTR_ERR(push));

        bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
        if (IS_ERR(bo)) {
                u_free(push);
                return nouveau_abi16_put(abi16, PTR_ERR(bo));
        }

        /* Ensure all push buffers are on validate list */
        for (i = 0; i < req->nr_push; i++) {
                if (push[i].bo_index >= req->nr_buffers) {
                        NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
                        ret = -EINVAL;
                        goto out_prevalid;
                }
        }

        /* Validate buffer list */
        ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
                                           req->nr_buffers, &op, &do_reloc);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        NV_PRINTK(err, cli, "validate: %d\n", ret);
                goto out_prevalid;
        }

        /* Apply any relocations that are required */
        if (do_reloc) {
                ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
                if (ret) {
                        NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
                        goto out;
                }
        }

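        /* Submit the pushes using whichever mechanism the channel supports:
         * an indirect buffer entry per push (ib_max set), a CALL to each
         * push buffer on nv25+, or a JUMP into each buffer (patching a
         * jump back into its tail) on older chips.
         */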
        if (chan->dma.ib_max) {
                ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
                if (ret) {
                        NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;

                        nv50_dma_push(chan, nvbo, push[i].offset,
                                      push[i].length);
                }
        } else
        if (drm->device.info.chipset >= 0x25) {
                ret = RING_SPACE(chan, req->nr_push * 2);
                if (ret) {
                        NV_PRINTK(err, cli, "cal_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;

                        OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
                        OUT_RING(chan, 0);
                }
        } else {
                ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
                if (ret) {
                        NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;
                        uint32_t cmd;

                        cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
                        cmd |= 0x20000000;
                        if (unlikely(cmd != req->suffix0)) {
                                if (!nvbo->kmap.virtual) {
                                        ret = ttm_bo_kmap(&nvbo->bo, 0,
                                                          nvbo->bo.mem.num_pages,
                                                          &nvbo->kmap);
                                        if (ret) {
                                                WIND_RING(chan);
                                                goto out;
                                        }
                                        nvbo->validate_mapped = true;
                                }

                                nouveau_bo_wr32(nvbo, (push[i].offset +
                                                push[i].length - 8) / 4, cmd);
                        }

                        OUT_RING(chan, 0x20000000 |
                                      (nvbo->bo.offset + push[i].offset));
                        OUT_RING(chan, 0);
                        for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
                                OUT_RING(chan, 0);
                }
        }

        ret = nouveau_fence_new(chan, false, &fence);
        if (ret) {
                NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
                WIND_RING(chan);
                goto out;
        }

out:
        validate_fini(&op, fence, bo);
        nouveau_fence_unref(&fence);

out_prevalid:
        u_free(bo);
        u_free(push);

out_next:
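        /* Report the suffix words userspace must leave room for at the end
         * of each push buffer: none for indirect buffers, a RETURN command
         * on nv25+, or the jump-back address used by the patching above.
         */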
        if (chan->dma.ib_max) {
                req->suffix0 = 0x00000000;
                req->suffix1 = 0x00000000;
        } else
        if (drm->device.info.chipset >= 0x25) {
                req->suffix0 = 0x00020000;
                req->suffix1 = 0x00000000;
        } else {
                req->suffix0 = 0x20000000 |
                              (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
                req->suffix1 = 0x00000000;
        }

        return nouveau_abi16_put(abi16, ret);
}

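/*
 * DRM_NOUVEAU_GEM_CPU_PREP: wait (or poll, with NOWAIT) for GPU access
 * to a buffer to finish before the CPU touches it, then sync the buffer
 * for CPU access.  The wait is bounded at 30 seconds.
 */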
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_nouveau_gem_cpu_prep *req = data;
        struct drm_gem_object *gem;
        struct nouveau_bo *nvbo;
        bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
        bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
        int ret;

        gem = drm_gem_object_lookup(dev, file_priv, req->handle);
        if (!gem)
                return -ENOENT;
        nvbo = nouveau_gem_object(gem);

        if (no_wait)
                ret = reservation_object_test_signaled_rcu(nvbo->bo.resv, write) ? 0 : -EBUSY;
        else {
                long lret;

                lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true, 30 * HZ);
                if (!lret)
                        ret = -EBUSY;
                else if (lret > 0)
                        ret = 0;
                else
                        ret = lret;
        }
        nouveau_bo_sync_for_cpu(nvbo);
        drm_gem_object_unreference_unlocked(gem);

        return ret;
}

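/*
 * DRM_NOUVEAU_GEM_CPU_FINI: the CPU is done with the buffer; sync it
 * back for device access.
 */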
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_nouveau_gem_cpu_fini *req = data;
        struct drm_gem_object *gem;
        struct nouveau_bo *nvbo;

        gem = drm_gem_object_lookup(dev, file_priv, req->handle);
        if (!gem)
                return -ENOENT;
        nvbo = nouveau_gem_object(gem);

        nouveau_bo_sync_for_device(nvbo);
        drm_gem_object_unreference_unlocked(gem);
        return 0;
}

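/*
 * DRM_NOUVEAU_GEM_INFO: look up a handle and return the buffer's info
 * block (domain, size, offset, map handle, tiling).
 */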
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_nouveau_gem_info *req = data;
        struct drm_gem_object *gem;
        int ret;

        gem = drm_gem_object_lookup(dev, file_priv, req->handle);
        if (!gem)
                return -ENOENT;

        ret = nouveau_gem_info(file_priv, gem, req);
        drm_gem_object_unreference_unlocked(gem);
        return ret;
}