1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/fence-array.h>
29 #include <drm/drmP.h>
30 #include <drm/amdgpu_drm.h>
31 #include "amdgpu.h"
32 #include "amdgpu_trace.h"
33
34 /*
35  * GPUVM
36  * GPUVM is similar to the legacy gart on older asics; however,
37  * rather than there being a single global gart table
38  * for the entire GPU, there are multiple VM page tables active
39  * at any given time.  The VM page tables can contain a mix of
40  * vram pages and system memory pages, and system memory pages
41  * can be mapped as snooped (cached system pages) or unsnooped
42  * (uncached system pages).
43  * Each VM has an ID associated with it and there is a page table
44  * associated with each VMID.  When executing a command buffer,
45  * the kernel tells the ring what VMID to use for that command
46  * buffer.  VMIDs are allocated dynamically as commands are submitted.
47  * The userspace drivers maintain their own address space and the kernel
48  * sets up their page tables accordingly when they submit their
49  * command buffers and a VMID is assigned.
50  * Cayman/Trinity support up to 8 active VMs at any given time;
51  * SI supports 16.
52  */
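/*
 * Illustrative sketch only (it is not itself used by the driver, it just
 * mirrors how the update code below walks the tables): with the single
 * level directory used here, a virtual address expressed in GPU pages
 * (gpu_pfn) is decomposed as
 *
 *   pde_idx = gpu_pfn >> amdgpu_vm_block_size;      selects the page table
 *   pte_idx = gpu_pfn & (AMDGPU_VM_PTE_COUNT - 1);  selects the entry in it
 *
 * How many bits go to each level is controlled by the amdgpu_vm_block_size
 * module parameter.
 */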
53
54 /* Local structure. Encapsulate some VM table update parameters to reduce
55  * the number of function parameters
56  */
57 struct amdgpu_pte_update_params {
58         /* amdgpu device we do this update for */
59         struct amdgpu_device *adev;
60         /* address where to copy page table entries from */
61         uint64_t src;
62         /* indirect buffer to fill with commands */
63         struct amdgpu_ib *ib;
64         /* Function which actually does the update */
65         void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
66                      uint64_t addr, unsigned count, uint32_t incr,
67                      uint32_t flags);
68 };
69
70 /**
71  * amdgpu_vm_num_pdes - return the number of page directory entries
72  *
73  * @adev: amdgpu_device pointer
74  *
75  * Calculate the number of page directory entries.
76  */
77 static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
78 {
79         return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
80 }
81
82 /**
83  * amdgpu_vm_directory_size - returns the size of the page directory in bytes
84  *
85  * @adev: amdgpu_device pointer
86  *
87  * Calculate the size of the page directory in bytes.
88  */
89 static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
90 {
91         return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
92 }
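/*
 * Worked example (assumed numbers, for illustration only): with a 64GB VM
 * address space, max_pfn is 64GB / 4KB = 16M GPU pages.  Assuming
 * amdgpu_vm_block_size = 9, amdgpu_vm_num_pdes() returns 16M >> 9 = 32768
 * page directory entries, and amdgpu_vm_directory_size() is
 * 32768 * 8 bytes = 256KB (already GPU page aligned).
 */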
93
94 /**
95  * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
96  *
97  * @vm: vm providing the BOs
98  * @validated: head of validation list
99  * @entry: entry to add
100  *
101  * Add the page directory to the list of BOs to
102  * validate for command submission.
103  */
104 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
105                          struct list_head *validated,
106                          struct amdgpu_bo_list_entry *entry)
107 {
108         entry->robj = vm->page_directory;
109         entry->priority = 0;
110         entry->tv.bo = &vm->page_directory->tbo;
111         entry->tv.shared = true;
112         entry->user_pages = NULL;
113         list_add(&entry->tv.head, validated);
114 }
115
116 /**
117  * amdgpu_vm_get_pt_bos - add the vm PT BOs to a duplicates list
118  *
119  * @adev: amdgpu device pointer
120  * @vm: vm providing the BOs
121  * @duplicates: head of duplicates list
122  *
123  * Add the page tables to the BO duplicates list
124  * for command submission.
125  */
126 void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
127                           struct list_head *duplicates)
128 {
129         uint64_t num_evictions;
130         unsigned i;
131
132         /* We only need to validate the page tables
133          * if they aren't already valid.
134          */
135         num_evictions = atomic64_read(&adev->num_evictions);
136         if (num_evictions == vm->last_eviction_counter)
137                 return;
138
139         /* add the vm page table to the list */
140         for (i = 0; i <= vm->max_pde_used; ++i) {
141                 struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
142
143                 if (!entry->robj)
144                         continue;
145
146                 list_add(&entry->tv.head, duplicates);
147         }
148
149 }
150
151 /**
152  * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
153  *
154  * @adev: amdgpu device instance
155  * @vm: vm providing the BOs
156  *
157  * Move the PT BOs to the tail of the LRU.
158  */
159 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
160                                   struct amdgpu_vm *vm)
161 {
162         struct ttm_bo_global *glob = adev->mman.bdev.glob;
163         unsigned i;
164
165         spin_lock(&glob->lru_lock);
166         for (i = 0; i <= vm->max_pde_used; ++i) {
167                 struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
168
169                 if (!entry->robj)
170                         continue;
171
172                 ttm_bo_move_to_lru_tail(&entry->robj->tbo);
173         }
174         spin_unlock(&glob->lru_lock);
175 }
176
177 static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
178                               struct amdgpu_vm_id *id)
179 {
180         return id->current_gpu_reset_count !=
181                 atomic_read(&adev->gpu_reset_counter);
182 }
183
184 /**
185  * amdgpu_vm_grab_id - allocate the next free VMID
186  *
187  * @vm: vm to allocate id for
188  * @ring: ring we want to submit job to
189  * @sync: sync object where we add dependencies
190  * @fence: fence protecting ID from reuse
 * @job: job which will use the allocated VMID
191  *
192  * Allocate an id for the vm, adding fences to the sync obj as necessary.
193  */
194 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
195                       struct amdgpu_sync *sync, struct fence *fence,
196                       struct amdgpu_job *job)
197 {
198         struct amdgpu_device *adev = ring->adev;
199         uint64_t fence_context = adev->fence_context + ring->idx;
200         struct fence *updates = sync->last_vm_update;
201         struct amdgpu_vm_id *id, *idle;
202         struct fence **fences;
203         unsigned i;
204         int r = 0;
205
206         fences = kmalloc_array(adev->vm_manager.num_ids, sizeof(void *),
207                                GFP_KERNEL);
208         if (!fences)
209                 return -ENOMEM;
210
211         mutex_lock(&adev->vm_manager.lock);
212
213         /* Check if we have an idle VMID */
214         i = 0;
215         list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
216                 fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
217                 if (!fences[i])
218                         break;
219                 ++i;
220         }
221
222         /* If we can't find an idle VMID to use, wait till one becomes available */
223         if (&idle->list == &adev->vm_manager.ids_lru) {
224                 u64 fence_context = adev->vm_manager.fence_context + ring->idx;
225                 unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
226                 struct fence_array *array;
227                 unsigned j;
228
229                 for (j = 0; j < i; ++j)
230                         fence_get(fences[j]);
231
232                 array = fence_array_create(i, fences, fence_context,
233                                            seqno, true);
234                 if (!array) {
235                         for (j = 0; j < i; ++j)
236                                 fence_put(fences[j]);
237                         kfree(fences);
238                         r = -ENOMEM;
239                         goto error;
240                 }
241
242
243                 r = amdgpu_sync_fence(ring->adev, sync, &array->base);
244                 fence_put(&array->base);
245                 if (r)
246                         goto error;
247
248                 mutex_unlock(&adev->vm_manager.lock);
249                 return 0;
250
251         }
252         kfree(fences);
253
254         job->vm_needs_flush = true;
255         /* Check if we can use a VMID already assigned to this VM */
256         i = ring->idx;
257         do {
258                 struct fence *flushed;
259
260                 id = vm->ids[i++];
261                 if (i == AMDGPU_MAX_RINGS)
262                         i = 0;
263
264                 /* Check all the prerequisites to using this VMID */
265                 if (!id)
266                         continue;
267                 if (amdgpu_vm_is_gpu_reset(adev, id))
268                         continue;
269
270                 if (atomic64_read(&id->owner) != vm->client_id)
271                         continue;
272
273                 if (job->vm_pd_addr != id->pd_gpu_addr)
274                         continue;
275
276                 if (!id->last_flush)
277                         continue;
278
279                 if (id->last_flush->context != fence_context &&
280                     !fence_is_signaled(id->last_flush))
281                         continue;
282
283                 flushed  = id->flushed_updates;
284                 if (updates &&
285                     (!flushed || fence_is_later(updates, flushed)))
286                         continue;
287
288                 /* Good, we can use this VMID. Remember this submission as
289                  * user of the VMID.
290                  */
291                 r = amdgpu_sync_fence(ring->adev, &id->active, fence);
292                 if (r)
293                         goto error;
294
295                 id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
296                 list_move_tail(&id->list, &adev->vm_manager.ids_lru);
297                 vm->ids[ring->idx] = id;
298
299                 job->vm_id = id - adev->vm_manager.ids;
300                 job->vm_needs_flush = false;
301                 trace_amdgpu_vm_grab_id(vm, ring->idx, job);
302
303                 mutex_unlock(&adev->vm_manager.lock);
304                 return 0;
305
306         } while (i != ring->idx);
307
308         /* Still no ID to use? Then use the idle one found earlier */
309         id = idle;
310
311         /* Remember this submission as user of the VMID */
312         r = amdgpu_sync_fence(ring->adev, &id->active, fence);
313         if (r)
314                 goto error;
315
316         fence_put(id->first);
317         id->first = fence_get(fence);
318
319         fence_put(id->last_flush);
320         id->last_flush = NULL;
321
322         fence_put(id->flushed_updates);
323         id->flushed_updates = fence_get(updates);
324
325         id->pd_gpu_addr = job->vm_pd_addr;
326         id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
327         list_move_tail(&id->list, &adev->vm_manager.ids_lru);
328         atomic64_set(&id->owner, vm->client_id);
329         vm->ids[ring->idx] = id;
330
331         job->vm_id = id - adev->vm_manager.ids;
332         trace_amdgpu_vm_grab_id(vm, ring->idx, job);
333
334 error:
335         mutex_unlock(&adev->vm_manager.lock);
336         return r;
337 }
338
339 static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
340 {
341         struct amdgpu_device *adev = ring->adev;
342         const struct amdgpu_ip_block_version *ip_block;
343
344         if (ring->type != AMDGPU_RING_TYPE_COMPUTE)
345                 /* only compute rings */
346                 return false;
347
348         ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
349         if (!ip_block)
350                 return false;
351
352         if (ip_block->major <= 7) {
353                 /* gfx7 has no workaround */
354                 return true;
355         } else if (ip_block->major == 8) {
356                 if (adev->gfx.mec_fw_version >= 673)
357                         /* gfx8 is fixed in MEC firmware 673 */
358                         return false;
359                 else
360                         return true;
361         }
362         return false;
363 }
364
365 /**
366  * amdgpu_vm_flush - hardware flush the vm
367  *
368  * @ring: ring to use for flush
369  * @job: job which holds the vmid number and page directory
370  *       address used for the flush
371  *
372  * Emit a VM flush when it is necessary.
373  */
374 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
375 {
376         struct amdgpu_device *adev = ring->adev;
377         struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
378         bool gds_switch_needed = ring->funcs->emit_gds_switch && (
379                 id->gds_base != job->gds_base ||
380                 id->gds_size != job->gds_size ||
381                 id->gws_base != job->gws_base ||
382                 id->gws_size != job->gws_size ||
383                 id->oa_base != job->oa_base ||
384                 id->oa_size != job->oa_size);
385         int r;
386
387         if (ring->funcs->emit_pipeline_sync && (
388             job->vm_needs_flush || gds_switch_needed ||
389             amdgpu_vm_ring_has_compute_vm_bug(ring)))
390                 amdgpu_ring_emit_pipeline_sync(ring);
391
392         if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
393             amdgpu_vm_is_gpu_reset(adev, id))) {
394                 struct fence *fence;
395
396                 trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
397                 amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
398
399                 r = amdgpu_fence_emit(ring, &fence);
400                 if (r)
401                         return r;
402
403                 mutex_lock(&adev->vm_manager.lock);
404                 fence_put(id->last_flush);
405                 id->last_flush = fence;
406                 mutex_unlock(&adev->vm_manager.lock);
407         }
408
409         if (gds_switch_needed) {
410                 id->gds_base = job->gds_base;
411                 id->gds_size = job->gds_size;
412                 id->gws_base = job->gws_base;
413                 id->gws_size = job->gws_size;
414                 id->oa_base = job->oa_base;
415                 id->oa_size = job->oa_size;
416                 amdgpu_ring_emit_gds_switch(ring, job->vm_id,
417                                             job->gds_base, job->gds_size,
418                                             job->gws_base, job->gws_size,
419                                             job->oa_base, job->oa_size);
420         }
421
422         return 0;
423 }
424
425 /**
426  * amdgpu_vm_reset_id - reset VMID to zero
427  *
428  * @adev: amdgpu device structure
429  * @vm_id: vmid number to use
430  *
431  * Reset saved GDS, GWS and OA to force switch on next flush.
432  */
433 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
434 {
435         struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
436
437         id->gds_base = 0;
438         id->gds_size = 0;
439         id->gws_base = 0;
440         id->gws_size = 0;
441         id->oa_base = 0;
442         id->oa_size = 0;
443 }
444
445 /**
446  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
447  *
448  * @vm: requested vm
449  * @bo: requested buffer object
450  *
451  * Find @bo inside the requested vm.
452  * Search inside the @bo's vm list for the requested vm.
453  * Returns the found bo_va or NULL if none is found.
454  *
455  * Object has to be reserved!
456  */
457 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
458                                        struct amdgpu_bo *bo)
459 {
460         struct amdgpu_bo_va *bo_va;
461
462         list_for_each_entry(bo_va, &bo->va, bo_list) {
463                 if (bo_va->vm == vm) {
464                         return bo_va;
465                 }
466         }
467         return NULL;
468 }
469
470 /**
471  * amdgpu_vm_do_set_ptes - helper to call the right asic function
472  *
473  * @params: see amdgpu_pte_update_params definition
474  * @pe: addr of the page entry
475  * @addr: dst addr to write into pe
476  * @count: number of page entries to update
477  * @incr: increase next addr by incr bytes
478  * @flags: hw access flags
479  *
480  * Traces the parameters and calls the right asic functions
481  * to setup the page table using the DMA.
482  */
483 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
484                                   uint64_t pe, uint64_t addr,
485                                   unsigned count, uint32_t incr,
486                                   uint32_t flags)
487 {
488         trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
489
490         if (count < 3) {
491                 amdgpu_vm_write_pte(params->adev, params->ib, pe,
492                                     addr | flags, count, incr);
493
494         } else {
495                 amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
496                                       count, incr, flags);
497         }
498 }
499
500 /**
501  * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
502  *
503  * @params: see amdgpu_pte_update_params definition
504  * @pe: addr of the page entry
505  * @addr: dst addr to write into pe
506  * @count: number of page entries to update
507  * @incr: increase next addr by incr bytes
508  * @flags: hw access flags
509  *
510  * Traces the parameters and calls the DMA function to copy the PTEs.
511  */
512 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
513                                    uint64_t pe, uint64_t addr,
514                                    unsigned count, uint32_t incr,
515                                    uint32_t flags)
516 {
517         trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
518
519         amdgpu_vm_copy_pte(params->adev, params->ib, pe,
520                            (params->src + (addr >> 12) * 8), count);
521 }
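/*
 * Note on the source offset above (illustrative numbers only): params->src
 * points at a buffer holding one 8 byte PTE per 4KB GPU page, so the entry
 * for the page containing @addr lives at src + (addr >> 12) * 8.  For
 * example, addr = 0x5000 selects entry index 5, i.e. byte offset 0x28.
 */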
522
523 /**
524  * amdgpu_vm_clear_bo - initially clear the page dir/table
525  *
526  * @adev: amdgpu_device pointer
 * @vm: requested vm
527  * @bo: bo to clear
528  *
529  * The bo needs to be reserved before calling this.
530  */
531 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
532                               struct amdgpu_vm *vm,
533                               struct amdgpu_bo *bo)
534 {
535         struct amdgpu_ring *ring;
536         struct fence *fence = NULL;
537         struct amdgpu_job *job;
538         struct amdgpu_pte_update_params params;
539         unsigned entries;
540         uint64_t addr;
541         int r;
542
543         ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
544
545         r = reservation_object_reserve_shared(bo->tbo.resv);
546         if (r)
547                 return r;
548
549         r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
550         if (r)
551                 goto error;
552
553         addr = amdgpu_bo_gpu_offset(bo);
554         entries = amdgpu_bo_size(bo) / 8;
555
556         r = amdgpu_job_alloc_with_ib(adev, 64, &job);
557         if (r)
558                 goto error;
559
560         memset(&params, 0, sizeof(params));
561         params.adev = adev;
562         params.ib = &job->ibs[0];
563         amdgpu_vm_do_set_ptes(&params, addr, 0, entries, 0, 0);
564         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
565
566         WARN_ON(job->ibs[0].length_dw > 64);
567         r = amdgpu_job_submit(job, ring, &vm->entity,
568                               AMDGPU_FENCE_OWNER_VM, &fence);
569         if (r)
570                 goto error_free;
571
572         amdgpu_bo_fence(bo, fence, true);
573         fence_put(fence);
574         return 0;
575
576 error_free:
577         amdgpu_job_free(job);
578
579 error:
580         return r;
581 }
582
583 /**
584  * amdgpu_vm_map_gart - Resolve gart mapping of addr
585  *
586  * @pages_addr: optional DMA address to use for lookup
587  * @addr: the unmapped addr
588  *
589  * Look up the physical address of the page that the pte resolves
590  * to and return the value to program into the page table entry.
591  */
592 static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
593 {
594         uint64_t result;
595
596         /* page table offset */
597         result = pages_addr[addr >> PAGE_SHIFT];
598
599         /* in case cpu page size != gpu page size */
600         result |= addr & (~PAGE_MASK);
601
602         result &= 0xFFFFFFFFFFFFF000ULL;
603
604         return result;
605 }
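/*
 * Worked example for the lookup above (illustrative numbers only): assume
 * a 64KB CPU page size, i.e. PAGE_SHIFT = 16.  For addr = 0x12345 the code
 * reads pages_addr[0x1], ORs in the in-page offset 0x2345 and then masks
 * off the low 12 bits, so the result points at the 4KB GPU page inside
 * that 64KB CPU page (sub-page offset 0x2000 is kept, 0x345 is dropped).
 */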
606
607 static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
608                                          struct amdgpu_vm *vm,
609                                          bool shadow)
610 {
611         struct amdgpu_ring *ring;
612         struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow :
613                 vm->page_directory;
614         uint64_t pd_addr;
615         uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
616         uint64_t last_pde = ~0, last_pt = ~0;
617         unsigned count = 0, pt_idx, ndw;
618         struct amdgpu_job *job;
619         struct amdgpu_pte_update_params params;
620         struct fence *fence = NULL;
621
622         int r;
623
624         if (!pd)
625                 return 0;
626         pd_addr = amdgpu_bo_gpu_offset(pd);
627         ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
628
629         /* padding, etc. */
630         ndw = 64;
631
632         /* assume the worst case */
633         ndw += vm->max_pde_used * 6;
634
635         r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
636         if (r)
637                 return r;
638
639         memset(&params, 0, sizeof(params));
640         params.adev = adev;
641         params.ib = &job->ibs[0];
642
643         /* walk over the address space and update the page directory */
644         for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
645                 struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
646                 uint64_t pde, pt;
647
648                 if (bo == NULL)
649                         continue;
650
651                 pt = amdgpu_bo_gpu_offset(bo);
652                 if (!shadow) {
653                         if (vm->page_tables[pt_idx].addr == pt)
654                                 continue;
655                         vm->page_tables[pt_idx].addr = pt;
656                 } else {
657                         if (vm->page_tables[pt_idx].shadow_addr == pt)
658                                 continue;
659                         vm->page_tables[pt_idx].shadow_addr = pt;
660                 }
661
662                 pde = pd_addr + pt_idx * 8;
663                 if (((last_pde + 8 * count) != pde) ||
664                     ((last_pt + incr * count) != pt) ||
665                     (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
666
667                         if (count) {
668                                 amdgpu_vm_do_set_ptes(&params, last_pde,
669                                                       last_pt, count, incr,
670                                                       AMDGPU_PTE_VALID);
671                         }
672
673                         count = 1;
674                         last_pde = pde;
675                         last_pt = pt;
676                 } else {
677                         ++count;
678                 }
679         }
680
681         if (count)
682                 amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
683                                       count, incr, AMDGPU_PTE_VALID);
684
685         if (params.ib->length_dw != 0) {
686                 amdgpu_ring_pad_ib(ring, params.ib);
687                 amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
688                                  AMDGPU_FENCE_OWNER_VM);
689                 WARN_ON(params.ib->length_dw > ndw);
690                 r = amdgpu_job_submit(job, ring, &vm->entity,
691                                       AMDGPU_FENCE_OWNER_VM, &fence);
692                 if (r)
693                         goto error_free;
694
695                 amdgpu_bo_fence(pd, fence, true);
696                 fence_put(vm->page_directory_fence);
697                 vm->page_directory_fence = fence_get(fence);
698                 fence_put(fence);
699
700         } else {
701                 amdgpu_job_free(job);
702         }
703
704         return 0;
705
706 error_free:
707         amdgpu_job_free(job);
708         return r;
709 }
710
711 /**
712  * amdgpu_vm_update_page_directory - make sure that the page directory is valid
713  *
714  * @adev: amdgpu_device pointer
715  * @vm: requested vm
718  *
719  * Allocates new page tables if necessary
720  * and updates the page directory.
721  * Returns 0 for success, error for failure.
722  */
723 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
724                                    struct amdgpu_vm *vm)
725 {
726         int r;
727
728         r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
729         if (r)
730                 return r;
731         return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
732 }
733
734 /**
735  * amdgpu_vm_update_ptes - make sure that page tables are valid
736  *
737  * @params: see amdgpu_pte_update_params definition
738  * @vm: requested vm
739  * @start: start of GPU address range
740  * @end: end of GPU address range
741  * @dst: destination address to map to, the next dst inside the function
742  * @flags: mapping flags
743  *
744  * Update the page tables in the range @start - @end.
745  */
746 static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
747                                   struct amdgpu_vm *vm,
748                                   uint64_t start, uint64_t end,
749                                   uint64_t dst, uint32_t flags)
750 {
751         const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
752
753         uint64_t cur_pe_start, cur_nptes, cur_dst;
754         uint64_t addr; /* next GPU address to be updated */
755         uint64_t pt_idx;
756         struct amdgpu_bo *pt;
757         unsigned nptes; /* next number of ptes to be updated */
758         uint64_t next_pe_start;
759
760         /* initialize the variables */
761         addr = start;
762         pt_idx = addr >> amdgpu_vm_block_size;
763         pt = vm->page_tables[pt_idx].entry.robj;
764
765         if ((addr & ~mask) == (end & ~mask))
766                 nptes = end - addr;
767         else
768                 nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
769
770         cur_pe_start = amdgpu_bo_gpu_offset(pt);
771         cur_pe_start += (addr & mask) * 8;
772         cur_nptes = nptes;
773         cur_dst = dst;
774
775         /* for next ptb*/
776         addr += nptes;
777         dst += nptes * AMDGPU_GPU_PAGE_SIZE;
778
779         /* walk over the address space and update the page tables */
780         while (addr < end) {
781                 pt_idx = addr >> amdgpu_vm_block_size;
782                 pt = vm->page_tables[pt_idx].entry.robj;
783
784                 if ((addr & ~mask) == (end & ~mask))
785                         nptes = end - addr;
786                 else
787                         nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
788
789                 next_pe_start = amdgpu_bo_gpu_offset(pt);
790                 next_pe_start += (addr & mask) * 8;
791
792                 if ((cur_pe_start + 8 * cur_nptes) == next_pe_start &&
793                     ((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) {
794                         /* The next ptb is consecutive to the current ptb.
795                          * Don't call the update function now;
796                          * the two ptbs will be updated together later.
797                         */
798                         cur_nptes += nptes;
799                 } else {
800                         params->func(params, cur_pe_start, cur_dst, cur_nptes,
801                                      AMDGPU_GPU_PAGE_SIZE, flags);
802
803                         cur_pe_start = next_pe_start;
804                         cur_nptes = nptes;
805                         cur_dst = dst;
806                 }
807
808                 /* for next ptb*/
809                 addr += nptes;
810                 dst += nptes * AMDGPU_GPU_PAGE_SIZE;
811         }
812
813         params->func(params, cur_pe_start, cur_dst, cur_nptes,
814                      AMDGPU_GPU_PAGE_SIZE, flags);
815 }
816
817 /*
818  * amdgpu_vm_frag_ptes - add fragment information to PTEs
819  *
820  * @params: see amdgpu_pte_update_params definition
821  * @vm: requested vm
822  * @start: first PTE to handle
823  * @end: last PTE to handle
824  * @dst: addr those PTEs should point to
825  * @flags: hw mapping flags
826  */
827 static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
828                                 struct amdgpu_vm *vm,
829                                 uint64_t start, uint64_t end,
830                                 uint64_t dst, uint32_t flags)
831 {
832         /**
833          * The MC L1 TLB supports variable sized pages, based on a fragment
834          * field in the PTE. When this field is set to a non-zero value, page
835          * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
836          * flags are considered valid for all PTEs within the fragment range
837          * and corresponding mappings are assumed to be physically contiguous.
838          *
839          * The L1 TLB can store a single PTE for the whole fragment,
840          * significantly increasing the space available for translation
841          * caching. This leads to large improvements in throughput when the
842          * TLB is under pressure.
843          *
844          * The L2 TLB distributes small and large fragments into two
845          * asymmetric partitions. The large fragment cache is significantly
846          * larger. Thus, we try to use large fragments wherever possible.
847          * Userspace can support this by aligning virtual base address and
848          * allocation size to the fragment size.
849          */
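        /*
         * Worked example (illustrative page numbers; assumes
         * AMDGPU_LOG2_PAGES_PER_FRAG == 4, i.e. 64KB fragments): for
         * start = 0x203 and end = 0x640 GPU pages, frag_start becomes
         * 0x210 and frag_end 0x640.  frag = __ffs(0x210 | 0x640) = 4,
         * so the middle range is written with AMDGPU_PTE_FRAG(4), giving
         * 1 << (12 + 4) = 64KB granularity, while 0x203-0x20f is handled
         * as plain 4KB pages below.
         */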
850
851         const uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG;
852
853         uint64_t frag_start = ALIGN(start, frag_align);
854         uint64_t frag_end = end & ~(frag_align - 1);
855
856         uint32_t frag;
857
858         /* system pages are not physically contiguous */
859         if (params->src || !(flags & AMDGPU_PTE_VALID) ||
860             (frag_start >= frag_end)) {
861
862                 amdgpu_vm_update_ptes(params, vm, start, end, dst, flags);
863                 return;
864         }
865
866         /* use more than 64KB fragment size if possible */
867         frag = lower_32_bits(frag_start | frag_end);
868         frag = likely(frag) ? __ffs(frag) : 31;
869
870         /* handle the 4K area at the beginning */
871         if (start != frag_start) {
872                 amdgpu_vm_update_ptes(params, vm, start, frag_start,
873                                       dst, flags);
874                 dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
875         }
876
877         /* handle the area in the middle */
878         amdgpu_vm_update_ptes(params, vm, frag_start, frag_end, dst,
879                               flags | AMDGPU_PTE_FRAG(frag));
880
881         /* handle the 4K area at the end */
882         if (frag_end != end) {
883                 dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
884                 amdgpu_vm_update_ptes(params, vm, frag_end, end, dst, flags);
885         }
886 }
887
888 /**
889  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
890  *
891  * @adev: amdgpu_device pointer
892  * @exclusive: fence we need to sync to
893  * @src: address where to copy page table entries from
894  * @pages_addr: DMA addresses to use for mapping
895  * @vm: requested vm
896  * @start: start of mapped range
897  * @last: last mapped entry
898  * @flags: flags for the entries
899  * @addr: addr to set the area to
900  * @fence: optional resulting fence
901  *
902  * Fill in the page table entries between @start and @last.
903  * Returns 0 for success, -EINVAL for failure.
904  */
905 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
906                                        struct fence *exclusive,
907                                        uint64_t src,
908                                        dma_addr_t *pages_addr,
909                                        struct amdgpu_vm *vm,
910                                        uint64_t start, uint64_t last,
911                                        uint32_t flags, uint64_t addr,
912                                        struct fence **fence)
913 {
914         struct amdgpu_ring *ring;
915         void *owner = AMDGPU_FENCE_OWNER_VM;
916         unsigned nptes, ncmds, ndw;
917         struct amdgpu_job *job;
918         struct amdgpu_pte_update_params params;
919         struct fence *f = NULL;
920         int r;
921
922         memset(&params, 0, sizeof(params));
923         params.adev = adev;
924         params.src = src;
925
926         ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
927
932         /* sync to everything on unmapping */
933         if (!(flags & AMDGPU_PTE_VALID))
934                 owner = AMDGPU_FENCE_OWNER_UNDEFINED;
935
936         nptes = last - start + 1;
937
938         /*
939          * reserve space for one command every (1 << BLOCK_SIZE)
940          *  entries or 2k dwords (whatever is smaller)
941          */
942         ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;
943
944         /* padding, etc. */
945         ndw = 64;
946
947         if (src) {
948                 /* only copy commands needed */
949                 ndw += ncmds * 7;
950
951                 params.func = amdgpu_vm_do_copy_ptes;
952
953         } else if (pages_addr) {
954                 /* copy commands needed */
955                 ndw += ncmds * 7;
956
957                 /* and also PTEs */
958                 ndw += nptes * 2;
959
960                 params.func = amdgpu_vm_do_copy_ptes;
961
962         } else {
963                 /* set page commands needed */
964                 ndw += ncmds * 10;
965
966                 /* two extra commands for begin/end of fragment */
967                 ndw += 2 * 10;
968
969                 params.func = amdgpu_vm_do_set_ptes;
970         }
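        /*
         * Sizing sketch (assumed numbers, for illustration only): for a
         * mapping of nptes = 1000 pages backed by pages_addr, with
         * amdgpu_vm_block_size = 10, ncmds = (1000 >> 10) + 1 = 1, so
         * ndw = 64 + 1 * 7 + 1000 * 2 = 2071 dwords and the IB below is
         * allocated with 2071 * 4 bytes.
         */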
971
972         r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
973         if (r)
974                 return r;
975
976         params.ib = &job->ibs[0];
977
978         if (!src && pages_addr) {
979                 uint64_t *pte;
980                 unsigned i;
981
982                 /* Put the PTEs at the end of the IB. */
983                 i = ndw - nptes * 2;
984                 pte = (uint64_t *)&(job->ibs->ptr[i]);
985                 params.src = job->ibs->gpu_addr + i * 4;
986
987                 for (i = 0; i < nptes; ++i) {
988                         pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
989                                                     AMDGPU_GPU_PAGE_SIZE);
990                         pte[i] |= flags;
991                 }
992         }
993
994         r = amdgpu_sync_fence(adev, &job->sync, exclusive);
995         if (r)
996                 goto error_free;
997
998         r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
999                              owner);
1000         if (r)
1001                 goto error_free;
1002
1003         r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
1004         if (r)
1005                 goto error_free;
1006
1007         amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
1008
1009         amdgpu_ring_pad_ib(ring, params.ib);
1010         WARN_ON(params.ib->length_dw > ndw);
1011         r = amdgpu_job_submit(job, ring, &vm->entity,
1012                               AMDGPU_FENCE_OWNER_VM, &f);
1013         if (r)
1014                 goto error_free;
1015
1016         amdgpu_bo_fence(vm->page_directory, f, true);
1017         if (fence) {
1018                 fence_put(*fence);
1019                 *fence = fence_get(f);
1020         }
1021         fence_put(f);
1022         return 0;
1023
1024 error_free:
1025         amdgpu_job_free(job);
1026         return r;
1027 }
1028
1029 /**
1030  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1031  *
1032  * @adev: amdgpu_device pointer
1033  * @exclusive: fence we need to sync to
1034  * @gtt_flags: flags as they are used for GTT
1035  * @pages_addr: DMA addresses to use for mapping
1036  * @vm: requested vm
1037  * @mapping: mapped range and flags to use for the update
1038  * @addr: addr to set the area to
1039  * @flags: HW flags for the mapping
1040  * @fence: optional resulting fence
1041  *
1042  * Split the mapping into smaller chunks so that each update fits
1043  * into a SDMA IB.
1044  * Returns 0 for success, -EINVAL for failure.
1045  */
1046 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1047                                       struct fence *exclusive,
1048                                       uint32_t gtt_flags,
1049                                       dma_addr_t *pages_addr,
1050                                       struct amdgpu_vm *vm,
1051                                       struct amdgpu_bo_va_mapping *mapping,
1052                                       uint32_t flags, uint64_t addr,
1053                                       struct fence **fence)
1054 {
1055         const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
1056
1057         uint64_t src = 0, start = mapping->it.start;
1058         int r;
1059
1060         /* Normally, bo_va->flags only contains the READABLE and WRITEABLE bits,
1061          * but just in case, filter the flags here first.
1062          */
1063         if (!(mapping->flags & AMDGPU_PTE_READABLE))
1064                 flags &= ~AMDGPU_PTE_READABLE;
1065         if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1066                 flags &= ~AMDGPU_PTE_WRITEABLE;
1067
1068         trace_amdgpu_vm_bo_update(mapping);
1069
1070         if (pages_addr) {
1071                 if (flags == gtt_flags)
1072                         src = adev->gart.table_addr + (addr >> 12) * 8;
1073                 addr = 0;
1074         }
1075         addr += mapping->offset;
1076
1077         if (!pages_addr || src)
1078                 return amdgpu_vm_bo_update_mapping(adev, exclusive,
1079                                                    src, pages_addr, vm,
1080                                                    start, mapping->it.last,
1081                                                    flags, addr, fence);
1082
1083         while (start != mapping->it.last + 1) {
1084                 uint64_t last;
1085
1086                 last = min((uint64_t)mapping->it.last, start + max_size - 1);
1087                 r = amdgpu_vm_bo_update_mapping(adev, exclusive,
1088                                                 src, pages_addr, vm,
1089                                                 start, last, flags, addr,
1090                                                 fence);
1091                 if (r)
1092                         return r;
1093
1094                 start = last + 1;
1095                 addr += max_size * AMDGPU_GPU_PAGE_SIZE;
1096         }
1097
1098         return 0;
1099 }
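/*
 * Splitting example (illustrative sizes only): max_size above is
 * 64MB / 4KB = 16384 GPU pages per update, so a 200MB mapping
 * (51200 pages) is flushed out as four calls to
 * amdgpu_vm_bo_update_mapping() of at most 16384 pages each.
 */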
1100
1101 /**
1102  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1103  *
1104  * @adev: amdgpu_device pointer
1105  * @bo_va: requested BO and VM object
1106  * @mem: ttm mem
1107  *
1108  * Fill in the page table entries for @bo_va.
1109  * Returns 0 for success, -EINVAL for failure.
1110  *
1111  * Object has to be reserved and mutex must be locked!
1112  */
1113 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1114                         struct amdgpu_bo_va *bo_va,
1115                         struct ttm_mem_reg *mem)
1116 {
1117         struct amdgpu_vm *vm = bo_va->vm;
1118         struct amdgpu_bo_va_mapping *mapping;
1119         dma_addr_t *pages_addr = NULL;
1120         uint32_t gtt_flags, flags;
1121         struct fence *exclusive;
1122         uint64_t addr;
1123         int r;
1124
1125         if (mem) {
1126                 struct ttm_dma_tt *ttm;
1127
1128                 addr = (u64)mem->start << PAGE_SHIFT;
1129                 switch (mem->mem_type) {
1130                 case TTM_PL_TT:
1131                         ttm = container_of(bo_va->bo->tbo.ttm, struct
1132                                            ttm_dma_tt, ttm);
1133                         pages_addr = ttm->dma_address;
1134                         break;
1135
1136                 case TTM_PL_VRAM:
1137                         addr += adev->vm_manager.vram_base_offset;
1138                         break;
1139
1140                 default:
1141                         break;
1142                 }
1143
1144                 exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
1145         } else {
1146                 addr = 0;
1147                 exclusive = NULL;
1148         }
1149
1150         flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
1151         gtt_flags = (adev == bo_va->bo->adev) ? flags : 0;
1152
1153         spin_lock(&vm->status_lock);
1154         if (!list_empty(&bo_va->vm_status))
1155                 list_splice_init(&bo_va->valids, &bo_va->invalids);
1156         spin_unlock(&vm->status_lock);
1157
1158         list_for_each_entry(mapping, &bo_va->invalids, list) {
1159                 r = amdgpu_vm_bo_split_mapping(adev, exclusive,
1160                                                gtt_flags, pages_addr, vm,
1161                                                mapping, flags, addr,
1162                                                &bo_va->last_pt_update);
1163                 if (r)
1164                         return r;
1165         }
1166
1167         if (trace_amdgpu_vm_bo_mapping_enabled()) {
1168                 list_for_each_entry(mapping, &bo_va->valids, list)
1169                         trace_amdgpu_vm_bo_mapping(mapping);
1170
1171                 list_for_each_entry(mapping, &bo_va->invalids, list)
1172                         trace_amdgpu_vm_bo_mapping(mapping);
1173         }
1174
1175         spin_lock(&vm->status_lock);
1176         list_splice_init(&bo_va->invalids, &bo_va->valids);
1177         list_del_init(&bo_va->vm_status);
1178         if (!mem)
1179                 list_add(&bo_va->vm_status, &vm->cleared);
1180         spin_unlock(&vm->status_lock);
1181
1182         return 0;
1183 }
1184
1185 /**
1186  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1187  *
1188  * @adev: amdgpu_device pointer
1189  * @vm: requested vm
1190  *
1191  * Make sure all freed BOs are cleared in the PT.
1192  * Returns 0 for success.
1193  *
1194  * PTs have to be reserved and mutex must be locked!
1195  */
1196 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1197                           struct amdgpu_vm *vm)
1198 {
1199         struct amdgpu_bo_va_mapping *mapping;
1200         int r;
1201
1202         while (!list_empty(&vm->freed)) {
1203                 mapping = list_first_entry(&vm->freed,
1204                         struct amdgpu_bo_va_mapping, list);
1205                 list_del(&mapping->list);
1206
1207                 r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
1208                                                0, 0, NULL);
1209                 kfree(mapping);
1210                 if (r)
1211                         return r;
1212
1213         }
1214         return 0;
1215
1216 }
1217
1218 /**
1219  * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
1220  *
1221  * @adev: amdgpu_device pointer
1222  * @vm: requested vm
 * @sync: sync object to add fences to
1223  *
1224  * Make sure all invalidated BOs are cleared in the PT.
1225  * Returns 0 for success.
1226  *
1227  * PTs have to be reserved and mutex must be locked!
1228  */
1229 int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
1230                              struct amdgpu_vm *vm, struct amdgpu_sync *sync)
1231 {
1232         struct amdgpu_bo_va *bo_va = NULL;
1233         int r = 0;
1234
1235         spin_lock(&vm->status_lock);
1236         while (!list_empty(&vm->invalidated)) {
1237                 bo_va = list_first_entry(&vm->invalidated,
1238                         struct amdgpu_bo_va, vm_status);
1239                 spin_unlock(&vm->status_lock);
1240
1241                 r = amdgpu_vm_bo_update(adev, bo_va, NULL);
1242                 if (r)
1243                         return r;
1244
1245                 spin_lock(&vm->status_lock);
1246         }
1247         spin_unlock(&vm->status_lock);
1248
1249         if (bo_va)
1250                 r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
1251
1252         return r;
1253 }
1254
1255 /**
1256  * amdgpu_vm_bo_add - add a bo to a specific vm
1257  *
1258  * @adev: amdgpu_device pointer
1259  * @vm: requested vm
1260  * @bo: amdgpu buffer object
1261  *
1262  * Add @bo into the requested vm.
1263  * Add @bo to the list of bos associated with the vm.
1264  * Returns newly added bo_va or NULL for failure.
1265  *
1266  * Object has to be reserved!
1267  */
1268 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1269                                       struct amdgpu_vm *vm,
1270                                       struct amdgpu_bo *bo)
1271 {
1272         struct amdgpu_bo_va *bo_va;
1273
1274         bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1275         if (bo_va == NULL) {
1276                 return NULL;
1277         }
1278         bo_va->vm = vm;
1279         bo_va->bo = bo;
1280         bo_va->ref_count = 1;
1281         INIT_LIST_HEAD(&bo_va->bo_list);
1282         INIT_LIST_HEAD(&bo_va->valids);
1283         INIT_LIST_HEAD(&bo_va->invalids);
1284         INIT_LIST_HEAD(&bo_va->vm_status);
1285
1286         list_add_tail(&bo_va->bo_list, &bo->va);
1287
1288         return bo_va;
1289 }
1290
1291 /**
1292  * amdgpu_vm_bo_map - map bo inside a vm
1293  *
1294  * @adev: amdgpu_device pointer
1295  * @bo_va: bo_va to store the address
1296  * @saddr: where to map the BO
1297  * @offset: requested offset in the BO
 * @size: size of the mapping, in bytes
1298  * @flags: attributes of pages (read/write/valid/etc.)
1299  *
1300  * Add a mapping of the BO at the specified addr into the VM.
1301  * Returns 0 for success, error for failure.
1302  *
1303  * Object has to be reserved and unreserved outside!
1304  */
1305 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1306                      struct amdgpu_bo_va *bo_va,
1307                      uint64_t saddr, uint64_t offset,
1308                      uint64_t size, uint32_t flags)
1309 {
1310         struct amdgpu_bo_va_mapping *mapping;
1311         struct amdgpu_vm *vm = bo_va->vm;
1312         struct interval_tree_node *it;
1313         unsigned last_pfn, pt_idx;
1314         uint64_t eaddr;
1315         int r;
1316
1317         /* validate the parameters */
1318         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1319             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
1320                 return -EINVAL;
1321
1322         /* make sure object fit at this offset */
1323         eaddr = saddr + size - 1;
1324         if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
1325                 return -EINVAL;
1326
1327         last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
1328         if (last_pfn >= adev->vm_manager.max_pfn) {
1329                 dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
1330                         last_pfn, adev->vm_manager.max_pfn);
1331                 return -EINVAL;
1332         }
1333
1334         saddr /= AMDGPU_GPU_PAGE_SIZE;
1335         eaddr /= AMDGPU_GPU_PAGE_SIZE;
1336
1337         it = interval_tree_iter_first(&vm->va, saddr, eaddr);
1338         if (it) {
1339                 struct amdgpu_bo_va_mapping *tmp;
1340                 tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
1341                 /* bo and tmp overlap, invalid addr */
1342                 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1343                         "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
1344                         tmp->it.start, tmp->it.last + 1);
1345                 r = -EINVAL;
1346                 goto error;
1347         }
1348
1349         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1350         if (!mapping) {
1351                 r = -ENOMEM;
1352                 goto error;
1353         }
1354
1355         INIT_LIST_HEAD(&mapping->list);
1356         mapping->it.start = saddr;
1357         mapping->it.last = eaddr;
1358         mapping->offset = offset;
1359         mapping->flags = flags;
1360
1361         list_add(&mapping->list, &bo_va->invalids);
1362         interval_tree_insert(&mapping->it, &vm->va);
1363
1364         /* Make sure the page tables are allocated */
1365         saddr >>= amdgpu_vm_block_size;
1366         eaddr >>= amdgpu_vm_block_size;
1367
1368         BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));
1369
1370         if (eaddr > vm->max_pde_used)
1371                 vm->max_pde_used = eaddr;
1372
1373         /* walk over the address space and allocate the page tables */
1374         for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
1375                 struct reservation_object *resv = vm->page_directory->tbo.resv;
1376                 struct amdgpu_bo_list_entry *entry;
1377                 struct amdgpu_bo *pt;
1378
1379                 entry = &vm->page_tables[pt_idx].entry;
1380                 if (entry->robj)
1381                         continue;
1382
1383                 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
1384                                      AMDGPU_GPU_PAGE_SIZE, true,
1385                                      AMDGPU_GEM_DOMAIN_VRAM,
1386                                      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
1387                                      AMDGPU_GEM_CREATE_SHADOW,
1388                                      NULL, resv, &pt);
1389                 if (r)
1390                         goto error_free;
1391
1392                 /* Keep a reference to the page directory so that it is
1393                  * not freed before its page tables.
1394                  */
1395                 pt->parent = amdgpu_bo_ref(vm->page_directory);
1396
1397                 r = amdgpu_vm_clear_bo(adev, vm, pt);
1398                 if (r) {
1399                         amdgpu_bo_unref(&pt);
1400                         goto error_free;
1401                 }
1402
1403                 entry->robj = pt;
1404                 entry->priority = 0;
1405                 entry->tv.bo = &entry->robj->tbo;
1406                 entry->tv.shared = true;
1407                 entry->user_pages = NULL;
1408                 vm->page_tables[pt_idx].addr = 0;
1409         }
1410
1411         return 0;
1412
1413 error_free:
1414         list_del(&mapping->list);
1415         interval_tree_remove(&mapping->it, &vm->va);
1416         trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1417         kfree(mapping);
1418
1419 error:
1420         return r;
1421 }
1422
1423 /**
1424  * amdgpu_vm_bo_unmap - remove bo mapping from vm
1425  *
1426  * @adev: amdgpu_device pointer
1427  * @bo_va: bo_va to remove the address from
1428  * @saddr: where the BO is mapped
1429  *
1430  * Remove a mapping of the BO at the specified addr from the VM.
1431  * Returns 0 for success, error for failure.
1432  *
1433  * Object has to be reserved and unreserved outside!
1434  */
1435 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1436                        struct amdgpu_bo_va *bo_va,
1437                        uint64_t saddr)
1438 {
1439         struct amdgpu_bo_va_mapping *mapping;
1440         struct amdgpu_vm *vm = bo_va->vm;
1441         bool valid = true;
1442
1443         saddr /= AMDGPU_GPU_PAGE_SIZE;
1444
1445         list_for_each_entry(mapping, &bo_va->valids, list) {
1446                 if (mapping->it.start == saddr)
1447                         break;
1448         }
1449
1450         if (&mapping->list == &bo_va->valids) {
1451                 valid = false;
1452
1453                 list_for_each_entry(mapping, &bo_va->invalids, list) {
1454                         if (mapping->it.start == saddr)
1455                                 break;
1456                 }
1457
1458                 if (&mapping->list == &bo_va->invalids)
1459                         return -ENOENT;
1460         }
1461
1462         list_del(&mapping->list);
1463         interval_tree_remove(&mapping->it, &vm->va);
1464         trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1465
1466         if (valid)
1467                 list_add(&mapping->list, &vm->freed);
1468         else
1469                 kfree(mapping);
1470
1471         return 0;
1472 }
1473
1474 /**
1475  * amdgpu_vm_bo_rmv - remove a bo from a specific vm
1476  *
1477  * @adev: amdgpu_device pointer
1478  * @bo_va: requested bo_va
1479  *
1480  * Remove @bo_va->bo from the requested vm.
1481  *
1482  * Object has to be reserved!
1483  */
1484 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
1485                       struct amdgpu_bo_va *bo_va)
1486 {
1487         struct amdgpu_bo_va_mapping *mapping, *next;
1488         struct amdgpu_vm *vm = bo_va->vm;
1489
1490         list_del(&bo_va->bo_list);
1491
1492         spin_lock(&vm->status_lock);
1493         list_del(&bo_va->vm_status);
1494         spin_unlock(&vm->status_lock);
1495
1496         list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
1497                 list_del(&mapping->list);
1498                 interval_tree_remove(&mapping->it, &vm->va);
1499                 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1500                 list_add(&mapping->list, &vm->freed);
1501         }
1502         list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
1503                 list_del(&mapping->list);
1504                 interval_tree_remove(&mapping->it, &vm->va);
1505                 kfree(mapping);
1506         }
1507
1508         fence_put(bo_va->last_pt_update);
1509         kfree(bo_va);
1510 }
1511
1512 /**
1513  * amdgpu_vm_bo_invalidate - mark the bo as invalid
1514  *
1515  * @adev: amdgpu_device pointer
1517  * @bo: amdgpu buffer object
1518  *
1519  * Mark @bo as invalid.
1520  */
1521 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
1522                              struct amdgpu_bo *bo)
1523 {
1524         struct amdgpu_bo_va *bo_va;
1525
1526         list_for_each_entry(bo_va, &bo->va, bo_list) {
1527                 spin_lock(&bo_va->vm->status_lock);
1528                 if (list_empty(&bo_va->vm_status))
1529                         list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
1530                 spin_unlock(&bo_va->vm->status_lock);
1531         }
1532 }
1533
1534 /**
1535  * amdgpu_vm_init - initialize a vm instance
1536  *
1537  * @adev: amdgpu_device pointer
1538  * @vm: requested vm
1539  *
1540  * Init @vm fields.
1541  */
1542 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1543 {
1544         const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
1545                 AMDGPU_VM_PTE_COUNT * 8);
1546         unsigned pd_size, pd_entries;
1547         unsigned ring_instance;
1548         struct amdgpu_ring *ring;
1549         struct amd_sched_rq *rq;
1550         int i, r;
1551
1552         for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
1553                 vm->ids[i] = NULL;
1554         vm->va = RB_ROOT;
1555         vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
1556         spin_lock_init(&vm->status_lock);
1557         INIT_LIST_HEAD(&vm->invalidated);
1558         INIT_LIST_HEAD(&vm->cleared);
1559         INIT_LIST_HEAD(&vm->freed);
1560
1561         pd_size = amdgpu_vm_directory_size(adev);
1562         pd_entries = amdgpu_vm_num_pdes(adev);
1563
1564         /* allocate page table array */
1565         vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
1566         if (vm->page_tables == NULL) {
1567                 DRM_ERROR("Cannot allocate memory for page table array\n");
1568                 return -ENOMEM;
1569         }
1570
1571         /* create scheduler entity for page table updates */
1572
1573         ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
1574         ring_instance %= adev->vm_manager.vm_pte_num_rings;
1575         ring = adev->vm_manager.vm_pte_rings[ring_instance];
1576         rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
1577         r = amd_sched_entity_init(&ring->sched, &vm->entity,
1578                                   rq, amdgpu_sched_jobs);
1579         if (r)
1580                 return r;
1581
1582         vm->page_directory_fence = NULL;
1583
1584         r = amdgpu_bo_create(adev, pd_size, align, true,
1585                              AMDGPU_GEM_DOMAIN_VRAM,
1586                              AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
1587                              AMDGPU_GEM_CREATE_SHADOW,
1588                              NULL, NULL, &vm->page_directory);
1589         if (r)
1590                 goto error_free_sched_entity;
1591
1592         r = amdgpu_bo_reserve(vm->page_directory, false);
1593         if (r)
1594                 goto error_free_page_directory;
1595
1596         r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
1597         amdgpu_bo_unreserve(vm->page_directory);
1598         if (r)
1599                 goto error_free_page_directory;
1600         vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
1601
1602         return 0;
1603
1604 error_free_page_directory:
1605         amdgpu_bo_unref(&vm->page_directory);
1606         vm->page_directory = NULL;
1607
1608 error_free_sched_entity:
1609         amd_sched_entity_fini(&ring->sched, &vm->entity);
1610
1611         return r;
1612 }
1613
1614 /**
1615  * amdgpu_vm_fini - tear down a vm instance
1616  *
1617  * @adev: amdgpu_device pointer
1618  * @vm: requested vm
1619  *
1620  * Tear down @vm.
1621  * Unbind the VM and remove all bos from the vm bo list
1622  */
1623 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1624 {
1625         struct amdgpu_bo_va_mapping *mapping, *tmp;
1626         int i;
1627
1628         amd_sched_entity_fini(vm->entity.sched, &vm->entity);
1629
1630         if (!RB_EMPTY_ROOT(&vm->va)) {
1631                 dev_err(adev->dev, "still active bo inside vm\n");
1632         }
1633         rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
1634                 list_del(&mapping->list);
1635                 interval_tree_remove(&mapping->it, &vm->va);
1636                 kfree(mapping);
1637         }
1638         list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
1639                 list_del(&mapping->list);
1640                 kfree(mapping);
1641         }
1642
1643         for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
1644                 if (vm->page_tables[i].entry.robj &&
1645                     vm->page_tables[i].entry.robj->shadow)
1646                         amdgpu_bo_unref(&vm->page_tables[i].entry.robj->shadow);
1647                 amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
1648         }
1649         drm_free_large(vm->page_tables);
1650
1651         if (vm->page_directory->shadow)
1652                 amdgpu_bo_unref(&vm->page_directory->shadow);
1653         amdgpu_bo_unref(&vm->page_directory);
1654         fence_put(vm->page_directory_fence);
1655 }
1656
1657 /**
1658  * amdgpu_vm_manager_init - init the VM manager
1659  *
1660  * @adev: amdgpu_device pointer
1661  *
1662  * Initialize the VM manager structures
1663  */
1664 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
1665 {
1666         unsigned i;
1667
1668         INIT_LIST_HEAD(&adev->vm_manager.ids_lru);
1669
1670         /* skip over VMID 0, since it is the system VM */
1671         for (i = 1; i < adev->vm_manager.num_ids; ++i) {
1672                 amdgpu_vm_reset_id(adev, i);
1673                 amdgpu_sync_create(&adev->vm_manager.ids[i].active);
1674                 list_add_tail(&adev->vm_manager.ids[i].list,
1675                               &adev->vm_manager.ids_lru);
1676         }
1677
1678         adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
1679         for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
1680                 adev->vm_manager.seqno[i] = 0;
1681
1682         atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
1683         atomic64_set(&adev->vm_manager.client_counter, 0);
1684 }
1685
1686 /**
1687  * amdgpu_vm_manager_fini - cleanup VM manager
1688  *
1689  * @adev: amdgpu_device pointer
1690  *
1691  * Cleanup the VM manager and free resources.
1692  */
1693 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
1694 {
1695         unsigned i;
1696
1697         for (i = 0; i < AMDGPU_NUM_VM; ++i) {
1698                 struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
1699
1700                 fence_put(adev->vm_manager.ids[i].first);
1701                 amdgpu_sync_free(&adev->vm_manager.ids[i].active);
1702                 fence_put(id->flushed_updates);
1703         }
1704 }