2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
28 #include <linux/fence-array.h>
30 #include <drm/amdgpu_drm.h>
32 #include "amdgpu_trace.h"
36 * GPUVM is similar to the legacy gart on older asics, however
37 * rather than there being a single global gart table
38 * for the entire GPU, there are multiple VM page tables active
39 * at any given time. The VM page tables can contain a mix of
40 * VRAM pages and system memory pages, and system memory pages
41 * can be mapped as snooped (cached system pages) or unsnooped
42 * (uncached system pages).
43 * Each VM has an ID associated with it and there is a page table
44 * associated with each VMID. When executing a command buffer,
45 * the kernel tells the ring what VMID to use for that command
46 * buffer. VMIDs are allocated dynamically as commands are submitted.
47 * The userspace drivers maintain their own address space and the kernel
48 * sets up their page tables accordingly when they submit their
49 * command buffers and a VMID is assigned.
50 * Cayman/Trinity support up to 8 active VMs at any given time;
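/*
 * Illustrative sketch (not part of the driver): how a GPU virtual address
 * decomposes into a page directory index and a page table index under the
 * scheme described above. The helper names are hypothetical; they assume 4KB
 * GPU pages and the amdgpu_vm_block_size module parameter used throughout
 * this file.
 */
static inline uint64_t example_pde_index(uint64_t gpu_addr)
{
	/* GPU page frame number, then select the page table (PDE) */
	return (gpu_addr >> 12) >> amdgpu_vm_block_size;
}

static inline uint64_t example_pte_index(uint64_t gpu_addr)
{
	/* offset of the PTE inside the selected page table */
	return (gpu_addr >> 12) & ((1ULL << amdgpu_vm_block_size) - 1);
}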
54 /* Local structure. Encapsulate some VM table update parameters to reduce
55 * the number of function parameters
57 struct amdgpu_pte_update_params {
58 /* amdgpu device we do this update for */
59 struct amdgpu_device *adev;
60 /* address where to copy page table entries from */
62 /* indirect buffer to fill with commands */
64 /* Function which actually does the update */
65 void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
66 uint64_t addr, unsigned count, uint32_t incr,
71 * amdgpu_vm_num_pdes - return the number of page directory entries
73 * @adev: amdgpu_device pointer
75 * Calculate the number of page directory entries.
77 static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
79 return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
83 * amdgpu_vm_directory_size - returns the size of the page directory in bytes
85 * @adev: amdgpu_device pointer
87 * Calculate the size of the page directory in bytes.
89 static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
91 return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
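/*
 * Worked example (hypothetical numbers): with the default 64GB VM size the
 * manager exposes max_pfn = 16M GPU pages; with amdgpu_vm_block_size = 9
 * that is 32768 PDEs, i.e. a 256KB page directory at 8 bytes per entry,
 * which AMDGPU_GPU_PAGE_ALIGN leaves unchanged since it is already page
 * aligned.
 */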
95 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
97 * @vm: vm providing the BOs
98 * @validated: head of validation list
99 * @entry: entry to add
101 * Add the page directory to the list of BOs to
102 * validate for command submission.
104 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
105 struct list_head *validated,
106 struct amdgpu_bo_list_entry *entry)
108 entry->robj = vm->page_directory;
110 entry->tv.bo = &vm->page_directory->tbo;
111 entry->tv.shared = true;
112 entry->user_pages = NULL;
113 list_add(&entry->tv.head, validated);
117 * amdgpu_vm_get_pt_bos - add the VM page table BOs to a duplicates list
119 * @adev: amdgpu device pointer
120 * @vm: vm providing the BOs
121 * @duplicates: head of duplicates list
123 * Add the page table BOs to the duplicates list
124 * for command submission.
126 void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
127 struct list_head *duplicates)
129 uint64_t num_evictions;
132 /* We only need to validate the page tables
133 * if they aren't already valid.
135 num_evictions = atomic64_read(&adev->num_evictions);
136 if (num_evictions == vm->last_eviction_counter)
139 /* add the vm page table to the list */
140 for (i = 0; i <= vm->max_pde_used; ++i) {
141 struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
146 list_add(&entry->tv.head, duplicates);
152 * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
154 * @adev: amdgpu device instance
155 * @vm: vm providing the BOs
157 * Move the PT BOs to the tail of the LRU.
159 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
160 struct amdgpu_vm *vm)
162 struct ttm_bo_global *glob = adev->mman.bdev.glob;
165 spin_lock(&glob->lru_lock);
166 for (i = 0; i <= vm->max_pde_used; ++i) {
167 struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
172 ttm_bo_move_to_lru_tail(&entry->robj->tbo);
174 spin_unlock(&glob->lru_lock);
177 static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
178 struct amdgpu_vm_id *id)
180 return id->current_gpu_reset_count !=
181 atomic_read(&adev->gpu_reset_counter);
185 * amdgpu_vm_grab_id - allocate the next free VMID
187 * @vm: vm to allocate id for
188 * @ring: ring we want to submit job to
189 * @sync: sync object where we add dependencies
190 * @fence: fence protecting ID from reuse
192 * Allocate an id for the vm, adding fences to the sync obj as necessary.
194 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
195 struct amdgpu_sync *sync, struct fence *fence,
196 struct amdgpu_job *job)
198 struct amdgpu_device *adev = ring->adev;
199 uint64_t fence_context = adev->fence_context + ring->idx;
200 struct fence *updates = sync->last_vm_update;
201 struct amdgpu_vm_id *id, *idle;
202 struct fence **fences;
206 fences = kmalloc_array(adev->vm_manager.num_ids, sizeof(void *),
211 mutex_lock(&adev->vm_manager.lock);
213 /* Check if we have an idle VMID */
215 list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
216 fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
222 /* If we can't find an idle VMID to use, wait till one becomes available */
223 if (&idle->list == &adev->vm_manager.ids_lru) {
224 u64 fence_context = adev->vm_manager.fence_context + ring->idx;
225 unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
226 struct fence_array *array;
229 for (j = 0; j < i; ++j)
230 fence_get(fences[j]);
232 array = fence_array_create(i, fences, fence_context,
235 for (j = 0; j < i; ++j)
236 fence_put(fences[j]);
243 r = amdgpu_sync_fence(ring->adev, sync, &array->base);
244 fence_put(&array->base);
248 mutex_unlock(&adev->vm_manager.lock);
254 job->vm_needs_flush = true;
255 /* Check if we can use a VMID already assigned to this VM */
258 struct fence *flushed;
261 if (i == AMDGPU_MAX_RINGS)
264 /* Check all the prerequisites to using this VMID */
267 if (amdgpu_vm_is_gpu_reset(adev, id))
270 if (atomic64_read(&id->owner) != vm->client_id)
273 if (job->vm_pd_addr != id->pd_gpu_addr)
279 if (id->last_flush->context != fence_context &&
280 !fence_is_signaled(id->last_flush))
283 flushed = id->flushed_updates;
285 (!flushed || fence_is_later(updates, flushed)))
288 /* Good, we can use this VMID. Remember this submission as
291 r = amdgpu_sync_fence(ring->adev, &id->active, fence);
295 id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
296 list_move_tail(&id->list, &adev->vm_manager.ids_lru);
297 vm->ids[ring->idx] = id;
299 job->vm_id = id - adev->vm_manager.ids;
300 job->vm_needs_flush = false;
301 trace_amdgpu_vm_grab_id(vm, ring->idx, job);
303 mutex_unlock(&adev->vm_manager.lock);
306 } while (i != ring->idx);
308 /* Still no ID to use? Then use the idle one found earlier */
311 /* Remember this submission as user of the VMID */
312 r = amdgpu_sync_fence(ring->adev, &id->active, fence);
316 fence_put(id->first);
317 id->first = fence_get(fence);
319 fence_put(id->last_flush);
320 id->last_flush = NULL;
322 fence_put(id->flushed_updates);
323 id->flushed_updates = fence_get(updates);
325 id->pd_gpu_addr = job->vm_pd_addr;
326 id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
327 list_move_tail(&id->list, &adev->vm_manager.ids_lru);
328 atomic64_set(&id->owner, vm->client_id);
329 vm->ids[ring->idx] = id;
331 job->vm_id = id - adev->vm_manager.ids;
332 trace_amdgpu_vm_grab_id(vm, ring->idx, job);
335 mutex_unlock(&adev->vm_manager.lock);
339 static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
341 struct amdgpu_device *adev = ring->adev;
342 const struct amdgpu_ip_block_version *ip_block;
344 if (ring->type != AMDGPU_RING_TYPE_COMPUTE)
345 /* only compute rings */
348 ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
352 if (ip_block->major <= 7) {
353 /* gfx7 has no workaround */
355 } else if (ip_block->major == 8) {
356 if (adev->gfx.mec_fw_version >= 673)
357 /* gfx8 is fixed in MEC firmware 673 */
366 * amdgpu_vm_flush - hardware flush the vm
368 * @ring: ring to use for flush
369 * @vm_id: vmid number to use
370 * @pd_addr: address of the page directory
372 * Emit a VM flush when it is necessary.
374 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
376 struct amdgpu_device *adev = ring->adev;
377 struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
378 bool gds_switch_needed = ring->funcs->emit_gds_switch && (
379 id->gds_base != job->gds_base ||
380 id->gds_size != job->gds_size ||
381 id->gws_base != job->gws_base ||
382 id->gws_size != job->gws_size ||
383 id->oa_base != job->oa_base ||
384 id->oa_size != job->oa_size);
387 if (ring->funcs->emit_pipeline_sync && (
388 job->vm_needs_flush || gds_switch_needed ||
389 amdgpu_vm_ring_has_compute_vm_bug(ring)))
390 amdgpu_ring_emit_pipeline_sync(ring);
392 if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
393 amdgpu_vm_is_gpu_reset(adev, id))) {
396 trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
397 amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
399 r = amdgpu_fence_emit(ring, &fence);
403 mutex_lock(&adev->vm_manager.lock);
404 fence_put(id->last_flush);
405 id->last_flush = fence;
406 mutex_unlock(&adev->vm_manager.lock);
409 if (gds_switch_needed) {
410 id->gds_base = job->gds_base;
411 id->gds_size = job->gds_size;
412 id->gws_base = job->gws_base;
413 id->gws_size = job->gws_size;
414 id->oa_base = job->oa_base;
415 id->oa_size = job->oa_size;
416 amdgpu_ring_emit_gds_switch(ring, job->vm_id,
417 job->gds_base, job->gds_size,
418 job->gws_base, job->gws_size,
419 job->oa_base, job->oa_size);
426 * amdgpu_vm_reset_id - reset VMID to zero
428 * @adev: amdgpu device structure
429 * @vm_id: vmid number to use
431 * Reset saved GDS, GWS and OA to force switch on next flush.
433 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
435 struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
446 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
449 * @bo: requested buffer object
451 * Find @bo inside the requested vm.
452 * Search inside the @bo's vm list for the requested vm.
453 * Returns the found bo_va or NULL if none is found
455 * Object has to be reserved!
457 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
458 struct amdgpu_bo *bo)
460 struct amdgpu_bo_va *bo_va;
462 list_for_each_entry(bo_va, &bo->va, bo_list) {
463 if (bo_va->vm == vm) {
471 * amdgpu_vm_do_set_ptes - helper to call the right asic function
473 * @params: see amdgpu_pte_update_params definition
474 * @pe: addr of the page entry
475 * @addr: dst addr to write into pe
476 * @count: number of page entries to update
477 * @incr: increase next addr by incr bytes
478 * @flags: hw access flags
480 * Traces the parameters and calls the right asic functions
481 * to setup the page table using the DMA.
483 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
484 uint64_t pe, uint64_t addr,
485 unsigned count, uint32_t incr,
488 trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
491 amdgpu_vm_write_pte(params->adev, params->ib, pe,
492 addr | flags, count, incr);
495 amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
501 * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
503 * @params: see amdgpu_pte_update_params definition
504 * @pe: addr of the page entry
505 * @addr: dst addr to write into pe
506 * @count: number of page entries to update
507 * @incr: increase next addr by incr bytes
508 * @flags: hw access flags
510 * Traces the parameters and calls the DMA function to copy the PTEs.
512 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
513 uint64_t pe, uint64_t addr,
514 unsigned count, uint32_t incr,
517 trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
519 amdgpu_vm_copy_pte(params->adev, params->ib, pe,
520 (params->src + (addr >> 12) * 8), count);
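/*
 * Note on the source indexing above: @addr is a byte address, so addr >> 12
 * selects the GPU page number and, at 8 bytes per PTE, (addr >> 12) * 8 is
 * the byte offset of the corresponding prepared PTE inside params->src.
 */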
524 * amdgpu_vm_clear_bo - initially clear the page dir/table
526 * @adev: amdgpu_device pointer
529 * The BO has to be reserved before calling this.
531 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
532 struct amdgpu_vm *vm,
533 struct amdgpu_bo *bo)
535 struct amdgpu_ring *ring;
536 struct fence *fence = NULL;
537 struct amdgpu_job *job;
538 struct amdgpu_pte_update_params params;
543 ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
545 r = reservation_object_reserve_shared(bo->tbo.resv);
549 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
553 addr = amdgpu_bo_gpu_offset(bo);
554 entries = amdgpu_bo_size(bo) / 8;
556 r = amdgpu_job_alloc_with_ib(adev, 64, &job);
560 memset(&params, 0, sizeof(params));
562 params.ib = &job->ibs[0];
563 amdgpu_vm_do_set_ptes(&params, addr, 0, entries, 0, 0);
564 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
566 WARN_ON(job->ibs[0].length_dw > 64);
567 r = amdgpu_job_submit(job, ring, &vm->entity,
568 AMDGPU_FENCE_OWNER_VM, &fence);
572 amdgpu_bo_fence(bo, fence, true);
577 amdgpu_job_free(job);
584 * amdgpu_vm_map_gart - Resolve gart mapping of addr
586 * @pages_addr: optional DMA address to use for lookup
587 * @addr: the unmapped addr
589 * Look up the physical address of the page that the pte resolves
590 * to and return the pointer for the page table entry.
592 static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
596 /* page table offset */
597 result = pages_addr[addr >> PAGE_SHIFT];
599 /* in case cpu page size != gpu page size */
600 result |= addr & (~PAGE_MASK);
602 result &= 0xFFFFFFFFFFFFF000ULL;
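/*
 * Worked example (hypothetical 64KB CPU pages): for addr 0x2345000 the
 * lookup uses pages_addr[0x234] (addr >> PAGE_SHIFT), keeps the 0x5000
 * offset inside the CPU page, and the final mask trims the result back to
 * 4KB GPU page granularity.
 */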
607 static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
608 struct amdgpu_vm *vm,
611 struct amdgpu_ring *ring;
612 struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow :
615 uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
616 uint64_t last_pde = ~0, last_pt = ~0;
617 unsigned count = 0, pt_idx, ndw;
618 struct amdgpu_job *job;
619 struct amdgpu_pte_update_params params;
620 struct fence *fence = NULL;
626 pd_addr = amdgpu_bo_gpu_offset(pd);
627 ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
632 /* assume the worst case */
633 ndw += vm->max_pde_used * 6;
635 r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
639 memset(&params, 0, sizeof(params));
641 params.ib = &job->ibs[0];
643 /* walk over the address space and update the page directory */
644 for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
645 struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
651 pt = amdgpu_bo_gpu_offset(bo);
653 if (vm->page_tables[pt_idx].addr == pt)
655 vm->page_tables[pt_idx].addr = pt;
657 if (vm->page_tables[pt_idx].shadow_addr == pt)
659 vm->page_tables[pt_idx].shadow_addr = pt;
662 pde = pd_addr + pt_idx * 8;
663 if (((last_pde + 8 * count) != pde) ||
664 ((last_pt + incr * count) != pt) ||
665 (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
668 amdgpu_vm_do_set_ptes(&params, last_pde,
669 last_pt, count, incr,
682 amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
683 count, incr, AMDGPU_PTE_VALID);
685 if (params.ib->length_dw != 0) {
686 amdgpu_ring_pad_ib(ring, params.ib);
687 amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
688 AMDGPU_FENCE_OWNER_VM);
689 WARN_ON(params.ib->length_dw > ndw);
690 r = amdgpu_job_submit(job, ring, &vm->entity,
691 AMDGPU_FENCE_OWNER_VM, &fence);
695 amdgpu_bo_fence(pd, fence, true);
696 fence_put(vm->page_directory_fence);
697 vm->page_directory_fence = fence_get(fence);
701 amdgpu_job_free(job);
707 amdgpu_job_free(job);
712 * amdgpu_vm_update_page_directory - make sure that page directory is valid
714 * @adev: amdgpu_device pointer
716 * @vm: requested vm
719 * Allocates new page tables if necessary
720 * and updates the page directory.
721 * Returns 0 for success, error for failure.
723 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
724 struct amdgpu_vm *vm)
728 r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
731 return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
735 * amdgpu_vm_update_ptes - make sure that page tables are valid
737 * @params: see amdgpu_pte_update_params definition
739 * @start: start of GPU address range
740 * @end: end of GPU address range
741 * @dst: destination address to map to, the next dst inside the function
742 * @flags: mapping flags
744 * Update the page tables in the range @start - @end.
746 static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
747 struct amdgpu_vm *vm,
748 uint64_t start, uint64_t end,
749 uint64_t dst, uint32_t flags)
751 const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
753 uint64_t cur_pe_start, cur_nptes, cur_dst;
754 uint64_t addr; /* next GPU address to be updated */
756 struct amdgpu_bo *pt;
757 unsigned nptes; /* next number of ptes to be updated */
758 uint64_t next_pe_start;
760 /* initialize the variables */
762 pt_idx = addr >> amdgpu_vm_block_size;
763 pt = vm->page_tables[pt_idx].entry.robj;
765 if ((addr & ~mask) == (end & ~mask))
768 nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
770 cur_pe_start = amdgpu_bo_gpu_offset(pt);
771 cur_pe_start += (addr & mask) * 8;
777 dst += nptes * AMDGPU_GPU_PAGE_SIZE;
779 /* walk over the address space and update the page tables */
781 pt_idx = addr >> amdgpu_vm_block_size;
782 pt = vm->page_tables[pt_idx].entry.robj;
784 if ((addr & ~mask) == (end & ~mask))
787 nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
789 next_pe_start = amdgpu_bo_gpu_offset(pt);
790 next_pe_start += (addr & mask) * 8;
792 if ((cur_pe_start + 8 * cur_nptes) == next_pe_start &&
793 ((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) {
794 /* The next ptb is consecutive to the current ptb.
795 * Don't call the update function now;
796 * the two ptbs will be updated together later.
800 params->func(params, cur_pe_start, cur_dst, cur_nptes,
801 AMDGPU_GPU_PAGE_SIZE, flags);
803 cur_pe_start = next_pe_start;
810 dst += nptes * AMDGPU_GPU_PAGE_SIZE;
813 params->func(params, cur_pe_start, cur_dst, cur_nptes,
814 AMDGPU_GPU_PAGE_SIZE, flags);
818 * amdgpu_vm_frag_ptes - add fragment information to PTEs
820 * @params: see amdgpu_pte_update_params definition
822 * @start: first PTE to handle
823 * @end: last PTE to handle
824 * @dst: addr those PTEs should point to
825 * @flags: hw mapping flags
827 static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
828 struct amdgpu_vm *vm,
829 uint64_t start, uint64_t end,
830 uint64_t dst, uint32_t flags)
833 * The MC L1 TLB supports variable sized pages, based on a fragment
834 * field in the PTE. When this field is set to a non-zero value, page
835 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
836 * flags are considered valid for all PTEs within the fragment range
837 * and corresponding mappings are assumed to be physically contiguous.
839 * The L1 TLB can store a single PTE for the whole fragment,
840 * significantly increasing the space available for translation
841 * caching. This leads to large improvements in throughput when the
842 * TLB is under pressure.
844 * The L2 TLB distributes small and large fragments into two
845 * asymmetric partitions. The large fragment cache is significantly
846 * larger. Thus, we try to use large fragments wherever possible.
847 * Userspace can support this by aligning virtual base address and
848 * allocation size to the fragment size.
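	/*
	 * Worked example (hypothetical numbers, assuming
	 * AMDGPU_LOG2_PAGES_PER_FRAG is 4, i.e. 64KB fragments): for
	 * start = 5 and end = 100 we get frag_start = 16 and frag_end = 96;
	 * frag = __ffs(16 | 96) = 4, so pages 5-15 and 96-99 are written as
	 * plain 4KB PTEs while pages 16-95 get AMDGPU_PTE_FRAG(4).
	 */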
851 const uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG;
853 uint64_t frag_start = ALIGN(start, frag_align);
854 uint64_t frag_end = end & ~(frag_align - 1);
858 /* system pages are not physically contiguous */
859 if (params->src || !(flags & AMDGPU_PTE_VALID) ||
860 (frag_start >= frag_end)) {
862 amdgpu_vm_update_ptes(params, vm, start, end, dst, flags);
866 /* use more than 64KB fragment size if possible */
867 frag = lower_32_bits(frag_start | frag_end);
868 frag = likely(frag) ? __ffs(frag) : 31;
870 /* handle the 4K area at the beginning */
871 if (start != frag_start) {
872 amdgpu_vm_update_ptes(params, vm, start, frag_start,
874 dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
877 /* handle the area in the middle */
878 amdgpu_vm_update_ptes(params, vm, frag_start, frag_end, dst,
879 flags | AMDGPU_PTE_FRAG(frag));
881 /* handle the 4K area at the end */
882 if (frag_end != end) {
883 dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
884 amdgpu_vm_update_ptes(params, vm, frag_end, end, dst, flags);
889 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
891 * @adev: amdgpu_device pointer
892 * @exclusive: fence we need to sync to
893 * @src: address where to copy page table entries from
894 * @pages_addr: DMA addresses to use for mapping
896 * @start: start of mapped range
897 * @last: last mapped entry
898 * @flags: flags for the entries
899 * @addr: addr to set the area to
900 * @fence: optional resulting fence
902 * Fill in the page table entries between @start and @last.
903 * Returns 0 for success, -EINVAL for failure.
905 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
906 struct fence *exclusive,
908 dma_addr_t *pages_addr,
909 struct amdgpu_vm *vm,
910 uint64_t start, uint64_t last,
911 uint32_t flags, uint64_t addr,
912 struct fence **fence)
914 struct amdgpu_ring *ring;
915 void *owner = AMDGPU_FENCE_OWNER_VM;
916 unsigned nptes, ncmds, ndw;
917 struct amdgpu_job *job;
918 struct amdgpu_pte_update_params params;
919 struct fence *f = NULL;
922 memset(&params, 0, sizeof(params));
926 ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
928 memset(&params, 0, sizeof(params));
932 /* sync to everything on unmapping */
933 if (!(flags & AMDGPU_PTE_VALID))
934 owner = AMDGPU_FENCE_OWNER_UNDEFINED;
936 nptes = last - start + 1;
939 * reserve space for one command every (1 << BLOCK_SIZE)
940 * entries or 2k dwords (whatever is smaller)
942 ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;
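	/*
	 * Worked example (hypothetical numbers): a 4MB update is
	 * nptes = 1024 PTEs; with amdgpu_vm_block_size = 9 this needs
	 * ncmds = (1024 >> 9) + 1 = 3 commands at most.
	 */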
948 /* only copy commands needed */
951 params.func = amdgpu_vm_do_copy_ptes;
953 } else if (pages_addr) {
954 /* copy commands needed */
960 params.func = amdgpu_vm_do_copy_ptes;
963 /* set page commands needed */
966 /* two extra commands for begin/end of fragment */
969 params.func = amdgpu_vm_do_set_ptes;
972 r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
976 params.ib = &job->ibs[0];
978 if (!src && pages_addr) {
982 /* Put the PTEs at the end of the IB. */
984 pte = (uint64_t *)&(job->ibs->ptr[i]);
985 params.src = job->ibs->gpu_addr + i * 4;
987 for (i = 0; i < nptes; ++i) {
988 pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
989 AMDGPU_GPU_PAGE_SIZE);
994 r = amdgpu_sync_fence(adev, &job->sync, exclusive);
998 r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
1003 r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
1007 amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
1009 amdgpu_ring_pad_ib(ring, params.ib);
1010 WARN_ON(params.ib->length_dw > ndw);
1011 r = amdgpu_job_submit(job, ring, &vm->entity,
1012 AMDGPU_FENCE_OWNER_VM, &f);
1016 amdgpu_bo_fence(vm->page_directory, f, true);
1019 *fence = fence_get(f);
1025 amdgpu_job_free(job);
1030 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1032 * @adev: amdgpu_device pointer
1033 * @exclusive: fence we need to sync to
1034 * @gtt_flags: flags as they are used for GTT
1035 * @pages_addr: DMA addresses to use for mapping
1037 * @mapping: mapped range and flags to use for the update
1038 * @addr: addr to set the area to
1039 * @flags: HW flags for the mapping
1040 * @fence: optional resulting fence
1042 * Split the mapping into smaller chunks so that each update fits
1044 * Returns 0 for success, -EINVAL for failure.
1046 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1047 struct fence *exclusive,
1049 dma_addr_t *pages_addr,
1050 struct amdgpu_vm *vm,
1051 struct amdgpu_bo_va_mapping *mapping,
1052 uint32_t flags, uint64_t addr,
1053 struct fence **fence)
1055 const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
1057 uint64_t src = 0, start = mapping->it.start;
1060 /* normally, bo_va->flags should only contain the READABLE and WRITEABLE bits,
1061 * but just in case we filter the flags here first
1063 if (!(mapping->flags & AMDGPU_PTE_READABLE))
1064 flags &= ~AMDGPU_PTE_READABLE;
1065 if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1066 flags &= ~AMDGPU_PTE_WRITEABLE;
1068 trace_amdgpu_vm_bo_update(mapping);
1071 if (flags == gtt_flags)
1072 src = adev->gart.table_addr + (addr >> 12) * 8;
1075 addr += mapping->offset;
1077 if (!pages_addr || src)
1078 return amdgpu_vm_bo_update_mapping(adev, exclusive,
1079 src, pages_addr, vm,
1080 start, mapping->it.last,
1081 flags, addr, fence);
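	/*
	 * Example (hypothetical numbers): max_size is 16384 GPU pages (64MB),
	 * so a 256MB system-memory mapping that needs the GART-lookup path
	 * below is written in four 64MB chunks.
	 */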
1083 while (start != mapping->it.last + 1) {
1086 last = min((uint64_t)mapping->it.last, start + max_size - 1);
1087 r = amdgpu_vm_bo_update_mapping(adev, exclusive,
1088 src, pages_addr, vm,
1089 start, last, flags, addr,
1095 addr += max_size * AMDGPU_GPU_PAGE_SIZE;
1102 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1104 * @adev: amdgpu_device pointer
1105 * @bo_va: requested BO and VM object
1108 * Fill in the page table entries for @bo_va.
1109 * Returns 0 for success, -EINVAL for failure.
1111 * Objects have to be reserved and the mutex must be locked!
1113 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1114 struct amdgpu_bo_va *bo_va,
1115 struct ttm_mem_reg *mem)
1117 struct amdgpu_vm *vm = bo_va->vm;
1118 struct amdgpu_bo_va_mapping *mapping;
1119 dma_addr_t *pages_addr = NULL;
1120 uint32_t gtt_flags, flags;
1121 struct fence *exclusive;
1126 struct ttm_dma_tt *ttm;
1128 addr = (u64)mem->start << PAGE_SHIFT;
1129 switch (mem->mem_type) {
1131 ttm = container_of(bo_va->bo->tbo.ttm, struct
1133 pages_addr = ttm->dma_address;
1137 addr += adev->vm_manager.vram_base_offset;
1144 exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
1150 flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
1151 gtt_flags = (adev == bo_va->bo->adev) ? flags : 0;
1153 spin_lock(&vm->status_lock);
1154 if (!list_empty(&bo_va->vm_status))
1155 list_splice_init(&bo_va->valids, &bo_va->invalids);
1156 spin_unlock(&vm->status_lock);
1158 list_for_each_entry(mapping, &bo_va->invalids, list) {
1159 r = amdgpu_vm_bo_split_mapping(adev, exclusive,
1160 gtt_flags, pages_addr, vm,
1161 mapping, flags, addr,
1162 &bo_va->last_pt_update);
1167 if (trace_amdgpu_vm_bo_mapping_enabled()) {
1168 list_for_each_entry(mapping, &bo_va->valids, list)
1169 trace_amdgpu_vm_bo_mapping(mapping);
1171 list_for_each_entry(mapping, &bo_va->invalids, list)
1172 trace_amdgpu_vm_bo_mapping(mapping);
1175 spin_lock(&vm->status_lock);
1176 list_splice_init(&bo_va->invalids, &bo_va->valids);
1177 list_del_init(&bo_va->vm_status);
1179 list_add(&bo_va->vm_status, &vm->cleared);
1180 spin_unlock(&vm->status_lock);
1186 * amdgpu_vm_clear_freed - clear freed BOs in the PT
1188 * @adev: amdgpu_device pointer
1191 * Make sure all freed BOs are cleared in the PT.
1192 * Returns 0 for success.
1194 * PTs have to be reserved and mutex must be locked!
1196 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1197 struct amdgpu_vm *vm)
1199 struct amdgpu_bo_va_mapping *mapping;
1202 while (!list_empty(&vm->freed)) {
1203 mapping = list_first_entry(&vm->freed,
1204 struct amdgpu_bo_va_mapping, list);
1205 list_del(&mapping->list);
1207 r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
1219 * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
1221 * @adev: amdgpu_device pointer
1224 * Make sure all invalidated BOs are cleared in the PT.
1225 * Returns 0 for success.
1227 * PTs have to be reserved and mutex must be locked!
1229 int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
1230 struct amdgpu_vm *vm, struct amdgpu_sync *sync)
1232 struct amdgpu_bo_va *bo_va = NULL;
1235 spin_lock(&vm->status_lock);
1236 while (!list_empty(&vm->invalidated)) {
1237 bo_va = list_first_entry(&vm->invalidated,
1238 struct amdgpu_bo_va, vm_status);
1239 spin_unlock(&vm->status_lock);
1241 r = amdgpu_vm_bo_update(adev, bo_va, NULL);
1245 spin_lock(&vm->status_lock);
1247 spin_unlock(&vm->status_lock);
1250 r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
1256 * amdgpu_vm_bo_add - add a bo to a specific vm
1258 * @adev: amdgpu_device pointer
1260 * @bo: amdgpu buffer object
1262 * Add @bo into the requested vm.
1263 * Add @bo to the list of bos associated with the vm
1264 * Returns newly added bo_va or NULL for failure
1266 * Object has to be reserved!
1268 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1269 struct amdgpu_vm *vm,
1270 struct amdgpu_bo *bo)
1272 struct amdgpu_bo_va *bo_va;
1274 bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1275 if (bo_va == NULL) {
1280 bo_va->ref_count = 1;
1281 INIT_LIST_HEAD(&bo_va->bo_list);
1282 INIT_LIST_HEAD(&bo_va->valids);
1283 INIT_LIST_HEAD(&bo_va->invalids);
1284 INIT_LIST_HEAD(&bo_va->vm_status);
1286 list_add_tail(&bo_va->bo_list, &bo->va);
1292 * amdgpu_vm_bo_map - map bo inside a vm
1294 * @adev: amdgpu_device pointer
1295 * @bo_va: bo_va to store the address
1296 * @saddr: where to map the BO
1297 * @offset: requested offset in the BO
1298 * @flags: attributes of pages (read/write/valid/etc.)
1300 * Add a mapping of the BO at the specified addr into the VM.
1301 * Returns 0 for success, error for failure.
1303 * Object has to be reserved and unreserved outside!
1305 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1306 struct amdgpu_bo_va *bo_va,
1307 uint64_t saddr, uint64_t offset,
1308 uint64_t size, uint32_t flags)
1310 struct amdgpu_bo_va_mapping *mapping;
1311 struct amdgpu_vm *vm = bo_va->vm;
1312 struct interval_tree_node *it;
1313 unsigned last_pfn, pt_idx;
1317 /* validate the parameters */
1318 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1319 size == 0 || size & AMDGPU_GPU_PAGE_MASK)
1322 /* make sure object fit at this offset */
1323 eaddr = saddr + size - 1;
1324 if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
1327 last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
1328 if (last_pfn >= adev->vm_manager.max_pfn) {
1329 dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
1330 last_pfn, adev->vm_manager.max_pfn);
1334 saddr /= AMDGPU_GPU_PAGE_SIZE;
1335 eaddr /= AMDGPU_GPU_PAGE_SIZE;
1337 it = interval_tree_iter_first(&vm->va, saddr, eaddr);
1339 struct amdgpu_bo_va_mapping *tmp;
1340 tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
1341 /* bo and tmp overlap, invalid addr */
1342 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1343 "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
1344 tmp->it.start, tmp->it.last + 1);
1349 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1355 INIT_LIST_HEAD(&mapping->list);
1356 mapping->it.start = saddr;
1357 mapping->it.last = eaddr;
1358 mapping->offset = offset;
1359 mapping->flags = flags;
1361 list_add(&mapping->list, &bo_va->invalids);
1362 interval_tree_insert(&mapping->it, &vm->va);
1364 /* Make sure the page tables are allocated */
1365 saddr >>= amdgpu_vm_block_size;
1366 eaddr >>= amdgpu_vm_block_size;
1368 BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));
1370 if (eaddr > vm->max_pde_used)
1371 vm->max_pde_used = eaddr;
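	/*
	 * Example (hypothetical numbers): an 8MB mapping covers 2048 GPU
	 * pages; with amdgpu_vm_block_size = 9 that spans page tables
	 * saddr >> 9 through eaddr >> 9, i.e. at most five tables depending
	 * on alignment.
	 */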
1373 /* walk over the address space and allocate the page tables */
1374 for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
1375 struct reservation_object *resv = vm->page_directory->tbo.resv;
1376 struct amdgpu_bo_list_entry *entry;
1377 struct amdgpu_bo *pt;
1379 entry = &vm->page_tables[pt_idx].entry;
1383 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
1384 AMDGPU_GPU_PAGE_SIZE, true,
1385 AMDGPU_GEM_DOMAIN_VRAM,
1386 AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
1387 AMDGPU_GEM_CREATE_SHADOW,
1392 /* Keep a reference from the page table to its parent page
1393 * directory, so they are not freed in the wrong order.
1395 pt->parent = amdgpu_bo_ref(vm->page_directory);
1397 r = amdgpu_vm_clear_bo(adev, vm, pt);
1399 amdgpu_bo_unref(&pt);
1404 entry->priority = 0;
1405 entry->tv.bo = &entry->robj->tbo;
1406 entry->tv.shared = true;
1407 entry->user_pages = NULL;
1408 vm->page_tables[pt_idx].addr = 0;
1414 list_del(&mapping->list);
1415 interval_tree_remove(&mapping->it, &vm->va);
1416 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
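/*
 * Hypothetical usage sketch (not part of the driver): create a bo_va for a
 * BO and map its first 1MB at GPU VA 0x100000, readable and writeable. The
 * BO and the VM page directory must be reserved by the caller as required
 * above; error handling is kept minimal.
 */
static int example_map_bo(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!bo_va)
		return -ENOMEM;

	return amdgpu_vm_bo_map(adev, bo_va, 0x100000, 0, 1 << 20,
				AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
}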
1424 * amdgpu_vm_bo_unmap - remove bo mapping from vm
1426 * @adev: amdgpu_device pointer
1427 * @bo_va: bo_va to remove the address from
1428 * @saddr: where the BO is mapped
1430 * Remove a mapping of the BO at the specified addr from the VM.
1431 * Returns 0 for success, error for failure.
1433 * Object has to be reserved and unreserved outside!
1435 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1436 struct amdgpu_bo_va *bo_va,
1439 struct amdgpu_bo_va_mapping *mapping;
1440 struct amdgpu_vm *vm = bo_va->vm;
1443 saddr /= AMDGPU_GPU_PAGE_SIZE;
1445 list_for_each_entry(mapping, &bo_va->valids, list) {
1446 if (mapping->it.start == saddr)
1450 if (&mapping->list == &bo_va->valids) {
1453 list_for_each_entry(mapping, &bo_va->invalids, list) {
1454 if (mapping->it.start == saddr)
1458 if (&mapping->list == &bo_va->invalids)
1462 list_del(&mapping->list);
1463 interval_tree_remove(&mapping->it, &vm->va);
1464 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1467 list_add(&mapping->list, &vm->freed);
1475 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
1477 * @adev: amdgpu_device pointer
1478 * @bo_va: requested bo_va
1480 * Remove @bo_va->bo from the requested vm.
1482 * Object has to be reserved!
1484 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
1485 struct amdgpu_bo_va *bo_va)
1487 struct amdgpu_bo_va_mapping *mapping, *next;
1488 struct amdgpu_vm *vm = bo_va->vm;
1490 list_del(&bo_va->bo_list);
1492 spin_lock(&vm->status_lock);
1493 list_del(&bo_va->vm_status);
1494 spin_unlock(&vm->status_lock);
1496 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
1497 list_del(&mapping->list);
1498 interval_tree_remove(&mapping->it, &vm->va);
1499 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1500 list_add(&mapping->list, &vm->freed);
1502 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
1503 list_del(&mapping->list);
1504 interval_tree_remove(&mapping->it, &vm->va);
1508 fence_put(bo_va->last_pt_update);
1513 * amdgpu_vm_bo_invalidate - mark the bo as invalid
1515 * @adev: amdgpu_device pointer
1517 * @bo: amdgpu buffer object
1519 * Mark @bo as invalid.
1521 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
1522 struct amdgpu_bo *bo)
1524 struct amdgpu_bo_va *bo_va;
1526 list_for_each_entry(bo_va, &bo->va, bo_list) {
1527 spin_lock(&bo_va->vm->status_lock);
1528 if (list_empty(&bo_va->vm_status))
1529 list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
1530 spin_unlock(&bo_va->vm->status_lock);
1535 * amdgpu_vm_init - initialize a vm instance
1537 * @adev: amdgpu_device pointer
1542 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1544 const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
1545 AMDGPU_VM_PTE_COUNT * 8);
1546 unsigned pd_size, pd_entries;
1547 unsigned ring_instance;
1548 struct amdgpu_ring *ring;
1549 struct amd_sched_rq *rq;
1552 for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
1555 vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
1556 spin_lock_init(&vm->status_lock);
1557 INIT_LIST_HEAD(&vm->invalidated);
1558 INIT_LIST_HEAD(&vm->cleared);
1559 INIT_LIST_HEAD(&vm->freed);
1561 pd_size = amdgpu_vm_directory_size(adev);
1562 pd_entries = amdgpu_vm_num_pdes(adev);
1564 /* allocate page table array */
1565 vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
1566 if (vm->page_tables == NULL) {
1567 DRM_ERROR("Cannot allocate memory for page table array\n");
1571 /* create scheduler entity for page table updates */
1573 ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
1574 ring_instance %= adev->vm_manager.vm_pte_num_rings;
1575 ring = adev->vm_manager.vm_pte_rings[ring_instance];
1576 rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
1577 r = amd_sched_entity_init(&ring->sched, &vm->entity,
1578 rq, amdgpu_sched_jobs);
1582 vm->page_directory_fence = NULL;
1584 r = amdgpu_bo_create(adev, pd_size, align, true,
1585 AMDGPU_GEM_DOMAIN_VRAM,
1586 AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
1587 AMDGPU_GEM_CREATE_SHADOW,
1588 NULL, NULL, &vm->page_directory);
1590 goto error_free_sched_entity;
1592 r = amdgpu_bo_reserve(vm->page_directory, false);
1594 goto error_free_page_directory;
1596 r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
1597 amdgpu_bo_unreserve(vm->page_directory);
1599 goto error_free_page_directory;
1600 vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
1604 error_free_page_directory:
1605 amdgpu_bo_unref(&vm->page_directory);
1606 vm->page_directory = NULL;
1608 error_free_sched_entity:
1609 amd_sched_entity_fini(&ring->sched, &vm->entity);
1615 * amdgpu_vm_fini - tear down a vm instance
1617 * @adev: amdgpu_device pointer
1621 * Unbind the VM and remove all bos from the vm bo list
1623 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1625 struct amdgpu_bo_va_mapping *mapping, *tmp;
1628 amd_sched_entity_fini(vm->entity.sched, &vm->entity);
1630 if (!RB_EMPTY_ROOT(&vm->va)) {
1631 dev_err(adev->dev, "still active bo inside vm\n");
1633 rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
1634 list_del(&mapping->list);
1635 interval_tree_remove(&mapping->it, &vm->va);
1638 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
1639 list_del(&mapping->list);
1643 for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
1644 if (vm->page_tables[i].entry.robj &&
1645 vm->page_tables[i].entry.robj->shadow)
1646 amdgpu_bo_unref(&vm->page_tables[i].entry.robj->shadow);
1647 amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
1649 drm_free_large(vm->page_tables);
1651 if (vm->page_directory->shadow)
1652 amdgpu_bo_unref(&vm->page_directory->shadow);
1653 amdgpu_bo_unref(&vm->page_directory);
1654 fence_put(vm->page_directory_fence);
1658 * amdgpu_vm_manager_init - init the VM manager
1660 * @adev: amdgpu_device pointer
1662 * Initialize the VM manager structures
1664 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
1668 INIT_LIST_HEAD(&adev->vm_manager.ids_lru);
1670 /* skip over VMID 0, since it is the system VM */
1671 for (i = 1; i < adev->vm_manager.num_ids; ++i) {
1672 amdgpu_vm_reset_id(adev, i);
1673 amdgpu_sync_create(&adev->vm_manager.ids[i].active);
1674 list_add_tail(&adev->vm_manager.ids[i].list,
1675 &adev->vm_manager.ids_lru);
1678 adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
1679 for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
1680 adev->vm_manager.seqno[i] = 0;
1682 atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
1683 atomic64_set(&adev->vm_manager.client_counter, 0);
1687 * amdgpu_vm_manager_fini - cleanup VM manager
1689 * @adev: amdgpu_device pointer
1691 * Cleanup the VM manager and free resources.
1693 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
1697 for (i = 0; i < AMDGPU_NUM_VM; ++i) {
1698 struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
1700 fence_put(adev->vm_manager.ids[i].first);
1701 amdgpu_sync_free(&adev->vm_manager.ids[i].active);
1702 fence_put(id->flushed_updates);