xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c (revision 110e6f26af80dfd90b6e5c645b1aed7228aa580d)
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */

/* Special value indicating that no flush is necessary */
#define AMDGPU_VM_NO_FLUSH (~0ll)

/**
 * amdgpu_vm_num_pdes - return the number of page directory entries
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the number of page directory entries.
 */
static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
{
	return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
}

/**
 * amdgpu_vm_directory_size - returns the size of the page directory in bytes
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the size of the page directory in bytes.
 */
static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
}

/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry)
{
	entry->robj = vm->page_directory;
	entry->priority = 0;
	entry->tv.bo = &vm->page_directory->tbo;
	entry->tv.shared = true;
	entry->user_pages = NULL;
	list_add(&entry->tv.head, validated);
}

/**
 * amdgpu_vm_get_pt_bos - add the VM PT BOs to a duplicates list
 *
 * @vm: vm providing the BOs
 * @duplicates: head of duplicates list
 *
 * Add the page tables to the BO duplicates list
 * for command submission.
 */
void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates)
{
	unsigned i;

	/* add the vm page table to the list */
	for (i = 0; i <= vm->max_pde_used; ++i) {
		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;

		if (!entry->robj)
			continue;

		list_add(&entry->tv.head, duplicates);
	}
}

/**
 * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
 *
 * @adev: amdgpu device instance
 * @vm: vm providing the BOs
 *
 * Move the PT BOs to the tail of the LRU.
 */
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm)
{
	struct ttm_bo_global *glob = adev->mman.bdev.glob;
	unsigned i;

	spin_lock(&glob->lru_lock);
	for (i = 0; i <= vm->max_pde_used; ++i) {
		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;

		if (!entry->robj)
			continue;

		ttm_bo_move_to_lru_tail(&entry->robj->tbo);
	}
	spin_unlock(&glob->lru_lock);
}

/**
 * amdgpu_vm_grab_id - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @vm_id: resulting VMID
 * @vm_pd_addr: resulting page directory address, or AMDGPU_VM_NO_FLUSH
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync, struct fence *fence,
		      unsigned *vm_id, uint64_t *vm_pd_addr)
{
	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
	struct amdgpu_device *adev = ring->adev;
	struct fence *updates = sync->last_vm_update;
	struct amdgpu_vm_id *id;
	unsigned i = ring->idx;
	int r;

	mutex_lock(&adev->vm_manager.lock);

	/* Check if we can use a VMID already assigned to this VM */
	do {
		struct fence *flushed;

		id = vm->ids[i++];
		if (i == AMDGPU_MAX_RINGS)
			i = 0;

		/* Check all the prerequisites to using this VMID */
		if (!id)
			continue;

		if (atomic_long_read(&id->owner) != (long)vm)
			continue;

		if (pd_addr != id->pd_gpu_addr)
			continue;

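		/* an ID last used on a different ring can only be reused
		 * once its last flush has completed
		 */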
		if (id != vm->ids[ring->idx] &&
		    (!id->last_flush || !fence_is_signaled(id->last_flush)))
			continue;

		flushed = id->flushed_updates;
		if (updates && (!flushed || fence_is_later(updates, flushed)))
			continue;

		/* Good we can use this VMID */
		if (id == vm->ids[ring->idx]) {
			r = amdgpu_sync_fence(ring->adev, sync,
					      id->first);
			if (r)
				goto error;
		}

		/* And remember this submission as user of the VMID */
		r = amdgpu_sync_fence(ring->adev, &id->active, fence);
		if (r)
			goto error;

		list_move_tail(&id->list, &adev->vm_manager.ids_lru);
		vm->ids[ring->idx] = id;

		*vm_id = id - adev->vm_manager.ids;
		*vm_pd_addr = AMDGPU_VM_NO_FLUSH;
		trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);

		mutex_unlock(&adev->vm_manager.lock);
		return 0;

	} while (i != ring->idx);

	id = list_first_entry(&adev->vm_manager.ids_lru,
			      struct amdgpu_vm_id,
			      list);

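	/* if the oldest ID is still in use, move all idle IDs to the
	 * front of the LRU so we evict an idle one if possible
	 */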
	if (!amdgpu_sync_is_idle(&id->active)) {
		struct list_head *head = &adev->vm_manager.ids_lru;
		struct amdgpu_vm_id *tmp;

		list_for_each_entry_safe(id, tmp, &adev->vm_manager.ids_lru,
					 list) {
			if (amdgpu_sync_is_idle(&id->active)) {
				list_move(&id->list, head);
				head = &id->list;
			}
		}
		id = list_first_entry(&adev->vm_manager.ids_lru,
				      struct amdgpu_vm_id,
				      list);
	}

	r = amdgpu_sync_cycle_fences(sync, &id->active, fence);
	if (r)
		goto error;

	fence_put(id->first);
	id->first = fence_get(fence);

	fence_put(id->last_flush);
	id->last_flush = NULL;

	fence_put(id->flushed_updates);
	id->flushed_updates = fence_get(updates);

	id->pd_gpu_addr = pd_addr;

	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
	atomic_long_set(&id->owner, (long)vm);
	vm->ids[ring->idx] = id;

	*vm_id = id - adev->vm_manager.ids;
	*vm_pd_addr = pd_addr;
	trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);

error:
	mutex_unlock(&adev->vm_manager.lock);
	return r;
}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @vm_id: vmid number to use
 * @pd_addr: address of the page directory
 * @gds_base: GDS base address
 * @gds_size: GDS size
 * @gws_base: GWS base address
 * @gws_size: GWS size
 * @oa_base: OA base address
 * @oa_size: OA size
 *
 * Emit a VM flush when it is necessary.
 */
int amdgpu_vm_flush(struct amdgpu_ring *ring,
		    unsigned vm_id, uint64_t pd_addr,
		    uint32_t gds_base, uint32_t gds_size,
		    uint32_t gws_base, uint32_t gws_size,
		    uint32_t oa_base, uint32_t oa_size)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != gds_base ||
		id->gds_size != gds_size ||
		id->gws_base != gws_base ||
		id->gws_size != gws_size ||
		id->oa_base != oa_base ||
		id->oa_size != oa_size);
	int r;

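	/* wait for outstanding work on the ring before switching the
	 * page table or GDS state
	 */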
	if (ring->funcs->emit_pipeline_sync && (
	    pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed))
		amdgpu_ring_emit_pipeline_sync(ring);

	if (pd_addr != AMDGPU_VM_NO_FLUSH) {
		struct fence *fence;

		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
		amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr);
		r = amdgpu_fence_emit(ring, &fence);
		if (r)
			return r;

		mutex_lock(&adev->vm_manager.lock);
		fence_put(id->last_flush);
		id->last_flush = fence;
		mutex_unlock(&adev->vm_manager.lock);
	}

	if (gds_switch_needed) {
		id->gds_base = gds_base;
		id->gds_size = gds_size;
		id->gws_base = gws_base;
		id->gws_size = gws_size;
		id->oa_base = oa_base;
		id->oa_size = oa_size;
		amdgpu_ring_emit_gds_switch(ring, vm_id,
					    gds_base, gds_size,
					    gws_base, gws_size,
					    oa_base, oa_size);
	}

	return 0;
}

/**
 * amdgpu_vm_reset_id - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vm_id: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force switch on next flush.
 */
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
{
	struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];

	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
}

/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm.
 * Search inside @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}

/**
 * amdgpu_vm_update_pages - helper to call the right asic function
 *
 * @adev: amdgpu_device pointer
 * @src: address where to copy page table entries from
 * @pages_addr: DMA addresses to use for mapping
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
				   uint64_t src,
				   dma_addr_t *pages_addr,
				   struct amdgpu_ib *ib,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint32_t flags)
{
	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);

	if (src) {
		src += (addr >> 12) * 8;
		amdgpu_vm_copy_pte(adev, ib, pe, src, count);

	} else if (pages_addr) {
		amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr,
				    count, incr, flags);

	} else if (count < 3) {
		amdgpu_vm_write_pte(adev, ib, NULL, pe, addr,
				    count, incr, flags);

	} else {
		amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
				      count, incr, flags);
	}
}

/**
 * amdgpu_vm_clear_bo - initially clear the page dir/table
 *
 * @adev: amdgpu_device pointer
 * @vm: vm the BO belongs to
 * @bo: bo to clear
 *
 * The BO has to be reserved before calling this function.
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
			      struct amdgpu_vm *vm,
			      struct amdgpu_bo *bo)
{
	struct amdgpu_ring *ring;
	struct fence *fence = NULL;
	struct amdgpu_job *job;
	unsigned entries;
	uint64_t addr;
	int r;

	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		return r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto error;

	addr = amdgpu_bo_gpu_offset(bo);
	entries = amdgpu_bo_size(bo) / 8;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto error;

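	/* fill the whole BO with zeroed (invalid) entries */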
	amdgpu_vm_update_pages(adev, 0, NULL, &job->ibs[0], addr, 0, entries,
			       0, 0);
	amdgpu_ring_pad_ib(ring, &job->ibs[0]);

	WARN_ON(job->ibs[0].length_dw > 64);
	r = amdgpu_job_submit(job, ring, &vm->entity,
			      AMDGPU_FENCE_OWNER_VM, &fence);
	if (r)
		goto error_free;

	amdgpu_bo_fence(bo, fence, true);
	fence_put(fence);
	return 0;

error_free:
	amdgpu_job_free(job);

error:
	return r;
}

/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to and return the pointer for the page table entry.
 */
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	if (pages_addr) {
		/* page table offset */
		result = pages_addr[addr >> PAGE_SHIFT];

		/* in case cpu page size != gpu page size */
		result |= addr & (~PAGE_MASK);

	} else {
		/* No mapping required */
		result = addr;
	}

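	/* the result must be GPU-page (4KB) aligned; mask off the low
	 * 12 bits
	 */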
	result &= 0xFFFFFFFFFFFFF000ULL;

	return result;
}

/**
 * amdgpu_vm_update_page_directory - make sure that the page directory is valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Allocates new page tables if necessary
 * and updates the page directory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdgpu_ring *ring;
	struct amdgpu_bo *pd = vm->page_directory;
	uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
	uint64_t last_pde = ~0, last_pt = ~0;
	unsigned count = 0, pt_idx, ndw;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct fence *fence = NULL;
	int r;

	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

	/* padding, etc. */
	ndw = 64;

	/* assume the worst case */
	ndw += vm->max_pde_used * 6;

	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	/* walk over the address space and update the page directory */
	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
		struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
		uint64_t pde, pt;

		if (bo == NULL)
			continue;

		pt = amdgpu_bo_gpu_offset(bo);
		if (vm->page_tables[pt_idx].addr == pt)
			continue;
		vm->page_tables[pt_idx].addr = pt;

		pde = pd_addr + pt_idx * 8;
		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt)) {

			if (count) {
				amdgpu_vm_update_pages(adev, 0, NULL, ib,
						       last_pde, last_pt,
						       count, incr,
						       AMDGPU_PTE_VALID);
			}

			count = 1;
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count)
		amdgpu_vm_update_pages(adev, 0, NULL, ib, last_pde, last_pt,
				       count, incr, AMDGPU_PTE_VALID);

	if (ib->length_dw != 0) {
		amdgpu_ring_pad_ib(ring, ib);
		amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
				 AMDGPU_FENCE_OWNER_VM);
		WARN_ON(ib->length_dw > ndw);
		r = amdgpu_job_submit(job, ring, &vm->entity,
				      AMDGPU_FENCE_OWNER_VM, &fence);
		if (r)
			goto error_free;

		amdgpu_bo_fence(pd, fence, true);
		fence_put(vm->page_directory_fence);
		vm->page_directory_fence = fence_get(fence);
		fence_put(fence);

	} else {
		amdgpu_job_free(job);
	}

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 *
 * @adev: amdgpu_device pointer
 * @src: address where to copy page table entries from
 * @pages_addr: DMA addresses to use for mapping
 * @ib: IB for the update
 * @pe_start: first PTE to handle
 * @pe_end: end of the PTEs to handle (exclusive)
 * @addr: addr those PTEs should point to
 * @flags: hw mapping flags
 */
static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
				uint64_t src,
				dma_addr_t *pages_addr,
				struct amdgpu_ib *ib,
				uint64_t pe_start, uint64_t pe_end,
				uint64_t addr, uint32_t flags)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */

	/* SI and newer are optimized for 64KB */
	uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB;
	uint64_t frag_align = 0x80;
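	/* 0x80 bytes of PTEs == 16 entries == 16 4KB pages == 64KB */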

	uint64_t frag_start = ALIGN(pe_start, frag_align);
	uint64_t frag_end = pe_end & ~(frag_align - 1);

	unsigned count;

	/* Abort early if there isn't anything to do */
	if (pe_start == pe_end)
		return;

	/* system pages are mapped non-contiguously */
	if (src || pages_addr || !(flags & AMDGPU_PTE_VALID) ||
	    (frag_start >= frag_end)) {

		count = (pe_end - pe_start) / 8;
		amdgpu_vm_update_pages(adev, src, pages_addr, ib, pe_start,
				       addr, count, AMDGPU_GPU_PAGE_SIZE,
				       flags);
		return;
	}

	/* handle the 4K area at the beginning */
	if (pe_start != frag_start) {
		count = (frag_start - pe_start) / 8;
		amdgpu_vm_update_pages(adev, 0, NULL, ib, pe_start, addr,
				       count, AMDGPU_GPU_PAGE_SIZE, flags);
		addr += AMDGPU_GPU_PAGE_SIZE * count;
	}

	/* handle the area in the middle */
	count = (frag_end - frag_start) / 8;
	amdgpu_vm_update_pages(adev, 0, NULL, ib, frag_start, addr, count,
			       AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);

	/* handle the 4K area at the end */
	if (frag_end != pe_end) {
		addr += AMDGPU_GPU_PAGE_SIZE * count;
		count = (pe_end - frag_end) / 8;
		amdgpu_vm_update_pages(adev, 0, NULL, ib, frag_end, addr,
				       count, AMDGPU_GPU_PAGE_SIZE, flags);
	}
}

/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
 * @adev: amdgpu_device pointer
 * @src: address where to copy page table entries from
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @ib: indirect buffer to fill with commands
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end.
 */
static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
				  uint64_t src,
				  dma_addr_t *pages_addr,
				  struct amdgpu_vm *vm,
				  struct amdgpu_ib *ib,
				  uint64_t start, uint64_t end,
				  uint64_t dst, uint32_t flags)
{
	const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;

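	/* initialized to ~0 so the first iteration triggers a dummy
	 * amdgpu_vm_frag_ptes call that aborts early (pe_start == pe_end)
	 */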
	uint64_t last_pe_start = ~0, last_pe_end = ~0, last_dst = ~0;
	uint64_t addr;

	/* walk over the address space and update the page tables */
	for (addr = start; addr < end; ) {
		uint64_t pt_idx = addr >> amdgpu_vm_block_size;
		struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj;
		unsigned nptes;
		uint64_t pe_start;

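		/* how many entries can we handle before crossing into the
		 * next page table?
		 */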
		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);

		pe_start = amdgpu_bo_gpu_offset(pt);
		pe_start += (addr & mask) * 8;

		if (last_pe_end != pe_start) {

			amdgpu_vm_frag_ptes(adev, src, pages_addr, ib,
					    last_pe_start, last_pe_end,
					    last_dst, flags);

			last_pe_start = pe_start;
			last_pe_end = pe_start + 8 * nptes;
			last_dst = dst;
		} else {
			last_pe_end += 8 * nptes;
		}

		addr += nptes;
		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
	}

	amdgpu_vm_frag_ptes(adev, src, pages_addr, ib, last_pe_start,
			    last_pe_end, last_dst, flags);
}

/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @src: address where to copy page table entries from
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
 * Fill in the page table entries between @start and @last.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
				       uint64_t src,
				       dma_addr_t *pages_addr,
				       struct amdgpu_vm *vm,
				       uint64_t start, uint64_t last,
				       uint32_t flags, uint64_t addr,
				       struct fence **fence)
{
	struct amdgpu_ring *ring;
	void *owner = AMDGPU_FENCE_OWNER_VM;
	unsigned nptes, ncmds, ndw;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct fence *f = NULL;
	int r;

	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

	/* sync to everything on unmapping */
	if (!(flags & AMDGPU_PTE_VALID))
		owner = AMDGPU_FENCE_OWNER_UNDEFINED;

	nptes = last - start + 1;

	/*
	 * reserve space for one command every (1 << BLOCK_SIZE)
	 *  entries or 2k dwords (whatever is smaller)
	 */
	ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;

	/* padding, etc. */
	ndw = 64;

	if (src) {
		/* only copy commands needed */
		ndw += ncmds * 7;

	} else if (pages_addr) {
		/* header for write data commands */
		ndw += ncmds * 4;

		/* body of write data command */
		ndw += nptes * 2;

	} else {
		/* set page commands needed */
		ndw += ncmds * 10;

		/* two extra commands for begin/end of fragment */
		ndw += 2 * 10;
	}

	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
			     owner);
	if (r)
		goto error_free;

	r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
	if (r)
		goto error_free;

	amdgpu_vm_update_ptes(adev, src, pages_addr, vm, ib, start,
			      last + 1, addr, flags);

	amdgpu_ring_pad_ib(ring, ib);
	WARN_ON(ib->length_dw > ndw);
	r = amdgpu_job_submit(job, ring, &vm->entity,
			      AMDGPU_FENCE_OWNER_VM, &f);
	if (r)
		goto error_free;

	amdgpu_bo_fence(vm->page_directory, f, true);
	if (fence) {
		fence_put(*fence);
		*fence = fence_get(f);
	}
	fence_put(f);
	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
 *
 * @adev: amdgpu_device pointer
 * @gtt_flags: flags as they are used for GTT
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
 * @flags: HW flags for the mapping
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
 * Split the mapping into smaller chunks so that each update fits
 * into a SDMA IB.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
				      uint32_t gtt_flags,
				      dma_addr_t *pages_addr,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo_va_mapping *mapping,
				      uint32_t flags, uint64_t addr,
				      struct fence **fence)
{
	const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
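	/* each chunk covers at most 64MB of GPU address space */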

	uint64_t src = 0, start = mapping->it.start;
	int r;

	/* normally bo_va->flags only contains the READABLE and WRITEABLE
	 * bits, but just in case we filter the flags here first
	 */
	if (!(mapping->flags & AMDGPU_PTE_READABLE))
		flags &= ~AMDGPU_PTE_READABLE;
	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
		flags &= ~AMDGPU_PTE_WRITEABLE;

	trace_amdgpu_vm_bo_update(mapping);

	if (pages_addr) {
		if (flags == gtt_flags)
			src = adev->gart.table_addr + (addr >> 12) * 8;
		addr = 0;
	}
	addr += mapping->offset;

	if (!pages_addr || src)
		return amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
						   start, mapping->it.last,
						   flags, addr, fence);

	while (start != mapping->it.last + 1) {
		uint64_t last;

		last = min((uint64_t)mapping->it.last, start + max_size - 1);
		r = amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
						start, last, flags, addr,
						fence);
		if (r)
			return r;

		start = last + 1;
		addr += max_size * AMDGPU_GPU_PAGE_SIZE;
	}

	return 0;
}

/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo_va.
 * Returns 0 for success, -EINVAL for failure.
 *
 * Object has to be reserved and the mutex must be locked!
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			struct ttm_mem_reg *mem)
{
	struct amdgpu_vm *vm = bo_va->vm;
	struct amdgpu_bo_va_mapping *mapping;
	dma_addr_t *pages_addr = NULL;
	uint32_t gtt_flags, flags;
	uint64_t addr;
	int r;

	if (mem) {
		struct ttm_dma_tt *ttm;

		addr = (u64)mem->start << PAGE_SHIFT;
		switch (mem->mem_type) {
		case TTM_PL_TT:
			ttm = container_of(bo_va->bo->tbo.ttm, struct
					   ttm_dma_tt, ttm);
			pages_addr = ttm->dma_address;
			break;

		case TTM_PL_VRAM:
			addr += adev->vm_manager.vram_base_offset;
			break;

		default:
			break;
		}
	} else {
		addr = 0;
	}

	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
	gtt_flags = (adev == bo_va->bo->adev) ? flags : 0;
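	/* the GART-copy fast path is only valid if the BO belongs to
	 * this device, so only use its flags as GTT flags in that case
	 */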

	spin_lock(&vm->status_lock);
	if (!list_empty(&bo_va->vm_status))
		list_splice_init(&bo_va->valids, &bo_va->invalids);
	spin_unlock(&vm->status_lock);

	list_for_each_entry(mapping, &bo_va->invalids, list) {
		r = amdgpu_vm_bo_split_mapping(adev, gtt_flags, pages_addr, vm,
					       mapping, flags, addr,
					       &bo_va->last_pt_update);
		if (r)
			return r;
	}

	if (trace_amdgpu_vm_bo_mapping_enabled()) {
		list_for_each_entry(mapping, &bo_va->valids, list)
			trace_amdgpu_vm_bo_mapping(mapping);

		list_for_each_entry(mapping, &bo_va->invalids, list)
			trace_amdgpu_vm_bo_mapping(mapping);
	}

	spin_lock(&vm->status_lock);
	list_splice_init(&bo_va->invalids, &bo_va->valids);
	list_del_init(&bo_va->vm_status);
	if (!mem)
		list_add(&bo_va->vm_status, &vm->cleared);
	spin_unlock(&vm->status_lock);

	return 0;
}

/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Make sure all freed BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
			struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);

		r = amdgpu_vm_bo_split_mapping(adev, 0, NULL, vm, mapping,
					       0, 0, NULL);
		kfree(mapping);
		if (r)
			return r;
	}
	return 0;
}

/**
 * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @sync: sync object to add the last update fence to
 *
 * Make sure all invalidated BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
			     struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = NULL;
	int r = 0;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated,
			struct amdgpu_bo_va, vm_status);
		spin_unlock(&vm->status_lock);

		r = amdgpu_vm_bo_update(adev, bo_va, NULL);
		if (r)
			return r;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	if (bo_va)
		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);

	return r;
}

/**
 * amdgpu_vm_bo_add - add a bo to a specific vm
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: amdgpu buffer object
 *
 * Add @bo into the requested vm.
 * Add @bo to the list of bos associated with the vm
 * Returns newly added bo_va or NULL for failure
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}
	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->valids);
	INIT_LIST_HEAD(&bo_va->invalids);
	INIT_LIST_HEAD(&bo_va->vm_status);

	list_add_tail(&bo_va->bo_list, &bo->va);

	return bo_va;
}

/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: size of the mapping in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t saddr, uint64_t offset,
		     uint64_t size, uint32_t flags)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	struct interval_tree_node *it;
	unsigned last_pfn, pt_idx;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure object fit at this offset */
	eaddr = saddr + size - 1;
	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
		return -EINVAL;

	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
	if (last_pfn >= adev->vm_manager.max_pfn) {
		dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
			last_pfn, adev->vm_manager.max_pfn);
		return -EINVAL;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	it = interval_tree_iter_first(&vm->va, saddr, eaddr);
	if (it) {
		struct amdgpu_bo_va_mapping *tmp;
		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
		/* bo and tmp overlap, invalid addr */
		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
			"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
			tmp->it.start, tmp->it.last + 1);
		r = -EINVAL;
		goto error;
	}

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping) {
		r = -ENOMEM;
		goto error;
	}

	INIT_LIST_HEAD(&mapping->list);
	mapping->it.start = saddr;
	mapping->it.last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	list_add(&mapping->list, &bo_va->invalids);
	interval_tree_insert(&mapping->it, &vm->va);

	/* Make sure the page tables are allocated */
	saddr >>= amdgpu_vm_block_size;
	eaddr >>= amdgpu_vm_block_size;

	BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));

	if (eaddr > vm->max_pde_used)
		vm->max_pde_used = eaddr;

	/* walk over the address space and allocate the page tables */
	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
		struct reservation_object *resv = vm->page_directory->tbo.resv;
		struct amdgpu_bo_list_entry *entry;
		struct amdgpu_bo *pt;

		entry = &vm->page_tables[pt_idx].entry;
		if (entry->robj)
			continue;

		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
				     AMDGPU_GPU_PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
				     NULL, resv, &pt);
		if (r)
			goto error_free;

		/* Keep a reference to the page table to avoid freeing
		 * them up in the wrong order.
		 */
		pt->parent = amdgpu_bo_ref(vm->page_directory);

		r = amdgpu_vm_clear_bo(adev, vm, pt);
		if (r) {
			amdgpu_bo_unref(&pt);
			goto error_free;
		}

		entry->robj = pt;
		entry->priority = 0;
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = true;
		entry->user_pages = NULL;
		vm->page_tables[pt_idx].addr = 0;
	}

	return 0;

error_free:
	list_del(&mapping->list);
	interval_tree_remove(&mapping->it, &vm->va);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
	kfree(mapping);

error:
	return r;
}

/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t saddr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	bool valid = true;

	saddr /= AMDGPU_GPU_PAGE_SIZE;

	list_for_each_entry(mapping, &bo_va->valids, list) {
		if (mapping->it.start == saddr)
			break;
	}

	if (&mapping->list == &bo_va->valids) {
		valid = false;

		list_for_each_entry(mapping, &bo_va->invalids, list) {
			if (mapping->it.start == saddr)
				break;
		}

		if (&mapping->list == &bo_va->invalids)
			return -ENOENT;
	}

	list_del(&mapping->list);
	interval_tree_remove(&mapping->it, &vm->va);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);

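	/* valid mappings still have live PTEs and must be queued up for
	 * clearing; invalid ones were never written to the page tables
	 */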
	if (valid)
		list_add(&mapping->list, &vm->freed);
	else
		kfree(mapping);

	return 0;
}

/**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm.
 *
 * Object has to be reserved!
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_vm *vm = bo_va->vm;

	list_del(&bo_va->bo_list);

	spin_lock(&vm->status_lock);
	list_del(&bo_va->vm_status);
	spin_unlock(&vm->status_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		list_add(&mapping->list, &vm->freed);
	}
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		kfree(mapping);
	}

	fence_put(bo_va->last_pt_update);
	kfree(bo_va);
}

/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 *
 * Mark @bo as invalid.
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		spin_lock(&bo_va->vm->status_lock);
		if (list_empty(&bo_va->vm_status))
			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
		spin_unlock(&bo_va->vm->status_lock);
	}
}

/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Init @vm fields.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
		AMDGPU_VM_PTE_COUNT * 8);
	unsigned pd_size, pd_entries;
	unsigned ring_instance;
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		vm->ids[i] = NULL;
	vm->va = RB_ROOT;
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->invalidated);
	INIT_LIST_HEAD(&vm->cleared);
	INIT_LIST_HEAD(&vm->freed);

	pd_size = amdgpu_vm_directory_size(adev);
	pd_entries = amdgpu_vm_num_pdes(adev);

	/* allocate page table array */
	vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
	if (vm->page_tables == NULL) {
		DRM_ERROR("Cannot allocate memory for page table array\n");
		return -ENOMEM;
	}

	/* create scheduler entity for page table updates */
	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
	ring_instance %= adev->vm_manager.vm_pte_num_rings;
	ring = adev->vm_manager.vm_pte_rings[ring_instance];
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
	r = amd_sched_entity_init(&ring->sched, &vm->entity,
				  rq, amdgpu_sched_jobs);
	if (r)
		return r;

	vm->page_directory_fence = NULL;

	r = amdgpu_bo_create(adev, pd_size, align, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
			     NULL, NULL, &vm->page_directory);
	if (r)
		goto error_free_sched_entity;

	r = amdgpu_bo_reserve(vm->page_directory, false);
	if (r)
		goto error_free_page_directory;

	r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
	amdgpu_bo_unreserve(vm->page_directory);
	if (r)
		goto error_free_page_directory;

	return 0;

error_free_page_directory:
	amdgpu_bo_unref(&vm->page_directory);
	vm->page_directory = NULL;

error_free_sched_entity:
	amd_sched_entity_fini(&ring->sched, &vm->entity);

	return r;
}

/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Tear down @vm.
 * Unbind the VM and remove all bos from the vm bo list
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	int i;

	amd_sched_entity_fini(vm->entity.sched, &vm->entity);

	if (!RB_EMPTY_ROOT(&vm->va)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		kfree(mapping);
	}
	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		list_del(&mapping->list);
		kfree(mapping);
	}

	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
		amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
	drm_free_large(vm->page_tables);

	amdgpu_bo_unref(&vm->page_directory);
	fence_put(vm->page_directory_fence);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_vm_id *id = vm->ids[i];

		if (!id)
			continue;

		atomic_long_cmpxchg(&id->owner, (long)vm, 0);
	}
}

/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
	unsigned i;

	INIT_LIST_HEAD(&adev->vm_manager.ids_lru);

	/* skip over VMID 0, since it is the system VM */
	for (i = 1; i < adev->vm_manager.num_ids; ++i) {
		amdgpu_vm_reset_id(adev, i);
		amdgpu_sync_create(&adev->vm_manager.ids[i].active);
		list_add_tail(&adev->vm_manager.ids[i].list,
			      &adev->vm_manager.ids_lru);
	}

	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
}

/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < AMDGPU_NUM_VM; ++i) {
		struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];

		fence_put(adev->vm_manager.ids[i].first);
		amdgpu_sync_free(&adev->vm_manager.ids[i].active);
		fence_put(id->flushed_updates);
	}
}