/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ids.h"

#include <linux/xarray.h>
#include <linux/dma-fence-array.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * PASID manager
 *
 * PASIDs are global address space identifiers that can be shared
 * between the GPU, an IOMMU and the driver. VMs on different devices
 * may use the same PASID if they share the same address
 * space. Therefore PASIDs are allocated using a cyclic XArray
 * allocator (similar to kernel PID allocation), which naturally
 * delays reuse. VMs are looked up from the PASID per amdgpu_device.
 */

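/*
 * A minimal usage sketch (not a caller from this file; the 16-bit width
 * below is only an assumed example, real callers pass the PASID width
 * supported by their ASIC/IOMMU):
 *
 *	int pasid = amdgpu_pasid_alloc(16);
 *
 *	if (pasid < 0)
 *		return pasid;
 *	...
 *	amdgpu_pasid_free(pasid);
 */
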
static DEFINE_XARRAY_FLAGS(amdgpu_pasid_xa, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ALLOC1);
static u32 amdgpu_pasid_xa_next;

/* Helper to free pasid from a fence callback */
struct amdgpu_pasid_cb {
	struct dma_fence_cb cb;
	u32 pasid;
};

/**
 * amdgpu_pasid_alloc - Allocate a PASID
 * @bits: Maximum width of the PASID in bits, must be at least 1
 *
 * Uses a cyclic XArray allocator (similar to PID allocation), so PASIDs
 * are handed out sequentially with automatic wrap-around, which delays
 * reuse of freed values.
 *
 * Returns a positive integer on success. Returns %-EINVAL if bits==0.
 * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
 * memory allocation failure.
 */
int amdgpu_pasid_alloc(unsigned int bits)
{
	u32 pasid;
	int r;

	if (bits == 0)
		return -EINVAL;

	r = xa_alloc_cyclic_irq(&amdgpu_pasid_xa, &pasid, xa_mk_value(0),
				XA_LIMIT(1, (1U << bits) - 1),
				&amdgpu_pasid_xa_next, GFP_KERNEL);
	if (r < 0)
		return r;

	trace_amdgpu_pasid_allocated(pasid);
	return pasid;
}

/**
 * amdgpu_pasid_free - Free a PASID
 * @pasid: PASID to free
 *
 * Can be called from interrupt context.
 */
void amdgpu_pasid_free(u32 pasid)
{
	unsigned long flags;

	trace_amdgpu_pasid_freed(pasid);

	xa_lock_irqsave(&amdgpu_pasid_xa, flags);
	__xa_erase(&amdgpu_pasid_xa, pasid);
	xa_unlock_irqrestore(&amdgpu_pasid_xa, flags);
}

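/*
 * dma_fence callback used by amdgpu_pasid_free_delayed(): frees the PASID
 * once the fence has signaled, then drops the fence reference and the
 * callback structure itself.
 */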
static void amdgpu_pasid_free_cb(struct dma_fence *fence,
				 struct dma_fence_cb *_cb)
{
	struct amdgpu_pasid_cb *cb =
		container_of(_cb, struct amdgpu_pasid_cb, cb);

	amdgpu_pasid_free(cb->pasid);
	dma_fence_put(fence);
	kfree(cb);
}

/**
 * amdgpu_pasid_free_delayed - free pasid when fences signal
 *
 * @resv: reservation object with the fences to wait for
 * @pasid: pasid to free
 *
 * Free the pasid only after all the fences in resv are signaled.
 */
void amdgpu_pasid_free_delayed(struct dma_resv *resv,
			       u32 pasid)
{
	struct amdgpu_pasid_cb *cb;
	struct dma_fence *fence;
	int r;

	r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
	if (r)
		goto fallback;

	if (!fence) {
		amdgpu_pasid_free(pasid);
		return;
	}

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
		amdgpu_pasid_free(pasid);
	} else {
		cb->pasid = pasid;
		if (dma_fence_add_callback(fence, &cb->cb,
					   amdgpu_pasid_free_cb))
			amdgpu_pasid_free_cb(fence, &cb->cb);
	}

	return;

fallback:
	/* Not enough memory for the delayed delete, as a last resort
	 * block for all the fences to complete.
	 */
	dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
			      false, MAX_SCHEDULE_TIMEOUT);
	amdgpu_pasid_free(pasid);
}

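/*
 * Sketch of the intended calling pattern (a hypothetical caller, not code
 * from this file): tear down a VM but keep its PASID alive until the last
 * use of the page directory has signaled.
 *
 *	struct dma_resv *resv = vm_root_bo->tbo.base.resv;
 *
 *	if (pasid)
 *		amdgpu_pasid_free_delayed(resv, pasid);
 *
 * If no fence is pending the PASID is freed immediately; otherwise a fence
 * callback (or, on allocation failure, a blocking wait) defers the free.
 */
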
/*
 * VMID manager
 *
 * VMIDs are per-VMHUB identifiers used for page table handling.
 */

/**
 * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if a GPU reset occurred since the last use of the VMID.
 */
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
			       struct amdgpu_vmid *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}

/* Check if we need to switch to another set of resources */
static bool amdgpu_vmid_gds_switch_needed(struct amdgpu_vmid *id,
					  struct amdgpu_job *job)
{
	return id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size;
}

/* Check if the id is compatible with the job */
static bool amdgpu_vmid_compatible(struct amdgpu_vmid *id,
				   struct amdgpu_job *job)
{
	return id->pd_gpu_addr == job->vm_pd_addr &&
		!amdgpu_vmid_gds_switch_needed(id, job);
}

/**
 * amdgpu_vmid_grab_idle - grab idle VMID
 *
 * @ring: ring we want to submit job to
 * @idle: resulting idle VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Try to find an idle VMID; if none is idle, return in @fence a fence to
 * wait for before trying again. Always returns 0.
 */
static int amdgpu_vmid_grab_idle(struct amdgpu_ring *ring,
				 struct amdgpu_vmid **idle,
				 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	/* If anybody is waiting for a VMID let everybody wait for fairness */
	if (!dma_fence_is_signaled(ring->vmid_wait)) {
		*fence = dma_fence_get(ring->vmid_wait);
		return 0;
	}

	/* Check if we have an idle VMID */
	list_for_each_entry_reverse((*idle), &id_mgr->ids_lru, list) {
		/* Don't use per engine and per process VMID at the same time */
		struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
			NULL : ring;

		*fence = amdgpu_sync_peek_fence(&(*idle)->active, r);
		if (!(*fence))
			return 0;
	}

	/*
	 * If we can't find an idle VMID to use, wait on a fence from the least
	 * recently used in the hope that it will be available soon.
	 */
	*idle = NULL;
	dma_fence_put(ring->vmid_wait);
	ring->vmid_wait = dma_fence_get(*fence);

	/* This is the reference we return */
	dma_fence_get(*fence);
	return 0;
}

/**
 * amdgpu_vmid_grab_reserved - try to assign reserved VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job who wants to use the VMID
 * @id: resulting VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Try to assign a reserved VMID.
 */
static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
				     struct amdgpu_ring *ring,
				     struct amdgpu_job *job,
				     struct amdgpu_vmid **id,
				     struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	uint64_t fence_context = adev->fence_context + ring->idx;
	bool needs_flush = vm->use_cpu_for_update;
	uint64_t updates = amdgpu_vm_tlb_seq(vm);
	int r;

	*id = vm->reserved_vmid[vmhub];
	if ((*id)->owner != vm->immediate.fence_context ||
	    !amdgpu_vmid_compatible(*id, job) ||
	    (*id)->flushed_updates < updates ||
	    !(*id)->last_flush ||
	    ((*id)->last_flush->context != fence_context &&
	     !dma_fence_is_signaled((*id)->last_flush)))
		needs_flush = true;

	if ((*id)->owner != vm->immediate.fence_context ||
	    (!adev->vm_manager.concurrent_flush && needs_flush)) {
		struct dma_fence *tmp;

		/* Don't use per engine and per process VMID at the
		 * same time
		 */
		if (adev->vm_manager.concurrent_flush)
			ring = NULL;

		/* to prevent one context being starved by another */
		(*id)->pd_gpu_addr = 0;
		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
		if (tmp) {
			*id = NULL;
			*fence = dma_fence_get(tmp);
			return 0;
		}
	}

	/* Good, we can use this VMID. Remember this submission as
	 * user of the VMID.
	 */
	r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished,
			      GFP_ATOMIC);
	if (r)
		return r;

	job->vm_needs_flush = needs_flush;
	job->spm_update_needed = true;
	return 0;
}

/**
 * amdgpu_vmid_grab_used - try to reuse a VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job who wants to use the VMID
 * @id: resulting VMID
 *
 * Try to reuse a VMID for this submission.
 */
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_job *job,
				 struct amdgpu_vmid **id)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	uint64_t updates = amdgpu_vm_tlb_seq(vm);
	int r;

	job->vm_needs_flush = vm->use_cpu_for_update;

	/* Check if we can use a VMID already assigned to this VM */
	list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
		bool needs_flush = vm->use_cpu_for_update;

		/* Check all the prerequisites to using this VMID */
		if ((*id)->owner != vm->immediate.fence_context)
			continue;

		if (!amdgpu_vmid_compatible(*id, job))
			continue;

		if (!(*id)->last_flush ||
		    ((*id)->last_flush->context != fence_context &&
		     !dma_fence_is_signaled((*id)->last_flush)))
			needs_flush = true;

		if ((*id)->flushed_updates < updates)
			needs_flush = true;

		if (needs_flush && !adev->vm_manager.concurrent_flush)
			continue;

		/* Good, we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
		r = amdgpu_sync_fence(&(*id)->active,
				      &job->base.s_fence->finished,
				      GFP_ATOMIC);
		if (r)
			return r;

		job->vm_needs_flush |= needs_flush;
		return 0;
	}

	*id = NULL;
	return 0;
}

/**
 * amdgpu_vmid_grab - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job who wants to use the VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		     struct amdgpu_job *job, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *idle = NULL;
	struct amdgpu_vmid *id = NULL;
	int r = 0;

	mutex_lock(&id_mgr->lock);
	r = amdgpu_vmid_grab_idle(ring, &idle, fence);
	if (r || !idle)
		goto error;

	if (amdgpu_vmid_uses_reserved(vm, vmhub)) {
		r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
		if (r || !id)
			goto error;
	} else {
		r = amdgpu_vmid_grab_used(vm, ring, job, &id);
		if (r)
			goto error;

		if (!id) {
			/* Still no ID to use? Then use the idle one found earlier */
			id = idle;

			/* Remember this submission as user of the VMID */
			r = amdgpu_sync_fence(&id->active,
					      &job->base.s_fence->finished,
					      GFP_ATOMIC);
			if (r)
				goto error;

			job->vm_needs_flush = true;
		}

		list_move_tail(&id->list, &id_mgr->ids_lru);
	}

	job->gds_switch_needed = amdgpu_vmid_gds_switch_needed(id, job);
	if (job->vm_needs_flush) {
		id->flushed_updates = amdgpu_vm_tlb_seq(vm);
		dma_fence_put(id->last_flush);
		id->last_flush = NULL;
	}
	job->vmid = id - id_mgr->ids;
	job->pasid = vm->pasid;

	id->gds_base = job->gds_base;
	id->gds_size = job->gds_size;
	id->gws_base = job->gws_base;
	id->gws_size = job->gws_size;
	id->oa_base = job->oa_base;
	id->oa_size = job->oa_size;
	id->pd_gpu_addr = job->vm_pd_addr;
	id->owner = vm->immediate.fence_context;

	trace_amdgpu_vm_grab_id(vm, ring, job);

error:
	mutex_unlock(&id_mgr->lock);
	return r;
}

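/*
 * Rough calling sequence during job submission (a simplified sketch, not
 * the actual scheduler code; vm, ring and job are assumed to be set up by
 * the caller):
 *
 *	struct dma_fence *fence = NULL;
 *	int r;
 *
 *	r = amdgpu_vmid_grab(vm, ring, job, &fence);
 *	if (r)
 *		return r;
 *	if (fence) {
 *		// no VMID available yet, wait for fence and try again later
 *	}
 *
 * On success job->vmid, job->vm_needs_flush and job->gds_switch_needed
 * describe what the VM flush emitted before the IBs has to do.
 */
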
/*
 * amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID
 * @vm: the VM to check
 * @vmhub: the VMHUB which will be used
 *
 * Returns: True if the VM will use a reserved VMID.
 */
bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
{
	return vm->reserved_vmid[vmhub];
}

/*
 * amdgpu_vmid_alloc_reserved - reserve a specific VMID for this vm
 * @adev: amdgpu device structure
 * @vm: the VM to reserve an ID for
 * @vmhub: the VMHUB which should be used
 *
 * Mostly used to have a reserved VMID for debugging and SPM.
 *
 * Returns: 0 for success, -ENOENT if an ID is already reserved.
 */
int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id;
	int r = 0;

	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub])
		goto unlock;
	if (id_mgr->reserved_vmid) {
		r = -ENOENT;
		goto unlock;
	}
	/* Remove from normal round robin handling */
	id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
	list_del_init(&id->list);
	vm->reserved_vmid[vmhub] = id;
	id_mgr->reserved_vmid = true;
	mutex_unlock(&id_mgr->lock);

	return 0;
unlock:
	mutex_unlock(&id_mgr->lock);
	return r;
}

/*
 * amdgpu_vmid_free_reserved - free up a reserved VMID again
 * @adev: amdgpu device structure
 * @vm: the VM with the reserved ID
 * @vmhub: the VMHUB which should be used
 */
void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub]) {
		list_add(&vm->reserved_vmid[vmhub]->list,
			&id_mgr->ids_lru);
		vm->reserved_vmid[vmhub] = NULL;
		id_mgr->reserved_vmid = false;
	}
	mutex_unlock(&id_mgr->lock);
}

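/*
 * The reserve/unreserve pair is expected to bracket the feature that needs
 * a stable VMID (a sketch with an assumed GFXHUB(0) hub, not a caller from
 * this file):
 *
 *	r = amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0));
 *	if (r)
 *		return r;
 *	... SPM or debugging work that relies on the reserved VMID ...
 *	amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB(0));
 */
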
/**
 * amdgpu_vmid_reset - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: vmhub type
 * @vmid: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force a switch on the next flush.
 */
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
		       unsigned vmid)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[vmid];

	mutex_lock(&id_mgr->lock);
	id->owner = 0;
	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
	mutex_unlock(&id_mgr->lock);
}

/**
 * amdgpu_vmid_reset_all - reset all VMIDs to zero
 *
 * @adev: amdgpu device structure
 *
 * Reset all VMIDs to force a flush on next use.
 */
void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		for (j = 1; j < id_mgr->num_ids; ++j)
			amdgpu_vmid_reset(adev, i, j);
	}
}

/**
 * amdgpu_vmid_mgr_init - init the VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VMID manager structures.
 */
void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_init(&id_mgr->lock);
		INIT_LIST_HEAD(&id_mgr->ids_lru);

		/* for GC <10, SDMA uses MMHUB so use first_kfd_vmid for both GC and MM */
		if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0))
			/* manage only VMIDs not used by KFD */
			id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
		else if (AMDGPU_IS_MMHUB0(i) ||
			 AMDGPU_IS_MMHUB1(i))
			id_mgr->num_ids = 16;
		else
			/* manage only VMIDs not used by KFD */
			id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;

		/* skip over VMID 0, since it is the system VM */
		for (j = 1; j < id_mgr->num_ids; ++j) {
			amdgpu_vmid_reset(adev, i, j);
			amdgpu_sync_create(&id_mgr->ids[j].active);
			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
		}
	}
}

/**
 * amdgpu_vmid_mgr_fini - cleanup the VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Clean up the VMID manager and free resources.
 */
void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_destroy(&id_mgr->lock);
		for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
			struct amdgpu_vmid *id = &id_mgr->ids[j];

			amdgpu_sync_free(&id->active);
			dma_fence_put(id->last_flush);
			dma_fence_put(id->pasid_mapping);
		}
	}
}

/**
 * amdgpu_pasid_mgr_cleanup - cleanup PASID manager
 *
 * Destroy the PASID XArray.
 */
void amdgpu_pasid_mgr_cleanup(void)
{
	xa_destroy(&amdgpu_pasid_xa);
}
640