xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c (revision 3fd6c59042dbba50391e30862beac979491145fe)
/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ids.h"

#include <linux/idr.h>
#include <linux/dma-fence-array.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * PASID manager
 *
 * PASIDs are global address space identifiers that can be shared
 * between the GPU, an IOMMU and the driver. VMs on different devices
 * may use the same PASID if they share the same address
 * space. Therefore PASIDs are allocated using a global IDA. VMs are
 * looked up from the PASID per amdgpu_device.
 */
static DEFINE_IDA(amdgpu_pasid_ida);

/* Helper to free pasid from a fence callback */
struct amdgpu_pasid_cb {
	struct dma_fence_cb cb;
	u32 pasid;
};

/**
 * amdgpu_pasid_alloc - Allocate a PASID
 * @bits: Maximum width of the PASID in bits, must be at least 1
 *
 * Allocates a PASID of the given width while keeping smaller PASIDs
 * available if possible.
 *
 * Returns a positive integer on success. Returns %-EINVAL if bits==0.
 * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
 * memory allocation failure.
 */
int amdgpu_pasid_alloc(unsigned int bits)
{
	int pasid = -EINVAL;

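	/* Try the top half of the widest usable range first, i.e.
	 * [1 << (bits - 1), (1 << bits) - 1], and shrink the range one bit
	 * at a time on -ENOSPC so that smaller PASID values stay available
	 * for callers that request a narrower width.
	 */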
	for (bits = min(bits, 31U); bits > 0; bits--) {
		pasid = ida_alloc_range(&amdgpu_pasid_ida, 1U << (bits - 1),
					(1U << bits) - 1, GFP_KERNEL);
		if (pasid != -ENOSPC)
			break;
	}

	if (pasid >= 0)
		trace_amdgpu_pasid_allocated(pasid);

	return pasid;
}

/**
 * amdgpu_pasid_free - Free a PASID
 * @pasid: PASID to free
 */
void amdgpu_pasid_free(u32 pasid)
{
	trace_amdgpu_pasid_freed(pasid);
	ida_free(&amdgpu_pasid_ida, pasid);
}

static void amdgpu_pasid_free_cb(struct dma_fence *fence,
				 struct dma_fence_cb *_cb)
{
	struct amdgpu_pasid_cb *cb =
		container_of(_cb, struct amdgpu_pasid_cb, cb);

	amdgpu_pasid_free(cb->pasid);
	dma_fence_put(fence);
	kfree(cb);
}

/**
 * amdgpu_pasid_free_delayed - free pasid when fences signal
 *
 * @resv: reservation object with the fences to wait for
 * @pasid: pasid to free
 *
 * Free the pasid only after all the fences in resv are signaled.
 */
void amdgpu_pasid_free_delayed(struct dma_resv *resv,
			       u32 pasid)
{
	struct amdgpu_pasid_cb *cb;
	struct dma_fence *fence;
	int r;

	r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
	if (r)
		goto fallback;

	if (!fence) {
		amdgpu_pasid_free(pasid);
		return;
	}

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
		amdgpu_pasid_free(pasid);
	} else {
		cb->pasid = pasid;
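		/* dma_fence_add_callback() returns -ENOENT if the fence is
		 * already signaled; run the callback directly in that case so
		 * the PASID is still freed and the fence reference dropped.
		 */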
		if (dma_fence_add_callback(fence, &cb->cb,
					   amdgpu_pasid_free_cb))
			amdgpu_pasid_free_cb(fence, &cb->cb);
	}

	return;

fallback:
	/* Not enough memory for the delayed delete, as a last resort
	 * block until all the fences have completed.
	 */
	dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
			      false, MAX_SCHEDULE_TIMEOUT);
	amdgpu_pasid_free(pasid);
}
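
/* Illustrative only (not part of this file): a rough sketch of how a caller
 * might pair these helpers; the 16-bit width and the "resv" object are
 * placeholders, not taken from this file.
 *
 *	int pasid = amdgpu_pasid_alloc(16);
 *
 *	if (pasid < 0)
 *		return pasid;
 *	...
 *	amdgpu_pasid_free_delayed(resv, pasid);
 */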

/*
 * VMID manager
 *
 * VMIDs are per-VMHUB identifiers used for page table handling.
 */

/**
 * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if GPU reset occurred since last use of the VMID.
 */
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
			       struct amdgpu_vmid *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}

/* Check if we need to switch to another set of resources */
static bool amdgpu_vmid_gds_switch_needed(struct amdgpu_vmid *id,
					  struct amdgpu_job *job)
{
	return id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size;
}

/* Check if the id is compatible with the job */
static bool amdgpu_vmid_compatible(struct amdgpu_vmid *id,
				   struct amdgpu_job *job)
{
	return  id->pd_gpu_addr == job->vm_pd_addr &&
		!amdgpu_vmid_gds_switch_needed(id, job);
}

/**
 * amdgpu_vmid_grab_idle - grab idle VMID
 *
 * @ring: ring we want to submit job to
 * @idle: resulting idle VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Try to find an idle VMID; if none is idle, set @fence to a fence that must
 * signal before retrying. Returns -ENOMEM when we are out of memory.
 */
static int amdgpu_vmid_grab_idle(struct amdgpu_ring *ring,
				 struct amdgpu_vmid **idle,
				 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct dma_fence **fences;
	unsigned i;

	if (!dma_fence_is_signaled(ring->vmid_wait)) {
		*fence = dma_fence_get(ring->vmid_wait);
		return 0;
	}

	fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* Check if we have an idle VMID */
	i = 0;
	list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
		/* Don't use per engine and per process VMID at the same time */
		struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
			NULL : ring;

		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
		if (!fences[i])
			break;
		++i;
	}

	/* If we can't find an idle VMID to use, wait till one becomes available */
	if (&(*idle)->list == &id_mgr->ids_lru) {
		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
		struct dma_fence_array *array;
		unsigned j;

		*idle = NULL;
		for (j = 0; j < i; ++j)
			dma_fence_get(fences[j]);

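		/* The fence array takes ownership of the fences[] buffer and
		 * of the references taken above; it drops them when the
		 * array itself is released.
		 */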
		array = dma_fence_array_create(i, fences, fence_context,
					       seqno, true);
		if (!array) {
			for (j = 0; j < i; ++j)
				dma_fence_put(fences[j]);
			kfree(fences);
			return -ENOMEM;
		}

		*fence = dma_fence_get(&array->base);
		dma_fence_put(ring->vmid_wait);
		ring->vmid_wait = &array->base;
		return 0;
	}
	kfree(fences);

	return 0;
}

/**
 * amdgpu_vmid_grab_reserved - try to assign reserved VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job who wants to use the VMID
 * @id: resulting VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Try to assign a reserved VMID.
 */
static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
				     struct amdgpu_ring *ring,
				     struct amdgpu_job *job,
				     struct amdgpu_vmid **id,
				     struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	bool needs_flush = vm->use_cpu_for_update;
	uint64_t updates = amdgpu_vm_tlb_seq(vm);
	int r;

	*id = id_mgr->reserved;
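	/* The reserved VMID can be used directly only if this VM already
	 * owns it, the job is compatible with it, it has seen all TLB
	 * updates, and its last flush either came from this ring's fence
	 * context or has already signaled; otherwise revalidate it below.
	 */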
	if ((*id)->owner != vm->immediate.fence_context ||
	    !amdgpu_vmid_compatible(*id, job) ||
	    (*id)->flushed_updates < updates ||
	    !(*id)->last_flush ||
	    ((*id)->last_flush->context != fence_context &&
	     !dma_fence_is_signaled((*id)->last_flush))) {
		struct dma_fence *tmp;

		/* Wait for the gang to be assembled before using a
		 * reserved VMID, otherwise the gang could deadlock.
		 */
		tmp = amdgpu_device_get_gang(adev);
		if (!dma_fence_is_signaled(tmp) && tmp != job->gang_submit) {
			*id = NULL;
			*fence = tmp;
			return 0;
		}
		dma_fence_put(tmp);

		/* Make sure the id is owned by the gang before proceeding */
		if (!job->gang_submit ||
		    (*id)->owner != vm->immediate.fence_context) {

			/* Don't use per engine and per process VMID at the
			 * same time
			 */
			if (adev->vm_manager.concurrent_flush)
				ring = NULL;

			/* to prevent one context from being starved by another context */
			(*id)->pd_gpu_addr = 0;
			tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
			if (tmp) {
				*id = NULL;
				*fence = dma_fence_get(tmp);
				return 0;
			}
		}
		needs_flush = true;
	}

	/* Good, we can use this VMID. Remember this submission as
	 * user of the VMID.
	 */
	r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished);
	if (r)
		return r;

	job->vm_needs_flush = needs_flush;
	job->spm_update_needed = true;
	return 0;
}

/**
 * amdgpu_vmid_grab_used - try to reuse a VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job who wants to use the VMID
 * @id: resulting VMID
 *
 * Try to reuse a VMID for this submission.
 */
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_job *job,
				 struct amdgpu_vmid **id)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	uint64_t updates = amdgpu_vm_tlb_seq(vm);
	int r;

	job->vm_needs_flush = vm->use_cpu_for_update;

	/* Check if we can use a VMID already assigned to this VM */
	list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
		bool needs_flush = vm->use_cpu_for_update;

		/* Check all the prerequisites to using this VMID */
		if ((*id)->owner != vm->immediate.fence_context)
			continue;

		if (!amdgpu_vmid_compatible(*id, job))
			continue;

		if (!(*id)->last_flush ||
		    ((*id)->last_flush->context != fence_context &&
		     !dma_fence_is_signaled((*id)->last_flush)))
			needs_flush = true;

		if ((*id)->flushed_updates < updates)
			needs_flush = true;

		if (needs_flush && !adev->vm_manager.concurrent_flush)
			continue;

		/* Good, we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
		r = amdgpu_sync_fence(&(*id)->active,
				      &job->base.s_fence->finished);
		if (r)
			return r;

		job->vm_needs_flush |= needs_flush;
		return 0;
	}

	*id = NULL;
	return 0;
}

/**
 * amdgpu_vmid_grab - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job who wants to use the VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		     struct amdgpu_job *job, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *idle = NULL;
	struct amdgpu_vmid *id = NULL;
	int r = 0;

	mutex_lock(&id_mgr->lock);
	r = amdgpu_vmid_grab_idle(ring, &idle, fence);
	if (r || !idle)
		goto error;

	if (amdgpu_vmid_uses_reserved(adev, vm, vmhub)) {
		r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
		if (r || !id)
			goto error;
	} else {
		r = amdgpu_vmid_grab_used(vm, ring, job, &id);
		if (r)
			goto error;

		if (!id) {
			/* Still no ID to use? Then use the idle one found earlier */
			id = idle;

			/* Remember this submission as user of the VMID */
			r = amdgpu_sync_fence(&id->active,
					      &job->base.s_fence->finished);
			if (r)
				goto error;

			job->vm_needs_flush = true;
		}

		list_move_tail(&id->list, &id_mgr->ids_lru);
	}

	job->gds_switch_needed = amdgpu_vmid_gds_switch_needed(id, job);
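	/* A needed flush invalidates the previous flush fence: drop
	 * last_flush so that the next VM flush on this ring is recorded
	 * fresh for this VMID.
	 */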
	if (job->vm_needs_flush) {
		id->flushed_updates = amdgpu_vm_tlb_seq(vm);
		dma_fence_put(id->last_flush);
		id->last_flush = NULL;
	}
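	/* The VMID handed to the job is simply the index of the id within
	 * this hub's ids[] array.
	 */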
	job->vmid = id - id_mgr->ids;
	job->pasid = vm->pasid;

	id->gds_base = job->gds_base;
	id->gds_size = job->gds_size;
	id->gws_base = job->gws_base;
	id->gws_size = job->gws_size;
	id->oa_base = job->oa_base;
	id->oa_size = job->oa_size;
	id->pd_gpu_addr = job->vm_pd_addr;
	id->owner = vm->immediate.fence_context;

	trace_amdgpu_vm_grab_id(vm, ring, job);

error:
	mutex_unlock(&id_mgr->lock);
	return r;
}
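
/* Illustrative only (not part of this file): when no VMID could be grabbed,
 * *fence is set and the caller is expected to wait for it (or schedule on
 * it) before retrying, roughly along these lines (names are placeholders,
 * error handling omitted):
 *
 *	struct dma_fence *fence = NULL;
 *
 *	r = amdgpu_vmid_grab(vm, ring, job, &fence);
 *	if (!r && fence) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *		... retry amdgpu_vmid_grab() ...
 *	}
 */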

/*
 * amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID
 * @adev: amdgpu_device pointer
 * @vm: the VM to check
 * @vmhub: the VMHUB which will be used
 *
 * Returns: True if the VM will use a reserved VMID.
 */
bool amdgpu_vmid_uses_reserved(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm, unsigned int vmhub)
{
	return vm->reserved_vmid[vmhub] ||
		(adev->enforce_isolation[(vm->root.bo->xcp_id != AMDGPU_XCP_NO_PARTITION) ?
					 vm->root.bo->xcp_id : 0] &&
		 AMDGPU_IS_GFXHUB(vmhub));
}

int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);

	++id_mgr->reserved_use_count;
	if (!id_mgr->reserved) {
		struct amdgpu_vmid *id;

		id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
				      list);
		/* Remove from normal round robin handling */
		list_del_init(&id->list);
		id_mgr->reserved = id;
	}

	mutex_unlock(&id_mgr->lock);
	return 0;
}
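
/* Reservations are reference counted: every amdgpu_vmid_alloc_reserved()
 * call must be balanced by amdgpu_vmid_free_reserved() on the same vmhub;
 * the reserved VMID only returns to the LRU when the count drops to zero.
 */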

void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);
	if (!--id_mgr->reserved_use_count) {
		/* give the reserved ID back to normal round robin */
		list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
		id_mgr->reserved = NULL;
	}

	mutex_unlock(&id_mgr->lock);
}

/**
 * amdgpu_vmid_reset - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: vmhub type
 * @vmid: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force a switch on the next flush.
 */
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
		       unsigned vmid)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[vmid];

	mutex_lock(&id_mgr->lock);
	id->owner = 0;
	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
	mutex_unlock(&id_mgr->lock);
}

/**
 * amdgpu_vmid_reset_all - reset all VMIDs to zero
 *
 * @adev: amdgpu device structure
 *
 * Reset all VMIDs to force a flush on next use.
 */
void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		for (j = 1; j < id_mgr->num_ids; ++j)
			amdgpu_vmid_reset(adev, i, j);
	}
}

/**
 * amdgpu_vmid_mgr_init - init the VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_init(&id_mgr->lock);
		INIT_LIST_HEAD(&id_mgr->ids_lru);
		id_mgr->reserved_use_count = 0;

		/* manage only VMIDs not used by KFD */
		id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;

		/* skip over VMID 0, since it is the system VM */
		for (j = 1; j < id_mgr->num_ids; ++j) {
			amdgpu_vmid_reset(adev, i, j);
			amdgpu_sync_create(&id_mgr->ids[j].active);
			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
		}
	}
	/* alloc a default reserved vmid to enforce isolation */
	for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
		if (adev->enforce_isolation[i])
			amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
	}
}

/**
 * amdgpu_vmid_mgr_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_destroy(&id_mgr->lock);
		for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
			struct amdgpu_vmid *id = &id_mgr->ids[j];

			amdgpu_sync_free(&id->active);
			dma_fence_put(id->last_flush);
			dma_fence_put(id->pasid_mapping);
		}
	}
}