// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/list.h>
#include "kfd_device_queue_manager.h"
#include "kfd_priv.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_reset.h"

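/*
 * get_queue_by_qid - look up the process queue node for a queue ID.
 * Matches @qid against both user queues (pqn->q) and kernel/DIQ queues
 * (pqn->kq). Returns NULL if no queue in this process uses the ID.
 */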
static inline struct process_queue_node *get_queue_by_qid(
			struct process_queue_manager *pqm, unsigned int qid)
{
	struct process_queue_node *pqn;

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if ((pqn->q && pqn->q->properties.queue_id == qid) ||
		    (pqn->kq && pqn->kq->queue->properties.queue_id == qid))
			return pqn;
	}

	return NULL;
}

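/*
 * assign_queue_slot_by_qid - claim a specific queue slot, used on the CRIU
 * restore path where the new queue must keep its checkpointed queue ID.
 * Returns -EINVAL for an out-of-range ID and -ENOSPC if the slot is taken.
 */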
static int assign_queue_slot_by_qid(struct process_queue_manager *pqm,
				    unsigned int qid)
{
	if (qid >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
		return -EINVAL;

	if (__test_and_set_bit(qid, pqm->queue_slot_bitmap)) {
		pr_err("Cannot create new queue because requested qid(%u) is in use\n", qid);
		return -ENOSPC;
	}

	return 0;
}

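/*
 * find_available_queue_slot - allocate the lowest free queue ID from the
 * per-process bitmap and mark it used. Returns -ENOMEM when all
 * KFD_MAX_NUM_OF_QUEUES_PER_PROCESS slots are occupied.
 */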
static int find_available_queue_slot(struct process_queue_manager *pqm,
					unsigned int *qid)
{
	unsigned long found;

	found = find_first_zero_bit(pqm->queue_slot_bitmap,
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	pr_debug("The new slot id %lu\n", found);

	if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
		pr_info("Cannot open more queues for process with pasid 0x%x\n",
				pqm->process->pasid);
		return -ENOMEM;
	}

	set_bit(found, pqm->queue_slot_bitmap);
	*qid = found;

	return 0;
}

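/*
 * kfd_process_dequeue_from_device - evict all of a process's queues from one
 * device. With MES enabled, also flush the shader debugger state, but only
 * if no GPU reset is in flight (hence the reset domain trylock). Idempotent
 * via pdd->already_dequeued.
 */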
void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
{
	struct kfd_node *dev = pdd->dev;

	if (pdd->already_dequeued)
		return;

	dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
	if (dev->kfd->shared_resources.enable_mes &&
	    down_read_trylock(&dev->adev->reset_domain->sem)) {
		amdgpu_mes_flush_shader_debugger(dev->adev,
						 pdd->proc_ctx_gpu_addr);
		up_read(&dev->adev->reset_domain->sem);
	}
	pdd->already_dequeued = true;
}

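/*
 * pqm_set_gws - attach (gws != NULL) or detach (gws == NULL) the global wave
 * sync resource to the queue identified by @qid, then update the MQD so the
 * change takes effect. At most one queue per process may own GWS.
 */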
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
			void *gws)
{
	struct mqd_update_info minfo = {0};
	struct kfd_node *dev = NULL;
	struct process_queue_node *pqn;
	struct kfd_process_device *pdd;
	struct kgd_mem *mem = NULL;
	int ret;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_err("Queue id does not match any known queue\n");
		return -EINVAL;
	}

	if (pqn->q)
		dev = pqn->q->device;
	if (WARN_ON(!dev))
		return -ENODEV;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -EINVAL;
	}

	/* Only one queue per process can have GWS assigned */
	if (gws && pdd->qpd.num_gws)
		return -EBUSY;

	if (!gws && pdd->qpd.num_gws == 0)
		return -EINVAL;

	if ((KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3) &&
	     KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 4) &&
	     KFD_GC_VERSION(dev) != IP_VERSION(9, 5, 0)) &&
	    !dev->kfd->shared_resources.enable_mes) {
		if (gws)
			ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info,
				gws, &mem);
		else
			ret = amdgpu_amdkfd_remove_gws_from_process(pdd->process->kgd_process_info,
				pqn->q->gws);
		if (unlikely(ret))
			return ret;
		pqn->q->gws = mem;
	} else {
		/*
		 * Intentionally set GWS to a non-NULL value
		 * for devices that do not use GWS for global wave
		 * synchronization but require the formality
		 * of setting GWS for cooperative groups.
		 */
		pqn->q->gws = gws ? ERR_PTR(-ENOMEM) : NULL;
	}

	pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0;
	minfo.update_flag = gws ? UPDATE_FLAG_IS_GWS : 0;

	return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q, &minfo);
}

void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		kfd_process_dequeue_from_device(p->pdds[i]);
}

int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
{
	INIT_LIST_HEAD(&pqm->queues);
	pqm->queue_slot_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
					       GFP_KERNEL);
	if (!pqm->queue_slot_bitmap)
		return -ENOMEM;
	pqm->process = p;

	return 0;
}

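/*
 * pqm_clean_queue_resource - release per-queue device resources: drop any
 * GWS assignment and, with MES enabled, free the gang context BO and the
 * GART mapping of the wptr BO. Called after the queue has been destroyed
 * in DQM, and on process teardown.
 */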
static void pqm_clean_queue_resource(struct process_queue_manager *pqm,
				     struct process_queue_node *pqn)
{
	struct kfd_node *dev;
	struct kfd_process_device *pdd;

	dev = pqn->q->device;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return;
	}

	if (pqn->q->gws) {
		if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
		    KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 4) &&
		    KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 5, 0) &&
		    !dev->kfd->shared_resources.enable_mes)
			amdgpu_amdkfd_remove_gws_from_process(
				pqm->process->kgd_process_info, pqn->q->gws);
		pdd->qpd.num_gws = 0;
	}

	if (dev->kfd->shared_resources.enable_mes) {
		amdgpu_amdkfd_free_gtt_mem(dev->adev, &pqn->q->gang_ctx_bo);
		amdgpu_amdkfd_free_gtt_mem(dev->adev, (void **)&pqn->q->wptr_bo_gart);
	}
}

void pqm_uninit(struct process_queue_manager *pqm)
{
	struct process_queue_node *pqn, *next;

	list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
		if (pqn->q) {
			struct kfd_process_device *pdd = kfd_get_process_device_data(pqn->q->device,
										     pqm->process);
			if (pdd) {
				kfd_queue_unref_bo_vas(pdd, &pqn->q->properties);
				kfd_queue_release_buffers(pdd, &pqn->q->properties);
			} else {
				WARN_ON(!pdd);
			}
			pqm_clean_queue_resource(pqm, pqn);
		}

		kfd_procfs_del_queue(pqn->q);
		uninit_queue(pqn->q);
		list_del(&pqn->process_queue_list);
		kfree(pqn);
	}

	bitmap_free(pqm->queue_slot_bitmap);
	pqm->queue_slot_bitmap = NULL;
}

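/*
 * init_user_queue - allocate and initialize a user-mode queue structure.
 * With MES enabled this also allocates the gang context BO and, for MES API
 * v2+, maps the user wptr BO into GART so MES can poll the write pointer of
 * unmapped (oversubscribed) queues.
 */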
static int init_user_queue(struct process_queue_manager *pqm,
				struct kfd_node *dev, struct queue **q,
				struct queue_properties *q_properties,
				unsigned int qid)
{
	int retval;

	/* Doorbell initialized in user space */
	q_properties->doorbell_ptr = NULL;
	q_properties->exception_status = KFD_EC_MASK(EC_QUEUE_NEW);

	/* Let DQM handle it */
	q_properties->vmid = 0;
	q_properties->queue_id = qid;

	retval = init_queue(q, q_properties);
	if (retval != 0)
		return retval;

	(*q)->device = dev;
	(*q)->process = pqm->process;

	if (dev->kfd->shared_resources.enable_mes) {
		retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
						AMDGPU_MES_GANG_CTX_SIZE,
						&(*q)->gang_ctx_bo,
						&(*q)->gang_ctx_gpu_addr,
						&(*q)->gang_ctx_cpu_ptr,
						false);
		if (retval) {
			pr_err("failed to allocate gang context bo\n");
			goto cleanup;
		}
		memset((*q)->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

		/* Starting with GFX11, wptr BOs must be mapped to GART so that MES
		 * can detect work on unmapped queues for usermode queue
		 * oversubscription (no aggregated doorbell)
		 */
		if (((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK)
		    >> AMDGPU_MES_API_VERSION_SHIFT) >= 2) {
			if (dev->adev != amdgpu_ttm_adev(q_properties->wptr_bo->tbo.bdev)) {
				pr_err("Queue memory allocated to wrong device\n");
				retval = -EINVAL;
				goto free_gang_ctx_bo;
			}

			retval = amdgpu_amdkfd_map_gtt_bo_to_gart(q_properties->wptr_bo,
								  &(*q)->wptr_bo_gart);
			if (retval) {
				pr_err("Failed to map wptr bo to GART\n");
				goto free_gang_ctx_bo;
			}
		}
	}

	pr_debug("PQM After init queue\n");
	return 0;

free_gang_ctx_bo:
	amdgpu_amdkfd_free_gtt_mem(dev->adev, &(*q)->gang_ctx_bo);
cleanup:
	uninit_queue(*q);
	*q = NULL;
	return retval;
}

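/*
 * pqm_create_queue - create a user compute/SDMA queue or a DIQ for the
 * process on @dev. Allocates a queue ID (or claims @q_data->q_id on the
 * CRIU restore path), registers the process with DQM when its first queue
 * appears, and optionally returns the doorbell byte offset within the
 * process doorbell page through @p_doorbell_offset_in_process.
 */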
int pqm_create_queue(struct process_queue_manager *pqm,
			    struct kfd_node *dev,
			    struct queue_properties *properties,
			    unsigned int *qid,
			    const struct kfd_criu_queue_priv_data *q_data,
			    const void *restore_mqd,
			    const void *restore_ctl_stack,
			    uint32_t *p_doorbell_offset_in_process)
{
	int retval;
	struct kfd_process_device *pdd;
	struct queue *q;
	struct process_queue_node *pqn;
	struct kernel_queue *kq;
	enum kfd_queue_type type = properties->type;
	unsigned int max_queues = 127; /* HWS limit */

	/*
	 * On GFX 9.4.3/9.4.4/9.5.0, increase the number of queues that
	 * can be created to 255. There is no HWS limit on these parts.
	 */
	if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
	    KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4) ||
	    KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0))
		max_queues = 255;

	q = NULL;
	kq = NULL;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -1;
	}

	/*
	 * For a debug process, verify that it is within the static queues
	 * limit; currently the limit is set to half of the total available
	 * HQD slots. If we are just about to create a DIQ, the is_debug
	 * flag is not set yet, hence we also check the queue type.
	 */
	if ((pdd->qpd.is_debug) || (type == KFD_QUEUE_TYPE_DIQ))
		max_queues = dev->kfd->device_info.max_no_of_hqd/2;

	if (pdd->qpd.queue_count >= max_queues)
		return -ENOSPC;

	if (q_data) {
		retval = assign_queue_slot_by_qid(pqm, q_data->q_id);
		*qid = q_data->q_id;
	} else
		retval = find_available_queue_slot(pqm, qid);

	if (retval != 0)
		return retval;

	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);

	pqn = kzalloc(sizeof(*pqn), GFP_KERNEL);
	if (!pqn) {
		retval = -ENOMEM;
		goto err_allocate_pqn;
	}

	switch (type) {
	case KFD_QUEUE_TYPE_SDMA:
	case KFD_QUEUE_TYPE_SDMA_XGMI:
	case KFD_QUEUE_TYPE_SDMA_BY_ENG_ID:
		/* SDMA queues are always allocated statically no matter
		 * which scheduler mode is used. We also do not need to
		 * check whether an SDMA queue can be allocated here, because
		 * allocate_sdma_queue() in create_queue() has the
		 * corresponding check logic.
		 */
		retval = init_user_queue(pqm, dev, &q, properties, *qid);
		if (retval != 0)
			goto err_create_queue;
		pqn->q = q;
		pqn->kq = NULL;
		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
						    restore_mqd, restore_ctl_stack);
		print_queue(q);
		break;

	case KFD_QUEUE_TYPE_COMPUTE:
		/* check if there is over subscription */
		if ((dev->dqm->sched_policy ==
		     KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
		((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
		(dev->dqm->active_queue_count >= get_cp_queues_num(dev->dqm)))) {
			pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
			retval = -EPERM;
			goto err_create_queue;
		}

		retval = init_user_queue(pqm, dev, &q, properties, *qid);
		if (retval != 0)
			goto err_create_queue;
		pqn->q = q;
		pqn->kq = NULL;
		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
						    restore_mqd, restore_ctl_stack);
		print_queue(q);
		break;
	case KFD_QUEUE_TYPE_DIQ:
		kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ);
		if (!kq) {
			retval = -ENOMEM;
			goto err_create_queue;
		}
		kq->queue->properties.queue_id = *qid;
		pqn->kq = kq;
		pqn->q = NULL;
		retval = kfd_process_drain_interrupts(pdd);
		if (retval)
			break;

		retval = dev->dqm->ops.create_kernel_queue(dev->dqm,
							kq, &pdd->qpd);
		break;
	default:
		WARN(1, "Invalid queue type %d", type);
		retval = -EINVAL;
	}

	if (retval != 0) {
		pr_err("Pasid 0x%x DQM create queue type %d failed. ret %d\n",
			pqm->process->pasid, type, retval);
		goto err_create_queue;
	}

	if (q && p_doorbell_offset_in_process) {
		/* Return the doorbell offset (in bytes) within the doorbell
		 * page to the caller so it can be passed up to user mode.
		 * relative doorbell index = Absolute doorbell index -
		 * absolute index of first doorbell in the page.
		 */
		uint32_t first_db_index = amdgpu_doorbell_index_on_bar(pdd->dev->adev,
								       pdd->qpd.proc_doorbells,
								       0,
								       pdd->dev->kfd->device_info.doorbell_size);

		*p_doorbell_offset_in_process = (q->properties.doorbell_off
						- first_db_index) * sizeof(uint32_t);
	}

	pr_debug("PQM After DQM create queue\n");

	list_add(&pqn->process_queue_list, &pqm->queues);

	if (q) {
		pr_debug("PQM done creating queue\n");
		kfd_procfs_add_queue(q);
		print_queue_properties(&q->properties);
	}

	return retval;

err_create_queue:
	uninit_queue(q);
	if (kq)
		kernel_queue_uninit(kq);
	kfree(pqn);
err_allocate_pqn:
	/* If the queues list is empty, unregister the process from the device */
	clear_bit(*qid, pqm->queue_slot_bitmap);
	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd);
	return retval;
}

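/*
 * pqm_destroy_queue - tear down the queue identified by @qid, whether it is
 * a user queue or a DIQ. On -ETIME from DQM (preemption timeout) the
 * cleanup still proceeds, but the error is propagated to the caller.
 */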
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
{
	struct process_queue_node *pqn;
	struct kfd_process_device *pdd;
	struct device_queue_manager *dqm;
	struct kfd_node *dev;
	int retval;

	dqm = NULL;

	retval = 0;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_err("Queue id does not match any known queue\n");
		return -EINVAL;
	}

	dev = NULL;
	if (pqn->kq)
		dev = pqn->kq->dev;
	if (pqn->q)
		dev = pqn->q->device;
	if (WARN_ON(!dev))
		return -ENODEV;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -1;
	}

	if (pqn->kq) {
		/* destroy kernel queue (DIQ) */
		dqm = pqn->kq->dev->dqm;
		dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
		kernel_queue_uninit(pqn->kq);
	}

	if (pqn->q) {
		retval = kfd_queue_unref_bo_vas(pdd, &pqn->q->properties);
		if (retval)
			goto err_destroy_queue;

		dqm = pqn->q->device->dqm;
		retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
		if (retval) {
			pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n",
				pqm->process->pasid,
				pqn->q->properties.queue_id, retval);
			if (retval != -ETIME)
				goto err_destroy_queue;
		}
		kfd_procfs_del_queue(pqn->q);
		kfd_queue_release_buffers(pdd, &pqn->q->properties);
		pqm_clean_queue_resource(pqm, pqn);
		uninit_queue(pqn->q);
	}

	list_del(&pqn->process_queue_list);
	kfree(pqn);
	clear_bit(qid, pqm->queue_slot_bitmap);

	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dqm->ops.unregister_process(dqm, &pdd->qpd);

err_destroy_queue:
	return retval;
}

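/*
 * pqm_update_queue_properties - apply a new ring address/size, percentage,
 * priority and target XCC to an existing user queue, then push the change
 * to DQM. A NULL ring address (or zero size) disables the queue without
 * revalidating the ring buffer.
 */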
int pqm_update_queue_properties(struct process_queue_manager *pqm,
				unsigned int qid, struct queue_properties *p)
{
	int retval;
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn || !pqn->q) {
		pr_debug("No queue %d exists for update operation\n", qid);
		return -EFAULT;
	}

	/*
	 * An update with a NULL ring address is used to disable the queue
	 */
	if (p->queue_address && p->queue_size) {
		struct kfd_process_device *pdd;
		struct amdgpu_vm *vm;
		struct queue *q = pqn->q;
		int err;

		pdd = kfd_get_process_device_data(q->device, q->process);
		if (!pdd)
			return -ENODEV;
		vm = drm_priv_to_vm(pdd->drm_priv);
		err = amdgpu_bo_reserve(vm->root.bo, false);
		if (err)
			return err;

		if (kfd_queue_buffer_get(vm, (void *)p->queue_address, &p->ring_bo,
					 p->queue_size)) {
			pr_debug("ring buf 0x%llx size 0x%llx not mapped on GPU\n",
				 p->queue_address, p->queue_size);
			amdgpu_bo_unreserve(vm->root.bo);
			return -EFAULT;
		}

		kfd_queue_unref_bo_va(vm, &pqn->q->properties.ring_bo);
		kfd_queue_buffer_put(&pqn->q->properties.ring_bo);
		amdgpu_bo_unreserve(vm->root.bo);

		pqn->q->properties.ring_bo = p->ring_bo;
	}

	pqn->q->properties.queue_address = p->queue_address;
	pqn->q->properties.queue_size = p->queue_size;
	pqn->q->properties.queue_percent = p->queue_percent;
	pqn->q->properties.priority = p->priority;
	pqn->q->properties.pm4_target_xcc = p->pm4_target_xcc;

	retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q, NULL);
	if (retval != 0)
		return retval;

	return 0;
}

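/*
 * pqm_update_mqd - update MQD-level state such as the CU mask. User CU
 * masks are rejected while the debug workaround (is_dbg_wa) controls the
 * mask, and on GFX10+ (WGP-based ASICs) CUs must be enabled in adjacent
 * pairs.
 */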
int pqm_update_mqd(struct process_queue_manager *pqm,
				unsigned int qid, struct mqd_update_info *minfo)
{
	int retval;
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn || !pqn->q) {
		pr_debug("No queue %d exists for update operation\n", qid);
		return -EFAULT;
	}

	/* CUs are masked for debugger requirements so deny user mask */
	if (pqn->q->properties.is_dbg_wa && minfo && minfo->cu_mask.ptr)
		return -EBUSY;

	/* ASICs that have WGPs must enforce pairwise enabled mask checks. */
	if (minfo && minfo->cu_mask.ptr &&
			KFD_GC_VERSION(pqn->q->device) >= IP_VERSION(10, 0, 0)) {
		int i;

		for (i = 0; i < minfo->cu_mask.count; i += 2) {
			uint32_t cu_pair = (minfo->cu_mask.ptr[i / 32] >> (i % 32)) & 0x3;

			if (cu_pair && cu_pair != 0x3) {
				pr_debug("CUs must be adjacent pairwise enabled.\n");
				return -EINVAL;
			}
		}
	}

	retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q, minfo);
	if (retval != 0)
		return retval;

	if (minfo && minfo->cu_mask.ptr)
		pqn->q->properties.is_user_cu_masked = true;

	return 0;
}

struct kernel_queue *pqm_get_kernel_queue(
					struct process_queue_manager *pqm,
					unsigned int qid)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (pqn && pqn->kq)
		return pqn->kq;

	return NULL;
}

struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
					unsigned int qid)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	return pqn ? pqn->q : NULL;
}

int pqm_get_wave_state(struct process_queue_manager *pqm,
		       unsigned int qid,
		       void __user *ctl_stack,
		       u32 *ctl_stack_used_size,
		       u32 *save_area_used_size)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn || !pqn->q) {
		pr_debug("amdkfd: No queue %d exists for operation\n",
			 qid);
		return -EFAULT;
	}

	return pqn->q->device->dqm->ops.get_wave_state(pqn->q->device->dqm,
						       pqn->q,
						       ctl_stack,
						       ctl_stack_used_size,
						       save_area_used_size);
}

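/*
 * pqm_get_queue_snapshot - fill debugger snapshot entries for every user
 * queue of the process. At most the caller's original entry count is
 * copied to @buf, using the caller's original entry size as the stride;
 * the total queue count is returned in *num_qss_entries so the caller can
 * detect truncation. @exception_clear_mask is applied per queue by
 * set_queue_snapshot_entry().
 */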
int pqm_get_queue_snapshot(struct process_queue_manager *pqm,
			   uint64_t exception_clear_mask,
			   void __user *buf,
			   int *num_qss_entries,
			   uint32_t *entry_size)
{
	struct process_queue_node *pqn;
	struct kfd_queue_snapshot_entry src;
	uint32_t tmp_entry_size = *entry_size, tmp_qss_entries = *num_qss_entries;
	int r = 0;

	*num_qss_entries = 0;
	if (!(*entry_size))
		return -EINVAL;

	*entry_size = min_t(size_t, *entry_size, sizeof(struct kfd_queue_snapshot_entry));
	mutex_lock(&pqm->process->event_mutex);

	memset(&src, 0, sizeof(src));

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if (!pqn->q)
			continue;

		if (*num_qss_entries < tmp_qss_entries) {
			set_queue_snapshot_entry(pqn->q, exception_clear_mask, &src);

			if (copy_to_user(buf, &src, *entry_size)) {
				r = -EFAULT;
				break;
			}
			buf += tmp_entry_size;
		}
		*num_qss_entries += 1;
	}

	mutex_unlock(&pqm->process->event_mutex);
	return r;
}

static int get_queue_data_sizes(struct kfd_process_device *pdd,
				struct queue *q,
				uint32_t *mqd_size,
				uint32_t *ctl_stack_size)
{
	int ret;

	ret = pqm_get_queue_checkpoint_info(&pdd->process->pqm,
					    q->properties.queue_id,
					    mqd_size,
					    ctl_stack_size);
	if (ret)
		pr_err("Failed to get queue dump info (%d)\n", ret);

	return ret;
}

int kfd_process_get_queue_info(struct kfd_process *p,
			       uint32_t *num_queues,
			       uint64_t *priv_data_sizes)
{
	uint32_t extra_data_sizes = 0;
	struct queue *q;
	int i;
	int ret;

	*num_queues = 0;

	/* Run over all PDDs of the process */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		list_for_each_entry(q, &pdd->qpd.queues_list, list) {
			if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
				q->properties.type == KFD_QUEUE_TYPE_SDMA ||
				q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
				uint32_t mqd_size, ctl_stack_size;

				*num_queues = *num_queues + 1;

				ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
				if (ret)
					return ret;

				extra_data_sizes += mqd_size + ctl_stack_size;
			} else {
				pr_err("Unsupported queue type (%d)\n", q->properties.type);
				return -EOPNOTSUPP;
			}
		}
	}
	*priv_data_sizes = extra_data_sizes +
				(*num_queues * sizeof(struct kfd_criu_queue_priv_data));

	return 0;
}

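/*
 * pqm_checkpoint_mqd - snapshot a queue's MQD and control stack into the
 * caller-provided buffers for CRIU checkpointing. Fails with -EOPNOTSUPP
 * on devices whose DQM does not implement checkpoint_mqd.
 */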
static int pqm_checkpoint_mqd(struct process_queue_manager *pqm,
			      unsigned int qid,
			      void *mqd,
			      void *ctl_stack)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n", qid);
		return -EFAULT;
	}

	if (!pqn->q->device->dqm->ops.checkpoint_mqd) {
		pr_err("amdkfd: queue dumping not supported on this device\n");
		return -EOPNOTSUPP;
	}

	return pqn->q->device->dqm->ops.checkpoint_mqd(pqn->q->device->dqm,
						       pqn->q, mqd, ctl_stack);
}

static int criu_checkpoint_queue(struct kfd_process_device *pdd,
			   struct queue *q,
			   struct kfd_criu_queue_priv_data *q_data)
{
	uint8_t *mqd, *ctl_stack;
	int ret;

	mqd = (void *)(q_data + 1);
	ctl_stack = mqd + q_data->mqd_size;

	q_data->gpu_id = pdd->user_gpu_id;
	q_data->type = q->properties.type;
	q_data->format = q->properties.format;
	q_data->q_id = q->properties.queue_id;
	q_data->q_address = q->properties.queue_address;
	q_data->q_size = q->properties.queue_size;
	q_data->priority = q->properties.priority;
	q_data->q_percent = q->properties.queue_percent;
	q_data->read_ptr_addr = (uint64_t)q->properties.read_ptr;
	q_data->write_ptr_addr = (uint64_t)q->properties.write_ptr;
	q_data->doorbell_id = q->doorbell_id;

	q_data->sdma_id = q->sdma_id;

	q_data->eop_ring_buffer_address =
		q->properties.eop_ring_buffer_address;

	q_data->eop_ring_buffer_size = q->properties.eop_ring_buffer_size;

	q_data->ctx_save_restore_area_address =
		q->properties.ctx_save_restore_area_address;

	q_data->ctx_save_restore_area_size =
		q->properties.ctx_save_restore_area_size;

	q_data->gws = !!q->gws;

	ret = pqm_checkpoint_mqd(&pdd->process->pqm, q->properties.queue_id, mqd, ctl_stack);
	if (ret) {
		pr_err("Failed checkpoint queue_mqd (%d)\n", ret);
		return ret;
	}

	pr_debug("Dumping Queue: gpu_id:%x queue_id:%u\n", q_data->gpu_id, q_data->q_id);
	return ret;
}

static int criu_checkpoint_queues_device(struct kfd_process_device *pdd,
				   uint8_t __user *user_priv,
				   unsigned int *q_index,
				   uint64_t *queues_priv_data_offset)
{
	unsigned int q_private_data_size = 0;
	uint8_t *q_private_data = NULL; /* Local buffer to store individual queue private data */
	struct queue *q;
	int ret = 0;

	list_for_each_entry(q, &pdd->qpd.queues_list, list) {
		struct kfd_criu_queue_priv_data *q_data;
		uint64_t q_data_size;
		uint32_t mqd_size;
		uint32_t ctl_stack_size;

		if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE &&
			q->properties.type != KFD_QUEUE_TYPE_SDMA &&
			q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI) {

			pr_err("Unsupported queue type (%d)\n", q->properties.type);
			ret = -EOPNOTSUPP;
			break;
		}

		ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
		if (ret)
			break;

		q_data_size = sizeof(*q_data) + mqd_size + ctl_stack_size;

		/* Increase local buffer space if needed */
		if (q_private_data_size < q_data_size) {
			kfree(q_private_data);

			q_private_data = kzalloc(q_data_size, GFP_KERNEL);
			if (!q_private_data) {
				ret = -ENOMEM;
				break;
			}
			q_private_data_size = q_data_size;
		}

		q_data = (struct kfd_criu_queue_priv_data *)q_private_data;

		/* data stored in this order: priv_data, mqd, ctl_stack */
		q_data->mqd_size = mqd_size;
		q_data->ctl_stack_size = ctl_stack_size;

		ret = criu_checkpoint_queue(pdd, q, q_data);
		if (ret)
			break;

		q_data->object_type = KFD_CRIU_OBJECT_TYPE_QUEUE;

		ret = copy_to_user(user_priv + *queues_priv_data_offset,
				q_data, q_data_size);
		if (ret) {
			ret = -EFAULT;
			break;
		}
		*queues_priv_data_offset += q_data_size;
		*q_index = *q_index + 1;
	}

	kfree(q_private_data);

	return ret;
}

int kfd_criu_checkpoint_queues(struct kfd_process *p,
			 uint8_t __user *user_priv_data,
			 uint64_t *priv_data_offset)
{
	int ret = 0, pdd_index, q_index = 0;

	for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) {
		struct kfd_process_device *pdd = p->pdds[pdd_index];

		/*
		 * criu_checkpoint_queues_device will copy data to user and update q_index and
		 * queues_priv_data_offset
		 */
		ret = criu_checkpoint_queues_device(pdd, user_priv_data, &q_index,
					      priv_data_offset);

		if (ret)
			break;
	}

	return ret;
}

static void set_queue_properties_from_criu(struct queue_properties *qp,
					  struct kfd_criu_queue_priv_data *q_data)
{
	qp->is_interop = false;
	qp->queue_percent = q_data->q_percent;
	qp->priority = q_data->priority;
	qp->queue_address = q_data->q_address;
	qp->queue_size = q_data->q_size;
	qp->read_ptr = (uint32_t *) q_data->read_ptr_addr;
	qp->write_ptr = (uint32_t *) q_data->write_ptr_addr;
	qp->eop_ring_buffer_address = q_data->eop_ring_buffer_address;
	qp->eop_ring_buffer_size = q_data->eop_ring_buffer_size;
	qp->ctx_save_restore_area_address = q_data->ctx_save_restore_area_address;
	qp->ctx_save_restore_area_size = q_data->ctx_save_restore_area_size;
	qp->ctl_stack_size = q_data->ctl_stack_size;
	qp->type = q_data->type;
	qp->format = q_data->format;
}

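/*
 * kfd_criu_restore_queue - recreate one checkpointed queue from the CRIU
 * private data blob at *priv_data_offset. Bounds-checks each region against
 * @max_priv_data_size before copying, recreates the queue through
 * pqm_create_queue() with the saved MQD and control stack, and reattaches
 * GWS if the queue owned it at checkpoint time.
 */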
int kfd_criu_restore_queue(struct kfd_process *p,
			   uint8_t __user *user_priv_ptr,
			   uint64_t *priv_data_offset,
			   uint64_t max_priv_data_size)
{
	uint8_t *mqd, *ctl_stack, *q_extra_data = NULL;
	struct kfd_criu_queue_priv_data *q_data;
	struct kfd_process_device *pdd;
	uint64_t q_extra_data_size;
	struct queue_properties qp;
	unsigned int queue_id;
	int ret = 0;

	if (*priv_data_offset + sizeof(*q_data) > max_priv_data_size)
		return -EINVAL;

	q_data = kmalloc(sizeof(*q_data), GFP_KERNEL);
	if (!q_data)
		return -ENOMEM;

	ret = copy_from_user(q_data, user_priv_ptr + *priv_data_offset, sizeof(*q_data));
	if (ret) {
		ret = -EFAULT;
		goto exit;
	}

	*priv_data_offset += sizeof(*q_data);
	q_extra_data_size = (uint64_t)q_data->ctl_stack_size + q_data->mqd_size;

	if (*priv_data_offset + q_extra_data_size > max_priv_data_size) {
		ret = -EINVAL;
		goto exit;
	}

	q_extra_data = kmalloc(q_extra_data_size, GFP_KERNEL);
	if (!q_extra_data) {
		ret = -ENOMEM;
		goto exit;
	}

	ret = copy_from_user(q_extra_data, user_priv_ptr + *priv_data_offset, q_extra_data_size);
	if (ret) {
		ret = -EFAULT;
		goto exit;
	}

	*priv_data_offset += q_extra_data_size;

	pdd = kfd_process_device_data_by_id(p, q_data->gpu_id);
	if (!pdd) {
		pr_err("Failed to get pdd\n");
		ret = -EINVAL;
		goto exit;
	}

	/* data stored in this order: mqd, ctl_stack */
	mqd = q_extra_data;
	ctl_stack = mqd + q_data->mqd_size;

	memset(&qp, 0, sizeof(qp));
	set_queue_properties_from_criu(&qp, q_data);

	print_queue_properties(&qp);

	ret = pqm_create_queue(&p->pqm, pdd->dev, &qp, &queue_id, q_data, mqd, ctl_stack, NULL);
	if (ret) {
		pr_err("Failed to create new queue err:%d\n", ret);
		goto exit;
	}

	if (q_data->gws)
		ret = pqm_set_gws(&p->pqm, q_data->q_id, pdd->dev->gws);

exit:
	if (ret)
		pr_err("Failed to restore queue (%d)\n", ret);
	else
		pr_debug("Queue id %d was restored successfully\n", queue_id);

	kfree(q_data);
	kfree(q_extra_data);

	return ret;
}

int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
				  unsigned int qid,
				  uint32_t *mqd_size,
				  uint32_t *ctl_stack_size)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n", qid);
		return -EFAULT;
	}

	if (!pqn->q->device->dqm->ops.get_queue_checkpoint_info) {
		pr_err("amdkfd: queue dumping not supported on this device\n");
		return -EOPNOTSUPP;
	}

	pqn->q->device->dqm->ops.get_queue_checkpoint_info(pqn->q->device->dqm,
						       pqn->q, mqd_size,
						       ctl_stack_size);
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

int pqm_debugfs_mqds(struct seq_file *m, void *data)
{
	struct process_queue_manager *pqm = data;
	struct process_queue_node *pqn;
	struct queue *q;
	enum KFD_MQD_TYPE mqd_type;
	struct mqd_manager *mqd_mgr;
	int r = 0, xcc, num_xccs = 1;
	void *mqd;
	uint64_t size = 0;

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if (pqn->q) {
			q = pqn->q;
			switch (q->properties.type) {
			case KFD_QUEUE_TYPE_SDMA:
			case KFD_QUEUE_TYPE_SDMA_XGMI:
				seq_printf(m, "  SDMA queue on device %x\n",
					   q->device->id);
				mqd_type = KFD_MQD_TYPE_SDMA;
				break;
			case KFD_QUEUE_TYPE_COMPUTE:
				seq_printf(m, "  Compute queue on device %x\n",
					   q->device->id);
				mqd_type = KFD_MQD_TYPE_CP;
				num_xccs = NUM_XCC(q->device->xcc_mask);
				break;
			default:
				seq_printf(m,
				"  Bad user queue type %d on device %x\n",
					   q->properties.type, q->device->id);
				continue;
			}
			mqd_mgr = q->device->dqm->mqd_mgrs[mqd_type];
			size = mqd_mgr->mqd_stride(mqd_mgr,
							&q->properties);
		} else if (pqn->kq) {
			q = pqn->kq->queue;
			mqd_mgr = pqn->kq->mqd_mgr;
			switch (q->properties.type) {
			case KFD_QUEUE_TYPE_DIQ:
				seq_printf(m, "  DIQ on device %x\n",
					   pqn->kq->dev->id);
				break;
			default:
				seq_printf(m,
				"  Bad kernel queue type %d on device %x\n",
					   q->properties.type,
					   pqn->kq->dev->id);
				continue;
			}
		} else {
			seq_printf(m,
		"  Weird: Queue node with neither kernel nor user queue\n");
			continue;
		}

		for (xcc = 0; xcc < num_xccs; xcc++) {
			mqd = q->mqd + size * xcc;
			r = mqd_mgr->debugfs_show_mqd(m, mqd);
			if (r != 0)
				break;
		}
	}

	return r;
}

#endif