xref: /linux/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c (revision fa73ec95c969c7af292caf622ef499e7af7cb062)
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/list.h>
#include "kfd_device_queue_manager.h"
#include "kfd_priv.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"

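/* Look up the process queue node for @qid; both user queues (pqn->q) and
 * kernel/DIQ queues (pqn->kq) are searched. Returns NULL if no queue in
 * this process uses @qid.
 */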
static inline struct process_queue_node *get_queue_by_qid(
			struct process_queue_manager *pqm, unsigned int qid)
{
	struct process_queue_node *pqn;

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if ((pqn->q && pqn->q->properties.queue_id == qid) ||
		    (pqn->kq && pqn->kq->queue->properties.queue_id == qid))
			return pqn;
	}

	return NULL;
}

static int assign_queue_slot_by_qid(struct process_queue_manager *pqm,
				    unsigned int qid)
{
	if (qid >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
		return -EINVAL;

	if (__test_and_set_bit(qid, pqm->queue_slot_bitmap)) {
		pr_err("Cannot create new queue because requested qid(%u) is in use\n", qid);
		return -ENOSPC;
	}

	return 0;
}

static int find_available_queue_slot(struct process_queue_manager *pqm,
					unsigned int *qid)
{
	unsigned long found;

	found = find_first_zero_bit(pqm->queue_slot_bitmap,
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	pr_debug("The new slot id %lu\n", found);

	if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
		pr_info("Cannot open more queues for process with pasid 0x%x\n",
				pqm->process->pasid);
		return -ENOMEM;
	}

	set_bit(found, pqm->queue_slot_bitmap);
	*qid = found;

	return 0;
}

void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
{
	struct kfd_node *dev = pdd->dev;

	if (pdd->already_dequeued)
		return;

	dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
	if (dev->kfd->shared_resources.enable_mes)
		amdgpu_mes_flush_shader_debugger(dev->adev, pdd->proc_ctx_gpu_addr);
	pdd->already_dequeued = true;
}

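/* Assign GWS (global wave sync) resources to queue @qid, or release them when
 * @gws is NULL. Only one queue per process may hold GWS at a time; the queue
 * is then updated through the DQM so the change takes effect.
 */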
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
			void *gws)
{
	struct mqd_update_info minfo = {0};
	struct kfd_node *dev = NULL;
	struct process_queue_node *pqn;
	struct kfd_process_device *pdd;
	struct kgd_mem *mem = NULL;
	int ret;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_err("Queue id does not match any known queue\n");
		return -EINVAL;
	}

	if (pqn->q)
		dev = pqn->q->device;
	if (WARN_ON(!dev))
		return -ENODEV;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -EINVAL;
	}

	/* Only one queue per process may have GWS assigned */
	if (gws && pdd->qpd.num_gws)
		return -EBUSY;

	if (!gws && pdd->qpd.num_gws == 0)
		return -EINVAL;

	if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3) &&
	    KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 4) &&
	    !dev->kfd->shared_resources.enable_mes) {
		if (gws)
			ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info,
				gws, &mem);
		else
			ret = amdgpu_amdkfd_remove_gws_from_process(pdd->process->kgd_process_info,
				pqn->q->gws);
		if (unlikely(ret))
			return ret;
		pqn->q->gws = mem;
	} else {
		/*
		 * Intentionally set GWS to a non-NULL value
		 * for devices that do not use GWS for global wave
		 * synchronization but require the formality
		 * of setting GWS for cooperative groups.
		 */
		pqn->q->gws = gws ? ERR_PTR(-ENOMEM) : NULL;
	}

	pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0;
	minfo.update_flag = gws ? UPDATE_FLAG_IS_GWS : 0;

	return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q, &minfo);
}

void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		kfd_process_dequeue_from_device(p->pdds[i]);
}

int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
{
	INIT_LIST_HEAD(&pqm->queues);
	pqm->queue_slot_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
					       GFP_KERNEL);
	if (!pqm->queue_slot_bitmap)
		return -ENOMEM;
	pqm->process = p;

	return 0;
}
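
/* A minimal lifecycle sketch, assuming the usual amdkfd call flow: pqm_init()
 * runs once when the process is created, pqm_create_queue() and
 * pqm_destroy_queue() are called per queue (typically from the KFD ioctls),
 * and pqm_uninit() tears everything down at process exit.
 */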

static void pqm_clean_queue_resource(struct process_queue_manager *pqm,
				     struct process_queue_node *pqn)
{
	struct kfd_node *dev;
	struct kfd_process_device *pdd;

	dev = pqn->q->device;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return;
	}

	if (pqn->q->gws) {
		if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
		    KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 4) &&
		    !dev->kfd->shared_resources.enable_mes)
			amdgpu_amdkfd_remove_gws_from_process(
				pqm->process->kgd_process_info, pqn->q->gws);
		pdd->qpd.num_gws = 0;
	}

	if (dev->kfd->shared_resources.enable_mes) {
		amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->gang_ctx_bo);
		if (pqn->q->wptr_bo)
			amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->wptr_bo);
	}
}

void pqm_uninit(struct process_queue_manager *pqm)
{
	struct process_queue_node *pqn, *next;

	list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
		if (pqn->q)
			pqm_clean_queue_resource(pqm, pqn);

		kfd_procfs_del_queue(pqn->q);
		uninit_queue(pqn->q);
		list_del(&pqn->process_queue_list);
		kfree(pqn);
	}

	bitmap_free(pqm->queue_slot_bitmap);
	pqm->queue_slot_bitmap = NULL;
}

static int init_user_queue(struct process_queue_manager *pqm,
				struct kfd_node *dev, struct queue **q,
				struct queue_properties *q_properties,
				struct file *f, struct amdgpu_bo *wptr_bo,
				unsigned int qid)
{
	int retval;

	/* Doorbell initialized in user space */
	q_properties->doorbell_ptr = NULL;
	q_properties->exception_status = KFD_EC_MASK(EC_QUEUE_NEW);

	/* let DQM handle it */
	q_properties->vmid = 0;
	q_properties->queue_id = qid;

	retval = init_queue(q, q_properties);
	if (retval != 0)
		return retval;

	(*q)->device = dev;
	(*q)->process = pqm->process;

	if (dev->kfd->shared_resources.enable_mes) {
		retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
						AMDGPU_MES_GANG_CTX_SIZE,
						&(*q)->gang_ctx_bo,
						&(*q)->gang_ctx_gpu_addr,
						&(*q)->gang_ctx_cpu_ptr,
						false);
		if (retval) {
			pr_err("failed to allocate gang context bo\n");
			goto cleanup;
		}
		memset((*q)->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);
		(*q)->wptr_bo = wptr_bo;
	}

	pr_debug("PQM After init queue\n");
	return 0;

cleanup:
	uninit_queue(*q);
	*q = NULL;
	return retval;
}

int pqm_create_queue(struct process_queue_manager *pqm,
			    struct kfd_node *dev,
			    struct file *f,
			    struct queue_properties *properties,
			    unsigned int *qid,
			    struct amdgpu_bo *wptr_bo,
			    const struct kfd_criu_queue_priv_data *q_data,
			    const void *restore_mqd,
			    const void *restore_ctl_stack,
			    uint32_t *p_doorbell_offset_in_process)
{
	int retval;
	struct kfd_process_device *pdd;
	struct queue *q;
	struct process_queue_node *pqn;
	struct kernel_queue *kq;
	enum kfd_queue_type type = properties->type;
	unsigned int max_queues = 127; /* HWS limit */

	/*
	 * On GFX 9.4.3/9.4.4, increase the number of queues that
	 * can be created to 255. There is no HWS limit on these parts.
	 */
	if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
	    KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4))
		max_queues = 255;

	q = NULL;
	kq = NULL;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -1;
	}

	/*
	 * For a debug process, verify that it is within the static queues
	 * limit; currently the limit is set to half of the total available
	 * HQD slots. If we are just about to create a DIQ, the is_debug flag
	 * is not set yet, hence we also check the queue type.
	 */
	if ((pdd->qpd.is_debug) || (type == KFD_QUEUE_TYPE_DIQ))
		max_queues = dev->kfd->device_info.max_no_of_hqd/2;

	if (pdd->qpd.queue_count >= max_queues)
		return -ENOSPC;

	if (q_data) {
		retval = assign_queue_slot_by_qid(pqm, q_data->q_id);
		*qid = q_data->q_id;
	} else
		retval = find_available_queue_slot(pqm, qid);

	if (retval != 0)
		return retval;

	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);

	pqn = kzalloc(sizeof(*pqn), GFP_KERNEL);
	if (!pqn) {
		retval = -ENOMEM;
		goto err_allocate_pqn;
	}

	switch (type) {
	case KFD_QUEUE_TYPE_SDMA:
	case KFD_QUEUE_TYPE_SDMA_XGMI:
		/* SDMA queues are always allocated statically no matter
		 * which scheduler mode is used. We also do not need to
		 * check whether an SDMA queue can be allocated here, because
		 * allocate_sdma_queue() in create_queue() has the
		 * corresponding check logic.
		 */
		retval = init_user_queue(pqm, dev, &q, properties, f, wptr_bo, *qid);
		if (retval != 0)
			goto err_create_queue;
		pqn->q = q;
		pqn->kq = NULL;
		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
						    restore_mqd, restore_ctl_stack);
		print_queue(q);
		break;

	case KFD_QUEUE_TYPE_COMPUTE:
		/* check if there is over-subscription */
		if ((dev->dqm->sched_policy ==
		     KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
		((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
		(dev->dqm->active_queue_count >= get_cp_queues_num(dev->dqm)))) {
			pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
			retval = -EPERM;
			goto err_create_queue;
		}

		retval = init_user_queue(pqm, dev, &q, properties, f, wptr_bo, *qid);
		if (retval != 0)
			goto err_create_queue;
		pqn->q = q;
		pqn->kq = NULL;
		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
						    restore_mqd, restore_ctl_stack);
		print_queue(q);
		break;
	case KFD_QUEUE_TYPE_DIQ:
		kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ);
		if (!kq) {
			retval = -ENOMEM;
			goto err_create_queue;
		}
		kq->queue->properties.queue_id = *qid;
		pqn->kq = kq;
		pqn->q = NULL;
		retval = kfd_process_drain_interrupts(pdd);
		if (retval)
			break;

		retval = dev->dqm->ops.create_kernel_queue(dev->dqm,
							kq, &pdd->qpd);
		break;
	default:
		WARN(1, "Invalid queue type %d", type);
		retval = -EINVAL;
	}

	if (retval != 0) {
		pr_err("Pasid 0x%x DQM create queue type %d failed. ret %d\n",
			pqm->process->pasid, type, retval);
		goto err_create_queue;
	}

	if (q && p_doorbell_offset_in_process) {
		/* Return the doorbell offset (in bytes) within the doorbell
		 * page to the caller so it can be passed up to user mode.
		 * Relative doorbell index = absolute doorbell index -
		 * absolute index of the first doorbell in the page.
		 */
		uint32_t first_db_index = amdgpu_doorbell_index_on_bar(pdd->dev->adev,
								       pdd->qpd.proc_doorbells,
								       0,
								       pdd->dev->kfd->device_info.doorbell_size);

		*p_doorbell_offset_in_process = (q->properties.doorbell_off
						- first_db_index) * sizeof(uint32_t);
	}
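
	/* Illustrative arithmetic with hypothetical values: if doorbell_off
	 * is index 0x408 and first_db_index is 0x400, user mode receives
	 * (0x408 - 0x400) * sizeof(uint32_t) = 32 bytes into the page.
	 */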

	pr_debug("PQM After DQM create queue\n");

	list_add(&pqn->process_queue_list, &pqm->queues);

	if (q) {
		pr_debug("PQM done creating queue\n");
		kfd_procfs_add_queue(q);
		print_queue_properties(&q->properties);
	}

	return retval;

err_create_queue:
	uninit_queue(q);
	if (kq)
		kernel_queue_uninit(kq, false);
	kfree(pqn);
err_allocate_pqn:
	/* if the queues list is empty, unregister the process from the device */
	clear_bit(*qid, pqm->queue_slot_bitmap);
	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd);
	return retval;
}

int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
{
	struct process_queue_node *pqn;
	struct kfd_process_device *pdd;
	struct device_queue_manager *dqm;
	struct kfd_node *dev;
	int retval;

	dqm = NULL;

	retval = 0;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_err("Queue id does not match any known queue\n");
		return -EINVAL;
	}

	dev = NULL;
	if (pqn->kq)
		dev = pqn->kq->dev;
	if (pqn->q)
		dev = pqn->q->device;
	if (WARN_ON(!dev))
		return -ENODEV;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -1;
	}

	if (pqn->kq) {
		/* destroy kernel queue (DIQ) */
		dqm = pqn->kq->dev->dqm;
		dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
		kernel_queue_uninit(pqn->kq, false);
	}

	if (pqn->q) {
		kfd_procfs_del_queue(pqn->q);
		dqm = pqn->q->device->dqm;
		retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
		if (retval) {
			pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n",
				pqm->process->pasid,
				pqn->q->properties.queue_id, retval);
			if (retval != -ETIME)
				goto err_destroy_queue;
		}

		pqm_clean_queue_resource(pqm, pqn);
		uninit_queue(pqn->q);
	}

	list_del(&pqn->process_queue_list);
	kfree(pqn);
	clear_bit(qid, pqm->queue_slot_bitmap);

	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dqm->ops.unregister_process(dqm, &pdd->qpd);

err_destroy_queue:
	return retval;
}

int pqm_update_queue_properties(struct process_queue_manager *pqm,
				unsigned int qid, struct queue_properties *p)
{
	int retval;
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("No queue %d exists for update operation\n", qid);
		return -EFAULT;
	}

	pqn->q->properties.queue_address = p->queue_address;
	pqn->q->properties.queue_size = p->queue_size;
	pqn->q->properties.queue_percent = p->queue_percent;
	pqn->q->properties.priority = p->priority;
	pqn->q->properties.pm4_target_xcc = p->pm4_target_xcc;

	retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q, NULL);
	if (retval != 0)
		return retval;

	return 0;
}

int pqm_update_mqd(struct process_queue_manager *pqm,
				unsigned int qid, struct mqd_update_info *minfo)
{
	int retval;
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("No queue %d exists for update operation\n", qid);
		return -EFAULT;
	}

	/* CUs are masked for debugger requirements so deny user mask */
	if (pqn->q->properties.is_dbg_wa && minfo && minfo->cu_mask.ptr)
		return -EBUSY;

	/* ASICs that have WGPs must enforce pairwise enabled mask checks. */
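	/* The mask is consumed two bits at a time: a pair value of 0x3 (both
	 * CUs of a WGP enabled) or 0x0 (both disabled) is accepted below,
	 * while 0x1 or 0x2 (only one CU of the pair) is rejected.
	 */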
	if (minfo && minfo->cu_mask.ptr &&
			KFD_GC_VERSION(pqn->q->device) >= IP_VERSION(10, 0, 0)) {
		int i;

		for (i = 0; i < minfo->cu_mask.count; i += 2) {
			uint32_t cu_pair = (minfo->cu_mask.ptr[i / 32] >> (i % 32)) & 0x3;

			if (cu_pair && cu_pair != 0x3) {
				pr_debug("CUs must be adjacent pairwise enabled.\n");
				return -EINVAL;
			}
		}
	}

	retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q, minfo);
	if (retval != 0)
		return retval;

	if (minfo && minfo->cu_mask.ptr)
		pqn->q->properties.is_user_cu_masked = true;

	return 0;
}

struct kernel_queue *pqm_get_kernel_queue(
					struct process_queue_manager *pqm,
					unsigned int qid)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (pqn && pqn->kq)
		return pqn->kq;

	return NULL;
}

struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
					unsigned int qid)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	return pqn ? pqn->q : NULL;
}

int pqm_get_wave_state(struct process_queue_manager *pqm,
		       unsigned int qid,
		       void __user *ctl_stack,
		       u32 *ctl_stack_used_size,
		       u32 *save_area_used_size)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n",
			 qid);
		return -EFAULT;
	}

	return pqn->q->device->dqm->ops.get_wave_state(pqn->q->device->dqm,
						       pqn->q,
						       ctl_stack,
						       ctl_stack_used_size,
						       save_area_used_size);
}

int pqm_get_queue_snapshot(struct process_queue_manager *pqm,
			   uint64_t exception_clear_mask,
			   void __user *buf,
			   int *num_qss_entries,
			   uint32_t *entry_size)
{
	struct process_queue_node *pqn;
	struct kfd_queue_snapshot_entry src;
	uint32_t tmp_entry_size = *entry_size, tmp_qss_entries = *num_qss_entries;
	int r = 0;

	*num_qss_entries = 0;
	if (!(*entry_size))
		return -EINVAL;

	*entry_size = min_t(size_t, *entry_size, sizeof(struct kfd_queue_snapshot_entry));
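	/* Copy at most *entry_size bytes per entry, but advance the user
	 * buffer by the caller's original stride (tmp_entry_size) so the
	 * caller's layout is preserved even when its entries are larger
	 * than the kernel's.
	 */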
	mutex_lock(&pqm->process->event_mutex);

	memset(&src, 0, sizeof(src));

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if (!pqn->q)
			continue;

		if (*num_qss_entries < tmp_qss_entries) {
			set_queue_snapshot_entry(pqn->q, exception_clear_mask, &src);

			if (copy_to_user(buf, &src, *entry_size)) {
				r = -EFAULT;
				break;
			}
			buf += tmp_entry_size;
		}
		*num_qss_entries += 1;
	}

	mutex_unlock(&pqm->process->event_mutex);
	return r;
}

static int get_queue_data_sizes(struct kfd_process_device *pdd,
				struct queue *q,
				uint32_t *mqd_size,
				uint32_t *ctl_stack_size)
{
	int ret;

	ret = pqm_get_queue_checkpoint_info(&pdd->process->pqm,
					    q->properties.queue_id,
					    mqd_size,
					    ctl_stack_size);
	if (ret)
		pr_err("Failed to get queue dump info (%d)\n", ret);

	return ret;
}

int kfd_process_get_queue_info(struct kfd_process *p,
			       uint32_t *num_queues,
			       uint64_t *priv_data_sizes)
{
	uint32_t extra_data_sizes = 0;
	struct queue *q;
	int i;
	int ret;

	*num_queues = 0;

	/* Run over all PDDs of the process */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		list_for_each_entry(q, &pdd->qpd.queues_list, list) {
			if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
				q->properties.type == KFD_QUEUE_TYPE_SDMA ||
				q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
				uint32_t mqd_size, ctl_stack_size;

				*num_queues = *num_queues + 1;

				ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
				if (ret)
					return ret;

				extra_data_sizes += mqd_size + ctl_stack_size;
			} else {
				pr_err("Unsupported queue type (%d)\n", q->properties.type);
				return -EOPNOTSUPP;
			}
		}
	}
	*priv_data_sizes = extra_data_sizes +
				(*num_queues * sizeof(struct kfd_criu_queue_priv_data));

	return 0;
}

static int pqm_checkpoint_mqd(struct process_queue_manager *pqm,
			      unsigned int qid,
			      void *mqd,
			      void *ctl_stack)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n", qid);
		return -EFAULT;
	}

	if (!pqn->q->device->dqm->ops.checkpoint_mqd) {
		pr_err("amdkfd: queue dumping not supported on this device\n");
		return -EOPNOTSUPP;
	}

	return pqn->q->device->dqm->ops.checkpoint_mqd(pqn->q->device->dqm,
						       pqn->q, mqd, ctl_stack);
}

static int criu_checkpoint_queue(struct kfd_process_device *pdd,
			   struct queue *q,
			   struct kfd_criu_queue_priv_data *q_data)
{
	uint8_t *mqd, *ctl_stack;
	int ret;

	mqd = (void *)(q_data + 1);
	ctl_stack = mqd + q_data->mqd_size;
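
	/* Private-data blob layout, matching the sizes the caller stored in
	 * q_data (see criu_checkpoint_queues_device):
	 *   | struct kfd_criu_queue_priv_data | mqd | ctl_stack |
	 */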

	q_data->gpu_id = pdd->user_gpu_id;
	q_data->type = q->properties.type;
	q_data->format = q->properties.format;
	q_data->q_id = q->properties.queue_id;
	q_data->q_address = q->properties.queue_address;
	q_data->q_size = q->properties.queue_size;
	q_data->priority = q->properties.priority;
	q_data->q_percent = q->properties.queue_percent;
	q_data->read_ptr_addr = (uint64_t)q->properties.read_ptr;
	q_data->write_ptr_addr = (uint64_t)q->properties.write_ptr;
	q_data->doorbell_id = q->doorbell_id;

	q_data->sdma_id = q->sdma_id;

	q_data->eop_ring_buffer_address =
		q->properties.eop_ring_buffer_address;

	q_data->eop_ring_buffer_size = q->properties.eop_ring_buffer_size;

	q_data->ctx_save_restore_area_address =
		q->properties.ctx_save_restore_area_address;

	q_data->ctx_save_restore_area_size =
		q->properties.ctx_save_restore_area_size;

	q_data->gws = !!q->gws;

	ret = pqm_checkpoint_mqd(&pdd->process->pqm, q->properties.queue_id, mqd, ctl_stack);
	if (ret) {
		pr_err("Failed checkpoint queue_mqd (%d)\n", ret);
		return ret;
	}

	pr_debug("Dumping Queue: gpu_id:%x queue_id:%u\n", q_data->gpu_id, q_data->q_id);
	return ret;
}

static int criu_checkpoint_queues_device(struct kfd_process_device *pdd,
				   uint8_t __user *user_priv,
				   unsigned int *q_index,
				   uint64_t *queues_priv_data_offset)
{
	unsigned int q_private_data_size = 0;
	uint8_t *q_private_data = NULL; /* Local buffer to store individual queue private data */
	struct queue *q;
	int ret = 0;

	list_for_each_entry(q, &pdd->qpd.queues_list, list) {
		struct kfd_criu_queue_priv_data *q_data;
		uint64_t q_data_size;
		uint32_t mqd_size;
		uint32_t ctl_stack_size;

		if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE &&
			q->properties.type != KFD_QUEUE_TYPE_SDMA &&
			q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI) {

			pr_err("Unsupported queue type (%d)\n", q->properties.type);
			ret = -EOPNOTSUPP;
			break;
		}

		ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
		if (ret)
			break;

		q_data_size = sizeof(*q_data) + mqd_size + ctl_stack_size;

		/* Increase local buffer space if needed */
		if (q_private_data_size < q_data_size) {
			kfree(q_private_data);

			q_private_data = kzalloc(q_data_size, GFP_KERNEL);
			if (!q_private_data) {
				ret = -ENOMEM;
				break;
			}
			q_private_data_size = q_data_size;
		}

		q_data = (struct kfd_criu_queue_priv_data *)q_private_data;

		/* data stored in this order: priv_data, mqd, ctl_stack */
		q_data->mqd_size = mqd_size;
		q_data->ctl_stack_size = ctl_stack_size;

		ret = criu_checkpoint_queue(pdd, q, q_data);
		if (ret)
			break;

		q_data->object_type = KFD_CRIU_OBJECT_TYPE_QUEUE;

		ret = copy_to_user(user_priv + *queues_priv_data_offset,
				q_data, q_data_size);
		if (ret) {
			ret = -EFAULT;
			break;
		}
		*queues_priv_data_offset += q_data_size;
		*q_index = *q_index + 1;
	}

	kfree(q_private_data);

	return ret;
}

int kfd_criu_checkpoint_queues(struct kfd_process *p,
			 uint8_t __user *user_priv_data,
			 uint64_t *priv_data_offset)
{
	int ret = 0, pdd_index, q_index = 0;

	for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) {
		struct kfd_process_device *pdd = p->pdds[pdd_index];

		/*
		 * criu_checkpoint_queues_device will copy data to user and update q_index and
		 * queues_priv_data_offset
		 */
		ret = criu_checkpoint_queues_device(pdd, user_priv_data, &q_index,
					      priv_data_offset);

		if (ret)
			break;
	}

	return ret;
}

static void set_queue_properties_from_criu(struct queue_properties *qp,
					  struct kfd_criu_queue_priv_data *q_data)
{
	qp->is_interop = false;
	qp->queue_percent = q_data->q_percent;
	qp->priority = q_data->priority;
	qp->queue_address = q_data->q_address;
	qp->queue_size = q_data->q_size;
	qp->read_ptr = (uint32_t *) q_data->read_ptr_addr;
	qp->write_ptr = (uint32_t *) q_data->write_ptr_addr;
	qp->eop_ring_buffer_address = q_data->eop_ring_buffer_address;
	qp->eop_ring_buffer_size = q_data->eop_ring_buffer_size;
	qp->ctx_save_restore_area_address = q_data->ctx_save_restore_area_address;
	qp->ctx_save_restore_area_size = q_data->ctx_save_restore_area_size;
	qp->ctl_stack_size = q_data->ctl_stack_size;
	qp->type = q_data->type;
	qp->format = q_data->format;
}

int kfd_criu_restore_queue(struct kfd_process *p,
			   uint8_t __user *user_priv_ptr,
			   uint64_t *priv_data_offset,
			   uint64_t max_priv_data_size)
{
	uint8_t *mqd, *ctl_stack, *q_extra_data = NULL;
	struct kfd_criu_queue_priv_data *q_data;
	struct kfd_process_device *pdd;
	uint64_t q_extra_data_size;
	struct queue_properties qp;
	unsigned int queue_id;
	int ret = 0;

	if (*priv_data_offset + sizeof(*q_data) > max_priv_data_size)
		return -EINVAL;

	q_data = kmalloc(sizeof(*q_data), GFP_KERNEL);
	if (!q_data)
		return -ENOMEM;

	ret = copy_from_user(q_data, user_priv_ptr + *priv_data_offset, sizeof(*q_data));
	if (ret) {
		ret = -EFAULT;
		goto exit;
	}

	*priv_data_offset += sizeof(*q_data);
	q_extra_data_size = (uint64_t)q_data->ctl_stack_size + q_data->mqd_size;

	if (*priv_data_offset + q_extra_data_size > max_priv_data_size) {
		ret = -EINVAL;
		goto exit;
	}

	q_extra_data = kmalloc(q_extra_data_size, GFP_KERNEL);
	if (!q_extra_data) {
		ret = -ENOMEM;
		goto exit;
	}

	ret = copy_from_user(q_extra_data, user_priv_ptr + *priv_data_offset, q_extra_data_size);
	if (ret) {
		ret = -EFAULT;
		goto exit;
	}

	*priv_data_offset += q_extra_data_size;

	pdd = kfd_process_device_data_by_id(p, q_data->gpu_id);
	if (!pdd) {
		pr_err("Failed to get pdd\n");
		ret = -EINVAL;
		goto exit;
	}

	/* data stored in this order: mqd, ctl_stack */
	mqd = q_extra_data;
	ctl_stack = mqd + q_data->mqd_size;

	memset(&qp, 0, sizeof(qp));
	set_queue_properties_from_criu(&qp, q_data);

	print_queue_properties(&qp);

	ret = pqm_create_queue(&p->pqm, pdd->dev, NULL, &qp, &queue_id, NULL, q_data, mqd, ctl_stack,
				NULL);
	if (ret) {
		pr_err("Failed to create new queue err:%d\n", ret);
		goto exit;
	}

	if (q_data->gws)
		ret = pqm_set_gws(&p->pqm, q_data->q_id, pdd->dev->gws);

exit:
	if (ret)
		pr_err("Failed to restore queue (%d)\n", ret);
	else
		pr_debug("Queue id %d was restored successfully\n", queue_id);

	kfree(q_data);

	return ret;
}

int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
				  unsigned int qid,
				  uint32_t *mqd_size,
				  uint32_t *ctl_stack_size)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n", qid);
		return -EFAULT;
	}

	if (!pqn->q->device->dqm->ops.get_queue_checkpoint_info) {
		pr_err("amdkfd: queue dumping not supported on this device\n");
		return -EOPNOTSUPP;
	}

	pqn->q->device->dqm->ops.get_queue_checkpoint_info(pqn->q->device->dqm,
						       pqn->q, mqd_size,
						       ctl_stack_size);
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

int pqm_debugfs_mqds(struct seq_file *m, void *data)
{
	struct process_queue_manager *pqm = data;
	struct process_queue_node *pqn;
	struct queue *q;
	enum KFD_MQD_TYPE mqd_type;
	struct mqd_manager *mqd_mgr;
	int r = 0, xcc, num_xccs = 1;
	void *mqd;
	uint64_t size = 0;

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if (pqn->q) {
			q = pqn->q;
			switch (q->properties.type) {
			case KFD_QUEUE_TYPE_SDMA:
			case KFD_QUEUE_TYPE_SDMA_XGMI:
				seq_printf(m, "  SDMA queue on device %x\n",
					   q->device->id);
				mqd_type = KFD_MQD_TYPE_SDMA;
				break;
			case KFD_QUEUE_TYPE_COMPUTE:
				seq_printf(m, "  Compute queue on device %x\n",
					   q->device->id);
				mqd_type = KFD_MQD_TYPE_CP;
				num_xccs = NUM_XCC(q->device->xcc_mask);
				break;
			default:
				seq_printf(m,
				"  Bad user queue type %d on device %x\n",
					   q->properties.type, q->device->id);
				continue;
			}
			mqd_mgr = q->device->dqm->mqd_mgrs[mqd_type];
			size = mqd_mgr->mqd_stride(mqd_mgr,
							&q->properties);
		} else if (pqn->kq) {
			q = pqn->kq->queue;
			mqd_mgr = pqn->kq->mqd_mgr;
			switch (q->properties.type) {
			case KFD_QUEUE_TYPE_DIQ:
				seq_printf(m, "  DIQ on device %x\n",
					   pqn->kq->dev->id);
				break;
			default:
				seq_printf(m,
				"  Bad kernel queue type %d on device %x\n",
					   q->properties.type,
					   pqn->kq->dev->id);
				continue;
			}
		} else {
			seq_printf(m,
		"  Weird: Queue node with neither kernel nor user queue\n");
			continue;
		}

		for (xcc = 0; xcc < num_xccs; xcc++) {
			mqd = q->mqd + size * xcc;
			r = mqd_mgr->debugfs_show_mqd(m, mqd);
			if (r != 0)
				break;
		}
	}

	return r;
}

#endif