xref: /linux/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c (revision 128c8f96eb8638c060cd3532dc394d046ce64fe1)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright 2014-2022 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/slab.h>
26 #include <linux/list.h>
27 #include "kfd_device_queue_manager.h"
28 #include "kfd_priv.h"
29 #include "kfd_kernel_queue.h"
30 #include "amdgpu_amdkfd.h"
31 #include "amdgpu_reset.h"
32 
33 static inline struct process_queue_node *get_queue_by_qid(
34 			struct process_queue_manager *pqm, unsigned int qid)
35 {
36 	struct process_queue_node *pqn;
37 
38 	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
39 		if ((pqn->q && pqn->q->properties.queue_id == qid) ||
40 		    (pqn->kq && pqn->kq->queue->properties.queue_id == qid))
41 			return pqn;
42 	}
43 
44 	return NULL;
45 }
46 
47 static int assign_queue_slot_by_qid(struct process_queue_manager *pqm,
48 				    unsigned int qid)
49 {
50 	if (qid >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
51 		return -EINVAL;
52 
53 	if (__test_and_set_bit(qid, pqm->queue_slot_bitmap)) {
54 		pr_err("Cannot create new queue because requested qid(%u) is in use\n", qid);
55 		return -ENOSPC;
56 	}
57 
58 	return 0;
59 }
60 
61 static int find_available_queue_slot(struct process_queue_manager *pqm,
62 					unsigned int *qid)
63 {
64 	unsigned long found;
65 
66 	found = find_first_zero_bit(pqm->queue_slot_bitmap,
67 			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
68 
69 	pr_debug("The new slot id %lu\n", found);
70 
71 	if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
72 		pr_info("Cannot open more queues for process with pasid 0x%x\n",
73 				pqm->process->pasid);
74 		return -ENOMEM;
75 	}
76 
77 	set_bit(found, pqm->queue_slot_bitmap);
78 	*qid = found;
79 
80 	return 0;
81 }
82 
83 void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
84 {
85 	struct kfd_node *dev = pdd->dev;
86 
87 	if (pdd->already_dequeued)
88 		return;
89 	/* The MES context flush needs to filter out the case in which the
90 	 * KFD process is created without setting up the MES context and
91 	 * queue for creating a compute queue.
92 	 */
93 	dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
94 	if (dev->kfd->shared_resources.enable_mes && !!pdd->proc_ctx_gpu_addr &&
95 	    down_read_trylock(&dev->adev->reset_domain->sem)) {
96 		amdgpu_mes_flush_shader_debugger(dev->adev,
97 						 pdd->proc_ctx_gpu_addr);
98 		up_read(&dev->adev->reset_domain->sem);
99 	}
100 	pdd->already_dequeued = true;
101 }
102 
103 int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
104 			void *gws)
105 {
106 	struct mqd_update_info minfo = {0};
107 	struct kfd_node *dev = NULL;
108 	struct process_queue_node *pqn;
109 	struct kfd_process_device *pdd;
110 	struct kgd_mem *mem = NULL;
111 	int ret;
112 
113 	pqn = get_queue_by_qid(pqm, qid);
114 	if (!pqn) {
115 		pr_err("Queue id does not match any known queue\n");
116 		return -EINVAL;
117 	}
118 
119 	if (pqn->q)
120 		dev = pqn->q->device;
121 	if (WARN_ON(!dev))
122 		return -ENODEV;
123 
124 	pdd = kfd_get_process_device_data(dev, pqm->process);
125 	if (!pdd) {
126 		pr_err("Process device data doesn't exist\n");
127 		return -EINVAL;
128 	}
129 
130 	/* Only one queue per process can have GWS assigned */
131 	if (gws && pdd->qpd.num_gws)
132 		return -EBUSY;
133 
134 	if (!gws && pdd->qpd.num_gws == 0)
135 		return -EINVAL;
136 
137 	if ((KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3) &&
138 	     KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 4) &&
139 	     KFD_GC_VERSION(dev) != IP_VERSION(9, 5, 0)) &&
140 	    !dev->kfd->shared_resources.enable_mes) {
141 		if (gws)
142 			ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info,
143 				gws, &mem);
144 		else
145 			ret = amdgpu_amdkfd_remove_gws_from_process(pdd->process->kgd_process_info,
146 				pqn->q->gws);
147 		if (unlikely(ret))
148 			return ret;
149 		pqn->q->gws = mem;
150 	} else {
151 		/*
152 		 * Intentionally set GWS to a non-NULL value
153 		 * for devices that do not use GWS for global wave
154 		 * synchronization but require the formality
155 		 * of setting GWS for cooperative groups.
156 		 */
157 		pqn->q->gws = gws ? ERR_PTR(-ENOMEM) : NULL;
158 	}
159 
160 	pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0;
161 	minfo.update_flag = gws ? UPDATE_FLAG_IS_GWS : 0;
162 
163 	return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
164 							pqn->q, &minfo);
165 }
166 
167 void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
168 {
169 	int i;
170 
171 	for (i = 0; i < p->n_pdds; i++)
172 		kfd_process_dequeue_from_device(p->pdds[i]);
173 }
174 
175 int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
176 {
177 	INIT_LIST_HEAD(&pqm->queues);
178 	pqm->queue_slot_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
179 					       GFP_KERNEL);
180 	if (!pqm->queue_slot_bitmap)
181 		return -ENOMEM;
182 	pqm->process = p;
183 
184 	return 0;
185 }
186 
187 static void pqm_clean_queue_resource(struct process_queue_manager *pqm,
188 				     struct process_queue_node *pqn)
189 {
190 	struct kfd_node *dev;
191 	struct kfd_process_device *pdd;
192 
193 	dev = pqn->q->device;
194 
195 	pdd = kfd_get_process_device_data(dev, pqm->process);
196 	if (!pdd) {
197 		pr_err("Process device data doesn't exist\n");
198 		return;
199 	}
200 
201 	if (pqn->q->gws) {
202 		if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
203 		    KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 4) &&
204 		    KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 5, 0) &&
205 		    !dev->kfd->shared_resources.enable_mes)
206 			amdgpu_amdkfd_remove_gws_from_process(
207 				pqm->process->kgd_process_info, pqn->q->gws);
208 		pdd->qpd.num_gws = 0;
209 	}
210 
211 	if (dev->kfd->shared_resources.enable_mes) {
212 		amdgpu_amdkfd_free_gtt_mem(dev->adev, &pqn->q->gang_ctx_bo);
213 		amdgpu_amdkfd_free_gtt_mem(dev->adev, (void **)&pqn->q->wptr_bo_gart);
214 	}
215 }
216 
217 void pqm_uninit(struct process_queue_manager *pqm)
218 {
219 	struct process_queue_node *pqn, *next;
220 
221 	list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
222 		if (pqn->q) {
223 			struct kfd_process_device *pdd = kfd_get_process_device_data(pqn->q->device,
224 										     pqm->process);
225 			if (pdd) {
226 				kfd_queue_unref_bo_vas(pdd, &pqn->q->properties);
227 				kfd_queue_release_buffers(pdd, &pqn->q->properties);
228 			} else {
229 				WARN_ON(!pdd);
230 			}
231 			pqm_clean_queue_resource(pqm, pqn);
232 		}
233 
234 		kfd_procfs_del_queue(pqn->q);
235 		uninit_queue(pqn->q);
236 		list_del(&pqn->process_queue_list);
237 		kfree(pqn);
238 	}
239 
240 	bitmap_free(pqm->queue_slot_bitmap);
241 	pqm->queue_slot_bitmap = NULL;
242 }
243 
244 static int init_user_queue(struct process_queue_manager *pqm,
245 				struct kfd_node *dev, struct queue **q,
246 				struct queue_properties *q_properties,
247 				unsigned int qid)
248 {
249 	int retval;
250 
251 	/* Doorbell initialized in user space */
252 	q_properties->doorbell_ptr = NULL;
253 	q_properties->exception_status = KFD_EC_MASK(EC_QUEUE_NEW);
254 
255 	/* let DQM handle it */
256 	q_properties->vmid = 0;
257 	q_properties->queue_id = qid;
258 
259 	retval = init_queue(q, q_properties);
260 	if (retval != 0)
261 		return retval;
262 
263 	(*q)->device = dev;
264 	(*q)->process = pqm->process;
265 
266 	if (dev->kfd->shared_resources.enable_mes) {
267 		retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
268 						AMDGPU_MES_GANG_CTX_SIZE,
269 						&(*q)->gang_ctx_bo,
270 						&(*q)->gang_ctx_gpu_addr,
271 						&(*q)->gang_ctx_cpu_ptr,
272 						false);
273 		if (retval) {
274 			pr_err("failed to allocate gang context bo\n");
275 			goto cleanup;
276 		}
277 		memset((*q)->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);
278 
279 		/* Starting with GFX11, wptr BOs must be mapped to GART so that MES can detect work
280 		 * on unmapped queues under user-mode queue oversubscription (no aggregated doorbell)
281 		 */
282 		if (((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK)
283 		    >> AMDGPU_MES_API_VERSION_SHIFT) >= 2) {
284 			if (dev->adev != amdgpu_ttm_adev(q_properties->wptr_bo->tbo.bdev)) {
285 				pr_err("Queue memory allocated to wrong device\n");
286 				retval = -EINVAL;
287 				goto free_gang_ctx_bo;
288 			}
289 
290 			retval = amdgpu_amdkfd_map_gtt_bo_to_gart(q_properties->wptr_bo,
291 								  &(*q)->wptr_bo_gart);
292 			if (retval) {
293 				pr_err("Failed to map wptr bo to GART\n");
294 				goto free_gang_ctx_bo;
295 			}
296 		}
297 	}
298 
299 	pr_debug("PQM After init queue\n");
300 	return 0;
301 
302 free_gang_ctx_bo:
303 	amdgpu_amdkfd_free_gtt_mem(dev->adev, &(*q)->gang_ctx_bo);
304 cleanup:
305 	uninit_queue(*q);
306 	*q = NULL;
307 	return retval;
308 }
309 
310 int pqm_create_queue(struct process_queue_manager *pqm,
311 			    struct kfd_node *dev,
312 			    struct queue_properties *properties,
313 			    unsigned int *qid,
314 			    const struct kfd_criu_queue_priv_data *q_data,
315 			    const void *restore_mqd,
316 			    const void *restore_ctl_stack,
317 			    uint32_t *p_doorbell_offset_in_process)
318 {
319 	int retval;
320 	struct kfd_process_device *pdd;
321 	struct queue *q;
322 	struct process_queue_node *pqn;
323 	struct kernel_queue *kq;
324 	enum kfd_queue_type type = properties->type;
325 	unsigned int max_queues = 127; /* HWS limit */
326 
327 	/*
328 	 * On GFX 9.4.3/9.4.4/9.5.0, increase the number of queues that
329 	 * can be created to 255. There is no HWS limit on these GPUs.
330 	 */
331 	if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
332 	    KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4) ||
333 	    KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0))
334 		max_queues = 255;
335 
336 	q = NULL;
337 	kq = NULL;
338 
339 	pdd = kfd_get_process_device_data(dev, pqm->process);
340 	if (!pdd) {
341 		pr_err("Process device data doesn't exist\n");
342 		return -1;
343 	}
344 
345 	/*
346 	 * For a debug process, verify that it is within the static queues
347 	 * limit; currently the limit is set to half of the total available
348 	 * HQD slots. If we are just about to create a DIQ, the is_debug flag
349 	 * is not set yet, hence we also check the queue type.
350 	 */
351 	if ((pdd->qpd.is_debug) || (type == KFD_QUEUE_TYPE_DIQ))
352 		max_queues = dev->kfd->device_info.max_no_of_hqd/2;
353 
354 	if (pdd->qpd.queue_count >= max_queues)
355 		return -ENOSPC;
356 
357 	if (q_data) {
358 		retval = assign_queue_slot_by_qid(pqm, q_data->q_id);
359 		*qid = q_data->q_id;
360 	} else
361 		retval = find_available_queue_slot(pqm, qid);
362 
363 	if (retval != 0)
364 		return retval;
365 
366 	if (list_empty(&pdd->qpd.queues_list) &&
367 	    list_empty(&pdd->qpd.priv_queue_list))
368 		dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);
369 
370 	pqn = kzalloc(sizeof(*pqn), GFP_KERNEL);
371 	if (!pqn) {
372 		retval = -ENOMEM;
373 		goto err_allocate_pqn;
374 	}
375 
376 	switch (type) {
377 	case KFD_QUEUE_TYPE_SDMA:
378 	case KFD_QUEUE_TYPE_SDMA_XGMI:
379 	case KFD_QUEUE_TYPE_SDMA_BY_ENG_ID:
380 		/* SDMA queues are always allocated statically no matter
381 		 * which scheduler mode is used. We also do not need to
382 		 * check whether an SDMA queue can be allocated here, because
383 		 * allocate_sdma_queue() in create_queue() has the
384 		 * corresponding check logic.
385 		 */
386 		retval = init_user_queue(pqm, dev, &q, properties, *qid);
387 		if (retval != 0)
388 			goto err_create_queue;
389 		pqn->q = q;
390 		pqn->kq = NULL;
391 		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
392 						    restore_mqd, restore_ctl_stack);
393 		print_queue(q);
394 		break;
395 
396 	case KFD_QUEUE_TYPE_COMPUTE:
397 		/* Check if there is oversubscription */
398 		if ((dev->dqm->sched_policy ==
399 		     KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
400 		((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
401 		(dev->dqm->active_queue_count >= get_cp_queues_num(dev->dqm)))) {
402 			pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
403 			retval = -EPERM;
404 			goto err_create_queue;
405 		}
406 
407 		retval = init_user_queue(pqm, dev, &q, properties, *qid);
408 		if (retval != 0)
409 			goto err_create_queue;
410 		pqn->q = q;
411 		pqn->kq = NULL;
412 		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
413 						    restore_mqd, restore_ctl_stack);
414 		print_queue(q);
415 		break;
416 	case KFD_QUEUE_TYPE_DIQ:
417 		kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ);
418 		if (!kq) {
419 			retval = -ENOMEM;
420 			goto err_create_queue;
421 		}
422 		kq->queue->properties.queue_id = *qid;
423 		pqn->kq = kq;
424 		pqn->q = NULL;
425 		retval = kfd_process_drain_interrupts(pdd);
426 		if (retval)
427 			break;
428 
429 		retval = dev->dqm->ops.create_kernel_queue(dev->dqm,
430 							kq, &pdd->qpd);
431 		break;
432 	default:
433 		WARN(1, "Invalid queue type %d", type);
434 		retval = -EINVAL;
435 	}
436 
437 	if (retval != 0) {
438 		pr_err("Pasid 0x%x DQM create queue type %d failed. ret %d\n",
439 			pqm->process->pasid, type, retval);
440 		goto err_create_queue;
441 	}
442 
443 	if (q && p_doorbell_offset_in_process) {
444 		/* Return the doorbell offset within the doorbell page
445 		 * to the caller, in bytes, so it can be passed up to
446 		 * user mode.
447 		 * Relative doorbell index = absolute doorbell index -
448 		 * absolute index of the first doorbell in the page.
449 		 */
450 		uint32_t first_db_index = amdgpu_doorbell_index_on_bar(pdd->dev->adev,
451 								       pdd->qpd.proc_doorbells,
452 								       0,
453 								       pdd->dev->kfd->device_info.doorbell_size);
454 
455 		*p_doorbell_offset_in_process = (q->properties.doorbell_off
456 						- first_db_index) * sizeof(uint32_t);
457 	}
458 
459 	pr_debug("PQM After DQM create queue\n");
460 
461 	list_add(&pqn->process_queue_list, &pqm->queues);
462 
463 	if (q) {
464 		pr_debug("PQM done creating queue\n");
465 		kfd_procfs_add_queue(q);
466 		print_queue_properties(&q->properties);
467 	}
468 
469 	return retval;
470 
471 err_create_queue:
472 	uninit_queue(q);
473 	if (kq)
474 		kernel_queue_uninit(kq);
475 	kfree(pqn);
476 err_allocate_pqn:
477 	/* If the queues list is empty, unregister the process from the device */
478 	clear_bit(*qid, pqm->queue_slot_bitmap);
479 	if (list_empty(&pdd->qpd.queues_list) &&
480 	    list_empty(&pdd->qpd.priv_queue_list))
481 		dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd);
482 	return retval;
483 }
484 
485 int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
486 {
487 	struct process_queue_node *pqn;
488 	struct kfd_process_device *pdd;
489 	struct device_queue_manager *dqm;
490 	struct kfd_node *dev;
491 	int retval;
492 
493 	dqm = NULL;
494 
495 	retval = 0;
496 
497 	pqn = get_queue_by_qid(pqm, qid);
498 	if (!pqn) {
499 		pr_err("Queue id does not match any known queue\n");
500 		return -EINVAL;
501 	}
502 
503 	dev = NULL;
504 	if (pqn->kq)
505 		dev = pqn->kq->dev;
506 	if (pqn->q)
507 		dev = pqn->q->device;
508 	if (WARN_ON(!dev))
509 		return -ENODEV;
510 
511 	pdd = kfd_get_process_device_data(dev, pqm->process);
512 	if (!pdd) {
513 		pr_err("Process device data doesn't exist\n");
514 		return -1;
515 	}
516 
517 	if (pqn->kq) {
518 		/* destroy kernel queue (DIQ) */
519 		dqm = pqn->kq->dev->dqm;
520 		dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
521 		kernel_queue_uninit(pqn->kq);
522 	}
523 
524 	if (pqn->q) {
525 		retval = kfd_queue_unref_bo_vas(pdd, &pqn->q->properties);
526 		if (retval)
527 			goto err_destroy_queue;
528 
529 		dqm = pqn->q->device->dqm;
530 		retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
531 		if (retval) {
532 			pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n",
533 				pqm->process->pasid,
534 				pqn->q->properties.queue_id, retval);
535 			if (retval != -ETIME)
536 				goto err_destroy_queue;
537 		}
538 		kfd_procfs_del_queue(pqn->q);
539 		kfd_queue_release_buffers(pdd, &pqn->q->properties);
540 		pqm_clean_queue_resource(pqm, pqn);
541 		uninit_queue(pqn->q);
542 	}
543 
544 	list_del(&pqn->process_queue_list);
545 	kfree(pqn);
546 	clear_bit(qid, pqm->queue_slot_bitmap);
547 
548 	if (list_empty(&pdd->qpd.queues_list) &&
549 	    list_empty(&pdd->qpd.priv_queue_list))
550 		dqm->ops.unregister_process(dqm, &pdd->qpd);
551 
552 err_destroy_queue:
553 	return retval;
554 }
555 
556 int pqm_update_queue_properties(struct process_queue_manager *pqm,
557 				unsigned int qid, struct queue_properties *p)
558 {
559 	int retval;
560 	struct process_queue_node *pqn;
561 
562 	pqn = get_queue_by_qid(pqm, qid);
563 	if (!pqn || !pqn->q) {
564 		pr_debug("No queue %d exists for update operation\n", qid);
565 		return -EFAULT;
566 	}
567 
568 	/*
569 	 * An update with a NULL ring address is used to disable the queue
570 	 */
571 	if (p->queue_address && p->queue_size) {
572 		struct kfd_process_device *pdd;
573 		struct amdgpu_vm *vm;
574 		struct queue *q = pqn->q;
575 		int err;
576 
577 		pdd = kfd_get_process_device_data(q->device, q->process);
578 		if (!pdd)
579 			return -ENODEV;
580 		vm = drm_priv_to_vm(pdd->drm_priv);
581 		err = amdgpu_bo_reserve(vm->root.bo, false);
582 		if (err)
583 			return err;
584 
585 		if (kfd_queue_buffer_get(vm, (void *)p->queue_address, &p->ring_bo,
586 					 p->queue_size)) {
587 			pr_debug("ring buf 0x%llx size 0x%llx not mapped on GPU\n",
588 				 p->queue_address, p->queue_size);
			/* Drop the root BO reservation taken above before bailing out */
			amdgpu_bo_unreserve(vm->root.bo);
589 			return -EFAULT;
590 		}
591 
592 		kfd_queue_unref_bo_va(vm, &pqn->q->properties.ring_bo);
593 		kfd_queue_buffer_put(&pqn->q->properties.ring_bo);
594 		amdgpu_bo_unreserve(vm->root.bo);
595 
596 		pqn->q->properties.ring_bo = p->ring_bo;
597 	}
598 
599 	pqn->q->properties.queue_address = p->queue_address;
600 	pqn->q->properties.queue_size = p->queue_size;
601 	pqn->q->properties.queue_percent = p->queue_percent;
602 	pqn->q->properties.priority = p->priority;
603 	pqn->q->properties.pm4_target_xcc = p->pm4_target_xcc;
604 
605 	retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
606 							pqn->q, NULL);
607 	if (retval != 0)
608 		return retval;
609 
610 	return 0;
611 }
612 
613 int pqm_update_mqd(struct process_queue_manager *pqm,
614 				unsigned int qid, struct mqd_update_info *minfo)
615 {
616 	int retval;
617 	struct process_queue_node *pqn;
618 
619 	pqn = get_queue_by_qid(pqm, qid);
620 	if (!pqn) {
621 		pr_debug("No queue %d exists for update operation\n", qid);
622 		return -EFAULT;
623 	}
624 
625 	/* CUs are masked for debugger requirements so deny user mask  */
626 	if (pqn->q->properties.is_dbg_wa && minfo && minfo->cu_mask.ptr)
627 		return -EBUSY;
628 
629 	/* ASICs that have WGPs must enforce pairwise enabled mask checks. */
630 	if (minfo && minfo->cu_mask.ptr &&
631 			KFD_GC_VERSION(pqn->q->device) >= IP_VERSION(10, 0, 0)) {
632 		int i;
633 
634 		for (i = 0; i < minfo->cu_mask.count; i += 2) {
635 			uint32_t cu_pair = (minfo->cu_mask.ptr[i / 32] >> (i % 32)) & 0x3;
636 
637 			if (cu_pair && cu_pair != 0x3) {
638 				pr_debug("CUs must be adjacent pairwise enabled.\n");
639 				return -EINVAL;
640 			}
641 		}
642 	}
643 
644 	retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
645 							pqn->q, minfo);
646 	if (retval != 0)
647 		return retval;
648 
649 	if (minfo && minfo->cu_mask.ptr)
650 		pqn->q->properties.is_user_cu_masked = true;
651 
652 	return 0;
653 }
654 
655 struct kernel_queue *pqm_get_kernel_queue(
656 					struct process_queue_manager *pqm,
657 					unsigned int qid)
658 {
659 	struct process_queue_node *pqn;
660 
661 	pqn = get_queue_by_qid(pqm, qid);
662 	if (pqn && pqn->kq)
663 		return pqn->kq;
664 
665 	return NULL;
666 }
667 
668 struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
669 					unsigned int qid)
670 {
671 	struct process_queue_node *pqn;
672 
673 	pqn = get_queue_by_qid(pqm, qid);
674 	return pqn ? pqn->q : NULL;
675 }
676 
677 int pqm_get_wave_state(struct process_queue_manager *pqm,
678 		       unsigned int qid,
679 		       void __user *ctl_stack,
680 		       u32 *ctl_stack_used_size,
681 		       u32 *save_area_used_size)
682 {
683 	struct process_queue_node *pqn;
684 
685 	pqn = get_queue_by_qid(pqm, qid);
686 	if (!pqn) {
687 		pr_debug("amdkfd: No queue %d exists for operation\n",
688 			 qid);
689 		return -EFAULT;
690 	}
691 
692 	return pqn->q->device->dqm->ops.get_wave_state(pqn->q->device->dqm,
693 						       pqn->q,
694 						       ctl_stack,
695 						       ctl_stack_used_size,
696 						       save_area_used_size);
697 }
698 
699 int pqm_get_queue_snapshot(struct process_queue_manager *pqm,
700 			   uint64_t exception_clear_mask,
701 			   void __user *buf,
702 			   int *num_qss_entries,
703 			   uint32_t *entry_size)
704 {
705 	struct process_queue_node *pqn;
706 	struct kfd_queue_snapshot_entry src;
707 	uint32_t tmp_entry_size = *entry_size, tmp_qss_entries = *num_qss_entries;
708 	int r = 0;
709 
710 	*num_qss_entries = 0;
711 	if (!(*entry_size))
712 		return -EINVAL;
713 
714 	*entry_size = min_t(size_t, *entry_size, sizeof(struct kfd_queue_snapshot_entry));
715 	mutex_lock(&pqm->process->event_mutex);
716 
717 	memset(&src, 0, sizeof(src));
718 
719 	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
720 		if (!pqn->q)
721 			continue;
722 
723 		if (*num_qss_entries < tmp_qss_entries) {
724 			set_queue_snapshot_entry(pqn->q, exception_clear_mask, &src);
725 
726 			if (copy_to_user(buf, &src, *entry_size)) {
727 				r = -EFAULT;
728 				break;
729 			}
730 			buf += tmp_entry_size;
731 		}
732 		*num_qss_entries += 1;
733 	}
734 
735 	mutex_unlock(&pqm->process->event_mutex);
736 	return r;
737 }
738 
739 static int get_queue_data_sizes(struct kfd_process_device *pdd,
740 				struct queue *q,
741 				uint32_t *mqd_size,
742 				uint32_t *ctl_stack_size)
743 {
744 	int ret;
745 
746 	ret = pqm_get_queue_checkpoint_info(&pdd->process->pqm,
747 					    q->properties.queue_id,
748 					    mqd_size,
749 					    ctl_stack_size);
750 	if (ret)
751 		pr_err("Failed to get queue dump info (%d)\n", ret);
752 
753 	return ret;
754 }
755 
756 int kfd_process_get_queue_info(struct kfd_process *p,
757 			       uint32_t *num_queues,
758 			       uint64_t *priv_data_sizes)
759 {
760 	uint32_t extra_data_sizes = 0;
761 	struct queue *q;
762 	int i;
763 	int ret;
764 
765 	*num_queues = 0;
766 
767 	/* Run over all PDDs of the process */
768 	for (i = 0; i < p->n_pdds; i++) {
769 		struct kfd_process_device *pdd = p->pdds[i];
770 
771 		list_for_each_entry(q, &pdd->qpd.queues_list, list) {
772 			if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
773 				q->properties.type == KFD_QUEUE_TYPE_SDMA ||
774 				q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
775 				uint32_t mqd_size, ctl_stack_size;
776 
777 				*num_queues = *num_queues + 1;
778 
779 				ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
780 				if (ret)
781 					return ret;
782 
783 				extra_data_sizes += mqd_size + ctl_stack_size;
784 			} else {
785 				pr_err("Unsupported queue type (%d)\n", q->properties.type);
786 				return -EOPNOTSUPP;
787 			}
788 		}
789 	}
790 	*priv_data_sizes = extra_data_sizes +
791 				(*num_queues * sizeof(struct kfd_criu_queue_priv_data));
792 
793 	return 0;
794 }
795 
796 static int pqm_checkpoint_mqd(struct process_queue_manager *pqm,
797 			      unsigned int qid,
798 			      void *mqd,
799 			      void *ctl_stack)
800 {
801 	struct process_queue_node *pqn;
802 
803 	pqn = get_queue_by_qid(pqm, qid);
804 	if (!pqn) {
805 		pr_debug("amdkfd: No queue %d exists for operation\n", qid);
806 		return -EFAULT;
807 	}
808 
809 	if (!pqn->q->device->dqm->ops.checkpoint_mqd) {
810 		pr_err("amdkfd: queue dumping not supported on this device\n");
811 		return -EOPNOTSUPP;
812 	}
813 
814 	return pqn->q->device->dqm->ops.checkpoint_mqd(pqn->q->device->dqm,
815 						       pqn->q, mqd, ctl_stack);
816 }
817 
818 static int criu_checkpoint_queue(struct kfd_process_device *pdd,
819 			   struct queue *q,
820 			   struct kfd_criu_queue_priv_data *q_data)
821 {
822 	uint8_t *mqd, *ctl_stack;
823 	int ret;
824 
825 	mqd = (void *)(q_data + 1);
826 	ctl_stack = mqd + q_data->mqd_size;
827 
828 	q_data->gpu_id = pdd->user_gpu_id;
829 	q_data->type = q->properties.type;
830 	q_data->format = q->properties.format;
831 	q_data->q_id =  q->properties.queue_id;
832 	q_data->q_address = q->properties.queue_address;
833 	q_data->q_size = q->properties.queue_size;
834 	q_data->priority = q->properties.priority;
835 	q_data->q_percent = q->properties.queue_percent;
836 	q_data->read_ptr_addr = (uint64_t)q->properties.read_ptr;
837 	q_data->write_ptr_addr = (uint64_t)q->properties.write_ptr;
838 	q_data->doorbell_id = q->doorbell_id;
839 
840 	q_data->sdma_id = q->sdma_id;
841 
842 	q_data->eop_ring_buffer_address =
843 		q->properties.eop_ring_buffer_address;
844 
845 	q_data->eop_ring_buffer_size = q->properties.eop_ring_buffer_size;
846 
847 	q_data->ctx_save_restore_area_address =
848 		q->properties.ctx_save_restore_area_address;
849 
850 	q_data->ctx_save_restore_area_size =
851 		q->properties.ctx_save_restore_area_size;
852 
853 	q_data->gws = !!q->gws;
854 
855 	ret = pqm_checkpoint_mqd(&pdd->process->pqm, q->properties.queue_id, mqd, ctl_stack);
856 	if (ret) {
857 		pr_err("Failed checkpoint queue_mqd (%d)\n", ret);
858 		return ret;
859 	}
860 
861 	pr_debug("Dumping Queue: gpu_id:%x queue_id:%u\n", q_data->gpu_id, q_data->q_id);
862 	return ret;
863 }
864 
865 static int criu_checkpoint_queues_device(struct kfd_process_device *pdd,
866 				   uint8_t __user *user_priv,
867 				   unsigned int *q_index,
868 				   uint64_t *queues_priv_data_offset)
869 {
870 	unsigned int q_private_data_size = 0;
871 	uint8_t *q_private_data = NULL; /* Local buffer to store individual queue private data */
872 	struct queue *q;
873 	int ret = 0;
874 
875 	list_for_each_entry(q, &pdd->qpd.queues_list, list) {
876 		struct kfd_criu_queue_priv_data *q_data;
877 		uint64_t q_data_size;
878 		uint32_t mqd_size;
879 		uint32_t ctl_stack_size;
880 
881 		if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE &&
882 			q->properties.type != KFD_QUEUE_TYPE_SDMA &&
883 			q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI) {
884 
885 			pr_err("Unsupported queue type (%d)\n", q->properties.type);
886 			ret = -EOPNOTSUPP;
887 			break;
888 		}
889 
890 		ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
891 		if (ret)
892 			break;
893 
894 		q_data_size = sizeof(*q_data) + mqd_size + ctl_stack_size;
895 
896 		/* Increase local buffer space if needed */
897 		if (q_private_data_size < q_data_size) {
898 			kfree(q_private_data);
899 
900 			q_private_data = kzalloc(q_data_size, GFP_KERNEL);
901 			if (!q_private_data) {
902 				ret = -ENOMEM;
903 				break;
904 			}
905 			q_private_data_size = q_data_size;
906 		}
907 
908 		q_data = (struct kfd_criu_queue_priv_data *)q_private_data;
909 
910 		/* data stored in this order: priv_data, mqd, ctl_stack */
911 		q_data->mqd_size = mqd_size;
912 		q_data->ctl_stack_size = ctl_stack_size;
913 
914 		ret = criu_checkpoint_queue(pdd, q, q_data);
915 		if (ret)
916 			break;
917 
918 		q_data->object_type = KFD_CRIU_OBJECT_TYPE_QUEUE;
919 
920 		ret = copy_to_user(user_priv + *queues_priv_data_offset,
921 				q_data, q_data_size);
922 		if (ret) {
923 			ret = -EFAULT;
924 			break;
925 		}
926 		*queues_priv_data_offset += q_data_size;
927 		*q_index = *q_index + 1;
928 	}
929 
930 	kfree(q_private_data);
931 
932 	return ret;
933 }
934 
935 int kfd_criu_checkpoint_queues(struct kfd_process *p,
936 			 uint8_t __user *user_priv_data,
937 			 uint64_t *priv_data_offset)
938 {
939 	int ret = 0, pdd_index, q_index = 0;
940 
941 	for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) {
942 		struct kfd_process_device *pdd = p->pdds[pdd_index];
943 
944 		/*
945 		 * criu_checkpoint_queues_device will copy data to user and update q_index and
946 		 * queues_priv_data_offset
947 		 */
948 		ret = criu_checkpoint_queues_device(pdd, user_priv_data, &q_index,
949 					      priv_data_offset);
950 
951 		if (ret)
952 			break;
953 	}
954 
955 	return ret;
956 }
957 
958 static void set_queue_properties_from_criu(struct queue_properties *qp,
959 					  struct kfd_criu_queue_priv_data *q_data)
960 {
961 	qp->is_interop = false;
962 	qp->queue_percent = q_data->q_percent;
963 	qp->priority = q_data->priority;
964 	qp->queue_address = q_data->q_address;
965 	qp->queue_size = q_data->q_size;
966 	qp->read_ptr = (uint32_t *) q_data->read_ptr_addr;
967 	qp->write_ptr = (uint32_t *) q_data->write_ptr_addr;
968 	qp->eop_ring_buffer_address = q_data->eop_ring_buffer_address;
969 	qp->eop_ring_buffer_size = q_data->eop_ring_buffer_size;
970 	qp->ctx_save_restore_area_address = q_data->ctx_save_restore_area_address;
971 	qp->ctx_save_restore_area_size = q_data->ctx_save_restore_area_size;
972 	qp->ctl_stack_size = q_data->ctl_stack_size;
973 	qp->type = q_data->type;
974 	qp->format = q_data->format;
975 }
976 
977 int kfd_criu_restore_queue(struct kfd_process *p,
978 			   uint8_t __user *user_priv_ptr,
979 			   uint64_t *priv_data_offset,
980 			   uint64_t max_priv_data_size)
981 {
982 	uint8_t *mqd, *ctl_stack, *q_extra_data = NULL;
983 	struct kfd_criu_queue_priv_data *q_data;
984 	struct kfd_process_device *pdd;
985 	uint64_t q_extra_data_size;
986 	struct queue_properties qp;
987 	unsigned int queue_id;
988 	int ret = 0;
989 
990 	if (*priv_data_offset + sizeof(*q_data) > max_priv_data_size)
991 		return -EINVAL;
992 
993 	q_data = kmalloc(sizeof(*q_data), GFP_KERNEL);
994 	if (!q_data)
995 		return -ENOMEM;
996 
997 	ret = copy_from_user(q_data, user_priv_ptr + *priv_data_offset, sizeof(*q_data));
998 	if (ret) {
999 		ret = -EFAULT;
1000 		goto exit;
1001 	}
1002 
1003 	*priv_data_offset += sizeof(*q_data);
1004 	q_extra_data_size = (uint64_t)q_data->ctl_stack_size + q_data->mqd_size;
1005 
1006 	if (*priv_data_offset + q_extra_data_size > max_priv_data_size) {
1007 		ret = -EINVAL;
1008 		goto exit;
1009 	}
1010 
1011 	q_extra_data = kmalloc(q_extra_data_size, GFP_KERNEL);
1012 	if (!q_extra_data) {
1013 		ret = -ENOMEM;
1014 		goto exit;
1015 	}
1016 
1017 	ret = copy_from_user(q_extra_data, user_priv_ptr + *priv_data_offset, q_extra_data_size);
1018 	if (ret) {
1019 		ret = -EFAULT;
1020 		goto exit;
1021 	}
1022 
1023 	*priv_data_offset += q_extra_data_size;
1024 
1025 	pdd = kfd_process_device_data_by_id(p, q_data->gpu_id);
1026 	if (!pdd) {
1027 		pr_err("Failed to get pdd\n");
1028 		ret = -EINVAL;
1029 		goto exit;
1030 	}
1031 
1032 	/* data stored in this order: mqd, ctl_stack */
1033 	mqd = q_extra_data;
1034 	ctl_stack = mqd + q_data->mqd_size;
1035 
1036 	memset(&qp, 0, sizeof(qp));
1037 	set_queue_properties_from_criu(&qp, q_data);
1038 
1039 	print_queue_properties(&qp);
1040 
1041 	ret = pqm_create_queue(&p->pqm, pdd->dev, &qp, &queue_id, q_data, mqd, ctl_stack, NULL);
1042 	if (ret) {
1043 		pr_err("Failed to create new queue err:%d\n", ret);
1044 		goto exit;
1045 	}
1046 
1047 	if (q_data->gws)
1048 		ret = pqm_set_gws(&p->pqm, q_data->q_id, pdd->dev->gws);
1049 
1050 exit:
1051 	if (ret)
1052 		pr_err("Failed to restore queue (%d)\n", ret);
1053 	else
1054 		pr_debug("Queue id %d was restored successfully\n", queue_id);
1055 
1056 	kfree(q_data);
1057 	kfree(q_extra_data);
1058 
1059 	return ret;
1060 }
1061 
1062 int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
1063 				  unsigned int qid,
1064 				  uint32_t *mqd_size,
1065 				  uint32_t *ctl_stack_size)
1066 {
1067 	struct process_queue_node *pqn;
1068 
1069 	pqn = get_queue_by_qid(pqm, qid);
1070 	if (!pqn) {
1071 		pr_debug("amdkfd: No queue %d exists for operation\n", qid);
1072 		return -EFAULT;
1073 	}
1074 
1075 	if (!pqn->q->device->dqm->ops.get_queue_checkpoint_info) {
1076 		pr_err("amdkfd: queue dumping not supported on this device\n");
1077 		return -EOPNOTSUPP;
1078 	}
1079 
1080 	pqn->q->device->dqm->ops.get_queue_checkpoint_info(pqn->q->device->dqm,
1081 						       pqn->q, mqd_size,
1082 						       ctl_stack_size);
1083 	return 0;
1084 }
1085 
1086 #if defined(CONFIG_DEBUG_FS)
1087 
1088 int pqm_debugfs_mqds(struct seq_file *m, void *data)
1089 {
1090 	struct process_queue_manager *pqm = data;
1091 	struct process_queue_node *pqn;
1092 	struct queue *q;
1093 	enum KFD_MQD_TYPE mqd_type;
1094 	struct mqd_manager *mqd_mgr;
1095 	int r = 0, xcc, num_xccs = 1;
1096 	void *mqd;
1097 	uint64_t size = 0;
1098 
1099 	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
1100 		if (pqn->q) {
1101 			q = pqn->q;
1102 			switch (q->properties.type) {
1103 			case KFD_QUEUE_TYPE_SDMA:
1104 			case KFD_QUEUE_TYPE_SDMA_XGMI:
1105 				seq_printf(m, "  SDMA queue on device %x\n",
1106 					   q->device->id);
1107 				mqd_type = KFD_MQD_TYPE_SDMA;
1108 				break;
1109 			case KFD_QUEUE_TYPE_COMPUTE:
1110 				seq_printf(m, "  Compute queue on device %x\n",
1111 					   q->device->id);
1112 				mqd_type = KFD_MQD_TYPE_CP;
1113 				num_xccs = NUM_XCC(q->device->xcc_mask);
1114 				break;
1115 			default:
1116 				seq_printf(m,
1117 				"  Bad user queue type %d on device %x\n",
1118 					   q->properties.type, q->device->id);
1119 				continue;
1120 			}
1121 			mqd_mgr = q->device->dqm->mqd_mgrs[mqd_type];
1122 			size = mqd_mgr->mqd_stride(mqd_mgr,
1123 							&q->properties);
1124 		} else if (pqn->kq) {
1125 			q = pqn->kq->queue;
1126 			mqd_mgr = pqn->kq->mqd_mgr;
1127 			switch (q->properties.type) {
1128 			case KFD_QUEUE_TYPE_DIQ:
1129 				seq_printf(m, "  DIQ on device %x\n",
1130 					   pqn->kq->dev->id);
1131 				break;
1132 			default:
1133 				seq_printf(m,
1134 				"  Bad kernel queue type %d on device %x\n",
1135 					   q->properties.type,
1136 					   pqn->kq->dev->id);
1137 				continue;
1138 			}
1139 		} else {
1140 			seq_printf(m,
1141 		"  Weird: Queue node with neither kernel nor user queue\n");
1142 			continue;
1143 		}
1144 
1145 		for (xcc = 0; xcc < num_xccs; xcc++) {
1146 			mqd = q->mqd + size * xcc;
1147 			r = mqd_mgr->debugfs_show_mqd(m, mqd);
1148 			if (r != 0)
1149 				break;
1150 		}
1151 	}
1152 
1153 	return r;
1154 }
1155 
1156 #endif
1157