// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"

#define OVER_SUBSCRIPTION_PROCESS_COUNT (1 << 0)
#define OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT (1 << 1)
#define OVER_SUBSCRIPTION_GWS_QUEUE_COUNT (1 << 2)

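/*
 * inc_wptr() - advance the runlist IB write pointer (a dword index) by
 * increment_bytes, warning if the write would run past the end of the
 * allocated IB buffer.
 */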
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
				unsigned int buffer_size_bytes)
{
	unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);

	WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
	     "Runlist IB overflow");
	*wptr = temp;
}

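/*
 * pm_calc_rlib_size() - compute the runlist IB allocation size from the
 * current process and queue counts. *over_subscription is returned as a
 * mask of OVER_SUBSCRIPTION_* flags (0 if not oversubscribed); an
 * oversubscribed runlist needs extra room for a chained runlist packet.
 */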
static void pm_calc_rlib_size(struct packet_manager *pm,
				unsigned int *rlib_size,
				int *over_subscription)
{
	unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
	unsigned int map_queue_size;
	unsigned int max_proc_per_quantum = 1;
	struct kfd_node *node = pm->dqm->dev;
	struct device *dev = node->adev->dev;

	process_count = pm->dqm->processes_count;
	queue_count = pm->dqm->active_queue_count;
	compute_queue_count = pm->dqm->active_cp_queue_count;
	gws_queue_count = pm->dqm->gws_queue_count;

	/* check if there is over subscription
	 * Note: the arbitration between the number of VMIDs and
	 * hws_max_conc_proc has been done in
	 * kgd2kfd_device_init().
	 */
	*over_subscription = 0;

	if (node->max_proc_per_quantum > 1)
		max_proc_per_quantum = node->max_proc_per_quantum;

	if (process_count > max_proc_per_quantum)
		*over_subscription |= OVER_SUBSCRIPTION_PROCESS_COUNT;
	if (compute_queue_count > get_cp_queues_num(pm->dqm))
		*over_subscription |= OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT;
	if (gws_queue_count > 1)
		*over_subscription |= OVER_SUBSCRIPTION_GWS_QUEUE_COUNT;

	if (*over_subscription)
		dev_dbg(dev, "Over subscribed runlist\n");

	map_queue_size = pm->pmf->map_queues_size;
	/* calculate run list ib allocation size */
	*rlib_size = process_count * pm->pmf->map_process_size +
		     queue_count * map_queue_size;

	/*
	 * Increase the allocation size in case we need a chained run list
	 * when over subscription
	 */
	if (*over_subscription)
		*rlib_size += pm->pmf->runlist_size;

	dev_dbg(dev, "runlist ib size %d\n", *rlib_size);
}

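/*
 * pm_allocate_runlist_ib() - allocate and zero a GTT sub-allocation for the
 * runlist IB and return its CPU and GPU addresses. Fails with -EINVAL if an
 * IB is already allocated.
 */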
static int pm_allocate_runlist_ib(struct packet_manager *pm,
				unsigned int **rl_buffer,
				uint64_t *rl_gpu_buffer,
				unsigned int *rl_buffer_size,
				int *is_over_subscription)
{
	struct kfd_node *node = pm->dqm->dev;
	struct device *dev = node->adev->dev;
	int retval;

	if (WARN_ON(pm->allocated))
		return -EINVAL;

	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);

	mutex_lock(&pm->lock);

	retval = kfd_gtt_sa_allocate(node, *rl_buffer_size, &pm->ib_buffer_obj);

	if (retval) {
		dev_err(dev, "Failed to allocate runlist IB\n");
		goto out;
	}

	*(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
	*rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;

	memset(*rl_buffer, 0, *rl_buffer_size);
	pm->allocated = true;

out:
	mutex_unlock(&pm->lock);
	return retval;
}

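/*
 * pm_create_runlist_ib() - fill the runlist IB with one map_process packet
 * per process, followed by map_queues packets for each of its active kernel
 * and user queues. On an oversubscribed runlist, a chained runlist packet
 * pointing back at the start of the IB is appended so the HWS keeps cycling
 * through the list.
 */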
static int pm_create_runlist_ib(struct packet_manager *pm,
				struct list_head *queues,
				uint64_t *rl_gpu_addr,
				size_t *rl_size_bytes)
{
	unsigned int alloc_size_bytes;
	unsigned int *rl_buffer, rl_wptr, i;
	struct kfd_node *node = pm->dqm->dev;
	struct device *dev = node->adev->dev;
	int retval, processes_mapped;
	struct device_process_node *cur;
	struct qcm_process_device *qpd;
	struct queue *q;
	struct kernel_queue *kq;
	int is_over_subscription;

	rl_wptr = retval = processes_mapped = 0;

	retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
				&alloc_size_bytes, &is_over_subscription);
	if (retval)
		return retval;

	*rl_size_bytes = alloc_size_bytes;
	pm->ib_size_bytes = alloc_size_bytes;

	dev_dbg(dev, "Building runlist ib process count: %d queues count %d\n",
		pm->dqm->processes_count, pm->dqm->active_queue_count);

	/* build the run list ib packet */
	list_for_each_entry(cur, queues, list) {
		qpd = cur->qpd;
		/* build map process packet */
		if (processes_mapped >= pm->dqm->processes_count) {
			dev_dbg(dev, "Not enough space left in runlist IB\n");
			pm_release_ib(pm);
			return -ENOMEM;
		}

		retval = pm->pmf->map_process(pm, &rl_buffer[rl_wptr], qpd);
		if (retval)
			return retval;

		processes_mapped++;
		inc_wptr(&rl_wptr, pm->pmf->map_process_size,
				alloc_size_bytes);

		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
			if (!kq->queue->properties.is_active)
				continue;

			dev_dbg(dev,
				"static_queue, mapping kernel q %d, is debug status %d\n",
				kq->queue->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm,
						&rl_buffer[rl_wptr],
						kq->queue,
						qpd->is_debug);
			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				pm->pmf->map_queues_size,
				alloc_size_bytes);
		}

		list_for_each_entry(q, &qpd->queues_list, list) {
			if (!q->properties.is_active)
				continue;

			dev_dbg(dev,
				"static_queue, mapping user queue %d, is debug status %d\n",
				q->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm,
						&rl_buffer[rl_wptr],
						q,
						qpd->is_debug);

			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				pm->pmf->map_queues_size,
				alloc_size_bytes);
		}
	}

	dev_dbg(dev, "Finished map process and queues to runlist\n");

	if (is_over_subscription) {
		if (!pm->is_over_subscription)
			dev_warn(dev, "Runlist is getting oversubscribed due to%s%s%s. Expect reduced ROCm performance.\n",
				 is_over_subscription & OVER_SUBSCRIPTION_PROCESS_COUNT ?
				 " too many processes." : "",
				 is_over_subscription & OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT ?
				 " too many queues." : "",
				 is_over_subscription & OVER_SUBSCRIPTION_GWS_QUEUE_COUNT ?
				 " multiple processes using cooperative launch." : "");

		retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
					*rl_gpu_addr,
					alloc_size_bytes / sizeof(uint32_t),
					true);
	}
	pm->is_over_subscription = !!is_over_subscription;

	for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
		pr_debug("0x%2X ", rl_buffer[i]);
	pr_debug("\n");

	return retval;
}

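/*
 * pm_init() - select the packet writer functions matching the ASIC
 * generation and create the HIQ kernel queue used to submit packets to
 * the HWS.
 */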
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
	switch (dqm->dev->adev->asic_type) {
	case CHIP_KAVERI:
	case CHIP_HAWAII:
		/* PM4 packet structures on CIK are the same as on VI */
	case CHIP_CARRIZO:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		pm->pmf = &kfd_vi_pm_funcs;
		break;
	default:
		if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2) ||
		    KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3) ||
		    KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 4) ||
		    KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 5, 0))
			pm->pmf = &kfd_aldebaran_pm_funcs;
		else if (KFD_GC_VERSION(dqm->dev) >= IP_VERSION(9, 0, 1))
			pm->pmf = &kfd_v9_pm_funcs;
		else {
			WARN(1, "Unexpected ASIC family %u",
			     dqm->dev->adev->asic_type);
			return -EINVAL;
		}
	}

	pm->dqm = dqm;
	mutex_init(&pm->lock);
	pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
	if (!pm->priv_queue) {
		mutex_destroy(&pm->lock);
		return -ENOMEM;
	}
	pm->allocated = false;

	return 0;
}

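/* pm_uninit() - tear down the packet manager and its HIQ kernel queue. */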
void pm_uninit(struct packet_manager *pm)
{
	mutex_destroy(&pm->lock);
	kernel_queue_uninit(pm->priv_queue);
	pm->priv_queue = NULL;
}

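/*
 * pm_send_set_resources() - submit a set_resources packet on the HIQ,
 * handing the scheduling resources described in @res to the HWS.
 */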
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res)
{
	struct kfd_node *node = pm->dqm->dev;
	struct device *dev = node->adev->dev;
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->set_resources_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
					size / sizeof(uint32_t),
					(unsigned int **)&buffer);
	if (!buffer) {
		dev_err(dev, "Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->set_resources(pm, buffer, res);
	if (!retval)
		retval = kq_submit_packet(pm->priv_queue);
	else
		kq_rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);

	return retval;
}

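/*
 * pm_send_runlist() - build a runlist IB for the given queue list and
 * submit a runlist packet pointing at it on the HIQ, which starts HWS
 * scheduling of those queues.
 */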
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
{
	uint64_t rl_gpu_ib_addr;
	uint32_t *rl_buffer;
	size_t rl_ib_size, packet_size_dwords;
	int retval;

	retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
					&rl_ib_size);
	if (retval)
		goto fail_create_runlist_ib;

	pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);

	packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
	mutex_lock(&pm->lock);

	retval = kq_acquire_packet_buffer(pm->priv_queue,
					packet_size_dwords, &rl_buffer);
	if (retval)
		goto fail_acquire_packet_buffer;

	retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr,
					rl_ib_size / sizeof(uint32_t), false);
	if (retval)
		goto fail_create_runlist;

	retval = kq_submit_packet(pm->priv_queue);

	mutex_unlock(&pm->lock);

	return retval;

fail_create_runlist:
	kq_rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
fail_create_runlist_ib:
	pm_release_ib(pm);
	return retval;
}

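/*
 * pm_send_query_status() - submit a query_status packet on the HIQ. The HWS
 * writes fence_value to fence_address once the packet is processed, letting
 * callers fence on completion of previously submitted packets.
 */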
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
			uint64_t fence_value)
{
	struct kfd_node *node = pm->dqm->dev;
	struct device *dev = node->adev->dev;
	uint32_t *buffer, size;
	int retval = 0;

	if (WARN_ON(!fence_address))
		return -EFAULT;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		dev_err(dev, "Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
	if (!retval)
		retval = kq_submit_packet(pm->priv_queue);
	else
		kq_rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

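/*
 * pm_update_grace_period() - program the HWS preemption grace period. A
 * zero set_grace_period_size means the packet is not supported on this
 * ASIC and the call becomes a no-op.
 */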
int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period)
{
	struct kfd_node *node = pm->dqm->dev;
	struct device *dev = node->adev->dev;
	int retval = 0;
	uint32_t *buffer, size;

	size = pm->pmf->set_grace_period_size;

	mutex_lock(&pm->lock);

	if (size) {
		kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t),
			(unsigned int **)&buffer);

		if (!buffer) {
			dev_err(dev,
				"Failed to allocate buffer on kernel queue\n");
			retval = -ENOMEM;
			goto out;
		}

		retval = pm->pmf->set_grace_period(pm, buffer, grace_period);
		if (!retval)
			retval = kq_submit_packet(pm->priv_queue);
		else
			kq_rollback_packet(pm->priv_queue);
	}

out:
	mutex_unlock(&pm->lock);
	return retval;
}

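/*
 * pm_send_unmap_queue() - submit an unmap_queues packet on the HIQ to
 * preempt (or, with reset, forcibly remove) the queues selected by filter
 * and filter_param.
 */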
int pm_send_unmap_queue(struct packet_manager *pm,
			enum kfd_unmap_queues_filter filter,
			uint32_t filter_param, bool reset)
{
	struct kfd_node *node = pm->dqm->dev;
	struct device *dev = node->adev->dev;
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->unmap_queues_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		dev_err(dev, "Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->unmap_queues(pm, buffer, filter, filter_param, reset);
	if (!retval)
		retval = kq_submit_packet(pm->priv_queue);
	else
		kq_rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

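/* pm_release_ib() - free the runlist IB sub-allocation, if one exists. */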
void pm_release_ib(struct packet_manager *pm)
{
	mutex_lock(&pm->lock);
	if (pm->allocated) {
		kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
		pm->allocated = false;
	}
	mutex_unlock(&pm->lock);
}

#if defined(CONFIG_DEBUG_FS)

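/* pm_debugfs_runlist() - hex-dump the currently allocated runlist IB. */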
int pm_debugfs_runlist(struct seq_file *m, void *data)
{
	struct packet_manager *pm = data;

	mutex_lock(&pm->lock);

	if (!pm->allocated) {
		seq_puts(m, "  No active runlist\n");
		goto out;
	}

	seq_hex_dump(m, "  ", DUMP_PREFIX_OFFSET, 32, 4,
		     pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);

out:
	mutex_unlock(&pm->lock);
	return 0;
}

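/*
 * pm_debugfs_hang_hws() - intentionally hang the HWS by submitting a packet
 * filled with 0x55 bytes on the HIQ; used to test GPU reset handling.
 */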
int pm_debugfs_hang_hws(struct packet_manager *pm)
{
	struct kfd_node *node = pm->dqm->dev;
	struct device *dev = node->adev->dev;
	uint32_t *buffer, size;
	int r = 0;

	if (!pm->priv_queue)
		return -EAGAIN;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		dev_err(dev, "Failed to allocate buffer on kernel queue\n");
		r = -ENOMEM;
		goto out;
	}
	memset(buffer, 0x55, size);
	kq_submit_packet(pm->priv_queue);

	dev_info(dev, "Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
		 buffer[0], buffer[1], buffer[2], buffer[3], buffer[4],
		 buffer[5], buffer[6]);
out:
	mutex_unlock(&pm->lock);
	return r;
}

#endif