/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"

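/*
 * inc_wptr() - advance a runlist-IB write pointer
 * @wptr: write pointer, counted in dwords
 * @increment_bytes: size of the packet just written, in bytes
 * @buffer_size_bytes: total size of the runlist IB, in bytes
 *
 * Note the unit mismatch: the write pointer is kept in dwords while
 * packet sizes are given in bytes, hence the division by
 * sizeof(uint32_t). Running past the end of the IB is a programming
 * error and only triggers a WARN; pm_calc_rlib_size() is expected to
 * have sized the IB so this cannot happen.
 */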
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
				unsigned int buffer_size_bytes)
{
	unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);

	WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
	     "Runlist IB overflow");
	*wptr = temp;
}

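/*
 * pm_calc_rlib_size() - compute the allocation size of the runlist IB
 *
 * The IB needs one map-process packet per process and one map-queues
 * packet per queue:
 *
 *	rlib_size = process_count * map_process_size +
 *		    queue_count * map_queues_size
 *
 * If the runlist is over-subscribed (more processes than the scheduling
 * quantum allows, or more compute queues than get_queues_num() reports
 * available), room for one extra runlist packet is reserved so the IB
 * can chain back to itself.
 */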
static void pm_calc_rlib_size(struct packet_manager *pm,
				unsigned int *rlib_size,
				bool *over_subscription)
{
	unsigned int process_count, queue_count, compute_queue_count;
	unsigned int map_queue_size;
	unsigned int max_proc_per_quantum = 1;
	struct kfd_dev *dev = pm->dqm->dev;

	process_count = pm->dqm->processes_count;
	queue_count = pm->dqm->queue_count;
	compute_queue_count = queue_count - pm->dqm->sdma_queue_count;

	/* Check if there is over subscription.
	 * Note: the arbitration between the number of VMIDs and
	 * hws_max_conc_proc has been done in kgd2kfd_device_init().
	 */
	*over_subscription = false;

	if (dev->max_proc_per_quantum > 1)
		max_proc_per_quantum = dev->max_proc_per_quantum;

	if ((process_count > max_proc_per_quantum) ||
	    compute_queue_count > get_queues_num(pm->dqm)) {
		*over_subscription = true;
		pr_debug("Over subscribed runlist\n");
	}

	map_queue_size = pm->pmf->map_queues_size;
	/* calculate run list ib allocation size */
	*rlib_size = process_count * pm->pmf->map_process_size +
		     queue_count * map_queue_size;

	/*
	 * Increase the allocation size in case we need a chained run list
	 * when over-subscribed.
	 */
	if (*over_subscription)
		*rlib_size += pm->pmf->runlist_size;

	pr_debug("runlist ib size %d\n", *rlib_size);
}

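/*
 * pm_allocate_runlist_ib() - allocate and zero the runlist IB
 *
 * The IB is carved out of the device's GTT sub-allocator. Both the CPU
 * pointer (used to build the packets) and the GPU address (referenced
 * by the RUN_LIST packet) are returned. pm->allocated guards against
 * double allocation and is cleared again by pm_release_ib().
 */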
static int pm_allocate_runlist_ib(struct packet_manager *pm,
				unsigned int **rl_buffer,
				uint64_t *rl_gpu_buffer,
				unsigned int *rl_buffer_size,
				bool *is_over_subscription)
{
	int retval;

	if (WARN_ON(pm->allocated))
		return -EINVAL;

	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);

	mutex_lock(&pm->lock);

	retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
					&pm->ib_buffer_obj);

	if (retval) {
		pr_err("Failed to allocate runlist IB\n");
		goto out;
	}

	*(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
	*rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;

	memset(*rl_buffer, 0, *rl_buffer_size);
	pm->allocated = true;

out:
	mutex_unlock(&pm->lock);
	return retval;
}

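/*
 * pm_create_runlist_ib() - fill the runlist IB from the DQM queue lists
 *
 * Resulting IB layout, one block per process (sketch):
 *
 *	| MAP_PROCESS p0 | MAP_QUEUES q0..qN | MAP_PROCESS p1 | ... | RUN_LIST |
 *
 * For each process, kernel queues on qpd->priv_queue_list are mapped
 * before the user queues on qpd->queues_list, and inactive queues are
 * skipped. The trailing chained RUN_LIST packet is emitted only when
 * the runlist is over-subscribed.
 */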
static int pm_create_runlist_ib(struct packet_manager *pm,
				struct list_head *queues,
				uint64_t *rl_gpu_addr,
				size_t *rl_size_bytes)
{
	unsigned int alloc_size_bytes;
	unsigned int *rl_buffer, rl_wptr, i;
	int retval, processes_mapped;
	struct device_process_node *cur;
	struct qcm_process_device *qpd;
	struct queue *q;
	struct kernel_queue *kq;
	bool is_over_subscription;

	rl_wptr = retval = processes_mapped = 0;

	retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
				&alloc_size_bytes, &is_over_subscription);
	if (retval)
		return retval;

	*rl_size_bytes = alloc_size_bytes;
	pm->ib_size_bytes = alloc_size_bytes;

	pr_debug("Building runlist ib: process count %d, queue count %d\n",
		pm->dqm->processes_count, pm->dqm->queue_count);

	/* build the run list ib packet */
	list_for_each_entry(cur, queues, list) {
		qpd = cur->qpd;
		/* build map process packet */
		if (processes_mapped >= pm->dqm->processes_count) {
			pr_debug("Not enough space left in runlist IB\n");
			pm_release_ib(pm);
			return -ENOMEM;
		}

		retval = pm->pmf->map_process(pm, &rl_buffer[rl_wptr], qpd);
		if (retval)
			return retval;

		processes_mapped++;
		inc_wptr(&rl_wptr, pm->pmf->map_process_size,
				alloc_size_bytes);

		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
			if (!kq->queue->properties.is_active)
				continue;

			pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
				kq->queue->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm,
						&rl_buffer[rl_wptr],
						kq->queue,
						qpd->is_debug);
			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				pm->pmf->map_queues_size,
				alloc_size_bytes);
		}

		list_for_each_entry(q, &qpd->queues_list, list) {
			if (!q->properties.is_active)
				continue;

			pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
				q->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm,
						&rl_buffer[rl_wptr],
						q,
						qpd->is_debug);

			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				pm->pmf->map_queues_size,
				alloc_size_bytes);
		}
	}

	pr_debug("Finished mapping processes and queues to runlist\n");

	if (is_over_subscription)
		retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
					*rl_gpu_addr,
					alloc_size_bytes / sizeof(uint32_t),
					true);

	for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
		pr_debug("0x%08X ", rl_buffer[i]);
	pr_debug("\n");

	return retval;
}

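/*
 * pm_init() - initialize a packet manager for the given device queue manager
 *
 * Selects the ASIC-specific packet-building functions (the VI-format
 * PM4 packets are reused for CIK parts, the v9 format covers
 * Vega10/Raven) and creates the HIQ kernel queue through which all
 * packets are handed to the hardware scheduler (HWS).
 */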
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
	switch (dqm->dev->device_info->asic_family) {
	case CHIP_KAVERI:
	case CHIP_HAWAII:
		/* PM4 packet structures on CIK are the same as on VI */
	case CHIP_CARRIZO:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
		pm->pmf = &kfd_vi_pm_funcs;
		break;
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		pm->pmf = &kfd_v9_pm_funcs;
		break;
	default:
		WARN(1, "Unexpected ASIC family %u",
		     dqm->dev->device_info->asic_family);
		return -EINVAL;
	}

	pm->dqm = dqm;
	mutex_init(&pm->lock);
	pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
	if (!pm->priv_queue) {
		mutex_destroy(&pm->lock);
		return -ENOMEM;
	}
	pm->allocated = false;

	return 0;
}

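/*
 * Typical call sequence, as driven by the device queue manager. This is
 * an illustrative sketch only; the real call sites live in
 * kfd_device_queue_manager.c, and it assumes the packet manager is
 * embedded in the DQM as dqm->packets:
 *
 *	if (!pm_init(&dqm->packets, dqm)) {
 *		pm_send_set_resources(&dqm->packets, &res);
 *		pm_send_runlist(&dqm->packets, &dqm->queues);
 *		...
 *		pm_send_unmap_queue(&dqm->packets, ...);
 *		pm_release_ib(&dqm->packets);
 *		pm_uninit(&dqm->packets);
 *	}
 */
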
void pm_uninit(struct packet_manager *pm)
{
	mutex_destroy(&pm->lock);
	kernel_queue_uninit(pm->priv_queue);
}

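/*
 * pm_send_set_resources() - tell the HWS which resources it may use
 *
 * Builds a SET_RESOURCES packet from *res on the HIQ and submits it;
 * if packet construction fails, the acquired HIQ space is rolled back
 * instead.
 */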
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res)
{
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->set_resources_size;
	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
					size / sizeof(uint32_t),
					(unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->set_resources(pm, buffer, res);
	if (!retval)
		pm->priv_queue->ops.submit_packet(pm->priv_queue);
	else
		pm->priv_queue->ops.rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);

	return retval;
}

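/*
 * pm_send_runlist() - build a runlist IB and point the HWS at it
 *
 * The runlist itself is built into an indirect buffer by
 * pm_create_runlist_ib(); the HIQ then only carries a single RUN_LIST
 * packet referencing the IB's GPU address. The IB stays allocated while
 * the runlist is active and is freed later through pm_release_ib().
 */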
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
{
	uint64_t rl_gpu_ib_addr;
	uint32_t *rl_buffer;
	size_t rl_ib_size, packet_size_dwords;
	int retval;

	retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
					&rl_ib_size);
	if (retval)
		goto fail_create_runlist_ib;

	pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);

	packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
	mutex_lock(&pm->lock);

	retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
					packet_size_dwords, &rl_buffer);
	if (retval)
		goto fail_acquire_packet_buffer;

	retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr,
					rl_ib_size / sizeof(uint32_t), false);
	if (retval)
		goto fail_create_runlist;

	pm->priv_queue->ops.submit_packet(pm->priv_queue);

	mutex_unlock(&pm->lock);

	return retval;

fail_create_runlist:
	pm->priv_queue->ops.rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
fail_create_runlist_ib:
	pm_release_ib(pm);
	return retval;
}

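/*
 * pm_send_query_status() - ask the HWS to write back a fence value
 *
 * Submits a QUERY_STATUS packet that makes the scheduler write
 * @fence_value to @fence_address once it has processed all preceding
 * packets; callers poll that fence to synchronize with the HWS.
 */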
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
			uint32_t fence_value)
{
	uint32_t *buffer, size;
	int retval = 0;

	if (WARN_ON(!fence_address))
		return -EFAULT;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
	if (!retval)
		pm->priv_queue->ops.submit_packet(pm->priv_queue);
	else
		pm->priv_queue->ops.rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

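/*
 * pm_send_unmap_queue() - preempt queues through the HWS
 *
 * Submits an UNMAP_QUEUES packet selecting queues by @filter and
 * @filter_param (a single queue, all queues of one process, or all
 * queues). @reset requests a hard reset rather than graceful
 * preemption, and @sdma_engine identifies the engine for SDMA queues.
 */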
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_unmap_queues_filter filter,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine)
{
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->unmap_queues_size;
	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->unmap_queues(pm, buffer, type, filter, filter_param,
				       reset, sdma_engine);
	if (!retval)
		pm->priv_queue->ops.submit_packet(pm->priv_queue);
	else
		pm->priv_queue->ops.rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

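/*
 * pm_release_ib() - free the runlist IB, if one is allocated
 *
 * Safe to call unconditionally, and more than once: pm->allocated
 * tracks whether an IB is outstanding. This should only be called once
 * the HWS no longer references the IB, i.e. after the runlist has been
 * unmapped.
 */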
void pm_release_ib(struct packet_manager *pm)
{
	mutex_lock(&pm->lock);
	if (pm->allocated) {
		kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
		pm->allocated = false;
	}
	mutex_unlock(&pm->lock);
}

#if defined(CONFIG_DEBUG_FS)

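/*
 * pm_debugfs_runlist() - dump the active runlist IB to debugfs
 *
 * Hex-dumps the whole IB in 4-byte groups, 32 bytes per line, so the
 * packets handed to the HWS can be inspected from user space.
 */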
int pm_debugfs_runlist(struct seq_file *m, void *data)
{
	struct packet_manager *pm = data;

	mutex_lock(&pm->lock);

	if (!pm->allocated) {
		seq_puts(m, "  No active runlist\n");
		goto out;
	}

	seq_hex_dump(m, "  ", DUMP_PREFIX_OFFSET, 32, 4,
		     pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);

out:
	mutex_unlock(&pm->lock);
	return 0;
}

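/*
 * pm_debugfs_hang_hws() - deliberately hang the HWS, for testing
 *
 * Fills one HIQ packet slot with the pattern 0x55 and submits it. The
 * scheduler firmware cannot parse the bogus packet and hangs, which is
 * intended to exercise the driver's hang-detection and GPU-reset paths.
 */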
int pm_debugfs_hang_hws(struct packet_manager *pm)
{
	uint32_t *buffer, size;
	int r = 0;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		r = -ENOMEM;
		goto out;
	}
	memset(buffer, 0x55, size);
	pm->priv_queue->ops.submit_packet(pm->priv_queue);

	pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.\n",
		buffer[0], buffer[1], buffer[2], buffer[3],
		buffer[4], buffer[5], buffer[6]);
out:
	mutex_unlock(&pm->lock);
	return r;
}

#endif