xref: /linux/drivers/accel/amdxdna/amdxdna_ctx.c (revision 1fd1dc41724319406b0aff221a352a400b0ddfc5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
4  */
5 
6 #include <drm/amdxdna_accel.h>
7 #include <drm/drm_device.h>
8 #include <drm/drm_drv.h>
9 #include <drm/drm_file.h>
10 #include <drm/drm_gem.h>
11 #include <drm/drm_gem_shmem_helper.h>
12 #include <drm/drm_print.h>
13 #include <drm/gpu_scheduler.h>
14 #include <linux/xarray.h>
15 #include <trace/events/amdxdna.h>
16 
17 #include "amdxdna_ctx.h"
18 #include "amdxdna_gem.h"
19 #include "amdxdna_pci_drv.h"
20 
21 #define MAX_HWCTX_ID		255
22 #define MAX_ARG_COUNT		4095
23 
/* Driver fence: wraps a dma_fence and remembers the hwctx that issued it. */
struct amdxdna_fence {
	struct dma_fence	base;
	spinlock_t		lock; /* for base */
	struct amdxdna_hwctx	*hwctx;
};
29 
/* dma_fence_ops callback: report the driver (module) name for this fence. */
static const char *amdxdna_fence_get_driver_name(struct dma_fence *fence)
{
	return KBUILD_MODNAME;
}
34 
35 static const char *amdxdna_fence_get_timeline_name(struct dma_fence *fence)
36 {
37 	struct amdxdna_fence *xdna_fence;
38 
39 	xdna_fence = container_of(fence, struct amdxdna_fence, base);
40 
41 	return xdna_fence->hwctx->name;
42 }
43 
/* Minimal fence ops; wait/release behavior comes from the dma-fence core. */
static const struct dma_fence_ops fence_ops = {
	.get_driver_name = amdxdna_fence_get_driver_name,
	.get_timeline_name = amdxdna_fence_get_timeline_name,
};
48 
49 static struct dma_fence *amdxdna_fence_create(struct amdxdna_hwctx *hwctx)
50 {
51 	struct amdxdna_fence *fence;
52 
53 	fence = kzalloc_obj(*fence);
54 	if (!fence)
55 		return NULL;
56 
57 	fence->hwctx = hwctx;
58 	spin_lock_init(&fence->lock);
59 	dma_fence_init(&fence->base, &fence_ops, &fence->lock, hwctx->id, 0);
60 	return &fence->base;
61 }
62 
/*
 * Tear down @hwctx after all SRCU readers (exec command ioctls) have
 * drained, then free its resources. Both callers erase the context from
 * client->hwctx_xa first, so no new reader can find it.
 */
static void amdxdna_hwctx_destroy_rcu(struct amdxdna_hwctx *hwctx,
				      struct srcu_struct *ss)
{
	struct amdxdna_dev *xdna = hwctx->client->xdna;

	/* Wait for in-flight SRCU read sections to finish before teardown. */
	synchronize_srcu(ss);

	/* At this point, user is not able to submit new commands */
	xdna->dev_info->ops->hwctx_fini(hwctx);

	kfree(hwctx->name);
	kfree(hwctx);
}
76 
77 int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg,
78 		       int (*walk)(struct amdxdna_hwctx *hwctx, void *arg))
79 {
80 	struct amdxdna_hwctx *hwctx;
81 	unsigned long hwctx_id;
82 	int ret = 0, idx;
83 
84 	idx = srcu_read_lock(&client->hwctx_srcu);
85 	amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
86 		ret = walk(hwctx, arg);
87 		if (ret)
88 			break;
89 	}
90 	srcu_read_unlock(&client->hwctx_srcu, idx);
91 
92 	return ret;
93 }
94 
95 void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size)
96 {
97 	struct amdxdna_cmd *cmd = abo->mem.kva;
98 	u32 num_masks, count;
99 
100 	if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN)
101 		num_masks = 0;
102 	else
103 		num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header);
104 
105 	if (size) {
106 		count = FIELD_GET(AMDXDNA_CMD_COUNT, cmd->header);
107 		if (unlikely(count <= num_masks ||
108 			     count * sizeof(u32) +
109 			     offsetof(struct amdxdna_cmd, data[0]) >
110 			     abo->mem.size)) {
111 			*size = 0;
112 			return NULL;
113 		}
114 		*size = (count - num_masks) * sizeof(u32);
115 	}
116 	return &cmd->data[num_masks];
117 }
118 
119 u32 amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo)
120 {
121 	struct amdxdna_cmd *cmd = abo->mem.kva;
122 	u32 num_masks, i;
123 	u32 *cu_mask;
124 
125 	if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN)
126 		return INVALID_CU_IDX;
127 
128 	num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header);
129 	cu_mask = cmd->data;
130 	for (i = 0; i < num_masks; i++) {
131 		if (cu_mask[i])
132 			return ffs(cu_mask[i]) - 1;
133 	}
134 
135 	return INVALID_CU_IDX;
136 }
137 
/*
 * This should be called in close() and remove(). DO NOT call in other syscalls.
 * This guarantees that the hwctx and its resources are released even if the
 * user never calls amdxdna_drm_destroy_hwctx_ioctl.
 */
143 void amdxdna_hwctx_remove_all(struct amdxdna_client *client)
144 {
145 	struct amdxdna_hwctx *hwctx;
146 	unsigned long hwctx_id;
147 
148 	amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
149 		XDNA_DBG(client->xdna, "PID %d close HW context %d",
150 			 client->pid, hwctx->id);
151 		xa_erase(&client->hwctx_xa, hwctx->id);
152 		amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);
153 	}
154 }
155 
/* DRM ioctl: create a HW context for the calling client. */
int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_drm_create_hwctx *args = data;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_hwctx *hwctx;
	int ret, idx;

	/* No extensions are defined; reject anything non-zero. */
	if (args->ext || args->ext_flags)
		return -EINVAL;

	hwctx = kzalloc_obj(*hwctx);
	if (!hwctx)
		return -ENOMEM;

	if (copy_from_user(&hwctx->qos, u64_to_user_ptr(args->qos_p), sizeof(hwctx->qos))) {
		XDNA_ERR(xdna, "Access QoS info failed");
		kfree(hwctx);
		return -EFAULT;
	}

	hwctx->client = client;
	hwctx->fw_ctx_id = -1;	/* not yet assigned; hwctx_init() fills it in */
	hwctx->num_tiles = args->num_tiles;
	hwctx->mem_size = args->mem_size;
	hwctx->max_opc = args->max_opc;

	/* dev_lock is dropped automatically on every return path below. */
	guard(mutex)(&xdna->dev_lock);

	if (!drm_dev_enter(dev, &idx)) {
		ret = -ENODEV;
		goto free_hwctx;
	}

	ret = xdna->dev_info->ops->hwctx_init(hwctx);
	if (ret) {
		XDNA_ERR(xdna, "Init hwctx failed, ret %d", ret);
		goto dev_exit;
	}

	/* Name carries PID and firmware context ID for debug/trace output. */
	hwctx->name = kasprintf(GFP_KERNEL, "hwctx.%d.%d", client->pid, hwctx->fw_ctx_id);
	if (!hwctx->name) {
		ret = -ENOMEM;
		goto fini_hwctx;
	}

	/* Cyclic ID allocation avoids quick reuse of recently freed handles. */
	ret = xa_alloc_cyclic(&client->hwctx_xa, &hwctx->id, hwctx,
			      XA_LIMIT(AMDXDNA_INVALID_CTX_HANDLE + 1, MAX_HWCTX_ID),
			      &client->next_hwctxid, GFP_KERNEL);
	if (ret < 0) {
		XDNA_ERR(xdna, "Allocate hwctx ID failed, ret %d", ret);
		goto free_name;
	}

	args->handle = hwctx->id;
	args->syncobj_handle = hwctx->syncobj_hdl;

	atomic64_set(&hwctx->job_submit_cnt, 0);
	atomic64_set(&hwctx->job_free_cnt, 0);
	XDNA_DBG(xdna, "PID %d create HW context %d, ret %d", client->pid, args->handle, ret);
	drm_dev_exit(idx);
	return 0;

	/* Error unwind: undo steps in reverse order of setup. */
free_name:
	kfree(hwctx->name);
fini_hwctx:
	xdna->dev_info->ops->hwctx_fini(hwctx);
dev_exit:
	drm_dev_exit(idx);
free_hwctx:
	kfree(hwctx);
	return ret;
}
229 
/* DRM ioctl: tear down one HW context identified by args->handle. */
int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_drm_destroy_hwctx *args = data;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_hwctx *hwctx;
	int ret = 0, idx;

	/* Reserved padding must be zero. */
	if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
		return -EINVAL;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	mutex_lock(&xdna->dev_lock);
	/* Erase first so no new submit can look the context up. */
	hwctx = xa_erase(&client->hwctx_xa, args->handle);
	if (!hwctx) {
		ret = -EINVAL;
		XDNA_DBG(xdna, "PID %d HW context %d not exist",
			 client->pid, args->handle);
		goto out;
	}

	/*
	 * The pushed jobs are handled by DRM scheduler during destroy.
	 * SRCU to synchronize with exec command ioctls.
	 */
	amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);

	XDNA_DBG(xdna, "PID %d destroyed HW context %d", client->pid, args->handle);
out:
	mutex_unlock(&xdna->dev_lock);
	drm_dev_exit(idx);
	return ret;
}
265 
/*
 * DRM ioctl: apply a configuration to a HW context. Depending on
 * param_type, param_val is either a user pointer (a bounce buffer is
 * allocated and copied in) or a plain value passed through unchanged.
 */
int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_drm_config_hwctx *args = data;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_hwctx *hwctx;
	u32 buf_size;
	void *buf;
	int ret;
	u64 val;

	/* Reserved padding must be zero. */
	if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
		return -EINVAL;

	if (!xdna->dev_info->ops->hwctx_config)
		return -EOPNOTSUPP;

	val = args->param_val;
	buf_size = args->param_val_size;

	switch (args->param_type) {
	case DRM_AMDXDNA_HWCTX_CONFIG_CU:
		/* For those types that param_val is pointer */
		if (buf_size > PAGE_SIZE) {
			XDNA_ERR(xdna, "Config CU param buffer too large");
			return -E2BIG;
		}

		/* Hwctx needs to keep buf */
		buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		if (copy_from_user(buf, u64_to_user_ptr(val), buf_size)) {
			kfree(buf);
			return -EFAULT;
		}

		break;
	case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF:
	case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF:
		/* For those types that param_val is a value */
		buf = NULL;
		buf_size = 0;
		break;
	default:
		XDNA_DBG(xdna, "Unknown HW context config type %d", args->param_type);
		return -EINVAL;
	}

	/* dev_lock held until return; hwctx cannot be destroyed meanwhile. */
	guard(mutex)(&xdna->dev_lock);
	hwctx = xa_load(&client->hwctx_xa, args->handle);
	if (!hwctx) {
		XDNA_DBG(xdna, "PID %d failed to get hwctx %d", client->pid, args->handle);
		ret = -EINVAL;
		goto free_buf;
	}

	ret = xdna->dev_info->ops->hwctx_config(hwctx, args->param_type, val, buf, buf_size);

free_buf:
	/* kfree(NULL) is a no-op for the value-only param types. */
	kfree(buf);
	return ret;
}
330 
/*
 * Sync the debug BO @debug_bo_hdl with the HW context it is assigned to.
 * Returns -EOPNOTSUPP when the device lacks the operation, -EINVAL for a
 * bad handle or when the BO's assigned context no longer exists.
 */
int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl)
{
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_hwctx *hwctx;
	struct amdxdna_gem_obj *abo;
	struct drm_gem_object *gobj;
	int ret;

	if (!xdna->dev_info->ops->hwctx_sync_debug_bo)
		return -EOPNOTSUPP;

	gobj = drm_gem_object_lookup(client->filp, debug_bo_hdl);
	if (!gobj)
		return -EINVAL;

	abo = to_xdna_obj(gobj);
	/* dev_lock held until return keeps the looked-up hwctx valid. */
	guard(mutex)(&xdna->dev_lock);
	hwctx = xa_load(&client->hwctx_xa, abo->assigned_hwctx);
	if (!hwctx) {
		ret = -EINVAL;
		goto put_obj;
	}

	ret = xdna->dev_info->ops->hwctx_sync_debug_bo(hwctx, debug_bo_hdl);

put_obj:
	drm_gem_object_put(gobj);
	return ret;
}
360 
361 static void
362 amdxdna_arg_bos_put(struct amdxdna_sched_job *job)
363 {
364 	int i;
365 
366 	for (i = 0; i < job->bo_cnt; i++) {
367 		if (!job->bos[i])
368 			break;
369 		drm_gem_object_put(job->bos[i]);
370 	}
371 }
372 
/*
 * Look up and pin all argument BOs for @job. On success every bos[i]
 * holds a GEM reference, dropped later via amdxdna_arg_bos_put(). On
 * failure all references taken so far are released and a negative errno
 * is returned.
 */
static int
amdxdna_arg_bos_lookup(struct amdxdna_client *client,
		       struct amdxdna_sched_job *job,
		       u32 *bo_hdls, u32 bo_cnt)
{
	struct drm_gem_object *gobj;
	int i, ret;

	job->bo_cnt = bo_cnt;
	for (i = 0; i < job->bo_cnt; i++) {
		struct amdxdna_gem_obj *abo;

		gobj = drm_gem_object_lookup(client->filp, bo_hdls[i]);
		if (!gobj) {
			ret = -ENOENT;
			goto put_shmem_bo;
		}
		abo = to_xdna_obj(gobj);

		/* Pin at most once; already-pinned BOs only need the ref. */
		mutex_lock(&abo->lock);
		if (abo->pinned) {
			mutex_unlock(&abo->lock);
			job->bos[i] = gobj;
			continue;
		}

		ret = amdxdna_gem_pin_nolock(abo);
		if (ret) {
			mutex_unlock(&abo->lock);
			drm_gem_object_put(gobj);
			goto put_shmem_bo;
		}
		abo->pinned = true;
		mutex_unlock(&abo->lock);

		job->bos[i] = gobj;
	}

	return 0;

put_shmem_bo:
	/* Entries past the failure point are NULL (job is zero-allocated). */
	amdxdna_arg_bos_put(job);
	return ret;
}
417 
/* Release all references a job holds: argument BOs, command BO, fence. */
void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job)
{
	trace_amdxdna_debug_point(job->hwctx->name, job->seq, "job release");
	amdxdna_arg_bos_put(job);
	amdxdna_gem_put_obj(job->cmd_bo);
	dma_fence_put(job->fence);
}
425 
/*
 * Build a scheduler job for one command and hand it to the device layer.
 *
 * Takes references on the command BO (optional) and all argument BOs,
 * creates the job fence, then calls the device cmd_submit op. On success
 * the command sequence number is returned through @seq; on failure all
 * references taken here are released and a negative errno is returned.
 */
int amdxdna_cmd_submit(struct amdxdna_client *client,
		       struct amdxdna_drv_cmd *drv_cmd,
		       u32 cmd_bo_hdl, u32 *arg_bo_hdls, u32 arg_bo_cnt,
		       u32 hwctx_hdl, u64 *seq)
{
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_sched_job *job;
	struct amdxdna_hwctx *hwctx;
	int ret, idx;

	XDNA_DBG(xdna, "Command BO hdl %d, Arg BO count %d", cmd_bo_hdl, arg_bo_cnt);
	/* Flexible array allocation: one bos[] slot per argument BO. */
	job = kzalloc_flex(*job, bos, arg_bo_cnt);
	if (!job)
		return -ENOMEM;

	job->drv_cmd = drv_cmd;

	if (cmd_bo_hdl != AMDXDNA_INVALID_BO_HANDLE) {
		job->cmd_bo = amdxdna_gem_get_obj(client, cmd_bo_hdl, AMDXDNA_BO_CMD);
		if (!job->cmd_bo) {
			XDNA_ERR(xdna, "Failed to get cmd bo from %d", cmd_bo_hdl);
			ret = -EINVAL;
			goto free_job;
		}
	}

	ret = amdxdna_arg_bos_lookup(client, job, arg_bo_hdls, arg_bo_cnt);
	if (ret) {
		XDNA_ERR(xdna, "Argument BOs lookup failed, ret %d", ret);
		goto cmd_put;
	}

	/* SRCU read section keeps hwctx alive against a concurrent destroy. */
	idx = srcu_read_lock(&client->hwctx_srcu);
	hwctx = xa_load(&client->hwctx_xa, hwctx_hdl);
	if (!hwctx) {
		XDNA_DBG(xdna, "PID %d failed to get hwctx %d",
			 client->pid, hwctx_hdl);
		ret = -EINVAL;
		goto unlock_srcu;
	}


	job->hwctx = hwctx;
	job->mm = current->mm;

	job->fence = amdxdna_fence_create(hwctx);
	if (!job->fence) {
		XDNA_ERR(xdna, "Failed to create fence");
		ret = -ENOMEM;
		goto unlock_srcu;
	}
	kref_init(&job->refcnt);

	ret = xdna->dev_info->ops->cmd_submit(hwctx, job, seq);
	if (ret)
		goto put_fence;

	/*
	 * The amdxdna_hwctx_destroy_rcu() will release hwctx and associated
	 * resource after synchronize_srcu(). The submitted jobs should be
	 * handled by the queue, for example DRM scheduler, in device layer.
	 * For here we can unlock SRCU.
	 */
	srcu_read_unlock(&client->hwctx_srcu, idx);
	trace_amdxdna_debug_point(hwctx->name, *seq, "job pushed");

	return 0;

put_fence:
	dma_fence_put(job->fence);
unlock_srcu:
	srcu_read_unlock(&client->hwctx_srcu, idx);
	amdxdna_arg_bos_put(job);
cmd_put:
	/* cmd_bo may be NULL when no command BO was supplied. */
	amdxdna_gem_put_obj(job->cmd_bo);
free_job:
	kfree(job);
	return ret;
}
505 
506 /*
507  * The submit command ioctl submits a command to firmware. One firmware command
508  * may contain multiple command BOs for processing as a whole.
509  * The command sequence number is returned which can be used for wait command ioctl.
510  */
511 static int amdxdna_drm_submit_execbuf(struct amdxdna_client *client,
512 				      struct amdxdna_drm_exec_cmd *args)
513 {
514 	struct amdxdna_dev *xdna = client->xdna;
515 	u32 *arg_bo_hdls = NULL;
516 	u32 cmd_bo_hdl;
517 	int ret;
518 
519 	if (args->arg_count > MAX_ARG_COUNT) {
520 		XDNA_ERR(xdna, "Invalid arg bo count %d", args->arg_count);
521 		return -EINVAL;
522 	}
523 
524 	/* Only support single command for now. */
525 	if (args->cmd_count != 1) {
526 		XDNA_ERR(xdna, "Invalid cmd bo count %d", args->cmd_count);
527 		return -EINVAL;
528 	}
529 
530 	cmd_bo_hdl = (u32)args->cmd_handles;
531 	if (args->arg_count) {
532 		arg_bo_hdls = kcalloc(args->arg_count, sizeof(u32), GFP_KERNEL);
533 		if (!arg_bo_hdls)
534 			return -ENOMEM;
535 		ret = copy_from_user(arg_bo_hdls, u64_to_user_ptr(args->args),
536 				     args->arg_count * sizeof(u32));
537 		if (ret) {
538 			ret = -EFAULT;
539 			goto free_cmd_bo_hdls;
540 		}
541 	}
542 
543 	ret = amdxdna_cmd_submit(client, NULL, cmd_bo_hdl, arg_bo_hdls,
544 				 args->arg_count, args->hwctx, &args->seq);
545 	if (ret)
546 		XDNA_DBG(xdna, "Submit cmds failed, ret %d", ret);
547 
548 free_cmd_bo_hdls:
549 	kfree(arg_bo_hdls);
550 	if (!ret)
551 		XDNA_DBG(xdna, "Pushed cmd %lld to scheduler", args->seq);
552 	return ret;
553 }
554 
555 int amdxdna_drm_submit_cmd_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
556 {
557 	struct amdxdna_client *client = filp->driver_priv;
558 	struct amdxdna_drm_exec_cmd *args = data;
559 
560 	if (args->ext || args->ext_flags)
561 		return -EINVAL;
562 
563 	switch (args->type) {
564 	case AMDXDNA_CMD_SUBMIT_EXEC_BUF:
565 		return amdxdna_drm_submit_execbuf(client, args);
566 	}
567 
568 	XDNA_ERR(client->xdna, "Invalid command type %d", args->type);
569 	return -EINVAL;
570 }
571