xref: /linux/drivers/accel/amdxdna/amdxdna_ctx.c (revision 815e260a18a3af4dab59025ee99a7156c0e8b5e0)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
 */

#include <drm/amdxdna_accel.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include <linux/xarray.h>
#include <trace/events/amdxdna.h>

#include "amdxdna_ctx.h"
#include "amdxdna_gem.h"
#include "amdxdna_pci_drv.h"

#define MAX_HWCTX_ID		255
#define MAX_ARG_COUNT		4095

struct amdxdna_fence {
	struct dma_fence	base;
	spinlock_t		lock; /* for base */
	struct amdxdna_hwctx	*hwctx;
};

static const char *amdxdna_fence_get_driver_name(struct dma_fence *fence)
{
	return KBUILD_MODNAME;
}

static const char *amdxdna_fence_get_timeline_name(struct dma_fence *fence)
{
	struct amdxdna_fence *xdna_fence;

	xdna_fence = container_of(fence, struct amdxdna_fence, base);

	return xdna_fence->hwctx->name;
}

static const struct dma_fence_ops fence_ops = {
	.get_driver_name = amdxdna_fence_get_driver_name,
	.get_timeline_name = amdxdna_fence_get_timeline_name,
};

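/*
 * Allocate a fence on the hardware context's timeline. The hwctx ID is used
 * as the dma_fence context and the hwctx name as the timeline name.
 */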
static struct dma_fence *amdxdna_fence_create(struct amdxdna_hwctx *hwctx)
{
	struct amdxdna_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return NULL;

	fence->hwctx = hwctx;
	spin_lock_init(&fence->lock);
	dma_fence_init(&fence->base, &fence_ops, &fence->lock, hwctx->id, 0);
	return &fence->base;
}

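/*
 * Wait for in-flight SRCU readers (command submission and other hwctx
 * lookups) to finish, then let the device layer tear the context down and
 * free the name and the context itself.
 */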
static void amdxdna_hwctx_destroy_rcu(struct amdxdna_hwctx *hwctx,
				      struct srcu_struct *ss)
{
	struct amdxdna_dev *xdna = hwctx->client->xdna;

	synchronize_srcu(ss);

	/* At this point, user is not able to submit new commands */
	xdna->dev_info->ops->hwctx_fini(hwctx);

	kfree(hwctx->name);
	kfree(hwctx);
}

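/*
 * Walk every hardware context of @client under the hwctx SRCU read lock and
 * call @walk on each one. Iteration stops at the first non-zero return value,
 * which is then propagated to the caller.
 *
 * A minimal sketch of a callback (illustrative only, not part of the driver):
 *
 *	static int count_hwctx(struct amdxdna_hwctx *hwctx, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return 0;
 *	}
 *
 *	int cnt = 0;
 *	amdxdna_hwctx_walk(client, &cnt, count_hwctx);
 */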
int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg,
		       int (*walk)(struct amdxdna_hwctx *hwctx, void *arg))
{
	struct amdxdna_hwctx *hwctx;
	unsigned long hwctx_id;
	int ret = 0, idx;

	idx = srcu_read_lock(&client->hwctx_srcu);
	amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
		ret = walk(hwctx, arg);
		if (ret)
			break;
	}
	srcu_read_unlock(&client->hwctx_srcu, idx);

	return ret;
}

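/*
 * Return a pointer to the payload that follows the CU mask words of a command
 * BO and, when @size is non-NULL, report the payload size in bytes. Layout as
 * consumed here:
 *
 *	cmd->header  : opcode, word count, number of extra CU mask words
 *	cmd->data[0 .. num_masks - 1] : CU masks (none for ERT_CMD_CHAIN)
 *	cmd->data[num_masks ..]       : payload of (count - num_masks) u32 words
 */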
void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size)
{
	struct amdxdna_cmd *cmd = abo->mem.kva;
	u32 num_masks, count;

	if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN)
		num_masks = 0;
	else
		num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header);

	if (size) {
		count = FIELD_GET(AMDXDNA_CMD_COUNT, cmd->header);
		if (unlikely(count <= num_masks)) {
			*size = 0;
			return NULL;
		}
		*size = (count - num_masks) * sizeof(u32);
	}
	return &cmd->data[num_masks];
}

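/*
 * Return the index of the first compute unit selected in the command's CU
 * masks, or INVALID_CU_IDX for chained commands and for commands with an
 * all-zero mask.
 */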
u32 amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo)
{
	struct amdxdna_cmd *cmd = abo->mem.kva;
	u32 num_masks, i;
	u32 *cu_mask;

	if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN)
		return INVALID_CU_IDX;

	num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header);
	cu_mask = cmd->data;
	for (i = 0; i < num_masks; i++) {
		if (cu_mask[i])
			return ffs(cu_mask[i]) - 1;
	}

	return INVALID_CU_IDX;
}

/*
 * This should be called in close() and remove(). DO NOT call in other syscalls.
 * It guarantees that the hardware contexts and their resources are released
 * even if the user never calls amdxdna_drm_destroy_hwctx_ioctl.
 */
void amdxdna_hwctx_remove_all(struct amdxdna_client *client)
{
	struct amdxdna_hwctx *hwctx;
	unsigned long hwctx_id;

	amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
		XDNA_DBG(client->xdna, "PID %d close HW context %d",
			 client->pid, hwctx->id);
		xa_erase(&client->hwctx_xa, hwctx->id);
		amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);
	}
}

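/*
 * Create a hardware context for the calling client: copy the QoS request from
 * userspace, let the device layer initialize the context, then publish it in
 * the client's xarray and return its handle and syncobj handle to userspace.
 */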
int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_drm_create_hwctx *args = data;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_hwctx *hwctx;
	int ret, idx;

	if (args->ext || args->ext_flags)
		return -EINVAL;

	hwctx = kzalloc(sizeof(*hwctx), GFP_KERNEL);
	if (!hwctx)
		return -ENOMEM;

	if (copy_from_user(&hwctx->qos, u64_to_user_ptr(args->qos_p), sizeof(hwctx->qos))) {
		XDNA_ERR(xdna, "Access QoS info failed");
		kfree(hwctx);
		return -EFAULT;
	}

	hwctx->client = client;
	hwctx->fw_ctx_id = -1;
	hwctx->num_tiles = args->num_tiles;
	hwctx->mem_size = args->mem_size;
	hwctx->max_opc = args->max_opc;

	guard(mutex)(&xdna->dev_lock);

	if (!drm_dev_enter(dev, &idx)) {
		ret = -ENODEV;
		goto free_hwctx;
	}

	ret = xdna->dev_info->ops->hwctx_init(hwctx);
	if (ret) {
		XDNA_ERR(xdna, "Init hwctx failed, ret %d", ret);
		goto dev_exit;
	}

	hwctx->name = kasprintf(GFP_KERNEL, "hwctx.%d.%d", client->pid, hwctx->fw_ctx_id);
	if (!hwctx->name) {
		ret = -ENOMEM;
		goto fini_hwctx;
	}

	ret = xa_alloc_cyclic(&client->hwctx_xa, &hwctx->id, hwctx,
			      XA_LIMIT(AMDXDNA_INVALID_CTX_HANDLE + 1, MAX_HWCTX_ID),
			      &client->next_hwctxid, GFP_KERNEL);
	if (ret < 0) {
		XDNA_ERR(xdna, "Allocate hwctx ID failed, ret %d", ret);
		goto free_name;
	}

	args->handle = hwctx->id;
	args->syncobj_handle = hwctx->syncobj_hdl;

	atomic64_set(&hwctx->job_submit_cnt, 0);
	atomic64_set(&hwctx->job_free_cnt, 0);
	XDNA_DBG(xdna, "PID %d create HW context %d, ret %d", client->pid, args->handle, ret);
	drm_dev_exit(idx);
	return 0;

free_name:
	kfree(hwctx->name);
fini_hwctx:
	xdna->dev_info->ops->hwctx_fini(hwctx);
dev_exit:
	drm_dev_exit(idx);
free_hwctx:
	kfree(hwctx);
	return ret;
}

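/*
 * Destroy a hardware context: erase it from the client's xarray first so new
 * submissions can no longer look it up, then wait for SRCU readers and tear
 * it down via amdxdna_hwctx_destroy_rcu().
 */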
int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_drm_destroy_hwctx *args = data;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_hwctx *hwctx;
	int ret = 0, idx;

	if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
		return -EINVAL;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	mutex_lock(&xdna->dev_lock);
	hwctx = xa_erase(&client->hwctx_xa, args->handle);
	if (!hwctx) {
		ret = -EINVAL;
		XDNA_DBG(xdna, "PID %d HW context %d does not exist",
			 client->pid, args->handle);
		goto out;
	}

	/*
	 * Jobs that were already pushed are drained by the DRM scheduler as
	 * part of destroy. SRCU synchronizes against the exec command ioctls.
	 */
	amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);

	XDNA_DBG(xdna, "PID %d destroyed HW context %d", client->pid, args->handle);
out:
	mutex_unlock(&xdna->dev_lock);
	drm_dev_exit(idx);
	return ret;
}

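/*
 * Configure a hardware context. For DRM_AMDXDNA_HWCTX_CONFIG_CU the ioctl
 * parameter value is a user pointer and the CU configuration is copied into
 * a kernel buffer; for the debug-BO assign/remove types it is a plain value.
 * The actual work is done by the device layer's hwctx_config() callback.
 */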
int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_drm_config_hwctx *args = data;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_hwctx *hwctx;
	int ret, idx;
	u32 buf_size;
	void *buf;
	u64 val;

	if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
		return -EINVAL;

	if (!xdna->dev_info->ops->hwctx_config)
		return -EOPNOTSUPP;

	val = args->param_val;
	buf_size = args->param_val_size;

	switch (args->param_type) {
	case DRM_AMDXDNA_HWCTX_CONFIG_CU:
		/* For these types, param_val is a user pointer */
		if (buf_size > PAGE_SIZE) {
			XDNA_ERR(xdna, "Config CU param buffer too large");
			return -E2BIG;
		}

		/* Hwctx needs to keep buf */
		buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		if (copy_from_user(buf, u64_to_user_ptr(val), buf_size)) {
			kfree(buf);
			return -EFAULT;
		}

		break;
	case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF:
	case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF:
		/* For these types, param_val is a plain value */
		buf = NULL;
		buf_size = 0;
		break;
	default:
		XDNA_DBG(xdna, "Unknown HW context config type %d", args->param_type);
		return -EINVAL;
	}

	mutex_lock(&xdna->dev_lock);
	idx = srcu_read_lock(&client->hwctx_srcu);
	hwctx = xa_load(&client->hwctx_xa, args->handle);
	if (!hwctx) {
		XDNA_DBG(xdna, "PID %d failed to get hwctx %d", client->pid, args->handle);
		ret = -EINVAL;
		goto unlock_srcu;
	}

	ret = xdna->dev_info->ops->hwctx_config(hwctx, args->param_type, val, buf, buf_size);

unlock_srcu:
	srcu_read_unlock(&client->hwctx_srcu, idx);
	mutex_unlock(&xdna->dev_lock);
	kfree(buf);
	return ret;
}

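/*
 * Sync a debug BO with the hardware context it has been assigned to. The BO
 * handle is resolved to its GEM object, the owning context is looked up under
 * SRCU, and the device layer's hwctx_sync_debug_bo() callback does the work.
 */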
int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl)
{
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_hwctx *hwctx;
	struct amdxdna_gem_obj *abo;
	struct drm_gem_object *gobj;
	int ret, idx;

	if (!xdna->dev_info->ops->hwctx_sync_debug_bo)
		return -EOPNOTSUPP;

	gobj = drm_gem_object_lookup(client->filp, debug_bo_hdl);
	if (!gobj)
		return -EINVAL;

	abo = to_xdna_obj(gobj);
	guard(mutex)(&xdna->dev_lock);
	idx = srcu_read_lock(&client->hwctx_srcu);
	hwctx = xa_load(&client->hwctx_xa, abo->assigned_hwctx);
	if (!hwctx) {
		ret = -EINVAL;
		goto unlock_srcu;
	}

	ret = xdna->dev_info->ops->hwctx_sync_debug_bo(hwctx, debug_bo_hdl);

unlock_srcu:
	srcu_read_unlock(&client->hwctx_srcu, idx);
	drm_gem_object_put(gobj);
	return ret;
}

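/*
 * Drop the references taken on a job's argument BOs. The array may be only
 * partially populated when called from the lookup error path, so stop at the
 * first empty slot.
 */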
static void
amdxdna_arg_bos_put(struct amdxdna_sched_job *job)
{
	int i;

	for (i = 0; i < job->bo_cnt; i++) {
		if (!job->bos[i])
			break;
		drm_gem_object_put(job->bos[i]);
	}
}

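/*
 * Resolve each argument BO handle to its GEM object and pin it, recording the
 * objects in job->bos[]. On failure, release everything acquired so far.
 */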
static int
amdxdna_arg_bos_lookup(struct amdxdna_client *client,
		       struct amdxdna_sched_job *job,
		       u32 *bo_hdls, u32 bo_cnt)
{
	struct drm_gem_object *gobj;
	int i, ret;

	job->bo_cnt = bo_cnt;
	for (i = 0; i < job->bo_cnt; i++) {
		struct amdxdna_gem_obj *abo;

		gobj = drm_gem_object_lookup(client->filp, bo_hdls[i]);
		if (!gobj) {
			ret = -ENOENT;
			goto put_shmem_bo;
		}
		abo = to_xdna_obj(gobj);

		mutex_lock(&abo->lock);
		if (abo->pinned) {
			mutex_unlock(&abo->lock);
			job->bos[i] = gobj;
			continue;
		}

		ret = amdxdna_gem_pin_nolock(abo);
		if (ret) {
			mutex_unlock(&abo->lock);
			drm_gem_object_put(gobj);
			goto put_shmem_bo;
		}
		abo->pinned = true;
		mutex_unlock(&abo->lock);

		job->bos[i] = gobj;
	}

	return 0;

put_shmem_bo:
	amdxdna_arg_bos_put(job);
	return ret;
}

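/* Release a job's argument BO references and its command BO. */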
void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job)
{
	trace_amdxdna_debug_point(job->hwctx->name, job->seq, "job release");
	amdxdna_arg_bos_put(job);
	amdxdna_gem_put_obj(job->cmd_bo);
}

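/*
 * Build a scheduler job from the command BO and argument BOs, attach a fence
 * on the hardware context's timeline and hand the job to the device layer's
 * cmd_submit() callback. The assigned sequence number is returned in @seq.
 * The hwctx SRCU read lock is held only for the duration of the submission.
 */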
int amdxdna_cmd_submit(struct amdxdna_client *client,
		       struct amdxdna_drv_cmd *drv_cmd,
		       u32 cmd_bo_hdl, u32 *arg_bo_hdls, u32 arg_bo_cnt,
		       u32 hwctx_hdl, u64 *seq)
{
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_sched_job *job;
	struct amdxdna_hwctx *hwctx;
	int ret, idx;

	XDNA_DBG(xdna, "Command BO hdl %d, Arg BO count %d", cmd_bo_hdl, arg_bo_cnt);
	job = kzalloc(struct_size(job, bos, arg_bo_cnt), GFP_KERNEL);
	if (!job)
		return -ENOMEM;

	job->drv_cmd = drv_cmd;

	if (cmd_bo_hdl != AMDXDNA_INVALID_BO_HANDLE) {
		job->cmd_bo = amdxdna_gem_get_obj(client, cmd_bo_hdl, AMDXDNA_BO_CMD);
		if (!job->cmd_bo) {
			XDNA_ERR(xdna, "Failed to get cmd bo from %d", cmd_bo_hdl);
			ret = -EINVAL;
			goto free_job;
		}
	}

	ret = amdxdna_arg_bos_lookup(client, job, arg_bo_hdls, arg_bo_cnt);
	if (ret) {
		XDNA_ERR(xdna, "Argument BOs lookup failed, ret %d", ret);
		goto cmd_put;
	}

	idx = srcu_read_lock(&client->hwctx_srcu);
	hwctx = xa_load(&client->hwctx_xa, hwctx_hdl);
	if (!hwctx) {
		XDNA_DBG(xdna, "PID %d failed to get hwctx %d",
			 client->pid, hwctx_hdl);
		ret = -EINVAL;
		goto unlock_srcu;
	}

	job->hwctx = hwctx;
	job->mm = current->mm;

	job->fence = amdxdna_fence_create(hwctx);
	if (!job->fence) {
		XDNA_ERR(xdna, "Failed to create fence");
		ret = -ENOMEM;
		goto unlock_srcu;
	}
	kref_init(&job->refcnt);

	ret = xdna->dev_info->ops->cmd_submit(hwctx, job, seq);
	if (ret)
		goto put_fence;

	/*
	 * amdxdna_hwctx_destroy_rcu() releases the hwctx and its associated
	 * resources only after synchronize_srcu(), and the submitted job is
	 * handled by the device layer's queue (e.g. the DRM scheduler), so it
	 * is safe to unlock SRCU here.
	 */
	srcu_read_unlock(&client->hwctx_srcu, idx);
	trace_amdxdna_debug_point(hwctx->name, *seq, "job pushed");

	return 0;

put_fence:
	dma_fence_put(job->fence);
unlock_srcu:
	srcu_read_unlock(&client->hwctx_srcu, idx);
	amdxdna_arg_bos_put(job);
cmd_put:
	amdxdna_gem_put_obj(job->cmd_bo);
free_job:
	kfree(job);
	return ret;
}

/*
 * The submit command ioctl submits a command to the firmware. One firmware
 * command may contain multiple command BOs that are processed as a whole.
 * The command sequence number is returned and can be passed to the wait
 * command ioctl.
 */
static int amdxdna_drm_submit_execbuf(struct amdxdna_client *client,
				      struct amdxdna_drm_exec_cmd *args)
{
	struct amdxdna_dev *xdna = client->xdna;
	u32 *arg_bo_hdls = NULL;
	u32 cmd_bo_hdl;
	int ret;

	if (args->arg_count > MAX_ARG_COUNT) {
		XDNA_ERR(xdna, "Invalid arg bo count %d", args->arg_count);
		return -EINVAL;
	}

	/* Only support single command for now. */
	if (args->cmd_count != 1) {
		XDNA_ERR(xdna, "Invalid cmd bo count %d", args->cmd_count);
		return -EINVAL;
	}

	cmd_bo_hdl = (u32)args->cmd_handles;
	if (args->arg_count) {
		arg_bo_hdls = kcalloc(args->arg_count, sizeof(u32), GFP_KERNEL);
		if (!arg_bo_hdls)
			return -ENOMEM;
		ret = copy_from_user(arg_bo_hdls, u64_to_user_ptr(args->args),
				     args->arg_count * sizeof(u32));
		if (ret) {
			ret = -EFAULT;
			goto free_cmd_bo_hdls;
		}
	}

	ret = amdxdna_cmd_submit(client, NULL, cmd_bo_hdl, arg_bo_hdls,
				 args->arg_count, args->hwctx, &args->seq);
	if (ret)
		XDNA_DBG(xdna, "Submit cmds failed, ret %d", ret);

free_cmd_bo_hdls:
	kfree(arg_bo_hdls);
	if (!ret)
		XDNA_DBG(xdna, "Pushed cmd %lld to scheduler", args->seq);
	return ret;
}

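/*
 * Top-level submit ioctl: validate the extension fields and dispatch on the
 * command type. Only AMDXDNA_CMD_SUBMIT_EXEC_BUF is handled here.
 */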
int amdxdna_drm_submit_cmd_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_drm_exec_cmd *args = data;

	if (args->ext || args->ext_flags)
		return -EINVAL;

	switch (args->type) {
	case AMDXDNA_CMD_SUBMIT_EXEC_BUF:
		return amdxdna_drm_submit_execbuf(client, args);
	}

	XDNA_ERR(client->xdna, "Invalid command type %d", args->type);
	return -EINVAL;
}
572