// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
 */

#include <drm/amdxdna_accel.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include <linux/xarray.h>
#include <trace/events/amdxdna.h>

#include "amdxdna_ctx.h"
#include "amdxdna_gem.h"
#include "amdxdna_pci_drv.h"

#define MAX_HWCTX_ID		255
#define MAX_ARG_COUNT		4095

struct amdxdna_fence {
	struct dma_fence	base;
	spinlock_t		lock; /* for base */
	struct amdxdna_hwctx	*hwctx;
};

static const char *amdxdna_fence_get_driver_name(struct dma_fence *fence)
{
	return KBUILD_MODNAME;
}

static const char *amdxdna_fence_get_timeline_name(struct dma_fence *fence)
{
	struct amdxdna_fence *xdna_fence;

	xdna_fence = container_of(fence, struct amdxdna_fence, base);

	return xdna_fence->hwctx->name;
}

static const struct dma_fence_ops fence_ops = {
	.get_driver_name = amdxdna_fence_get_driver_name,
	.get_timeline_name = amdxdna_fence_get_timeline_name,
};

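/*
 * Create a dma_fence on the hardware context's own timeline: the fence
 * context is the hwctx ID and the timeline name is the hwctx name (see
 * fence_ops above). Returns NULL on allocation failure.
 */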
static struct dma_fence *amdxdna_fence_create(struct amdxdna_hwctx *hwctx)
{
	struct amdxdna_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return NULL;

	fence->hwctx = hwctx;
	spin_lock_init(&fence->lock);
	dma_fence_init(&fence->base, &fence_ops, &fence->lock, hwctx->id, 0);
	return &fence->base;
}

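/*
 * Tear down a hardware context. synchronize_srcu() waits for all SRCU
 * read-side sections (e.g. the exec command ioctls) that may still hold a
 * pointer to this hwctx; only then is the device-layer state freed.
 */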
static void amdxdna_hwctx_destroy_rcu(struct amdxdna_hwctx *hwctx,
				      struct srcu_struct *ss)
{
	struct amdxdna_dev *xdna = hwctx->client->xdna;

	synchronize_srcu(ss);

	/* At this point, userspace can no longer submit new commands */
	xdna->dev_info->ops->hwctx_fini(hwctx);

	kfree(hwctx->name);
	kfree(hwctx);
}

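/*
 * Walk every hardware context of the client under the SRCU read lock and
 * call the walk callback on each one. Iteration stops at the first callback
 * that returns a non-zero value, which is then returned to the caller.
 *
 * Usage sketch (illustrative only; count_hwctx() is a hypothetical callback,
 * not part of this driver):
 *
 *	static int count_hwctx(struct amdxdna_hwctx *hwctx, void *arg)
 *	{
 *		int *count = arg;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *
 *	amdxdna_hwctx_walk(client, &count, count_hwctx);
 */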
int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg,
		       int (*walk)(struct amdxdna_hwctx *hwctx, void *arg))
{
	struct amdxdna_hwctx *hwctx;
	unsigned long hwctx_id;
	int ret = 0, idx;

	idx = srcu_read_lock(&client->hwctx_srcu);
	amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
		ret = walk(hwctx, arg);
		if (ret)
			break;
	}
	srcu_read_unlock(&client->hwctx_srcu, idx);

	return ret;
}

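/*
 * Return a pointer to the payload of a command BO and, optionally, its size
 * in bytes. Layout as implied by the field masks used below: cmd->header
 * encodes an opcode, a count of u32 words in cmd->data, and an "extra CU
 * mask" count. For regular commands the payload follows the CU mask words;
 * ERT_CMD_CHAIN commands carry no CU masks at all.
 */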
void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size)
{
	struct amdxdna_cmd *cmd = abo->mem.kva;
	u32 num_masks, count;

	if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN)
		num_masks = 0;
	else
		num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header);

	if (size) {
		count = FIELD_GET(AMDXDNA_CMD_COUNT, cmd->header);
		if (unlikely(count <= num_masks)) {
			*size = 0;
			return NULL;
		}
		*size = (count - num_masks) * sizeof(u32);
	}
	return &cmd->data[num_masks];
}

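/*
 * Scan the command's CU masks and return the zero-based position of the
 * first set bit in the first non-empty mask (presumably the index of the
 * targeted compute unit), or -1 for command chains and empty masks.
 */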
int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo)
{
	struct amdxdna_cmd *cmd = abo->mem.kva;
	u32 num_masks, i;
	u32 *cu_mask;

	if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN)
		return -1;

	num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header);
	cu_mask = cmd->data;
	for (i = 0; i < num_masks; i++) {
		if (cu_mask[i])
			return ffs(cu_mask[i]) - 1;
	}

	return -1;
}

/*
 * This should be called in close() and remove(). DO NOT call in other
 * syscalls. It guarantees that the hardware contexts and their resources
 * are released even if the user never calls amdxdna_drm_destroy_hwctx_ioctl().
 */
void amdxdna_hwctx_remove_all(struct amdxdna_client *client)
{
	struct amdxdna_hwctx *hwctx;
	unsigned long hwctx_id;

	amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
		XDNA_DBG(client->xdna, "PID %d close HW context %d",
			 client->pid, hwctx->id);
		xa_erase(&client->hwctx_xa, hwctx->id);
		amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);
	}
}

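/*
 * Create a hardware context: copy the QoS parameters from userspace,
 * cyclically allocate a context ID (above AMDXDNA_INVALID_CTX_HANDLE, up to
 * MAX_HWCTX_ID), name the context "hwctx.<pid>.<id>", and initialize the
 * device-layer state under dev_lock. The context ID and a sync object
 * handle are returned to userspace.
 */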
int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_drm_create_hwctx *args = data;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_hwctx *hwctx;
	int ret, idx;

	if (args->ext || args->ext_flags)
		return -EINVAL;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	hwctx = kzalloc(sizeof(*hwctx), GFP_KERNEL);
	if (!hwctx) {
		ret = -ENOMEM;
		goto exit;
	}

	if (copy_from_user(&hwctx->qos, u64_to_user_ptr(args->qos_p), sizeof(hwctx->qos))) {
		XDNA_ERR(xdna, "Access QoS info failed");
		ret = -EFAULT;
		goto free_hwctx;
	}

	hwctx->client = client;
	hwctx->fw_ctx_id = -1;
	hwctx->num_tiles = args->num_tiles;
	hwctx->mem_size = args->mem_size;
	hwctx->max_opc = args->max_opc;
	ret = xa_alloc_cyclic(&client->hwctx_xa, &hwctx->id, hwctx,
			      XA_LIMIT(AMDXDNA_INVALID_CTX_HANDLE + 1, MAX_HWCTX_ID),
			      &client->next_hwctxid, GFP_KERNEL);
	if (ret < 0) {
		XDNA_ERR(xdna, "Allocate hwctx ID failed, ret %d", ret);
		goto free_hwctx;
	}

	hwctx->name = kasprintf(GFP_KERNEL, "hwctx.%d.%d", client->pid, hwctx->id);
	if (!hwctx->name) {
		ret = -ENOMEM;
		goto rm_id;
	}

	mutex_lock(&xdna->dev_lock);
	ret = xdna->dev_info->ops->hwctx_init(hwctx);
	if (ret) {
		mutex_unlock(&xdna->dev_lock);
		XDNA_ERR(xdna, "Init hwctx failed, ret %d", ret);
		goto free_name;
	}
	args->handle = hwctx->id;
	args->syncobj_handle = hwctx->syncobj_hdl;
	mutex_unlock(&xdna->dev_lock);

	atomic64_set(&hwctx->job_submit_cnt, 0);
	atomic64_set(&hwctx->job_free_cnt, 0);
	XDNA_DBG(xdna, "PID %d create HW context %d, ret %d", client->pid, args->handle, ret);
	drm_dev_exit(idx);
	return 0;

free_name:
	kfree(hwctx->name);
rm_id:
	xa_erase(&client->hwctx_xa, hwctx->id);
free_hwctx:
	kfree(hwctx);
exit:
	drm_dev_exit(idx);
	return ret;
}

int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_drm_destroy_hwctx *args = data;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_hwctx *hwctx;
	int ret = 0, idx;

	if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
		return -EINVAL;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	mutex_lock(&xdna->dev_lock);
	hwctx = xa_erase(&client->hwctx_xa, args->handle);
	if (!hwctx) {
		ret = -EINVAL;
		XDNA_DBG(xdna, "PID %d HW context %d does not exist",
			 client->pid, args->handle);
		goto out;
	}

	/*
	 * Jobs that were already pushed are handled by the DRM scheduler
	 * during destroy. SRCU synchronizes against in-flight exec command
	 * ioctls.
	 */
	amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);

	XDNA_DBG(xdna, "PID %d destroyed HW context %d", client->pid, args->handle);
out:
	mutex_unlock(&xdna->dev_lock);
	drm_dev_exit(idx);
	return ret;
}

int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_drm_config_hwctx *args = data;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_hwctx *hwctx;
	int ret, idx;
	u32 buf_size;
	void *buf;
	u64 val;

	if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
		return -EINVAL;

	if (!xdna->dev_info->ops->hwctx_config)
		return -EOPNOTSUPP;

	val = args->param_val;
	buf_size = args->param_val_size;

	switch (args->param_type) {
	case DRM_AMDXDNA_HWCTX_CONFIG_CU:
		/* For types where param_val is a pointer */
		if (buf_size > PAGE_SIZE) {
			XDNA_ERR(xdna, "Config CU param buffer too large");
			return -E2BIG;
		}

		/* Hwctx needs to keep buf */
		buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		if (copy_from_user(buf, u64_to_user_ptr(val), buf_size)) {
			kfree(buf);
			return -EFAULT;
		}

		break;
	case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF:
	case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF:
		/* For types where param_val is an immediate value */
		buf = NULL;
		buf_size = 0;
		break;
	default:
		XDNA_DBG(xdna, "Unknown HW context config type %d", args->param_type);
		return -EINVAL;
	}

	mutex_lock(&xdna->dev_lock);
	idx = srcu_read_lock(&client->hwctx_srcu);
	hwctx = xa_load(&client->hwctx_xa, args->handle);
	if (!hwctx) {
		XDNA_DBG(xdna, "PID %d failed to get hwctx %d", client->pid, args->handle);
		ret = -EINVAL;
		goto unlock_srcu;
	}

	ret = xdna->dev_info->ops->hwctx_config(hwctx, args->param_type, val, buf, buf_size);

unlock_srcu:
	srcu_read_unlock(&client->hwctx_srcu, idx);
	mutex_unlock(&xdna->dev_lock);
	kfree(buf);
	return ret;
}

static void
amdxdna_arg_bos_put(struct amdxdna_sched_job *job)
{
	int i;

	for (i = 0; i < job->bo_cnt; i++) {
		if (!job->bos[i])
			break;
		drm_gem_object_put(job->bos[i]);
	}
}

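/*
 * Resolve the argument BO handles into GEM objects and pin each object's
 * backing memory exactly once (abo->pinned guards against re-pinning). On
 * failure, references taken so far are dropped via amdxdna_arg_bos_put().
 */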
static int
amdxdna_arg_bos_lookup(struct amdxdna_client *client,
		       struct amdxdna_sched_job *job,
		       u32 *bo_hdls, u32 bo_cnt)
{
	struct drm_gem_object *gobj;
	int i, ret;

	job->bo_cnt = bo_cnt;
	for (i = 0; i < job->bo_cnt; i++) {
		struct amdxdna_gem_obj *abo;

		gobj = drm_gem_object_lookup(client->filp, bo_hdls[i]);
		if (!gobj) {
			ret = -ENOENT;
			goto put_shmem_bo;
		}
		abo = to_xdna_obj(gobj);

		mutex_lock(&abo->lock);
		if (abo->pinned) {
			mutex_unlock(&abo->lock);
			job->bos[i] = gobj;
			continue;
		}

		ret = amdxdna_gem_pin_nolock(abo);
		if (ret) {
			mutex_unlock(&abo->lock);
			drm_gem_object_put(gobj);
			goto put_shmem_bo;
		}
		abo->pinned = true;
		mutex_unlock(&abo->lock);

		job->bos[i] = gobj;
	}

	return 0;

put_shmem_bo:
	amdxdna_arg_bos_put(job);
	return ret;
}

void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job)
{
	trace_amdxdna_debug_point(job->hwctx->name, job->seq, "job release");
	amdxdna_arg_bos_put(job);
	amdxdna_gem_put_obj(job->cmd_bo);
}

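/*
 * Submit one command to a hardware context: take a reference on the command
 * BO, look up and pin the argument BOs, find the hwctx under the SRCU read
 * lock (it must be in the READY state), attach a new fence to the job, and
 * hand the job to the device layer, which returns the sequence number
 * through seq.
 */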
int amdxdna_cmd_submit(struct amdxdna_client *client,
		       u32 cmd_bo_hdl, u32 *arg_bo_hdls, u32 arg_bo_cnt,
		       u32 hwctx_hdl, u64 *seq)
{
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_sched_job *job;
	struct amdxdna_hwctx *hwctx;
	int ret, idx;

	XDNA_DBG(xdna, "Command BO hdl %d, Arg BO count %d", cmd_bo_hdl, arg_bo_cnt);
	job = kzalloc(struct_size(job, bos, arg_bo_cnt), GFP_KERNEL);
	if (!job)
		return -ENOMEM;

	if (cmd_bo_hdl != AMDXDNA_INVALID_BO_HANDLE) {
		job->cmd_bo = amdxdna_gem_get_obj(client, cmd_bo_hdl, AMDXDNA_BO_CMD);
		if (!job->cmd_bo) {
			XDNA_ERR(xdna, "Failed to get cmd bo from %d", cmd_bo_hdl);
			ret = -EINVAL;
			goto free_job;
		}
	} else {
		job->cmd_bo = NULL;
	}

	ret = amdxdna_arg_bos_lookup(client, job, arg_bo_hdls, arg_bo_cnt);
	if (ret) {
		XDNA_ERR(xdna, "Argument BOs lookup failed, ret %d", ret);
		goto cmd_put;
	}

	idx = srcu_read_lock(&client->hwctx_srcu);
	hwctx = xa_load(&client->hwctx_xa, hwctx_hdl);
	if (!hwctx) {
		XDNA_DBG(xdna, "PID %d failed to get hwctx %d",
			 client->pid, hwctx_hdl);
		ret = -EINVAL;
		goto unlock_srcu;
	}

	if (hwctx->status != HWCTX_STAT_READY) {
		XDNA_ERR(xdna, "HW Context is not ready");
		ret = -EINVAL;
		goto unlock_srcu;
	}

	job->hwctx = hwctx;
	job->mm = current->mm;

	job->fence = amdxdna_fence_create(hwctx);
	if (!job->fence) {
		XDNA_ERR(xdna, "Failed to create fence");
		ret = -ENOMEM;
		goto unlock_srcu;
	}
	kref_init(&job->refcnt);

	ret = xdna->dev_info->ops->cmd_submit(hwctx, job, seq);
	if (ret)
		goto put_fence;

	/*
	 * amdxdna_hwctx_destroy_rcu() releases the hwctx and its associated
	 * resources only after synchronize_srcu() completes. Jobs that were
	 * already submitted are handled by the device-layer queue, for
	 * example the DRM scheduler, so it is safe to unlock SRCU here.
	 */
	srcu_read_unlock(&client->hwctx_srcu, idx);
	trace_amdxdna_debug_point(hwctx->name, *seq, "job pushed");

	return 0;

put_fence:
	dma_fence_put(job->fence);
unlock_srcu:
	srcu_read_unlock(&client->hwctx_srcu, idx);
	amdxdna_arg_bos_put(job);
cmd_put:
	amdxdna_gem_put_obj(job->cmd_bo);
free_job:
	kfree(job);
	return ret;
}

/*
 * The submit command ioctl submits a command to the firmware. One firmware
 * command may contain multiple command BOs that are processed as a whole.
 * A command sequence number is returned, which can be passed to the wait
 * command ioctl.
 */
static int amdxdna_drm_submit_execbuf(struct amdxdna_client *client,
				      struct amdxdna_drm_exec_cmd *args)
{
	struct amdxdna_dev *xdna = client->xdna;
	u32 *arg_bo_hdls = NULL;
	u32 cmd_bo_hdl;
	int ret;

	if (args->arg_count > MAX_ARG_COUNT) {
		XDNA_ERR(xdna, "Invalid arg bo count %d", args->arg_count);
		return -EINVAL;
	}

	/* Only a single command is supported for now. */
	if (args->cmd_count != 1) {
		XDNA_ERR(xdna, "Invalid cmd bo count %d", args->cmd_count);
		return -EINVAL;
	}

	cmd_bo_hdl = (u32)args->cmd_handles;
	if (args->arg_count) {
		arg_bo_hdls = kcalloc(args->arg_count, sizeof(u32), GFP_KERNEL);
		if (!arg_bo_hdls)
			return -ENOMEM;
		ret = copy_from_user(arg_bo_hdls, u64_to_user_ptr(args->args),
				     args->arg_count * sizeof(u32));
		if (ret) {
			ret = -EFAULT;
			goto free_cmd_bo_hdls;
		}
	}

	ret = amdxdna_cmd_submit(client, cmd_bo_hdl, arg_bo_hdls,
				 args->arg_count, args->hwctx, &args->seq);
	if (ret)
		XDNA_DBG(xdna, "Submit cmds failed, ret %d", ret);

free_cmd_bo_hdls:
	kfree(arg_bo_hdls);
	if (!ret)
		XDNA_DBG(xdna, "Pushed cmd %lld to scheduler", args->seq);
	return ret;
}

int amdxdna_drm_submit_cmd_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_drm_exec_cmd *args = data;

	if (args->ext || args->ext_flags)
		return -EINVAL;

	switch (args->type) {
	case AMDXDNA_CMD_SUBMIT_EXEC_BUF:
		return amdxdna_drm_submit_execbuf(client, args);
	}

	XDNA_ERR(client->xdna, "Invalid command type %d", args->type);
	return -EINVAL;
}
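
/*
 * Userspace submission sketch (illustrative only; assumes the UAPI in
 * drm/amdxdna_accel.h, an already-open device fd, and valid BO handles):
 *
 *	struct amdxdna_drm_exec_cmd exec = {
 *		.hwctx = hwctx_handle,
 *		.type = AMDXDNA_CMD_SUBMIT_EXEC_BUF,
 *		.cmd_handles = cmd_bo_handle,
 *		.args = (uintptr_t)arg_bo_handles,
 *		.cmd_count = 1,
 *		.arg_count = num_arg_bos,
 *	};
 *
 *	ret = ioctl(fd, DRM_IOCTL_AMDXDNA_EXEC_CMD, &exec);
 *	// on success, exec.seq can be passed to the wait command ioctl
 */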
543