xref: /linux/drivers/accel/amdxdna/amdxdna_ctx.c (revision ed07a76be7baa0bb164b152116486e4d9fed50dc)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
4  */
5 
6 #include <drm/amdxdna_accel.h>
7 #include <drm/drm_device.h>
8 #include <drm/drm_drv.h>
9 #include <drm/drm_file.h>
10 #include <drm/drm_gem.h>
11 #include <drm/drm_gem_shmem_helper.h>
12 #include <drm/drm_print.h>
13 #include <drm/gpu_scheduler.h>
14 #include <linux/xarray.h>
15 #include <trace/events/amdxdna.h>
16 
17 #include "amdxdna_ctx.h"
18 #include "amdxdna_gem.h"
19 #include "amdxdna_pci_drv.h"
20 
21 #define MAX_HWCTX_ID		255
22 #define MAX_ARG_COUNT		4095
23 
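/* Fence attached to each submitted job; its timeline is the owning hwctx. */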
24 struct amdxdna_fence {
25 	struct dma_fence	base;
26 	spinlock_t		lock; /* for base */
27 	struct amdxdna_hwctx	*hwctx;
28 };
29 
30 static const char *amdxdna_fence_get_driver_name(struct dma_fence *fence)
31 {
32 	return KBUILD_MODNAME;
33 }
34 
35 static const char *amdxdna_fence_get_timeline_name(struct dma_fence *fence)
36 {
37 	struct amdxdna_fence *xdna_fence;
38 
39 	xdna_fence = container_of(fence, struct amdxdna_fence, base);
40 
41 	return xdna_fence->hwctx->name;
42 }
43 
44 static const struct dma_fence_ops fence_ops = {
45 	.get_driver_name = amdxdna_fence_get_driver_name,
46 	.get_timeline_name = amdxdna_fence_get_timeline_name,
47 };
48 
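/*
 * Allocate a fence on the hwctx's timeline (fence context hwctx->id).
 * Returns NULL on allocation failure; the caller owns the reference.
 */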
49 static struct dma_fence *amdxdna_fence_create(struct amdxdna_hwctx *hwctx)
50 {
51 	struct amdxdna_fence *fence;
52 
53 	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
54 	if (!fence)
55 		return NULL;
56 
57 	fence->hwctx = hwctx;
58 	spin_lock_init(&fence->lock);
59 	dma_fence_init(&fence->base, &fence_ops, &fence->lock, hwctx->id, 0);
60 	return &fence->base;
61 }
62 
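/*
 * Tear down a hwctx once all SRCU readers (the submit and config paths) have
 * finished with it. The hwctx must already be removed from the client's
 * hwctx XArray so that no new reader can look it up.
 */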
63 static void amdxdna_hwctx_destroy_rcu(struct amdxdna_hwctx *hwctx,
64 				      struct srcu_struct *ss)
65 {
66 	struct amdxdna_dev *xdna = hwctx->client->xdna;
67 
68 	synchronize_srcu(ss);
69 
70 	/* At this point, the user can no longer submit new commands */
71 	mutex_lock(&xdna->dev_lock);
72 	xdna->dev_info->ops->hwctx_fini(hwctx);
73 	mutex_unlock(&xdna->dev_lock);
74 
75 	kfree(hwctx->name);
76 	kfree(hwctx);
77 }
78 
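/*
 * Return a pointer to the payload that follows the CU mask words of a command
 * BO (a command chain carries no mask words). If @size is non-NULL it is set
 * to the payload size in bytes; NULL is returned if the header count does not
 * cover the masks.
 */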
79 void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size)
80 {
81 	struct amdxdna_cmd *cmd = abo->mem.kva;
82 	u32 num_masks, count;
83 
84 	if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN)
85 		num_masks = 0;
86 	else
87 		num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header);
88 
89 	if (size) {
90 		count = FIELD_GET(AMDXDNA_CMD_COUNT, cmd->header);
91 		if (unlikely(count <= num_masks)) {
92 			*size = 0;
93 			return NULL;
94 		}
95 		*size = (count - num_masks) * sizeof(u32);
96 	}
97 	return &cmd->data[num_masks];
98 }
99 
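/*
 * Pick the CU for a command from its CU mask words (first set bit found).
 * Returns -1 for a command chain or when no CU bit is set.
 */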
100 int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo)
101 {
102 	struct amdxdna_cmd *cmd = abo->mem.kva;
103 	u32 num_masks, i;
104 	u32 *cu_mask;
105 
106 	if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN)
107 		return -1;
108 
109 	num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header);
110 	cu_mask = cmd->data;
111 	for (i = 0; i < num_masks; i++) {
112 		if (cu_mask[i])
113 			return ffs(cu_mask[i]) - 1;
114 	}
115 
116 	return -1;
117 }
118 
119 /*
120  * This should be called in close() and remove(). DO NOT call in other syscalls.
121  * This guarantees that the hwctx and its resources are released even if the
122  * user never calls amdxdna_drm_destroy_hwctx_ioctl.
123  */
124 void amdxdna_hwctx_remove_all(struct amdxdna_client *client)
125 {
126 	struct amdxdna_hwctx *hwctx;
127 	unsigned long hwctx_id;
128 
129 	mutex_lock(&client->hwctx_lock);
130 	amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
131 		XDNA_DBG(client->xdna, "PID %d close HW context %d",
132 			 client->pid, hwctx->id);
133 		xa_erase(&client->hwctx_xa, hwctx->id);
134 		mutex_unlock(&client->hwctx_lock);
135 		amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);
136 		mutex_lock(&client->hwctx_lock);
137 	}
138 	mutex_unlock(&client->hwctx_lock);
139 }
140 
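/*
 * Create a HW context: copy the QoS parameters from userspace, allocate a
 * context ID and name, and let the device layer set up firmware state via
 * hwctx_init(). On success the context handle and its syncobj handle are
 * returned to userspace.
 */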
141 int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
142 {
143 	struct amdxdna_client *client = filp->driver_priv;
144 	struct amdxdna_drm_create_hwctx *args = data;
145 	struct amdxdna_dev *xdna = to_xdna_dev(dev);
146 	struct amdxdna_hwctx *hwctx;
147 	int ret, idx;
148 
149 	if (args->ext || args->ext_flags)
150 		return -EINVAL;
151 
152 	if (!drm_dev_enter(dev, &idx))
153 		return -ENODEV;
154 
155 	hwctx = kzalloc(sizeof(*hwctx), GFP_KERNEL);
156 	if (!hwctx) {
157 		ret = -ENOMEM;
158 		goto exit;
159 	}
160 
161 	if (copy_from_user(&hwctx->qos, u64_to_user_ptr(args->qos_p), sizeof(hwctx->qos))) {
162 		XDNA_ERR(xdna, "Access QoS info failed");
163 		ret = -EFAULT;
164 		goto free_hwctx;
165 	}
166 
167 	hwctx->client = client;
168 	hwctx->fw_ctx_id = -1;
169 	hwctx->num_tiles = args->num_tiles;
170 	hwctx->mem_size = args->mem_size;
171 	hwctx->max_opc = args->max_opc;
172 	ret = xa_alloc_cyclic(&client->hwctx_xa, &hwctx->id, hwctx,
173 			      XA_LIMIT(AMDXDNA_INVALID_CTX_HANDLE + 1, MAX_HWCTX_ID),
174 			      &client->next_hwctxid, GFP_KERNEL);
175 	if (ret < 0) {
176 		XDNA_ERR(xdna, "Allocate hwctx ID failed, ret %d", ret);
177 		goto free_hwctx;
178 	}
179 
180 	hwctx->name = kasprintf(GFP_KERNEL, "hwctx.%d.%d", client->pid, hwctx->id);
181 	if (!hwctx->name) {
182 		ret = -ENOMEM;
183 		goto rm_id;
184 	}
185 
186 	mutex_lock(&xdna->dev_lock);
187 	ret = xdna->dev_info->ops->hwctx_init(hwctx);
188 	if (ret) {
189 		mutex_unlock(&xdna->dev_lock);
190 		XDNA_ERR(xdna, "Init hwctx failed, ret %d", ret);
191 		goto free_name;
192 	}
193 	args->handle = hwctx->id;
194 	args->syncobj_handle = hwctx->syncobj_hdl;
195 	mutex_unlock(&xdna->dev_lock);
196 
197 	atomic64_set(&hwctx->job_submit_cnt, 0);
198 	atomic64_set(&hwctx->job_free_cnt, 0);
199 	XDNA_DBG(xdna, "PID %d create HW context %d, ret %d", client->pid, args->handle, ret);
200 	drm_dev_exit(idx);
201 	return 0;
202 
203 free_name:
204 	kfree(hwctx->name);
205 rm_id:
206 	xa_erase(&client->hwctx_xa, hwctx->id);
207 free_hwctx:
208 	kfree(hwctx);
209 exit:
210 	drm_dev_exit(idx);
211 	return ret;
212 }
213 
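/*
 * Destroy a HW context. The handle is erased from the XArray first so that no
 * new submission can find it, then the context is torn down once in-flight
 * SRCU readers have drained.
 */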
214 int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
215 {
216 	struct amdxdna_client *client = filp->driver_priv;
217 	struct amdxdna_drm_destroy_hwctx *args = data;
218 	struct amdxdna_dev *xdna = to_xdna_dev(dev);
219 	struct amdxdna_hwctx *hwctx;
220 	int ret = 0, idx;
221 
222 	if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
223 		return -EINVAL;
224 
225 	if (!drm_dev_enter(dev, &idx))
226 		return -ENODEV;
227 
228 	hwctx = xa_erase(&client->hwctx_xa, args->handle);
229 	if (!hwctx) {
230 		ret = -EINVAL;
231 		XDNA_DBG(xdna, "PID %d HW context %d does not exist",
232 			 client->pid, args->handle);
233 		goto out;
234 	}
235 
236 	/*
237 	 * Jobs already pushed are handled by the DRM scheduler during destroy.
238 	 * SRCU is used to synchronize with the exec command ioctls.
239 	 */
240 	amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);
241 
242 	XDNA_DBG(xdna, "PID %d destroyed HW context %d", client->pid, args->handle);
243 out:
244 	drm_dev_exit(idx);
245 	return ret;
246 }
247 
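/*
 * Configure an existing HW context. Depending on the parameter type,
 * param_val is either a pointer to a user buffer, which is copied into a
 * kernel buffer here, or a plain value passed through to the device layer's
 * hwctx_config() callback.
 */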
248 int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
249 {
250 	struct amdxdna_client *client = filp->driver_priv;
251 	struct amdxdna_drm_config_hwctx *args = data;
252 	struct amdxdna_dev *xdna = to_xdna_dev(dev);
253 	struct amdxdna_hwctx *hwctx;
254 	int ret, idx;
255 	u32 buf_size;
256 	void *buf;
257 	u64 val;
258 
259 	if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
260 		return -EINVAL;
261 
262 	if (!xdna->dev_info->ops->hwctx_config)
263 		return -EOPNOTSUPP;
264 
265 	val = args->param_val;
266 	buf_size = args->param_val_size;
267 
268 	switch (args->param_type) {
269 	case DRM_AMDXDNA_HWCTX_CONFIG_CU:
270 		/* For types where param_val is a pointer to a user buffer */
271 		if (buf_size > PAGE_SIZE) {
272 			XDNA_ERR(xdna, "Config CU param buffer too large");
273 			return -E2BIG;
274 		}
275 
276 		/* The hwctx needs to keep the buffer contents */
277 		buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
278 		if (!buf)
279 			return -ENOMEM;
280 
281 		if (copy_from_user(buf, u64_to_user_ptr(val), buf_size)) {
282 			kfree(buf);
283 			return -EFAULT;
284 		}
285 
286 		break;
287 	case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF:
288 	case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF:
289 		/* For types where param_val is a plain value */
290 		buf = NULL;
291 		buf_size = 0;
292 		break;
293 	default:
294 		XDNA_DBG(xdna, "Unknown HW context config type %d", args->param_type);
295 		return -EINVAL;
296 	}
297 
298 	mutex_lock(&xdna->dev_lock);
299 	idx = srcu_read_lock(&client->hwctx_srcu);
300 	hwctx = xa_load(&client->hwctx_xa, args->handle);
301 	if (!hwctx) {
302 		XDNA_DBG(xdna, "PID %d failed to get hwctx %d", client->pid, args->handle);
303 		ret = -EINVAL;
304 		goto unlock_srcu;
305 	}
306 
307 	ret = xdna->dev_info->ops->hwctx_config(hwctx, args->param_type, val, buf, buf_size);
308 
309 unlock_srcu:
310 	srcu_read_unlock(&client->hwctx_srcu, idx);
311 	mutex_unlock(&xdna->dev_lock);
312 	kfree(buf);
313 	return ret;
314 }
315 
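/*
 * Drop the GEM object references held in job->bos[]. A NULL entry marks the
 * first slot that was never populated.
 */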
316 static void
317 amdxdna_arg_bos_put(struct amdxdna_sched_job *job)
318 {
319 	int i;
320 
321 	for (i = 0; i < job->bo_cnt; i++) {
322 		if (!job->bos[i])
323 			break;
324 		drm_gem_object_put(job->bos[i]);
325 	}
326 }
327 
328 static int
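/*
 * Look up each argument BO handle, take a reference and pin the BO if it is
 * not pinned yet. On error all references taken so far are dropped.
 */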
329 amdxdna_arg_bos_lookup(struct amdxdna_client *client,
330 		       struct amdxdna_sched_job *job,
331 		       u32 *bo_hdls, u32 bo_cnt)
332 {
333 	struct drm_gem_object *gobj;
334 	int i, ret;
335 
336 	job->bo_cnt = bo_cnt;
337 	for (i = 0; i < job->bo_cnt; i++) {
338 		struct amdxdna_gem_obj *abo;
339 
340 		gobj = drm_gem_object_lookup(client->filp, bo_hdls[i]);
341 		if (!gobj) {
342 			ret = -ENOENT;
343 			goto put_shmem_bo;
344 		}
345 		abo = to_xdna_obj(gobj);
346 
347 		mutex_lock(&abo->lock);
348 		if (abo->pinned) {
349 			mutex_unlock(&abo->lock);
350 			job->bos[i] = gobj;
351 			continue;
352 		}
353 
354 		ret = amdxdna_gem_pin_nolock(abo);
355 		if (ret) {
356 			mutex_unlock(&abo->lock);
357 			drm_gem_object_put(gobj);
358 			goto put_shmem_bo;
359 		}
360 		abo->pinned = true;
361 		mutex_unlock(&abo->lock);
362 
363 		job->bos[i] = gobj;
364 	}
365 
366 	return 0;
367 
368 put_shmem_bo:
369 	amdxdna_arg_bos_put(job);
370 	return ret;
371 }
372 
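/* Release the argument BO and command BO references held by a job. */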
373 void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job)
374 {
375 	trace_amdxdna_debug_point(job->hwctx->name, job->seq, "job release");
376 	amdxdna_arg_bos_put(job);
377 	amdxdna_gem_put_obj(job->cmd_bo);
378 }
379 
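/*
 * Build a job from the command BO and argument BOs and hand it to the device
 * layer's cmd_submit() callback under the hwctx SRCU read lock. On success
 * the job's sequence number is returned through @seq.
 */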
380 int amdxdna_cmd_submit(struct amdxdna_client *client,
381 		       u32 cmd_bo_hdl, u32 *arg_bo_hdls, u32 arg_bo_cnt,
382 		       u32 hwctx_hdl, u64 *seq)
383 {
384 	struct amdxdna_dev *xdna = client->xdna;
385 	struct amdxdna_sched_job *job;
386 	struct amdxdna_hwctx *hwctx;
387 	int ret, idx;
388 
389 	XDNA_DBG(xdna, "Command BO hdl %d, Arg BO count %d", cmd_bo_hdl, arg_bo_cnt);
390 	job = kzalloc(struct_size(job, bos, arg_bo_cnt), GFP_KERNEL);
391 	if (!job)
392 		return -ENOMEM;
393 
394 	if (cmd_bo_hdl != AMDXDNA_INVALID_BO_HANDLE) {
395 		job->cmd_bo = amdxdna_gem_get_obj(client, cmd_bo_hdl, AMDXDNA_BO_CMD);
396 		if (!job->cmd_bo) {
397 			XDNA_ERR(xdna, "Failed to get cmd bo from %d", cmd_bo_hdl);
398 			ret = -EINVAL;
399 			goto free_job;
400 		}
401 	} else {
402 		job->cmd_bo = NULL;
403 	}
404 
405 	ret = amdxdna_arg_bos_lookup(client, job, arg_bo_hdls, arg_bo_cnt);
406 	if (ret) {
407 		XDNA_ERR(xdna, "Argument BOs lookup failed, ret %d", ret);
408 		goto cmd_put;
409 	}
410 
411 	idx = srcu_read_lock(&client->hwctx_srcu);
412 	hwctx = xa_load(&client->hwctx_xa, hwctx_hdl);
413 	if (!hwctx) {
414 		XDNA_DBG(xdna, "PID %d failed to get hwctx %d",
415 			 client->pid, hwctx_hdl);
416 		ret = -EINVAL;
417 		goto unlock_srcu;
418 	}
419 
420 	if (hwctx->status != HWCTX_STAT_READY) {
421 		XDNA_ERR(xdna, "HW Context is not ready");
422 		ret = -EINVAL;
423 		goto unlock_srcu;
424 	}
425 
426 	job->hwctx = hwctx;
427 	job->mm = current->mm;
428 
429 	job->fence = amdxdna_fence_create(hwctx);
430 	if (!job->fence) {
431 		XDNA_ERR(xdna, "Failed to create fence");
432 		ret = -ENOMEM;
433 		goto unlock_srcu;
434 	}
435 	kref_init(&job->refcnt);
436 
437 	ret = xdna->dev_info->ops->cmd_submit(hwctx, job, seq);
438 	if (ret)
439 		goto put_fence;
440 
441 	/*
442 	 * amdxdna_hwctx_destroy_rcu() releases the hwctx and its associated
443 	 * resources only after synchronize_srcu(). Jobs already submitted are
444 	 * handled by the device layer's queue, e.g. the DRM scheduler, so it is
445 	 * safe to unlock SRCU here.
446 	 */
447 	srcu_read_unlock(&client->hwctx_srcu, idx);
448 	trace_amdxdna_debug_point(hwctx->name, *seq, "job pushed");
449 
450 	return 0;
451 
452 put_fence:
453 	dma_fence_put(job->fence);
454 unlock_srcu:
455 	srcu_read_unlock(&client->hwctx_srcu, idx);
456 	amdxdna_arg_bos_put(job);
457 cmd_put:
458 	amdxdna_gem_put_obj(job->cmd_bo);
459 free_job:
460 	kfree(job);
461 	return ret;
462 }
463 
464 /*
465  * The submit command ioctl submits a command to the firmware. One firmware
466  * command may contain multiple command BOs that are processed as a whole.
467  * The returned command sequence number can be used with the wait command ioctl.
468  */
469 static int amdxdna_drm_submit_execbuf(struct amdxdna_client *client,
470 				      struct amdxdna_drm_exec_cmd *args)
471 {
472 	struct amdxdna_dev *xdna = client->xdna;
473 	u32 *arg_bo_hdls = NULL;
474 	u32 cmd_bo_hdl;
475 	int ret;
476 
477 	if (args->arg_count > MAX_ARG_COUNT) {
478 		XDNA_ERR(xdna, "Invalid arg bo count %d", args->arg_count);
479 		return -EINVAL;
480 	}
481 
482 	/* Only a single command is supported for now. */
483 	if (args->cmd_count != 1) {
484 		XDNA_ERR(xdna, "Invalid cmd bo count %d", args->cmd_count);
485 		return -EINVAL;
486 	}
487 
488 	cmd_bo_hdl = (u32)args->cmd_handles;
489 	if (args->arg_count) {
490 		arg_bo_hdls = kcalloc(args->arg_count, sizeof(u32), GFP_KERNEL);
491 		if (!arg_bo_hdls)
492 			return -ENOMEM;
493 		ret = copy_from_user(arg_bo_hdls, u64_to_user_ptr(args->args),
494 				     args->arg_count * sizeof(u32));
495 		if (ret) {
496 			ret = -EFAULT;
497 			goto free_cmd_bo_hdls;
498 		}
499 	}
500 
501 	ret = amdxdna_cmd_submit(client, cmd_bo_hdl, arg_bo_hdls,
502 				 args->arg_count, args->hwctx, &args->seq);
503 	if (ret)
504 		XDNA_DBG(xdna, "Submit cmds failed, ret %d", ret);
505 
506 free_cmd_bo_hdls:
507 	kfree(arg_bo_hdls);
508 	if (!ret)
509 		XDNA_DBG(xdna, "Pushed cmd %lld to scheduler", args->seq);
510 	return ret;
511 }
512 
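/*
 * Dispatch the submit command ioctl by type; only AMDXDNA_CMD_SUBMIT_EXEC_BUF
 * is handled.
 */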
513 int amdxdna_drm_submit_cmd_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
514 {
515 	struct amdxdna_client *client = filp->driver_priv;
516 	struct amdxdna_drm_exec_cmd *args = data;
517 
518 	if (args->ext || args->ext_flags)
519 		return -EINVAL;
520 
521 	switch (args->type) {
522 	case AMDXDNA_CMD_SUBMIT_EXEC_BUF:
523 		return amdxdna_drm_submit_execbuf(client, args);
524 	}
525 
526 	XDNA_ERR(client->xdna, "Invalid command type %d", args->type);
527 	return -EINVAL;
528 }
529