/* xref: /linux/drivers/accel/amdxdna/aie2_ctx.c (revision face6a3615a649456eb4549f6d474221d877d604) */
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2024, Advanced Micro Devices, Inc.
 */

#include <drm/amdxdna_accel.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
#include <linux/hmm.h>
#include <linux/types.h>
#include <linux/xarray.h>
#include <trace/events/amdxdna.h>

#include "aie2_msg_priv.h"
#include "aie2_pci.h"
#include "aie2_solver.h"
#include "amdxdna_ctx.h"
#include "amdxdna_gem.h"
#include "amdxdna_mailbox.h"
#include "amdxdna_pci_drv.h"
#include "amdxdna_pm.h"

static bool force_cmdlist;
module_param(force_cmdlist, bool, 0600);
MODULE_PARM_DESC(force_cmdlist, "Force using command list (default false)");

#define HWCTX_MAX_TIMEOUT	60000 /* milliseconds */

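/*
 * Job lifetime is reference counted; the last kref_put() lands here.
 * Clean up the job, account the free so aie2_hwctx_fini() can make
 * progress, and wake any waiter blocked on job_free_wq.
 */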
static void aie2_job_release(struct kref *ref)
{
	struct amdxdna_sched_job *job;

	job = container_of(ref, struct amdxdna_sched_job, refcnt);
	amdxdna_sched_job_cleanup(job);
	atomic64_inc(&job->hwctx->job_free_cnt);
	wake_up(&job->hwctx->priv->job_free_wq);
	if (job->out_fence)
		dma_fence_put(job->out_fence);
	kfree(job);
}

static void aie2_job_put(struct amdxdna_sched_job *job)
{
	kref_put(&job->refcnt, aie2_job_release);
}

static void aie2_hwctx_status_shift_stop(struct amdxdna_hwctx *hwctx)
{
	hwctx->old_status = hwctx->status;
	hwctx->status = HWCTX_STAT_STOP;
}

static void aie2_hwctx_status_restore(struct amdxdna_hwctx *hwctx)
{
	hwctx->status = hwctx->old_status;
}

/* bad_job is only set by aie2_sched_job_timedout(); all other callers pass NULL */
static void aie2_hwctx_stop(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx,
			    struct drm_sched_job *bad_job)
{
	drm_sched_stop(&hwctx->priv->sched, bad_job);
	aie2_destroy_context(xdna->dev_handle, hwctx);
}

static int aie2_hwctx_restart(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx)
{
	struct amdxdna_gem_obj *heap = hwctx->priv->heap;
	int ret;

	ret = aie2_create_context(xdna->dev_handle, hwctx);
	if (ret) {
		XDNA_ERR(xdna, "Create hwctx failed, ret %d", ret);
		goto out;
	}

	ret = aie2_map_host_buf(xdna->dev_handle, hwctx->fw_ctx_id,
				heap->mem.userptr, heap->mem.size);
	if (ret) {
		XDNA_ERR(xdna, "Map host buf failed, ret %d", ret);
		goto out;
	}

	if (hwctx->status != HWCTX_STAT_READY) {
		XDNA_DBG(xdna, "hwctx is not ready, status %d", hwctx->status);
		goto out;
	}

	ret = aie2_config_cu(hwctx, NULL);
	if (ret) {
		XDNA_ERR(xdna, "Config cu failed, ret %d", ret);
		goto out;
	}

out:
	drm_sched_start(&hwctx->priv->sched, 0);
	XDNA_DBG(xdna, "%s restarted, ret %d", hwctx->name, ret);
	return ret;
}

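/*
 * Each hwctx exposes a timeline syncobj: every submitted command adds
 * a dma_fence_chain point keyed by its sequence number (see
 * aie2_cmd_submit()). Look up the fence for @seq so callers can wait
 * on that specific command.
 */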
static struct dma_fence *aie2_cmd_get_out_fence(struct amdxdna_hwctx *hwctx, u64 seq)
{
	struct dma_fence *fence, *out_fence = NULL;
	int ret;

	fence = drm_syncobj_fence_get(hwctx->priv->syncobj);
	if (!fence)
		return NULL;

	ret = dma_fence_chain_find_seqno(&fence, seq);
	if (ret)
		goto out;

	out_fence = dma_fence_get(dma_fence_chain_contained(fence));

out:
	dma_fence_put(fence);
	return out_fence;
}

static void aie2_hwctx_wait_for_idle(struct amdxdna_hwctx *hwctx)
{
	struct dma_fence *fence;

	fence = aie2_cmd_get_out_fence(hwctx, hwctx->priv->seq - 1);
	if (!fence)
		return;

	/* Wait up to 2 seconds for fw to finish all pending requests */
	dma_fence_wait_timeout(fence, false, msecs_to_jiffies(2000));
	dma_fence_put(fence);
}

static int aie2_hwctx_suspend_cb(struct amdxdna_hwctx *hwctx, void *arg)
{
	struct amdxdna_dev *xdna = hwctx->client->xdna;

	aie2_hwctx_wait_for_idle(hwctx);
	aie2_hwctx_stop(xdna, hwctx, NULL);
	aie2_hwctx_status_shift_stop(hwctx);

	return 0;
}

void aie2_hwctx_suspend(struct amdxdna_client *client)
{
	struct amdxdna_dev *xdna = client->xdna;

	/*
	 * A command timeout here is unlikely, but if it happens it does
	 * not break the system. aie2_hwctx_stop() destroys the mailbox
	 * channel and aborts all pending commands.
	 */
	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
	amdxdna_hwctx_walk(client, NULL, aie2_hwctx_suspend_cb);
}

static int aie2_hwctx_resume_cb(struct amdxdna_hwctx *hwctx, void *arg)
{
	struct amdxdna_dev *xdna = hwctx->client->xdna;

	aie2_hwctx_status_restore(hwctx);
	return aie2_hwctx_restart(xdna, hwctx);
}

int aie2_hwctx_resume(struct amdxdna_client *client)
{
	/*
	 * The resume path cannot guarantee that the mailbox channel is
	 * regenerated. If that happens, submitting a message to the
	 * channel will return an error.
	 */
	return amdxdna_hwctx_walk(client, NULL, aie2_hwctx_resume_cb);
}

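/*
 * Common completion path for all response handlers: drop the PM
 * reference taken at submit time, signal the job fence, then release
 * the job_sem slot and the mm, fence and job references taken in
 * aie2_sched_job_run().
 */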
static void
aie2_sched_notify(struct amdxdna_sched_job *job)
{
	struct dma_fence *fence = job->fence;

	trace_xdna_job(&job->base, job->hwctx->name, "signaled fence", job->seq);

	amdxdna_pm_suspend_put(job->hwctx->client->xdna);
	job->hwctx->priv->completed++;
	dma_fence_signal(fence);

	up(&job->hwctx->priv->job_sem);
	job->job_done = true;
	dma_fence_put(fence);
	mmput_async(job->mm);
	aie2_job_put(job);
}

static int
aie2_sched_resp_handler(void *handle, void __iomem *data, size_t size)
{
	struct amdxdna_sched_job *job = handle;
	struct amdxdna_gem_obj *cmd_abo;
	int ret = 0;
	u32 status;

	cmd_abo = job->cmd_bo;

	if (unlikely(!data))
		goto out;

	if (unlikely(size != sizeof(u32))) {
		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
		ret = -EINVAL;
		goto out;
	}

	status = readl(data);
	XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status);
	if (status == AIE2_STATUS_SUCCESS)
		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED);
	else
		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ERROR);

out:
	aie2_sched_notify(job);
	return ret;
}

static int
aie2_sched_drvcmd_resp_handler(void *handle, void __iomem *data, size_t size)
{
	struct amdxdna_sched_job *job = handle;
	int ret = 0;

	if (unlikely(!data))
		goto out;

	if (unlikely(size != sizeof(u32))) {
		ret = -EINVAL;
		goto out;
	}

	job->drv_cmd->result = readl(data);

out:
	aie2_sched_notify(job);
	return ret;
}

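/*
 * Command-list responses carry three u32 words (struct cmd_chain_resp,
 * read with readl() from the mailbox ring buffer on the BAR): the
 * overall status, plus the index and status of the first failed
 * command when the chain did not complete.
 */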
static int
aie2_sched_cmdlist_resp_handler(void *handle, void __iomem *data, size_t size)
{
	struct amdxdna_sched_job *job = handle;
	struct amdxdna_gem_obj *cmd_abo;
	struct amdxdna_dev *xdna;
	u32 fail_cmd_status;
	u32 fail_cmd_idx;
	u32 cmd_status;
	int ret = 0;

	cmd_abo = job->cmd_bo;
	if (unlikely(!data) || unlikely(size != sizeof(u32) * 3)) {
		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
		ret = -EINVAL;
		goto out;
	}

	cmd_status = readl(data + offsetof(struct cmd_chain_resp, status));
	xdna = job->hwctx->client->xdna;
	XDNA_DBG(xdna, "Status 0x%x", cmd_status);
	if (cmd_status == AIE2_STATUS_SUCCESS) {
		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED);
		goto out;
	}

	/* Slow path to handle errors: read the failing command's info from the ring buffer on the BAR */
	fail_cmd_idx = readl(data + offsetof(struct cmd_chain_resp, fail_cmd_idx));
	fail_cmd_status = readl(data + offsetof(struct cmd_chain_resp, fail_cmd_status));
	XDNA_DBG(xdna, "Failed cmd idx %d, status 0x%x",
		 fail_cmd_idx, fail_cmd_status);

	if (fail_cmd_status == AIE2_STATUS_SUCCESS) {
		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
		ret = -EINVAL;
		goto out;
	}
	amdxdna_cmd_set_state(cmd_abo, fail_cmd_status);

	if (amdxdna_cmd_get_op(cmd_abo) == ERT_CMD_CHAIN) {
		struct amdxdna_cmd_chain *cc = amdxdna_cmd_get_payload(cmd_abo, NULL);

		cc->error_index = fail_cmd_idx;
		if (cc->error_index >= cc->command_count)
			cc->error_index = 0;
	}
out:
	aie2_sched_notify(job);
	return ret;
}

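/*
 * drm_sched run_job callback. Extra mm, job and fence references are
 * taken across the mailbox send; they are dropped either by the
 * response handler through aie2_sched_notify(), or right here when the
 * send fails.
 */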
static struct dma_fence *
aie2_sched_job_run(struct drm_sched_job *sched_job)
{
	struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
	struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
	struct amdxdna_hwctx *hwctx = job->hwctx;
	struct dma_fence *fence;
	int ret;

	if (!mmget_not_zero(job->mm))
		return ERR_PTR(-ESRCH);

	kref_get(&job->refcnt);
	fence = dma_fence_get(job->fence);

	if (job->drv_cmd) {
		switch (job->drv_cmd->opcode) {
		case SYNC_DEBUG_BO:
			ret = aie2_sync_bo(hwctx, job, aie2_sched_drvcmd_resp_handler);
			break;
		case ATTACH_DEBUG_BO:
			ret = aie2_config_debug_bo(hwctx, job, aie2_sched_drvcmd_resp_handler);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		goto out;
	}

	amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_NEW);

	if (amdxdna_cmd_get_op(cmd_abo) == ERT_CMD_CHAIN)
		ret = aie2_cmdlist_multi_execbuf(hwctx, job, aie2_sched_cmdlist_resp_handler);
	else if (force_cmdlist)
		ret = aie2_cmdlist_single_execbuf(hwctx, job, aie2_sched_cmdlist_resp_handler);
	else
		ret = aie2_execbuf(hwctx, job, aie2_sched_resp_handler);

out:
	if (ret) {
		dma_fence_put(job->fence);
		aie2_job_put(job);
		mmput(job->mm);
		fence = ERR_PTR(ret);
	}
	trace_xdna_job(sched_job, hwctx->name, "sent to device", job->seq);

	return fence;
}

static void aie2_sched_job_free(struct drm_sched_job *sched_job)
{
	struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
	struct amdxdna_hwctx *hwctx = job->hwctx;

	trace_xdna_job(sched_job, hwctx->name, "job free", job->seq);
	if (!job->job_done)
		up(&hwctx->priv->job_sem);

	drm_sched_job_cleanup(sched_job);
	aie2_job_put(job);
}

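/*
 * drm_sched timeout callback: tear down the FW context with the bad
 * job marked, then recreate the context and restart the queue.
 * dev_lock serializes this against suspend/resume and context
 * teardown.
 */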
static enum drm_gpu_sched_stat
aie2_sched_job_timedout(struct drm_sched_job *sched_job)
{
	struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
	struct amdxdna_hwctx *hwctx = job->hwctx;
	struct amdxdna_dev *xdna;

	xdna = hwctx->client->xdna;
	trace_xdna_job(sched_job, hwctx->name, "job timedout", job->seq);
	mutex_lock(&xdna->dev_lock);
	aie2_hwctx_stop(xdna, hwctx, sched_job);

	aie2_hwctx_restart(xdna, hwctx);
	mutex_unlock(&xdna->dev_lock);

	return DRM_GPU_SCHED_STAT_RESET;
}

static const struct drm_sched_backend_ops sched_ops = {
	.run_job = aie2_sched_job_run,
	.free_job = aie2_sched_job_free,
	.timedout_job = aie2_sched_job_timedout,
};

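/*
 * Build the list of candidate start columns for this context. As a
 * worked example with hypothetical numbers, total_col = 8,
 * first_col = 1, num_col = 2 and COL_ALIGN_NATURE (width = 2) give:
 *	start = 1, end = 8 - 2 = 6
 *	first = 1 + (2 - 1 % 2) % 2 = 2	(start rounded up to width)
 *	last = 6 - 6 % 2 = 6		(end rounded down to width)
 *	entries = (6 - 2) / 2 + 1 = 3	-> col_list = { 2, 4, 6 }
 */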
static int aie2_hwctx_col_list(struct amdxdna_hwctx *hwctx)
{
	struct amdxdna_dev *xdna = hwctx->client->xdna;
	struct amdxdna_dev_hdl *ndev;
	int start, end, first, last;
	u32 width = 1, entries = 0;
	int i;

	if (!hwctx->num_tiles) {
		XDNA_ERR(xdna, "Number of tiles is zero");
		return -EINVAL;
	}

	ndev = xdna->dev_handle;
	if (unlikely(!ndev->metadata.core.row_count)) {
		XDNA_WARN(xdna, "Core tile row count is zero");
		return -EINVAL;
	}

	hwctx->num_col = hwctx->num_tiles / ndev->metadata.core.row_count;
	if (!hwctx->num_col || hwctx->num_col > ndev->total_col) {
		XDNA_ERR(xdna, "Invalid num_col %d", hwctx->num_col);
		return -EINVAL;
	}

	if (ndev->priv->col_align == COL_ALIGN_NATURE)
		width = hwctx->num_col;

	/*
	 * In the range [start, end], find the columns that are multiples
	 * of width.
	 *	'first' is the first such column,
	 *	'last' is the last such column,
	 *	'entries' is the total number of such columns.
	 */
	start = xdna->dev_info->first_col;
	end = ndev->total_col - hwctx->num_col;
	if (start > 0 && end == 0) {
		XDNA_DBG(xdna, "Force start from col 0");
		start = 0;
	}
	first = start + (width - start % width) % width;
	last = end - end % width;
	if (last >= first)
		entries = (last - first) / width + 1;
	XDNA_DBG(xdna, "start %d end %d first %d last %d",
		 start, end, first, last);

	if (unlikely(!entries)) {
		XDNA_ERR(xdna, "Start %d end %d width %d",
			 start, end, width);
		return -EINVAL;
	}

	hwctx->col_list = kmalloc_array(entries, sizeof(*hwctx->col_list), GFP_KERNEL);
	if (!hwctx->col_list)
		return -ENOMEM;

	hwctx->col_list_len = entries;
	hwctx->col_list[0] = first;
	for (i = 1; i < entries; i++)
		hwctx->col_list[i] = hwctx->col_list[i - 1] + width;

	print_hex_dump_debug("col_list: ", DUMP_PREFIX_OFFSET, 16, 4, hwctx->col_list,
			     entries * sizeof(*hwctx->col_list), false);
	return 0;
}

static int aie2_alloc_resource(struct amdxdna_hwctx *hwctx)
{
	struct amdxdna_dev *xdna = hwctx->client->xdna;
	struct alloc_requests *xrs_req;
	int ret;

	xrs_req = kzalloc(sizeof(*xrs_req), GFP_KERNEL);
	if (!xrs_req)
		return -ENOMEM;

	xrs_req->cdo.start_cols = hwctx->col_list;
	xrs_req->cdo.cols_len = hwctx->col_list_len;
	xrs_req->cdo.ncols = hwctx->num_col;
	xrs_req->cdo.qos_cap.opc = hwctx->max_opc;

	xrs_req->rqos.gops = hwctx->qos.gops;
	xrs_req->rqos.fps = hwctx->qos.fps;
	xrs_req->rqos.dma_bw = hwctx->qos.dma_bandwidth;
	xrs_req->rqos.latency = hwctx->qos.latency;
	xrs_req->rqos.exec_time = hwctx->qos.frame_exec_time;
	xrs_req->rqos.priority = hwctx->qos.priority;

	xrs_req->rid = (uintptr_t)hwctx;

	ret = xrs_allocate_resource(xdna->xrs_hdl, xrs_req, hwctx);
	if (ret)
		XDNA_ERR(xdna, "Allocate AIE resource failed, ret %d", ret);

	kfree(xrs_req);
	return ret;
}

static void aie2_release_resource(struct amdxdna_hwctx *hwctx)
{
	struct amdxdna_dev *xdna = hwctx->client->xdna;
	int ret;

	ret = xrs_release_resource(xdna->xrs_hdl, (uintptr_t)hwctx);
	if (ret)
		XDNA_ERR(xdna, "Release AIE resource failed, ret %d", ret);
}

static int aie2_ctx_syncobj_create(struct amdxdna_hwctx *hwctx)
{
	struct amdxdna_dev *xdna = hwctx->client->xdna;
	struct drm_file *filp = hwctx->client->filp;
	struct drm_syncobj *syncobj;
	u32 hdl;
	int ret;

	hwctx->syncobj_hdl = AMDXDNA_INVALID_FENCE_HANDLE;

	ret = drm_syncobj_create(&syncobj, 0, NULL);
	if (ret) {
		XDNA_ERR(xdna, "Create ctx syncobj failed, ret %d", ret);
		return ret;
	}
	ret = drm_syncobj_get_handle(filp, syncobj, &hdl);
	if (ret) {
		drm_syncobj_put(syncobj);
		XDNA_ERR(xdna, "Create ctx syncobj handle failed, ret %d", ret);
		return ret;
	}
	hwctx->priv->syncobj = syncobj;
	hwctx->syncobj_hdl = hdl;

	return 0;
}

static void aie2_ctx_syncobj_destroy(struct amdxdna_hwctx *hwctx)
{
	/*
	 * The syncobj_hdl is owned by user space and will be cleaned up
	 * separately.
	 */
	drm_syncobj_put(hwctx->priv->syncobj);
}

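/*
 * Bring up one hardware context: allocate the private state and DEV
 * heap command buffers, create a dedicated DRM scheduler and entity,
 * pick and allocate AIE columns, map the client heap into the FW
 * context and create the completion syncobj. Error paths unwind in
 * reverse order.
 */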
int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
{
	struct amdxdna_client *client = hwctx->client;
	struct amdxdna_dev *xdna = client->xdna;
	const struct drm_sched_init_args args = {
		.ops = &sched_ops,
		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
		.credit_limit = HWCTX_MAX_CMDS,
		.timeout = msecs_to_jiffies(HWCTX_MAX_TIMEOUT),
		.name = "amdxdna_js",
		.dev = xdna->ddev.dev,
	};
	struct drm_gpu_scheduler *sched;
	struct amdxdna_hwctx_priv *priv;
	struct amdxdna_gem_obj *heap;
	struct amdxdna_dev_hdl *ndev;
	int i, ret;

	priv = kzalloc(sizeof(*hwctx->priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	hwctx->priv = priv;

	mutex_lock(&client->mm_lock);
	heap = client->dev_heap;
	if (!heap) {
		XDNA_ERR(xdna, "The client dev heap object does not exist");
		mutex_unlock(&client->mm_lock);
		ret = -ENOENT;
		goto free_priv;
	}
	drm_gem_object_get(to_gobj(heap));
	mutex_unlock(&client->mm_lock);
	priv->heap = heap;
	sema_init(&priv->job_sem, HWCTX_MAX_CMDS);

	ret = amdxdna_gem_pin(heap);
	if (ret) {
		XDNA_ERR(xdna, "Dev heap pin failed, ret %d", ret);
		goto put_heap;
	}

	for (i = 0; i < ARRAY_SIZE(priv->cmd_buf); i++) {
		struct amdxdna_gem_obj *abo;
		struct amdxdna_drm_create_bo args = {
			.flags = 0,
			.type = AMDXDNA_BO_DEV,
			.vaddr = 0,
			.size = MAX_CHAIN_CMDBUF_SIZE,
		};

		abo = amdxdna_drm_alloc_dev_bo(&xdna->ddev, &args, client->filp);
		if (IS_ERR(abo)) {
			ret = PTR_ERR(abo);
			goto free_cmd_bufs;
		}

		XDNA_DBG(xdna, "Command buf %d addr 0x%llx size 0x%lx",
			 i, abo->mem.dev_addr, abo->mem.size);
		priv->cmd_buf[i] = abo;
	}

	sched = &priv->sched;
	mutex_init(&priv->io_lock);

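	/*
	 * Prime lockdep: record that io_lock may be taken in memory
	 * reclaim context, so any allocation attempted while holding
	 * io_lock is flagged immediately instead of on a rare reclaim
	 * path.
	 */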
	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&priv->io_lock);
	fs_reclaim_release(GFP_KERNEL);

	ret = drm_sched_init(sched, &args);
	if (ret) {
		XDNA_ERR(xdna, "Failed to init DRM scheduler, ret %d", ret);
		goto free_cmd_bufs;
	}

	ret = drm_sched_entity_init(&priv->entity, DRM_SCHED_PRIORITY_NORMAL,
				    &sched, 1, NULL);
	if (ret) {
		XDNA_ERR(xdna, "Failed to init sched entity, ret %d", ret);
		goto free_sched;
	}

	ret = aie2_hwctx_col_list(hwctx);
	if (ret) {
		XDNA_ERR(xdna, "Create col list failed, ret %d", ret);
		goto free_entity;
	}

	ret = amdxdna_pm_resume_get(xdna);
	if (ret)
		goto free_col_list;

	ret = aie2_alloc_resource(hwctx);
	if (ret) {
		XDNA_ERR(xdna, "Alloc hw resource failed, ret %d", ret);
		goto suspend_put;
	}

	ret = aie2_map_host_buf(xdna->dev_handle, hwctx->fw_ctx_id,
				heap->mem.userptr, heap->mem.size);
	if (ret) {
		XDNA_ERR(xdna, "Map host buffer failed, ret %d", ret);
		goto release_resource;
	}

	ret = aie2_ctx_syncobj_create(hwctx);
	if (ret) {
		XDNA_ERR(xdna, "Create syncobj failed, ret %d", ret);
		goto release_resource;
	}
	amdxdna_pm_suspend_put(xdna);

	hwctx->status = HWCTX_STAT_INIT;
	ndev = xdna->dev_handle;
	ndev->hwctx_num++;
	init_waitqueue_head(&priv->job_free_wq);

	XDNA_DBG(xdna, "hwctx %s init completed", hwctx->name);

	return 0;

release_resource:
	aie2_release_resource(hwctx);
suspend_put:
	amdxdna_pm_suspend_put(xdna);
free_col_list:
	kfree(hwctx->col_list);
free_entity:
	drm_sched_entity_destroy(&priv->entity);
free_sched:
	drm_sched_fini(&priv->sched);
free_cmd_bufs:
	for (i = 0; i < ARRAY_SIZE(priv->cmd_buf); i++) {
		if (!priv->cmd_buf[i])
			continue;
		drm_gem_object_put(to_gobj(priv->cmd_buf[i]));
	}
	amdxdna_gem_unpin(heap);
put_heap:
	drm_gem_object_put(to_gobj(heap));
free_priv:
	kfree(priv);
	return ret;
}

void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx)
{
	struct amdxdna_dev_hdl *ndev;
	struct amdxdna_dev *xdna;
	int idx;

	xdna = hwctx->client->xdna;
	ndev = xdna->dev_handle;
	ndev->hwctx_num--;

	XDNA_DBG(xdna, "%s sequence number %lld", hwctx->name, hwctx->priv->seq);
	drm_sched_entity_destroy(&hwctx->priv->entity);

	aie2_hwctx_wait_for_idle(hwctx);

	/* Request fw to destroy hwctx and cancel the remaining pending requests */
	aie2_release_resource(hwctx);

	/* Wait for all submitted jobs to be completed or canceled */
	wait_event(hwctx->priv->job_free_wq,
		   atomic64_read(&hwctx->job_submit_cnt) ==
		   atomic64_read(&hwctx->job_free_cnt));

	drm_sched_fini(&hwctx->priv->sched);
	aie2_ctx_syncobj_destroy(hwctx);

	for (idx = 0; idx < ARRAY_SIZE(hwctx->priv->cmd_buf); idx++)
		drm_gem_object_put(to_gobj(hwctx->priv->cmd_buf[idx]));
	amdxdna_gem_unpin(hwctx->priv->heap);
	drm_gem_object_put(to_gobj(hwctx->priv->heap));

	mutex_destroy(&hwctx->priv->io_lock);
	kfree(hwctx->col_list);
	kfree(hwctx->priv);
	kfree(hwctx->cus);
}

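/*
 * Completion handler for the config CU message: drops the PM reference
 * taken in aie2_hwctx_cu_config().
 */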
static int aie2_config_cu_resp_handler(void *handle, void __iomem *data, size_t size)
{
	struct amdxdna_hwctx *hwctx = handle;

	amdxdna_pm_suspend_put(hwctx->client->xdna);
	return 0;
}

static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size)
{
	struct amdxdna_hwctx_param_config_cu *config = buf;
	struct amdxdna_dev *xdna = hwctx->client->xdna;
	u32 total_size;
	int ret;

	XDNA_DBG(xdna, "Config %d CU to %s", config->num_cus, hwctx->name);
	if (XDNA_MBZ_DBG(xdna, config->pad, sizeof(config->pad)))
		return -EINVAL;

	if (hwctx->status != HWCTX_STAT_INIT) {
		XDNA_ERR(xdna, "Re-configuring CU is not supported");
		return -EINVAL;
	}

	if (!config->num_cus) {
		XDNA_ERR(xdna, "Number of CU is zero");
		return -EINVAL;
	}

	total_size = struct_size(config, cu_configs, config->num_cus);
	if (total_size > size) {
		XDNA_ERR(xdna, "CU config larger than provided buffer");
		return -EINVAL;
	}

	hwctx->cus = kmemdup(config, total_size, GFP_KERNEL);
	if (!hwctx->cus)
		return -ENOMEM;

	ret = amdxdna_pm_resume_get(xdna);
	if (ret)
		goto free_cus;

	ret = aie2_config_cu(hwctx, aie2_config_cu_resp_handler);
	if (ret) {
		XDNA_ERR(xdna, "Config CU to firmware failed, ret %d", ret);
		goto pm_suspend_put;
	}

	wmb(); /* To avoid locking in command submit when checking status */
	hwctx->status = HWCTX_STAT_READY;

	return 0;

pm_suspend_put:
	amdxdna_pm_suspend_put(xdna);
free_cus:
	kfree(hwctx->cus);
	hwctx->cus = NULL;
	return ret;
}

static void aie2_cmd_wait(struct amdxdna_hwctx *hwctx, u64 seq)
{
	struct dma_fence *out_fence = aie2_cmd_get_out_fence(hwctx, seq);

	if (!out_fence) {
		XDNA_ERR(hwctx->client->xdna, "Failed to get fence");
		return;
	}

	dma_fence_wait_timeout(out_fence, false, MAX_SCHEDULE_TIMEOUT);
	dma_fence_put(out_fence);
}

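/*
 * Attach or detach a debug BO by submitting a driver-internal command
 * through the normal job queue and waiting synchronously on its
 * timeline fence. abo->assigned_hwctx tracks which context currently
 * owns the BO.
 */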
static int aie2_hwctx_cfg_debug_bo(struct amdxdna_hwctx *hwctx, u32 bo_hdl,
				   bool attach)
{
	struct amdxdna_client *client = hwctx->client;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_drv_cmd cmd = { 0 };
	struct amdxdna_gem_obj *abo;
	u64 seq;
	int ret;

	abo = amdxdna_gem_get_obj(client, bo_hdl, AMDXDNA_BO_DEV);
	if (!abo) {
		XDNA_ERR(xdna, "Get bo %d failed", bo_hdl);
		return -EINVAL;
	}

	if (attach) {
		if (abo->assigned_hwctx != AMDXDNA_INVALID_CTX_HANDLE) {
			ret = -EBUSY;
			goto put_obj;
		}
		cmd.opcode = ATTACH_DEBUG_BO;
	} else {
		if (abo->assigned_hwctx != hwctx->id) {
			ret = -EINVAL;
			goto put_obj;
		}
		cmd.opcode = DETACH_DEBUG_BO;
	}

	ret = amdxdna_cmd_submit(client, &cmd, AMDXDNA_INVALID_BO_HANDLE,
				 &bo_hdl, 1, hwctx->id, &seq);
	if (ret) {
		XDNA_ERR(xdna, "Submit command failed");
		goto put_obj;
	}

	aie2_cmd_wait(hwctx, seq);
	if (cmd.result) {
		XDNA_ERR(xdna, "Response failure 0x%x", cmd.result);
		goto put_obj;
	}

	if (attach)
		abo->assigned_hwctx = hwctx->id;
	else
		abo->assigned_hwctx = AMDXDNA_INVALID_CTX_HANDLE;

	XDNA_DBG(xdna, "Config debug BO %d to %s", bo_hdl, hwctx->name);

put_obj:
	amdxdna_gem_put_obj(abo);
	return ret;
}

int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size)
{
	struct amdxdna_dev *xdna = hwctx->client->xdna;

	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
	switch (type) {
	case DRM_AMDXDNA_HWCTX_CONFIG_CU:
		return aie2_hwctx_cu_config(hwctx, buf, size);
	case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF:
		return aie2_hwctx_cfg_debug_bo(hwctx, (u32)value, true);
	case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF:
		return aie2_hwctx_cfg_debug_bo(hwctx, (u32)value, false);
	default:
		XDNA_DBG(xdna, "Unsupported type %d", type);
		return -EOPNOTSUPP;
	}
}

int aie2_hwctx_sync_debug_bo(struct amdxdna_hwctx *hwctx, u32 debug_bo_hdl)
{
	struct amdxdna_client *client = hwctx->client;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_drv_cmd cmd = { 0 };
	u64 seq;
	int ret;

	cmd.opcode = SYNC_DEBUG_BO;
	ret = amdxdna_cmd_submit(client, &cmd, AMDXDNA_INVALID_BO_HANDLE,
				 &debug_bo_hdl, 1, hwctx->id, &seq);
	if (ret) {
		XDNA_ERR(xdna, "Submit command failed");
		return ret;
	}

	aie2_cmd_wait(hwctx, seq);
	if (cmd.result) {
		XDNA_ERR(xdna, "Response failure 0x%x", cmd.result);
		return -EINVAL;
	}

	return 0;
}

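/*
 * Fault in and revalidate any invalidated userptr ranges backing @abo.
 * This follows the usual HMM pattern: snapshot the notifier sequence
 * with mmu_interval_read_begin(), call hmm_range_fault(), then retry
 * if mmu_interval_read_retry() reports the range was invalidated again
 * before notifier_lock could be retaken.
 */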
static int aie2_populate_range(struct amdxdna_gem_obj *abo)
{
	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
	struct amdxdna_umap *mapp;
	unsigned long timeout;
	struct mm_struct *mm;
	bool found;
	int ret;

	timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
again:
	found = false;
	down_write(&xdna->notifier_lock);
	list_for_each_entry(mapp, &abo->mem.umap_list, node) {
		if (mapp->invalid) {
			found = true;
			break;
		}
	}

	if (!found) {
		abo->mem.map_invalid = false;
		up_write(&xdna->notifier_lock);
		return 0;
	}
	kref_get(&mapp->refcnt);
	up_write(&xdna->notifier_lock);

	XDNA_DBG(xdna, "populate memory range %lx %lx",
		 mapp->vma->vm_start, mapp->vma->vm_end);
	mm = mapp->notifier.mm;
	if (!mmget_not_zero(mm)) {
		amdxdna_umap_put(mapp);
		return -EFAULT;
	}

	mapp->range.notifier_seq = mmu_interval_read_begin(&mapp->notifier);
	mmap_read_lock(mm);
	ret = hmm_range_fault(&mapp->range);
	mmap_read_unlock(mm);
	if (ret) {
		if (time_after(jiffies, timeout)) {
			ret = -ETIME;
			goto put_mm;
		}

		if (ret == -EBUSY) {
			/* Balance mmget_not_zero() above before retrying */
			amdxdna_umap_put(mapp);
			mmput(mm);
			goto again;
		}

		goto put_mm;
	}

	down_write(&xdna->notifier_lock);
	if (mmu_interval_read_retry(&mapp->notifier, mapp->range.notifier_seq)) {
		up_write(&xdna->notifier_lock);
		amdxdna_umap_put(mapp);
		mmput(mm);
		goto again;
	}
	mapp->invalid = false;
	up_write(&xdna->notifier_lock);
	amdxdna_umap_put(mapp);
	mmput(mm);
	goto again;

put_mm:
	amdxdna_umap_put(mapp);
	mmput(mm);
	return ret;
}

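/*
 * Submission path: take a slot from job_sem, initialize the drm_sched
 * job, lock all BO reservations and revalidate userptr BOs, retrying
 * from the reservation step after aie2_populate_range() since the
 * locks are dropped. Under io_lock the job is armed, its finished
 * fence is attached to every BO and published on the timeline syncobj
 * at the new sequence number.
 */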
int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq)
{
	struct amdxdna_dev *xdna = hwctx->client->xdna;
	struct ww_acquire_ctx acquire_ctx;
	struct dma_fence_chain *chain;
	struct amdxdna_gem_obj *abo;
	unsigned long timeout = 0;
	int ret, i;

	ret = down_interruptible(&hwctx->priv->job_sem);
	if (ret) {
		XDNA_ERR(xdna, "Grab job sem failed, ret %d", ret);
		return ret;
	}

	chain = dma_fence_chain_alloc();
	if (!chain) {
		XDNA_ERR(xdna, "Alloc fence chain failed");
		ret = -ENOMEM;
		goto up_sem;
	}

	ret = drm_sched_job_init(&job->base, &hwctx->priv->entity, 1, hwctx,
				 hwctx->client->filp->client_id);
	if (ret) {
		XDNA_ERR(xdna, "DRM job init failed, ret %d", ret);
		goto free_chain;
	}

	ret = amdxdna_pm_resume_get(xdna);
	if (ret)
		goto cleanup_job;

retry:
	ret = drm_gem_lock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
	if (ret) {
		XDNA_WARN(xdna, "Failed to lock BOs, ret %d", ret);
		goto suspend_put;
	}

	for (i = 0; i < job->bo_cnt; i++) {
		ret = dma_resv_reserve_fences(job->bos[i]->resv, 1);
		if (ret) {
			XDNA_WARN(xdna, "Failed to reserve fences %d", ret);
			drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
			goto suspend_put;
		}
	}

	down_read(&xdna->notifier_lock);
	for (i = 0; i < job->bo_cnt; i++) {
		abo = to_xdna_obj(job->bos[i]);
		if (abo->mem.map_invalid) {
			up_read(&xdna->notifier_lock);
			drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
			if (!timeout) {
				timeout = jiffies +
					msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
			} else if (time_after(jiffies, timeout)) {
				ret = -ETIME;
				goto suspend_put;
			}

			ret = aie2_populate_range(abo);
			if (ret)
				goto suspend_put;
			goto retry;
		}
	}

	mutex_lock(&hwctx->priv->io_lock);
	drm_sched_job_arm(&job->base);
	job->out_fence = dma_fence_get(&job->base.s_fence->finished);
	for (i = 0; i < job->bo_cnt; i++)
		dma_resv_add_fence(job->bos[i]->resv, job->out_fence, DMA_RESV_USAGE_WRITE);
	job->seq = hwctx->priv->seq++;
	kref_get(&job->refcnt);
	drm_sched_entity_push_job(&job->base);

	*seq = job->seq;
	drm_syncobj_add_point(hwctx->priv->syncobj, chain, job->out_fence, *seq);
	mutex_unlock(&hwctx->priv->io_lock);

	up_read(&xdna->notifier_lock);
	drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);

	aie2_job_put(job);
	atomic64_inc(&hwctx->job_submit_cnt);

	return 0;

suspend_put:
	amdxdna_pm_suspend_put(xdna);
cleanup_job:
	drm_sched_job_cleanup(&job->base);
free_chain:
	dma_fence_chain_free(chain);
up_sem:
	up(&hwctx->priv->job_sem);
	job->job_done = true;
	return ret;
}

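/*
 * Userptr invalidation side: before the pages can be unmapped, wait
 * for every fence on the BO (including bookkeeping fences) so the
 * device is guaranteed to be done with them.
 */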
void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo,
			 unsigned long cur_seq)
{
	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
	struct drm_gem_object *gobj = to_gobj(abo);
	long ret;

	ret = dma_resv_wait_timeout(gobj->resv, DMA_RESV_USAGE_BOOKKEEP,
				    true, MAX_SCHEDULE_TIMEOUT);
	if (!ret || ret == -ERESTARTSYS)
		XDNA_ERR(xdna, "Failed to wait for bo, ret %ld", ret);
}
1074