xref: /linux/drivers/accel/amdxdna/aie2_ctx.c (revision e005fd94e2e5867f2a4e66e5df85069cda6f0db4)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2024, Advanced Micro Devices, Inc.
4  */
5 
6 #include <drm/amdxdna_accel.h>
7 #include <drm/drm_device.h>
8 #include <drm/drm_gem.h>
9 #include <drm/drm_gem_shmem_helper.h>
10 #include <drm/drm_print.h>
11 #include <drm/drm_syncobj.h>
12 #include <linux/hmm.h>
13 #include <linux/types.h>
14 #include <linux/xarray.h>
15 #include <trace/events/amdxdna.h>
16 
17 #include "aie2_msg_priv.h"
18 #include "aie2_pci.h"
19 #include "aie2_solver.h"
20 #include "amdxdna_ctx.h"
21 #include "amdxdna_gem.h"
22 #include "amdxdna_mailbox.h"
23 #include "amdxdna_pci_drv.h"
24 #include "amdxdna_pm.h"
25 
26 static bool force_cmdlist;
27 module_param(force_cmdlist, bool, 0600);
28 MODULE_PARM_DESC(force_cmdlist, "Force the use of command list (default false)");
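/*
 * Usage sketch (not from this file): with 0600 permissions, root can also
 * toggle the parameter at runtime via sysfs, e.g.
 *	echo 1 > /sys/module/<module>/parameters/force_cmdlist
 * where the module directory name ("amdxdna" is assumed here) depends on how
 * the driver is built.
 */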
29 
30 #define HWCTX_MAX_TIMEOUT	60000 /* milliseconds */
31 
32 static void aie2_job_release(struct kref *ref)
33 {
34 	struct amdxdna_sched_job *job;
35 
36 	job = container_of(ref, struct amdxdna_sched_job, refcnt);
37 	amdxdna_sched_job_cleanup(job);
38 	atomic64_inc(&job->hwctx->job_free_cnt);
39 	wake_up(&job->hwctx->priv->job_free_wq);
40 	if (job->out_fence)
41 		dma_fence_put(job->out_fence);
42 	kfree(job);
43 }
44 
45 static void aie2_job_put(struct amdxdna_sched_job *job)
46 {
47 	kref_put(&job->refcnt, aie2_job_release);
48 }
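
/*
 * Job reference flow (summary inferred from this file): the initial reference
 * from job creation is dropped in aie2_sched_job_free(); aie2_cmd_submit()
 * holds a temporary reference across drm_sched_entity_push_job(); and
 * aie2_sched_job_run() takes a reference that the mailbox response handler
 * drops through aie2_sched_notify().
 */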
49 
50 static void aie2_hwctx_status_shift_stop(struct amdxdna_hwctx *hwctx)
51 {
52 	hwctx->old_status = hwctx->status;
53 	hwctx->status = HWCTX_STAT_STOP;
54 }
55 
56 static void aie2_hwctx_status_restore(struct amdxdna_hwctx *hwctx)
57 {
58 	hwctx->status = hwctx->old_status;
59 }
60 
61 /* bad_job is only set by aie2_sched_job_timedout(); all other callers pass NULL */
62 static void aie2_hwctx_stop(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx,
63 			    struct drm_sched_job *bad_job)
64 {
65 	drm_sched_stop(&hwctx->priv->sched, bad_job);
66 	aie2_destroy_context(xdna->dev_handle, hwctx);
67 }
68 
69 static int aie2_hwctx_restart(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx)
70 {
71 	struct amdxdna_gem_obj *heap = hwctx->priv->heap;
72 	int ret;
73 
74 	ret = aie2_create_context(xdna->dev_handle, hwctx);
75 	if (ret) {
76 		XDNA_ERR(xdna, "Create hwctx failed, ret %d", ret);
77 		goto out;
78 	}
79 
80 	ret = aie2_map_host_buf(xdna->dev_handle, hwctx->fw_ctx_id,
81 				heap->mem.userptr, heap->mem.size);
82 	if (ret) {
83 		XDNA_ERR(xdna, "Map host buf failed, ret %d", ret);
84 		goto out;
85 	}
86 
87 	if (hwctx->status != HWCTX_STAT_READY) {
88 		XDNA_DBG(xdna, "hwctx is not ready, status %d", hwctx->status);
89 		goto out;
90 	}
91 
92 	ret = aie2_config_cu(hwctx, NULL);
93 	if (ret) {
94 		XDNA_ERR(xdna, "Config cu failed, ret %d", ret);
95 		goto out;
96 	}
97 
98 out:
99 	drm_sched_start(&hwctx->priv->sched, 0);
100 	XDNA_DBG(xdna, "%s restarted, ret %d", hwctx->name, ret);
101 	return ret;
102 }
103 
104 static struct dma_fence *aie2_cmd_get_out_fence(struct amdxdna_hwctx *hwctx, u64 seq)
105 {
106 	struct dma_fence *fence, *out_fence = NULL;
107 	int ret;
108 
109 	fence = drm_syncobj_fence_get(hwctx->priv->syncobj);
110 	if (!fence)
111 		return NULL;
112 
113 	ret = dma_fence_chain_find_seqno(&fence, seq);
114 	if (ret)
115 		goto out;
116 
117 	out_fence = dma_fence_get(dma_fence_chain_contained(fence));
118 
119 out:
120 	dma_fence_put(fence);
121 	return out_fence;
122 }
123 
124 static void aie2_hwctx_wait_for_idle(struct amdxdna_hwctx *hwctx)
125 {
126 	struct dma_fence *fence;
127 
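	/*
	 * seq is post-incremented in aie2_cmd_submit(), so seq - 1 is the
	 * sequence number of the most recently queued command.
	 */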
128 	fence = aie2_cmd_get_out_fence(hwctx, hwctx->priv->seq - 1);
129 	if (!fence)
130 		return;
131 
132 	/* Wait up to 2 seconds for fw to finish all pending requests */
133 	dma_fence_wait_timeout(fence, false, msecs_to_jiffies(2000));
134 	dma_fence_put(fence);
135 }
136 
137 static int aie2_hwctx_suspend_cb(struct amdxdna_hwctx *hwctx, void *arg)
138 {
139 	struct amdxdna_dev *xdna = hwctx->client->xdna;
140 
141 	aie2_hwctx_wait_for_idle(hwctx);
142 	aie2_hwctx_stop(xdna, hwctx, NULL);
143 	aie2_hwctx_status_shift_stop(hwctx);
144 
145 	return 0;
146 }
147 
148 void aie2_hwctx_suspend(struct amdxdna_client *client)
149 {
150 	struct amdxdna_dev *xdna = client->xdna;
151 
152 	/*
153 	 * A command timeout is unlikely, but if it happens it does not
154 	 * break the system: aie2_hwctx_stop() destroys the mailbox
155 	 * channel and aborts all pending commands.
156 	 */
157 	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
158 	amdxdna_hwctx_walk(client, NULL, aie2_hwctx_suspend_cb);
159 }
160 
161 static int aie2_hwctx_resume_cb(struct amdxdna_hwctx *hwctx, void *arg)
162 {
163 	struct amdxdna_dev *xdna = hwctx->client->xdna;
164 
165 	aie2_hwctx_status_restore(hwctx);
166 	return aie2_hwctx_restart(xdna, hwctx);
167 }
168 
169 int aie2_hwctx_resume(struct amdxdna_client *client)
170 {
171 	/*
172 	 * The resume path cannot guarantee that the mailbox channel is
173 	 * regenerated. If that happens, submitting a message to the
174 	 * channel will return an error.
175 	 */
176 	return amdxdna_hwctx_walk(client, NULL, aie2_hwctx_resume_cb);
177 }
178 
179 static void
180 aie2_sched_notify(struct amdxdna_sched_job *job)
181 {
182 	struct dma_fence *fence = job->fence;
183 
184 	trace_xdna_job(&job->base, job->hwctx->name, "signaled fence", job->seq);
185 
186 	amdxdna_pm_suspend_put(job->hwctx->client->xdna);
187 	job->hwctx->priv->completed++;
188 	dma_fence_signal(fence);
189 
190 	up(&job->hwctx->priv->job_sem);
191 	job->job_done = true;
192 	mmput_async(job->mm);
193 	aie2_job_put(job);
194 }
195 
196 static int
197 aie2_sched_resp_handler(void *handle, void __iomem *data, size_t size)
198 {
199 	struct amdxdna_sched_job *job = handle;
200 	struct amdxdna_gem_obj *cmd_abo;
201 	int ret = 0;
202 	u32 status;
203 
204 	cmd_abo = job->cmd_bo;
205 
206 	if (unlikely(job->job_timeout)) {
207 		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_TIMEOUT);
208 		ret = -EINVAL;
209 		goto out;
210 	}
211 
212 	if (unlikely(!data) || unlikely(size != sizeof(u32))) {
213 		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
214 		ret = -EINVAL;
215 		goto out;
216 	}
217 
218 	status = readl(data);
219 	XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status);
220 	if (status == AIE2_STATUS_SUCCESS)
221 		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED);
222 	else
223 		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ERROR);
224 
225 out:
226 	aie2_sched_notify(job);
227 	return ret;
228 }
229 
230 static int
231 aie2_sched_drvcmd_resp_handler(void *handle, void __iomem *data, size_t size)
232 {
233 	struct amdxdna_sched_job *job = handle;
234 	int ret = 0;
235 
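	/*
	 * A NULL payload is treated as a benign completion (e.g. the command
	 * was aborted when the mailbox channel was destroyed; an assumption
	 * based on the aie2_hwctx_stop() behavior described above), so
	 * drv_cmd->result is left untouched.
	 */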
236 	if (unlikely(!data))
237 		goto out;
238 
239 	if (unlikely(size != sizeof(u32))) {
240 		ret = -EINVAL;
241 		goto out;
242 	}
243 
244 	job->drv_cmd->result = readl(data);
245 
246 out:
247 	aie2_sched_notify(job);
248 	return ret;
249 }
250 
251 static int
252 aie2_sched_cmdlist_resp_handler(void *handle, void __iomem *data, size_t size)
253 {
254 	struct amdxdna_sched_job *job = handle;
255 	struct amdxdna_gem_obj *cmd_abo;
256 	struct amdxdna_dev *xdna;
257 	u32 fail_cmd_status;
258 	u32 fail_cmd_idx;
259 	u32 cmd_status;
260 	int ret = 0;
261 
262 	cmd_abo = job->cmd_bo;
263 
264 	if (unlikely(job->job_timeout)) {
265 		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_TIMEOUT);
266 		ret = -EINVAL;
267 		goto out;
268 	}
269 
270 	if (unlikely(!data) || unlikely(size != sizeof(u32) * 3)) {
271 		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
272 		ret = -EINVAL;
273 		goto out;
274 	}
275 
276 	cmd_status = readl(data + offsetof(struct cmd_chain_resp, status));
277 	xdna = job->hwctx->client->xdna;
278 	XDNA_DBG(xdna, "Status 0x%x", cmd_status);
279 	if (cmd_status == AIE2_STATUS_SUCCESS) {
280 		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED);
281 		goto out;
282 	}
283 
284 	/* Slow path to handle errors: read the failure details from the ring buffer on the BAR */
285 	fail_cmd_idx = readl(data + offsetof(struct cmd_chain_resp, fail_cmd_idx));
286 	fail_cmd_status = readl(data + offsetof(struct cmd_chain_resp, fail_cmd_status));
287 	XDNA_DBG(xdna, "Failed cmd idx %d, status 0x%x",
288 		 fail_cmd_idx, fail_cmd_status);
289 
290 	if (fail_cmd_status == AIE2_STATUS_SUCCESS) {
291 		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
292 		ret = -EINVAL;
293 		goto out;
294 	}
295 	amdxdna_cmd_set_state(cmd_abo, fail_cmd_status);
296 
297 	if (amdxdna_cmd_get_op(cmd_abo) == ERT_CMD_CHAIN) {
298 		struct amdxdna_cmd_chain *cc = amdxdna_cmd_get_payload(cmd_abo, NULL);
299 
300 		cc->error_index = fail_cmd_idx;
301 		if (cc->error_index >= cc->command_count)
302 			cc->error_index = 0;
303 	}
304 out:
305 	aie2_sched_notify(job);
306 	return ret;
307 }
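
/*
 * Response layout assumed by the offsetof() reads above; a sketch only, the
 * authoritative definition lives in aie2_msg_priv.h:
 *
 *	struct cmd_chain_resp {
 *		u32 status;
 *		u32 fail_cmd_idx;
 *		u32 fail_cmd_status;
 *	};
 *
 * which is consistent with the `size != sizeof(u32) * 3` check.
 */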
308 
309 static struct dma_fence *
310 aie2_sched_job_run(struct drm_sched_job *sched_job)
311 {
312 	struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
313 	struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
314 	struct amdxdna_hwctx *hwctx = job->hwctx;
315 	struct dma_fence *fence;
316 	int ret;
317 
318 	if (!mmget_not_zero(job->mm))
319 		return ERR_PTR(-ESRCH);
320 
321 	kref_get(&job->refcnt);
322 	fence = dma_fence_get(job->fence);
323 
324 	if (job->drv_cmd) {
325 		switch (job->drv_cmd->opcode) {
326 		case SYNC_DEBUG_BO:
327 			ret = aie2_sync_bo(hwctx, job, aie2_sched_drvcmd_resp_handler);
328 			break;
329 		case ATTACH_DEBUG_BO:
330 			ret = aie2_config_debug_bo(hwctx, job, aie2_sched_drvcmd_resp_handler);
331 			break;
332 		default:
333 			ret = -EINVAL;
334 			break;
335 		}
336 		goto out;
337 	}
338 
339 	amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_NEW);
340 
341 	if (amdxdna_cmd_get_op(cmd_abo) == ERT_CMD_CHAIN)
342 		ret = aie2_cmdlist_multi_execbuf(hwctx, job, aie2_sched_cmdlist_resp_handler);
343 	else if (force_cmdlist)
344 		ret = aie2_cmdlist_single_execbuf(hwctx, job, aie2_sched_cmdlist_resp_handler);
345 	else
346 		ret = aie2_execbuf(hwctx, job, aie2_sched_resp_handler);
347 
348 out:
349 	if (ret) {
350 		dma_fence_put(job->fence);
351 		aie2_job_put(job);
352 		mmput(job->mm);
353 		fence = ERR_PTR(ret);
354 	}
355 	trace_xdna_job(sched_job, hwctx->name, "sent to device", job->seq);
356 
357 	return fence;
358 }
359 
360 static void aie2_sched_job_free(struct drm_sched_job *sched_job)
361 {
362 	struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
363 	struct amdxdna_hwctx *hwctx = job->hwctx;
364 
365 	trace_xdna_job(sched_job, hwctx->name, "job free", job->seq);
366 	if (!job->job_done)
367 		up(&hwctx->priv->job_sem);
368 
369 	drm_sched_job_cleanup(sched_job);
370 	aie2_job_put(job);
371 }
372 
373 static enum drm_gpu_sched_stat
374 aie2_sched_job_timedout(struct drm_sched_job *sched_job)
375 {
376 	struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
377 	struct amdxdna_hwctx *hwctx = job->hwctx;
378 	struct amdxdna_dev *xdna;
379 
380 	xdna = hwctx->client->xdna;
381 	trace_xdna_job(sched_job, hwctx->name, "job timedout", job->seq);
382 	job->job_timeout = true;
383 	mutex_lock(&xdna->dev_lock);
384 	aie2_hwctx_stop(xdna, hwctx, sched_job);
385 
386 	aie2_hwctx_restart(xdna, hwctx);
387 	mutex_unlock(&xdna->dev_lock);
388 
389 	return DRM_GPU_SCHED_STAT_RESET;
390 }
391 
392 static const struct drm_sched_backend_ops sched_ops = {
393 	.run_job = aie2_sched_job_run,
394 	.free_job = aie2_sched_job_free,
395 	.timedout_job = aie2_sched_job_timedout,
396 };
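
/*
 * Standard DRM scheduler semantics for the ops above: .run_job is invoked
 * when a job is picked from the entity queue and a credit is available,
 * .timedout_job fires if the job has not completed within the .timeout
 * passed to drm_sched_init() (HWCTX_MAX_TIMEOUT here), and .free_job is
 * called once the scheduler is finished with the job.
 */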
397 
398 static int aie2_hwctx_col_list(struct amdxdna_hwctx *hwctx)
399 {
400 	struct amdxdna_dev *xdna = hwctx->client->xdna;
401 	struct amdxdna_dev_hdl *ndev;
402 	int start, end, first, last;
403 	u32 width = 1, entries = 0;
404 	int i;
405 
406 	if (!hwctx->num_tiles) {
407 		XDNA_ERR(xdna, "Number of tiles is zero");
408 		return -EINVAL;
409 	}
410 
411 	ndev = xdna->dev_handle;
412 	if (unlikely(!ndev->metadata.core.row_count)) {
413 		XDNA_WARN(xdna, "Core tile row count is zero");
414 		return -EINVAL;
415 	}
416 
417 	hwctx->num_col = hwctx->num_tiles / ndev->metadata.core.row_count;
418 	if (!hwctx->num_col || hwctx->num_col > ndev->total_col) {
419 		XDNA_ERR(xdna, "Invalid num_col %d", hwctx->num_col);
420 		return -EINVAL;
421 	}
422 
423 	if (ndev->priv->col_align == COL_ALIGN_NATURE)
424 		width = hwctx->num_col;
425 
426 	/*
427 	 * Within the range [start, end], find the columns that are multiples of width.
428 	 *	'first' is the first such column,
429 	 *	'last' is the last such column,
430 	 *	'entries' is the total number of such columns.
431 	 */
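	/*
	 * Worked example with illustrative numbers (not from the driver):
	 * start = 1, end = 6, width = 4 gives
	 *	first = 1 + (4 - 1 % 4) % 4 = 4
	 *	last  = 6 - 6 % 4 = 4
	 *	entries = (4 - 4) / 4 + 1 = 1
	 * With width == 1, first == start, last == end and every column in
	 * [start, end] is usable.
	 */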
432 	start = xdna->dev_info->first_col;
433 	end = ndev->total_col - hwctx->num_col;
434 	if (start > 0 && end == 0) {
435 		XDNA_DBG(xdna, "Force start from col 0");
436 		start = 0;
437 	}
438 	first = start + (width - start % width) % width;
439 	last = end - end % width;
440 	if (last >= first)
441 		entries = (last - first) / width + 1;
442 	XDNA_DBG(xdna, "start %d end %d first %d last %d",
443 		 start, end, first, last);
444 
445 	if (unlikely(!entries)) {
446 		XDNA_ERR(xdna, "Start %d end %d width %d",
447 			 start, end, width);
448 		return -EINVAL;
449 	}
450 
451 	hwctx->col_list = kmalloc_array(entries, sizeof(*hwctx->col_list), GFP_KERNEL);
452 	if (!hwctx->col_list)
453 		return -ENOMEM;
454 
455 	hwctx->col_list_len = entries;
456 	hwctx->col_list[0] = first;
457 	for (i = 1; i < entries; i++)
458 		hwctx->col_list[i] = hwctx->col_list[i - 1] + width;
459 
460 	print_hex_dump_debug("col_list: ", DUMP_PREFIX_OFFSET, 16, 4, hwctx->col_list,
461 			     entries * sizeof(*hwctx->col_list), false);
462 	return 0;
463 }
464 
465 static int aie2_alloc_resource(struct amdxdna_hwctx *hwctx)
466 {
467 	struct amdxdna_dev *xdna = hwctx->client->xdna;
468 	struct alloc_requests *xrs_req;
469 	int ret;
470 
471 	if (AIE2_FEATURE_ON(xdna->dev_handle, AIE2_TEMPORAL_ONLY)) {
472 		hwctx->num_unused_col = xdna->dev_handle->total_col - hwctx->num_col;
473 		hwctx->num_col = xdna->dev_handle->total_col;
474 		return aie2_create_context(xdna->dev_handle, hwctx);
475 	}
476 
477 	xrs_req = kzalloc(sizeof(*xrs_req), GFP_KERNEL);
478 	if (!xrs_req)
479 		return -ENOMEM;
480 
481 	xrs_req->cdo.start_cols = hwctx->col_list;
482 	xrs_req->cdo.cols_len = hwctx->col_list_len;
483 	xrs_req->cdo.ncols = hwctx->num_col;
484 	xrs_req->cdo.qos_cap.opc = hwctx->max_opc;
485 
486 	xrs_req->rqos.gops = hwctx->qos.gops;
487 	xrs_req->rqos.fps = hwctx->qos.fps;
488 	xrs_req->rqos.dma_bw = hwctx->qos.dma_bandwidth;
489 	xrs_req->rqos.latency = hwctx->qos.latency;
490 	xrs_req->rqos.exec_time = hwctx->qos.frame_exec_time;
491 	xrs_req->rqos.priority = hwctx->qos.priority;
492 
493 	xrs_req->rid = (uintptr_t)hwctx;
494 
495 	ret = xrs_allocate_resource(xdna->xrs_hdl, xrs_req, hwctx);
496 	if (ret)
497 		XDNA_ERR(xdna, "Allocate AIE resource failed, ret %d", ret);
498 
499 	kfree(xrs_req);
500 	return ret;
501 }
502 
503 static void aie2_release_resource(struct amdxdna_hwctx *hwctx)
504 {
505 	struct amdxdna_dev *xdna = hwctx->client->xdna;
506 	int ret;
507 
508 	if (AIE2_FEATURE_ON(xdna->dev_handle, AIE2_TEMPORAL_ONLY)) {
509 		ret = aie2_destroy_context(xdna->dev_handle, hwctx);
510 		if (ret)
511 			XDNA_ERR(xdna, "Destroy temporal only context failed, ret %d", ret);
512 	} else {
513 		ret = xrs_release_resource(xdna->xrs_hdl, (uintptr_t)hwctx);
514 		if (ret)
515 			XDNA_ERR(xdna, "Release AIE resource failed, ret %d", ret);
516 	}
517 }
518 
519 static int aie2_ctx_syncobj_create(struct amdxdna_hwctx *hwctx)
520 {
521 	struct amdxdna_dev *xdna = hwctx->client->xdna;
522 	struct drm_file *filp = hwctx->client->filp;
523 	struct drm_syncobj *syncobj;
524 	u32 hdl;
525 	int ret;
526 
527 	hwctx->syncobj_hdl = AMDXDNA_INVALID_FENCE_HANDLE;
528 
529 	ret = drm_syncobj_create(&syncobj, 0, NULL);
530 	if (ret) {
531 		XDNA_ERR(xdna, "Create ctx syncobj failed, ret %d", ret);
532 		return ret;
533 	}
534 	ret = drm_syncobj_get_handle(filp, syncobj, &hdl);
535 	if (ret) {
536 		drm_syncobj_put(syncobj);
537 		XDNA_ERR(xdna, "Create ctx syncobj handle failed, ret %d", ret);
538 		return ret;
539 	}
540 	hwctx->priv->syncobj = syncobj;
541 	hwctx->syncobj_hdl = hdl;
542 
543 	return 0;
544 }
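
/*
 * Userspace view (sketch, assuming libdrm): hwctx->syncobj_hdl names a
 * timeline syncobj whose point N signals when command sequence number N
 * completes (see drm_syncobj_add_point() in aie2_cmd_submit()). A client
 * could wait for a submission roughly like this:
 *
 *	uint32_t handle = syncobj_hdl;
 *	uint64_t point = seq;
 *
 *	drmSyncobjTimelineWait(fd, &handle, &point, 1, INT64_MAX,
 *			       DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT, NULL);
 */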
545 
546 static void aie2_ctx_syncobj_destroy(struct amdxdna_hwctx *hwctx)
547 {
548 	/*
549 	 * The syncobj_hdl is owned by user space and will be cleaned up
550 	 * separately.
551 	 */
552 	drm_syncobj_put(hwctx->priv->syncobj);
553 }
554 
555 int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
556 {
557 	struct amdxdna_client *client = hwctx->client;
558 	struct amdxdna_dev *xdna = client->xdna;
559 	const struct drm_sched_init_args args = {
560 		.ops = &sched_ops,
561 		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
562 		.credit_limit = HWCTX_MAX_CMDS,
563 		.timeout = msecs_to_jiffies(HWCTX_MAX_TIMEOUT),
564 		.name = "amdxdna_js",
565 		.dev = xdna->ddev.dev,
566 	};
567 	struct drm_gpu_scheduler *sched;
568 	struct amdxdna_hwctx_priv *priv;
569 	struct amdxdna_gem_obj *heap;
570 	int i, ret;
571 
572 	priv = kzalloc(sizeof(*hwctx->priv), GFP_KERNEL);
573 	if (!priv)
574 		return -ENOMEM;
575 	hwctx->priv = priv;
576 
577 	mutex_lock(&client->mm_lock);
578 	heap = client->dev_heap;
579 	if (!heap) {
580 		XDNA_ERR(xdna, "The client dev heap object does not exist");
581 		mutex_unlock(&client->mm_lock);
582 		ret = -ENOENT;
583 		goto free_priv;
584 	}
585 	drm_gem_object_get(to_gobj(heap));
586 	mutex_unlock(&client->mm_lock);
587 	priv->heap = heap;
588 	sema_init(&priv->job_sem, HWCTX_MAX_CMDS);
589 
590 	ret = amdxdna_gem_pin(heap);
591 	if (ret) {
592 		XDNA_ERR(xdna, "Dev heap pin failed, ret %d", ret);
593 		goto put_heap;
594 	}
595 
596 	for (i = 0; i < ARRAY_SIZE(priv->cmd_buf); i++) {
597 		struct amdxdna_gem_obj *abo;
598 		struct amdxdna_drm_create_bo args = {
599 			.flags = 0,
600 			.type = AMDXDNA_BO_DEV,
601 			.vaddr = 0,
602 			.size = MAX_CHAIN_CMDBUF_SIZE,
603 		};
604 
605 		abo = amdxdna_drm_alloc_dev_bo(&xdna->ddev, &args, client->filp);
606 		if (IS_ERR(abo)) {
607 			ret = PTR_ERR(abo);
608 			goto free_cmd_bufs;
609 		}
610 
611 		XDNA_DBG(xdna, "Command buf %d addr 0x%llx size 0x%lx",
612 			 i, abo->mem.dev_addr, abo->mem.size);
613 		priv->cmd_buf[i] = abo;
614 	}
615 
616 	sched = &priv->sched;
617 	mutex_init(&priv->io_lock);
618 
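	/*
	 * Prime lockdep (inferred intent): record that io_lock can be taken
	 * under fs_reclaim, so a later GFP_KERNEL allocation made while
	 * holding io_lock is reported as a potential reclaim deadlock.
	 */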
619 	fs_reclaim_acquire(GFP_KERNEL);
620 	might_lock(&priv->io_lock);
621 	fs_reclaim_release(GFP_KERNEL);
622 
623 	ret = drm_sched_init(sched, &args);
624 	if (ret) {
625 		XDNA_ERR(xdna, "Failed to init DRM scheduler. ret %d", ret);
626 		goto free_cmd_bufs;
627 	}
628 
629 	ret = drm_sched_entity_init(&priv->entity, DRM_SCHED_PRIORITY_NORMAL,
630 				    &sched, 1, NULL);
631 	if (ret) {
632 		XDNA_ERR(xdna, "Failed to init sched entity, ret %d", ret);
633 		goto free_sched;
634 	}
635 
636 	ret = aie2_hwctx_col_list(hwctx);
637 	if (ret) {
638 		XDNA_ERR(xdna, "Create col list failed, ret %d", ret);
639 		goto free_entity;
640 	}
641 
642 	ret = amdxdna_pm_resume_get(xdna);
643 	if (ret)
644 		goto free_col_list;
645 
646 	ret = aie2_alloc_resource(hwctx);
647 	if (ret) {
648 		XDNA_ERR(xdna, "Alloc hw resource failed, ret %d", ret);
649 		goto suspend_put;
650 	}
651 
652 	ret = aie2_map_host_buf(xdna->dev_handle, hwctx->fw_ctx_id,
653 				heap->mem.userptr, heap->mem.size);
654 	if (ret) {
655 		XDNA_ERR(xdna, "Map host buffer failed, ret %d", ret);
656 		goto release_resource;
657 	}
658 
659 	ret = aie2_ctx_syncobj_create(hwctx);
660 	if (ret) {
661 		XDNA_ERR(xdna, "Create syncobj failed, ret %d", ret);
662 		goto release_resource;
663 	}
664 	amdxdna_pm_suspend_put(xdna);
665 
666 	hwctx->status = HWCTX_STAT_INIT;
667 	init_waitqueue_head(&priv->job_free_wq);
668 
669 	XDNA_DBG(xdna, "hwctx %s init completed", hwctx->name);
670 
671 	return 0;
672 
673 release_resource:
674 	aie2_release_resource(hwctx);
675 suspend_put:
676 	amdxdna_pm_suspend_put(xdna);
677 free_col_list:
678 	kfree(hwctx->col_list);
679 free_entity:
680 	drm_sched_entity_destroy(&priv->entity);
681 free_sched:
682 	drm_sched_fini(&priv->sched);
683 free_cmd_bufs:
684 	for (i = 0; i < ARRAY_SIZE(priv->cmd_buf); i++) {
685 		if (!priv->cmd_buf[i])
686 			continue;
687 		drm_gem_object_put(to_gobj(priv->cmd_buf[i]));
688 	}
689 	amdxdna_gem_unpin(heap);
690 put_heap:
691 	drm_gem_object_put(to_gobj(heap));
692 free_priv:
693 	kfree(priv);
694 	return ret;
695 }
696 
697 void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx)
698 {
699 	struct amdxdna_dev *xdna;
700 	int idx;
701 
702 	xdna = hwctx->client->xdna;
703 
704 	XDNA_DBG(xdna, "%s sequence number %lld", hwctx->name, hwctx->priv->seq);
705 	aie2_hwctx_wait_for_idle(hwctx);
706 
707 	/* Request fw to destroy hwctx and cancel the remaining pending requests */
708 	aie2_release_resource(hwctx);
709 
710 	mutex_unlock(&xdna->dev_lock);
711 	drm_sched_entity_destroy(&hwctx->priv->entity);
712 
713 	/* Wait for all submitted jobs to be completed or canceled */
714 	wait_event(hwctx->priv->job_free_wq,
715 		   atomic64_read(&hwctx->job_submit_cnt) ==
716 		   atomic64_read(&hwctx->job_free_cnt));
717 	mutex_lock(&xdna->dev_lock);
718 
719 	drm_sched_fini(&hwctx->priv->sched);
720 	aie2_ctx_syncobj_destroy(hwctx);
721 
722 	for (idx = 0; idx < ARRAY_SIZE(hwctx->priv->cmd_buf); idx++)
723 		drm_gem_object_put(to_gobj(hwctx->priv->cmd_buf[idx]));
724 	amdxdna_gem_unpin(hwctx->priv->heap);
725 	drm_gem_object_put(to_gobj(hwctx->priv->heap));
726 
727 	mutex_destroy(&hwctx->priv->io_lock);
728 	kfree(hwctx->col_list);
729 	kfree(hwctx->priv);
730 	kfree(hwctx->cus);
731 }
732 
733 static int aie2_config_cu_resp_handler(void *handle, void __iomem *data, size_t size)
734 {
735 	struct amdxdna_hwctx *hwctx = handle;
736 
737 	amdxdna_pm_suspend_put(hwctx->client->xdna);
738 	return 0;
739 }
740 
741 static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size)
742 {
743 	struct amdxdna_hwctx_param_config_cu *config = buf;
744 	struct amdxdna_dev *xdna = hwctx->client->xdna;
745 	u32 total_size;
746 	int ret;
747 
748 	XDNA_DBG(xdna, "Config %d CU to %s", config->num_cus, hwctx->name);
749 	if (XDNA_MBZ_DBG(xdna, config->pad, sizeof(config->pad)))
750 		return -EINVAL;
751 
752 	if (hwctx->status != HWCTX_STAT_INIT) {
753 		XDNA_ERR(xdna, "Re-configuring CU is not supported");
754 		return -EINVAL;
755 	}
756 
757 	if (!config->num_cus) {
758 		XDNA_ERR(xdna, "Number of CU is zero");
759 		return -EINVAL;
760 	}
761 
762 	total_size = struct_size(config, cu_configs, config->num_cus);
763 	if (total_size > size) {
764 		XDNA_ERR(xdna, "CU config size larger than buffer size");
765 		return -EINVAL;
766 	}
767 
768 	hwctx->cus = kmemdup(config, total_size, GFP_KERNEL);
769 	if (!hwctx->cus)
770 		return -ENOMEM;
771 
772 	ret = amdxdna_pm_resume_get(xdna);
773 	if (ret)
774 		goto free_cus;
775 
776 	ret = aie2_config_cu(hwctx, aie2_config_cu_resp_handler);
777 	if (ret) {
778 		XDNA_ERR(xdna, "Config CU to firmware failed, ret %d", ret);
779 		goto pm_suspend_put;
780 	}
781 
782 	wmb(); /* Order the CU config writes before the status update, avoiding locking when command submit checks status */
783 	hwctx->status = HWCTX_STAT_READY;
784 
785 	return 0;
786 
787 pm_suspend_put:
788 	amdxdna_pm_suspend_put(xdna);
789 free_cus:
790 	kfree(hwctx->cus);
791 	hwctx->cus = NULL;
792 	return ret;
793 }
794 
795 static void aie2_cmd_wait(struct amdxdna_hwctx *hwctx, u64 seq)
796 {
797 	struct dma_fence *out_fence = aie2_cmd_get_out_fence(hwctx, seq);
798 
799 	if (!out_fence) {
800 		XDNA_ERR(hwctx->client->xdna, "Failed to get fence");
801 		return;
802 	}
803 
804 	dma_fence_wait_timeout(out_fence, false, MAX_SCHEDULE_TIMEOUT);
805 	dma_fence_put(out_fence);
806 }
807 
808 static int aie2_hwctx_cfg_debug_bo(struct amdxdna_hwctx *hwctx, u32 bo_hdl,
809 				   bool attach)
810 {
811 	struct amdxdna_client *client = hwctx->client;
812 	struct amdxdna_dev *xdna = client->xdna;
813 	struct amdxdna_drv_cmd cmd = { 0 };
814 	struct amdxdna_gem_obj *abo;
815 	u64 seq;
816 	int ret;
817 
818 	abo = amdxdna_gem_get_obj(client, bo_hdl, AMDXDNA_BO_DEV);
819 	if (!abo) {
820 		XDNA_ERR(xdna, "Get bo %d failed", bo_hdl);
821 		return -EINVAL;
822 	}
823 
824 	if (attach) {
825 		if (abo->assigned_hwctx != AMDXDNA_INVALID_CTX_HANDLE) {
826 			ret = -EBUSY;
827 			goto put_obj;
828 		}
829 		cmd.opcode = ATTACH_DEBUG_BO;
830 	} else {
831 		if (abo->assigned_hwctx != hwctx->id) {
832 			ret = -EINVAL;
833 			goto put_obj;
834 		}
835 		cmd.opcode = DETACH_DEBUG_BO;
836 	}
837 
838 	ret = amdxdna_cmd_submit(client, &cmd, AMDXDNA_INVALID_BO_HANDLE,
839 				 &bo_hdl, 1, hwctx->id, &seq);
840 	if (ret) {
841 		XDNA_ERR(xdna, "Submit command failed");
842 		goto put_obj;
843 	}
844 
845 	aie2_cmd_wait(hwctx, seq);
846 	if (cmd.result) {
847 		XDNA_ERR(xdna, "Response failure 0x%x", cmd.result);
848 		goto put_obj;
849 	}
850 
851 	if (attach)
852 		abo->assigned_hwctx = hwctx->id;
853 	else
854 		abo->assigned_hwctx = AMDXDNA_INVALID_CTX_HANDLE;
855 
856 	XDNA_DBG(xdna, "Config debug BO %d to %s", bo_hdl, hwctx->name);
857 
858 put_obj:
859 	amdxdna_gem_put_obj(abo);
860 	return ret;
861 }
862 
863 int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size)
864 {
865 	struct amdxdna_dev *xdna = hwctx->client->xdna;
866 
867 	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
868 	switch (type) {
869 	case DRM_AMDXDNA_HWCTX_CONFIG_CU:
870 		return aie2_hwctx_cu_config(hwctx, buf, size);
871 	case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF:
872 		return aie2_hwctx_cfg_debug_bo(hwctx, (u32)value, true);
873 	case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF:
874 		return aie2_hwctx_cfg_debug_bo(hwctx, (u32)value, false);
875 	default:
876 		XDNA_DBG(xdna, "Not supported type %d", type);
877 		return -EOPNOTSUPP;
878 	}
879 }
880 
881 int aie2_hwctx_sync_debug_bo(struct amdxdna_hwctx *hwctx, u32 debug_bo_hdl)
882 {
883 	struct amdxdna_client *client = hwctx->client;
884 	struct amdxdna_dev *xdna = client->xdna;
885 	struct amdxdna_drv_cmd cmd = { 0 };
886 	u64 seq;
887 	int ret;
888 
889 	cmd.opcode = SYNC_DEBUG_BO;
890 	ret = amdxdna_cmd_submit(client, &cmd, AMDXDNA_INVALID_BO_HANDLE,
891 				 &debug_bo_hdl, 1, hwctx->id, &seq);
892 	if (ret) {
893 		XDNA_ERR(xdna, "Submit command failed");
894 		return ret;
895 	}
896 
897 	aie2_cmd_wait(hwctx, seq);
898 	if (cmd.result) {
899 		XDNA_ERR(xdna, "Response failure 0x%x", cmd.result);
900 		return -EINVAL;
901 	}
902 
903 	return 0;
904 }
905 
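/*
 * Fault in any invalidated userptr mappings backing @abo, following the
 * standard mmu_interval_notifier pattern: snapshot the notifier sequence,
 * call hmm_range_fault(), then retry if the range was invalidated again
 * before the result could be committed under notifier_lock.
 */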
906 static int aie2_populate_range(struct amdxdna_gem_obj *abo)
907 {
908 	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
909 	struct amdxdna_umap *mapp;
910 	unsigned long timeout;
911 	struct mm_struct *mm;
912 	bool found;
913 	int ret;
914 
915 	timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
916 again:
917 	found = false;
918 	down_write(&xdna->notifier_lock);
919 	list_for_each_entry(mapp, &abo->mem.umap_list, node) {
920 		if (mapp->invalid) {
921 			found = true;
922 			break;
923 		}
924 	}
925 
926 	if (!found) {
927 		abo->mem.map_invalid = false;
928 		up_write(&xdna->notifier_lock);
929 		return 0;
930 	}
931 	kref_get(&mapp->refcnt);
932 	up_write(&xdna->notifier_lock);
933 
934 	XDNA_DBG(xdna, "populate memory range %lx %lx",
935 		 mapp->vma->vm_start, mapp->vma->vm_end);
936 	mm = mapp->notifier.mm;
937 	if (!mmget_not_zero(mm)) {
938 		amdxdna_umap_put(mapp);
939 		return -EFAULT;
940 	}
941 
942 	mapp->range.notifier_seq = mmu_interval_read_begin(&mapp->notifier);
943 	mmap_read_lock(mm);
944 	ret = hmm_range_fault(&mapp->range);
945 	mmap_read_unlock(mm);
946 	if (ret) {
947 		if (time_after(jiffies, timeout)) {
948 			ret = -ETIME;
949 			goto put_mm;
950 		}
951 
952 		if (ret == -EBUSY) {
953 			amdxdna_umap_put(mapp);
954 			goto again;
955 		}
956 
957 		goto put_mm;
958 	}
959 
960 	down_write(&xdna->notifier_lock);
961 	if (mmu_interval_read_retry(&mapp->notifier, mapp->range.notifier_seq)) {
962 		up_write(&xdna->notifier_lock);
963 		amdxdna_umap_put(mapp);
964 		goto again;
965 	}
966 	mapp->invalid = false;
967 	up_write(&xdna->notifier_lock);
968 	amdxdna_umap_put(mapp);
969 	goto again;
970 
971 put_mm:
972 	amdxdna_umap_put(mapp);
973 	mmput(mm);
974 	return ret;
975 }
976 
977 int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq)
978 {
979 	struct amdxdna_dev *xdna = hwctx->client->xdna;
980 	struct ww_acquire_ctx acquire_ctx;
981 	struct dma_fence_chain *chain;
982 	struct amdxdna_gem_obj *abo;
983 	unsigned long timeout = 0;
984 	int ret, i;
985 
986 	ret = down_interruptible(&hwctx->priv->job_sem);
987 	if (ret) {
988 		XDNA_ERR(xdna, "Grab job sem failed, ret %d", ret);
989 		return ret;
990 	}
991 
992 	chain = dma_fence_chain_alloc();
993 	if (!chain) {
994 		XDNA_ERR(xdna, "Alloc fence chain failed");
995 		ret = -ENOMEM;
996 		goto up_sem;
997 	}
998 
999 	ret = drm_sched_job_init(&job->base, &hwctx->priv->entity, 1, hwctx,
1000 				 hwctx->client->filp->client_id);
1001 	if (ret) {
1002 		XDNA_ERR(xdna, "DRM job init failed, ret %d", ret);
1003 		goto free_chain;
1004 	}
1005 
1006 	ret = amdxdna_pm_resume_get(xdna);
1007 	if (ret)
1008 		goto cleanup_job;
1009 
1010 retry:
1011 	ret = drm_gem_lock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
1012 	if (ret) {
1013 		XDNA_WARN(xdna, "Failed to lock BOs, ret %d", ret);
1014 		goto suspend_put;
1015 	}
1016 
1017 	for (i = 0; i < job->bo_cnt; i++) {
1018 		ret = dma_resv_reserve_fences(job->bos[i]->resv, 1);
1019 		if (ret) {
1020 			XDNA_WARN(xdna, "Failed to reserve fences %d", ret);
1021 			drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
1022 			goto suspend_put;
1023 		}
1024 	}
1025 
1026 	down_read(&xdna->notifier_lock);
1027 	for (i = 0; i < job->bo_cnt; i++) {
1028 		abo = to_xdna_obj(job->bos[i]);
1029 		if (abo->mem.map_invalid) {
1030 			up_read(&xdna->notifier_lock);
1031 			drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
1032 			if (!timeout) {
1033 				timeout = jiffies +
1034 					msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
1035 			} else if (time_after(jiffies, timeout)) {
1036 				ret = -ETIME;
1037 				goto suspend_put;
1038 			}
1039 
1040 			ret = aie2_populate_range(abo);
1041 			if (ret)
1042 				goto suspend_put;
1043 			goto retry;
1044 		}
1045 	}
1046 
1047 	mutex_lock(&hwctx->priv->io_lock);
1048 	drm_sched_job_arm(&job->base);
1049 	job->out_fence = dma_fence_get(&job->base.s_fence->finished);
1050 	for (i = 0; i < job->bo_cnt; i++)
1051 		dma_resv_add_fence(job->bos[i]->resv, job->out_fence, DMA_RESV_USAGE_WRITE);
1052 	job->seq = hwctx->priv->seq++;
1053 	kref_get(&job->refcnt);
1054 	drm_sched_entity_push_job(&job->base);
1055 
1056 	*seq = job->seq;
1057 	drm_syncobj_add_point(hwctx->priv->syncobj, chain, job->out_fence, *seq);
1058 	mutex_unlock(&hwctx->priv->io_lock);
1059 
1060 	up_read(&xdna->notifier_lock);
1061 	drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
1062 
1063 	aie2_job_put(job);
1064 	atomic64_inc(&hwctx->job_submit_cnt);
1065 
1066 	return 0;
1067 
1068 suspend_put:
1069 	amdxdna_pm_suspend_put(xdna);
1070 cleanup_job:
1071 	drm_sched_job_cleanup(&job->base);
1072 free_chain:
1073 	dma_fence_chain_free(chain);
1074 up_sem:
1075 	up(&hwctx->priv->job_sem);
1076 	job->job_done = true;
1077 	return ret;
1078 }
1079 
1080 void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo,
1081 			 unsigned long cur_seq)
1082 {
1083 	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
1084 	struct drm_gem_object *gobj = to_gobj(abo);
1085 	long ret;
1086 
1087 	ret = dma_resv_wait_timeout(gobj->resv, DMA_RESV_USAGE_BOOKKEEP,
1088 				    true, MAX_SCHEDULE_TIMEOUT);
1089 	if (!ret || ret == -ERESTARTSYS)
1090 		XDNA_ERR(xdna, "Failed to wait for bo, ret %ld", ret);
1091 }
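
/*
 * Caller-side sketch (an assumption; the notifier registration lives outside
 * this file, in the amdxdna GEM code): the userptr mmu_interval_notifier
 * callback marks the mapping invalid under notifier_lock and then calls
 * aie2_hmm_invalidate() so that jobs already using the BO are drained before
 * the pages go away.
 */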
1092