xref: /linux/drivers/accel/amdxdna/aie2_ctx.c (revision 1fd1dc41724319406b0aff221a352a400b0ddfc5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2024, Advanced Micro Devices, Inc.
4  */
5 
6 #include <drm/amdxdna_accel.h>
7 #include <drm/drm_device.h>
8 #include <drm/drm_gem.h>
9 #include <drm/drm_gem_shmem_helper.h>
10 #include <drm/drm_print.h>
11 #include <drm/drm_syncobj.h>
12 #include <linux/hmm.h>
13 #include <linux/types.h>
14 #include <linux/xarray.h>
15 #include <trace/events/amdxdna.h>
16 
17 #include "aie2_msg_priv.h"
18 #include "aie2_pci.h"
19 #include "aie2_solver.h"
20 #include "amdxdna_ctx.h"
21 #include "amdxdna_gem.h"
22 #include "amdxdna_mailbox.h"
23 #include "amdxdna_pci_drv.h"
24 #include "amdxdna_pm.h"
25 
/* When true, even a single command is sent via the command-list path */
static bool force_cmdlist = true;
module_param(force_cmdlist, bool, 0600);
MODULE_PARM_DESC(force_cmdlist, "Force use command list (Default true)");

/* DRM scheduler job timeout for this context's queue */
#define HWCTX_MAX_TIMEOUT	60000 /* milliseconds */
31 
32 static void aie2_job_release(struct kref *ref)
33 {
34 	struct amdxdna_sched_job *job;
35 
36 	job = container_of(ref, struct amdxdna_sched_job, refcnt);
37 	amdxdna_sched_job_cleanup(job);
38 	atomic64_inc(&job->hwctx->job_free_cnt);
39 	wake_up(&job->hwctx->priv->job_free_wq);
40 	if (job->out_fence)
41 		dma_fence_put(job->out_fence);
42 	kfree(job);
43 }
44 
/* Drop one job reference; last put frees the job via aie2_job_release(). */
static void aie2_job_put(struct amdxdna_sched_job *job)
{
	kref_put(&job->refcnt, aie2_job_release);
}
49 
/*
 * Quiesce a hardware context: park the DRM scheduler, destroy the
 * firmware context (which aborts outstanding commands), then restart
 * the scheduler so queued jobs can drain.
 *
 * The bad_job is used in aie2_sched_job_timedout, otherwise, set it to NULL
 */
static void aie2_hwctx_stop(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx,
			    struct drm_sched_job *bad_job)
{
	drm_sched_stop(&hwctx->priv->sched, bad_job);
	aie2_destroy_context(xdna->dev_handle, hwctx);
	drm_sched_start(&hwctx->priv->sched, 0);
}
58 
/*
 * Rebuild a hardware context after aie2_hwctx_stop(): re-create the FW
 * context, re-map the client heap buffer, and re-apply the previously
 * configured CUs (aie2_config_cu() with a NULL response handler).
 * Returns 0 on success, negative errno on the first failing step.
 */
static int aie2_hwctx_restart(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx)
{
	struct amdxdna_gem_obj *heap = hwctx->priv->heap;
	int ret;

	ret = aie2_create_context(xdna->dev_handle, hwctx);
	if (ret) {
		XDNA_ERR(xdna, "Create hwctx failed, ret %d", ret);
		goto out;
	}

	ret = aie2_map_host_buf(xdna->dev_handle, hwctx->fw_ctx_id,
				heap->mem.userptr, heap->mem.size);
	if (ret) {
		XDNA_ERR(xdna, "Map host buf failed, ret %d", ret);
		goto out;
	}

	ret = aie2_config_cu(hwctx, NULL);
	if (ret) {
		XDNA_ERR(xdna, "Config cu failed, ret %d", ret);
		goto out;
	}

out:
	/* Logged on success and failure; ret carries the final status */
	XDNA_DBG(xdna, "%s restarted, ret %d", hwctx->name, ret);
	return ret;
}
87 
88 static struct dma_fence *aie2_cmd_get_out_fence(struct amdxdna_hwctx *hwctx, u64 seq)
89 {
90 	struct dma_fence *fence, *out_fence = NULL;
91 	int ret;
92 
93 	fence = drm_syncobj_fence_get(hwctx->priv->syncobj);
94 	if (!fence)
95 		return NULL;
96 
97 	ret = dma_fence_chain_find_seqno(&fence,  seq);
98 	if (ret)
99 		goto out;
100 
101 	out_fence = dma_fence_get(dma_fence_chain_contained(fence));
102 
103 out:
104 	dma_fence_put(fence);
105 	return out_fence;
106 }
107 
108 static void aie2_hwctx_wait_for_idle(struct amdxdna_hwctx *hwctx)
109 {
110 	struct dma_fence *fence;
111 
112 	fence = aie2_cmd_get_out_fence(hwctx, hwctx->priv->seq - 1);
113 	if (!fence)
114 		return;
115 
116 	/* Wait up to 2 seconds for fw to finish all pending requests */
117 	dma_fence_wait_timeout(fence, false, msecs_to_jiffies(2000));
118 	dma_fence_put(fence);
119 }
120 
/*
 * amdxdna_hwctx_walk() callback: drain pending work, then stop one
 * hardware context. Always returns 0 so the walk visits every context.
 */
static int aie2_hwctx_suspend_cb(struct amdxdna_hwctx *hwctx, void *arg)
{
	struct amdxdna_dev *xdna = hwctx->client->xdna;

	aie2_hwctx_wait_for_idle(hwctx);
	aie2_hwctx_stop(xdna, hwctx, NULL);

	return 0;
}
130 
/*
 * Suspend all hardware contexts of @client. Caller must hold
 * xdna->dev_lock (enforced by the WARN below).
 */
void aie2_hwctx_suspend(struct amdxdna_client *client)
{
	struct amdxdna_dev *xdna = client->xdna;

	/*
	 * Command timeout is unlikely. But if it happens, it doesn't
	 * break the system. aie2_hwctx_stop() will destroy mailbox
	 * and abort all commands.
	 */
	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
	amdxdna_hwctx_walk(client, NULL, aie2_hwctx_suspend_cb);
}
143 
/* amdxdna_hwctx_walk() callback: re-create one hardware context. */
static int aie2_hwctx_resume_cb(struct amdxdna_hwctx *hwctx, void *arg)
{
	struct amdxdna_dev *xdna = hwctx->client->xdna;

	return aie2_hwctx_restart(xdna, hwctx);
}
150 
/* Resume all hardware contexts of @client after a suspend. */
int aie2_hwctx_resume(struct amdxdna_client *client)
{
	/*
	 * The resume path cannot guarantee that the mailbox channel can
	 * be regenerated. If that happens, submitting a message to the
	 * mailbox channel will return an error.
	 */
	return amdxdna_hwctx_walk(client, NULL, aie2_hwctx_resume_cb);
}
160 
/*
 * Common completion path shared by all mailbox response handlers:
 * signal the job fence, release the PM reference taken in
 * aie2_sched_job_run(), and drop the run-time job/mm references.
 */
static void
aie2_sched_notify(struct amdxdna_sched_job *job)
{
	struct dma_fence *fence = job->fence;

	trace_xdna_job(&job->base, job->hwctx->name, "signaled fence", job->seq);

	amdxdna_pm_suspend_put(job->hwctx->client->xdna);
	job->hwctx->priv->completed++;
	dma_fence_signal(fence);

	/* Free the submit slot, then mark done so free_job skips its up() */
	up(&job->hwctx->priv->job_sem);
	job->job_done = true;
	mmput_async(job->mm);
	aie2_job_put(job);
}
177 
/*
 * Mailbox response handler for a single execbuf command. Maps the FW
 * status word to an ERT state on the command BO, then runs the common
 * completion path. Returns -EINVAL when the response is unusable
 * (job timed out, missing data, or unexpected payload size).
 */
static int
aie2_sched_resp_handler(void *handle, void __iomem *data, size_t size)
{
	struct amdxdna_sched_job *job = handle;
	struct amdxdna_gem_obj *cmd_abo;
	int ret = 0;
	u32 status;

	cmd_abo = job->cmd_bo;

	/* A timed-out job's response is stale; mark it and bail out */
	if (unlikely(job->job_timeout)) {
		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_TIMEOUT);
		ret = -EINVAL;
		goto out;
	}

	if (unlikely(!data) || unlikely(size != sizeof(u32))) {
		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
		ret = -EINVAL;
		goto out;
	}

	status = readl(data);
	XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status);
	if (status == AIE2_STATUS_SUCCESS)
		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED);
	else
		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ERROR);

out:
	aie2_sched_notify(job);
	return ret;
}
211 
212 static int
213 aie2_sched_drvcmd_resp_handler(void *handle, void __iomem *data, size_t size)
214 {
215 	struct amdxdna_sched_job *job = handle;
216 	int ret = 0;
217 
218 	if (unlikely(!data))
219 		goto out;
220 
221 	if (unlikely(size != sizeof(u32))) {
222 		ret = -EINVAL;
223 		goto out;
224 	}
225 
226 	job->drv_cmd->result = readl(data);
227 
228 out:
229 	aie2_sched_notify(job);
230 	return ret;
231 }
232 
/*
 * Mailbox response handler for command-list submissions. The response
 * is a struct cmd_chain_resp on the BAR: overall status, plus, on
 * failure, the index and status of the first failing command. For an
 * ERT_CMD_CHAIN the failing index is reported back to user space in
 * the chain payload's error_index field.
 */
static int
aie2_sched_cmdlist_resp_handler(void *handle, void __iomem *data, size_t size)
{
	struct amdxdna_sched_job *job = handle;
	struct amdxdna_gem_obj *cmd_abo;
	struct amdxdna_dev *xdna;
	u32 fail_cmd_status;
	u32 fail_cmd_idx;
	u32 cmd_status;
	int ret = 0;

	cmd_abo = job->cmd_bo;

	/* A timed-out job's response is stale; mark it and bail out */
	if (unlikely(job->job_timeout)) {
		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_TIMEOUT);
		ret = -EINVAL;
		goto out;
	}

	if (unlikely(!data) || unlikely(size != sizeof(u32) * 3)) {
		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
		ret = -EINVAL;
		goto out;
	}

	cmd_status = readl(data + offsetof(struct cmd_chain_resp, status));
	xdna = job->hwctx->client->xdna;
	XDNA_DBG(xdna, "Status 0x%x", cmd_status);
	if (cmd_status == AIE2_STATUS_SUCCESS) {
		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED);
		goto out;
	}

	/* Slow path to handle error, read from ringbuf on BAR */
	fail_cmd_idx = readl(data + offsetof(struct cmd_chain_resp, fail_cmd_idx));
	fail_cmd_status = readl(data + offsetof(struct cmd_chain_resp, fail_cmd_status));
	XDNA_DBG(xdna, "Failed cmd idx %d, status 0x%x",
		 fail_cmd_idx, fail_cmd_status);

	/* Overall failure but no failing command status: inconsistent, abort */
	if (fail_cmd_status == AIE2_STATUS_SUCCESS) {
		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
		ret = -EINVAL;
		goto out;
	}
	amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ERROR);

	if (amdxdna_cmd_get_op(cmd_abo) == ERT_CMD_CHAIN) {
		struct amdxdna_cmd_chain *cc = amdxdna_cmd_get_payload(cmd_abo, NULL);

		/* Clamp an out-of-range index reported by FW to 0 */
		cc->error_index = fail_cmd_idx;
		if (cc->error_index >= cc->command_count)
			cc->error_index = 0;
	}
out:
	aie2_sched_notify(job);
	return ret;
}
290 
/*
 * drm_sched run_job callback: take a PM reference, pin the submitter's
 * mm, then hand the job to the firmware via the mailbox. The job ref
 * and fence ref taken here are released by the response handler path
 * (aie2_sched_notify()) on success, or unwound locally on failure.
 * Returns the job fence, NULL (PM/mailbox unavailable), or ERR_PTR.
 */
static struct dma_fence *
aie2_sched_job_run(struct drm_sched_job *sched_job)
{
	struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
	struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
	struct amdxdna_hwctx *hwctx = job->hwctx;
	struct dma_fence *fence;
	int ret;

	ret = amdxdna_pm_resume_get(hwctx->client->xdna);
	if (ret)
		return NULL;

	/* No mailbox channel: context was stopped, cannot submit */
	if (!hwctx->priv->mbox_chann) {
		amdxdna_pm_suspend_put(hwctx->client->xdna);
		return NULL;
	}

	/* Submitting process already exited; nothing to run against */
	if (!mmget_not_zero(job->mm)) {
		amdxdna_pm_suspend_put(hwctx->client->xdna);
		return ERR_PTR(-ESRCH);
	}

	kref_get(&job->refcnt);
	fence = dma_fence_get(job->fence);

	/* Driver-internal commands bypass the normal execbuf paths */
	if (job->drv_cmd) {
		switch (job->drv_cmd->opcode) {
		case SYNC_DEBUG_BO:
			ret = aie2_sync_bo(hwctx, job, aie2_sched_drvcmd_resp_handler);
			break;
		case ATTACH_DEBUG_BO:
			ret = aie2_config_debug_bo(hwctx, job, aie2_sched_drvcmd_resp_handler);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		goto out;
	}

	amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_NEW);

	if (amdxdna_cmd_get_op(cmd_abo) == ERT_CMD_CHAIN)
		ret = aie2_cmdlist_multi_execbuf(hwctx, job, aie2_sched_cmdlist_resp_handler);
	else if (force_cmdlist)
		ret = aie2_cmdlist_single_execbuf(hwctx, job, aie2_sched_cmdlist_resp_handler);
	else
		ret = aie2_execbuf(hwctx, job, aie2_sched_resp_handler);

out:
	if (ret) {
		/* Undo the refs taken above; response handler won't run */
		amdxdna_pm_suspend_put(hwctx->client->xdna);
		dma_fence_put(job->fence);
		aie2_job_put(job);
		mmput(job->mm);
		fence = ERR_PTR(ret);
	}
	trace_xdna_job(sched_job, hwctx->name, "sent to device", job->seq);

	return fence;
}
353 
/*
 * drm_sched free_job callback. If the completion path never ran
 * (job->job_done still false), release the submit-slot semaphore here
 * so aie2_cmd_submit() slots are not leaked.
 */
static void aie2_sched_job_free(struct drm_sched_job *sched_job)
{
	struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
	struct amdxdna_hwctx *hwctx = job->hwctx;

	trace_xdna_job(sched_job, hwctx->name, "job free", job->seq);
	if (!job->job_done)
		up(&hwctx->priv->job_sem);

	drm_sched_job_cleanup(sched_job);
	aie2_job_put(job);
}
366 
/*
 * drm_sched timedout_job callback: mark the job timed out (so its late
 * response, if any, is ignored), then stop and restart the context
 * under dev_lock to reset the firmware side.
 */
static enum drm_gpu_sched_stat
aie2_sched_job_timedout(struct drm_sched_job *sched_job)
{
	struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
	struct amdxdna_hwctx *hwctx = job->hwctx;
	struct amdxdna_dev *xdna;

	xdna = hwctx->client->xdna;
	trace_xdna_job(sched_job, hwctx->name, "job timedout", job->seq);
	job->job_timeout = true;
	mutex_lock(&xdna->dev_lock);
	aie2_hwctx_stop(xdna, hwctx, sched_job);

	aie2_hwctx_restart(xdna, hwctx);
	mutex_unlock(&xdna->dev_lock);

	return DRM_GPU_SCHED_STAT_RESET;
}
385 
/* DRM scheduler backend callbacks for the per-context job queue */
static const struct drm_sched_backend_ops sched_ops = {
	.run_job = aie2_sched_job_run,
	.free_job = aie2_sched_job_free,
	.timedout_job = aie2_sched_job_timedout,
};
391 
/*
 * Build the list of candidate start columns for this context's column
 * allocation. num_col is derived from num_tiles / core row count; the
 * candidates are the alignment-compatible start positions in
 * [first_col, total_col - num_col]. Allocates hwctx->col_list (freed
 * by the caller's teardown path). Returns 0 or negative errno.
 */
static int aie2_hwctx_col_list(struct amdxdna_hwctx *hwctx)
{
	struct amdxdna_dev *xdna = hwctx->client->xdna;
	struct amdxdna_dev_hdl *ndev;
	int start, end, first, last;
	u32 width = 1, entries = 0;
	int i;

	if (!hwctx->num_tiles) {
		XDNA_ERR(xdna, "Number of tiles is zero");
		return -EINVAL;
	}

	ndev = xdna->dev_handle;
	if (unlikely(!ndev->metadata.core.row_count)) {
		XDNA_WARN(xdna, "Core tile row count is zero");
		return -EINVAL;
	}

	hwctx->num_col = hwctx->num_tiles / ndev->metadata.core.row_count;
	if (!hwctx->num_col || hwctx->num_col > ndev->total_col) {
		XDNA_ERR(xdna, "Invalid num_col %d", hwctx->num_col);
		return -EINVAL;
	}

	/* COL_ALIGN_NATURE: start columns must align to the context width */
	if (ndev->priv->col_align == COL_ALIGN_NATURE)
		width = hwctx->num_col;

	/*
	 * In range [start, end], find out columns that is multiple of width.
	 *	'first' is the first column,
	 *	'last' is the last column,
	 *	'entries' is the total number of columns.
	 */
	start =  xdna->dev_info->first_col;
	end =  ndev->total_col - hwctx->num_col;
	if (start > 0 && end == 0) {
		XDNA_DBG(xdna, "Force start from col 0");
		start = 0;
	}
	first = start + (width - start % width) % width;
	last = end - end % width;
	if (last >= first)
		entries = (last - first) / width + 1;
	XDNA_DBG(xdna, "start %d end %d first %d last %d",
		 start, end, first, last);

	if (unlikely(!entries)) {
		XDNA_ERR(xdna, "Start %d end %d width %d",
			 start, end, width);
		return -EINVAL;
	}

	hwctx->col_list = kmalloc_array(entries, sizeof(*hwctx->col_list), GFP_KERNEL);
	if (!hwctx->col_list)
		return -ENOMEM;

	hwctx->col_list_len = entries;
	hwctx->col_list[0] = first;
	for (i = 1; i < entries; i++)
		hwctx->col_list[i] = hwctx->col_list[i - 1] + width;

	print_hex_dump_debug("col_list: ", DUMP_PREFIX_OFFSET, 16, 4, hwctx->col_list,
			     entries * sizeof(*hwctx->col_list), false);
	return 0;
}
458 
/*
 * Reserve AIE columns for this context. On AIE2_TEMPORAL_ONLY devices
 * the whole array is claimed and the FW context is created directly;
 * otherwise an allocation request (columns + QoS) is handed to the
 * resource solver, which creates the context via its callbacks.
 */
static int aie2_alloc_resource(struct amdxdna_hwctx *hwctx)
{
	struct amdxdna_dev *xdna = hwctx->client->xdna;
	struct alloc_requests *xrs_req;
	int ret;

	if (AIE2_FEATURE_ON(xdna->dev_handle, AIE2_TEMPORAL_ONLY)) {
		/* Context owns every column; track the surplus separately */
		hwctx->num_unused_col = xdna->dev_handle->total_col - hwctx->num_col;
		hwctx->num_col = xdna->dev_handle->total_col;
		return aie2_create_context(xdna->dev_handle, hwctx);
	}

	xrs_req = kzalloc_obj(*xrs_req);
	if (!xrs_req)
		return -ENOMEM;

	/* Candidate start columns computed by aie2_hwctx_col_list() */
	xrs_req->cdo.start_cols = hwctx->col_list;
	xrs_req->cdo.cols_len = hwctx->col_list_len;
	xrs_req->cdo.ncols = hwctx->num_col;
	xrs_req->cdo.qos_cap.opc = hwctx->max_opc;

	/* User-requested QoS parameters */
	xrs_req->rqos.gops = hwctx->qos.gops;
	xrs_req->rqos.fps = hwctx->qos.fps;
	xrs_req->rqos.dma_bw = hwctx->qos.dma_bandwidth;
	xrs_req->rqos.latency = hwctx->qos.latency;
	xrs_req->rqos.exec_time = hwctx->qos.frame_exec_time;
	xrs_req->rqos.priority = hwctx->qos.priority;

	/* Context pointer doubles as the solver's request id */
	xrs_req->rid = (uintptr_t)hwctx;

	ret = xrs_allocate_resource(xdna->xrs_hdl, xrs_req, hwctx);
	if (ret)
		XDNA_ERR(xdna, "Allocate AIE resource failed, ret %d", ret);

	kfree(xrs_req);
	return ret;
}
496 
497 static void aie2_release_resource(struct amdxdna_hwctx *hwctx)
498 {
499 	struct amdxdna_dev *xdna = hwctx->client->xdna;
500 	int ret;
501 
502 	if (AIE2_FEATURE_ON(xdna->dev_handle, AIE2_TEMPORAL_ONLY)) {
503 		ret = aie2_destroy_context(xdna->dev_handle, hwctx);
504 		if (ret && ret != -ENODEV)
505 			XDNA_ERR(xdna, "Destroy temporal only context failed, ret %d", ret);
506 	} else {
507 		ret = xrs_release_resource(xdna->xrs_hdl, (uintptr_t)hwctx);
508 		if (ret)
509 			XDNA_ERR(xdna, "Release AIE resource failed, ret %d", ret);
510 	}
511 }
512 
/*
 * Create the context's completion syncobj and export a handle for user
 * space. On success the driver keeps its own syncobj reference in
 * hwctx->priv->syncobj (dropped in aie2_ctx_syncobj_destroy()) and the
 * handle in hwctx->syncobj_hdl.
 */
static int aie2_ctx_syncobj_create(struct amdxdna_hwctx *hwctx)
{
	struct amdxdna_dev *xdna = hwctx->client->xdna;
	struct drm_file *filp = hwctx->client->filp;
	struct drm_syncobj *syncobj;
	u32 hdl;
	int ret;

	hwctx->syncobj_hdl = AMDXDNA_INVALID_FENCE_HANDLE;

	ret = drm_syncobj_create(&syncobj, 0, NULL);
	if (ret) {
		XDNA_ERR(xdna, "Create ctx syncobj failed, ret %d", ret);
		return ret;
	}
	/* The handle holds its own reference; drop ours on failure */
	ret = drm_syncobj_get_handle(filp, syncobj, &hdl);
	if (ret) {
		drm_syncobj_put(syncobj);
		XDNA_ERR(xdna, "Create ctx syncobj handle failed, ret %d", ret);
		return ret;
	}
	hwctx->priv->syncobj = syncobj;
	hwctx->syncobj_hdl = hdl;

	return 0;
}
539 
/* Drop the driver's syncobj reference taken in aie2_ctx_syncobj_create(). */
static void aie2_ctx_syncobj_destroy(struct amdxdna_hwctx *hwctx)
{
	/*
	 * The syncobj_hdl is owned by user space and will be cleaned up
	 * separately.
	 */
	drm_syncobj_put(hwctx->priv->syncobj);
}
548 
/*
 * Initialize a hardware context: allocate private state, pin the
 * client's heap, create device command buffers, bring up the DRM
 * scheduler and entity, compute the column candidates, and create the
 * FW context plus the completion syncobj. Uses the _locked PM getter,
 * so a lock is presumably held by the caller — NOTE(review): confirm
 * dev_lock is held on this path.
 *
 * On failure all partially acquired resources are unwound in reverse
 * order; returns 0 or negative errno.
 */
int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
{
	struct amdxdna_client *client = hwctx->client;
	struct amdxdna_dev *xdna = client->xdna;
	const struct drm_sched_init_args args = {
		.ops = &sched_ops,
		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
		.credit_limit = HWCTX_MAX_CMDS,
		.timeout = msecs_to_jiffies(HWCTX_MAX_TIMEOUT),
		.name = "amdxdna_js",
		.dev = xdna->ddev.dev,
	};
	struct drm_gpu_scheduler *sched;
	struct amdxdna_hwctx_priv *priv;
	struct amdxdna_gem_obj *heap;
	int i, ret;

	priv = kzalloc_obj(*hwctx->priv);
	if (!priv)
		return -ENOMEM;
	hwctx->priv = priv;

	/* Take a reference on the client heap under mm_lock */
	mutex_lock(&client->mm_lock);
	heap = client->dev_heap;
	if (!heap) {
		XDNA_ERR(xdna, "The client dev heap object not exist");
		mutex_unlock(&client->mm_lock);
		ret = -ENOENT;
		goto free_priv;
	}
	drm_gem_object_get(to_gobj(heap));
	mutex_unlock(&client->mm_lock);
	priv->heap = heap;
	/* Limits in-flight submissions; see aie2_cmd_submit() */
	sema_init(&priv->job_sem, HWCTX_MAX_CMDS);

	ret = amdxdna_gem_pin(heap);
	if (ret) {
		XDNA_ERR(xdna, "Dev heap pin failed, ret %d", ret);
		goto put_heap;
	}

	/* Per-context device BOs used to build command-list messages */
	for (i = 0; i < ARRAY_SIZE(priv->cmd_buf); i++) {
		struct amdxdna_gem_obj *abo;
		struct amdxdna_drm_create_bo args = {
			.flags = 0,
			.type = AMDXDNA_BO_DEV,
			.vaddr = 0,
			.size = MAX_CHAIN_CMDBUF_SIZE,
		};

		abo = amdxdna_drm_alloc_dev_bo(&xdna->ddev, &args, client->filp);
		if (IS_ERR(abo)) {
			ret = PTR_ERR(abo);
			goto free_cmd_bufs;
		}

		XDNA_DBG(xdna, "Command buf %d addr 0x%llx size 0x%lx",
			 i, abo->mem.dev_addr, abo->mem.size);
		priv->cmd_buf[i] = abo;
	}

	sched = &priv->sched;
	mutex_init(&priv->io_lock);

	/*
	 * Lockdep priming: record that io_lock can be taken in reclaim
	 * context, so allocating under io_lock later trips lockdep —
	 * NOTE(review): intent inferred from the idiom, confirm.
	 */
	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&priv->io_lock);
	fs_reclaim_release(GFP_KERNEL);

	ret = drm_sched_init(sched, &args);
	if (ret) {
		XDNA_ERR(xdna, "Failed to init DRM scheduler. ret %d", ret);
		goto free_cmd_bufs;
	}

	ret = drm_sched_entity_init(&priv->entity, DRM_SCHED_PRIORITY_NORMAL,
				    &sched, 1, NULL);
	if (ret) {
		XDNA_ERR(xdna, "Failed to initial sched entiry. ret %d", ret);
		goto free_sched;
	}

	ret = aie2_hwctx_col_list(hwctx);
	if (ret) {
		XDNA_ERR(xdna, "Create col list failed, ret %d", ret);
		goto free_entity;
	}

	/* Device must be awake while talking to firmware below */
	ret = amdxdna_pm_resume_get_locked(xdna);
	if (ret)
		goto free_col_list;

	ret = aie2_alloc_resource(hwctx);
	if (ret) {
		XDNA_ERR(xdna, "Alloc hw resource failed, ret %d", ret);
		goto suspend_put;
	}

	ret = aie2_map_host_buf(xdna->dev_handle, hwctx->fw_ctx_id,
				heap->mem.userptr, heap->mem.size);
	if (ret) {
		XDNA_ERR(xdna, "Map host buffer failed, ret %d", ret);
		goto release_resource;
	}

	ret = aie2_ctx_syncobj_create(hwctx);
	if (ret) {
		XDNA_ERR(xdna, "Create syncobj failed, ret %d", ret);
		goto release_resource;
	}
	amdxdna_pm_suspend_put(xdna);

	init_waitqueue_head(&priv->job_free_wq);

	XDNA_DBG(xdna, "hwctx %s init completed", hwctx->name);

	return 0;

release_resource:
	aie2_release_resource(hwctx);
suspend_put:
	amdxdna_pm_suspend_put(xdna);
free_col_list:
	kfree(hwctx->col_list);
free_entity:
	drm_sched_entity_destroy(&priv->entity);
free_sched:
	drm_sched_fini(&priv->sched);
free_cmd_bufs:
	for (i = 0; i < ARRAY_SIZE(priv->cmd_buf); i++) {
		if (!priv->cmd_buf[i])
			continue;
		drm_gem_object_put(to_gobj(priv->cmd_buf[i]));
	}
	amdxdna_gem_unpin(heap);
put_heap:
	drm_gem_object_put(to_gobj(heap));
free_priv:
	kfree(priv);
	return ret;
}
689 
/*
 * Tear down a hardware context: drain pending work, destroy the FW
 * context, wait until every submitted job has been freed, then release
 * scheduler, syncobj, command buffers and heap references.
 *
 * The unlock/lock pair around the wait implies dev_lock is held by the
 * caller and must be dropped while waiting for job completion.
 */
void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx)
{
	struct amdxdna_dev *xdna;
	int idx;

	xdna = hwctx->client->xdna;

	XDNA_DBG(xdna, "%s sequence number %lld", hwctx->name, hwctx->priv->seq);
	aie2_hwctx_wait_for_idle(hwctx);

	/* Request fw to destroy hwctx and cancel the rest pending requests */
	drm_sched_stop(&hwctx->priv->sched, NULL);
	aie2_release_resource(hwctx);
	drm_sched_start(&hwctx->priv->sched, 0);

	mutex_unlock(&xdna->dev_lock);
	drm_sched_entity_destroy(&hwctx->priv->entity);

	/* Wait for all submitted jobs to be completed or canceled */
	wait_event(hwctx->priv->job_free_wq,
		   atomic64_read(&hwctx->job_submit_cnt) ==
		   atomic64_read(&hwctx->job_free_cnt));
	mutex_lock(&xdna->dev_lock);

	drm_sched_fini(&hwctx->priv->sched);
	aie2_ctx_syncobj_destroy(hwctx);

	for (idx = 0; idx < ARRAY_SIZE(hwctx->priv->cmd_buf); idx++)
		drm_gem_object_put(to_gobj(hwctx->priv->cmd_buf[idx]));
	amdxdna_gem_unpin(hwctx->priv->heap);
	drm_gem_object_put(to_gobj(hwctx->priv->heap));

	mutex_destroy(&hwctx->priv->io_lock);
	kfree(hwctx->col_list);
	kfree(hwctx->priv);
	kfree(hwctx->cus);
}
727 
/*
 * Response handler for the config-CU message: drop the PM reference
 * taken in aie2_hwctx_cu_config(). Response payload is ignored.
 */
static int aie2_config_cu_resp_handler(void *handle, void __iomem *data, size_t size)
{
	struct amdxdna_hwctx *hwctx = handle;

	amdxdna_pm_suspend_put(hwctx->client->xdna);
	return 0;
}
735 
/*
 * Apply a one-time CU (compute unit) configuration to the context.
 * Validates the user-supplied config, caches a copy in hwctx->cus,
 * and sends it to the firmware. Reconfiguration is rejected. The PM
 * reference taken here is dropped by aie2_config_cu_resp_handler().
 */
static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size)
{
	struct amdxdna_hwctx_param_config_cu *config = buf;
	struct amdxdna_dev *xdna = hwctx->client->xdna;
	u32 total_size;
	int ret;

	XDNA_DBG(xdna, "Config %d CU to %s", config->num_cus, hwctx->name);
	if (XDNA_MBZ_DBG(xdna, config->pad, sizeof(config->pad)))
		return -EINVAL;

	if (hwctx->cus) {
		XDNA_ERR(xdna, "Not support re-config CU");
		return -EINVAL;
	}

	if (!config->num_cus) {
		XDNA_ERR(xdna, "Number of CU is zero");
		return -EINVAL;
	}

	/* struct_size() guards against num_cus overflow */
	total_size = struct_size(config, cu_configs, config->num_cus);
	if (total_size > size) {
		XDNA_ERR(xdna, "CU config larger than size");
		return -EINVAL;
	}

	hwctx->cus = kmemdup(config, total_size, GFP_KERNEL);
	if (!hwctx->cus)
		return -ENOMEM;

	ret = amdxdna_pm_resume_get_locked(xdna);
	if (ret)
		goto free_cus;

	ret = aie2_config_cu(hwctx, aie2_config_cu_resp_handler);
	if (ret) {
		XDNA_ERR(xdna, "Config CU to firmware failed, ret %d", ret);
		goto pm_suspend_put;
	}

	wmb(); /* To avoid locking in command submit when check status */

	return 0;

pm_suspend_put:
	amdxdna_pm_suspend_put(xdna);
free_cus:
	kfree(hwctx->cus);
	hwctx->cus = NULL;
	return ret;
}
788 
789 static void aie2_cmd_wait(struct amdxdna_hwctx *hwctx, u64 seq)
790 {
791 	struct dma_fence *out_fence = aie2_cmd_get_out_fence(hwctx, seq);
792 
793 	if (!out_fence) {
794 		XDNA_ERR(hwctx->client->xdna, "Failed to get fence");
795 		return;
796 	}
797 
798 	dma_fence_wait_timeout(out_fence, false, MAX_SCHEDULE_TIMEOUT);
799 	dma_fence_put(out_fence);
800 }
801 
802 static int aie2_hwctx_cfg_debug_bo(struct amdxdna_hwctx *hwctx, u32 bo_hdl,
803 				   bool attach)
804 {
805 	struct amdxdna_client *client = hwctx->client;
806 	struct amdxdna_dev *xdna = client->xdna;
807 	struct amdxdna_drv_cmd cmd = { 0 };
808 	struct amdxdna_gem_obj *abo;
809 	u64 seq;
810 	int ret;
811 
812 	abo = amdxdna_gem_get_obj(client, bo_hdl, AMDXDNA_BO_DEV);
813 	if (!abo) {
814 		XDNA_ERR(xdna, "Get bo %d failed", bo_hdl);
815 		return -EINVAL;
816 	}
817 
818 	if (attach) {
819 		if (abo->assigned_hwctx != AMDXDNA_INVALID_CTX_HANDLE) {
820 			ret = -EBUSY;
821 			goto put_obj;
822 		}
823 		cmd.opcode = ATTACH_DEBUG_BO;
824 	} else {
825 		if (abo->assigned_hwctx != hwctx->id) {
826 			ret = -EINVAL;
827 			goto put_obj;
828 		}
829 		cmd.opcode = DETACH_DEBUG_BO;
830 	}
831 
832 	ret = amdxdna_cmd_submit(client, &cmd, AMDXDNA_INVALID_BO_HANDLE,
833 				 &bo_hdl, 1, hwctx->id, &seq);
834 	if (ret) {
835 		XDNA_ERR(xdna, "Submit command failed");
836 		goto put_obj;
837 	}
838 
839 	aie2_cmd_wait(hwctx, seq);
840 	if (cmd.result) {
841 		XDNA_ERR(xdna, "Response failure 0x%x", cmd.result);
842 		goto put_obj;
843 	}
844 
845 	if (attach)
846 		abo->assigned_hwctx = hwctx->id;
847 	else
848 		abo->assigned_hwctx = AMDXDNA_INVALID_CTX_HANDLE;
849 
850 	XDNA_DBG(xdna, "Config debug BO %d to %s", bo_hdl, hwctx->name);
851 
852 put_obj:
853 	amdxdna_gem_put_obj(abo);
854 	return ret;
855 }
856 
857 int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size)
858 {
859 	struct amdxdna_dev *xdna = hwctx->client->xdna;
860 
861 	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
862 	switch (type) {
863 	case DRM_AMDXDNA_HWCTX_CONFIG_CU:
864 		return aie2_hwctx_cu_config(hwctx, buf, size);
865 	case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF:
866 		return aie2_hwctx_cfg_debug_bo(hwctx, (u32)value, true);
867 	case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF:
868 		return aie2_hwctx_cfg_debug_bo(hwctx, (u32)value, false);
869 	default:
870 		XDNA_DBG(xdna, "Not supported type %d", type);
871 		return -EOPNOTSUPP;
872 	}
873 }
874 
/*
 * Submit a SYNC_DEBUG_BO driver command for @debug_bo_hdl and wait for
 * completion. Returns 0 on success, submit errno, or -EINVAL when the
 * firmware reports a non-zero result.
 */
int aie2_hwctx_sync_debug_bo(struct amdxdna_hwctx *hwctx, u32 debug_bo_hdl)
{
	struct amdxdna_client *client = hwctx->client;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_drv_cmd cmd = { 0 };
	u64 seq;
	int ret;

	cmd.opcode = SYNC_DEBUG_BO;
	ret = amdxdna_cmd_submit(client, &cmd, AMDXDNA_INVALID_BO_HANDLE,
				 &debug_bo_hdl, 1, hwctx->id, &seq);
	if (ret) {
		XDNA_ERR(xdna, "Submit command failed");
		return ret;
	}

	aie2_cmd_wait(hwctx, seq);
	/* cmd.result is filled by aie2_sched_drvcmd_resp_handler() */
	if (cmd.result) {
		XDNA_ERR(xdna, "Response failure 0x%x", cmd.result);
		return -EINVAL;
	}

	return 0;
}
899 
/*
 * Re-fault the user pages of a userptr BO whose mapping was invalidated
 * by an MMU notifier. Repeats the hmm_range_fault() / read_retry cycle
 * until every umap on the BO is valid again, the submitter's mm is
 * gone (-EFAULT), or HMM_RANGE_DEFAULT_TIMEOUT expires (-ETIME).
 */
static int aie2_populate_range(struct amdxdna_gem_obj *abo)
{
	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
	struct amdxdna_umap *mapp;
	unsigned long timeout;
	struct mm_struct *mm;
	bool found;
	int ret;

	timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
again:
	/* Find one invalidated umap; take a ref so it outlives the fault */
	found = false;
	down_write(&xdna->notifier_lock);
	list_for_each_entry(mapp, &abo->mem.umap_list, node) {
		if (mapp->invalid) {
			found = true;
			break;
		}
	}

	if (!found) {
		/* All umaps valid again; clear the BO-level flag */
		abo->mem.map_invalid = false;
		up_write(&xdna->notifier_lock);
		return 0;
	}
	kref_get(&mapp->refcnt);
	up_write(&xdna->notifier_lock);

	XDNA_DBG(xdna, "populate memory range %lx %lx",
		 mapp->vma->vm_start, mapp->vma->vm_end);
	mm = mapp->notifier.mm;
	if (!mmget_not_zero(mm)) {
		amdxdna_umap_put(mapp);
		return -EFAULT;
	}

	mapp->range.notifier_seq = mmu_interval_read_begin(&mapp->notifier);
	mmap_read_lock(mm);
	ret = hmm_range_fault(&mapp->range);
	mmap_read_unlock(mm);
	if (ret) {
		if (time_after(jiffies, timeout)) {
			ret = -ETIME;
			goto put_mm;
		}

		/* -EBUSY: the range changed under us, fault again */
		if (ret == -EBUSY) {
			amdxdna_umap_put(mapp);
			goto again;
		}

		goto put_mm;
	}

	/* Raced with another invalidation: retry the whole cycle */
	down_write(&xdna->notifier_lock);
	if (mmu_interval_read_retry(&mapp->notifier, mapp->range.notifier_seq)) {
		up_write(&xdna->notifier_lock);
		amdxdna_umap_put(mapp);
		goto again;
	}
	mapp->invalid = false;
	up_write(&xdna->notifier_lock);
	amdxdna_umap_put(mapp);
	goto again;

put_mm:
	amdxdna_umap_put(mapp);
	mmput(mm);
	return ret;
}
970 
/*
 * Submit a job to the context's scheduler entity.
 *
 * Flow: take a submit slot (job_sem, released on completion or in
 * free_job), allocate a fence-chain node, init the DRM job, lock and
 * reserve the job BOs, make sure no userptr BO mapping is invalid
 * (re-faulting and retrying under a HMM timeout if needed), then under
 * io_lock arm the job, assign the sequence number, push it, and record
 * the out fence on the syncobj timeline at that sequence number.
 *
 * On success *seq receives the assigned sequence number; returns 0 or
 * negative errno with all partial state unwound.
 */
int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq)
{
	struct amdxdna_dev *xdna = hwctx->client->xdna;
	struct ww_acquire_ctx acquire_ctx;
	struct dma_fence_chain *chain;
	struct amdxdna_gem_obj *abo;
	unsigned long timeout = 0;
	int ret, i;

	ret = down_interruptible(&hwctx->priv->job_sem);
	if (ret) {
		XDNA_ERR(xdna, "Grab job sem failed, ret %d", ret);
		return ret;
	}

	chain = dma_fence_chain_alloc();
	if (!chain) {
		XDNA_ERR(xdna, "Alloc fence chain failed");
		ret = -ENOMEM;
		goto up_sem;
	}

	ret = drm_sched_job_init(&job->base, &hwctx->priv->entity, 1, hwctx,
				 hwctx->client->filp->client_id);
	if (ret) {
		XDNA_ERR(xdna, "DRM job init failed, ret %d", ret);
		goto free_chain;
	}

retry:
	ret = drm_gem_lock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
	if (ret) {
		XDNA_WARN(xdna, "Failed to lock BOs, ret %d", ret);
		goto cleanup_job;
	}

	for (i = 0; i < job->bo_cnt; i++) {
		ret = dma_resv_reserve_fences(job->bos[i]->resv, 1);
		if (ret) {
			XDNA_WARN(xdna, "Failed to reserve fences %d", ret);
			drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
			goto cleanup_job;
		}
	}

	/* Invalid userptr mappings must be re-faulted before submission */
	down_read(&xdna->notifier_lock);
	for (i = 0; i < job->bo_cnt; i++) {
		abo = to_xdna_obj(job->bos[i]);
		if (abo->mem.map_invalid) {
			/* Drop locks before the (sleeping) re-fault */
			up_read(&xdna->notifier_lock);
			drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
			if (!timeout) {
				timeout = jiffies +
					msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
			} else if (time_after(jiffies, timeout)) {
				ret = -ETIME;
				goto cleanup_job;
			}

			ret = aie2_populate_range(abo);
			if (ret)
				goto cleanup_job;
			goto retry;
		}
	}

	/* io_lock serializes seq assignment, push, and syncobj update */
	mutex_lock(&hwctx->priv->io_lock);
	drm_sched_job_arm(&job->base);
	job->out_fence = dma_fence_get(&job->base.s_fence->finished);
	for (i = 0; i < job->bo_cnt; i++)
		dma_resv_add_fence(job->bos[i]->resv, job->out_fence, DMA_RESV_USAGE_WRITE);
	job->seq = hwctx->priv->seq++;
	/* Extra ref keeps the job alive past the push; dropped below */
	kref_get(&job->refcnt);
	drm_sched_entity_push_job(&job->base);

	*seq = job->seq;
	drm_syncobj_add_point(hwctx->priv->syncobj, chain, job->out_fence, *seq);
	mutex_unlock(&hwctx->priv->io_lock);

	up_read(&xdna->notifier_lock);
	drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);

	aie2_job_put(job);
	atomic64_inc(&hwctx->job_submit_cnt);

	return 0;

cleanup_job:
	drm_sched_job_cleanup(&job->base);
free_chain:
	dma_fence_chain_free(chain);
up_sem:
	up(&hwctx->priv->job_sem);
	job->job_done = true;
	return ret;
}
1067 
/*
 * MMU-notifier invalidation hook for a userptr BO: wait for every
 * fence (bookkeep usage) on the BO so the device is done with the
 * pages before they are unmapped. ret == 0 from
 * dma_resv_wait_timeout() indicates a timeout — presumably
 * unreachable with MAX_SCHEDULE_TIMEOUT, hence only logged.
 */
void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo,
			 unsigned long cur_seq)
{
	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
	struct drm_gem_object *gobj = to_gobj(abo);
	long ret;

	ret = dma_resv_wait_timeout(gobj->resv, DMA_RESV_USAGE_BOOKKEEP,
				    true, MAX_SCHEDULE_TIMEOUT);
	if (!ret)
		XDNA_ERR(xdna, "Failed to wait for bo, ret %ld", ret);
	else if (ret == -ERESTARTSYS)
		XDNA_DBG(xdna, "Wait for bo interrupted by signal");
}
1082