xref: /linux/drivers/accel/amdxdna/aie2_ctx.c (revision ed07a76be7baa0bb164b152116486e4d9fed50dc)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2024, Advanced Micro Devices, Inc.
4  */
5 
6 #include <drm/amdxdna_accel.h>
7 #include <drm/drm_device.h>
8 #include <drm/drm_gem.h>
9 #include <drm/drm_gem_shmem_helper.h>
10 #include <drm/drm_print.h>
11 #include <drm/drm_syncobj.h>
12 #include <linux/hmm.h>
13 #include <linux/types.h>
14 #include <linux/xarray.h>
15 #include <trace/events/amdxdna.h>
16 
17 #include "aie2_msg_priv.h"
18 #include "aie2_pci.h"
19 #include "aie2_solver.h"
20 #include "amdxdna_ctx.h"
21 #include "amdxdna_gem.h"
22 #include "amdxdna_mailbox.h"
23 #include "amdxdna_pci_drv.h"
24 
25 static bool force_cmdlist;
26 module_param(force_cmdlist, bool, 0600);
27 MODULE_PARM_DESC(force_cmdlist, "Force the use of command lists (default false)");
28 
29 #define HWCTX_MAX_TIMEOUT	60000 /* milliseconds */
30 
31 static void aie2_job_release(struct kref *ref)
32 {
33 	struct amdxdna_sched_job *job;
34 
35 	job = container_of(ref, struct amdxdna_sched_job, refcnt);
36 	amdxdna_sched_job_cleanup(job);
37 	atomic64_inc(&job->hwctx->job_free_cnt);
38 	wake_up(&job->hwctx->priv->job_free_wq);
39 	if (job->out_fence)
40 		dma_fence_put(job->out_fence);
41 	kfree(job);
42 }
43 
44 static void aie2_job_put(struct amdxdna_sched_job *job)
45 {
46 	kref_put(&job->refcnt, aie2_job_release);
47 }
48 
49 static void aie2_hwctx_status_shift_stop(struct amdxdna_hwctx *hwctx)
50 {
51 	hwctx->old_status = hwctx->status;
52 	hwctx->status = HWCTX_STAT_STOP;
53 }
54 
55 static void aie2_hwctx_status_restore(struct amdxdna_hwctx *hwctx)
56 {
57 	hwctx->status = hwctx->old_status;
58 }
59 
60 /* bad_job is only set by aie2_sched_job_timedout(); all other callers pass NULL */
61 static void aie2_hwctx_stop(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx,
62 			    struct drm_sched_job *bad_job)
63 {
64 	drm_sched_stop(&hwctx->priv->sched, bad_job);
65 	aie2_destroy_context(xdna->dev_handle, hwctx);
66 }
67 
68 static int aie2_hwctx_restart(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx)
69 {
70 	struct amdxdna_gem_obj *heap = hwctx->priv->heap;
71 	int ret;
72 
73 	ret = aie2_create_context(xdna->dev_handle, hwctx);
74 	if (ret) {
75 		XDNA_ERR(xdna, "Create hwctx failed, ret %d", ret);
76 		goto out;
77 	}
78 
79 	ret = aie2_map_host_buf(xdna->dev_handle, hwctx->fw_ctx_id,
80 				heap->mem.userptr, heap->mem.size);
81 	if (ret) {
82 		XDNA_ERR(xdna, "Map host buf failed, ret %d", ret);
83 		goto out;
84 	}
85 
86 	if (hwctx->status != HWCTX_STAT_READY) {
87 		XDNA_DBG(xdna, "hwctx is not ready, status %d", hwctx->status);
88 		goto out;
89 	}
90 
91 	ret = aie2_config_cu(hwctx);
92 	if (ret) {
93 		XDNA_ERR(xdna, "Config cu failed, ret %d", ret);
94 		goto out;
95 	}
96 
97 out:
98 	drm_sched_start(&hwctx->priv->sched, 0);
99 	XDNA_DBG(xdna, "%s restarted, ret %d", hwctx->name, ret);
100 	return ret;
101 }
102 
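/*
 * Look up the completion fence for sequence number 'seq' on the
 * per-context syncobj timeline.
 */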
103 static struct dma_fence *aie2_cmd_get_out_fence(struct amdxdna_hwctx *hwctx, u64 seq)
104 {
105 	struct dma_fence *fence, *out_fence = NULL;
106 	int ret;
107 
108 	fence = drm_syncobj_fence_get(hwctx->priv->syncobj);
109 	if (!fence)
110 		return NULL;
111 
112 	ret = dma_fence_chain_find_seqno(&fence, seq);
113 	if (ret)
114 		goto out;
115 
116 	out_fence = dma_fence_get(dma_fence_chain_contained(fence));
117 
118 out:
119 	dma_fence_put(fence);
120 	return out_fence;
121 }
122 
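/* Wait (up to two seconds) for the most recently submitted command to complete */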
123 static void aie2_hwctx_wait_for_idle(struct amdxdna_hwctx *hwctx)
124 {
125 	struct dma_fence *fence;
126 
127 	fence = aie2_cmd_get_out_fence(hwctx, hwctx->priv->seq - 1);
128 	if (!fence)
129 		return;
130 
131 	/* Wait up to 2 seconds for fw to finish all pending requests */
132 	dma_fence_wait_timeout(fence, false, msecs_to_jiffies(2000));
133 	dma_fence_put(fence);
134 }
135 
136 void aie2_hwctx_suspend(struct amdxdna_client *client)
137 {
138 	struct amdxdna_dev *xdna = client->xdna;
139 	struct amdxdna_hwctx *hwctx;
140 	unsigned long hwctx_id;
141 
142 	/*
143 	 * A command timeout is unlikely, and even if it happens it does not
144 	 * break the system: aie2_hwctx_stop() destroys the mailbox channel
145 	 * and aborts all outstanding commands.
146 	 */
147 	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
148 	guard(mutex)(&client->hwctx_lock);
149 	amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
150 		aie2_hwctx_wait_for_idle(hwctx);
151 		aie2_hwctx_stop(xdna, hwctx, NULL);
152 		aie2_hwctx_status_shift_stop(hwctx);
153 	}
154 }
155 
156 void aie2_hwctx_resume(struct amdxdna_client *client)
157 {
158 	struct amdxdna_dev *xdna = client->xdna;
159 	struct amdxdna_hwctx *hwctx;
160 	unsigned long hwctx_id;
161 
162 	/*
163 	 * The resume path cannot guarantee that the mailbox channel is
164 	 * re-created. If it is not, submitting a message to the channel
165 	 * will return an error.
166 	 */
167 	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
168 	guard(mutex)(&client->hwctx_lock);
169 	amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
170 		aie2_hwctx_status_restore(hwctx);
171 		aie2_hwctx_restart(xdna, hwctx);
172 	}
173 }
174 
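/*
 * Common completion path for all response handlers: signal the job fence,
 * release the job slot and drop the references taken in run_job.
 */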
175 static void
176 aie2_sched_notify(struct amdxdna_sched_job *job)
177 {
178 	struct dma_fence *fence = job->fence;
179 
180 	trace_xdna_job(&job->base, job->hwctx->name, "signaled fence", job->seq);
181 	job->hwctx->priv->completed++;
182 	dma_fence_signal(fence);
183 
184 	up(&job->hwctx->priv->job_sem);
185 	job->job_done = true;
186 	dma_fence_put(fence);
187 	mmput_async(job->mm);
188 	aie2_job_put(job);
189 }
190 
191 static int
192 aie2_sched_resp_handler(void *handle, void __iomem *data, size_t size)
193 {
194 	struct amdxdna_sched_job *job = handle;
195 	struct amdxdna_gem_obj *cmd_abo;
196 	u32 ret = 0;
197 	u32 status;
198 
199 	cmd_abo = job->cmd_bo;
200 
201 	if (unlikely(!data))
202 		goto out;
203 
204 	if (unlikely(size != sizeof(u32))) {
205 		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
206 		ret = -EINVAL;
207 		goto out;
208 	}
209 
210 	status = readl(data);
211 	XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status);
212 	if (status == AIE2_STATUS_SUCCESS)
213 		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED);
214 	else
215 		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ERROR);
216 
217 out:
218 	aie2_sched_notify(job);
219 	return ret;
220 }
221 
222 static int
223 aie2_sched_nocmd_resp_handler(void *handle, void __iomem *data, size_t size)
224 {
225 	struct amdxdna_sched_job *job = handle;
226 	u32 ret = 0;
227 	u32 status;
228 
229 	if (unlikely(!data))
230 		goto out;
231 
232 	if (unlikely(size != sizeof(u32))) {
233 		ret = -EINVAL;
234 		goto out;
235 	}
236 
237 	status = readl(data);
238 	XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status);
239 
240 out:
241 	aie2_sched_notify(job);
242 	return ret;
243 }
244 
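/*
 * Completion handler for command-list submissions. The response carries an
 * overall status and, on failure, the index and status of the failing
 * command (see struct cmd_chain_resp).
 */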
245 static int
246 aie2_sched_cmdlist_resp_handler(void *handle, void __iomem *data, size_t size)
247 {
248 	struct amdxdna_sched_job *job = handle;
249 	struct amdxdna_gem_obj *cmd_abo;
250 	struct amdxdna_dev *xdna;
251 	u32 fail_cmd_status;
252 	u32 fail_cmd_idx;
253 	u32 cmd_status;
254 	u32 ret = 0;
255 
256 	cmd_abo = job->cmd_bo;
257 	if (unlikely(!data) || unlikely(size != sizeof(u32) * 3)) {
258 		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
259 		ret = -EINVAL;
260 		goto out;
261 	}
262 
263 	cmd_status = readl(data + offsetof(struct cmd_chain_resp, status));
264 	xdna = job->hwctx->client->xdna;
265 	XDNA_DBG(xdna, "Status 0x%x", cmd_status);
266 	if (cmd_status == AIE2_STATUS_SUCCESS) {
267 		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED);
268 		goto out;
269 	}
270 
271 	/* Slow path for the error case: read the details from the ring buffer on the BAR */
272 	fail_cmd_idx = readl(data + offsetof(struct cmd_chain_resp, fail_cmd_idx));
273 	fail_cmd_status = readl(data + offsetof(struct cmd_chain_resp, fail_cmd_status));
274 	XDNA_DBG(xdna, "Failed cmd idx %d, status 0x%x",
275 		 fail_cmd_idx, fail_cmd_status);
276 
277 	if (fail_cmd_status == AIE2_STATUS_SUCCESS) {
278 		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
279 		ret = -EINVAL;
280 		goto out;
281 	}
282 	amdxdna_cmd_set_state(cmd_abo, fail_cmd_status);
283 
284 	if (amdxdna_cmd_get_op(cmd_abo) == ERT_CMD_CHAIN) {
285 		struct amdxdna_cmd_chain *cc = amdxdna_cmd_get_payload(cmd_abo, NULL);
286 
287 		cc->error_index = fail_cmd_idx;
288 		if (cc->error_index >= cc->command_count)
289 			cc->error_index = 0;
290 	}
291 out:
292 	aie2_sched_notify(job);
293 	return ret;
294 }
295 
296 static struct dma_fence *
297 aie2_sched_job_run(struct drm_sched_job *sched_job)
298 {
299 	struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
300 	struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
301 	struct amdxdna_hwctx *hwctx = job->hwctx;
302 	struct dma_fence *fence;
303 	int ret;
304 
305 	if (!mmget_not_zero(job->mm))
306 		return ERR_PTR(-ESRCH);
307 
308 	kref_get(&job->refcnt);
309 	fence = dma_fence_get(job->fence);
310 
311 	if (unlikely(!cmd_abo)) {
312 		ret = aie2_sync_bo(hwctx, job, aie2_sched_nocmd_resp_handler);
313 		goto out;
314 	}
315 
316 	amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_NEW);
317 
318 	if (amdxdna_cmd_get_op(cmd_abo) == ERT_CMD_CHAIN)
319 		ret = aie2_cmdlist_multi_execbuf(hwctx, job, aie2_sched_cmdlist_resp_handler);
320 	else if (force_cmdlist)
321 		ret = aie2_cmdlist_single_execbuf(hwctx, job, aie2_sched_cmdlist_resp_handler);
322 	else
323 		ret = aie2_execbuf(hwctx, job, aie2_sched_resp_handler);
324 
325 out:
326 	if (ret) {
327 		dma_fence_put(job->fence);
328 		aie2_job_put(job);
329 		mmput(job->mm);
330 		fence = ERR_PTR(ret);
331 	}
332 	trace_xdna_job(sched_job, hwctx->name, "sent to device", job->seq);
333 
334 	return fence;
335 }
336 
337 static void aie2_sched_job_free(struct drm_sched_job *sched_job)
338 {
339 	struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
340 	struct amdxdna_hwctx *hwctx = job->hwctx;
341 
342 	trace_xdna_job(sched_job, hwctx->name, "job free", job->seq);
343 	if (!job->job_done)
344 		up(&hwctx->priv->job_sem);
345 
346 	drm_sched_job_cleanup(sched_job);
347 	aie2_job_put(job);
348 }
349 
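/*
 * drm_sched timeout handler: stop the scheduler and destroy the firmware
 * context, aborting in-flight commands, then re-create the context and
 * restart the scheduler.
 */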
350 static enum drm_gpu_sched_stat
351 aie2_sched_job_timedout(struct drm_sched_job *sched_job)
352 {
353 	struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
354 	struct amdxdna_hwctx *hwctx = job->hwctx;
355 	struct amdxdna_dev *xdna;
356 
357 	xdna = hwctx->client->xdna;
358 	trace_xdna_job(sched_job, hwctx->name, "job timedout", job->seq);
359 	mutex_lock(&xdna->dev_lock);
360 	aie2_hwctx_stop(xdna, hwctx, sched_job);
361 
362 	aie2_hwctx_restart(xdna, hwctx);
363 	mutex_unlock(&xdna->dev_lock);
364 
365 	return DRM_GPU_SCHED_STAT_RESET;
366 }
367 
368 static const struct drm_sched_backend_ops sched_ops = {
369 	.run_job = aie2_sched_job_run,
370 	.free_job = aie2_sched_job_free,
371 	.timedout_job = aie2_sched_job_timedout,
372 };
373 
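/*
 * Build the list of start columns this context may be placed at, based on
 * the number of tiles and the device column alignment constraint.
 */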
374 static int aie2_hwctx_col_list(struct amdxdna_hwctx *hwctx)
375 {
376 	struct amdxdna_dev *xdna = hwctx->client->xdna;
377 	struct amdxdna_dev_hdl *ndev;
378 	int start, end, first, last;
379 	u32 width = 1, entries = 0;
380 	int i;
381 
382 	if (!hwctx->num_tiles) {
383 		XDNA_ERR(xdna, "Number of tiles is zero");
384 		return -EINVAL;
385 	}
386 
387 	ndev = xdna->dev_handle;
388 	if (unlikely(!ndev->metadata.core.row_count)) {
389 		XDNA_WARN(xdna, "Core tile row count is zero");
390 		return -EINVAL;
391 	}
392 
393 	hwctx->num_col = hwctx->num_tiles / ndev->metadata.core.row_count;
394 	if (!hwctx->num_col || hwctx->num_col > ndev->total_col) {
395 		XDNA_ERR(xdna, "Invalid num_col %d", hwctx->num_col);
396 		return -EINVAL;
397 	}
398 
399 	if (ndev->priv->col_align == COL_ALIGN_NATURE)
400 		width = hwctx->num_col;
401 
402 	/*
403 	 * Within [start, end], find the columns that are multiples of 'width':
404 	 *	'first' is the first such column,
405 	 *	'last' is the last such column,
406 	 *	'entries' is the total number of such columns.
407 	 */
408 	start = xdna->dev_info->first_col;
409 	end = ndev->total_col - hwctx->num_col;
410 	if (start > 0 && end == 0) {
411 		XDNA_DBG(xdna, "Force start from col 0");
412 		start = 0;
413 	}
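	/* Round 'start' up and 'end' down to the nearest multiple of 'width' */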
414 	first = start + (width - start % width) % width;
415 	last = end - end % width;
416 	if (last >= first)
417 		entries = (last - first) / width + 1;
418 	XDNA_DBG(xdna, "start %d end %d first %d last %d",
419 		 start, end, first, last);
420 
421 	if (unlikely(!entries)) {
422 		XDNA_ERR(xdna, "Start %d end %d width %d",
423 			 start, end, width);
424 		return -EINVAL;
425 	}
426 
427 	hwctx->col_list = kmalloc_array(entries, sizeof(*hwctx->col_list), GFP_KERNEL);
428 	if (!hwctx->col_list)
429 		return -ENOMEM;
430 
431 	hwctx->col_list_len = entries;
432 	hwctx->col_list[0] = first;
433 	for (i = 1; i < entries; i++)
434 		hwctx->col_list[i] = hwctx->col_list[i - 1] + width;
435 
436 	print_hex_dump_debug("col_list: ", DUMP_PREFIX_OFFSET, 16, 4, hwctx->col_list,
437 			     entries * sizeof(*hwctx->col_list), false);
438 	return 0;
439 }
440 
441 static int aie2_alloc_resource(struct amdxdna_hwctx *hwctx)
442 {
443 	struct amdxdna_dev *xdna = hwctx->client->xdna;
444 	struct alloc_requests *xrs_req;
445 	int ret;
446 
447 	xrs_req = kzalloc(sizeof(*xrs_req), GFP_KERNEL);
448 	if (!xrs_req)
449 		return -ENOMEM;
450 
451 	xrs_req->cdo.start_cols = hwctx->col_list;
452 	xrs_req->cdo.cols_len = hwctx->col_list_len;
453 	xrs_req->cdo.ncols = hwctx->num_col;
454 	xrs_req->cdo.qos_cap.opc = hwctx->max_opc;
455 
456 	xrs_req->rqos.gops = hwctx->qos.gops;
457 	xrs_req->rqos.fps = hwctx->qos.fps;
458 	xrs_req->rqos.dma_bw = hwctx->qos.dma_bandwidth;
459 	xrs_req->rqos.latency = hwctx->qos.latency;
460 	xrs_req->rqos.exec_time = hwctx->qos.frame_exec_time;
461 	xrs_req->rqos.priority = hwctx->qos.priority;
462 
463 	xrs_req->rid = (uintptr_t)hwctx;
464 
465 	ret = xrs_allocate_resource(xdna->xrs_hdl, xrs_req, hwctx);
466 	if (ret)
467 		XDNA_ERR(xdna, "Allocate AIE resource failed, ret %d", ret);
468 
469 	kfree(xrs_req);
470 	return ret;
471 }
472 
473 static void aie2_release_resource(struct amdxdna_hwctx *hwctx)
474 {
475 	struct amdxdna_dev *xdna = hwctx->client->xdna;
476 	int ret;
477 
478 	ret = xrs_release_resource(xdna->xrs_hdl, (uintptr_t)hwctx);
479 	if (ret)
480 		XDNA_ERR(xdna, "Release AIE resource failed, ret %d", ret);
481 }
482 
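/*
 * Create the per-context syncobj and export a handle to user space.
 * Completed commands are added to it as timeline points keyed by their
 * sequence number (see aie2_cmd_submit()).
 */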
483 static int aie2_ctx_syncobj_create(struct amdxdna_hwctx *hwctx)
484 {
485 	struct amdxdna_dev *xdna = hwctx->client->xdna;
486 	struct drm_file *filp = hwctx->client->filp;
487 	struct drm_syncobj *syncobj;
488 	u32 hdl;
489 	int ret;
490 
491 	hwctx->syncobj_hdl = AMDXDNA_INVALID_FENCE_HANDLE;
492 
493 	ret = drm_syncobj_create(&syncobj, 0, NULL);
494 	if (ret) {
495 		XDNA_ERR(xdna, "Create ctx syncobj failed, ret %d", ret);
496 		return ret;
497 	}
498 	ret = drm_syncobj_get_handle(filp, syncobj, &hdl);
499 	if (ret) {
500 		drm_syncobj_put(syncobj);
501 		XDNA_ERR(xdna, "Create ctx syncobj handle failed, ret %d", ret);
502 		return ret;
503 	}
504 	hwctx->priv->syncobj = syncobj;
505 	hwctx->syncobj_hdl = hdl;
506 
507 	return 0;
508 }
509 
510 static void aie2_ctx_syncobj_destroy(struct amdxdna_hwctx *hwctx)
511 {
512 	/*
513 	 * The syncobj_hdl is owned by user space and will be cleaned up
514 	 * separately.
515 	 */
516 	drm_syncobj_put(hwctx->priv->syncobj);
517 }
518 
519 int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
520 {
521 	struct amdxdna_client *client = hwctx->client;
522 	struct amdxdna_dev *xdna = client->xdna;
523 	const struct drm_sched_init_args args = {
524 		.ops = &sched_ops,
525 		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
526 		.credit_limit = HWCTX_MAX_CMDS,
527 		.timeout = msecs_to_jiffies(HWCTX_MAX_TIMEOUT),
528 		.name = hwctx->name,
529 		.dev = xdna->ddev.dev,
530 	};
531 	struct drm_gpu_scheduler *sched;
532 	struct amdxdna_hwctx_priv *priv;
533 	struct amdxdna_gem_obj *heap;
534 	struct amdxdna_dev_hdl *ndev;
535 	int i, ret;
536 
537 	priv = kzalloc(sizeof(*hwctx->priv), GFP_KERNEL);
538 	if (!priv)
539 		return -ENOMEM;
540 	hwctx->priv = priv;
541 
542 	mutex_lock(&client->mm_lock);
543 	heap = client->dev_heap;
544 	if (!heap) {
545 		XDNA_ERR(xdna, "The client dev heap object not exist");
546 		mutex_unlock(&client->mm_lock);
547 		ret = -ENOENT;
548 		goto free_priv;
549 	}
550 	drm_gem_object_get(to_gobj(heap));
551 	mutex_unlock(&client->mm_lock);
552 	priv->heap = heap;
553 	sema_init(&priv->job_sem, HWCTX_MAX_CMDS);
554 
555 	ret = amdxdna_gem_pin(heap);
556 	if (ret) {
557 		XDNA_ERR(xdna, "Dev heap pin failed, ret %d", ret);
558 		goto put_heap;
559 	}
560 
561 	for (i = 0; i < ARRAY_SIZE(priv->cmd_buf); i++) {
562 		struct amdxdna_gem_obj *abo;
563 		struct amdxdna_drm_create_bo args = {
564 			.flags = 0,
565 			.type = AMDXDNA_BO_DEV,
566 			.vaddr = 0,
567 			.size = MAX_CHAIN_CMDBUF_SIZE,
568 		};
569 
570 		abo = amdxdna_drm_alloc_dev_bo(&xdna->ddev, &args, client->filp);
571 		if (IS_ERR(abo)) {
572 			ret = PTR_ERR(abo);
573 			goto free_cmd_bufs;
574 		}
575 
576 		XDNA_DBG(xdna, "Command buf %d addr 0x%llx size 0x%lx",
577 			 i, abo->mem.dev_addr, abo->mem.size);
578 		priv->cmd_buf[i] = abo;
579 	}
580 
581 	sched = &priv->sched;
582 	mutex_init(&priv->io_lock);
583 
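	/*
	 * Prime lockdep: record that io_lock can be taken under the reclaim
	 * context, so that any allocation which may recurse into reclaim
	 * while io_lock is held gets flagged.
	 */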
584 	fs_reclaim_acquire(GFP_KERNEL);
585 	might_lock(&priv->io_lock);
586 	fs_reclaim_release(GFP_KERNEL);
587 
588 	ret = drm_sched_init(sched, &args);
589 	if (ret) {
590 		XDNA_ERR(xdna, "Failed to init DRM scheduler. ret %d", ret);
591 		goto free_cmd_bufs;
592 	}
593 
594 	ret = drm_sched_entity_init(&priv->entity, DRM_SCHED_PRIORITY_NORMAL,
595 				    &sched, 1, NULL);
596 	if (ret) {
597 		XDNA_ERR(xdna, "Failed to initial sched entiry. ret %d", ret);
598 		goto free_sched;
599 	}
600 
601 	ret = aie2_hwctx_col_list(hwctx);
602 	if (ret) {
603 		XDNA_ERR(xdna, "Create col list failed, ret %d", ret);
604 		goto free_entity;
605 	}
606 
607 	ret = aie2_alloc_resource(hwctx);
608 	if (ret) {
609 		XDNA_ERR(xdna, "Alloc hw resource failed, ret %d", ret);
610 		goto free_col_list;
611 	}
612 
613 	ret = aie2_map_host_buf(xdna->dev_handle, hwctx->fw_ctx_id,
614 				heap->mem.userptr, heap->mem.size);
615 	if (ret) {
616 		XDNA_ERR(xdna, "Map host buffer failed, ret %d", ret);
617 		goto release_resource;
618 	}
619 
620 	ret = aie2_ctx_syncobj_create(hwctx);
621 	if (ret) {
622 		XDNA_ERR(xdna, "Create syncobj failed, ret %d", ret);
623 		goto release_resource;
624 	}
625 
626 	hwctx->status = HWCTX_STAT_INIT;
627 	ndev = xdna->dev_handle;
628 	ndev->hwctx_num++;
629 	init_waitqueue_head(&priv->job_free_wq);
630 
631 	XDNA_DBG(xdna, "hwctx %s init completed", hwctx->name);
632 
633 	return 0;
634 
635 release_resource:
636 	aie2_release_resource(hwctx);
637 free_col_list:
638 	kfree(hwctx->col_list);
639 free_entity:
640 	drm_sched_entity_destroy(&priv->entity);
641 free_sched:
642 	drm_sched_fini(&priv->sched);
643 free_cmd_bufs:
644 	for (i = 0; i < ARRAY_SIZE(priv->cmd_buf); i++) {
645 		if (!priv->cmd_buf[i])
646 			continue;
647 		drm_gem_object_put(to_gobj(priv->cmd_buf[i]));
648 	}
649 	amdxdna_gem_unpin(heap);
650 put_heap:
651 	drm_gem_object_put(to_gobj(heap));
652 free_priv:
653 	kfree(priv);
654 	return ret;
655 }
656 
657 void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx)
658 {
659 	struct amdxdna_dev_hdl *ndev;
660 	struct amdxdna_dev *xdna;
661 	int idx;
662 
663 	xdna = hwctx->client->xdna;
664 	ndev = xdna->dev_handle;
665 	ndev->hwctx_num--;
666 
667 	XDNA_DBG(xdna, "%s sequence number %lld", hwctx->name, hwctx->priv->seq);
668 	drm_sched_entity_destroy(&hwctx->priv->entity);
669 
670 	aie2_hwctx_wait_for_idle(hwctx);
671 
672 	/* Request fw to destroy the hwctx and cancel the remaining pending requests */
673 	aie2_release_resource(hwctx);
674 
675 	/* Wait for all submitted jobs to be completed or canceled */
676 	wait_event(hwctx->priv->job_free_wq,
677 		   atomic64_read(&hwctx->job_submit_cnt) ==
678 		   atomic64_read(&hwctx->job_free_cnt));
679 
680 	drm_sched_fini(&hwctx->priv->sched);
681 	aie2_ctx_syncobj_destroy(hwctx);
682 
683 	for (idx = 0; idx < ARRAY_SIZE(hwctx->priv->cmd_buf); idx++)
684 		drm_gem_object_put(to_gobj(hwctx->priv->cmd_buf[idx]));
685 	amdxdna_gem_unpin(hwctx->priv->heap);
686 	drm_gem_object_put(to_gobj(hwctx->priv->heap));
687 
688 	mutex_destroy(&hwctx->priv->io_lock);
689 	kfree(hwctx->col_list);
690 	kfree(hwctx->priv);
691 	kfree(hwctx->cus);
692 }
693 
694 static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size)
695 {
696 	struct amdxdna_hwctx_param_config_cu *config = buf;
697 	struct amdxdna_dev *xdna = hwctx->client->xdna;
698 	u32 total_size;
699 	int ret;
700 
701 	XDNA_DBG(xdna, "Config %d CU to %s", config->num_cus, hwctx->name);
702 	if (XDNA_MBZ_DBG(xdna, config->pad, sizeof(config->pad)))
703 		return -EINVAL;
704 
705 	if (hwctx->status != HWCTX_STAT_INIT) {
706 		XDNA_ERR(xdna, "Not support re-config CU");
707 		return -EINVAL;
708 	}
709 
710 	if (!config->num_cus) {
711 		XDNA_ERR(xdna, "Number of CU is zero");
712 		return -EINVAL;
713 	}
714 
715 	total_size = struct_size(config, cu_configs, config->num_cus);
716 	if (total_size > size) {
717 		XDNA_ERR(xdna, "CU config larger than size");
718 		return -EINVAL;
719 	}
720 
721 	hwctx->cus = kmemdup(config, total_size, GFP_KERNEL);
722 	if (!hwctx->cus)
723 		return -ENOMEM;
724 
725 	ret = aie2_config_cu(hwctx);
726 	if (ret) {
727 		XDNA_ERR(xdna, "Config CU to firmware failed, ret %d", ret);
728 		goto free_cus;
729 	}
730 
731 	wmb(); /* Order the CU setup before the status update; command submit checks status without locking */
732 	hwctx->status = HWCTX_STAT_READY;
733 
734 	return 0;
735 
736 free_cus:
737 	kfree(hwctx->cus);
738 	hwctx->cus = NULL;
739 	return ret;
740 }
741 
742 int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size)
743 {
744 	struct amdxdna_dev *xdna = hwctx->client->xdna;
745 
746 	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
747 	switch (type) {
748 	case DRM_AMDXDNA_HWCTX_CONFIG_CU:
749 		return aie2_hwctx_cu_config(hwctx, buf, size);
750 	case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF:
751 	case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF:
752 		return -EOPNOTSUPP;
753 	default:
754 		XDNA_DBG(xdna, "Not supported type %d", type);
755 		return -EOPNOTSUPP;
756 	}
757 }
758 
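/*
 * Re-fault any invalidated userptr mappings of the BO with hmm_range_fault(),
 * retrying until the pages are stable or HMM_RANGE_DEFAULT_TIMEOUT expires.
 */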
759 static int aie2_populate_range(struct amdxdna_gem_obj *abo)
760 {
761 	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
762 	struct amdxdna_umap *mapp;
763 	unsigned long timeout;
764 	struct mm_struct *mm;
765 	bool found;
766 	int ret;
767 
768 	timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
769 again:
770 	found = false;
771 	down_write(&xdna->notifier_lock);
772 	list_for_each_entry(mapp, &abo->mem.umap_list, node) {
773 		if (mapp->invalid) {
774 			found = true;
775 			break;
776 		}
777 	}
778 
779 	if (!found) {
780 		abo->mem.map_invalid = false;
781 		up_write(&xdna->notifier_lock);
782 		return 0;
783 	}
784 	kref_get(&mapp->refcnt);
785 	up_write(&xdna->notifier_lock);
786 
787 	XDNA_DBG(xdna, "populate memory range %lx %lx",
788 		 mapp->vma->vm_start, mapp->vma->vm_end);
789 	mm = mapp->notifier.mm;
790 	if (!mmget_not_zero(mm)) {
791 		amdxdna_umap_put(mapp);
792 		return -EFAULT;
793 	}
794 
795 	mapp->range.notifier_seq = mmu_interval_read_begin(&mapp->notifier);
796 	mmap_read_lock(mm);
797 	ret = hmm_range_fault(&mapp->range);
798 	mmap_read_unlock(mm);
799 	if (ret) {
800 		if (time_after(jiffies, timeout)) {
801 			ret = -ETIME;
802 			goto put_mm;
803 		}
804 
805 		if (ret == -EBUSY) {
806 			amdxdna_umap_put(mapp);
807 			goto again;
808 		}
809 
810 		goto put_mm;
811 	}
812 
813 	down_write(&xdna->notifier_lock);
814 	if (mmu_interval_read_retry(&mapp->notifier, mapp->range.notifier_seq)) {
815 		up_write(&xdna->notifier_lock);
816 		amdxdna_umap_put(mapp);
817 		goto again;
818 	}
819 	mapp->invalid = false;
820 	up_write(&xdna->notifier_lock);
821 	amdxdna_umap_put(mapp);
822 	goto again;
823 
824 put_mm:
825 	amdxdna_umap_put(mapp);
826 	mmput(mm);
827 	return ret;
828 }
829 
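/*
 * Submit a job on the hardware context: take a job slot, initialize the
 * scheduler job, lock and fence all BOs (re-populating any invalidated
 * userptr ranges), then arm and push the job and record its out-fence on
 * the context syncobj timeline at the returned sequence number.
 */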
830 int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq)
831 {
832 	struct amdxdna_dev *xdna = hwctx->client->xdna;
833 	struct ww_acquire_ctx acquire_ctx;
834 	struct dma_fence_chain *chain;
835 	struct amdxdna_gem_obj *abo;
836 	unsigned long timeout = 0;
837 	int ret, i;
838 
839 	ret = down_interruptible(&hwctx->priv->job_sem);
840 	if (ret) {
841 		XDNA_ERR(xdna, "Grab job sem failed, ret %d", ret);
842 		return ret;
843 	}
844 
845 	chain = dma_fence_chain_alloc();
846 	if (!chain) {
847 		XDNA_ERR(xdna, "Alloc fence chain failed");
848 		ret = -ENOMEM;
849 		goto up_sem;
850 	}
851 
852 	ret = drm_sched_job_init(&job->base, &hwctx->priv->entity, 1, hwctx,
853 				 hwctx->client->filp->client_id);
854 	if (ret) {
855 		XDNA_ERR(xdna, "DRM job init failed, ret %d", ret);
856 		goto free_chain;
857 	}
858 
859 retry:
860 	ret = drm_gem_lock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
861 	if (ret) {
862 		XDNA_WARN(xdna, "Failed to lock BOs, ret %d", ret);
863 		goto cleanup_job;
864 	}
865 
866 	for (i = 0; i < job->bo_cnt; i++) {
867 		ret = dma_resv_reserve_fences(job->bos[i]->resv, 1);
868 		if (ret) {
869 			XDNA_WARN(xdna, "Failed to reserve fences %d", ret);
870 			drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
871 			goto cleanup_job;
872 		}
873 	}
874 
875 	down_read(&xdna->notifier_lock);
876 	for (i = 0; i < job->bo_cnt; i++) {
877 		abo = to_xdna_obj(job->bos[i]);
878 		if (abo->mem.map_invalid) {
879 			up_read(&xdna->notifier_lock);
880 			drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
881 			if (!timeout) {
882 				timeout = jiffies +
883 					msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
884 			} else if (time_after(jiffies, timeout)) {
885 				ret = -ETIME;
886 				goto cleanup_job;
887 			}
888 
889 			ret = aie2_populate_range(abo);
890 			if (ret)
891 				goto cleanup_job;
892 			goto retry;
893 		}
894 	}
895 
896 	mutex_lock(&hwctx->priv->io_lock);
897 	drm_sched_job_arm(&job->base);
898 	job->out_fence = dma_fence_get(&job->base.s_fence->finished);
899 	for (i = 0; i < job->bo_cnt; i++)
900 		dma_resv_add_fence(job->bos[i]->resv, job->out_fence, DMA_RESV_USAGE_WRITE);
901 	job->seq = hwctx->priv->seq++;
902 	kref_get(&job->refcnt);
903 	drm_sched_entity_push_job(&job->base);
904 
905 	*seq = job->seq;
906 	drm_syncobj_add_point(hwctx->priv->syncobj, chain, job->out_fence, *seq);
907 	mutex_unlock(&hwctx->priv->io_lock);
908 
909 	up_read(&xdna->notifier_lock);
910 	drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
911 
912 	aie2_job_put(job);
913 	atomic64_inc(&hwctx->job_submit_cnt);
914 
915 	return 0;
916 
917 cleanup_job:
918 	drm_sched_job_cleanup(&job->base);
919 free_chain:
920 	dma_fence_chain_free(chain);
921 up_sem:
922 	up(&hwctx->priv->job_sem);
923 	job->job_done = true;
924 	return ret;
925 }
926 
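/*
 * Wait for every fence on the BO (DMA_RESV_USAGE_BOOKKEEP) so the device
 * is done with the pages of the range being invalidated.
 */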
927 void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo,
928 			 unsigned long cur_seq)
929 {
930 	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
931 	struct drm_gem_object *gobj = to_gobj(abo);
932 	long ret;
933 
934 	ret = dma_resv_wait_timeout(gobj->resv, DMA_RESV_USAGE_BOOKKEEP,
935 				    true, MAX_SCHEDULE_TIMEOUT);
936 	if (!ret || ret == -ERESTARTSYS)
937 		XDNA_ERR(xdna, "Failed to wait for bo, ret %ld", ret);
938 }
939