// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
 */

#include <drm/amdxdna_accel.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/pci.h>
#include <linux/xarray.h>

#include "aie2_msg_priv.h"
#include "aie2_pci.h"
#include "aie2_solver.h"
#include "amdxdna_ctx.h"
#include "amdxdna_gem.h"
#include "amdxdna_mailbox.h"
#include "amdxdna_pci_drv.h"

static uint aie2_max_col = XRS_MAX_COL;
module_param(aie2_max_col, uint, 0600);
MODULE_PARM_DESC(aie2_max_col, "Maximum column that can be used");
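
/*
 * Illustrative usage, assuming the driver is loaded as the "amdxdna"
 * module (adjust the module name if it differs):
 *
 *	modprobe amdxdna aie2_max_col=4
 *
 * With permissions 0600 the value is also writable at runtime by root
 * via /sys/module/amdxdna/parameters/aie2_max_col.
 */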

/*
 * The management mailbox channel is allocated by firmware.
 * The related register and ring buffer information is in the SRAM BAR.
 * This struct describes the register layout.
 */
#define MGMT_MBOX_MAGIC 0x55504e5f /* _NPU */
struct mgmt_mbox_chann_info {
	__u32	x2i_tail;
	__u32	x2i_head;
	__u32	x2i_buf;
	__u32	x2i_buf_sz;
	__u32	i2x_tail;
	__u32	i2x_head;
	__u32	i2x_buf;
	__u32	i2x_buf_sz;
	__u32	magic;
	__u32	msi_id;
	__u32	prot_major;
	__u32	prot_minor;
	__u32	rsvd[4];
};

53 
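/*
 * Worked example of the check below (version numbers are illustrative):
 * with driver protocol 6.2, firmware protocols 6.2 and 6.5 are accepted,
 * 6.1 is rejected because the driver depends on minor-version operations
 * the firmware lacks, and 7.0 is rejected as a major version mismatch.
 */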
static int aie2_check_protocol(struct amdxdna_dev_hdl *ndev, u32 fw_major, u32 fw_minor)
{
	struct amdxdna_dev *xdna = ndev->xdna;

	/*
	 * The mailbox behavior the driver supports is defined by
	 * ndev->priv->protocol_major and protocol_minor.
	 *
	 * When protocol_major and fw_major differ, the driver and
	 * firmware are incompatible.
	 */
	if (ndev->priv->protocol_major != fw_major) {
		XDNA_ERR(xdna, "Incompatible firmware protocol major %d minor %d",
			 fw_major, fw_minor);
		return -EINVAL;
	}

	/*
	 * When protocol_minor is greater than fw_minor, the driver
	 * relies on operations the installed firmware does not support.
	 */
	if (ndev->priv->protocol_minor > fw_minor) {
		XDNA_ERR(xdna, "Firmware minor version smaller than supported");
		return -EINVAL;
	}
	return 0;
}

static void aie2_dump_chann_info_debug(struct amdxdna_dev_hdl *ndev)
{
	struct amdxdna_dev *xdna = ndev->xdna;

	XDNA_DBG(xdna, "i2x tail    0x%x", ndev->mgmt_i2x.mb_tail_ptr_reg);
	XDNA_DBG(xdna, "i2x head    0x%x", ndev->mgmt_i2x.mb_head_ptr_reg);
	XDNA_DBG(xdna, "i2x ringbuf 0x%x", ndev->mgmt_i2x.rb_start_addr);
	XDNA_DBG(xdna, "i2x rsize   0x%x", ndev->mgmt_i2x.rb_size);
	XDNA_DBG(xdna, "x2i tail    0x%x", ndev->mgmt_x2i.mb_tail_ptr_reg);
	XDNA_DBG(xdna, "x2i head    0x%x", ndev->mgmt_x2i.mb_head_ptr_reg);
	XDNA_DBG(xdna, "x2i ringbuf 0x%x", ndev->mgmt_x2i.rb_start_addr);
	XDNA_DBG(xdna, "x2i rsize   0x%x", ndev->mgmt_x2i.rb_size);
	XDNA_DBG(xdna, "x2i chann index 0x%x", ndev->mgmt_chan_idx);
	XDNA_DBG(xdna, "mailbox protocol major 0x%x", ndev->mgmt_prot_major);
	XDNA_DBG(xdna, "mailbox protocol minor 0x%x", ndev->mgmt_prot_minor);
}

static int aie2_get_mgmt_chann_info(struct amdxdna_dev_hdl *ndev)
{
	struct mgmt_mbox_chann_info info_regs;
	struct xdna_mailbox_chann_res *i2x;
	struct xdna_mailbox_chann_res *x2i;
	u32 addr, off;
	u32 *reg;
	int ret;
	int i;

	/*
	 * Once firmware is alive, it writes the management channel
	 * information into the SRAM BAR and stores the address of that
	 * information at offset FW_ALIVE_OFF in the SRAM BAR.
	 *
	 * Reading a non-zero value from FW_ALIVE_OFF implies that the
	 * firmware is alive.
	 */
	ret = readx_poll_timeout(readl, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF),
				 addr, addr, AIE2_INTERVAL, AIE2_TIMEOUT);
	if (ret || !addr)
		return -ETIME;

	off = AIE2_SRAM_OFF(ndev, addr);
	reg = (u32 *)&info_regs;
	for (i = 0; i < sizeof(info_regs) / sizeof(u32); i++)
		reg[i] = readl(ndev->sram_base + off + i * sizeof(u32));

	if (info_regs.magic != MGMT_MBOX_MAGIC) {
		XDNA_ERR(ndev->xdna, "Invalid mbox magic 0x%x", info_regs.magic);
		ret = -EINVAL;
		goto done;
	}

	i2x = &ndev->mgmt_i2x;
	x2i = &ndev->mgmt_x2i;

	i2x->mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.i2x_head);
	i2x->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.i2x_tail);
	i2x->rb_start_addr   = AIE2_SRAM_OFF(ndev, info_regs.i2x_buf);
	i2x->rb_size         = info_regs.i2x_buf_sz;

	x2i->mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_head);
	x2i->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_tail);
	x2i->rb_start_addr   = AIE2_SRAM_OFF(ndev, info_regs.x2i_buf);
	x2i->rb_size         = info_regs.x2i_buf_sz;

	ndev->mgmt_chan_idx  = info_regs.msi_id;
	ndev->mgmt_prot_major = info_regs.prot_major;
	ndev->mgmt_prot_minor = info_regs.prot_minor;

	ret = aie2_check_protocol(ndev, ndev->mgmt_prot_major, ndev->mgmt_prot_minor);

done:
	aie2_dump_chann_info_debug(ndev);

	/* Must clear the address at FW_ALIVE_OFF */
	writel(0, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF));

	return ret;
}

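/*
 * Apply every entry of the given category from the zero-terminated
 * rt_config table in ndev->priv. A non-NULL @val overrides the table
 * value for each matching entry. A minimal illustrative table (the
 * type/value numbers are hypothetical) might look like:
 *
 *	static const struct rt_config example_rt_config[] = {
 *		{ .type = 1, .value = 1, .category = AIE2_RT_CFG_INIT },
 *		{ 0 },
 *	};
 */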
int aie2_runtime_cfg(struct amdxdna_dev_hdl *ndev,
		     enum rt_config_category category, u32 *val)
{
	const struct rt_config *cfg;
	u32 value;
	int ret;

	for (cfg = ndev->priv->rt_config; cfg->type; cfg++) {
		if (cfg->category != category)
			continue;

		value = val ? *val : cfg->value;
		ret = aie2_set_runtime_cfg(ndev, cfg->type, value);
		if (ret) {
			XDNA_ERR(ndev->xdna, "Set type %d value %d failed",
				 cfg->type, value);
			return ret;
		}
	}

	return 0;
}

static int aie2_xdna_reset(struct amdxdna_dev_hdl *ndev)
{
	int ret;

	ret = aie2_suspend_fw(ndev);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Suspend firmware failed");
		return ret;
	}

	ret = aie2_resume_fw(ndev);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Resume firmware failed");
		return ret;
	}

	return 0;
}

static int aie2_mgmt_fw_init(struct amdxdna_dev_hdl *ndev)
{
	int ret;

	ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_INIT, NULL);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Runtime config failed");
		return ret;
	}

	ret = aie2_assign_mgmt_pasid(ndev, 0);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Cannot assign PASID");
		return ret;
	}

	ret = aie2_xdna_reset(ndev);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Reset firmware failed");
		return ret;
	}

	if (!ndev->async_events)
		return 0;

	ret = aie2_error_async_events_send(ndev);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Send async events failed");
		return ret;
	}

	return 0;
}

static int aie2_mgmt_fw_query(struct amdxdna_dev_hdl *ndev)
{
	int ret;

	ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Query firmware version failed");
		return ret;
	}

	ret = aie2_query_aie_version(ndev, &ndev->version);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Query AIE version failed");
		return ret;
	}

	ret = aie2_query_aie_metadata(ndev, &ndev->metadata);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Query AIE metadata failed");
		return ret;
	}

	return 0;
}

static void aie2_mgmt_fw_fini(struct amdxdna_dev_hdl *ndev)
{
	if (aie2_suspend_fw(ndev))
		XDNA_ERR(ndev->xdna, "Suspend firmware failed");
	XDNA_DBG(ndev->xdna, "Firmware suspended");
}

static int aie2_xrs_load(void *cb_arg, struct xrs_action_load *action)
{
	struct amdxdna_hwctx *hwctx = cb_arg;
	struct amdxdna_dev *xdna;
	int ret;

	xdna = hwctx->client->xdna;

	hwctx->start_col = action->part.start_col;
	hwctx->num_col = action->part.ncols;
	ret = aie2_create_context(xdna->dev_handle, hwctx);
	if (ret)
		XDNA_ERR(xdna, "create context failed, ret %d", ret);

	return ret;
}

static int aie2_xrs_unload(void *cb_arg)
{
	struct amdxdna_hwctx *hwctx = cb_arg;
	struct amdxdna_dev *xdna;
	int ret;

	xdna = hwctx->client->xdna;

	ret = aie2_destroy_context(xdna->dev_handle, hwctx);
	if (ret)
		XDNA_ERR(xdna, "destroy context failed, ret %d", ret);

	return ret;
}

static int aie2_xrs_set_dft_dpm_level(struct drm_device *ddev, u32 dpm_level)
{
	struct amdxdna_dev *xdna = to_xdna_dev(ddev);
	struct amdxdna_dev_hdl *ndev;

	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));

	ndev = xdna->dev_handle;
	ndev->dft_dpm_level = dpm_level;
	if (ndev->pw_mode != POWER_MODE_DEFAULT || ndev->dpm_level == dpm_level)
		return 0;

	return ndev->priv->hw_ops.set_dpm(ndev, dpm_level);
}

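/*
 * Callbacks invoked by the resource solver: once it has chosen a column
 * partition for a hardware context, .load is called with the selected
 * start_col/ncols; .unload tears the context down again; and
 * .set_dft_dpm_level lets the solver adjust the default DPM level.
 */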
static struct xrs_action_ops aie2_xrs_actions = {
	.load = aie2_xrs_load,
	.unload = aie2_xrs_unload,
	.set_dft_dpm_level = aie2_xrs_set_dft_dpm_level,
};

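/*
 * Tear down in the reverse order of aie2_hw_start(): suspend firmware,
 * then destroy the management mailbox channel and mailbox device, stop
 * the PSP, shut down the SMU, and finally disable the PCI device.
 */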
static void aie2_hw_stop(struct amdxdna_dev *xdna)
{
	struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
	struct amdxdna_dev_hdl *ndev = xdna->dev_handle;

	if (ndev->dev_status <= AIE2_DEV_INIT) {
		XDNA_ERR(xdna, "device is already stopped");
		return;
	}

	aie2_mgmt_fw_fini(ndev);
	xdna_mailbox_stop_channel(ndev->mgmt_chann);
	xdna_mailbox_destroy_channel(ndev->mgmt_chann);
	ndev->mgmt_chann = NULL;
	drmm_kfree(&xdna->ddev, ndev->mbox);
	ndev->mbox = NULL;
	aie2_psp_stop(ndev->psp_hdl);
	aie2_smu_fini(ndev);
	pci_disable_device(pdev);

	ndev->dev_status = AIE2_DEV_INIT;
}

static int aie2_hw_start(struct amdxdna_dev *xdna)
{
	struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
	struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
	struct xdna_mailbox_res mbox_res;
	u32 xdna_mailbox_intr_reg;
	int mgmt_mb_irq, ret;

	if (ndev->dev_status >= AIE2_DEV_START) {
		XDNA_INFO(xdna, "device is already started");
		return 0;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		XDNA_ERR(xdna, "failed to enable device, ret %d", ret);
		return ret;
	}
	pci_set_master(pdev);

	ret = aie2_smu_init(ndev);
	if (ret) {
		XDNA_ERR(xdna, "failed to init smu, ret %d", ret);
		goto disable_dev;
	}

	ret = aie2_psp_start(ndev->psp_hdl);
	if (ret) {
		XDNA_ERR(xdna, "failed to start psp, ret %d", ret);
		goto fini_smu;
	}

	ret = aie2_get_mgmt_chann_info(ndev);
	if (ret) {
		XDNA_ERR(xdna, "firmware is not alive");
		goto stop_psp;
	}

	mbox_res.ringbuf_base = ndev->sram_base;
	mbox_res.ringbuf_size = pci_resource_len(pdev, xdna->dev_info->sram_bar);
	mbox_res.mbox_base = ndev->mbox_base;
	mbox_res.mbox_size = MBOX_SIZE(ndev);
	mbox_res.name = "xdna_mailbox";
	ndev->mbox = xdnam_mailbox_create(&xdna->ddev, &mbox_res);
	if (!ndev->mbox) {
		XDNA_ERR(xdna, "failed to create mailbox device");
		ret = -ENODEV;
		goto stop_psp;
	}

	mgmt_mb_irq = pci_irq_vector(pdev, ndev->mgmt_chan_idx);
	if (mgmt_mb_irq < 0) {
		ret = mgmt_mb_irq;
		XDNA_ERR(xdna, "failed to get irq vector, ret %d", ret);
		goto stop_psp;
	}

	xdna_mailbox_intr_reg = ndev->mgmt_i2x.mb_head_ptr_reg + 4;
	ndev->mgmt_chann = xdna_mailbox_create_channel(ndev->mbox,
						       &ndev->mgmt_x2i,
						       &ndev->mgmt_i2x,
						       xdna_mailbox_intr_reg,
						       mgmt_mb_irq);
	if (!ndev->mgmt_chann) {
		XDNA_ERR(xdna, "failed to create management mailbox channel");
		ret = -EINVAL;
		goto stop_psp;
	}

	ret = aie2_pm_init(ndev);
	if (ret) {
		XDNA_ERR(xdna, "failed to init pm, ret %d", ret);
		goto destroy_mgmt_chann;
	}

	ret = aie2_mgmt_fw_init(ndev);
	if (ret) {
		XDNA_ERR(xdna, "management firmware init failed, ret %d", ret);
		goto destroy_mgmt_chann;
	}

	ndev->dev_status = AIE2_DEV_START;

	return 0;

destroy_mgmt_chann:
	xdna_mailbox_stop_channel(ndev->mgmt_chann);
	xdna_mailbox_destroy_channel(ndev->mgmt_chann);
stop_psp:
	aie2_psp_stop(ndev->psp_hdl);
fini_smu:
	aie2_smu_fini(ndev);
disable_dev:
	pci_disable_device(pdev);

	return ret;
}

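/*
 * The suspend/resume paths below use guard(mutex) from
 * <linux/cleanup.h>: dev_lock is held for the remainder of the function
 * scope and released automatically on every return path.
 */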
static int aie2_hw_suspend(struct amdxdna_dev *xdna)
{
	struct amdxdna_client *client;

	guard(mutex)(&xdna->dev_lock);
	list_for_each_entry(client, &xdna->client_list, node)
		aie2_hwctx_suspend(client);

	aie2_hw_stop(xdna);

	return 0;
}

static int aie2_hw_resume(struct amdxdna_dev *xdna)
{
	struct amdxdna_client *client;
	int ret;

	guard(mutex)(&xdna->dev_lock);
	ret = aie2_hw_start(xdna);
	if (ret) {
		XDNA_ERR(xdna, "Start hardware failed, %d", ret);
		return ret;
	}

	list_for_each_entry(client, &xdna->client_list, node)
		aie2_hwctx_resume(client);

	return ret;
}

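/*
 * Probe-path setup: request the PSP firmware image, map the PSP, SRAM,
 * SMU and mailbox BARs, allocate MSI-X vectors, create the PSP handle,
 * bring the hardware up, then initialize the resource solver and the
 * async error events.
 */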
static int aie2_init(struct amdxdna_dev *xdna)
{
	struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
	void __iomem *tbl[PCI_NUM_RESOURCES] = {0};
	struct init_config xrs_cfg = { 0 };
	struct amdxdna_dev_hdl *ndev;
	struct psp_config psp_conf;
	const struct firmware *fw;
	unsigned long bars = 0;
	int i, nvec, ret;

	ndev = drmm_kzalloc(&xdna->ddev, sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		return -ENOMEM;

	ndev->priv = xdna->dev_info->dev_priv;
	ndev->xdna = xdna;

	ret = request_firmware(&fw, ndev->priv->fw_path, &pdev->dev);
	if (ret) {
		XDNA_ERR(xdna, "failed to request_firmware %s, ret %d",
			 ndev->priv->fw_path, ret);
		return ret;
	}

	ret = pcim_enable_device(pdev);
	if (ret) {
		XDNA_ERR(xdna, "pcim enable device failed, ret %d", ret);
		goto release_fw;
	}

	for (i = 0; i < PSP_MAX_REGS; i++)
		set_bit(PSP_REG_BAR(ndev, i), &bars);

	set_bit(xdna->dev_info->sram_bar, &bars);
	set_bit(xdna->dev_info->smu_bar, &bars);
	set_bit(xdna->dev_info->mbox_bar, &bars);

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (!test_bit(i, &bars))
			continue;
		tbl[i] = pcim_iomap(pdev, i, 0);
		if (!tbl[i]) {
			XDNA_ERR(xdna, "map BAR %d failed", i);
			ret = -ENOMEM;
			goto release_fw;
		}
	}

	ndev->sram_base = tbl[xdna->dev_info->sram_bar];
	ndev->smu_base = tbl[xdna->dev_info->smu_bar];
	ndev->mbox_base = tbl[xdna->dev_info->mbox_bar];

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		XDNA_ERR(xdna, "Failed to set DMA mask: %d", ret);
		goto release_fw;
	}

	nvec = pci_msix_vec_count(pdev);
	if (nvec <= 0) {
		XDNA_ERR(xdna, "failed to get the number of interrupt vectors");
		ret = -EINVAL;
		goto release_fw;
	}

	ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (ret < 0) {
		XDNA_ERR(xdna, "failed to alloc irq vectors, ret %d", ret);
		goto release_fw;
	}

	psp_conf.fw_size = fw->size;
	psp_conf.fw_buf = fw->data;
	for (i = 0; i < PSP_MAX_REGS; i++)
		psp_conf.psp_regs[i] = tbl[PSP_REG_BAR(ndev, i)] + PSP_REG_OFF(ndev, i);
	ndev->psp_hdl = aie2m_psp_create(&xdna->ddev, &psp_conf);
	if (!ndev->psp_hdl) {
		XDNA_ERR(xdna, "failed to create PSP");
		ret = -ENOMEM;
		goto release_fw;
	}
	xdna->dev_handle = ndev;

	ret = aie2_hw_start(xdna);
	if (ret) {
		XDNA_ERR(xdna, "start NPU failed, ret %d", ret);
		goto release_fw;
	}

	ret = aie2_mgmt_fw_query(ndev);
	if (ret) {
		XDNA_ERR(xdna, "Query firmware failed, ret %d", ret);
		goto stop_hw;
	}
	ndev->total_col = min(aie2_max_col, ndev->metadata.cols);

	xrs_cfg.clk_list.num_levels = ndev->max_dpm_level + 1;
	for (i = 0; i < xrs_cfg.clk_list.num_levels; i++)
		xrs_cfg.clk_list.cu_clk_list[i] = ndev->priv->dpm_clk_tbl[i].hclk;
	xrs_cfg.sys_eff_factor = 1;
	xrs_cfg.ddev = &xdna->ddev;
	xrs_cfg.actions = &aie2_xrs_actions;
	xrs_cfg.total_col = ndev->total_col;

	xdna->xrs_hdl = xrsm_init(&xrs_cfg);
	if (!xdna->xrs_hdl) {
		XDNA_ERR(xdna, "Initialize resolver failed");
		ret = -EINVAL;
		goto stop_hw;
	}

	ret = aie2_error_async_events_alloc(ndev);
	if (ret) {
		XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret);
		goto stop_hw;
	}

	ret = aie2_error_async_events_send(ndev);
	if (ret) {
		XDNA_ERR(xdna, "Send async events failed, ret %d", ret);
		goto async_event_free;
	}

	/* Issue a command to make sure firmware handled async events */
	ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver);
	if (ret) {
		XDNA_ERR(xdna, "Re-query firmware version failed");
		goto async_event_free;
	}

	release_firmware(fw);
	return 0;

async_event_free:
	aie2_error_async_events_free(ndev);
stop_hw:
	aie2_hw_stop(xdna);
release_fw:
	release_firmware(fw);

	return ret;
}

static void aie2_fini(struct amdxdna_dev *xdna)
{
	struct amdxdna_dev_hdl *ndev = xdna->dev_handle;

	aie2_hw_stop(xdna);
	aie2_error_async_events_free(ndev);
}

static int aie2_get_aie_status(struct amdxdna_client *client,
			       struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_query_aie_status status;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_dev_hdl *ndev;
	int ret;

	ndev = xdna->dev_handle;
	if (copy_from_user(&status, u64_to_user_ptr(args->buffer), sizeof(status))) {
		XDNA_ERR(xdna, "Failed to copy AIE request into kernel");
		return -EFAULT;
	}

	if (ndev->metadata.cols * ndev->metadata.size < status.buffer_size) {
		XDNA_ERR(xdna, "Invalid buffer size. Given: %u, required: %u.",
			 status.buffer_size, ndev->metadata.cols * ndev->metadata.size);
		return -EINVAL;
	}

	ret = aie2_query_status(ndev, u64_to_user_ptr(status.buffer),
				status.buffer_size, &status.cols_filled);
	if (ret) {
		XDNA_ERR(xdna, "Failed to get AIE status info. Ret: %d", ret);
		return ret;
	}

	if (copy_to_user(u64_to_user_ptr(args->buffer), &status, sizeof(status))) {
		XDNA_ERR(xdna, "Failed to copy AIE request info to user space");
		return -EFAULT;
	}

	return 0;
}

static int aie2_get_aie_metadata(struct amdxdna_client *client,
				 struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_query_aie_metadata *meta;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_dev_hdl *ndev;
	int ret = 0;

	ndev = xdna->dev_handle;
	meta = kzalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		return -ENOMEM;

	meta->col_size = ndev->metadata.size;
	meta->cols = ndev->metadata.cols;
	meta->rows = ndev->metadata.rows;

	meta->version.major = ndev->metadata.version.major;
	meta->version.minor = ndev->metadata.version.minor;

	meta->core.row_count = ndev->metadata.core.row_count;
	meta->core.row_start = ndev->metadata.core.row_start;
	meta->core.dma_channel_count = ndev->metadata.core.dma_channel_count;
	meta->core.lock_count = ndev->metadata.core.lock_count;
	meta->core.event_reg_count = ndev->metadata.core.event_reg_count;

	meta->mem.row_count = ndev->metadata.mem.row_count;
	meta->mem.row_start = ndev->metadata.mem.row_start;
	meta->mem.dma_channel_count = ndev->metadata.mem.dma_channel_count;
	meta->mem.lock_count = ndev->metadata.mem.lock_count;
	meta->mem.event_reg_count = ndev->metadata.mem.event_reg_count;

	meta->shim.row_count = ndev->metadata.shim.row_count;
	meta->shim.row_start = ndev->metadata.shim.row_start;
	meta->shim.dma_channel_count = ndev->metadata.shim.dma_channel_count;
	meta->shim.lock_count = ndev->metadata.shim.lock_count;
	meta->shim.event_reg_count = ndev->metadata.shim.event_reg_count;

	if (copy_to_user(u64_to_user_ptr(args->buffer), meta, sizeof(*meta)))
		ret = -EFAULT;

	kfree(meta);
	return ret;
}

static int aie2_get_aie_version(struct amdxdna_client *client,
				struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_query_aie_version version;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_dev_hdl *ndev;

	ndev = xdna->dev_handle;
	version.major = ndev->version.major;
	version.minor = ndev->version.minor;

	if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version)))
		return -EFAULT;

	return 0;
}

static int aie2_get_firmware_version(struct amdxdna_client *client,
				     struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_query_firmware_version version;
	struct amdxdna_dev *xdna = client->xdna;

	version.major = xdna->fw_ver.major;
	version.minor = xdna->fw_ver.minor;
	version.patch = xdna->fw_ver.sub;
	version.build = xdna->fw_ver.build;

	if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version)))
		return -EFAULT;

	return 0;
}

static int aie2_get_power_mode(struct amdxdna_client *client,
			       struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_get_power_mode mode = {};
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_dev_hdl *ndev;

	ndev = xdna->dev_handle;
	mode.power_mode = ndev->pw_mode;

	if (copy_to_user(u64_to_user_ptr(args->buffer), &mode, sizeof(mode)))
		return -EFAULT;

	return 0;
}

static int aie2_get_clock_metadata(struct amdxdna_client *client,
				   struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_query_clock_metadata *clock;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_dev_hdl *ndev;
	int ret = 0;

	ndev = xdna->dev_handle;
	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
	if (!clock)
		return -ENOMEM;

	snprintf(clock->mp_npu_clock.name, sizeof(clock->mp_npu_clock.name),
		 "MP-NPU Clock");
	clock->mp_npu_clock.freq_mhz = ndev->npuclk_freq;
	snprintf(clock->h_clock.name, sizeof(clock->h_clock.name), "H Clock");
	clock->h_clock.freq_mhz = ndev->hclk_freq;

	if (copy_to_user(u64_to_user_ptr(args->buffer), clock, sizeof(*clock)))
		ret = -EFAULT;

	kfree(clock);
	return ret;
}

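/*
 * Report the status of every hardware context of every client. If the
 * supplied buffer is too small, iteration continues so that the
 * required size can be returned in args->buffer_size, allowing
 * userspace to retry with a larger buffer.
 */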
static int aie2_get_hwctx_status(struct amdxdna_client *client,
				 struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_query_hwctx __user *buf;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_drm_query_hwctx *tmp;
	struct amdxdna_client *tmp_client;
	struct amdxdna_hwctx *hwctx;
	unsigned long hwctx_id;
	bool overflow = false;
	u32 req_bytes = 0;
	u32 hw_i = 0;
	int ret = 0;
	int idx;

	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	buf = u64_to_user_ptr(args->buffer);
	list_for_each_entry(tmp_client, &xdna->client_list, node) {
		idx = srcu_read_lock(&tmp_client->hwctx_srcu);
		amdxdna_for_each_hwctx(tmp_client, hwctx_id, hwctx) {
			req_bytes += sizeof(*tmp);
			if (args->buffer_size < req_bytes) {
				/* Continue iterating to get the required size */
				overflow = true;
				continue;
			}

			memset(tmp, 0, sizeof(*tmp));
			tmp->pid = tmp_client->pid;
			tmp->context_id = hwctx->id;
			tmp->start_col = hwctx->start_col;
			tmp->num_col = hwctx->num_col;
			tmp->command_submissions = hwctx->priv->seq;
			tmp->command_completions = hwctx->priv->completed;

			if (copy_to_user(&buf[hw_i], tmp, sizeof(*tmp))) {
				ret = -EFAULT;
				srcu_read_unlock(&tmp_client->hwctx_srcu, idx);
				goto out;
			}
			hw_i++;
		}
		srcu_read_unlock(&tmp_client->hwctx_srcu, idx);
	}

	if (overflow) {
		XDNA_ERR(xdna, "Invalid buffer size. Given: %u, required: %u.",
			 args->buffer_size, req_bytes);
		ret = -EINVAL;
	}

out:
	kfree(tmp);
	args->buffer_size = req_bytes;
	return ret;
}

static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_info *args)
{
	struct amdxdna_dev *xdna = client->xdna;
	int ret, idx;

	if (!drm_dev_enter(&xdna->ddev, &idx))
		return -ENODEV;

	switch (args->param) {
	case DRM_AMDXDNA_QUERY_AIE_STATUS:
		ret = aie2_get_aie_status(client, args);
		break;
	case DRM_AMDXDNA_QUERY_AIE_METADATA:
		ret = aie2_get_aie_metadata(client, args);
		break;
	case DRM_AMDXDNA_QUERY_AIE_VERSION:
		ret = aie2_get_aie_version(client, args);
		break;
	case DRM_AMDXDNA_QUERY_CLOCK_METADATA:
		ret = aie2_get_clock_metadata(client, args);
		break;
	case DRM_AMDXDNA_QUERY_HW_CONTEXTS:
		ret = aie2_get_hwctx_status(client, args);
		break;
	case DRM_AMDXDNA_QUERY_FIRMWARE_VERSION:
		ret = aie2_get_firmware_version(client, args);
		break;
	case DRM_AMDXDNA_GET_POWER_MODE:
		ret = aie2_get_power_mode(client, args);
		break;
	default:
		XDNA_ERR(xdna, "Unsupported request parameter %u", args->param);
		ret = -EOPNOTSUPP;
	}
	XDNA_DBG(xdna, "Got param %u", args->param);

	drm_dev_exit(idx);
	return ret;
}

static int aie2_set_power_mode(struct amdxdna_client *client,
			       struct amdxdna_drm_set_state *args)
{
	struct amdxdna_drm_set_power_mode power_state;
	enum amdxdna_power_mode_type power_mode;
	struct amdxdna_dev *xdna = client->xdna;

	if (copy_from_user(&power_state, u64_to_user_ptr(args->buffer),
			   sizeof(power_state))) {
		XDNA_ERR(xdna, "Failed to copy power mode request into kernel");
		return -EFAULT;
	}

	if (XDNA_MBZ_DBG(xdna, power_state.pad, sizeof(power_state.pad)))
		return -EINVAL;

	power_mode = power_state.power_mode;
	if (power_mode > POWER_MODE_TURBO) {
		XDNA_ERR(xdna, "Invalid power mode %d", power_mode);
		return -EINVAL;
	}

	return aie2_pm_set_mode(xdna->dev_handle, power_mode);
}

static int aie2_set_state(struct amdxdna_client *client,
			  struct amdxdna_drm_set_state *args)
{
	struct amdxdna_dev *xdna = client->xdna;
	int ret, idx;

	if (!drm_dev_enter(&xdna->ddev, &idx))
		return -ENODEV;

	switch (args->param) {
	case DRM_AMDXDNA_SET_POWER_MODE:
		ret = aie2_set_power_mode(client, args);
		break;
	default:
		XDNA_ERR(xdna, "Unsupported request parameter %u", args->param);
		ret = -EOPNOTSUPP;
		break;
	}

	drm_dev_exit(idx);
	return ret;
}

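/* Device operations the generic amdxdna driver dispatches to for AIE2 hardware. */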
const struct amdxdna_dev_ops aie2_ops = {
	.init           = aie2_init,
	.fini           = aie2_fini,
	.resume         = aie2_hw_resume,
	.suspend        = aie2_hw_suspend,
	.get_aie_info   = aie2_get_info,
	.set_aie_state  = aie2_set_state,
	.hwctx_init     = aie2_hwctx_init,
	.hwctx_fini     = aie2_hwctx_fini,
	.hwctx_config   = aie2_hwctx_config,
	.cmd_submit     = aie2_cmd_submit,
	.hmm_invalidate = aie2_hmm_invalidate,
};
945