// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
 */

#include <drm/amdxdna_accel.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/pci.h>
#include <linux/xarray.h>

#include "aie2_msg_priv.h"
#include "aie2_pci.h"
#include "aie2_solver.h"
#include "amdxdna_ctx.h"
#include "amdxdna_gem.h"
#include "amdxdna_mailbox.h"
#include "amdxdna_pci_drv.h"

static int aie2_max_col = XRS_MAX_COL;
module_param(aie2_max_col, uint, 0600);
MODULE_PARM_DESC(aie2_max_col, "Maximum column that can be used");
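/*
 * For example (assuming the module is built as amdxdna.ko), the limit can
 * be set at load time or, since the parameter is 0600, changed by root at
 * runtime:
 *
 *   modprobe amdxdna aie2_max_col=4
 *   echo 4 > /sys/module/amdxdna/parameters/aie2_max_col
 */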

/*
 * The management mailbox channel is allocated by firmware.
 * The related register and ring buffer information is on SRAM BAR.
 * This struct is the register layout.
 */
#define MGMT_MBOX_MAGIC 0x55504e5f /* _NPU */
struct mgmt_mbox_chann_info {
	__u32	x2i_tail;
	__u32	x2i_head;
	__u32	x2i_buf;
	__u32	x2i_buf_sz;
	__u32	i2x_tail;
	__u32	i2x_head;
	__u32	i2x_buf;
	__u32	i2x_buf_sz;
	__u32	magic;
	__u32	msi_id;
	__u32	prot_major;
	__u32	prot_minor;
	__u32	rsvd[4];
};

static int aie2_check_protocol(struct amdxdna_dev_hdl *ndev, u32 fw_major, u32 fw_minor)
{
	struct amdxdna_dev *xdna = ndev->xdna;

	/*
	 * The mailbox behavior the driver supports is defined by
	 * ndev->priv->protocol_major and protocol_minor.
	 *
	 * When protocol_major and fw_major differ, the driver and
	 * firmware are incompatible.
	 */
	if (ndev->priv->protocol_major != fw_major) {
		XDNA_ERR(xdna, "Incompatible firmware protocol major %d minor %d",
			 fw_major, fw_minor);
		return -EINVAL;
	}

	/*
	 * When protocol_minor is greater than fw_minor, the driver relies
	 * on operations that the installed firmware does not support.
	 */
	if (ndev->priv->protocol_minor > fw_minor) {
		XDNA_ERR(xdna, "Firmware minor version smaller than supported");
		return -EINVAL;
	}
	return 0;
}

static void aie2_dump_chann_info_debug(struct amdxdna_dev_hdl *ndev)
{
	struct amdxdna_dev *xdna = ndev->xdna;

	XDNA_DBG(xdna, "i2x tail    0x%x", ndev->mgmt_i2x.mb_tail_ptr_reg);
	XDNA_DBG(xdna, "i2x head    0x%x", ndev->mgmt_i2x.mb_head_ptr_reg);
	XDNA_DBG(xdna, "i2x ringbuf 0x%x", ndev->mgmt_i2x.rb_start_addr);
	XDNA_DBG(xdna, "i2x rsize   0x%x", ndev->mgmt_i2x.rb_size);
	XDNA_DBG(xdna, "x2i tail    0x%x", ndev->mgmt_x2i.mb_tail_ptr_reg);
	XDNA_DBG(xdna, "x2i head    0x%x", ndev->mgmt_x2i.mb_head_ptr_reg);
	XDNA_DBG(xdna, "x2i ringbuf 0x%x", ndev->mgmt_x2i.rb_start_addr);
	XDNA_DBG(xdna, "x2i rsize   0x%x", ndev->mgmt_x2i.rb_size);
	XDNA_DBG(xdna, "x2i chann index 0x%x", ndev->mgmt_chan_idx);
	XDNA_DBG(xdna, "mailbox protocol major 0x%x", ndev->mgmt_prot_major);
	XDNA_DBG(xdna, "mailbox protocol minor 0x%x", ndev->mgmt_prot_minor);
}

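/*
 * Read the management mailbox channel information that firmware publishes
 * in SRAM, translate the raw addresses into BAR-relative offsets, and
 * validate the magic word and protocol version before first use.
 */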
static int aie2_get_mgmt_chann_info(struct amdxdna_dev_hdl *ndev)
{
	struct mgmt_mbox_chann_info info_regs;
	struct xdna_mailbox_chann_res *i2x;
	struct xdna_mailbox_chann_res *x2i;
	u32 addr, off;
	u32 *reg;
	int ret;
	int i;

	/*
	 * Once firmware is alive, it writes the management channel
	 * information into SRAM BAR and writes the address of that
	 * information at the FW_ALIVE_OFF offset in SRAM BAR.
	 *
	 * Reading a non-zero value from FW_ALIVE_OFF implies that the
	 * firmware is alive.
	 */
	ret = readx_poll_timeout(readl, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF),
				 addr, addr, AIE2_INTERVAL, AIE2_TIMEOUT);
	if (ret || !addr)
		return -ETIME;

	off = AIE2_SRAM_OFF(ndev, addr);
	reg = (u32 *)&info_regs;
	for (i = 0; i < sizeof(info_regs) / sizeof(u32); i++)
		reg[i] = readl(ndev->sram_base + off + i * sizeof(u32));

	if (info_regs.magic != MGMT_MBOX_MAGIC) {
		XDNA_ERR(ndev->xdna, "Invalid mbox magic 0x%x", info_regs.magic);
		ret = -EINVAL;
		goto done;
	}

	i2x = &ndev->mgmt_i2x;
	x2i = &ndev->mgmt_x2i;

	i2x->mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.i2x_head);
	i2x->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.i2x_tail);
	i2x->rb_start_addr   = AIE2_SRAM_OFF(ndev, info_regs.i2x_buf);
	i2x->rb_size         = info_regs.i2x_buf_sz;

	x2i->mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_head);
	x2i->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_tail);
	x2i->rb_start_addr   = AIE2_SRAM_OFF(ndev, info_regs.x2i_buf);
	x2i->rb_size         = info_regs.x2i_buf_sz;

	ndev->mgmt_chan_idx  = info_regs.msi_id;
	ndev->mgmt_prot_major = info_regs.prot_major;
	ndev->mgmt_prot_minor = info_regs.prot_minor;

	ret = aie2_check_protocol(ndev, ndev->mgmt_prot_major, ndev->mgmt_prot_minor);

done:
	aie2_dump_chann_info_debug(ndev);

	/* Must clear address at FW_ALIVE_OFF */
	writel(0, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF));

	return ret;
}

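/*
 * Apply every runtime configuration entry of the given category from the
 * device's config table. A non-NULL @val overrides the per-entry default
 * value.
 */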
int aie2_runtime_cfg(struct amdxdna_dev_hdl *ndev,
		     enum rt_config_category category, u32 *val)
{
	const struct rt_config *cfg;
	u32 value;
	int ret;

	for (cfg = ndev->priv->rt_config; cfg->type; cfg++) {
		if (cfg->category != category)
			continue;

		value = val ? *val : cfg->value;
		ret = aie2_set_runtime_cfg(ndev, cfg->type, value);
		if (ret) {
			XDNA_ERR(ndev->xdna, "Set type %d value %d failed",
				 cfg->type, value);
			return ret;
		}
	}

	return 0;
}

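/* Reset the NPU by suspending and then resuming the firmware. */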
static int aie2_xdna_reset(struct amdxdna_dev_hdl *ndev)
{
	int ret;

	ret = aie2_suspend_fw(ndev);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Suspend firmware failed");
		return ret;
	}

	ret = aie2_resume_fw(ndev);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Resume firmware failed");
		return ret;
	}

	return 0;
}

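/*
 * Bring the management firmware to an operational state: apply the init
 * time runtime configuration, assign the management PASID, reset the
 * firmware, and re-register async error events if already allocated.
 */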
static int aie2_mgmt_fw_init(struct amdxdna_dev_hdl *ndev)
{
	int ret;

	ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_INIT, NULL);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Runtime config failed");
		return ret;
	}

	ret = aie2_assign_mgmt_pasid(ndev, 0);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Can not assign PASID");
		return ret;
	}

	ret = aie2_xdna_reset(ndev);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Reset firmware failed");
		return ret;
	}

	if (!ndev->async_events)
		return 0;

	ret = aie2_error_async_events_send(ndev);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Send async events failed");
		return ret;
	}

	return 0;
}

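/* Cache firmware version, AIE version and AIE metadata in the device handle. */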
static int aie2_mgmt_fw_query(struct amdxdna_dev_hdl *ndev)
{
	int ret;

	ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver);
	if (ret) {
		XDNA_ERR(ndev->xdna, "query firmware version failed");
		return ret;
	}

	ret = aie2_query_aie_version(ndev, &ndev->version);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Query AIE version failed");
		return ret;
	}

	ret = aie2_query_aie_metadata(ndev, &ndev->metadata);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Query AIE metadata failed");
		return ret;
	}

	return 0;
}

static void aie2_mgmt_fw_fini(struct amdxdna_dev_hdl *ndev)
{
	if (aie2_suspend_fw(ndev))
		XDNA_ERR(ndev->xdna, "Suspend_fw failed");
	XDNA_DBG(ndev->xdna, "Firmware suspended");
}

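/*
 * Resolver callbacks. The solver invokes these when a column partition is
 * assigned to, or released from, a hardware context; the corresponding
 * firmware context is created or destroyed here.
 */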
static int aie2_xrs_load(void *cb_arg, struct xrs_action_load *action)
{
	struct amdxdna_hwctx *hwctx = cb_arg;
	struct amdxdna_dev *xdna;
	int ret;

	xdna = hwctx->client->xdna;

	hwctx->start_col = action->part.start_col;
	hwctx->num_col = action->part.ncols;
	ret = aie2_create_context(xdna->dev_handle, hwctx);
	if (ret)
		XDNA_ERR(xdna, "create context failed, ret %d", ret);

	return ret;
}

static int aie2_xrs_unload(void *cb_arg)
{
	struct amdxdna_hwctx *hwctx = cb_arg;
	struct amdxdna_dev *xdna;
	int ret;

	xdna = hwctx->client->xdna;

	ret = aie2_destroy_context(xdna->dev_handle, hwctx);
	if (ret)
		XDNA_ERR(xdna, "destroy context failed, ret %d", ret);

	return ret;
}

static int aie2_xrs_set_dft_dpm_level(struct drm_device *ddev, u32 dpm_level)
{
	struct amdxdna_dev *xdna = to_xdna_dev(ddev);
	struct amdxdna_dev_hdl *ndev;

	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));

	ndev = xdna->dev_handle;
	ndev->dft_dpm_level = dpm_level;
	if (ndev->pw_mode != POWER_MODE_DEFAULT || ndev->dpm_level == dpm_level)
		return 0;

	return ndev->priv->hw_ops.set_dpm(ndev, dpm_level);
}

static struct xrs_action_ops aie2_xrs_actions = {
	.load = aie2_xrs_load,
	.unload = aie2_xrs_unload,
	.set_dft_dpm_level = aie2_xrs_set_dft_dpm_level,
};

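/* Tear down in reverse order of aie2_hw_start(). */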
static void aie2_hw_stop(struct amdxdna_dev *xdna)
{
	struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
	struct amdxdna_dev_hdl *ndev = xdna->dev_handle;

	if (ndev->dev_status <= AIE2_DEV_INIT) {
		XDNA_ERR(xdna, "device is already stopped");
		return;
	}

	aie2_mgmt_fw_fini(ndev);
	xdna_mailbox_stop_channel(ndev->mgmt_chann);
	xdna_mailbox_destroy_channel(ndev->mgmt_chann);
	ndev->mgmt_chann = NULL;
	drmm_kfree(&xdna->ddev, ndev->mbox);
	ndev->mbox = NULL;
	aie2_psp_stop(ndev->psp_hdl);
	aie2_smu_fini(ndev);
	pci_disable_device(pdev);

	ndev->dev_status = AIE2_DEV_INIT;
}

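/*
 * Bring-up sequence: enable PCI and the SMU/PSP, wait for firmware to
 * publish the management channel information, then create the mailbox
 * device and management channel and initialize power management and the
 * management firmware.
 */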
static int aie2_hw_start(struct amdxdna_dev *xdna)
{
	struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
	struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
	struct xdna_mailbox_res mbox_res;
	u32 xdna_mailbox_intr_reg;
	int mgmt_mb_irq, ret;

	if (ndev->dev_status >= AIE2_DEV_START) {
		XDNA_INFO(xdna, "device is already started");
		return 0;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		XDNA_ERR(xdna, "failed to enable device, ret %d", ret);
		return ret;
	}
	pci_set_master(pdev);

	ret = aie2_smu_init(ndev);
	if (ret) {
		XDNA_ERR(xdna, "failed to init smu, ret %d", ret);
		goto disable_dev;
	}

	ret = aie2_psp_start(ndev->psp_hdl);
	if (ret) {
		XDNA_ERR(xdna, "failed to start psp, ret %d", ret);
		goto fini_smu;
	}

	ret = aie2_get_mgmt_chann_info(ndev);
	if (ret) {
		XDNA_ERR(xdna, "firmware is not alive");
		goto stop_psp;
	}

	mbox_res.ringbuf_base = ndev->sram_base;
	mbox_res.ringbuf_size = pci_resource_len(pdev, xdna->dev_info->sram_bar);
	mbox_res.mbox_base = ndev->mbox_base;
	mbox_res.mbox_size = MBOX_SIZE(ndev);
	mbox_res.name = "xdna_mailbox";
	ndev->mbox = xdnam_mailbox_create(&xdna->ddev, &mbox_res);
	if (!ndev->mbox) {
		XDNA_ERR(xdna, "failed to create mailbox device");
		ret = -ENODEV;
		goto stop_psp;
	}

	mgmt_mb_irq = pci_irq_vector(pdev, ndev->mgmt_chan_idx);
	if (mgmt_mb_irq < 0) {
		ret = mgmt_mb_irq;
		XDNA_ERR(xdna, "failed to alloc irq vector, ret %d", ret);
		goto stop_psp;
	}

	xdna_mailbox_intr_reg = ndev->mgmt_i2x.mb_head_ptr_reg + 4;
	ndev->mgmt_chann = xdna_mailbox_create_channel(ndev->mbox,
						       &ndev->mgmt_x2i,
						       &ndev->mgmt_i2x,
						       xdna_mailbox_intr_reg,
						       mgmt_mb_irq);
	if (!ndev->mgmt_chann) {
		XDNA_ERR(xdna, "failed to create management mailbox channel");
		ret = -EINVAL;
		goto stop_psp;
	}

	ret = aie2_pm_init(ndev);
	if (ret) {
		XDNA_ERR(xdna, "failed to init pm, ret %d", ret);
		goto destroy_mgmt_chann;
	}

	ret = aie2_mgmt_fw_init(ndev);
	if (ret) {
		XDNA_ERR(xdna, "mgmt firmware init failed, ret %d", ret);
		goto destroy_mgmt_chann;
	}

	ndev->dev_status = AIE2_DEV_START;

	return 0;

destroy_mgmt_chann:
	xdna_mailbox_stop_channel(ndev->mgmt_chann);
	xdna_mailbox_destroy_channel(ndev->mgmt_chann);
stop_psp:
	aie2_psp_stop(ndev->psp_hdl);
fini_smu:
	aie2_smu_fini(ndev);
disable_dev:
	pci_disable_device(pdev);

	return ret;
}

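/*
 * Suspend and resume quiesce or restart every client's hardware contexts
 * under dev_lock before stopping, or after restarting, the hardware.
 */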
static int aie2_hw_suspend(struct amdxdna_dev *xdna)
{
	struct amdxdna_client *client;

	guard(mutex)(&xdna->dev_lock);
	list_for_each_entry(client, &xdna->client_list, node)
		aie2_hwctx_suspend(client);

	aie2_hw_stop(xdna);

	return 0;
}

static int aie2_hw_resume(struct amdxdna_dev *xdna)
{
	struct amdxdna_client *client;
	int ret;

	guard(mutex)(&xdna->dev_lock);
	ret = aie2_hw_start(xdna);
	if (ret) {
		XDNA_ERR(xdna, "Start hardware failed, %d", ret);
		return ret;
	}

	list_for_each_entry(client, &xdna->client_list, node) {
		ret = aie2_hwctx_resume(client);
		if (ret)
			break;
	}

	return ret;
}

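/*
 * One-time initialization at probe: request firmware, map the BARs used by
 * the PSP, SRAM, SMU and mailbox, set up DMA and MSI-X, create the PSP
 * handle, start the hardware and register with the column resolver.
 */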
static int aie2_init(struct amdxdna_dev *xdna)
{
	struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
	void __iomem *tbl[PCI_NUM_RESOURCES] = {0};
	struct init_config xrs_cfg = { 0 };
	struct amdxdna_dev_hdl *ndev;
	struct psp_config psp_conf;
	const struct firmware *fw;
	unsigned long bars = 0;
	int i, nvec, ret;

	ndev = drmm_kzalloc(&xdna->ddev, sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		return -ENOMEM;

	ndev->priv = xdna->dev_info->dev_priv;
	ndev->xdna = xdna;

	ret = request_firmware(&fw, ndev->priv->fw_path, &pdev->dev);
	if (ret) {
		XDNA_ERR(xdna, "failed to request_firmware %s, ret %d",
			 ndev->priv->fw_path, ret);
		return ret;
	}

	ret = pcim_enable_device(pdev);
	if (ret) {
		XDNA_ERR(xdna, "pcim enable device failed, ret %d", ret);
		goto release_fw;
	}

	for (i = 0; i < PSP_MAX_REGS; i++)
		set_bit(PSP_REG_BAR(ndev, i), &bars);

	set_bit(xdna->dev_info->sram_bar, &bars);
	set_bit(xdna->dev_info->smu_bar, &bars);
	set_bit(xdna->dev_info->mbox_bar, &bars);

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (!test_bit(i, &bars))
			continue;
		tbl[i] = pcim_iomap(pdev, i, 0);
		if (!tbl[i]) {
			XDNA_ERR(xdna, "map bar %d failed", i);
			ret = -ENOMEM;
			goto release_fw;
		}
	}

	ndev->sram_base = tbl[xdna->dev_info->sram_bar];
	ndev->smu_base = tbl[xdna->dev_info->smu_bar];
	ndev->mbox_base = tbl[xdna->dev_info->mbox_bar];

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		XDNA_ERR(xdna, "Failed to set DMA mask: %d", ret);
		goto release_fw;
	}

	nvec = pci_msix_vec_count(pdev);
	if (nvec <= 0) {
		XDNA_ERR(xdna, "failed to get MSI-X vector count");
		ret = -EINVAL;
		goto release_fw;
	}

	ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (ret < 0) {
		XDNA_ERR(xdna, "failed to alloc irq vectors, ret %d", ret);
		goto release_fw;
	}

	psp_conf.fw_size = fw->size;
	psp_conf.fw_buf = fw->data;
	for (i = 0; i < PSP_MAX_REGS; i++)
		psp_conf.psp_regs[i] = tbl[PSP_REG_BAR(ndev, i)] + PSP_REG_OFF(ndev, i);
	ndev->psp_hdl = aie2m_psp_create(&xdna->ddev, &psp_conf);
	if (!ndev->psp_hdl) {
		XDNA_ERR(xdna, "failed to create psp");
		ret = -ENOMEM;
		goto release_fw;
	}
	xdna->dev_handle = ndev;

	ret = aie2_hw_start(xdna);
	if (ret) {
		XDNA_ERR(xdna, "start npu failed, ret %d", ret);
		goto release_fw;
	}

	ret = aie2_mgmt_fw_query(ndev);
	if (ret) {
		XDNA_ERR(xdna, "Query firmware failed, ret %d", ret);
		goto stop_hw;
	}
	ndev->total_col = min(aie2_max_col, ndev->metadata.cols);

	xrs_cfg.clk_list.num_levels = ndev->max_dpm_level + 1;
	for (i = 0; i < xrs_cfg.clk_list.num_levels; i++)
		xrs_cfg.clk_list.cu_clk_list[i] = ndev->priv->dpm_clk_tbl[i].hclk;
	xrs_cfg.sys_eff_factor = 1;
	xrs_cfg.ddev = &xdna->ddev;
	xrs_cfg.actions = &aie2_xrs_actions;
	xrs_cfg.total_col = ndev->total_col;

	xdna->xrs_hdl = xrsm_init(&xrs_cfg);
	if (!xdna->xrs_hdl) {
		XDNA_ERR(xdna, "Initialize resolver failed");
		ret = -EINVAL;
		goto stop_hw;
	}

	ret = aie2_error_async_events_alloc(ndev);
	if (ret) {
		XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret);
		goto stop_hw;
	}

	ret = aie2_error_async_events_send(ndev);
	if (ret) {
		XDNA_ERR(xdna, "Send async events failed, ret %d", ret);
		goto async_event_free;
	}

	/* Issue a command to make sure firmware handled async events */
	ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver);
	if (ret) {
		XDNA_ERR(xdna, "Re-query firmware version failed");
		goto async_event_free;
	}

	release_firmware(fw);
	return 0;

async_event_free:
	aie2_error_async_events_free(ndev);
stop_hw:
	aie2_hw_stop(xdna);
release_fw:
	release_firmware(fw);

	return ret;
}

static void aie2_fini(struct amdxdna_dev *xdna)
{
	struct amdxdna_dev_hdl *ndev = xdna->dev_handle;

	aie2_hw_stop(xdna);
	aie2_error_async_events_free(ndev);
}

static int aie2_get_aie_status(struct amdxdna_client *client,
			       struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_query_aie_status status;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_dev_hdl *ndev;
	int ret;

	ndev = xdna->dev_handle;
	if (copy_from_user(&status, u64_to_user_ptr(args->buffer), sizeof(status))) {
		XDNA_ERR(xdna, "Failed to copy AIE request into kernel");
		return -EFAULT;
	}

	if (ndev->metadata.cols * ndev->metadata.size < status.buffer_size) {
		XDNA_ERR(xdna, "Invalid buffer size. Given Size: %u. Need Size: %u.",
			 status.buffer_size, ndev->metadata.cols * ndev->metadata.size);
		return -EINVAL;
	}

	ret = aie2_query_status(ndev, u64_to_user_ptr(status.buffer),
				status.buffer_size, &status.cols_filled);
	if (ret) {
		XDNA_ERR(xdna, "Failed to get AIE status info. Ret: %d", ret);
		return ret;
	}

	if (copy_to_user(u64_to_user_ptr(args->buffer), &status, sizeof(status))) {
		XDNA_ERR(xdna, "Failed to copy AIE request info to user space");
		return -EFAULT;
	}

	return 0;
}

static int aie2_get_aie_metadata(struct amdxdna_client *client,
				 struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_query_aie_metadata *meta;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_dev_hdl *ndev;
	int ret = 0;

	ndev = xdna->dev_handle;
	meta = kzalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		return -ENOMEM;

	meta->col_size = ndev->metadata.size;
	meta->cols = ndev->metadata.cols;
	meta->rows = ndev->metadata.rows;

	meta->version.major = ndev->metadata.version.major;
	meta->version.minor = ndev->metadata.version.minor;

	meta->core.row_count = ndev->metadata.core.row_count;
	meta->core.row_start = ndev->metadata.core.row_start;
	meta->core.dma_channel_count = ndev->metadata.core.dma_channel_count;
	meta->core.lock_count = ndev->metadata.core.lock_count;
	meta->core.event_reg_count = ndev->metadata.core.event_reg_count;

	meta->mem.row_count = ndev->metadata.mem.row_count;
	meta->mem.row_start = ndev->metadata.mem.row_start;
	meta->mem.dma_channel_count = ndev->metadata.mem.dma_channel_count;
	meta->mem.lock_count = ndev->metadata.mem.lock_count;
	meta->mem.event_reg_count = ndev->metadata.mem.event_reg_count;

	meta->shim.row_count = ndev->metadata.shim.row_count;
	meta->shim.row_start = ndev->metadata.shim.row_start;
	meta->shim.dma_channel_count = ndev->metadata.shim.dma_channel_count;
	meta->shim.lock_count = ndev->metadata.shim.lock_count;
	meta->shim.event_reg_count = ndev->metadata.shim.event_reg_count;

	if (copy_to_user(u64_to_user_ptr(args->buffer), meta, sizeof(*meta)))
		ret = -EFAULT;

	kfree(meta);
	return ret;
}

static int aie2_get_aie_version(struct amdxdna_client *client,
				struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_query_aie_version version;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_dev_hdl *ndev;

	ndev = xdna->dev_handle;
	version.major = ndev->version.major;
	version.minor = ndev->version.minor;

	if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version)))
		return -EFAULT;

	return 0;
}

static int aie2_get_firmware_version(struct amdxdna_client *client,
				     struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_query_firmware_version version;
	struct amdxdna_dev *xdna = client->xdna;

	version.major = xdna->fw_ver.major;
	version.minor = xdna->fw_ver.minor;
	version.patch = xdna->fw_ver.sub;
	version.build = xdna->fw_ver.build;

	if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version)))
		return -EFAULT;

	return 0;
}

static int aie2_get_power_mode(struct amdxdna_client *client,
			       struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_get_power_mode mode = {};
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_dev_hdl *ndev;

	ndev = xdna->dev_handle;
	mode.power_mode = ndev->pw_mode;

	if (copy_to_user(u64_to_user_ptr(args->buffer), &mode, sizeof(mode)))
		return -EFAULT;

	return 0;
}

static int aie2_get_clock_metadata(struct amdxdna_client *client,
				   struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_query_clock_metadata *clock;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_dev_hdl *ndev;
	int ret = 0;

	ndev = xdna->dev_handle;
	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
	if (!clock)
		return -ENOMEM;

	snprintf(clock->mp_npu_clock.name, sizeof(clock->mp_npu_clock.name),
		 "MP-NPU Clock");
	clock->mp_npu_clock.freq_mhz = ndev->npuclk_freq;
	snprintf(clock->h_clock.name, sizeof(clock->h_clock.name), "H Clock");
	clock->h_clock.freq_mhz = ndev->hclk_freq;

	if (copy_to_user(u64_to_user_ptr(args->buffer), clock, sizeof(*clock)))
		ret = -EFAULT;

	kfree(clock);
	return ret;
}

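/*
 * Hardware context walk callback: copy one context's status to the user
 * buffer and advance the buffer cursor for the next entry.
 */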
static int aie2_hwctx_status_cb(struct amdxdna_hwctx *hwctx, void *arg)
{
	struct amdxdna_drm_query_hwctx *tmp __free(kfree) = NULL;
	struct amdxdna_drm_get_info *get_info_args = arg;
	struct amdxdna_drm_query_hwctx __user *buf;

	if (get_info_args->buffer_size < sizeof(*tmp))
		return -EINVAL;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	tmp->pid = hwctx->client->pid;
	tmp->context_id = hwctx->id;
	tmp->start_col = hwctx->start_col;
	tmp->num_col = hwctx->num_col;
	tmp->command_submissions = hwctx->priv->seq;
	tmp->command_completions = hwctx->priv->completed;

	buf = u64_to_user_ptr(get_info_args->buffer);

	if (copy_to_user(buf, tmp, sizeof(*tmp)))
		return -EFAULT;

	get_info_args->buffer += sizeof(*tmp);
	get_info_args->buffer_size -= sizeof(*tmp);

	return 0;
}

static int aie2_get_hwctx_status(struct amdxdna_client *client,
				 struct amdxdna_drm_get_info *args)
{
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_drm_get_info info_args;
	struct amdxdna_client *tmp_client;
	int ret = 0;

	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));

	info_args.buffer = args->buffer;
	info_args.buffer_size = args->buffer_size;

	list_for_each_entry(tmp_client, &xdna->client_list, node) {
		ret = amdxdna_hwctx_walk(tmp_client, &info_args, aie2_hwctx_status_cb);
		if (ret)
			break;
	}

	args->buffer_size = (u32)(info_args.buffer - args->buffer);
	return ret;
}

static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_info *args)
{
	struct amdxdna_dev *xdna = client->xdna;
	int ret, idx;

	if (!drm_dev_enter(&xdna->ddev, &idx))
		return -ENODEV;

	switch (args->param) {
	case DRM_AMDXDNA_QUERY_AIE_STATUS:
		ret = aie2_get_aie_status(client, args);
		break;
	case DRM_AMDXDNA_QUERY_AIE_METADATA:
		ret = aie2_get_aie_metadata(client, args);
		break;
	case DRM_AMDXDNA_QUERY_AIE_VERSION:
		ret = aie2_get_aie_version(client, args);
		break;
	case DRM_AMDXDNA_QUERY_CLOCK_METADATA:
		ret = aie2_get_clock_metadata(client, args);
		break;
	case DRM_AMDXDNA_QUERY_HW_CONTEXTS:
		ret = aie2_get_hwctx_status(client, args);
		break;
	case DRM_AMDXDNA_QUERY_FIRMWARE_VERSION:
		ret = aie2_get_firmware_version(client, args);
		break;
	case DRM_AMDXDNA_GET_POWER_MODE:
		ret = aie2_get_power_mode(client, args);
		break;
	default:
		XDNA_ERR(xdna, "Unsupported request parameter %u", args->param);
		ret = -EOPNOTSUPP;
	}
	XDNA_DBG(xdna, "Got param %d", args->param);

	drm_dev_exit(idx);
	return ret;
}

static int aie2_set_power_mode(struct amdxdna_client *client,
			       struct amdxdna_drm_set_state *args)
{
	struct amdxdna_drm_set_power_mode power_state;
	enum amdxdna_power_mode_type power_mode;
	struct amdxdna_dev *xdna = client->xdna;

	if (copy_from_user(&power_state, u64_to_user_ptr(args->buffer),
			   sizeof(power_state))) {
		XDNA_ERR(xdna, "Failed to copy power mode request into kernel");
		return -EFAULT;
	}

	if (XDNA_MBZ_DBG(xdna, power_state.pad, sizeof(power_state.pad)))
		return -EINVAL;

	power_mode = power_state.power_mode;
	if (power_mode > POWER_MODE_TURBO) {
		XDNA_ERR(xdna, "Invalid power mode %d", power_mode);
		return -EINVAL;
	}

	return aie2_pm_set_mode(xdna->dev_handle, power_mode);
}

static int aie2_set_state(struct amdxdna_client *client,
			  struct amdxdna_drm_set_state *args)
{
	struct amdxdna_dev *xdna = client->xdna;
	int ret, idx;

	if (!drm_dev_enter(&xdna->ddev, &idx))
		return -ENODEV;

	switch (args->param) {
	case DRM_AMDXDNA_SET_POWER_MODE:
		ret = aie2_set_power_mode(client, args);
		break;
	default:
		XDNA_ERR(xdna, "Unsupported request parameter %u", args->param);
		ret = -EOPNOTSUPP;
		break;
	}

	drm_dev_exit(idx);
	return ret;
}

const struct amdxdna_dev_ops aie2_ops = {
	.init           = aie2_init,
	.fini           = aie2_fini,
	.resume         = aie2_hw_resume,
	.suspend        = aie2_hw_suspend,
	.get_aie_info   = aie2_get_info,
	.set_aie_state  = aie2_set_state,
	.hwctx_init     = aie2_hwctx_init,
	.hwctx_fini     = aie2_hwctx_fini,
	.hwctx_config   = aie2_hwctx_config,
	.cmd_submit     = aie2_cmd_submit,
	.hmm_invalidate = aie2_hmm_invalidate,
};