xref: /linux/drivers/accel/amdxdna/aie2_pci.c (revision face6a3615a649456eb4549f6d474221d877d604)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
4  */
5 
6 #include <drm/amdxdna_accel.h>
7 #include <drm/drm_device.h>
8 #include <drm/drm_drv.h>
9 #include <drm/drm_gem_shmem_helper.h>
10 #include <drm/drm_managed.h>
11 #include <drm/drm_print.h>
12 #include <drm/gpu_scheduler.h>
13 #include <linux/cleanup.h>
14 #include <linux/errno.h>
15 #include <linux/firmware.h>
16 #include <linux/iommu.h>
17 #include <linux/iopoll.h>
18 #include <linux/pci.h>
19 #include <linux/xarray.h>
20 
21 #include "aie2_msg_priv.h"
22 #include "aie2_pci.h"
23 #include "aie2_solver.h"
24 #include "amdxdna_ctx.h"
25 #include "amdxdna_gem.h"
26 #include "amdxdna_mailbox.h"
27 #include "amdxdna_pci_drv.h"
28 #include "amdxdna_pm.h"
29 
30 static int aie2_max_col = XRS_MAX_COL;
31 module_param(aie2_max_col, uint, 0600);
32 MODULE_PARM_DESC(aie2_max_col, "Maximum number of columns that can be used");
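/*
 * The limit can be set at load time, e.g. (assuming the driver is loaded
 * as the amdxdna module):
 *
 *   modprobe amdxdna aie2_max_col=4
 *
 * The effective column count is still clamped to what the AIE metadata
 * reports, see aie2_mgmt_fw_query().
 */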
33 
34 /*
35  * The management mailbox channel is allocated by firmware.
36  * The related register and ring buffer information is on SRAM BAR.
37  * This struct is the register layout.
38  */
39 #define MGMT_MBOX_MAGIC 0x55504e5f /* _NPU */
40 struct mgmt_mbox_chann_info {
41 	__u32	x2i_tail;
42 	__u32	x2i_head;
43 	__u32	x2i_buf;
44 	__u32	x2i_buf_sz;
45 	__u32	i2x_tail;
46 	__u32	i2x_head;
47 	__u32	i2x_buf;
48 	__u32	i2x_buf_sz;
49 	__u32	magic;
50 	__u32	msi_id;
51 	__u32	prot_major;
52 	__u32	prot_minor;
53 	__u32	rsvd[4];
54 };
55 
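/*
 * Example with illustrative version numbers: if the driver was built
 * against protocol 6.1 (priv->protocol_major = 6, protocol_minor = 1),
 * firmware reporting 5.x or 7.x fails the major check, firmware 6.0 fails
 * the minor check because the driver relies on 6.1 operations, and
 * firmware 6.2 is accepted.
 */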
56 static int aie2_check_protocol(struct amdxdna_dev_hdl *ndev, u32 fw_major, u32 fw_minor)
57 {
58 	struct amdxdna_dev *xdna = ndev->xdna;
59 
60 	/*
61 	 * The mailbox behavior supported by the driver is defined by
62 	 * ndev->priv->protocol_major and protocol_minor.
63 	 *
64 	 * When protocol_major and fw_major differ, the driver and firmware
65 	 * are incompatible.
66 	 */
67 	if (ndev->priv->protocol_major != fw_major) {
68 		XDNA_ERR(xdna, "Incompatible firmware protocol major %d minor %d",
69 			 fw_major, fw_minor);
70 		return -EINVAL;
71 	}
72 
73 	/*
74 	 * When protocol_minor is greater than fw_minor, it means the driver
75 	 * relies on operations the installed firmware does not support.
76 	 */
77 	if (ndev->priv->protocol_minor > fw_minor) {
78 		XDNA_ERR(xdna, "Firmware minor version smaller than supported");
79 		return -EINVAL;
80 	}
81 	return 0;
82 }
83 
84 static void aie2_dump_chann_info_debug(struct amdxdna_dev_hdl *ndev)
85 {
86 	struct amdxdna_dev *xdna = ndev->xdna;
87 
88 	XDNA_DBG(xdna, "i2x tail    0x%x", ndev->mgmt_i2x.mb_tail_ptr_reg);
89 	XDNA_DBG(xdna, "i2x head    0x%x", ndev->mgmt_i2x.mb_head_ptr_reg);
90 	XDNA_DBG(xdna, "i2x ringbuf 0x%x", ndev->mgmt_i2x.rb_start_addr);
91 	XDNA_DBG(xdna, "i2x rsize   0x%x", ndev->mgmt_i2x.rb_size);
92 	XDNA_DBG(xdna, "x2i tail    0x%x", ndev->mgmt_x2i.mb_tail_ptr_reg);
93 	XDNA_DBG(xdna, "x2i head    0x%x", ndev->mgmt_x2i.mb_head_ptr_reg);
94 	XDNA_DBG(xdna, "x2i ringbuf 0x%x", ndev->mgmt_x2i.rb_start_addr);
95 	XDNA_DBG(xdna, "x2i rsize   0x%x", ndev->mgmt_x2i.rb_size);
96 	XDNA_DBG(xdna, "x2i chann index 0x%x", ndev->mgmt_chan_idx);
97 	XDNA_DBG(xdna, "mailbox protocol major 0x%x", ndev->mgmt_prot_major);
98 	XDNA_DBG(xdna, "mailbox protocol minor 0x%x", ndev->mgmt_prot_minor);
99 }
100 
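/*
 * Wait for the firmware to publish a mgmt_mbox_chann_info block in the
 * SRAM BAR, copy it out word by word, validate the magic and protocol
 * version, and fill in the management channel resources (mgmt_i2x,
 * mgmt_x2i and the MSI index) that aie2_hw_start() uses to create the
 * management mailbox channel.
 */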
101 static int aie2_get_mgmt_chann_info(struct amdxdna_dev_hdl *ndev)
102 {
103 	struct mgmt_mbox_chann_info info_regs;
104 	struct xdna_mailbox_chann_res *i2x;
105 	struct xdna_mailbox_chann_res *x2i;
106 	u32 addr, off;
107 	u32 *reg;
108 	int ret;
109 	int i;
110 
111 	/*
112 	 * Once firmware is alive, it will write management channel
113 	 * information in SRAM BAR and write the address of that information
114 	 * at FW_ALIVE_OFF offset in SRAM BAR.
115 	 *
116 	 * Reading a non-zero value from FW_ALIVE_OFF implies that the
117 	 * firmware is alive.
118 	 */
119 	ret = readx_poll_timeout(readl, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF),
120 				 addr, addr, AIE2_INTERVAL, AIE2_TIMEOUT);
121 	if (ret || !addr)
122 		return -ETIME;
123 
124 	off = AIE2_SRAM_OFF(ndev, addr);
125 	reg = (u32 *)&info_regs;
126 	for (i = 0; i < sizeof(info_regs) / sizeof(u32); i++)
127 		reg[i] = readl(ndev->sram_base + off + i * sizeof(u32));
128 
129 	if (info_regs.magic != MGMT_MBOX_MAGIC) {
130 		XDNA_ERR(ndev->xdna, "Invalid mbox magic 0x%x", info_regs.magic);
131 		ret = -EINVAL;
132 		goto done;
133 	}
134 
135 	i2x = &ndev->mgmt_i2x;
136 	x2i = &ndev->mgmt_x2i;
137 
138 	i2x->mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.i2x_head);
139 	i2x->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.i2x_tail);
140 	i2x->rb_start_addr   = AIE2_SRAM_OFF(ndev, info_regs.i2x_buf);
141 	i2x->rb_size         = info_regs.i2x_buf_sz;
142 
143 	x2i->mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_head);
144 	x2i->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_tail);
145 	x2i->rb_start_addr   = AIE2_SRAM_OFF(ndev, info_regs.x2i_buf);
146 	x2i->rb_size         = info_regs.x2i_buf_sz;
147 
148 	ndev->mgmt_chan_idx  = info_regs.msi_id;
149 	ndev->mgmt_prot_major = info_regs.prot_major;
150 	ndev->mgmt_prot_minor = info_regs.prot_minor;
151 
152 	ret = aie2_check_protocol(ndev, ndev->mgmt_prot_major, ndev->mgmt_prot_minor);
153 
154 done:
155 	aie2_dump_chann_info_debug(ndev);
156 
157 	/* Must clear address at FW_ALIVE_OFF */
158 	writel(0, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF));
159 
160 	return ret;
161 }
162 
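/*
 * Apply every runtime config entry of the given category from the
 * device-specific table. A non-NULL @val overrides the table default for
 * each matching entry.
 */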
163 int aie2_runtime_cfg(struct amdxdna_dev_hdl *ndev,
164 		     enum rt_config_category category, u32 *val)
165 {
166 	const struct rt_config *cfg;
167 	u32 value;
168 	int ret;
169 
170 	for (cfg = ndev->priv->rt_config; cfg->type; cfg++) {
171 		if (cfg->category != category)
172 			continue;
173 
174 		value = val ? *val : cfg->value;
175 		ret = aie2_set_runtime_cfg(ndev, cfg->type, value);
176 		if (ret) {
177 			XDNA_ERR(ndev->xdna, "Set type %d value %d failed",
178 				 cfg->type, value);
179 			return ret;
180 		}
181 	}
182 
183 	return 0;
184 }
185 
186 static int aie2_xdna_reset(struct amdxdna_dev_hdl *ndev)
187 {
188 	int ret;
189 
190 	ret = aie2_suspend_fw(ndev);
191 	if (ret) {
192 		XDNA_ERR(ndev->xdna, "Suspend firmware failed");
193 		return ret;
194 	}
195 
196 	ret = aie2_resume_fw(ndev);
197 	if (ret) {
198 		XDNA_ERR(ndev->xdna, "Resume firmware failed");
199 		return ret;
200 	}
201 
202 	return 0;
203 }
204 
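/*
 * Bring the management firmware to a known state: push the
 * AIE2_RT_CFG_INIT runtime configuration, assign PASID 0 for management
 * and reset the firmware with a suspend/resume cycle.
 */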
205 static int aie2_mgmt_fw_init(struct amdxdna_dev_hdl *ndev)
206 {
207 	int ret;
208 
209 	ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_INIT, NULL);
210 	if (ret) {
211 		XDNA_ERR(ndev->xdna, "Runtime config failed");
212 		return ret;
213 	}
214 
215 	ret = aie2_assign_mgmt_pasid(ndev, 0);
216 	if (ret) {
217 		XDNA_ERR(ndev->xdna, "Cannot assign PASID");
218 		return ret;
219 	}
220 
221 	ret = aie2_xdna_reset(ndev);
222 	if (ret) {
223 		XDNA_ERR(ndev->xdna, "Reset firmware failed");
224 		return ret;
225 	}
226 
227 	return 0;
228 }
229 
230 static int aie2_mgmt_fw_query(struct amdxdna_dev_hdl *ndev)
231 {
232 	int ret;
233 
234 	ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver);
235 	if (ret) {
236 		XDNA_ERR(ndev->xdna, "query firmware version failed");
237 		return ret;
238 	}
239 
240 	ret = aie2_query_aie_version(ndev, &ndev->version);
241 	if (ret) {
242 		XDNA_ERR(ndev->xdna, "Query AIE version failed");
243 		return ret;
244 	}
245 
246 	ret = aie2_query_aie_metadata(ndev, &ndev->metadata);
247 	if (ret) {
248 		XDNA_ERR(ndev->xdna, "Query AIE metadata failed");
249 		return ret;
250 	}
251 
252 	ndev->total_col = min(aie2_max_col, ndev->metadata.cols);
253 
254 	return 0;
255 }
256 
257 static void aie2_mgmt_fw_fini(struct amdxdna_dev_hdl *ndev)
258 {
259 	if (aie2_suspend_fw(ndev))
260 		XDNA_ERR(ndev->xdna, "Suspend firmware failed");
261 	XDNA_DBG(ndev->xdna, "Firmware suspended");
262 }
263 
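/*
 * Resolver (XRS) callbacks: the solver picks a column partition for a
 * hardware context, the load callback records the partition and creates
 * the firmware context, and the unload callback destroys it again.
 */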
264 static int aie2_xrs_load(void *cb_arg, struct xrs_action_load *action)
265 {
266 	struct amdxdna_hwctx *hwctx = cb_arg;
267 	struct amdxdna_dev *xdna;
268 	int ret;
269 
270 	xdna = hwctx->client->xdna;
271 
272 	hwctx->start_col = action->part.start_col;
273 	hwctx->num_col = action->part.ncols;
274 	ret = aie2_create_context(xdna->dev_handle, hwctx);
275 	if (ret)
276 		XDNA_ERR(xdna, "create context failed, ret %d", ret);
277 
278 	return ret;
279 }
280 
281 static int aie2_xrs_unload(void *cb_arg)
282 {
283 	struct amdxdna_hwctx *hwctx = cb_arg;
284 	struct amdxdna_dev *xdna;
285 	int ret;
286 
287 	xdna = hwctx->client->xdna;
288 
289 	ret = aie2_destroy_context(xdna->dev_handle, hwctx);
290 	if (ret)
291 		XDNA_ERR(xdna, "destroy context failed, ret %d", ret);
292 
293 	return ret;
294 }
295 
296 static int aie2_xrs_set_dft_dpm_level(struct drm_device *ddev, u32 dpm_level)
297 {
298 	struct amdxdna_dev *xdna = to_xdna_dev(ddev);
299 	struct amdxdna_dev_hdl *ndev;
300 
301 	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
302 
303 	ndev = xdna->dev_handle;
304 	ndev->dft_dpm_level = dpm_level;
305 	if (ndev->pw_mode != POWER_MODE_DEFAULT || ndev->dpm_level == dpm_level)
306 		return 0;
307 
308 	return ndev->priv->hw_ops.set_dpm(ndev, dpm_level);
309 }
310 
311 static struct xrs_action_ops aie2_xrs_actions = {
312 	.load = aie2_xrs_load,
313 	.unload = aie2_xrs_unload,
314 	.set_dft_dpm_level = aie2_xrs_set_dft_dpm_level,
315 };
316 
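/*
 * Quiesce the firmware, tear down the management mailbox channel, stop
 * the PSP and SMU, free the async error events and disable the PCI
 * device.
 */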
317 static void aie2_hw_stop(struct amdxdna_dev *xdna)
318 {
319 	struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
320 	struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
321 
322 	if (ndev->dev_status <= AIE2_DEV_INIT) {
323 		XDNA_ERR(xdna, "device is already stopped");
324 		return;
325 	}
326 
327 	aie2_mgmt_fw_fini(ndev);
328 	xdna_mailbox_stop_channel(ndev->mgmt_chann);
329 	xdna_mailbox_destroy_channel(ndev->mgmt_chann);
330 	ndev->mgmt_chann = NULL;
331 	drmm_kfree(&xdna->ddev, ndev->mbox);
332 	ndev->mbox = NULL;
333 	aie2_psp_stop(ndev->psp_hdl);
334 	aie2_smu_fini(ndev);
335 	aie2_error_async_events_free(ndev);
336 	pci_disable_device(pdev);
337 
338 	ndev->dev_status = AIE2_DEV_INIT;
339 }
340 
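/*
 * Bring up the device: enable PCI, start the SMU and PSP, wait for the
 * firmware handshake, create the management mailbox channel on the MSI-X
 * vector the firmware selected, set up power management, then initialize
 * and query the management firmware and allocate the async error events.
 */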
341 static int aie2_hw_start(struct amdxdna_dev *xdna)
342 {
343 	struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
344 	struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
345 	struct xdna_mailbox_res mbox_res;
346 	u32 xdna_mailbox_intr_reg;
347 	int mgmt_mb_irq, ret;
348 
349 	if (ndev->dev_status >= AIE2_DEV_START) {
350 		XDNA_INFO(xdna, "device is already started");
351 		return 0;
352 	}
353 
354 	ret = pci_enable_device(pdev);
355 	if (ret) {
356 		XDNA_ERR(xdna, "failed to enable device, ret %d", ret);
357 		return ret;
358 	}
359 	pci_set_master(pdev);
360 
361 	ret = aie2_smu_init(ndev);
362 	if (ret) {
363 		XDNA_ERR(xdna, "failed to init smu, ret %d", ret);
364 		goto disable_dev;
365 	}
366 
367 	ret = aie2_psp_start(ndev->psp_hdl);
368 	if (ret) {
369 		XDNA_ERR(xdna, "failed to start psp, ret %d", ret);
370 		goto fini_smu;
371 	}
372 
373 	ret = aie2_get_mgmt_chann_info(ndev);
374 	if (ret) {
375 		XDNA_ERR(xdna, "firmware is not alive");
376 		goto stop_psp;
377 	}
378 
379 	mbox_res.ringbuf_base = ndev->sram_base;
380 	mbox_res.ringbuf_size = pci_resource_len(pdev, xdna->dev_info->sram_bar);
381 	mbox_res.mbox_base = ndev->mbox_base;
382 	mbox_res.mbox_size = MBOX_SIZE(ndev);
383 	mbox_res.name = "xdna_mailbox";
384 	ndev->mbox = xdnam_mailbox_create(&xdna->ddev, &mbox_res);
385 	if (!ndev->mbox) {
386 		XDNA_ERR(xdna, "failed to create mailbox device");
387 		ret = -ENODEV;
388 		goto stop_psp;
389 	}
390 
391 	mgmt_mb_irq = pci_irq_vector(pdev, ndev->mgmt_chan_idx);
392 	if (mgmt_mb_irq < 0) {
393 		ret = mgmt_mb_irq;
394 		XDNA_ERR(xdna, "failed to alloc irq vector, ret %d", ret);
395 		goto stop_psp;
396 	}
397 
398 	xdna_mailbox_intr_reg = ndev->mgmt_i2x.mb_head_ptr_reg + 4;
399 	ndev->mgmt_chann = xdna_mailbox_create_channel(ndev->mbox,
400 						       &ndev->mgmt_x2i,
401 						       &ndev->mgmt_i2x,
402 						       xdna_mailbox_intr_reg,
403 						       mgmt_mb_irq);
404 	if (!ndev->mgmt_chann) {
405 		XDNA_ERR(xdna, "failed to create management mailbox channel");
406 		ret = -EINVAL;
407 		goto stop_psp;
408 	}
409 
410 	ret = aie2_pm_init(ndev);
411 	if (ret) {
412 		XDNA_ERR(xdna, "failed to init pm, ret %d", ret);
413 		goto destroy_mgmt_chann;
414 	}
415 
416 	ret = aie2_mgmt_fw_init(ndev);
417 	if (ret) {
418 		XDNA_ERR(xdna, "failed to init mgmt firmware, ret %d", ret);
419 		goto destroy_mgmt_chann;
420 	}
421 
422 	ret = aie2_mgmt_fw_query(ndev);
423 	if (ret) {
424 		XDNA_ERR(xdna, "failed to query fw, ret %d", ret);
425 		goto destroy_mgmt_chann;
426 	}
427 
428 	ret = aie2_error_async_events_alloc(ndev);
429 	if (ret) {
430 		XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret);
431 		goto destroy_mgmt_chann;
432 	}
433 
434 	ndev->dev_status = AIE2_DEV_START;
435 
436 	return 0;
437 
438 destroy_mgmt_chann:
439 	xdna_mailbox_stop_channel(ndev->mgmt_chann);
440 	xdna_mailbox_destroy_channel(ndev->mgmt_chann);
441 stop_psp:
442 	aie2_psp_stop(ndev->psp_hdl);
443 fini_smu:
444 	aie2_smu_fini(ndev);
445 disable_dev:
446 	pci_disable_device(pdev);
447 
448 	return ret;
449 }
450 
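/*
 * Suspend quiesces every client's hardware contexts under dev_lock and
 * then stops the hardware; resume restarts the hardware first and then
 * resumes each client's contexts.
 */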
451 static int aie2_hw_suspend(struct amdxdna_dev *xdna)
452 {
453 	struct amdxdna_client *client;
454 
455 	guard(mutex)(&xdna->dev_lock);
456 	list_for_each_entry(client, &xdna->client_list, node)
457 		aie2_hwctx_suspend(client);
458 
459 	aie2_hw_stop(xdna);
460 
461 	return 0;
462 }
463 
464 static int aie2_hw_resume(struct amdxdna_dev *xdna)
465 {
466 	struct amdxdna_client *client;
467 	int ret;
468 
469 	ret = aie2_hw_start(xdna);
470 	if (ret) {
471 		XDNA_ERR(xdna, "Start hardware failed, %d", ret);
472 		return ret;
473 	}
474 
475 	list_for_each_entry(client, &xdna->client_list, node) {
476 		ret = aie2_hwctx_resume(client);
477 		if (ret)
478 			break;
479 	}
480 
481 	return ret;
482 }
483 
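/*
 * Device init: request the firmware image, map the PSP/SRAM/SMU/mailbox
 * BARs, set up DMA and MSI-X, hand the firmware image to the PSP, start
 * the hardware, register the column resolver and finally set up power
 * management.
 */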
484 static int aie2_init(struct amdxdna_dev *xdna)
485 {
486 	struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
487 	void __iomem *tbl[PCI_NUM_RESOURCES] = {0};
488 	struct init_config xrs_cfg = { 0 };
489 	struct amdxdna_dev_hdl *ndev;
490 	struct psp_config psp_conf;
491 	const struct firmware *fw;
492 	unsigned long bars = 0;
493 	int i, nvec, ret;
494 
495 	ndev = drmm_kzalloc(&xdna->ddev, sizeof(*ndev), GFP_KERNEL);
496 	if (!ndev)
497 		return -ENOMEM;
498 
499 	ndev->priv = xdna->dev_info->dev_priv;
500 	ndev->xdna = xdna;
501 
502 	ret = request_firmware(&fw, ndev->priv->fw_path, &pdev->dev);
503 	if (ret) {
504 		XDNA_ERR(xdna, "failed to request_firmware %s, ret %d",
505 			 ndev->priv->fw_path, ret);
506 		return ret;
507 	}
508 
509 	ret = pcim_enable_device(pdev);
510 	if (ret) {
511 		XDNA_ERR(xdna, "pcim enable device failed, ret %d", ret);
512 		goto release_fw;
513 	}
514 
515 	for (i = 0; i < PSP_MAX_REGS; i++)
516 		set_bit(PSP_REG_BAR(ndev, i), &bars);
517 
518 	set_bit(xdna->dev_info->sram_bar, &bars);
519 	set_bit(xdna->dev_info->smu_bar, &bars);
520 	set_bit(xdna->dev_info->mbox_bar, &bars);
521 
522 	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
523 		if (!test_bit(i, &bars))
524 			continue;
525 		tbl[i] = pcim_iomap(pdev, i, 0);
526 		if (!tbl[i]) {
527 			XDNA_ERR(xdna, "map bar %d failed", i);
528 			ret = -ENOMEM;
529 			goto release_fw;
530 		}
531 	}
532 
533 	ndev->sram_base = tbl[xdna->dev_info->sram_bar];
534 	ndev->smu_base = tbl[xdna->dev_info->smu_bar];
535 	ndev->mbox_base = tbl[xdna->dev_info->mbox_bar];
536 
537 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
538 	if (ret) {
539 		XDNA_ERR(xdna, "Failed to set DMA mask: %d", ret);
540 		goto release_fw;
541 	}
542 
543 	nvec = pci_msix_vec_count(pdev);
544 	if (nvec <= 0) {
545 		XDNA_ERR(xdna, "failed to get number of interrupt vectors");
546 		ret = -EINVAL;
547 		goto release_fw;
548 	}
549 
550 	ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
551 	if (ret < 0) {
552 		XDNA_ERR(xdna, "failed to alloc irq vectors, ret %d", ret);
553 		goto release_fw;
554 	}
555 
556 	psp_conf.fw_size = fw->size;
557 	psp_conf.fw_buf = fw->data;
558 	for (i = 0; i < PSP_MAX_REGS; i++)
559 		psp_conf.psp_regs[i] = tbl[PSP_REG_BAR(ndev, i)] + PSP_REG_OFF(ndev, i);
560 	ndev->psp_hdl = aie2m_psp_create(&xdna->ddev, &psp_conf);
561 	if (!ndev->psp_hdl) {
562 		XDNA_ERR(xdna, "failed to create psp");
563 		ret = -ENOMEM;
564 		goto release_fw;
565 	}
566 	xdna->dev_handle = ndev;
567 
568 	ret = aie2_hw_start(xdna);
569 	if (ret) {
570 		XDNA_ERR(xdna, "start npu failed, ret %d", ret);
571 		goto release_fw;
572 	}
573 
574 	xrs_cfg.clk_list.num_levels = ndev->max_dpm_level + 1;
575 	for (i = 0; i < xrs_cfg.clk_list.num_levels; i++)
576 		xrs_cfg.clk_list.cu_clk_list[i] = ndev->priv->dpm_clk_tbl[i].hclk;
577 	xrs_cfg.sys_eff_factor = 1;
578 	xrs_cfg.ddev = &xdna->ddev;
579 	xrs_cfg.actions = &aie2_xrs_actions;
580 	xrs_cfg.total_col = ndev->total_col;
581 
582 	xdna->xrs_hdl = xrsm_init(&xrs_cfg);
583 	if (!xdna->xrs_hdl) {
584 		XDNA_ERR(xdna, "Initialize resolver failed");
585 		ret = -EINVAL;
586 		goto stop_hw;
587 	}
588 
589 	release_firmware(fw);
590 	amdxdna_pm_init(xdna);
591 	return 0;
592 
593 stop_hw:
594 	aie2_hw_stop(xdna);
595 release_fw:
596 	release_firmware(fw);
597 
598 	return ret;
599 }
600 
601 static void aie2_fini(struct amdxdna_dev *xdna)
602 {
603 	amdxdna_pm_fini(xdna);
604 	aie2_hw_stop(xdna);
605 }
606 
607 static int aie2_get_aie_status(struct amdxdna_client *client,
608 			       struct amdxdna_drm_get_info *args)
609 {
610 	struct amdxdna_drm_query_aie_status status;
611 	struct amdxdna_dev *xdna = client->xdna;
612 	struct amdxdna_dev_hdl *ndev;
613 	int ret;
614 
615 	ndev = xdna->dev_handle;
616 	if (copy_from_user(&status, u64_to_user_ptr(args->buffer), sizeof(status))) {
617 		XDNA_ERR(xdna, "Failed to copy AIE request into kernel");
618 		return -EFAULT;
619 	}
620 
621 	if (ndev->metadata.cols * ndev->metadata.size < status.buffer_size) {
622 		XDNA_ERR(xdna, "Invalid buffer size. Given Size: %u. Need Size: %u.",
623 			 status.buffer_size, ndev->metadata.cols * ndev->metadata.size);
624 		return -EINVAL;
625 	}
626 
627 	ret = aie2_query_status(ndev, u64_to_user_ptr(status.buffer),
628 				status.buffer_size, &status.cols_filled);
629 	if (ret) {
630 		XDNA_ERR(xdna, "Failed to get AIE status info. Ret: %d", ret);
631 		return ret;
632 	}
633 
634 	if (copy_to_user(u64_to_user_ptr(args->buffer), &status, sizeof(status))) {
635 		XDNA_ERR(xdna, "Failed to copy AIE request info to user space");
636 		return -EFAULT;
637 	}
638 
639 	return 0;
640 }
641 
642 static int aie2_get_aie_metadata(struct amdxdna_client *client,
643 				 struct amdxdna_drm_get_info *args)
644 {
645 	struct amdxdna_drm_query_aie_metadata *meta;
646 	struct amdxdna_dev *xdna = client->xdna;
647 	struct amdxdna_dev_hdl *ndev;
648 	int ret = 0;
649 
650 	ndev = xdna->dev_handle;
651 	meta = kzalloc(sizeof(*meta), GFP_KERNEL);
652 	if (!meta)
653 		return -ENOMEM;
654 
655 	meta->col_size = ndev->metadata.size;
656 	meta->cols = ndev->metadata.cols;
657 	meta->rows = ndev->metadata.rows;
658 
659 	meta->version.major = ndev->metadata.version.major;
660 	meta->version.minor = ndev->metadata.version.minor;
661 
662 	meta->core.row_count = ndev->metadata.core.row_count;
663 	meta->core.row_start = ndev->metadata.core.row_start;
664 	meta->core.dma_channel_count = ndev->metadata.core.dma_channel_count;
665 	meta->core.lock_count = ndev->metadata.core.lock_count;
666 	meta->core.event_reg_count = ndev->metadata.core.event_reg_count;
667 
668 	meta->mem.row_count = ndev->metadata.mem.row_count;
669 	meta->mem.row_start = ndev->metadata.mem.row_start;
670 	meta->mem.dma_channel_count = ndev->metadata.mem.dma_channel_count;
671 	meta->mem.lock_count = ndev->metadata.mem.lock_count;
672 	meta->mem.event_reg_count = ndev->metadata.mem.event_reg_count;
673 
674 	meta->shim.row_count = ndev->metadata.shim.row_count;
675 	meta->shim.row_start = ndev->metadata.shim.row_start;
676 	meta->shim.dma_channel_count = ndev->metadata.shim.dma_channel_count;
677 	meta->shim.lock_count = ndev->metadata.shim.lock_count;
678 	meta->shim.event_reg_count = ndev->metadata.shim.event_reg_count;
679 
680 	if (copy_to_user(u64_to_user_ptr(args->buffer), meta, sizeof(*meta)))
681 		ret = -EFAULT;
682 
683 	kfree(meta);
684 	return ret;
685 }
686 
687 static int aie2_get_aie_version(struct amdxdna_client *client,
688 				struct amdxdna_drm_get_info *args)
689 {
690 	struct amdxdna_drm_query_aie_version version;
691 	struct amdxdna_dev *xdna = client->xdna;
692 	struct amdxdna_dev_hdl *ndev;
693 
694 	ndev = xdna->dev_handle;
695 	version.major = ndev->version.major;
696 	version.minor = ndev->version.minor;
697 
698 	if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version)))
699 		return -EFAULT;
700 
701 	return 0;
702 }
703 
704 static int aie2_get_firmware_version(struct amdxdna_client *client,
705 				     struct amdxdna_drm_get_info *args)
706 {
707 	struct amdxdna_drm_query_firmware_version version;
708 	struct amdxdna_dev *xdna = client->xdna;
709 
710 	version.major = xdna->fw_ver.major;
711 	version.minor = xdna->fw_ver.minor;
712 	version.patch = xdna->fw_ver.sub;
713 	version.build = xdna->fw_ver.build;
714 
715 	if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version)))
716 		return -EFAULT;
717 
718 	return 0;
719 }
720 
721 static int aie2_get_power_mode(struct amdxdna_client *client,
722 			       struct amdxdna_drm_get_info *args)
723 {
724 	struct amdxdna_drm_get_power_mode mode = {};
725 	struct amdxdna_dev *xdna = client->xdna;
726 	struct amdxdna_dev_hdl *ndev;
727 
728 	ndev = xdna->dev_handle;
729 	mode.power_mode = ndev->pw_mode;
730 
731 	if (copy_to_user(u64_to_user_ptr(args->buffer), &mode, sizeof(mode)))
732 		return -EFAULT;
733 
734 	return 0;
735 }
736 
737 static int aie2_get_clock_metadata(struct amdxdna_client *client,
738 				   struct amdxdna_drm_get_info *args)
739 {
740 	struct amdxdna_drm_query_clock_metadata *clock;
741 	struct amdxdna_dev *xdna = client->xdna;
742 	struct amdxdna_dev_hdl *ndev;
743 	int ret = 0;
744 
745 	ndev = xdna->dev_handle;
746 	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
747 	if (!clock)
748 		return -ENOMEM;
749 
750 	snprintf(clock->mp_npu_clock.name, sizeof(clock->mp_npu_clock.name),
751 		 "MP-NPU Clock");
752 	clock->mp_npu_clock.freq_mhz = ndev->npuclk_freq;
753 	snprintf(clock->h_clock.name, sizeof(clock->h_clock.name), "H Clock");
754 	clock->h_clock.freq_mhz = ndev->hclk_freq;
755 
756 	if (copy_to_user(u64_to_user_ptr(args->buffer), clock, sizeof(*clock)))
757 		ret = -EFAULT;
758 
759 	kfree(clock);
760 	return ret;
761 }
762 
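/*
 * amdxdna_hwctx_walk() callback: fill one amdxdna_drm_hwctx_entry per
 * hardware context and advance the user buffer. At most element_size
 * bytes are copied, so userspace built against a smaller entry struct
 * still works.
 */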
763 static int aie2_hwctx_status_cb(struct amdxdna_hwctx *hwctx, void *arg)
764 {
765 	struct amdxdna_drm_hwctx_entry *tmp __free(kfree) = NULL;
766 	struct amdxdna_drm_get_array *array_args = arg;
767 	struct amdxdna_drm_hwctx_entry __user *buf;
768 	u32 size;
769 
770 	if (!array_args->num_element)
771 		return -EINVAL;
772 
773 	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
774 	if (!tmp)
775 		return -ENOMEM;
776 
777 	tmp->pid = hwctx->client->pid;
778 	tmp->context_id = hwctx->id;
779 	tmp->start_col = hwctx->start_col;
780 	tmp->num_col = hwctx->num_col;
781 	tmp->command_submissions = hwctx->priv->seq;
782 	tmp->command_completions = hwctx->priv->completed;
783 	tmp->pasid = hwctx->client->pasid;
784 	tmp->priority = hwctx->qos.priority;
785 	tmp->gops = hwctx->qos.gops;
786 	tmp->fps = hwctx->qos.fps;
787 	tmp->dma_bandwidth = hwctx->qos.dma_bandwidth;
788 	tmp->latency = hwctx->qos.latency;
789 	tmp->frame_exec_time = hwctx->qos.frame_exec_time;
790 	tmp->state = AMDXDNA_HWCTX_STATE_ACTIVE;
791 
792 	buf = u64_to_user_ptr(array_args->buffer);
793 	size = min(sizeof(*tmp), array_args->element_size);
794 
795 	if (copy_to_user(buf, tmp, size))
796 		return -EFAULT;
797 
798 	array_args->buffer += size;
799 	array_args->num_element--;
800 
801 	return 0;
802 }
803 
804 static int aie2_get_hwctx_status(struct amdxdna_client *client,
805 				 struct amdxdna_drm_get_info *args)
806 {
807 	struct amdxdna_drm_get_array array_args;
808 	struct amdxdna_dev *xdna = client->xdna;
809 	struct amdxdna_client *tmp_client;
810 	int ret;
811 
812 	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
813 
814 	array_args.element_size = sizeof(struct amdxdna_drm_query_hwctx);
815 	array_args.buffer = args->buffer;
816 	array_args.num_element = args->buffer_size / array_args.element_size;
817 	list_for_each_entry(tmp_client, &xdna->client_list, node) {
818 		ret = amdxdna_hwctx_walk(tmp_client, &array_args,
819 					 aie2_hwctx_status_cb);
820 		if (ret)
821 			break;
822 	}
823 
824 	args->buffer_size -= (u32)(array_args.buffer - args->buffer);
825 	return 0;
826 }
827 
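/*
 * GET_INFO ioctl backend: hold a drm_dev_enter reference and a runtime
 * PM reference while dispatching on the query parameter.
 */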
828 static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_info *args)
829 {
830 	struct amdxdna_dev *xdna = client->xdna;
831 	int ret, idx;
832 
833 	if (!drm_dev_enter(&xdna->ddev, &idx))
834 		return -ENODEV;
835 
836 	ret = amdxdna_pm_resume_get(xdna);
837 	if (ret)
838 		goto dev_exit;
839 
840 	switch (args->param) {
841 	case DRM_AMDXDNA_QUERY_AIE_STATUS:
842 		ret = aie2_get_aie_status(client, args);
843 		break;
844 	case DRM_AMDXDNA_QUERY_AIE_METADATA:
845 		ret = aie2_get_aie_metadata(client, args);
846 		break;
847 	case DRM_AMDXDNA_QUERY_AIE_VERSION:
848 		ret = aie2_get_aie_version(client, args);
849 		break;
850 	case DRM_AMDXDNA_QUERY_CLOCK_METADATA:
851 		ret = aie2_get_clock_metadata(client, args);
852 		break;
853 	case DRM_AMDXDNA_QUERY_HW_CONTEXTS:
854 		ret = aie2_get_hwctx_status(client, args);
855 		break;
856 	case DRM_AMDXDNA_QUERY_FIRMWARE_VERSION:
857 		ret = aie2_get_firmware_version(client, args);
858 		break;
859 	case DRM_AMDXDNA_GET_POWER_MODE:
860 		ret = aie2_get_power_mode(client, args);
861 		break;
862 	default:
863 		XDNA_ERR(xdna, "Unsupported request parameter %u", args->param);
864 		ret = -EOPNOTSUPP;
865 	}
866 
867 	amdxdna_pm_suspend_put(xdna);
868 	XDNA_DBG(xdna, "Got param %d", args->param);
869 
870 dev_exit:
871 	drm_dev_exit(idx);
872 	return ret;
873 }
874 
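/*
 * DRM_AMDXDNA_HW_CONTEXT_ALL: walk every client's hardware contexts into
 * the caller's array. The element size is negotiated down to the
 * kernel's entry size, and element_size/num_element are written back so
 * userspace knows how much data it actually received.
 */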
875 static int aie2_query_ctx_status_array(struct amdxdna_client *client,
876 				       struct amdxdna_drm_get_array *args)
877 {
878 	struct amdxdna_drm_get_array array_args;
879 	struct amdxdna_dev *xdna = client->xdna;
880 	struct amdxdna_client *tmp_client;
881 	int ret;
882 
883 	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
884 
885 	if (args->element_size > SZ_4K || args->num_element > SZ_1K) {
886 		XDNA_DBG(xdna, "Invalid element size %u or number of elements %u",
887 			 args->element_size, args->num_element);
888 		return -EINVAL;
889 	}
890 
891 	array_args.element_size = min(args->element_size,
892 				      sizeof(struct amdxdna_drm_hwctx_entry));
893 	array_args.buffer = args->buffer;
894 	array_args.num_element = args->num_element * args->element_size /
895 				array_args.element_size;
896 	list_for_each_entry(tmp_client, &xdna->client_list, node) {
897 		ret = amdxdna_hwctx_walk(tmp_client, &array_args,
898 					 aie2_hwctx_status_cb);
899 		if (ret)
900 			break;
901 	}
902 
903 	args->element_size = array_args.element_size;
904 	args->num_element = (u32)((array_args.buffer - args->buffer) /
905 				  args->element_size);
906 
907 	return 0;
908 }
909 
910 static int aie2_get_array(struct amdxdna_client *client,
911 			  struct amdxdna_drm_get_array *args)
912 {
913 	struct amdxdna_dev *xdna = client->xdna;
914 	int ret, idx;
915 
916 	if (!drm_dev_enter(&xdna->ddev, &idx))
917 		return -ENODEV;
918 
919 	ret = amdxdna_pm_resume_get(xdna);
920 	if (ret)
921 		goto dev_exit;
922 
923 	switch (args->param) {
924 	case DRM_AMDXDNA_HW_CONTEXT_ALL:
925 		ret = aie2_query_ctx_status_array(client, args);
926 		break;
927 	case DRM_AMDXDNA_HW_LAST_ASYNC_ERR:
928 		ret = aie2_get_array_async_error(xdna->dev_handle, args);
929 		break;
930 	default:
931 		XDNA_ERR(xdna, "Unsupported request parameter %u", args->param);
932 		ret = -EOPNOTSUPP;
933 	}
934 
935 	amdxdna_pm_suspend_put(xdna);
936 	XDNA_DBG(xdna, "Got param %d", args->param);
937 
938 dev_exit:
939 	drm_dev_exit(idx);
940 	return ret;
941 }
942 
943 static int aie2_set_power_mode(struct amdxdna_client *client,
944 			       struct amdxdna_drm_set_state *args)
945 {
946 	struct amdxdna_drm_set_power_mode power_state;
947 	enum amdxdna_power_mode_type power_mode;
948 	struct amdxdna_dev *xdna = client->xdna;
949 
950 	if (copy_from_user(&power_state, u64_to_user_ptr(args->buffer),
951 			   sizeof(power_state))) {
952 		XDNA_ERR(xdna, "Failed to copy power mode request into kernel");
953 		return -EFAULT;
954 	}
955 
956 	if (XDNA_MBZ_DBG(xdna, power_state.pad, sizeof(power_state.pad)))
957 		return -EINVAL;
958 
959 	power_mode = power_state.power_mode;
960 	if (power_mode > POWER_MODE_TURBO) {
961 		XDNA_ERR(xdna, "Invalid power mode %d", power_mode);
962 		return -EINVAL;
963 	}
964 
965 	return aie2_pm_set_mode(xdna->dev_handle, power_mode);
966 }
967 
968 static int aie2_set_state(struct amdxdna_client *client,
969 			  struct amdxdna_drm_set_state *args)
970 {
971 	struct amdxdna_dev *xdna = client->xdna;
972 	int ret, idx;
973 
974 	if (!drm_dev_enter(&xdna->ddev, &idx))
975 		return -ENODEV;
976 
977 	ret = amdxdna_pm_resume_get(xdna);
978 	if (ret)
979 		goto dev_exit;
980 
981 	switch (args->param) {
982 	case DRM_AMDXDNA_SET_POWER_MODE:
983 		ret = aie2_set_power_mode(client, args);
984 		break;
985 	default:
986 		XDNA_ERR(xdna, "Unsupported request parameter %u", args->param);
987 		ret = -EOPNOTSUPP;
988 		break;
989 	}
990 
991 	amdxdna_pm_suspend_put(xdna);
992 dev_exit:
993 	drm_dev_exit(idx);
994 	return ret;
995 }
996 
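/* Entry points the common amdxdna driver core uses for AIE2-based NPUs. */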
997 const struct amdxdna_dev_ops aie2_ops = {
998 	.init = aie2_init,
999 	.fini = aie2_fini,
1000 	.resume = aie2_hw_resume,
1001 	.suspend = aie2_hw_suspend,
1002 	.get_aie_info = aie2_get_info,
1003 	.set_aie_state = aie2_set_state,
1004 	.hwctx_init = aie2_hwctx_init,
1005 	.hwctx_fini = aie2_hwctx_fini,
1006 	.hwctx_config = aie2_hwctx_config,
1007 	.hwctx_sync_debug_bo = aie2_hwctx_sync_debug_bo,
1008 	.cmd_submit = aie2_cmd_submit,
1009 	.hmm_invalidate = aie2_hmm_invalidate,
1010 	.get_array = aie2_get_array,
1011 };
1012