xref: /linux/drivers/accel/amdxdna/aie2_pci.c (revision dfb31428444b00824b161d8c0741d4868552813a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
4  */
5 
6 #include <drm/amdxdna_accel.h>
7 #include <drm/drm_device.h>
8 #include <drm/drm_drv.h>
9 #include <drm/drm_gem_shmem_helper.h>
10 #include <drm/drm_managed.h>
11 #include <drm/drm_print.h>
12 #include <drm/gpu_scheduler.h>
13 #include <linux/cleanup.h>
14 #include <linux/errno.h>
15 #include <linux/firmware.h>
16 #include <linux/iommu.h>
17 #include <linux/iopoll.h>
18 #include <linux/pci.h>
19 #include <linux/xarray.h>
20 #include <asm/hypervisor.h>
21 
22 #include "aie2_msg_priv.h"
23 #include "aie2_pci.h"
24 #include "aie2_solver.h"
25 #include "amdxdna_ctx.h"
26 #include "amdxdna_gem.h"
27 #include "amdxdna_mailbox.h"
28 #include "amdxdna_pci_drv.h"
29 #include "amdxdna_pm.h"
30 
/*
 * Caps how many AIE columns the driver uses; total_col is clamped to
 * min(aie2_max_col, metadata.cols) in aie2_mgmt_fw_query().
 * NOTE(review): declared int but exported as uint via module_param --
 * confirm the intended type.
 */
static int aie2_max_col = XRS_MAX_COL;
module_param(aie2_max_col, uint, 0600);
MODULE_PARM_DESC(aie2_max_col, "Maximum column could be used");
34 
/*
 * Candidate firmware image names, tried in order by aie2_init(); the
 * first image that firmware_request_nowarn() can load wins.
 * Const-qualified: the table is only ever read.
 */
static const char * const npu_fw[] = {
	"npu_7.sbin",
	"npu.sbin"
};
39 
40 /*
41  * The management mailbox channel is allocated by firmware.
42  * The related register and ring buffer information is on SRAM BAR.
43  * This struct is the register layout.
44  */
45 #define MGMT_MBOX_MAGIC 0x55504e5f /* _NPU */
46 struct mgmt_mbox_chann_info {
47 	__u32	x2i_tail;
48 	__u32	x2i_head;
49 	__u32	x2i_buf;
50 	__u32	x2i_buf_sz;
51 	__u32	i2x_tail;
52 	__u32	i2x_head;
53 	__u32	i2x_buf;
54 	__u32	i2x_buf_sz;
55 	__u32	magic;
56 	__u32	msi_id;
57 	__u32	prot_major;
58 	__u32	prot_minor;
59 	__u32	rsvd[4];
60 };
61 
/*
 * Validate the firmware mailbox protocol version against the driver's
 * support table and accumulate the feature bits of every matching entry.
 *
 * Return: 0 if at least one table entry covers (fw_major, fw_minor),
 * -EOPNOTSUPP otherwise.
 */
static int aie2_check_protocol(struct amdxdna_dev_hdl *ndev, u32 fw_major, u32 fw_minor)
{
	const struct aie2_fw_feature_tbl *entry;
	int ret = -EOPNOTSUPP;

	/*
	 * Scan the whole table: multiple entries may cover the same
	 * firmware version, and each contributes its feature bits.
	 */
	for (entry = ndev->priv->fw_feature_tbl; entry->major; entry++) {
		bool in_range;

		/* max_minor == 0 means "no upper bound" for this entry */
		in_range = entry->major == fw_major &&
			   fw_minor >= entry->min_minor &&
			   !(entry->max_minor > 0 && fw_minor > entry->max_minor);
		if (!in_range)
			continue;

		ndev->feature_mask |= entry->features;
		ret = 0;
	}

	return ret;
}
83 
/* Dump the discovered management mailbox channel layout at debug level. */
static void aie2_dump_chann_info_debug(struct amdxdna_dev_hdl *ndev)
{
	struct amdxdna_dev *xdna = ndev->xdna;

	XDNA_DBG(xdna, "i2x tail    0x%x", ndev->mgmt_i2x.mb_tail_ptr_reg);
	XDNA_DBG(xdna, "i2x head    0x%x", ndev->mgmt_i2x.mb_head_ptr_reg);
	XDNA_DBG(xdna, "i2x ringbuf 0x%x", ndev->mgmt_i2x.rb_start_addr);
	XDNA_DBG(xdna, "i2x rsize   0x%x", ndev->mgmt_i2x.rb_size);
	XDNA_DBG(xdna, "x2i tail    0x%x", ndev->mgmt_x2i.mb_tail_ptr_reg);
	XDNA_DBG(xdna, "x2i head    0x%x", ndev->mgmt_x2i.mb_head_ptr_reg);
	XDNA_DBG(xdna, "x2i ringbuf 0x%x", ndev->mgmt_x2i.rb_start_addr);
	XDNA_DBG(xdna, "x2i rsize   0x%x", ndev->mgmt_x2i.rb_size);
	XDNA_DBG(xdna, "x2i chann index 0x%x", ndev->mgmt_chan_idx);
	XDNA_DBG(xdna, "mailbox protocol major 0x%x", ndev->mgmt_prot_major);
	XDNA_DBG(xdna, "mailbox protocol minor 0x%x", ndev->mgmt_prot_minor);
}
100 
/*
 * Discover the firmware-allocated management mailbox channel.
 *
 * Polls FW_ALIVE_OFF in the SRAM BAR for a non-zero descriptor address,
 * copies the channel descriptor from SRAM, validates its magic, records
 * the i2x/x2i ring and register locations plus MSI index and protocol
 * version in @ndev, then checks protocol compatibility.
 *
 * Return: 0 on success, -ETIME if firmware never signalled alive,
 * -EINVAL on bad magic, or the error from aie2_check_protocol().
 */
static int aie2_get_mgmt_chann_info(struct amdxdna_dev_hdl *ndev)
{
	struct mgmt_mbox_chann_info info_regs;
	struct xdna_mailbox_chann_res *i2x;
	struct xdna_mailbox_chann_res *x2i;
	u32 addr, off;
	u32 *reg;
	int ret;
	int i;

	/*
	 * Once firmware is alive, it will write management channel
	 * information in SRAM BAR and write the address of that information
	 * at FW_ALIVE_OFF offset in SRMA BAR.
	 *
	 * Read a non-zero value from FW_ALIVE_OFF implies that firmware
	 * is alive.
	 */
	ret = readx_poll_timeout(readl, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF),
				 addr, addr, AIE2_INTERVAL, AIE2_TIMEOUT);
	if (ret || !addr)
		return -ETIME;

	/* Copy the descriptor out of SRAM one 32-bit word at a time */
	off = AIE2_SRAM_OFF(ndev, addr);
	reg = (u32 *)&info_regs;
	for (i = 0; i < sizeof(info_regs) / sizeof(u32); i++)
		reg[i] = readl(ndev->sram_base + off + i * sizeof(u32));

	if (info_regs.magic != MGMT_MBOX_MAGIC) {
		XDNA_ERR(ndev->xdna, "Invalid mbox magic 0x%x", info_regs.magic);
		ret = -EINVAL;
		goto done;
	}

	i2x = &ndev->mgmt_i2x;
	x2i = &ndev->mgmt_x2i;

	/* Translate firmware-relative offsets into BAR-relative ones */
	i2x->mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.i2x_head);
	i2x->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.i2x_tail);
	i2x->rb_start_addr   = AIE2_SRAM_OFF(ndev, info_regs.i2x_buf);
	i2x->rb_size         = info_regs.i2x_buf_sz;

	x2i->mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_head);
	x2i->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_tail);
	x2i->rb_start_addr   = AIE2_SRAM_OFF(ndev, info_regs.x2i_buf);
	x2i->rb_size         = info_regs.x2i_buf_sz;

	ndev->mgmt_chan_idx  = info_regs.msi_id;
	ndev->mgmt_prot_major = info_regs.prot_major;
	ndev->mgmt_prot_minor = info_regs.prot_minor;

	ret = aie2_check_protocol(ndev, ndev->mgmt_prot_major, ndev->mgmt_prot_minor);

done:
	aie2_dump_chann_info_debug(ndev);

	/* Must clear address at FW_ALIVE_OFF */
	writel(0, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF));

	return ret;
}
162 
/*
 * Apply all runtime configuration table entries of @category to firmware.
 *
 * @category: which class of entries to apply (e.g. AIE2_RT_CFG_INIT).
 * @val: optional override; when non-NULL it replaces each entry's
 *       default value.
 *
 * NOTE(review): entries with a non-zero feature_mask are skipped when
 * the firmware already advertises *all* of those features
 * (bitmap_subset) -- i.e. they look like fallbacks for older firmware.
 * Confirm this is the intended polarity.
 *
 * Return: 0 on success, or the first aie2_set_runtime_cfg() error.
 */
int aie2_runtime_cfg(struct amdxdna_dev_hdl *ndev,
		     enum rt_config_category category, u32 *val)
{
	const struct rt_config *cfg;
	u32 value;
	int ret;

	for (cfg = ndev->priv->rt_config; cfg->type; cfg++) {
		if (cfg->category != category)
			continue;

		if (cfg->feature_mask &&
		    bitmap_subset(&cfg->feature_mask, &ndev->feature_mask, AIE2_FEATURE_MAX))
			continue;

		value = val ? *val : cfg->value;
		ret = aie2_set_runtime_cfg(ndev, cfg->type, value);
		if (ret) {
			XDNA_ERR(ndev->xdna, "Set type %d value %d failed",
				 cfg->type, value);
			return ret;
		}
	}

	return 0;
}
189 
aie2_xdna_reset(struct amdxdna_dev_hdl * ndev)190 static int aie2_xdna_reset(struct amdxdna_dev_hdl *ndev)
191 {
192 	int ret;
193 
194 	ret = aie2_suspend_fw(ndev);
195 	if (ret) {
196 		XDNA_ERR(ndev->xdna, "Suspend firmware failed");
197 		return ret;
198 	}
199 
200 	ret = aie2_resume_fw(ndev);
201 	if (ret) {
202 		XDNA_ERR(ndev->xdna, "Resume firmware failed");
203 		return ret;
204 	}
205 
206 	return 0;
207 }
208 
/*
 * One-time firmware bring-up once the management channel is running:
 * push the INIT-category runtime configuration, bind PASID 0 to the
 * management context, then reset via suspend/resume.
 *
 * Return: 0 on success, or the error of the first failing step.
 */
static int aie2_mgmt_fw_init(struct amdxdna_dev_hdl *ndev)
{
	int ret;

	ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_INIT, NULL);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Runtime config failed");
		return ret;
	}

	ret = aie2_assign_mgmt_pasid(ndev, 0);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Can not assign PASID");
		return ret;
	}

	ret = aie2_xdna_reset(ndev);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Reset firmware failed");
		return ret;
	}

	return 0;
}
233 
/*
 * Query and cache device identification data: firmware version, AIE
 * version and AIE metadata. total_col is the usable column count,
 * clamped by the aie2_max_col module parameter.
 *
 * Return: 0 on success, or the error of the first failing query.
 */
static int aie2_mgmt_fw_query(struct amdxdna_dev_hdl *ndev)
{
	int ret;

	ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver);
	if (ret) {
		XDNA_ERR(ndev->xdna, "query firmware version failed");
		return ret;
	}

	ret = aie2_query_aie_version(ndev, &ndev->version);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Query AIE version failed");
		return ret;
	}

	ret = aie2_query_aie_metadata(ndev, &ndev->metadata);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Query AIE metadata failed");
		return ret;
	}

	ndev->total_col = min(aie2_max_col, ndev->metadata.cols);

	return 0;
}
260 
aie2_mgmt_fw_fini(struct amdxdna_dev_hdl * ndev)261 static void aie2_mgmt_fw_fini(struct amdxdna_dev_hdl *ndev)
262 {
263 	if (aie2_suspend_fw(ndev))
264 		XDNA_ERR(ndev->xdna, "Suspend_fw failed");
265 	XDNA_DBG(ndev->xdna, "Firmware suspended");
266 }
267 
aie2_xrs_load(void * cb_arg,struct xrs_action_load * action)268 static int aie2_xrs_load(void *cb_arg, struct xrs_action_load *action)
269 {
270 	struct amdxdna_hwctx *hwctx = cb_arg;
271 	struct amdxdna_dev *xdna;
272 	int ret;
273 
274 	xdna = hwctx->client->xdna;
275 
276 	hwctx->start_col = action->part.start_col;
277 	hwctx->num_col = action->part.ncols;
278 	ret = aie2_create_context(xdna->dev_handle, hwctx);
279 	if (ret)
280 		XDNA_ERR(xdna, "create context failed, ret %d", ret);
281 
282 	return ret;
283 }
284 
aie2_xrs_unload(void * cb_arg)285 static int aie2_xrs_unload(void *cb_arg)
286 {
287 	struct amdxdna_hwctx *hwctx = cb_arg;
288 	struct amdxdna_dev *xdna;
289 	int ret;
290 
291 	xdna = hwctx->client->xdna;
292 
293 	ret = aie2_destroy_context(xdna->dev_handle, hwctx);
294 	if (ret)
295 		XDNA_ERR(xdna, "destroy context failed, ret %d", ret);
296 
297 	return ret;
298 }
299 
/*
 * Resource-solver callback: remember the solver's suggested default DPM
 * level and, when the device runs in default power mode, apply it.
 */
static int aie2_xrs_set_dft_dpm_level(struct drm_device *ddev, u32 dpm_level)
{
	struct amdxdna_dev *xdna = to_xdna_dev(ddev);
	struct amdxdna_dev_hdl *ndev = xdna->dev_handle;

	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));

	ndev->dft_dpm_level = dpm_level;

	/* Only the default power mode follows the solver's DPM hint. */
	if (ndev->pw_mode == POWER_MODE_DEFAULT && ndev->dpm_level != dpm_level)
		return aie2_pm_set_dpm(ndev, dpm_level);

	return 0;
}
314 
/* Callbacks handed to the resource solver (see xrsm_init() in aie2_init()). */
static struct xrs_action_ops aie2_xrs_actions = {
	.load = aie2_xrs_load,
	.unload = aie2_xrs_unload,
	.set_dft_dpm_level = aie2_xrs_set_dft_dpm_level,
};
320 
/*
 * Power the device down, tearing down in reverse of aie2_hw_start():
 * clock-gating config, firmware suspend, management channel, mailbox,
 * PSP, SMU, async events, then PCI. Idempotent: a second call while
 * already stopped only logs an error.
 */
static void aie2_hw_stop(struct amdxdna_dev *xdna)
{
	struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
	struct amdxdna_dev_hdl *ndev = xdna->dev_handle;

	if (ndev->dev_status <= AIE2_DEV_INIT) {
		XDNA_ERR(xdna, "device is already stopped");
		return;
	}

	aie2_runtime_cfg(ndev, AIE2_RT_CFG_CLK_GATING, NULL);
	aie2_mgmt_fw_fini(ndev);
	aie2_destroy_mgmt_chann(ndev);
	/* Mailbox was drmm-allocated by aie2_hw_start(); free it eagerly */
	drmm_kfree(&xdna->ddev, ndev->mbox);
	ndev->mbox = NULL;
	aie2_psp_stop(ndev->psp_hdl);
	aie2_smu_fini(ndev);
	aie2_error_async_events_free(ndev);
	pci_disable_device(pdev);

	ndev->dev_status = AIE2_DEV_INIT;
}
343 
/*
 * Bring the device fully up: enable PCI, create the mailbox and the
 * management channel, start SMU and PSP, discover the firmware's
 * management channel layout, start the channel, initialize firmware,
 * power management and cached queries, and allocate async error events.
 * Idempotent: returns 0 immediately if already started.
 *
 * On failure each acquired resource is released in reverse order via
 * the goto ladder below.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int aie2_hw_start(struct amdxdna_dev *xdna)
{
	struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
	struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
	struct xdna_mailbox_res mbox_res;
	u32 xdna_mailbox_intr_reg;
	int mgmt_mb_irq, ret;

	if (ndev->dev_status >= AIE2_DEV_START) {
		XDNA_INFO(xdna, "device is already started");
		return 0;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		XDNA_ERR(xdna, "failed to enable device, ret %d", ret);
		return ret;
	}
	pci_set_master(pdev);

	/* Mailbox rings live in the SRAM BAR; registers in the mailbox BAR */
	mbox_res.ringbuf_base = ndev->sram_base;
	mbox_res.ringbuf_size = pci_resource_len(pdev, xdna->dev_info->sram_bar);
	mbox_res.mbox_base = ndev->mbox_base;
	mbox_res.mbox_size = MBOX_SIZE(ndev);
	mbox_res.name = "xdna_mailbox";
	ndev->mbox = xdnam_mailbox_create(&xdna->ddev, &mbox_res);
	if (!ndev->mbox) {
		XDNA_ERR(xdna, "failed to create mailbox device");
		ret = -ENODEV;
		goto disable_dev;
	}

	ndev->mgmt_chann = xdna_mailbox_alloc_channel(ndev->mbox);
	if (!ndev->mgmt_chann) {
		XDNA_ERR(xdna, "failed to alloc channel");
		ret = -ENODEV;
		goto disable_dev;
	}

	ret = aie2_smu_init(ndev);
	if (ret) {
		XDNA_ERR(xdna, "failed to init smu, ret %d", ret);
		goto free_channel;
	}

	ret = aie2_psp_start(ndev->psp_hdl);
	if (ret) {
		XDNA_ERR(xdna, "failed to start psp, ret %d", ret);
		goto fini_smu;
	}

	/* Firmware publishes the mgmt channel layout once it is alive */
	ret = aie2_get_mgmt_chann_info(ndev);
	if (ret) {
		XDNA_ERR(xdna, "firmware is not alive");
		goto stop_psp;
	}

	/* Firmware told us which MSI-X vector the mgmt channel uses */
	mgmt_mb_irq = pci_irq_vector(pdev, ndev->mgmt_chan_idx);
	if (mgmt_mb_irq < 0) {
		ret = mgmt_mb_irq;
		XDNA_ERR(xdna, "failed to alloc irq vector, ret %d", ret);
		goto stop_psp;
	}

	/* Interrupt register sits right after the i2x head pointer */
	xdna_mailbox_intr_reg = ndev->mgmt_i2x.mb_head_ptr_reg + 4;
	ret = xdna_mailbox_start_channel(ndev->mgmt_chann,
					 &ndev->mgmt_x2i,
					 &ndev->mgmt_i2x,
					 xdna_mailbox_intr_reg,
					 mgmt_mb_irq);
	if (ret) {
		XDNA_ERR(xdna, "failed to start management mailbox channel");
		ret = -EINVAL;
		goto stop_psp;
	}

	ret = aie2_mgmt_fw_init(ndev);
	if (ret) {
		XDNA_ERR(xdna, "initial mgmt firmware failed, ret %d", ret);
		goto stop_fw;
	}

	ret = aie2_pm_init(ndev);
	if (ret) {
		XDNA_ERR(xdna, "failed to init pm, ret %d", ret);
		goto stop_fw;
	}

	ret = aie2_mgmt_fw_query(ndev);
	if (ret) {
		XDNA_ERR(xdna, "failed to query fw, ret %d", ret);
		goto stop_fw;
	}

	ret = aie2_error_async_events_alloc(ndev);
	if (ret) {
		XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret);
		goto stop_fw;
	}

	ndev->dev_status = AIE2_DEV_START;

	return 0;

stop_fw:
	aie2_suspend_fw(ndev);
	xdna_mailbox_stop_channel(ndev->mgmt_chann);
stop_psp:
	aie2_psp_stop(ndev->psp_hdl);
fini_smu:
	aie2_smu_fini(ndev);
free_channel:
	xdna_mailbox_free_channel(ndev->mgmt_chann);
	ndev->mgmt_chann = NULL;
disable_dev:
	pci_disable_device(pdev);

	return ret;
}
463 
/*
 * Suspend every client's hardware contexts, then power the device down.
 * Always returns 0; the int return matches the dev-ops signature.
 * NOTE(review): assumes the caller serializes client_list walkers
 * against client add/remove -- confirm at the call sites.
 */
static int aie2_hw_suspend(struct amdxdna_dev *xdna)
{
	struct amdxdna_client *client;

	list_for_each_entry(client, &xdna->client_list, node)
		aie2_hwctx_suspend(client);

	aie2_hw_stop(xdna);

	return 0;
}
475 
aie2_hw_resume(struct amdxdna_dev * xdna)476 static int aie2_hw_resume(struct amdxdna_dev *xdna)
477 {
478 	struct amdxdna_client *client;
479 	int ret;
480 
481 	ret = aie2_hw_start(xdna);
482 	if (ret) {
483 		XDNA_ERR(xdna, "Start hardware failed, %d", ret);
484 		return ret;
485 	}
486 
487 	list_for_each_entry(client, &xdna->client_list, node) {
488 		ret = aie2_hwctx_resume(client);
489 		if (ret)
490 			break;
491 	}
492 
493 	return ret;
494 }
495 
/*
 * Device probe path: load firmware, map BARs, set up DMA and MSI-X,
 * create the PSP, start the hardware, and initialize the resource
 * solver and message/PM layers.
 *
 * Most allocations are drmm/pcim managed; the explicit error paths only
 * release the firmware image and stop the hardware.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int aie2_init(struct amdxdna_dev *xdna)
{
	struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
	void __iomem *tbl[PCI_NUM_RESOURCES] = {0};
	struct init_config xrs_cfg = { 0 };
	struct amdxdna_dev_hdl *ndev;
	struct psp_config psp_conf;
	const struct firmware *fw;
	unsigned long bars = 0;
	char *fw_full_path;
	int i, nvec, ret;

	if (!hypervisor_is_type(X86_HYPER_NATIVE)) {
		XDNA_ERR(xdna, "Running under hypervisor not supported");
		return -EINVAL;
	}

	ndev = drmm_kzalloc(&xdna->ddev, sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		return -ENOMEM;

	ndev->priv = xdna->dev_info->dev_priv;
	ndev->xdna = xdna;

	/* Try each candidate image in order; first loadable one wins */
	for (i = 0; i < ARRAY_SIZE(npu_fw); i++) {
		fw_full_path = kasprintf(GFP_KERNEL, "%s%s", ndev->priv->fw_path, npu_fw[i]);
		if (!fw_full_path)
			return -ENOMEM;

		ret = firmware_request_nowarn(&fw, fw_full_path, &pdev->dev);
		kfree(fw_full_path);
		if (!ret) {
			XDNA_INFO(xdna, "Load firmware %s%s", ndev->priv->fw_path, npu_fw[i]);
			break;
		}
	}

	if (ret) {
		XDNA_ERR(xdna, "failed to request_firmware %s, ret %d",
			 ndev->priv->fw_path, ret);
		return ret;
	}

	ret = pcim_enable_device(pdev);
	if (ret) {
		XDNA_ERR(xdna, "pcim enable device failed, ret %d", ret);
		goto release_fw;
	}

	/* Collect the set of BARs the driver actually needs to map */
	for (i = 0; i < PSP_MAX_REGS; i++)
		set_bit(PSP_REG_BAR(ndev, i), &bars);

	set_bit(xdna->dev_info->sram_bar, &bars);
	set_bit(xdna->dev_info->smu_bar, &bars);
	set_bit(xdna->dev_info->mbox_bar, &bars);

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (!test_bit(i, &bars))
			continue;
		tbl[i] = pcim_iomap(pdev, i, 0);
		if (!tbl[i]) {
			XDNA_ERR(xdna, "map bar %d failed", i);
			ret = -ENOMEM;
			goto release_fw;
		}
	}

	ndev->sram_base = tbl[xdna->dev_info->sram_bar];
	ndev->smu_base = tbl[xdna->dev_info->smu_bar];
	ndev->mbox_base = tbl[xdna->dev_info->mbox_bar];

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		XDNA_ERR(xdna, "Failed to set DMA mask: %d", ret);
		goto release_fw;
	}

	nvec = pci_msix_vec_count(pdev);
	if (nvec <= 0) {
		XDNA_ERR(xdna, "does not get number of interrupt vector");
		ret = -EINVAL;
		goto release_fw;
	}

	ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (ret < 0) {
		XDNA_ERR(xdna, "failed to alloc irq vectors, ret %d", ret);
		goto release_fw;
	}

	psp_conf.fw_size = fw->size;
	psp_conf.fw_buf = fw->data;
	for (i = 0; i < PSP_MAX_REGS; i++)
		psp_conf.psp_regs[i] = tbl[PSP_REG_BAR(ndev, i)] + PSP_REG_OFF(ndev, i);
	ndev->psp_hdl = aie2m_psp_create(&xdna->ddev, &psp_conf);
	if (!ndev->psp_hdl) {
		XDNA_ERR(xdna, "failed to create psp");
		ret = -ENOMEM;
		goto release_fw;
	}
	xdna->dev_handle = ndev;

	ret = aie2_hw_start(xdna);
	if (ret) {
		XDNA_ERR(xdna, "start npu failed, ret %d", ret);
		goto release_fw;
	}

	/* Feed the resource solver the per-DPM-level clock table */
	xrs_cfg.clk_list.num_levels = ndev->max_dpm_level + 1;
	for (i = 0; i < xrs_cfg.clk_list.num_levels; i++)
		xrs_cfg.clk_list.cu_clk_list[i] = ndev->priv->dpm_clk_tbl[i].hclk;
	xrs_cfg.sys_eff_factor = 1;
	xrs_cfg.ddev = &xdna->ddev;
	xrs_cfg.actions = &aie2_xrs_actions;
	xrs_cfg.total_col = ndev->total_col;

	xdna->xrs_hdl = xrsm_init(&xrs_cfg);
	if (!xdna->xrs_hdl) {
		XDNA_ERR(xdna, "Initialize resolver failed");
		ret = -EINVAL;
		goto stop_hw;
	}

	release_firmware(fw);
	aie2_msg_init(ndev);
	amdxdna_pm_init(xdna);
	return 0;

stop_hw:
	aie2_hw_stop(xdna);
release_fw:
	release_firmware(fw);

	return ret;
}
631 
/* Device teardown: stop PM bookkeeping first, then power the hardware off. */
static void aie2_fini(struct amdxdna_dev *xdna)
{
	amdxdna_pm_fini(xdna);
	aie2_hw_stop(xdna);
}
637 
/*
 * DRM_AMDXDNA_QUERY_AIE_STATUS: read the per-column AIE status into the
 * user-supplied buffer described by the amdxdna_drm_query_aie_status
 * struct at args->buffer, and write back cols_filled.
 *
 * Return: 0 on success, -EFAULT on copy failures, -EINVAL if the
 * requested buffer exceeds the total status area, or the query error.
 */
static int aie2_get_aie_status(struct amdxdna_client *client,
			       struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_query_aie_status status;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_dev_hdl *ndev;
	int ret;

	ndev = xdna->dev_handle;
	if (copy_from_user(&status, u64_to_user_ptr(args->buffer), sizeof(status))) {
		XDNA_ERR(xdna, "Failed to copy AIE request into kernel");
		return -EFAULT;
	}

	/* Reject requests larger than the whole cols * col_size status area */
	if (ndev->metadata.cols * ndev->metadata.size < status.buffer_size) {
		XDNA_ERR(xdna, "Invalid buffer size. Given Size: %u. Need Size: %u.",
			 status.buffer_size, ndev->metadata.cols * ndev->metadata.size);
		return -EINVAL;
	}

	ret = aie2_query_status(ndev, u64_to_user_ptr(status.buffer),
				status.buffer_size, &status.cols_filled);
	if (ret) {
		XDNA_ERR(xdna, "Failed to get AIE status info. Ret: %d", ret);
		return ret;
	}

	/* Write back the header so the caller sees cols_filled */
	if (copy_to_user(u64_to_user_ptr(args->buffer), &status, sizeof(status))) {
		XDNA_ERR(xdna, "Failed to copy AIE request info to user space");
		return -EFAULT;
	}

	return 0;
}
672 
aie2_get_aie_metadata(struct amdxdna_client * client,struct amdxdna_drm_get_info * args)673 static int aie2_get_aie_metadata(struct amdxdna_client *client,
674 				 struct amdxdna_drm_get_info *args)
675 {
676 	struct amdxdna_drm_query_aie_metadata *meta;
677 	struct amdxdna_dev *xdna = client->xdna;
678 	struct amdxdna_dev_hdl *ndev;
679 	int ret = 0;
680 
681 	ndev = xdna->dev_handle;
682 	meta = kzalloc_obj(*meta);
683 	if (!meta)
684 		return -ENOMEM;
685 
686 	meta->col_size = ndev->metadata.size;
687 	meta->cols = ndev->metadata.cols;
688 	meta->rows = ndev->metadata.rows;
689 
690 	meta->version.major = ndev->metadata.version.major;
691 	meta->version.minor = ndev->metadata.version.minor;
692 
693 	meta->core.row_count = ndev->metadata.core.row_count;
694 	meta->core.row_start = ndev->metadata.core.row_start;
695 	meta->core.dma_channel_count = ndev->metadata.core.dma_channel_count;
696 	meta->core.lock_count = ndev->metadata.core.lock_count;
697 	meta->core.event_reg_count = ndev->metadata.core.event_reg_count;
698 
699 	meta->mem.row_count = ndev->metadata.mem.row_count;
700 	meta->mem.row_start = ndev->metadata.mem.row_start;
701 	meta->mem.dma_channel_count = ndev->metadata.mem.dma_channel_count;
702 	meta->mem.lock_count = ndev->metadata.mem.lock_count;
703 	meta->mem.event_reg_count = ndev->metadata.mem.event_reg_count;
704 
705 	meta->shim.row_count = ndev->metadata.shim.row_count;
706 	meta->shim.row_start = ndev->metadata.shim.row_start;
707 	meta->shim.dma_channel_count = ndev->metadata.shim.dma_channel_count;
708 	meta->shim.lock_count = ndev->metadata.shim.lock_count;
709 	meta->shim.event_reg_count = ndev->metadata.shim.event_reg_count;
710 
711 	if (copy_to_user(u64_to_user_ptr(args->buffer), meta, sizeof(*meta)))
712 		ret = -EFAULT;
713 
714 	kfree(meta);
715 	return ret;
716 }
717 
aie2_get_aie_version(struct amdxdna_client * client,struct amdxdna_drm_get_info * args)718 static int aie2_get_aie_version(struct amdxdna_client *client,
719 				struct amdxdna_drm_get_info *args)
720 {
721 	struct amdxdna_drm_query_aie_version version;
722 	struct amdxdna_dev *xdna = client->xdna;
723 	struct amdxdna_dev_hdl *ndev;
724 
725 	ndev = xdna->dev_handle;
726 	version.major = ndev->version.major;
727 	version.minor = ndev->version.minor;
728 
729 	if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version)))
730 		return -EFAULT;
731 
732 	return 0;
733 }
734 
aie2_get_firmware_version(struct amdxdna_client * client,struct amdxdna_drm_get_info * args)735 static int aie2_get_firmware_version(struct amdxdna_client *client,
736 				     struct amdxdna_drm_get_info *args)
737 {
738 	struct amdxdna_drm_query_firmware_version version;
739 	struct amdxdna_dev *xdna = client->xdna;
740 
741 	version.major = xdna->fw_ver.major;
742 	version.minor = xdna->fw_ver.minor;
743 	version.patch = xdna->fw_ver.sub;
744 	version.build = xdna->fw_ver.build;
745 
746 	if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version)))
747 		return -EFAULT;
748 
749 	return 0;
750 }
751 
aie2_get_power_mode(struct amdxdna_client * client,struct amdxdna_drm_get_info * args)752 static int aie2_get_power_mode(struct amdxdna_client *client,
753 			       struct amdxdna_drm_get_info *args)
754 {
755 	struct amdxdna_drm_get_power_mode mode = {};
756 	struct amdxdna_dev *xdna = client->xdna;
757 	struct amdxdna_dev_hdl *ndev;
758 
759 	ndev = xdna->dev_handle;
760 	mode.power_mode = ndev->pw_mode;
761 
762 	if (copy_to_user(u64_to_user_ptr(args->buffer), &mode, sizeof(mode)))
763 		return -EFAULT;
764 
765 	return 0;
766 }
767 
aie2_get_clock_metadata(struct amdxdna_client * client,struct amdxdna_drm_get_info * args)768 static int aie2_get_clock_metadata(struct amdxdna_client *client,
769 				   struct amdxdna_drm_get_info *args)
770 {
771 	struct amdxdna_drm_query_clock_metadata *clock;
772 	struct amdxdna_dev *xdna = client->xdna;
773 	struct amdxdna_dev_hdl *ndev;
774 	int ret = 0;
775 
776 	ndev = xdna->dev_handle;
777 	clock = kzalloc_obj(*clock);
778 	if (!clock)
779 		return -ENOMEM;
780 
781 	snprintf(clock->mp_npu_clock.name, sizeof(clock->mp_npu_clock.name),
782 		 "MP-NPU Clock");
783 	clock->mp_npu_clock.freq_mhz = ndev->npuclk_freq;
784 	snprintf(clock->h_clock.name, sizeof(clock->h_clock.name), "H Clock");
785 	clock->h_clock.freq_mhz = ndev->hclk_freq;
786 
787 	if (copy_to_user(u64_to_user_ptr(args->buffer), clock, sizeof(*clock)))
788 		ret = -EFAULT;
789 
790 	kfree(clock);
791 	return ret;
792 }
793 
/*
 * Context-walk callback: serialize one hardware context into the next
 * slot of the user array described by @arg (amdxdna_drm_get_array).
 * Copies min(full entry, caller's element_size) bytes, then advances
 * the cursor and decrements the remaining-element count.
 *
 * Return: 0 on success, -EINVAL when the user array is exhausted,
 * -ENOMEM or -EFAULT on allocation/copy failure.
 */
static int aie2_hwctx_status_cb(struct amdxdna_hwctx *hwctx, void *arg)
{
	struct amdxdna_drm_hwctx_entry *tmp __free(kfree) = NULL;
	struct amdxdna_drm_get_array *array_args = arg;
	struct amdxdna_drm_hwctx_entry __user *buf;
	u32 size;

	if (!array_args->num_element)
		return -EINVAL;

	tmp = kzalloc_obj(*tmp);
	if (!tmp)
		return -ENOMEM;

	tmp->pid = hwctx->client->pid;
	tmp->context_id = hwctx->id;
	tmp->start_col = hwctx->start_col;
	tmp->num_col = hwctx->num_col;
	tmp->command_submissions = hwctx->priv->seq;
	tmp->command_completions = hwctx->priv->completed;
	tmp->pasid = hwctx->client->pasid;
	tmp->priority = hwctx->qos.priority;
	tmp->gops = hwctx->qos.gops;
	tmp->fps = hwctx->qos.fps;
	tmp->dma_bandwidth = hwctx->qos.dma_bandwidth;
	tmp->latency = hwctx->qos.latency;
	tmp->frame_exec_time = hwctx->qos.frame_exec_time;
	tmp->state = AMDXDNA_HWCTX_STATE_ACTIVE;

	buf = u64_to_user_ptr(array_args->buffer);
	/* Older/smaller user structs get a truncated (prefix) copy */
	size = min(sizeof(*tmp), array_args->element_size);

	if (copy_to_user(buf, tmp, size))
		return -EFAULT;

	array_args->buffer += size;
	array_args->num_element--;

	return 0;
}
834 
/*
 * DRM_AMDXDNA_QUERY_HW_CONTEXTS: pack the status of every client's
 * hardware contexts into the user buffer and shrink args->buffer_size
 * to the number of bytes actually consumed.
 *
 * NOTE(review): a walk error only stops the loop; the function still
 * returns 0 with whatever entries were written -- presumably deliberate
 * partial-result semantics, confirm.
 */
static int aie2_get_hwctx_status(struct amdxdna_client *client,
				 struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_get_array array_args;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_client *tmp_client;
	int ret;

	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));

	array_args.element_size = sizeof(struct amdxdna_drm_query_hwctx);
	array_args.buffer = args->buffer;
	array_args.num_element = args->buffer_size / array_args.element_size;
	list_for_each_entry(tmp_client, &xdna->client_list, node) {
		ret = amdxdna_hwctx_walk(tmp_client, &array_args,
					 aie2_hwctx_status_cb);
		if (ret)
			break;
	}

	/* Report back how many bytes were filled in */
	args->buffer_size -= (u32)(array_args.buffer - args->buffer);
	return 0;
}
858 
aie2_query_resource_info(struct amdxdna_client * client,struct amdxdna_drm_get_info * args)859 static int aie2_query_resource_info(struct amdxdna_client *client,
860 				    struct amdxdna_drm_get_info *args)
861 {
862 	struct amdxdna_drm_get_resource_info res_info;
863 	const struct amdxdna_dev_priv *priv;
864 	struct amdxdna_dev_hdl *ndev;
865 	struct amdxdna_dev *xdna;
866 
867 	xdna = client->xdna;
868 	ndev = xdna->dev_handle;
869 	priv = ndev->priv;
870 
871 	res_info.npu_clk_max = priv->dpm_clk_tbl[ndev->max_dpm_level].hclk;
872 	res_info.npu_tops_max = ndev->max_tops;
873 	res_info.npu_task_max = priv->hwctx_limit;
874 	res_info.npu_tops_curr = ndev->curr_tops;
875 	res_info.npu_task_curr = ndev->hwctx_num;
876 
877 	if (copy_to_user(u64_to_user_ptr(args->buffer), &res_info, sizeof(res_info)))
878 		return -EFAULT;
879 
880 	return 0;
881 }
882 
aie2_fill_hwctx_map(struct amdxdna_hwctx * hwctx,void * arg)883 static int aie2_fill_hwctx_map(struct amdxdna_hwctx *hwctx, void *arg)
884 {
885 	struct amdxdna_dev *xdna = hwctx->client->xdna;
886 	u32 *map = arg;
887 
888 	if (hwctx->fw_ctx_id >= xdna->dev_handle->priv->hwctx_limit) {
889 		XDNA_ERR(xdna, "Invalid fw ctx id %d/%d ", hwctx->fw_ctx_id,
890 			 xdna->dev_handle->priv->hwctx_limit);
891 		return -EINVAL;
892 	}
893 
894 	map[hwctx->fw_ctx_id] = hwctx->id;
895 	return 0;
896 }
897 
/*
 * DRM_AMDXDNA_QUERY_TELEMETRY: the user buffer holds a header (with a
 * flexible fw-ctx-id -> hwctx-id map sized by hwctx_limit) followed by
 * raw telemetry data. Validate sizes, build the context map, let the
 * firmware fill the data region, then write the completed header back.
 *
 * Return: 0 on success, -EINVAL on bad sizes, -ENOMEM/-EFAULT on
 * allocation or copy failure, or a walk/query error.
 */
static int aie2_get_telemetry(struct amdxdna_client *client,
			      struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_query_telemetry_header *header __free(kfree) = NULL;
	u32 telemetry_data_sz, header_sz, elem_num;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_client *tmp_client;
	int ret;

	elem_num = xdna->dev_handle->priv->hwctx_limit;
	header_sz = struct_size(header, map, elem_num);
	/* Need room for the header plus at least one data byte */
	if (args->buffer_size <= header_sz) {
		XDNA_ERR(xdna, "Invalid buffer size");
		return -EINVAL;
	}

	telemetry_data_sz = args->buffer_size - header_sz;
	if (telemetry_data_sz > SZ_4M) {
		XDNA_ERR(xdna, "Buffer size is too big, %d", telemetry_data_sz);
		return -EINVAL;
	}

	header = kzalloc(header_sz, GFP_KERNEL);
	if (!header)
		return -ENOMEM;

	/* Fixed part only; the map portion is filled in below */
	if (copy_from_user(header, u64_to_user_ptr(args->buffer), sizeof(*header))) {
		XDNA_ERR(xdna, "Failed to copy telemetry header from user");
		return -EFAULT;
	}

	header->map_num_elements = elem_num;
	list_for_each_entry(tmp_client, &xdna->client_list, node) {
		ret = amdxdna_hwctx_walk(tmp_client, &header->map,
					 aie2_fill_hwctx_map);
		if (ret)
			return ret;
	}

	ret = aie2_query_telemetry(xdna->dev_handle,
				   u64_to_user_ptr(args->buffer + header_sz),
				   telemetry_data_sz, header);
	if (ret) {
		XDNA_ERR(xdna, "Query telemetry failed ret %d", ret);
		return ret;
	}

	if (copy_to_user(u64_to_user_ptr(args->buffer), header, header_sz)) {
		XDNA_ERR(xdna, "Copy header failed");
		return -EFAULT;
	}

	return 0;
}
952 
aie2_get_preempt_state(struct amdxdna_client * client,struct amdxdna_drm_get_info * args)953 static int aie2_get_preempt_state(struct amdxdna_client *client,
954 				  struct amdxdna_drm_get_info *args)
955 {
956 	struct amdxdna_drm_attribute_state state = {};
957 	struct amdxdna_dev *xdna = client->xdna;
958 	struct amdxdna_dev_hdl *ndev;
959 
960 	ndev = xdna->dev_handle;
961 	if (args->param == DRM_AMDXDNA_GET_FORCE_PREEMPT_STATE)
962 		state.state = ndev->force_preempt_enabled;
963 	else if (args->param == DRM_AMDXDNA_GET_FRAME_BOUNDARY_PREEMPT_STATE)
964 		state.state = ndev->frame_boundary_preempt;
965 
966 	if (copy_to_user(u64_to_user_ptr(args->buffer), &state, sizeof(state)))
967 		return -EFAULT;
968 
969 	return 0;
970 }
971 
/*
 * Top-level DRM_AMDXDNA get-info dispatcher: guard against device
 * unplug, runtime-resume the device, route to the per-param handler,
 * then drop the PM reference.
 *
 * Return: handler result, -ENODEV if the DRM device is gone, a PM
 * resume error, or -EOPNOTSUPP for unknown params.
 */
static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_info *args)
{
	struct amdxdna_dev *xdna = client->xdna;
	int ret, idx;

	if (!drm_dev_enter(&xdna->ddev, &idx))
		return -ENODEV;

	/* Device must be awake before touching firmware/hardware state */
	ret = amdxdna_pm_resume_get_locked(xdna);
	if (ret)
		goto dev_exit;

	switch (args->param) {
	case DRM_AMDXDNA_QUERY_AIE_STATUS:
		ret = aie2_get_aie_status(client, args);
		break;
	case DRM_AMDXDNA_QUERY_AIE_METADATA:
		ret = aie2_get_aie_metadata(client, args);
		break;
	case DRM_AMDXDNA_QUERY_AIE_VERSION:
		ret = aie2_get_aie_version(client, args);
		break;
	case DRM_AMDXDNA_QUERY_CLOCK_METADATA:
		ret = aie2_get_clock_metadata(client, args);
		break;
	case DRM_AMDXDNA_QUERY_HW_CONTEXTS:
		ret = aie2_get_hwctx_status(client, args);
		break;
	case DRM_AMDXDNA_QUERY_FIRMWARE_VERSION:
		ret = aie2_get_firmware_version(client, args);
		break;
	case DRM_AMDXDNA_GET_POWER_MODE:
		ret = aie2_get_power_mode(client, args);
		break;
	case DRM_AMDXDNA_QUERY_TELEMETRY:
		ret = aie2_get_telemetry(client, args);
		break;
	case DRM_AMDXDNA_QUERY_RESOURCE_INFO:
		ret = aie2_query_resource_info(client, args);
		break;
	case DRM_AMDXDNA_GET_FORCE_PREEMPT_STATE:
	case DRM_AMDXDNA_GET_FRAME_BOUNDARY_PREEMPT_STATE:
		ret = aie2_get_preempt_state(client, args);
		break;
	default:
		XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
		ret = -EOPNOTSUPP;
	}

	amdxdna_pm_suspend_put(xdna);
	XDNA_DBG(xdna, "Got param %d", args->param);

dev_exit:
	drm_dev_exit(idx);
	return ret;
}
1028 
aie2_query_ctx_status_array(struct amdxdna_client * client,struct amdxdna_drm_get_array * args)1029 static int aie2_query_ctx_status_array(struct amdxdna_client *client,
1030 				       struct amdxdna_drm_get_array *args)
1031 {
1032 	struct amdxdna_drm_get_array array_args;
1033 	struct amdxdna_dev *xdna = client->xdna;
1034 	struct amdxdna_client *tmp_client;
1035 	int ret;
1036 
1037 	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
1038 
1039 	if (args->element_size > SZ_4K || args->num_element > SZ_1K) {
1040 		XDNA_DBG(xdna, "Invalid element size %d or number of element %d",
1041 			 args->element_size, args->num_element);
1042 		return -EINVAL;
1043 	}
1044 
1045 	array_args.element_size = min(args->element_size,
1046 				      sizeof(struct amdxdna_drm_hwctx_entry));
1047 	array_args.buffer = args->buffer;
1048 	array_args.num_element = args->num_element * args->element_size /
1049 				array_args.element_size;
1050 	list_for_each_entry(tmp_client, &xdna->client_list, node) {
1051 		ret = amdxdna_hwctx_walk(tmp_client, &array_args,
1052 					 aie2_hwctx_status_cb);
1053 		if (ret)
1054 			break;
1055 	}
1056 
1057 	args->element_size = array_args.element_size;
1058 	args->num_element = (u32)((array_args.buffer - args->buffer) /
1059 				  args->element_size);
1060 
1061 	return 0;
1062 }
1063 
aie2_get_array(struct amdxdna_client * client,struct amdxdna_drm_get_array * args)1064 static int aie2_get_array(struct amdxdna_client *client,
1065 			  struct amdxdna_drm_get_array *args)
1066 {
1067 	struct amdxdna_dev *xdna = client->xdna;
1068 	int ret, idx;
1069 
1070 	if (!drm_dev_enter(&xdna->ddev, &idx))
1071 		return -ENODEV;
1072 
1073 	ret = amdxdna_pm_resume_get_locked(xdna);
1074 	if (ret)
1075 		goto dev_exit;
1076 
1077 	switch (args->param) {
1078 	case DRM_AMDXDNA_HW_CONTEXT_ALL:
1079 		ret = aie2_query_ctx_status_array(client, args);
1080 		break;
1081 	case DRM_AMDXDNA_HW_LAST_ASYNC_ERR:
1082 		ret = aie2_get_array_async_error(xdna->dev_handle, args);
1083 		break;
1084 	default:
1085 		XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
1086 		ret = -EOPNOTSUPP;
1087 	}
1088 
1089 	amdxdna_pm_suspend_put(xdna);
1090 	XDNA_DBG(xdna, "Got param %d", args->param);
1091 
1092 dev_exit:
1093 	drm_dev_exit(idx);
1094 	return ret;
1095 }
1096 
aie2_set_power_mode(struct amdxdna_client * client,struct amdxdna_drm_set_state * args)1097 static int aie2_set_power_mode(struct amdxdna_client *client,
1098 			       struct amdxdna_drm_set_state *args)
1099 {
1100 	struct amdxdna_drm_set_power_mode power_state;
1101 	enum amdxdna_power_mode_type power_mode;
1102 	struct amdxdna_dev *xdna = client->xdna;
1103 
1104 	if (copy_from_user(&power_state, u64_to_user_ptr(args->buffer),
1105 			   sizeof(power_state))) {
1106 		XDNA_ERR(xdna, "Failed to copy power mode request into kernel");
1107 		return -EFAULT;
1108 	}
1109 
1110 	if (XDNA_MBZ_DBG(xdna, power_state.pad, sizeof(power_state.pad)))
1111 		return -EINVAL;
1112 
1113 	power_mode = power_state.power_mode;
1114 	if (power_mode > POWER_MODE_TURBO) {
1115 		XDNA_ERR(xdna, "Invalid power mode %d", power_mode);
1116 		return -EINVAL;
1117 	}
1118 
1119 	return aie2_pm_set_mode(xdna->dev_handle, power_mode);
1120 }
1121 
aie2_set_preempt_state(struct amdxdna_client * client,struct amdxdna_drm_set_state * args)1122 static int aie2_set_preempt_state(struct amdxdna_client *client,
1123 				  struct amdxdna_drm_set_state *args)
1124 {
1125 	struct amdxdna_dev_hdl *ndev = client->xdna->dev_handle;
1126 	struct amdxdna_drm_attribute_state state;
1127 	u32 val;
1128 	int ret;
1129 
1130 	if (copy_from_user(&state, u64_to_user_ptr(args->buffer), sizeof(state)))
1131 		return -EFAULT;
1132 
1133 	if (state.state > 1)
1134 		return -EINVAL;
1135 
1136 	if (XDNA_MBZ_DBG(client->xdna, state.pad, sizeof(state.pad)))
1137 		return -EINVAL;
1138 
1139 	if (args->param == DRM_AMDXDNA_SET_FORCE_PREEMPT) {
1140 		ndev->force_preempt_enabled = state.state;
1141 	} else if (args->param == DRM_AMDXDNA_SET_FRAME_BOUNDARY_PREEMPT) {
1142 		val = state.state;
1143 		ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_FRAME_BOUNDARY_PREEMPT,
1144 				       &val);
1145 		if (ret)
1146 			return ret;
1147 
1148 		ndev->frame_boundary_preempt = state.state;
1149 	}
1150 
1151 	return 0;
1152 }
1153 
aie2_set_state(struct amdxdna_client * client,struct amdxdna_drm_set_state * args)1154 static int aie2_set_state(struct amdxdna_client *client,
1155 			  struct amdxdna_drm_set_state *args)
1156 {
1157 	struct amdxdna_dev *xdna = client->xdna;
1158 	int ret, idx;
1159 
1160 	if (!drm_dev_enter(&xdna->ddev, &idx))
1161 		return -ENODEV;
1162 
1163 	ret = amdxdna_pm_resume_get_locked(xdna);
1164 	if (ret)
1165 		goto dev_exit;
1166 
1167 	switch (args->param) {
1168 	case DRM_AMDXDNA_SET_POWER_MODE:
1169 		ret = aie2_set_power_mode(client, args);
1170 		break;
1171 	case DRM_AMDXDNA_SET_FORCE_PREEMPT:
1172 	case DRM_AMDXDNA_SET_FRAME_BOUNDARY_PREEMPT:
1173 		ret = aie2_set_preempt_state(client, args);
1174 		break;
1175 	default:
1176 		XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
1177 		ret = -EOPNOTSUPP;
1178 		break;
1179 	}
1180 
1181 	amdxdna_pm_suspend_put(xdna);
1182 dev_exit:
1183 	drm_dev_exit(idx);
1184 	return ret;
1185 }
1186 
/*
 * AIE2 implementation of the amdxdna device operations table.  The common
 * PCI driver calls through these hooks for device init/teardown, hardware
 * suspend/resume, the GET_INFO/GET_ARRAY/SET_STATE ioctls, hardware-context
 * management and command submission.
 */
const struct amdxdna_dev_ops aie2_ops = {
	.init = aie2_init,
	.fini = aie2_fini,
	.resume = aie2_hw_resume,
	.suspend = aie2_hw_suspend,
	.get_aie_info = aie2_get_info,
	.set_aie_state = aie2_set_state,
	.hwctx_init = aie2_hwctx_init,
	.hwctx_fini = aie2_hwctx_fini,
	.hwctx_config = aie2_hwctx_config,
	.hwctx_sync_debug_bo = aie2_hwctx_sync_debug_bo,
	.cmd_submit = aie2_cmd_submit,
	.hmm_invalidate = aie2_hmm_invalidate,
	.get_array = aie2_get_array,
};
1202