xref: /linux/drivers/accel/amdxdna/aie2_pci.c (revision f2161d5f1aae21a42b0a64d87e10cb31db423f42)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
4  */
5 
6 #include <drm/amdxdna_accel.h>
7 #include <drm/drm_device.h>
8 #include <drm/drm_drv.h>
9 #include <drm/drm_gem_shmem_helper.h>
10 #include <drm/drm_managed.h>
11 #include <drm/drm_print.h>
12 #include <drm/gpu_scheduler.h>
13 #include <linux/cleanup.h>
14 #include <linux/errno.h>
15 #include <linux/firmware.h>
16 #include <linux/iommu.h>
17 #include <linux/iopoll.h>
18 #include <linux/pci.h>
19 #include <linux/xarray.h>
20 #include <asm/hypervisor.h>
21 
22 #include "aie2_msg_priv.h"
23 #include "aie2_pci.h"
24 #include "aie2_solver.h"
25 #include "amdxdna_ctx.h"
26 #include "amdxdna_gem.h"
27 #include "amdxdna_mailbox.h"
28 #include "amdxdna_pci_drv.h"
29 #include "amdxdna_pm.h"
30 
31 static int aie2_max_col = XRS_MAX_COL;
32 module_param(aie2_max_col, uint, 0600);
33 MODULE_PARM_DESC(aie2_max_col, "Maximum number of columns that can be used");
34 
35 /*
36  * The management mailbox channel is allocated by firmware.
37  * The related register and ring buffer information is on SRAM BAR.
38  * This struct is the register layout.
39  */
40 #define MGMT_MBOX_MAGIC 0x55504e5f /* _NPU */
41 struct mgmt_mbox_chann_info {
42 	__u32	x2i_tail;
43 	__u32	x2i_head;
44 	__u32	x2i_buf;
45 	__u32	x2i_buf_sz;
46 	__u32	i2x_tail;
47 	__u32	i2x_head;
48 	__u32	i2x_buf;
49 	__u32	i2x_buf_sz;
50 	__u32	magic;
51 	__u32	msi_id;
52 	__u32	prot_major;
53 	__u32	prot_minor;
54 	__u32	rsvd[4];
55 };
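
/*
 * Illustrative sketch, not part of the driver: the register block above is
 * copied out of SRAM one 32-bit word at a time (see
 * aie2_get_mgmt_chann_info() below), so a compile-time check like this one
 * would catch any future field that breaks the whole-number-of-u32 layout.
 */
static_assert(sizeof(struct mgmt_mbox_chann_info) % sizeof(u32) == 0);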
56 
57 static int aie2_check_protocol(struct amdxdna_dev_hdl *ndev, u32 fw_major, u32 fw_minor)
58 {
59 	const struct aie2_fw_feature_tbl *feature;
60 	struct amdxdna_dev *xdna = ndev->xdna;
61 
62 	/*
63 	 * The mailbox behavior supported by the driver is defined by
64 	 * ndev->priv->protocol_major and protocol_minor.
65 	 *
66 	 * When protocol_major and fw_major differ, the driver and the
67 	 * firmware are incompatible.
68 	 */
69 	if (ndev->priv->protocol_major != fw_major) {
70 		XDNA_ERR(xdna, "Incompatible firmware protocol major %d minor %d",
71 			 fw_major, fw_minor);
72 		return -EINVAL;
73 	}
74 
75 	/*
76 	 * When protocol_minor is greater than fw_minor, the driver
77 	 * relies on operations the installed firmware does not support.
78 	 */
79 	if (ndev->priv->protocol_minor > fw_minor) {
80 		XDNA_ERR(xdna, "Firmware minor version smaller than supported");
81 		return -EINVAL;
82 	}
83 
84 	for (feature = ndev->priv->fw_feature_tbl; feature && feature->min_minor;
85 	     feature++) {
86 		if (fw_minor < feature->min_minor)
87 			continue;
88 		if (feature->max_minor > 0 && fw_minor > feature->max_minor)
89 			continue;
90 
91 		set_bit(feature->feature, &ndev->feature_mask);
92 	}
93 
94 	return 0;
95 }
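
/*
 * Illustrative sketch, not part of the driver: a firmware feature table of
 * the shape consumed by the loop above. Entries are matched against the
 * reported protocol minor version; a zero min_minor terminates the table
 * and a zero max_minor means "no upper bound". The feature bit numbers and
 * names below are hypothetical placeholders.
 */
static const struct aie2_fw_feature_tbl example_fw_feature_tbl[] = {
	{ .feature = 0 /* e.g. AIE2_FEATURE_FOO */, .min_minor = 6, .max_minor = 9 },
	{ .feature = 1 /* e.g. AIE2_FEATURE_BAR */, .min_minor = 7 },
	{ },
};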
96 
97 static void aie2_dump_chann_info_debug(struct amdxdna_dev_hdl *ndev)
98 {
99 	struct amdxdna_dev *xdna = ndev->xdna;
100 
101 	XDNA_DBG(xdna, "i2x tail    0x%x", ndev->mgmt_i2x.mb_tail_ptr_reg);
102 	XDNA_DBG(xdna, "i2x head    0x%x", ndev->mgmt_i2x.mb_head_ptr_reg);
103 	XDNA_DBG(xdna, "i2x ringbuf 0x%x", ndev->mgmt_i2x.rb_start_addr);
104 	XDNA_DBG(xdna, "i2x rsize   0x%x", ndev->mgmt_i2x.rb_size);
105 	XDNA_DBG(xdna, "x2i tail    0x%x", ndev->mgmt_x2i.mb_tail_ptr_reg);
106 	XDNA_DBG(xdna, "x2i head    0x%x", ndev->mgmt_x2i.mb_head_ptr_reg);
107 	XDNA_DBG(xdna, "x2i ringbuf 0x%x", ndev->mgmt_x2i.rb_start_addr);
108 	XDNA_DBG(xdna, "x2i rsize   0x%x", ndev->mgmt_x2i.rb_size);
109 	XDNA_DBG(xdna, "x2i chann index 0x%x", ndev->mgmt_chan_idx);
110 	XDNA_DBG(xdna, "mailbox protocol major 0x%x", ndev->mgmt_prot_major);
111 	XDNA_DBG(xdna, "mailbox protocol minor 0x%x", ndev->mgmt_prot_minor);
112 }
113 
114 static int aie2_get_mgmt_chann_info(struct amdxdna_dev_hdl *ndev)
115 {
116 	struct mgmt_mbox_chann_info info_regs;
117 	struct xdna_mailbox_chann_res *i2x;
118 	struct xdna_mailbox_chann_res *x2i;
119 	u32 addr, off;
120 	u32 *reg;
121 	int ret;
122 	int i;
123 
124 	/*
125 	 * Once firmware is alive, it writes the management channel
126 	 * information into the SRAM BAR and stores the address of that
127 	 * information at offset FW_ALIVE_OFF in the SRAM BAR.
128 	 *
129 	 * Reading a non-zero value from FW_ALIVE_OFF implies that firmware
130 	 * is alive.
131 	 */
132 	ret = readx_poll_timeout(readl, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF),
133 				 addr, addr, AIE2_INTERVAL, AIE2_TIMEOUT);
134 	if (ret || !addr)
135 		return -ETIME;
136 
137 	off = AIE2_SRAM_OFF(ndev, addr);
138 	reg = (u32 *)&info_regs;
139 	for (i = 0; i < sizeof(info_regs) / sizeof(u32); i++)
140 		reg[i] = readl(ndev->sram_base + off + i * sizeof(u32));
141 
142 	if (info_regs.magic != MGMT_MBOX_MAGIC) {
143 		XDNA_ERR(ndev->xdna, "Invalid mbox magic 0x%x", info_regs.magic);
144 		ret = -EINVAL;
145 		goto done;
146 	}
147 
148 	i2x = &ndev->mgmt_i2x;
149 	x2i = &ndev->mgmt_x2i;
150 
151 	i2x->mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.i2x_head);
152 	i2x->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.i2x_tail);
153 	i2x->rb_start_addr   = AIE2_SRAM_OFF(ndev, info_regs.i2x_buf);
154 	i2x->rb_size         = info_regs.i2x_buf_sz;
155 
156 	x2i->mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_head);
157 	x2i->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_tail);
158 	x2i->rb_start_addr   = AIE2_SRAM_OFF(ndev, info_regs.x2i_buf);
159 	x2i->rb_size         = info_regs.x2i_buf_sz;
160 
161 	ndev->mgmt_chan_idx  = info_regs.msi_id;
162 	ndev->mgmt_prot_major = info_regs.prot_major;
163 	ndev->mgmt_prot_minor = info_regs.prot_minor;
164 
165 	ret = aie2_check_protocol(ndev, ndev->mgmt_prot_major, ndev->mgmt_prot_minor);
166 
167 done:
168 	aie2_dump_chann_info_debug(ndev);
169 
170 	/* Must clear address at FW_ALIVE_OFF */
171 	writel(0, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF));
172 
173 	return ret;
174 }
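
/*
 * Illustrative sketch, not part of the driver and with a hypothetical
 * helper name: the firmware-alive handshake above in its generic
 * readx_poll_timeout() form. The macro re-reads the register until the
 * condition (here, a non-zero value) holds or the timeout expires, in
 * which case it returns -ETIMEDOUT.
 */
static inline int example_wait_fw_alive(void __iomem *fw_alive_reg, u32 *addr)
{
	return readx_poll_timeout(readl, fw_alive_reg, *addr, *addr,
				  AIE2_INTERVAL, AIE2_TIMEOUT);
}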
175 
176 int aie2_runtime_cfg(struct amdxdna_dev_hdl *ndev,
177 		     enum rt_config_category category, u32 *val)
178 {
179 	const struct rt_config *cfg;
180 	u32 value;
181 	int ret;
182 
183 	for (cfg = ndev->priv->rt_config; cfg->type; cfg++) {
184 		if (cfg->category != category)
185 			continue;
186 
187 		if (cfg->feature_mask &&
188 		    bitmap_subset(&cfg->feature_mask, &ndev->feature_mask, AIE2_FEATURE_MAX))
189 			continue;
190 
191 		value = val ? *val : cfg->value;
192 		ret = aie2_set_runtime_cfg(ndev, cfg->type, value);
193 		if (ret) {
194 			XDNA_ERR(ndev->xdna, "Set type %d value %d failed",
195 				 cfg->type, value);
196 			return ret;
197 		}
198 	}
199 
200 	return 0;
201 }
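
/*
 * Illustrative sketch, not part of the driver: a runtime-config table of
 * the shape walked by aie2_runtime_cfg(). Tables are zero-type terminated
 * and each entry belongs to one category; the type and value numbers below
 * are hypothetical placeholders.
 */
static const struct rt_config example_rt_config[] = {
	{ .type = 1, .value = 1, .category = AIE2_RT_CFG_INIT },
	{ .type = 2, .value = 0, .category = AIE2_RT_CFG_FRAME_BOUNDARY_PREEMPT },
	{ },
};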
202 
203 static int aie2_xdna_reset(struct amdxdna_dev_hdl *ndev)
204 {
205 	int ret;
206 
207 	ret = aie2_suspend_fw(ndev);
208 	if (ret) {
209 		XDNA_ERR(ndev->xdna, "Suspend firmware failed");
210 		return ret;
211 	}
212 
213 	ret = aie2_resume_fw(ndev);
214 	if (ret) {
215 		XDNA_ERR(ndev->xdna, "Resume firmware failed");
216 		return ret;
217 	}
218 
219 	return 0;
220 }
221 
222 static int aie2_mgmt_fw_init(struct amdxdna_dev_hdl *ndev)
223 {
224 	int ret;
225 
226 	ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_INIT, NULL);
227 	if (ret) {
228 		XDNA_ERR(ndev->xdna, "Runtime config failed");
229 		return ret;
230 	}
231 
232 	ret = aie2_assign_mgmt_pasid(ndev, 0);
233 	if (ret) {
234 		XDNA_ERR(ndev->xdna, "Can not assign PASID");
235 		return ret;
236 	}
237 
238 	ret = aie2_xdna_reset(ndev);
239 	if (ret) {
240 		XDNA_ERR(ndev->xdna, "Reset firmware failed");
241 		return ret;
242 	}
243 
244 	return 0;
245 }
246 
247 static int aie2_mgmt_fw_query(struct amdxdna_dev_hdl *ndev)
248 {
249 	int ret;
250 
251 	ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver);
252 	if (ret) {
253 		XDNA_ERR(ndev->xdna, "query firmware version failed");
254 		return ret;
255 	}
256 
257 	ret = aie2_query_aie_version(ndev, &ndev->version);
258 	if (ret) {
259 		XDNA_ERR(ndev->xdna, "Query AIE version failed");
260 		return ret;
261 	}
262 
263 	ret = aie2_query_aie_metadata(ndev, &ndev->metadata);
264 	if (ret) {
265 		XDNA_ERR(ndev->xdna, "Query AIE metadata failed");
266 		return ret;
267 	}
268 
269 	ndev->total_col = min(aie2_max_col, ndev->metadata.cols);
270 
271 	return 0;
272 }
273 
274 static void aie2_mgmt_fw_fini(struct amdxdna_dev_hdl *ndev)
275 {
276 	if (aie2_suspend_fw(ndev))
277 		XDNA_ERR(ndev->xdna, "Suspend_fw failed");
278 	XDNA_DBG(ndev->xdna, "Firmware suspended");
279 }
280 
281 static int aie2_xrs_load(void *cb_arg, struct xrs_action_load *action)
282 {
283 	struct amdxdna_hwctx *hwctx = cb_arg;
284 	struct amdxdna_dev *xdna;
285 	int ret;
286 
287 	xdna = hwctx->client->xdna;
288 
289 	hwctx->start_col = action->part.start_col;
290 	hwctx->num_col = action->part.ncols;
291 	ret = aie2_create_context(xdna->dev_handle, hwctx);
292 	if (ret)
293 		XDNA_ERR(xdna, "create context failed, ret %d", ret);
294 
295 	return ret;
296 }
297 
298 static int aie2_xrs_unload(void *cb_arg)
299 {
300 	struct amdxdna_hwctx *hwctx = cb_arg;
301 	struct amdxdna_dev *xdna;
302 	int ret;
303 
304 	xdna = hwctx->client->xdna;
305 
306 	ret = aie2_destroy_context(xdna->dev_handle, hwctx);
307 	if (ret)
308 		XDNA_ERR(xdna, "destroy context failed, ret %d", ret);
309 
310 	return ret;
311 }
312 
313 static int aie2_xrs_set_dft_dpm_level(struct drm_device *ddev, u32 dpm_level)
314 {
315 	struct amdxdna_dev *xdna = to_xdna_dev(ddev);
316 	struct amdxdna_dev_hdl *ndev;
317 
318 	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
319 
320 	ndev = xdna->dev_handle;
321 	ndev->dft_dpm_level = dpm_level;
322 	if (ndev->pw_mode != POWER_MODE_DEFAULT || ndev->dpm_level == dpm_level)
323 		return 0;
324 
325 	return ndev->priv->hw_ops.set_dpm(ndev, dpm_level);
326 }
327 
328 static struct xrs_action_ops aie2_xrs_actions = {
329 	.load = aie2_xrs_load,
330 	.unload = aie2_xrs_unload,
331 	.set_dft_dpm_level = aie2_xrs_set_dft_dpm_level,
332 };
333 
334 static void aie2_hw_stop(struct amdxdna_dev *xdna)
335 {
336 	struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
337 	struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
338 
339 	if (ndev->dev_status <= AIE2_DEV_INIT) {
340 		XDNA_ERR(xdna, "device is already stopped");
341 		return;
342 	}
343 
344 	aie2_mgmt_fw_fini(ndev);
345 	xdna_mailbox_stop_channel(ndev->mgmt_chann);
346 	xdna_mailbox_destroy_channel(ndev->mgmt_chann);
347 	ndev->mgmt_chann = NULL;
348 	drmm_kfree(&xdna->ddev, ndev->mbox);
349 	ndev->mbox = NULL;
350 	aie2_psp_stop(ndev->psp_hdl);
351 	aie2_smu_fini(ndev);
352 	aie2_error_async_events_free(ndev);
353 	pci_disable_device(pdev);
354 
355 	ndev->dev_status = AIE2_DEV_INIT;
356 }
357 
358 static int aie2_hw_start(struct amdxdna_dev *xdna)
359 {
360 	struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
361 	struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
362 	struct xdna_mailbox_res mbox_res;
363 	u32 xdna_mailbox_intr_reg;
364 	int mgmt_mb_irq, ret;
365 
366 	if (ndev->dev_status >= AIE2_DEV_START) {
367 		XDNA_INFO(xdna, "device is already started");
368 		return 0;
369 	}
370 
371 	ret = pci_enable_device(pdev);
372 	if (ret) {
373 		XDNA_ERR(xdna, "failed to enable device, ret %d", ret);
374 		return ret;
375 	}
376 	pci_set_master(pdev);
377 
378 	ret = aie2_smu_init(ndev);
379 	if (ret) {
380 		XDNA_ERR(xdna, "failed to init smu, ret %d", ret);
381 		goto disable_dev;
382 	}
383 
384 	ret = aie2_psp_start(ndev->psp_hdl);
385 	if (ret) {
386 		XDNA_ERR(xdna, "failed to start psp, ret %d", ret);
387 		goto fini_smu;
388 	}
389 
390 	ret = aie2_get_mgmt_chann_info(ndev);
391 	if (ret) {
392 		XDNA_ERR(xdna, "firmware is not alive");
393 		goto stop_psp;
394 	}
395 
396 	mbox_res.ringbuf_base = ndev->sram_base;
397 	mbox_res.ringbuf_size = pci_resource_len(pdev, xdna->dev_info->sram_bar);
398 	mbox_res.mbox_base = ndev->mbox_base;
399 	mbox_res.mbox_size = MBOX_SIZE(ndev);
400 	mbox_res.name = "xdna_mailbox";
401 	ndev->mbox = xdnam_mailbox_create(&xdna->ddev, &mbox_res);
402 	if (!ndev->mbox) {
403 		XDNA_ERR(xdna, "failed to create mailbox device");
404 		ret = -ENODEV;
405 		goto stop_psp;
406 	}
407 
408 	mgmt_mb_irq = pci_irq_vector(pdev, ndev->mgmt_chan_idx);
409 	if (mgmt_mb_irq < 0) {
410 		ret = mgmt_mb_irq;
411 		XDNA_ERR(xdna, "failed to alloc irq vector, ret %d", ret);
412 		goto stop_psp;
413 	}
414 
415 	xdna_mailbox_intr_reg = ndev->mgmt_i2x.mb_head_ptr_reg + 4;
416 	ndev->mgmt_chann = xdna_mailbox_create_channel(ndev->mbox,
417 						       &ndev->mgmt_x2i,
418 						       &ndev->mgmt_i2x,
419 						       xdna_mailbox_intr_reg,
420 						       mgmt_mb_irq);
421 	if (!ndev->mgmt_chann) {
422 		XDNA_ERR(xdna, "failed to create management mailbox channel");
423 		ret = -EINVAL;
424 		goto stop_psp;
425 	}
426 
427 	ret = aie2_pm_init(ndev);
428 	if (ret) {
429 		XDNA_ERR(xdna, "failed to init pm, ret %d", ret);
430 		goto destroy_mgmt_chann;
431 	}
432 
433 	ret = aie2_mgmt_fw_init(ndev);
434 	if (ret) {
435 		XDNA_ERR(xdna, "init mgmt firmware failed, ret %d", ret);
436 		goto destroy_mgmt_chann;
437 	}
438 
439 	ret = aie2_mgmt_fw_query(ndev);
440 	if (ret) {
441 		XDNA_ERR(xdna, "failed to query fw, ret %d", ret);
442 		goto destroy_mgmt_chann;
443 	}
444 
445 	ret = aie2_error_async_events_alloc(ndev);
446 	if (ret) {
447 		XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret);
448 		goto destroy_mgmt_chann;
449 	}
450 
451 	ndev->dev_status = AIE2_DEV_START;
452 
453 	return 0;
454 
455 destroy_mgmt_chann:
456 	xdna_mailbox_stop_channel(ndev->mgmt_chann);
457 	xdna_mailbox_destroy_channel(ndev->mgmt_chann);
458 stop_psp:
459 	aie2_psp_stop(ndev->psp_hdl);
460 fini_smu:
461 	aie2_smu_fini(ndev);
462 disable_dev:
463 	pci_disable_device(pdev);
464 
465 	return ret;
466 }
467 
468 static int aie2_hw_suspend(struct amdxdna_dev *xdna)
469 {
470 	struct amdxdna_client *client;
471 
472 	guard(mutex)(&xdna->dev_lock);
473 	list_for_each_entry(client, &xdna->client_list, node)
474 		aie2_hwctx_suspend(client);
475 
476 	aie2_hw_stop(xdna);
477 
478 	return 0;
479 }
480 
481 static int aie2_hw_resume(struct amdxdna_dev *xdna)
482 {
483 	struct amdxdna_client *client;
484 	int ret;
485 
486 	ret = aie2_hw_start(xdna);
487 	if (ret) {
488 		XDNA_ERR(xdna, "Start hardware failed, %d", ret);
489 		return ret;
490 	}
491 
492 	list_for_each_entry(client, &xdna->client_list, node) {
493 		ret = aie2_hwctx_resume(client);
494 		if (ret)
495 			break;
496 	}
497 
498 	return ret;
499 }
500 
501 static int aie2_init(struct amdxdna_dev *xdna)
502 {
503 	struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
504 	void __iomem *tbl[PCI_NUM_RESOURCES] = {0};
505 	struct init_config xrs_cfg = { 0 };
506 	struct amdxdna_dev_hdl *ndev;
507 	struct psp_config psp_conf;
508 	const struct firmware *fw;
509 	unsigned long bars = 0;
510 	int i, nvec, ret;
511 
512 	if (!hypervisor_is_type(X86_HYPER_NATIVE)) {
513 		XDNA_ERR(xdna, "Running under hypervisor not supported");
514 		return -EINVAL;
515 	}
516 
517 	ndev = drmm_kzalloc(&xdna->ddev, sizeof(*ndev), GFP_KERNEL);
518 	if (!ndev)
519 		return -ENOMEM;
520 
521 	ndev->priv = xdna->dev_info->dev_priv;
522 	ndev->xdna = xdna;
523 
524 	ret = request_firmware(&fw, ndev->priv->fw_path, &pdev->dev);
525 	if (ret) {
526 		XDNA_ERR(xdna, "failed to request_firmware %s, ret %d",
527 			 ndev->priv->fw_path, ret);
528 		return ret;
529 	}
530 
531 	ret = pcim_enable_device(pdev);
532 	if (ret) {
533 		XDNA_ERR(xdna, "pcim enable device failed, ret %d", ret);
534 		goto release_fw;
535 	}
536 
537 	for (i = 0; i < PSP_MAX_REGS; i++)
538 		set_bit(PSP_REG_BAR(ndev, i), &bars);
539 
540 	set_bit(xdna->dev_info->sram_bar, &bars);
541 	set_bit(xdna->dev_info->smu_bar, &bars);
542 	set_bit(xdna->dev_info->mbox_bar, &bars);
543 
544 	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
545 		if (!test_bit(i, &bars))
546 			continue;
547 		tbl[i] = pcim_iomap(pdev, i, 0);
548 		if (!tbl[i]) {
549 			XDNA_ERR(xdna, "map bar %d failed", i);
550 			ret = -ENOMEM;
551 			goto release_fw;
552 		}
553 	}
554 
555 	ndev->sram_base = tbl[xdna->dev_info->sram_bar];
556 	ndev->smu_base = tbl[xdna->dev_info->smu_bar];
557 	ndev->mbox_base = tbl[xdna->dev_info->mbox_bar];
558 
559 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
560 	if (ret) {
561 		XDNA_ERR(xdna, "Failed to set DMA mask: %d", ret);
562 		goto release_fw;
563 	}
564 
565 	nvec = pci_msix_vec_count(pdev);
566 	if (nvec <= 0) {
567 		XDNA_ERR(xdna, "failed to get number of interrupt vectors");
568 		ret = -EINVAL;
569 		goto release_fw;
570 	}
571 
572 	ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
573 	if (ret < 0) {
574 		XDNA_ERR(xdna, "failed to alloc irq vectors, ret %d", ret);
575 		goto release_fw;
576 	}
577 
578 	psp_conf.fw_size = fw->size;
579 	psp_conf.fw_buf = fw->data;
580 	for (i = 0; i < PSP_MAX_REGS; i++)
581 		psp_conf.psp_regs[i] = tbl[PSP_REG_BAR(ndev, i)] + PSP_REG_OFF(ndev, i);
582 	ndev->psp_hdl = aie2m_psp_create(&xdna->ddev, &psp_conf);
583 	if (!ndev->psp_hdl) {
584 		XDNA_ERR(xdna, "failed to create psp");
585 		ret = -ENOMEM;
586 		goto release_fw;
587 	}
588 	xdna->dev_handle = ndev;
589 
590 	ret = aie2_hw_start(xdna);
591 	if (ret) {
592 		XDNA_ERR(xdna, "start npu failed, ret %d", ret);
593 		goto release_fw;
594 	}
595 
596 	xrs_cfg.clk_list.num_levels = ndev->max_dpm_level + 1;
597 	for (i = 0; i < xrs_cfg.clk_list.num_levels; i++)
598 		xrs_cfg.clk_list.cu_clk_list[i] = ndev->priv->dpm_clk_tbl[i].hclk;
599 	xrs_cfg.sys_eff_factor = 1;
600 	xrs_cfg.ddev = &xdna->ddev;
601 	xrs_cfg.actions = &aie2_xrs_actions;
602 	xrs_cfg.total_col = ndev->total_col;
603 
604 	xdna->xrs_hdl = xrsm_init(&xrs_cfg);
605 	if (!xdna->xrs_hdl) {
606 		XDNA_ERR(xdna, "Initialize resolver failed");
607 		ret = -EINVAL;
608 		goto stop_hw;
609 	}
610 
611 	release_firmware(fw);
612 	aie2_msg_init(ndev);
613 	amdxdna_pm_init(xdna);
614 	return 0;
615 
616 stop_hw:
617 	aie2_hw_stop(xdna);
618 release_fw:
619 	release_firmware(fw);
620 
621 	return ret;
622 }
623 
624 static void aie2_fini(struct amdxdna_dev *xdna)
625 {
626 	amdxdna_pm_fini(xdna);
627 	aie2_hw_stop(xdna);
628 }
629 
630 static int aie2_get_aie_status(struct amdxdna_client *client,
631 			       struct amdxdna_drm_get_info *args)
632 {
633 	struct amdxdna_drm_query_aie_status status;
634 	struct amdxdna_dev *xdna = client->xdna;
635 	struct amdxdna_dev_hdl *ndev;
636 	int ret;
637 
638 	ndev = xdna->dev_handle;
639 	if (copy_from_user(&status, u64_to_user_ptr(args->buffer), sizeof(status))) {
640 		XDNA_ERR(xdna, "Failed to copy AIE request into kernel");
641 		return -EFAULT;
642 	}
643 
644 	if (ndev->metadata.cols * ndev->metadata.size < status.buffer_size) {
645 		XDNA_ERR(xdna, "Invalid buffer size. Given Size: %u. Need Size: %u.",
646 			 status.buffer_size, ndev->metadata.cols * ndev->metadata.size);
647 		return -EINVAL;
648 	}
649 
650 	ret = aie2_query_status(ndev, u64_to_user_ptr(status.buffer),
651 				status.buffer_size, &status.cols_filled);
652 	if (ret) {
653 		XDNA_ERR(xdna, "Failed to get AIE status info. Ret: %d", ret);
654 		return ret;
655 	}
656 
657 	if (copy_to_user(u64_to_user_ptr(args->buffer), &status, sizeof(status))) {
658 		XDNA_ERR(xdna, "Failed to copy AIE request info to user space");
659 		return -EFAULT;
660 	}
661 
662 	return 0;
663 }
664 
665 static int aie2_get_aie_metadata(struct amdxdna_client *client,
666 				 struct amdxdna_drm_get_info *args)
667 {
668 	struct amdxdna_drm_query_aie_metadata *meta;
669 	struct amdxdna_dev *xdna = client->xdna;
670 	struct amdxdna_dev_hdl *ndev;
671 	int ret = 0;
672 
673 	ndev = xdna->dev_handle;
674 	meta = kzalloc(sizeof(*meta), GFP_KERNEL);
675 	if (!meta)
676 		return -ENOMEM;
677 
678 	meta->col_size = ndev->metadata.size;
679 	meta->cols = ndev->metadata.cols;
680 	meta->rows = ndev->metadata.rows;
681 
682 	meta->version.major = ndev->metadata.version.major;
683 	meta->version.minor = ndev->metadata.version.minor;
684 
685 	meta->core.row_count = ndev->metadata.core.row_count;
686 	meta->core.row_start = ndev->metadata.core.row_start;
687 	meta->core.dma_channel_count = ndev->metadata.core.dma_channel_count;
688 	meta->core.lock_count = ndev->metadata.core.lock_count;
689 	meta->core.event_reg_count = ndev->metadata.core.event_reg_count;
690 
691 	meta->mem.row_count = ndev->metadata.mem.row_count;
692 	meta->mem.row_start = ndev->metadata.mem.row_start;
693 	meta->mem.dma_channel_count = ndev->metadata.mem.dma_channel_count;
694 	meta->mem.lock_count = ndev->metadata.mem.lock_count;
695 	meta->mem.event_reg_count = ndev->metadata.mem.event_reg_count;
696 
697 	meta->shim.row_count = ndev->metadata.shim.row_count;
698 	meta->shim.row_start = ndev->metadata.shim.row_start;
699 	meta->shim.dma_channel_count = ndev->metadata.shim.dma_channel_count;
700 	meta->shim.lock_count = ndev->metadata.shim.lock_count;
701 	meta->shim.event_reg_count = ndev->metadata.shim.event_reg_count;
702 
703 	if (copy_to_user(u64_to_user_ptr(args->buffer), meta, sizeof(*meta)))
704 		ret = -EFAULT;
705 
706 	kfree(meta);
707 	return ret;
708 }
709 
710 static int aie2_get_aie_version(struct amdxdna_client *client,
711 				struct amdxdna_drm_get_info *args)
712 {
713 	struct amdxdna_drm_query_aie_version version;
714 	struct amdxdna_dev *xdna = client->xdna;
715 	struct amdxdna_dev_hdl *ndev;
716 
717 	ndev = xdna->dev_handle;
718 	version.major = ndev->version.major;
719 	version.minor = ndev->version.minor;
720 
721 	if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version)))
722 		return -EFAULT;
723 
724 	return 0;
725 }
726 
727 static int aie2_get_firmware_version(struct amdxdna_client *client,
728 				     struct amdxdna_drm_get_info *args)
729 {
730 	struct amdxdna_drm_query_firmware_version version;
731 	struct amdxdna_dev *xdna = client->xdna;
732 
733 	version.major = xdna->fw_ver.major;
734 	version.minor = xdna->fw_ver.minor;
735 	version.patch = xdna->fw_ver.sub;
736 	version.build = xdna->fw_ver.build;
737 
738 	if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version)))
739 		return -EFAULT;
740 
741 	return 0;
742 }
743 
744 static int aie2_get_power_mode(struct amdxdna_client *client,
745 			       struct amdxdna_drm_get_info *args)
746 {
747 	struct amdxdna_drm_get_power_mode mode = {};
748 	struct amdxdna_dev *xdna = client->xdna;
749 	struct amdxdna_dev_hdl *ndev;
750 
751 	ndev = xdna->dev_handle;
752 	mode.power_mode = ndev->pw_mode;
753 
754 	if (copy_to_user(u64_to_user_ptr(args->buffer), &mode, sizeof(mode)))
755 		return -EFAULT;
756 
757 	return 0;
758 }
759 
760 static int aie2_get_clock_metadata(struct amdxdna_client *client,
761 				   struct amdxdna_drm_get_info *args)
762 {
763 	struct amdxdna_drm_query_clock_metadata *clock;
764 	struct amdxdna_dev *xdna = client->xdna;
765 	struct amdxdna_dev_hdl *ndev;
766 	int ret = 0;
767 
768 	ndev = xdna->dev_handle;
769 	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
770 	if (!clock)
771 		return -ENOMEM;
772 
773 	snprintf(clock->mp_npu_clock.name, sizeof(clock->mp_npu_clock.name),
774 		 "MP-NPU Clock");
775 	clock->mp_npu_clock.freq_mhz = ndev->npuclk_freq;
776 	snprintf(clock->h_clock.name, sizeof(clock->h_clock.name), "H Clock");
777 	clock->h_clock.freq_mhz = ndev->hclk_freq;
778 
779 	if (copy_to_user(u64_to_user_ptr(args->buffer), clock, sizeof(*clock)))
780 		ret = -EFAULT;
781 
782 	kfree(clock);
783 	return ret;
784 }
785 
786 static int aie2_hwctx_status_cb(struct amdxdna_hwctx *hwctx, void *arg)
787 {
788 	struct amdxdna_drm_hwctx_entry *tmp __free(kfree) = NULL;
789 	struct amdxdna_drm_get_array *array_args = arg;
790 	struct amdxdna_drm_hwctx_entry __user *buf;
791 	u32 size;
792 
793 	if (!array_args->num_element)
794 		return -EINVAL;
795 
796 	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
797 	if (!tmp)
798 		return -ENOMEM;
799 
800 	tmp->pid = hwctx->client->pid;
801 	tmp->context_id = hwctx->id;
802 	tmp->start_col = hwctx->start_col;
803 	tmp->num_col = hwctx->num_col;
804 	tmp->command_submissions = hwctx->priv->seq;
805 	tmp->command_completions = hwctx->priv->completed;
806 	tmp->pasid = hwctx->client->pasid;
807 	tmp->priority = hwctx->qos.priority;
808 	tmp->gops = hwctx->qos.gops;
809 	tmp->fps = hwctx->qos.fps;
810 	tmp->dma_bandwidth = hwctx->qos.dma_bandwidth;
811 	tmp->latency = hwctx->qos.latency;
812 	tmp->frame_exec_time = hwctx->qos.frame_exec_time;
813 	tmp->state = AMDXDNA_HWCTX_STATE_ACTIVE;
814 
815 	buf = u64_to_user_ptr(array_args->buffer);
816 	size = min(sizeof(*tmp), array_args->element_size);
817 
818 	if (copy_to_user(buf, tmp, size))
819 		return -EFAULT;
820 
821 	array_args->buffer += size;
822 	array_args->num_element--;
823 
824 	return 0;
825 }
826 
827 static int aie2_get_hwctx_status(struct amdxdna_client *client,
828 				 struct amdxdna_drm_get_info *args)
829 {
830 	struct amdxdna_drm_get_array array_args;
831 	struct amdxdna_dev *xdna = client->xdna;
832 	struct amdxdna_client *tmp_client;
833 	int ret;
834 
835 	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
836 
837 	array_args.element_size = sizeof(struct amdxdna_drm_query_hwctx);
838 	array_args.buffer = args->buffer;
839 	array_args.num_element = args->buffer_size / array_args.element_size;
840 	list_for_each_entry(tmp_client, &xdna->client_list, node) {
841 		ret = amdxdna_hwctx_walk(tmp_client, &array_args,
842 					 aie2_hwctx_status_cb);
843 		if (ret)
844 			break;
845 	}
846 
847 	args->buffer_size -= (u32)(array_args.buffer - args->buffer);
848 	return 0;
849 }
850 
851 static int aie2_query_resource_info(struct amdxdna_client *client,
852 				    struct amdxdna_drm_get_info *args)
853 {
854 	struct amdxdna_drm_get_resource_info res_info;
855 	const struct amdxdna_dev_priv *priv;
856 	struct amdxdna_dev_hdl *ndev;
857 	struct amdxdna_dev *xdna;
858 
859 	xdna = client->xdna;
860 	ndev = xdna->dev_handle;
861 	priv = ndev->priv;
862 
863 	res_info.npu_clk_max = priv->dpm_clk_tbl[ndev->max_dpm_level].hclk;
864 	res_info.npu_tops_max = ndev->max_tops;
865 	res_info.npu_task_max = priv->hwctx_limit;
866 	res_info.npu_tops_curr = ndev->curr_tops;
867 	res_info.npu_task_curr = ndev->hwctx_num;
868 
869 	if (copy_to_user(u64_to_user_ptr(args->buffer), &res_info, sizeof(res_info)))
870 		return -EFAULT;
871 
872 	return 0;
873 }
874 
875 static int aie2_fill_hwctx_map(struct amdxdna_hwctx *hwctx, void *arg)
876 {
877 	struct amdxdna_dev *xdna = hwctx->client->xdna;
878 	u32 *map = arg;
879 
880 	if (hwctx->fw_ctx_id >= xdna->dev_handle->priv->hwctx_limit) {
881 		XDNA_ERR(xdna, "Invalid fw ctx id %d/%d ", hwctx->fw_ctx_id,
882 			 xdna->dev_handle->priv->hwctx_limit);
883 		return -EINVAL;
884 	}
885 
886 	map[hwctx->fw_ctx_id] = hwctx->id;
887 	return 0;
888 }
889 
890 static int aie2_get_telemetry(struct amdxdna_client *client,
891 			      struct amdxdna_drm_get_info *args)
892 {
893 	struct amdxdna_drm_query_telemetry_header *header __free(kfree) = NULL;
894 	u32 telemetry_data_sz, header_sz, elem_num;
895 	struct amdxdna_dev *xdna = client->xdna;
896 	struct amdxdna_client *tmp_client;
897 	int ret;
898 
899 	elem_num = xdna->dev_handle->priv->hwctx_limit;
900 	header_sz = struct_size(header, map, elem_num);
901 	if (args->buffer_size <= header_sz) {
902 		XDNA_ERR(xdna, "Invalid buffer size");
903 		return -EINVAL;
904 	}
905 
906 	telemetry_data_sz = args->buffer_size - header_sz;
907 	if (telemetry_data_sz > SZ_4M) {
908 		XDNA_ERR(xdna, "Buffer size is too big, %d", telemetry_data_sz);
909 		return -EINVAL;
910 	}
911 
912 	header = kzalloc(header_sz, GFP_KERNEL);
913 	if (!header)
914 		return -ENOMEM;
915 
916 	if (copy_from_user(header, u64_to_user_ptr(args->buffer), sizeof(*header))) {
917 		XDNA_ERR(xdna, "Failed to copy telemetry header from user");
918 		return -EFAULT;
919 	}
920 
921 	header->map_num_elements = elem_num;
922 	list_for_each_entry(tmp_client, &xdna->client_list, node) {
923 		ret = amdxdna_hwctx_walk(tmp_client, &header->map,
924 					 aie2_fill_hwctx_map);
925 		if (ret)
926 			return ret;
927 	}
928 
929 	ret = aie2_query_telemetry(xdna->dev_handle,
930 				   u64_to_user_ptr(args->buffer + header_sz),
931 				   telemetry_data_sz, header);
932 	if (ret) {
933 		XDNA_ERR(xdna, "Query telemetry failed ret %d", ret);
934 		return ret;
935 	}
936 
937 	if (copy_to_user(u64_to_user_ptr(args->buffer), header, header_sz)) {
938 		XDNA_ERR(xdna, "Copy header failed");
939 		return -EFAULT;
940 	}
941 
942 	return 0;
943 }
944 
945 static int aie2_get_preempt_state(struct amdxdna_client *client,
946 				  struct amdxdna_drm_get_info *args)
947 {
948 	struct amdxdna_drm_attribute_state state = {};
949 	struct amdxdna_dev *xdna = client->xdna;
950 	struct amdxdna_dev_hdl *ndev;
951 
952 	ndev = xdna->dev_handle;
953 	if (args->param == DRM_AMDXDNA_GET_FORCE_PREEMPT_STATE)
954 		state.state = ndev->force_preempt_enabled;
955 	else if (args->param == DRM_AMDXDNA_GET_FRAME_BOUNDARY_PREEMPT_STATE)
956 		state.state = ndev->frame_boundary_preempt;
957 
958 	if (copy_to_user(u64_to_user_ptr(args->buffer), &state, sizeof(state)))
959 		return -EFAULT;
960 
961 	return 0;
962 }
963 
964 static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_info *args)
965 {
966 	struct amdxdna_dev *xdna = client->xdna;
967 	int ret, idx;
968 
969 	if (!drm_dev_enter(&xdna->ddev, &idx))
970 		return -ENODEV;
971 
972 	ret = amdxdna_pm_resume_get(xdna);
973 	if (ret)
974 		goto dev_exit;
975 
976 	switch (args->param) {
977 	case DRM_AMDXDNA_QUERY_AIE_STATUS:
978 		ret = aie2_get_aie_status(client, args);
979 		break;
980 	case DRM_AMDXDNA_QUERY_AIE_METADATA:
981 		ret = aie2_get_aie_metadata(client, args);
982 		break;
983 	case DRM_AMDXDNA_QUERY_AIE_VERSION:
984 		ret = aie2_get_aie_version(client, args);
985 		break;
986 	case DRM_AMDXDNA_QUERY_CLOCK_METADATA:
987 		ret = aie2_get_clock_metadata(client, args);
988 		break;
989 	case DRM_AMDXDNA_QUERY_HW_CONTEXTS:
990 		ret = aie2_get_hwctx_status(client, args);
991 		break;
992 	case DRM_AMDXDNA_QUERY_FIRMWARE_VERSION:
993 		ret = aie2_get_firmware_version(client, args);
994 		break;
995 	case DRM_AMDXDNA_GET_POWER_MODE:
996 		ret = aie2_get_power_mode(client, args);
997 		break;
998 	case DRM_AMDXDNA_QUERY_TELEMETRY:
999 		ret = aie2_get_telemetry(client, args);
1000 		break;
1001 	case DRM_AMDXDNA_QUERY_RESOURCE_INFO:
1002 		ret = aie2_query_resource_info(client, args);
1003 		break;
1004 	case DRM_AMDXDNA_GET_FORCE_PREEMPT_STATE:
1005 	case DRM_AMDXDNA_GET_FRAME_BOUNDARY_PREEMPT_STATE:
1006 		ret = aie2_get_preempt_state(client, args);
1007 		break;
1008 	default:
1009 		XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
1010 		ret = -EOPNOTSUPP;
1011 	}
1012 
1013 	amdxdna_pm_suspend_put(xdna);
1014 	XDNA_DBG(xdna, "Got param %d", args->param);
1015 
1016 dev_exit:
1017 	drm_dev_exit(idx);
1018 	return ret;
1019 }
1020 
1021 static int aie2_query_ctx_status_array(struct amdxdna_client *client,
1022 				       struct amdxdna_drm_get_array *args)
1023 {
1024 	struct amdxdna_drm_get_array array_args;
1025 	struct amdxdna_dev *xdna = client->xdna;
1026 	struct amdxdna_client *tmp_client;
1027 	int ret;
1028 
1029 	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
1030 
1031 	if (args->element_size > SZ_4K || args->num_element > SZ_1K) {
1032 		XDNA_DBG(xdna, "Invalid element size %d or number of element %d",
1033 			 args->element_size, args->num_element);
1034 		return -EINVAL;
1035 	}
1036 
1037 	array_args.element_size = min(args->element_size,
1038 				      sizeof(struct amdxdna_drm_hwctx_entry));
1039 	array_args.buffer = args->buffer;
1040 	array_args.num_element = args->num_element * args->element_size /
1041 				array_args.element_size;
1042 	list_for_each_entry(tmp_client, &xdna->client_list, node) {
1043 		ret = amdxdna_hwctx_walk(tmp_client, &array_args,
1044 					 aie2_hwctx_status_cb);
1045 		if (ret)
1046 			break;
1047 	}
1048 
1049 	args->element_size = array_args.element_size;
1050 	args->num_element = (u32)((array_args.buffer - args->buffer) /
1051 				  args->element_size);
1052 
1053 	return 0;
1054 }
1055 
1056 static int aie2_get_array(struct amdxdna_client *client,
1057 			  struct amdxdna_drm_get_array *args)
1058 {
1059 	struct amdxdna_dev *xdna = client->xdna;
1060 	int ret, idx;
1061 
1062 	if (!drm_dev_enter(&xdna->ddev, &idx))
1063 		return -ENODEV;
1064 
1065 	ret = amdxdna_pm_resume_get(xdna);
1066 	if (ret)
1067 		goto dev_exit;
1068 
1069 	switch (args->param) {
1070 	case DRM_AMDXDNA_HW_CONTEXT_ALL:
1071 		ret = aie2_query_ctx_status_array(client, args);
1072 		break;
1073 	case DRM_AMDXDNA_HW_LAST_ASYNC_ERR:
1074 		ret = aie2_get_array_async_error(xdna->dev_handle, args);
1075 		break;
1076 	default:
1077 		XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
1078 		ret = -EOPNOTSUPP;
1079 	}
1080 
1081 	amdxdna_pm_suspend_put(xdna);
1082 	XDNA_DBG(xdna, "Got param %d", args->param);
1083 
1084 dev_exit:
1085 	drm_dev_exit(idx);
1086 	return ret;
1087 }
1088 
1089 static int aie2_set_power_mode(struct amdxdna_client *client,
1090 			       struct amdxdna_drm_set_state *args)
1091 {
1092 	struct amdxdna_drm_set_power_mode power_state;
1093 	enum amdxdna_power_mode_type power_mode;
1094 	struct amdxdna_dev *xdna = client->xdna;
1095 
1096 	if (copy_from_user(&power_state, u64_to_user_ptr(args->buffer),
1097 			   sizeof(power_state))) {
1098 		XDNA_ERR(xdna, "Failed to copy power mode request into kernel");
1099 		return -EFAULT;
1100 	}
1101 
1102 	if (XDNA_MBZ_DBG(xdna, power_state.pad, sizeof(power_state.pad)))
1103 		return -EINVAL;
1104 
1105 	power_mode = power_state.power_mode;
1106 	if (power_mode > POWER_MODE_TURBO) {
1107 		XDNA_ERR(xdna, "Invalid power mode %d", power_mode);
1108 		return -EINVAL;
1109 	}
1110 
1111 	return aie2_pm_set_mode(xdna->dev_handle, power_mode);
1112 }
1113 
1114 static int aie2_set_preempt_state(struct amdxdna_client *client,
1115 				  struct amdxdna_drm_set_state *args)
1116 {
1117 	struct amdxdna_dev_hdl *ndev = client->xdna->dev_handle;
1118 	struct amdxdna_drm_attribute_state state;
1119 	u32 val;
1120 	int ret;
1121 
1122 	if (copy_from_user(&state, u64_to_user_ptr(args->buffer), sizeof(state)))
1123 		return -EFAULT;
1124 
1125 	if (state.state > 1)
1126 		return -EINVAL;
1127 
1128 	if (XDNA_MBZ_DBG(client->xdna, state.pad, sizeof(state.pad)))
1129 		return -EINVAL;
1130 
1131 	if (args->param == DRM_AMDXDNA_SET_FORCE_PREEMPT) {
1132 		ndev->force_preempt_enabled = state.state;
1133 	} else if (args->param == DRM_AMDXDNA_SET_FRAME_BOUNDARY_PREEMPT) {
1134 		val = state.state;
1135 		ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_FRAME_BOUNDARY_PREEMPT,
1136 				       &val);
1137 		if (ret)
1138 			return ret;
1139 
1140 		ndev->frame_boundary_preempt = state.state;
1141 	}
1142 
1143 	return 0;
1144 }
1145 
1146 static int aie2_set_state(struct amdxdna_client *client,
1147 			  struct amdxdna_drm_set_state *args)
1148 {
1149 	struct amdxdna_dev *xdna = client->xdna;
1150 	int ret, idx;
1151 
1152 	if (!drm_dev_enter(&xdna->ddev, &idx))
1153 		return -ENODEV;
1154 
1155 	ret = amdxdna_pm_resume_get(xdna);
1156 	if (ret)
1157 		goto dev_exit;
1158 
1159 	switch (args->param) {
1160 	case DRM_AMDXDNA_SET_POWER_MODE:
1161 		ret = aie2_set_power_mode(client, args);
1162 		break;
1163 	case DRM_AMDXDNA_SET_FORCE_PREEMPT:
1164 	case DRM_AMDXDNA_SET_FRAME_BOUNDARY_PREEMPT:
1165 		ret = aie2_set_preempt_state(client, args);
1166 		break;
1167 	default:
1168 		XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
1169 		ret = -EOPNOTSUPP;
1170 		break;
1171 	}
1172 
1173 	amdxdna_pm_suspend_put(xdna);
1174 dev_exit:
1175 	drm_dev_exit(idx);
1176 	return ret;
1177 }
1178 
1179 const struct amdxdna_dev_ops aie2_ops = {
1180 	.init = aie2_init,
1181 	.fini = aie2_fini,
1182 	.resume = aie2_hw_resume,
1183 	.suspend = aie2_hw_suspend,
1184 	.get_aie_info = aie2_get_info,
1185 	.set_aie_state = aie2_set_state,
1186 	.hwctx_init = aie2_hwctx_init,
1187 	.hwctx_fini = aie2_hwctx_fini,
1188 	.hwctx_config = aie2_hwctx_config,
1189 	.hwctx_sync_debug_bo = aie2_hwctx_sync_debug_bo,
1190 	.cmd_submit = aie2_cmd_submit,
1191 	.hmm_invalidate = aie2_hmm_invalidate,
1192 	.get_array = aie2_get_array,
1193 };
1194