1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2023-2024, Advanced Micro Devices, Inc. 4 */ 5 6 #include <drm/amdxdna_accel.h> 7 #include <drm/drm_device.h> 8 #include <drm/drm_drv.h> 9 #include <drm/drm_gem_shmem_helper.h> 10 #include <drm/drm_managed.h> 11 #include <drm/drm_print.h> 12 #include <drm/gpu_scheduler.h> 13 #include <linux/cleanup.h> 14 #include <linux/errno.h> 15 #include <linux/firmware.h> 16 #include <linux/iommu.h> 17 #include <linux/iopoll.h> 18 #include <linux/pci.h> 19 #include <linux/xarray.h> 20 21 #include "aie2_msg_priv.h" 22 #include "aie2_pci.h" 23 #include "aie2_solver.h" 24 #include "amdxdna_ctx.h" 25 #include "amdxdna_gem.h" 26 #include "amdxdna_mailbox.h" 27 #include "amdxdna_pci_drv.h" 28 #include "amdxdna_pm.h" 29 30 static int aie2_max_col = XRS_MAX_COL; 31 module_param(aie2_max_col, uint, 0600); 32 MODULE_PARM_DESC(aie2_max_col, "Maximum column could be used"); 33 34 /* 35 * The management mailbox channel is allocated by firmware. 36 * The related register and ring buffer information is on SRAM BAR. 37 * This struct is the register layout. 38 */ 39 #define MGMT_MBOX_MAGIC 0x55504e5f /* _NPU */ 40 struct mgmt_mbox_chann_info { 41 __u32 x2i_tail; 42 __u32 x2i_head; 43 __u32 x2i_buf; 44 __u32 x2i_buf_sz; 45 __u32 i2x_tail; 46 __u32 i2x_head; 47 __u32 i2x_buf; 48 __u32 i2x_buf_sz; 49 __u32 magic; 50 __u32 msi_id; 51 __u32 prot_major; 52 __u32 prot_minor; 53 __u32 rsvd[4]; 54 }; 55 56 static int aie2_check_protocol(struct amdxdna_dev_hdl *ndev, u32 fw_major, u32 fw_minor) 57 { 58 const struct aie2_fw_feature_tbl *feature; 59 struct amdxdna_dev *xdna = ndev->xdna; 60 61 /* 62 * The driver supported mailbox behavior is defined by 63 * ndev->priv->protocol_major and protocol_minor. 64 * 65 * When protocol_major and fw_major are different, it means driver 66 * and firmware are incompatible. 67 */ 68 if (ndev->priv->protocol_major != fw_major) { 69 XDNA_ERR(xdna, "Incompatible firmware protocol major %d minor %d", 70 fw_major, fw_minor); 71 return -EINVAL; 72 } 73 74 /* 75 * When protocol_minor is greater then fw_minor, that means driver 76 * relies on operation the installed firmware does not support. 77 */ 78 if (ndev->priv->protocol_minor > fw_minor) { 79 XDNA_ERR(xdna, "Firmware minor version smaller than supported"); 80 return -EINVAL; 81 } 82 83 for (feature = ndev->priv->fw_feature_tbl; feature && feature->min_minor; 84 feature++) { 85 if (fw_minor < feature->min_minor) 86 continue; 87 if (feature->max_minor > 0 && fw_minor > feature->max_minor) 88 continue; 89 90 set_bit(feature->feature, &ndev->feature_mask); 91 } 92 93 return 0; 94 } 95 96 static void aie2_dump_chann_info_debug(struct amdxdna_dev_hdl *ndev) 97 { 98 struct amdxdna_dev *xdna = ndev->xdna; 99 100 XDNA_DBG(xdna, "i2x tail 0x%x", ndev->mgmt_i2x.mb_tail_ptr_reg); 101 XDNA_DBG(xdna, "i2x head 0x%x", ndev->mgmt_i2x.mb_head_ptr_reg); 102 XDNA_DBG(xdna, "i2x ringbuf 0x%x", ndev->mgmt_i2x.rb_start_addr); 103 XDNA_DBG(xdna, "i2x rsize 0x%x", ndev->mgmt_i2x.rb_size); 104 XDNA_DBG(xdna, "x2i tail 0x%x", ndev->mgmt_x2i.mb_tail_ptr_reg); 105 XDNA_DBG(xdna, "x2i head 0x%x", ndev->mgmt_x2i.mb_head_ptr_reg); 106 XDNA_DBG(xdna, "x2i ringbuf 0x%x", ndev->mgmt_x2i.rb_start_addr); 107 XDNA_DBG(xdna, "x2i rsize 0x%x", ndev->mgmt_x2i.rb_size); 108 XDNA_DBG(xdna, "x2i chann index 0x%x", ndev->mgmt_chan_idx); 109 XDNA_DBG(xdna, "mailbox protocol major 0x%x", ndev->mgmt_prot_major); 110 XDNA_DBG(xdna, "mailbox protocol minor 0x%x", ndev->mgmt_prot_minor); 111 } 112 113 static int aie2_get_mgmt_chann_info(struct amdxdna_dev_hdl *ndev) 114 { 115 struct mgmt_mbox_chann_info info_regs; 116 struct xdna_mailbox_chann_res *i2x; 117 struct xdna_mailbox_chann_res *x2i; 118 u32 addr, off; 119 u32 *reg; 120 int ret; 121 int i; 122 123 /* 124 * Once firmware is alive, it will write management channel 125 * information in SRAM BAR and write the address of that information 126 * at FW_ALIVE_OFF offset in SRMA BAR. 127 * 128 * Read a non-zero value from FW_ALIVE_OFF implies that firmware 129 * is alive. 130 */ 131 ret = readx_poll_timeout(readl, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF), 132 addr, addr, AIE2_INTERVAL, AIE2_TIMEOUT); 133 if (ret || !addr) 134 return -ETIME; 135 136 off = AIE2_SRAM_OFF(ndev, addr); 137 reg = (u32 *)&info_regs; 138 for (i = 0; i < sizeof(info_regs) / sizeof(u32); i++) 139 reg[i] = readl(ndev->sram_base + off + i * sizeof(u32)); 140 141 if (info_regs.magic != MGMT_MBOX_MAGIC) { 142 XDNA_ERR(ndev->xdna, "Invalid mbox magic 0x%x", info_regs.magic); 143 ret = -EINVAL; 144 goto done; 145 } 146 147 i2x = &ndev->mgmt_i2x; 148 x2i = &ndev->mgmt_x2i; 149 150 i2x->mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.i2x_head); 151 i2x->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.i2x_tail); 152 i2x->rb_start_addr = AIE2_SRAM_OFF(ndev, info_regs.i2x_buf); 153 i2x->rb_size = info_regs.i2x_buf_sz; 154 155 x2i->mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_head); 156 x2i->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_tail); 157 x2i->rb_start_addr = AIE2_SRAM_OFF(ndev, info_regs.x2i_buf); 158 x2i->rb_size = info_regs.x2i_buf_sz; 159 160 ndev->mgmt_chan_idx = info_regs.msi_id; 161 ndev->mgmt_prot_major = info_regs.prot_major; 162 ndev->mgmt_prot_minor = info_regs.prot_minor; 163 164 ret = aie2_check_protocol(ndev, ndev->mgmt_prot_major, ndev->mgmt_prot_minor); 165 166 done: 167 aie2_dump_chann_info_debug(ndev); 168 169 /* Must clear address at FW_ALIVE_OFF */ 170 writel(0, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF)); 171 172 return ret; 173 } 174 175 int aie2_runtime_cfg(struct amdxdna_dev_hdl *ndev, 176 enum rt_config_category category, u32 *val) 177 { 178 const struct rt_config *cfg; 179 u32 value; 180 int ret; 181 182 for (cfg = ndev->priv->rt_config; cfg->type; cfg++) { 183 if (cfg->category != category) 184 continue; 185 186 value = val ? *val : cfg->value; 187 ret = aie2_set_runtime_cfg(ndev, cfg->type, value); 188 if (ret) { 189 XDNA_ERR(ndev->xdna, "Set type %d value %d failed", 190 cfg->type, value); 191 return ret; 192 } 193 } 194 195 return 0; 196 } 197 198 static int aie2_xdna_reset(struct amdxdna_dev_hdl *ndev) 199 { 200 int ret; 201 202 ret = aie2_suspend_fw(ndev); 203 if (ret) { 204 XDNA_ERR(ndev->xdna, "Suspend firmware failed"); 205 return ret; 206 } 207 208 ret = aie2_resume_fw(ndev); 209 if (ret) { 210 XDNA_ERR(ndev->xdna, "Resume firmware failed"); 211 return ret; 212 } 213 214 return 0; 215 } 216 217 static int aie2_mgmt_fw_init(struct amdxdna_dev_hdl *ndev) 218 { 219 int ret; 220 221 ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_INIT, NULL); 222 if (ret) { 223 XDNA_ERR(ndev->xdna, "Runtime config failed"); 224 return ret; 225 } 226 227 ret = aie2_assign_mgmt_pasid(ndev, 0); 228 if (ret) { 229 XDNA_ERR(ndev->xdna, "Can not assign PASID"); 230 return ret; 231 } 232 233 ret = aie2_xdna_reset(ndev); 234 if (ret) { 235 XDNA_ERR(ndev->xdna, "Reset firmware failed"); 236 return ret; 237 } 238 239 return 0; 240 } 241 242 static int aie2_mgmt_fw_query(struct amdxdna_dev_hdl *ndev) 243 { 244 int ret; 245 246 ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver); 247 if (ret) { 248 XDNA_ERR(ndev->xdna, "query firmware version failed"); 249 return ret; 250 } 251 252 ret = aie2_query_aie_version(ndev, &ndev->version); 253 if (ret) { 254 XDNA_ERR(ndev->xdna, "Query AIE version failed"); 255 return ret; 256 } 257 258 ret = aie2_query_aie_metadata(ndev, &ndev->metadata); 259 if (ret) { 260 XDNA_ERR(ndev->xdna, "Query AIE metadata failed"); 261 return ret; 262 } 263 264 ndev->total_col = min(aie2_max_col, ndev->metadata.cols); 265 266 return 0; 267 } 268 269 static void aie2_mgmt_fw_fini(struct amdxdna_dev_hdl *ndev) 270 { 271 if (aie2_suspend_fw(ndev)) 272 XDNA_ERR(ndev->xdna, "Suspend_fw failed"); 273 XDNA_DBG(ndev->xdna, "Firmware suspended"); 274 } 275 276 static int aie2_xrs_load(void *cb_arg, struct xrs_action_load *action) 277 { 278 struct amdxdna_hwctx *hwctx = cb_arg; 279 struct amdxdna_dev *xdna; 280 int ret; 281 282 xdna = hwctx->client->xdna; 283 284 hwctx->start_col = action->part.start_col; 285 hwctx->num_col = action->part.ncols; 286 ret = aie2_create_context(xdna->dev_handle, hwctx); 287 if (ret) 288 XDNA_ERR(xdna, "create context failed, ret %d", ret); 289 290 return ret; 291 } 292 293 static int aie2_xrs_unload(void *cb_arg) 294 { 295 struct amdxdna_hwctx *hwctx = cb_arg; 296 struct amdxdna_dev *xdna; 297 int ret; 298 299 xdna = hwctx->client->xdna; 300 301 ret = aie2_destroy_context(xdna->dev_handle, hwctx); 302 if (ret) 303 XDNA_ERR(xdna, "destroy context failed, ret %d", ret); 304 305 return ret; 306 } 307 308 static int aie2_xrs_set_dft_dpm_level(struct drm_device *ddev, u32 dpm_level) 309 { 310 struct amdxdna_dev *xdna = to_xdna_dev(ddev); 311 struct amdxdna_dev_hdl *ndev; 312 313 drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); 314 315 ndev = xdna->dev_handle; 316 ndev->dft_dpm_level = dpm_level; 317 if (ndev->pw_mode != POWER_MODE_DEFAULT || ndev->dpm_level == dpm_level) 318 return 0; 319 320 return ndev->priv->hw_ops.set_dpm(ndev, dpm_level); 321 } 322 323 static struct xrs_action_ops aie2_xrs_actions = { 324 .load = aie2_xrs_load, 325 .unload = aie2_xrs_unload, 326 .set_dft_dpm_level = aie2_xrs_set_dft_dpm_level, 327 }; 328 329 static void aie2_hw_stop(struct amdxdna_dev *xdna) 330 { 331 struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev); 332 struct amdxdna_dev_hdl *ndev = xdna->dev_handle; 333 334 if (ndev->dev_status <= AIE2_DEV_INIT) { 335 XDNA_ERR(xdna, "device is already stopped"); 336 return; 337 } 338 339 aie2_mgmt_fw_fini(ndev); 340 xdna_mailbox_stop_channel(ndev->mgmt_chann); 341 xdna_mailbox_destroy_channel(ndev->mgmt_chann); 342 ndev->mgmt_chann = NULL; 343 drmm_kfree(&xdna->ddev, ndev->mbox); 344 ndev->mbox = NULL; 345 aie2_psp_stop(ndev->psp_hdl); 346 aie2_smu_fini(ndev); 347 aie2_error_async_events_free(ndev); 348 pci_disable_device(pdev); 349 350 ndev->dev_status = AIE2_DEV_INIT; 351 } 352 353 static int aie2_hw_start(struct amdxdna_dev *xdna) 354 { 355 struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev); 356 struct amdxdna_dev_hdl *ndev = xdna->dev_handle; 357 struct xdna_mailbox_res mbox_res; 358 u32 xdna_mailbox_intr_reg; 359 int mgmt_mb_irq, ret; 360 361 if (ndev->dev_status >= AIE2_DEV_START) { 362 XDNA_INFO(xdna, "device is already started"); 363 return 0; 364 } 365 366 ret = pci_enable_device(pdev); 367 if (ret) { 368 XDNA_ERR(xdna, "failed to enable device, ret %d", ret); 369 return ret; 370 } 371 pci_set_master(pdev); 372 373 ret = aie2_smu_init(ndev); 374 if (ret) { 375 XDNA_ERR(xdna, "failed to init smu, ret %d", ret); 376 goto disable_dev; 377 } 378 379 ret = aie2_psp_start(ndev->psp_hdl); 380 if (ret) { 381 XDNA_ERR(xdna, "failed to start psp, ret %d", ret); 382 goto fini_smu; 383 } 384 385 ret = aie2_get_mgmt_chann_info(ndev); 386 if (ret) { 387 XDNA_ERR(xdna, "firmware is not alive"); 388 goto stop_psp; 389 } 390 391 mbox_res.ringbuf_base = ndev->sram_base; 392 mbox_res.ringbuf_size = pci_resource_len(pdev, xdna->dev_info->sram_bar); 393 mbox_res.mbox_base = ndev->mbox_base; 394 mbox_res.mbox_size = MBOX_SIZE(ndev); 395 mbox_res.name = "xdna_mailbox"; 396 ndev->mbox = xdnam_mailbox_create(&xdna->ddev, &mbox_res); 397 if (!ndev->mbox) { 398 XDNA_ERR(xdna, "failed to create mailbox device"); 399 ret = -ENODEV; 400 goto stop_psp; 401 } 402 403 mgmt_mb_irq = pci_irq_vector(pdev, ndev->mgmt_chan_idx); 404 if (mgmt_mb_irq < 0) { 405 ret = mgmt_mb_irq; 406 XDNA_ERR(xdna, "failed to alloc irq vector, ret %d", ret); 407 goto stop_psp; 408 } 409 410 xdna_mailbox_intr_reg = ndev->mgmt_i2x.mb_head_ptr_reg + 4; 411 ndev->mgmt_chann = xdna_mailbox_create_channel(ndev->mbox, 412 &ndev->mgmt_x2i, 413 &ndev->mgmt_i2x, 414 xdna_mailbox_intr_reg, 415 mgmt_mb_irq); 416 if (!ndev->mgmt_chann) { 417 XDNA_ERR(xdna, "failed to create management mailbox channel"); 418 ret = -EINVAL; 419 goto stop_psp; 420 } 421 422 ret = aie2_pm_init(ndev); 423 if (ret) { 424 XDNA_ERR(xdna, "failed to init pm, ret %d", ret); 425 goto destroy_mgmt_chann; 426 } 427 428 ret = aie2_mgmt_fw_init(ndev); 429 if (ret) { 430 XDNA_ERR(xdna, "initial mgmt firmware failed, ret %d", ret); 431 goto destroy_mgmt_chann; 432 } 433 434 ret = aie2_mgmt_fw_query(ndev); 435 if (ret) { 436 XDNA_ERR(xdna, "failed to query fw, ret %d", ret); 437 goto destroy_mgmt_chann; 438 } 439 440 ret = aie2_error_async_events_alloc(ndev); 441 if (ret) { 442 XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret); 443 goto destroy_mgmt_chann; 444 } 445 446 ndev->dev_status = AIE2_DEV_START; 447 448 return 0; 449 450 destroy_mgmt_chann: 451 xdna_mailbox_stop_channel(ndev->mgmt_chann); 452 xdna_mailbox_destroy_channel(ndev->mgmt_chann); 453 stop_psp: 454 aie2_psp_stop(ndev->psp_hdl); 455 fini_smu: 456 aie2_smu_fini(ndev); 457 disable_dev: 458 pci_disable_device(pdev); 459 460 return ret; 461 } 462 463 static int aie2_hw_suspend(struct amdxdna_dev *xdna) 464 { 465 struct amdxdna_client *client; 466 467 guard(mutex)(&xdna->dev_lock); 468 list_for_each_entry(client, &xdna->client_list, node) 469 aie2_hwctx_suspend(client); 470 471 aie2_hw_stop(xdna); 472 473 return 0; 474 } 475 476 static int aie2_hw_resume(struct amdxdna_dev *xdna) 477 { 478 struct amdxdna_client *client; 479 int ret; 480 481 ret = aie2_hw_start(xdna); 482 if (ret) { 483 XDNA_ERR(xdna, "Start hardware failed, %d", ret); 484 return ret; 485 } 486 487 list_for_each_entry(client, &xdna->client_list, node) { 488 ret = aie2_hwctx_resume(client); 489 if (ret) 490 break; 491 } 492 493 return ret; 494 } 495 496 static int aie2_init(struct amdxdna_dev *xdna) 497 { 498 struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev); 499 void __iomem *tbl[PCI_NUM_RESOURCES] = {0}; 500 struct init_config xrs_cfg = { 0 }; 501 struct amdxdna_dev_hdl *ndev; 502 struct psp_config psp_conf; 503 const struct firmware *fw; 504 unsigned long bars = 0; 505 int i, nvec, ret; 506 507 ndev = drmm_kzalloc(&xdna->ddev, sizeof(*ndev), GFP_KERNEL); 508 if (!ndev) 509 return -ENOMEM; 510 511 ndev->priv = xdna->dev_info->dev_priv; 512 ndev->xdna = xdna; 513 514 ret = request_firmware(&fw, ndev->priv->fw_path, &pdev->dev); 515 if (ret) { 516 XDNA_ERR(xdna, "failed to request_firmware %s, ret %d", 517 ndev->priv->fw_path, ret); 518 return ret; 519 } 520 521 ret = pcim_enable_device(pdev); 522 if (ret) { 523 XDNA_ERR(xdna, "pcim enable device failed, ret %d", ret); 524 goto release_fw; 525 } 526 527 for (i = 0; i < PSP_MAX_REGS; i++) 528 set_bit(PSP_REG_BAR(ndev, i), &bars); 529 530 set_bit(xdna->dev_info->sram_bar, &bars); 531 set_bit(xdna->dev_info->smu_bar, &bars); 532 set_bit(xdna->dev_info->mbox_bar, &bars); 533 534 for (i = 0; i < PCI_NUM_RESOURCES; i++) { 535 if (!test_bit(i, &bars)) 536 continue; 537 tbl[i] = pcim_iomap(pdev, i, 0); 538 if (!tbl[i]) { 539 XDNA_ERR(xdna, "map bar %d failed", i); 540 ret = -ENOMEM; 541 goto release_fw; 542 } 543 } 544 545 ndev->sram_base = tbl[xdna->dev_info->sram_bar]; 546 ndev->smu_base = tbl[xdna->dev_info->smu_bar]; 547 ndev->mbox_base = tbl[xdna->dev_info->mbox_bar]; 548 549 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 550 if (ret) { 551 XDNA_ERR(xdna, "Failed to set DMA mask: %d", ret); 552 goto release_fw; 553 } 554 555 nvec = pci_msix_vec_count(pdev); 556 if (nvec <= 0) { 557 XDNA_ERR(xdna, "does not get number of interrupt vector"); 558 ret = -EINVAL; 559 goto release_fw; 560 } 561 562 ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX); 563 if (ret < 0) { 564 XDNA_ERR(xdna, "failed to alloc irq vectors, ret %d", ret); 565 goto release_fw; 566 } 567 568 psp_conf.fw_size = fw->size; 569 psp_conf.fw_buf = fw->data; 570 for (i = 0; i < PSP_MAX_REGS; i++) 571 psp_conf.psp_regs[i] = tbl[PSP_REG_BAR(ndev, i)] + PSP_REG_OFF(ndev, i); 572 ndev->psp_hdl = aie2m_psp_create(&xdna->ddev, &psp_conf); 573 if (!ndev->psp_hdl) { 574 XDNA_ERR(xdna, "failed to create psp"); 575 ret = -ENOMEM; 576 goto release_fw; 577 } 578 xdna->dev_handle = ndev; 579 580 ret = aie2_hw_start(xdna); 581 if (ret) { 582 XDNA_ERR(xdna, "start npu failed, ret %d", ret); 583 goto release_fw; 584 } 585 586 xrs_cfg.clk_list.num_levels = ndev->max_dpm_level + 1; 587 for (i = 0; i < xrs_cfg.clk_list.num_levels; i++) 588 xrs_cfg.clk_list.cu_clk_list[i] = ndev->priv->dpm_clk_tbl[i].hclk; 589 xrs_cfg.sys_eff_factor = 1; 590 xrs_cfg.ddev = &xdna->ddev; 591 xrs_cfg.actions = &aie2_xrs_actions; 592 xrs_cfg.total_col = ndev->total_col; 593 594 xdna->xrs_hdl = xrsm_init(&xrs_cfg); 595 if (!xdna->xrs_hdl) { 596 XDNA_ERR(xdna, "Initialize resolver failed"); 597 ret = -EINVAL; 598 goto stop_hw; 599 } 600 601 release_firmware(fw); 602 aie2_msg_init(ndev); 603 amdxdna_pm_init(xdna); 604 return 0; 605 606 stop_hw: 607 aie2_hw_stop(xdna); 608 release_fw: 609 release_firmware(fw); 610 611 return ret; 612 } 613 614 static void aie2_fini(struct amdxdna_dev *xdna) 615 { 616 amdxdna_pm_fini(xdna); 617 aie2_hw_stop(xdna); 618 } 619 620 static int aie2_get_aie_status(struct amdxdna_client *client, 621 struct amdxdna_drm_get_info *args) 622 { 623 struct amdxdna_drm_query_aie_status status; 624 struct amdxdna_dev *xdna = client->xdna; 625 struct amdxdna_dev_hdl *ndev; 626 int ret; 627 628 ndev = xdna->dev_handle; 629 if (copy_from_user(&status, u64_to_user_ptr(args->buffer), sizeof(status))) { 630 XDNA_ERR(xdna, "Failed to copy AIE request into kernel"); 631 return -EFAULT; 632 } 633 634 if (ndev->metadata.cols * ndev->metadata.size < status.buffer_size) { 635 XDNA_ERR(xdna, "Invalid buffer size. Given Size: %u. Need Size: %u.", 636 status.buffer_size, ndev->metadata.cols * ndev->metadata.size); 637 return -EINVAL; 638 } 639 640 ret = aie2_query_status(ndev, u64_to_user_ptr(status.buffer), 641 status.buffer_size, &status.cols_filled); 642 if (ret) { 643 XDNA_ERR(xdna, "Failed to get AIE status info. Ret: %d", ret); 644 return ret; 645 } 646 647 if (copy_to_user(u64_to_user_ptr(args->buffer), &status, sizeof(status))) { 648 XDNA_ERR(xdna, "Failed to copy AIE request info to user space"); 649 return -EFAULT; 650 } 651 652 return 0; 653 } 654 655 static int aie2_get_aie_metadata(struct amdxdna_client *client, 656 struct amdxdna_drm_get_info *args) 657 { 658 struct amdxdna_drm_query_aie_metadata *meta; 659 struct amdxdna_dev *xdna = client->xdna; 660 struct amdxdna_dev_hdl *ndev; 661 int ret = 0; 662 663 ndev = xdna->dev_handle; 664 meta = kzalloc(sizeof(*meta), GFP_KERNEL); 665 if (!meta) 666 return -ENOMEM; 667 668 meta->col_size = ndev->metadata.size; 669 meta->cols = ndev->metadata.cols; 670 meta->rows = ndev->metadata.rows; 671 672 meta->version.major = ndev->metadata.version.major; 673 meta->version.minor = ndev->metadata.version.minor; 674 675 meta->core.row_count = ndev->metadata.core.row_count; 676 meta->core.row_start = ndev->metadata.core.row_start; 677 meta->core.dma_channel_count = ndev->metadata.core.dma_channel_count; 678 meta->core.lock_count = ndev->metadata.core.lock_count; 679 meta->core.event_reg_count = ndev->metadata.core.event_reg_count; 680 681 meta->mem.row_count = ndev->metadata.mem.row_count; 682 meta->mem.row_start = ndev->metadata.mem.row_start; 683 meta->mem.dma_channel_count = ndev->metadata.mem.dma_channel_count; 684 meta->mem.lock_count = ndev->metadata.mem.lock_count; 685 meta->mem.event_reg_count = ndev->metadata.mem.event_reg_count; 686 687 meta->shim.row_count = ndev->metadata.shim.row_count; 688 meta->shim.row_start = ndev->metadata.shim.row_start; 689 meta->shim.dma_channel_count = ndev->metadata.shim.dma_channel_count; 690 meta->shim.lock_count = ndev->metadata.shim.lock_count; 691 meta->shim.event_reg_count = ndev->metadata.shim.event_reg_count; 692 693 if (copy_to_user(u64_to_user_ptr(args->buffer), meta, sizeof(*meta))) 694 ret = -EFAULT; 695 696 kfree(meta); 697 return ret; 698 } 699 700 static int aie2_get_aie_version(struct amdxdna_client *client, 701 struct amdxdna_drm_get_info *args) 702 { 703 struct amdxdna_drm_query_aie_version version; 704 struct amdxdna_dev *xdna = client->xdna; 705 struct amdxdna_dev_hdl *ndev; 706 707 ndev = xdna->dev_handle; 708 version.major = ndev->version.major; 709 version.minor = ndev->version.minor; 710 711 if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version))) 712 return -EFAULT; 713 714 return 0; 715 } 716 717 static int aie2_get_firmware_version(struct amdxdna_client *client, 718 struct amdxdna_drm_get_info *args) 719 { 720 struct amdxdna_drm_query_firmware_version version; 721 struct amdxdna_dev *xdna = client->xdna; 722 723 version.major = xdna->fw_ver.major; 724 version.minor = xdna->fw_ver.minor; 725 version.patch = xdna->fw_ver.sub; 726 version.build = xdna->fw_ver.build; 727 728 if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version))) 729 return -EFAULT; 730 731 return 0; 732 } 733 734 static int aie2_get_power_mode(struct amdxdna_client *client, 735 struct amdxdna_drm_get_info *args) 736 { 737 struct amdxdna_drm_get_power_mode mode = {}; 738 struct amdxdna_dev *xdna = client->xdna; 739 struct amdxdna_dev_hdl *ndev; 740 741 ndev = xdna->dev_handle; 742 mode.power_mode = ndev->pw_mode; 743 744 if (copy_to_user(u64_to_user_ptr(args->buffer), &mode, sizeof(mode))) 745 return -EFAULT; 746 747 return 0; 748 } 749 750 static int aie2_get_clock_metadata(struct amdxdna_client *client, 751 struct amdxdna_drm_get_info *args) 752 { 753 struct amdxdna_drm_query_clock_metadata *clock; 754 struct amdxdna_dev *xdna = client->xdna; 755 struct amdxdna_dev_hdl *ndev; 756 int ret = 0; 757 758 ndev = xdna->dev_handle; 759 clock = kzalloc(sizeof(*clock), GFP_KERNEL); 760 if (!clock) 761 return -ENOMEM; 762 763 snprintf(clock->mp_npu_clock.name, sizeof(clock->mp_npu_clock.name), 764 "MP-NPU Clock"); 765 clock->mp_npu_clock.freq_mhz = ndev->npuclk_freq; 766 snprintf(clock->h_clock.name, sizeof(clock->h_clock.name), "H Clock"); 767 clock->h_clock.freq_mhz = ndev->hclk_freq; 768 769 if (copy_to_user(u64_to_user_ptr(args->buffer), clock, sizeof(*clock))) 770 ret = -EFAULT; 771 772 kfree(clock); 773 return ret; 774 } 775 776 static int aie2_hwctx_status_cb(struct amdxdna_hwctx *hwctx, void *arg) 777 { 778 struct amdxdna_drm_hwctx_entry *tmp __free(kfree) = NULL; 779 struct amdxdna_drm_get_array *array_args = arg; 780 struct amdxdna_drm_hwctx_entry __user *buf; 781 u32 size; 782 783 if (!array_args->num_element) 784 return -EINVAL; 785 786 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 787 if (!tmp) 788 return -ENOMEM; 789 790 tmp->pid = hwctx->client->pid; 791 tmp->context_id = hwctx->id; 792 tmp->start_col = hwctx->start_col; 793 tmp->num_col = hwctx->num_col; 794 tmp->command_submissions = hwctx->priv->seq; 795 tmp->command_completions = hwctx->priv->completed; 796 tmp->pasid = hwctx->client->pasid; 797 tmp->priority = hwctx->qos.priority; 798 tmp->gops = hwctx->qos.gops; 799 tmp->fps = hwctx->qos.fps; 800 tmp->dma_bandwidth = hwctx->qos.dma_bandwidth; 801 tmp->latency = hwctx->qos.latency; 802 tmp->frame_exec_time = hwctx->qos.frame_exec_time; 803 tmp->state = AMDXDNA_HWCTX_STATE_ACTIVE; 804 805 buf = u64_to_user_ptr(array_args->buffer); 806 size = min(sizeof(*tmp), array_args->element_size); 807 808 if (copy_to_user(buf, tmp, size)) 809 return -EFAULT; 810 811 array_args->buffer += size; 812 array_args->num_element--; 813 814 return 0; 815 } 816 817 static int aie2_get_hwctx_status(struct amdxdna_client *client, 818 struct amdxdna_drm_get_info *args) 819 { 820 struct amdxdna_drm_get_array array_args; 821 struct amdxdna_dev *xdna = client->xdna; 822 struct amdxdna_client *tmp_client; 823 int ret; 824 825 drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); 826 827 array_args.element_size = sizeof(struct amdxdna_drm_query_hwctx); 828 array_args.buffer = args->buffer; 829 array_args.num_element = args->buffer_size / array_args.element_size; 830 list_for_each_entry(tmp_client, &xdna->client_list, node) { 831 ret = amdxdna_hwctx_walk(tmp_client, &array_args, 832 aie2_hwctx_status_cb); 833 if (ret) 834 break; 835 } 836 837 args->buffer_size -= (u32)(array_args.buffer - args->buffer); 838 return 0; 839 } 840 841 static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_info *args) 842 { 843 struct amdxdna_dev *xdna = client->xdna; 844 int ret, idx; 845 846 if (!drm_dev_enter(&xdna->ddev, &idx)) 847 return -ENODEV; 848 849 ret = amdxdna_pm_resume_get(xdna); 850 if (ret) 851 goto dev_exit; 852 853 switch (args->param) { 854 case DRM_AMDXDNA_QUERY_AIE_STATUS: 855 ret = aie2_get_aie_status(client, args); 856 break; 857 case DRM_AMDXDNA_QUERY_AIE_METADATA: 858 ret = aie2_get_aie_metadata(client, args); 859 break; 860 case DRM_AMDXDNA_QUERY_AIE_VERSION: 861 ret = aie2_get_aie_version(client, args); 862 break; 863 case DRM_AMDXDNA_QUERY_CLOCK_METADATA: 864 ret = aie2_get_clock_metadata(client, args); 865 break; 866 case DRM_AMDXDNA_QUERY_HW_CONTEXTS: 867 ret = aie2_get_hwctx_status(client, args); 868 break; 869 case DRM_AMDXDNA_QUERY_FIRMWARE_VERSION: 870 ret = aie2_get_firmware_version(client, args); 871 break; 872 case DRM_AMDXDNA_GET_POWER_MODE: 873 ret = aie2_get_power_mode(client, args); 874 break; 875 default: 876 XDNA_ERR(xdna, "Not supported request parameter %u", args->param); 877 ret = -EOPNOTSUPP; 878 } 879 880 amdxdna_pm_suspend_put(xdna); 881 XDNA_DBG(xdna, "Got param %d", args->param); 882 883 dev_exit: 884 drm_dev_exit(idx); 885 return ret; 886 } 887 888 static int aie2_query_ctx_status_array(struct amdxdna_client *client, 889 struct amdxdna_drm_get_array *args) 890 { 891 struct amdxdna_drm_get_array array_args; 892 struct amdxdna_dev *xdna = client->xdna; 893 struct amdxdna_client *tmp_client; 894 int ret; 895 896 drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); 897 898 if (args->element_size > SZ_4K || args->num_element > SZ_1K) { 899 XDNA_DBG(xdna, "Invalid element size %d or number of element %d", 900 args->element_size, args->num_element); 901 return -EINVAL; 902 } 903 904 array_args.element_size = min(args->element_size, 905 sizeof(struct amdxdna_drm_hwctx_entry)); 906 array_args.buffer = args->buffer; 907 array_args.num_element = args->num_element * args->element_size / 908 array_args.element_size; 909 list_for_each_entry(tmp_client, &xdna->client_list, node) { 910 ret = amdxdna_hwctx_walk(tmp_client, &array_args, 911 aie2_hwctx_status_cb); 912 if (ret) 913 break; 914 } 915 916 args->element_size = array_args.element_size; 917 args->num_element = (u32)((array_args.buffer - args->buffer) / 918 args->element_size); 919 920 return 0; 921 } 922 923 static int aie2_get_array(struct amdxdna_client *client, 924 struct amdxdna_drm_get_array *args) 925 { 926 struct amdxdna_dev *xdna = client->xdna; 927 int ret, idx; 928 929 if (!drm_dev_enter(&xdna->ddev, &idx)) 930 return -ENODEV; 931 932 ret = amdxdna_pm_resume_get(xdna); 933 if (ret) 934 goto dev_exit; 935 936 switch (args->param) { 937 case DRM_AMDXDNA_HW_CONTEXT_ALL: 938 ret = aie2_query_ctx_status_array(client, args); 939 break; 940 case DRM_AMDXDNA_HW_LAST_ASYNC_ERR: 941 ret = aie2_get_array_async_error(xdna->dev_handle, args); 942 break; 943 default: 944 XDNA_ERR(xdna, "Not supported request parameter %u", args->param); 945 ret = -EOPNOTSUPP; 946 } 947 948 amdxdna_pm_suspend_put(xdna); 949 XDNA_DBG(xdna, "Got param %d", args->param); 950 951 dev_exit: 952 drm_dev_exit(idx); 953 return ret; 954 } 955 956 static int aie2_set_power_mode(struct amdxdna_client *client, 957 struct amdxdna_drm_set_state *args) 958 { 959 struct amdxdna_drm_set_power_mode power_state; 960 enum amdxdna_power_mode_type power_mode; 961 struct amdxdna_dev *xdna = client->xdna; 962 963 if (copy_from_user(&power_state, u64_to_user_ptr(args->buffer), 964 sizeof(power_state))) { 965 XDNA_ERR(xdna, "Failed to copy power mode request into kernel"); 966 return -EFAULT; 967 } 968 969 if (XDNA_MBZ_DBG(xdna, power_state.pad, sizeof(power_state.pad))) 970 return -EINVAL; 971 972 power_mode = power_state.power_mode; 973 if (power_mode > POWER_MODE_TURBO) { 974 XDNA_ERR(xdna, "Invalid power mode %d", power_mode); 975 return -EINVAL; 976 } 977 978 return aie2_pm_set_mode(xdna->dev_handle, power_mode); 979 } 980 981 static int aie2_set_state(struct amdxdna_client *client, 982 struct amdxdna_drm_set_state *args) 983 { 984 struct amdxdna_dev *xdna = client->xdna; 985 int ret, idx; 986 987 if (!drm_dev_enter(&xdna->ddev, &idx)) 988 return -ENODEV; 989 990 ret = amdxdna_pm_resume_get(xdna); 991 if (ret) 992 goto dev_exit; 993 994 switch (args->param) { 995 case DRM_AMDXDNA_SET_POWER_MODE: 996 ret = aie2_set_power_mode(client, args); 997 break; 998 default: 999 XDNA_ERR(xdna, "Not supported request parameter %u", args->param); 1000 ret = -EOPNOTSUPP; 1001 break; 1002 } 1003 1004 amdxdna_pm_suspend_put(xdna); 1005 dev_exit: 1006 drm_dev_exit(idx); 1007 return ret; 1008 } 1009 1010 const struct amdxdna_dev_ops aie2_ops = { 1011 .init = aie2_init, 1012 .fini = aie2_fini, 1013 .resume = aie2_hw_resume, 1014 .suspend = aie2_hw_suspend, 1015 .get_aie_info = aie2_get_info, 1016 .set_aie_state = aie2_set_state, 1017 .hwctx_init = aie2_hwctx_init, 1018 .hwctx_fini = aie2_hwctx_fini, 1019 .hwctx_config = aie2_hwctx_config, 1020 .hwctx_sync_debug_bo = aie2_hwctx_sync_debug_bo, 1021 .cmd_submit = aie2_cmd_submit, 1022 .hmm_invalidate = aie2_hmm_invalidate, 1023 .get_array = aie2_get_array, 1024 }; 1025