1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2020-2024 Intel Corporation 4 */ 5 6 #include "ivpu_drv.h" 7 #include "ivpu_hw.h" 8 #include "ivpu_hw_btrs.h" 9 #include "ivpu_hw_btrs_lnl_reg.h" 10 #include "ivpu_hw_btrs_mtl_reg.h" 11 #include "ivpu_hw_reg_io.h" 12 #include "ivpu_pm.h" 13 14 #define BTRS_MTL_IRQ_MASK ((REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR)) | \ 15 (REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR))) 16 17 #define BTRS_LNL_IRQ_MASK ((REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR)) | \ 18 (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR)) | \ 19 (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR)) | \ 20 (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR)) | \ 21 (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR)) | \ 22 (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR))) 23 24 #define BTRS_MTL_ALL_IRQ_MASK (BTRS_MTL_IRQ_MASK | (REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, \ 25 FREQ_CHANGE))) 26 27 #define BTRS_IRQ_DISABLE_MASK ((u32)-1) 28 29 #define BTRS_LNL_ALL_IRQ_MASK ((u32)-1) 30 31 #define BTRS_MTL_WP_CONFIG_1_TILE_5_3_RATIO WP_CONFIG(MTL_CONFIG_1_TILE, MTL_PLL_RATIO_5_3) 32 #define BTRS_MTL_WP_CONFIG_1_TILE_4_3_RATIO WP_CONFIG(MTL_CONFIG_1_TILE, MTL_PLL_RATIO_4_3) 33 #define BTRS_MTL_WP_CONFIG_2_TILE_5_3_RATIO WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_5_3) 34 #define BTRS_MTL_WP_CONFIG_2_TILE_4_3_RATIO WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_4_3) 35 #define BTRS_MTL_WP_CONFIG_0_TILE_PLL_OFF WP_CONFIG(0, 0) 36 37 #define PLL_CDYN_DEFAULT 0x80 38 #define PLL_EPP_DEFAULT 0x80 39 #define PLL_CONFIG_DEFAULT 0x0 40 #define PLL_SIMULATION_FREQ 10000000 41 #define PLL_REF_CLK_FREQ 50000000 42 #define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC) 43 #define IDLE_TIMEOUT_US (5 * USEC_PER_MSEC) 44 #define TIMEOUT_US (150 * USEC_PER_MSEC) 45 46 /* Work point configuration values */ 47 #define WP_CONFIG(tile, ratio) (((tile) << 8) | (ratio)) 48 #define MTL_CONFIG_1_TILE 0x01 49 #define MTL_CONFIG_2_TILE 0x02 50 #define MTL_PLL_RATIO_5_3 0x01 51 #define MTL_PLL_RATIO_4_3 0x02 52 #define BTRS_MTL_TILE_FUSE_ENABLE_BOTH 0x0 53 #define BTRS_MTL_TILE_SKU_BOTH 0x3630 54 55 #define BTRS_LNL_TILE_MAX_NUM 6 56 #define BTRS_LNL_TILE_MAX_MASK 0x3f 57 58 #define WEIGHTS_DEFAULT 0xf711f711u 59 #define WEIGHTS_ATS_DEFAULT 0x0000f711u 60 61 #define DCT_REQ 0x2 62 #define DCT_ENABLE 0x1 63 #define DCT_DISABLE 0x0 64 65 int ivpu_hw_btrs_irqs_clear_with_0_mtl(struct ivpu_device *vdev) 66 { 67 REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, BTRS_MTL_ALL_IRQ_MASK); 68 if (REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) == BTRS_MTL_ALL_IRQ_MASK) { 69 /* Writing 1s does not clear the interrupt status register */ 70 REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, 0x0); 71 return true; 72 } 73 74 return false; 75 } 76 77 static void freq_ratios_init_mtl(struct ivpu_device *vdev) 78 { 79 struct ivpu_hw_info *hw = vdev->hw; 80 u32 fmin_fuse, fmax_fuse; 81 82 fmin_fuse = REGB_RD32(VPU_HW_BTRS_MTL_FMIN_FUSE); 83 hw->pll.min_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMIN_FUSE, MIN_RATIO, fmin_fuse); 84 hw->pll.pn_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMIN_FUSE, PN_RATIO, fmin_fuse); 85 86 fmax_fuse = REGB_RD32(VPU_HW_BTRS_MTL_FMAX_FUSE); 87 hw->pll.max_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMAX_FUSE, MAX_RATIO, fmax_fuse); 88 } 89 90 static void freq_ratios_init_lnl(struct ivpu_device *vdev) 91 { 92 struct ivpu_hw_info *hw = vdev->hw; 93 u32 fmin_fuse, fmax_fuse; 94 95 fmin_fuse = REGB_RD32(VPU_HW_BTRS_LNL_FMIN_FUSE); 96 hw->pll.min_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMIN_FUSE, MIN_RATIO, fmin_fuse); 97 hw->pll.pn_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMIN_FUSE, PN_RATIO, fmin_fuse); 98 99 fmax_fuse = REGB_RD32(VPU_HW_BTRS_LNL_FMAX_FUSE); 100 hw->pll.max_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMAX_FUSE, MAX_RATIO, fmax_fuse); 101 } 102 103 void ivpu_hw_btrs_freq_ratios_init(struct ivpu_device *vdev) 104 { 105 struct ivpu_hw_info *hw = vdev->hw; 106 107 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 108 freq_ratios_init_mtl(vdev); 109 else 110 freq_ratios_init_lnl(vdev); 111 112 hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, hw->pll.min_ratio, hw->pll.max_ratio); 113 hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, hw->pll.max_ratio); 114 hw->pll.pn_ratio = clamp_t(u8, hw->pll.pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio); 115 } 116 117 static bool tile_disable_check(u32 config) 118 { 119 /* Allowed values: 0 or one bit from range 0-5 (6 tiles) */ 120 if (config == 0) 121 return true; 122 123 if (config > BIT(BTRS_LNL_TILE_MAX_NUM - 1)) 124 return false; 125 126 if ((config & (config - 1)) == 0) 127 return true; 128 129 return false; 130 } 131 132 static int read_tile_config_fuse(struct ivpu_device *vdev, u32 *tile_fuse_config) 133 { 134 u32 fuse; 135 u32 config; 136 137 fuse = REGB_RD32(VPU_HW_BTRS_LNL_TILE_FUSE); 138 if (!REG_TEST_FLD(VPU_HW_BTRS_LNL_TILE_FUSE, VALID, fuse)) { 139 ivpu_err(vdev, "Fuse: invalid (0x%x)\n", fuse); 140 return -EIO; 141 } 142 143 config = REG_GET_FLD(VPU_HW_BTRS_LNL_TILE_FUSE, CONFIG, fuse); 144 if (!tile_disable_check(config)) { 145 ivpu_err(vdev, "Fuse: Invalid tile disable config (0x%x)\n", config); 146 return -EIO; 147 } 148 149 if (config) 150 ivpu_dbg(vdev, MISC, "Fuse: %d tiles enabled. Tile number %d disabled\n", 151 BTRS_LNL_TILE_MAX_NUM - 1, ffs(config) - 1); 152 else 153 ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", BTRS_LNL_TILE_MAX_NUM); 154 155 *tile_fuse_config = config; 156 return 0; 157 } 158 159 static int info_init_mtl(struct ivpu_device *vdev) 160 { 161 struct ivpu_hw_info *hw = vdev->hw; 162 163 hw->tile_fuse = BTRS_MTL_TILE_FUSE_ENABLE_BOTH; 164 hw->sku = BTRS_MTL_TILE_SKU_BOTH; 165 hw->config = BTRS_MTL_WP_CONFIG_2_TILE_4_3_RATIO; 166 hw->sched_mode = ivpu_sched_mode; 167 168 return 0; 169 } 170 171 static int info_init_lnl(struct ivpu_device *vdev) 172 { 173 struct ivpu_hw_info *hw = vdev->hw; 174 u32 tile_fuse_config; 175 int ret; 176 177 ret = read_tile_config_fuse(vdev, &tile_fuse_config); 178 if (ret) 179 return ret; 180 181 hw->sched_mode = ivpu_sched_mode; 182 hw->tile_fuse = tile_fuse_config; 183 hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT; 184 185 return 0; 186 } 187 188 int ivpu_hw_btrs_info_init(struct ivpu_device *vdev) 189 { 190 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 191 return info_init_mtl(vdev); 192 else 193 return info_init_lnl(vdev); 194 } 195 196 static int wp_request_sync(struct ivpu_device *vdev) 197 { 198 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 199 return REGB_POLL_FLD(VPU_HW_BTRS_MTL_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US); 200 else 201 return REGB_POLL_FLD(VPU_HW_BTRS_LNL_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US); 202 } 203 204 static int wait_for_status_ready(struct ivpu_device *vdev, bool enable) 205 { 206 u32 exp_val = enable ? 0x1 : 0x0; 207 208 if (IVPU_WA(punit_disabled)) 209 return 0; 210 211 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 212 return REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, READY, exp_val, PLL_TIMEOUT_US); 213 else 214 return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, READY, exp_val, PLL_TIMEOUT_US); 215 } 216 217 struct wp_request { 218 u16 min; 219 u16 max; 220 u16 target; 221 u16 cfg; 222 u16 epp; 223 u16 cdyn; 224 }; 225 226 static void wp_request_mtl(struct ivpu_device *vdev, struct wp_request *wp) 227 { 228 u32 val; 229 230 val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0); 231 val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, MIN_RATIO, wp->min, val); 232 val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, MAX_RATIO, wp->max, val); 233 REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, val); 234 235 val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1); 236 val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, TARGET_RATIO, wp->target, val); 237 val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, EPP, PLL_EPP_DEFAULT, val); 238 REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, val); 239 240 val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2); 241 val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2, CONFIG, wp->cfg, val); 242 REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2, val); 243 244 val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_CMD); 245 val = REG_SET_FLD(VPU_HW_BTRS_MTL_WP_REQ_CMD, SEND, val); 246 REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_CMD, val); 247 } 248 249 static void wp_request_lnl(struct ivpu_device *vdev, struct wp_request *wp) 250 { 251 u32 val; 252 253 val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0); 254 val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, MIN_RATIO, wp->min, val); 255 val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, MAX_RATIO, wp->max, val); 256 REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, val); 257 258 val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1); 259 val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, TARGET_RATIO, wp->target, val); 260 val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, EPP, wp->epp, val); 261 REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, val); 262 263 val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2); 264 val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, CONFIG, wp->cfg, val); 265 val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, CDYN, wp->cdyn, val); 266 REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, val); 267 268 val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_CMD); 269 val = REG_SET_FLD(VPU_HW_BTRS_LNL_WP_REQ_CMD, SEND, val); 270 REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_CMD, val); 271 } 272 273 static void wp_request(struct ivpu_device *vdev, struct wp_request *wp) 274 { 275 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 276 wp_request_mtl(vdev, wp); 277 else 278 wp_request_lnl(vdev, wp); 279 } 280 281 static int wp_request_send(struct ivpu_device *vdev, struct wp_request *wp) 282 { 283 int ret; 284 285 ret = wp_request_sync(vdev); 286 if (ret) { 287 ivpu_err(vdev, "Failed to sync before workpoint request: %d\n", ret); 288 return ret; 289 } 290 291 wp_request(vdev, wp); 292 293 ret = wp_request_sync(vdev); 294 if (ret) 295 ivpu_err(vdev, "Failed to sync after workpoint request: %d\n", ret); 296 297 return ret; 298 } 299 300 static void prepare_wp_request(struct ivpu_device *vdev, struct wp_request *wp, bool enable) 301 { 302 struct ivpu_hw_info *hw = vdev->hw; 303 304 wp->min = hw->pll.min_ratio; 305 wp->max = hw->pll.max_ratio; 306 307 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) { 308 wp->target = enable ? hw->pll.pn_ratio : 0; 309 wp->cfg = enable ? hw->config : 0; 310 wp->cdyn = 0; 311 wp->epp = 0; 312 } else { 313 wp->target = hw->pll.pn_ratio; 314 wp->cfg = enable ? PLL_CONFIG_DEFAULT : 0; 315 wp->cdyn = enable ? PLL_CDYN_DEFAULT : 0; 316 wp->epp = enable ? PLL_EPP_DEFAULT : 0; 317 } 318 319 /* Simics cannot start without at least one tile */ 320 if (enable && ivpu_is_simics(vdev)) 321 wp->cfg = 1; 322 } 323 324 static int wait_for_pll_lock(struct ivpu_device *vdev, bool enable) 325 { 326 u32 exp_val = enable ? 0x1 : 0x0; 327 328 if (ivpu_hw_btrs_gen(vdev) != IVPU_HW_BTRS_MTL) 329 return 0; 330 331 if (IVPU_WA(punit_disabled)) 332 return 0; 333 334 return REGB_POLL_FLD(VPU_HW_BTRS_MTL_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US); 335 } 336 337 int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable) 338 { 339 struct wp_request wp; 340 int ret; 341 342 if (IVPU_WA(punit_disabled)) { 343 ivpu_dbg(vdev, PM, "Skipping workpoint request\n"); 344 return 0; 345 } 346 347 prepare_wp_request(vdev, &wp, enable); 348 349 ivpu_dbg(vdev, PM, "PLL workpoint request: %u Hz, config: 0x%x, epp: 0x%x, cdyn: 0x%x\n", 350 PLL_RATIO_TO_FREQ(wp.target), wp.cfg, wp.epp, wp.cdyn); 351 352 ret = wp_request_send(vdev, &wp); 353 if (ret) { 354 ivpu_err(vdev, "Failed to send workpoint request: %d\n", ret); 355 return ret; 356 } 357 358 ret = wait_for_pll_lock(vdev, enable); 359 if (ret) { 360 ivpu_err(vdev, "Timed out waiting for PLL lock\n"); 361 return ret; 362 } 363 364 ret = wait_for_status_ready(vdev, enable); 365 if (ret) { 366 ivpu_err(vdev, "Timed out waiting for NPU ready status\n"); 367 return ret; 368 } 369 370 return 0; 371 } 372 373 static int d0i3_drive_mtl(struct ivpu_device *vdev, bool enable) 374 { 375 int ret; 376 u32 val; 377 378 ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US); 379 if (ret) { 380 ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret); 381 return ret; 382 } 383 384 val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL); 385 if (enable) 386 val = REG_SET_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, I3, val); 387 else 388 val = REG_CLR_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, I3, val); 389 REGB_WR32(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, val); 390 391 ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US); 392 if (ret) 393 ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret); 394 395 return ret; 396 } 397 398 static int d0i3_drive_lnl(struct ivpu_device *vdev, bool enable) 399 { 400 int ret; 401 u32 val; 402 403 ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US); 404 if (ret) { 405 ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret); 406 return ret; 407 } 408 409 val = REGB_RD32(VPU_HW_BTRS_LNL_D0I3_CONTROL); 410 if (enable) 411 val = REG_SET_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, I3, val); 412 else 413 val = REG_CLR_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, I3, val); 414 REGB_WR32(VPU_HW_BTRS_LNL_D0I3_CONTROL, val); 415 416 ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US); 417 if (ret) { 418 ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret); 419 return ret; 420 } 421 422 return 0; 423 } 424 425 static int d0i3_drive(struct ivpu_device *vdev, bool enable) 426 { 427 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 428 return d0i3_drive_mtl(vdev, enable); 429 else 430 return d0i3_drive_lnl(vdev, enable); 431 } 432 433 int ivpu_hw_btrs_d0i3_enable(struct ivpu_device *vdev) 434 { 435 int ret; 436 437 if (IVPU_WA(punit_disabled)) 438 return 0; 439 440 ret = d0i3_drive(vdev, true); 441 if (ret) 442 ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret); 443 444 udelay(5); /* VPU requires 5 us to complete the transition */ 445 446 return ret; 447 } 448 449 int ivpu_hw_btrs_d0i3_disable(struct ivpu_device *vdev) 450 { 451 int ret; 452 453 if (IVPU_WA(punit_disabled)) 454 return 0; 455 456 ret = d0i3_drive(vdev, false); 457 if (ret) 458 ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret); 459 460 return ret; 461 } 462 463 int ivpu_hw_btrs_wait_for_clock_res_own_ack(struct ivpu_device *vdev) 464 { 465 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 466 return 0; 467 468 if (ivpu_is_simics(vdev)) 469 return 0; 470 471 return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, CLOCK_RESOURCE_OWN_ACK, 1, TIMEOUT_US); 472 } 473 474 void ivpu_hw_btrs_set_port_arbitration_weights_lnl(struct ivpu_device *vdev) 475 { 476 REGB_WR32(VPU_HW_BTRS_LNL_PORT_ARBITRATION_WEIGHTS, WEIGHTS_DEFAULT); 477 REGB_WR32(VPU_HW_BTRS_LNL_PORT_ARBITRATION_WEIGHTS_ATS, WEIGHTS_ATS_DEFAULT); 478 } 479 480 static int ip_reset_mtl(struct ivpu_device *vdev) 481 { 482 int ret; 483 u32 val; 484 485 ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US); 486 if (ret) { 487 ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n"); 488 return ret; 489 } 490 491 val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_IP_RESET); 492 val = REG_SET_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, val); 493 REGB_WR32(VPU_HW_BTRS_MTL_VPU_IP_RESET, val); 494 495 ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US); 496 if (ret) 497 ivpu_err(vdev, "Timed out waiting for RESET completion\n"); 498 499 return ret; 500 } 501 502 static int ip_reset_lnl(struct ivpu_device *vdev) 503 { 504 int ret; 505 u32 val; 506 507 ivpu_hw_btrs_clock_relinquish_disable_lnl(vdev); 508 509 ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, 0, TIMEOUT_US); 510 if (ret) { 511 ivpu_err(vdev, "Wait for *_TRIGGER timed out\n"); 512 return ret; 513 } 514 515 val = REGB_RD32(VPU_HW_BTRS_LNL_IP_RESET); 516 val = REG_SET_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, val); 517 REGB_WR32(VPU_HW_BTRS_LNL_IP_RESET, val); 518 519 ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, 0, TIMEOUT_US); 520 if (ret) 521 ivpu_err(vdev, "Timed out waiting for RESET completion\n"); 522 523 return ret; 524 } 525 526 int ivpu_hw_btrs_ip_reset(struct ivpu_device *vdev) 527 { 528 if (IVPU_WA(punit_disabled)) 529 return 0; 530 531 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 532 return ip_reset_mtl(vdev); 533 else 534 return ip_reset_lnl(vdev); 535 } 536 537 void ivpu_hw_btrs_profiling_freq_reg_set_lnl(struct ivpu_device *vdev) 538 { 539 u32 val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS); 540 541 if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT) 542 val = REG_CLR_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PERF_CLK, val); 543 else 544 val = REG_SET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PERF_CLK, val); 545 546 REGB_WR32(VPU_HW_BTRS_LNL_VPU_STATUS, val); 547 } 548 549 void ivpu_hw_btrs_ats_print_lnl(struct ivpu_device *vdev) 550 { 551 ivpu_dbg(vdev, MISC, "Buttress ATS: %s\n", 552 REGB_RD32(VPU_HW_BTRS_LNL_HM_ATS) ? "Enable" : "Disable"); 553 } 554 555 void ivpu_hw_btrs_clock_relinquish_disable_lnl(struct ivpu_device *vdev) 556 { 557 u32 val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS); 558 559 val = REG_SET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, DISABLE_CLK_RELINQUISH, val); 560 REGB_WR32(VPU_HW_BTRS_LNL_VPU_STATUS, val); 561 } 562 563 bool ivpu_hw_btrs_is_idle(struct ivpu_device *vdev) 564 { 565 u32 val; 566 567 if (IVPU_WA(punit_disabled)) 568 return true; 569 570 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) { 571 val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_STATUS); 572 573 return REG_TEST_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, READY, val) && 574 REG_TEST_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, IDLE, val); 575 } else { 576 val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS); 577 578 return REG_TEST_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, READY, val) && 579 REG_TEST_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, IDLE, val); 580 } 581 } 582 583 int ivpu_hw_btrs_wait_for_idle(struct ivpu_device *vdev) 584 { 585 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 586 return REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US); 587 else 588 return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US); 589 } 590 591 /* Handler for IRQs from Buttress core (irqB) */ 592 bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq) 593 { 594 u32 status = REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) & BTRS_MTL_IRQ_MASK; 595 bool schedule_recovery = false; 596 597 if (!status) 598 return false; 599 600 if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, FREQ_CHANGE, status)) 601 ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", 602 REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL)); 603 604 if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, status)) { 605 ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0)); 606 REGB_WR32(VPU_HW_BTRS_MTL_ATS_ERR_CLEAR, 0x1); 607 schedule_recovery = true; 608 } 609 610 if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR, status)) { 611 u32 ufi_log = REGB_RD32(VPU_HW_BTRS_MTL_UFI_ERR_LOG); 612 613 ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx", 614 ufi_log, REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, OPCODE, ufi_log), 615 REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, AXI_ID, ufi_log), 616 REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, CQ_ID, ufi_log)); 617 REGB_WR32(VPU_HW_BTRS_MTL_UFI_ERR_CLEAR, 0x1); 618 schedule_recovery = true; 619 } 620 621 /* This must be done after interrupts are cleared at the source. */ 622 if (IVPU_WA(interrupt_clear_with_0)) 623 /* 624 * Writing 1 triggers an interrupt, so we can't perform read update write. 625 * Clear local interrupt status by writing 0 to all bits. 626 */ 627 REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, 0x0); 628 else 629 REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, status); 630 631 if (schedule_recovery) 632 ivpu_pm_trigger_recovery(vdev, "Buttress IRQ"); 633 634 return true; 635 } 636 637 /* Handler for IRQs from Buttress core (irqB) */ 638 bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq) 639 { 640 u32 status = REGB_RD32(VPU_HW_BTRS_LNL_INTERRUPT_STAT) & BTRS_LNL_IRQ_MASK; 641 bool schedule_recovery = false; 642 643 if (!status) 644 return false; 645 646 if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, status)) { 647 ivpu_dbg(vdev, IRQ, "Survivability IRQ\n"); 648 if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_DCT)) 649 ivpu_err_ratelimited(vdev, "IRQ FIFO full\n"); 650 } 651 652 if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status)) 653 ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ)); 654 655 if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR, status)) { 656 ivpu_err(vdev, "ATS_ERR LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n", 657 REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG1), 658 REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG2)); 659 REGB_WR32(VPU_HW_BTRS_LNL_ATS_ERR_CLEAR, 0x1); 660 schedule_recovery = true; 661 } 662 663 if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR, status)) { 664 ivpu_err(vdev, "CFI0_ERR 0x%08x", REGB_RD32(VPU_HW_BTRS_LNL_CFI0_ERR_LOG)); 665 REGB_WR32(VPU_HW_BTRS_LNL_CFI0_ERR_CLEAR, 0x1); 666 schedule_recovery = true; 667 } 668 669 if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR, status)) { 670 ivpu_err(vdev, "CFI1_ERR 0x%08x", REGB_RD32(VPU_HW_BTRS_LNL_CFI1_ERR_LOG)); 671 REGB_WR32(VPU_HW_BTRS_LNL_CFI1_ERR_CLEAR, 0x1); 672 schedule_recovery = true; 673 } 674 675 if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR, status)) { 676 ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x", 677 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_LOW), 678 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_HIGH)); 679 REGB_WR32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_CLEAR, 0x1); 680 schedule_recovery = true; 681 } 682 683 if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR, status)) { 684 ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x", 685 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_LOW), 686 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_HIGH)); 687 REGB_WR32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_CLEAR, 0x1); 688 schedule_recovery = true; 689 } 690 691 /* This must be done after interrupts are cleared at the source. */ 692 REGB_WR32(VPU_HW_BTRS_LNL_INTERRUPT_STAT, status); 693 694 if (schedule_recovery) 695 ivpu_pm_trigger_recovery(vdev, "Buttress IRQ"); 696 697 return true; 698 } 699 700 int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable) 701 { 702 u32 val = REGB_RD32(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW); 703 u32 cmd = REG_GET_FLD(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW, CMD, val); 704 u32 param1 = REG_GET_FLD(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW, PARAM1, val); 705 706 if (cmd != DCT_REQ) { 707 ivpu_err_ratelimited(vdev, "Unsupported PCODE command: 0x%x\n", cmd); 708 return -EBADR; 709 } 710 711 switch (param1) { 712 case DCT_ENABLE: 713 *enable = true; 714 return 0; 715 case DCT_DISABLE: 716 *enable = false; 717 return 0; 718 default: 719 ivpu_err_ratelimited(vdev, "Invalid PARAM1 value: %u\n", param1); 720 return -EINVAL; 721 } 722 } 723 724 void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent) 725 { 726 u32 val = 0; 727 u32 cmd = enable ? DCT_ENABLE : DCT_DISABLE; 728 729 val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, CMD, DCT_REQ, val); 730 val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, PARAM1, cmd, val); 731 val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, PARAM2, active_percent, val); 732 733 REGB_WR32(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, val); 734 } 735 736 static u32 pll_ratio_to_freq_mtl(u32 ratio, u32 config) 737 { 738 u32 pll_clock = PLL_REF_CLK_FREQ * ratio; 739 u32 cpu_clock; 740 741 if ((config & 0xff) == MTL_PLL_RATIO_4_3) 742 cpu_clock = pll_clock * 2 / 4; 743 else 744 cpu_clock = pll_clock * 2 / 5; 745 746 return cpu_clock; 747 } 748 749 u32 ivpu_hw_btrs_ratio_to_freq(struct ivpu_device *vdev, u32 ratio) 750 { 751 struct ivpu_hw_info *hw = vdev->hw; 752 753 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 754 return pll_ratio_to_freq_mtl(ratio, hw->config); 755 else 756 return PLL_RATIO_TO_FREQ(ratio); 757 } 758 759 static u32 pll_freq_get_mtl(struct ivpu_device *vdev) 760 { 761 u32 pll_curr_ratio; 762 763 pll_curr_ratio = REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL); 764 pll_curr_ratio &= VPU_HW_BTRS_MTL_CURRENT_PLL_RATIO_MASK; 765 766 if (!ivpu_is_silicon(vdev)) 767 return PLL_SIMULATION_FREQ; 768 769 return pll_ratio_to_freq_mtl(pll_curr_ratio, vdev->hw->config); 770 } 771 772 static u32 pll_freq_get_lnl(struct ivpu_device *vdev) 773 { 774 u32 pll_curr_ratio; 775 776 pll_curr_ratio = REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ); 777 pll_curr_ratio &= VPU_HW_BTRS_LNL_PLL_FREQ_RATIO_MASK; 778 779 return PLL_RATIO_TO_FREQ(pll_curr_ratio); 780 } 781 782 u32 ivpu_hw_btrs_pll_freq_get(struct ivpu_device *vdev) 783 { 784 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 785 return pll_freq_get_mtl(vdev); 786 else 787 return pll_freq_get_lnl(vdev); 788 } 789 790 u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev) 791 { 792 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 793 return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_OFFSET); 794 else 795 return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_OFFSET); 796 } 797 798 u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev) 799 { 800 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 801 return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_SIZE); 802 else 803 return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_SIZE); 804 } 805 806 u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev) 807 { 808 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 809 return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_ENABLE); 810 else 811 return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_ENABLE); 812 } 813 814 void ivpu_hw_btrs_global_int_disable(struct ivpu_device *vdev) 815 { 816 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 817 REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x1); 818 else 819 REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x1); 820 } 821 822 void ivpu_hw_btrs_global_int_enable(struct ivpu_device *vdev) 823 { 824 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 825 REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x0); 826 else 827 REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x0); 828 } 829 830 void ivpu_hw_btrs_irq_enable(struct ivpu_device *vdev) 831 { 832 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) { 833 REGB_WR32(VPU_HW_BTRS_MTL_LOCAL_INT_MASK, (u32)(~BTRS_MTL_IRQ_MASK)); 834 REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x0); 835 } else { 836 REGB_WR32(VPU_HW_BTRS_LNL_LOCAL_INT_MASK, (u32)(~BTRS_LNL_IRQ_MASK)); 837 REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x0); 838 } 839 } 840 841 void ivpu_hw_btrs_irq_disable(struct ivpu_device *vdev) 842 { 843 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) { 844 REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x1); 845 REGB_WR32(VPU_HW_BTRS_MTL_LOCAL_INT_MASK, BTRS_IRQ_DISABLE_MASK); 846 } else { 847 REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x1); 848 REGB_WR32(VPU_HW_BTRS_LNL_LOCAL_INT_MASK, BTRS_IRQ_DISABLE_MASK); 849 } 850 } 851 852 static void diagnose_failure_mtl(struct ivpu_device *vdev) 853 { 854 u32 reg = REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) & BTRS_MTL_IRQ_MASK; 855 856 if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, reg)) 857 ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0)); 858 859 if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR, reg)) { 860 u32 log = REGB_RD32(VPU_HW_BTRS_MTL_UFI_ERR_LOG); 861 862 ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx", 863 log, REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, OPCODE, log), 864 REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, AXI_ID, log), 865 REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, CQ_ID, log)); 866 } 867 } 868 869 static void diagnose_failure_lnl(struct ivpu_device *vdev) 870 { 871 u32 reg = REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) & BTRS_LNL_IRQ_MASK; 872 873 if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR, reg)) { 874 ivpu_err(vdev, "ATS_ERR_LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n", 875 REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG1), 876 REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG2)); 877 } 878 879 if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR, reg)) 880 ivpu_err(vdev, "CFI0_ERR_LOG 0x%08x\n", REGB_RD32(VPU_HW_BTRS_LNL_CFI0_ERR_LOG)); 881 882 if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR, reg)) 883 ivpu_err(vdev, "CFI1_ERR_LOG 0x%08x\n", REGB_RD32(VPU_HW_BTRS_LNL_CFI1_ERR_LOG)); 884 885 if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR, reg)) 886 ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x\n", 887 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_LOW), 888 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_HIGH)); 889 890 if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR, reg)) 891 ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x\n", 892 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_LOW), 893 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_HIGH)); 894 895 if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, reg)) 896 ivpu_err(vdev, "Survivability IRQ\n"); 897 } 898 899 void ivpu_hw_btrs_diagnose_failure(struct ivpu_device *vdev) 900 { 901 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 902 return diagnose_failure_mtl(vdev); 903 else 904 return diagnose_failure_lnl(vdev); 905 } 906