/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/irqdomain.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <sound/designware_i2s.h>
#include <sound/pcm.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_acp.h"

#include "acp_gfx_if.h"

#define ACP_TILE_ON_MASK			0x03
#define ACP_TILE_OFF_MASK			0x02
#define ACP_TILE_ON_RETAIN_REG_MASK		0x1f
#define ACP_TILE_OFF_RETAIN_REG_MASK		0x20

#define ACP_TILE_P1_MASK			0x3e
#define ACP_TILE_P2_MASK			0x3d
#define ACP_TILE_DSP0_MASK			0x3b
#define ACP_TILE_DSP1_MASK			0x37

#define ACP_TILE_DSP2_MASK			0x2f

#define ACP_DMA_REGS_END			0x146c0
#define ACP_I2S_PLAY_REGS_START			0x14840
#define ACP_I2S_PLAY_REGS_END			0x148b4
#define ACP_I2S_CAP_REGS_START			0x148b8
#define ACP_I2S_CAP_REGS_END			0x1496c

#define ACP_I2S_COMP1_CAP_REG_OFFSET		0xac
#define ACP_I2S_COMP2_CAP_REG_OFFSET		0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET		0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET		0x68
#define ACP_BT_PLAY_REGS_START			0x14970
#define ACP_BT_PLAY_REGS_END			0x14a24
#define ACP_BT_COMP1_REG_OFFSET			0xac
#define ACP_BT_COMP2_REG_OFFSET			0xa8

#define mmACP_PGFSM_RETAIN_REG			0x51c9
#define mmACP_PGFSM_CONFIG_REG			0x51ca
#define mmACP_PGFSM_READ_REG_0			0x51cc

#define mmACP_MEM_SHUT_DOWN_REQ_LO		0x51f8
#define mmACP_MEM_SHUT_DOWN_REQ_HI		0x51f9
#define mmACP_MEM_SHUT_DOWN_STS_LO		0x51fa
#define mmACP_MEM_SHUT_DOWN_STS_HI		0x51fb

#define mmACP_CONTROL				0x5131
#define mmACP_STATUS				0x5133
#define mmACP_SOFT_RESET			0x5134
#define ACP_CONTROL__ClkEn_MASK			0x1
#define ACP_SOFT_RESET__SoftResetAud_MASK	0x100
#define ACP_SOFT_RESET__SoftResetAudDone_MASK	0x1000000
#define ACP_CLOCK_EN_TIME_OUT_VALUE		0x000000FF
#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE	0x000000FF

#define ACP_TIMEOUT_LOOP			0x000000FF
#define ACP_DEVS				4
#define ACP_SRC_ID				162

enum {
	ACP_TILE_P1 = 0,
	ACP_TILE_P2,
	ACP_TILE_DSP0,
	ACP_TILE_DSP1,
	ACP_TILE_DSP2,
};

static int acp_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->acp.parent = adev->dev;

	adev->acp.cgs_device = amdgpu_cgs_create_device(adev);
	if (!adev->acp.cgs_device)
		return -EINVAL;

	return 0;
}

static int acp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->acp.cgs_device)
		amdgpu_cgs_destroy_device(adev->acp.cgs_device);

	return 0;
}
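/*
 * Tile power state is driven through the ACP PGFSM registers: the two
 * helpers below write 0x500 + tile (power down) or 0x600 + tile (power up)
 * to mmACP_PGFSM_CONFIG_REG and then poll the tile's state in
 * mmACP_PGFSM_READ_REG_0 + tile until it changes or the timeout expires.
 */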
/* power off a tile/block within ACP */
static int acp_suspend_tile(void *cgs_dev, int tile)
{
	u32 val = 0;
	u32 count = 0;

	if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
		pr_err("Invalid ACP tile : %d to suspend\n", tile);
		return -1;
	}

	val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
	val &= ACP_TILE_ON_MASK;

	if (val == 0x0) {
		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
		val = val | (1 << tile);
		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
		cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
					0x500 + tile);

		count = ACP_TIMEOUT_LOOP;
		while (true) {
			val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
								+ tile);
			val = val & ACP_TILE_ON_MASK;
			if (val == ACP_TILE_OFF_MASK)
				break;
			if (--count == 0) {
				pr_err("Timeout reading ACP PGFSM status\n");
				return -ETIMEDOUT;
			}
			udelay(100);
		}

		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);

		val |= ACP_TILE_OFF_RETAIN_REG_MASK;
		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
	}
	return 0;
}

/* power on a tile/block within ACP */
static int acp_resume_tile(void *cgs_dev, int tile)
{
	u32 val = 0;
	u32 count = 0;

	if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
		pr_err("Invalid ACP tile to resume\n");
		return -1;
	}

	val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
	val = val & ACP_TILE_ON_MASK;

	if (val != 0x0) {
		cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
				0x600 + tile);
		count = ACP_TIMEOUT_LOOP;
		while (true) {
			val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
							+ tile);
			val = val & ACP_TILE_ON_MASK;
			if (val == 0x0)
				break;
			if (--count == 0) {
				pr_err("Timeout reading ACP PGFSM status\n");
				return -ETIMEDOUT;
			}
			udelay(100);
		}
		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
		if (tile == ACP_TILE_P1)
			val = val & (ACP_TILE_P1_MASK);
		else if (tile == ACP_TILE_P2)
			val = val & (ACP_TILE_P2_MASK);

		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
	}
	return 0;
}

struct acp_pm_domain {
	void *cgs_dev;
	struct generic_pm_domain gpd;
};
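/*
 * The ACP audio block is modelled as a generic PM domain: the MFD child
 * devices created in acp_hw_init() are attached to this domain, so their
 * runtime PM transitions end up in the acp_poweroff()/acp_poweron()
 * callbacks below, which gate or ungate the individual ACP tiles.
 */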
static int acp_poweroff(struct generic_pm_domain *genpd)
{
	int i, ret;
	struct acp_pm_domain *apd;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		/* Do not return abruptly if any of the power tiles fails to
		 * suspend. Log it and continue powering off the other tiles.
		 */
		for (i = 4; i >= 0 ; i--) {
			ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i);
			if (ret)
				pr_err("ACP tile %d tile suspend failed\n", i);
		}
	}
	return 0;
}

static int acp_poweron(struct generic_pm_domain *genpd)
{
	int i, ret;
	struct acp_pm_domain *apd;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		for (i = 0; i < 2; i++) {
			ret = acp_resume_tile(apd->cgs_dev, ACP_TILE_P1 + i);
			if (ret) {
				pr_err("ACP tile %d resume failed\n", i);
				break;
			}
		}

		/* Disable DSPs which are not going to be used */
		for (i = 0; i < 3; i++) {
			ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_DSP0 + i);
			/* Continue suspending the other DSPs, even if one fails */
			if (ret)
				pr_err("ACP DSP %d suspend failed\n", i);
		}
	}
	return 0;
}

static struct device *get_mfd_cell_dev(const char *device_name, int r)
{
	char auto_dev_name[25];
	struct device *dev;

	snprintf(auto_dev_name, sizeof(auto_dev_name),
		 "%s.%d.auto", device_name, r);
	dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
	dev_info(dev, "device %s added to pm domain\n", auto_dev_name);

	return dev;
}

/**
 * acp_hw_init - start and test ACP block
 *
 * @handle: amdgpu_device pointer
 *
 */
static int acp_hw_init(void *handle)
{
	int r, i;
	uint64_t acp_base;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct i2s_platform_data *i2s_pdata;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	const struct amdgpu_ip_block *ip_block =
		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);

	if (!ip_block)
		return -EINVAL;

	r = amd_acp_hw_init(adev->acp.cgs_device,
			    ip_block->version->major, ip_block->version->minor);
	/* -ENODEV means board uses AZ rather than ACP */
	if (r == -ENODEV)
		return 0;
	else if (r)
		return r;

	if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
		return -EINVAL;

	acp_base = adev->rmmio_base;

	if (adev->asic_type != CHIP_STONEY) {
		adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
		if (adev->acp.acp_genpd == NULL)
			return -ENOMEM;

		adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
		adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
		adev->acp.acp_genpd->gpd.power_on = acp_poweron;

		adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device;

		pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
	}

	adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
							GFP_KERNEL);

	if (adev->acp.acp_cell == NULL)
		return -ENOMEM;

	adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
	if (adev->acp.acp_res == NULL) {
		kfree(adev->acp.acp_cell);
		return -ENOMEM;
	}

	i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
	if (i2s_pdata == NULL) {
		kfree(adev->acp.acp_res);
		kfree(adev->acp.acp_cell);
		return -ENOMEM;
	}
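	/*
	 * Three DesignWare I2S controllers sit behind the ACP: i2s_pdata[0]
	 * describes the playback instance, i2s_pdata[1] the capture instance
	 * and i2s_pdata[2] the BT instance (playback and capture). Stoney
	 * parts additionally need the 16-bit index override quirk.
	 */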
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	}
	i2s_pdata[0].cap = DWC_I2S_PLAY;
	i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
	i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1 |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1;
	}

	i2s_pdata[1].cap = DWC_I2S_RECORD;
	i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
	i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;

	i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		break;
	}

	i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
	i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
	i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;

	adev->acp.acp_res[0].name = "acp2x_dma";
	adev->acp.acp_res[0].flags = IORESOURCE_MEM;
	adev->acp.acp_res[0].start = acp_base;
	adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;

	adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
	adev->acp.acp_res[1].flags = IORESOURCE_MEM;
	adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
	adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;

	adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
	adev->acp.acp_res[2].flags = IORESOURCE_MEM;
	adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
	adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;

	adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
	adev->acp.acp_res[3].flags = IORESOURCE_MEM;
	adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
	adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;

	adev->acp.acp_res[4].name = "acp2x_dma_irq";
	adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
	adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162);
	adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;

	adev->acp.acp_cell[0].name = "acp_audio_dma";
	adev->acp.acp_cell[0].num_resources = 5;
	adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
	adev->acp.acp_cell[0].platform_data = &adev->asic_type;
	adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);

	adev->acp.acp_cell[1].name = "designware-i2s";
	adev->acp.acp_cell[1].num_resources = 1;
	adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
	adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
	adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[2].name = "designware-i2s";
	adev->acp.acp_cell[2].num_resources = 1;
	adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
	adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
	adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[3].name = "designware-i2s";
	adev->acp.acp_cell[3].num_resources = 1;
	adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
	adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
	adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);

	r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
								ACP_DEVS);
	if (r)
		return r;

	if (adev->asic_type != CHIP_STONEY) {
		for (i = 0; i < ACP_DEVS ; i++) {
			dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
			r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
			if (r) {
				dev_err(dev, "Failed to add dev to genpd\n");
				return r;
			}
		}
	}
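	/*
	 * Finally bring the block to a known state: assert the audio soft
	 * reset, wait for the done bit, enable the ACP clock and wait for it
	 * to come up, then release the reset.
	 */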
	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Enable clock to ACP and wait until the clock is enabled */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val = val | ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Deassert the SOFT RESET flags */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
	val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
	return 0;
}

/**
 * acp_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 */
static int acp_hw_fini(void *handle)
{
	int i, ret;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* return early if no ACP */
	if (!adev->acp.acp_cell)
		return 0;

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Disable ACP clock */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val &= ~ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

	if (adev->acp.acp_genpd) {
		for (i = 0; i < ACP_DEVS ; i++) {
			dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
			ret = pm_genpd_remove_device(dev);
			/* If removal fails, don't give up; try the rest */
			if (ret)
				dev_err(dev, "remove dev from genpd failed\n");
		}
		kfree(adev->acp.acp_genpd);
	}

	mfd_remove_devices(adev->acp.parent);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_cell);

	return 0;
}
static int acp_suspend(void *handle)
{
	return 0;
}

static int acp_resume(void *handle)
{
	return 0;
}

static int acp_early_init(void *handle)
{
	return 0;
}

static bool acp_is_idle(void *handle)
{
	return true;
}

static int acp_wait_for_idle(void *handle)
{
	return 0;
}

static int acp_soft_reset(void *handle)
{
	return 0;
}

static int acp_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int acp_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs acp_ip_funcs = {
	.name = "acp_ip",
	.early_init = acp_early_init,
	.late_init = NULL,
	.sw_init = acp_sw_init,
	.sw_fini = acp_sw_fini,
	.hw_init = acp_hw_init,
	.hw_fini = acp_hw_fini,
	.suspend = acp_suspend,
	.resume = acp_resume,
	.is_idle = acp_is_idle,
	.wait_for_idle = acp_wait_for_idle,
	.soft_reset = acp_soft_reset,
	.set_clockgating_state = acp_set_clockgating_state,
	.set_powergating_state = acp_set_powergating_state,
};

const struct amdgpu_ip_block_version acp_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_ACP,
	.major = 2,
	.minor = 2,
	.rev = 0,
	.funcs = &acp_ip_funcs,
};