// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation
//
// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
//          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
//
// Special thanks to:
//    Krzysztof Hejmowski <krzysztof.hejmowski@intel.com>
//    Michal Sienkiewicz <michal.sienkiewicz@intel.com>
//    Filip Proborszcz
//
// for sharing Intel AudioDSP expertise and helping shape the very
// foundation of this driver
//

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <acpi/nhlt.h>
#include <sound/hda_codec.h>
#include <sound/hda_i915.h>
#include <sound/hda_register.h>
#include <sound/hdaudio.h>
#include <sound/hdaudio_ext.h>
#include <sound/intel-dsp-config.h>
#include "../../codecs/hda.h"
#include "avs.h"
#include "cldma.h"
#include "messages.h"

static u32 pgctl_mask = AZX_PGCTL_LSRMD_MASK;
module_param(pgctl_mask, uint, 0444);
MODULE_PARM_DESC(pgctl_mask, "PCI PGCTL policy override");

static u32 cgctl_mask = AZX_CGCTL_MISCBDCGE_MASK;
module_param(cgctl_mask, uint, 0444);
MODULE_PARM_DESC(cgctl_mask, "PCI CGCTL policy override");

static void
avs_hda_update_config_dword(struct hdac_bus *bus, u32 reg, u32 mask, u32 value)
{
        struct pci_dev *pci = to_pci_dev(bus->dev);
        u32 data;

        pci_read_config_dword(pci, reg, &data);
        data &= ~mask;
        data |= (value & mask);
        pci_write_config_dword(pci, reg, data);
}

void avs_hda_power_gating_enable(struct avs_dev *adev, bool enable)
{
        u32 value = enable ? 0 : pgctl_mask;

        avs_hda_update_config_dword(&adev->base.core, AZX_PCIREG_PGCTL, pgctl_mask, value);
}

static void avs_hdac_clock_gating_enable(struct hdac_bus *bus, bool enable)
{
        u32 value = enable ? cgctl_mask : 0;

        avs_hda_update_config_dword(bus, AZX_PCIREG_CGCTL, cgctl_mask, value);
}

void avs_hda_clock_gating_enable(struct avs_dev *adev, bool enable)
{
        avs_hdac_clock_gating_enable(&adev->base.core, enable);
}

void avs_hda_l1sen_enable(struct avs_dev *adev, bool enable)
{
        if (enable) {
                if (atomic_inc_and_test(&adev->l1sen_counter))
                        snd_hdac_chip_updatel(&adev->base.core, VS_EM2, AZX_VS_EM2_L1SEN,
                                              AZX_VS_EM2_L1SEN);
        } else {
                if (atomic_dec_return(&adev->l1sen_counter) == -1)
                        snd_hdac_chip_updatel(&adev->base.core, VS_EM2, AZX_VS_EM2_L1SEN, 0);
        }
}

static int avs_hdac_bus_init_streams(struct hdac_bus *bus)
{
        unsigned int cp_streams, pb_streams;
        unsigned int gcap;

        gcap = snd_hdac_chip_readw(bus, GCAP);
        cp_streams = (gcap >> 8) & 0x0F;
        pb_streams = (gcap >> 12) & 0x0F;
        bus->num_streams = cp_streams + pb_streams;

        snd_hdac_ext_stream_init_all(bus, 0, cp_streams, SNDRV_PCM_STREAM_CAPTURE);
        snd_hdac_ext_stream_init_all(bus, cp_streams, pb_streams, SNDRV_PCM_STREAM_PLAYBACK);

        return snd_hdac_bus_alloc_stream_pages(bus);
}

static bool avs_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
{
        struct hdac_ext_link *hlink;
        bool ret;

        avs_hdac_clock_gating_enable(bus, false);
        ret = snd_hdac_bus_init_chip(bus, full_reset);

        /* Reset stream-to-link mapping */
        list_for_each_entry(hlink, &bus->hlink_list, list)
                writel(0, hlink->ml_addr + AZX_REG_ML_LOSIDV);

        avs_hdac_clock_gating_enable(bus, true);

        /* Set DUM bit to address incorrect position reporting for capture
         * streams. In order to do so, CTRL needs to be out of reset state.
         */
        snd_hdac_chip_updatel(bus, VS_EM2, AZX_VS_EM2_DUM, AZX_VS_EM2_DUM);

        return ret;
}
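
/*
 * Query the codec at @addr for its vendor ID over the CORB/RIRB link and,
 * if it responds, register a child HDAudio codec device for it.
 */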
static int probe_codec(struct hdac_bus *bus, int addr)
{
        struct hda_codec *codec;
        unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
                           (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
        unsigned int res = -1;
        int ret;

        mutex_lock(&bus->cmd_mutex);
        snd_hdac_bus_send_cmd(bus, cmd);
        snd_hdac_bus_get_response(bus, addr, &res);
        mutex_unlock(&bus->cmd_mutex);
        if (res == -1)
                return -EIO;

        dev_dbg(bus->dev, "codec #%d probed OK: 0x%x\n", addr, res);

        codec = snd_hda_codec_device_init(to_hda_bus(bus), addr, "hdaudioB%dD%d", bus->idx, addr);
        if (IS_ERR(codec)) {
                dev_err(bus->dev, "init codec failed: %ld\n", PTR_ERR(codec));
                return PTR_ERR(codec);
        }
        /*
         * Allow avs_core suspend by forcing suspended state on all
         * of its codec child devices. Any component that deals with
         * HDAudio codecs directly takes over the PM responsibilities.
         */
        pm_runtime_set_suspended(hda_codec_dev(codec));

        /* configure() effectively creates a new ASoC component */
        ret = snd_hda_codec_configure(codec);
        if (ret < 0) {
                dev_warn(bus->dev, "failed to config codec #%d: %d\n", addr, ret);
                return ret;
        }

        return 0;
}

static void avs_hdac_bus_probe_codecs(struct hdac_bus *bus)
{
        int ret, c;

        /* First try to probe all given codec slots */
        for (c = 0; c < HDA_MAX_CODECS; c++) {
                if (!(bus->codec_mask & BIT(c)))
                        continue;

                ret = probe_codec(bus, c);
                /* Ignore codecs with no supporting driver. */
                if (!ret || ret == -ENODEV)
                        continue;

                /*
                 * Some BIOSes report codec addresses that do not
                 * actually exist.
                 */
                dev_warn(bus->dev, "Codec #%d probe error; disabling it...\n", c);
                bus->codec_mask &= ~BIT(c);
                /*
                 * Worse yet, accessing a non-existing codec often screws up
                 * the controller bus and disturbs further communication.
                 * Thus, if an error occurs during probing, it is better to
                 * reset the controller bus to get back to a sane state.
                 */
                snd_hdac_bus_stop_chip(bus);
                avs_hdac_bus_init_chip(bus, true);
        }
}

static void avs_hda_probe_work(struct work_struct *work)
{
        struct avs_dev *adev = container_of(work, struct avs_dev, probe_work);
        struct hdac_bus *bus = &adev->base.core;
        struct hdac_ext_link *hlink;
        int ret;

        pm_runtime_set_active(bus->dev); /* clear runtime_error flag */

        snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
        avs_hdac_bus_init_chip(bus, true);
        avs_hdac_bus_probe_codecs(bus);
        snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);

        /* with all codecs probed, links can be powered down */
        list_for_each_entry(hlink, &bus->hlink_list, list)
                snd_hdac_ext_bus_link_put(bus, hlink);

        snd_hdac_ext_bus_ppcap_enable(bus, true);
        snd_hdac_ext_bus_ppcap_int_enable(bus, true);
        avs_debugfs_init(adev);

        ret = avs_dsp_first_boot_firmware(adev);
        if (ret < 0)
                return;

        acpi_nhlt_get_gbl_table();

        avs_register_all_boards(adev);

        /* configure PM */
        pm_runtime_set_autosuspend_delay(bus->dev, 2000);
        pm_runtime_use_autosuspend(bus->dev);
        pm_runtime_mark_last_busy(bus->dev);
        pm_runtime_put_autosuspend(bus->dev);
        pm_runtime_allow(bus->dev);
}

static void hdac_stream_update_pos(struct hdac_stream *stream, u64 buffer_size)
{
        u64 prev_pos, pos, num_bytes;

        div64_u64_rem(stream->curr_pos, buffer_size, &prev_pos);
        pos = snd_hdac_stream_get_pos_posbuf(stream);

        if (pos < prev_pos)
                num_bytes = (buffer_size - prev_pos) + pos;
        else
                num_bytes = pos - prev_pos;

        stream->curr_pos += num_bytes;
}

/* called from IRQ */
static void hdac_update_stream(struct hdac_bus *bus, struct hdac_stream *stream)
{
        if (stream->substream) {
                snd_pcm_period_elapsed(stream->substream);
        } else if (stream->cstream) {
                u64 buffer_size = stream->cstream->runtime->buffer_size;

                hdac_stream_update_pos(stream, buffer_size);
                snd_compr_fragment_elapsed(stream->cstream);
        }
}

static irqreturn_t avs_hda_interrupt(struct hdac_bus *bus)
{
        irqreturn_t ret = IRQ_NONE;
        u32 status;

        status = snd_hdac_chip_readl(bus, INTSTS);
        if (snd_hdac_bus_handle_stream_irq(bus, status, hdac_update_stream))
                ret = IRQ_HANDLED;

        spin_lock_irq(&bus->reg_lock);
        /* Clear RIRB interrupt. */
        status = snd_hdac_chip_readb(bus, RIRBSTS);
        if (status & RIRB_INT_MASK) {
                if (status & RIRB_INT_RESPONSE)
                        snd_hdac_bus_update_rirb(bus);
                snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);
                ret = IRQ_HANDLED;
        }

        spin_unlock_irq(&bus->reg_lock);
        return ret;
}

static irqreturn_t avs_hda_irq_handler(int irq, void *dev_id)
{
        struct hdac_bus *bus = dev_id;
        u32 intsts;

        intsts = snd_hdac_chip_readl(bus, INTSTS);
        if (intsts == UINT_MAX || !(intsts & AZX_INT_GLOBAL_EN))
                return IRQ_NONE;

        /* Mask GIE, unmasked in irq_thread(). */
        snd_hdac_chip_updatel(bus, INTCTL, AZX_INT_GLOBAL_EN, 0);

        return IRQ_WAKE_THREAD;
}

static irqreturn_t avs_hda_irq_thread(int irq, void *dev_id)
{
        struct hdac_bus *bus = dev_id;
        u32 status;

        status = snd_hdac_chip_readl(bus, INTSTS);
        if (status & ~AZX_INT_GLOBAL_EN)
                avs_hda_interrupt(bus);

        /* Unmask GIE, masked in irq_handler(). */
        snd_hdac_chip_updatel(bus, INTCTL, AZX_INT_GLOBAL_EN, AZX_INT_GLOBAL_EN);

        return IRQ_HANDLED;
}

static irqreturn_t avs_dsp_irq_handler(int irq, void *dev_id)
{
        struct avs_dev *adev = dev_id;

        return avs_hda_irq_handler(irq, &adev->base.core);
}

static irqreturn_t avs_dsp_irq_thread(int irq, void *dev_id)
{
        struct avs_dev *adev = dev_id;
        struct hdac_bus *bus = &adev->base.core;
        u32 status;

        status = readl(bus->ppcap + AZX_REG_PP_PPSTS);
        if (status & AZX_PPCTL_PIE)
                avs_dsp_op(adev, dsp_interrupt);

        /* Unmask GIE, masked in irq_handler(). */
        snd_hdac_chip_updatel(bus, INTCTL, AZX_INT_GLOBAL_EN, AZX_INT_GLOBAL_EN);

        return IRQ_HANDLED;
}

static int avs_hdac_acquire_irq(struct avs_dev *adev)
{
        struct hdac_bus *bus = &adev->base.core;
        struct pci_dev *pci = to_pci_dev(bus->dev);
        int ret;

        /* request one and check that we only got one interrupt */
        ret = pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_MSI | PCI_IRQ_INTX);
        if (ret != 1) {
                dev_err(adev->dev, "Failed to allocate IRQ vector: %d\n", ret);
                return ret;
        }

        ret = pci_request_irq(pci, 0, avs_hda_irq_handler, avs_hda_irq_thread, bus,
                              KBUILD_MODNAME);
        if (ret < 0) {
                dev_err(adev->dev, "Failed to request stream IRQ handler: %d\n", ret);
                goto free_vector;
        }

        ret = pci_request_irq(pci, 0, avs_dsp_irq_handler, avs_dsp_irq_thread, adev,
                              KBUILD_MODNAME);
        if (ret < 0) {
                dev_err(adev->dev, "Failed to request IPC IRQ handler: %d\n", ret);
                goto free_stream_irq;
        }

        return 0;

free_stream_irq:
        pci_free_irq(pci, 0, bus);
free_vector:
        pci_free_irq_vectors(pci);
        return ret;
}

static int avs_bus_init(struct avs_dev *adev, struct pci_dev *pci, const struct pci_device_id *id)
{
        struct hda_bus *bus = &adev->base;
        struct avs_ipc *ipc;
        struct device *dev = &pci->dev;
        int ret;

        ret = snd_hdac_ext_bus_init(&bus->core, dev, NULL, &soc_hda_ext_bus_ops);
        if (ret < 0)
                return ret;

        bus->core.use_posbuf = 1;
        bus->core.bdl_pos_adj = 0;
        bus->core.sync_write = 1;
        bus->pci = pci;
        bus->mixer_assigned = -1;
        mutex_init(&bus->prepare_mutex);

        ipc = devm_kzalloc(dev, sizeof(*ipc), GFP_KERNEL);
        if (!ipc)
                return -ENOMEM;
        ret = avs_ipc_init(ipc, dev);
        if (ret < 0)
                return ret;

        adev->modcfg_buf = devm_kzalloc(dev, AVS_MAILBOX_SIZE, GFP_KERNEL);
        if (!adev->modcfg_buf)
                return -ENOMEM;

        adev->dev = dev;
        adev->spec = (const struct avs_spec *)id->driver_data;
        adev->ipc = ipc;
        adev->hw_cfg.dsp_cores = hweight_long(AVS_MAIN_CORE_MASK);
        INIT_WORK(&adev->probe_work, avs_hda_probe_work);
        INIT_LIST_HEAD(&adev->comp_list);
        INIT_LIST_HEAD(&adev->path_list);
        INIT_LIST_HEAD(&adev->fw_list);
        init_completion(&adev->fw_ready);
        spin_lock_init(&adev->path_list_lock);
        mutex_init(&adev->modres_mutex);
        mutex_init(&adev->comp_list_mutex);
        mutex_init(&adev->path_mutex);

        return 0;
}
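
/*
 * PCI probe maps BAR0 (HDAudio controller registers) and BAR4 (DSP registers),
 * initializes streams and interrupts, then defers the remaining, lengthy
 * initialization (codec probing, firmware boot, board registration) to
 * probe_work.
 */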
static int avs_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
        struct hdac_bus *bus;
        struct avs_dev *adev;
        struct device *dev = &pci->dev;
        int ret;

        ret = snd_intel_dsp_driver_probe(pci);
        if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_AVS)
                return -ENODEV;

        ret = pcim_enable_device(pci);
        if (ret < 0)
                return ret;

        adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
        if (!adev)
                return -ENOMEM;
        ret = avs_bus_init(adev, pci, id);
        if (ret < 0) {
                dev_err(dev, "failed to init avs bus: %d\n", ret);
                return ret;
        }

        ret = pci_request_regions(pci, "AVS HDAudio");
        if (ret < 0)
                return ret;

        bus = &adev->base.core;
        bus->addr = pci_resource_start(pci, 0);
        bus->remap_addr = pci_ioremap_bar(pci, 0);
        if (!bus->remap_addr) {
                dev_err(bus->dev, "ioremap error\n");
                ret = -ENXIO;
                goto err_remap_bar0;
        }

        adev->dsp_ba = pci_ioremap_bar(pci, 4);
        if (!adev->dsp_ba) {
                dev_err(bus->dev, "ioremap error\n");
                ret = -ENXIO;
                goto err_remap_bar4;
        }

        snd_hdac_bus_parse_capabilities(bus);
        if (bus->mlcap)
                snd_hdac_ext_bus_get_ml_capabilities(bus);

        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
                dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
        dma_set_max_seg_size(dev, UINT_MAX);

        ret = avs_hdac_bus_init_streams(bus);
        if (ret < 0) {
                dev_err(dev, "failed to init streams: %d\n", ret);
                goto err_init_streams;
        }

        ret = avs_hdac_acquire_irq(adev);
        if (ret < 0) {
                dev_err(bus->dev, "failed to acquire irq: %d\n", ret);
                goto err_acquire_irq;
        }

        pci_set_master(pci);
        pci_set_drvdata(pci, bus);
        device_disable_async_suspend(dev);

        ret = snd_hdac_i915_init(bus);
        if (ret == -EPROBE_DEFER)
                goto err_i915_init;
        else if (ret < 0)
                dev_info(bus->dev, "i915 init unsuccessful: %d\n", ret);

        schedule_work(&adev->probe_work);

        return 0;

err_i915_init:
        pci_free_irq(pci, 0, adev);
        pci_free_irq(pci, 0, bus);
        pci_free_irq_vectors(pci);
        pci_clear_master(pci);
        pci_set_drvdata(pci, NULL);
err_acquire_irq:
        snd_hdac_bus_free_stream_pages(bus);
        snd_hdac_ext_stream_free_all(bus);
err_init_streams:
        iounmap(adev->dsp_ba);
err_remap_bar4:
        iounmap(bus->remap_addr);
err_remap_bar0:
        pci_release_regions(pci);
        return ret;
}

static void avs_pci_shutdown(struct pci_dev *pci)
{
        struct hdac_bus *bus = pci_get_drvdata(pci);
        struct avs_dev *adev = hdac_to_avs(bus);

        cancel_work_sync(&adev->probe_work);
        avs_ipc_block(adev->ipc);

        snd_hdac_stop_streams(bus);
        avs_dsp_op(adev, int_control, false);
        snd_hdac_ext_bus_ppcap_int_enable(bus, false);
        snd_hdac_ext_bus_link_power_down_all(bus);

        snd_hdac_bus_stop_chip(bus);
        snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);

        pci_free_irq(pci, 0, adev);
        pci_free_irq(pci, 0, bus);
        pci_free_irq_vectors(pci);
}

static void avs_pci_remove(struct pci_dev *pci)
{
        struct hdac_device *hdev, *save;
        struct hdac_bus *bus = pci_get_drvdata(pci);
        struct avs_dev *adev = hdac_to_avs(bus);

        cancel_work_sync(&adev->probe_work);
        avs_ipc_block(adev->ipc);

        avs_unregister_all_boards(adev);

        acpi_nhlt_put_gbl_table();
        avs_debugfs_exit(adev);

        if (avs_platattr_test(adev, CLDMA))
                hda_cldma_free(&code_loader);

        snd_hdac_stop_streams_and_chip(bus);
        avs_dsp_op(adev, int_control, false);
        snd_hdac_ext_bus_ppcap_int_enable(bus, false);

        /* it is safe to remove all codecs from the system now */
        list_for_each_entry_safe(hdev, save, &bus->codec_list, list)
                snd_hda_codec_unregister(hdac_to_hda_codec(hdev));

        snd_hdac_bus_free_stream_pages(bus);
        snd_hdac_ext_stream_free_all(bus);
        /* reverse ml_capabilities */
        snd_hdac_ext_link_free_all(bus);
        snd_hdac_ext_bus_exit(bus);

        avs_dsp_core_disable(adev, GENMASK(adev->hw_cfg.dsp_cores - 1, 0));
        snd_hdac_ext_bus_ppcap_enable(bus, false);

        /* snd_hdac_stop_streams_and_chip does that already? */
        snd_hdac_bus_stop_chip(bus);
        snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
        if (bus->audio_component)
                snd_hdac_i915_exit(bus);

        avs_module_info_free(adev);
        pci_free_irq(pci, 0, adev);
        pci_free_irq(pci, 0, bus);
        pci_free_irq_vectors(pci);
        iounmap(bus->remap_addr);
        iounmap(adev->dsp_ba);
        pci_release_regions(pci);

        /* Firmware is not needed anymore */
        avs_release_firmwares(adev);

        /* pm_runtime_forbid() can rpm_resume() which we do not want */
        pm_runtime_disable(&pci->dev);
        pm_runtime_forbid(&pci->dev);
        pm_runtime_enable(&pci->dev);
        pm_runtime_get_noresume(&pci->dev);
}

static int avs_suspend_standby(struct avs_dev *adev)
{
        struct hdac_bus *bus = &adev->base.core;
        struct pci_dev *pci = adev->base.pci;

        if (bus->cmd_dma_state)
                snd_hdac_bus_stop_cmd_io(bus);

        snd_hdac_ext_bus_link_power_down_all(bus);

        enable_irq_wake(pci->irq);
        pci_save_state(pci);

        return 0;
}
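
/*
 * Suspend comes in two flavors: when low-power paths are in use
 * (adev->num_lp_paths), only the links and CORB/RIRB DMA are quiesced and the
 * IRQ is armed as a wakeup source; otherwise the DSP cores are powered off
 * and the controller is put into reset.
 */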
static int __maybe_unused avs_suspend_common(struct avs_dev *adev, bool low_power)
{
        struct hdac_bus *bus = &adev->base.core;
        int ret;

        flush_work(&adev->probe_work);
        if (low_power && adev->num_lp_paths)
                return avs_suspend_standby(adev);

        snd_hdac_ext_bus_link_power_down_all(bus);

        ret = avs_ipc_set_dx(adev, AVS_MAIN_CORE_MASK, false);
        /*
         * pm_runtime is blocked on DSP failure but system-wide suspend is not.
         * Do not block the entire system from suspending if that's the case.
         */
        if (ret && ret != -EPERM) {
                dev_err(adev->dev, "set dx failed: %d\n", ret);
                return AVS_IPC_RET(ret);
        }

        avs_ipc_block(adev->ipc);
        avs_dsp_op(adev, int_control, false);
        snd_hdac_ext_bus_ppcap_int_enable(bus, false);

        ret = avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
        if (ret < 0) {
                dev_err(adev->dev, "core_mask %ld disable failed: %d\n", AVS_MAIN_CORE_MASK, ret);
                return ret;
        }

        snd_hdac_ext_bus_ppcap_enable(bus, false);
        /* disable LP SRAM retention */
        avs_hda_power_gating_enable(adev, false);
        snd_hdac_bus_stop_chip(bus);
        /* disable CG when putting controller to reset */
        avs_hdac_clock_gating_enable(bus, false);
        snd_hdac_bus_enter_link_reset(bus);
        avs_hdac_clock_gating_enable(bus, true);

        snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);

        return 0;
}

static int avs_resume_standby(struct avs_dev *adev)
{
        struct hdac_bus *bus = &adev->base.core;
        struct pci_dev *pci = adev->base.pci;

        pci_restore_state(pci);
        disable_irq_wake(pci->irq);

        snd_hdac_ext_bus_link_power_up_all(bus);

        if (bus->cmd_dma_state)
                snd_hdac_bus_init_cmd_io(bus);

        return 0;
}

static int __maybe_unused avs_resume_common(struct avs_dev *adev, bool low_power, bool purge)
{
        struct hdac_bus *bus = &adev->base.core;
        int ret;

        if (low_power && adev->num_lp_paths)
                return avs_resume_standby(adev);

        snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
        avs_hdac_bus_init_chip(bus, true);

        snd_hdac_ext_bus_ppcap_enable(bus, true);
        snd_hdac_ext_bus_ppcap_int_enable(bus, true);

        ret = avs_dsp_boot_firmware(adev, purge);
        if (ret < 0) {
                dev_err(adev->dev, "firmware boot failed: %d\n", ret);
                return ret;
        }

        return 0;
}

static int __maybe_unused avs_suspend(struct device *dev)
{
        return avs_suspend_common(to_avs_dev(dev), true);
}

static int __maybe_unused avs_resume(struct device *dev)
{
        return avs_resume_common(to_avs_dev(dev), true, true);
}

static int __maybe_unused avs_runtime_suspend(struct device *dev)
{
        return avs_suspend_common(to_avs_dev(dev), true);
}

static int __maybe_unused avs_runtime_resume(struct device *dev)
{
        return avs_resume_common(to_avs_dev(dev), true, false);
}

static int __maybe_unused avs_freeze(struct device *dev)
{
        return avs_suspend_common(to_avs_dev(dev), false);
}

static int __maybe_unused avs_thaw(struct device *dev)
{
        return avs_resume_common(to_avs_dev(dev), false, true);
}

static int __maybe_unused avs_poweroff(struct device *dev)
{
        return avs_suspend_common(to_avs_dev(dev), false);
}

static int __maybe_unused avs_restore(struct device *dev)
{
        return avs_resume_common(to_avs_dev(dev), false, true);
}

static const struct dev_pm_ops avs_dev_pm = {
        .suspend = avs_suspend,
        .resume = avs_resume,
        .freeze = avs_freeze,
        .thaw = avs_thaw,
        .poweroff = avs_poweroff,
        .restore = avs_restore,
        SET_RUNTIME_PM_OPS(avs_runtime_suspend, avs_runtime_resume, NULL)
};

static const struct avs_sram_spec skl_sram_spec = {
        .base_offset = SKL_ADSP_SRAM_BASE_OFFSET,
        .window_size = SKL_ADSP_SRAM_WINDOW_SIZE,
        .rom_status_offset = SKL_ADSP_SRAM_BASE_OFFSET,
};

static const struct avs_sram_spec apl_sram_spec = {
        .base_offset = APL_ADSP_SRAM_BASE_OFFSET,
        .window_size = APL_ADSP_SRAM_WINDOW_SIZE,
        .rom_status_offset = APL_ADSP_SRAM_BASE_OFFSET,
};

static const struct avs_hipc_spec skl_hipc_spec = {
        .req_offset = SKL_ADSP_REG_HIPCI,
        .req_ext_offset = SKL_ADSP_REG_HIPCIE,
        .req_busy_mask = SKL_ADSP_HIPCI_BUSY,
        .ack_offset = SKL_ADSP_REG_HIPCIE,
        .ack_done_mask = SKL_ADSP_HIPCIE_DONE,
        .rsp_offset = SKL_ADSP_REG_HIPCT,
        .rsp_busy_mask = SKL_ADSP_HIPCT_BUSY,
        .ctl_offset = SKL_ADSP_REG_HIPCCTL,
};

static const struct avs_hipc_spec cnl_hipc_spec = {
        .req_offset = CNL_ADSP_REG_HIPCIDR,
        .req_ext_offset = CNL_ADSP_REG_HIPCIDD,
        .req_busy_mask = CNL_ADSP_HIPCIDR_BUSY,
        .ack_offset = CNL_ADSP_REG_HIPCIDA,
        .ack_done_mask = CNL_ADSP_HIPCIDA_DONE,
        .rsp_offset = CNL_ADSP_REG_HIPCTDR,
        .rsp_busy_mask = CNL_ADSP_HIPCTDR_BUSY,
        .ctl_offset = CNL_ADSP_REG_HIPCCTL,
};

static const struct avs_spec skl_desc = {
        .name = "skl",
        .min_fw_version = { 9, 21, 0, 4732 },
        .dsp_ops = &avs_skl_dsp_ops,
        .core_init_mask = 1,
        .attributes = AVS_PLATATTR_CLDMA,
        .sram = &skl_sram_spec,
        .hipc = &skl_hipc_spec,
};

static const struct avs_spec apl_desc = {
        .name = "apl",
        .min_fw_version = { 9, 22, 1, 4323 },
        .dsp_ops = &avs_apl_dsp_ops,
        .core_init_mask = 3,
        .attributes = AVS_PLATATTR_IMR,
        .sram = &apl_sram_spec,
        .hipc = &skl_hipc_spec,
};

static const struct avs_spec cnl_desc = {
        .name = "cnl",
        .min_fw_version = { 10, 23, 0, 5314 },
        .dsp_ops = &avs_cnl_dsp_ops,
        .core_init_mask = 1,
        .attributes = AVS_PLATATTR_IMR,
        .sram = &apl_sram_spec,
        .hipc = &cnl_hipc_spec,
};

static const struct avs_spec icl_desc = {
        .name = "icl",
        .min_fw_version = { 10, 23, 0, 5040 },
        .dsp_ops = &avs_icl_dsp_ops,
        .core_init_mask = 1,
        .attributes = AVS_PLATATTR_IMR,
        .sram = &apl_sram_spec,
        .hipc = &cnl_hipc_spec,
};

static const struct avs_spec jsl_desc = {
        .name = "jsl",
        .min_fw_version = { 10, 26, 0, 5872 },
        .dsp_ops = &avs_icl_dsp_ops,
        .core_init_mask = 1,
        .attributes = AVS_PLATATTR_IMR,
        .sram = &apl_sram_spec,
        .hipc = &cnl_hipc_spec,
};

#define AVS_TGL_BASED_SPEC(sname)                       \
static const struct avs_spec sname##_desc = {           \
        .name = #sname,                                 \
        .min_fw_version = { 10, 29, 0, 5646 },          \
        .dsp_ops = &avs_tgl_dsp_ops,                    \
        .core_init_mask = 1,                            \
        .attributes = AVS_PLATATTR_IMR,                 \
        .sram = &apl_sram_spec,                         \
        .hipc = &cnl_hipc_spec,                         \
}

AVS_TGL_BASED_SPEC(lkf);
AVS_TGL_BASED_SPEC(tgl);
AVS_TGL_BASED_SPEC(ehl);
AVS_TGL_BASED_SPEC(adl);
AVS_TGL_BASED_SPEC(adl_n);

static const struct pci_device_id avs_ids[] = {
        { PCI_DEVICE_DATA(INTEL, HDA_SKL_LP, &skl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_SKL, &skl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_KBL_LP, &skl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_KBL, &skl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_KBL_H, &skl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_CML_S, &skl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_APL, &apl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_GML, &apl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_CNL_LP, &cnl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_CNL_H, &cnl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_CML_LP, &cnl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_CML_H, &cnl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_RKL_S, &cnl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_ICL_LP, &icl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_ICL_N, &icl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_ICL_H, &icl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_JSL_N, &jsl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_LKF, &lkf_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_TGL_LP, &tgl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_TGL_H, &tgl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_CML_R, &tgl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_EHL_0, &ehl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_EHL_3, &ehl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_ADL_S, &adl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_ADL_P, &adl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_ADL_PS, &adl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_ADL_M, &adl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_ADL_PX, &adl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_ADL_N, &adl_n_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_RPL_S, &adl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_RPL_P_0, &adl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_RPL_P_1, &adl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_RPL_M, &adl_desc) },
        { PCI_DEVICE_DATA(INTEL, HDA_RPL_PX, &adl_desc) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, avs_ids);

static struct pci_driver avs_pci_driver = {
        .name = KBUILD_MODNAME,
        .id_table = avs_ids,
        .probe = avs_pci_probe,
        .remove = avs_pci_remove,
        .shutdown = avs_pci_shutdown,
        .dev_groups = avs_attr_groups,
        .driver = {
                .pm = &avs_dev_pm,
        },
};
module_pci_driver(avs_pci_driver);

MODULE_AUTHOR("Cezary Rojewski <cezary.rojewski@intel.com>");
MODULE_AUTHOR("Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>");
MODULE_DESCRIPTION("Intel cAVS sound driver");
MODULE_LICENSE("GPL");