// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation
//
// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
//          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
//
// Special thanks to:
//    Krzysztof Hejmowski <krzysztof.hejmowski@intel.com>
//    Michal Sienkiewicz <michal.sienkiewicz@intel.com>
//    Filip Proborszcz
//
// for sharing Intel AudioDSP expertise and helping shape the very
// foundation of this driver
//

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <acpi/nhlt.h>
#include <sound/hda_codec.h>
#include <sound/hda_i915.h>
#include <sound/hda_register.h>
#include <sound/hdaudio.h>
#include <sound/hdaudio_ext.h>
#include <sound/intel-dsp-config.h>
#include "../../codecs/hda.h"
#include "avs.h"
#include "cldma.h"
#include "messages.h"
#include "pcm.h"

static u32 pgctl_mask = AZX_PGCTL_LSRMD_MASK;
module_param(pgctl_mask, uint, 0444);
MODULE_PARM_DESC(pgctl_mask, "PCI PGCTL policy override");

static u32 cgctl_mask = AZX_CGCTL_MISCBDCGE_MASK;
module_param(cgctl_mask, uint, 0444);
MODULE_PARM_DESC(cgctl_mask, "PCI CGCTL policy override");

static void
avs_hda_update_config_dword(struct hdac_bus *bus, u32 reg, u32 mask, u32 value)
{
	struct pci_dev *pci = to_pci_dev(bus->dev);
	u32 data;

	pci_read_config_dword(pci, reg, &data);
	data &= ~mask;
	data |= (value & mask);
	pci_write_config_dword(pci, reg, data);
}

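/*
 * Update the power-gating policy bits (pgctl_mask) in the PGCTL register of
 * PCI config space; enabling clears the override bits, disabling sets them.
 * The write is skipped on ACE platforms.
 */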
void avs_hda_power_gating_enable(struct avs_dev *adev, bool enable)
{
	u32 value = enable ? 0 : pgctl_mask;

	if (!avs_platattr_test(adev, ACE))
		avs_hda_update_config_dword(&adev->base.core, AZX_PCIREG_PGCTL, pgctl_mask, value);
}

static void avs_hdac_clock_gating_enable(struct hdac_bus *bus, bool enable)
{
	struct avs_dev *adev = hdac_to_avs(bus);
	u32 value = enable ? cgctl_mask : 0;

	if (!avs_platattr_test(adev, ACE))
		avs_hda_update_config_dword(bus, AZX_PCIREG_CGCTL, cgctl_mask, value);
}

void avs_hda_clock_gating_enable(struct avs_dev *adev, bool enable)
{
	avs_hdac_clock_gating_enable(&adev->base.core, enable);
}

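/*
 * L1SEN is reference counted so multiple users may veto L1 entry: the bit in
 * the vendor-specific EM2 register is cleared on the first disable request
 * and set again once all disable requests have been balanced by enables.
 */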
void avs_hda_l1sen_enable(struct avs_dev *adev, bool enable)
{
	if (avs_platattr_test(adev, ACE))
		return;
	if (enable) {
		if (atomic_inc_and_test(&adev->l1sen_counter))
			snd_hdac_chip_updatel(&adev->base.core, VS_EM2, AZX_VS_EM2_L1SEN,
					      AZX_VS_EM2_L1SEN);
	} else {
		if (atomic_dec_return(&adev->l1sen_counter) == -1)
			snd_hdac_chip_updatel(&adev->base.core, VS_EM2, AZX_VS_EM2_L1SEN, 0);
	}
}

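/*
 * Stream counts are read from GCAP: bits 8-11 hold the number of capture
 * streams, bits 12-15 the number of playback streams. Capture streams occupy
 * the lower stream indexes, playback streams follow them.
 */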
static int avs_hdac_bus_init_streams(struct hdac_bus *bus)
{
	unsigned int cp_streams, pb_streams;
	unsigned int gcap;

	gcap = snd_hdac_chip_readw(bus, GCAP);
	cp_streams = (gcap >> 8) & 0x0F;
	pb_streams = (gcap >> 12) & 0x0F;
	bus->num_streams = cp_streams + pb_streams;

	snd_hdac_ext_stream_init_all(bus, 0, cp_streams, SNDRV_PCM_STREAM_CAPTURE);
	snd_hdac_ext_stream_init_all(bus, cp_streams, pb_streams, SNDRV_PCM_STREAM_PLAYBACK);

	return snd_hdac_bus_alloc_stream_pages(bus);
}

static bool avs_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
{
	struct avs_dev *adev = hdac_to_avs(bus);
	struct hdac_ext_link *hlink;
	bool ret;

	avs_hdac_clock_gating_enable(bus, false);
	ret = snd_hdac_bus_init_chip(bus, full_reset);

	/* Reset stream-to-link mapping */
	list_for_each_entry(hlink, &bus->hlink_list, list)
		writel(0, hlink->ml_addr + AZX_REG_ML_LOSIDV);

	avs_hdac_clock_gating_enable(bus, true);

	/* Set DUM bit to address incorrect position reporting for capture
	 * streams. In order to do so, CTRL needs to be out of reset state
	 */
	if (!avs_platattr_test(adev, ACE))
		snd_hdac_chip_updatel(bus, VS_EM2, AZX_VS_EM2_DUM, AZX_VS_EM2_DUM);

	return ret;
}

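/*
 * Check whether a codec responds at the given address by reading its vendor
 * ID, then create and configure an HDA codec device for it.
 */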
static int probe_codec(struct hdac_bus *bus, int addr)
{
	struct hda_codec *codec;
	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
			   (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
	unsigned int res = -1;
	int ret;

	mutex_lock(&bus->cmd_mutex);
	snd_hdac_bus_send_cmd(bus, cmd);
	snd_hdac_bus_get_response(bus, addr, &res);
	mutex_unlock(&bus->cmd_mutex);
	if (res == -1)
		return -EIO;

	dev_dbg(bus->dev, "codec #%d probed OK: 0x%x\n", addr, res);

	codec = snd_hda_codec_device_init(to_hda_bus(bus), addr, "hdaudioB%dD%d", bus->idx, addr);
	if (IS_ERR(codec)) {
		dev_err(bus->dev, "init codec failed: %ld\n", PTR_ERR(codec));
		return PTR_ERR(codec);
	}
	/*
	 * Allow avs_core to suspend by forcing suspended state on all of its
	 * codec child devices. A component interested in dealing with HDA
	 * codecs directly takes over the PM responsibilities.
	 */
	pm_runtime_set_suspended(hda_codec_dev(codec));

	/* configure effectively creates new ASoC component */
	ret = snd_hda_codec_configure(codec);
	if (ret < 0) {
		dev_warn(bus->dev, "failed to config codec #%d: %d\n", addr, ret);
		return ret;
	}

	return 0;
}

static void avs_hdac_bus_probe_codecs(struct hdac_bus *bus)
{
	int ret, c;

	/* First try to probe all given codec slots */
	for (c = 0; c < HDA_MAX_CODECS; c++) {
		if (!(bus->codec_mask & BIT(c)))
			continue;

		ret = probe_codec(bus, c);
		/* Ignore codecs with no supporting driver. */
		if (!ret || ret == -ENODEV)
			continue;

		/*
		 * Some BIOSes report codec addresses that don't actually
		 * exist.
		 */
		dev_warn(bus->dev, "Codec #%d probe error; disabling it...\n", c);
		bus->codec_mask &= ~BIT(c);
		/*
		 * Worse, accessing a non-existent codec often confuses the
		 * controller bus and disturbs further communication. Thus, if
		 * an error occurs during probing, reset the controller bus to
		 * get back to a sane state.
		 */
		snd_hdac_bus_stop_chip(bus);
		avs_hdac_bus_init_chip(bus, true);
	}
}

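/*
 * Deferred part of the PCI probe: bring up the controller, probe HDA codecs,
 * boot the base firmware, register machine boards and enable runtime PM.
 */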
static void avs_hda_probe_work(struct work_struct *work)
{
	struct avs_dev *adev = container_of(work, struct avs_dev, probe_work);
	struct hdac_bus *bus = &adev->base.core;
	struct hdac_ext_link *hlink;
	int ret;

	pm_runtime_set_active(bus->dev); /* clear runtime_error flag */

	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
	avs_hdac_bus_init_chip(bus, true);
	avs_hdac_bus_probe_codecs(bus);
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);

	/* with all codecs probed, links can be powered down */
	list_for_each_entry(hlink, &bus->hlink_list, list)
		snd_hdac_ext_bus_link_put(bus, hlink);

	snd_hdac_ext_bus_ppcap_enable(bus, true);
	snd_hdac_ext_bus_ppcap_int_enable(bus, true);
	avs_debugfs_init(adev);

	ret = avs_dsp_first_boot_firmware(adev);
	if (ret < 0)
		return;

	acpi_nhlt_get_gbl_table();

	avs_register_all_boards(adev);

	/* configure PM */
	pm_runtime_set_autosuspend_delay(bus->dev, 2000);
	pm_runtime_use_autosuspend(bus->dev);
	pm_runtime_mark_last_busy(bus->dev);
	pm_runtime_put_autosuspend(bus->dev);
	pm_runtime_allow(bus->dev);
}

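/*
 * Advance stream->curr_pos by the number of bytes transferred since the last
 * update, accounting for wrap-around of the DMA position within the buffer.
 */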
static void hdac_stream_update_pos(struct hdac_stream *stream, u64 buffer_size)
{
	u64 prev_pos, pos, num_bytes;

	div64_u64_rem(stream->curr_pos, buffer_size, &prev_pos);
	pos = snd_hdac_stream_get_pos_posbuf(stream);

	if (pos < prev_pos)
		num_bytes = (buffer_size - prev_pos) + pos;
	else
		num_bytes = pos - prev_pos;

	stream->curr_pos += num_bytes;
}

/* called from IRQ */
static void hdac_update_stream(struct hdac_bus *bus, struct hdac_stream *stream)
{
	if (stream->substream) {
		avs_period_elapsed(stream->substream);
	} else if (stream->cstream) {
		u64 buffer_size = stream->cstream->runtime->buffer_size;

		hdac_stream_update_pos(stream, buffer_size);
		snd_compr_fragment_elapsed(stream->cstream);
	}
}

static irqreturn_t avs_hda_interrupt(struct hdac_bus *bus)
{
	irqreturn_t ret = IRQ_NONE;
	u32 status;

	status = snd_hdac_chip_readl(bus, INTSTS);
	if (snd_hdac_bus_handle_stream_irq(bus, status, hdac_update_stream))
		ret = IRQ_HANDLED;

	spin_lock_irq(&bus->reg_lock);
	/* Clear RIRB interrupt. */
	status = snd_hdac_chip_readb(bus, RIRBSTS);
	if (status & RIRB_INT_MASK) {
		if (status & RIRB_INT_RESPONSE)
			snd_hdac_bus_update_rirb(bus);
		snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);
		ret = IRQ_HANDLED;
	}

	spin_unlock_irq(&bus->reg_lock);
	return ret;
}

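/*
 * Interrupt handling is split between a hard handler and a thread: the hard
 * handler masks the global interrupt enable (GIE) bit and wakes the thread,
 * which services stream and RIRB events before unmasking GIE again.
 */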
static irqreturn_t avs_hda_irq_handler(int irq, void *dev_id)
{
	struct hdac_bus *bus = dev_id;
	u32 intsts;

	intsts = snd_hdac_chip_readl(bus, INTSTS);
	if (intsts == UINT_MAX || !(intsts & AZX_INT_GLOBAL_EN))
		return IRQ_NONE;

	/* Mask GIE, unmasked in irq_thread(). */
	snd_hdac_chip_updatel(bus, INTCTL, AZX_INT_GLOBAL_EN, 0);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t avs_hda_irq_thread(int irq, void *dev_id)
{
	struct hdac_bus *bus = dev_id;
	u32 status;

	status = snd_hdac_chip_readl(bus, INTSTS);
	if (status & ~AZX_INT_GLOBAL_EN)
		avs_hda_interrupt(bus);

	/* Unmask GIE, masked in irq_handler(). */
	snd_hdac_chip_updatel(bus, INTCTL, AZX_INT_GLOBAL_EN, AZX_INT_GLOBAL_EN);

	return IRQ_HANDLED;
}

static irqreturn_t avs_dsp_irq_handler(int irq, void *dev_id)
{
	struct avs_dev *adev = dev_id;

	return avs_hda_irq_handler(irq, &adev->base.core);
}

static irqreturn_t avs_dsp_irq_thread(int irq, void *dev_id)
{
	struct avs_dev *adev = dev_id;
	struct hdac_bus *bus = &adev->base.core;
	u32 status;

	status = readl(bus->ppcap + AZX_REG_PP_PPSTS);
	if (status & AZX_PPCTL_PIE)
		avs_dsp_op(adev, dsp_interrupt);

	/* Unmask GIE, masked in irq_handler(). */
	snd_hdac_chip_updatel(bus, INTCTL, AZX_INT_GLOBAL_EN, AZX_INT_GLOBAL_EN);

	return IRQ_HANDLED;
}

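/*
 * A single MSI (or legacy INTx) vector is shared by two handler pairs: one
 * for HDA stream/RIRB interrupts and one for DSP IPC interrupts.
 */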
static int avs_hdac_acquire_irq(struct avs_dev *adev)
{
	struct hdac_bus *bus = &adev->base.core;
	struct pci_dev *pci = to_pci_dev(bus->dev);
	int ret;

	/* request one and check that we only got one interrupt */
	ret = pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_MSI | PCI_IRQ_INTX);
	if (ret != 1) {
		dev_err(adev->dev, "Failed to allocate IRQ vector: %d\n", ret);
		return ret;
	}

	ret = pci_request_irq(pci, 0, avs_hda_irq_handler, avs_hda_irq_thread, bus,
			      KBUILD_MODNAME);
	if (ret < 0) {
		dev_err(adev->dev, "Failed to request stream IRQ handler: %d\n", ret);
		goto free_vector;
	}

	ret = pci_request_irq(pci, 0, avs_dsp_irq_handler, avs_dsp_irq_thread, adev,
			      KBUILD_MODNAME);
	if (ret < 0) {
		dev_err(adev->dev, "Failed to request IPC IRQ handler: %d\n", ret);
		goto free_stream_irq;
	}

	return 0;

free_stream_irq:
	pci_free_irq(pci, 0, bus);
free_vector:
	pci_free_irq_vectors(pci);
	return ret;
}

static int avs_bus_init(struct avs_dev *adev, struct pci_dev *pci, const struct pci_device_id *id)
{
	struct hda_bus *bus = &adev->base;
	struct avs_ipc *ipc;
	struct device *dev = &pci->dev;
	int ret;

	ret = snd_hdac_ext_bus_init(&bus->core, dev, NULL, &soc_hda_ext_bus_ops);
	if (ret < 0)
		return ret;

	bus->core.use_posbuf = 1;
	bus->core.bdl_pos_adj = 0;
	bus->core.sync_write = 1;
	bus->pci = pci;
	bus->mixer_assigned = -1;
	mutex_init(&bus->prepare_mutex);

	ipc = devm_kzalloc(dev, sizeof(*ipc), GFP_KERNEL);
	if (!ipc)
		return -ENOMEM;
	ret = avs_ipc_init(ipc, dev);
	if (ret < 0)
		return ret;

	adev->modcfg_buf = devm_kzalloc(dev, AVS_MAILBOX_SIZE, GFP_KERNEL);
	if (!adev->modcfg_buf)
		return -ENOMEM;

	adev->dev = dev;
	adev->spec = (const struct avs_spec *)id->driver_data;
	adev->ipc = ipc;
	adev->hw_cfg.dsp_cores = hweight_long(AVS_MAIN_CORE_MASK);
	INIT_WORK(&adev->probe_work, avs_hda_probe_work);
	INIT_LIST_HEAD(&adev->comp_list);
	INIT_LIST_HEAD(&adev->path_list);
	INIT_LIST_HEAD(&adev->fw_list);
	init_completion(&adev->fw_ready);
	spin_lock_init(&adev->path_list_lock);
	mutex_init(&adev->modres_mutex);
	mutex_init(&adev->comp_list_mutex);
	mutex_init(&adev->path_mutex);

	return 0;
}

static int avs_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	struct hdac_bus *bus;
	struct avs_dev *adev;
	struct device *dev = &pci->dev;
	int ret;

	ret = snd_intel_dsp_driver_probe(pci);
	switch (ret) {
	case SND_INTEL_DSP_DRIVER_ANY:
	case SND_INTEL_DSP_DRIVER_SST:
	case SND_INTEL_DSP_DRIVER_AVS:
		break;
	default:
		return -ENODEV;
	}

	ret = pcim_enable_device(pci);
	if (ret < 0)
		return ret;

	adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;
	ret = avs_bus_init(adev, pci, id);
	if (ret < 0) {
		dev_err(dev, "failed to init avs bus: %d\n", ret);
		return ret;
	}

	ret = pcim_request_all_regions(pci, "AVS HDAudio");
	if (ret < 0)
		return ret;

	bus = &adev->base.core;
	bus->addr = pci_resource_start(pci, 0);
	bus->remap_addr = pci_ioremap_bar(pci, 0);
	if (!bus->remap_addr) {
		dev_err(bus->dev, "ioremap error\n");
		return -ENXIO;
	}

	adev->dsp_ba = pci_ioremap_bar(pci, 4);
	if (!adev->dsp_ba) {
		dev_err(bus->dev, "ioremap error\n");
		ret = -ENXIO;
		goto err_remap_bar4;
	}

	snd_hdac_bus_parse_capabilities(bus);
	if (bus->mlcap)
		snd_hdac_ext_bus_get_ml_capabilities(bus);

	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	dma_set_max_seg_size(dev, UINT_MAX);

	ret = avs_hdac_bus_init_streams(bus);
	if (ret < 0) {
		dev_err(dev, "failed to init streams: %d\n", ret);
		goto err_init_streams;
	}

	ret = avs_hdac_acquire_irq(adev);
	if (ret < 0) {
		dev_err(bus->dev, "failed to acquire irq: %d\n", ret);
		goto err_acquire_irq;
	}

	pci_set_master(pci);
	pci_set_drvdata(pci, bus);
	device_disable_async_suspend(dev);

	ret = snd_hdac_i915_init(bus);
	if (ret == -EPROBE_DEFER)
		goto err_i915_init;
	else if (ret < 0)
		dev_info(bus->dev, "i915 init unsuccessful: %d\n", ret);

	schedule_work(&adev->probe_work);

	return 0;

err_i915_init:
	pci_free_irq(pci, 0, adev);
	pci_free_irq(pci, 0, bus);
	pci_free_irq_vectors(pci);
	pci_clear_master(pci);
	pci_set_drvdata(pci, NULL);
err_acquire_irq:
	snd_hdac_bus_free_stream_pages(bus);
	snd_hdac_ext_stream_free_all(bus);
err_init_streams:
	iounmap(adev->dsp_ba);
err_remap_bar4:
	iounmap(bus->remap_addr);
	return ret;
}

static void avs_pci_shutdown(struct pci_dev *pci)
{
	struct hdac_bus *bus = pci_get_drvdata(pci);
	struct avs_dev *adev = hdac_to_avs(bus);

	cancel_work_sync(&adev->probe_work);
	avs_ipc_block(adev->ipc);

	snd_hdac_stop_streams(bus);
	avs_dsp_op(adev, int_control, false);
	snd_hdac_ext_bus_ppcap_int_enable(bus, false);
	snd_hdac_ext_bus_link_power_down_all(bus);

	snd_hdac_bus_stop_chip(bus);
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);

	pci_free_irq(pci, 0, adev);
	pci_free_irq(pci, 0, bus);
	pci_free_irq_vectors(pci);
}

static void avs_pci_remove(struct pci_dev *pci)
{
	struct hdac_device *hdev, *save;
	struct hdac_bus *bus = pci_get_drvdata(pci);
	struct avs_dev *adev = hdac_to_avs(bus);

	cancel_work_sync(&adev->probe_work);
	avs_ipc_block(adev->ipc);

	avs_unregister_all_boards(adev);

	acpi_nhlt_put_gbl_table();
	avs_debugfs_exit(adev);

	if (avs_platattr_test(adev, CLDMA))
		hda_cldma_free(&code_loader);

	snd_hdac_stop_streams_and_chip(bus);
	avs_dsp_op(adev, int_control, false);
	snd_hdac_ext_bus_ppcap_int_enable(bus, false);

	/* it is safe to remove all codecs from the system now */
	list_for_each_entry_safe(hdev, save, &bus->codec_list, list)
		snd_hda_codec_unregister(hdac_to_hda_codec(hdev));

	snd_hdac_bus_free_stream_pages(bus);
	snd_hdac_ext_stream_free_all(bus);
	/* reverse ml_capabilities */
	snd_hdac_ext_link_free_all(bus);
	snd_hdac_ext_bus_exit(bus);

	avs_dsp_core_disable(adev, GENMASK(adev->hw_cfg.dsp_cores - 1, 0));
	snd_hdac_ext_bus_ppcap_enable(bus, false);

	/* snd_hdac_stop_streams_and_chip does that already? */
	snd_hdac_bus_stop_chip(bus);
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
	if (bus->audio_component)
		snd_hdac_i915_exit(bus);

	avs_module_info_free(adev);
	pci_free_irq(pci, 0, adev);
	pci_free_irq(pci, 0, bus);
	pci_free_irq_vectors(pci);
	iounmap(bus->remap_addr);
	iounmap(adev->dsp_ba);

	/* Firmware is not needed anymore */
	avs_release_firmwares(adev);

	/* pm_runtime_forbid() can rpm_resume() which we do not want */
	pm_runtime_disable(&pci->dev);
	pm_runtime_forbid(&pci->dev);
	pm_runtime_enable(&pci->dev);
	pm_runtime_get_noresume(&pci->dev);
}

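/*
 * Standby variant of suspend, used when low-power paths are in use: command
 * DMA and the HDA links are stopped, the DSP is left untouched, and the PCI
 * IRQ is armed as a wakeup source.
 */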
static int avs_suspend_standby(struct avs_dev *adev)
{
	struct hdac_bus *bus = &adev->base.core;
	struct pci_dev *pci = adev->base.pci;

	if (bus->cmd_dma_state)
		snd_hdac_bus_stop_cmd_io(bus);

	snd_hdac_ext_bus_link_power_down_all(bus);

	enable_irq_wake(pci->irq);
	pci_save_state(pci);

	return 0;
}

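/*
 * Full suspend: ask the firmware for a Dx power-down of the main cores,
 * disable IPC processing and the DSP cores, then stop the controller and put
 * the HDA link into reset.
 */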
static int avs_suspend_common(struct avs_dev *adev, bool low_power)
{
	struct hdac_bus *bus = &adev->base.core;
	int ret;

	flush_work(&adev->probe_work);
	if (low_power && adev->num_lp_paths)
		return avs_suspend_standby(adev);

	snd_hdac_ext_bus_link_power_down_all(bus);

	ret = avs_ipc_set_dx(adev, AVS_MAIN_CORE_MASK, false);
	/*
	 * pm_runtime is blocked on DSP failure but system-wide suspend is not.
	 * Do not block entire system from suspending if that's the case.
	 */
	if (ret && ret != -EPERM) {
		dev_err(adev->dev, "set dx failed: %d\n", ret);
		return AVS_IPC_RET(ret);
	}

	avs_ipc_block(adev->ipc);
	avs_dsp_op(adev, int_control, false);
	snd_hdac_ext_bus_ppcap_int_enable(bus, false);

	ret = avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
	if (ret < 0) {
		dev_err(adev->dev, "core_mask %ld disable failed: %d\n", AVS_MAIN_CORE_MASK, ret);
		return ret;
	}

	snd_hdac_ext_bus_ppcap_enable(bus, false);
	/* disable LP SRAM retention */
	avs_hda_power_gating_enable(adev, false);
	snd_hdac_bus_stop_chip(bus);
	/* disable CG when putting controller to reset */
	avs_hdac_clock_gating_enable(bus, false);
	snd_hdac_bus_enter_link_reset(bus);
	avs_hdac_clock_gating_enable(bus, true);

	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);

	return 0;
}

static int avs_resume_standby(struct avs_dev *adev)
{
	struct hdac_bus *bus = &adev->base.core;
	struct pci_dev *pci = adev->base.pci;

	pci_restore_state(pci);
	disable_irq_wake(pci->irq);

	snd_hdac_ext_bus_link_power_up_all(bus);

	if (bus->cmd_dma_state)
		snd_hdac_bus_init_cmd_io(bus);

	return 0;
}

static int avs_resume_common(struct avs_dev *adev, bool low_power, bool purge)
{
	struct hdac_bus *bus = &adev->base.core;
	int ret;

	if (low_power && adev->num_lp_paths)
		return avs_resume_standby(adev);

	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
	avs_hdac_bus_init_chip(bus, true);

	snd_hdac_ext_bus_ppcap_enable(bus, true);
	snd_hdac_ext_bus_ppcap_int_enable(bus, true);

	ret = avs_dsp_boot_firmware(adev, purge);
	if (ret < 0) {
		dev_err(adev->dev, "firmware boot failed: %d\n", ret);
		return ret;
	}

	return 0;
}

static int avs_suspend(struct device *dev)
{
	return avs_suspend_common(to_avs_dev(dev), true);
}

static int avs_resume(struct device *dev)
{
	return avs_resume_common(to_avs_dev(dev), true, true);
}

static int avs_runtime_suspend(struct device *dev)
{
	return avs_suspend_common(to_avs_dev(dev), true);
}

static int avs_runtime_resume(struct device *dev)
{
	return avs_resume_common(to_avs_dev(dev), true, false);
}

static int avs_freeze(struct device *dev)
{
	return avs_suspend_common(to_avs_dev(dev), false);
}

static int avs_thaw(struct device *dev)
{
	return avs_resume_common(to_avs_dev(dev), false, true);
}

static int avs_poweroff(struct device *dev)
{
	return avs_suspend_common(to_avs_dev(dev), false);
}

static int avs_restore(struct device *dev)
{
	return avs_resume_common(to_avs_dev(dev), false, true);
}

static const struct dev_pm_ops avs_dev_pm = {
	.suspend = avs_suspend,
	.resume = avs_resume,
	.freeze = avs_freeze,
	.thaw = avs_thaw,
	.poweroff = avs_poweroff,
	.restore = avs_restore,
	RUNTIME_PM_OPS(avs_runtime_suspend, avs_runtime_resume, NULL)
};

static const struct avs_sram_spec skl_sram_spec = {
	.base_offset = SKL_ADSP_SRAM_BASE_OFFSET,
	.window_size = SKL_ADSP_SRAM_WINDOW_SIZE,
};

static const struct avs_sram_spec apl_sram_spec = {
	.base_offset = APL_ADSP_SRAM_BASE_OFFSET,
	.window_size = APL_ADSP_SRAM_WINDOW_SIZE,
};

static const struct avs_sram_spec mtl_sram_spec = {
	.base_offset = MTL_ADSP_SRAM_BASE_OFFSET,
	.window_size = MTL_ADSP_SRAM_WINDOW_SIZE,
};

static const struct avs_hipc_spec skl_hipc_spec = {
	.req_offset = SKL_ADSP_REG_HIPCI,
	.req_ext_offset = SKL_ADSP_REG_HIPCIE,
	.req_busy_mask = SKL_ADSP_HIPCI_BUSY,
	.ack_offset = SKL_ADSP_REG_HIPCIE,
	.ack_done_mask = SKL_ADSP_HIPCIE_DONE,
	.rsp_offset = SKL_ADSP_REG_HIPCT,
	.rsp_busy_mask = SKL_ADSP_HIPCT_BUSY,
	.ctl_offset = SKL_ADSP_REG_HIPCCTL,
	.sts_offset = SKL_ADSP_SRAM_BASE_OFFSET,
};

static const struct avs_hipc_spec apl_hipc_spec = {
	.req_offset = SKL_ADSP_REG_HIPCI,
	.req_ext_offset = SKL_ADSP_REG_HIPCIE,
	.req_busy_mask = SKL_ADSP_HIPCI_BUSY,
	.ack_offset = SKL_ADSP_REG_HIPCIE,
	.ack_done_mask = SKL_ADSP_HIPCIE_DONE,
	.rsp_offset = SKL_ADSP_REG_HIPCT,
	.rsp_busy_mask = SKL_ADSP_HIPCT_BUSY,
	.ctl_offset = SKL_ADSP_REG_HIPCCTL,
	.sts_offset = APL_ADSP_SRAM_BASE_OFFSET,
};

static const struct avs_hipc_spec cnl_hipc_spec = {
	.req_offset = CNL_ADSP_REG_HIPCIDR,
	.req_ext_offset = CNL_ADSP_REG_HIPCIDD,
	.req_busy_mask = CNL_ADSP_HIPCIDR_BUSY,
	.ack_offset = CNL_ADSP_REG_HIPCIDA,
	.ack_done_mask = CNL_ADSP_HIPCIDA_DONE,
	.rsp_offset = CNL_ADSP_REG_HIPCTDR,
	.rsp_busy_mask = CNL_ADSP_HIPCTDR_BUSY,
	.ctl_offset = CNL_ADSP_REG_HIPCCTL,
	.sts_offset = APL_ADSP_SRAM_BASE_OFFSET,
};

static const struct avs_hipc_spec lnl_hipc_spec = {
	.req_offset = MTL_REG_HfIPCxIDR,
	.req_ext_offset = MTL_REG_HfIPCxIDD,
	.req_busy_mask = MTL_HfIPCxIDR_BUSY,
	.ack_offset = MTL_REG_HfIPCxIDA,
	.ack_done_mask = MTL_HfIPCxIDA_DONE,
	.rsp_offset = MTL_REG_HfIPCxTDR,
	.rsp_busy_mask = MTL_HfIPCxTDR_BUSY,
	.ctl_offset = MTL_REG_HfIPCxCTL,
	.sts_offset = LNL_REG_HfDFR(0),
};

static const struct avs_spec skl_desc = {
	.name = "skl",
	.min_fw_version = { 9, 21, 0, 4732 },
	.dsp_ops = &avs_skl_dsp_ops,
	.core_init_mask = 1,
	.attributes = AVS_PLATATTR_CLDMA,
	.sram = &skl_sram_spec,
	.hipc = &skl_hipc_spec,
};

static const struct avs_spec apl_desc = {
	.name = "apl",
	.min_fw_version = { 9, 22, 1, 4323 },
	.dsp_ops = &avs_apl_dsp_ops,
	.core_init_mask = 3,
	.attributes = AVS_PLATATTR_IMR,
	.sram = &apl_sram_spec,
	.hipc = &apl_hipc_spec,
};

static const struct avs_spec cnl_desc = {
	.name = "cnl",
	.min_fw_version = { 10, 23, 0, 5314 },
	.dsp_ops = &avs_cnl_dsp_ops,
	.core_init_mask = 1,
	.attributes = AVS_PLATATTR_IMR,
	.sram = &apl_sram_spec,
	.hipc = &cnl_hipc_spec,
};

static const struct avs_spec icl_desc = {
	.name = "icl",
	.min_fw_version = { 10, 23, 0, 5040 },
	.dsp_ops = &avs_icl_dsp_ops,
	.core_init_mask = 1,
	.attributes = AVS_PLATATTR_IMR,
	.sram = &apl_sram_spec,
	.hipc = &cnl_hipc_spec,
};

static const struct avs_spec jsl_desc = {
	.name = "jsl",
	.min_fw_version = { 10, 26, 0, 5872 },
	.dsp_ops = &avs_icl_dsp_ops,
	.core_init_mask = 1,
	.attributes = AVS_PLATATTR_IMR,
	.sram = &apl_sram_spec,
	.hipc = &cnl_hipc_spec,
};

#define AVS_TGL_BASED_SPEC(sname, min)			\
static const struct avs_spec sname##_desc = {		\
	.name = #sname,					\
	.min_fw_version = { 10, min, 0, 5646 },		\
	.dsp_ops = &avs_tgl_dsp_ops,			\
	.core_init_mask = 1,				\
	.attributes = AVS_PLATATTR_IMR,			\
	.sram = &apl_sram_spec,				\
	.hipc = &cnl_hipc_spec,				\
}

AVS_TGL_BASED_SPEC(lkf, 28);
AVS_TGL_BASED_SPEC(tgl, 29);
AVS_TGL_BASED_SPEC(ehl, 30);
AVS_TGL_BASED_SPEC(adl, 35);
AVS_TGL_BASED_SPEC(adl_n, 35);

static const struct avs_spec fcl_desc = {
	.name = "fcl",
	.min_fw_version = { 0 },
	.dsp_ops = &avs_ptl_dsp_ops,
	.core_init_mask = 1,
	.attributes = AVS_PLATATTR_IMR | AVS_PLATATTR_ACE | AVS_PLATATTR_ALTHDA,
	.sram = &mtl_sram_spec,
	.hipc = &lnl_hipc_spec,
};

static const struct pci_device_id avs_ids[] = {
	{ PCI_DEVICE_DATA(INTEL, HDA_SKL_LP, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_SKL, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_KBL_LP, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_KBL, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_KBL_H, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_CML_S, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_APL, &apl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_GML, &apl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_CNL_LP, &cnl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_CNL_H, &cnl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_CML_LP, &cnl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_CML_H, &cnl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_RKL_S, &cnl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ICL_LP, &icl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ICL_N, &icl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ICL_H, &icl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_JSL_N, &jsl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_LKF, &lkf_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_TGL_LP, &tgl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_TGL_H, &tgl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_CML_R, &tgl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_EHL_0, &ehl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_EHL_3, &ehl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_S, &adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_P, &adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_PS, &adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_M, &adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_PX, &adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_N, &adl_n_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_RPL_S, &adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_RPL_P_0, &adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_RPL_P_1, &adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_RPL_M, &adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_RPL_PX, &adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_FCL, &fcl_desc) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, avs_ids);

static struct pci_driver avs_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = avs_ids,
	.probe = avs_pci_probe,
	.remove = avs_pci_remove,
	.shutdown = avs_pci_shutdown,
	.dev_groups = avs_attr_groups,
	.driver = {
		.pm = pm_ptr(&avs_dev_pm),
	},
};
module_pci_driver(avs_pci_driver);

MODULE_AUTHOR("Cezary Rojewski <cezary.rojewski@intel.com>");
MODULE_AUTHOR("Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>");
MODULE_DESCRIPTION("Intel cAVS sound driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("intel/avs/skl/dsp_basefw.bin");
MODULE_FIRMWARE("intel/avs/apl/dsp_basefw.bin");
MODULE_FIRMWARE("intel/avs/cnl/dsp_basefw.bin");
MODULE_FIRMWARE("intel/avs/icl/dsp_basefw.bin");
MODULE_FIRMWARE("intel/avs/jsl/dsp_basefw.bin");
MODULE_FIRMWARE("intel/avs/lkf/dsp_basefw.bin");
MODULE_FIRMWARE("intel/avs/tgl/dsp_basefw.bin");
MODULE_FIRMWARE("intel/avs/ehl/dsp_basefw.bin");
MODULE_FIRMWARE("intel/avs/adl/dsp_basefw.bin");
MODULE_FIRMWARE("intel/avs/adl_n/dsp_basefw.bin");
MODULE_FIRMWARE("intel/fcl/dsp_basefw.bin");
959