xref: /linux/sound/soc/intel/avs/core.c (revision 177bf8620cf4ed290ee170a6c5966adc0924b336)
1 // SPDX-License-Identifier: GPL-2.0-only
2 //
3 // Copyright(c) 2021-2022 Intel Corporation
4 //
5 // Authors: Cezary Rojewski <cezary.rojewski@intel.com>
6 //          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
7 //
8 // Special thanks to:
9 //    Krzysztof Hejmowski <krzysztof.hejmowski@intel.com>
10 //    Michal Sienkiewicz <michal.sienkiewicz@intel.com>
11 //    Filip Proborszcz
12 //
13 // for sharing Intel AudioDSP expertise and helping shape the very
14 // foundation of this driver
15 //
16 
17 #include <linux/acpi.h>
18 #include <linux/module.h>
19 #include <linux/pci.h>
20 #include <acpi/nhlt.h>
21 #include <sound/hda_codec.h>
22 #include <sound/hda_i915.h>
23 #include <sound/hda_register.h>
24 #include <sound/hdaudio.h>
25 #include <sound/hdaudio_ext.h>
26 #include <sound/intel-dsp-config.h>
27 #include "../../codecs/hda.h"
28 #include "avs.h"
29 #include "cldma.h"
30 #include "messages.h"
31 #include "pcm.h"
32 
33 static u32 pgctl_mask = AZX_PGCTL_LSRMD_MASK;
34 module_param(pgctl_mask, uint, 0444);
35 MODULE_PARM_DESC(pgctl_mask, "PCI PGCTL policy override");
36 
37 static u32 cgctl_mask = AZX_CGCTL_MISCBDCGE_MASK;
38 module_param(cgctl_mask, uint, 0444);
39 MODULE_PARM_DESC(cgctl_mask, "PCI CGCTL policy override");
40 
41 static void
42 avs_hda_update_config_dword(struct hdac_bus *bus, u32 reg, u32 mask, u32 value)
43 {
44 	struct pci_dev *pci = to_pci_dev(bus->dev);
45 	u32 data;
46 
47 	pci_read_config_dword(pci, reg, &data);
48 	data &= ~mask;
49 	data |= (value & mask);
50 	pci_write_config_dword(pci, reg, data);
51 }
52 
53 void avs_hda_power_gating_enable(struct avs_dev *adev, bool enable)
54 {
55 	u32 value = enable ? 0 : pgctl_mask;
56 
57 	if (!avs_platattr_test(adev, ACE))
58 		avs_hda_update_config_dword(&adev->base.core, AZX_PCIREG_PGCTL, pgctl_mask, value);
59 }
60 
61 static void avs_hdac_clock_gating_enable(struct hdac_bus *bus, bool enable)
62 {
63 	struct avs_dev *adev = hdac_to_avs(bus);
64 	u32 value = enable ? cgctl_mask : 0;
65 
66 	if (!avs_platattr_test(adev, ACE))
67 		avs_hda_update_config_dword(bus, AZX_PCIREG_CGCTL, cgctl_mask, value);
68 }
69 
70 void avs_hda_clock_gating_enable(struct avs_dev *adev, bool enable)
71 {
72 	avs_hdac_clock_gating_enable(&adev->base.core, enable);
73 }
74 
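/*
 * Note: AZX_VS_EM2_L1SEN is reference counted below. Each disable request
 * decrements l1sen_counter and the first transition to -1 clears the bit;
 * enable requests increment the counter and the bit is set again once it
 * returns to 0.
 */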
75 void avs_hda_l1sen_enable(struct avs_dev *adev, bool enable)
76 {
77 	if (avs_platattr_test(adev, ACE))
78 		return;
79 	if (enable) {
80 		if (atomic_inc_and_test(&adev->l1sen_counter))
81 			snd_hdac_chip_updatel(&adev->base.core, VS_EM2, AZX_VS_EM2_L1SEN,
82 					      AZX_VS_EM2_L1SEN);
83 	} else {
84 		if (atomic_dec_return(&adev->l1sen_counter) == -1)
85 			snd_hdac_chip_updatel(&adev->base.core, VS_EM2, AZX_VS_EM2_L1SEN, 0);
86 	}
87 }
88 
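/*
 * Per the HDA specification, GCAP bits 11:8 report the number of input
 * (capture) streams and bits 15:12 the number of output (playback) streams;
 * the shifts by 8 and 12 below extract exactly that.
 */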
89 static int avs_hdac_bus_init_streams(struct hdac_bus *bus)
90 {
91 	unsigned int cp_streams, pb_streams;
92 	unsigned int gcap;
93 
94 	gcap = snd_hdac_chip_readw(bus, GCAP);
95 	cp_streams = (gcap >> 8) & 0x0F;
96 	pb_streams = (gcap >> 12) & 0x0F;
97 	bus->num_streams = cp_streams + pb_streams;
98 
99 	snd_hdac_ext_stream_init_all(bus, 0, cp_streams, SNDRV_PCM_STREAM_CAPTURE);
100 	snd_hdac_ext_stream_init_all(bus, cp_streams, pb_streams, SNDRV_PCM_STREAM_PLAYBACK);
101 
102 	return snd_hdac_bus_alloc_stream_pages(bus);
103 }
104 
105 static bool avs_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
106 {
107 	struct avs_dev *adev = hdac_to_avs(bus);
108 	struct hdac_ext_link *hlink;
109 	bool ret;
110 
111 	avs_hdac_clock_gating_enable(bus, false);
112 	ret = snd_hdac_bus_init_chip(bus, full_reset);
113 
114 	/* Reset stream-to-link mapping */
115 	list_for_each_entry(hlink, &bus->hlink_list, list)
116 		writel(0, hlink->ml_addr + AZX_REG_ML_LOSIDV);
117 
118 	avs_hdac_clock_gating_enable(bus, true);
119 
120 	/* Set DUM bit to address incorrect position reporting for capture
121 	 * streams. In order to do so, CTRL needs to be out of reset state
122 	 */
123 	if (!avs_platattr_test(adev, ACE))
124 		snd_hdac_chip_updatel(bus, VS_EM2, AZX_VS_EM2_DUM, AZX_VS_EM2_DUM);
125 
126 	return ret;
127 }
128 
129 static int probe_codec(struct hdac_bus *bus, int addr)
130 {
131 	struct hda_codec *codec;
132 	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
133 			   (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
134 	unsigned int res = -1;
135 	int ret;
136 
137 	mutex_lock(&bus->cmd_mutex);
138 	snd_hdac_bus_send_cmd(bus, cmd);
139 	snd_hdac_bus_get_response(bus, addr, &res);
140 	mutex_unlock(&bus->cmd_mutex);
141 	if (res == -1)
142 		return -EIO;
143 
144 	dev_dbg(bus->dev, "codec #%d probed OK: 0x%x\n", addr, res);
145 
146 	codec = snd_hda_codec_device_init(to_hda_bus(bus), addr, "hdaudioB%dD%d", bus->idx, addr);
147 	if (IS_ERR(codec)) {
148 		dev_err(bus->dev, "init codec failed: %ld\n", PTR_ERR(codec));
149 		return PTR_ERR(codec);
150 	}
151 	/*
152 	 * Allow avs_core suspend by forcing suspended state on all
153 	 * of its codec child devices. A component interested in
154 	 * dealing with HDA codecs directly takes over PM responsibilities
155 	 */
156 	pm_runtime_set_suspended(hda_codec_dev(codec));
157 
158 	/* configure effectively creates new ASoC component */
159 	ret = snd_hda_codec_configure(codec);
160 	if (ret < 0) {
161 		dev_warn(bus->dev, "failed to config codec #%d: %d\n", addr, ret);
162 		return ret;
163 	}
164 
165 	return 0;
166 }
167 
168 static void avs_hdac_bus_probe_codecs(struct hdac_bus *bus)
169 {
170 	int ret, c;
171 
172 	/* First try to probe all given codec slots */
173 	for (c = 0; c < HDA_MAX_CODECS; c++) {
174 		if (!(bus->codec_mask & BIT(c)))
175 			continue;
176 
177 		ret = probe_codec(bus, c);
178 		/* Ignore codecs with no supporting driver. */
179 		if (!ret || ret == -ENODEV)
180 			continue;
181 
182 		/*
183 		 * Some BIOSes report codec addresses
184 		 * that don't actually exist
185 		 */
186 		dev_warn(bus->dev, "Codec #%d probe error; disabling it...\n", c);
187 		bus->codec_mask &= ~BIT(c);
188 		/*
189 		 * Worse, accessing a non-existent
190 		 * codec often screws up the controller bus
191 		 * and disturbs further communication.
192 		 * Thus, if an error occurs during probing,
193 		 * it is better to reset the controller bus to get
194 		 * back to a sane state.
195 		 */
196 		snd_hdac_bus_stop_chip(bus);
197 		avs_hdac_bus_init_chip(bus, true);
198 	}
199 }
200 
201 static void avs_hda_probe_work(struct work_struct *work)
202 {
203 	struct avs_dev *adev = container_of(work, struct avs_dev, probe_work);
204 	struct hdac_bus *bus = &adev->base.core;
205 	struct hdac_ext_link *hlink;
206 	int ret;
207 
208 	pm_runtime_set_active(bus->dev); /* clear runtime_error flag */
209 
210 	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
211 	avs_hdac_bus_init_chip(bus, true);
212 	avs_hdac_bus_probe_codecs(bus);
213 	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
214 
215 	/* with all codecs probed, links can be powered down */
216 	list_for_each_entry(hlink, &bus->hlink_list, list)
217 		snd_hdac_ext_bus_link_put(bus, hlink);
218 
219 	snd_hdac_ext_bus_ppcap_enable(bus, true);
220 	snd_hdac_ext_bus_ppcap_int_enable(bus, true);
221 	avs_debugfs_init(adev);
222 
223 	ret = avs_dsp_first_boot_firmware(adev);
224 	if (ret < 0)
225 		return;
226 
227 	acpi_nhlt_get_gbl_table();
228 
229 	avs_register_all_boards(adev);
230 
231 	/* configure PM */
232 	pm_runtime_set_autosuspend_delay(bus->dev, 2000);
233 	pm_runtime_use_autosuspend(bus->dev);
234 	pm_runtime_put_autosuspend(bus->dev);
235 	pm_runtime_allow(bus->dev);
236 }
237 
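/*
 * Accumulate the number of bytes the DMA advanced since the previous
 * update; the (pos < prev_pos) branch handles the position wrapping
 * around the end of the ring buffer.
 */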
238 static void hdac_stream_update_pos(struct hdac_stream *stream, u64 buffer_size)
239 {
240 	u64 prev_pos, pos, num_bytes;
241 
242 	div64_u64_rem(stream->curr_pos, buffer_size, &prev_pos);
243 	pos = snd_hdac_stream_get_pos_posbuf(stream);
244 
245 	if (pos < prev_pos)
246 		num_bytes = (buffer_size - prev_pos) +  pos;
247 	else
248 		num_bytes = pos - prev_pos;
249 
250 	stream->curr_pos += num_bytes;
251 }
252 
253 /* called from IRQ */
254 static void hdac_update_stream(struct hdac_bus *bus, struct hdac_stream *stream)
255 {
256 	if (stream->substream) {
257 		avs_period_elapsed(stream->substream);
258 	} else if (stream->cstream) {
259 		u64 buffer_size = stream->cstream->runtime->buffer_size;
260 
261 		hdac_stream_update_pos(stream, buffer_size);
262 		snd_compr_fragment_elapsed(stream->cstream);
263 	}
264 }
265 
266 static irqreturn_t avs_hda_interrupt(struct hdac_bus *bus)
267 {
268 	irqreturn_t ret = IRQ_NONE;
269 	u32 status;
270 
271 	status = snd_hdac_chip_readl(bus, INTSTS);
272 	if (snd_hdac_bus_handle_stream_irq(bus, status, hdac_update_stream))
273 		ret = IRQ_HANDLED;
274 
275 	spin_lock_irq(&bus->reg_lock);
276 	/* Clear RIRB interrupt. */
277 	status = snd_hdac_chip_readb(bus, RIRBSTS);
278 	if (status & RIRB_INT_MASK) {
279 		if (status & RIRB_INT_RESPONSE)
280 			snd_hdac_bus_update_rirb(bus);
281 		snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);
282 		ret = IRQ_HANDLED;
283 	}
284 
285 	spin_unlock_irq(&bus->reg_lock);
286 	return ret;
287 }
288 
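/*
 * Interrupt handling is split in two stages: the hard handlers below only
 * check INTSTS and mask the global interrupt enable (GIE) bit, while the
 * threaded handlers perform the actual stream, RIRB and DSP (PPSTS) work
 * and unmask GIE again once finished.
 */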
289 static irqreturn_t avs_hda_irq_handler(int irq, void *dev_id)
290 {
291 	struct hdac_bus *bus = dev_id;
292 	u32 intsts;
293 
294 	intsts = snd_hdac_chip_readl(bus, INTSTS);
295 	if (intsts == UINT_MAX || !(intsts & AZX_INT_GLOBAL_EN))
296 		return IRQ_NONE;
297 
298 	/* Mask GIE, unmasked in irq_thread(). */
299 	snd_hdac_chip_updatel(bus, INTCTL, AZX_INT_GLOBAL_EN, 0);
300 
301 	return IRQ_WAKE_THREAD;
302 }
303 
304 static irqreturn_t avs_hda_irq_thread(int irq, void *dev_id)
305 {
306 	struct hdac_bus *bus = dev_id;
307 	u32 status;
308 
309 	status = snd_hdac_chip_readl(bus, INTSTS);
310 	if (status & ~AZX_INT_GLOBAL_EN)
311 		avs_hda_interrupt(bus);
312 
313 	/* Unmask GIE, masked in irq_handler(). */
314 	snd_hdac_chip_updatel(bus, INTCTL, AZX_INT_GLOBAL_EN, AZX_INT_GLOBAL_EN);
315 
316 	return IRQ_HANDLED;
317 }
318 
319 static irqreturn_t avs_dsp_irq_handler(int irq, void *dev_id)
320 {
321 	struct avs_dev *adev = dev_id;
322 
323 	return avs_hda_irq_handler(irq, &adev->base.core);
324 }
325 
326 static irqreturn_t avs_dsp_irq_thread(int irq, void *dev_id)
327 {
328 	struct avs_dev *adev = dev_id;
329 	struct hdac_bus *bus = &adev->base.core;
330 	u32 status;
331 
332 	status = readl(bus->ppcap + AZX_REG_PP_PPSTS);
333 	if (status & AZX_PPCTL_PIE)
334 		avs_dsp_op(adev, dsp_interrupt);
335 
336 	/* Unmask GIE, masked in irq_handler(). */
337 	snd_hdac_chip_updatel(bus, INTCTL, AZX_INT_GLOBAL_EN, AZX_INT_GLOBAL_EN);
338 
339 	return IRQ_HANDLED;
340 }
341 
342 static int avs_hdac_acquire_irq(struct avs_dev *adev)
343 {
344 	struct hdac_bus *bus = &adev->base.core;
345 	struct pci_dev *pci = to_pci_dev(bus->dev);
346 	int ret;
347 
348 	/* request one and check that we only got one interrupt */
349 	ret = pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_MSI | PCI_IRQ_INTX);
350 	if (ret != 1) {
351 		dev_err(adev->dev, "Failed to allocate IRQ vector: %d\n", ret);
352 		return ret;
353 	}
354 
355 	ret = pci_request_irq(pci, 0, avs_hda_irq_handler, avs_hda_irq_thread, bus,
356 			      KBUILD_MODNAME);
357 	if (ret < 0) {
358 		dev_err(adev->dev, "Failed to request stream IRQ handler: %d\n", ret);
359 		goto free_vector;
360 	}
361 
362 	ret = pci_request_irq(pci, 0, avs_dsp_irq_handler, avs_dsp_irq_thread, adev,
363 			      KBUILD_MODNAME);
364 	if (ret < 0) {
365 		dev_err(adev->dev, "Failed to request IPC IRQ handler: %d\n", ret);
366 		goto free_stream_irq;
367 	}
368 
369 	return 0;
370 
371 free_stream_irq:
372 	pci_free_irq(pci, 0, bus);
373 free_vector:
374 	pci_free_irq_vectors(pci);
375 	return ret;
376 }
377 
378 static int avs_bus_init(struct avs_dev *adev, struct pci_dev *pci, const struct pci_device_id *id)
379 {
380 	struct hda_bus *bus = &adev->base;
381 	struct avs_ipc *ipc;
382 	struct device *dev = &pci->dev;
383 	int ret;
384 
385 	ret = snd_hdac_ext_bus_init(&bus->core, dev, NULL, &soc_hda_ext_bus_ops);
386 	if (ret < 0)
387 		return ret;
388 
389 	bus->core.use_posbuf = 1;
390 	bus->core.bdl_pos_adj = 0;
391 	bus->core.sync_write = 1;
392 	bus->pci = pci;
393 	bus->mixer_assigned = -1;
394 	mutex_init(&bus->prepare_mutex);
395 
396 	ipc = devm_kzalloc(dev, sizeof(*ipc), GFP_KERNEL);
397 	if (!ipc)
398 		return -ENOMEM;
399 	ret = avs_ipc_init(ipc, dev);
400 	if (ret < 0)
401 		return ret;
402 
403 	adev->modcfg_buf = devm_kzalloc(dev, AVS_MAILBOX_SIZE, GFP_KERNEL);
404 	if (!adev->modcfg_buf)
405 		return -ENOMEM;
406 
407 	adev->dev = dev;
408 	adev->spec = (const struct avs_spec *)id->driver_data;
409 	adev->ipc = ipc;
410 	adev->hw_cfg.dsp_cores = hweight_long(AVS_MAIN_CORE_MASK);
411 	INIT_WORK(&adev->probe_work, avs_hda_probe_work);
412 	INIT_LIST_HEAD(&adev->comp_list);
413 	INIT_LIST_HEAD(&adev->path_list);
414 	INIT_LIST_HEAD(&adev->fw_list);
415 	init_completion(&adev->fw_ready);
416 	spin_lock_init(&adev->path_list_lock);
417 	mutex_init(&adev->modres_mutex);
418 	mutex_init(&adev->comp_list_mutex);
419 	mutex_init(&adev->path_mutex);
420 
421 	return 0;
422 }
423 
424 static int avs_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
425 {
426 	struct hdac_bus *bus;
427 	struct avs_dev *adev;
428 	struct device *dev = &pci->dev;
429 	int ret;
430 
431 	ret = snd_intel_dsp_driver_probe(pci);
432 	switch (ret) {
433 	case SND_INTEL_DSP_DRIVER_ANY:
434 	case SND_INTEL_DSP_DRIVER_SST:
435 	case SND_INTEL_DSP_DRIVER_AVS:
436 		break;
437 	default:
438 		return -ENODEV;
439 	}
440 
441 	ret = pcim_enable_device(pci);
442 	if (ret < 0)
443 		return ret;
444 
445 	adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
446 	if (!adev)
447 		return -ENOMEM;
448 	ret = avs_bus_init(adev, pci, id);
449 	if (ret < 0) {
450 		dev_err(dev, "failed to init avs bus: %d\n", ret);
451 		return ret;
452 	}
453 
454 	ret = pcim_request_all_regions(pci, "AVS HDAudio");
455 	if (ret < 0)
456 		return ret;
457 
458 	bus = &adev->base.core;
459 	bus->addr = pci_resource_start(pci, 0);
460 	bus->remap_addr = pci_ioremap_bar(pci, 0);
461 	if (!bus->remap_addr) {
462 		dev_err(bus->dev, "ioremap error\n");
463 		return -ENXIO;
464 	}
465 
466 	adev->dsp_ba = pci_ioremap_bar(pci, 4);
467 	if (!adev->dsp_ba) {
468 		dev_err(bus->dev, "ioremap error\n");
469 		ret = -ENXIO;
470 		goto err_remap_bar4;
471 	}
472 
473 	snd_hdac_bus_parse_capabilities(bus);
474 	if (bus->mlcap)
475 		snd_hdac_ext_bus_get_ml_capabilities(bus);
476 
477 	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
478 		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
479 	dma_set_max_seg_size(dev, UINT_MAX);
480 
481 	ret = avs_hdac_bus_init_streams(bus);
482 	if (ret < 0) {
483 		dev_err(dev, "failed to init streams: %d\n", ret);
484 		goto err_init_streams;
485 	}
486 
487 	ret = avs_hdac_acquire_irq(adev);
488 	if (ret < 0) {
489 		dev_err(bus->dev, "failed to acquire irq: %d\n", ret);
490 		goto err_acquire_irq;
491 	}
492 
493 	pci_set_master(pci);
494 	pci_set_drvdata(pci, bus);
495 	device_disable_async_suspend(dev);
496 
497 	ret = snd_hdac_i915_init(bus);
498 	if (ret == -EPROBE_DEFER)
499 		goto err_i915_init;
500 	else if (ret < 0)
501 		dev_info(bus->dev, "i915 init unsuccessful: %d\n", ret);
502 
503 	schedule_work(&adev->probe_work);
504 
505 	return 0;
506 
507 err_i915_init:
508 	pci_free_irq(pci, 0, adev);
509 	pci_free_irq(pci, 0, bus);
510 	pci_free_irq_vectors(pci);
511 	pci_clear_master(pci);
512 	pci_set_drvdata(pci, NULL);
513 err_acquire_irq:
514 	snd_hdac_bus_free_stream_pages(bus);
515 	snd_hdac_ext_stream_free_all(bus);
516 err_init_streams:
517 	iounmap(adev->dsp_ba);
518 err_remap_bar4:
519 	iounmap(bus->remap_addr);
520 	return ret;
521 }
522 
523 static void avs_pci_shutdown(struct pci_dev *pci)
524 {
525 	struct hdac_bus *bus = pci_get_drvdata(pci);
526 	struct avs_dev *adev = hdac_to_avs(bus);
527 
528 	cancel_work_sync(&adev->probe_work);
529 	avs_ipc_block(adev->ipc);
530 
531 	snd_hdac_stop_streams(bus);
532 	avs_dsp_op(adev, int_control, false);
533 	snd_hdac_ext_bus_ppcap_int_enable(bus, false);
534 	snd_hdac_ext_bus_link_power_down_all(bus);
535 
536 	snd_hdac_bus_stop_chip(bus);
537 	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
538 
539 	pci_free_irq(pci, 0, adev);
540 	pci_free_irq(pci, 0, bus);
541 	pci_free_irq_vectors(pci);
542 }
543 
544 static void avs_pci_remove(struct pci_dev *pci)
545 {
546 	struct hdac_device *hdev, *save;
547 	struct hdac_bus *bus = pci_get_drvdata(pci);
548 	struct avs_dev *adev = hdac_to_avs(bus);
549 
550 	cancel_work_sync(&adev->probe_work);
551 	avs_ipc_block(adev->ipc);
552 
553 	avs_unregister_all_boards(adev);
554 
555 	acpi_nhlt_put_gbl_table();
556 	avs_debugfs_exit(adev);
557 
558 	if (avs_platattr_test(adev, CLDMA))
559 		hda_cldma_free(&code_loader);
560 
561 	snd_hdac_stop_streams_and_chip(bus);
562 	avs_dsp_op(adev, int_control, false);
563 	snd_hdac_ext_bus_ppcap_int_enable(bus, false);
564 
565 	/* it is safe to remove all codecs from the system now */
566 	list_for_each_entry_safe(hdev, save, &bus->codec_list, list)
567 		snd_hda_codec_unregister(hdac_to_hda_codec(hdev));
568 
569 	snd_hdac_bus_free_stream_pages(bus);
570 	snd_hdac_ext_stream_free_all(bus);
571 	/* reverse ml_capabilities */
572 	snd_hdac_ext_link_free_all(bus);
573 	snd_hdac_ext_bus_exit(bus);
574 
575 	avs_dsp_core_disable(adev, GENMASK(adev->hw_cfg.dsp_cores - 1, 0));
576 	snd_hdac_ext_bus_ppcap_enable(bus, false);
577 
578 	/* snd_hdac_stop_streams_and_chip does that already? */
579 	snd_hdac_bus_stop_chip(bus);
580 	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
581 	if (bus->audio_component)
582 		snd_hdac_i915_exit(bus);
583 
584 	avs_module_info_free(adev);
585 	pci_free_irq(pci, 0, adev);
586 	pci_free_irq(pci, 0, bus);
587 	pci_free_irq_vectors(pci);
588 	iounmap(bus->remap_addr);
589 	iounmap(adev->dsp_ba);
590 
591 	/* Firmware is not needed anymore */
592 	avs_release_firmwares(adev);
593 
594 	/* pm_runtime_forbid() can rpm_resume() which we do not want */
595 	pm_runtime_disable(&pci->dev);
596 	pm_runtime_forbid(&pci->dev);
597 	pm_runtime_enable(&pci->dev);
598 	pm_runtime_get_noresume(&pci->dev);
599 }
600 
601 static int avs_suspend_standby(struct avs_dev *adev)
602 {
603 	struct hdac_bus *bus = &adev->base.core;
604 	struct pci_dev *pci = adev->base.pci;
605 
606 	if (bus->cmd_dma_state)
607 		snd_hdac_bus_stop_cmd_io(bus);
608 
609 	snd_hdac_ext_bus_link_power_down_all(bus);
610 
611 	enable_irq_wake(pci->irq);
612 	pci_save_state(pci);
613 
614 	return 0;
615 }
616 
617 static int avs_suspend_common(struct avs_dev *adev, bool low_power)
618 {
619 	struct hdac_bus *bus = &adev->base.core;
620 	int ret;
621 
622 	flush_work(&adev->probe_work);
623 	if (low_power && adev->num_lp_paths)
624 		return avs_suspend_standby(adev);
625 
626 	snd_hdac_ext_bus_link_power_down_all(bus);
627 
628 	ret = avs_ipc_set_dx(adev, AVS_MAIN_CORE_MASK, false);
629 	/*
630 	 * pm_runtime is blocked on DSP failure but system-wide suspend is not.
631 	 * Do not block entire system from suspending if that's the case.
632 	 */
633 	if (ret && ret != -EPERM) {
634 		dev_err(adev->dev, "set dx failed: %d\n", ret);
635 		return AVS_IPC_RET(ret);
636 	}
637 
638 	avs_ipc_block(adev->ipc);
639 	avs_dsp_op(adev, int_control, false);
640 	snd_hdac_ext_bus_ppcap_int_enable(bus, false);
641 
642 	ret = avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
643 	if (ret < 0) {
644 		dev_err(adev->dev, "core_mask %ld disable failed: %d\n", AVS_MAIN_CORE_MASK, ret);
645 		return ret;
646 	}
647 
648 	snd_hdac_ext_bus_ppcap_enable(bus, false);
649 	/* disable LP SRAM retention */
650 	avs_hda_power_gating_enable(adev, false);
651 	snd_hdac_bus_stop_chip(bus);
652 	/* disable CG when putting controller to reset */
653 	avs_hdac_clock_gating_enable(bus, false);
654 	snd_hdac_bus_enter_link_reset(bus);
655 	avs_hdac_clock_gating_enable(bus, true);
656 
657 	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
658 
659 	return 0;
660 }
661 
662 static int avs_resume_standby(struct avs_dev *adev)
663 {
664 	struct hdac_bus *bus = &adev->base.core;
665 	struct pci_dev *pci = adev->base.pci;
666 
667 	pci_restore_state(pci);
668 	disable_irq_wake(pci->irq);
669 
670 	snd_hdac_ext_bus_link_power_up_all(bus);
671 
672 	if (bus->cmd_dma_state)
673 		snd_hdac_bus_init_cmd_io(bus);
674 
675 	return 0;
676 }
677 
678 static int avs_resume_common(struct avs_dev *adev, bool low_power, bool purge)
679 {
680 	struct hdac_bus *bus = &adev->base.core;
681 	int ret;
682 
683 	if (low_power && adev->num_lp_paths)
684 		return avs_resume_standby(adev);
685 
686 	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
687 	avs_hdac_bus_init_chip(bus, true);
688 
689 	snd_hdac_ext_bus_ppcap_enable(bus, true);
690 	snd_hdac_ext_bus_ppcap_int_enable(bus, true);
691 
692 	ret = avs_dsp_boot_firmware(adev, purge);
693 	if (ret < 0) {
694 		dev_err(adev->dev, "firmware boot failed: %d\n", ret);
695 		return ret;
696 	}
697 
698 	return 0;
699 }
700 
701 static int avs_suspend(struct device *dev)
702 {
703 	return avs_suspend_common(to_avs_dev(dev), true);
704 }
705 
706 static int avs_resume(struct device *dev)
707 {
708 	return avs_resume_common(to_avs_dev(dev), true, true);
709 }
710 
711 static int avs_runtime_suspend(struct device *dev)
712 {
713 	return avs_suspend_common(to_avs_dev(dev), true);
714 }
715 
716 static int avs_runtime_resume(struct device *dev)
717 {
718 	return avs_resume_common(to_avs_dev(dev), true, false);
719 }
720 
721 static int avs_freeze(struct device *dev)
722 {
723 	return avs_suspend_common(to_avs_dev(dev), false);
724 }
725 static int avs_thaw(struct device *dev)
726 {
727 	return avs_resume_common(to_avs_dev(dev), false, true);
728 }
729 
730 static int avs_poweroff(struct device *dev)
731 {
732 	return avs_suspend_common(to_avs_dev(dev), false);
733 }
734 
735 static int avs_restore(struct device *dev)
736 {
737 	return avs_resume_common(to_avs_dev(dev), false, true);
738 }
739 
740 static const struct dev_pm_ops avs_dev_pm = {
741 	.suspend = avs_suspend,
742 	.resume = avs_resume,
743 	.freeze = avs_freeze,
744 	.thaw = avs_thaw,
745 	.poweroff = avs_poweroff,
746 	.restore = avs_restore,
747 	RUNTIME_PM_OPS(avs_runtime_suspend, avs_runtime_resume, NULL)
748 };
749 
750 static const struct avs_sram_spec skl_sram_spec = {
751 	.base_offset = SKL_ADSP_SRAM_BASE_OFFSET,
752 	.window_size = SKL_ADSP_SRAM_WINDOW_SIZE,
753 };
754 
755 static const struct avs_sram_spec apl_sram_spec = {
756 	.base_offset = APL_ADSP_SRAM_BASE_OFFSET,
757 	.window_size = APL_ADSP_SRAM_WINDOW_SIZE,
758 };
759 
760 static const struct avs_sram_spec mtl_sram_spec = {
761 	.base_offset = MTL_ADSP_SRAM_BASE_OFFSET,
762 	.window_size = MTL_ADSP_SRAM_WINDOW_SIZE,
763 };
764 
765 static const struct avs_hipc_spec skl_hipc_spec = {
766 	.req_offset = SKL_ADSP_REG_HIPCI,
767 	.req_ext_offset = SKL_ADSP_REG_HIPCIE,
768 	.req_busy_mask = SKL_ADSP_HIPCI_BUSY,
769 	.ack_offset = SKL_ADSP_REG_HIPCIE,
770 	.ack_done_mask = SKL_ADSP_HIPCIE_DONE,
771 	.rsp_offset = SKL_ADSP_REG_HIPCT,
772 	.rsp_busy_mask = SKL_ADSP_HIPCT_BUSY,
773 	.ctl_offset = SKL_ADSP_REG_HIPCCTL,
774 	.sts_offset = SKL_ADSP_SRAM_BASE_OFFSET,
775 };
776 
777 static const struct avs_hipc_spec apl_hipc_spec = {
778 	.req_offset = SKL_ADSP_REG_HIPCI,
779 	.req_ext_offset = SKL_ADSP_REG_HIPCIE,
780 	.req_busy_mask = SKL_ADSP_HIPCI_BUSY,
781 	.ack_offset = SKL_ADSP_REG_HIPCIE,
782 	.ack_done_mask = SKL_ADSP_HIPCIE_DONE,
783 	.rsp_offset = SKL_ADSP_REG_HIPCT,
784 	.rsp_busy_mask = SKL_ADSP_HIPCT_BUSY,
785 	.ctl_offset = SKL_ADSP_REG_HIPCCTL,
786 	.sts_offset = APL_ADSP_SRAM_BASE_OFFSET,
787 };
788 
789 static const struct avs_hipc_spec cnl_hipc_spec = {
790 	.req_offset = CNL_ADSP_REG_HIPCIDR,
791 	.req_ext_offset = CNL_ADSP_REG_HIPCIDD,
792 	.req_busy_mask = CNL_ADSP_HIPCIDR_BUSY,
793 	.ack_offset = CNL_ADSP_REG_HIPCIDA,
794 	.ack_done_mask = CNL_ADSP_HIPCIDA_DONE,
795 	.rsp_offset = CNL_ADSP_REG_HIPCTDR,
796 	.rsp_busy_mask = CNL_ADSP_HIPCTDR_BUSY,
797 	.ctl_offset = CNL_ADSP_REG_HIPCCTL,
798 	.sts_offset = APL_ADSP_SRAM_BASE_OFFSET,
799 };
800 
801 static const struct avs_hipc_spec lnl_hipc_spec = {
802 	.req_offset = MTL_REG_HfIPCxIDR,
803 	.req_ext_offset = MTL_REG_HfIPCxIDD,
804 	.req_busy_mask = MTL_HfIPCxIDR_BUSY,
805 	.ack_offset = MTL_REG_HfIPCxIDA,
806 	.ack_done_mask = MTL_HfIPCxIDA_DONE,
807 	.rsp_offset = MTL_REG_HfIPCxTDR,
808 	.rsp_busy_mask = MTL_HfIPCxTDR_BUSY,
809 	.ctl_offset = MTL_REG_HfIPCxCTL,
810 	.sts_offset = LNL_REG_HfDFR(0),
811 };
812 
813 static const struct avs_spec skl_desc = {
814 	.name = "skl",
815 	.min_fw_version = { 9, 21, 0, 4732 },
816 	.dsp_ops = &avs_skl_dsp_ops,
817 	.core_init_mask = 1,
818 	.attributes = AVS_PLATATTR_CLDMA,
819 	.sram = &skl_sram_spec,
820 	.hipc = &skl_hipc_spec,
821 };
822 
823 static const struct avs_spec apl_desc = {
824 	.name = "apl",
825 	.min_fw_version = { 9, 22, 1, 4323 },
826 	.dsp_ops = &avs_apl_dsp_ops,
827 	.core_init_mask = 3,
828 	.attributes = AVS_PLATATTR_IMR,
829 	.sram = &apl_sram_spec,
830 	.hipc = &apl_hipc_spec,
831 };
832 
833 static const struct avs_spec cnl_desc = {
834 	.name = "cnl",
835 	.min_fw_version = { 10, 23, 0, 5314 },
836 	.dsp_ops = &avs_cnl_dsp_ops,
837 	.core_init_mask = 1,
838 	.attributes = AVS_PLATATTR_IMR,
839 	.sram = &apl_sram_spec,
840 	.hipc = &cnl_hipc_spec,
841 };
842 
843 static const struct avs_spec icl_desc = {
844 	.name = "icl",
845 	.min_fw_version = { 10, 23, 0, 5040 },
846 	.dsp_ops = &avs_icl_dsp_ops,
847 	.core_init_mask = 1,
848 	.attributes = AVS_PLATATTR_IMR,
849 	.sram = &apl_sram_spec,
850 	.hipc = &cnl_hipc_spec,
851 };
852 
853 static const struct avs_spec jsl_desc = {
854 	.name = "jsl",
855 	.min_fw_version = { 10, 26, 0, 5872 },
856 	.dsp_ops = &avs_icl_dsp_ops,
857 	.core_init_mask = 1,
858 	.attributes = AVS_PLATATTR_IMR,
859 	.sram = &apl_sram_spec,
860 	.hipc = &cnl_hipc_spec,
861 };
862 
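/*
 * TGL-derived platforms share everything except the minor part of the
 * minimal firmware version, hence the descriptor-generating macro below.
 */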
863 #define AVS_TGL_BASED_SPEC(sname, min)		\
864 static const struct avs_spec sname##_desc = {	\
865 	.name = #sname,				\
866 	.min_fw_version = { 10,	min, 0, 5646 },	\
867 	.dsp_ops = &avs_tgl_dsp_ops,		\
868 	.core_init_mask = 1,			\
869 	.attributes = AVS_PLATATTR_IMR,		\
870 	.sram = &apl_sram_spec,			\
871 	.hipc = &cnl_hipc_spec,			\
872 }
873 
874 AVS_TGL_BASED_SPEC(lkf, 28);
875 AVS_TGL_BASED_SPEC(tgl, 29);
876 AVS_TGL_BASED_SPEC(ehl, 30);
877 AVS_TGL_BASED_SPEC(adl, 35);
878 AVS_TGL_BASED_SPEC(adl_n, 35);
879 
880 static const struct avs_spec fcl_desc = {
881 	.name = "fcl",
882 	.min_fw_version = { 0 },
883 	.dsp_ops = &avs_ptl_dsp_ops,
884 	.core_init_mask = 1,
885 	.attributes = AVS_PLATATTR_IMR | AVS_PLATATTR_ACE | AVS_PLATATTR_ALTHDA,
886 	.sram = &mtl_sram_spec,
887 	.hipc = &lnl_hipc_spec,
888 };
889 
890 static const struct pci_device_id avs_ids[] = {
891 	{ PCI_DEVICE_DATA(INTEL, HDA_SKL_LP, &skl_desc) },
892 	{ PCI_DEVICE_DATA(INTEL, HDA_SKL, &skl_desc) },
893 	{ PCI_DEVICE_DATA(INTEL, HDA_KBL_LP, &skl_desc) },
894 	{ PCI_DEVICE_DATA(INTEL, HDA_KBL, &skl_desc) },
895 	{ PCI_DEVICE_DATA(INTEL, HDA_KBL_H, &skl_desc) },
896 	{ PCI_DEVICE_DATA(INTEL, HDA_CML_S, &skl_desc) },
897 	{ PCI_DEVICE_DATA(INTEL, HDA_APL, &apl_desc) },
898 	{ PCI_DEVICE_DATA(INTEL, HDA_GML, &apl_desc) },
899 	{ PCI_DEVICE_DATA(INTEL, HDA_CNL_LP,	&cnl_desc) },
900 	{ PCI_DEVICE_DATA(INTEL, HDA_CNL_H,	&cnl_desc) },
901 	{ PCI_DEVICE_DATA(INTEL, HDA_CML_LP,	&cnl_desc) },
902 	{ PCI_DEVICE_DATA(INTEL, HDA_CML_H,	&cnl_desc) },
903 	{ PCI_DEVICE_DATA(INTEL, HDA_RKL_S,	&cnl_desc) },
904 	{ PCI_DEVICE_DATA(INTEL, HDA_ICL_LP,	&icl_desc) },
905 	{ PCI_DEVICE_DATA(INTEL, HDA_ICL_N,	&icl_desc) },
906 	{ PCI_DEVICE_DATA(INTEL, HDA_ICL_H,	&icl_desc) },
907 	{ PCI_DEVICE_DATA(INTEL, HDA_JSL_N,	&jsl_desc) },
908 	{ PCI_DEVICE_DATA(INTEL, HDA_LKF,	&lkf_desc) },
909 	{ PCI_DEVICE_DATA(INTEL, HDA_TGL_LP,	&tgl_desc) },
910 	{ PCI_DEVICE_DATA(INTEL, HDA_TGL_H,	&tgl_desc) },
911 	{ PCI_DEVICE_DATA(INTEL, HDA_CML_R,	&tgl_desc) },
912 	{ PCI_DEVICE_DATA(INTEL, HDA_EHL_0,	&ehl_desc) },
913 	{ PCI_DEVICE_DATA(INTEL, HDA_EHL_3,	&ehl_desc) },
914 	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_S,	&adl_desc) },
915 	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_P,	&adl_desc) },
916 	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_PS,	&adl_desc) },
917 	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_M,	&adl_desc) },
918 	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_PX,	&adl_desc) },
919 	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_N,	&adl_n_desc) },
920 	{ PCI_DEVICE_DATA(INTEL, HDA_RPL_S,	&adl_desc) },
921 	{ PCI_DEVICE_DATA(INTEL, HDA_RPL_P_0,	&adl_desc) },
922 	{ PCI_DEVICE_DATA(INTEL, HDA_RPL_P_1,	&adl_desc) },
923 	{ PCI_DEVICE_DATA(INTEL, HDA_RPL_M,	&adl_desc) },
924 	{ PCI_DEVICE_DATA(INTEL, HDA_RPL_PX,	&adl_desc) },
925 	{ PCI_DEVICE_DATA(INTEL, HDA_FCL,	&fcl_desc) },
926 	{ 0 }
927 };
928 MODULE_DEVICE_TABLE(pci, avs_ids);
929 
930 static struct pci_driver avs_pci_driver = {
931 	.name = KBUILD_MODNAME,
932 	.id_table = avs_ids,
933 	.probe = avs_pci_probe,
934 	.remove = avs_pci_remove,
935 	.shutdown = avs_pci_shutdown,
936 	.dev_groups = avs_attr_groups,
937 	.driver = {
938 		.pm = pm_ptr(&avs_dev_pm),
939 	},
940 };
941 module_pci_driver(avs_pci_driver);
942 
943 MODULE_AUTHOR("Cezary Rojewski <cezary.rojewski@intel.com>");
944 MODULE_AUTHOR("Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>");
945 MODULE_DESCRIPTION("Intel cAVS sound driver");
946 MODULE_LICENSE("GPL");
947 MODULE_FIRMWARE("intel/avs/skl/dsp_basefw.bin");
948 MODULE_FIRMWARE("intel/avs/apl/dsp_basefw.bin");
949 MODULE_FIRMWARE("intel/avs/cnl/dsp_basefw.bin");
950 MODULE_FIRMWARE("intel/avs/icl/dsp_basefw.bin");
951 MODULE_FIRMWARE("intel/avs/jsl/dsp_basefw.bin");
952 MODULE_FIRMWARE("intel/avs/lkf/dsp_basefw.bin");
953 MODULE_FIRMWARE("intel/avs/tgl/dsp_basefw.bin");
954 MODULE_FIRMWARE("intel/avs/ehl/dsp_basefw.bin");
955 MODULE_FIRMWARE("intel/avs/adl/dsp_basefw.bin");
956 MODULE_FIRMWARE("intel/avs/adl_n/dsp_basefw.bin");
957 MODULE_FIRMWARE("intel/fcl/dsp_basefw.bin");
958