xref: /linux/sound/soc/intel/avs/core.c (revision 01154cc30e343952d7ab1c6b35c3577725dc5d54)
1 // SPDX-License-Identifier: GPL-2.0-only
2 //
3 // Copyright(c) 2021-2022 Intel Corporation
4 //
5 // Authors: Cezary Rojewski <cezary.rojewski@intel.com>
6 //          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
7 //
8 // Special thanks to:
9 //    Krzysztof Hejmowski <krzysztof.hejmowski@intel.com>
10 //    Michal Sienkiewicz <michal.sienkiewicz@intel.com>
11 //    Filip Proborszcz
12 //
13 // for sharing Intel AudioDSP expertise and helping shape the very
14 // foundation of this driver
15 //
16 
17 #include <linux/acpi.h>
18 #include <linux/module.h>
19 #include <linux/pci.h>
20 #include <acpi/nhlt.h>
21 #include <sound/hda_codec.h>
22 #include <sound/hda_i915.h>
23 #include <sound/hda_register.h>
24 #include <sound/hdaudio.h>
25 #include <sound/hdaudio_ext.h>
26 #include <sound/intel-dsp-config.h>
27 #include "../../codecs/hda.h"
28 #include "avs.h"
29 #include "cldma.h"
30 #include "messages.h"
31 #include "pcm.h"
32 
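/*
 * PGCTL and CGCTL are Intel-specific registers in the controller's PCI
 * configuration space that govern power- and clock-gating policy. The masks
 * below are module parameters so the defaults can be overridden at load
 * time; they are read-only (0444) once the driver is loaded.
 */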
33 static u32 pgctl_mask = AZX_PGCTL_LSRMD_MASK;
34 module_param(pgctl_mask, uint, 0444);
35 MODULE_PARM_DESC(pgctl_mask, "PCI PGCTL policy override");
36 
37 static u32 cgctl_mask = AZX_CGCTL_MISCBDCGE_MASK;
38 module_param(cgctl_mask, uint, 0444);
39 MODULE_PARM_DESC(cgctl_mask, "PCI CGCTL policy override");
40 
41 static void
42 avs_hda_update_config_dword(struct hdac_bus *bus, u32 reg, u32 mask, u32 value)
43 {
44 	struct pci_dev *pci = to_pci_dev(bus->dev);
45 	u32 data;
46 
47 	pci_read_config_dword(pci, reg, &data);
48 	data &= ~mask;
49 	data |= (value & mask);
50 	pci_write_config_dword(pci, reg, data);
51 }
52 
53 void avs_hda_power_gating_enable(struct avs_dev *adev, bool enable)
54 {
55 	u32 value = enable ? 0 : pgctl_mask;
56 
57 	avs_hda_update_config_dword(&adev->base.core, AZX_PCIREG_PGCTL, pgctl_mask, value);
58 }
59 
60 static void avs_hdac_clock_gating_enable(struct hdac_bus *bus, bool enable)
61 {
62 	u32 value = enable ? cgctl_mask : 0;
63 
64 	avs_hda_update_config_dword(bus, AZX_PCIREG_CGCTL, cgctl_mask, value);
65 }
66 
67 void avs_hda_clock_gating_enable(struct avs_dev *adev, bool enable)
68 {
69 	avs_hdac_clock_gating_enable(&adev->base.core, enable);
70 }
71 
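/*
 * L1SEN (L1 Substate Enable) control is reference counted: a disable request
 * decrements l1sen_counter while an enable request increments it. The
 * vendor-specific EM2 register is only updated when the counter transitions
 * between 0 and -1, i.e. on the first disable and on the balancing enable.
 */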
72 void avs_hda_l1sen_enable(struct avs_dev *adev, bool enable)
73 {
74 	if (enable) {
75 		if (atomic_inc_and_test(&adev->l1sen_counter))
76 			snd_hdac_chip_updatel(&adev->base.core, VS_EM2, AZX_VS_EM2_L1SEN,
77 					      AZX_VS_EM2_L1SEN);
78 	} else {
79 		if (atomic_dec_return(&adev->l1sen_counter) == -1)
80 			snd_hdac_chip_updatel(&adev->base.core, VS_EM2, AZX_VS_EM2_L1SEN, 0);
81 	}
82 }
83 
84 static int avs_hdac_bus_init_streams(struct hdac_bus *bus)
85 {
86 	unsigned int cp_streams, pb_streams;
87 	unsigned int gcap;
88 
89 	gcap = snd_hdac_chip_readw(bus, GCAP);
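	/*
	 * Per the HDAudio specification, GCAP bits 11:8 hold the number of
	 * input (capture) stream engines and bits 15:12 the number of output
	 * (playback) stream engines supported by the controller.
	 */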
90 	cp_streams = (gcap >> 8) & 0x0F;
91 	pb_streams = (gcap >> 12) & 0x0F;
92 	bus->num_streams = cp_streams + pb_streams;
93 
94 	snd_hdac_ext_stream_init_all(bus, 0, cp_streams, SNDRV_PCM_STREAM_CAPTURE);
95 	snd_hdac_ext_stream_init_all(bus, cp_streams, pb_streams, SNDRV_PCM_STREAM_PLAYBACK);
96 
97 	return snd_hdac_bus_alloc_stream_pages(bus);
98 }
99 
100 static bool avs_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
101 {
102 	struct hdac_ext_link *hlink;
103 	bool ret;
104 
105 	avs_hdac_clock_gating_enable(bus, false);
106 	ret = snd_hdac_bus_init_chip(bus, full_reset);
107 
108 	/* Reset stream-to-link mapping */
109 	list_for_each_entry(hlink, &bus->hlink_list, list)
110 		writel(0, hlink->ml_addr + AZX_REG_ML_LOSIDV);
111 
112 	avs_hdac_clock_gating_enable(bus, true);
113 
114 	/* Set DUM bit to address incorrect position reporting for capture
115 	 * streams. In order to do so, CTRL needs to be out of reset state
116 	 */
117 	snd_hdac_chip_updatel(bus, VS_EM2, AZX_VS_EM2_DUM, AZX_VS_EM2_DUM);
118 
119 	return ret;
120 }
121 
122 static int probe_codec(struct hdac_bus *bus, int addr)
123 {
124 	struct hda_codec *codec;
125 	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
126 			   (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
127 	unsigned int res = -1;
128 	int ret;
129 
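	/*
	 * The verb built above reads the VENDOR_ID parameter of the codec's
	 * root node; any valid response confirms a codec is present at the
	 * given address.
	 */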
130 	mutex_lock(&bus->cmd_mutex);
131 	snd_hdac_bus_send_cmd(bus, cmd);
132 	snd_hdac_bus_get_response(bus, addr, &res);
133 	mutex_unlock(&bus->cmd_mutex);
134 	if (res == -1)
135 		return -EIO;
136 
137 	dev_dbg(bus->dev, "codec #%d probed OK: 0x%x\n", addr, res);
138 
139 	codec = snd_hda_codec_device_init(to_hda_bus(bus), addr, "hdaudioB%dD%d", bus->idx, addr);
140 	if (IS_ERR(codec)) {
141 		dev_err(bus->dev, "init codec failed: %ld\n", PTR_ERR(codec));
142 		return PTR_ERR(codec);
143 	}
144 	/*
145 	 * Allow avs_core suspend by forcing suspended state on all
146 	 * of its codec child devices. A component that deals with HDA
147 	 * codecs directly takes over the PM responsibilities.
148 	 */
149 	pm_runtime_set_suspended(hda_codec_dev(codec));
150 
151 	/* configure effectively creates new ASoC component */
152 	ret = snd_hda_codec_configure(codec);
153 	if (ret < 0) {
154 		dev_warn(bus->dev, "failed to config codec #%d: %d\n", addr, ret);
155 		return ret;
156 	}
157 
158 	return 0;
159 }
160 
161 static void avs_hdac_bus_probe_codecs(struct hdac_bus *bus)
162 {
163 	int ret, c;
164 
165 	/* First try to probe all given codec slots */
166 	for (c = 0; c < HDA_MAX_CODECS; c++) {
167 		if (!(bus->codec_mask & BIT(c)))
168 			continue;
169 
170 		ret = probe_codec(bus, c);
171 		/* Ignore codecs with no supporting driver. */
172 		if (!ret || ret == -ENODEV)
173 			continue;
174 
175 		/*
176 		 * Some BIOSes report codec addresses
177 		 * that do not actually exist
178 		 */
179 		dev_warn(bus->dev, "Codec #%d probe error; disabling it...\n", c);
180 		bus->codec_mask &= ~BIT(c);
181 		/*
182 		 * Worse, accessing a non-existent codec
183 		 * often confuses the controller bus and
184 		 * disturbs further communication.
185 		 * Thus, if an error occurs during probing,
186 		 * it is better to reset the controller bus
187 		 * to get back to a sane state.
188 		 */
189 		snd_hdac_bus_stop_chip(bus);
190 		avs_hdac_bus_init_chip(bus, true);
191 	}
192 }
193 
194 static void avs_hda_probe_work(struct work_struct *work)
195 {
196 	struct avs_dev *adev = container_of(work, struct avs_dev, probe_work);
197 	struct hdac_bus *bus = &adev->base.core;
198 	struct hdac_ext_link *hlink;
199 	int ret;
200 
201 	pm_runtime_set_active(bus->dev); /* clear runtime_error flag */
202 
203 	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
204 	avs_hdac_bus_init_chip(bus, true);
205 	avs_hdac_bus_probe_codecs(bus);
206 	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
207 
208 	/* with all codecs probed, links can be powered down */
209 	list_for_each_entry(hlink, &bus->hlink_list, list)
210 		snd_hdac_ext_bus_link_put(bus, hlink);
211 
212 	snd_hdac_ext_bus_ppcap_enable(bus, true);
213 	snd_hdac_ext_bus_ppcap_int_enable(bus, true);
214 	avs_debugfs_init(adev);
215 
216 	ret = avs_dsp_first_boot_firmware(adev);
217 	if (ret < 0)
218 		return;
219 
220 	acpi_nhlt_get_gbl_table();
221 
222 	avs_register_all_boards(adev);
223 
224 	/* configure PM */
225 	pm_runtime_set_autosuspend_delay(bus->dev, 2000);
226 	pm_runtime_use_autosuspend(bus->dev);
227 	pm_runtime_mark_last_busy(bus->dev);
228 	pm_runtime_put_autosuspend(bus->dev);
229 	pm_runtime_allow(bus->dev);
230 }
231 
232 static void hdac_stream_update_pos(struct hdac_stream *stream, u64 buffer_size)
233 {
234 	u64 prev_pos, pos, num_bytes;
235 
236 	div64_u64_rem(stream->curr_pos, buffer_size, &prev_pos);
237 	pos = snd_hdac_stream_get_pos_posbuf(stream);
238 
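	/*
	 * The DMA position wraps at the end of the ring buffer; account for
	 * the wrap when computing how many bytes elapsed since the previous
	 * snapshot.
	 */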
239 	if (pos < prev_pos)
240 		num_bytes = (buffer_size - prev_pos) + pos;
241 	else
242 		num_bytes = pos - prev_pos;
243 
244 	stream->curr_pos += num_bytes;
245 }
246 
247 /* called from IRQ */
248 static void hdac_update_stream(struct hdac_bus *bus, struct hdac_stream *stream)
249 {
250 	if (stream->substream) {
251 		avs_period_elapsed(stream->substream);
252 	} else if (stream->cstream) {
253 		u64 buffer_size = stream->cstream->runtime->buffer_size;
254 
255 		hdac_stream_update_pos(stream, buffer_size);
256 		snd_compr_fragment_elapsed(stream->cstream);
257 	}
258 }
259 
260 static irqreturn_t avs_hda_interrupt(struct hdac_bus *bus)
261 {
262 	irqreturn_t ret = IRQ_NONE;
263 	u32 status;
264 
265 	status = snd_hdac_chip_readl(bus, INTSTS);
266 	if (snd_hdac_bus_handle_stream_irq(bus, status, hdac_update_stream))
267 		ret = IRQ_HANDLED;
268 
269 	spin_lock_irq(&bus->reg_lock);
270 	/* Clear RIRB interrupt. */
271 	status = snd_hdac_chip_readb(bus, RIRBSTS);
272 	if (status & RIRB_INT_MASK) {
273 		if (status & RIRB_INT_RESPONSE)
274 			snd_hdac_bus_update_rirb(bus);
275 		snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);
276 		ret = IRQ_HANDLED;
277 	}
278 
279 	spin_unlock_irq(&bus->reg_lock);
280 	return ret;
281 }
282 
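/*
 * Hard IRQ handler: acknowledge that the controller raised the interrupt and
 * mask the global interrupt enable bit; actual stream and RIRB servicing is
 * deferred to the threaded handler.
 */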
283 static irqreturn_t avs_hda_irq_handler(int irq, void *dev_id)
284 {
285 	struct hdac_bus *bus = dev_id;
286 	u32 intsts;
287 
288 	intsts = snd_hdac_chip_readl(bus, INTSTS);
289 	if (intsts == UINT_MAX || !(intsts & AZX_INT_GLOBAL_EN))
290 		return IRQ_NONE;
291 
292 	/* Mask GIE, unmasked in irq_thread(). */
293 	snd_hdac_chip_updatel(bus, INTCTL, AZX_INT_GLOBAL_EN, 0);
294 
295 	return IRQ_WAKE_THREAD;
296 }
297 
298 static irqreturn_t avs_hda_irq_thread(int irq, void *dev_id)
299 {
300 	struct hdac_bus *bus = dev_id;
301 	u32 status;
302 
303 	status = snd_hdac_chip_readl(bus, INTSTS);
304 	if (status & ~AZX_INT_GLOBAL_EN)
305 		avs_hda_interrupt(bus);
306 
307 	/* Unmask GIE, masked in irq_handler(). */
308 	snd_hdac_chip_updatel(bus, INTCTL, AZX_INT_GLOBAL_EN, AZX_INT_GLOBAL_EN);
309 
310 	return IRQ_HANDLED;
311 }
312 
313 static irqreturn_t avs_dsp_irq_handler(int irq, void *dev_id)
314 {
315 	struct avs_dev *adev = dev_id;
316 
317 	return avs_hda_irq_handler(irq, &adev->base.core);
318 }
319 
320 static irqreturn_t avs_dsp_irq_thread(int irq, void *dev_id)
321 {
322 	struct avs_dev *adev = dev_id;
323 	struct hdac_bus *bus = &adev->base.core;
324 	u32 status;
325 
326 	status = readl(bus->ppcap + AZX_REG_PP_PPSTS);
327 	if (status & AZX_PPCTL_PIE)
328 		avs_dsp_op(adev, dsp_interrupt);
329 
330 	/* Unmask GIE, masked in irq_handler(). */
331 	snd_hdac_chip_updatel(bus, INTCTL, AZX_INT_GLOBAL_EN, AZX_INT_GLOBAL_EN);
332 
333 	return IRQ_HANDLED;
334 }
335 
336 static int avs_hdac_acquire_irq(struct avs_dev *adev)
337 {
338 	struct hdac_bus *bus = &adev->base.core;
339 	struct pci_dev *pci = to_pci_dev(bus->dev);
340 	int ret;
341 
342 	/* Request a single IRQ vector and verify that exactly one was allocated. */
343 	ret = pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_MSI | PCI_IRQ_INTX);
344 	if (ret != 1) {
345 		dev_err(adev->dev, "Failed to allocate IRQ vector: %d\n", ret);
346 		return ret;
347 	}
348 
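	/*
	 * Both handler pairs below share the single allocated vector: the HDA
	 * pair services stream and RIRB interrupts while the DSP pair services
	 * IPC interrupts raised by the AudioDSP.
	 */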
349 	ret = pci_request_irq(pci, 0, avs_hda_irq_handler, avs_hda_irq_thread, bus,
350 			      KBUILD_MODNAME);
351 	if (ret < 0) {
352 		dev_err(adev->dev, "Failed to request stream IRQ handler: %d\n", ret);
353 		goto free_vector;
354 	}
355 
356 	ret = pci_request_irq(pci, 0, avs_dsp_irq_handler, avs_dsp_irq_thread, adev,
357 			      KBUILD_MODNAME);
358 	if (ret < 0) {
359 		dev_err(adev->dev, "Failed to request IPC IRQ handler: %d\n", ret);
360 		goto free_stream_irq;
361 	}
362 
363 	return 0;
364 
365 free_stream_irq:
366 	pci_free_irq(pci, 0, bus);
367 free_vector:
368 	pci_free_irq_vectors(pci);
369 	return ret;
370 }
371 
372 static int avs_bus_init(struct avs_dev *adev, struct pci_dev *pci, const struct pci_device_id *id)
373 {
374 	struct hda_bus *bus = &adev->base;
375 	struct avs_ipc *ipc;
376 	struct device *dev = &pci->dev;
377 	int ret;
378 
379 	ret = snd_hdac_ext_bus_init(&bus->core, dev, NULL, &soc_hda_ext_bus_ops);
380 	if (ret < 0)
381 		return ret;
382 
383 	bus->core.use_posbuf = 1;
384 	bus->core.bdl_pos_adj = 0;
385 	bus->core.sync_write = 1;
386 	bus->pci = pci;
387 	bus->mixer_assigned = -1;
388 	mutex_init(&bus->prepare_mutex);
389 
390 	ipc = devm_kzalloc(dev, sizeof(*ipc), GFP_KERNEL);
391 	if (!ipc)
392 		return -ENOMEM;
393 	ret = avs_ipc_init(ipc, dev);
394 	if (ret < 0)
395 		return ret;
396 
397 	adev->modcfg_buf = devm_kzalloc(dev, AVS_MAILBOX_SIZE, GFP_KERNEL);
398 	if (!adev->modcfg_buf)
399 		return -ENOMEM;
400 
401 	adev->dev = dev;
402 	adev->spec = (const struct avs_spec *)id->driver_data;
403 	adev->ipc = ipc;
404 	adev->hw_cfg.dsp_cores = hweight_long(AVS_MAIN_CORE_MASK);
405 	INIT_WORK(&adev->probe_work, avs_hda_probe_work);
406 	INIT_LIST_HEAD(&adev->comp_list);
407 	INIT_LIST_HEAD(&adev->path_list);
408 	INIT_LIST_HEAD(&adev->fw_list);
409 	init_completion(&adev->fw_ready);
410 	spin_lock_init(&adev->path_list_lock);
411 	mutex_init(&adev->modres_mutex);
412 	mutex_init(&adev->comp_list_mutex);
413 	mutex_init(&adev->path_mutex);
414 
415 	return 0;
416 }
417 
418 static int avs_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
419 {
420 	struct hdac_bus *bus;
421 	struct avs_dev *adev;
422 	struct device *dev = &pci->dev;
423 	int ret;
424 
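	/*
	 * snd_intel_dsp_driver_probe() arbitrates which audio driver should
	 * bind to this device; bail out unless the AVS driver is an allowed
	 * choice (ANY, SST or AVS selection).
	 */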
425 	ret = snd_intel_dsp_driver_probe(pci);
426 	switch (ret) {
427 	case SND_INTEL_DSP_DRIVER_ANY:
428 	case SND_INTEL_DSP_DRIVER_SST:
429 	case SND_INTEL_DSP_DRIVER_AVS:
430 		break;
431 	default:
432 		return -ENODEV;
433 	}
434 
435 	ret = pcim_enable_device(pci);
436 	if (ret < 0)
437 		return ret;
438 
439 	adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
440 	if (!adev)
441 		return -ENOMEM;
442 	ret = avs_bus_init(adev, pci, id);
443 	if (ret < 0) {
444 		dev_err(dev, "failed to init avs bus: %d\n", ret);
445 		return ret;
446 	}
447 
448 	ret = pci_request_regions(pci, "AVS HDAudio");
449 	if (ret < 0)
450 		return ret;
451 
452 	bus = &adev->base.core;
453 	bus->addr = pci_resource_start(pci, 0);
454 	bus->remap_addr = pci_ioremap_bar(pci, 0);
455 	if (!bus->remap_addr) {
456 		dev_err(bus->dev, "ioremap error\n");
457 		ret = -ENXIO;
458 		goto err_remap_bar0;
459 	}
460 
461 	adev->dsp_ba = pci_ioremap_bar(pci, 4);
462 	if (!adev->dsp_ba) {
463 		dev_err(bus->dev, "ioremap error\n");
464 		ret = -ENXIO;
465 		goto err_remap_bar4;
466 	}
467 
468 	snd_hdac_bus_parse_capabilities(bus);
469 	if (bus->mlcap)
470 		snd_hdac_ext_bus_get_ml_capabilities(bus);
471 
472 	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
473 		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
474 	dma_set_max_seg_size(dev, UINT_MAX);
475 
476 	ret = avs_hdac_bus_init_streams(bus);
477 	if (ret < 0) {
478 		dev_err(dev, "failed to init streams: %d\n", ret);
479 		goto err_init_streams;
480 	}
481 
482 	ret = avs_hdac_acquire_irq(adev);
483 	if (ret < 0) {
484 		dev_err(bus->dev, "failed to acquire irq: %d\n", ret);
485 		goto err_acquire_irq;
486 	}
487 
488 	pci_set_master(pci);
489 	pci_set_drvdata(pci, bus);
490 	device_disable_async_suspend(dev);
491 
492 	ret = snd_hdac_i915_init(bus);
493 	if (ret == -EPROBE_DEFER)
494 		goto err_i915_init;
495 	else if (ret < 0)
496 		dev_info(bus->dev, "i915 init unsuccessful: %d\n", ret);
497 
498 	schedule_work(&adev->probe_work);
499 
500 	return 0;
501 
502 err_i915_init:
503 	pci_free_irq(pci, 0, adev);
504 	pci_free_irq(pci, 0, bus);
505 	pci_free_irq_vectors(pci);
506 	pci_clear_master(pci);
507 	pci_set_drvdata(pci, NULL);
508 err_acquire_irq:
509 	snd_hdac_bus_free_stream_pages(bus);
510 	snd_hdac_ext_stream_free_all(bus);
511 err_init_streams:
512 	iounmap(adev->dsp_ba);
513 err_remap_bar4:
514 	iounmap(bus->remap_addr);
515 err_remap_bar0:
516 	pci_release_regions(pci);
517 	return ret;
518 }
519 
520 static void avs_pci_shutdown(struct pci_dev *pci)
521 {
522 	struct hdac_bus *bus = pci_get_drvdata(pci);
523 	struct avs_dev *adev = hdac_to_avs(bus);
524 
525 	cancel_work_sync(&adev->probe_work);
526 	avs_ipc_block(adev->ipc);
527 
528 	snd_hdac_stop_streams(bus);
529 	avs_dsp_op(adev, int_control, false);
530 	snd_hdac_ext_bus_ppcap_int_enable(bus, false);
531 	snd_hdac_ext_bus_link_power_down_all(bus);
532 
533 	snd_hdac_bus_stop_chip(bus);
534 	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
535 
536 	pci_free_irq(pci, 0, adev);
537 	pci_free_irq(pci, 0, bus);
538 	pci_free_irq_vectors(pci);
539 }
540 
541 static void avs_pci_remove(struct pci_dev *pci)
542 {
543 	struct hdac_device *hdev, *save;
544 	struct hdac_bus *bus = pci_get_drvdata(pci);
545 	struct avs_dev *adev = hdac_to_avs(bus);
546 
547 	cancel_work_sync(&adev->probe_work);
548 	avs_ipc_block(adev->ipc);
549 
550 	avs_unregister_all_boards(adev);
551 
552 	acpi_nhlt_put_gbl_table();
553 	avs_debugfs_exit(adev);
554 
555 	if (avs_platattr_test(adev, CLDMA))
556 		hda_cldma_free(&code_loader);
557 
558 	snd_hdac_stop_streams_and_chip(bus);
559 	avs_dsp_op(adev, int_control, false);
560 	snd_hdac_ext_bus_ppcap_int_enable(bus, false);
561 
562 	/* it is safe to remove all codecs from the system now */
563 	list_for_each_entry_safe(hdev, save, &bus->codec_list, list)
564 		snd_hda_codec_unregister(hdac_to_hda_codec(hdev));
565 
566 	snd_hdac_bus_free_stream_pages(bus);
567 	snd_hdac_ext_stream_free_all(bus);
568 	/* reverse ml_capabilities */
569 	snd_hdac_ext_link_free_all(bus);
570 	snd_hdac_ext_bus_exit(bus);
571 
572 	avs_dsp_core_disable(adev, GENMASK(adev->hw_cfg.dsp_cores - 1, 0));
573 	snd_hdac_ext_bus_ppcap_enable(bus, false);
574 
575 	/* snd_hdac_stop_streams_and_chip does that already? */
576 	snd_hdac_bus_stop_chip(bus);
577 	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
578 	if (bus->audio_component)
579 		snd_hdac_i915_exit(bus);
580 
581 	avs_module_info_free(adev);
582 	pci_free_irq(pci, 0, adev);
583 	pci_free_irq(pci, 0, bus);
584 	pci_free_irq_vectors(pci);
585 	iounmap(bus->remap_addr);
586 	iounmap(adev->dsp_ba);
587 	pci_release_regions(pci);
588 
589 	/* Firmware is not needed anymore */
590 	avs_release_firmwares(adev);
591 
592 	/* pm_runtime_forbid() can rpm_resume() which we do not want */
593 	pm_runtime_disable(&pci->dev);
594 	pm_runtime_forbid(&pci->dev);
595 	pm_runtime_enable(&pci->dev);
596 	pm_runtime_get_noresume(&pci->dev);
597 }
598 
599 static int avs_suspend_standby(struct avs_dev *adev)
600 {
601 	struct hdac_bus *bus = &adev->base.core;
602 	struct pci_dev *pci = adev->base.pci;
603 
604 	if (bus->cmd_dma_state)
605 		snd_hdac_bus_stop_cmd_io(bus);
606 
607 	snd_hdac_ext_bus_link_power_down_all(bus);
608 
609 	enable_irq_wake(pci->irq);
610 	pci_save_state(pci);
611 
612 	return 0;
613 }
614 
615 static int __maybe_unused avs_suspend_common(struct avs_dev *adev, bool low_power)
616 {
617 	struct hdac_bus *bus = &adev->base.core;
618 	int ret;
619 
620 	flush_work(&adev->probe_work);
621 	if (low_power && adev->num_lp_paths)
622 		return avs_suspend_standby(adev);
623 
624 	snd_hdac_ext_bus_link_power_down_all(bus);
625 
626 	ret = avs_ipc_set_dx(adev, AVS_MAIN_CORE_MASK, false);
627 	/*
628 	 * pm_runtime is blocked on DSP failure but system-wide suspend is not.
629 	 * Do not block entire system from suspending if that's the case.
630 	 */
631 	if (ret && ret != -EPERM) {
632 		dev_err(adev->dev, "set dx failed: %d\n", ret);
633 		return AVS_IPC_RET(ret);
634 	}
635 
636 	avs_ipc_block(adev->ipc);
637 	avs_dsp_op(adev, int_control, false);
638 	snd_hdac_ext_bus_ppcap_int_enable(bus, false);
639 
640 	ret = avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
641 	if (ret < 0) {
642 		dev_err(adev->dev, "core_mask %ld disable failed: %d\n", AVS_MAIN_CORE_MASK, ret);
643 		return ret;
644 	}
645 
646 	snd_hdac_ext_bus_ppcap_enable(bus, false);
647 	/* disable LP SRAM retention */
648 	avs_hda_power_gating_enable(adev, false);
649 	snd_hdac_bus_stop_chip(bus);
650 	/* disable CG when putting controller to reset */
651 	avs_hdac_clock_gating_enable(bus, false);
652 	snd_hdac_bus_enter_link_reset(bus);
653 	avs_hdac_clock_gating_enable(bus, true);
654 
655 	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
656 
657 	return 0;
658 }
659 
660 static int avs_resume_standby(struct avs_dev *adev)
661 {
662 	struct hdac_bus *bus = &adev->base.core;
663 	struct pci_dev *pci = adev->base.pci;
664 
665 	pci_restore_state(pci);
666 	disable_irq_wake(pci->irq);
667 
668 	snd_hdac_ext_bus_link_power_up_all(bus);
669 
670 	if (bus->cmd_dma_state)
671 		snd_hdac_bus_init_cmd_io(bus);
672 
673 	return 0;
674 }
675 
676 static int __maybe_unused avs_resume_common(struct avs_dev *adev, bool low_power, bool purge)
677 {
678 	struct hdac_bus *bus = &adev->base.core;
679 	int ret;
680 
681 	if (low_power && adev->num_lp_paths)
682 		return avs_resume_standby(adev);
683 
684 	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
685 	avs_hdac_bus_init_chip(bus, true);
686 
687 	snd_hdac_ext_bus_ppcap_enable(bus, true);
688 	snd_hdac_ext_bus_ppcap_int_enable(bus, true);
689 
690 	ret = avs_dsp_boot_firmware(adev, purge);
691 	if (ret < 0) {
692 		dev_err(adev->dev, "firmware boot failed: %d\n", ret);
693 		return ret;
694 	}
695 
696 	return 0;
697 }
698 
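/*
 * The PM callbacks below differ only in the low_power and purge arguments
 * passed to the common suspend/resume helpers.
 */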
699 static int __maybe_unused avs_suspend(struct device *dev)
700 {
701 	return avs_suspend_common(to_avs_dev(dev), true);
702 }
703 
704 static int __maybe_unused avs_resume(struct device *dev)
705 {
706 	return avs_resume_common(to_avs_dev(dev), true, true);
707 }
708 
709 static int __maybe_unused avs_runtime_suspend(struct device *dev)
710 {
711 	return avs_suspend_common(to_avs_dev(dev), true);
712 }
713 
714 static int __maybe_unused avs_runtime_resume(struct device *dev)
715 {
716 	return avs_resume_common(to_avs_dev(dev), true, false);
717 }
718 
719 static int __maybe_unused avs_freeze(struct device *dev)
720 {
721 	return avs_suspend_common(to_avs_dev(dev), false);
722 }
723 static int __maybe_unused avs_thaw(struct device *dev)
724 {
725 	return avs_resume_common(to_avs_dev(dev), false, true);
726 }
727 
728 static int __maybe_unused avs_poweroff(struct device *dev)
729 {
730 	return avs_suspend_common(to_avs_dev(dev), false);
731 }
732 
733 static int __maybe_unused avs_restore(struct device *dev)
734 {
735 	return avs_resume_common(to_avs_dev(dev), false, true);
736 }
737 
738 static const struct dev_pm_ops avs_dev_pm = {
739 	.suspend = avs_suspend,
740 	.resume = avs_resume,
741 	.freeze = avs_freeze,
742 	.thaw = avs_thaw,
743 	.poweroff = avs_poweroff,
744 	.restore = avs_restore,
745 	SET_RUNTIME_PM_OPS(avs_runtime_suspend, avs_runtime_resume, NULL)
746 };
747 
748 static const struct avs_sram_spec skl_sram_spec = {
749 	.base_offset = SKL_ADSP_SRAM_BASE_OFFSET,
750 	.window_size = SKL_ADSP_SRAM_WINDOW_SIZE,
751 	.rom_status_offset = SKL_ADSP_SRAM_BASE_OFFSET,
752 };
753 
754 static const struct avs_sram_spec apl_sram_spec = {
755 	.base_offset = APL_ADSP_SRAM_BASE_OFFSET,
756 	.window_size = APL_ADSP_SRAM_WINDOW_SIZE,
757 	.rom_status_offset = APL_ADSP_SRAM_BASE_OFFSET,
758 };
759 
760 static const struct avs_hipc_spec skl_hipc_spec = {
761 	.req_offset = SKL_ADSP_REG_HIPCI,
762 	.req_ext_offset = SKL_ADSP_REG_HIPCIE,
763 	.req_busy_mask = SKL_ADSP_HIPCI_BUSY,
764 	.ack_offset = SKL_ADSP_REG_HIPCIE,
765 	.ack_done_mask = SKL_ADSP_HIPCIE_DONE,
766 	.rsp_offset = SKL_ADSP_REG_HIPCT,
767 	.rsp_busy_mask = SKL_ADSP_HIPCT_BUSY,
768 	.ctl_offset = SKL_ADSP_REG_HIPCCTL,
769 };
770 
771 static const struct avs_hipc_spec cnl_hipc_spec = {
772 	.req_offset = CNL_ADSP_REG_HIPCIDR,
773 	.req_ext_offset = CNL_ADSP_REG_HIPCIDD,
774 	.req_busy_mask = CNL_ADSP_HIPCIDR_BUSY,
775 	.ack_offset = CNL_ADSP_REG_HIPCIDA,
776 	.ack_done_mask = CNL_ADSP_HIPCIDA_DONE,
777 	.rsp_offset = CNL_ADSP_REG_HIPCTDR,
778 	.rsp_busy_mask = CNL_ADSP_HIPCTDR_BUSY,
779 	.ctl_offset = CNL_ADSP_REG_HIPCCTL,
780 };
781 
782 static const struct avs_spec skl_desc = {
783 	.name = "skl",
784 	.min_fw_version = { 9, 21, 0, 4732 },
785 	.dsp_ops = &avs_skl_dsp_ops,
786 	.core_init_mask = 1,
787 	.attributes = AVS_PLATATTR_CLDMA,
788 	.sram = &skl_sram_spec,
789 	.hipc = &skl_hipc_spec,
790 };
791 
792 static const struct avs_spec apl_desc = {
793 	.name = "apl",
794 	.min_fw_version = { 9, 22, 1, 4323 },
795 	.dsp_ops = &avs_apl_dsp_ops,
796 	.core_init_mask = 3,
797 	.attributes = AVS_PLATATTR_IMR,
798 	.sram = &apl_sram_spec,
799 	.hipc = &skl_hipc_spec,
800 };
801 
802 static const struct avs_spec cnl_desc = {
803 	.name = "cnl",
804 	.min_fw_version = { 10, 23, 0, 5314 },
805 	.dsp_ops = &avs_cnl_dsp_ops,
806 	.core_init_mask = 1,
807 	.attributes = AVS_PLATATTR_IMR,
808 	.sram = &apl_sram_spec,
809 	.hipc = &cnl_hipc_spec,
810 };
811 
812 static const struct avs_spec icl_desc = {
813 	.name = "icl",
814 	.min_fw_version = { 10, 23, 0, 5040 },
815 	.dsp_ops = &avs_icl_dsp_ops,
816 	.core_init_mask = 1,
817 	.attributes = AVS_PLATATTR_IMR,
818 	.sram = &apl_sram_spec,
819 	.hipc = &cnl_hipc_spec,
820 };
821 
822 static const struct avs_spec jsl_desc = {
823 	.name = "jsl",
824 	.min_fw_version = { 10, 26, 0, 5872 },
825 	.dsp_ops = &avs_icl_dsp_ops,
826 	.core_init_mask = 1,
827 	.attributes = AVS_PLATATTR_IMR,
828 	.sram = &apl_sram_spec,
829 	.hipc = &cnl_hipc_spec,
830 };
831 
832 #define AVS_TGL_BASED_SPEC(sname)		\
833 static const struct avs_spec sname##_desc = {	\
834 	.name = #sname,				\
835 	.min_fw_version = { 10,	29, 0, 5646 },	\
836 	.dsp_ops = &avs_tgl_dsp_ops,		\
837 	.core_init_mask = 1,			\
838 	.attributes = AVS_PLATATTR_IMR,		\
839 	.sram = &apl_sram_spec,			\
840 	.hipc = &cnl_hipc_spec,			\
841 }
842 
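/* Platforms below share the TGL firmware baseline and DSP ops; only their names differ. */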
843 AVS_TGL_BASED_SPEC(lkf);
844 AVS_TGL_BASED_SPEC(tgl);
845 AVS_TGL_BASED_SPEC(ehl);
846 AVS_TGL_BASED_SPEC(adl);
847 AVS_TGL_BASED_SPEC(adl_n);
848 
849 static const struct pci_device_id avs_ids[] = {
850 	{ PCI_DEVICE_DATA(INTEL, HDA_SKL_LP, &skl_desc) },
851 	{ PCI_DEVICE_DATA(INTEL, HDA_SKL, &skl_desc) },
852 	{ PCI_DEVICE_DATA(INTEL, HDA_KBL_LP, &skl_desc) },
853 	{ PCI_DEVICE_DATA(INTEL, HDA_KBL, &skl_desc) },
854 	{ PCI_DEVICE_DATA(INTEL, HDA_KBL_H, &skl_desc) },
855 	{ PCI_DEVICE_DATA(INTEL, HDA_CML_S, &skl_desc) },
856 	{ PCI_DEVICE_DATA(INTEL, HDA_APL, &apl_desc) },
857 	{ PCI_DEVICE_DATA(INTEL, HDA_GML, &apl_desc) },
858 	{ PCI_DEVICE_DATA(INTEL, HDA_CNL_LP,	&cnl_desc) },
859 	{ PCI_DEVICE_DATA(INTEL, HDA_CNL_H,	&cnl_desc) },
860 	{ PCI_DEVICE_DATA(INTEL, HDA_CML_LP,	&cnl_desc) },
861 	{ PCI_DEVICE_DATA(INTEL, HDA_CML_H,	&cnl_desc) },
862 	{ PCI_DEVICE_DATA(INTEL, HDA_RKL_S,	&cnl_desc) },
863 	{ PCI_DEVICE_DATA(INTEL, HDA_ICL_LP,	&icl_desc) },
864 	{ PCI_DEVICE_DATA(INTEL, HDA_ICL_N,	&icl_desc) },
865 	{ PCI_DEVICE_DATA(INTEL, HDA_ICL_H,	&icl_desc) },
866 	{ PCI_DEVICE_DATA(INTEL, HDA_JSL_N,	&jsl_desc) },
867 	{ PCI_DEVICE_DATA(INTEL, HDA_LKF,	&lkf_desc) },
868 	{ PCI_DEVICE_DATA(INTEL, HDA_TGL_LP,	&tgl_desc) },
869 	{ PCI_DEVICE_DATA(INTEL, HDA_TGL_H,	&tgl_desc) },
870 	{ PCI_DEVICE_DATA(INTEL, HDA_CML_R,	&tgl_desc) },
871 	{ PCI_DEVICE_DATA(INTEL, HDA_EHL_0,	&ehl_desc) },
872 	{ PCI_DEVICE_DATA(INTEL, HDA_EHL_3,	&ehl_desc) },
873 	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_S,	&adl_desc) },
874 	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_P,	&adl_desc) },
875 	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_PS,	&adl_desc) },
876 	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_M,	&adl_desc) },
877 	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_PX,	&adl_desc) },
878 	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_N,	&adl_n_desc) },
879 	{ PCI_DEVICE_DATA(INTEL, HDA_RPL_S,	&adl_desc) },
880 	{ PCI_DEVICE_DATA(INTEL, HDA_RPL_P_0,	&adl_desc) },
881 	{ PCI_DEVICE_DATA(INTEL, HDA_RPL_P_1,	&adl_desc) },
882 	{ PCI_DEVICE_DATA(INTEL, HDA_RPL_M,	&adl_desc) },
883 	{ PCI_DEVICE_DATA(INTEL, HDA_RPL_PX,	&adl_desc) },
884 	{ 0 }
885 };
886 MODULE_DEVICE_TABLE(pci, avs_ids);
887 
888 static struct pci_driver avs_pci_driver = {
889 	.name = KBUILD_MODNAME,
890 	.id_table = avs_ids,
891 	.probe = avs_pci_probe,
892 	.remove = avs_pci_remove,
893 	.shutdown = avs_pci_shutdown,
894 	.dev_groups = avs_attr_groups,
895 	.driver = {
896 		.pm = &avs_dev_pm,
897 	},
898 };
899 module_pci_driver(avs_pci_driver);
900 
901 MODULE_AUTHOR("Cezary Rojewski <cezary.rojewski@intel.com>");
902 MODULE_AUTHOR("Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>");
903 MODULE_DESCRIPTION("Intel cAVS sound driver");
904 MODULE_LICENSE("GPL");
905