// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license.  When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation
//
// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//	    Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
//	    Rander Wang <rander.wang@intel.com>
//	    Keyon Jie <yang.jie@linux.intel.com>
//

/*
 * Hardware interface for HDA DSP code loader
 */

#include <linux/firmware.h>
#include <sound/hdaudio_ext.h>
#include <sound/hda_register.h>
#include <sound/sof.h>
#include <sound/sof/ipc4/header.h>
#include "ext_manifest.h"
#include "../ipc4-priv.h"
#include "../ops.h"
#include "../sof-priv.h"
#include "hda.h"

static bool persistent_cl_buffer = true;
module_param(persistent_cl_buffer, bool, 0444);
MODULE_PARM_DESC(persistent_cl_buffer, "Persistent Code Loader DMA buffer "
		 "(default = Y, use N to force buffer re-allocation)");

static void hda_ssp_set_cbp_cfp(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	int i;

	/* DSP is powered up, set all SSPs to clock consumer/codec provider mode */
	for (i = 0; i < chip->ssp_count; i++) {
		snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
						 chip->ssp_base_offset
						 + i * SSP_DEV_MEM_SIZE
						 + SSP_SSC1_OFFSET,
						 SSP_SET_CBP_CFP,
						 SSP_SET_CBP_CFP);
	}
}

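/*
 * Prepare a host stream for code loading: claim an HDA stream in the
 * requested direction, allocate the DMA buffer if needed (or reuse a
 * persistent one) and program the stream registers.
 */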
struct hdac_ext_stream*
hda_cl_prepare(struct device *dev, unsigned int format, unsigned int size,
	       struct snd_dma_buffer *dmab, bool persistent_buffer, int direction,
	       bool is_iccmax)
{
	struct snd_sof_dev *sdev = dev_get_drvdata(dev);
	struct hdac_ext_stream *hext_stream;
	struct hdac_stream *hstream;
	int ret;

	hext_stream = hda_dsp_stream_get(sdev, direction, 0);

	if (!hext_stream) {
		dev_err(sdev->dev, "error: no stream available\n");
		return ERR_PTR(-ENODEV);
	}
	hstream = &hext_stream->hstream;
	hstream->substream = NULL;

	/*
	 * Allocate the DMA buffer if it is temporary or if the buffer is
	 * intended to be persistent but has not been allocated yet.
	 * We cannot rely solely on !dmab->area as the caller might use a
	 * struct on the stack (when it is temporary) without clearing it to 0.
	 */
	if (!persistent_buffer || !dmab->area) {
		ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, dev, size, dmab);
		if (ret < 0) {
			dev_err(sdev->dev, "%s: memory alloc failed: %d\n",
				__func__, ret);
			goto out_put;
		}
	}

	hstream->period_bytes = 0; /* initialize period_bytes */
	hstream->format_val = format;
	hstream->bufsize = size;

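	/*
	 * ICCMAX capture streams use dedicated hw_params; regular code loader
	 * streams also enable SPIB with the full transfer size.
	 */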
	if (is_iccmax) {
		ret = hda_dsp_iccmax_stream_hw_params(sdev, hext_stream, dmab, NULL);
		if (ret < 0) {
			dev_err(sdev->dev, "error: iccmax stream prepare failed: %d\n", ret);
			goto out_free;
		}
	} else {
		ret = hda_dsp_stream_hw_params(sdev, hext_stream, dmab, NULL);
		if (ret < 0) {
			dev_err(sdev->dev, "error: hdac prepare failed: %d\n", ret);
			goto out_free;
		}
		hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_ENABLE, size);
	}

	return hext_stream;

out_free:
	snd_dma_free_pages(dmab);
	dmab->area = NULL;
	dmab->bytes = 0;
	hstream->bufsize = 0;
	hstream->format_val = 0;
out_put:
	hda_dsp_stream_put(sdev, direction, hstream->stream_tag);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS(hda_cl_prepare, "SND_SOC_SOF_INTEL_HDA_COMMON");

/*
 * The first boot sequence has some extra steps:
 * power up all host-managed cores, unstall/run only the boot core to boot
 * the DSP, then power down any non-boot cores that were powered up.
 */
int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	unsigned int status, target_status;
	u32 flags, ipc_hdr, j;
	unsigned long mask;
	char *dump_msg;
	int ret;

	/* step 1: power up corex */
	ret = hda_dsp_core_power_up(sdev, chip->host_managed_cores_mask);
	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev, "error: dsp core 0/1 power up failed\n");
		goto err;
	}

	hda_ssp_set_cbp_cfp(sdev);

	/* step 2: Send ROM_CONTROL command (stream_tag is ignored for IMR boot) */
	ipc_hdr = chip->ipc_req_mask | HDA_DSP_ROM_IPC_CONTROL;
	if (!imr_boot)
		ipc_hdr |= HDA_DSP_ROM_IPC_PURGE_FW | ((stream_tag - 1) << 9);

	snd_sof_dsp_write(sdev, HDA_DSP_BAR, chip->ipc_req, ipc_hdr);

	/* step 3: unset core 0 reset state & unstall/run core 0 */
	ret = hda_dsp_core_run(sdev, chip->init_core_mask);
	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev,
				"error: dsp core start failed %d\n", ret);
		ret = -EIO;
		goto err;
	}

	/* step 4: wait for IPC DONE bit from ROM */
	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					    chip->ipc_ack, status,
					    ((status & chip->ipc_ack_mask)
						    == chip->ipc_ack_mask),
					    HDA_DSP_REG_POLL_INTERVAL_US,
					    HDA_DSP_INIT_TIMEOUT_US);

	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev,
				"error: %s: timeout for HIPCIE done\n",
				__func__);
		goto err;
	}

	/* set DONE bit to clear the reply IPC message */
	snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
				       chip->ipc_ack,
				       chip->ipc_ack_mask,
				       chip->ipc_ack_mask);

	/* step 5: power down cores that are no longer needed */
	ret = hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask &
					   ~(chip->init_core_mask));
	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev,
				"error: dsp core x power down failed\n");
		goto err;
	}

	/* step 6: enable IPC interrupts */
	hda_dsp_ipc_int_enable(sdev);

	/*
	 * step 7:
	 * - Cold/Full boot: wait for ROM init to complete before downloading
	 *   the firmware
	 * - IMR boot: wait for the firmware to enter the FW_ENTERED state
	 *   (firmware booted up from IMR)
	 */
	if (imr_boot)
		target_status = FSR_STATE_FW_ENTERED;
	else
		target_status = FSR_STATE_INIT_DONE;

	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					chip->rom_status_reg, status,
					(FSR_TO_STATE_CODE(status) == target_status),
					HDA_DSP_REG_POLL_INTERVAL_US,
					chip->rom_init_timeout *
					USEC_PER_MSEC);
	if (!ret) {
		/* set enabled cores mask and increment ref count for cores in init_core_mask */
		sdev->enabled_cores_mask |= chip->init_core_mask;
		mask = sdev->enabled_cores_mask;
		for_each_set_bit(j, &mask, SOF_MAX_DSP_NUM_CORES)
			sdev->dsp_core_ref_count[j]++;
		return 0;
	}

	if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
		dev_err(sdev->dev,
			"%s: timeout with rom_status_reg (%#x) read\n",
			__func__, chip->rom_status_reg);

err:
	flags = SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX | SOF_DBG_DUMP_OPTIONAL;

	/* after max boot attempts make sure that the dump is printed */
	if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
		flags &= ~SOF_DBG_DUMP_OPTIONAL;

	dump_msg = kasprintf(GFP_KERNEL, "Boot iteration failed: %d/%d",
			     hda->boot_iteration, HDA_FW_BOOT_ATTEMPTS);
	snd_sof_dsp_dbg_dump(sdev, dump_msg, flags);
	hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask);

	kfree(dump_msg);
	return ret;
}
EXPORT_SYMBOL_NS(cl_dsp_init, "SND_SOC_SOF_INTEL_HDA_COMMON");

int hda_cl_trigger(struct device *dev, struct hdac_ext_stream *hext_stream, int cmd)
{
	struct snd_sof_dev *sdev = dev_get_drvdata(dev);
	struct hdac_stream *hstream = &hext_stream->hstream;
	int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
	struct sof_intel_hda_stream *hda_stream;

	/* the code loader is a special case that reuses the stream ops */
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		hda_stream = container_of(hext_stream, struct sof_intel_hda_stream,
					  hext_stream);
		reinit_completion(&hda_stream->ioc);

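		/* enable this stream's interrupt in the controller INTCTL register */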
		snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
					1 << hstream->index,
					1 << hstream->index);

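		/* set the RUN bit and the descriptor interrupt enable bits in SD_CTL */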
		snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
					sd_offset,
					SOF_HDA_SD_CTL_DMA_START |
					SOF_HDA_CL_DMA_SD_INT_MASK,
					SOF_HDA_SD_CTL_DMA_START |
					SOF_HDA_CL_DMA_SD_INT_MASK);

		hstream->running = true;
		return 0;
	default:
		return hda_dsp_stream_trigger(sdev, hext_stream, cmd);
	}
}
EXPORT_SYMBOL_NS(hda_cl_trigger, "SND_SOC_SOF_INTEL_HDA_COMMON");

int hda_cl_cleanup(struct device *dev, struct snd_dma_buffer *dmab,
		   bool persistent_buffer, struct hdac_ext_stream *hext_stream)
{
	struct snd_sof_dev *sdev = dev_get_drvdata(dev);
	struct hdac_stream *hstream = &hext_stream->hstream;
	int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
	int ret = 0;

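	/* playback (code loader) streams use SPIB, capture streams just clear the RUN bit */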
	if (hstream->direction == SNDRV_PCM_STREAM_PLAYBACK)
		ret = hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_DISABLE, 0);
	else
		snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
					SOF_HDA_SD_CTL_DMA_START, 0);

	hda_dsp_stream_put(sdev, hstream->direction, hstream->stream_tag);
	hstream->running = 0;
	hstream->substream = NULL;

	/* reset BDL address */
	snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
			  sd_offset + SOF_HDA_ADSP_REG_SD_BDLPL, 0);
	snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
			  sd_offset + SOF_HDA_ADSP_REG_SD_BDLPU, 0);

	snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, sd_offset, 0);

	if (!persistent_buffer) {
		snd_dma_free_pages(dmab);
		dmab->area = NULL;
		dmab->bytes = 0;
		hstream->bufsize = 0;
		hstream->format_val = 0;
	}

	return ret;
}
EXPORT_SYMBOL_NS(hda_cl_cleanup, "SND_SOC_SOF_INTEL_HDA_COMMON");

#define HDA_CL_DMA_IOC_TIMEOUT_MS 500

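/*
 * Start the code loader DMA, wait for the ROM to report the FW_ENTERED
 * state and then stop the DMA again.
 */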
int hda_cl_copy_fw(struct snd_sof_dev *sdev, struct hdac_ext_stream *hext_stream)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	unsigned int reg;
	int ret, status;

	dev_dbg(sdev->dev, "Code loader DMA starting\n");

	ret = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_START);
	if (ret < 0) {
		dev_err(sdev->dev, "error: DMA trigger start failed\n");
		return ret;
	}

	dev_dbg(sdev->dev, "waiting for FW_ENTERED status\n");

	status = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					chip->rom_status_reg, reg,
					(FSR_TO_STATE_CODE(reg) == FSR_STATE_FW_ENTERED),
					HDA_DSP_REG_POLL_INTERVAL_US,
					HDA_DSP_BASEFW_TIMEOUT_US);

	/*
	 * even in case of errors we still need to stop the DMAs,
	 * but we return the initial error should the DMA stop also fail
	 */

	if (status < 0) {
		dev_err(sdev->dev,
			"%s: timeout with rom_status_reg (%#x) read\n",
			__func__, chip->rom_status_reg);
	} else {
		dev_dbg(sdev->dev, "Code loader FW_ENTERED status\n");
	}

	ret = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_STOP);
	if (ret < 0) {
		dev_err(sdev->dev, "error: DMA trigger stop failed\n");
		if (!status)
			status = ret;
	} else {
		dev_dbg(sdev->dev, "Code loader DMA stopped\n");
	}

	return status;
}

int hda_dsp_cl_boot_firmware_iccmax(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct hdac_ext_stream *iccmax_stream;
	int ret, ret1;
	u8 original_gb;

	/* save the original LTRP guardband value */
	original_gb = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_LTRP) &
		HDA_VS_INTEL_LTRP_GB_MASK;

	/*
	 * Prepare capture stream for ICCMAX. We do not need to store
	 * the data, so use a buffer of PAGE_SIZE for receiving.
	 */
	iccmax_stream = hda_cl_prepare(sdev->dev, HDA_CL_STREAM_FORMAT, PAGE_SIZE,
				       &hda->iccmax_dmab, persistent_cl_buffer,
				       SNDRV_PCM_STREAM_CAPTURE, true);
	if (IS_ERR(iccmax_stream)) {
		dev_err(sdev->dev, "error: dma prepare for ICCMAX stream failed\n");
		return PTR_ERR(iccmax_stream);
	}

	ret = hda_dsp_cl_boot_firmware(sdev);

	/*
	 * Perform ICCMAX stream cleanup. This should be done even if firmware loading fails.
	 * If the cleanup also fails, we return the initial error.
	 */
	ret1 = hda_cl_cleanup(sdev->dev, &hda->iccmax_dmab,
			      persistent_cl_buffer, iccmax_stream);
	if (ret1 < 0) {
		dev_err(sdev->dev, "error: ICCMAX stream cleanup failed\n");

		/* set return value to indicate cleanup failure */
		if (!ret)
			ret = ret1;
	}

	/* restore the original guardband value after FW boot */
	snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_LTRP,
			    HDA_VS_INTEL_LTRP_GB_MASK, original_gb);

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_cl_boot_firmware_iccmax, "SND_SOC_SOF_INTEL_CNL");

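/*
 * Boot the firmware from the copy preserved in IMR: no code loader DMA
 * stream is needed, so the stream_tag passed to cl_init is ignored.
 */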
static int hda_dsp_boot_imr(struct snd_sof_dev *sdev)
{
	const struct sof_intel_dsp_desc *chip_info;
	int ret;

	chip_info = get_chip_info(sdev->pdata);
	if (chip_info->cl_init)
		ret = chip_info->cl_init(sdev, 0, true);
	else
		ret = -EINVAL;

	if (!ret)
		hda_sdw_process_wakeen(sdev);

	return ret;
}

int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct snd_sof_pdata *plat_data = sdev->pdata;
	const struct sof_dev_desc *desc = plat_data->desc;
	const struct sof_intel_dsp_desc *chip_info;
	struct hdac_ext_stream *hext_stream;
	struct firmware stripped_firmware;
	int ret, ret1, i;

	if (hda->imrboot_supported && !sdev->first_boot && !hda->skip_imr_boot) {
		dev_dbg(sdev->dev, "IMR restore supported, booting from IMR directly\n");
		hda->boot_iteration = 0;
		ret = hda_dsp_boot_imr(sdev);
		if (!ret) {
			hda->booted_from_imr = true;
			return 0;
		}

		dev_warn(sdev->dev, "IMR restore failed, trying to cold boot\n");
	}

	hda->booted_from_imr = false;

	chip_info = desc->chip_info;

	if (sdev->basefw.fw->size <= sdev->basefw.payload_offset) {
		dev_err(sdev->dev, "error: firmware size must be greater than firmware offset\n");
		return -EINVAL;
	}

	/* init for booting wait */
	init_waitqueue_head(&sdev->boot_wait);

	/* prepare DMA for code loader stream */
	stripped_firmware.size = sdev->basefw.fw->size - sdev->basefw.payload_offset;
	hext_stream = hda_cl_prepare(sdev->dev, HDA_CL_STREAM_FORMAT,
				     stripped_firmware.size,
				     &hda->cl_dmab, persistent_cl_buffer,
				     SNDRV_PCM_STREAM_PLAYBACK, false);
	if (IS_ERR(hext_stream)) {
		dev_err(sdev->dev, "error: dma prepare for fw loading failed\n");
		return PTR_ERR(hext_stream);
	}

	/*
	 * Copy the payload to the DMA buffer if it is temporary, or if the
	 * buffer is persistent but does not yet contain the basefw payload,
	 * either because this is the first boot and the buffer needs to be
	 * initialized, or because a library was loaded and replaced the basefw.
	 */
	if (!persistent_cl_buffer || !hda->cl_dmab_contains_basefw) {
		stripped_firmware.data = sdev->basefw.fw->data + sdev->basefw.payload_offset;
		memcpy(hda->cl_dmab.area, stripped_firmware.data, stripped_firmware.size);
		hda->cl_dmab_contains_basefw = true;
	}

	/* try ROM init a few times before giving up */
	for (i = 0; i < HDA_FW_BOOT_ATTEMPTS; i++) {
		dev_dbg(sdev->dev,
			"Attempting iteration %d of Core En/ROM load...\n", i);

		hda->boot_iteration = i + 1;
		if (chip_info->cl_init)
			ret = chip_info->cl_init(sdev, hext_stream->hstream.stream_tag, false);
		else
			ret = -EINVAL;

		/* don't retry anymore if successful */
		if (!ret)
			break;
	}

	if (i == HDA_FW_BOOT_ATTEMPTS) {
		dev_err(sdev->dev, "error: dsp init failed after %d attempts with err: %d\n",
			i, ret);
		goto cleanup;
	}

	/*
	 * When a SoundWire link is in clock stop state, a Slave
	 * device may trigger in-band wakes for events such as jack
	 * insertion or acoustic event detection. This event will lead
	 * to a WAKEEN interrupt, handled by the PCI device and routed
	 * to PME if the PCI device is in D3. The resume function in
	 * the audio PCI driver will be invoked by ACPI for the PME event
	 * and will initialize the device and process the WAKEEN interrupt.
	 *
	 * The WAKEEN interrupt should be processed ASAP to prevent an
	 * interrupt flood, otherwise other interrupts, such as IPC,
	 * cannot work normally. The WAKEEN is handled after the ROM
	 * is initialized successfully, which ensures power rails are
	 * enabled before accessing the SoundWire SHIM registers.
	 */
	if (!sdev->first_boot)
		hda_sdw_process_wakeen(sdev);

	/*
	 * Set boot_iteration to the last attempt, indicating that the
	 * DSP ROM has been initialized and that from this point on no
	 * further boot retries will be done.
	 *
	 * Continue with code loading and firmware boot.
	 */
	hda->boot_iteration = HDA_FW_BOOT_ATTEMPTS;
	ret = hda_cl_copy_fw(sdev, hext_stream);
	if (!ret) {
		dev_dbg(sdev->dev, "Firmware download successful, booting...\n");
		hda->skip_imr_boot = false;
	} else {
		snd_sof_dsp_dbg_dump(sdev, "Firmware download failed",
				     SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX);
		hda->skip_imr_boot = true;
	}

cleanup:
	/*
	 * Perform code loader stream cleanup.
	 * This should be done even if firmware loading fails.
	 * If the cleanup also fails, we return the initial error.
	 */
	ret1 = hda_cl_cleanup(sdev->dev, &hda->cl_dmab,
			      persistent_cl_buffer, hext_stream);
	if (ret1 < 0) {
		dev_err(sdev->dev, "error: Code loader DSP cleanup failed\n");

		/* set return value to indicate cleanup failure */
		if (!ret)
			ret = ret1;
	}

	/*
	 * return the init core mask if both the fw copy
	 * and the stream cleanup are successful
	 */
	if (!ret)
		return chip_info->init_core_mask;

	/* disable DSP */
	hda_dsp_ctrl_ppcap_enable(sdev, false);

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_cl_boot_firmware, "SND_SOC_SOF_INTEL_HDA_COMMON");

int hda_dsp_ipc4_load_library(struct snd_sof_dev *sdev,
			      struct sof_ipc4_fw_library *fw_lib, bool reload)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct sof_ipc4_fw_data *ipc4_data = sdev->private;
	struct hdac_ext_stream *hext_stream;
	struct firmware stripped_firmware;
	struct sof_ipc4_msg msg = {};
	int ret, ret1;

	/* if IMR booting is enabled and fw context is saved for D3 state, skip the loading */
	if (reload && hda->booted_from_imr && ipc4_data->fw_context_save)
		return 0;

	/* the fw_lib has been verified during loading, so we can trust its validity here */
	stripped_firmware.data = fw_lib->sof_fw.fw->data + fw_lib->sof_fw.payload_offset;
	stripped_firmware.size = fw_lib->sof_fw.fw->size - fw_lib->sof_fw.payload_offset;

	/*
	 * force re-allocation of the cl_dmab if the preserved DMA buffer is
	 * smaller than what is needed for the library
	 */
	if (persistent_cl_buffer && stripped_firmware.size > hda->cl_dmab.bytes) {
		snd_dma_free_pages(&hda->cl_dmab);
		hda->cl_dmab.area = NULL;
		hda->cl_dmab.bytes = 0;
	}

	/* prepare DMA for code loader stream */
	hext_stream = hda_cl_prepare(sdev->dev, HDA_CL_STREAM_FORMAT,
				     stripped_firmware.size,
				     &hda->cl_dmab, persistent_cl_buffer,
				     SNDRV_PCM_STREAM_PLAYBACK, false);
	if (IS_ERR(hext_stream)) {
		dev_err(sdev->dev, "%s: DMA prepare failed\n", __func__);
		return PTR_ERR(hext_stream);
	}

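	/* copy the library image; the DMA buffer no longer contains the basefw */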
	memcpy(hda->cl_dmab.area, stripped_firmware.data, stripped_firmware.size);
	hda->cl_dmab_contains_basefw = false;

	/*
	 * 1st stage: SOF_IPC4_GLB_LOAD_LIBRARY_PREPARE
	 * Message includes the dma_id to be prepared for the library loading.
	 * If the firmware does not have support for the message, we will
	 * receive -EOPNOTSUPP. In this case we will use single step library
	 * loading and proceed to send the LOAD_LIBRARY message.
	 */
	msg.primary = hext_stream->hstream.stream_tag - 1;
	msg.primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_LOAD_LIBRARY_PREPARE);
	msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
	msg.primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_FW_GEN_MSG);
	ret = sof_ipc_tx_message_no_reply(sdev->ipc, &msg, 0);
	if (!ret) {
		int sd_offset = SOF_STREAM_SD_OFFSET(&hext_stream->hstream);
		unsigned int status;

		/*
		 * Make sure that the FIFOS value in the SDxFIFOS register is
		 * not 0, which indicates that the firmware has set the GEN
		 * bit and we can continue to start the DMA
		 */
		ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_HDA_BAR,
					sd_offset + SOF_HDA_ADSP_REG_SD_FIFOSIZE,
					status,
					status & SOF_HDA_SD_FIFOSIZE_FIFOS_MASK,
					HDA_DSP_REG_POLL_INTERVAL_US,
					HDA_DSP_BASEFW_TIMEOUT_US);

		if (ret < 0)
			dev_warn(sdev->dev,
				 "%s: timeout waiting for FIFOS\n", __func__);
	} else if (ret != -EOPNOTSUPP) {
		goto cleanup;
	}

	ret = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_START);
	if (ret < 0) {
		dev_err(sdev->dev, "%s: DMA trigger start failed\n", __func__);
		goto cleanup;
	}

	/*
	 * 2nd stage: LOAD_LIBRARY
	 * Message includes the dma_id and the lib_id, the dma_id must be
	 * identical to the one sent via LOAD_LIBRARY_PREPARE
	 */
	msg.primary &= ~SOF_IPC4_MSG_TYPE_MASK;
	msg.primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_LOAD_LIBRARY);
	msg.primary |= SOF_IPC4_GLB_LOAD_LIBRARY_LIB_ID(fw_lib->id);
	ret = sof_ipc_tx_message_no_reply(sdev->ipc, &msg, 0);

	/* Stop the DMA channel */
	ret1 = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_STOP);
	if (ret1 < 0) {
		dev_err(sdev->dev, "%s: DMA trigger stop failed\n", __func__);
		if (!ret)
			ret = ret1;
	}

cleanup:
	/* clean up even in case of error and return the first error */
	ret1 = hda_cl_cleanup(sdev->dev, &hda->cl_dmab, persistent_cl_buffer,
			      hext_stream);
	if (ret1 < 0) {
		dev_err(sdev->dev, "%s: Code loader DSP cleanup failed\n", __func__);

		/* set return value to indicate cleanup failure */
		if (!ret)
			ret = ret1;
	}

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_ipc4_load_library, "SND_SOC_SOF_INTEL_HDA_COMMON");

int hda_dsp_ext_man_get_cavs_config_data(struct snd_sof_dev *sdev,
					 const struct sof_ext_man_elem_header *hdr)
{
	const struct sof_ext_man_cavs_config_data *config_data =
		container_of(hdr, struct sof_ext_man_cavs_config_data, hdr);
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	int i, elem_num;

	/* calculate total number of config data elements */
	elem_num = (hdr->size - sizeof(struct sof_ext_man_elem_header))
		   / sizeof(struct sof_config_elem);
	if (elem_num <= 0) {
		dev_err(sdev->dev, "cavs config data is inconsistent: %d\n", elem_num);
		return -EINVAL;
	}

	for (i = 0; i < elem_num; i++)
		switch (config_data->elems[i].token) {
		case SOF_EXT_MAN_CAVS_CONFIG_EMPTY:
			/* skip empty token */
			break;
		case SOF_EXT_MAN_CAVS_CONFIG_CAVS_LPRO:
			hda->clk_config_lpro = config_data->elems[i].value;
			dev_dbg(sdev->dev, "FW clock config: %s\n",
				hda->clk_config_lpro ? "LPRO" : "HPRO");
			break;
		case SOF_EXT_MAN_CAVS_CONFIG_OUTBOX_SIZE:
		case SOF_EXT_MAN_CAVS_CONFIG_INBOX_SIZE:
			/* these elements are defined but not used yet, no warning is required */
			break;
		default:
			dev_info(sdev->dev, "unsupported token type: %d\n",
				 config_data->elems[i].token);
		}

	return 0;
}
EXPORT_SYMBOL_NS(hda_dsp_ext_man_get_cavs_config_data, "SND_SOC_SOF_INTEL_HDA_COMMON");
726