xref: /linux/sound/soc/sof/intel/hda-loader.c (revision 8834ae896bfe10f239d49adb9cc76bb6a57c431c)
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 //
3 // This file is provided under a dual BSD/GPLv2 license.  When using or
4 // redistributing this file, you may do so under either license.
5 //
6 // Copyright(c) 2018 Intel Corporation
7 //
8 // Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
9 //	    Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
10 //	    Rander Wang <rander.wang@intel.com>
11 //          Keyon Jie <yang.jie@linux.intel.com>
12 //
13 
14 /*
15  * Hardware interface for HDA DSP code loader
16  */
17 
18 #include <linux/firmware.h>
19 #include <sound/hdaudio_ext.h>
20 #include <sound/hda_register.h>
21 #include <sound/sof.h>
22 #include <sound/sof/ipc4/header.h>
23 #include "ext_manifest.h"
24 #include "../ipc4-priv.h"
25 #include "../ops.h"
26 #include "../sof-priv.h"
27 #include "hda.h"
28 
29 static bool persistent_cl_buffer = true;
30 module_param(persistent_cl_buffer, bool, 0444);
31 MODULE_PARM_DESC(persistent_cl_buffer, "Persistent Code Loader DMA buffer "
32 		 "(default = Y, use N to force buffer re-allocation)");
33 
34 static void hda_ssp_set_cbp_cfp(struct snd_sof_dev *sdev)
35 {
36 	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
37 	const struct sof_intel_dsp_desc *chip = hda->desc;
38 	int i;
39 
40 	/* DSP is powered up, set all SSPs to clock consumer/codec provider mode */
41 	for (i = 0; i < chip->ssp_count; i++) {
42 		snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
43 						 chip->ssp_base_offset
44 						 + i * SSP_DEV_MEM_SIZE
45 						 + SSP_SSC1_OFFSET,
46 						 SSP_SET_CBP_CFP,
47 						 SSP_SET_CBP_CFP);
48 	}
49 }
50 
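/*
 * Allocate and prepare an HDA stream and its DMA buffer for code loading.
 * This is a thin wrapper around hda_data_stream_prepare(); the returned
 * stream is later started/stopped with hda_cl_trigger() and released with
 * hda_cl_cleanup().
 */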
51 struct hdac_ext_stream*
52 hda_cl_prepare(struct device *dev, unsigned int format, unsigned int size,
53 	       struct snd_dma_buffer *dmab, bool persistent_buffer, int direction,
54 	       bool is_iccmax)
55 {
56 	return hda_data_stream_prepare(dev, format, size, dmab, persistent_buffer,
57 				       direction, is_iccmax, false);
58 }
59 EXPORT_SYMBOL_NS(hda_cl_prepare, "SND_SOC_SOF_INTEL_HDA_COMMON");
60 
61 /*
62  * The first boot sequence has some extra steps:
63  * power up all host-managed cores, unstall/run only the boot core to boot the
64  * DSP, then power down any non-boot cores that were powered up.
65  */
66 int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
67 {
68 	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
69 	const struct sof_intel_dsp_desc *chip = hda->desc;
70 	unsigned int status, target_status;
71 	u32 flags, ipc_hdr, j;
72 	unsigned long mask;
73 	char *dump_msg;
74 	int ret;
75 
76 	/* step 1: power up all host-managed cores */
77 	ret = hda_dsp_core_power_up(sdev, chip->host_managed_cores_mask);
78 	if (ret < 0) {
79 		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
80 			dev_err(sdev->dev, "error: dsp core 0/1 power up failed\n");
81 		goto err;
82 	}
83 
84 	hda_ssp_set_cbp_cfp(sdev);
85 
86 	/* step 2: Send ROM_CONTROL command (stream_tag is ignored for IMR boot) */
87 	ipc_hdr = chip->ipc_req_mask | HDA_DSP_ROM_IPC_CONTROL;
88 	if (!imr_boot)
89 		ipc_hdr |= HDA_DSP_ROM_IPC_PURGE_FW | ((stream_tag - 1) << 9);
90 
91 	snd_sof_dsp_write(sdev, HDA_DSP_BAR, chip->ipc_req, ipc_hdr);
92 
93 	/* step 3: unset core 0 reset state & unstall/run core 0 */
94 	ret = hda_dsp_core_run(sdev, chip->init_core_mask);
95 	if (ret < 0) {
96 		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
97 			dev_err(sdev->dev,
98 				"error: dsp core start failed %d\n", ret);
99 		ret = -EIO;
100 		goto err;
101 	}
102 
103 	/* step 4: wait for IPC DONE bit from ROM */
104 	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
105 					    chip->ipc_ack, status,
106 					    ((status & chip->ipc_ack_mask)
107 						    == chip->ipc_ack_mask),
108 					    HDA_DSP_REG_POLL_INTERVAL_US,
109 					    HDA_DSP_INIT_TIMEOUT_US);
110 
111 	if (ret < 0) {
112 		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
113 			dev_err(sdev->dev,
114 				"error: %s: timeout for HIPCIE done\n",
115 				__func__);
116 		goto err;
117 	}
118 
119 	/* set DONE bit to clear the reply IPC message */
120 	snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
121 				       chip->ipc_ack,
122 				       chip->ipc_ack_mask,
123 				       chip->ipc_ack_mask);
124 
125 	/* step 5: power down cores that are no longer needed */
126 	ret = hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask &
127 					   ~(chip->init_core_mask));
128 	if (ret < 0) {
129 		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
130 			dev_err(sdev->dev,
131 				"error: dsp core x power down failed\n");
132 		goto err;
133 	}
134 
135 	/* step 6: enable IPC interrupts */
136 	hda_dsp_ipc_int_enable(sdev);
137 
138 	/*
139 	 * step 7:
140 	 * - Cold/Full boot: wait for ROM init to complete before downloading the firmware
141 	 * - IMR boot: wait for the FW_ENTERED state (firmware booted up from IMR)
142 	 */
143 	if (imr_boot)
144 		target_status = FSR_STATE_FW_ENTERED;
145 	else
146 		target_status = FSR_STATE_INIT_DONE;
147 
148 	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
149 					chip->rom_status_reg, status,
150 					(FSR_TO_STATE_CODE(status) == target_status),
151 					HDA_DSP_REG_POLL_INTERVAL_US,
152 					chip->rom_init_timeout *
153 					USEC_PER_MSEC);
154 	if (!ret) {
155 		/* set enabled cores mask and increment ref count for cores in init_core_mask */
156 		sdev->enabled_cores_mask |= chip->init_core_mask;
157 		mask = sdev->enabled_cores_mask;
158 		for_each_set_bit(j, &mask, SOF_MAX_DSP_NUM_CORES)
159 			sdev->dsp_core_ref_count[j]++;
160 		return 0;
161 	}
162 
163 	if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
164 		dev_err(sdev->dev,
165 			"%s: timeout with rom_status_reg (%#x) read\n",
166 			__func__, chip->rom_status_reg);
167 
168 err:
169 	flags = SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX | SOF_DBG_DUMP_OPTIONAL;
170 
171 	/* after max boot attempts make sure that the dump is printed */
172 	if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
173 		flags &= ~SOF_DBG_DUMP_OPTIONAL;
174 
175 	dump_msg = kasprintf(GFP_KERNEL, "Boot iteration failed: %d/%d",
176 			     hda->boot_iteration, HDA_FW_BOOT_ATTEMPTS);
177 	snd_sof_dsp_dbg_dump(sdev, dump_msg, flags);
178 	hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask);
179 
180 	kfree(dump_msg);
181 	return ret;
182 }
183 EXPORT_SYMBOL_NS(cl_dsp_init, "SND_SOC_SOF_INTEL_HDA_COMMON");
184 
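/*
 * Trigger the code loader stream. SNDRV_PCM_TRIGGER_START is handled here
 * directly (enable the stream interrupt, then set the DMA start and IOC
 * bits); all other commands are passed through to hda_dsp_stream_trigger().
 */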
185 int hda_cl_trigger(struct device *dev, struct hdac_ext_stream *hext_stream, int cmd)
186 {
187 	struct snd_sof_dev *sdev = dev_get_drvdata(dev);
188 	struct hdac_stream *hstream = &hext_stream->hstream;
189 	int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
190 	struct sof_intel_hda_stream *hda_stream;
191 
192 	/* the code loader is a special case that reuses stream ops */
193 	switch (cmd) {
194 	case SNDRV_PCM_TRIGGER_START:
195 		hda_stream = container_of(hext_stream, struct sof_intel_hda_stream,
196 					  hext_stream);
197 		reinit_completion(&hda_stream->ioc);
198 
199 		snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
200 					1 << hstream->index,
201 					1 << hstream->index);
202 
203 		snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
204 					sd_offset,
205 					SOF_HDA_SD_CTL_DMA_START |
206 					SOF_HDA_CL_DMA_SD_INT_MASK,
207 					SOF_HDA_SD_CTL_DMA_START |
208 					SOF_HDA_CL_DMA_SD_INT_MASK);
209 
210 		hstream->running = true;
211 		return 0;
212 	default:
213 		return hda_dsp_stream_trigger(sdev, hext_stream, cmd);
214 	}
215 }
216 EXPORT_SYMBOL_NS(hda_cl_trigger, "SND_SOC_SOF_INTEL_HDA_COMMON");
217 
218 int hda_cl_cleanup(struct device *dev, struct snd_dma_buffer *dmab,
219 			  bool persistent_buffer, struct hdac_ext_stream *hext_stream)
220 {
221 	return hda_data_stream_cleanup(dev, dmab, persistent_buffer, hext_stream, false);
222 }
223 EXPORT_SYMBOL_NS(hda_cl_cleanup, "SND_SOC_SOF_INTEL_HDA_COMMON");
224 
225 #define HDA_CL_DMA_IOC_TIMEOUT_MS 500
226 
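/*
 * Transfer the firmware image that was copied into the code loader DMA
 * buffer: start the DMA, poll the ROM status register until the DSP reports
 * FSR_STATE_FW_ENTERED, then stop the DMA. The polling result is returned;
 * if the poll succeeded but stopping the DMA failed, the stop error is
 * returned instead.
 */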
227 int hda_cl_copy_fw(struct snd_sof_dev *sdev, struct hdac_ext_stream *hext_stream)
228 {
229 	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
230 	const struct sof_intel_dsp_desc *chip = hda->desc;
231 	unsigned int reg;
232 	int ret, status;
233 
234 	dev_dbg(sdev->dev, "Code loader DMA starting\n");
235 
236 	ret = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_START);
237 	if (ret < 0) {
238 		dev_err(sdev->dev, "error: DMA trigger start failed\n");
239 		return ret;
240 	}
241 
242 	dev_dbg(sdev->dev, "waiting for FW_ENTERED status\n");
243 
244 	status = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
245 					chip->rom_status_reg, reg,
246 					(FSR_TO_STATE_CODE(reg) == FSR_STATE_FW_ENTERED),
247 					HDA_DSP_REG_POLL_INTERVAL_US,
248 					HDA_DSP_BASEFW_TIMEOUT_US);
249 
250 	/*
251 	 * even in case of errors we still need to stop the DMAs,
252 	 * but we return the initial error should the DMA stop also fail
253 	 */
254 
255 	if (status < 0) {
256 		dev_err(sdev->dev,
257 			"%s: timeout with rom_status_reg (%#x) read\n",
258 			__func__, chip->rom_status_reg);
259 	} else {
260 		dev_dbg(sdev->dev, "Code loader FW_ENTERED status\n");
261 	}
262 
263 	ret = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_STOP);
264 	if (ret < 0) {
265 		dev_err(sdev->dev, "error: DMA trigger stop failed\n");
266 		if (!status)
267 			status = ret;
268 	} else {
269 		dev_dbg(sdev->dev, "Code loader DMA stopped\n");
270 	}
271 
272 	return status;
273 }
274 
275 int hda_dsp_cl_boot_firmware_iccmax(struct snd_sof_dev *sdev)
276 {
277 	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
278 	struct hdac_ext_stream *iccmax_stream;
279 	int ret, ret1;
280 	u8 original_gb;
281 
282 	/* save the original LTRP guardband value */
283 	original_gb = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_LTRP) &
284 		HDA_VS_INTEL_LTRP_GB_MASK;
285 
286 	/*
287 	 * Prepare capture stream for ICCMAX. We do not need to store
288 	 * the data, so use a buffer of PAGE_SIZE for receiving.
289 	 */
290 	iccmax_stream = hda_cl_prepare(sdev->dev, HDA_CL_STREAM_FORMAT, PAGE_SIZE,
291 				       &hda->iccmax_dmab, persistent_cl_buffer,
292 				       SNDRV_PCM_STREAM_CAPTURE, true);
293 	if (IS_ERR(iccmax_stream)) {
294 		dev_err(sdev->dev, "error: dma prepare for ICCMAX stream failed\n");
295 		return PTR_ERR(iccmax_stream);
296 	}
297 
298 	ret = hda_dsp_cl_boot_firmware(sdev);
299 
300 	/*
301 	 * Perform ICCMAX stream cleanup. This should be done even if firmware loading fails.
302 	 * If the cleanup also fails, we return the initial error.
303 	 */
304 	ret1 = hda_cl_cleanup(sdev->dev, &hda->iccmax_dmab,
305 			      persistent_cl_buffer, iccmax_stream);
306 	if (ret1 < 0) {
307 		dev_err(sdev->dev, "error: ICCMAX stream cleanup failed\n");
308 
309 		/* set return value to indicate cleanup failure */
310 		if (!ret)
311 			ret = ret1;
312 	}
313 
314 	/* restore the original guardband value after FW boot */
315 	snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_LTRP,
316 			    HDA_VS_INTEL_LTRP_GB_MASK, original_gb);
317 
318 	return ret;
319 }
320 EXPORT_SYMBOL_NS(hda_dsp_cl_boot_firmware_iccmax, "SND_SOC_SOF_INTEL_CNL");
321 
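/*
 * Boot the firmware directly from IMR (no code loader DMA involved) by
 * running the chip-specific cl_init callback with imr_boot set; the stream
 * tag is ignored in this case. SoundWire WAKEEN events are processed once
 * the boot succeeds.
 */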
322 static int hda_dsp_boot_imr(struct snd_sof_dev *sdev)
323 {
324 	const struct sof_intel_dsp_desc *chip_info;
325 	int ret;
326 
327 	chip_info = get_chip_info(sdev->pdata);
328 	if (chip_info->cl_init)
329 		ret = chip_info->cl_init(sdev, 0, true);
330 	else
331 		ret = -EINVAL;
332 
333 	if (!ret)
334 		hda_sdw_process_wakeen(sdev);
335 
336 	return ret;
337 }
338 
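/*
 * Main firmware boot flow: try an IMR boot first when supported, otherwise
 * prepare the code loader stream, copy the stripped firmware image into the
 * DMA buffer (unless the persistent buffer already holds it), run the ROM
 * init sequence (retrying up to HDA_FW_BOOT_ATTEMPTS times), transfer the
 * firmware and clean up the stream. Returns the init core mask on success.
 */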
339 int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev)
340 {
341 	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
342 	struct snd_sof_pdata *plat_data = sdev->pdata;
343 	const struct sof_dev_desc *desc = plat_data->desc;
344 	const struct sof_intel_dsp_desc *chip_info;
345 	struct hdac_ext_stream *hext_stream;
346 	struct firmware stripped_firmware;
347 	int ret, ret1, i;
348 
349 	if (hda->imrboot_supported && !sdev->first_boot && !hda->skip_imr_boot) {
350 		dev_dbg(sdev->dev, "IMR restore supported, booting from IMR directly\n");
351 		hda->boot_iteration = 0;
352 		ret = hda_dsp_boot_imr(sdev);
353 		if (!ret) {
354 			hda->booted_from_imr = true;
355 			return 0;
356 		}
357 
358 		dev_warn(sdev->dev, "IMR restore failed, trying to cold boot\n");
359 	}
360 
361 	hda->booted_from_imr = false;
362 
363 	chip_info = desc->chip_info;
364 
365 	if (sdev->basefw.fw->size <= sdev->basefw.payload_offset) {
366 		dev_err(sdev->dev, "error: firmware size must be greater than firmware offset\n");
367 		return -EINVAL;
368 	}
369 
370 	/* initialize the wait queue used during firmware boot */
371 	init_waitqueue_head(&sdev->boot_wait);
372 
373 	/* prepare DMA for code loader stream */
374 	stripped_firmware.size = sdev->basefw.fw->size - sdev->basefw.payload_offset;
375 	hext_stream = hda_cl_prepare(sdev->dev, HDA_CL_STREAM_FORMAT,
376 				     stripped_firmware.size,
377 				     &hda->cl_dmab, persistent_cl_buffer,
378 				     SNDRV_PCM_STREAM_PLAYBACK, false);
379 	if (IS_ERR(hext_stream)) {
380 		dev_err(sdev->dev, "error: dma prepare for fw loading failed\n");
381 		return PTR_ERR(hext_stream);
382 	}
383 
384 	/*
385 	 * Copy the payload to the DMA buffer if it is temporary, or if the
386 	 * buffer is persistent but does not yet contain the basefw payload,
387 	 * either because this is the first boot and the buffer needs to be
388 	 * initialized, or because a library was loaded and replaced the basefw.
389 	 */
390 	if (!persistent_cl_buffer || !hda->cl_dmab_contains_basefw) {
391 		stripped_firmware.data = sdev->basefw.fw->data + sdev->basefw.payload_offset;
392 		memcpy(hda->cl_dmab.area, stripped_firmware.data, stripped_firmware.size);
393 		hda->cl_dmab_contains_basefw = true;
394 	}
395 
396 	/* try ROM init a few times before giving up */
397 	for (i = 0; i < HDA_FW_BOOT_ATTEMPTS; i++) {
398 		dev_dbg(sdev->dev,
399 			"Attempting iteration %d of Core En/ROM load...\n", i);
400 
401 		hda->boot_iteration = i + 1;
402 		if (chip_info->cl_init)
403 			ret = chip_info->cl_init(sdev, hext_stream->hstream.stream_tag, false);
404 		else
405 			ret = -EINVAL;
406 
407 		/* don't retry anymore if successful */
408 		if (!ret)
409 			break;
410 	}
411 
412 	if (i == HDA_FW_BOOT_ATTEMPTS) {
413 		dev_err(sdev->dev, "error: dsp init failed after %d attempts with err: %d\n",
414 			i, ret);
415 		goto cleanup;
416 	}
417 
418 	/*
419 	 * When a SoundWire link is in clock stop state, a Slave
420 	 * device may trigger in-band wakes for events such as jack
421 	 * insertion or acoustic event detection. This event will lead
422 	 * to a WAKEEN interrupt, handled by the PCI device and routed
423 	 * to PME if the PCI device is in D3. The resume function of the
424 	 * audio PCI driver will be invoked by ACPI for the PME event; it
425 	 * initializes the device and processes the WAKEEN interrupt.
426 	 *
427 	 * The WAKEEN interrupt should be processed ASAP to prevent an
428 	 * interrupt flood, otherwise other interrupts, such as IPC,
429 	 * cannot work normally. The WAKEEN interrupt is handled after the
430 	 * ROM is initialized successfully, which ensures the power rails are
431 	 * enabled before accessing the SoundWire SHIM registers.
432 	 */
433 	if (!sdev->first_boot)
434 		hda_sdw_process_wakeen(sdev);
435 
436 	/*
437 	 * Set the boot_iteration to the last attempt, indicating that the
438 	 * DSP ROM has been initialized and that no further boot retries
439 	 * will be attempted.
440 	 *
441 	 * Continue with code loading and firmware boot.
442 	 */
443 	hda->boot_iteration = HDA_FW_BOOT_ATTEMPTS;
444 	ret = hda_cl_copy_fw(sdev, hext_stream);
445 	if (!ret) {
446 		dev_dbg(sdev->dev, "Firmware download successful, booting...\n");
447 		hda->skip_imr_boot = false;
448 	} else {
449 		snd_sof_dsp_dbg_dump(sdev, "Firmware download failed",
450 				     SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX);
451 		hda->skip_imr_boot = true;
452 	}
453 
454 cleanup:
455 	/*
456 	 * Perform code loader stream cleanup.
457 	 * This should be done even if firmware loading fails.
458 	 * If the cleanup also fails, we return the initial error.
459 	 */
460 	ret1 = hda_cl_cleanup(sdev->dev, &hda->cl_dmab,
461 			      persistent_cl_buffer, hext_stream);
462 	if (ret1 < 0) {
463 		dev_err(sdev->dev, "error: Code loader DSP cleanup failed\n");
464 
465 		/* set return value to indicate cleanup failure */
466 		if (!ret)
467 			ret = ret1;
468 	}
469 
470 	/*
471 	 * return the init core mask if both the fw copy
472 	 * and the stream cleanup are successful
473 	 */
474 	if (!ret)
475 		return chip_info->init_core_mask;
476 
477 	/* disable DSP */
478 	hda_dsp_ctrl_ppcap_enable(sdev, false);
479 
480 	return ret;
481 }
482 EXPORT_SYMBOL_NS(hda_dsp_cl_boot_firmware, "SND_SOC_SOF_INTEL_HDA_COMMON");
483 
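/*
 * Load an IPC4 loadable library through the code loader stream. The
 * two-stage sequence (LOAD_LIBRARY_PREPARE followed by LOAD_LIBRARY) is used
 * when the firmware supports it; if the prepare message is rejected with
 * -EOPNOTSUPP, single-step loading with LOAD_LIBRARY alone is used.
 */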
484 int hda_dsp_ipc4_load_library(struct snd_sof_dev *sdev,
485 			      struct sof_ipc4_fw_library *fw_lib, bool reload)
486 {
487 	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
488 	struct sof_ipc4_fw_data *ipc4_data = sdev->private;
489 	struct hdac_ext_stream *hext_stream;
490 	struct firmware stripped_firmware;
491 	struct sof_ipc4_msg msg = {};
492 	int ret, ret1;
493 
494 	/*
495 	 * if IMR booting is enabled and libraries have been restored during fw
496 	 * boot, skip the loading
497 	 */
498 	if (reload && hda->booted_from_imr && ipc4_data->libraries_restored)
499 		return 0;
500 
501 	/* the fw_lib has been verified during loading, so we can trust its validity here */
502 	stripped_firmware.data = fw_lib->sof_fw.fw->data + fw_lib->sof_fw.payload_offset;
503 	stripped_firmware.size = fw_lib->sof_fw.fw->size - fw_lib->sof_fw.payload_offset;
504 
505 	/*
506 	 * force re-allocation of the cl_dmab if the preserved DMA buffer is
507 	 * smaller than what is needed for the library
508 	 */
509 	if (persistent_cl_buffer && stripped_firmware.size > hda->cl_dmab.bytes) {
510 		snd_dma_free_pages(&hda->cl_dmab);
511 		hda->cl_dmab.area = NULL;
512 		hda->cl_dmab.bytes = 0;
513 	}
514 
515 	/* prepare DMA for code loader stream */
516 	hext_stream = hda_cl_prepare(sdev->dev, HDA_CL_STREAM_FORMAT,
517 				     stripped_firmware.size,
518 				     &hda->cl_dmab, persistent_cl_buffer,
519 				     SNDRV_PCM_STREAM_PLAYBACK, false);
520 	if (IS_ERR(hext_stream)) {
521 		dev_err(sdev->dev, "%s: DMA prepare failed\n", __func__);
522 		return PTR_ERR(hext_stream);
523 	}
524 
525 	memcpy(hda->cl_dmab.area, stripped_firmware.data, stripped_firmware.size);
526 	hda->cl_dmab_contains_basefw = false;
527 
528 	/*
529 	 * 1st stage: SOF_IPC4_GLB_LOAD_LIBRARY_PREPARE
530 	 * Message includes the dma_id to be prepared for the library loading.
531 	 * If the firmware does not have support for the message, we will
532 	 * receive -EOPNOTSUPP. In this case we will use single step library
533 	 * loading and proceed to send the LOAD_LIBRARY message.
534 	 */
535 	msg.primary = hext_stream->hstream.stream_tag - 1;
536 	msg.primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_LOAD_LIBRARY_PREPARE);
537 	msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
538 	msg.primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_FW_GEN_MSG);
539 	ret = sof_ipc_tx_message_no_reply(sdev->ipc, &msg, 0);
540 	if (!ret) {
541 		int sd_offset = SOF_STREAM_SD_OFFSET(&hext_stream->hstream);
542 		unsigned int status;
543 
544 		/*
545 		 * Make sure that the FIFOS value in the SDxFIFOS register is not 0,
546 		 * which indicates that the firmware has set the GEN bit and we can
547 		 * continue to start the DMA.
548 		 */
549 		ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_HDA_BAR,
550 					sd_offset + SOF_HDA_ADSP_REG_SD_FIFOSIZE,
551 					status,
552 					status & SOF_HDA_SD_FIFOSIZE_FIFOS_MASK,
553 					HDA_DSP_REG_POLL_INTERVAL_US,
554 					HDA_DSP_BASEFW_TIMEOUT_US);
555 
556 		if (ret < 0)
557 			dev_warn(sdev->dev,
558 				 "%s: timeout waiting for FIFOS\n", __func__);
559 	} else if (ret != -EOPNOTSUPP) {
560 		goto cleanup;
561 	}
562 
563 	ret = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_START);
564 	if (ret < 0) {
565 		dev_err(sdev->dev, "%s: DMA trigger start failed\n", __func__);
566 		goto cleanup;
567 	}
568 
569 	/*
570 	 * 2nd stage: LOAD_LIBRARY
571 	 * The message includes the dma_id and the lib_id; the dma_id must be
572 	 * identical to the one sent via LOAD_LIBRARY_PREPARE.
573 	 */
574 	msg.primary &= ~SOF_IPC4_MSG_TYPE_MASK;
575 	msg.primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_LOAD_LIBRARY);
576 	msg.primary |= SOF_IPC4_GLB_LOAD_LIBRARY_LIB_ID(fw_lib->id);
577 	ret = sof_ipc_tx_message_no_reply(sdev->ipc, &msg, 0);
578 
579 	/* Stop the DMA channel */
580 	ret1 = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_STOP);
581 	if (ret1 < 0) {
582 		dev_err(sdev->dev, "%s: DMA trigger stop failed\n", __func__);
583 		if (!ret)
584 			ret = ret1;
585 	}
586 
587 cleanup:
588 	/* clean up even in case of error and return the first error */
589 	ret1 = hda_cl_cleanup(sdev->dev, &hda->cl_dmab, persistent_cl_buffer,
590 			      hext_stream);
591 	if (ret1 < 0) {
592 		dev_err(sdev->dev, "%s: Code loader DSP cleanup failed\n", __func__);
593 
594 		/* set return value to indicate cleanup failure */
595 		if (!ret)
596 			ret = ret1;
597 	}
598 
599 	return ret;
600 }
601 EXPORT_SYMBOL_NS(hda_dsp_ipc4_load_library, "SND_SOC_SOF_INTEL_HDA_COMMON");
602 
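/*
 * Parse a cAVS config data element from the extended manifest and record
 * the values the driver cares about (currently only the LPRO/HPRO clock
 * configuration); unknown tokens are reported but not treated as errors.
 */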
603 int hda_dsp_ext_man_get_cavs_config_data(struct snd_sof_dev *sdev,
604 					 const struct sof_ext_man_elem_header *hdr)
605 {
606 	const struct sof_ext_man_cavs_config_data *config_data =
607 		container_of(hdr, struct sof_ext_man_cavs_config_data, hdr);
608 	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
609 	int i, elem_num;
610 
611 	/* calculate total number of config data elements */
612 	elem_num = (hdr->size - sizeof(struct sof_ext_man_elem_header))
613 		   / sizeof(struct sof_config_elem);
614 	if (elem_num <= 0) {
615 		dev_err(sdev->dev, "cavs config data is inconsistent: %d\n", elem_num);
616 		return -EINVAL;
617 	}
618 
619 	for (i = 0; i < elem_num; i++)
620 		switch (config_data->elems[i].token) {
621 		case SOF_EXT_MAN_CAVS_CONFIG_EMPTY:
622 			/* skip empty token */
623 			break;
624 		case SOF_EXT_MAN_CAVS_CONFIG_CAVS_LPRO:
625 			hda->clk_config_lpro = config_data->elems[i].value;
626 			dev_dbg(sdev->dev, "FW clock config: %s\n",
627 				hda->clk_config_lpro ? "LPRO" : "HPRO");
628 			break;
629 		case SOF_EXT_MAN_CAVS_CONFIG_OUTBOX_SIZE:
630 		case SOF_EXT_MAN_CAVS_CONFIG_INBOX_SIZE:
631 			/* These elements are defined but not used yet. No warning is required */
632 			break;
633 		default:
634 			dev_info(sdev->dev, "unsupported token type: %d\n",
635 				 config_data->elems[i].token);
636 		}
637 
638 	return 0;
639 }
640 EXPORT_SYMBOL_NS(hda_dsp_ext_man_get_cavs_config_data, "SND_SOC_SOF_INTEL_HDA_COMMON");
641