// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation
//
// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//	    Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
//	    Rander Wang <rander.wang@intel.com>
//	    Keyon Jie <yang.jie@linux.intel.com>
//

/*
 * Hardware interface for HDA DSP code loader
 */

#include <linux/firmware.h>
#include <sound/hdaudio_ext.h>
#include <sound/hda_register.h>
#include <sound/sof.h>
#include <sound/sof/ipc4/header.h>
#include "ext_manifest.h"
#include "../ipc4-priv.h"
#include "../ops.h"
#include "../sof-priv.h"
#include "hda.h"
static void hda_ssp_set_cbp_cfp(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	int i;

	/* DSP is powered up, set all SSPs to clock consumer/codec provider mode */
	for (i = 0; i < chip->ssp_count; i++) {
		snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
						 chip->ssp_base_offset
						 + i * SSP_DEV_MEM_SIZE
						 + SSP_SSC1_OFFSET,
						 SSP_SET_CBP_CFP,
						 SSP_SET_CBP_CFP);
	}
}

struct hdac_ext_stream *hda_cl_prepare(struct device *dev, unsigned int format,
				       unsigned int size, struct snd_dma_buffer *dmab,
				       int direction, bool is_iccmax)
{
	struct snd_sof_dev *sdev = dev_get_drvdata(dev);
	struct hdac_ext_stream *hext_stream;
	struct hdac_stream *hstream;
	int ret;

	hext_stream = hda_dsp_stream_get(sdev, direction, 0);

	if (!hext_stream) {
		dev_err(sdev->dev, "error: no stream available\n");
		return ERR_PTR(-ENODEV);
	}
	hstream = &hext_stream->hstream;
	hstream->substream = NULL;

	/* allocate DMA buffer */
	ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, dev, size, dmab);
	if (ret < 0) {
		dev_err(sdev->dev, "error: memory alloc failed: %d\n", ret);
		goto out_put;
	}

	hstream->period_bytes = 0;	/* initialize period_bytes */
	hstream->format_val = format;
	hstream->bufsize = size;

	if (is_iccmax) {
		ret = hda_dsp_iccmax_stream_hw_params(sdev, hext_stream, dmab, NULL);
		if (ret < 0) {
			dev_err(sdev->dev, "error: iccmax stream prepare failed: %d\n", ret);
			goto out_free;
		}
	} else {
		ret = hda_dsp_stream_hw_params(sdev, hext_stream, dmab, NULL);
		if (ret < 0) {
			dev_err(sdev->dev, "error: hdac prepare failed: %d\n", ret);
			goto out_free;
		}
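		/* enable SPIB (software position in buffer) and set it to the buffer size */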
		hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_ENABLE, size);
	}

	return hext_stream;

out_free:
	snd_dma_free_pages(dmab);
out_put:
	hda_dsp_stream_put(sdev, direction, hstream->stream_tag);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS(hda_cl_prepare, SND_SOC_SOF_INTEL_HDA_COMMON);

/*
 * The first boot sequence has some extra steps:
 * power up all host-managed cores, unstall/run only the boot core to boot
 * the DSP, then power down any non-boot cores that were powered up.
 */
int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	unsigned int status, target_status;
	u32 flags, ipc_hdr, j;
	unsigned long mask;
	char *dump_msg;
	int ret;

	/* step 1: power up corex */
	ret = hda_dsp_core_power_up(sdev, chip->host_managed_cores_mask);
	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev, "error: dsp core 0/1 power up failed\n");
		goto err;
	}

	hda_ssp_set_cbp_cfp(sdev);

	/* step 2: Send ROM_CONTROL command (stream_tag is ignored for IMR boot) */
	ipc_hdr = chip->ipc_req_mask | HDA_DSP_ROM_IPC_CONTROL;
	if (!imr_boot)
		ipc_hdr |= HDA_DSP_ROM_IPC_PURGE_FW | ((stream_tag - 1) << 9);

	snd_sof_dsp_write(sdev, HDA_DSP_BAR, chip->ipc_req, ipc_hdr);

	/* step 3: unset core 0 reset state & unstall/run core 0 */
	ret = hda_dsp_core_run(sdev, chip->init_core_mask);
	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev,
				"error: dsp core start failed %d\n", ret);
		ret = -EIO;
		goto err;
	}

	/* step 4: wait for IPC DONE bit from ROM */
	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					    chip->ipc_ack, status,
					    ((status & chip->ipc_ack_mask)
						    == chip->ipc_ack_mask),
					    HDA_DSP_REG_POLL_INTERVAL_US,
					    HDA_DSP_INIT_TIMEOUT_US);

	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev,
				"error: %s: timeout for HIPCIE done\n",
				__func__);
		goto err;
	}

	/* set DONE bit to clear the reply IPC message */
	snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
				       chip->ipc_ack,
				       chip->ipc_ack_mask,
				       chip->ipc_ack_mask);

	/* step 5: power down cores that are no longer needed */
	ret = hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask &
					    ~(chip->init_core_mask));
	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev,
				"error: dsp core x power down failed\n");
		goto err;
	}

	/* step 6: enable IPC interrupts */
	hda_dsp_ipc_int_enable(sdev);

	/*
	 * step 7:
	 * - Cold/Full boot: wait for ROM init to proceed to download the firmware
	 * - IMR boot: wait for ROM firmware entered (firmware booted up from IMR)
	 */
	if (imr_boot)
		target_status = FSR_STATE_FW_ENTERED;
	else
		target_status = FSR_STATE_INIT_DONE;

	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					    chip->rom_status_reg, status,
					    (FSR_TO_STATE_CODE(status) == target_status),
					    HDA_DSP_REG_POLL_INTERVAL_US,
					    chip->rom_init_timeout *
					    USEC_PER_MSEC);
	if (!ret) {
		/* set enabled cores mask and increment ref count for cores in init_core_mask */
		sdev->enabled_cores_mask |= chip->init_core_mask;
		mask = sdev->enabled_cores_mask;
		for_each_set_bit(j, &mask, SOF_MAX_DSP_NUM_CORES)
			sdev->dsp_core_ref_count[j]++;
		return 0;
	}

	if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
		dev_err(sdev->dev,
			"%s: timeout with rom_status_reg (%#x) read\n",
			__func__, chip->rom_status_reg);

err:
	flags = SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX | SOF_DBG_DUMP_OPTIONAL;

	/* after max boot attempts make sure that the dump is printed */
	if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
		flags &= ~SOF_DBG_DUMP_OPTIONAL;

	dump_msg = kasprintf(GFP_KERNEL, "Boot iteration failed: %d/%d",
			     hda->boot_iteration, HDA_FW_BOOT_ATTEMPTS);
	snd_sof_dsp_dbg_dump(sdev, dump_msg, flags);
	hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask);

	kfree(dump_msg);
	return ret;
}
EXPORT_SYMBOL_NS(cl_dsp_init, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_cl_trigger(struct device *dev, struct hdac_ext_stream *hext_stream, int cmd)
{
	struct snd_sof_dev *sdev = dev_get_drvdata(dev);
	struct hdac_stream *hstream = &hext_stream->hstream;
	int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
	struct sof_intel_hda_stream *hda_stream;

	/* the code loader is a special case that reuses stream ops */
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		hda_stream = container_of(hext_stream, struct sof_intel_hda_stream,
					  hext_stream);
		reinit_completion(&hda_stream->ioc);

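		/* enable this stream's interrupt, then set the RUN bit and SD interrupt enables to start the DMA */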
		snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
					1 << hstream->index,
					1 << hstream->index);

		snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
					sd_offset,
					SOF_HDA_SD_CTL_DMA_START |
					SOF_HDA_CL_DMA_SD_INT_MASK,
					SOF_HDA_SD_CTL_DMA_START |
					SOF_HDA_CL_DMA_SD_INT_MASK);

		hstream->running = true;
		return 0;
	default:
		return hda_dsp_stream_trigger(sdev, hext_stream, cmd);
	}
}
EXPORT_SYMBOL_NS(hda_cl_trigger, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_cl_cleanup(struct device *dev, struct snd_dma_buffer *dmab,
		   struct hdac_ext_stream *hext_stream)
{
	struct snd_sof_dev *sdev = dev_get_drvdata(dev);
	struct hdac_stream *hstream = &hext_stream->hstream;
	int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
	int ret = 0;

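	/* playback (code loading) streams disable SPIB; capture (ICCMAX) streams just clear the DMA start bit */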
	if (hstream->direction == SNDRV_PCM_STREAM_PLAYBACK)
		ret = hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_DISABLE, 0);
	else
		snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
					SOF_HDA_SD_CTL_DMA_START, 0);

	hda_dsp_stream_put(sdev, hstream->direction, hstream->stream_tag);
	hstream->running = 0;
	hstream->substream = NULL;

	/* reset BDL address */
	snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
			  sd_offset + SOF_HDA_ADSP_REG_SD_BDLPL, 0);
	snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
			  sd_offset + SOF_HDA_ADSP_REG_SD_BDLPU, 0);

	snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, sd_offset, 0);
	snd_dma_free_pages(dmab);
	dmab->area = NULL;
	hstream->bufsize = 0;
	hstream->format_val = 0;

	return ret;
}
EXPORT_SYMBOL_NS(hda_cl_cleanup, SND_SOC_SOF_INTEL_HDA_COMMON);

#define HDA_CL_DMA_IOC_TIMEOUT_MS 500

int hda_cl_copy_fw(struct snd_sof_dev *sdev, struct hdac_ext_stream *hext_stream)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	unsigned int reg;
	int ret, status;

	dev_dbg(sdev->dev, "Code loader DMA starting\n");

	ret = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_START);
	if (ret < 0) {
		dev_err(sdev->dev, "error: DMA trigger start failed\n");
		return ret;
	}

	dev_dbg(sdev->dev, "waiting for FW_ENTERED status\n");

	status = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					       chip->rom_status_reg, reg,
					       (FSR_TO_STATE_CODE(reg) == FSR_STATE_FW_ENTERED),
					       HDA_DSP_REG_POLL_INTERVAL_US,
					       HDA_DSP_BASEFW_TIMEOUT_US);

	/*
	 * even in case of errors we still need to stop the DMAs,
	 * but we return the initial error should the DMA stop also fail
	 */

	if (status < 0) {
		dev_err(sdev->dev,
			"%s: timeout with rom_status_reg (%#x) read\n",
			__func__, chip->rom_status_reg);
	} else {
		dev_dbg(sdev->dev, "Code loader FW_ENTERED status\n");
	}

	ret = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_STOP);
	if (ret < 0) {
		dev_err(sdev->dev, "error: DMA trigger stop failed\n");
		if (!status)
			status = ret;
	} else {
		dev_dbg(sdev->dev, "Code loader DMA stopped\n");
	}

	return status;
}

int hda_dsp_cl_boot_firmware_iccmax(struct snd_sof_dev *sdev)
{
	struct hdac_ext_stream *iccmax_stream;
	struct snd_dma_buffer dmab_bdl;
	int ret, ret1;
	u8 original_gb;

	/* save the original LTRP guardband value */
	original_gb = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_LTRP) &
		      HDA_VS_INTEL_LTRP_GB_MASK;

	/*
	 * Prepare capture stream for ICCMAX. We do not need to store
	 * the data, so use a buffer of PAGE_SIZE for receiving.
	 */
	iccmax_stream = hda_cl_prepare(sdev->dev, HDA_CL_STREAM_FORMAT, PAGE_SIZE,
				       &dmab_bdl, SNDRV_PCM_STREAM_CAPTURE, true);
	if (IS_ERR(iccmax_stream)) {
		dev_err(sdev->dev, "error: dma prepare for ICCMAX stream failed\n");
		return PTR_ERR(iccmax_stream);
	}

	ret = hda_dsp_cl_boot_firmware(sdev);

	/*
	 * Perform iccmax stream cleanup. This should be done even if firmware loading fails.
	 * If the cleanup also fails, we return the initial error
	 */
	ret1 = hda_cl_cleanup(sdev->dev, &dmab_bdl, iccmax_stream);
	if (ret1 < 0) {
		dev_err(sdev->dev, "error: ICCMAX stream cleanup failed\n");

		/* set return value to indicate cleanup failure */
		if (!ret)
			ret = ret1;
	}

	/* restore the original guardband value after FW boot */
	snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_LTRP,
			    HDA_VS_INTEL_LTRP_GB_MASK, original_gb);

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_cl_boot_firmware_iccmax, SND_SOC_SOF_INTEL_CNL);

static int hda_dsp_boot_imr(struct snd_sof_dev *sdev)
{
	const struct sof_intel_dsp_desc *chip_info;
	int ret;

	chip_info = get_chip_info(sdev->pdata);
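	/* the stream tag is ignored for IMR boot, so pass 0 and set imr_boot to true */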
	if (chip_info->cl_init)
		ret = chip_info->cl_init(sdev, 0, true);
	else
		ret = -EINVAL;

	if (!ret)
		hda_sdw_process_wakeen(sdev);

	return ret;
}

int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct snd_sof_pdata *plat_data = sdev->pdata;
	const struct sof_dev_desc *desc = plat_data->desc;
	const struct sof_intel_dsp_desc *chip_info;
	struct hdac_ext_stream *hext_stream;
	struct firmware stripped_firmware;
	struct snd_dma_buffer dmab;
	int ret, ret1, i;

	if (hda->imrboot_supported && !sdev->first_boot && !hda->skip_imr_boot) {
		dev_dbg(sdev->dev, "IMR restore supported, booting from IMR directly\n");
		hda->boot_iteration = 0;
		ret = hda_dsp_boot_imr(sdev);
		if (!ret) {
			hda->booted_from_imr = true;
			return 0;
		}

		dev_warn(sdev->dev, "IMR restore failed, trying to cold boot\n");
	}

	hda->booted_from_imr = false;

	chip_info = desc->chip_info;

	if (sdev->basefw.fw->size <= sdev->basefw.payload_offset) {
		dev_err(sdev->dev, "error: firmware size must be greater than firmware offset\n");
		return -EINVAL;
	}

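	/* copy only the payload past payload_offset; the manifest headers are not DMA'd to the DSP */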
	stripped_firmware.data = sdev->basefw.fw->data + sdev->basefw.payload_offset;
	stripped_firmware.size = sdev->basefw.fw->size - sdev->basefw.payload_offset;

	/* init for booting wait */
	init_waitqueue_head(&sdev->boot_wait);

	/* prepare DMA for code loader stream */
	hext_stream = hda_cl_prepare(sdev->dev, HDA_CL_STREAM_FORMAT,
				     stripped_firmware.size,
				     &dmab, SNDRV_PCM_STREAM_PLAYBACK, false);
	if (IS_ERR(hext_stream)) {
		dev_err(sdev->dev, "error: dma prepare for fw loading failed\n");
		return PTR_ERR(hext_stream);
	}

	memcpy(dmab.area, stripped_firmware.data,
	       stripped_firmware.size);

	/* try ROM init a few times before giving up */
	for (i = 0; i < HDA_FW_BOOT_ATTEMPTS; i++) {
		dev_dbg(sdev->dev,
			"Attempting iteration %d of Core En/ROM load...\n", i);

		hda->boot_iteration = i + 1;
		if (chip_info->cl_init)
			ret = chip_info->cl_init(sdev, hext_stream->hstream.stream_tag, false);
		else
			ret = -EINVAL;

		/* don't retry anymore if successful */
		if (!ret)
			break;
	}

	if (i == HDA_FW_BOOT_ATTEMPTS) {
		dev_err(sdev->dev, "error: dsp init failed after %d attempts with err: %d\n",
			i, ret);
		goto cleanup;
	}

	/*
	 * When a SoundWire link is in clock stop state, a Slave
	 * device may trigger in-band wakes for events such as jack
	 * insertion or acoustic event detection. This event will lead
	 * to a WAKEEN interrupt, handled by the PCI device and routed
	 * to PME if the PCI device is in D3. The resume function in
	 * the audio PCI driver will be invoked by ACPI for the PME event
	 * and will initialize the device and process the WAKEEN interrupt.
	 *
	 * The WAKEEN interrupt should be processed ASAP to prevent an
	 * interrupt flood, otherwise other interrupts, such as IPC,
	 * cannot work normally. The WAKEEN is handled after the ROM
	 * is initialized successfully, which ensures power rails are
	 * enabled before accessing the SoundWire SHIM registers.
	 */
	if (!sdev->first_boot)
		hda_sdw_process_wakeen(sdev);

	/*
	 * Set boot_iteration to the last attempt, indicating that the
	 * DSP ROM has been initialized and that no further boot retries
	 * will be attempted from this point.
	 *
	 * Continue with code loading and firmware boot
	 */
	hda->boot_iteration = HDA_FW_BOOT_ATTEMPTS;
	ret = hda_cl_copy_fw(sdev, hext_stream);
	if (!ret) {
		dev_dbg(sdev->dev, "Firmware download successful, booting...\n");
		hda->skip_imr_boot = false;
	} else {
		snd_sof_dsp_dbg_dump(sdev, "Firmware download failed",
				     SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX);
		hda->skip_imr_boot = true;
	}

cleanup:
	/*
	 * Perform code loader stream cleanup.
	 * This should be done even if firmware loading fails.
	 * If the cleanup also fails, we return the initial error
	 */
	ret1 = hda_cl_cleanup(sdev->dev, &dmab, hext_stream);
	if (ret1 < 0) {
		dev_err(sdev->dev, "error: Code loader DSP cleanup failed\n");

		/* set return value to indicate cleanup failure */
		if (!ret)
			ret = ret1;
	}

	/*
	 * return the init core mask if both the fw copy
	 * and the stream cleanup are successful
	 */
	if (!ret)
		return chip_info->init_core_mask;

	/* disable DSP */
	hda_dsp_ctrl_ppcap_enable(sdev, false);

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_cl_boot_firmware, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_dsp_ipc4_load_library(struct snd_sof_dev *sdev,
			      struct sof_ipc4_fw_library *fw_lib, bool reload)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct sof_ipc4_fw_data *ipc4_data = sdev->private;
	struct hdac_ext_stream *hext_stream;
	struct firmware stripped_firmware;
	struct sof_ipc4_msg msg = {};
	struct snd_dma_buffer dmab;
	int ret, ret1;

	/* if IMR booting is enabled and fw context is saved for D3 state, skip the loading */
	if (reload && hda->booted_from_imr && ipc4_data->fw_context_save)
		return 0;

	/* the fw_lib has been verified during loading, we can trust the validity here */
	stripped_firmware.data = fw_lib->sof_fw.fw->data + fw_lib->sof_fw.payload_offset;
	stripped_firmware.size = fw_lib->sof_fw.fw->size - fw_lib->sof_fw.payload_offset;

	/* prepare DMA for code loader stream */
	hext_stream = hda_cl_prepare(sdev->dev, HDA_CL_STREAM_FORMAT,
				     stripped_firmware.size,
				     &dmab, SNDRV_PCM_STREAM_PLAYBACK, false);
	if (IS_ERR(hext_stream)) {
		dev_err(sdev->dev, "%s: DMA prepare failed\n", __func__);
		return PTR_ERR(hext_stream);
	}

	memcpy(dmab.area, stripped_firmware.data, stripped_firmware.size);

	/*
	 * 1st stage: SOF_IPC4_GLB_LOAD_LIBRARY_PREPARE
	 * Message includes the dma_id to be prepared for the library loading.
	 * If the firmware does not have support for the message, we will
	 * receive -EOPNOTSUPP. In this case we will use single step library
	 * loading and proceed to send the LOAD_LIBRARY message.
	 */
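	/* dma_id in the message primary header is the zero-based host stream tag */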
	msg.primary = hext_stream->hstream.stream_tag - 1;
	msg.primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_LOAD_LIBRARY_PREPARE);
	msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
	msg.primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_FW_GEN_MSG);
	ret = sof_ipc_tx_message_no_reply(sdev->ipc, &msg, 0);
	if (!ret) {
		int sd_offset = SOF_STREAM_SD_OFFSET(&hext_stream->hstream);
		unsigned int status;

		/*
		 * Make sure that the FIFOS value in the SDxFIFOS register is
		 * not 0, which indicates that the firmware has set the GEN bit
		 * and we can continue to start the DMA
		 */
		ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_HDA_BAR,
						    sd_offset + SOF_HDA_ADSP_REG_SD_FIFOSIZE,
						    status,
						    status & SOF_HDA_SD_FIFOSIZE_FIFOS_MASK,
						    HDA_DSP_REG_POLL_INTERVAL_US,
						    HDA_DSP_BASEFW_TIMEOUT_US);

		if (ret < 0)
			dev_warn(sdev->dev,
				 "%s: timeout waiting for FIFOS\n", __func__);
	} else if (ret != -EOPNOTSUPP) {
		goto cleanup;
	}

	ret = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_START);
	if (ret < 0) {
		dev_err(sdev->dev, "%s: DMA trigger start failed\n", __func__);
		goto cleanup;
	}

	/*
	 * 2nd stage: LOAD_LIBRARY
	 * Message includes the dma_id and the lib_id, the dma_id must be
	 * identical to the one sent via LOAD_LIBRARY_PREPARE
	 */
	msg.primary &= ~SOF_IPC4_MSG_TYPE_MASK;
	msg.primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_LOAD_LIBRARY);
	msg.primary |= SOF_IPC4_GLB_LOAD_LIBRARY_LIB_ID(fw_lib->id);
	ret = sof_ipc_tx_message_no_reply(sdev->ipc, &msg, 0);

	/* Stop the DMA channel */
	ret1 = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_STOP);
	if (ret1 < 0) {
		dev_err(sdev->dev, "%s: DMA trigger stop failed\n", __func__);
		if (!ret)
			ret = ret1;
	}

cleanup:
	/* clean up even in case of error and return the first error */
	ret1 = hda_cl_cleanup(sdev->dev, &dmab, hext_stream);
	if (ret1 < 0) {
		dev_err(sdev->dev, "%s: Code loader DSP cleanup failed\n", __func__);

		/* set return value to indicate cleanup failure */
		if (!ret)
			ret = ret1;
	}

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_ipc4_load_library, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_dsp_ext_man_get_cavs_config_data(struct snd_sof_dev *sdev,
					 const struct sof_ext_man_elem_header *hdr)
{
	const struct sof_ext_man_cavs_config_data *config_data =
		container_of(hdr, struct sof_ext_man_cavs_config_data, hdr);
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	int i, elem_num;

	/* calculate total number of config data elements */
	elem_num = (hdr->size - sizeof(struct sof_ext_man_elem_header))
		   / sizeof(struct sof_config_elem);
	if (elem_num <= 0) {
		dev_err(sdev->dev, "cavs config data is inconsistent: %d\n", elem_num);
		return -EINVAL;
	}

	for (i = 0; i < elem_num; i++)
		switch (config_data->elems[i].token) {
		case SOF_EXT_MAN_CAVS_CONFIG_EMPTY:
			/* skip empty token */
			break;
		case SOF_EXT_MAN_CAVS_CONFIG_CAVS_LPRO:
			hda->clk_config_lpro = config_data->elems[i].value;
			dev_dbg(sdev->dev, "FW clock config: %s\n",
				hda->clk_config_lpro ? "LPRO" : "HPRO");
			break;
		case SOF_EXT_MAN_CAVS_CONFIG_OUTBOX_SIZE:
		case SOF_EXT_MAN_CAVS_CONFIG_INBOX_SIZE:
			/* these elements are defined but not used yet, no warning is needed */
			break;
		default:
			dev_info(sdev->dev, "unsupported token type: %d\n",
				 config_data->elems[i].token);
		}

	return 0;
}
EXPORT_SYMBOL_NS(hda_dsp_ext_man_get_cavs_config_data, SND_SOC_SOF_INTEL_HDA_COMMON);