xref: /linux/sound/soc/intel/avs/path.c (revision cfc4ca8986bb1f6182da6cd7bb57f228590b4643)
1 // SPDX-License-Identifier: GPL-2.0-only
2 //
3 // Copyright(c) 2021 Intel Corporation
4 //
5 // Authors: Cezary Rojewski <cezary.rojewski@intel.com>
6 //          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
7 //
8 
9 #include <linux/acpi.h>
10 #include <acpi/nhlt.h>
11 #include <sound/pcm_params.h>
12 #include <sound/soc.h>
13 #include "avs.h"
14 #include "control.h"
15 #include "path.h"
16 #include "topology.h"
17 
18 /* Must be called with adev->comp_list_mutex held. */
19 static struct avs_tplg *
20 avs_path_find_tplg(struct avs_dev *adev, const char *name)
21 {
22 	struct avs_soc_component *acomp;
23 
24 	list_for_each_entry(acomp, &adev->comp_list, node)
25 		if (!strcmp(acomp->tplg->name, name))
26 			return acomp->tplg;
27 	return NULL;
28 }
29 
30 static struct avs_path_module *
31 avs_path_find_module(struct avs_path_pipeline *ppl, u32 template_id)
32 {
33 	struct avs_path_module *mod;
34 
35 	list_for_each_entry(mod, &ppl->mod_list, node)
36 		if (mod->template->id == template_id)
37 			return mod;
38 	return NULL;
39 }
40 
41 static struct avs_path_pipeline *
42 avs_path_find_pipeline(struct avs_path *path, u32 template_id)
43 {
44 	struct avs_path_pipeline *ppl;
45 
46 	list_for_each_entry(ppl, &path->ppl_list, node)
47 		if (ppl->template->id == template_id)
48 			return ppl;
49 	return NULL;
50 }
51 
52 static struct avs_path *
53 avs_path_find_path(struct avs_dev *adev, const char *name, u32 template_id)
54 {
55 	struct avs_tplg_path_template *pos, *template = NULL;
56 	struct avs_tplg *tplg;
57 	struct avs_path *path;
58 
59 	tplg = avs_path_find_tplg(adev, name);
60 	if (!tplg)
61 		return NULL;
62 
63 	list_for_each_entry(pos, &tplg->path_tmpl_list, node) {
64 		if (pos->id == template_id) {
65 			template = pos;
66 			break;
67 		}
68 	}
69 	if (!template)
70 		return NULL;
71 
72 	spin_lock(&adev->path_list_lock);
73 	/* Only one variant of given path template may be instantiated at a time. */
74 	list_for_each_entry(path, &adev->path_list, node) {
75 		if (path->template->owner == template) {
76 			spin_unlock(&adev->path_list_lock);
77 			return path;
78 		}
79 	}
80 
81 	spin_unlock(&adev->path_list_lock);
82 	return NULL;
83 }
84 
85 static bool avs_test_hw_params(struct snd_pcm_hw_params *params,
86 			       struct avs_audio_format *fmt)
87 {
88 	return (params_rate(params) == fmt->sampling_freq &&
89 		params_channels(params) == fmt->num_channels &&
90 		params_physical_width(params) == fmt->bit_depth &&
91 		snd_pcm_hw_params_bits(params) == fmt->valid_bit_depth);
92 }
93 
94 static struct avs_tplg_path *
95 avs_path_find_variant(struct avs_dev *adev,
96 		      struct avs_tplg_path_template *template,
97 		      struct snd_pcm_hw_params *fe_params,
98 		      struct snd_pcm_hw_params *be_params)
99 {
100 	struct avs_tplg_path *variant;
101 
102 	list_for_each_entry(variant, &template->path_list, node) {
103 		dev_dbg(adev->dev, "check FE rate %d chn %d vbd %d bd %d\n",
104 			variant->fe_fmt->sampling_freq, variant->fe_fmt->num_channels,
105 			variant->fe_fmt->valid_bit_depth, variant->fe_fmt->bit_depth);
106 		dev_dbg(adev->dev, "check BE rate %d chn %d vbd %d bd %d\n",
107 			variant->be_fmt->sampling_freq, variant->be_fmt->num_channels,
108 			variant->be_fmt->valid_bit_depth, variant->be_fmt->bit_depth);
109 
110 		if (variant->fe_fmt && avs_test_hw_params(fe_params, variant->fe_fmt) &&
111 		    variant->be_fmt && avs_test_hw_params(be_params, variant->be_fmt))
112 			return variant;
113 	}
114 
115 	return NULL;
116 }
117 
118 static struct acpi_nhlt_config *
119 avs_nhlt_config_or_default(struct avs_dev *adev, struct avs_tplg_module *t);
120 
121 int avs_path_set_constraint(struct avs_dev *adev, struct avs_tplg_path_template *template,
122 			    struct snd_pcm_hw_constraint_list *rate_list,
123 			    struct snd_pcm_hw_constraint_list *channels_list,
124 			    struct snd_pcm_hw_constraint_list *sample_bits_list)
125 {
126 	struct avs_tplg_path *path_template;
127 	unsigned int *rlist, *clist, *slist;
128 	size_t i;
129 
130 	i = 0;
131 	list_for_each_entry(path_template, &template->path_list, node)
132 		i++;
133 
134 	rlist = kcalloc(i, sizeof(*rlist), GFP_KERNEL);
135 	clist = kcalloc(i, sizeof(*clist), GFP_KERNEL);
136 	slist = kcalloc(i, sizeof(*slist), GFP_KERNEL);
137 
138 	i = 0;
139 	list_for_each_entry(path_template, &template->path_list, node) {
140 		struct avs_tplg_pipeline *pipeline_template;
141 
142 		list_for_each_entry(pipeline_template, &path_template->ppl_list, node) {
143 			struct avs_tplg_module *module_template;
144 
145 			list_for_each_entry(module_template, &pipeline_template->mod_list, node) {
146 				const guid_t *type = &module_template->cfg_ext->type;
147 				struct acpi_nhlt_config *blob;
148 
149 				if (!guid_equal(type, &AVS_COPIER_MOD_UUID) &&
150 				    !guid_equal(type, &AVS_WOVHOSTM_MOD_UUID))
151 					continue;
152 
153 				switch (module_template->cfg_ext->copier.dma_type) {
154 				case AVS_DMA_DMIC_LINK_INPUT:
155 				case AVS_DMA_I2S_LINK_OUTPUT:
156 				case AVS_DMA_I2S_LINK_INPUT:
157 					break;
158 				default:
159 					continue;
160 				}
161 
162 				blob = avs_nhlt_config_or_default(adev, module_template);
163 				if (IS_ERR(blob))
164 					continue;
165 
166 				rlist[i] = path_template->fe_fmt->sampling_freq;
167 				clist[i] = path_template->fe_fmt->num_channels;
168 				slist[i] = path_template->fe_fmt->bit_depth;
169 				i++;
170 			}
171 		}
172 	}
173 
174 	if (i) {
175 		rate_list->count = i;
176 		rate_list->list = rlist;
177 		channels_list->count = i;
178 		channels_list->list = clist;
179 		sample_bits_list->count = i;
180 		sample_bits_list->list = slist;
181 	} else {
182 		kfree(rlist);
183 		kfree(clist);
184 		kfree(slist);
185 	}
186 
187 	return i;
188 }
189 
190 static void avs_init_node_id(union avs_connector_node_id *node_id,
191 			     struct avs_tplg_modcfg_ext *te, u32 dma_id)
192 {
193 	node_id->val = 0;
194 	node_id->dma_type = te->copier.dma_type;
195 
196 	switch (node_id->dma_type) {
197 	case AVS_DMA_DMIC_LINK_INPUT:
198 	case AVS_DMA_I2S_LINK_OUTPUT:
199 	case AVS_DMA_I2S_LINK_INPUT:
200 		/* Gateway's virtual index is statically assigned in the topology. */
201 		node_id->vindex = te->copier.vindex.val;
202 		break;
203 
204 	case AVS_DMA_HDA_HOST_OUTPUT:
205 	case AVS_DMA_HDA_HOST_INPUT:
206 		/* Gateway's virtual index is dynamically assigned with DMA ID */
207 		node_id->vindex = dma_id;
208 		break;
209 
210 	case AVS_DMA_HDA_LINK_OUTPUT:
211 	case AVS_DMA_HDA_LINK_INPUT:
212 		node_id->vindex = te->copier.vindex.val | dma_id;
213 		break;
214 
215 	default:
216 		*node_id = INVALID_NODE_ID;
217 		break;
218 	}
219 }
220 
/*
 * Every BLOB contains at least gateway attributes.
 * First u32 appears to be ->capabilities_size (4 bytes) followed by one
 * zeroed dword of gateway attributes - TODO confirm against
 * struct acpi_nhlt_config layout.
 */
static struct acpi_nhlt_config *default_blob = (struct acpi_nhlt_config *)&(u32[2]) {4};
223 
224 static struct acpi_nhlt_config *
225 avs_nhlt_config_or_default(struct avs_dev *adev, struct avs_tplg_module *t)
226 {
227 	struct acpi_nhlt_format_config *fmtcfg;
228 	struct avs_tplg_modcfg_ext *te;
229 	struct avs_audio_format *fmt;
230 	int link_type, dev_type;
231 	int bus_id, dir;
232 
233 	te = t->cfg_ext;
234 
235 	switch (te->copier.dma_type) {
236 	case AVS_DMA_I2S_LINK_OUTPUT:
237 		link_type = ACPI_NHLT_LINKTYPE_SSP;
238 		dev_type = ACPI_NHLT_DEVICETYPE_CODEC;
239 		bus_id = te->copier.vindex.i2s.instance;
240 		dir = SNDRV_PCM_STREAM_PLAYBACK;
241 		fmt = te->copier.out_fmt;
242 		break;
243 
244 	case AVS_DMA_I2S_LINK_INPUT:
245 		link_type = ACPI_NHLT_LINKTYPE_SSP;
246 		dev_type = ACPI_NHLT_DEVICETYPE_CODEC;
247 		bus_id = te->copier.vindex.i2s.instance;
248 		dir = SNDRV_PCM_STREAM_CAPTURE;
249 		fmt = t->in_fmt;
250 		break;
251 
252 	case AVS_DMA_DMIC_LINK_INPUT:
253 		link_type = ACPI_NHLT_LINKTYPE_PDM;
254 		dev_type = -1; /* ignored */
255 		bus_id = 0;
256 		dir = SNDRV_PCM_STREAM_CAPTURE;
257 		fmt = t->in_fmt;
258 		break;
259 
260 	default:
261 		return default_blob;
262 	}
263 
264 	/* Override format selection if necessary. */
265 	if (te->copier.blob_fmt)
266 		fmt = te->copier.blob_fmt;
267 
268 	fmtcfg = acpi_nhlt_find_fmtcfg(link_type, dev_type, dir, bus_id,
269 				       fmt->num_channels, fmt->sampling_freq, fmt->valid_bit_depth,
270 				       fmt->bit_depth);
271 	if (!fmtcfg) {
272 		dev_warn(adev->dev, "Endpoint format configuration not found.\n");
273 		return ERR_PTR(-ENOENT);
274 	}
275 
276 	if (fmtcfg->config.capabilities_size < default_blob->capabilities_size)
277 		return ERR_PTR(-ETOOSMALL);
278 	/* The firmware expects the payload to be DWORD-aligned. */
279 	if (fmtcfg->config.capabilities_size % sizeof(u32))
280 		return ERR_PTR(-EINVAL);
281 
282 	return &fmtcfg->config;
283 }
284 
/*
 * Append a DMA-configuration TLV after the gateway blob already copied into
 * @gtw on platforms with the ALTHDA attribute. Both the blob length counter
 * (->config_length, in dwords) and the overall payload size tracker
 * (*cfg_size, in bytes) are advanced accordingly.
 *
 * Returns 0 on success or when no TLV is needed, -E2BIG if the addition
 * would overflow the mailbox.
 */
static int avs_append_dma_cfg(struct avs_dev *adev, struct avs_copier_gtw_cfg *gtw,
			      struct avs_tplg_module *t, u32 dma_id, size_t *cfg_size)
{
	u32 dma_type = t->cfg_ext->copier.dma_type;
	struct avs_dma_cfg *dma;
	struct avs_tlv *tlv;
	size_t tlv_size;

	/* Only platforms flagged ALTHDA need the extra DMA TLV. */
	if (!avs_platattr_test(adev, ALTHDA))
		return 0;

	/* HDA host/link gateways are excluded; all others get the TLV. */
	switch (dma_type) {
	case AVS_DMA_HDA_HOST_OUTPUT:
	case AVS_DMA_HDA_HOST_INPUT:
	case AVS_DMA_HDA_LINK_OUTPUT:
	case AVS_DMA_HDA_LINK_INPUT:
		return 0;
	default:
		break;
	}

	tlv_size = sizeof(*tlv) + sizeof(*dma);
	if (*cfg_size + tlv_size > AVS_MAILBOX_SIZE)
		return -E2BIG;

	/* DMA config is a TLV tailing the existing payload. */
	/* Note: config_length indexes the u32 blob[] array, i.e. it is in dwords. */
	tlv = (struct avs_tlv *)&gtw->config.blob[gtw->config_length];
	tlv->type = AVS_GTW_DMA_CONFIG_ID;
	tlv->length = sizeof(*dma);

	dma = (struct avs_dma_cfg *)tlv->value;
	memset(dma, 0, sizeof(*dma));
	dma->dma_method = AVS_DMA_METHOD_HDA;
	dma->pre_allocated = true;
	dma->dma_channel_id = dma_id;
	/* Stream tags are 1-based while DMA channels are 0-based. */
	dma->stream_id = dma_id + 1;

	gtw->config_length += tlv_size / sizeof(u32);
	*cfg_size += tlv_size;

	return 0;
}
327 
328 static int avs_fill_gtw_config(struct avs_dev *adev, struct avs_copier_gtw_cfg *gtw,
329 			       struct avs_tplg_module *t, u32 dma_id, size_t *cfg_size)
330 {
331 	struct acpi_nhlt_config *blob;
332 	size_t gtw_size;
333 
334 	blob = avs_nhlt_config_or_default(adev, t);
335 	if (IS_ERR(blob))
336 		return PTR_ERR(blob);
337 
338 	gtw_size = blob->capabilities_size;
339 	if (*cfg_size + gtw_size > AVS_MAILBOX_SIZE)
340 		return -E2BIG;
341 
342 	gtw->config_length = gtw_size / sizeof(u32);
343 	memcpy(gtw->config.blob, blob->capabilities, blob->capabilities_size);
344 	*cfg_size += gtw_size;
345 
346 	return avs_append_dma_cfg(adev, gtw, t, dma_id, cfg_size);
347 }
348 
349 static int avs_copier_create(struct avs_dev *adev, struct avs_path_module *mod)
350 {
351 	struct avs_tplg_module *t = mod->template;
352 	struct avs_tplg_modcfg_ext *te;
353 	struct avs_copier_cfg *cfg;
354 	size_t cfg_size;
355 	u32 dma_id;
356 	int ret;
357 
358 	te = t->cfg_ext;
359 	cfg = adev->modcfg_buf;
360 	dma_id = mod->owner->owner->dma_id;
361 	cfg_size = offsetof(struct avs_copier_cfg, gtw_cfg.config);
362 
363 	ret = avs_fill_gtw_config(adev, &cfg->gtw_cfg, t, dma_id, &cfg_size);
364 	if (ret)
365 		return ret;
366 
367 	cfg->base.cpc = t->cfg_base->cpc;
368 	cfg->base.ibs = t->cfg_base->ibs;
369 	cfg->base.obs = t->cfg_base->obs;
370 	cfg->base.is_pages = t->cfg_base->is_pages;
371 	cfg->base.audio_fmt = *t->in_fmt;
372 	cfg->out_fmt = *te->copier.out_fmt;
373 	cfg->feature_mask = te->copier.feature_mask;
374 	avs_init_node_id(&cfg->gtw_cfg.node_id, te, dma_id);
375 	cfg->gtw_cfg.dma_buffer_size = te->copier.dma_buffer_size;
376 	mod->gtw_attrs = cfg->gtw_cfg.config.attrs;
377 
378 	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, t->core_id,
379 				  t->domain, cfg, cfg_size, &mod->instance_id);
380 	return ret;
381 }
382 
383 static int avs_whm_create(struct avs_dev *adev, struct avs_path_module *mod)
384 {
385 	struct avs_tplg_module *t = mod->template;
386 	struct avs_tplg_modcfg_ext *te;
387 	struct avs_whm_cfg *cfg;
388 	size_t cfg_size;
389 	u32 dma_id;
390 	int ret;
391 
392 	te = t->cfg_ext;
393 	cfg = adev->modcfg_buf;
394 	dma_id = mod->owner->owner->dma_id;
395 	cfg_size = offsetof(struct avs_whm_cfg, gtw_cfg.config);
396 
397 	ret = avs_fill_gtw_config(adev, &cfg->gtw_cfg, t, dma_id, &cfg_size);
398 	if (ret)
399 		return ret;
400 
401 	cfg->base.cpc = t->cfg_base->cpc;
402 	cfg->base.ibs = t->cfg_base->ibs;
403 	cfg->base.obs = t->cfg_base->obs;
404 	cfg->base.is_pages = t->cfg_base->is_pages;
405 	cfg->base.audio_fmt = *t->in_fmt;
406 	cfg->ref_fmt = *te->whm.ref_fmt;
407 	cfg->out_fmt = *te->whm.out_fmt;
408 	cfg->wake_tick_period = te->whm.wake_tick_period;
409 	avs_init_node_id(&cfg->gtw_cfg.node_id, te, dma_id);
410 	cfg->gtw_cfg.dma_buffer_size = te->whm.dma_buffer_size;
411 	mod->gtw_attrs = cfg->gtw_cfg.config.attrs;
412 
413 	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, t->core_id,
414 				  t->domain, cfg, cfg_size, &mod->instance_id);
415 	return ret;
416 }
417 
418 static struct soc_mixer_control *avs_get_module_control(struct avs_path_module *mod,
419 							const char *name)
420 {
421 	struct avs_tplg_module *t = mod->template;
422 	struct avs_tplg_path_template *path_tmpl;
423 	struct snd_soc_dapm_widget *w;
424 	int i;
425 
426 	path_tmpl = t->owner->owner->owner;
427 	w = path_tmpl->w;
428 
429 	for (i = 0; i < w->num_kcontrols; i++) {
430 		struct avs_control_data *ctl_data;
431 		struct soc_mixer_control *mc;
432 
433 		mc = (struct soc_mixer_control *)w->kcontrols[i]->private_value;
434 		ctl_data = (struct avs_control_data *)mc->dobj.private;
435 		if (ctl_data->id == t->ctl_id && strstr(w->kcontrols[i]->id.name, name))
436 			return mc;
437 	}
438 
439 	return NULL;
440 }
441 
442 int avs_peakvol_set_volume(struct avs_dev *adev, struct avs_path_module *mod,
443 			   struct soc_mixer_control *mc, long *input)
444 {
445 	struct avs_volume_cfg vols[SND_SOC_TPLG_MAX_CHAN] = {{0}};
446 	struct avs_control_data *ctl_data;
447 	struct avs_tplg_module *t;
448 	int ret, i;
449 
450 	ctl_data = mc->dobj.private;
451 	t = mod->template;
452 	if (!input)
453 		input = ctl_data->values;
454 
455 	if (mc->num_channels) {
456 		for (i = 0; i < mc->num_channels; i++) {
457 			vols[i].channel_id = i;
458 			vols[i].target_volume = input[i];
459 			vols[i].curve_type = t->cfg_ext->peakvol.curve_type;
460 			vols[i].curve_duration = t->cfg_ext->peakvol.curve_duration;
461 		}
462 
463 		ret = avs_ipc_peakvol_set_volumes(adev, mod->module_id, mod->instance_id, vols,
464 						  mc->num_channels);
465 		return AVS_IPC_RET(ret);
466 	}
467 
468 	/* Target all channels if no individual selected. */
469 	vols[0].channel_id = AVS_ALL_CHANNELS_MASK;
470 	vols[0].target_volume = input[0];
471 	vols[0].curve_type = t->cfg_ext->peakvol.curve_type;
472 	vols[0].curve_duration = t->cfg_ext->peakvol.curve_duration;
473 
474 	ret = avs_ipc_peakvol_set_volume(adev, mod->module_id, mod->instance_id, &vols[0]);
475 	return AVS_IPC_RET(ret);
476 }
477 
478 int avs_peakvol_set_mute(struct avs_dev *adev, struct avs_path_module *mod,
479 			 struct soc_mixer_control *mc, long *input)
480 {
481 	struct avs_mute_cfg mutes[SND_SOC_TPLG_MAX_CHAN] = {{0}};
482 	struct avs_control_data *ctl_data;
483 	struct avs_tplg_module *t;
484 	int ret, i;
485 
486 	ctl_data = mc->dobj.private;
487 	t = mod->template;
488 	if (!input)
489 		input = ctl_data->values;
490 
491 	if (mc->num_channels) {
492 		for (i = 0; i < mc->num_channels; i++) {
493 			mutes[i].channel_id = i;
494 			mutes[i].mute = !input[i];
495 			mutes[i].curve_type = t->cfg_ext->peakvol.curve_type;
496 			mutes[i].curve_duration = t->cfg_ext->peakvol.curve_duration;
497 		}
498 
499 		ret = avs_ipc_peakvol_set_mutes(adev, mod->module_id, mod->instance_id, mutes,
500 						mc->num_channels);
501 		return AVS_IPC_RET(ret);
502 	}
503 
504 	/* Target all channels if no individual selected. */
505 	mutes[0].channel_id = AVS_ALL_CHANNELS_MASK;
506 	mutes[0].mute = !input[0];
507 	mutes[0].curve_type = t->cfg_ext->peakvol.curve_type;
508 	mutes[0].curve_duration = t->cfg_ext->peakvol.curve_duration;
509 
510 	ret = avs_ipc_peakvol_set_mute(adev, mod->module_id, mod->instance_id, &mutes[0]);
511 	return AVS_IPC_RET(ret);
512 }
513 
514 static int avs_peakvol_create(struct avs_dev *adev, struct avs_path_module *mod)
515 {
516 	struct avs_tplg_module *t = mod->template;
517 	struct soc_mixer_control *mc;
518 	struct avs_peakvol_cfg *cfg;
519 	size_t cfg_size;
520 	int ret;
521 
522 	cfg_size = struct_size(cfg, vols, 1);
523 	if (cfg_size > AVS_MAILBOX_SIZE)
524 		return -EINVAL;
525 
526 	cfg = adev->modcfg_buf;
527 	memset(cfg, 0, cfg_size);
528 	cfg->base.cpc = t->cfg_base->cpc;
529 	cfg->base.ibs = t->cfg_base->ibs;
530 	cfg->base.obs = t->cfg_base->obs;
531 	cfg->base.is_pages = t->cfg_base->is_pages;
532 	cfg->base.audio_fmt = *t->in_fmt;
533 	cfg->vols[0].channel_id = AVS_ALL_CHANNELS_MASK;
534 	cfg->vols[0].target_volume = S32_MAX;
535 	cfg->vols[0].curve_type = t->cfg_ext->peakvol.curve_type;
536 	cfg->vols[0].curve_duration = t->cfg_ext->peakvol.curve_duration;
537 
538 	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, t->core_id,
539 				  t->domain, cfg, cfg_size, &mod->instance_id);
540 	if (ret)
541 		return ret;
542 
543 	/* Now configure both VOLUME and MUTE parameters. */
544 	mc = avs_get_module_control(mod, "Volume");
545 	if (mc) {
546 		ret = avs_peakvol_set_volume(adev, mod, mc, NULL);
547 		if (ret)
548 			return ret;
549 	}
550 
551 	mc = avs_get_module_control(mod, "Switch");
552 	if (mc)
553 		return avs_peakvol_set_mute(adev, mod, mc, NULL);
554 	return 0;
555 }
556 
557 static int avs_updown_mix_create(struct avs_dev *adev, struct avs_path_module *mod)
558 {
559 	struct avs_tplg_module *t = mod->template;
560 	struct avs_updown_mixer_cfg cfg;
561 	int i;
562 
563 	cfg.base.cpc = t->cfg_base->cpc;
564 	cfg.base.ibs = t->cfg_base->ibs;
565 	cfg.base.obs = t->cfg_base->obs;
566 	cfg.base.is_pages = t->cfg_base->is_pages;
567 	cfg.base.audio_fmt = *t->in_fmt;
568 	cfg.out_channel_config = t->cfg_ext->updown_mix.out_channel_config;
569 	cfg.coefficients_select = t->cfg_ext->updown_mix.coefficients_select;
570 	for (i = 0; i < AVS_COEFF_CHANNELS_MAX; i++)
571 		cfg.coefficients[i] = t->cfg_ext->updown_mix.coefficients[i];
572 	cfg.channel_map = t->cfg_ext->updown_mix.channel_map;
573 
574 	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
575 				   t->core_id, t->domain, &cfg, sizeof(cfg),
576 				   &mod->instance_id);
577 }
578 
579 static int avs_src_create(struct avs_dev *adev, struct avs_path_module *mod)
580 {
581 	struct avs_tplg_module *t = mod->template;
582 	struct avs_src_cfg cfg;
583 
584 	cfg.base.cpc = t->cfg_base->cpc;
585 	cfg.base.ibs = t->cfg_base->ibs;
586 	cfg.base.obs = t->cfg_base->obs;
587 	cfg.base.is_pages = t->cfg_base->is_pages;
588 	cfg.base.audio_fmt = *t->in_fmt;
589 	cfg.out_freq = t->cfg_ext->src.out_freq;
590 
591 	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
592 				   t->core_id, t->domain, &cfg, sizeof(cfg),
593 				   &mod->instance_id);
594 }
595 
596 static int avs_asrc_create(struct avs_dev *adev, struct avs_path_module *mod)
597 {
598 	struct avs_tplg_module *t = mod->template;
599 	struct avs_asrc_cfg cfg;
600 
601 	memset(&cfg, 0, sizeof(cfg));
602 	cfg.base.cpc = t->cfg_base->cpc;
603 	cfg.base.ibs = t->cfg_base->ibs;
604 	cfg.base.obs = t->cfg_base->obs;
605 	cfg.base.is_pages = t->cfg_base->is_pages;
606 	cfg.base.audio_fmt = *t->in_fmt;
607 	cfg.out_freq = t->cfg_ext->asrc.out_freq;
608 	cfg.mode = t->cfg_ext->asrc.mode;
609 	cfg.disable_jitter_buffer = t->cfg_ext->asrc.disable_jitter_buffer;
610 
611 	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
612 				   t->core_id, t->domain, &cfg, sizeof(cfg),
613 				   &mod->instance_id);
614 }
615 
616 static int avs_aec_create(struct avs_dev *adev, struct avs_path_module *mod)
617 {
618 	struct avs_tplg_module *t = mod->template;
619 	struct avs_aec_cfg cfg;
620 
621 	cfg.base.cpc = t->cfg_base->cpc;
622 	cfg.base.ibs = t->cfg_base->ibs;
623 	cfg.base.obs = t->cfg_base->obs;
624 	cfg.base.is_pages = t->cfg_base->is_pages;
625 	cfg.base.audio_fmt = *t->in_fmt;
626 	cfg.ref_fmt = *t->cfg_ext->aec.ref_fmt;
627 	cfg.out_fmt = *t->cfg_ext->aec.out_fmt;
628 	cfg.cpc_lp_mode = t->cfg_ext->aec.cpc_lp_mode;
629 
630 	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
631 				   t->core_id, t->domain, &cfg, sizeof(cfg),
632 				   &mod->instance_id);
633 }
634 
635 static int avs_mux_create(struct avs_dev *adev, struct avs_path_module *mod)
636 {
637 	struct avs_tplg_module *t = mod->template;
638 	struct avs_mux_cfg cfg;
639 
640 	cfg.base.cpc = t->cfg_base->cpc;
641 	cfg.base.ibs = t->cfg_base->ibs;
642 	cfg.base.obs = t->cfg_base->obs;
643 	cfg.base.is_pages = t->cfg_base->is_pages;
644 	cfg.base.audio_fmt = *t->in_fmt;
645 	cfg.ref_fmt = *t->cfg_ext->mux.ref_fmt;
646 	cfg.out_fmt = *t->cfg_ext->mux.out_fmt;
647 
648 	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
649 				   t->core_id, t->domain, &cfg, sizeof(cfg),
650 				   &mod->instance_id);
651 }
652 
653 static int avs_wov_create(struct avs_dev *adev, struct avs_path_module *mod)
654 {
655 	struct avs_tplg_module *t = mod->template;
656 	struct avs_wov_cfg cfg;
657 
658 	cfg.base.cpc = t->cfg_base->cpc;
659 	cfg.base.ibs = t->cfg_base->ibs;
660 	cfg.base.obs = t->cfg_base->obs;
661 	cfg.base.is_pages = t->cfg_base->is_pages;
662 	cfg.base.audio_fmt = *t->in_fmt;
663 	cfg.cpc_lp_mode = t->cfg_ext->wov.cpc_lp_mode;
664 
665 	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
666 				   t->core_id, t->domain, &cfg, sizeof(cfg),
667 				   &mod->instance_id);
668 }
669 
670 static int avs_micsel_create(struct avs_dev *adev, struct avs_path_module *mod)
671 {
672 	struct avs_tplg_module *t = mod->template;
673 	struct avs_micsel_cfg cfg;
674 
675 	cfg.base.cpc = t->cfg_base->cpc;
676 	cfg.base.ibs = t->cfg_base->ibs;
677 	cfg.base.obs = t->cfg_base->obs;
678 	cfg.base.is_pages = t->cfg_base->is_pages;
679 	cfg.base.audio_fmt = *t->in_fmt;
680 	cfg.out_fmt = *t->cfg_ext->micsel.out_fmt;
681 
682 	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
683 				   t->core_id, t->domain, &cfg, sizeof(cfg),
684 				   &mod->instance_id);
685 }
686 
687 static int avs_modbase_create(struct avs_dev *adev, struct avs_path_module *mod)
688 {
689 	struct avs_tplg_module *t = mod->template;
690 	struct avs_modcfg_base cfg;
691 
692 	cfg.cpc = t->cfg_base->cpc;
693 	cfg.ibs = t->cfg_base->ibs;
694 	cfg.obs = t->cfg_base->obs;
695 	cfg.is_pages = t->cfg_base->is_pages;
696 	cfg.audio_fmt = *t->in_fmt;
697 
698 	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
699 				   t->core_id, t->domain, &cfg, sizeof(cfg),
700 				   &mod->instance_id);
701 }
702 
703 static int avs_modext_create(struct avs_dev *adev, struct avs_path_module *mod)
704 {
705 	struct avs_tplg_module *t = mod->template;
706 	struct avs_tplg_modcfg_ext *tcfg = t->cfg_ext;
707 	struct avs_modcfg_ext *cfg;
708 	size_t cfg_size, num_pins;
709 	int ret, i;
710 
711 	num_pins = tcfg->generic.num_input_pins + tcfg->generic.num_output_pins;
712 	cfg_size = struct_size(cfg, pin_fmts, num_pins);
713 
714 	if (cfg_size > AVS_MAILBOX_SIZE)
715 		return -EINVAL;
716 
717 	cfg = adev->modcfg_buf;
718 	memset(cfg, 0, cfg_size);
719 	cfg->base.cpc = t->cfg_base->cpc;
720 	cfg->base.ibs = t->cfg_base->ibs;
721 	cfg->base.obs = t->cfg_base->obs;
722 	cfg->base.is_pages = t->cfg_base->is_pages;
723 	cfg->base.audio_fmt = *t->in_fmt;
724 	cfg->num_input_pins = tcfg->generic.num_input_pins;
725 	cfg->num_output_pins = tcfg->generic.num_output_pins;
726 
727 	/* configure pin formats */
728 	for (i = 0; i < num_pins; i++) {
729 		struct avs_tplg_pin_format *tpin = &tcfg->generic.pin_fmts[i];
730 		struct avs_pin_format *pin = &cfg->pin_fmts[i];
731 
732 		pin->pin_index = tpin->pin_index;
733 		pin->iobs = tpin->iobs;
734 		pin->audio_fmt = *tpin->fmt;
735 	}
736 
737 	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
738 				  t->core_id, t->domain, cfg, cfg_size,
739 				  &mod->instance_id);
740 	return ret;
741 }
742 
743 static int avs_probe_create(struct avs_dev *adev, struct avs_path_module *mod)
744 {
745 	dev_err(adev->dev, "Probe module can't be instantiated by topology");
746 	return -EINVAL;
747 }
748 
/* Pairs a module-type GUID with the handler that configures and creates it. */
struct avs_module_create {
	guid_t *guid;
	int (*create)(struct avs_dev *adev, struct avs_path_module *mod);
};
753 
/* GUID-to-constructor lookup table; unlisted types fall back to avs_modext_create(). */
static struct avs_module_create avs_module_create[] = {
	{ &AVS_MIXIN_MOD_UUID, avs_modbase_create },
	{ &AVS_MIXOUT_MOD_UUID, avs_modbase_create },
	{ &AVS_KPBUFF_MOD_UUID, avs_modbase_create },
	{ &AVS_COPIER_MOD_UUID, avs_copier_create },
	{ &AVS_PEAKVOL_MOD_UUID, avs_peakvol_create },
	{ &AVS_GAIN_MOD_UUID, avs_peakvol_create },
	{ &AVS_MICSEL_MOD_UUID, avs_micsel_create },
	{ &AVS_MUX_MOD_UUID, avs_mux_create },
	{ &AVS_UPDWMIX_MOD_UUID, avs_updown_mix_create },
	{ &AVS_SRCINTC_MOD_UUID, avs_src_create },
	{ &AVS_AEC_MOD_UUID, avs_aec_create },
	{ &AVS_ASRC_MOD_UUID, avs_asrc_create },
	{ &AVS_INTELWOV_MOD_UUID, avs_wov_create },
	{ &AVS_PROBE_MOD_UUID, avs_probe_create },
	{ &AVS_WOVHOSTM_MOD_UUID, avs_whm_create },
};
771 
772 static int avs_path_module_type_create(struct avs_dev *adev, struct avs_path_module *mod)
773 {
774 	const guid_t *type = &mod->template->cfg_ext->type;
775 
776 	for (int i = 0; i < ARRAY_SIZE(avs_module_create); i++)
777 		if (guid_equal(type, avs_module_create[i].guid))
778 			return avs_module_create[i].create(adev, mod);
779 
780 	return avs_modext_create(adev, mod);
781 }
782 
783 static int avs_path_module_send_init_configs(struct avs_dev *adev, struct avs_path_module *mod)
784 {
785 	struct avs_soc_component *acomp;
786 
787 	acomp = to_avs_soc_component(mod->template->owner->owner->owner->owner->comp);
788 
789 	u32 num_ids = mod->template->num_config_ids;
790 	u32 *ids = mod->template->config_ids;
791 
792 	for (int i = 0; i < num_ids; i++) {
793 		struct avs_tplg_init_config *config = &acomp->tplg->init_configs[ids[i]];
794 		size_t len = config->length;
795 		void *data = config->data;
796 		u32 param = config->param;
797 		int ret;
798 
799 		ret = avs_ipc_set_large_config(adev, mod->module_id, mod->instance_id,
800 					       param, data, len);
801 		if (ret) {
802 			dev_err(adev->dev, "send initial module config failed: %d\n", ret);
803 			return AVS_IPC_RET(ret);
804 		}
805 	}
806 
807 	return 0;
808 }
809 
/* Free host-side bookkeeping for @mod; does not touch DSP-side state. */
static void avs_path_module_free(struct avs_dev *adev, struct avs_path_module *mod)
{
	kfree(mod);
}
814 
815 static struct avs_path_module *
816 avs_path_module_create(struct avs_dev *adev,
817 		       struct avs_path_pipeline *owner,
818 		       struct avs_tplg_module *template)
819 {
820 	struct avs_path_module *mod;
821 	int module_id, ret;
822 
823 	module_id = avs_get_module_id(adev, &template->cfg_ext->type);
824 	if (module_id < 0)
825 		return ERR_PTR(module_id);
826 
827 	mod = kzalloc(sizeof(*mod), GFP_KERNEL);
828 	if (!mod)
829 		return ERR_PTR(-ENOMEM);
830 
831 	mod->template = template;
832 	mod->module_id = module_id;
833 	mod->owner = owner;
834 	INIT_LIST_HEAD(&mod->node);
835 
836 	ret = avs_path_module_type_create(adev, mod);
837 	if (ret) {
838 		dev_err(adev->dev, "module-type create failed: %d\n", ret);
839 		kfree(mod);
840 		return ERR_PTR(ret);
841 	}
842 
843 	ret = avs_path_module_send_init_configs(adev, mod);
844 	if (ret) {
845 		kfree(mod);
846 		return ERR_PTR(ret);
847 	}
848 
849 	return mod;
850 }
851 
852 static int avs_path_binding_arm(struct avs_dev *adev, struct avs_path_binding *binding)
853 {
854 	struct avs_path_module *this_mod, *target_mod;
855 	struct avs_path_pipeline *target_ppl;
856 	struct avs_path *target_path;
857 	struct avs_tplg_binding *t;
858 
859 	t = binding->template;
860 	this_mod = avs_path_find_module(binding->owner,
861 					t->mod_id);
862 	if (!this_mod) {
863 		dev_err(adev->dev, "path mod %d not found\n", t->mod_id);
864 		return -EINVAL;
865 	}
866 
867 	/* update with target_tplg_name too */
868 	target_path = avs_path_find_path(adev, t->target_tplg_name,
869 					 t->target_path_tmpl_id);
870 	if (!target_path) {
871 		dev_err(adev->dev, "target path %s:%d not found\n",
872 			t->target_tplg_name, t->target_path_tmpl_id);
873 		return -EINVAL;
874 	}
875 
876 	target_ppl = avs_path_find_pipeline(target_path,
877 					    t->target_ppl_id);
878 	if (!target_ppl) {
879 		dev_err(adev->dev, "target ppl %d not found\n", t->target_ppl_id);
880 		return -EINVAL;
881 	}
882 
883 	target_mod = avs_path_find_module(target_ppl, t->target_mod_id);
884 	if (!target_mod) {
885 		dev_err(adev->dev, "target mod %d not found\n", t->target_mod_id);
886 		return -EINVAL;
887 	}
888 
889 	if (t->is_sink) {
890 		binding->sink = this_mod;
891 		binding->sink_pin = t->mod_pin;
892 		binding->source = target_mod;
893 		binding->source_pin = t->target_mod_pin;
894 	} else {
895 		binding->sink = target_mod;
896 		binding->sink_pin = t->target_mod_pin;
897 		binding->source = this_mod;
898 		binding->source_pin = t->mod_pin;
899 	}
900 
901 	return 0;
902 }
903 
/* Free host-side bookkeeping for @binding; no IPC is involved. */
static void avs_path_binding_free(struct avs_dev *adev, struct avs_path_binding *binding)
{
	kfree(binding);
}
908 
909 static struct avs_path_binding *avs_path_binding_create(struct avs_dev *adev,
910 							struct avs_path_pipeline *owner,
911 							struct avs_tplg_binding *t)
912 {
913 	struct avs_path_binding *binding;
914 
915 	binding = kzalloc(sizeof(*binding), GFP_KERNEL);
916 	if (!binding)
917 		return ERR_PTR(-ENOMEM);
918 
919 	binding->template = t;
920 	binding->owner = owner;
921 	INIT_LIST_HEAD(&binding->node);
922 
923 	return binding;
924 }
925 
926 static int avs_path_pipeline_arm(struct avs_dev *adev,
927 				 struct avs_path_pipeline *ppl)
928 {
929 	struct avs_path_module *mod;
930 
931 	list_for_each_entry(mod, &ppl->mod_list, node) {
932 		struct avs_path_module *source, *sink;
933 		int ret;
934 
935 		/*
936 		 * Only one module (so it's implicitly last) or it is the last
937 		 * one, either way we don't have next module to bind it to.
938 		 */
939 		if (mod == list_last_entry(&ppl->mod_list,
940 					   struct avs_path_module, node))
941 			break;
942 
943 		/* bind current module to next module on list */
944 		source = mod;
945 		sink = list_next_entry(mod, node);
946 
947 		ret = avs_ipc_bind(adev, source->module_id, source->instance_id,
948 				   sink->module_id, sink->instance_id, 0, 0);
949 		if (ret)
950 			return AVS_IPC_RET(ret);
951 	}
952 
953 	return 0;
954 }
955 
/*
 * Tear down a pipeline: drop bindings, delete the DSP pipeline, then delete
 * and free each owned module, and finally release the pipeline itself.
 */
static void avs_path_pipeline_free(struct avs_dev *adev,
				   struct avs_path_pipeline *ppl)
{
	struct avs_path_binding *binding, *bsave;
	struct avs_path_module *mod, *save;

	/* Bindings go first so no module is still referenced when deleted. */
	list_for_each_entry_safe(binding, bsave, &ppl->binding_list, node) {
		list_del(&binding->node);
		avs_path_binding_free(adev, binding);
	}

	avs_dsp_delete_pipeline(adev, ppl->instance_id);

	/* Unload resources occupied by owned modules */
	list_for_each_entry_safe(mod, save, &ppl->mod_list, node) {
		avs_dsp_delete_module(adev, mod->module_id, mod->instance_id,
				      mod->owner->instance_id,
				      mod->template->core_id);
		avs_path_module_free(adev, mod);
	}

	list_del(&ppl->node);
	kfree(ppl);
}
980 
/*
 * Create a pipeline for @owner from @template: allocate the host object,
 * create the DSP-side pipeline, then instantiate all modules and binding
 * placeholders. On any failure everything created so far is torn down via
 * avs_path_pipeline_free(). Returns the pipeline or an ERR_PTR().
 */
static struct avs_path_pipeline *
avs_path_pipeline_create(struct avs_dev *adev, struct avs_path *owner,
			 struct avs_tplg_pipeline *template)
{
	struct avs_path_pipeline *ppl;
	struct avs_tplg_pplcfg *cfg = template->cfg;
	struct avs_tplg_module *tmod;
	int ret, i;

	ppl = kzalloc(sizeof(*ppl), GFP_KERNEL);
	if (!ppl)
		return ERR_PTR(-ENOMEM);

	ppl->template = template;
	ppl->owner = owner;
	INIT_LIST_HEAD(&ppl->binding_list);
	INIT_LIST_HEAD(&ppl->mod_list);
	INIT_LIST_HEAD(&ppl->node);

	ret = avs_dsp_create_pipeline(adev, cfg->req_size, cfg->priority,
				      cfg->lp, cfg->attributes,
				      &ppl->instance_id);
	if (ret) {
		dev_err(adev->dev, "error creating pipeline %d\n", ret);
		kfree(ppl);
		return ERR_PTR(ret);
	}

	/* Instantiate modules in template order; order defines binding chain. */
	list_for_each_entry(tmod, &template->mod_list, node) {
		struct avs_path_module *mod;

		mod = avs_path_module_create(adev, ppl, tmod);
		if (IS_ERR(mod)) {
			ret = PTR_ERR(mod);
			dev_err(adev->dev, "error creating module %d\n", ret);
			goto init_err;
		}

		list_add_tail(&mod->node, &ppl->mod_list);
	}

	for (i = 0; i < template->num_bindings; i++) {
		struct avs_path_binding *binding;

		binding = avs_path_binding_create(adev, ppl, template->bindings[i]);
		if (IS_ERR(binding)) {
			ret = PTR_ERR(binding);
			dev_err(adev->dev, "error creating binding %d\n", ret);
			goto init_err;
		}

		list_add_tail(&binding->node, &ppl->binding_list);
	}

	return ppl;

init_err:
	/* Frees modules, bindings and the DSP pipeline created so far. */
	avs_path_pipeline_free(adev, ppl);
	return ERR_PTR(ret);
}
1041 
1042 static int avs_path_init(struct avs_dev *adev, struct avs_path *path,
1043 			 struct avs_tplg_path *template, u32 dma_id)
1044 {
1045 	struct avs_tplg_pipeline *tppl;
1046 
1047 	path->owner = adev;
1048 	path->template = template;
1049 	path->dma_id = dma_id;
1050 	INIT_LIST_HEAD(&path->ppl_list);
1051 	INIT_LIST_HEAD(&path->node);
1052 
1053 	/* create all the pipelines */
1054 	list_for_each_entry(tppl, &template->ppl_list, node) {
1055 		struct avs_path_pipeline *ppl;
1056 
1057 		ppl = avs_path_pipeline_create(adev, path, tppl);
1058 		if (IS_ERR(ppl))
1059 			return PTR_ERR(ppl);
1060 
1061 		list_add_tail(&ppl->node, &path->ppl_list);
1062 	}
1063 
1064 	spin_lock(&adev->path_list_lock);
1065 	list_add_tail(&path->node, &adev->path_list);
1066 	spin_unlock(&adev->path_list_lock);
1067 
1068 	return 0;
1069 }
1070 
1071 static int avs_path_arm(struct avs_dev *adev, struct avs_path *path)
1072 {
1073 	struct avs_path_pipeline *ppl;
1074 	struct avs_path_binding *binding;
1075 	int ret;
1076 
1077 	list_for_each_entry(ppl, &path->ppl_list, node) {
1078 		/*
1079 		 * Arm all ppl bindings before binding internal modules
1080 		 * as it costs no IPCs which isn't true for the latter.
1081 		 */
1082 		list_for_each_entry(binding, &ppl->binding_list, node) {
1083 			ret = avs_path_binding_arm(adev, binding);
1084 			if (ret < 0)
1085 				return ret;
1086 		}
1087 
1088 		ret = avs_path_pipeline_arm(adev, ppl);
1089 		if (ret < 0)
1090 			return ret;
1091 	}
1092 
1093 	return 0;
1094 }
1095 
1096 static void avs_path_free_unlocked(struct avs_path *path)
1097 {
1098 	struct avs_path_pipeline *ppl, *save;
1099 
1100 	spin_lock(&path->owner->path_list_lock);
1101 	list_del(&path->node);
1102 	spin_unlock(&path->owner->path_list_lock);
1103 
1104 	list_for_each_entry_safe(ppl, save, &path->ppl_list, node)
1105 		avs_path_pipeline_free(path->owner, ppl);
1106 
1107 	kfree(path);
1108 }
1109 
1110 static struct avs_path *avs_path_create_unlocked(struct avs_dev *adev, u32 dma_id,
1111 						 struct avs_tplg_path *template)
1112 {
1113 	struct avs_path *path;
1114 	int ret;
1115 
1116 	path = kzalloc(sizeof(*path), GFP_KERNEL);
1117 	if (!path)
1118 		return ERR_PTR(-ENOMEM);
1119 
1120 	ret = avs_path_init(adev, path, template, dma_id);
1121 	if (ret < 0)
1122 		goto err;
1123 
1124 	ret = avs_path_arm(adev, path);
1125 	if (ret < 0)
1126 		goto err;
1127 
1128 	path->state = AVS_PPL_STATE_INVALID;
1129 	return path;
1130 err:
1131 	avs_path_free_unlocked(path);
1132 	return ERR_PTR(ret);
1133 }
1134 
1135 void avs_path_free(struct avs_path *path)
1136 {
1137 	struct avs_dev *adev = path->owner;
1138 
1139 	mutex_lock(&adev->path_mutex);
1140 	avs_path_free_unlocked(path);
1141 	mutex_unlock(&adev->path_mutex);
1142 }
1143 
/*
 * Instantiate a path for the template variant matching the given FE/BE
 * hw_params. Returns a valid path or ERR_PTR(); -ENOENT when no variant
 * matches the parameters.
 */
struct avs_path *avs_path_create(struct avs_dev *adev, u32 dma_id,
				 struct avs_tplg_path_template *template,
				 struct snd_pcm_hw_params *fe_params,
				 struct snd_pcm_hw_params *be_params)
{
	struct avs_tplg_path *variant;
	struct avs_path *path;

	variant = avs_path_find_variant(adev, template, fe_params, be_params);
	if (!variant) {
		dev_err(adev->dev, "no matching variant found\n");
		return ERR_PTR(-ENOENT);
	}

	/* Serialize path and its components creation. */
	mutex_lock(&adev->path_mutex);
	/* Satisfy needs of avs_path_find_tplg(). */
	mutex_lock(&adev->comp_list_mutex);

	path = avs_path_create_unlocked(adev, dma_id, variant);

	mutex_unlock(&adev->comp_list_mutex);
	mutex_unlock(&adev->path_mutex);

	return path;
}
1170 
/*
 * Configure a copier's sink format ahead of binding, where required.
 * Returns 0 when successful or when no preparation is needed, negative
 * error code wrapped by AVS_IPC_RET() otherwise.
 */
static int avs_path_bind_prepare(struct avs_dev *adev,
				 struct avs_path_binding *binding)
{
	const struct avs_audio_format *src_fmt, *sink_fmt;
	struct avs_tplg_module *tsource = binding->source->template;
	struct avs_path_module *source = binding->source;
	int ret;

	/*
	 * only copier modules about to be bound
	 * to output pin other than 0 need preparation
	 */
	if (!binding->source_pin)
		return 0;
	if (!guid_equal(&tsource->cfg_ext->type, &AVS_COPIER_MOD_UUID))
		return 0;

	/*
	 * NOTE(review): both formats are taken from the modules' input-side
	 * formats - presumably the copier passes its input format through
	 * unchanged; confirm against the firmware interface spec.
	 */
	src_fmt = tsource->in_fmt;
	sink_fmt = binding->sink->template->in_fmt;

	ret = avs_ipc_copier_set_sink_format(adev, source->module_id,
					     source->instance_id, binding->source_pin,
					     src_fmt, sink_fmt);
	if (ret) {
		dev_err(adev->dev, "config copier failed: %d\n", ret);
		return AVS_IPC_RET(ret);
	}

	return 0;
}
1201 
1202 int avs_path_bind(struct avs_path *path)
1203 {
1204 	struct avs_path_pipeline *ppl;
1205 	struct avs_dev *adev = path->owner;
1206 	int ret;
1207 
1208 	list_for_each_entry(ppl, &path->ppl_list, node) {
1209 		struct avs_path_binding *binding;
1210 
1211 		list_for_each_entry(binding, &ppl->binding_list, node) {
1212 			struct avs_path_module *source, *sink;
1213 
1214 			source = binding->source;
1215 			sink = binding->sink;
1216 
1217 			ret = avs_path_bind_prepare(adev, binding);
1218 			if (ret < 0)
1219 				return ret;
1220 
1221 			ret = avs_ipc_bind(adev, source->module_id,
1222 					   source->instance_id, sink->module_id,
1223 					   sink->instance_id, binding->sink_pin,
1224 					   binding->source_pin);
1225 			if (ret) {
1226 				dev_err(adev->dev, "bind path failed: %d\n", ret);
1227 				return AVS_IPC_RET(ret);
1228 			}
1229 		}
1230 	}
1231 
1232 	return 0;
1233 }
1234 
1235 int avs_path_unbind(struct avs_path *path)
1236 {
1237 	struct avs_path_pipeline *ppl;
1238 	struct avs_dev *adev = path->owner;
1239 	int ret;
1240 
1241 	list_for_each_entry(ppl, &path->ppl_list, node) {
1242 		struct avs_path_binding *binding;
1243 
1244 		list_for_each_entry(binding, &ppl->binding_list, node) {
1245 			struct avs_path_module *source, *sink;
1246 
1247 			source = binding->source;
1248 			sink = binding->sink;
1249 
1250 			ret = avs_ipc_unbind(adev, source->module_id,
1251 					     source->instance_id, sink->module_id,
1252 					     sink->instance_id, binding->sink_pin,
1253 					     binding->source_pin);
1254 			if (ret) {
1255 				dev_err(adev->dev, "unbind path failed: %d\n", ret);
1256 				return AVS_IPC_RET(ret);
1257 			}
1258 		}
1259 	}
1260 
1261 	return 0;
1262 }
1263 
1264 int avs_path_reset(struct avs_path *path)
1265 {
1266 	struct avs_path_pipeline *ppl;
1267 	struct avs_dev *adev = path->owner;
1268 	int ret;
1269 
1270 	if (path->state == AVS_PPL_STATE_RESET)
1271 		return 0;
1272 
1273 	list_for_each_entry(ppl, &path->ppl_list, node) {
1274 		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
1275 						 AVS_PPL_STATE_RESET);
1276 		if (ret) {
1277 			dev_err(adev->dev, "reset path failed: %d\n", ret);
1278 			path->state = AVS_PPL_STATE_INVALID;
1279 			return AVS_IPC_RET(ret);
1280 		}
1281 	}
1282 
1283 	path->state = AVS_PPL_STATE_RESET;
1284 	return 0;
1285 }
1286 
1287 int avs_path_pause(struct avs_path *path)
1288 {
1289 	struct avs_path_pipeline *ppl;
1290 	struct avs_dev *adev = path->owner;
1291 	int ret;
1292 
1293 	if (path->state == AVS_PPL_STATE_PAUSED)
1294 		return 0;
1295 
1296 	list_for_each_entry_reverse(ppl, &path->ppl_list, node) {
1297 		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
1298 						 AVS_PPL_STATE_PAUSED);
1299 		if (ret) {
1300 			dev_err(adev->dev, "pause path failed: %d\n", ret);
1301 			path->state = AVS_PPL_STATE_INVALID;
1302 			return AVS_IPC_RET(ret);
1303 		}
1304 	}
1305 
1306 	path->state = AVS_PPL_STATE_PAUSED;
1307 	return 0;
1308 }
1309 
1310 int avs_path_run(struct avs_path *path, int trigger)
1311 {
1312 	struct avs_path_pipeline *ppl;
1313 	struct avs_dev *adev = path->owner;
1314 	int ret;
1315 
1316 	if (path->state == AVS_PPL_STATE_RUNNING && trigger == AVS_TPLG_TRIGGER_AUTO)
1317 		return 0;
1318 
1319 	list_for_each_entry(ppl, &path->ppl_list, node) {
1320 		if (ppl->template->cfg->trigger != trigger)
1321 			continue;
1322 
1323 		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
1324 						 AVS_PPL_STATE_RUNNING);
1325 		if (ret) {
1326 			dev_err(adev->dev, "run path failed: %d\n", ret);
1327 			path->state = AVS_PPL_STATE_INVALID;
1328 			return AVS_IPC_RET(ret);
1329 		}
1330 	}
1331 
1332 	path->state = AVS_PPL_STATE_RUNNING;
1333 	return 0;
1334 }
1335