// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021 Intel Corporation
//
// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
//          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
//

#include <linux/acpi.h>
#include <acpi/nhlt.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "avs.h"
#include "control.h"
#include "path.h"
#include "topology.h"

/* Must be called with adev->comp_list_mutex held. */
static struct avs_tplg *
avs_path_find_tplg(struct avs_dev *adev, const char *name)
{
	struct avs_soc_component *acomp;

	list_for_each_entry(acomp, &adev->comp_list, node)
		if (!strcmp(acomp->tplg->name, name))
			return acomp->tplg;
	return NULL;
}

static struct avs_path_module *
avs_path_find_module(struct avs_path_pipeline *ppl, u32 template_id)
{
	struct avs_path_module *mod;

	list_for_each_entry(mod, &ppl->mod_list, node)
		if (mod->template->id == template_id)
			return mod;
	return NULL;
}

static struct avs_path_pipeline *
avs_path_find_pipeline(struct avs_path *path, u32 template_id)
{
	struct avs_path_pipeline *ppl;

	list_for_each_entry(ppl, &path->ppl_list, node)
		if (ppl->template->id == template_id)
			return ppl;
	return NULL;
}

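/*
 * Find an already instantiated path that was created from the path template
 * with @template_id residing in the topology named @name. As only one
 * variant of a given template may be live at a time, a single hit is
 * conclusive.
 */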
static struct avs_path *
avs_path_find_path(struct avs_dev *adev, const char *name, u32 template_id)
{
	struct avs_tplg_path_template *pos, *template = NULL;
	struct avs_tplg *tplg;
	struct avs_path *path;

	tplg = avs_path_find_tplg(adev, name);
	if (!tplg)
		return NULL;

	list_for_each_entry(pos, &tplg->path_tmpl_list, node) {
		if (pos->id == template_id) {
			template = pos;
			break;
		}
	}
	if (!template)
		return NULL;

	spin_lock(&adev->path_list_lock);
	/* Only one variant of a given path template may be instantiated at a time. */
	list_for_each_entry(path, &adev->path_list, node) {
		if (path->template->owner == template) {
			spin_unlock(&adev->path_list_lock);
			return path;
		}
	}

	spin_unlock(&adev->path_list_lock);
	return NULL;
}

static bool avs_test_hw_params(struct snd_pcm_hw_params *params,
			       struct avs_audio_format *fmt)
{
	return (params_rate(params) == fmt->sampling_freq &&
		params_channels(params) == fmt->num_channels &&
		params_physical_width(params) == fmt->bit_depth &&
		snd_pcm_hw_params_bits(params) == fmt->valid_bit_depth);
}

static struct avs_tplg_path *
avs_path_find_variant(struct avs_dev *adev,
		      struct avs_tplg_path_template *template,
		      struct snd_pcm_hw_params *fe_params,
		      struct snd_pcm_hw_params *be_params)
{
	struct avs_tplg_path *variant;

	list_for_each_entry(variant, &template->path_list, node) {
		/* Skip variants that do not specify both formats. */
		if (!variant->fe_fmt || !variant->be_fmt)
			continue;

		dev_dbg(adev->dev, "check FE rate %d chn %d vbd %d bd %d\n",
			variant->fe_fmt->sampling_freq, variant->fe_fmt->num_channels,
			variant->fe_fmt->valid_bit_depth, variant->fe_fmt->bit_depth);
		dev_dbg(adev->dev, "check BE rate %d chn %d vbd %d bd %d\n",
			variant->be_fmt->sampling_freq, variant->be_fmt->num_channels,
			variant->be_fmt->valid_bit_depth, variant->be_fmt->bit_depth);

		if (avs_test_hw_params(fe_params, variant->fe_fmt) &&
		    avs_test_hw_params(be_params, variant->be_fmt))
			return variant;
	}

	return NULL;
}

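/*
 * Select the conditional-path variant whose source and sink path IDs match
 * the templates of the two paths being connected.
 */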
static struct avs_tplg_path *avs_condpath_find_variant(struct avs_dev *adev,
						       struct avs_tplg_path_template *template,
						       struct avs_path *source,
						       struct avs_path *sink)
{
	struct avs_tplg_path *variant;

	list_for_each_entry(variant, &template->path_list, node) {
		if (variant->source_path_id == source->template->id &&
		    variant->sink_path_id == sink->template->id)
			return variant;
	}

	return NULL;
}

static bool avs_tplg_path_template_id_equal(struct avs_tplg_path_template_id *id,
					    struct avs_tplg_path_template_id *id2)
{
	return id->id == id2->id && !strcmp(id->tplg_name, id2->tplg_name);
}

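/*
 * Check whether @path constitutes one end of the conditional path described
 * by @template and, if so, return the path instantiated from the opposite
 * end's template. @dir selects which end @path is tested against.
 */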
static struct avs_path *avs_condpath_find_match(struct avs_dev *adev,
						struct avs_tplg_path_template *template,
						struct avs_path *path, int dir)
{
	struct avs_tplg_path_template_id *id, *id2;

	if (dir) {
		id = &template->source;
		id2 = &template->sink;
	} else {
		id = &template->sink;
		id2 = &template->source;
	}

	/* Check whether this path is either source or sink of condpath template. */
	if (id->id != path->template->owner->id ||
	    strcmp(id->tplg_name, path->template->owner->owner->name))
		return NULL;

	/* Unidirectional condpaths are allowed. */
	if (avs_tplg_path_template_id_equal(id, id2))
		return path;

	/* Now find the counterpart. */
	return avs_path_find_path(adev, id2->tplg_name, id2->id);
}

static struct acpi_nhlt_config *
avs_nhlt_config_or_default(struct avs_dev *adev, struct avs_tplg_module *t);

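/*
 * Build PCM hw-constraint lists (rates, channel counts and sample bits) out
 * of all path variants that are backed by an NHLT endpoint blob. Returns the
 * number of entries collected or a negative error code.
 */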
int avs_path_set_constraint(struct avs_dev *adev, struct avs_tplg_path_template *template,
			    struct snd_pcm_hw_constraint_list *rate_list,
			    struct snd_pcm_hw_constraint_list *channels_list,
			    struct snd_pcm_hw_constraint_list *sample_bits_list)
{
	struct avs_tplg_path *path_template;
	unsigned int *rlist, *clist, *slist;
	size_t i;

	i = 0;
	list_for_each_entry(path_template, &template->path_list, node)
		i++;

	rlist = kcalloc(i, sizeof(*rlist), GFP_KERNEL);
	clist = kcalloc(i, sizeof(*clist), GFP_KERNEL);
	slist = kcalloc(i, sizeof(*slist), GFP_KERNEL);
	if (!rlist || !clist || !slist) {
		/* kfree(NULL) is a no-op, so partial allocations are safe to release. */
		kfree(rlist);
		kfree(clist);
		kfree(slist);
		return -ENOMEM;
	}

	i = 0;
	list_for_each_entry(path_template, &template->path_list, node) {
		struct avs_tplg_pipeline *pipeline_template;

		list_for_each_entry(pipeline_template, &path_template->ppl_list, node) {
			struct avs_tplg_module *module_template;

			list_for_each_entry(module_template, &pipeline_template->mod_list, node) {
				const guid_t *type = &module_template->cfg_ext->type;
				struct acpi_nhlt_config *blob;

				if (!guid_equal(type, &AVS_COPIER_MOD_UUID) &&
				    !guid_equal(type, &AVS_WOVHOSTM_MOD_UUID))
					continue;

				switch (module_template->cfg_ext->copier.dma_type) {
				case AVS_DMA_DMIC_LINK_INPUT:
				case AVS_DMA_I2S_LINK_OUTPUT:
				case AVS_DMA_I2S_LINK_INPUT:
					break;
				default:
					continue;
				}

				if (!module_template->nhlt_config) {
					blob = avs_nhlt_config_or_default(adev, module_template);
					if (IS_ERR(blob))
						continue;
				}

				rlist[i] = path_template->fe_fmt->sampling_freq;
				clist[i] = path_template->fe_fmt->num_channels;
				slist[i] = path_template->fe_fmt->bit_depth;
				i++;
			}
		}
	}

	if (i) {
		rate_list->count = i;
		rate_list->list = rlist;
		channels_list->count = i;
		channels_list->list = clist;
		sample_bits_list->count = i;
		sample_bits_list->list = slist;
	} else {
		kfree(rlist);
		kfree(clist);
		kfree(slist);
	}

	return i;
}

static void avs_init_node_id(union avs_connector_node_id *node_id,
			     struct avs_tplg_modcfg_ext *te, u32 dma_id)
{
	node_id->val = 0;
	node_id->dma_type = te->copier.dma_type;

	switch (node_id->dma_type) {
	case AVS_DMA_DMIC_LINK_INPUT:
	case AVS_DMA_I2S_LINK_OUTPUT:
	case AVS_DMA_I2S_LINK_INPUT:
		/* Gateway's virtual index is statically assigned in the topology. */
		node_id->vindex = te->copier.vindex.val;
		break;

	case AVS_DMA_HDA_HOST_OUTPUT:
	case AVS_DMA_HDA_HOST_INPUT:
		/* Gateway's virtual index is dynamically assigned with the DMA ID. */
		node_id->vindex = dma_id;
		break;

	case AVS_DMA_HDA_LINK_OUTPUT:
	case AVS_DMA_HDA_LINK_INPUT:
		node_id->vindex = te->copier.vindex.val | dma_id;
		break;

	default:
		*node_id = INVALID_NODE_ID;
		break;
	}
}

/* Every BLOB contains at least gateway attributes. */
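/*
 * The compound literal below encodes capabilities_size == 4 followed by one
 * zeroed DWORD of gateway attributes.
 */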
static struct acpi_nhlt_config *default_blob = (struct acpi_nhlt_config *)&(u32[2]) {4};

static struct acpi_nhlt_config *
avs_nhlt_config_or_default(struct avs_dev *adev, struct avs_tplg_module *t)
{
	struct acpi_nhlt_format_config *fmtcfg;
	struct avs_tplg_modcfg_ext *te;
	struct avs_audio_format *fmt;
	int link_type, dev_type;
	int bus_id, dir;

	te = t->cfg_ext;

	switch (te->copier.dma_type) {
	case AVS_DMA_I2S_LINK_OUTPUT:
		link_type = ACPI_NHLT_LINKTYPE_SSP;
		dev_type = ACPI_NHLT_DEVICETYPE_CODEC;
		bus_id = te->copier.vindex.i2s.instance;
		dir = SNDRV_PCM_STREAM_PLAYBACK;
		fmt = te->copier.out_fmt;
		break;

	case AVS_DMA_I2S_LINK_INPUT:
		link_type = ACPI_NHLT_LINKTYPE_SSP;
		dev_type = ACPI_NHLT_DEVICETYPE_CODEC;
		bus_id = te->copier.vindex.i2s.instance;
		dir = SNDRV_PCM_STREAM_CAPTURE;
		fmt = t->in_fmt;
		break;

	case AVS_DMA_DMIC_LINK_INPUT:
		link_type = ACPI_NHLT_LINKTYPE_PDM;
		dev_type = -1; /* ignored */
		bus_id = 0;
		dir = SNDRV_PCM_STREAM_CAPTURE;
		fmt = t->in_fmt;
		break;

	default:
		return default_blob;
	}

	/* Override format selection if necessary. */
	if (te->copier.blob_fmt)
		fmt = te->copier.blob_fmt;

	fmtcfg = acpi_nhlt_find_fmtcfg(link_type, dev_type, dir, bus_id,
				       fmt->num_channels, fmt->sampling_freq, fmt->valid_bit_depth,
				       fmt->bit_depth);
	if (!fmtcfg) {
		dev_warn(adev->dev, "Endpoint format configuration not found.\n");
		return ERR_PTR(-ENOENT);
	}

	if (fmtcfg->config.capabilities_size < default_blob->capabilities_size)
		return ERR_PTR(-ETOOSMALL);
	/* The firmware expects the payload to be DWORD-aligned. */
	if (fmtcfg->config.capabilities_size % sizeof(u32))
		return ERR_PTR(-EINVAL);

	return &fmtcfg->config;
}

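/*
 * On platforms with the ALTHDA attribute, non-HDA gateways expect a DMA
 * configuration appended to the NHLT blob in TLV format: a struct avs_tlv
 * header (type = AVS_GTW_DMA_CONFIG_ID, length) followed by a struct
 * avs_dma_cfg carrying the preallocated DMA channel and stream IDs.
 */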
static int avs_append_dma_cfg(struct avs_dev *adev, struct avs_copier_gtw_cfg *gtw,
			      struct avs_tplg_module *t, u32 dma_id, size_t *cfg_size)
{
	u32 dma_type = t->cfg_ext->copier.dma_type;
	struct avs_dma_cfg *dma;
	struct avs_tlv *tlv;
	size_t tlv_size;

	if (!avs_platattr_test(adev, ALTHDA))
		return 0;

	switch (dma_type) {
	case AVS_DMA_HDA_HOST_OUTPUT:
	case AVS_DMA_HDA_HOST_INPUT:
	case AVS_DMA_HDA_LINK_OUTPUT:
	case AVS_DMA_HDA_LINK_INPUT:
		return 0;
	default:
		break;
	}

	tlv_size = sizeof(*tlv) + sizeof(*dma);
	if (*cfg_size + tlv_size > AVS_MAILBOX_SIZE)
		return -E2BIG;

	/* The DMA config is a TLV appended to the existing payload. */
	tlv = (struct avs_tlv *)&gtw->config.blob[gtw->config_length];
	tlv->type = AVS_GTW_DMA_CONFIG_ID;
	tlv->length = sizeof(*dma);

	dma = (struct avs_dma_cfg *)tlv->value;
	memset(dma, 0, sizeof(*dma));
	dma->dma_method = AVS_DMA_METHOD_HDA;
	dma->pre_allocated = true;
	dma->dma_channel_id = dma_id;
	dma->stream_id = dma_id + 1;

	gtw->config_length += tlv_size / sizeof(u32);
	*cfg_size += tlv_size;

	return 0;
}

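/*
 * Copy the gateway configuration BLOB, either one assigned directly in the
 * topology or one found in the NHLT table, into the copier's gateway config
 * and append the DMA config TLV when the platform requires it. @cfg_size is
 * increased by the number of bytes added.
 */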
static int avs_fill_gtw_config(struct avs_dev *adev, struct avs_copier_gtw_cfg *gtw,
			       struct avs_tplg_module *t, u32 dma_id, size_t *cfg_size)
{
	struct acpi_nhlt_config *blob;
	size_t gtw_size;

	if (t->nhlt_config)
		blob = t->nhlt_config->blob;
	else
		blob = avs_nhlt_config_or_default(adev, t);
	if (IS_ERR(blob))
		return PTR_ERR(blob);

	gtw_size = blob->capabilities_size;
	if (*cfg_size + gtw_size > AVS_MAILBOX_SIZE)
		return -E2BIG;

	gtw->config_length = gtw_size / sizeof(u32);
	memcpy(gtw->config.blob, blob->capabilities, blob->capabilities_size);
	*cfg_size += gtw_size;

	return avs_append_dma_cfg(adev, gtw, t, dma_id, cfg_size);
}

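/*
 * Copier modules front the DMA gateways. Their configuration is assembled
 * in the preallocated mailbox buffer (adev->modcfg_buf) as the final size
 * is only known once the gateway BLOB has been copied in.
 */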
static int avs_copier_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_tplg_modcfg_ext *te;
	struct avs_copier_cfg *cfg;
	size_t cfg_size;
	u32 dma_id;
	int ret;

	te = t->cfg_ext;
	cfg = adev->modcfg_buf;
	dma_id = mod->owner->owner->dma_id;
	cfg_size = offsetof(struct avs_copier_cfg, gtw_cfg.config);

	ret = avs_fill_gtw_config(adev, &cfg->gtw_cfg, t, dma_id, &cfg_size);
	if (ret)
		return ret;

	cfg->base.cpc = t->cfg_base->cpc;
	cfg->base.ibs = t->cfg_base->ibs;
	cfg->base.obs = t->cfg_base->obs;
	cfg->base.is_pages = t->cfg_base->is_pages;
	cfg->base.audio_fmt = *t->in_fmt;
	cfg->out_fmt = *te->copier.out_fmt;
	cfg->feature_mask = te->copier.feature_mask;
	avs_init_node_id(&cfg->gtw_cfg.node_id, te, dma_id);
	cfg->gtw_cfg.dma_buffer_size = te->copier.dma_buffer_size;
	mod->gtw_attrs = cfg->gtw_cfg.config.attrs;

	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, t->core_id,
				  t->domain, cfg, cfg_size, &mod->instance_id);
	return ret;
}

static int avs_whm_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_tplg_modcfg_ext *te;
	struct avs_whm_cfg *cfg;
	size_t cfg_size;
	u32 dma_id;
	int ret;

	te = t->cfg_ext;
	cfg = adev->modcfg_buf;
	dma_id = mod->owner->owner->dma_id;
	cfg_size = offsetof(struct avs_whm_cfg, gtw_cfg.config);

	ret = avs_fill_gtw_config(adev, &cfg->gtw_cfg, t, dma_id, &cfg_size);
	if (ret)
		return ret;

	cfg->base.cpc = t->cfg_base->cpc;
	cfg->base.ibs = t->cfg_base->ibs;
	cfg->base.obs = t->cfg_base->obs;
	cfg->base.is_pages = t->cfg_base->is_pages;
	cfg->base.audio_fmt = *t->in_fmt;
	cfg->ref_fmt = *te->whm.ref_fmt;
	cfg->out_fmt = *te->whm.out_fmt;
	cfg->wake_tick_period = te->whm.wake_tick_period;
	avs_init_node_id(&cfg->gtw_cfg.node_id, te, dma_id);
	cfg->gtw_cfg.dma_buffer_size = te->whm.dma_buffer_size;
	mod->gtw_attrs = cfg->gtw_cfg.config.attrs;

	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, t->core_id,
				  t->domain, cfg, cfg_size, &mod->instance_id);
	return ret;
}

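/*
 * Locate the mixer control whose name contains @name among the kcontrols of
 * the DAPM widget representing the module's path template.
 */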
static struct soc_mixer_control *avs_get_module_control(struct avs_path_module *mod,
							const char *name)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_tplg_path_template *path_tmpl;
	struct snd_soc_dapm_widget *w;
	int i;

	path_tmpl = t->owner->owner->owner;
	w = path_tmpl->w;

	for (i = 0; i < w->num_kcontrols; i++) {
		struct avs_control_data *ctl_data;
		struct soc_mixer_control *mc;

		mc = (struct soc_mixer_control *)w->kcontrols[i]->private_value;
		ctl_data = (struct avs_control_data *)mc->dobj.private;
		if (ctl_data->id == t->ctl_id && strstr(w->kcontrols[i]->id.name, name))
			return mc;
	}

	return NULL;
}

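/*
 * Apply volume levels to a peakvol module over IPC. When @input is NULL,
 * the values cached by the control (ctl_data->values) are used instead,
 * e.g. when restoring state on module creation.
 */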
int avs_peakvol_set_volume(struct avs_dev *adev, struct avs_path_module *mod,
			   struct soc_mixer_control *mc, long *input)
{
	struct avs_volume_cfg vols[SND_SOC_TPLG_MAX_CHAN] = {{0}};
	struct avs_control_data *ctl_data;
	struct avs_tplg_module *t;
	int ret, i;

	ctl_data = mc->dobj.private;
	t = mod->template;
	if (!input)
		input = ctl_data->values;

	if (mc->num_channels) {
		for (i = 0; i < mc->num_channels; i++) {
			vols[i].channel_id = i;
			vols[i].target_volume = input[i];
			vols[i].curve_type = t->cfg_ext->peakvol.curve_type;
			vols[i].curve_duration = t->cfg_ext->peakvol.curve_duration;
		}

		ret = avs_ipc_peakvol_set_volumes(adev, mod->module_id, mod->instance_id, vols,
						  mc->num_channels);
		return AVS_IPC_RET(ret);
	}

	/* Target all channels if no individual channel is selected. */
	vols[0].channel_id = AVS_ALL_CHANNELS_MASK;
	vols[0].target_volume = input[0];
	vols[0].curve_type = t->cfg_ext->peakvol.curve_type;
	vols[0].curve_duration = t->cfg_ext->peakvol.curve_duration;

	ret = avs_ipc_peakvol_set_volume(adev, mod->module_id, mod->instance_id, &vols[0]);
	return AVS_IPC_RET(ret);
}

int avs_peakvol_set_mute(struct avs_dev *adev, struct avs_path_module *mod,
			 struct soc_mixer_control *mc, long *input)
{
	struct avs_mute_cfg mutes[SND_SOC_TPLG_MAX_CHAN] = {{0}};
	struct avs_control_data *ctl_data;
	struct avs_tplg_module *t;
	int ret, i;

	ctl_data = mc->dobj.private;
	t = mod->template;
	if (!input)
		input = ctl_data->values;

	if (mc->num_channels) {
		for (i = 0; i < mc->num_channels; i++) {
			mutes[i].channel_id = i;
			mutes[i].mute = !input[i];
			mutes[i].curve_type = t->cfg_ext->peakvol.curve_type;
			mutes[i].curve_duration = t->cfg_ext->peakvol.curve_duration;
		}

		ret = avs_ipc_peakvol_set_mutes(adev, mod->module_id, mod->instance_id, mutes,
						mc->num_channels);
		return AVS_IPC_RET(ret);
	}

	/* Target all channels if no individual channel is selected. */
	mutes[0].channel_id = AVS_ALL_CHANNELS_MASK;
	mutes[0].mute = !input[0];
	mutes[0].curve_type = t->cfg_ext->peakvol.curve_type;
	mutes[0].curve_duration = t->cfg_ext->peakvol.curve_duration;

	ret = avs_ipc_peakvol_set_mute(adev, mod->module_id, mod->instance_id, &mutes[0]);
	return AVS_IPC_RET(ret);
}

static int avs_peakvol_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct soc_mixer_control *mc;
	struct avs_peakvol_cfg *cfg;
	size_t cfg_size;
	int ret;

	cfg_size = struct_size(cfg, vols, 1);
	if (cfg_size > AVS_MAILBOX_SIZE)
		return -EINVAL;

	cfg = adev->modcfg_buf;
	memset(cfg, 0, cfg_size);
	cfg->base.cpc = t->cfg_base->cpc;
	cfg->base.ibs = t->cfg_base->ibs;
	cfg->base.obs = t->cfg_base->obs;
	cfg->base.is_pages = t->cfg_base->is_pages;
	cfg->base.audio_fmt = *t->in_fmt;
	cfg->vols[0].channel_id = AVS_ALL_CHANNELS_MASK;
	cfg->vols[0].target_volume = S32_MAX;
	cfg->vols[0].curve_type = t->cfg_ext->peakvol.curve_type;
	cfg->vols[0].curve_duration = t->cfg_ext->peakvol.curve_duration;

	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, t->core_id,
				  t->domain, cfg, cfg_size, &mod->instance_id);
	if (ret)
		return ret;

	/* Now configure both VOLUME and MUTE parameters. */
	mc = avs_get_module_control(mod, "Volume");
	if (mc) {
		ret = avs_peakvol_set_volume(adev, mod, mc, NULL);
		if (ret)
			return ret;
	}

	mc = avs_get_module_control(mod, "Switch");
	if (mc)
		return avs_peakvol_set_mute(adev, mod, mc, NULL);
	return 0;
}

static int avs_updown_mix_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_updown_mixer_cfg cfg;
	int i;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.out_channel_config = t->cfg_ext->updown_mix.out_channel_config;
	cfg.coefficients_select = t->cfg_ext->updown_mix.coefficients_select;
	for (i = 0; i < AVS_COEFF_CHANNELS_MAX; i++)
		cfg.coefficients[i] = t->cfg_ext->updown_mix.coefficients[i];
	cfg.channel_map = t->cfg_ext->updown_mix.channel_map;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

static int avs_src_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_src_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.out_freq = t->cfg_ext->src.out_freq;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

static int avs_asrc_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_asrc_cfg cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.out_freq = t->cfg_ext->asrc.out_freq;
	cfg.mode = t->cfg_ext->asrc.mode;
	cfg.disable_jitter_buffer = t->cfg_ext->asrc.disable_jitter_buffer;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

static int avs_aec_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_aec_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.ref_fmt = *t->cfg_ext->aec.ref_fmt;
	cfg.out_fmt = *t->cfg_ext->aec.out_fmt;
	cfg.cpc_lp_mode = t->cfg_ext->aec.cpc_lp_mode;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

static int avs_mux_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_mux_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.ref_fmt = *t->cfg_ext->mux.ref_fmt;
	cfg.out_fmt = *t->cfg_ext->mux.out_fmt;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

static int avs_wov_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_wov_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.cpc_lp_mode = t->cfg_ext->wov.cpc_lp_mode;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

static int avs_micsel_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_micsel_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.out_fmt = *t->cfg_ext->micsel.out_fmt;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

static int avs_modbase_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_modcfg_base cfg;

	cfg.cpc = t->cfg_base->cpc;
	cfg.ibs = t->cfg_base->ibs;
	cfg.obs = t->cfg_base->obs;
	cfg.is_pages = t->cfg_base->is_pages;
	cfg.audio_fmt = *t->in_fmt;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

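/*
 * Fallback for modules with no dedicated constructor: send the base config
 * extended with per-pin format descriptions taken from the topology.
 */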
static int avs_modext_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_tplg_modcfg_ext *tcfg = t->cfg_ext;
	struct avs_modcfg_ext *cfg;
	size_t cfg_size, num_pins;
	int ret, i;

	num_pins = tcfg->generic.num_input_pins + tcfg->generic.num_output_pins;
	cfg_size = struct_size(cfg, pin_fmts, num_pins);

	if (cfg_size > AVS_MAILBOX_SIZE)
		return -EINVAL;

	cfg = adev->modcfg_buf;
	memset(cfg, 0, cfg_size);
	cfg->base.cpc = t->cfg_base->cpc;
	cfg->base.ibs = t->cfg_base->ibs;
	cfg->base.obs = t->cfg_base->obs;
	cfg->base.is_pages = t->cfg_base->is_pages;
	cfg->base.audio_fmt = *t->in_fmt;
	cfg->num_input_pins = tcfg->generic.num_input_pins;
	cfg->num_output_pins = tcfg->generic.num_output_pins;

	/* configure pin formats */
	for (i = 0; i < num_pins; i++) {
		struct avs_tplg_pin_format *tpin = &tcfg->generic.pin_fmts[i];
		struct avs_pin_format *pin = &cfg->pin_fmts[i];

		pin->pin_index = tpin->pin_index;
		pin->iobs = tpin->iobs;
		pin->audio_fmt = *tpin->fmt;
	}

	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				  t->core_id, t->domain, cfg, cfg_size,
				  &mod->instance_id);
	return ret;
}

static int avs_probe_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	dev_err(adev->dev, "Probe module can't be instantiated by topology\n");
	return -EINVAL;
}

struct avs_module_create {
	guid_t *guid;
	int (*create)(struct avs_dev *adev, struct avs_path_module *mod);
};

static struct avs_module_create avs_module_create[] = {
	{ &AVS_MIXIN_MOD_UUID, avs_modbase_create },
	{ &AVS_MIXOUT_MOD_UUID, avs_modbase_create },
	{ &AVS_KPBUFF_MOD_UUID, avs_modbase_create },
	{ &AVS_COPIER_MOD_UUID, avs_copier_create },
	{ &AVS_PEAKVOL_MOD_UUID, avs_peakvol_create },
	{ &AVS_GAIN_MOD_UUID, avs_peakvol_create },
	{ &AVS_MICSEL_MOD_UUID, avs_micsel_create },
	{ &AVS_MUX_MOD_UUID, avs_mux_create },
	{ &AVS_UPDWMIX_MOD_UUID, avs_updown_mix_create },
	{ &AVS_SRCINTC_MOD_UUID, avs_src_create },
	{ &AVS_AEC_MOD_UUID, avs_aec_create },
	{ &AVS_ASRC_MOD_UUID, avs_asrc_create },
	{ &AVS_INTELWOV_MOD_UUID, avs_wov_create },
	{ &AVS_PROBE_MOD_UUID, avs_probe_create },
	{ &AVS_WOVHOSTM_MOD_UUID, avs_whm_create },
};

static int avs_path_module_type_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	const guid_t *type = &mod->template->cfg_ext->type;

	for (int i = 0; i < ARRAY_SIZE(avs_module_create); i++)
		if (guid_equal(type, avs_module_create[i].guid))
			return avs_module_create[i].create(adev, mod);

	return avs_modext_create(adev, mod);
}

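/*
 * Send all initial configuration payloads assigned to the module in the
 * topology, via avs_ipc_set_large_config(), right after the module has been
 * instantiated on the DSP.
 */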
static int avs_path_module_send_init_configs(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_soc_component *acomp;
	u32 num_ids, *ids;

	acomp = to_avs_soc_component(mod->template->owner->owner->owner->owner->comp);
	num_ids = mod->template->num_config_ids;
	ids = mod->template->config_ids;

	for (int i = 0; i < num_ids; i++) {
		struct avs_tplg_init_config *config = &acomp->tplg->init_configs[ids[i]];
		size_t len = config->length;
		void *data = config->data;
		u32 param = config->param;
		int ret;

		ret = avs_ipc_set_large_config(adev, mod->module_id, mod->instance_id,
					       param, data, len);
		if (ret) {
			dev_err(adev->dev, "send initial module config failed: %d\n", ret);
			return AVS_IPC_RET(ret);
		}
	}

	return 0;
}

static void avs_path_module_free(struct avs_dev *adev, struct avs_path_module *mod)
{
	kfree(mod);
}

static struct avs_path_module *
avs_path_module_create(struct avs_dev *adev,
		       struct avs_path_pipeline *owner,
		       struct avs_tplg_module *template)
{
	struct avs_path_module *mod;
	int module_id, ret;

	module_id = avs_get_module_id(adev, &template->cfg_ext->type);
	if (module_id < 0)
		return ERR_PTR(module_id);

	mod = kzalloc(sizeof(*mod), GFP_KERNEL);
	if (!mod)
		return ERR_PTR(-ENOMEM);

	mod->template = template;
	mod->module_id = module_id;
	mod->owner = owner;
	INIT_LIST_HEAD(&mod->node);

	ret = avs_path_module_type_create(adev, mod);
	if (ret) {
		dev_err(adev->dev, "module-type create failed: %d\n", ret);
		kfree(mod);
		return ERR_PTR(ret);
	}

	ret = avs_path_module_send_init_configs(adev, mod);
	if (ret) {
		kfree(mod);
		return ERR_PTR(ret);
	}

	return mod;
}

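/*
 * Resolve a binding template into live module handles: find the local
 * module within the binding's owner pipeline and the target module within
 * another instantiated path, then record which is the source and which is
 * the sink.
 */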
static int avs_path_binding_arm(struct avs_dev *adev, struct avs_path_binding *binding)
{
	struct avs_path_module *this_mod, *target_mod;
	struct avs_path_pipeline *target_ppl;
	struct avs_path *target_path;
	struct avs_tplg_binding *t;

	t = binding->template;
	this_mod = avs_path_find_module(binding->owner,
					t->mod_id);
	if (!this_mod) {
		dev_err(adev->dev, "path mod %d not found\n", t->mod_id);
		return -EINVAL;
	}

	/* The target path is identified by both its topology name and template ID. */
	target_path = avs_path_find_path(adev, t->target_tplg_name,
					 t->target_path_tmpl_id);
	if (!target_path) {
		dev_err(adev->dev, "target path %s:%d not found\n",
			t->target_tplg_name, t->target_path_tmpl_id);
		return -EINVAL;
	}

	target_ppl = avs_path_find_pipeline(target_path,
					    t->target_ppl_id);
	if (!target_ppl) {
		dev_err(adev->dev, "target ppl %d not found\n", t->target_ppl_id);
		return -EINVAL;
	}

	target_mod = avs_path_find_module(target_ppl, t->target_mod_id);
	if (!target_mod) {
		dev_err(adev->dev, "target mod %d not found\n", t->target_mod_id);
		return -EINVAL;
	}

	if (t->is_sink) {
		binding->sink = this_mod;
		binding->sink_pin = t->mod_pin;
		binding->source = target_mod;
		binding->source_pin = t->target_mod_pin;
	} else {
		binding->sink = target_mod;
		binding->sink_pin = t->target_mod_pin;
		binding->source = this_mod;
		binding->source_pin = t->mod_pin;
	}

	return 0;
}

static void avs_path_binding_free(struct avs_dev *adev, struct avs_path_binding *binding)
{
	kfree(binding);
}

static struct avs_path_binding *avs_path_binding_create(struct avs_dev *adev,
							struct avs_path_pipeline *owner,
							struct avs_tplg_binding *t)
{
	struct avs_path_binding *binding;

	binding = kzalloc(sizeof(*binding), GFP_KERNEL);
	if (!binding)
		return ERR_PTR(-ENOMEM);

	binding->template = t;
	binding->owner = owner;
	INIT_LIST_HEAD(&binding->node);

	return binding;
}

static int avs_path_pipeline_arm(struct avs_dev *adev,
				 struct avs_path_pipeline *ppl)
{
	struct avs_path_module *mod;

	list_for_each_entry(mod, &ppl->mod_list, node) {
		struct avs_path_module *source, *sink;
		int ret;

		/*
		 * The last module on the list (or the only one) has no
		 * successor to bind to, so stop here.
		 */
		if (mod == list_last_entry(&ppl->mod_list,
					   struct avs_path_module, node))
			break;

		/* bind current module to next module on list */
		source = mod;
		sink = list_next_entry(mod, node);

		ret = avs_ipc_bind(adev, source->module_id, source->instance_id,
				   sink->module_id, sink->instance_id, 0, 0);
		if (ret)
			return AVS_IPC_RET(ret);
	}

	return 0;
}

static void avs_path_pipeline_free(struct avs_dev *adev,
				   struct avs_path_pipeline *ppl)
{
	struct avs_path_binding *binding, *bsave;
	struct avs_path_module *mod, *save;

	list_for_each_entry_safe(binding, bsave, &ppl->binding_list, node) {
		list_del(&binding->node);
		avs_path_binding_free(adev, binding);
	}

	avs_dsp_delete_pipeline(adev, ppl->instance_id);

	/* Unload resources occupied by owned modules */
	list_for_each_entry_safe(mod, save, &ppl->mod_list, node) {
		avs_dsp_delete_module(adev, mod->module_id, mod->instance_id,
				      mod->owner->instance_id,
				      mod->template->core_id);
		avs_path_module_free(adev, mod);
	}

	list_del(&ppl->node);
	kfree(ppl);
}

static struct avs_path_pipeline *
avs_path_pipeline_create(struct avs_dev *adev, struct avs_path *owner,
			 struct avs_tplg_pipeline *template)
{
	struct avs_path_pipeline *ppl;
	struct avs_tplg_pplcfg *cfg = template->cfg;
	struct avs_tplg_module *tmod;
	int ret, i;

	ppl = kzalloc(sizeof(*ppl), GFP_KERNEL);
	if (!ppl)
		return ERR_PTR(-ENOMEM);

	ppl->template = template;
	ppl->owner = owner;
	INIT_LIST_HEAD(&ppl->binding_list);
	INIT_LIST_HEAD(&ppl->mod_list);
	INIT_LIST_HEAD(&ppl->node);

	ret = avs_dsp_create_pipeline(adev, cfg->req_size, cfg->priority,
				      cfg->lp, cfg->attributes,
				      &ppl->instance_id);
	if (ret) {
		dev_err(adev->dev, "error creating pipeline %d\n", ret);
		kfree(ppl);
		return ERR_PTR(ret);
	}

	list_for_each_entry(tmod, &template->mod_list, node) {
		struct avs_path_module *mod;

		mod = avs_path_module_create(adev, ppl, tmod);
		if (IS_ERR(mod)) {
			ret = PTR_ERR(mod);
			dev_err(adev->dev, "error creating module %d\n", ret);
			goto init_err;
		}

		list_add_tail(&mod->node, &ppl->mod_list);
	}

	for (i = 0; i < template->num_bindings; i++) {
		struct avs_path_binding *binding;

		binding = avs_path_binding_create(adev, ppl, template->bindings[i]);
		if (IS_ERR(binding)) {
			ret = PTR_ERR(binding);
			dev_err(adev->dev, "error creating binding %d\n", ret);
			goto init_err;
		}

		list_add_tail(&binding->node, &ppl->binding_list);
	}

	return ppl;

init_err:
	avs_path_pipeline_free(adev, ppl);
	return ERR_PTR(ret);
}

static int avs_path_init(struct avs_dev *adev, struct avs_path *path,
			 struct avs_tplg_path *template, u32 dma_id)
{
	struct avs_tplg_pipeline *tppl;

	path->owner = adev;
	path->template = template;
	path->dma_id = dma_id;
	INIT_LIST_HEAD(&path->ppl_list);
	INIT_LIST_HEAD(&path->node);
	INIT_LIST_HEAD(&path->source_list);
	INIT_LIST_HEAD(&path->sink_list);
	INIT_LIST_HEAD(&path->source_node);
	INIT_LIST_HEAD(&path->sink_node);

	/* create all the pipelines */
	list_for_each_entry(tppl, &template->ppl_list, node) {
		struct avs_path_pipeline *ppl;

		ppl = avs_path_pipeline_create(adev, path, tppl);
		if (IS_ERR(ppl))
			return PTR_ERR(ppl);

		list_add_tail(&ppl->node, &path->ppl_list);
	}

	spin_lock(&adev->path_list_lock);
	list_add_tail(&path->node, &adev->path_list);
	spin_unlock(&adev->path_list_lock);

	return 0;
}

static int avs_path_arm(struct avs_dev *adev, struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_path_binding *binding;
	int ret;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		/*
		 * Arm all pipeline bindings before binding modules within the
		 * pipelines; arming costs no IPCs, unlike the latter.
		 */
		list_for_each_entry(binding, &ppl->binding_list, node) {
			ret = avs_path_binding_arm(adev, binding);
			if (ret < 0)
				return ret;
		}

		ret = avs_path_pipeline_arm(adev, ppl);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static void avs_path_free_unlocked(struct avs_path *path)
{
	struct avs_path_pipeline *ppl, *save;

	spin_lock(&path->owner->path_list_lock);
	list_del(&path->node);
	spin_unlock(&path->owner->path_list_lock);

	list_for_each_entry_safe(ppl, save, &path->ppl_list, node)
		avs_path_pipeline_free(path->owner, ppl);

	kfree(path);
}

static struct avs_path *avs_path_create_unlocked(struct avs_dev *adev, u32 dma_id,
						 struct avs_tplg_path *template)
{
	struct avs_path *path;
	int ret;

	path = kzalloc(sizeof(*path), GFP_KERNEL);
	if (!path)
		return ERR_PTR(-ENOMEM);

	ret = avs_path_init(adev, path, template, dma_id);
	if (ret < 0)
		goto err;

	ret = avs_path_arm(adev, path);
	if (ret < 0)
		goto err;

	path->state = AVS_PPL_STATE_INVALID;
	return path;
err:
	avs_path_free_unlocked(path);
	return ERR_PTR(ret);
}

static void avs_condpath_free(struct avs_dev *adev, struct avs_path *path)
{
	int ret;

	list_del(&path->source_node);
	list_del(&path->sink_node);

	ret = avs_path_reset(path);
	if (ret < 0)
		dev_err(adev->dev, "reset condpath failed: %d\n", ret);

	ret = avs_path_unbind(path);
	if (ret < 0)
		dev_err(adev->dev, "unbind condpath failed: %d\n", ret);

	avs_path_free_unlocked(path);
}

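/*
 * Conditional paths are created fully bound and reset so that a RUNNING
 * trigger is all they need once both of their endpoints are running.
 */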
static struct avs_path *avs_condpath_create(struct avs_dev *adev,
					    struct avs_tplg_path *template,
					    struct avs_path *source,
					    struct avs_path *sink)
{
	struct avs_path *path;
	int ret;

	path = avs_path_create_unlocked(adev, 0, template);
	if (IS_ERR(path))
		return path;

	ret = avs_path_bind(path);
	if (ret)
		goto err_bind;

	ret = avs_path_reset(path);
	if (ret)
		goto err_reset;

	path->source = source;
	path->sink = sink;
	list_add_tail(&path->source_node, &source->source_list);
	list_add_tail(&path->sink_node, &sink->sink_list);

	return path;

err_reset:
	avs_path_unbind(path);
err_bind:
	avs_path_free_unlocked(path);
	return ERR_PTR(ret);
}

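/*
 * Walk the condpath templates of all registered topologies and instantiate
 * every one for which @path completes a source-sink pair. @dir selects
 * whether @path is tested as the source or as the sink of a template.
 */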
static int avs_condpaths_walk(struct avs_dev *adev, struct avs_path *path, int dir)
{
	struct avs_soc_component *acomp;
	struct avs_path *source, *sink;
	struct avs_path **other;

	if (dir) {
		source = path;
		other = &sink;
	} else {
		sink = path;
		other = &source;
	}

	list_for_each_entry(acomp, &adev->comp_list, node) {
		for (int i = 0; i < acomp->tplg->num_condpath_tmpls; i++) {
			struct avs_tplg_path_template *template;
			struct avs_tplg_path *variant;
			struct avs_path *cpath;

			template = &acomp->tplg->condpath_tmpls[i];

			/* Do not create unidirectional condpaths twice. */
			if (avs_tplg_path_template_id_equal(&template->source,
							    &template->sink) && dir)
				continue;

			*other = avs_condpath_find_match(adev, template, path, dir);
			if (!*other)
				continue;

			variant = avs_condpath_find_variant(adev, template, source, sink);
			if (!variant)
				continue;

			cpath = avs_condpath_create(adev, variant, source, sink);
			if (IS_ERR(cpath))
				return PTR_ERR(cpath);
		}
	}

	return 0;
}

/* Caller responsible for holding adev->path_mutex. */
static int avs_condpaths_walk_all(struct avs_dev *adev, struct avs_path *path)
{
	int ret;

	ret = avs_condpaths_walk(adev, path, SNDRV_PCM_STREAM_CAPTURE);
	if (ret)
		return ret;

	return avs_condpaths_walk(adev, path, SNDRV_PCM_STREAM_PLAYBACK);
}

void avs_path_free(struct avs_path *path)
{
	struct avs_path *cpath, *csave;
	struct avs_dev *adev = path->owner;

	mutex_lock(&adev->path_mutex);

	/* Free all condpaths this path spawned. */
	list_for_each_entry_safe(cpath, csave, &path->source_list, source_node)
		avs_condpath_free(path->owner, cpath);
	list_for_each_entry_safe(cpath, csave, &path->sink_list, sink_node)
		avs_condpath_free(path->owner, cpath);

	avs_path_free_unlocked(path);

	mutex_unlock(&adev->path_mutex);
}

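/*
 * Instantiate the path variant matching both FE and BE hw_params and spawn
 * any conditional paths it participates in. Called once both formats are
 * fixed, typically from a DAI hw_params handler.
 */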
struct avs_path *avs_path_create(struct avs_dev *adev, u32 dma_id,
				 struct avs_tplg_path_template *template,
				 struct snd_pcm_hw_params *fe_params,
				 struct snd_pcm_hw_params *be_params)
{
	struct avs_tplg_path *variant;
	struct avs_path *path;
	int ret;

	variant = avs_path_find_variant(adev, template, fe_params, be_params);
	if (!variant) {
		dev_err(adev->dev, "no matching variant found\n");
		return ERR_PTR(-ENOENT);
	}

	/* Serialize path and its components creation. */
	mutex_lock(&adev->path_mutex);
	/* Satisfy needs of avs_path_find_tplg(). */
	mutex_lock(&adev->comp_list_mutex);

	path = avs_path_create_unlocked(adev, dma_id, variant);
	if (IS_ERR(path))
		goto exit;

	ret = avs_condpaths_walk_all(adev, path);
	if (ret) {
		avs_path_free_unlocked(path);
		path = ERR_PTR(ret);
	}

exit:
	mutex_unlock(&adev->comp_list_mutex);
	mutex_unlock(&adev->path_mutex);

	return path;
}

static int avs_path_bind_prepare(struct avs_dev *adev,
				 struct avs_path_binding *binding)
{
	const struct avs_audio_format *src_fmt, *sink_fmt;
	struct avs_tplg_module *tsource = binding->source->template;
	struct avs_path_module *source = binding->source;
	int ret;

	/*
	 * Only copier modules about to be bound to an output pin other
	 * than 0 need preparation.
	 */
	if (!binding->source_pin)
		return 0;
	if (!guid_equal(&tsource->cfg_ext->type, &AVS_COPIER_MOD_UUID))
		return 0;

	src_fmt = tsource->in_fmt;
	sink_fmt = binding->sink->template->in_fmt;

	ret = avs_ipc_copier_set_sink_format(adev, source->module_id,
					     source->instance_id, binding->source_pin,
					     src_fmt, sink_fmt);
	if (ret) {
		dev_err(adev->dev, "config copier failed: %d\n", ret);
		return AVS_IPC_RET(ret);
	}

	return 0;
}

int avs_path_bind(struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		struct avs_path_binding *binding;

		list_for_each_entry(binding, &ppl->binding_list, node) {
			struct avs_path_module *source, *sink;

			source = binding->source;
			sink = binding->sink;

			ret = avs_path_bind_prepare(adev, binding);
			if (ret < 0)
				return ret;

			ret = avs_ipc_bind(adev, source->module_id,
					   source->instance_id, sink->module_id,
					   sink->instance_id, binding->sink_pin,
					   binding->source_pin);
			if (ret) {
				dev_err(adev->dev, "bind path failed: %d\n", ret);
				return AVS_IPC_RET(ret);
			}
		}
	}

	return 0;
}

int avs_path_unbind(struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		struct avs_path_binding *binding;

		list_for_each_entry(binding, &ppl->binding_list, node) {
			struct avs_path_module *source, *sink;

			source = binding->source;
			sink = binding->sink;

			ret = avs_ipc_unbind(adev, source->module_id,
					     source->instance_id, sink->module_id,
					     sink->instance_id, binding->sink_pin,
					     binding->source_pin);
			if (ret) {
				dev_err(adev->dev, "unbind path failed: %d\n", ret);
				return AVS_IPC_RET(ret);
			}
		}
	}

	return 0;
}

int avs_path_reset(struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	if (path->state == AVS_PPL_STATE_RESET)
		return 0;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
						 AVS_PPL_STATE_RESET);
		if (ret) {
			dev_err(adev->dev, "reset path failed: %d\n", ret);
			path->state = AVS_PPL_STATE_INVALID;
			return AVS_IPC_RET(ret);
		}
	}

	path->state = AVS_PPL_STATE_RESET;
	return 0;
}

static int avs_condpath_pause(struct avs_dev *adev, struct avs_path *cpath)
{
	struct avs_path_pipeline *ppl;
	int ret;

	if (cpath->state == AVS_PPL_STATE_PAUSED)
		return 0;

	list_for_each_entry_reverse(ppl, &cpath->ppl_list, node) {
		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id, AVS_PPL_STATE_PAUSED);
		if (ret) {
			dev_err(adev->dev, "pause cpath failed: %d\n", ret);
			cpath->state = AVS_PPL_STATE_INVALID;
			return AVS_IPC_RET(ret);
		}
	}

	cpath->state = AVS_PPL_STATE_PAUSED;
	return 0;
}

static void avs_condpaths_pause(struct avs_dev *adev, struct avs_path *path)
{
	struct avs_path *cpath;

	mutex_lock(&adev->path_mutex);

	/* If either source or sink stops, so do the attached conditional paths. */
	list_for_each_entry(cpath, &path->source_list, source_node)
		avs_condpath_pause(adev, cpath);
	list_for_each_entry(cpath, &path->sink_list, sink_node)
		avs_condpath_pause(adev, cpath);

	mutex_unlock(&adev->path_mutex);
}

int avs_path_pause(struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	if (path->state == AVS_PPL_STATE_PAUSED)
		return 0;

	avs_condpaths_pause(adev, path);

	list_for_each_entry_reverse(ppl, &path->ppl_list, node) {
		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
						 AVS_PPL_STATE_PAUSED);
		if (ret) {
			dev_err(adev->dev, "pause path failed: %d\n", ret);
			path->state = AVS_PPL_STATE_INVALID;
			return AVS_IPC_RET(ret);
		}
	}

	path->state = AVS_PPL_STATE_PAUSED;
	return 0;
}

static int avs_condpath_run(struct avs_dev *adev, struct avs_path *cpath, int trigger)
{
	struct avs_path_pipeline *ppl;
	int ret;

	if (cpath->state == AVS_PPL_STATE_RUNNING)
		return 0;

	list_for_each_entry(ppl, &cpath->ppl_list, node) {
		if (ppl->template->cfg->trigger != trigger)
			continue;

		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id, AVS_PPL_STATE_RUNNING);
		if (ret) {
			dev_err(adev->dev, "run cpath failed: %d\n", ret);
			cpath->state = AVS_PPL_STATE_INVALID;
			return AVS_IPC_RET(ret);
		}
	}

	cpath->state = AVS_PPL_STATE_RUNNING;
	return 0;
}

static void avs_condpaths_run(struct avs_dev *adev, struct avs_path *path, int trigger)
{
	struct avs_path *cpath;

	mutex_lock(&adev->path_mutex);

	/* Run conditional paths only if source and sink are both running. */
	list_for_each_entry(cpath, &path->source_list, source_node)
		if (cpath->source->state == AVS_PPL_STATE_RUNNING &&
		    cpath->sink->state == AVS_PPL_STATE_RUNNING)
			avs_condpath_run(adev, cpath, trigger);

	list_for_each_entry(cpath, &path->sink_list, sink_node)
		if (cpath->source->state == AVS_PPL_STATE_RUNNING &&
		    cpath->sink->state == AVS_PPL_STATE_RUNNING)
			avs_condpath_run(adev, cpath, trigger);

	mutex_unlock(&adev->path_mutex);
}

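/*
 * Start all pipelines of the path whose topology-assigned trigger matches
 * @trigger. With AVS_TPLG_TRIGGER_AUTO, attached conditional paths are
 * started too, provided both of their endpoints are running.
 */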
int avs_path_run(struct avs_path *path, int trigger)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	if (path->state == AVS_PPL_STATE_RUNNING && trigger == AVS_TPLG_TRIGGER_AUTO)
		return 0;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		if (ppl->template->cfg->trigger != trigger)
			continue;

		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
						 AVS_PPL_STATE_RUNNING);
		if (ret) {
			dev_err(adev->dev, "run path failed: %d\n", ret);
			path->state = AVS_PPL_STATE_INVALID;
			return AVS_IPC_RET(ret);
		}
	}

	path->state = AVS_PPL_STATE_RUNNING;

	/* Granular pipeline triggering not intended for conditional paths. */
	if (trigger == AVS_TPLG_TRIGGER_AUTO)
		avs_condpaths_run(adev, path, trigger);

	return 0;
}
1609