// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021 Intel Corporation
//
// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
//          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
//

#include <linux/acpi.h>
#include <acpi/nhlt.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "avs.h"
#include "control.h"
#include "path.h"
#include "topology.h"

/* Must be called with adev->comp_list_mutex held. */
static struct avs_tplg *
avs_path_find_tplg(struct avs_dev *adev, const char *name)
{
	struct avs_soc_component *acomp;

	list_for_each_entry(acomp, &adev->comp_list, node)
		if (!strcmp(acomp->tplg->name, name))
			return acomp->tplg;
	return NULL;
}

static struct avs_path_module *
avs_path_find_module(struct avs_path_pipeline *ppl, u32 template_id)
{
	struct avs_path_module *mod;

	list_for_each_entry(mod, &ppl->mod_list, node)
		if (mod->template->id == template_id)
			return mod;
	return NULL;
}

static struct avs_path_pipeline *
avs_path_find_pipeline(struct avs_path *path, u32 template_id)
{
	struct avs_path_pipeline *ppl;

	list_for_each_entry(ppl, &path->ppl_list, node)
		if (ppl->template->id == template_id)
			return ppl;
	return NULL;
}

static struct avs_path *
avs_path_find_path(struct avs_dev *adev, const char *name, u32 template_id)
{
	struct avs_tplg_path_template *pos, *template = NULL;
	struct avs_tplg *tplg;
	struct avs_path *path;

	tplg = avs_path_find_tplg(adev, name);
	if (!tplg)
		return NULL;

	list_for_each_entry(pos, &tplg->path_tmpl_list, node) {
		if (pos->id == template_id) {
			template = pos;
			break;
		}
	}
	if (!template)
		return NULL;

	spin_lock(&adev->path_list_lock);
	/* Only one variant of a given path template may be instantiated at a time. */
	list_for_each_entry(path, &adev->path_list, node) {
		if (path->template->owner == template) {
			spin_unlock(&adev->path_list_lock);
			return path;
		}
	}

	spin_unlock(&adev->path_list_lock);
	return NULL;
}

static bool avs_test_hw_params(struct snd_pcm_hw_params *params,
			       struct avs_audio_format *fmt)
{
	return (params_rate(params) == fmt->sampling_freq &&
		params_channels(params) == fmt->num_channels &&
		params_physical_width(params) == fmt->bit_depth &&
		snd_pcm_hw_params_bits(params) == fmt->valid_bit_depth);
}

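/*
 * Find the path variant whose FE and BE formats match the hw_params of both
 * ends of the connection.
 */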
static struct avs_tplg_path *
avs_path_find_variant(struct avs_dev *adev,
		      struct avs_tplg_path_template *template,
		      struct snd_pcm_hw_params *fe_params,
		      struct snd_pcm_hw_params *be_params)
{
	struct avs_tplg_path *variant;

	list_for_each_entry(variant, &template->path_list, node) {
		dev_dbg(adev->dev, "check FE rate %d chn %d vbd %d bd %d\n",
			variant->fe_fmt->sampling_freq, variant->fe_fmt->num_channels,
			variant->fe_fmt->valid_bit_depth, variant->fe_fmt->bit_depth);
		dev_dbg(adev->dev, "check BE rate %d chn %d vbd %d bd %d\n",
			variant->be_fmt->sampling_freq, variant->be_fmt->num_channels,
			variant->be_fmt->valid_bit_depth, variant->be_fmt->bit_depth);

		if (variant->fe_fmt && avs_test_hw_params(fe_params, variant->fe_fmt) &&
		    variant->be_fmt && avs_test_hw_params(be_params, variant->be_fmt))
			return variant;
	}

	return NULL;
}

static void avs_init_node_id(union avs_connector_node_id *node_id,
			     struct avs_tplg_modcfg_ext *te, u32 dma_id)
{
	node_id->val = 0;
	node_id->dma_type = te->copier.dma_type;

	switch (node_id->dma_type) {
	case AVS_DMA_DMIC_LINK_INPUT:
	case AVS_DMA_I2S_LINK_OUTPUT:
	case AVS_DMA_I2S_LINK_INPUT:
		/* Gateway's virtual index is statically assigned in the topology. */
		node_id->vindex = te->copier.vindex.val;
		break;

	case AVS_DMA_HDA_HOST_OUTPUT:
	case AVS_DMA_HDA_HOST_INPUT:
		/* Gateway's virtual index is dynamically assigned with DMA ID. */
		node_id->vindex = dma_id;
		break;

	case AVS_DMA_HDA_LINK_OUTPUT:
	case AVS_DMA_HDA_LINK_INPUT:
		node_id->vindex = te->copier.vindex.val | dma_id;
		break;

	default:
		*node_id = INVALID_NODE_ID;
		break;
	}
}

/* Every BLOB contains at least gateway attributes. */
static struct acpi_nhlt_config *default_blob = (struct acpi_nhlt_config *)&(u32[2]) {4};

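/*
 * Locate the NHLT endpoint blob matching the module's link configuration.
 * Link types with no NHLT representation fall back to the default,
 * attributes-only blob.
 */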
static struct acpi_nhlt_config *
avs_nhlt_config_or_default(struct avs_dev *adev, struct avs_tplg_module *t)
{
	struct acpi_nhlt_format_config *fmtcfg;
	struct avs_tplg_modcfg_ext *te;
	struct avs_audio_format *fmt;
	int link_type, dev_type;
	int bus_id, dir;

	te = t->cfg_ext;

	switch (te->copier.dma_type) {
	case AVS_DMA_I2S_LINK_OUTPUT:
		link_type = ACPI_NHLT_LINKTYPE_SSP;
		dev_type = ACPI_NHLT_DEVICETYPE_CODEC;
		bus_id = te->copier.vindex.i2s.instance;
		dir = SNDRV_PCM_STREAM_PLAYBACK;
		fmt = te->copier.out_fmt;
		break;

	case AVS_DMA_I2S_LINK_INPUT:
		link_type = ACPI_NHLT_LINKTYPE_SSP;
		dev_type = ACPI_NHLT_DEVICETYPE_CODEC;
		bus_id = te->copier.vindex.i2s.instance;
		dir = SNDRV_PCM_STREAM_CAPTURE;
		fmt = t->in_fmt;
		break;

	case AVS_DMA_DMIC_LINK_INPUT:
		link_type = ACPI_NHLT_LINKTYPE_PDM;
		dev_type = -1; /* ignored */
		bus_id = 0;
		dir = SNDRV_PCM_STREAM_CAPTURE;
		fmt = t->in_fmt;
		break;

	default:
		return default_blob;
	}

	/* Override format selection if necessary. */
	if (te->copier.blob_fmt)
		fmt = te->copier.blob_fmt;

	fmtcfg = acpi_nhlt_find_fmtcfg(link_type, dev_type, dir, bus_id,
				       fmt->num_channels, fmt->sampling_freq,
				       fmt->valid_bit_depth, fmt->bit_depth);
	if (!fmtcfg) {
		dev_warn(adev->dev, "Endpoint format configuration not found.\n");
		return ERR_PTR(-ENOENT);
	}

	if (fmtcfg->config.capabilities_size < default_blob->capabilities_size)
		return ERR_PTR(-ETOOSMALL);
	/* The firmware expects the payload to be DWORD-aligned. */
	if (fmtcfg->config.capabilities_size % sizeof(u32))
		return ERR_PTR(-EINVAL);

	return &fmtcfg->config;
}

static int avs_fill_gtw_config(struct avs_dev *adev, struct avs_copier_gtw_cfg *gtw,
			       struct avs_tplg_module *t, size_t *cfg_size)
{
	struct acpi_nhlt_config *blob;
	size_t gtw_size;

	blob = avs_nhlt_config_or_default(adev, t);
	if (IS_ERR(blob))
		return PTR_ERR(blob);

	gtw_size = blob->capabilities_size;
	if (*cfg_size + gtw_size > AVS_MAILBOX_SIZE)
		return -E2BIG;

	gtw->config_length = gtw_size / sizeof(u32);
	memcpy(gtw->config.blob, blob->capabilities, blob->capabilities_size);
	*cfg_size += gtw_size;

	return 0;
}

static int avs_copier_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_tplg_modcfg_ext *te;
	struct avs_copier_cfg *cfg;
	size_t cfg_size;
	u32 dma_id;
	int ret;

	te = t->cfg_ext;
	cfg = adev->modcfg_buf;
	dma_id = mod->owner->owner->dma_id;
	cfg_size = offsetof(struct avs_copier_cfg, gtw_cfg.config);

	ret = avs_fill_gtw_config(adev, &cfg->gtw_cfg, t, &cfg_size);
	if (ret)
		return ret;

	cfg->base.cpc = t->cfg_base->cpc;
	cfg->base.ibs = t->cfg_base->ibs;
	cfg->base.obs = t->cfg_base->obs;
	cfg->base.is_pages = t->cfg_base->is_pages;
	cfg->base.audio_fmt = *t->in_fmt;
	cfg->out_fmt = *te->copier.out_fmt;
	cfg->feature_mask = te->copier.feature_mask;
	avs_init_node_id(&cfg->gtw_cfg.node_id, te, dma_id);
	cfg->gtw_cfg.dma_buffer_size = te->copier.dma_buffer_size;
	mod->gtw_attrs = cfg->gtw_cfg.config.attrs;

	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, t->core_id,
				  t->domain, cfg, cfg_size, &mod->instance_id);
	return ret;
}

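/*
 * WHM modules own a DMA gateway just like copiers do, so their configuration
 * also embeds an NHLT blob and a gateway node id.
 */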
static int avs_whm_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_tplg_modcfg_ext *te;
	struct avs_whm_cfg *cfg;
	size_t cfg_size;
	u32 dma_id;
	int ret;

	te = t->cfg_ext;
	cfg = adev->modcfg_buf;
	dma_id = mod->owner->owner->dma_id;
	cfg_size = offsetof(struct avs_whm_cfg, gtw_cfg.config);

	ret = avs_fill_gtw_config(adev, &cfg->gtw_cfg, t, &cfg_size);
	if (ret)
		return ret;

	cfg->base.cpc = t->cfg_base->cpc;
	cfg->base.ibs = t->cfg_base->ibs;
	cfg->base.obs = t->cfg_base->obs;
	cfg->base.is_pages = t->cfg_base->is_pages;
	cfg->base.audio_fmt = *t->in_fmt;
	cfg->ref_fmt = *te->whm.ref_fmt;
	cfg->out_fmt = *te->whm.out_fmt;
	cfg->wake_tick_period = te->whm.wake_tick_period;
	avs_init_node_id(&cfg->gtw_cfg.node_id, te, dma_id);
	cfg->gtw_cfg.dma_buffer_size = te->whm.dma_buffer_size;
	mod->gtw_attrs = cfg->gtw_cfg.config.attrs;

	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, t->core_id,
				  t->domain, cfg, cfg_size, &mod->instance_id);
	return ret;
}

static struct soc_mixer_control *avs_get_module_control(struct avs_path_module *mod,
							const char *name)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_tplg_path_template *path_tmpl;
	struct snd_soc_dapm_widget *w;
	int i;

	path_tmpl = t->owner->owner->owner;
	w = path_tmpl->w;

	for (i = 0; i < w->num_kcontrols; i++) {
		struct avs_control_data *ctl_data;
		struct soc_mixer_control *mc;

		mc = (struct soc_mixer_control *)w->kcontrols[i]->private_value;
		ctl_data = (struct avs_control_data *)mc->dobj.private;
		if (ctl_data->id == t->ctl_id && strstr(w->kcontrols[i]->id.name, name))
			return mc;
	}

	return NULL;
}

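/*
 * Update volume of a peakvol module. When @input is NULL, the values cached
 * in the control's private data are applied instead.
 */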
int avs_peakvol_set_volume(struct avs_dev *adev, struct avs_path_module *mod,
			   struct soc_mixer_control *mc, long *input)
{
	struct avs_volume_cfg vols[SND_SOC_TPLG_MAX_CHAN] = {{0}};
	struct avs_control_data *ctl_data;
	struct avs_tplg_module *t;
	int ret, i;

	ctl_data = mc->dobj.private;
	t = mod->template;
	if (!input)
		input = ctl_data->values;

	if (mc->num_channels) {
		for (i = 0; i < mc->num_channels; i++) {
			vols[i].channel_id = i;
			vols[i].target_volume = input[i];
			vols[i].curve_type = t->cfg_ext->peakvol.curve_type;
			vols[i].curve_duration = t->cfg_ext->peakvol.curve_duration;
		}

		ret = avs_ipc_peakvol_set_volumes(adev, mod->module_id, mod->instance_id, vols,
						  mc->num_channels);
		return AVS_IPC_RET(ret);
	}

	/* Target all channels if none selected individually. */
	vols[0].channel_id = AVS_ALL_CHANNELS_MASK;
	vols[0].target_volume = input[0];
	vols[0].curve_type = t->cfg_ext->peakvol.curve_type;
	vols[0].curve_duration = t->cfg_ext->peakvol.curve_duration;

	ret = avs_ipc_peakvol_set_volume(adev, mod->module_id, mod->instance_id, &vols[0]);
	return AVS_IPC_RET(ret);
}

int avs_peakvol_set_mute(struct avs_dev *adev, struct avs_path_module *mod,
			 struct soc_mixer_control *mc, long *input)
{
	struct avs_mute_cfg mutes[SND_SOC_TPLG_MAX_CHAN] = {{0}};
	struct avs_control_data *ctl_data;
	struct avs_tplg_module *t;
	int ret, i;

	ctl_data = mc->dobj.private;
	t = mod->template;
	if (!input)
		input = ctl_data->values;

	if (mc->num_channels) {
		for (i = 0; i < mc->num_channels; i++) {
			mutes[i].channel_id = i;
			mutes[i].mute = !input[i];
			mutes[i].curve_type = t->cfg_ext->peakvol.curve_type;
			mutes[i].curve_duration = t->cfg_ext->peakvol.curve_duration;
		}

		ret = avs_ipc_peakvol_set_mutes(adev, mod->module_id, mod->instance_id, mutes,
						mc->num_channels);
		return AVS_IPC_RET(ret);
	}

	/* Target all channels if none selected individually. */
	mutes[0].channel_id = AVS_ALL_CHANNELS_MASK;
	mutes[0].mute = !input[0];
	mutes[0].curve_type = t->cfg_ext->peakvol.curve_type;
	mutes[0].curve_duration = t->cfg_ext->peakvol.curve_duration;

	ret = avs_ipc_peakvol_set_mute(adev, mod->module_id, mod->instance_id, &mutes[0]);
	return AVS_IPC_RET(ret);
}

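/*
 * Peakvol and gain modules are created with maximum target volume on all
 * channels and then immediately updated with the values of the path's Volume
 * and Switch controls, if any are attached.
 */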
static int avs_peakvol_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct soc_mixer_control *mc;
	struct avs_peakvol_cfg *cfg;
	size_t cfg_size;
	int ret;

	cfg_size = struct_size(cfg, vols, 1);
	if (cfg_size > AVS_MAILBOX_SIZE)
		return -EINVAL;

	cfg = adev->modcfg_buf;
	memset(cfg, 0, cfg_size);
	cfg->base.cpc = t->cfg_base->cpc;
	cfg->base.ibs = t->cfg_base->ibs;
	cfg->base.obs = t->cfg_base->obs;
	cfg->base.is_pages = t->cfg_base->is_pages;
	cfg->base.audio_fmt = *t->in_fmt;
	cfg->vols[0].channel_id = AVS_ALL_CHANNELS_MASK;
	cfg->vols[0].target_volume = S32_MAX;
	cfg->vols[0].curve_type = t->cfg_ext->peakvol.curve_type;
	cfg->vols[0].curve_duration = t->cfg_ext->peakvol.curve_duration;

	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, t->core_id,
				  t->domain, cfg, cfg_size, &mod->instance_id);
	if (ret)
		return ret;

	/* Now configure both VOLUME and MUTE parameters. */
	mc = avs_get_module_control(mod, "Volume");
	if (mc) {
		ret = avs_peakvol_set_volume(adev, mod, mc, NULL);
		if (ret)
			return ret;
	}

	mc = avs_get_module_control(mod, "Switch");
	if (mc)
		return avs_peakvol_set_mute(adev, mod, mc, NULL);
	return 0;
}

static int avs_updown_mix_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_updown_mixer_cfg cfg;
	int i;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.out_channel_config = t->cfg_ext->updown_mix.out_channel_config;
	cfg.coefficients_select = t->cfg_ext->updown_mix.coefficients_select;
	for (i = 0; i < AVS_CHANNELS_MAX; i++)
		cfg.coefficients[i] = t->cfg_ext->updown_mix.coefficients[i];
	cfg.channel_map = t->cfg_ext->updown_mix.channel_map;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

static int avs_src_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_src_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.out_freq = t->cfg_ext->src.out_freq;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

static int avs_asrc_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_asrc_cfg cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.out_freq = t->cfg_ext->asrc.out_freq;
	cfg.mode = t->cfg_ext->asrc.mode;
	cfg.disable_jitter_buffer = t->cfg_ext->asrc.disable_jitter_buffer;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

static int avs_aec_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_aec_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.ref_fmt = *t->cfg_ext->aec.ref_fmt;
	cfg.out_fmt = *t->cfg_ext->aec.out_fmt;
	cfg.cpc_lp_mode = t->cfg_ext->aec.cpc_lp_mode;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

static int avs_mux_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_mux_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.ref_fmt = *t->cfg_ext->mux.ref_fmt;
	cfg.out_fmt = *t->cfg_ext->mux.out_fmt;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

static int avs_wov_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_wov_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.cpc_lp_mode = t->cfg_ext->wov.cpc_lp_mode;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

static int avs_micsel_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_micsel_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.out_fmt = *t->cfg_ext->micsel.out_fmt;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

static int avs_modbase_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_modcfg_base cfg;

	cfg.cpc = t->cfg_base->cpc;
	cfg.ibs = t->cfg_base->ibs;
	cfg.obs = t->cfg_base->obs;
	cfg.is_pages = t->cfg_base->is_pages;
	cfg.audio_fmt = *t->in_fmt;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

static int avs_modext_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_tplg_modcfg_ext *tcfg = t->cfg_ext;
	struct avs_modcfg_ext *cfg;
	size_t cfg_size, num_pins;
	int ret, i;

	num_pins = tcfg->generic.num_input_pins + tcfg->generic.num_output_pins;
	cfg_size = struct_size(cfg, pin_fmts, num_pins);

	if (cfg_size > AVS_MAILBOX_SIZE)
		return -EINVAL;

	cfg = adev->modcfg_buf;
	memset(cfg, 0, cfg_size);
	cfg->base.cpc = t->cfg_base->cpc;
	cfg->base.ibs = t->cfg_base->ibs;
	cfg->base.obs = t->cfg_base->obs;
	cfg->base.is_pages = t->cfg_base->is_pages;
	cfg->base.audio_fmt = *t->in_fmt;
	cfg->num_input_pins = tcfg->generic.num_input_pins;
	cfg->num_output_pins = tcfg->generic.num_output_pins;

	/* configure pin formats */
	for (i = 0; i < num_pins; i++) {
		struct avs_tplg_pin_format *tpin = &tcfg->generic.pin_fmts[i];
		struct avs_pin_format *pin = &cfg->pin_fmts[i];

		pin->pin_index = tpin->pin_index;
		pin->iobs = tpin->iobs;
		pin->audio_fmt = *tpin->fmt;
	}

	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				  t->core_id, t->domain, cfg, cfg_size,
				  &mod->instance_id);
	return ret;
}

static int avs_probe_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	dev_err(adev->dev, "Probe module can't be instantiated by topology\n");
	return -EINVAL;
}

struct avs_module_create {
	guid_t *guid;
	int (*create)(struct avs_dev *adev, struct avs_path_module *mod);
};

static struct avs_module_create avs_module_create[] = {
	{ &AVS_MIXIN_MOD_UUID, avs_modbase_create },
	{ &AVS_MIXOUT_MOD_UUID, avs_modbase_create },
	{ &AVS_KPBUFF_MOD_UUID, avs_modbase_create },
	{ &AVS_COPIER_MOD_UUID, avs_copier_create },
	{ &AVS_PEAKVOL_MOD_UUID, avs_peakvol_create },
	{ &AVS_GAIN_MOD_UUID, avs_peakvol_create },
	{ &AVS_MICSEL_MOD_UUID, avs_micsel_create },
	{ &AVS_MUX_MOD_UUID, avs_mux_create },
	{ &AVS_UPDWMIX_MOD_UUID, avs_updown_mix_create },
	{ &AVS_SRCINTC_MOD_UUID, avs_src_create },
	{ &AVS_AEC_MOD_UUID, avs_aec_create },
	{ &AVS_ASRC_MOD_UUID, avs_asrc_create },
	{ &AVS_INTELWOV_MOD_UUID, avs_wov_create },
	{ &AVS_PROBE_MOD_UUID, avs_probe_create },
	{ &AVS_WOVHOSTM_MOD_UUID, avs_whm_create },
};

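/*
 * Modules with a known class UUID are created with their dedicated
 * constructor; any other type is treated as a generic, extension-configured
 * module.
 */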
static int avs_path_module_type_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	const guid_t *type = &mod->template->cfg_ext->type;

	for (int i = 0; i < ARRAY_SIZE(avs_module_create); i++)
		if (guid_equal(type, avs_module_create[i].guid))
			return avs_module_create[i].create(adev, mod);

	return avs_modext_create(adev, mod);
}

static int avs_path_module_send_init_configs(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_soc_component *acomp;

	acomp = to_avs_soc_component(mod->template->owner->owner->owner->owner->comp);

	u32 num_ids = mod->template->num_config_ids;
	u32 *ids = mod->template->config_ids;

	for (int i = 0; i < num_ids; i++) {
		struct avs_tplg_init_config *config = &acomp->tplg->init_configs[ids[i]];
		size_t len = config->length;
		void *data = config->data;
		u32 param = config->param;
		int ret;

		ret = avs_ipc_set_large_config(adev, mod->module_id, mod->instance_id,
					       param, data, len);
		if (ret) {
			dev_err(adev->dev, "send initial module config failed: %d\n", ret);
			return AVS_IPC_RET(ret);
		}
	}

	return 0;
}

static void avs_path_module_free(struct avs_dev *adev, struct avs_path_module *mod)
{
	kfree(mod);
}

static struct avs_path_module *
avs_path_module_create(struct avs_dev *adev,
		       struct avs_path_pipeline *owner,
		       struct avs_tplg_module *template)
{
	struct avs_path_module *mod;
	int module_id, ret;

	module_id = avs_get_module_id(adev, &template->cfg_ext->type);
	if (module_id < 0)
		return ERR_PTR(module_id);

	mod = kzalloc(sizeof(*mod), GFP_KERNEL);
	if (!mod)
		return ERR_PTR(-ENOMEM);

	mod->template = template;
	mod->module_id = module_id;
	mod->owner = owner;
	INIT_LIST_HEAD(&mod->node);

	ret = avs_path_module_type_create(adev, mod);
	if (ret) {
		dev_err(adev->dev, "module-type create failed: %d\n", ret);
		kfree(mod);
		return ERR_PTR(ret);
	}

	ret = avs_path_module_send_init_configs(adev, mod);
	if (ret) {
		kfree(mod);
		return ERR_PTR(ret);
	}

	return mod;
}

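/*
 * Resolve a binding into its source and sink module endpoints. The target
 * module may belong to a different path, located by topology name and
 * template ids.
 */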
static int avs_path_binding_arm(struct avs_dev *adev, struct avs_path_binding *binding)
{
	struct avs_path_module *this_mod, *target_mod;
	struct avs_path_pipeline *target_ppl;
	struct avs_path *target_path;
	struct avs_tplg_binding *t;

	t = binding->template;
	this_mod = avs_path_find_module(binding->owner, t->mod_id);
	if (!this_mod) {
		dev_err(adev->dev, "path mod %d not found\n", t->mod_id);
		return -EINVAL;
	}

	/* update with target_tplg_name too */
	target_path = avs_path_find_path(adev, t->target_tplg_name,
					 t->target_path_tmpl_id);
	if (!target_path) {
		dev_err(adev->dev, "target path %s:%d not found\n",
			t->target_tplg_name, t->target_path_tmpl_id);
		return -EINVAL;
	}

	target_ppl = avs_path_find_pipeline(target_path, t->target_ppl_id);
	if (!target_ppl) {
		dev_err(adev->dev, "target ppl %d not found\n", t->target_ppl_id);
		return -EINVAL;
	}

	target_mod = avs_path_find_module(target_ppl, t->target_mod_id);
	if (!target_mod) {
		dev_err(adev->dev, "target mod %d not found\n", t->target_mod_id);
		return -EINVAL;
	}

	if (t->is_sink) {
		binding->sink = this_mod;
		binding->sink_pin = t->mod_pin;
		binding->source = target_mod;
		binding->source_pin = t->target_mod_pin;
	} else {
		binding->sink = target_mod;
		binding->sink_pin = t->target_mod_pin;
		binding->source = this_mod;
		binding->source_pin = t->mod_pin;
	}

	return 0;
}

static void avs_path_binding_free(struct avs_dev *adev, struct avs_path_binding *binding)
{
	kfree(binding);
}

static struct avs_path_binding *avs_path_binding_create(struct avs_dev *adev,
							struct avs_path_pipeline *owner,
							struct avs_tplg_binding *t)
{
	struct avs_path_binding *binding;

	binding = kzalloc(sizeof(*binding), GFP_KERNEL);
	if (!binding)
		return ERR_PTR(-ENOMEM);

	binding->template = t;
	binding->owner = owner;
	INIT_LIST_HEAD(&binding->node);

	return binding;
}

static int avs_path_pipeline_arm(struct avs_dev *adev,
				 struct avs_path_pipeline *ppl)
{
	struct avs_path_module *mod;

	list_for_each_entry(mod, &ppl->mod_list, node) {
		struct avs_path_module *source, *sink;
		int ret;

		/*
		 * Either this is the only module on the list or the last one;
		 * in both cases there is no next module to bind it to.
		 */
		if (mod == list_last_entry(&ppl->mod_list,
					   struct avs_path_module, node))
			break;

		/* bind current module to next module on list */
		source = mod;
		sink = list_next_entry(mod, node);

		ret = avs_ipc_bind(adev, source->module_id, source->instance_id,
				   sink->module_id, sink->instance_id, 0, 0);
		if (ret)
			return AVS_IPC_RET(ret);
	}

	return 0;
}

static void avs_path_pipeline_free(struct avs_dev *adev,
				   struct avs_path_pipeline *ppl)
{
	struct avs_path_binding *binding, *bsave;
	struct avs_path_module *mod, *save;

	list_for_each_entry_safe(binding, bsave, &ppl->binding_list, node) {
		list_del(&binding->node);
		avs_path_binding_free(adev, binding);
	}

	avs_dsp_delete_pipeline(adev, ppl->instance_id);

	/* Unload resources occupied by owned modules */
	list_for_each_entry_safe(mod, save, &ppl->mod_list, node) {
		avs_dsp_delete_module(adev, mod->module_id, mod->instance_id,
				      mod->owner->instance_id,
				      mod->template->core_id);
		avs_path_module_free(adev, mod);
	}

	list_del(&ppl->node);
	kfree(ppl);
}

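/*
 * Create a pipeline on the DSP together with all of its modules and allocate
 * its bindings. Bindings are armed later, in avs_path_arm().
 */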
static struct avs_path_pipeline *
avs_path_pipeline_create(struct avs_dev *adev, struct avs_path *owner,
			 struct avs_tplg_pipeline *template)
{
	struct avs_path_pipeline *ppl;
	struct avs_tplg_pplcfg *cfg = template->cfg;
	struct avs_tplg_module *tmod;
	int ret, i;

	ppl = kzalloc(sizeof(*ppl), GFP_KERNEL);
	if (!ppl)
		return ERR_PTR(-ENOMEM);

	ppl->template = template;
	ppl->owner = owner;
	INIT_LIST_HEAD(&ppl->binding_list);
	INIT_LIST_HEAD(&ppl->mod_list);
	INIT_LIST_HEAD(&ppl->node);

	ret = avs_dsp_create_pipeline(adev, cfg->req_size, cfg->priority,
				      cfg->lp, cfg->attributes,
				      &ppl->instance_id);
	if (ret) {
		dev_err(adev->dev, "error creating pipeline %d\n", ret);
		kfree(ppl);
		return ERR_PTR(ret);
	}

	list_for_each_entry(tmod, &template->mod_list, node) {
		struct avs_path_module *mod;

		mod = avs_path_module_create(adev, ppl, tmod);
		if (IS_ERR(mod)) {
			ret = PTR_ERR(mod);
			dev_err(adev->dev, "error creating module %d\n", ret);
			goto init_err;
		}

		list_add_tail(&mod->node, &ppl->mod_list);
	}

	for (i = 0; i < template->num_bindings; i++) {
		struct avs_path_binding *binding;

		binding = avs_path_binding_create(adev, ppl, template->bindings[i]);
		if (IS_ERR(binding)) {
			ret = PTR_ERR(binding);
			dev_err(adev->dev, "error creating binding %d\n", ret);
			goto init_err;
		}

		list_add_tail(&binding->node, &ppl->binding_list);
	}

	return ppl;

init_err:
	avs_path_pipeline_free(adev, ppl);
	return ERR_PTR(ret);
}

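/*
 * Instantiate all pipelines of a path variant and add the new path to the
 * device's path list.
 */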
static int avs_path_init(struct avs_dev *adev, struct avs_path *path,
			 struct avs_tplg_path *template, u32 dma_id)
{
	struct avs_tplg_pipeline *tppl;

	path->owner = adev;
	path->template = template;
	path->dma_id = dma_id;
	INIT_LIST_HEAD(&path->ppl_list);
	INIT_LIST_HEAD(&path->node);

	/* create all the pipelines */
	list_for_each_entry(tppl, &template->ppl_list, node) {
		struct avs_path_pipeline *ppl;

		ppl = avs_path_pipeline_create(adev, path, tppl);
		if (IS_ERR(ppl))
			return PTR_ERR(ppl);

		list_add_tail(&ppl->node, &path->ppl_list);
	}

	spin_lock(&adev->path_list_lock);
	list_add_tail(&path->node, &adev->path_list);
	spin_unlock(&adev->path_list_lock);

	return 0;
}

static int avs_path_arm(struct avs_dev *adev, struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_path_binding *binding;
	int ret;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		/*
		 * Arm all pipeline bindings before binding the modules within
		 * the pipelines; the former costs no IPCs while the latter does.
		 */
		list_for_each_entry(binding, &ppl->binding_list, node) {
			ret = avs_path_binding_arm(adev, binding);
			if (ret < 0)
				return ret;
		}

		ret = avs_path_pipeline_arm(adev, ppl);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static void avs_path_free_unlocked(struct avs_path *path)
{
	struct avs_path_pipeline *ppl, *save;

	spin_lock(&path->owner->path_list_lock);
	list_del(&path->node);
	spin_unlock(&path->owner->path_list_lock);

	list_for_each_entry_safe(ppl, save, &path->ppl_list, node)
		avs_path_pipeline_free(path->owner, ppl);

	kfree(path);
}

static struct avs_path *avs_path_create_unlocked(struct avs_dev *adev, u32 dma_id,
						 struct avs_tplg_path *template)
{
	struct avs_path *path;
	int ret;

	path = kzalloc(sizeof(*path), GFP_KERNEL);
	if (!path)
		return ERR_PTR(-ENOMEM);

	ret = avs_path_init(adev, path, template, dma_id);
	if (ret < 0)
		goto err;

	ret = avs_path_arm(adev, path);
	if (ret < 0)
		goto err;

	path->state = AVS_PPL_STATE_INVALID;
	return path;
err:
	avs_path_free_unlocked(path);
	return ERR_PTR(ret);
}

void avs_path_free(struct avs_path *path)
{
	struct avs_dev *adev = path->owner;

	mutex_lock(&adev->path_mutex);
	avs_path_free_unlocked(path);
	mutex_unlock(&adev->path_mutex);
}

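/*
 * Create a path instance for the given DMA stream, using the topology variant
 * whose FE and BE formats match the provided hw_params.
 */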
struct avs_path *avs_path_create(struct avs_dev *adev, u32 dma_id,
				 struct avs_tplg_path_template *template,
				 struct snd_pcm_hw_params *fe_params,
				 struct snd_pcm_hw_params *be_params)
{
	struct avs_tplg_path *variant;
	struct avs_path *path;

	variant = avs_path_find_variant(adev, template, fe_params, be_params);
	if (!variant) {
		dev_err(adev->dev, "no matching variant found\n");
		return ERR_PTR(-ENOENT);
	}

	/* Serialize path and its components creation. */
	mutex_lock(&adev->path_mutex);
	/* Satisfy needs of avs_path_find_tplg(). */
	mutex_lock(&adev->comp_list_mutex);

	path = avs_path_create_unlocked(adev, dma_id, variant);

	mutex_unlock(&adev->comp_list_mutex);
	mutex_unlock(&adev->path_mutex);

	return path;
}

static int avs_path_bind_prepare(struct avs_dev *adev,
				 struct avs_path_binding *binding)
{
	const struct avs_audio_format *src_fmt, *sink_fmt;
	struct avs_tplg_module *tsource = binding->source->template;
	struct avs_path_module *source = binding->source;
	int ret;

	/*
	 * Only copier modules about to be bound to an output pin other than 0
	 * need preparation.
	 */
	if (!binding->source_pin)
		return 0;
	if (!guid_equal(&tsource->cfg_ext->type, &AVS_COPIER_MOD_UUID))
		return 0;

	src_fmt = tsource->in_fmt;
	sink_fmt = binding->sink->template->in_fmt;

	ret = avs_ipc_copier_set_sink_format(adev, source->module_id,
					     source->instance_id, binding->source_pin,
					     src_fmt, sink_fmt);
	if (ret) {
		dev_err(adev->dev, "config copier failed: %d\n", ret);
		return AVS_IPC_RET(ret);
	}

	return 0;
}

int avs_path_bind(struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		struct avs_path_binding *binding;

		list_for_each_entry(binding, &ppl->binding_list, node) {
			struct avs_path_module *source, *sink;

			source = binding->source;
			sink = binding->sink;

			ret = avs_path_bind_prepare(adev, binding);
			if (ret < 0)
				return ret;

			ret = avs_ipc_bind(adev, source->module_id,
					   source->instance_id, sink->module_id,
					   sink->instance_id, binding->sink_pin,
					   binding->source_pin);
			if (ret) {
				dev_err(adev->dev, "bind path failed: %d\n", ret);
				return AVS_IPC_RET(ret);
			}
		}
	}

	return 0;
}

int avs_path_unbind(struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		struct avs_path_binding *binding;

		list_for_each_entry(binding, &ppl->binding_list, node) {
			struct avs_path_module *source, *sink;

			source = binding->source;
			sink = binding->sink;

			ret = avs_ipc_unbind(adev, source->module_id,
					     source->instance_id, sink->module_id,
					     sink->instance_id, binding->sink_pin,
					     binding->source_pin);
			if (ret) {
				dev_err(adev->dev, "unbind path failed: %d\n", ret);
				return AVS_IPC_RET(ret);
			}
		}
	}

	return 0;
}

int avs_path_reset(struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	if (path->state == AVS_PPL_STATE_RESET)
		return 0;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
						 AVS_PPL_STATE_RESET);
		if (ret) {
			dev_err(adev->dev, "reset path failed: %d\n", ret);
			path->state = AVS_PPL_STATE_INVALID;
			return AVS_IPC_RET(ret);
		}
	}

	path->state = AVS_PPL_STATE_RESET;
	return 0;
}

int avs_path_pause(struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	if (path->state == AVS_PPL_STATE_PAUSED)
		return 0;

	list_for_each_entry_reverse(ppl, &path->ppl_list, node) {
		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
						 AVS_PPL_STATE_PAUSED);
		if (ret) {
			dev_err(adev->dev, "pause path failed: %d\n", ret);
			path->state = AVS_PPL_STATE_INVALID;
			return AVS_IPC_RET(ret);
		}
	}

	path->state = AVS_PPL_STATE_PAUSED;
	return 0;
}

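/* Start all pipelines of the path whose topology-defined trigger matches @trigger. */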
int avs_path_run(struct avs_path *path, int trigger)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	if (path->state == AVS_PPL_STATE_RUNNING && trigger == AVS_TPLG_TRIGGER_AUTO)
		return 0;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		if (ppl->template->cfg->trigger != trigger)
			continue;

		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
						 AVS_PPL_STATE_RUNNING);
		if (ret) {
			dev_err(adev->dev, "run path failed: %d\n", ret);
			path->state = AVS_PPL_STATE_INVALID;
			return AVS_IPC_RET(ret);
		}
	}

	path->state = AVS_PPL_STATE_RUNNING;
	return 0;
}