// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
// Copyright(c) 2023 Intel Corporation

/*
 * Soundwire Intel ops for LunarLake
 */

#include <linux/acpi.h>
#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_intel.h>
#include <linux/string_choices.h>
#include <sound/hdaudio.h>
#include <sound/hda-mlink.h>
#include <sound/hda-sdw-bpt.h>
#include <sound/hda_register.h>
#include <sound/pcm_params.h>
#include "cadence_master.h"
#include "bus.h"
#include "intel.h"

/*
 * Attach @slave to the BPT (Bulk Payload Transfer) @stream with a fixed,
 * single-channel RX port configuration. The stream/port settings other
 * than bps are arbitrary placeholders; only the BPT stream type and the
 * 32-bit sample width are functionally required.
 */
static int sdw_slave_bpt_stream_add(struct sdw_slave *slave, struct sdw_stream_runtime *stream)
{
	struct sdw_stream_config sconfig = {0};
	struct sdw_port_config pconfig = {0};
	int ret;

	/* arbitrary configuration */
	sconfig.frame_rate = 16000;
	sconfig.ch_count = 1;
	sconfig.bps = 32; /* this is required for BPT/BRA */
	sconfig.direction = SDW_DATA_DIR_RX;
	sconfig.type = SDW_STREAM_BPT;

	pconfig.num = 0;
	pconfig.ch_mask = BIT(0);

	ret = sdw_stream_add_slave(slave, &sconfig, &pconfig, 1, stream);
	if (ret)
		dev_err(&slave->dev, "%s: failed: %d\n", __func__, ret);

	return ret;
}

/* minimum extra bytes required on PDI1 for a read padding chunk (see open_stream) */
#define READ_PDI1_MIN_SIZE 12

/*
 * Allocate and fully configure the BPT stream used to transfer @msg:
 * TX on PDI0, RX on PDI1, with DMA buffer sizes derived from the message
 * sections and the DMA alignment requirements. On failure, all partially
 * acquired resources are released via the goto chain at the bottom.
 */
static int intel_ace2x_bpt_open_stream(struct sdw_intel *sdw, struct sdw_slave *slave,
				       struct sdw_bpt_msg *msg)
{
	struct sdw_cdns *cdns = &sdw->cdns;
	struct sdw_bus *bus = &cdns->bus;
	struct sdw_master_prop *prop = &bus->prop;
	struct sdw_stream_runtime *stream;
	struct sdw_stream_config sconfig;
	struct sdw_port_config *pconfig;
	unsigned int pdi0_buf_size_pre_frame;
	unsigned int pdi1_buf_size_pre_frame;
	unsigned int pdi0_buffer_size_;
	unsigned int pdi1_buffer_size_;
	unsigned int pdi0_buffer_size;
	unsigned int tx_dma_bandwidth;
	unsigned int pdi1_buffer_size;
	unsigned int rx_dma_bandwidth;
	unsigned int fake_num_frames;
unsigned int data_per_frame; 68 unsigned int tx_total_bytes; 69 struct sdw_cdns_pdi *pdi0; 70 struct sdw_cdns_pdi *pdi1; 71 unsigned int rx_alignment; 72 unsigned int tx_alignment; 73 unsigned int num_frames_; 74 unsigned int num_frames; 75 unsigned int fake_size; 76 unsigned int tx_pad; 77 unsigned int rx_pad; 78 int command; 79 int ret1; 80 int ret; 81 int dir; 82 int len; 83 int i; 84 85 if (cdns->bus.bpt_stream) { 86 dev_err(cdns->dev, "%s: BPT stream already exists\n", __func__); 87 return -EAGAIN; 88 } 89 90 stream = sdw_alloc_stream("BPT", SDW_STREAM_BPT); 91 if (!stream) 92 return -ENOMEM; 93 94 cdns->bus.bpt_stream = stream; 95 96 ret = sdw_slave_bpt_stream_add(slave, stream); 97 if (ret < 0) 98 goto release_stream; 99 100 /* handle PDI0 first */ 101 dir = SDW_DATA_DIR_TX; 102 103 pdi0 = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, 1, dir, 0); 104 if (!pdi0) { 105 dev_err(cdns->dev, "%s: sdw_cdns_alloc_pdi0 failed\n", __func__); 106 ret = -EINVAL; 107 goto remove_slave; 108 } 109 110 sdw_cdns_config_stream(cdns, 1, dir, pdi0); 111 112 /* handle PDI1 */ 113 dir = SDW_DATA_DIR_RX; 114 115 pdi1 = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, 1, dir, 1); 116 if (!pdi1) { 117 dev_err(cdns->dev, "%s: sdw_cdns_alloc_pdi1 failed\n", __func__); 118 ret = -EINVAL; 119 goto remove_slave; 120 } 121 122 sdw_cdns_config_stream(cdns, 1, dir, pdi1); 123 124 /* 125 * the port config direction, number of channels and frame 126 * rate is totally arbitrary 127 */ 128 sconfig.direction = dir; 129 sconfig.ch_count = 1; 130 sconfig.frame_rate = 16000; 131 sconfig.type = SDW_STREAM_BPT; 132 sconfig.bps = 32; /* this is required for BPT/BRA */ 133 134 /* Port configuration */ 135 pconfig = kzalloc_objs(*pconfig, 2); 136 if (!pconfig) { 137 ret = -ENOMEM; 138 goto remove_slave; 139 } 140 141 for (i = 0; i < 2 /* num_pdi */; i++) { 142 pconfig[i].num = i; 143 pconfig[i].ch_mask = 1; 144 } 145 146 ret = sdw_stream_add_master(&cdns->bus, &sconfig, pconfig, 2, stream); 147 kfree(pconfig); 148 149 if 
(ret < 0) { 150 dev_err(cdns->dev, "add master to stream failed:%d\n", ret); 151 goto remove_slave; 152 } 153 154 ret = sdw_prepare_stream(cdns->bus.bpt_stream); 155 if (ret < 0) 156 goto remove_master; 157 158 command = (msg->flags & SDW_MSG_FLAG_WRITE) ? 0 : 1; 159 160 ret = sdw_cdns_bpt_find_bandwidth(command, cdns->bus.params.row, 161 cdns->bus.params.col, 162 prop->default_frame_rate, 163 &tx_dma_bandwidth, &rx_dma_bandwidth); 164 if (ret < 0) 165 goto deprepare_stream; 166 167 len = 0; 168 pdi0_buffer_size = 0; 169 pdi1_buffer_size = 0; 170 num_frames = 0; 171 /* Add up pdi buffer size and frame numbers of each BPT sections */ 172 for (i = 0; i < msg->sections; i++) { 173 ret = sdw_cdns_bpt_find_buffer_sizes(command, cdns->bus.params.row, 174 cdns->bus.params.col, 175 msg->sec[i].len, SDW_BPT_MSG_MAX_BYTES, 176 &data_per_frame, &pdi0_buffer_size_, 177 &pdi1_buffer_size_, &num_frames_); 178 if (ret < 0) 179 goto deprepare_stream; 180 181 len += msg->sec[i].len; 182 pdi0_buffer_size += pdi0_buffer_size_; 183 pdi1_buffer_size += pdi1_buffer_size_; 184 num_frames += num_frames_; 185 } 186 187 sdw->bpt_ctx.pdi0_buffer_size = pdi0_buffer_size; 188 sdw->bpt_ctx.pdi1_buffer_size = pdi1_buffer_size; 189 sdw->bpt_ctx.num_frames = num_frames; 190 sdw->bpt_ctx.data_per_frame = data_per_frame; 191 192 rx_alignment = hda_sdw_bpt_get_buf_size_alignment(rx_dma_bandwidth); 193 tx_alignment = hda_sdw_bpt_get_buf_size_alignment(tx_dma_bandwidth); 194 195 if (command) { /* read */ 196 /* Get buffer size of a full frame */ 197 ret = sdw_cdns_bpt_find_buffer_sizes(command, cdns->bus.params.row, 198 cdns->bus.params.col, 199 data_per_frame, SDW_BPT_MSG_MAX_BYTES, 200 &data_per_frame, &pdi0_buf_size_pre_frame, 201 &pdi1_buf_size_pre_frame, &fake_num_frames); 202 if (ret < 0) 203 goto deprepare_stream; 204 205 /* find fake pdi1 buffer size */ 206 rx_pad = rx_alignment - (pdi1_buffer_size % rx_alignment); 207 while (rx_pad <= READ_PDI1_MIN_SIZE) 208 rx_pad += rx_alignment; 209 210 
pdi1_buffer_size += rx_pad; 211 /* It is fine if we request more than enough byte to read */ 212 fake_num_frames = DIV_ROUND_UP(rx_pad, pdi1_buf_size_pre_frame); 213 fake_size = fake_num_frames * data_per_frame; 214 215 /* find fake pdi0 buffer size */ 216 pdi0_buffer_size += (fake_num_frames * pdi0_buf_size_pre_frame); 217 tx_pad = tx_alignment - (pdi0_buffer_size % tx_alignment); 218 pdi0_buffer_size += tx_pad; 219 } else { /* write */ 220 /* 221 * For the write command, the rx data block is 4, and the rx buffer size of a frame 222 * is 8. So the rx buffer size (pdi0_buffer_size) is always a multiple of rx 223 * alignment. 224 */ 225 tx_pad = tx_alignment - (pdi0_buffer_size % tx_alignment); 226 pdi0_buffer_size += tx_pad; 227 } 228 229 dev_dbg(cdns->dev, "Message len %d transferred in %d frames (%d per frame)\n", 230 len, num_frames, data_per_frame); 231 dev_dbg(cdns->dev, "sizes pdi0 %d pdi1 %d tx_bandwidth %d rx_bandwidth %d\n", 232 pdi0_buffer_size, pdi1_buffer_size, tx_dma_bandwidth, rx_dma_bandwidth); 233 234 ret = hda_sdw_bpt_open(cdns->dev->parent, /* PCI device */ 235 sdw->instance, &sdw->bpt_ctx.bpt_tx_stream, 236 &sdw->bpt_ctx.dmab_tx_bdl, pdi0_buffer_size, tx_dma_bandwidth, 237 &sdw->bpt_ctx.bpt_rx_stream, &sdw->bpt_ctx.dmab_rx_bdl, 238 pdi1_buffer_size, rx_dma_bandwidth); 239 if (ret < 0) { 240 dev_err(cdns->dev, "%s: hda_sdw_bpt_open failed %d\n", __func__, ret); 241 goto deprepare_stream; 242 } 243 244 if (!command) { 245 ret = sdw_cdns_prepare_write_dma_buffer(msg->dev_num, msg->sec, msg->sections, 246 data_per_frame, 247 sdw->bpt_ctx.dmab_tx_bdl.area, 248 pdi0_buffer_size, &tx_total_bytes); 249 } else { 250 ret = sdw_cdns_prepare_read_dma_buffer(msg->dev_num, msg->sec, msg->sections, 251 data_per_frame, 252 sdw->bpt_ctx.dmab_tx_bdl.area, 253 pdi0_buffer_size, &tx_total_bytes, 254 fake_size); 255 } 256 257 if (!ret) 258 return 0; 259 260 dev_err(cdns->dev, "%s: sdw_prepare_%s_dma_buffer failed %d\n", 261 __func__, str_read_write(command), ret); 
262 263 ret1 = hda_sdw_bpt_close(cdns->dev->parent, /* PCI device */ 264 sdw->bpt_ctx.bpt_tx_stream, &sdw->bpt_ctx.dmab_tx_bdl, 265 sdw->bpt_ctx.bpt_rx_stream, &sdw->bpt_ctx.dmab_rx_bdl); 266 if (ret1 < 0) 267 dev_err(cdns->dev, "%s: hda_sdw_bpt_close failed: ret %d\n", 268 __func__, ret1); 269 270 deprepare_stream: 271 sdw_deprepare_stream(cdns->bus.bpt_stream); 272 273 remove_master: 274 ret1 = sdw_stream_remove_master(&cdns->bus, cdns->bus.bpt_stream); 275 if (ret1 < 0) 276 dev_err(cdns->dev, "%s: remove master failed: %d\n", 277 __func__, ret1); 278 279 remove_slave: 280 ret1 = sdw_stream_remove_slave(slave, cdns->bus.bpt_stream); 281 if (ret1 < 0) 282 dev_err(cdns->dev, "%s: remove slave failed: %d\n", 283 __func__, ret1); 284 285 release_stream: 286 sdw_release_stream(cdns->bus.bpt_stream); 287 cdns->bus.bpt_stream = NULL; 288 289 return ret; 290 } 291 292 static void intel_ace2x_bpt_close_stream(struct sdw_intel *sdw, struct sdw_slave *slave, 293 struct sdw_bpt_msg *msg) 294 { 295 struct sdw_cdns *cdns = &sdw->cdns; 296 int ret; 297 298 ret = hda_sdw_bpt_close(cdns->dev->parent /* PCI device */, sdw->bpt_ctx.bpt_tx_stream, 299 &sdw->bpt_ctx.dmab_tx_bdl, sdw->bpt_ctx.bpt_rx_stream, 300 &sdw->bpt_ctx.dmab_rx_bdl); 301 if (ret < 0) 302 dev_err(cdns->dev, "%s: hda_sdw_bpt_close failed: ret %d\n", 303 __func__, ret); 304 305 ret = sdw_deprepare_stream(cdns->bus.bpt_stream); 306 if (ret < 0) 307 dev_err(cdns->dev, "%s: sdw_deprepare_stream failed: ret %d\n", 308 __func__, ret); 309 310 ret = sdw_stream_remove_master(&cdns->bus, cdns->bus.bpt_stream); 311 if (ret < 0) 312 dev_err(cdns->dev, "%s: remove master failed: %d\n", 313 __func__, ret); 314 315 ret = sdw_stream_remove_slave(slave, cdns->bus.bpt_stream); 316 if (ret < 0) 317 dev_err(cdns->dev, "%s: remove slave failed: %d\n", 318 __func__, ret); 319 320 cdns->bus.bpt_stream = NULL; 321 } 322 323 #define INTEL_BPT_MSG_BYTE_MIN 16 324 325 static int intel_ace2x_bpt_send_async(struct sdw_intel *sdw, struct 
sdw_slave *slave, 326 struct sdw_bpt_msg *msg) 327 { 328 struct sdw_cdns *cdns = &sdw->cdns; 329 int len = 0; 330 int ret; 331 int i; 332 333 for (i = 0; i < msg->sections; i++) 334 len += msg->sec[i].len; 335 336 if (len < INTEL_BPT_MSG_BYTE_MIN) { 337 dev_err(cdns->dev, "BPT message length %d is less than the minimum bytes %d\n", 338 len, INTEL_BPT_MSG_BYTE_MIN); 339 return -EINVAL; 340 } 341 342 dev_dbg(cdns->dev, "BPT Transfer start\n"); 343 344 ret = intel_ace2x_bpt_open_stream(sdw, slave, msg); 345 if (ret < 0) 346 return ret; 347 348 ret = hda_sdw_bpt_send_async(cdns->dev->parent, /* PCI device */ 349 sdw->bpt_ctx.bpt_tx_stream, sdw->bpt_ctx.bpt_rx_stream); 350 if (ret < 0) { 351 dev_err(cdns->dev, "%s: hda_sdw_bpt_send_async failed: %d\n", 352 __func__, ret); 353 354 intel_ace2x_bpt_close_stream(sdw, slave, msg); 355 356 return ret; 357 } 358 359 ret = sdw_enable_stream(cdns->bus.bpt_stream); 360 if (ret < 0) { 361 dev_err(cdns->dev, "%s: sdw_stream_enable failed: %d\n", 362 __func__, ret); 363 intel_ace2x_bpt_close_stream(sdw, slave, msg); 364 } 365 366 return ret; 367 } 368 369 static int intel_ace2x_bpt_wait(struct sdw_intel *sdw, struct sdw_slave *slave, 370 struct sdw_bpt_msg *msg) 371 { 372 struct sdw_cdns *cdns = &sdw->cdns; 373 int ret; 374 375 dev_dbg(cdns->dev, "BPT Transfer wait\n"); 376 377 ret = hda_sdw_bpt_wait(cdns->dev->parent, /* PCI device */ 378 sdw->bpt_ctx.bpt_tx_stream, sdw->bpt_ctx.bpt_rx_stream); 379 if (ret < 0) 380 dev_err(cdns->dev, "%s: hda_sdw_bpt_wait failed: %d\n", __func__, ret); 381 382 ret = sdw_disable_stream(cdns->bus.bpt_stream); 383 if (ret < 0) { 384 dev_err(cdns->dev, "%s: sdw_stream_enable failed: %d\n", 385 __func__, ret); 386 goto err; 387 } 388 389 if (msg->flags & SDW_MSG_FLAG_WRITE) { 390 ret = sdw_cdns_check_write_response(cdns->dev, sdw->bpt_ctx.dmab_rx_bdl.area, 391 sdw->bpt_ctx.pdi1_buffer_size, 392 sdw->bpt_ctx.num_frames); 393 if (ret < 0) 394 dev_err(cdns->dev, "%s: BPT Write failed %d\n", __func__, ret); 
	} else {
		ret = sdw_cdns_check_read_response(cdns->dev, sdw->bpt_ctx.dmab_rx_bdl.area,
						   sdw->bpt_ctx.pdi1_buffer_size,
						   msg->sec, msg->sections, sdw->bpt_ctx.num_frames,
						   sdw->bpt_ctx.data_per_frame);
		if (ret < 0)
			dev_err(cdns->dev, "%s: BPT Read failed %d\n", __func__, ret);
	}

err:
	intel_ace2x_bpt_close_stream(sdw, slave, msg);

	return ret;
}

/*
 * shim vendor-specific (vs) ops
 */

/*
 * Program the vendor-specific ACTMCTL register from the BIOS/ACPI-provided
 * properties (clde/doaise/dods/... fields come straight from
 * bus->vendor_specific_prop), then enable DACTQE.
 * NOTE(review): the precise hardware meaning of each field is defined by the
 * SHIM2/SHIM3 register spec, not visible here.
 */
static void intel_shim_vs_init(struct sdw_intel *sdw)
{
	void __iomem *shim_vs = sdw->link_res->shim_vs;
	struct sdw_bus *bus = &sdw->cdns.bus;
	struct sdw_intel_prop *intel_prop;
	u16 clde;
	u16 doaise2;
	u16 dodse2;
	u16 clds;
	u16 clss;
	u16 doaise;
	u16 doais;
	u16 dodse;
	u16 dods;
	u16 act;

	intel_prop = bus->vendor_specific_prop;
	clde = intel_prop->clde;
	doaise2 = intel_prop->doaise2;
	dodse2 = intel_prop->dodse2;
	clds = intel_prop->clds;
	clss = intel_prop->clss;
	doaise = intel_prop->doaise;
	doais = intel_prop->doais;
	dodse = intel_prop->dodse;
	dods = intel_prop->dods;

	/* read-modify-write: only the listed fields are replaced */
	act = intel_readw(shim_vs, SDW_SHIM2_INTEL_VS_ACTMCTL);
	u16p_replace_bits(&act, clde, SDW_SHIM3_INTEL_VS_ACTMCTL_CLDE);
	u16p_replace_bits(&act, doaise2, SDW_SHIM3_INTEL_VS_ACTMCTL_DOAISE2);
	u16p_replace_bits(&act, dodse2, SDW_SHIM3_INTEL_VS_ACTMCTL_DODSE2);
	u16p_replace_bits(&act, clds, SDW_SHIM3_INTEL_VS_ACTMCTL_CLDS);
	u16p_replace_bits(&act, clss, SDW_SHIM3_INTEL_VS_ACTMCTL_CLSS);
	u16p_replace_bits(&act, doaise, SDW_SHIM2_INTEL_VS_ACTMCTL_DOAISE);
	u16p_replace_bits(&act, doais, SDW_SHIM2_INTEL_VS_ACTMCTL_DOAIS);
	u16p_replace_bits(&act, dodse, SDW_SHIM2_INTEL_VS_ACTMCTL_DODSE);
	u16p_replace_bits(&act, dods, SDW_SHIM2_INTEL_VS_ACTMCTL_DODS);
	act |= SDW_SHIM2_INTEL_VS_ACTMCTL_DACTQE;
	intel_writew(shim_vs, SDW_SHIM2_INTEL_VS_ACTMCTL, act);
	usleep_range(10, 15);
}

/* Select the master link clock source (MLCS field of LVSCTL) */
static void intel_shim_vs_set_clock_source(struct sdw_intel *sdw, u32 source)
{
	void __iomem *shim_vs = sdw->link_res->shim_vs;
	u32 val;

	val = intel_readl(shim_vs, SDW_SHIM2_INTEL_VS_LVSCTL);

	u32p_replace_bits(&val, source, SDW_SHIM2_INTEL_VS_LVSCTL_MLCS);

	intel_writel(shim_vs, SDW_SHIM2_INTEL_VS_LVSCTL, val);

	dev_dbg(sdw->cdns.dev, "clock source %d LVSCTL %#x\n", source, val);
}

/* Always report "wake pending" — see comment below for rationale */
static int intel_shim_check_wake(struct sdw_intel *sdw)
{
	/*
	 * We follow the HDaudio example and resume unconditionally
	 * without checking the WAKESTS bit for that specific link
	 */

	return 1;
}

/*
 * Enable or disable wake-up interrupts for this link's devices by
 * setting/clearing their LSDIID bits in WAKEEN; on disable, also clear
 * any latched wake status (STATESTS is write-1-to-clear).
 */
static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
{
	u16 lsdiid = 0;
	u16 wake_en;
	u16 wake_sts;
	int ret;

	mutex_lock(sdw->link_res->shim_lock);

	ret = hdac_bus_eml_sdw_get_lsdiid_unlocked(sdw->link_res->hbus, sdw->instance, &lsdiid);
	if (ret < 0)
		goto unlock;

	wake_en = snd_hdac_chip_readw(sdw->link_res->hbus, WAKEEN);

	if (wake_enable) {
		/* Enable the wakeup */
		wake_en |= lsdiid;

		snd_hdac_chip_writew(sdw->link_res->hbus, WAKEEN, wake_en);
	} else {
		/* Disable the wake up interrupt */
		wake_en &= ~lsdiid;
		snd_hdac_chip_writew(sdw->link_res->hbus, WAKEEN, wake_en);

		/* Clear wake status (W1C) */
		wake_sts = snd_hdac_chip_readw(sdw->link_res->hbus, STATESTS);
		wake_sts |= lsdiid;
		snd_hdac_chip_writew(sdw->link_res->hbus, STATESTS, wake_sts);
	}
unlock:
	mutex_unlock(sdw->link_res->shim_lock);
}

/*
 * Power up this link: pick syncprd/clock source from the mclk frequency,
 * power up the multi-link, and on the first link up also program SYNCPRD,
 * wait for SYNCPU and enable SoundWire interrupts. Protected by shim_lock;
 * shim_mask tracks which links are up.
 */
static int intel_link_power_up(struct sdw_intel *sdw)
{
	struct sdw_bus *bus = &sdw->cdns.bus;
	struct sdw_master_prop *prop = &bus->prop;
	u32 *shim_mask = sdw->link_res->shim_mask;
	unsigned int link_id = sdw->instance;
	u32 clock_source;
	u32 syncprd;
	int ret;

	/* derive sync period and clock source from the mclk frequency */
	if (prop->mclk_freq % 6000000) {
		if (prop->mclk_freq % 2400000) {
			syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24_576;
			clock_source = SDW_SHIM2_MLCS_CARDINAL_CLK;
		} else {
			syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4;
			clock_source = SDW_SHIM2_MLCS_XTAL_CLK;
		}
	} else {
		syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_96;
		clock_source = SDW_SHIM2_MLCS_AUDIO_PLL_CLK;
	}

	mutex_lock(sdw->link_res->shim_lock);

	ret = hdac_bus_eml_sdw_power_up_unlocked(sdw->link_res->hbus, link_id);
	if (ret < 0) {
		dev_err(sdw->cdns.dev, "%s: hdac_bus_eml_sdw_power_up failed: %d\n",
			__func__, ret);
		goto out;
	}

	intel_shim_vs_set_clock_source(sdw, clock_source);

	if (!*shim_mask) {
		/* we first need to program the SyncPRD/CPU registers */
		dev_dbg(sdw->cdns.dev, "first link up, programming SYNCPRD\n");

		ret = hdac_bus_eml_sdw_set_syncprd_unlocked(sdw->link_res->hbus, syncprd);
		if (ret < 0) {
			dev_err(sdw->cdns.dev, "%s: hdac_bus_eml_sdw_set_syncprd failed: %d\n",
				__func__, ret);
			goto out;
		}

		/* SYNCPU will change once link is active */
		ret = hdac_bus_eml_sdw_wait_syncpu_unlocked(sdw->link_res->hbus);
		if (ret < 0) {
			dev_err(sdw->cdns.dev, "%s: hdac_bus_eml_sdw_wait_syncpu failed: %d\n",
				__func__, ret);
			goto out;
		}

		hdac_bus_eml_enable_interrupt_unlocked(sdw->link_res->hbus, true,
						       AZX_REG_ML_LEPTR_ID_SDW, true);
	}

	*shim_mask |= BIT(link_id);

	sdw->cdns.link_up = true;

	intel_shim_vs_init(sdw);

out:
	mutex_unlock(sdw->link_res->shim_lock);

	return ret;
}

/*
 * Power down this link; when the last link goes down, also disable the
 * SoundWire interrupts. Mirror image of intel_link_power_up().
 */
static int intel_link_power_down(struct sdw_intel *sdw)
{
	u32 *shim_mask = sdw->link_res->shim_mask;
	unsigned int link_id = sdw->instance;
	int ret;

	mutex_lock(sdw->link_res->shim_lock);

	sdw->cdns.link_up = false;

	*shim_mask &= ~BIT(link_id);

	if (!*shim_mask)
		hdac_bus_eml_enable_interrupt_unlocked(sdw->link_res->hbus, true,
						       AZX_REG_ML_LEPTR_ID_SDW, false);

	ret =
hdac_bus_eml_sdw_power_down_unlocked(sdw->link_res->hbus, link_id);
	if (ret < 0) {
		dev_err(sdw->cdns.dev, "%s: hdac_bus_eml_sdw_power_down failed: %d\n",
			__func__, ret);

		/*
		 * we leave the sdw->cdns.link_up flag as false since we've disabled
		 * the link at this point and cannot handle interrupts any longer.
		 */
	}

	mutex_unlock(sdw->link_res->shim_lock);

	return ret;
}

/* Arm the hardware command synchronization for this link (under shim_lock) */
static void intel_sync_arm(struct sdw_intel *sdw)
{
	unsigned int link_id = sdw->instance;

	mutex_lock(sdw->link_res->shim_lock);

	hdac_bus_eml_sdw_sync_arm_unlocked(sdw->link_res->hbus, link_id);

	mutex_unlock(sdw->link_res->shim_lock);
}

/* Issue SyncGO; caller must already hold shim_lock */
static int intel_sync_go_unlocked(struct sdw_intel *sdw)
{
	int ret;

	ret = hdac_bus_eml_sdw_sync_go_unlocked(sdw->link_res->hbus);
	if (ret < 0)
		dev_err(sdw->cdns.dev, "%s: SyncGO clear failed: %d\n", __func__, ret);

	return ret;
}

/* Locked wrapper around intel_sync_go_unlocked() */
static int intel_sync_go(struct sdw_intel *sdw)
{
	int ret;

	mutex_lock(sdw->link_res->shim_lock);

	ret = intel_sync_go_unlocked(sdw);

	mutex_unlock(sdw->link_res->shim_lock);

	return ret;
}

/* Report whether a command sync is still pending; caller holds shim_lock */
static bool intel_check_cmdsync_unlocked(struct sdw_intel *sdw)
{
	return hdac_bus_eml_sdw_check_cmdsync_unlocked(sdw->link_res->hbus);
}

/* DAI callbacks */
/*
 * Forward the hw_params/stream information to the parent driver (SOF)
 * through link_res->ops. Returns -EIO when no callback is registered:
 * the DSP must be informed for the stream to work.
 */
static int intel_params_stream(struct sdw_intel *sdw,
			       struct snd_pcm_substream *substream,
			       struct snd_soc_dai *dai,
			       struct snd_pcm_hw_params *hw_params,
			       int link_id, int alh_stream_id)
{
	struct sdw_intel_link_res *res = sdw->link_res;
	struct sdw_intel_stream_params_data params_data;

	params_data.substream = substream;
	params_data.dai = dai;
	params_data.hw_params = hw_params;
	params_data.link_id = link_id;
	params_data.alh_stream_id = alh_stream_id;

	if (res->ops && res->ops->params_stream && res->dev)
		return res->ops->params_stream(res->dev,
					       &params_data);
	return -EIO;
}

/*
 * Notify the parent driver that the stream is being freed. A missing
 * callback is not an error here (returns 0), unlike intel_params_stream().
 */
static int intel_free_stream(struct sdw_intel *sdw,
			     struct snd_pcm_substream *substream,
			     struct snd_soc_dai *dai,
			     int link_id)

{
	struct sdw_intel_link_res *res = sdw->link_res;
	struct sdw_intel_stream_free_data free_data;

	free_data.substream = substream;
	free_data.dai = dai;
	free_data.link_id = link_id;

	if (res->ops && res->ops->free_stream && res->dev)
		return res->ops->free_stream(res->dev,
					     &free_data);

	return 0;
}

/*
 * DAI operations
 */
/*
 * hw_params: allocate and configure a PDI for this DAI, inform the DSP of
 * the PDI/ALH stream number, then add this master to the sdw stream with
 * the port configuration derived from the hw_params.
 */
static int intel_hw_params(struct snd_pcm_substream *substream,
			   struct snd_pcm_hw_params *params,
			   struct snd_soc_dai *dai)
{
	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
	struct sdw_intel *sdw = cdns_to_intel(cdns);
	struct sdw_cdns_dai_runtime *dai_runtime;
	struct sdw_cdns_pdi *pdi;
	struct sdw_stream_config sconfig;
	int ch, dir;
	int ret;

	dai_runtime = cdns->dai_runtime_array[dai->id];
	if (!dai_runtime)
		return -EIO;

	ch = params_channels(params);
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		dir = SDW_DATA_DIR_RX;
	else
		dir = SDW_DATA_DIR_TX;

	pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
	if (!pdi)
		return -EINVAL;

	/* use same definitions for alh_id as previous generations */
	pdi->intel_alh_id = (sdw->instance * 16) + pdi->num + 3;
	if (pdi->num >= 2)
		pdi->intel_alh_id += 2;

	/* the SHIM will be configured in the callback functions */

	sdw_cdns_config_stream(cdns, ch, dir, pdi);

	/* store pdi and state, may be needed in prepare step */
	dai_runtime->paused = false;
	dai_runtime->suspended = false;
	dai_runtime->pdi = pdi;

	/* Inform DSP about PDI stream number */
	ret = intel_params_stream(sdw, substream, dai, params,
				  sdw->instance,
				  pdi->intel_alh_id);
	if (ret)
		return ret;

	sconfig.direction = dir;
	sconfig.ch_count = ch;
	sconfig.frame_rate = params_rate(params);
	sconfig.type = dai_runtime->stream_type;

	sconfig.bps = snd_pcm_format_width(params_format(params));

	/* Port configuration; freed automatically on return via __free(kfree) */
	struct sdw_port_config *pconfig __free(kfree) = kzalloc_obj(*pconfig);
	if (!pconfig)
		return -ENOMEM;

	pconfig->num = pdi->num;
	pconfig->ch_mask = (1 << ch) - 1;

	ret = sdw_stream_add_master(&cdns->bus, &sconfig,
				    pconfig, 1, dai_runtime->stream);
	if (ret)
		dev_err(cdns->dev, "add master to stream failed:%d\n", ret);

	return ret;
}

/*
 * prepare: re-initialize the PDI after a system resume (tracked by the
 * 'suspended' flag set in the trigger callback) and inform the DSP again.
 */
static int intel_prepare(struct snd_pcm_substream *substream,
			 struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
	struct sdw_intel *sdw = cdns_to_intel(cdns);
	struct sdw_cdns_dai_runtime *dai_runtime;
	struct snd_pcm_hw_params *hw_params;
	int ch, dir;

	dai_runtime = cdns->dai_runtime_array[dai->id];
	if (!dai_runtime) {
		dev_err(dai->dev, "failed to get dai runtime in %s\n",
			__func__);
		return -EIO;
	}

	hw_params = &rtd->dpcm[substream->stream].hw_params;
	if (dai_runtime->suspended) {
		dai_runtime->suspended = false;

		/*
		 * .prepare() is called after system resume, where we
		 * need to reinitialize the SHIM/ALH/Cadence IP.
794 * .prepare() is also called to deal with underflows, 795 * but in those cases we cannot touch ALH/SHIM 796 * registers 797 */ 798 799 /* configure stream */ 800 ch = params_channels(hw_params); 801 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) 802 dir = SDW_DATA_DIR_RX; 803 else 804 dir = SDW_DATA_DIR_TX; 805 806 /* the SHIM will be configured in the callback functions */ 807 808 sdw_cdns_config_stream(cdns, ch, dir, dai_runtime->pdi); 809 } 810 811 /* Inform DSP about PDI stream number */ 812 return intel_params_stream(sdw, substream, dai, hw_params, sdw->instance, 813 dai_runtime->pdi->intel_alh_id); 814 } 815 816 static int 817 intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) 818 { 819 struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai); 820 struct sdw_intel *sdw = cdns_to_intel(cdns); 821 struct sdw_cdns_dai_runtime *dai_runtime; 822 int ret; 823 824 dai_runtime = cdns->dai_runtime_array[dai->id]; 825 if (!dai_runtime) 826 return -EIO; 827 828 /* 829 * The sdw stream state will transition to RELEASED when stream-> 830 * master_list is empty. So the stream state will transition to 831 * DEPREPARED for the first cpu-dai and to RELEASED for the last 832 * cpu-dai. 
833 */ 834 ret = sdw_stream_remove_master(&cdns->bus, dai_runtime->stream); 835 if (ret < 0) { 836 dev_err(dai->dev, "remove master from stream %s failed: %d\n", 837 dai_runtime->stream->name, ret); 838 return ret; 839 } 840 841 ret = intel_free_stream(sdw, substream, dai, sdw->instance); 842 if (ret < 0) { 843 dev_err(dai->dev, "intel_free_stream: failed %d\n", ret); 844 return ret; 845 } 846 847 dai_runtime->pdi = NULL; 848 849 return 0; 850 } 851 852 static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai, 853 void *stream, int direction) 854 { 855 return cdns_set_sdw_stream(dai, stream, direction); 856 } 857 858 static void *intel_get_sdw_stream(struct snd_soc_dai *dai, 859 int direction) 860 { 861 struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai); 862 struct sdw_cdns_dai_runtime *dai_runtime; 863 864 dai_runtime = cdns->dai_runtime_array[dai->id]; 865 if (!dai_runtime) 866 return ERR_PTR(-EINVAL); 867 868 return dai_runtime->stream; 869 } 870 871 static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) 872 { 873 struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai); 874 struct sdw_intel *sdw = cdns_to_intel(cdns); 875 struct sdw_intel_link_res *res = sdw->link_res; 876 struct sdw_cdns_dai_runtime *dai_runtime; 877 int ret = 0; 878 879 /* 880 * The .trigger callback is used to program HDaudio DMA and send required IPC to audio 881 * firmware. 882 */ 883 if (res->ops && res->ops->trigger) { 884 ret = res->ops->trigger(substream, cmd, dai); 885 if (ret < 0) 886 return ret; 887 } 888 889 dai_runtime = cdns->dai_runtime_array[dai->id]; 890 if (!dai_runtime) { 891 dev_err(dai->dev, "failed to get dai runtime in %s\n", 892 __func__); 893 return -EIO; 894 } 895 896 switch (cmd) { 897 case SNDRV_PCM_TRIGGER_SUSPEND: 898 899 /* 900 * The .prepare callback is used to deal with xruns and resume operations. 
901 * In the case of xruns, the DMAs and SHIM registers cannot be touched, 902 * but for resume operations the DMAs and SHIM registers need to be initialized. 903 * the .trigger callback is used to track the suspend case only. 904 */ 905 906 dai_runtime->suspended = true; 907 908 break; 909 910 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 911 dai_runtime->paused = true; 912 break; 913 case SNDRV_PCM_TRIGGER_STOP: 914 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 915 dai_runtime->paused = false; 916 break; 917 default: 918 break; 919 } 920 921 return ret; 922 } 923 924 static const struct snd_soc_dai_ops intel_pcm_dai_ops = { 925 .hw_params = intel_hw_params, 926 .prepare = intel_prepare, 927 .hw_free = intel_hw_free, 928 .trigger = intel_trigger, 929 .set_stream = intel_pcm_set_sdw_stream, 930 .get_stream = intel_get_sdw_stream, 931 }; 932 933 static const struct snd_soc_component_driver dai_component = { 934 .name = "soundwire", 935 }; 936 937 /* 938 * PDI routines 939 */ 940 static void intel_pdi_init(struct sdw_intel *sdw, 941 struct sdw_cdns_stream_config *config) 942 { 943 void __iomem *shim = sdw->link_res->shim; 944 int pcm_cap; 945 946 /* PCM Stream Capability */ 947 pcm_cap = intel_readw(shim, SDW_SHIM2_PCMSCAP); 948 949 config->pcm_bd = FIELD_GET(SDW_SHIM2_PCMSCAP_BSS, pcm_cap); 950 config->pcm_in = FIELD_GET(SDW_SHIM2_PCMSCAP_ISS, pcm_cap); 951 config->pcm_out = FIELD_GET(SDW_SHIM2_PCMSCAP_ISS, pcm_cap); 952 953 dev_dbg(sdw->cdns.dev, "PCM cap bd:%d in:%d out:%d\n", 954 config->pcm_bd, config->pcm_in, config->pcm_out); 955 } 956 957 static int 958 intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num) 959 { 960 void __iomem *shim = sdw->link_res->shim; 961 962 /* zero based values for channel count in register */ 963 return intel_readw(shim, SDW_SHIM2_PCMSYCHC(pdi_num)) + 1; 964 } 965 966 static void intel_pdi_get_ch_update(struct sdw_intel *sdw, 967 struct sdw_cdns_pdi *pdi, 968 unsigned int num_pdi, 969 unsigned int *num_ch) 970 { 971 int ch_count = 0; 972 
	int i;

	for (i = 0; i < num_pdi; i++) {
		pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num);
		ch_count += pdi->ch_count;
		pdi++;
	}

	*num_ch = ch_count;
}

/* Update channel counts for all bd/in/out PDI groups of @stream */
static void intel_pdi_stream_ch_update(struct sdw_intel *sdw,
				       struct sdw_cdns_streams *stream)
{
	intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd,
				&stream->num_ch_bd);

	intel_pdi_get_ch_update(sdw, stream->in, stream->num_in,
				&stream->num_ch_in);

	intel_pdi_get_ch_update(sdw, stream->out, stream->num_out,
				&stream->num_ch_out);
}

/*
 * Fill @num DAI driver entries starting at @off: name them "SDW<link> Pin<n>"
 * and set playback/capture channel ranges according to the PDI @type.
 */
static int intel_create_dai(struct sdw_cdns *cdns,
			    struct snd_soc_dai_driver *dais,
			    enum intel_pdi_type type,
			    u32 num, u32 off, u32 max_ch)
{
	int i;

	if (!num)
		return 0;

	for (i = off; i < (off + num); i++) {
		dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL,
					      "SDW%d Pin%d",
					      cdns->instance, i);
		if (!dais[i].name)
			return -ENOMEM;

		/* BD and OUT PDIs can play back */
		if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
			dais[i].playback.channels_min = 1;
			dais[i].playback.channels_max = max_ch;
		}

		/* BD and IN PDIs can capture */
		if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
			dais[i].capture.channels_min = 1;
			dais[i].capture.channels_max = max_ch;
		}

		dais[i].ops = &intel_pcm_dai_ops;
	}

	return 0;
}

/*
 * Probe-time setup: read the PDI capabilities, allocate the DAI runtime
 * array and DAI drivers (one DAI per PDI, in/out/bd in that order) and
 * register the ASoC component.
 */
static int intel_register_dai(struct sdw_intel *sdw)
{
	struct sdw_cdns_dai_runtime **dai_runtime_array;
	struct sdw_cdns_stream_config config;
	struct sdw_cdns *cdns = &sdw->cdns;
	struct sdw_cdns_streams *stream;
	struct snd_soc_dai_driver *dais;
	int num_dai;
	int ret;
	int off = 0;

	/* Read the PDI config and initialize cadence PDI */
	intel_pdi_init(sdw, &config);
	ret = sdw_cdns_pdi_init(cdns, config);
	if (ret)
		return ret;

	intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm);

	/* DAIs are created based on total number of PDIs supported */
	num_dai = cdns->pcm.num_pdi;

	dai_runtime_array = devm_kcalloc(cdns->dev, num_dai,
					 sizeof(struct sdw_cdns_dai_runtime *),
					 GFP_KERNEL);
	if (!dai_runtime_array)
		return -ENOMEM;
	cdns->dai_runtime_array = dai_runtime_array;

	dais = devm_kcalloc(cdns->dev, num_dai, sizeof(*dais), GFP_KERNEL);
	if (!dais)
		return -ENOMEM;

	/* Create PCM DAIs */
	stream = &cdns->pcm;

	ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in,
			       off, stream->num_ch_in);
	if (ret)
		return ret;

	off += cdns->pcm.num_in;
	ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pcm.num_out,
			       off, stream->num_ch_out);
	if (ret)
		return ret;

	off += cdns->pcm.num_out;
	ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pcm.num_bd,
			       off, stream->num_ch_bd);
	if (ret)
		return ret;

	return devm_snd_soc_register_component(cdns->dev, &dai_component,
					       dais, num_dai);
}

/* Record a peripheral's device number in this link's LSDIID register */
static void intel_program_sdi(struct sdw_intel *sdw, int dev_num)
{
	int ret;

	ret = hdac_bus_eml_sdw_set_lsdiid(sdw->link_res->hbus, sdw->instance, dev_num);
	if (ret < 0)
		dev_err(sdw->cdns.dev, "%s: could not set lsdiid for link %d %d\n",
			__func__, sdw->instance, dev_num);
}

/*
 * Query the number of SoundWire sublinks exposed by the HDaudio multi-link;
 * returns a positive count or a negative errno (0 links and counts above
 * SDW_INTEL_MAX_LINKS are rejected).
 */
static int intel_get_link_count(struct sdw_intel *sdw)
{
	int ret;

	ret = hdac_bus_eml_get_count(sdw->link_res->hbus, true, AZX_REG_ML_LEPTR_ID_SDW);
	if (!ret) {
		dev_err(sdw->cdns.dev, "%s: could not retrieve link count\n", __func__);
		return -ENODEV;
	}

	if (ret > SDW_INTEL_MAX_LINKS) {
		dev_err(sdw->cdns.dev, "%s: link count %d exceed max %d\n", __func__, ret, SDW_INTEL_MAX_LINKS);
		return -EINVAL;
	}

	return ret;
}

const struct sdw_intel_hw_ops sdw_intel_lnl_hw_ops = {
	.debugfs_init = intel_ace2x_debugfs_init,
	.debugfs_exit = intel_ace2x_debugfs_exit,

	.get_link_count = intel_get_link_count,

	.register_dai = intel_register_dai,

	.check_clock_stop = intel_check_clock_stop,
	.start_bus = intel_start_bus,
	.start_bus_after_reset = intel_start_bus_after_reset,
	.start_bus_after_clock_stop = intel_start_bus_after_clock_stop,
	.stop_bus = intel_stop_bus,

	.link_power_up = intel_link_power_up,
	.link_power_down = intel_link_power_down,

	.shim_check_wake = intel_shim_check_wake,
	.shim_wake = intel_shim_wake,

	.pre_bank_switch = intel_pre_bank_switch,
	.post_bank_switch = intel_post_bank_switch,

	.sync_arm = intel_sync_arm,
	.sync_go_unlocked = intel_sync_go_unlocked,
	.sync_go = intel_sync_go,
	.sync_check_cmdsync_unlocked = intel_check_cmdsync_unlocked,

	.program_sdi = intel_program_sdi,

	.bpt_send_async = intel_ace2x_bpt_send_async,
	.bpt_wait = intel_ace2x_bpt_wait,
};
EXPORT_SYMBOL_NS(sdw_intel_lnl_hw_ops, "SOUNDWIRE_INTEL");

MODULE_IMPORT_NS("SND_SOC_SOF_HDA_MLINK");
MODULE_IMPORT_NS("SND_SOC_SOF_INTEL_HDA_SDW_BPT");