// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
// Copyright(c) 2015-17 Intel Corporation.

/*
 * Soundwire Intel Master Driver
 */

#include <linux/acpi.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <sound/pcm_params.h>
#include <linux/pm_runtime.h>
#include <sound/soc.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_intel.h>
#include "cadence_master.h"
#include "bus.h"
#include "intel.h"

static int intel_wait_bit(void __iomem *base, int offset, u32 mask, u32 target)
{
	int timeout = 10;
	u32 reg_read;

	do {
		reg_read = readl(base + offset);
		if ((reg_read & mask) == target)
			return 0;

		timeout--;
		usleep_range(50, 100);
	} while (timeout != 0);

	return -EAGAIN;
}

static int intel_clear_bit(void __iomem *base, int offset, u32 value, u32 mask)
{
	writel(value, base + offset);
	return intel_wait_bit(base, offset, mask, 0);
}

static int intel_set_bit(void __iomem *base, int offset, u32 value, u32 mask)
{
	writel(value, base + offset);
	return intel_wait_bit(base, offset, mask, mask);
}

/*
 * debugfs
 */
#ifdef CONFIG_DEBUG_FS

#define RD_BUF (2 * PAGE_SIZE)

static ssize_t intel_sprintf(void __iomem *mem, bool l,
			     char *buf, size_t pos, unsigned int reg)
{
	int value;

	if (l)
		value = intel_readl(mem, reg);
	else
		value = intel_readw(mem, reg);

	return scnprintf(buf + pos, RD_BUF - pos, "%4x\t%4x\n", reg, value);
}

static int intel_reg_show(struct seq_file *s_file, void *data)
{
	struct sdw_intel *sdw = s_file->private;
	void __iomem *s = sdw->link_res->shim;
	void __iomem *a = sdw->link_res->alh;
	char *buf;
	ssize_t ret;
	int i, j;
	unsigned int links, reg;

	buf = kzalloc(RD_BUF, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	links = intel_readl(s, SDW_SHIM_LCAP) & SDW_SHIM_LCAP_LCOUNT_MASK;

	ret = scnprintf(buf, RD_BUF, "Register Value\n");
	ret += scnprintf(buf + ret, RD_BUF - ret, "\nShim\n");

	for (i = 0; i < links; i++) {
		reg = SDW_SHIM_LCAP + i * 4;
		ret += intel_sprintf(s, true, buf, ret, reg);
	}

	for (i = 0; i < links; i++) {
		ret += scnprintf(buf + ret, RD_BUF - ret, "\nLink%d\n", i);
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLSCAP(i));
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS0CM(i));
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS1CM(i));
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS2CM(i));
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS3CM(i));
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_PCMSCAP(i));

		ret += scnprintf(buf + ret, RD_BUF - ret, "\n PCMSyCH registers\n");

		/*
		 * the value 10 is the number of PDIs. We will need a
		 * cleanup to remove hard-coded Intel configurations
		 * from cadence_master.c
		 */
		for (j = 0; j < 10; j++) {
			ret += intel_sprintf(s, false, buf, ret,
					     SDW_SHIM_PCMSYCHM(i, j));
			ret += intel_sprintf(s, false, buf, ret,
					     SDW_SHIM_PCMSYCHC(i, j));
		}
		ret += scnprintf(buf + ret, RD_BUF - ret, "\n IOCTL, CTMCTL\n");

		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_IOCTL(i));
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTMCTL(i));
	}

	ret += scnprintf(buf + ret, RD_BUF - ret, "\nWake registers\n");
	ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKEEN);
	ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKESTS);

	ret += scnprintf(buf + ret, RD_BUF - ret, "\nALH STRMzCFG\n");
	for (i = 0; i < SDW_ALH_NUM_STREAMS; i++)
		ret += intel_sprintf(a, true, buf, ret, SDW_ALH_STRMZCFG(i));

	seq_printf(s_file, "%s", buf);
	kfree(buf);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(intel_reg);

static int intel_set_m_datamode(void *data, u64 value)
{
	struct sdw_intel *sdw = data;
	struct sdw_bus *bus = &sdw->cdns.bus;

	if (value > SDW_PORT_DATA_MODE_STATIC_1)
		return -EINVAL;

	/* Userspace changed the hardware state behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	bus->params.m_data_mode = value;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(intel_set_m_datamode_fops, NULL,
			 intel_set_m_datamode, "%llu\n");

static int intel_set_s_datamode(void *data, u64 value)
{
	struct sdw_intel *sdw = data;
	struct sdw_bus *bus = &sdw->cdns.bus;

	if (value > SDW_PORT_DATA_MODE_STATIC_1)
		return -EINVAL;

	/* Userspace changed the hardware state behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	bus->params.s_data_mode = value;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(intel_set_s_datamode_fops, NULL,
			 intel_set_s_datamode, "%llu\n");

static void intel_debugfs_init(struct sdw_intel *sdw)
{
	struct dentry *root = sdw->cdns.bus.debugfs;

	if (!root)
		return;

	sdw->debugfs = debugfs_create_dir("intel-sdw", root);

	debugfs_create_file("intel-registers", 0400, sdw->debugfs, sdw,
			    &intel_reg_fops);

	debugfs_create_file("intel-m-datamode", 0200, sdw->debugfs, sdw,
			    &intel_set_m_datamode_fops);

	debugfs_create_file("intel-s-datamode", 0200, sdw->debugfs, sdw,
			    &intel_set_s_datamode_fops);

	sdw_cdns_debugfs_init(&sdw->cdns, sdw->debugfs);
}

static void intel_debugfs_exit(struct sdw_intel *sdw)
{
	debugfs_remove_recursive(sdw->debugfs);
}
#else
static void intel_debugfs_init(struct sdw_intel *sdw) {}
static void intel_debugfs_exit(struct sdw_intel *sdw) {}
#endif /* CONFIG_DEBUG_FS */

/*
 * shim ops
 */
/* this needs to be called with shim_lock */
static void intel_shim_glue_to_master_ip(struct sdw_intel *sdw)
{
	void __iomem *shim = sdw->link_res->shim;
	unsigned int link_id = sdw->instance;
	u16 ioctl;

	/* Switch to MIP from Glue logic */
	ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));

	ioctl &= ~(SDW_SHIM_IOCTL_DOE);
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	ioctl &= ~(SDW_SHIM_IOCTL_DO);
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

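	/*
	 * with DOE and DO cleared above, set MIF and then release BKE/COE so
	 * that the master IP takes over the I/Os from the integration glue
	 */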
	ioctl |= (SDW_SHIM_IOCTL_MIF);
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	ioctl &= ~(SDW_SHIM_IOCTL_BKE);
	ioctl &= ~(SDW_SHIM_IOCTL_COE);
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	/* at this point Master IP has full control of the I/Os */
}

/* this needs to be called with shim_lock */
static void intel_shim_master_ip_to_glue(struct sdw_intel *sdw)
{
	unsigned int link_id = sdw->instance;
	void __iomem *shim = sdw->link_res->shim;
	u16 ioctl;

	/* Glue logic */
	ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
	ioctl |= SDW_SHIM_IOCTL_BKE;
	ioctl |= SDW_SHIM_IOCTL_COE;
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	ioctl &= ~(SDW_SHIM_IOCTL_MIF);
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	/* at this point Integration Glue has full control of the I/Os */
}

/* this needs to be called with shim_lock */
static void intel_shim_init(struct sdw_intel *sdw)
{
	void __iomem *shim = sdw->link_res->shim;
	unsigned int link_id = sdw->instance;
	u16 ioctl = 0, act;

	/* Initialize Shim */
	ioctl |= SDW_SHIM_IOCTL_BKE;
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	ioctl |= SDW_SHIM_IOCTL_WPDD;
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	ioctl |= SDW_SHIM_IOCTL_DO;
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	ioctl |= SDW_SHIM_IOCTL_DOE;
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	intel_shim_glue_to_master_ip(sdw);

	act = intel_readw(shim, SDW_SHIM_CTMCTL(link_id));
	u16p_replace_bits(&act, 0x1, SDW_SHIM_CTMCTL_DOAIS);
	act |= SDW_SHIM_CTMCTL_DACTQE;
	act |= SDW_SHIM_CTMCTL_DODS;
	intel_writew(shim, SDW_SHIM_CTMCTL(link_id), act);
	usleep_range(10, 15);
}

static int intel_shim_check_wake(struct sdw_intel *sdw)
{
	void __iomem *shim;
	u16 wake_sts;

	shim = sdw->link_res->shim;
	wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);

	return wake_sts & BIT(sdw->instance);
}

static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
{
	void __iomem *shim = sdw->link_res->shim;
	unsigned int link_id = sdw->instance;
	u16 wake_en, wake_sts;

	mutex_lock(sdw->link_res->shim_lock);
	wake_en = intel_readw(shim, SDW_SHIM_WAKEEN);

	if (wake_enable) {
		/* Enable the wakeup */
		wake_en |= (SDW_SHIM_WAKEEN_ENABLE << link_id);
		intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
	} else {
		/* Disable the wake up interrupt */
		wake_en &= ~(SDW_SHIM_WAKEEN_ENABLE << link_id);
		intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);

		/* Clear wake status */
		wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
		wake_sts |= (SDW_SHIM_WAKESTS_STATUS << link_id);
		intel_writew(shim, SDW_SHIM_WAKESTS, wake_sts);
	}
	mutex_unlock(sdw->link_res->shim_lock);
}

static bool intel_check_cmdsync_unlocked(struct sdw_intel *sdw)
{
	void __iomem *shim = sdw->link_res->shim;
	int sync_reg;

	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
	return !!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK);
}

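/*
 * Power up the link. On the first link to come up, the SYNCPRD value is
 * derived from the oscillator frequency reported in the _DSD properties,
 * the SyncCPU bit is set and all enabled links are powered up; when the
 * hardware supports it, the link clock source (MLCS) is updated as well.
 * The shim_lock mutex is taken internally, so callers must not hold it.
 */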
static int intel_link_power_up(struct sdw_intel *sdw)
{
	unsigned int link_id = sdw->instance;
	void __iomem *shim = sdw->link_res->shim;
	u32 *shim_mask = sdw->link_res->shim_mask;
	struct sdw_bus *bus = &sdw->cdns.bus;
	struct sdw_master_prop *prop = &bus->prop;
	u32 spa_mask, cpa_mask;
	u32 link_control;
	int ret = 0;
	u32 clock_source;
	u32 syncprd;
	u32 sync_reg;
	bool lcap_mlcs;

	mutex_lock(sdw->link_res->shim_lock);

	/*
	 * The hardware relies on an internal counter, typically 4kHz,
	 * to generate the SoundWire SSP - which defines a 'safe'
	 * synchronization point between commands and audio transport
	 * and allows for multi link synchronization. The SYNCPRD value
	 * is only dependent on the oscillator clock provided to
	 * the IP, so adjust based on _DSD properties reported in DSDT
	 * tables. The values reported are based on either 24MHz
	 * (CNL/CML) or 38.4 MHz (ICL/TGL+). On MeteorLake additional
	 * frequencies are available with the MLCS clock source selection.
	 */
	lcap_mlcs = intel_readl(shim, SDW_SHIM_LCAP) & SDW_SHIM_LCAP_MLCS_MASK;

	if (prop->mclk_freq % 6000000) {
		if (prop->mclk_freq % 2400000) {
			if (lcap_mlcs) {
				syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24_576;
				clock_source = SDW_SHIM_MLCS_CARDINAL_CLK;
			} else {
				dev_err(sdw->cdns.dev, "%s: invalid clock configuration, mclk %d lcap_mlcs %d\n",
					__func__, prop->mclk_freq, lcap_mlcs);
				ret = -EINVAL;
				goto out;
			}
		} else {
			syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4;
			clock_source = SDW_SHIM_MLCS_XTAL_CLK;
		}
	} else {
		if (lcap_mlcs) {
			syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_96;
			clock_source = SDW_SHIM_MLCS_AUDIO_PLL_CLK;
		} else {
			syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24;
			clock_source = SDW_SHIM_MLCS_XTAL_CLK;
		}
	}

	if (!*shim_mask) {
		dev_dbg(sdw->cdns.dev, "powering up all links\n");

		/* we first need to program the SyncPRD/CPU registers */
		dev_dbg(sdw->cdns.dev,
			"first link up, programming SYNCPRD\n");

		/* set SyncPRD period */
		sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
		u32p_replace_bits(&sync_reg, syncprd, SDW_SHIM_SYNC_SYNCPRD);

		/* Set SyncCPU bit */
		sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
		intel_writel(shim, SDW_SHIM_SYNC, sync_reg);

		/* Link power up sequence */
		link_control = intel_readl(shim, SDW_SHIM_LCTL);

		/* only power-up enabled links */
		spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, sdw->link_res->link_mask);
		cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);

		link_control |= spa_mask;

		ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
		if (ret < 0) {
			dev_err(sdw->cdns.dev, "Failed to power up link: %d\n", ret);
			goto out;
		}

		/* SyncCPU will change once link is active */
		ret = intel_wait_bit(shim, SDW_SHIM_SYNC,
				     SDW_SHIM_SYNC_SYNCCPU, 0);
		if (ret < 0) {
			dev_err(sdw->cdns.dev,
				"Failed to set SHIM_SYNC: %d\n", ret);
			goto out;
		}

		/* update link clock if needed */
		if (lcap_mlcs) {
			link_control = intel_readl(shim, SDW_SHIM_LCTL);
			u32p_replace_bits(&link_control, clock_source, SDW_SHIM_LCTL_MLCS_MASK);
			intel_writel(shim, SDW_SHIM_LCTL, link_control);
		}
	}

	*shim_mask |= BIT(link_id);

	sdw->cdns.link_up = true;

	intel_shim_init(sdw);

out:
	mutex_unlock(sdw->link_res->shim_lock);

	return ret;
}

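/*
 * Power down the link. When the last link tracked in shim_mask goes down,
 * the SPA bits of all enabled links are cleared and the corresponding CPA
 * bits are polled to confirm the power-down completed.
 */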
static int intel_link_power_down(struct sdw_intel *sdw)
{
	u32 link_control, spa_mask, cpa_mask;
	unsigned int link_id = sdw->instance;
	void __iomem *shim = sdw->link_res->shim;
	u32 *shim_mask = sdw->link_res->shim_mask;
	int ret = 0;

	mutex_lock(sdw->link_res->shim_lock);

	if (!(*shim_mask & BIT(link_id)))
		dev_err(sdw->cdns.dev,
			"%s: Unbalanced power-up/down calls\n", __func__);

	sdw->cdns.link_up = false;

	intel_shim_master_ip_to_glue(sdw);

	*shim_mask &= ~BIT(link_id);

	if (!*shim_mask) {

		dev_dbg(sdw->cdns.dev, "powering down all links\n");

		/* Link power down sequence */
		link_control = intel_readl(shim, SDW_SHIM_LCTL);

		/* only power-down enabled links */
		spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, ~sdw->link_res->link_mask);
		cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);

		link_control &= spa_mask;

		ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
		if (ret < 0) {
			dev_err(sdw->cdns.dev, "%s: could not power down link\n", __func__);

			/*
			 * we leave the sdw->cdns.link_up flag as false since we've disabled
			 * the link at this point and cannot handle interrupts any longer.
			 */
		}
	}

	mutex_unlock(sdw->link_res->shim_lock);

	return ret;
}

static void intel_shim_sync_arm(struct sdw_intel *sdw)
{
	void __iomem *shim = sdw->link_res->shim;
	u32 sync_reg;

	mutex_lock(sdw->link_res->shim_lock);

	/* update SYNC register */
	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
	sync_reg |= (SDW_SHIM_SYNC_CMDSYNC << sdw->instance);
	intel_writel(shim, SDW_SHIM_SYNC, sync_reg);

	mutex_unlock(sdw->link_res->shim_lock);
}

static int intel_shim_sync_go_unlocked(struct sdw_intel *sdw)
{
	void __iomem *shim = sdw->link_res->shim;
	u32 sync_reg;

	/* Read SYNC register */
	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);

	/*
	 * Set SyncGO bit to synchronously trigger a bank switch for
	 * all the masters. A write to SYNCGO bit clears CMDSYNC bit for all
	 * the Masters.
	 */
	sync_reg |= SDW_SHIM_SYNC_SYNCGO;

	intel_writel(shim, SDW_SHIM_SYNC, sync_reg);

	return 0;
}

static int intel_shim_sync_go(struct sdw_intel *sdw)
{
	int ret;

	mutex_lock(sdw->link_res->shim_lock);

	ret = intel_shim_sync_go_unlocked(sdw);

	mutex_unlock(sdw->link_res->shim_lock);

	return ret;
}

/*
 * PDI routines
 */
static void intel_pdi_init(struct sdw_intel *sdw,
			   struct sdw_cdns_stream_config *config)
{
	void __iomem *shim = sdw->link_res->shim;
	unsigned int link_id = sdw->instance;
	int pcm_cap;

	/* PCM Stream Capability */
	pcm_cap = intel_readw(shim, SDW_SHIM_PCMSCAP(link_id));

	config->pcm_bd = FIELD_GET(SDW_SHIM_PCMSCAP_BSS, pcm_cap);
	config->pcm_in = FIELD_GET(SDW_SHIM_PCMSCAP_ISS, pcm_cap);
	config->pcm_out = FIELD_GET(SDW_SHIM_PCMSCAP_OSS, pcm_cap);

	dev_dbg(sdw->cdns.dev, "PCM cap bd:%d in:%d out:%d\n",
		config->pcm_bd, config->pcm_in, config->pcm_out);
}

static int
intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num)
{
	void __iomem *shim = sdw->link_res->shim;
	unsigned int link_id = sdw->instance;
	int count;

	count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));

	/*
	 * WORKAROUND: on all existing Intel controllers, pdi
	 * number 2 reports channel count as 1 even though it
	 * supports 8 channels. Performing hardcoding for pdi
	 * number 2.
	 */
	if (pdi_num == 2)
		count = 7;

	/* zero based values for channel count in register */
	count++;

	return count;
}

static int intel_pdi_get_ch_update(struct sdw_intel *sdw,
				   struct sdw_cdns_pdi *pdi,
				   unsigned int num_pdi,
				   unsigned int *num_ch)
{
	int i, ch_count = 0;

	for (i = 0; i < num_pdi; i++) {
		pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num);
		ch_count += pdi->ch_count;
		pdi++;
	}

	*num_ch = ch_count;
	return 0;
}

static int intel_pdi_stream_ch_update(struct sdw_intel *sdw,
				      struct sdw_cdns_streams *stream)
{
	intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd,
				&stream->num_ch_bd);

	intel_pdi_get_ch_update(sdw, stream->in, stream->num_in,
				&stream->num_ch_in);

	intel_pdi_get_ch_update(sdw, stream->out, stream->num_out,
				&stream->num_ch_out);

	return 0;
}

static void
intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
{
	void __iomem *shim = sdw->link_res->shim;
	unsigned int link_id = sdw->instance;
	int pdi_conf = 0;

	/* the Bulk and PCM streams are not contiguous */
	pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
	if (pdi->num >= 2)
		pdi->intel_alh_id += 2;

	/*
	 * Program stream parameters to stream SHIM register
	 * This is applicable for PCM stream only.
	 */
	if (pdi->type != SDW_STREAM_PCM)
		return;

	if (pdi->dir == SDW_DATA_DIR_RX)
		pdi_conf |= SDW_SHIM_PCMSYCM_DIR;
	else
		pdi_conf &= ~(SDW_SHIM_PCMSYCM_DIR);

	u32p_replace_bits(&pdi_conf, pdi->intel_alh_id, SDW_SHIM_PCMSYCM_STREAM);
	u32p_replace_bits(&pdi_conf, pdi->l_ch_num, SDW_SHIM_PCMSYCM_LCHN);
	u32p_replace_bits(&pdi_conf, pdi->h_ch_num, SDW_SHIM_PCMSYCM_HCHN);

	intel_writew(shim, SDW_SHIM_PCMSYCHM(link_id, pdi->num), pdi_conf);
}

static void
intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
{
	void __iomem *alh = sdw->link_res->alh;
	unsigned int link_id = sdw->instance;
	unsigned int conf;

	/* the Bulk and PCM streams are not contiguous */
	pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
	if (pdi->num >= 2)
		pdi->intel_alh_id += 2;

	/* Program Stream config ALH register */
	conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id));

	u32p_replace_bits(&conf, SDW_ALH_STRMZCFG_DMAT_VAL, SDW_ALH_STRMZCFG_DMAT);
	u32p_replace_bits(&conf, pdi->ch_count - 1, SDW_ALH_STRMZCFG_CHN);

	intel_writel(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id), conf);
}

static int intel_params_stream(struct sdw_intel *sdw,
			       struct snd_pcm_substream *substream,
			       struct snd_soc_dai *dai,
			       struct snd_pcm_hw_params *hw_params,
			       int link_id, int alh_stream_id)
{
	struct sdw_intel_link_res *res = sdw->link_res;
	struct sdw_intel_stream_params_data params_data;

	params_data.substream = substream;
	params_data.dai = dai;
	params_data.hw_params = hw_params;
	params_data.link_id = link_id;
	params_data.alh_stream_id = alh_stream_id;

	if (res->ops && res->ops->params_stream && res->dev)
		return res->ops->params_stream(res->dev,
					       &params_data);
	return -EIO;
}

/*
 * DAI routines
 */

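/*
 * Let the owner of the link ops release the resources allocated for this
 * stream. Unlike intel_params_stream(), a missing callback is not treated
 * as an error so that hw_free can still complete.
 */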
static int intel_free_stream(struct sdw_intel *sdw,
			     struct snd_pcm_substream *substream,
			     struct snd_soc_dai *dai,
			     int link_id)
{
	struct sdw_intel_link_res *res = sdw->link_res;
	struct sdw_intel_stream_free_data free_data;

	free_data.substream = substream;
	free_data.dai = dai;
	free_data.link_id = link_id;

	if (res->ops && res->ops->free_stream && res->dev)
		return res->ops->free_stream(res->dev, &free_data);

	return 0;
}

static int intel_hw_params(struct snd_pcm_substream *substream,
			   struct snd_pcm_hw_params *params,
			   struct snd_soc_dai *dai)
{
	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
	struct sdw_intel *sdw = cdns_to_intel(cdns);
	struct sdw_cdns_dai_runtime *dai_runtime;
	struct sdw_cdns_pdi *pdi;
	struct sdw_stream_config sconfig;
	struct sdw_port_config *pconfig;
	int ch, dir;
	int ret;

	dai_runtime = cdns->dai_runtime_array[dai->id];
	if (!dai_runtime)
		return -EIO;

	ch = params_channels(params);
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		dir = SDW_DATA_DIR_RX;
	else
		dir = SDW_DATA_DIR_TX;

	pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);

	if (!pdi) {
		ret = -EINVAL;
		goto error;
	}

	/* do run-time configurations for SHIM, ALH and PDI/PORT */
	intel_pdi_shim_configure(sdw, pdi);
	intel_pdi_alh_configure(sdw, pdi);
	sdw_cdns_config_stream(cdns, ch, dir, pdi);

	/* store pdi and hw_params, may be needed in prepare step */
	dai_runtime->paused = false;
	dai_runtime->suspended = false;
	dai_runtime->pdi = pdi;

	/* Inform DSP about PDI stream number */
	ret = intel_params_stream(sdw, substream, dai, params,
				  sdw->instance,
				  pdi->intel_alh_id);
	if (ret)
		goto error;

	sconfig.direction = dir;
	sconfig.ch_count = ch;
	sconfig.frame_rate = params_rate(params);
	sconfig.type = dai_runtime->stream_type;

	sconfig.bps = snd_pcm_format_width(params_format(params));

	/* Port configuration */
	pconfig = kzalloc(sizeof(*pconfig), GFP_KERNEL);
	if (!pconfig) {
		ret = -ENOMEM;
		goto error;
	}

	pconfig->num = pdi->num;
	pconfig->ch_mask = (1 << ch) - 1;

	ret = sdw_stream_add_master(&cdns->bus, &sconfig,
				    pconfig, 1, dai_runtime->stream);
	if (ret)
		dev_err(cdns->dev, "add master to stream failed:%d\n", ret);

	kfree(pconfig);
error:
	return ret;
}

static int intel_prepare(struct snd_pcm_substream *substream,
			 struct snd_soc_dai *dai)
{
	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
	struct sdw_intel *sdw = cdns_to_intel(cdns);
	struct sdw_cdns_dai_runtime *dai_runtime;
	int ch, dir;
	int ret = 0;

	dai_runtime = cdns->dai_runtime_array[dai->id];
	if (!dai_runtime) {
		dev_err(dai->dev, "failed to get dai runtime in %s\n",
			__func__);
		return -EIO;
	}

	if (dai_runtime->suspended) {
		struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
		struct snd_pcm_hw_params *hw_params;

		hw_params = &rtd->dpcm[substream->stream].hw_params;

		dai_runtime->suspended = false;

		/*
		 * .prepare() is called after system resume, where we
		 * need to reinitialize the SHIM/ALH/Cadence IP.
		 * .prepare() is also called to deal with underflows,
		 * but in those cases we cannot touch ALH/SHIM
		 * registers
		 */

		/* configure stream */
		ch = params_channels(hw_params);
		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
			dir = SDW_DATA_DIR_RX;
		else
			dir = SDW_DATA_DIR_TX;

		intel_pdi_shim_configure(sdw, dai_runtime->pdi);
		intel_pdi_alh_configure(sdw, dai_runtime->pdi);
		sdw_cdns_config_stream(cdns, ch, dir, dai_runtime->pdi);

		/* Inform DSP about PDI stream number */
		ret = intel_params_stream(sdw, substream, dai,
					  hw_params,
					  sdw->instance,
					  dai_runtime->pdi->intel_alh_id);
	}

	return ret;
}

static int
intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
{
	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
	struct sdw_intel *sdw = cdns_to_intel(cdns);
	struct sdw_cdns_dai_runtime *dai_runtime;
	int ret;

	dai_runtime = cdns->dai_runtime_array[dai->id];
	if (!dai_runtime)
		return -EIO;

	/*
	 * The sdw stream state will transition to RELEASED when stream->
	 * master_list is empty. So the stream state will transition to
	 * DEPREPARED for the first cpu-dai and to RELEASED for the last
	 * cpu-dai.
	 */
	ret = sdw_stream_remove_master(&cdns->bus, dai_runtime->stream);
	if (ret < 0) {
		dev_err(dai->dev, "remove master from stream %s failed: %d\n",
			dai_runtime->stream->name, ret);
		return ret;
	}

	ret = intel_free_stream(sdw, substream, dai, sdw->instance);
	if (ret < 0) {
		dev_err(dai->dev, "intel_free_stream: failed %d\n", ret);
		return ret;
	}

	dai_runtime->pdi = NULL;

	return 0;
}

static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
				    void *stream, int direction)
{
	return cdns_set_sdw_stream(dai, stream, direction);
}

static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
				  int direction)
{
	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
	struct sdw_cdns_dai_runtime *dai_runtime;

	dai_runtime = cdns->dai_runtime_array[dai->id];
	if (!dai_runtime)
		return ERR_PTR(-EINVAL);

	return dai_runtime->stream;
}

static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai)
{
	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
	struct sdw_cdns_dai_runtime *dai_runtime;
	int ret = 0;

	dai_runtime = cdns->dai_runtime_array[dai->id];
	if (!dai_runtime) {
		dev_err(dai->dev, "failed to get dai runtime in %s\n",
			__func__);
		return -EIO;
	}

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_SUSPEND:

		/*
		 * The .prepare callback is used to deal with xruns and resume operations.
		 * In the case of xruns, the DMAs and SHIM registers cannot be touched,
		 * but for resume operations the DMAs and SHIM registers need to be initialized.
		 * the .trigger callback is used to track the suspend case only.
		 */

		dai_runtime->suspended = true;

		break;

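	/*
	 * Track the pause state: if a suspend happens while paused, the ALSA
	 * core does not send TRIGGER_SUSPEND and intel_component_dais_suspend()
	 * relies on this flag to mark the DAI as suspended.
	 */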
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		dai_runtime->paused = true;
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		dai_runtime->paused = false;
		break;
	default:
		break;
	}

	return ret;
}

static int intel_component_probe(struct snd_soc_component *component)
{
	int ret;

	/*
	 * make sure the device is pm_runtime_active before initiating
	 * bus transactions during the card registration.
	 * We use pm_runtime_resume() here, without taking a reference
	 * and releasing it immediately.
	 */
	ret = pm_runtime_resume(component->dev);
	if (ret < 0 && ret != -EACCES)
		return ret;

	return 0;
}

static int intel_component_dais_suspend(struct snd_soc_component *component)
{
	struct snd_soc_dai *dai;

	/*
	 * In the corner case where a SUSPEND happens during a PAUSE, the ALSA core
	 * does not throw the TRIGGER_SUSPEND. This leaves the DAIs in an unbalanced state.
	 * Since the component suspend is called last, we can trap this corner case
	 * and force the DAIs to release their resources.
	 */
	for_each_component_dais(component, dai) {
		struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
		struct sdw_cdns_dai_runtime *dai_runtime;

		dai_runtime = cdns->dai_runtime_array[dai->id];

		if (!dai_runtime)
			continue;

		if (dai_runtime->suspended)
			continue;

		if (dai_runtime->paused)
			dai_runtime->suspended = true;
	}

	return 0;
}

static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
	.hw_params = intel_hw_params,
	.prepare = intel_prepare,
	.hw_free = intel_hw_free,
	.trigger = intel_trigger,
	.set_stream = intel_pcm_set_sdw_stream,
	.get_stream = intel_get_sdw_stream,
};

static const struct snd_soc_component_driver dai_component = {
	.name = "soundwire",
	.probe = intel_component_probe,
	.suspend = intel_component_dais_suspend,
	.legacy_dai_naming = 1,
};

static int intel_create_dai(struct sdw_cdns *cdns,
			    struct snd_soc_dai_driver *dais,
			    enum intel_pdi_type type,
			    u32 num, u32 off, u32 max_ch)
{
	int i;

	if (num == 0)
		return 0;

	for (i = off; i < (off + num); i++) {
		dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL,
					      "SDW%d Pin%d",
					      cdns->instance, i);
		if (!dais[i].name)
			return -ENOMEM;

		if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
			dais[i].playback.channels_min = 1;
			dais[i].playback.channels_max = max_ch;
		}

		if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
			dais[i].capture.channels_min = 1;
			dais[i].capture.channels_max = max_ch;
		}

		dais[i].ops = &intel_pcm_dai_ops;
	}

	return 0;
}

static int intel_register_dai(struct sdw_intel *sdw)
{
	struct sdw_cdns_dai_runtime **dai_runtime_array;
	struct sdw_cdns_stream_config config;
	struct sdw_cdns *cdns = &sdw->cdns;
	struct sdw_cdns_streams *stream;
	struct snd_soc_dai_driver *dais;
	int num_dai, ret, off = 0;

	/* Read the PDI config and initialize cadence PDI */
	intel_pdi_init(sdw, &config);
	ret = sdw_cdns_pdi_init(cdns, config);
	if (ret)
		return ret;

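	/* update the channel count of each PDI from the SHIM capabilities */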
	intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm);

	/* DAIs are created based on total number of PDIs supported */
	num_dai = cdns->pcm.num_pdi;

	dai_runtime_array = devm_kcalloc(cdns->dev, num_dai,
					 sizeof(struct sdw_cdns_dai_runtime *),
					 GFP_KERNEL);
	if (!dai_runtime_array)
		return -ENOMEM;
	cdns->dai_runtime_array = dai_runtime_array;

	dais = devm_kcalloc(cdns->dev, num_dai, sizeof(*dais), GFP_KERNEL);
	if (!dais)
		return -ENOMEM;

	/* Create PCM DAIs */
	stream = &cdns->pcm;

	ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in,
			       off, stream->num_ch_in);
	if (ret)
		return ret;

	off += cdns->pcm.num_in;
	ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pcm.num_out,
			       off, stream->num_ch_out);
	if (ret)
		return ret;

	off += cdns->pcm.num_out;
	ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pcm.num_bd,
			       off, stream->num_ch_bd);
	if (ret)
		return ret;

	return devm_snd_soc_register_component(cdns->dev, &dai_component,
					       dais, num_dai);
}

const struct sdw_intel_hw_ops sdw_intel_cnl_hw_ops = {
	.debugfs_init = intel_debugfs_init,
	.debugfs_exit = intel_debugfs_exit,

	.register_dai = intel_register_dai,

	.check_clock_stop = intel_check_clock_stop,
	.start_bus = intel_start_bus,
	.start_bus_after_reset = intel_start_bus_after_reset,
	.start_bus_after_clock_stop = intel_start_bus_after_clock_stop,
	.stop_bus = intel_stop_bus,

	.link_power_up = intel_link_power_up,
	.link_power_down = intel_link_power_down,

	.shim_check_wake = intel_shim_check_wake,
	.shim_wake = intel_shim_wake,

	.pre_bank_switch = intel_pre_bank_switch,
	.post_bank_switch = intel_post_bank_switch,

	.sync_arm = intel_shim_sync_arm,
	.sync_go_unlocked = intel_shim_sync_go_unlocked,
	.sync_go = intel_shim_sync_go,
	.sync_check_cmdsync_unlocked = intel_check_cmdsync_unlocked,
};
EXPORT_SYMBOL_NS(sdw_intel_cnl_hw_ops, SOUNDWIRE_INTEL);