// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
// Copyright(c) 2015-17 Intel Corporation.

/*
 * Soundwire Intel Master Driver
 */

#include <linux/acpi.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/auxiliary_bus.h>
#include <sound/pcm_params.h>
#include <linux/pm_runtime.h>
#include <sound/soc.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_intel.h>
#include "cadence_master.h"
#include "bus.h"
#include "intel.h"

#define INTEL_MASTER_SUSPEND_DELAY_MS	3000
#define INTEL_MASTER_RESET_ITERATIONS	10

/*
 * debug/config flags for the Intel SoundWire Master.
 *
 * Since we may have multiple masters active, we can have up to 8
 * flags reused in each byte, with master0 using the ls-byte, etc.
 */

#define SDW_INTEL_MASTER_DISABLE_PM_RUNTIME		BIT(0)
#define SDW_INTEL_MASTER_DISABLE_CLOCK_STOP		BIT(1)
#define SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE	BIT(2)
#define SDW_INTEL_MASTER_DISABLE_MULTI_LINK		BIT(3)

static int md_flags;
module_param_named(sdw_md_flags, md_flags, int, 0444);
MODULE_PARM_DESC(sdw_md_flags, "SoundWire Intel Master device flags (0x0 all off)");

/* Intel SHIM Registers Definition */
#define SDW_SHIM_LCAP			0x0
#define SDW_SHIM_LCTL			0x4
#define SDW_SHIM_IPPTR			0x8
#define SDW_SHIM_SYNC			0xC

/* per-link register banks are 0x60 bytes apart; (x) is the link index */
#define SDW_SHIM_CTLSCAP(x)		(0x010 + 0x60 * (x))
#define SDW_SHIM_CTLS0CM(x)		(0x012 + 0x60 * (x))
#define SDW_SHIM_CTLS1CM(x)		(0x014 + 0x60 * (x))
#define SDW_SHIM_CTLS2CM(x)		(0x016 + 0x60 * (x))
#define SDW_SHIM_CTLS3CM(x)		(0x018 + 0x60 * (x))
#define SDW_SHIM_PCMSCAP(x)		(0x020 + 0x60 * (x))

/* per-PDI PCM stream channel map/count registers; (y) is the PDI index */
#define SDW_SHIM_PCMSYCHM(x, y)		(0x022 + (0x60 * (x)) + (0x2 * (y)))
#define SDW_SHIM_PCMSYCHC(x, y)		(0x042 + (0x60 * (x)) + (0x2 * (y)))
#define SDW_SHIM_PDMSCAP(x)		(0x062 + 0x60 * (x))
#define SDW_SHIM_IOCTL(x)		(0x06C + 0x60 * (x))
#define SDW_SHIM_CTMCTL(x)		(0x06E + 0x60 * (x))

#define SDW_SHIM_WAKEEN			0x190
#define SDW_SHIM_WAKESTS		0x192

#define SDW_SHIM_LCTL_SPA		BIT(0)
#define SDW_SHIM_LCTL_SPA_MASK		GENMASK(3, 0)
#define SDW_SHIM_LCTL_CPA		BIT(8)
#define SDW_SHIM_LCTL_CPA_MASK		GENMASK(11, 8)

#define SDW_SHIM_SYNC_SYNCPRD_VAL_24	(24000 / SDW_CADENCE_GSYNC_KHZ - 1)
#define SDW_SHIM_SYNC_SYNCPRD_VAL_38_4	(38400 / SDW_CADENCE_GSYNC_KHZ - 1)
#define SDW_SHIM_SYNC_SYNCPRD		GENMASK(14, 0)
#define SDW_SHIM_SYNC_SYNCCPU		BIT(15)
#define SDW_SHIM_SYNC_CMDSYNC_MASK	GENMASK(19, 16)
#define SDW_SHIM_SYNC_CMDSYNC		BIT(16)
#define SDW_SHIM_SYNC_SYNCGO		BIT(24)

#define SDW_SHIM_PCMSCAP_ISS		GENMASK(3, 0)
#define SDW_SHIM_PCMSCAP_OSS		GENMASK(7, 4)
#define SDW_SHIM_PCMSCAP_BSS		GENMASK(12, 8)

#define SDW_SHIM_PCMSYCM_LCHN		GENMASK(3, 0)
#define SDW_SHIM_PCMSYCM_HCHN		GENMASK(7, 4)
#define SDW_SHIM_PCMSYCM_STREAM		GENMASK(13, 8)
#define SDW_SHIM_PCMSYCM_DIR		BIT(15)

#define SDW_SHIM_PDMSCAP_ISS		GENMASK(3, 0)
#define SDW_SHIM_PDMSCAP_OSS		GENMASK(7, 4)
#define SDW_SHIM_PDMSCAP_BSS		GENMASK(12, 8)
#define SDW_SHIM_PDMSCAP_CPSS		GENMASK(15, 13)

#define SDW_SHIM_IOCTL_MIF		BIT(0)
#define SDW_SHIM_IOCTL_CO		BIT(1)
#define SDW_SHIM_IOCTL_COE		BIT(2)
#define SDW_SHIM_IOCTL_DO		BIT(3)
#define SDW_SHIM_IOCTL_DOE		BIT(4)
#define SDW_SHIM_IOCTL_BKE		BIT(5)
#define SDW_SHIM_IOCTL_WPDD		BIT(6)
#define SDW_SHIM_IOCTL_CIBD		BIT(8)
#define SDW_SHIM_IOCTL_DIBD		BIT(9)

#define SDW_SHIM_CTMCTL_DACTQE		BIT(0)
#define SDW_SHIM_CTMCTL_DODS		BIT(1)
#define SDW_SHIM_CTMCTL_DOAIS		GENMASK(4, 3)

#define SDW_SHIM_WAKEEN_ENABLE		BIT(0)
#define SDW_SHIM_WAKESTS_STATUS		BIT(0)

/* Intel ALH Register definitions */
#define SDW_ALH_STRMZCFG(x)		(0x000 + (0x4 * (x)))
#define SDW_ALH_NUM_STREAMS		64

#define SDW_ALH_STRMZCFG_DMAT_VAL	0x3
#define SDW_ALH_STRMZCFG_DMAT		GENMASK(7, 0)
#define SDW_ALH_STRMZCFG_CHN		GENMASK(19, 16)

enum intel_pdi_type {
	INTEL_PDI_IN = 0,
	INTEL_PDI_OUT = 1,
	INTEL_PDI_BD = 2,
};

#define cdns_to_intel(_cdns) container_of(_cdns, struct sdw_intel, cdns)

/*
 * Read, write helpers for HW registers
 */
static inline int intel_readl(void __iomem *base, int offset)
{
	return readl(base + offset);
}

static inline void intel_writel(void __iomem *base, int offset, int value)
{
	writel(value, base + offset);
}

static inline u16 intel_readw(void __iomem *base, int offset)
{
	return readw(base + offset);
}

static inline void intel_writew(void __iomem *base, int offset, u16 value)
{
	writew(value, base + offset);
}

/*
 * Poll a 32-bit register until (value & mask) == target.
 * Retries up to 10 times with a 50-100us sleep between reads;
 * returns 0 on success, -EAGAIN on timeout.
 */
static int intel_wait_bit(void __iomem *base, int offset, u32 mask, u32 target)
{
	int timeout = 10;
	u32 reg_read;

	do {
		reg_read = readl(base + offset);
		if ((reg_read & mask) == target)
			return 0;

		timeout--;
		usleep_range(50, 100);
	} while (timeout != 0);

	return -EAGAIN;
}

/* write 'value', then wait for the bits in 'mask' to self-clear */
static int intel_clear_bit(void __iomem *base, int offset, u32 value, u32 mask)
{
	writel(value, base + offset);
	return intel_wait_bit(base, offset, mask, 0);
}

/* write 'value', then wait for the bits in 'mask' to become set */
static int intel_set_bit(void __iomem *base, int offset, u32 value, u32 mask)
{
	writel(value, base + offset);
	return intel_wait_bit(base, offset, mask, mask);
}

/*
 * debugfs
 */
#ifdef CONFIG_DEBUG_FS

#define RD_BUF (2 * PAGE_SIZE)

/*
 * Append one "reg value" line to 'buf' at position 'pos'.
 * 'l' selects a 32-bit (true) or 16-bit (false) register read.
 */
static ssize_t intel_sprintf(void __iomem *mem, bool l,
			     char *buf, size_t pos, unsigned int reg)
{
	int value;

	if (l)
		value = intel_readl(mem, reg);
	else
		value = intel_readw(mem, reg);

	return scnprintf(buf + pos, RD_BUF - pos, "%4x\t%4x\n", reg, value);
}

/* dump all SHIM and ALH registers for the enabled links */
static int intel_reg_show(struct seq_file *s_file, void *data)
{
	struct sdw_intel *sdw = s_file->private;
	void __iomem *s = sdw->link_res->shim;
	void __iomem *a = sdw->link_res->alh;
	char *buf;
	ssize_t ret;
	int i, j;
	unsigned int links, reg;

	buf = kzalloc(RD_BUF, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* LCAP[2:0] reports the number of links supported by the hardware */
	links = intel_readl(s, SDW_SHIM_LCAP) & GENMASK(2, 0);

	ret = scnprintf(buf, RD_BUF, "Register  Value\n");
	ret += scnprintf(buf + ret, RD_BUF - ret, "\nShim\n");

	for (i = 0; i < links; i++) {
		reg = SDW_SHIM_LCAP + i * 4;
		ret += intel_sprintf(s, true, buf, ret, reg);
	}

	for (i = 0; i < links; i++) {
		ret += scnprintf(buf + ret, RD_BUF - ret, "\nLink%d\n", i);
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLSCAP(i));
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS0CM(i));
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS1CM(i));
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS2CM(i));
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS3CM(i));
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_PCMSCAP(i));

		ret += scnprintf(buf + ret, RD_BUF - ret, "\n PCMSyCH registers\n");

		/*
		 * the value 10 is the number of PDIs. We will need a
		 * cleanup to remove hard-coded Intel configurations
		 * from cadence_master.c
		 */
		for (j = 0; j < 10; j++) {
			ret += intel_sprintf(s, false, buf, ret,
					SDW_SHIM_PCMSYCHM(i, j));
			ret += intel_sprintf(s, false, buf, ret,
					SDW_SHIM_PCMSYCHC(i, j));
		}
		ret += scnprintf(buf + ret, RD_BUF - ret, "\n PDMSCAP, IOCTL, CTMCTL\n");

		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_PDMSCAP(i));
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_IOCTL(i));
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTMCTL(i));
	}

	ret += scnprintf(buf + ret, RD_BUF - ret, "\nWake registers\n");
	ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKEEN);
	ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKESTS);

	ret += scnprintf(buf + ret, RD_BUF - ret, "\nALH STRMzCFG\n");
	for (i = 0; i < SDW_ALH_NUM_STREAMS; i++)
		ret += intel_sprintf(a, true, buf, ret, SDW_ALH_STRMZCFG(i));

	seq_printf(s_file, "%s", buf);
	kfree(buf);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(intel_reg);

/* debugfs write hook: override the bus Master data mode (test modes) */
static int intel_set_m_datamode(void *data, u64 value)
{
	struct sdw_intel *sdw = data;
	struct sdw_bus *bus = &sdw->cdns.bus;

	if (value > SDW_PORT_DATA_MODE_STATIC_1)
		return -EINVAL;

	/* Userspace changed the hardware state behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	bus->params.m_data_mode = value;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(intel_set_m_datamode_fops, NULL,
			 intel_set_m_datamode, "%llu\n");

/* debugfs write hook: override the bus Slave data mode (test modes) */
static int intel_set_s_datamode(void *data, u64 value)
{
	struct sdw_intel *sdw = data;
	struct sdw_bus *bus = &sdw->cdns.bus;

	if (value > SDW_PORT_DATA_MODE_STATIC_1)
		return -EINVAL;

	/* Userspace changed the hardware state behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	bus->params.s_data_mode = value;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(intel_set_s_datamode_fops, NULL, 300 intel_set_s_datamode, "%llu\n"); 301 302 static void intel_debugfs_init(struct sdw_intel *sdw) 303 { 304 struct dentry *root = sdw->cdns.bus.debugfs; 305 306 if (!root) 307 return; 308 309 sdw->debugfs = debugfs_create_dir("intel-sdw", root); 310 311 debugfs_create_file("intel-registers", 0400, sdw->debugfs, sdw, 312 &intel_reg_fops); 313 314 debugfs_create_file("intel-m-datamode", 0200, sdw->debugfs, sdw, 315 &intel_set_m_datamode_fops); 316 317 debugfs_create_file("intel-s-datamode", 0200, sdw->debugfs, sdw, 318 &intel_set_s_datamode_fops); 319 320 sdw_cdns_debugfs_init(&sdw->cdns, sdw->debugfs); 321 } 322 323 static void intel_debugfs_exit(struct sdw_intel *sdw) 324 { 325 debugfs_remove_recursive(sdw->debugfs); 326 } 327 #else 328 static void intel_debugfs_init(struct sdw_intel *sdw) {} 329 static void intel_debugfs_exit(struct sdw_intel *sdw) {} 330 #endif /* CONFIG_DEBUG_FS */ 331 332 /* 333 * shim ops 334 */ 335 336 static int intel_link_power_up(struct sdw_intel *sdw) 337 { 338 unsigned int link_id = sdw->instance; 339 void __iomem *shim = sdw->link_res->shim; 340 u32 *shim_mask = sdw->link_res->shim_mask; 341 struct sdw_bus *bus = &sdw->cdns.bus; 342 struct sdw_master_prop *prop = &bus->prop; 343 u32 spa_mask, cpa_mask; 344 u32 link_control; 345 int ret = 0; 346 u32 syncprd; 347 u32 sync_reg; 348 349 mutex_lock(sdw->link_res->shim_lock); 350 351 /* 352 * The hardware relies on an internal counter, typically 4kHz, 353 * to generate the SoundWire SSP - which defines a 'safe' 354 * synchronization point between commands and audio transport 355 * and allows for multi link synchronization. The SYNCPRD value 356 * is only dependent on the oscillator clock provided to 357 * the IP, so adjust based on _DSD properties reported in DSDT 358 * tables. The values reported are based on either 24MHz 359 * (CNL/CML) or 38.4 MHz (ICL/TGL+). 
360 */ 361 if (prop->mclk_freq % 6000000) 362 syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4; 363 else 364 syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24; 365 366 if (!*shim_mask) { 367 dev_dbg(sdw->cdns.dev, "%s: powering up all links\n", __func__); 368 369 /* we first need to program the SyncPRD/CPU registers */ 370 dev_dbg(sdw->cdns.dev, 371 "%s: first link up, programming SYNCPRD\n", __func__); 372 373 /* set SyncPRD period */ 374 sync_reg = intel_readl(shim, SDW_SHIM_SYNC); 375 u32p_replace_bits(&sync_reg, syncprd, SDW_SHIM_SYNC_SYNCPRD); 376 377 /* Set SyncCPU bit */ 378 sync_reg |= SDW_SHIM_SYNC_SYNCCPU; 379 intel_writel(shim, SDW_SHIM_SYNC, sync_reg); 380 381 /* Link power up sequence */ 382 link_control = intel_readl(shim, SDW_SHIM_LCTL); 383 384 /* only power-up enabled links */ 385 spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, sdw->link_res->link_mask); 386 cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask); 387 388 link_control |= spa_mask; 389 390 ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask); 391 if (ret < 0) { 392 dev_err(sdw->cdns.dev, "Failed to power up link: %d\n", ret); 393 goto out; 394 } 395 396 /* SyncCPU will change once link is active */ 397 ret = intel_wait_bit(shim, SDW_SHIM_SYNC, 398 SDW_SHIM_SYNC_SYNCCPU, 0); 399 if (ret < 0) { 400 dev_err(sdw->cdns.dev, 401 "Failed to set SHIM_SYNC: %d\n", ret); 402 goto out; 403 } 404 } 405 406 *shim_mask |= BIT(link_id); 407 408 sdw->cdns.link_up = true; 409 out: 410 mutex_unlock(sdw->link_res->shim_lock); 411 412 return ret; 413 } 414 415 /* this needs to be called with shim_lock */ 416 static void intel_shim_glue_to_master_ip(struct sdw_intel *sdw) 417 { 418 void __iomem *shim = sdw->link_res->shim; 419 unsigned int link_id = sdw->instance; 420 u16 ioctl; 421 422 /* Switch to MIP from Glue logic */ 423 ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id)); 424 425 ioctl &= ~(SDW_SHIM_IOCTL_DOE); 426 intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl); 427 usleep_range(10, 15); 
428 429 ioctl &= ~(SDW_SHIM_IOCTL_DO); 430 intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl); 431 usleep_range(10, 15); 432 433 ioctl |= (SDW_SHIM_IOCTL_MIF); 434 intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl); 435 usleep_range(10, 15); 436 437 ioctl &= ~(SDW_SHIM_IOCTL_BKE); 438 ioctl &= ~(SDW_SHIM_IOCTL_COE); 439 intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl); 440 usleep_range(10, 15); 441 442 /* at this point Master IP has full control of the I/Os */ 443 } 444 445 /* this needs to be called with shim_lock */ 446 static void intel_shim_master_ip_to_glue(struct sdw_intel *sdw) 447 { 448 unsigned int link_id = sdw->instance; 449 void __iomem *shim = sdw->link_res->shim; 450 u16 ioctl; 451 452 /* Glue logic */ 453 ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id)); 454 ioctl |= SDW_SHIM_IOCTL_BKE; 455 ioctl |= SDW_SHIM_IOCTL_COE; 456 intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl); 457 usleep_range(10, 15); 458 459 ioctl &= ~(SDW_SHIM_IOCTL_MIF); 460 intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl); 461 usleep_range(10, 15); 462 463 /* at this point Integration Glue has full control of the I/Os */ 464 } 465 466 static int intel_shim_init(struct sdw_intel *sdw, bool clock_stop) 467 { 468 void __iomem *shim = sdw->link_res->shim; 469 unsigned int link_id = sdw->instance; 470 int ret = 0; 471 u16 ioctl = 0, act = 0; 472 473 mutex_lock(sdw->link_res->shim_lock); 474 475 /* Initialize Shim */ 476 ioctl |= SDW_SHIM_IOCTL_BKE; 477 intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl); 478 usleep_range(10, 15); 479 480 ioctl |= SDW_SHIM_IOCTL_WPDD; 481 intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl); 482 usleep_range(10, 15); 483 484 ioctl |= SDW_SHIM_IOCTL_DO; 485 intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl); 486 usleep_range(10, 15); 487 488 ioctl |= SDW_SHIM_IOCTL_DOE; 489 intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl); 490 usleep_range(10, 15); 491 492 intel_shim_glue_to_master_ip(sdw); 493 494 u16p_replace_bits(&act, 0x1, SDW_SHIM_CTMCTL_DOAIS); 
495 act |= SDW_SHIM_CTMCTL_DACTQE; 496 act |= SDW_SHIM_CTMCTL_DODS; 497 intel_writew(shim, SDW_SHIM_CTMCTL(link_id), act); 498 usleep_range(10, 15); 499 500 mutex_unlock(sdw->link_res->shim_lock); 501 502 return ret; 503 } 504 505 static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable) 506 { 507 void __iomem *shim = sdw->link_res->shim; 508 unsigned int link_id = sdw->instance; 509 u16 wake_en, wake_sts; 510 511 mutex_lock(sdw->link_res->shim_lock); 512 wake_en = intel_readw(shim, SDW_SHIM_WAKEEN); 513 514 if (wake_enable) { 515 /* Enable the wakeup */ 516 wake_en |= (SDW_SHIM_WAKEEN_ENABLE << link_id); 517 intel_writew(shim, SDW_SHIM_WAKEEN, wake_en); 518 } else { 519 /* Disable the wake up interrupt */ 520 wake_en &= ~(SDW_SHIM_WAKEEN_ENABLE << link_id); 521 intel_writew(shim, SDW_SHIM_WAKEEN, wake_en); 522 523 /* Clear wake status */ 524 wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS); 525 wake_sts |= (SDW_SHIM_WAKEEN_ENABLE << link_id); 526 intel_writew(shim, SDW_SHIM_WAKESTS_STATUS, wake_sts); 527 } 528 mutex_unlock(sdw->link_res->shim_lock); 529 } 530 531 static int intel_link_power_down(struct sdw_intel *sdw) 532 { 533 u32 link_control, spa_mask, cpa_mask; 534 unsigned int link_id = sdw->instance; 535 void __iomem *shim = sdw->link_res->shim; 536 u32 *shim_mask = sdw->link_res->shim_mask; 537 int ret = 0; 538 539 mutex_lock(sdw->link_res->shim_lock); 540 541 if (!(*shim_mask & BIT(link_id))) 542 dev_err(sdw->cdns.dev, 543 "%s: Unbalanced power-up/down calls\n", __func__); 544 545 sdw->cdns.link_up = false; 546 547 intel_shim_master_ip_to_glue(sdw); 548 549 *shim_mask &= ~BIT(link_id); 550 551 if (!*shim_mask) { 552 553 dev_dbg(sdw->cdns.dev, "%s: powering down all links\n", __func__); 554 555 /* Link power down sequence */ 556 link_control = intel_readl(shim, SDW_SHIM_LCTL); 557 558 /* only power-down enabled links */ 559 spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, ~sdw->link_res->link_mask); 560 cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, 
sdw->link_res->link_mask); 561 562 link_control &= spa_mask; 563 564 ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask); 565 if (ret < 0) { 566 dev_err(sdw->cdns.dev, "%s: could not power down link\n", __func__); 567 568 /* 569 * we leave the sdw->cdns.link_up flag as false since we've disabled 570 * the link at this point and cannot handle interrupts any longer. 571 */ 572 } 573 } 574 575 mutex_unlock(sdw->link_res->shim_lock); 576 577 return ret; 578 } 579 580 static void intel_shim_sync_arm(struct sdw_intel *sdw) 581 { 582 void __iomem *shim = sdw->link_res->shim; 583 u32 sync_reg; 584 585 mutex_lock(sdw->link_res->shim_lock); 586 587 /* update SYNC register */ 588 sync_reg = intel_readl(shim, SDW_SHIM_SYNC); 589 sync_reg |= (SDW_SHIM_SYNC_CMDSYNC << sdw->instance); 590 intel_writel(shim, SDW_SHIM_SYNC, sync_reg); 591 592 mutex_unlock(sdw->link_res->shim_lock); 593 } 594 595 static int intel_shim_sync_go_unlocked(struct sdw_intel *sdw) 596 { 597 void __iomem *shim = sdw->link_res->shim; 598 u32 sync_reg; 599 int ret; 600 601 /* Read SYNC register */ 602 sync_reg = intel_readl(shim, SDW_SHIM_SYNC); 603 604 /* 605 * Set SyncGO bit to synchronously trigger a bank switch for 606 * all the masters. A write to SYNCGO bit clears CMDSYNC bit for all 607 * the Masters. 
608 */ 609 sync_reg |= SDW_SHIM_SYNC_SYNCGO; 610 611 ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg, 612 SDW_SHIM_SYNC_SYNCGO); 613 614 if (ret < 0) 615 dev_err(sdw->cdns.dev, "SyncGO clear failed: %d\n", ret); 616 617 return ret; 618 } 619 620 static int intel_shim_sync_go(struct sdw_intel *sdw) 621 { 622 int ret; 623 624 mutex_lock(sdw->link_res->shim_lock); 625 626 ret = intel_shim_sync_go_unlocked(sdw); 627 628 mutex_unlock(sdw->link_res->shim_lock); 629 630 return ret; 631 } 632 633 /* 634 * PDI routines 635 */ 636 static void intel_pdi_init(struct sdw_intel *sdw, 637 struct sdw_cdns_stream_config *config) 638 { 639 void __iomem *shim = sdw->link_res->shim; 640 unsigned int link_id = sdw->instance; 641 int pcm_cap, pdm_cap; 642 643 /* PCM Stream Capability */ 644 pcm_cap = intel_readw(shim, SDW_SHIM_PCMSCAP(link_id)); 645 646 config->pcm_bd = FIELD_GET(SDW_SHIM_PCMSCAP_BSS, pcm_cap); 647 config->pcm_in = FIELD_GET(SDW_SHIM_PCMSCAP_ISS, pcm_cap); 648 config->pcm_out = FIELD_GET(SDW_SHIM_PCMSCAP_OSS, pcm_cap); 649 650 dev_dbg(sdw->cdns.dev, "PCM cap bd:%d in:%d out:%d\n", 651 config->pcm_bd, config->pcm_in, config->pcm_out); 652 653 /* PDM Stream Capability */ 654 pdm_cap = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id)); 655 656 config->pdm_bd = FIELD_GET(SDW_SHIM_PDMSCAP_BSS, pdm_cap); 657 config->pdm_in = FIELD_GET(SDW_SHIM_PDMSCAP_ISS, pdm_cap); 658 config->pdm_out = FIELD_GET(SDW_SHIM_PDMSCAP_OSS, pdm_cap); 659 660 dev_dbg(sdw->cdns.dev, "PDM cap bd:%d in:%d out:%d\n", 661 config->pdm_bd, config->pdm_in, config->pdm_out); 662 } 663 664 static int 665 intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num, bool pcm) 666 { 667 void __iomem *shim = sdw->link_res->shim; 668 unsigned int link_id = sdw->instance; 669 int count; 670 671 if (pcm) { 672 count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num)); 673 674 /* 675 * WORKAROUND: on all existing Intel controllers, pdi 676 * number 2 reports channel count as 1 even though it 677 * supports 
8 channels. Performing hardcoding for pdi 678 * number 2. 679 */ 680 if (pdi_num == 2) 681 count = 7; 682 683 } else { 684 count = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id)); 685 count = FIELD_GET(SDW_SHIM_PDMSCAP_CPSS, count); 686 } 687 688 /* zero based values for channel count in register */ 689 count++; 690 691 return count; 692 } 693 694 static int intel_pdi_get_ch_update(struct sdw_intel *sdw, 695 struct sdw_cdns_pdi *pdi, 696 unsigned int num_pdi, 697 unsigned int *num_ch, bool pcm) 698 { 699 int i, ch_count = 0; 700 701 for (i = 0; i < num_pdi; i++) { 702 pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num, pcm); 703 ch_count += pdi->ch_count; 704 pdi++; 705 } 706 707 *num_ch = ch_count; 708 return 0; 709 } 710 711 static int intel_pdi_stream_ch_update(struct sdw_intel *sdw, 712 struct sdw_cdns_streams *stream, bool pcm) 713 { 714 intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd, 715 &stream->num_ch_bd, pcm); 716 717 intel_pdi_get_ch_update(sdw, stream->in, stream->num_in, 718 &stream->num_ch_in, pcm); 719 720 intel_pdi_get_ch_update(sdw, stream->out, stream->num_out, 721 &stream->num_ch_out, pcm); 722 723 return 0; 724 } 725 726 static int intel_pdi_ch_update(struct sdw_intel *sdw) 727 { 728 /* First update PCM streams followed by PDM streams */ 729 intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm, true); 730 intel_pdi_stream_ch_update(sdw, &sdw->cdns.pdm, false); 731 732 return 0; 733 } 734 735 static void 736 intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi) 737 { 738 void __iomem *shim = sdw->link_res->shim; 739 unsigned int link_id = sdw->instance; 740 int pdi_conf = 0; 741 742 /* the Bulk and PCM streams are not contiguous */ 743 pdi->intel_alh_id = (link_id * 16) + pdi->num + 3; 744 if (pdi->num >= 2) 745 pdi->intel_alh_id += 2; 746 747 /* 748 * Program stream parameters to stream SHIM register 749 * This is applicable for PCM stream only. 
750 */ 751 if (pdi->type != SDW_STREAM_PCM) 752 return; 753 754 if (pdi->dir == SDW_DATA_DIR_RX) 755 pdi_conf |= SDW_SHIM_PCMSYCM_DIR; 756 else 757 pdi_conf &= ~(SDW_SHIM_PCMSYCM_DIR); 758 759 u32p_replace_bits(&pdi_conf, pdi->intel_alh_id, SDW_SHIM_PCMSYCM_STREAM); 760 u32p_replace_bits(&pdi_conf, pdi->l_ch_num, SDW_SHIM_PCMSYCM_LCHN); 761 u32p_replace_bits(&pdi_conf, pdi->h_ch_num, SDW_SHIM_PCMSYCM_HCHN); 762 763 intel_writew(shim, SDW_SHIM_PCMSYCHM(link_id, pdi->num), pdi_conf); 764 } 765 766 static void 767 intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi) 768 { 769 void __iomem *alh = sdw->link_res->alh; 770 unsigned int link_id = sdw->instance; 771 unsigned int conf; 772 773 /* the Bulk and PCM streams are not contiguous */ 774 pdi->intel_alh_id = (link_id * 16) + pdi->num + 3; 775 if (pdi->num >= 2) 776 pdi->intel_alh_id += 2; 777 778 /* Program Stream config ALH register */ 779 conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id)); 780 781 u32p_replace_bits(&conf, SDW_ALH_STRMZCFG_DMAT_VAL, SDW_ALH_STRMZCFG_DMAT); 782 u32p_replace_bits(&conf, pdi->ch_count - 1, SDW_ALH_STRMZCFG_CHN); 783 784 intel_writel(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id), conf); 785 } 786 787 static int intel_params_stream(struct sdw_intel *sdw, 788 struct snd_pcm_substream *substream, 789 struct snd_soc_dai *dai, 790 struct snd_pcm_hw_params *hw_params, 791 int link_id, int alh_stream_id) 792 { 793 struct sdw_intel_link_res *res = sdw->link_res; 794 struct sdw_intel_stream_params_data params_data; 795 796 params_data.substream = substream; 797 params_data.dai = dai; 798 params_data.hw_params = hw_params; 799 params_data.link_id = link_id; 800 params_data.alh_stream_id = alh_stream_id; 801 802 if (res->ops && res->ops->params_stream && res->dev) 803 return res->ops->params_stream(res->dev, 804 ¶ms_data); 805 return -EIO; 806 } 807 808 static int intel_free_stream(struct sdw_intel *sdw, 809 struct snd_pcm_substream *substream, 810 struct snd_soc_dai *dai, 
811 int link_id) 812 { 813 struct sdw_intel_link_res *res = sdw->link_res; 814 struct sdw_intel_stream_free_data free_data; 815 816 free_data.substream = substream; 817 free_data.dai = dai; 818 free_data.link_id = link_id; 819 820 if (res->ops && res->ops->free_stream && res->dev) 821 return res->ops->free_stream(res->dev, 822 &free_data); 823 824 return 0; 825 } 826 827 /* 828 * bank switch routines 829 */ 830 831 static int intel_pre_bank_switch(struct sdw_bus *bus) 832 { 833 struct sdw_cdns *cdns = bus_to_cdns(bus); 834 struct sdw_intel *sdw = cdns_to_intel(cdns); 835 836 /* Write to register only for multi-link */ 837 if (!bus->multi_link) 838 return 0; 839 840 intel_shim_sync_arm(sdw); 841 842 return 0; 843 } 844 845 static int intel_post_bank_switch(struct sdw_bus *bus) 846 { 847 struct sdw_cdns *cdns = bus_to_cdns(bus); 848 struct sdw_intel *sdw = cdns_to_intel(cdns); 849 void __iomem *shim = sdw->link_res->shim; 850 int sync_reg, ret; 851 852 /* Write to register only for multi-link */ 853 if (!bus->multi_link) 854 return 0; 855 856 mutex_lock(sdw->link_res->shim_lock); 857 858 /* Read SYNC register */ 859 sync_reg = intel_readl(shim, SDW_SHIM_SYNC); 860 861 /* 862 * post_bank_switch() ops is called from the bus in loop for 863 * all the Masters in the steam with the expectation that 864 * we trigger the bankswitch for the only first Master in the list 865 * and do nothing for the other Masters 866 * 867 * So, set the SYNCGO bit only if CMDSYNC bit is set for any Master. 
868 */ 869 if (!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK)) { 870 ret = 0; 871 goto unlock; 872 } 873 874 ret = intel_shim_sync_go_unlocked(sdw); 875 unlock: 876 mutex_unlock(sdw->link_res->shim_lock); 877 878 if (ret < 0) 879 dev_err(sdw->cdns.dev, "Post bank switch failed: %d\n", ret); 880 881 return ret; 882 } 883 884 /* 885 * DAI routines 886 */ 887 888 static int intel_startup(struct snd_pcm_substream *substream, 889 struct snd_soc_dai *dai) 890 { 891 struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai); 892 int ret; 893 894 ret = pm_runtime_get_sync(cdns->dev); 895 if (ret < 0 && ret != -EACCES) { 896 dev_err_ratelimited(cdns->dev, 897 "pm_runtime_get_sync failed in %s, ret %d\n", 898 __func__, ret); 899 pm_runtime_put_noidle(cdns->dev); 900 return ret; 901 } 902 return 0; 903 } 904 905 static int intel_hw_params(struct snd_pcm_substream *substream, 906 struct snd_pcm_hw_params *params, 907 struct snd_soc_dai *dai) 908 { 909 struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai); 910 struct sdw_intel *sdw = cdns_to_intel(cdns); 911 struct sdw_cdns_dma_data *dma; 912 struct sdw_cdns_pdi *pdi; 913 struct sdw_stream_config sconfig; 914 struct sdw_port_config *pconfig; 915 int ch, dir; 916 int ret; 917 bool pcm = true; 918 919 dma = snd_soc_dai_get_dma_data(dai, substream); 920 if (!dma) 921 return -EIO; 922 923 ch = params_channels(params); 924 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) 925 dir = SDW_DATA_DIR_RX; 926 else 927 dir = SDW_DATA_DIR_TX; 928 929 if (dma->stream_type == SDW_STREAM_PDM) 930 pcm = false; 931 932 if (pcm) 933 pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id); 934 else 935 pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pdm, ch, dir, dai->id); 936 937 if (!pdi) { 938 ret = -EINVAL; 939 goto error; 940 } 941 942 /* do run-time configurations for SHIM, ALH and PDI/PORT */ 943 intel_pdi_shim_configure(sdw, pdi); 944 intel_pdi_alh_configure(sdw, pdi); 945 sdw_cdns_config_stream(cdns, ch, dir, pdi); 946 947 /* store pdi and hw_params, may 
be needed in prepare step */ 948 dma->suspended = false; 949 dma->pdi = pdi; 950 dma->hw_params = params; 951 952 /* Inform DSP about PDI stream number */ 953 ret = intel_params_stream(sdw, substream, dai, params, 954 sdw->instance, 955 pdi->intel_alh_id); 956 if (ret) 957 goto error; 958 959 sconfig.direction = dir; 960 sconfig.ch_count = ch; 961 sconfig.frame_rate = params_rate(params); 962 sconfig.type = dma->stream_type; 963 964 if (dma->stream_type == SDW_STREAM_PDM) { 965 sconfig.frame_rate *= 50; 966 sconfig.bps = 1; 967 } else { 968 sconfig.bps = snd_pcm_format_width(params_format(params)); 969 } 970 971 /* Port configuration */ 972 pconfig = kzalloc(sizeof(*pconfig), GFP_KERNEL); 973 if (!pconfig) { 974 ret = -ENOMEM; 975 goto error; 976 } 977 978 pconfig->num = pdi->num; 979 pconfig->ch_mask = (1 << ch) - 1; 980 981 ret = sdw_stream_add_master(&cdns->bus, &sconfig, 982 pconfig, 1, dma->stream); 983 if (ret) 984 dev_err(cdns->dev, "add master to stream failed:%d\n", ret); 985 986 kfree(pconfig); 987 error: 988 return ret; 989 } 990 991 static int intel_prepare(struct snd_pcm_substream *substream, 992 struct snd_soc_dai *dai) 993 { 994 struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai); 995 struct sdw_intel *sdw = cdns_to_intel(cdns); 996 struct sdw_cdns_dma_data *dma; 997 int ch, dir; 998 int ret = 0; 999 1000 dma = snd_soc_dai_get_dma_data(dai, substream); 1001 if (!dma) { 1002 dev_err(dai->dev, "failed to get dma data in %s\n", 1003 __func__); 1004 return -EIO; 1005 } 1006 1007 if (dma->suspended) { 1008 dma->suspended = false; 1009 1010 /* 1011 * .prepare() is called after system resume, where we 1012 * need to reinitialize the SHIM/ALH/Cadence IP. 
1013 * .prepare() is also called to deal with underflows, 1014 * but in those cases we cannot touch ALH/SHIM 1015 * registers 1016 */ 1017 1018 /* configure stream */ 1019 ch = params_channels(dma->hw_params); 1020 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) 1021 dir = SDW_DATA_DIR_RX; 1022 else 1023 dir = SDW_DATA_DIR_TX; 1024 1025 intel_pdi_shim_configure(sdw, dma->pdi); 1026 intel_pdi_alh_configure(sdw, dma->pdi); 1027 sdw_cdns_config_stream(cdns, ch, dir, dma->pdi); 1028 1029 /* Inform DSP about PDI stream number */ 1030 ret = intel_params_stream(sdw, substream, dai, 1031 dma->hw_params, 1032 sdw->instance, 1033 dma->pdi->intel_alh_id); 1034 } 1035 1036 return ret; 1037 } 1038 1039 static int 1040 intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) 1041 { 1042 struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai); 1043 struct sdw_intel *sdw = cdns_to_intel(cdns); 1044 struct sdw_cdns_dma_data *dma; 1045 int ret; 1046 1047 dma = snd_soc_dai_get_dma_data(dai, substream); 1048 if (!dma) 1049 return -EIO; 1050 1051 /* 1052 * The sdw stream state will transition to RELEASED when stream-> 1053 * master_list is empty. So the stream state will transition to 1054 * DEPREPARED for the first cpu-dai and to RELEASED for the last 1055 * cpu-dai. 
1056 */ 1057 ret = sdw_stream_remove_master(&cdns->bus, dma->stream); 1058 if (ret < 0) { 1059 dev_err(dai->dev, "remove master from stream %s failed: %d\n", 1060 dma->stream->name, ret); 1061 return ret; 1062 } 1063 1064 ret = intel_free_stream(sdw, substream, dai, sdw->instance); 1065 if (ret < 0) { 1066 dev_err(dai->dev, "intel_free_stream: failed %d\n", ret); 1067 return ret; 1068 } 1069 1070 dma->hw_params = NULL; 1071 dma->pdi = NULL; 1072 1073 return 0; 1074 } 1075 1076 static void intel_shutdown(struct snd_pcm_substream *substream, 1077 struct snd_soc_dai *dai) 1078 { 1079 struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai); 1080 1081 pm_runtime_mark_last_busy(cdns->dev); 1082 pm_runtime_put_autosuspend(cdns->dev); 1083 } 1084 1085 static int intel_component_dais_suspend(struct snd_soc_component *component) 1086 { 1087 struct sdw_cdns_dma_data *dma; 1088 struct snd_soc_dai *dai; 1089 1090 for_each_component_dais(component, dai) { 1091 /* 1092 * we don't have a .suspend dai_ops, and we don't have access 1093 * to the substream, so let's mark both capture and playback 1094 * DMA contexts as suspended 1095 */ 1096 dma = dai->playback_dma_data; 1097 if (dma) 1098 dma->suspended = true; 1099 1100 dma = dai->capture_dma_data; 1101 if (dma) 1102 dma->suspended = true; 1103 } 1104 1105 return 0; 1106 } 1107 1108 static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai, 1109 void *stream, int direction) 1110 { 1111 return cdns_set_sdw_stream(dai, stream, true, direction); 1112 } 1113 1114 static int intel_pdm_set_sdw_stream(struct snd_soc_dai *dai, 1115 void *stream, int direction) 1116 { 1117 return cdns_set_sdw_stream(dai, stream, false, direction); 1118 } 1119 1120 static void *intel_get_sdw_stream(struct snd_soc_dai *dai, 1121 int direction) 1122 { 1123 struct sdw_cdns_dma_data *dma; 1124 1125 if (direction == SNDRV_PCM_STREAM_PLAYBACK) 1126 dma = dai->playback_dma_data; 1127 else 1128 dma = dai->capture_dma_data; 1129 1130 if (!dma) 1131 return 
ERR_PTR(-EINVAL);

	return dma->stream;
}

static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
	.startup = intel_startup,
	.hw_params = intel_hw_params,
	.prepare = intel_prepare,
	.hw_free = intel_hw_free,
	.shutdown = intel_shutdown,
	.set_sdw_stream = intel_pcm_set_sdw_stream,
	.get_sdw_stream = intel_get_sdw_stream,
};

static const struct snd_soc_dai_ops intel_pdm_dai_ops = {
	.startup = intel_startup,
	.hw_params = intel_hw_params,
	.prepare = intel_prepare,
	.hw_free = intel_hw_free,
	.shutdown = intel_shutdown,
	.set_sdw_stream = intel_pdm_set_sdw_stream,
	.get_sdw_stream = intel_get_sdw_stream,
};

static const struct snd_soc_component_driver dai_component = {
	.name = "soundwire",
	.suspend = intel_component_dais_suspend
};

/*
 * Fill 'num' DAI drivers of the given PDI type, starting at index 'off'
 * in the 'dais' array: allocate a "SDW%d Pin%d" name, set channel
 * capabilities (playback for BD/OUT, capture for BD/IN) and select the
 * PCM or PDM ops. Returns 0 or -ENOMEM if a name allocation fails.
 */
static int intel_create_dai(struct sdw_cdns *cdns,
			    struct snd_soc_dai_driver *dais,
			    enum intel_pdi_type type,
			    u32 num, u32 off, u32 max_ch, bool pcm)
{
	int i;

	if (num == 0)
		return 0;

	/* TODO: Read supported rates/formats from hardware */
	for (i = off; i < (off + num); i++) {
		dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL,
					      "SDW%d Pin%d",
					      cdns->instance, i);
		if (!dais[i].name)
			return -ENOMEM;

		if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
			dais[i].playback.channels_min = 1;
			dais[i].playback.channels_max = max_ch;
			dais[i].playback.rates = SNDRV_PCM_RATE_48000;
			dais[i].playback.formats = SNDRV_PCM_FMTBIT_S16_LE;
		}

		if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
			dais[i].capture.channels_min = 1;
			dais[i].capture.channels_max = max_ch;
			dais[i].capture.rates = SNDRV_PCM_RATE_48000;
			dais[i].capture.formats = SNDRV_PCM_FMTBIT_S16_LE;
		}

		if (pcm)
			dais[i].ops = &intel_pcm_dai_ops;
		else
			dais[i].ops = &intel_pdm_dai_ops;
	}

	return 0;
}

/*
 * Create one DAI per PDI supported by the Cadence IP (PCM in/out/bd
 * first, then PDM in/out/bd) and register the whole set with ASoC as a
 * single "soundwire" component. Name allocations are devm-managed.
 */
static int intel_register_dai(struct sdw_intel *sdw)
{
	struct sdw_cdns *cdns = &sdw->cdns;
	struct sdw_cdns_streams *stream;
	struct snd_soc_dai_driver *dais;
	int num_dai, ret, off = 0;

	/* DAIs are created based on total number of PDIs supported */
	num_dai = cdns->pcm.num_pdi + cdns->pdm.num_pdi;

	dais = devm_kcalloc(cdns->dev, num_dai, sizeof(*dais), GFP_KERNEL);
	if (!dais)
		return -ENOMEM;

	/* Create PCM DAIs */
	stream = &cdns->pcm;

	ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in,
			       off, stream->num_ch_in, true);
	if (ret)
		return ret;

	off += cdns->pcm.num_in;
	ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pcm.num_out,
			       off, stream->num_ch_out, true);
	if (ret)
		return ret;

	off += cdns->pcm.num_out;
	ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pcm.num_bd,
			       off, stream->num_ch_bd, true);
	if (ret)
		return ret;

	/* Create PDM DAIs */
	stream = &cdns->pdm;
	off += cdns->pcm.num_bd;
	ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pdm.num_in,
			       off, stream->num_ch_in, false);
	if (ret)
		return ret;

	off += cdns->pdm.num_in;
	ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pdm.num_out,
			       off, stream->num_ch_out, false);
	if (ret)
		return ret;

	off += cdns->pdm.num_out;
	ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pdm.num_bd,
			       off, stream->num_ch_bd, false);
	if (ret)
		return ret;

	/* registration is the last step: on any failure above, nothing is registered */
	return snd_soc_register_component(cdns->dev, &dai_component,
					  dais, num_dai);
}

/*
 * Read the Intel-specific firmware properties for this link (IP clock,
 * quirk mask) from the per-link subproperties node and fill bus->prop.
 */
static int sdw_master_read_intel_prop(struct sdw_bus *bus)
{
	struct sdw_master_prop *prop = &bus->prop;
	struct fwnode_handle *link;
	char name[32];
	u32 quirk_mask;

	/* Find master handle */
	snprintf(name, sizeof(name),
"mipi-sdw-link-%d-subproperties", bus->link_id); 1270 1271 link = device_get_named_child_node(bus->dev, name); 1272 if (!link) { 1273 dev_err(bus->dev, "Master node %s not found\n", name); 1274 return -EIO; 1275 } 1276 1277 fwnode_property_read_u32(link, 1278 "intel-sdw-ip-clock", 1279 &prop->mclk_freq); 1280 1281 /* the values reported by BIOS are the 2x clock, not the bus clock */ 1282 prop->mclk_freq /= 2; 1283 1284 fwnode_property_read_u32(link, 1285 "intel-quirk-mask", 1286 &quirk_mask); 1287 1288 if (quirk_mask & SDW_INTEL_QUIRK_MASK_BUS_DISABLE) 1289 prop->hw_disabled = true; 1290 1291 prop->quirks = SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH | 1292 SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY; 1293 1294 return 0; 1295 } 1296 1297 static int intel_prop_read(struct sdw_bus *bus) 1298 { 1299 /* Initialize with default handler to read all DisCo properties */ 1300 sdw_master_read_prop(bus); 1301 1302 /* read Intel-specific properties */ 1303 sdw_master_read_intel_prop(bus); 1304 1305 return 0; 1306 } 1307 1308 static struct sdw_master_ops sdw_intel_ops = { 1309 .read_prop = sdw_master_read_prop, 1310 .override_adr = sdw_dmi_override_adr, 1311 .xfer_msg = cdns_xfer_msg, 1312 .xfer_msg_defer = cdns_xfer_msg_defer, 1313 .reset_page_addr = cdns_reset_page_addr, 1314 .set_bus_conf = cdns_bus_conf, 1315 .pre_bank_switch = intel_pre_bank_switch, 1316 .post_bank_switch = intel_post_bank_switch, 1317 }; 1318 1319 static int intel_init(struct sdw_intel *sdw) 1320 { 1321 bool clock_stop; 1322 1323 /* Initialize shim and controller */ 1324 intel_link_power_up(sdw); 1325 1326 clock_stop = sdw_cdns_is_clock_stop(&sdw->cdns); 1327 1328 intel_shim_init(sdw, clock_stop); 1329 1330 return 0; 1331 } 1332 1333 /* 1334 * probe and init (aux_dev_id argument is required by function prototype but not used) 1335 */ 1336 static int intel_link_probe(struct auxiliary_device *auxdev, 1337 const struct auxiliary_device_id *aux_dev_id) 1338 1339 { 1340 struct device *dev = &auxdev->dev; 1341 struct 
sdw_intel_link_dev *ldev = auxiliary_dev_to_sdw_intel_link_dev(auxdev);
	struct sdw_intel *sdw;
	struct sdw_cdns *cdns;
	struct sdw_bus *bus;
	int ret;

	sdw = devm_kzalloc(dev, sizeof(*sdw), GFP_KERNEL);
	if (!sdw)
		return -ENOMEM;

	cdns = &sdw->cdns;
	bus = &cdns->bus;

	/* wire up the per-link resources provided by the parent driver */
	sdw->instance = auxdev->id;
	sdw->link_res = &ldev->link_res;
	cdns->dev = dev;
	cdns->registers = sdw->link_res->registers;
	cdns->instance = sdw->instance;
	cdns->msg_count = 0;

	bus->link_id = auxdev->id;

	sdw_cdns_probe(cdns);

	/* Set property read ops */
	sdw_intel_ops.read_prop = intel_prop_read;
	bus->ops = &sdw_intel_ops;

	/* set driver data, accessed by snd_soc_dai_get_drvdata() */
	dev_set_drvdata(dev, cdns);

	/* use generic bandwidth allocation algorithm */
	sdw->cdns.bus.compute_params = sdw_compute_params;

	ret = sdw_bus_master_add(bus, dev, dev->fwnode);
	if (ret) {
		dev_err(dev, "sdw_bus_master_add fail: %d\n", ret);
		return ret;
	}

	if (bus->prop.hw_disabled)
		dev_info(dev,
			 "SoundWire master %d is disabled, will be ignored\n",
			 bus->link_id);
	/*
	 * Ignore BIOS err_threshold, it's a really bad idea when dealing
	 * with multiple hardware synchronized links
	 */
	bus->prop.err_threshold = 0;

	return 0;
}

/*
 * Second-stage initialization, invoked by the parent driver after probe:
 * powers up the link, initializes the Cadence IP and PDIs, registers the
 * DAIs and enables runtime PM.
 */
int intel_link_startup(struct auxiliary_device *auxdev)
{
	struct sdw_cdns_stream_config config;
	struct device *dev = &auxdev->dev;
	struct sdw_cdns *cdns = dev_get_drvdata(dev);
	struct sdw_intel *sdw = cdns_to_intel(cdns);
	struct sdw_bus *bus = &cdns->bus;
	int link_flags;
	bool multi_link;
	u32 clock_stop_quirks;
	int ret;

	if (bus->prop.hw_disabled) {
		dev_info(dev,
			 "SoundWire master %d is disabled, ignoring\n",
			 sdw->instance);
		return 0;
	}

	/* per-link debug flags: 8 bits per master, master0 in the ls-byte */
	link_flags = md_flags >>
(bus->link_id * 8); 1414 multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK); 1415 if (!multi_link) { 1416 dev_dbg(dev, "Multi-link is disabled\n"); 1417 bus->multi_link = false; 1418 } else { 1419 /* 1420 * hardware-based synchronization is required regardless 1421 * of the number of segments used by a stream: SSP-based 1422 * synchronization is gated by gsync when the multi-master 1423 * mode is set. 1424 */ 1425 bus->multi_link = true; 1426 bus->hw_sync_min_links = 1; 1427 } 1428 1429 /* Initialize shim, controller */ 1430 ret = intel_init(sdw); 1431 if (ret) 1432 goto err_init; 1433 1434 /* Read the PDI config and initialize cadence PDI */ 1435 intel_pdi_init(sdw, &config); 1436 ret = sdw_cdns_pdi_init(cdns, config); 1437 if (ret) 1438 goto err_init; 1439 1440 intel_pdi_ch_update(sdw); 1441 1442 ret = sdw_cdns_enable_interrupt(cdns, true); 1443 if (ret < 0) { 1444 dev_err(dev, "cannot enable interrupts\n"); 1445 goto err_init; 1446 } 1447 1448 /* 1449 * follow recommended programming flows to avoid timeouts when 1450 * gsync is enabled 1451 */ 1452 if (multi_link) 1453 intel_shim_sync_arm(sdw); 1454 1455 ret = sdw_cdns_init(cdns); 1456 if (ret < 0) { 1457 dev_err(dev, "unable to initialize Cadence IP\n"); 1458 goto err_interrupt; 1459 } 1460 1461 ret = sdw_cdns_exit_reset(cdns); 1462 if (ret < 0) { 1463 dev_err(dev, "unable to exit bus reset sequence\n"); 1464 goto err_interrupt; 1465 } 1466 1467 if (multi_link) { 1468 ret = intel_shim_sync_go(sdw); 1469 if (ret < 0) { 1470 dev_err(dev, "sync go failed: %d\n", ret); 1471 goto err_interrupt; 1472 } 1473 } 1474 sdw_cdns_check_self_clearing_bits(cdns, __func__, 1475 true, INTEL_MASTER_RESET_ITERATIONS); 1476 1477 /* Register DAIs */ 1478 ret = intel_register_dai(sdw); 1479 if (ret) { 1480 dev_err(dev, "DAI registration failed: %d\n", ret); 1481 snd_soc_unregister_component(dev); 1482 goto err_interrupt; 1483 } 1484 1485 intel_debugfs_init(sdw); 1486 1487 /* Enable runtime PM */ 1488 if (!(link_flags & 
SDW_INTEL_MASTER_DISABLE_PM_RUNTIME)) { 1489 pm_runtime_set_autosuspend_delay(dev, 1490 INTEL_MASTER_SUSPEND_DELAY_MS); 1491 pm_runtime_use_autosuspend(dev); 1492 pm_runtime_mark_last_busy(dev); 1493 1494 pm_runtime_set_active(dev); 1495 pm_runtime_enable(dev); 1496 } 1497 1498 clock_stop_quirks = sdw->link_res->clock_stop_quirks; 1499 if (clock_stop_quirks & SDW_INTEL_CLK_STOP_NOT_ALLOWED) { 1500 /* 1501 * To keep the clock running we need to prevent 1502 * pm_runtime suspend from happening by increasing the 1503 * reference count. 1504 * This quirk is specified by the parent PCI device in 1505 * case of specific latency requirements. It will have 1506 * no effect if pm_runtime is disabled by the user via 1507 * a module parameter for testing purposes. 1508 */ 1509 pm_runtime_get_noresume(dev); 1510 } 1511 1512 /* 1513 * The runtime PM status of Slave devices is "Unsupported" 1514 * until they report as ATTACHED. If they don't, e.g. because 1515 * there are no Slave devices populated or if the power-on is 1516 * delayed or dependent on a power switch, the Master will 1517 * remain active and prevent its parent from suspending. 1518 * 1519 * Conditionally force the pm_runtime core to re-evaluate the 1520 * Master status in the absence of any Slave activity. A quirk 1521 * is provided to e.g. deal with Slaves that may be powered on 1522 * with a delay. A more complete solution would require the 1523 * definition of Master properties. 
	 */
	if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE))
		pm_runtime_idle(dev);

	sdw->startup_done = true;
	return 0;

err_interrupt:
	sdw_cdns_enable_interrupt(cdns, false);
err_init:
	return ret;
}

/*
 * Teardown: unwind what startup/probe set up (debugfs, interrupts, ASoC
 * component) when the link is enabled, then delete the bus master.
 */
static void intel_link_remove(struct auxiliary_device *auxdev)
{
	struct device *dev = &auxdev->dev;
	struct sdw_cdns *cdns = dev_get_drvdata(dev);
	struct sdw_intel *sdw = cdns_to_intel(cdns);
	struct sdw_bus *bus = &cdns->bus;

	/*
	 * Since pm_runtime is already disabled, we don't decrease
	 * the refcount when the clock_stop_quirk is
	 * SDW_INTEL_CLK_STOP_NOT_ALLOWED
	 */
	if (!bus->prop.hw_disabled) {
		intel_debugfs_exit(sdw);
		sdw_cdns_enable_interrupt(cdns, false);
		snd_soc_unregister_component(dev);
	}
	sdw_bus_master_delete(bus);
}

/*
 * Called by the parent driver when a wake event is reported for this
 * link: check WAKESTS, disable further wakes and resume the master.
 */
int intel_link_process_wakeen_event(struct auxiliary_device *auxdev)
{
	struct device *dev = &auxdev->dev;
	struct sdw_intel *sdw;
	struct sdw_bus *bus;
	void __iomem *shim;
	u16 wake_sts;

	/*
	 * drvdata holds the cdns pointer (see intel_link_probe); treating
	 * it as struct sdw_intel relies on cdns being the first member of
	 * struct sdw_intel — NOTE(review): confirm layout in intel.h
	 */
	sdw = dev_get_drvdata(dev);
	bus = &sdw->cdns.bus;

	if (bus->prop.hw_disabled || !sdw->startup_done) {
		dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n",
			bus->link_id);
		return 0;
	}

	shim = sdw->link_res->shim;
	wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);

	if (!(wake_sts & BIT(sdw->instance)))
		return 0;

	/* disable WAKEEN interrupt ASAP to prevent interrupt flood */
	intel_shim_wake(sdw, false);

	/*
	 * resume the Master, which will generate a bus reset and result in
	 * Slaves re-attaching and be re-enumerated. The SoundWire physical
	 * device which generated the wake will trigger an interrupt, which
	 * will in turn cause the corresponding Linux Slave device to be
	 * resumed and the Slave codec driver to check the status.
	 */
	pm_request_resume(dev);

	return 0;
}

/*
 * PM calls
 */

/*
 * device_for_each_child() callback: async-resume a Slave child device,
 * skipping devices that never probed or were never seen on the bus.
 */
static int intel_resume_child_device(struct device *dev, void *data)
{
	int ret;
	struct sdw_slave *slave = dev_to_sdw_dev(dev);

	if (!slave->probed) {
		dev_dbg(dev, "%s: skipping device, no probed driver\n", __func__);
		return 0;
	}
	if (!slave->dev_num_sticky) {
		dev_dbg(dev, "%s: skipping device, never detected on bus\n", __func__);
		return 0;
	}

	ret = pm_request_resume(dev);
	if (ret < 0)
		dev_err(dev, "%s: pm_request_resume failed: %d\n", __func__, ret);

	return ret;
}

/*
 * System suspend .prepare: if the link is runtime-suspended with clock
 * stop active, resume the link and all its children to full power so the
 * SHIM wake can be managed; always returns 0 so suspend proceeds.
 */
static int __maybe_unused intel_pm_prepare(struct device *dev)
{
	struct sdw_cdns *cdns = dev_get_drvdata(dev);
	struct sdw_intel *sdw = cdns_to_intel(cdns);
	struct sdw_bus *bus = &cdns->bus;
	u32 clock_stop_quirks;
	int ret = 0;

	if (bus->prop.hw_disabled || !sdw->startup_done) {
		dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n",
			bus->link_id);
		return 0;
	}

	clock_stop_quirks = sdw->link_res->clock_stop_quirks;

	if (pm_runtime_suspended(dev) &&
	    pm_runtime_suspended(dev->parent) &&
	    ((clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) ||
	     !clock_stop_quirks)) {
		/*
		 * if we've enabled clock stop, and the parent is suspended, the SHIM registers
		 * are not accessible and the shim wake cannot be disabled.
		 * The only solution is to resume the entire bus to full power
		 */

		/*
		 * If any operation in this block fails, we keep going since we don't want
		 * to prevent system suspend from happening and errors should be recoverable
		 * on resume.
		 */

		/*
		 * first resume the device for this link. This will also by construction
		 * resume the PCI parent device.
1655 */ 1656 ret = pm_request_resume(dev); 1657 if (ret < 0) { 1658 dev_err(dev, "%s: pm_request_resume failed: %d\n", __func__, ret); 1659 return 0; 1660 } 1661 1662 /* 1663 * Continue resuming the entire bus (parent + child devices) to exit 1664 * the clock stop mode. If there are no devices connected on this link 1665 * this is a no-op. 1666 * The resume to full power could have been implemented with a .prepare 1667 * step in SoundWire codec drivers. This would however require a lot 1668 * of code to handle an Intel-specific corner case. It is simpler in 1669 * practice to add a loop at the link level. 1670 */ 1671 ret = device_for_each_child(bus->dev, NULL, intel_resume_child_device); 1672 1673 if (ret < 0) 1674 dev_err(dev, "%s: intel_resume_child_device failed: %d\n", __func__, ret); 1675 } 1676 1677 return 0; 1678 } 1679 1680 static int __maybe_unused intel_suspend(struct device *dev) 1681 { 1682 struct sdw_cdns *cdns = dev_get_drvdata(dev); 1683 struct sdw_intel *sdw = cdns_to_intel(cdns); 1684 struct sdw_bus *bus = &cdns->bus; 1685 u32 clock_stop_quirks; 1686 int ret; 1687 1688 if (bus->prop.hw_disabled || !sdw->startup_done) { 1689 dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n", 1690 bus->link_id); 1691 return 0; 1692 } 1693 1694 if (pm_runtime_suspended(dev)) { 1695 dev_dbg(dev, "%s: pm_runtime status: suspended\n", __func__); 1696 1697 clock_stop_quirks = sdw->link_res->clock_stop_quirks; 1698 1699 if ((clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) || 1700 !clock_stop_quirks) { 1701 1702 if (pm_runtime_suspended(dev->parent)) { 1703 /* 1704 * paranoia check: this should not happen with the .prepare 1705 * resume to full power 1706 */ 1707 dev_err(dev, "%s: invalid config: parent is suspended\n", __func__); 1708 } else { 1709 intel_shim_wake(sdw, false); 1710 } 1711 } 1712 1713 return 0; 1714 } 1715 1716 ret = sdw_cdns_enable_interrupt(cdns, false); 1717 if (ret < 0) { 1718 dev_err(dev, "cannot disable interrupts on 
suspend\n"); 1719 return ret; 1720 } 1721 1722 ret = intel_link_power_down(sdw); 1723 if (ret) { 1724 dev_err(dev, "Link power down failed: %d\n", ret); 1725 return ret; 1726 } 1727 1728 intel_shim_wake(sdw, false); 1729 1730 return 0; 1731 } 1732 1733 static int __maybe_unused intel_suspend_runtime(struct device *dev) 1734 { 1735 struct sdw_cdns *cdns = dev_get_drvdata(dev); 1736 struct sdw_intel *sdw = cdns_to_intel(cdns); 1737 struct sdw_bus *bus = &cdns->bus; 1738 u32 clock_stop_quirks; 1739 int ret; 1740 1741 if (bus->prop.hw_disabled || !sdw->startup_done) { 1742 dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n", 1743 bus->link_id); 1744 return 0; 1745 } 1746 1747 clock_stop_quirks = sdw->link_res->clock_stop_quirks; 1748 1749 if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) { 1750 1751 ret = sdw_cdns_enable_interrupt(cdns, false); 1752 if (ret < 0) { 1753 dev_err(dev, "cannot disable interrupts on suspend\n"); 1754 return ret; 1755 } 1756 1757 ret = intel_link_power_down(sdw); 1758 if (ret) { 1759 dev_err(dev, "Link power down failed: %d\n", ret); 1760 return ret; 1761 } 1762 1763 intel_shim_wake(sdw, false); 1764 1765 } else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET || 1766 !clock_stop_quirks) { 1767 bool wake_enable = true; 1768 1769 ret = sdw_cdns_clock_stop(cdns, true); 1770 if (ret < 0) { 1771 dev_err(dev, "cannot enable clock stop on suspend\n"); 1772 wake_enable = false; 1773 } 1774 1775 ret = sdw_cdns_enable_interrupt(cdns, false); 1776 if (ret < 0) { 1777 dev_err(dev, "cannot disable interrupts on suspend\n"); 1778 return ret; 1779 } 1780 1781 ret = intel_link_power_down(sdw); 1782 if (ret) { 1783 dev_err(dev, "Link power down failed: %d\n", ret); 1784 return ret; 1785 } 1786 1787 intel_shim_wake(sdw, wake_enable); 1788 } else { 1789 dev_err(dev, "%s clock_stop_quirks %x unsupported\n", 1790 __func__, clock_stop_quirks); 1791 ret = -EINVAL; 1792 } 1793 1794 return ret; 1795 } 1796 1797 static int __maybe_unused 
intel_resume(struct device *dev) 1798 { 1799 struct sdw_cdns *cdns = dev_get_drvdata(dev); 1800 struct sdw_intel *sdw = cdns_to_intel(cdns); 1801 struct sdw_bus *bus = &cdns->bus; 1802 int link_flags; 1803 bool multi_link; 1804 int ret; 1805 1806 if (bus->prop.hw_disabled || !sdw->startup_done) { 1807 dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n", 1808 bus->link_id); 1809 return 0; 1810 } 1811 1812 link_flags = md_flags >> (bus->link_id * 8); 1813 multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK); 1814 1815 if (pm_runtime_suspended(dev)) { 1816 dev_dbg(dev, "%s: pm_runtime status was suspended, forcing active\n", __func__); 1817 1818 /* follow required sequence from runtime_pm.rst */ 1819 pm_runtime_disable(dev); 1820 pm_runtime_set_active(dev); 1821 pm_runtime_mark_last_busy(dev); 1822 pm_runtime_enable(dev); 1823 1824 link_flags = md_flags >> (bus->link_id * 8); 1825 1826 if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE)) 1827 pm_runtime_idle(dev); 1828 } 1829 1830 ret = intel_init(sdw); 1831 if (ret) { 1832 dev_err(dev, "%s failed: %d\n", __func__, ret); 1833 return ret; 1834 } 1835 1836 /* 1837 * make sure all Slaves are tagged as UNATTACHED and provide 1838 * reason for reinitialization 1839 */ 1840 sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET); 1841 1842 ret = sdw_cdns_enable_interrupt(cdns, true); 1843 if (ret < 0) { 1844 dev_err(dev, "cannot enable interrupts during resume\n"); 1845 return ret; 1846 } 1847 1848 /* 1849 * follow recommended programming flows to avoid timeouts when 1850 * gsync is enabled 1851 */ 1852 if (multi_link) 1853 intel_shim_sync_arm(sdw); 1854 1855 ret = sdw_cdns_init(&sdw->cdns); 1856 if (ret < 0) { 1857 dev_err(dev, "unable to initialize Cadence IP during resume\n"); 1858 return ret; 1859 } 1860 1861 ret = sdw_cdns_exit_reset(cdns); 1862 if (ret < 0) { 1863 dev_err(dev, "unable to exit bus reset sequence during resume\n"); 1864 return ret; 1865 } 1866 1867 if 
(multi_link) { 1868 ret = intel_shim_sync_go(sdw); 1869 if (ret < 0) { 1870 dev_err(dev, "sync go failed during resume\n"); 1871 return ret; 1872 } 1873 } 1874 sdw_cdns_check_self_clearing_bits(cdns, __func__, 1875 true, INTEL_MASTER_RESET_ITERATIONS); 1876 1877 /* 1878 * after system resume, the pm_runtime suspend() may kick in 1879 * during the enumeration, before any children device force the 1880 * master device to remain active. Using pm_runtime_get() 1881 * routines is not really possible, since it'd prevent the 1882 * master from suspending. 1883 * A reasonable compromise is to update the pm_runtime 1884 * counters and delay the pm_runtime suspend by several 1885 * seconds, by when all enumeration should be complete. 1886 */ 1887 pm_runtime_mark_last_busy(dev); 1888 1889 return ret; 1890 } 1891 1892 static int __maybe_unused intel_resume_runtime(struct device *dev) 1893 { 1894 struct sdw_cdns *cdns = dev_get_drvdata(dev); 1895 struct sdw_intel *sdw = cdns_to_intel(cdns); 1896 struct sdw_bus *bus = &cdns->bus; 1897 u32 clock_stop_quirks; 1898 bool clock_stop0; 1899 int link_flags; 1900 bool multi_link; 1901 int status; 1902 int ret; 1903 1904 if (bus->prop.hw_disabled || !sdw->startup_done) { 1905 dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n", 1906 bus->link_id); 1907 return 0; 1908 } 1909 1910 link_flags = md_flags >> (bus->link_id * 8); 1911 multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK); 1912 1913 clock_stop_quirks = sdw->link_res->clock_stop_quirks; 1914 1915 if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) { 1916 ret = intel_init(sdw); 1917 if (ret) { 1918 dev_err(dev, "%s failed: %d\n", __func__, ret); 1919 return ret; 1920 } 1921 1922 /* 1923 * make sure all Slaves are tagged as UNATTACHED and provide 1924 * reason for reinitialization 1925 */ 1926 sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET); 1927 1928 ret = sdw_cdns_enable_interrupt(cdns, true); 1929 if (ret < 0) { 1930 dev_err(dev, 
"cannot enable interrupts during resume\n"); 1931 return ret; 1932 } 1933 1934 /* 1935 * follow recommended programming flows to avoid 1936 * timeouts when gsync is enabled 1937 */ 1938 if (multi_link) 1939 intel_shim_sync_arm(sdw); 1940 1941 ret = sdw_cdns_init(&sdw->cdns); 1942 if (ret < 0) { 1943 dev_err(dev, "unable to initialize Cadence IP during resume\n"); 1944 return ret; 1945 } 1946 1947 ret = sdw_cdns_exit_reset(cdns); 1948 if (ret < 0) { 1949 dev_err(dev, "unable to exit bus reset sequence during resume\n"); 1950 return ret; 1951 } 1952 1953 if (multi_link) { 1954 ret = intel_shim_sync_go(sdw); 1955 if (ret < 0) { 1956 dev_err(dev, "sync go failed during resume\n"); 1957 return ret; 1958 } 1959 } 1960 sdw_cdns_check_self_clearing_bits(cdns, "intel_resume_runtime TEARDOWN", 1961 true, INTEL_MASTER_RESET_ITERATIONS); 1962 1963 } else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) { 1964 ret = intel_init(sdw); 1965 if (ret) { 1966 dev_err(dev, "%s failed: %d\n", __func__, ret); 1967 return ret; 1968 } 1969 1970 /* 1971 * An exception condition occurs for the CLK_STOP_BUS_RESET 1972 * case if one or more masters remain active. In this condition, 1973 * all the masters are powered on for they are in the same power 1974 * domain. Master can preserve its context for clock stop0, so 1975 * there is no need to clear slave status and reset bus. 
1976 */ 1977 clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns); 1978 1979 if (!clock_stop0) { 1980 1981 /* 1982 * make sure all Slaves are tagged as UNATTACHED and 1983 * provide reason for reinitialization 1984 */ 1985 1986 status = SDW_UNATTACH_REQUEST_MASTER_RESET; 1987 sdw_clear_slave_status(bus, status); 1988 1989 ret = sdw_cdns_enable_interrupt(cdns, true); 1990 if (ret < 0) { 1991 dev_err(dev, "cannot enable interrupts during resume\n"); 1992 return ret; 1993 } 1994 1995 /* 1996 * follow recommended programming flows to avoid 1997 * timeouts when gsync is enabled 1998 */ 1999 if (multi_link) 2000 intel_shim_sync_arm(sdw); 2001 2002 /* 2003 * Re-initialize the IP since it was powered-off 2004 */ 2005 sdw_cdns_init(&sdw->cdns); 2006 2007 } else { 2008 ret = sdw_cdns_enable_interrupt(cdns, true); 2009 if (ret < 0) { 2010 dev_err(dev, "cannot enable interrupts during resume\n"); 2011 return ret; 2012 } 2013 } 2014 2015 ret = sdw_cdns_clock_restart(cdns, !clock_stop0); 2016 if (ret < 0) { 2017 dev_err(dev, "unable to restart clock during resume\n"); 2018 return ret; 2019 } 2020 2021 if (!clock_stop0) { 2022 ret = sdw_cdns_exit_reset(cdns); 2023 if (ret < 0) { 2024 dev_err(dev, "unable to exit bus reset sequence during resume\n"); 2025 return ret; 2026 } 2027 2028 if (multi_link) { 2029 ret = intel_shim_sync_go(sdw); 2030 if (ret < 0) { 2031 dev_err(sdw->cdns.dev, "sync go failed during resume\n"); 2032 return ret; 2033 } 2034 } 2035 } 2036 sdw_cdns_check_self_clearing_bits(cdns, "intel_resume_runtime BUS_RESET", 2037 true, INTEL_MASTER_RESET_ITERATIONS); 2038 2039 } else if (!clock_stop_quirks) { 2040 2041 clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns); 2042 if (!clock_stop0) 2043 dev_err(dev, "%s invalid configuration, clock was not stopped", __func__); 2044 2045 ret = intel_init(sdw); 2046 if (ret) { 2047 dev_err(dev, "%s failed: %d\n", __func__, ret); 2048 return ret; 2049 } 2050 2051 ret = sdw_cdns_enable_interrupt(cdns, true); 2052 if (ret < 0) { 2053 
dev_err(dev, "cannot enable interrupts during resume\n"); 2054 return ret; 2055 } 2056 2057 ret = sdw_cdns_clock_restart(cdns, false); 2058 if (ret < 0) { 2059 dev_err(dev, "unable to resume master during resume\n"); 2060 return ret; 2061 } 2062 2063 sdw_cdns_check_self_clearing_bits(cdns, "intel_resume_runtime no_quirks", 2064 true, INTEL_MASTER_RESET_ITERATIONS); 2065 } else { 2066 dev_err(dev, "%s clock_stop_quirks %x unsupported\n", 2067 __func__, clock_stop_quirks); 2068 ret = -EINVAL; 2069 } 2070 2071 return ret; 2072 } 2073 2074 static const struct dev_pm_ops intel_pm = { 2075 .prepare = intel_pm_prepare, 2076 SET_SYSTEM_SLEEP_PM_OPS(intel_suspend, intel_resume) 2077 SET_RUNTIME_PM_OPS(intel_suspend_runtime, intel_resume_runtime, NULL) 2078 }; 2079 2080 static const struct auxiliary_device_id intel_link_id_table[] = { 2081 { .name = "soundwire_intel.link" }, 2082 {}, 2083 }; 2084 MODULE_DEVICE_TABLE(auxiliary, intel_link_id_table); 2085 2086 static struct auxiliary_driver sdw_intel_drv = { 2087 .probe = intel_link_probe, 2088 .remove = intel_link_remove, 2089 .driver = { 2090 /* auxiliary_driver_register() sets .name to be the modname */ 2091 .pm = &intel_pm, 2092 }, 2093 .id_table = intel_link_id_table 2094 }; 2095 module_auxiliary_driver(sdw_intel_drv); 2096 2097 MODULE_LICENSE("Dual BSD/GPL"); 2098 MODULE_DESCRIPTION("Intel Soundwire Link Driver"); 2099