xref: /linux/drivers/soundwire/intel.c (revision 46e6acfe3501fa938af9c5bd730f0020235b08a2)
1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 // Copyright(c) 2015-17 Intel Corporation.
3 
4 /*
5  * SoundWire Intel Master Driver
6  */
7 
8 #include <linux/acpi.h>
9 #include <linux/cleanup.h>
10 #include <linux/debugfs.h>
11 #include <linux/delay.h>
12 #include <linux/io.h>
13 #include <sound/pcm_params.h>
14 #include <linux/pm_runtime.h>
15 #include <sound/soc.h>
16 #include <linux/soundwire/sdw_registers.h>
17 #include <linux/soundwire/sdw.h>
18 #include <linux/soundwire/sdw_intel.h>
19 #include "cadence_master.h"
20 #include "bus.h"
21 #include "intel.h"
22 
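/*
 * Poll a register until the bits selected by 'mask' read back as 'target'.
 * The helpers below retry up to 10 times with a 50-100 us sleep between
 * reads, i.e. the total timeout is roughly 0.5 to 1 ms; -EAGAIN is returned
 * on timeout.
 */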
23 static int intel_wait_bit(void __iomem *base, int offset, u32 mask, u32 target)
24 {
25 	int timeout = 10;
26 	u32 reg_read;
27 
28 	do {
29 		reg_read = readl(base + offset);
30 		if ((reg_read & mask) == target)
31 			return 0;
32 
33 		timeout--;
34 		usleep_range(50, 100);
35 	} while (timeout != 0);
36 
37 	return -EAGAIN;
38 }
39 
40 static int intel_clear_bit(void __iomem *base, int offset, u32 value, u32 mask)
41 {
42 	writel(value, base + offset);
43 	return intel_wait_bit(base, offset, mask, 0);
44 }
45 
46 static int intel_set_bit(void __iomem *base, int offset, u32 value, u32 mask)
47 {
48 	writel(value, base + offset);
49 	return intel_wait_bit(base, offset, mask, mask);
50 }
51 
52 /*
53  * debugfs
54  */
55 #ifdef CONFIG_DEBUG_FS
56 
57 #define RD_BUF (2 * PAGE_SIZE)
58 
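/* append one register as an "offset<tab>value" line, using a 32-bit or 16-bit read */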
59 static ssize_t intel_sprintf(void __iomem *mem, bool l,
60 			     char *buf, size_t pos, unsigned int reg)
61 {
62 	int value;
63 
64 	if (l)
65 		value = intel_readl(mem, reg);
66 	else
67 		value = intel_readw(mem, reg);
68 
69 	return scnprintf(buf + pos, RD_BUF - pos, "%4x\t%4x\n", reg, value);
70 }
71 
72 static int intel_reg_show(struct seq_file *s_file, void *data)
73 {
74 	struct sdw_intel *sdw = s_file->private;
75 	void __iomem *s = sdw->link_res->shim;
76 	void __iomem *a = sdw->link_res->alh;
77 	ssize_t ret;
78 	int i, j;
79 	unsigned int links, reg;
80 
81 	char *buf __free(kfree) = kzalloc(RD_BUF, GFP_KERNEL);
82 	if (!buf)
83 		return -ENOMEM;
84 
85 	links = intel_readl(s, SDW_SHIM_LCAP) & SDW_SHIM_LCAP_LCOUNT_MASK;
86 
87 	ret = scnprintf(buf, RD_BUF, "Register  Value\n");
88 	ret += scnprintf(buf + ret, RD_BUF - ret, "\nShim\n");
89 
90 	for (i = 0; i < links; i++) {
91 		reg = SDW_SHIM_LCAP + i * 4;
92 		ret += intel_sprintf(s, true, buf, ret, reg);
93 	}
94 
95 	for (i = 0; i < links; i++) {
96 		ret += scnprintf(buf + ret, RD_BUF - ret, "\nLink%d\n", i);
97 		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLSCAP(i));
98 		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS0CM(i));
99 		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS1CM(i));
100 		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS2CM(i));
101 		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS3CM(i));
102 		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_PCMSCAP(i));
103 
104 		ret += scnprintf(buf + ret, RD_BUF - ret, "\n PCMSyCH registers\n");
105 
106 		/*
107 		 * The value 10 is the number of PDIs per link. We will need a
108 		 * cleanup to remove hard-coded Intel configurations
109 		 * from cadence_master.c.
110 		 */
111 		for (j = 0; j < 10; j++) {
112 			ret += intel_sprintf(s, false, buf, ret,
113 					SDW_SHIM_PCMSYCHM(i, j));
114 			ret += intel_sprintf(s, false, buf, ret,
115 					SDW_SHIM_PCMSYCHC(i, j));
116 		}
117 		ret += scnprintf(buf + ret, RD_BUF - ret, "\n IOCTL, CTMCTL\n");
118 
119 		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_IOCTL(i));
120 		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTMCTL(i));
121 	}
122 
123 	ret += scnprintf(buf + ret, RD_BUF - ret, "\nWake registers\n");
124 	ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKEEN);
125 	ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKESTS);
126 
127 	ret += scnprintf(buf + ret, RD_BUF - ret, "\nALH STRMzCFG\n");
128 	for (i = 0; i < SDW_ALH_NUM_STREAMS; i++)
129 		ret += intel_sprintf(a, true, buf, ret, SDW_ALH_STRMZCFG(i));
130 
131 	seq_printf(s_file, "%s", buf);
132 
133 	return 0;
134 }
135 DEFINE_SHOW_ATTRIBUTE(intel_reg);
136 
137 static int intel_set_m_datamode(void *data, u64 value)
138 {
139 	struct sdw_intel *sdw = data;
140 	struct sdw_bus *bus = &sdw->cdns.bus;
141 
142 	if (value > SDW_PORT_DATA_MODE_STATIC_1)
143 		return -EINVAL;
144 
145 	/* Userspace changed the hardware state behind the kernel's back */
146 	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
147 
148 	bus->params.m_data_mode = value;
149 
150 	return 0;
151 }
152 DEFINE_DEBUGFS_ATTRIBUTE(intel_set_m_datamode_fops, NULL,
153 			 intel_set_m_datamode, "%llu\n");
154 
155 static int intel_set_s_datamode(void *data, u64 value)
156 {
157 	struct sdw_intel *sdw = data;
158 	struct sdw_bus *bus = &sdw->cdns.bus;
159 
160 	if (value > SDW_PORT_DATA_MODE_STATIC_1)
161 		return -EINVAL;
162 
163 	/* Userspace changed the hardware state behind the kernel's back */
164 	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
165 
166 	bus->params.s_data_mode = value;
167 
168 	return 0;
169 }
170 DEFINE_DEBUGFS_ATTRIBUTE(intel_set_s_datamode_fops, NULL,
171 			 intel_set_s_datamode, "%llu\n");
172 
173 static void intel_debugfs_init(struct sdw_intel *sdw)
174 {
175 	struct dentry *root = sdw->cdns.bus.debugfs;
176 
177 	if (!root)
178 		return;
179 
180 	sdw->debugfs = debugfs_create_dir("intel-sdw", root);
181 
182 	debugfs_create_file("intel-registers", 0400, sdw->debugfs, sdw,
183 			    &intel_reg_fops);
184 
185 	debugfs_create_file("intel-m-datamode", 0200, sdw->debugfs, sdw,
186 			    &intel_set_m_datamode_fops);
187 
188 	debugfs_create_file("intel-s-datamode", 0200, sdw->debugfs, sdw,
189 			    &intel_set_s_datamode_fops);
190 
191 	sdw_cdns_debugfs_init(&sdw->cdns, sdw->debugfs);
192 }
193 
194 static void intel_debugfs_exit(struct sdw_intel *sdw)
195 {
196 	debugfs_remove_recursive(sdw->debugfs);
197 }
198 #else
199 static void intel_debugfs_init(struct sdw_intel *sdw) {}
200 static void intel_debugfs_exit(struct sdw_intel *sdw) {}
201 #endif /* CONFIG_DEBUG_FS */
202 
203 /*
204  * shim ops
205  */
206 /* this needs to be called with shim_lock */
207 static void intel_shim_glue_to_master_ip(struct sdw_intel *sdw)
208 {
209 	void __iomem *shim = sdw->link_res->shim;
210 	unsigned int link_id = sdw->instance;
211 	u16 ioctl;
212 
213 	/* Switch to MIP from Glue logic */
214 	ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
215 
216 	ioctl &= ~(SDW_SHIM_IOCTL_DOE);
217 	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
218 	usleep_range(10, 15);
219 
220 	ioctl &= ~(SDW_SHIM_IOCTL_DO);
221 	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
222 	usleep_range(10, 15);
223 
224 	ioctl |= (SDW_SHIM_IOCTL_MIF);
225 	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
226 	usleep_range(10, 15);
227 
228 	ioctl &= ~(SDW_SHIM_IOCTL_BKE);
229 	ioctl &= ~(SDW_SHIM_IOCTL_COE);
230 	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
231 	usleep_range(10, 15);
232 
233 	/* at this point Master IP has full control of the I/Os */
234 }
235 
236 /* this needs to be called with shim_lock */
237 static void intel_shim_master_ip_to_glue(struct sdw_intel *sdw)
238 {
239 	unsigned int link_id = sdw->instance;
240 	void __iomem *shim = sdw->link_res->shim;
241 	u16 ioctl;
242 
243 	/* Glue logic */
244 	ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
245 	ioctl |= SDW_SHIM_IOCTL_BKE;
246 	ioctl |= SDW_SHIM_IOCTL_COE;
247 	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
248 	usleep_range(10, 15);
249 
250 	ioctl &= ~(SDW_SHIM_IOCTL_MIF);
251 	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
252 	usleep_range(10, 15);
253 
254 	/* at this point Integration Glue has full control of the I/Os */
255 }
256 
257 /* this needs to be called with shim_lock */
258 static void intel_shim_init(struct sdw_intel *sdw)
259 {
260 	void __iomem *shim = sdw->link_res->shim;
261 	unsigned int link_id = sdw->instance;
262 	u16 ioctl = 0, act;
263 
264 	/* Initialize Shim */
265 	ioctl |= SDW_SHIM_IOCTL_BKE;
266 	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
267 	usleep_range(10, 15);
268 
269 	ioctl |= SDW_SHIM_IOCTL_WPDD;
270 	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
271 	usleep_range(10, 15);
272 
273 	ioctl |= SDW_SHIM_IOCTL_DO;
274 	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
275 	usleep_range(10, 15);
276 
277 	ioctl |= SDW_SHIM_IOCTL_DOE;
278 	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
279 	usleep_range(10, 15);
280 
281 	intel_shim_glue_to_master_ip(sdw);
282 
283 	act = intel_readw(shim, SDW_SHIM_CTMCTL(link_id));
284 	u16p_replace_bits(&act, 0x1, SDW_SHIM_CTMCTL_DOAIS);
285 	act |= SDW_SHIM_CTMCTL_DACTQE;
286 	act |= SDW_SHIM_CTMCTL_DODS;
287 	intel_writew(shim, SDW_SHIM_CTMCTL(link_id), act);
288 	usleep_range(10, 15);
289 }
290 
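/* return the WAKESTS bit for this link: non-zero if a wake event is pending */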
291 static int intel_shim_check_wake(struct sdw_intel *sdw)
292 {
293 	void __iomem *shim;
294 	u16 wake_sts;
295 
296 	shim = sdw->link_res->shim;
297 	wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
298 
299 	return wake_sts & BIT(sdw->instance);
300 }
301 
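/*
 * Enable the wake interrupt for this link, or disable it and clear any
 * pending wake status, under the shim_lock.
 */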
302 static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
303 {
304 	void __iomem *shim = sdw->link_res->shim;
305 	unsigned int link_id = sdw->instance;
306 	u16 wake_en, wake_sts;
307 
308 	mutex_lock(sdw->link_res->shim_lock);
309 	wake_en = intel_readw(shim, SDW_SHIM_WAKEEN);
310 
311 	if (wake_enable) {
312 		/* Enable the wakeup */
313 		wake_en |= (SDW_SHIM_WAKEEN_ENABLE << link_id);
314 		intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
315 	} else {
316 		/* Disable the wake up interrupt */
317 		wake_en &= ~(SDW_SHIM_WAKEEN_ENABLE << link_id);
318 		intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
319 
320 		/* Clear wake status */
321 		wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
322 		wake_sts |= (SDW_SHIM_WAKESTS_STATUS << link_id);
323 		intel_writew(shim, SDW_SHIM_WAKESTS, wake_sts);
324 	}
325 	mutex_unlock(sdw->link_res->shim_lock);
326 }
327 
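/* return true if any CMDSYNC bit is still set, i.e. a command sync is pending */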
328 static bool intel_check_cmdsync_unlocked(struct sdw_intel *sdw)
329 {
330 	void __iomem *shim = sdw->link_res->shim;
331 	int sync_reg;
332 
333 	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
334 	return !!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK);
335 }
336 
337 static int intel_link_power_up(struct sdw_intel *sdw)
338 {
339 	unsigned int link_id = sdw->instance;
340 	void __iomem *shim = sdw->link_res->shim;
341 	u32 *shim_mask = sdw->link_res->shim_mask;
342 	struct sdw_bus *bus = &sdw->cdns.bus;
343 	struct sdw_master_prop *prop = &bus->prop;
344 	u32 spa_mask, cpa_mask;
345 	u32 link_control;
346 	int ret = 0;
347 	u32 clock_source;
348 	u32 syncprd;
349 	u32 sync_reg;
350 	bool lcap_mlcs;
351 
352 	mutex_lock(sdw->link_res->shim_lock);
353 
354 	/*
355 	 * The hardware relies on an internal counter, typically 4 kHz,
356 	 * to generate the SoundWire SSP - which defines a 'safe'
357 	 * synchronization point between commands and audio transport
358 	 * and allows for multi-link synchronization. The SYNCPRD value
359 	 * only depends on the oscillator clock provided to
360 	 * the IP, so adjust based on the _DSD properties reported in DSDT
361 	 * tables. The values reported are based on either 24 MHz
362 	 * (CNL/CML) or 38.4 MHz (ICL/TGL+). On MeteorLake, additional
363 	 * frequencies are available with the MLCS clock source selection.
364 	 */
365 	lcap_mlcs = intel_readl(shim, SDW_SHIM_LCAP) & SDW_SHIM_LCAP_MLCS_MASK;
366 
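	/*
	 * mclk frequencies that are multiples of 6 MHz use the 24 MHz XTAL
	 * SYNCPRD value (or the 96 MHz audio PLL value when MLCS is
	 * supported), multiples of 2.4 MHz use the 38.4 MHz XTAL value, and
	 * anything else requires the 24.576 MHz cardinal clock, which is only
	 * available when MLCS is supported.
	 */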
367 	if (prop->mclk_freq % 6000000) {
368 		if (prop->mclk_freq % 2400000) {
369 			if (lcap_mlcs) {
370 				syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24_576;
371 				clock_source = SDW_SHIM_MLCS_CARDINAL_CLK;
372 			} else {
373 				dev_err(sdw->cdns.dev, "%s: invalid clock configuration, mclk %d lcap_mlcs %d\n",
374 					__func__, prop->mclk_freq, lcap_mlcs);
375 				ret = -EINVAL;
376 				goto out;
377 			}
378 		} else {
379 			syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4;
380 			clock_source = SDW_SHIM_MLCS_XTAL_CLK;
381 		}
382 	} else {
383 		if (lcap_mlcs) {
384 			syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_96;
385 			clock_source = SDW_SHIM_MLCS_AUDIO_PLL_CLK;
386 		} else {
387 			syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24;
388 			clock_source = SDW_SHIM_MLCS_XTAL_CLK;
389 		}
390 	}
391 
392 	if (!*shim_mask) {
393 		dev_dbg(sdw->cdns.dev, "powering up all links\n");
394 
395 		/* we first need to program the SyncPRD/CPU registers */
396 		dev_dbg(sdw->cdns.dev,
397 			"first link up, programming SYNCPRD\n");
398 
399 		/* set SyncPRD period */
400 		sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
401 		u32p_replace_bits(&sync_reg, syncprd, SDW_SHIM_SYNC_SYNCPRD);
402 
403 		/* Set SyncCPU bit */
404 		sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
405 		intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
406 
407 		/* Link power up sequence */
408 		link_control = intel_readl(shim, SDW_SHIM_LCTL);
409 
410 		/* only power-up enabled links */
411 		spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, sdw->link_res->link_mask);
412 		cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
413 
414 		link_control |= spa_mask;
415 
416 		ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
417 		if (ret < 0) {
418 			dev_err(sdw->cdns.dev, "Failed to power up link: %d\n", ret);
419 			goto out;
420 		}
421 
422 		/* SyncCPU will change once link is active */
423 		ret = intel_wait_bit(shim, SDW_SHIM_SYNC,
424 				     SDW_SHIM_SYNC_SYNCCPU, 0);
425 		if (ret < 0) {
426 			dev_err(sdw->cdns.dev,
427 				"Failed to set SHIM_SYNC: %d\n", ret);
428 			goto out;
429 		}
430 
431 		/* update link clock if needed */
432 		if (lcap_mlcs) {
433 			link_control = intel_readl(shim, SDW_SHIM_LCTL);
434 			u32p_replace_bits(&link_control, clock_source, SDW_SHIM_LCTL_MLCS_MASK);
435 			intel_writel(shim, SDW_SHIM_LCTL, link_control);
436 		}
437 	}
438 
439 	*shim_mask |= BIT(link_id);
440 
441 	sdw->cdns.link_up = true;
442 
443 	intel_shim_init(sdw);
444 
445 out:
446 	mutex_unlock(sdw->link_res->shim_lock);
447 
448 	return ret;
449 }
450 
451 static int intel_link_power_down(struct sdw_intel *sdw)
452 {
453 	u32 link_control, spa_mask, cpa_mask;
454 	unsigned int link_id = sdw->instance;
455 	void __iomem *shim = sdw->link_res->shim;
456 	u32 *shim_mask = sdw->link_res->shim_mask;
457 	int ret = 0;
458 
459 	mutex_lock(sdw->link_res->shim_lock);
460 
461 	if (!(*shim_mask & BIT(link_id)))
462 		dev_err(sdw->cdns.dev,
463 			"%s: Unbalanced power-up/down calls\n", __func__);
464 
465 	sdw->cdns.link_up = false;
466 
467 	intel_shim_master_ip_to_glue(sdw);
468 
469 	*shim_mask &= ~BIT(link_id);
470 
471 	if (!*shim_mask) {
472 
473 		dev_dbg(sdw->cdns.dev, "powering down all links\n");
474 
475 		/* Link power down sequence */
476 		link_control = intel_readl(shim, SDW_SHIM_LCTL);
477 
478 		/* only power-down enabled links */
479 		spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, ~sdw->link_res->link_mask);
480 		cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
481 
482 		link_control &= spa_mask;
483 
484 		ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
485 		if (ret < 0) {
486 			dev_err(sdw->cdns.dev, "%s: could not power down link\n", __func__);
487 
488 			/*
489 			 * we leave the sdw->cdns.link_up flag as false since we've disabled
490 			 * the link at this point and cannot handle interrupts any longer.
491 			 */
492 		}
493 	}
494 
495 	mutex_unlock(sdw->link_res->shim_lock);
496 
497 	return ret;
498 }
499 
500 static void intel_shim_sync_arm(struct sdw_intel *sdw)
501 {
502 	void __iomem *shim = sdw->link_res->shim;
503 	u32 sync_reg;
504 
505 	mutex_lock(sdw->link_res->shim_lock);
506 
507 	/* update SYNC register */
508 	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
509 	sync_reg |= (SDW_SHIM_SYNC_CMDSYNC << sdw->instance);
510 	intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
511 
512 	mutex_unlock(sdw->link_res->shim_lock);
513 }
514 
515 static int intel_shim_sync_go_unlocked(struct sdw_intel *sdw)
516 {
517 	void __iomem *shim = sdw->link_res->shim;
518 	u32 sync_reg;
519 
520 	/* Read SYNC register */
521 	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
522 
523 	/*
524 	 * Set SyncGO bit to synchronously trigger a bank switch for
525 	 * all the masters. A write to SYNCGO bit clears CMDSYNC bit for all
526 	 * the Masters.
527 	 */
528 	sync_reg |= SDW_SHIM_SYNC_SYNCGO;
529 
530 	intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
531 
532 	return 0;
533 }
534 
535 static int intel_shim_sync_go(struct sdw_intel *sdw)
536 {
537 	int ret;
538 
539 	mutex_lock(sdw->link_res->shim_lock);
540 
541 	ret = intel_shim_sync_go_unlocked(sdw);
542 
543 	mutex_unlock(sdw->link_res->shim_lock);
544 
545 	return ret;
546 }
547 
548 /*
549  * PDI routines
550  */
551 static void intel_pdi_init(struct sdw_intel *sdw,
552 			   struct sdw_cdns_stream_config *config)
553 {
554 	void __iomem *shim = sdw->link_res->shim;
555 	unsigned int link_id = sdw->instance;
556 	int pcm_cap;
557 
558 	/* PCM Stream Capability */
559 	pcm_cap = intel_readw(shim, SDW_SHIM_PCMSCAP(link_id));
560 
561 	config->pcm_bd = FIELD_GET(SDW_SHIM_PCMSCAP_BSS, pcm_cap);
562 	config->pcm_in = FIELD_GET(SDW_SHIM_PCMSCAP_ISS, pcm_cap);
563 	config->pcm_out = FIELD_GET(SDW_SHIM_PCMSCAP_OSS, pcm_cap);
564 
565 	dev_dbg(sdw->cdns.dev, "PCM cap bd:%d in:%d out:%d\n",
566 		config->pcm_bd, config->pcm_in, config->pcm_out);
567 }
568 
569 static int
570 intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num)
571 {
572 	void __iomem *shim = sdw->link_res->shim;
573 	unsigned int link_id = sdw->instance;
574 	int count;
575 
576 	count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));
577 
578 	/*
579 	 * WORKAROUND: on all existing Intel controllers, PDI
580 	 * number 2 reports a channel count of 1 even though it
581 	 * supports 8 channels. Hardcode the channel count for
582 	 * PDI number 2.
583 	 */
584 	if (pdi_num == 2)
585 		count = 7;
586 
587 	/* the register reports a zero-based channel count, hence the increment */
588 	count++;
589 
590 	return count;
591 }
592 
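/* sum the channel capability of each PDI in the array and report the total */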
593 static int intel_pdi_get_ch_update(struct sdw_intel *sdw,
594 				   struct sdw_cdns_pdi *pdi,
595 				   unsigned int num_pdi,
596 				   unsigned int *num_ch)
597 {
598 	int i, ch_count = 0;
599 
600 	for (i = 0; i < num_pdi; i++) {
601 		pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num);
602 		ch_count += pdi->ch_count;
603 		pdi++;
604 	}
605 
606 	*num_ch = ch_count;
607 	return 0;
608 }
609 
610 static int intel_pdi_stream_ch_update(struct sdw_intel *sdw,
611 				      struct sdw_cdns_streams *stream)
612 {
613 	intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd,
614 				&stream->num_ch_bd);
615 
616 	intel_pdi_get_ch_update(sdw, stream->in, stream->num_in,
617 				&stream->num_ch_in);
618 
619 	intel_pdi_get_ch_update(sdw, stream->out, stream->num_out,
620 				&stream->num_ch_out);
621 
622 	return 0;
623 }
624 
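/*
 * Compute the ALH stream id for this PDI and, for PCM streams, program the
 * direction, stream id and channel range into the PCMSYCHM register.
 */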
625 static void
626 intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
627 {
628 	void __iomem *shim = sdw->link_res->shim;
629 	unsigned int link_id = sdw->instance;
630 	int pdi_conf = 0;
631 
632 	/* the Bulk and PCM streams are not contiguous */
633 	pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
634 	if (pdi->num >= 2)
635 		pdi->intel_alh_id += 2;
636 
637 	/*
638 	 * Program the stream parameters into the stream SHIM register.
639 	 * This is applicable to PCM streams only.
640 	 */
641 	if (pdi->type != SDW_STREAM_PCM)
642 		return;
643 
644 	if (pdi->dir == SDW_DATA_DIR_RX)
645 		pdi_conf |= SDW_SHIM_PCMSYCM_DIR;
646 	else
647 		pdi_conf &= ~(SDW_SHIM_PCMSYCM_DIR);
648 
649 	u32p_replace_bits(&pdi_conf, pdi->intel_alh_id, SDW_SHIM_PCMSYCM_STREAM);
650 	u32p_replace_bits(&pdi_conf, pdi->l_ch_num, SDW_SHIM_PCMSYCM_LCHN);
651 	u32p_replace_bits(&pdi_conf, pdi->h_ch_num, SDW_SHIM_PCMSYCM_HCHN);
652 
653 	intel_writew(shim, SDW_SHIM_PCMSYCHM(link_id, pdi->num), pdi_conf);
654 }
655 
656 static void
657 intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
658 {
659 	void __iomem *alh = sdw->link_res->alh;
660 	unsigned int link_id = sdw->instance;
661 	unsigned int conf;
662 
663 	/* the Bulk and PCM streams are not contiguous */
664 	pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
665 	if (pdi->num >= 2)
666 		pdi->intel_alh_id += 2;
667 
668 	/* Program Stream config ALH register */
669 	conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id));
670 
671 	u32p_replace_bits(&conf, SDW_ALH_STRMZCFG_DMAT_VAL, SDW_ALH_STRMZCFG_DMAT);
672 	u32p_replace_bits(&conf, pdi->ch_count - 1, SDW_ALH_STRMZCFG_CHN);
673 
674 	intel_writel(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id), conf);
675 }
676 
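/*
 * Forward the stream parameters (substream, dai, hw_params, link id and ALH
 * stream id) to the parent driver through the link_res ops; return -EIO if
 * no callback is available.
 */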
677 static int intel_params_stream(struct sdw_intel *sdw,
678 			       struct snd_pcm_substream *substream,
679 			       struct snd_soc_dai *dai,
680 			       struct snd_pcm_hw_params *hw_params,
681 			       int link_id, int alh_stream_id)
682 {
683 	struct sdw_intel_link_res *res = sdw->link_res;
684 	struct sdw_intel_stream_params_data params_data;
685 
686 	params_data.substream = substream;
687 	params_data.dai = dai;
688 	params_data.hw_params = hw_params;
689 	params_data.link_id = link_id;
690 	params_data.alh_stream_id = alh_stream_id;
691 
692 	if (res->ops && res->ops->params_stream && res->dev)
693 		return res->ops->params_stream(res->dev,
694 					       &params_data);
695 	return -EIO;
696 }
697 
698 /*
699  * DAI routines
700  */
701 
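/* notify the parent driver that the stream resources can be released */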
702 static int intel_free_stream(struct sdw_intel *sdw,
703 			     struct snd_pcm_substream *substream,
704 			     struct snd_soc_dai *dai,
705 			     int link_id)
706 {
707 	struct sdw_intel_link_res *res = sdw->link_res;
708 	struct sdw_intel_stream_free_data free_data;
709 
710 	free_data.substream = substream;
711 	free_data.dai = dai;
712 	free_data.link_id = link_id;
713 
714 	if (res->ops && res->ops->free_stream && res->dev)
715 		return res->ops->free_stream(res->dev, &free_data);
716 
717 	return 0;
718 }
719 
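/*
 * Allocate a PDI for this DAI, program the SHIM/ALH/Cadence registers, inform
 * the parent driver of the ALH stream id and add the master to the stream.
 */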
720 static int intel_hw_params(struct snd_pcm_substream *substream,
721 			   struct snd_pcm_hw_params *params,
722 			   struct snd_soc_dai *dai)
723 {
724 	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
725 	struct sdw_intel *sdw = cdns_to_intel(cdns);
726 	struct sdw_cdns_dai_runtime *dai_runtime;
727 	struct sdw_cdns_pdi *pdi;
728 	struct sdw_stream_config sconfig;
729 	int ch, dir;
730 	int ret;
731 
732 	dai_runtime = cdns->dai_runtime_array[dai->id];
733 	if (!dai_runtime)
734 		return -EIO;
735 
736 	ch = params_channels(params);
737 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
738 		dir = SDW_DATA_DIR_RX;
739 	else
740 		dir = SDW_DATA_DIR_TX;
741 
742 	pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
743 
744 	if (!pdi)
745 		return -EINVAL;
746 
747 	/* do run-time configurations for SHIM, ALH and PDI/PORT */
748 	intel_pdi_shim_configure(sdw, pdi);
749 	intel_pdi_alh_configure(sdw, pdi);
750 	sdw_cdns_config_stream(cdns, ch, dir, pdi);
751 
752 	/* store the PDI and reset the state flags, may be needed in the prepare step */
753 	dai_runtime->paused = false;
754 	dai_runtime->suspended = false;
755 	dai_runtime->pdi = pdi;
756 
757 	/* Inform DSP about PDI stream number */
758 	ret = intel_params_stream(sdw, substream, dai, params,
759 				  sdw->instance,
760 				  pdi->intel_alh_id);
761 	if (ret)
762 		return ret;
763 
764 	sconfig.direction = dir;
765 	sconfig.ch_count = ch;
766 	sconfig.frame_rate = params_rate(params);
767 	sconfig.type = dai_runtime->stream_type;
768 
769 	sconfig.bps = snd_pcm_format_width(params_format(params));
770 
771 	/* Port configuration */
772 	struct sdw_port_config *pconfig __free(kfree) = kzalloc(sizeof(*pconfig),
773 								GFP_KERNEL);
774 	if (!pconfig)
775 		return -ENOMEM;
776 
777 	pconfig->num = pdi->num;
778 	pconfig->ch_mask = (1 << ch) - 1;
779 
780 	ret = sdw_stream_add_master(&cdns->bus, &sconfig,
781 				    pconfig, 1, dai_runtime->stream);
782 	if (ret)
783 		dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
784 
785 	return ret;
786 }
787 
788 static int intel_prepare(struct snd_pcm_substream *substream,
789 			 struct snd_soc_dai *dai)
790 {
791 	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
792 	struct sdw_intel *sdw = cdns_to_intel(cdns);
793 	struct sdw_cdns_dai_runtime *dai_runtime;
794 	int ch, dir;
795 	int ret = 0;
796 
797 	dai_runtime = cdns->dai_runtime_array[dai->id];
798 	if (!dai_runtime) {
799 		dev_err(dai->dev, "failed to get dai runtime in %s\n",
800 			__func__);
801 		return -EIO;
802 	}
803 
804 	if (dai_runtime->suspended) {
805 		struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
806 		struct snd_pcm_hw_params *hw_params;
807 
808 		hw_params = &rtd->dpcm[substream->stream].hw_params;
809 
810 		dai_runtime->suspended = false;
811 
812 		/*
813 		 * .prepare() is called after system resume, where we
814 		 * need to reinitialize the SHIM/ALH/Cadence IP.
815 		 * .prepare() is also called to deal with underflows,
816 		 * but in those cases we cannot touch ALH/SHIM
817 		 * registers.
818 		 */
819 
820 		/* configure stream */
821 		ch = params_channels(hw_params);
822 		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
823 			dir = SDW_DATA_DIR_RX;
824 		else
825 			dir = SDW_DATA_DIR_TX;
826 
827 		intel_pdi_shim_configure(sdw, dai_runtime->pdi);
828 		intel_pdi_alh_configure(sdw, dai_runtime->pdi);
829 		sdw_cdns_config_stream(cdns, ch, dir, dai_runtime->pdi);
830 
831 		/* Inform DSP about PDI stream number */
832 		ret = intel_params_stream(sdw, substream, dai,
833 					  hw_params,
834 					  sdw->instance,
835 					  dai_runtime->pdi->intel_alh_id);
836 	}
837 
838 	return ret;
839 }
840 
841 static int
842 intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
843 {
844 	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
845 	struct sdw_intel *sdw = cdns_to_intel(cdns);
846 	struct sdw_cdns_dai_runtime *dai_runtime;
847 	int ret;
848 
849 	dai_runtime = cdns->dai_runtime_array[dai->id];
850 	if (!dai_runtime)
851 		return -EIO;
852 
853 	/*
854 	 * The sdw stream state will transition to RELEASED when
855 	 * stream->master_list is empty. So the stream state will transition
856 	 * to DEPREPARED for the first cpu-dai and to RELEASED for the last
857 	 * cpu-dai.
858 	 */
859 	ret = sdw_stream_remove_master(&cdns->bus, dai_runtime->stream);
860 	if (ret < 0) {
861 		dev_err(dai->dev, "remove master from stream %s failed: %d\n",
862 			dai_runtime->stream->name, ret);
863 		return ret;
864 	}
865 
866 	ret = intel_free_stream(sdw, substream, dai, sdw->instance);
867 	if (ret < 0) {
868 		dev_err(dai->dev, "intel_free_stream: failed %d\n", ret);
869 		return ret;
870 	}
871 
872 	dai_runtime->pdi = NULL;
873 
874 	return 0;
875 }
876 
877 static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
878 				    void *stream, int direction)
879 {
880 	return cdns_set_sdw_stream(dai, stream, direction);
881 }
882 
883 static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
884 				  int direction)
885 {
886 	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
887 	struct sdw_cdns_dai_runtime *dai_runtime;
888 
889 	dai_runtime = cdns->dai_runtime_array[dai->id];
890 	if (!dai_runtime)
891 		return ERR_PTR(-EINVAL);
892 
893 	return dai_runtime->stream;
894 }
895 
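/*
 * Only track the suspend/pause state in the dai_runtime here; the actual
 * re-programming of the SHIM/ALH/PDI is done in the .prepare() callback.
 */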
896 static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai)
897 {
898 	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
899 	struct sdw_cdns_dai_runtime *dai_runtime;
900 	int ret = 0;
901 
902 	dai_runtime = cdns->dai_runtime_array[dai->id];
903 	if (!dai_runtime) {
904 		dev_err(dai->dev, "failed to get dai runtime in %s\n",
905 			__func__);
906 		return -EIO;
907 	}
908 
909 	switch (cmd) {
910 	case SNDRV_PCM_TRIGGER_SUSPEND:
911 
912 		/*
913 		 * The .prepare callback is used to deal with xruns and resume operations.
914 		 * In the case of xruns, the DMAs and SHIM registers cannot be touched,
915 		 * but for resume operations the DMAs and SHIM registers need to be initialized.
916 		 * The .trigger callback is used to track the suspend case only.
917 		 */
918 
919 		dai_runtime->suspended = true;
920 
921 		break;
922 
923 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
924 		dai_runtime->paused = true;
925 		break;
926 	case SNDRV_PCM_TRIGGER_STOP:
927 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
928 		dai_runtime->paused = false;
929 		break;
930 	default:
931 		break;
932 	}
933 
934 	return ret;
935 }
936 
937 static int intel_component_probe(struct snd_soc_component *component)
938 {
939 	int ret;
940 
941 	/*
942 	 * make sure the device is pm_runtime_active before initiating
943 	 * bus transactions during the card registration.
944 	 * We use pm_runtime_resume() here so that we don't need to take a
945 	 * reference and release it immediately afterwards.
946 	 */
947 	ret = pm_runtime_resume(component->dev);
948 	if (ret < 0 && ret != -EACCES)
949 		return ret;
950 
951 	return 0;
952 }
953 
954 static int intel_component_dais_suspend(struct snd_soc_component *component)
955 {
956 	struct snd_soc_dai *dai;
957 
958 	/*
959 	 * In the corner case where a SUSPEND happens during a PAUSE, the ALSA core
960 	 * does not issue a TRIGGER_SUSPEND. This leaves the DAIs in an unbalanced state.
961 	 * Since the component suspend is called last, we can trap this corner case
962 	 * and force the DAIs to release their resources.
963 	 */
964 	for_each_component_dais(component, dai) {
965 		struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
966 		struct sdw_cdns_dai_runtime *dai_runtime;
967 
968 		dai_runtime = cdns->dai_runtime_array[dai->id];
969 
970 		if (!dai_runtime)
971 			continue;
972 
973 		if (dai_runtime->suspended)
974 			continue;
975 
976 		if (dai_runtime->paused)
977 			dai_runtime->suspended = true;
978 	}
979 
980 	return 0;
981 }
982 
983 static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
984 	.hw_params = intel_hw_params,
985 	.prepare = intel_prepare,
986 	.hw_free = intel_hw_free,
987 	.trigger = intel_trigger,
988 	.set_stream = intel_pcm_set_sdw_stream,
989 	.get_stream = intel_get_sdw_stream,
990 };
991 
992 static const struct snd_soc_component_driver dai_component = {
993 	.name			= "soundwire",
994 	.probe			= intel_component_probe,
995 	.suspend		= intel_component_dais_suspend,
996 	.legacy_dai_naming	= 1,
997 };
998 
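/*
 * Fill 'num' DAI drivers starting at index 'off': name them "SDW<link> Pin<n>"
 * and set the playback/capture channel limits based on the PDI type.
 */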
999 static int intel_create_dai(struct sdw_cdns *cdns,
1000 			    struct snd_soc_dai_driver *dais,
1001 			    enum intel_pdi_type type,
1002 			    u32 num, u32 off, u32 max_ch)
1003 {
1004 	int i;
1005 
1006 	if (num == 0)
1007 		return 0;
1008 
1009 	for (i = off; i < (off + num); i++) {
1010 		dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL,
1011 					      "SDW%d Pin%d",
1012 					      cdns->instance, i);
1013 		if (!dais[i].name)
1014 			return -ENOMEM;
1015 
1016 		if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
1017 			dais[i].playback.channels_min = 1;
1018 			dais[i].playback.channels_max = max_ch;
1019 		}
1020 
1021 		if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
1022 			dais[i].capture.channels_min = 1;
1023 			dais[i].capture.channels_max = max_ch;
1024 		}
1025 
1026 		dais[i].ops = &intel_pcm_dai_ops;
1027 	}
1028 
1029 	return 0;
1030 }
1031 
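/*
 * Read the PDI capabilities from the SHIM, initialize the Cadence PDIs and
 * register one PCM DAI per PDI: first the input PDIs, then the output PDIs,
 * then the bidirectional ones.
 */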
1032 static int intel_register_dai(struct sdw_intel *sdw)
1033 {
1034 	struct sdw_cdns_dai_runtime **dai_runtime_array;
1035 	struct sdw_cdns_stream_config config;
1036 	struct sdw_cdns *cdns = &sdw->cdns;
1037 	struct sdw_cdns_streams *stream;
1038 	struct snd_soc_dai_driver *dais;
1039 	int num_dai, ret, off = 0;
1040 
1041 	/* Read the PDI config and initialize cadence PDI */
1042 	intel_pdi_init(sdw, &config);
1043 	ret = sdw_cdns_pdi_init(cdns, config);
1044 	if (ret)
1045 		return ret;
1046 
1047 	intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm);
1048 
1049 	/* DAIs are created based on total number of PDIs supported */
1050 	num_dai = cdns->pcm.num_pdi;
1051 
1052 	dai_runtime_array = devm_kcalloc(cdns->dev, num_dai,
1053 					 sizeof(struct sdw_cdns_dai_runtime *),
1054 					 GFP_KERNEL);
1055 	if (!dai_runtime_array)
1056 		return -ENOMEM;
1057 	cdns->dai_runtime_array = dai_runtime_array;
1058 
1059 	dais = devm_kcalloc(cdns->dev, num_dai, sizeof(*dais), GFP_KERNEL);
1060 	if (!dais)
1061 		return -ENOMEM;
1062 
1063 	/* Create PCM DAIs */
1064 	stream = &cdns->pcm;
1065 
1066 	ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in,
1067 			       off, stream->num_ch_in);
1068 	if (ret)
1069 		return ret;
1070 
1071 	off += cdns->pcm.num_in;
1072 	ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pcm.num_out,
1073 			       off, stream->num_ch_out);
1074 	if (ret)
1075 		return ret;
1076 
1077 	off += cdns->pcm.num_out;
1078 	ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pcm.num_bd,
1079 			       off, stream->num_ch_bd);
1080 	if (ret)
1081 		return ret;
1082 
1083 	return devm_snd_soc_register_component(cdns->dev, &dai_component,
1084 					       dais, num_dai);
1085 }
1086 
1087 
1088 const struct sdw_intel_hw_ops sdw_intel_cnl_hw_ops = {
1089 	.debugfs_init = intel_debugfs_init,
1090 	.debugfs_exit = intel_debugfs_exit,
1091 
1092 	.register_dai = intel_register_dai,
1093 
1094 	.check_clock_stop = intel_check_clock_stop,
1095 	.start_bus = intel_start_bus,
1096 	.start_bus_after_reset = intel_start_bus_after_reset,
1097 	.start_bus_after_clock_stop = intel_start_bus_after_clock_stop,
1098 	.stop_bus = intel_stop_bus,
1099 
1100 	.link_power_up = intel_link_power_up,
1101 	.link_power_down = intel_link_power_down,
1102 
1103 	.shim_check_wake = intel_shim_check_wake,
1104 	.shim_wake = intel_shim_wake,
1105 
1106 	.pre_bank_switch = intel_pre_bank_switch,
1107 	.post_bank_switch = intel_post_bank_switch,
1108 
1109 	.sync_arm = intel_shim_sync_arm,
1110 	.sync_go_unlocked = intel_shim_sync_go_unlocked,
1111 	.sync_go = intel_shim_sync_go,
1112 	.sync_check_cmdsync_unlocked = intel_check_cmdsync_unlocked,
1113 };
1114 EXPORT_SYMBOL_NS(sdw_intel_cnl_hw_ops, SOUNDWIRE_INTEL);
1115