xref: /linux/drivers/soundwire/cadence_master.c (revision cbfea84f820962c3c5394ff06e7e9344c96bf761)
1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 // Copyright(c) 2015-17 Intel Corporation.
3 
4 /*
5  * Cadence SoundWire Master module
6  * Used by Master driver
7  */
8 
9 #include <linux/cleanup.h>
10 #include <linux/crc8.h>
11 #include <linux/delay.h>
12 #include <linux/device.h>
13 #include <linux/debugfs.h>
14 #include <linux/interrupt.h>
15 #include <linux/io.h>
16 #include <linux/module.h>
17 #include <linux/mod_devicetable.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/soundwire/sdw_registers.h>
20 #include <linux/soundwire/sdw.h>
21 #include <sound/pcm_params.h>
22 #include <sound/soc.h>
23 #include <linux/workqueue.h>
24 #include "bus.h"
25 #include "cadence_master.h"
26 
27 static int interrupt_mask;
28 module_param_named(cnds_mcp_int_mask, interrupt_mask, int, 0444);
29 MODULE_PARM_DESC(cdns_mcp_int_mask, "Cadence MCP IntMask");
30 
31 #define CDNS_MCP_CONFIG				0x0
32 #define CDNS_MCP_CONFIG_BUS_REL			BIT(6)
33 
34 #define CDNS_IP_MCP_CONFIG			0x0 /* IP offset added at run-time */
35 
36 #define CDNS_IP_MCP_CONFIG_MCMD_RETRY		GENMASK(27, 24)
37 #define CDNS_IP_MCP_CONFIG_MPREQ_DELAY		GENMASK(20, 16)
38 #define CDNS_IP_MCP_CONFIG_MMASTER		BIT(7)
39 #define CDNS_IP_MCP_CONFIG_SNIFFER		BIT(5)
40 #define CDNS_IP_MCP_CONFIG_CMD			BIT(3)
41 #define CDNS_IP_MCP_CONFIG_OP			GENMASK(2, 0)
42 #define CDNS_IP_MCP_CONFIG_OP_NORMAL		0
43 
44 #define CDNS_MCP_CONTROL			0x4
45 
46 #define CDNS_MCP_CONTROL_CMD_RST		BIT(7)
47 #define CDNS_MCP_CONTROL_SOFT_RST		BIT(6)
48 #define CDNS_MCP_CONTROL_HW_RST			BIT(4)
49 #define CDNS_MCP_CONTROL_CLK_STOP_CLR		BIT(2)
50 
51 #define CDNS_IP_MCP_CONTROL			0x4  /* IP offset added at run-time */
52 
53 #define CDNS_IP_MCP_CONTROL_RST_DELAY		GENMASK(10, 8)
54 #define CDNS_IP_MCP_CONTROL_SW_RST		BIT(5)
55 #define CDNS_IP_MCP_CONTROL_CLK_PAUSE		BIT(3)
56 #define CDNS_IP_MCP_CONTROL_CMD_ACCEPT		BIT(1)
57 #define CDNS_IP_MCP_CONTROL_BLOCK_WAKEUP	BIT(0)
58 
59 #define CDNS_IP_MCP_CMDCTRL			0x8 /* IP offset added at run-time */
60 
61 #define CDNS_IP_MCP_CMDCTRL_INSERT_PARITY_ERR	BIT(2)
62 
63 #define CDNS_MCP_SSPSTAT			0xC
64 #define CDNS_MCP_FRAME_SHAPE			0x10
65 #define CDNS_MCP_FRAME_SHAPE_INIT		0x14
66 #define CDNS_MCP_FRAME_SHAPE_COL_MASK		GENMASK(2, 0)
67 #define CDNS_MCP_FRAME_SHAPE_ROW_MASK		GENMASK(7, 3)
68 
69 #define CDNS_MCP_CONFIG_UPDATE			0x18
70 #define CDNS_MCP_CONFIG_UPDATE_BIT		BIT(0)
71 
72 #define CDNS_MCP_PHYCTRL			0x1C
73 #define CDNS_MCP_SSP_CTRL0			0x20
74 #define CDNS_MCP_SSP_CTRL1			0x28
75 #define CDNS_MCP_CLK_CTRL0			0x30
76 #define CDNS_MCP_CLK_CTRL1			0x38
77 #define CDNS_MCP_CLK_MCLKD_MASK		GENMASK(7, 0)
78 
79 #define CDNS_MCP_STAT				0x40
80 
81 #define CDNS_MCP_STAT_ACTIVE_BANK		BIT(20)
82 #define CDNS_MCP_STAT_CLK_STOP			BIT(16)
83 
84 #define CDNS_MCP_INTSTAT			0x44
85 #define CDNS_MCP_INTMASK			0x48
86 
87 #define CDNS_MCP_INT_IRQ			BIT(31)
88 #define CDNS_MCP_INT_RESERVED1			GENMASK(30, 17)
89 #define CDNS_MCP_INT_WAKEUP			BIT(16)
90 #define CDNS_MCP_INT_SLAVE_RSVD			BIT(15)
91 #define CDNS_MCP_INT_SLAVE_ALERT		BIT(14)
92 #define CDNS_MCP_INT_SLAVE_ATTACH		BIT(13)
93 #define CDNS_MCP_INT_SLAVE_NATTACH		BIT(12)
94 #define CDNS_MCP_INT_SLAVE_MASK			GENMASK(15, 12)
95 #define CDNS_MCP_INT_DPINT			BIT(11)
96 #define CDNS_MCP_INT_CTRL_CLASH			BIT(10)
97 #define CDNS_MCP_INT_DATA_CLASH			BIT(9)
98 #define CDNS_MCP_INT_PARITY			BIT(8)
99 #define CDNS_MCP_INT_CMD_ERR			BIT(7)
100 #define CDNS_MCP_INT_RESERVED2			GENMASK(6, 4)
101 #define CDNS_MCP_INT_RX_NE			BIT(3)
102 #define CDNS_MCP_INT_RX_WL			BIT(2)
103 #define CDNS_MCP_INT_TXE			BIT(1)
104 #define CDNS_MCP_INT_TXF			BIT(0)
105 #define CDNS_MCP_INT_RESERVED (CDNS_MCP_INT_RESERVED1 | CDNS_MCP_INT_RESERVED2)
106 
107 #define CDNS_MCP_INTSET				0x4C
108 
109 #define CDNS_MCP_SLAVE_STAT			0x50
110 #define CDNS_MCP_SLAVE_STAT_MASK		GENMASK(1, 0)
111 
112 #define CDNS_MCP_SLAVE_INTSTAT0			0x54
113 #define CDNS_MCP_SLAVE_INTSTAT1			0x58
114 #define CDNS_MCP_SLAVE_INTSTAT_NPRESENT		BIT(0)
115 #define CDNS_MCP_SLAVE_INTSTAT_ATTACHED		BIT(1)
116 #define CDNS_MCP_SLAVE_INTSTAT_ALERT		BIT(2)
117 #define CDNS_MCP_SLAVE_INTSTAT_RESERVED		BIT(3)
118 #define CDNS_MCP_SLAVE_STATUS_BITS		GENMASK(3, 0)
119 #define CDNS_MCP_SLAVE_STATUS_NUM		4
120 
121 #define CDNS_MCP_SLAVE_INTMASK0			0x5C
122 #define CDNS_MCP_SLAVE_INTMASK1			0x60
123 
124 #define CDNS_MCP_SLAVE_INTMASK0_MASK		GENMASK(31, 0)
125 #define CDNS_MCP_SLAVE_INTMASK1_MASK		GENMASK(15, 0)
126 
127 #define CDNS_MCP_PORT_INTSTAT			0x64
128 #define CDNS_MCP_PDI_STAT			0x6C
129 
130 #define CDNS_MCP_FIFOLEVEL			0x78
131 #define CDNS_MCP_FIFOSTAT			0x7C
132 #define CDNS_MCP_RX_FIFO_AVAIL			GENMASK(5, 0)
133 
134 #define CDNS_IP_MCP_CMD_BASE			0x80 /* IP offset added at run-time */
135 #define CDNS_IP_MCP_RESP_BASE			0x80 /* IP offset added at run-time */
136 /* FIFO can hold 8 commands */
137 #define CDNS_MCP_CMD_LEN			8
138 #define CDNS_MCP_CMD_WORD_LEN			0x4
139 
140 #define CDNS_MCP_CMD_SSP_TAG			BIT(31)
141 #define CDNS_MCP_CMD_COMMAND			GENMASK(30, 28)
142 #define CDNS_MCP_CMD_DEV_ADDR			GENMASK(27, 24)
143 #define CDNS_MCP_CMD_REG_ADDR			GENMASK(23, 8)
144 #define CDNS_MCP_CMD_REG_DATA			GENMASK(7, 0)
145 
146 #define CDNS_MCP_CMD_READ			2
147 #define CDNS_MCP_CMD_WRITE			3
148 
149 #define CDNS_MCP_RESP_RDATA			GENMASK(15, 8)
150 #define CDNS_MCP_RESP_ACK			BIT(0)
151 #define CDNS_MCP_RESP_NACK			BIT(1)
152 
153 #define CDNS_DP_SIZE				128
154 
155 #define CDNS_DPN_B0_CONFIG(n)			(0x100 + CDNS_DP_SIZE * (n))
156 #define CDNS_DPN_B0_CH_EN(n)			(0x104 + CDNS_DP_SIZE * (n))
157 #define CDNS_DPN_B0_SAMPLE_CTRL(n)		(0x108 + CDNS_DP_SIZE * (n))
158 #define CDNS_DPN_B0_OFFSET_CTRL(n)		(0x10C + CDNS_DP_SIZE * (n))
159 #define CDNS_DPN_B0_HCTRL(n)			(0x110 + CDNS_DP_SIZE * (n))
160 #define CDNS_DPN_B0_ASYNC_CTRL(n)		(0x114 + CDNS_DP_SIZE * (n))
161 
162 #define CDNS_DPN_B1_CONFIG(n)			(0x118 + CDNS_DP_SIZE * (n))
163 #define CDNS_DPN_B1_CH_EN(n)			(0x11C + CDNS_DP_SIZE * (n))
164 #define CDNS_DPN_B1_SAMPLE_CTRL(n)		(0x120 + CDNS_DP_SIZE * (n))
165 #define CDNS_DPN_B1_OFFSET_CTRL(n)		(0x124 + CDNS_DP_SIZE * (n))
166 #define CDNS_DPN_B1_HCTRL(n)			(0x128 + CDNS_DP_SIZE * (n))
167 #define CDNS_DPN_B1_ASYNC_CTRL(n)		(0x12C + CDNS_DP_SIZE * (n))
168 
169 #define CDNS_DPN_CONFIG_BPM			BIT(18)
170 #define CDNS_DPN_CONFIG_BGC			GENMASK(17, 16)
171 #define CDNS_DPN_CONFIG_WL			GENMASK(12, 8)
172 #define CDNS_DPN_CONFIG_PORT_DAT		GENMASK(3, 2)
173 #define CDNS_DPN_CONFIG_PORT_FLOW		GENMASK(1, 0)
174 
175 #define CDNS_DPN_SAMPLE_CTRL_SI			GENMASK(15, 0)
176 
177 #define CDNS_DPN_OFFSET_CTRL_1			GENMASK(7, 0)
178 #define CDNS_DPN_OFFSET_CTRL_2			GENMASK(15, 8)
179 
180 #define CDNS_DPN_HCTRL_HSTOP			GENMASK(3, 0)
181 #define CDNS_DPN_HCTRL_HSTART			GENMASK(7, 4)
182 #define CDNS_DPN_HCTRL_LCTRL			GENMASK(10, 8)
183 
184 #define CDNS_PORTCTRL				0x130
185 #define CDNS_PORTCTRL_TEST_FAILED		BIT(1)
186 #define CDNS_PORTCTRL_DIRN			BIT(7)
187 #define CDNS_PORTCTRL_BANK_INVERT		BIT(8)
188 #define CDNS_PORTCTRL_BULK_ENABLE		BIT(16)
189 
190 #define CDNS_PORT_OFFSET			0x80
191 
192 #define CDNS_PDI_CONFIG(n)			(0x1100 + (n) * 16)
193 
194 #define CDNS_PDI_CONFIG_SOFT_RESET		BIT(24)
195 #define CDNS_PDI_CONFIG_CHANNEL			GENMASK(15, 8)
196 #define CDNS_PDI_CONFIG_PORT			GENMASK(4, 0)
197 
198 /* Driver defaults */
199 #define CDNS_TX_TIMEOUT				500
200 
201 #define CDNS_SCP_RX_FIFOLEVEL			0x2
202 
203 /*
204  * register accessor helpers
205  */
206 static inline u32 cdns_readl(struct sdw_cdns *cdns, int offset)
207 {
208 	return readl(cdns->registers + offset);
209 }
210 
211 static inline void cdns_writel(struct sdw_cdns *cdns, int offset, u32 value)
212 {
213 	writel(value, cdns->registers + offset);
214 }
215 
216 static inline u32 cdns_ip_readl(struct sdw_cdns *cdns, int offset)
217 {
218 	return cdns_readl(cdns, cdns->ip_offset + offset);
219 }
220 
221 static inline void cdns_ip_writel(struct sdw_cdns *cdns, int offset, u32 value)
222 {
223 	return cdns_writel(cdns, cdns->ip_offset + offset, value);
224 }
225 
226 static inline void cdns_updatel(struct sdw_cdns *cdns,
227 				int offset, u32 mask, u32 val)
228 {
229 	u32 tmp;
230 
231 	tmp = cdns_readl(cdns, offset);
232 	tmp = (tmp & ~mask) | val;
233 	cdns_writel(cdns, offset, tmp);
234 }
235 
236 static inline void cdns_ip_updatel(struct sdw_cdns *cdns,
237 				   int offset, u32 mask, u32 val)
238 {
239 	cdns_updatel(cdns, cdns->ip_offset + offset, mask, val);
240 }
241 
242 static int cdns_set_wait(struct sdw_cdns *cdns, int offset, u32 mask, u32 value)
243 {
244 	int timeout = 10;
245 	u32 reg_read;
246 
247 	/* Wait for bit to be set */
248 	do {
249 		reg_read = readl(cdns->registers + offset);
250 		if ((reg_read & mask) == value)
251 			return 0;
252 
253 		timeout--;
254 		usleep_range(50, 100);
255 	} while (timeout != 0);
256 
257 	return -ETIMEDOUT;
258 }
259 
260 static int cdns_clear_bit(struct sdw_cdns *cdns, int offset, u32 value)
261 {
262 	writel(value, cdns->registers + offset);
263 
264 	/* Wait for bit to be self cleared */
265 	return cdns_set_wait(cdns, offset, value, 0);
266 }
267 
268 /*
269  * all changes to the MCP_CONFIG, MCP_CONTROL, MCP_CMDCTRL and MCP_PHYCTRL
270  * need to be confirmed with a write to MCP_CONFIG_UPDATE
271  */
272 static int cdns_config_update(struct sdw_cdns *cdns)
273 {
274 	int ret;
275 
276 	if (sdw_cdns_is_clock_stop(cdns)) {
277 		dev_err(cdns->dev, "Cannot program MCP_CONFIG_UPDATE in ClockStopMode\n");
278 		return -EINVAL;
279 	}
280 
281 	ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE,
282 			     CDNS_MCP_CONFIG_UPDATE_BIT);
283 	if (ret < 0)
284 		dev_err(cdns->dev, "Config update timedout\n");
285 
286 	return ret;
287 }
288 
289 /**
290  * sdw_cdns_config_update() - Update configurations
291  * @cdns: Cadence instance
292  */
void sdw_cdns_config_update(struct sdw_cdns *cdns)
{
	/*
	 * commit changes; fire-and-forget write - callers that need to
	 * know when the update completed use
	 * sdw_cdns_config_update_set_wait() afterwards
	 */
	cdns_writel(cdns, CDNS_MCP_CONFIG_UPDATE, CDNS_MCP_CONFIG_UPDATE_BIT);
}
EXPORT_SYMBOL(sdw_cdns_config_update);
299 
300 /**
301  * sdw_cdns_config_update_set_wait() - wait until configuration update bit is self-cleared
302  * @cdns: Cadence instance
303  */
int sdw_cdns_config_update_set_wait(struct sdw_cdns *cdns)
{
	/* the hardware recommendation is to wait at least 300us */
	/* polls until CONFIG_UPDATE_BIT reads back as 0; -ETIMEDOUT otherwise */
	return cdns_set_wait(cdns, CDNS_MCP_CONFIG_UPDATE,
			     CDNS_MCP_CONFIG_UPDATE_BIT, 0);
}
EXPORT_SYMBOL(sdw_cdns_config_update_set_wait);
311 
312 /*
313  * debugfs
314  */
315 #ifdef CONFIG_DEBUG_FS
316 
317 #define RD_BUF (2 * PAGE_SIZE)
318 
319 static ssize_t cdns_sprintf(struct sdw_cdns *cdns,
320 			    char *buf, size_t pos, unsigned int reg)
321 {
322 	return scnprintf(buf + pos, RD_BUF - pos,
323 			 "%4x\t%8x\n", reg, cdns_readl(cdns, reg));
324 }
325 
/* debugfs: dump MCP, status, clock, per-port and per-PDI registers */
static int cdns_reg_show(struct seq_file *s, void *data)
{
	struct sdw_cdns *cdns = s->private;
	ssize_t ret;
	int num_ports;
	int i, j;

	/* scratch buffer auto-freed on any return path via __free(kfree) */
	char *buf __free(kfree) = kzalloc(RD_BUF, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = scnprintf(buf, RD_BUF, "Register  Value\n");
	ret += scnprintf(buf + ret, RD_BUF - ret, "\nMCP Registers\n");
	/* 8 MCP registers */
	for (i = CDNS_MCP_CONFIG; i <= CDNS_MCP_PHYCTRL; i += sizeof(u32))
		ret += cdns_sprintf(cdns, buf, ret, i);

	ret += scnprintf(buf + ret, RD_BUF - ret,
			 "\nStatus & Intr Registers\n");
	/* 13 Status & Intr registers (offsets 0x70 and 0x74 not defined) */
	for (i = CDNS_MCP_STAT; i <=  CDNS_MCP_FIFOSTAT; i += sizeof(u32))
		ret += cdns_sprintf(cdns, buf, ret, i);

	ret += scnprintf(buf + ret, RD_BUF - ret,
			 "\nSSP & Clk ctrl Registers\n");
	ret += cdns_sprintf(cdns, buf, ret, CDNS_MCP_SSP_CTRL0);
	ret += cdns_sprintf(cdns, buf, ret, CDNS_MCP_SSP_CTRL1);
	ret += cdns_sprintf(cdns, buf, ret, CDNS_MCP_CLK_CTRL0);
	ret += cdns_sprintf(cdns, buf, ret, CDNS_MCP_CLK_CTRL1);

	ret += scnprintf(buf + ret, RD_BUF - ret,
			 "\nDPn B0 Registers\n");

	num_ports = cdns->num_ports;

	/* per-port bank 0 registers, CONFIG through HCTRL (< excludes ASYNC_CTRL) */
	for (i = 0; i < num_ports; i++) {
		ret += scnprintf(buf + ret, RD_BUF - ret,
				 "\nDP-%d\n", i);
		for (j = CDNS_DPN_B0_CONFIG(i);
		     j < CDNS_DPN_B0_ASYNC_CTRL(i); j += sizeof(u32))
			ret += cdns_sprintf(cdns, buf, ret, j);
	}

	ret += scnprintf(buf + ret, RD_BUF - ret,
			 "\nDPn B1 Registers\n");
	/* per-port bank 1 registers, same layout as bank 0 */
	for (i = 0; i < num_ports; i++) {
		ret += scnprintf(buf + ret, RD_BUF - ret,
				 "\nDP-%d\n", i);

		for (j = CDNS_DPN_B1_CONFIG(i);
		     j < CDNS_DPN_B1_ASYNC_CTRL(i); j += sizeof(u32))
			ret += cdns_sprintf(cdns, buf, ret, j);
	}

	ret += scnprintf(buf + ret, RD_BUF - ret,
			 "\nDPn Control Registers\n");
	for (i = 0; i < num_ports; i++)
		ret += cdns_sprintf(cdns, buf, ret,
				CDNS_PORTCTRL + i * CDNS_PORT_OFFSET);

	ret += scnprintf(buf + ret, RD_BUF - ret,
			 "\nPDIn Config Registers\n");

	/* number of PDI and ports is interchangeable */
	for (i = 0; i < num_ports; i++)
		ret += cdns_sprintf(cdns, buf, ret, CDNS_PDI_CONFIG(i));

	seq_printf(s, "%s", buf);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(cdns_reg);
398 
/*
 * debugfs write handler: writing the value 1 forces a link hardware
 * reset via sdw_cdns_exit_reset(); any other value is rejected.
 */
static int cdns_hw_reset(void *data, u64 value)
{
	struct sdw_cdns *cdns = data;
	int ret;

	if (value != 1)
		return -EINVAL;

	/* Userspace changed the hardware state behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	ret = sdw_cdns_exit_reset(cdns);

	dev_dbg(cdns->dev, "link hw_reset done: %d\n", ret);

	return ret;
}
416 
417 DEFINE_DEBUGFS_ATTRIBUTE(cdns_hw_reset_fops, NULL, cdns_hw_reset, "%llu\n");
418 
419 static int cdns_parity_error_injection(void *data, u64 value)
420 {
421 	struct sdw_cdns *cdns = data;
422 	struct sdw_bus *bus;
423 	int ret;
424 
425 	if (value != 1)
426 		return -EINVAL;
427 
428 	bus = &cdns->bus;
429 
430 	/*
431 	 * Resume Master device. If this results in a bus reset, the
432 	 * Slave devices will re-attach and be re-enumerated.
433 	 */
434 	ret = pm_runtime_resume_and_get(bus->dev);
435 	if (ret < 0 && ret != -EACCES) {
436 		dev_err_ratelimited(cdns->dev,
437 				    "pm_runtime_resume_and_get failed in %s, ret %d\n",
438 				    __func__, ret);
439 		return ret;
440 	}
441 
442 	/*
443 	 * wait long enough for Slave(s) to be in steady state. This
444 	 * does not need to be super precise.
445 	 */
446 	msleep(200);
447 
448 	/*
449 	 * Take the bus lock here to make sure that any bus transactions
450 	 * will be queued while we inject a parity error on a dummy read
451 	 */
452 	mutex_lock(&bus->bus_lock);
453 
454 	/* program hardware to inject parity error */
455 	cdns_ip_updatel(cdns, CDNS_IP_MCP_CMDCTRL,
456 			CDNS_IP_MCP_CMDCTRL_INSERT_PARITY_ERR,
457 			CDNS_IP_MCP_CMDCTRL_INSERT_PARITY_ERR);
458 
459 	/* commit changes */
460 	ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE, CDNS_MCP_CONFIG_UPDATE_BIT);
461 	if (ret < 0)
462 		goto unlock;
463 
464 	/* do a broadcast dummy read to avoid bus clashes */
465 	ret = sdw_bread_no_pm_unlocked(&cdns->bus, 0xf, SDW_SCP_DEVID_0);
466 	dev_info(cdns->dev, "parity error injection, read: %d\n", ret);
467 
468 	/* program hardware to disable parity error */
469 	cdns_ip_updatel(cdns, CDNS_IP_MCP_CMDCTRL,
470 			CDNS_IP_MCP_CMDCTRL_INSERT_PARITY_ERR,
471 			0);
472 
473 	/* commit changes */
474 	ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE, CDNS_MCP_CONFIG_UPDATE_BIT);
475 	if (ret < 0)
476 		goto unlock;
477 
478 	/* Userspace changed the hardware state behind the kernel's back */
479 	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
480 
481 unlock:
482 	/* Continue bus operation with parity error injection disabled */
483 	mutex_unlock(&bus->bus_lock);
484 
485 	/*
486 	 * allow Master device to enter pm_runtime suspend. This may
487 	 * also result in Slave devices suspending.
488 	 */
489 	pm_runtime_mark_last_busy(bus->dev);
490 	pm_runtime_put_autosuspend(bus->dev);
491 
492 	return 0;
493 }
494 
495 DEFINE_DEBUGFS_ATTRIBUTE(cdns_parity_error_fops, NULL,
496 			 cdns_parity_error_injection, "%llu\n");
497 
498 static int cdns_set_pdi_loopback_source(void *data, u64 value)
499 {
500 	struct sdw_cdns *cdns = data;
501 	unsigned int pdi_out_num = cdns->pcm.num_bd + cdns->pcm.num_out;
502 
503 	if (value > pdi_out_num)
504 		return -EINVAL;
505 
506 	/* Userspace changed the hardware state behind the kernel's back */
507 	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
508 
509 	cdns->pdi_loopback_source = value;
510 
511 	return 0;
512 }
513 DEFINE_DEBUGFS_ATTRIBUTE(cdns_pdi_loopback_source_fops, NULL, cdns_set_pdi_loopback_source, "%llu\n");
514 
515 static int cdns_set_pdi_loopback_target(void *data, u64 value)
516 {
517 	struct sdw_cdns *cdns = data;
518 	unsigned int pdi_in_num = cdns->pcm.num_bd + cdns->pcm.num_in;
519 
520 	if (value > pdi_in_num)
521 		return -EINVAL;
522 
523 	/* Userspace changed the hardware state behind the kernel's back */
524 	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
525 
526 	cdns->pdi_loopback_target = value;
527 
528 	return 0;
529 }
530 DEFINE_DEBUGFS_ATTRIBUTE(cdns_pdi_loopback_target_fops, NULL, cdns_set_pdi_loopback_target, "%llu\n");
531 
532 /**
533  * sdw_cdns_debugfs_init() - Cadence debugfs init
534  * @cdns: Cadence instance
535  * @root: debugfs root
536  */
537 void sdw_cdns_debugfs_init(struct sdw_cdns *cdns, struct dentry *root)
538 {
539 	debugfs_create_file("cdns-registers", 0400, root, cdns, &cdns_reg_fops);
540 
541 	debugfs_create_file("cdns-hw-reset", 0200, root, cdns,
542 			    &cdns_hw_reset_fops);
543 
544 	debugfs_create_file("cdns-parity-error-injection", 0200, root, cdns,
545 			    &cdns_parity_error_fops);
546 
547 	cdns->pdi_loopback_source = -1;
548 	cdns->pdi_loopback_target = -1;
549 
550 	debugfs_create_file("cdns-pdi-loopback-source", 0200, root, cdns,
551 			    &cdns_pdi_loopback_source_fops);
552 
553 	debugfs_create_file("cdns-pdi-loopback-target", 0200, root, cdns,
554 			    &cdns_pdi_loopback_target_fops);
555 
556 }
557 EXPORT_SYMBOL_GPL(sdw_cdns_debugfs_init);
558 
559 #endif /* CONFIG_DEBUG_FS */
560 
561 /*
562  * IO Calls
563  */
564 static enum sdw_command_response
565 cdns_fill_msg_resp(struct sdw_cdns *cdns,
566 		   struct sdw_msg *msg, int count, int offset)
567 {
568 	int nack = 0, no_ack = 0;
569 	int i;
570 
571 	/* check message response */
572 	for (i = 0; i < count; i++) {
573 		if (!(cdns->response_buf[i] & CDNS_MCP_RESP_ACK)) {
574 			no_ack = 1;
575 			dev_vdbg(cdns->dev, "Msg Ack not received, cmd %d\n", i);
576 		}
577 		if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK) {
578 			nack = 1;
579 			dev_err_ratelimited(cdns->dev, "Msg NACK received, cmd %d\n", i);
580 		}
581 	}
582 
583 	if (nack) {
584 		dev_err_ratelimited(cdns->dev, "Msg NACKed for Slave %d\n", msg->dev_num);
585 		return SDW_CMD_FAIL;
586 	}
587 
588 	if (no_ack) {
589 		dev_dbg_ratelimited(cdns->dev, "Msg ignored for Slave %d\n", msg->dev_num);
590 		return SDW_CMD_IGNORED;
591 	}
592 
593 	if (msg->flags == SDW_MSG_FLAG_READ) {
594 		/* fill response */
595 		for (i = 0; i < count; i++)
596 			msg->buf[i + offset] = FIELD_GET(CDNS_MCP_RESP_RDATA,
597 							 cdns->response_buf[i]);
598 	}
599 
600 	return SDW_CMD_OK;
601 }
602 
603 static void cdns_read_response(struct sdw_cdns *cdns)
604 {
605 	u32 num_resp, cmd_base;
606 	int i;
607 
608 	/* RX_FIFO_AVAIL can be 2 entries more than the FIFO size */
609 	BUILD_BUG_ON(ARRAY_SIZE(cdns->response_buf) < CDNS_MCP_CMD_LEN + 2);
610 
611 	num_resp = cdns_readl(cdns, CDNS_MCP_FIFOSTAT);
612 	num_resp &= CDNS_MCP_RX_FIFO_AVAIL;
613 	if (num_resp > ARRAY_SIZE(cdns->response_buf)) {
614 		dev_warn(cdns->dev, "RX AVAIL %d too long\n", num_resp);
615 		num_resp = ARRAY_SIZE(cdns->response_buf);
616 	}
617 
618 	cmd_base = CDNS_IP_MCP_CMD_BASE;
619 
620 	for (i = 0; i < num_resp; i++) {
621 		cdns->response_buf[i] = cdns_ip_readl(cdns, cmd_base);
622 		cmd_base += CDNS_MCP_CMD_WORD_LEN;
623 	}
624 }
625 
/*
 * Queue up to CDNS_MCP_CMD_LEN commands for one chunk of @msg into the
 * command FIFO and, unless @defer is set, wait for the completion that
 * the RX watermark interrupt signals, then parse the responses.
 */
static enum sdw_command_response
_cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
	       int offset, int count, bool defer)
{
	unsigned long time;
	u32 base, i, data;
	u16 addr;

	/* Program the watermark level for RX FIFO */
	if (cdns->msg_count != count) {
		cdns_writel(cdns, CDNS_MCP_FIFOLEVEL, count);
		cdns->msg_count = count;
	}

	base = CDNS_IP_MCP_CMD_BASE;
	addr = msg->addr + offset;

	/* one command word per register, with auto-incrementing address */
	for (i = 0; i < count; i++) {
		data = FIELD_PREP(CDNS_MCP_CMD_DEV_ADDR, msg->dev_num);
		data |= FIELD_PREP(CDNS_MCP_CMD_COMMAND, cmd);
		data |= FIELD_PREP(CDNS_MCP_CMD_REG_ADDR, addr);
		addr++;

		if (msg->flags == SDW_MSG_FLAG_WRITE)
			data |= msg->buf[i + offset];

		data |= FIELD_PREP(CDNS_MCP_CMD_SSP_TAG, msg->ssp_sync);
		cdns_ip_writel(cdns, base, data);
		base += CDNS_MCP_CMD_WORD_LEN;
	}

	/* deferred transfers complete later via the RX_WL interrupt path */
	if (defer)
		return SDW_CMD_OK;

	/* wait for timeout or response */
	time = wait_for_completion_timeout(&cdns->tx_complete,
					   msecs_to_jiffies(CDNS_TX_TIMEOUT));
	if (!time) {
		dev_err(cdns->dev, "IO transfer timed out, cmd %d device %d addr %x len %d\n",
			cmd, msg->dev_num, msg->addr, msg->len);
		msg->len = 0;

		/* Drain anything in the RX_FIFO */
		cdns_read_response(cdns);

		return SDW_CMD_TIMEOUT;
	}

	return cdns_fill_msg_resp(cdns, msg, count, offset);
}
676 
/*
 * Program SDW_SCP_ADDRPAGE1/2 on the target device with two back-to-back
 * write commands so that subsequent commands address the register page
 * selected by msg->addr_page1/2.
 */
static enum sdw_command_response
cdns_program_scp_addr(struct sdw_cdns *cdns, struct sdw_msg *msg)
{
	int nack = 0, no_ack = 0;
	unsigned long time;
	u32 data[2], base;
	int i;

	/* Program the watermark level for RX FIFO */
	if (cdns->msg_count != CDNS_SCP_RX_FIFOLEVEL) {
		cdns_writel(cdns, CDNS_MCP_FIFOLEVEL, CDNS_SCP_RX_FIFOLEVEL);
		cdns->msg_count = CDNS_SCP_RX_FIFOLEVEL;
	}

	/* build two write commands (0x3) differing only in address and data */
	data[0] = FIELD_PREP(CDNS_MCP_CMD_DEV_ADDR, msg->dev_num);
	data[0] |= FIELD_PREP(CDNS_MCP_CMD_COMMAND, 0x3);
	data[1] = data[0];

	data[0] |= FIELD_PREP(CDNS_MCP_CMD_REG_ADDR, SDW_SCP_ADDRPAGE1);
	data[1] |= FIELD_PREP(CDNS_MCP_CMD_REG_ADDR, SDW_SCP_ADDRPAGE2);

	data[0] |= msg->addr_page1;
	data[1] |= msg->addr_page2;

	base = CDNS_IP_MCP_CMD_BASE;
	cdns_ip_writel(cdns, base, data[0]);
	base += CDNS_MCP_CMD_WORD_LEN;
	cdns_ip_writel(cdns, base, data[1]);

	time = wait_for_completion_timeout(&cdns->tx_complete,
					   msecs_to_jiffies(CDNS_TX_TIMEOUT));
	if (!time) {
		dev_err(cdns->dev, "SCP Msg trf timed out\n");
		msg->len = 0;
		return SDW_CMD_TIMEOUT;
	}

	/* check response the writes */
	/*
	 * NOTE(review): the NACK test is nested inside the "no ACK" branch,
	 * so a response with both ACK and NACK set would not be reported as
	 * NACKed here - cdns_fill_msg_resp() checks the two bits
	 * independently; confirm which behavior matches the controller's
	 * response encoding.
	 */
	for (i = 0; i < 2; i++) {
		if (!(cdns->response_buf[i] & CDNS_MCP_RESP_ACK)) {
			no_ack = 1;
			dev_err(cdns->dev, "Program SCP Ack not received\n");
			if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK) {
				nack = 1;
				dev_err(cdns->dev, "Program SCP NACK received\n");
			}
		}
	}

	/* For NACK, NO ack, don't return err if we are in Broadcast mode */
	if (nack) {
		dev_err_ratelimited(cdns->dev,
				    "SCP_addrpage NACKed for Slave %d\n", msg->dev_num);
		return SDW_CMD_FAIL;
	}

	if (no_ack) {
		dev_dbg_ratelimited(cdns->dev,
				    "SCP_addrpage ignored for Slave %d\n", msg->dev_num);
		return SDW_CMD_IGNORED;
	}

	return SDW_CMD_OK;
}
741 
742 static int cdns_prep_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int *cmd)
743 {
744 	int ret;
745 
746 	if (msg->page) {
747 		ret = cdns_program_scp_addr(cdns, msg);
748 		if (ret) {
749 			msg->len = 0;
750 			return ret;
751 		}
752 	}
753 
754 	switch (msg->flags) {
755 	case SDW_MSG_FLAG_READ:
756 		*cmd = CDNS_MCP_CMD_READ;
757 		break;
758 
759 	case SDW_MSG_FLAG_WRITE:
760 		*cmd = CDNS_MCP_CMD_WRITE;
761 		break;
762 
763 	default:
764 		dev_err(cdns->dev, "Invalid msg cmd: %d\n", msg->flags);
765 		return -EINVAL;
766 	}
767 
768 	return 0;
769 }
770 
771 enum sdw_command_response
772 cdns_xfer_msg(struct sdw_bus *bus, struct sdw_msg *msg)
773 {
774 	struct sdw_cdns *cdns = bus_to_cdns(bus);
775 	int cmd = 0, ret, i;
776 
777 	ret = cdns_prep_msg(cdns, msg, &cmd);
778 	if (ret)
779 		return SDW_CMD_FAIL_OTHER;
780 
781 	for (i = 0; i < msg->len / CDNS_MCP_CMD_LEN; i++) {
782 		ret = _cdns_xfer_msg(cdns, msg, cmd, i * CDNS_MCP_CMD_LEN,
783 				     CDNS_MCP_CMD_LEN, false);
784 		if (ret != SDW_CMD_OK)
785 			return ret;
786 	}
787 
788 	if (!(msg->len % CDNS_MCP_CMD_LEN))
789 		return SDW_CMD_OK;
790 
791 	return _cdns_xfer_msg(cdns, msg, cmd, i * CDNS_MCP_CMD_LEN,
792 			      msg->len % CDNS_MCP_CMD_LEN, false);
793 }
794 EXPORT_SYMBOL(cdns_xfer_msg);
795 
796 enum sdw_command_response
797 cdns_xfer_msg_defer(struct sdw_bus *bus)
798 {
799 	struct sdw_cdns *cdns = bus_to_cdns(bus);
800 	struct sdw_defer *defer = &bus->defer_msg;
801 	struct sdw_msg *msg = defer->msg;
802 	int cmd = 0, ret;
803 
804 	/* for defer only 1 message is supported */
805 	if (msg->len > 1)
806 		return -ENOTSUPP;
807 
808 	ret = cdns_prep_msg(cdns, msg, &cmd);
809 	if (ret)
810 		return SDW_CMD_FAIL_OTHER;
811 
812 	return _cdns_xfer_msg(cdns, msg, cmd, 0, msg->len, true);
813 }
814 EXPORT_SYMBOL(cdns_xfer_msg_defer);
815 
/*
 * Return the raw PING-frame peripheral status word from MCP_SLAVE_STAT
 * (2 bits of status per device number, see cdns_update_slave_status()).
 */
u32 cdns_read_ping_status(struct sdw_bus *bus)
{
	struct sdw_cdns *cdns = bus_to_cdns(bus);

	return cdns_readl(cdns, CDNS_MCP_SLAVE_STAT);
}
EXPORT_SYMBOL(cdns_read_ping_status);
823 
824 /*
825  * IRQ handling
826  */
827 
/*
 * Convert the combined 64-bit SLAVE_INTSTAT0/1 snapshot into a
 * per-device sdw_slave_status array and hand it to the bus layer.
 *
 * Each device number owns CDNS_MCP_SLAVE_STATUS_NUM (4) sticky bits in
 * @slave_intstat; when zero or more than one of them is set for a
 * device, the current 2-bit state from MCP_SLAVE_STAT (PING status) is
 * used instead.
 */
static int cdns_update_slave_status(struct sdw_cdns *cdns,
				    u64 slave_intstat)
{
	enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
	bool is_slave = false;
	u32 mask;
	u32 val;
	int i, set_status;

	memset(status, 0, sizeof(status));

	for (i = 0; i <= SDW_MAX_DEVICES; i++) {
		mask = (slave_intstat >> (i * CDNS_MCP_SLAVE_STATUS_NUM)) &
			CDNS_MCP_SLAVE_STATUS_BITS;

		set_status = 0;

		if (mask) {
			is_slave = true;

			/* if several bits are set, the later checks win */
			if (mask & CDNS_MCP_SLAVE_INTSTAT_RESERVED) {
				status[i] = SDW_SLAVE_RESERVED;
				set_status++;
			}

			if (mask & CDNS_MCP_SLAVE_INTSTAT_ATTACHED) {
				status[i] = SDW_SLAVE_ATTACHED;
				set_status++;
			}

			if (mask & CDNS_MCP_SLAVE_INTSTAT_ALERT) {
				status[i] = SDW_SLAVE_ALERT;
				set_status++;
			}

			if (mask & CDNS_MCP_SLAVE_INTSTAT_NPRESENT) {
				status[i] = SDW_SLAVE_UNATTACHED;
				set_status++;
			}
		}

		/*
		 * check that there was a single reported Slave status and when
		 * there is not use the latest status extracted from PING commands
		 */
		if (set_status != 1) {
			val = cdns_readl(cdns, CDNS_MCP_SLAVE_STAT);
			val >>= (i * 2);

			switch (val & 0x3) {
			case 0:
				status[i] = SDW_SLAVE_UNATTACHED;
				break;
			case 1:
				status[i] = SDW_SLAVE_ATTACHED;
				break;
			case 2:
				status[i] = SDW_SLAVE_ALERT;
				break;
			case 3:
			default:
				status[i] = SDW_SLAVE_RESERVED;
				break;
			}
		}
	}

	if (is_slave) {
		int ret;

		/* serialize with the delayed attach-status work */
		mutex_lock(&cdns->status_update_lock);
		ret = sdw_handle_slave_status(&cdns->bus, status);
		mutex_unlock(&cdns->status_update_lock);
		return ret;
	}

	return 0;
}
906 
907 /**
908  * sdw_cdns_irq() - Cadence interrupt handler
909  * @irq: irq number
910  * @dev_id: irq context
911  */
irqreturn_t sdw_cdns_irq(int irq, void *dev_id)
{
	struct sdw_cdns *cdns = dev_id;
	u32 int_status;

	/* Check if the link is up */
	if (!cdns->link_up)
		return IRQ_NONE;

	int_status = cdns_readl(cdns, CDNS_MCP_INTSTAT);

	/* check for reserved values read as zero */
	if (int_status & CDNS_MCP_INT_RESERVED)
		return IRQ_NONE;

	if (!(int_status & CDNS_MCP_INT_IRQ))
		return IRQ_NONE;

	/* RX watermark: command responses are ready to be drained */
	if (int_status & CDNS_MCP_INT_RX_WL) {
		struct sdw_bus *bus = &cdns->bus;
		struct sdw_defer *defer = &bus->defer_msg;

		cdns_read_response(cdns);

		/*
		 * Clear interrupt before signalling the completion to avoid
		 * a race between this thread and the main thread starting
		 * another TX.
		 */
		cdns_writel(cdns, CDNS_MCP_INTSTAT, CDNS_MCP_INT_RX_WL);
		int_status &= ~CDNS_MCP_INT_RX_WL;

		/* deferred transfers complete here rather than in the caller */
		if (defer && defer->msg) {
			cdns_fill_msg_resp(cdns, defer->msg,
					   defer->length, 0);
			complete(&defer->complete);
		} else {
			complete(&cdns->tx_complete);
		}
	}

	if (int_status & CDNS_MCP_INT_PARITY) {
		/* Parity error detected by Master */
		dev_err_ratelimited(cdns->dev, "Parity error\n");
	}

	if (int_status & CDNS_MCP_INT_CTRL_CLASH) {
		/* Slave is driving bit slot during control word */
		dev_err_ratelimited(cdns->dev, "Bus clash for control word\n");
	}

	if (int_status & CDNS_MCP_INT_DATA_CLASH) {
		/*
		 * Multiple slaves trying to drive bit slot, or issue with
		 * ownership of data bits or Slave gone bonkers
		 */
		dev_err_ratelimited(cdns->dev, "Bus clash for data word\n");
	}

	/* port errors are only reported when a test data mode is active */
	if (cdns->bus.params.m_data_mode != SDW_PORT_DATA_MODE_NORMAL &&
	    int_status & CDNS_MCP_INT_DPINT) {
		u32 port_intstat;

		/* just log which ports report an error */
		port_intstat = cdns_readl(cdns, CDNS_MCP_PORT_INTSTAT);
		dev_err_ratelimited(cdns->dev, "DP interrupt: PortIntStat %8x\n",
				    port_intstat);

		/* clear status w/ write1 */
		cdns_writel(cdns, CDNS_MCP_PORT_INTSTAT, port_intstat);
	}

	if (int_status & CDNS_MCP_INT_SLAVE_MASK) {
		/* Mask the Slave interrupt and wake thread */
		cdns_updatel(cdns, CDNS_MCP_INTMASK,
			     CDNS_MCP_INT_SLAVE_MASK, 0);

		/* leave the bit set in INTSTAT; the work clears it itself */
		int_status &= ~CDNS_MCP_INT_SLAVE_MASK;

		/*
		 * Deal with possible race condition between interrupt
		 * handling and disabling interrupts on suspend.
		 *
		 * If the master is in the process of disabling
		 * interrupts, don't schedule a workqueue
		 */
		if (cdns->interrupt_enabled)
			schedule_work(&cdns->work);
	}

	cdns_writel(cdns, CDNS_MCP_INTSTAT, int_status);
	return IRQ_HANDLED;
}
EXPORT_SYMBOL(sdw_cdns_irq);
1006 
1007 static void cdns_check_attached_status_dwork(struct work_struct *work)
1008 {
1009 	struct sdw_cdns *cdns =
1010 		container_of(work, struct sdw_cdns, attach_dwork.work);
1011 	enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
1012 	u32 val;
1013 	int ret;
1014 	int i;
1015 
1016 	val = cdns_readl(cdns, CDNS_MCP_SLAVE_STAT);
1017 
1018 	for (i = 0; i <= SDW_MAX_DEVICES; i++) {
1019 		status[i] = val & 0x3;
1020 		if (status[i])
1021 			dev_dbg(cdns->dev, "Peripheral %d status: %d\n", i, status[i]);
1022 		val >>= 2;
1023 	}
1024 
1025 	mutex_lock(&cdns->status_update_lock);
1026 	ret = sdw_handle_slave_status(&cdns->bus, status);
1027 	mutex_unlock(&cdns->status_update_lock);
1028 	if (ret < 0)
1029 		dev_err(cdns->dev, "%s: sdw_handle_slave_status failed: %d\n", __func__, ret);
1030 }
1031 
1032 /**
1033  * cdns_update_slave_status_work - update slave status in a work since we will need to handle
1034  * other interrupts eg. CDNS_MCP_INT_RX_WL during the update slave
1035  * process.
1036  * @work: cdns worker thread
1037  */
1038 static void cdns_update_slave_status_work(struct work_struct *work)
1039 {
1040 	struct sdw_cdns *cdns =
1041 		container_of(work, struct sdw_cdns, work);
1042 	u32 slave0, slave1;
1043 	u64 slave_intstat;
1044 	u32 device0_status;
1045 	int retry_count = 0;
1046 
1047 	/*
1048 	 * Clear main interrupt first so we don't lose any assertions
1049 	 * that happen during this function.
1050 	 */
1051 	cdns_writel(cdns, CDNS_MCP_INTSTAT, CDNS_MCP_INT_SLAVE_MASK);
1052 
1053 	slave0 = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT0);
1054 	slave1 = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT1);
1055 
1056 	/*
1057 	 * Clear the bits before handling so we don't lose any
1058 	 * bits that re-assert.
1059 	 */
1060 	cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT0, slave0);
1061 	cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT1, slave1);
1062 
1063 	/* combine the two status */
1064 	slave_intstat = ((u64)slave1 << 32) | slave0;
1065 
1066 	dev_dbg_ratelimited(cdns->dev, "Slave status change: 0x%llx\n", slave_intstat);
1067 
1068 update_status:
1069 	cdns_update_slave_status(cdns, slave_intstat);
1070 
1071 	/*
1072 	 * When there is more than one peripheral per link, it's
1073 	 * possible that a deviceB becomes attached after we deal with
1074 	 * the attachment of deviceA. Since the hardware does a
1075 	 * logical AND, the attachment of the second device does not
1076 	 * change the status seen by the driver.
1077 	 *
1078 	 * In that case, clearing the registers above would result in
1079 	 * the deviceB never being detected - until a change of status
1080 	 * is observed on the bus.
1081 	 *
1082 	 * To avoid this race condition, re-check if any device0 needs
1083 	 * attention with PING commands. There is no need to check for
1084 	 * ALERTS since they are not allowed until a non-zero
1085 	 * device_number is assigned.
1086 	 *
1087 	 * Do not clear the INTSTAT0/1. While looping to enumerate devices on
1088 	 * #0 there could be status changes on other devices - these must
1089 	 * be kept in the INTSTAT so they can be handled when all #0 devices
1090 	 * have been handled.
1091 	 */
1092 
1093 	device0_status = cdns_readl(cdns, CDNS_MCP_SLAVE_STAT);
1094 	device0_status &= 3;
1095 
1096 	if (device0_status == SDW_SLAVE_ATTACHED) {
1097 		if (retry_count++ < SDW_MAX_DEVICES) {
1098 			dev_dbg_ratelimited(cdns->dev,
1099 					    "Device0 detected after clearing status, iteration %d\n",
1100 					    retry_count);
1101 			slave_intstat = CDNS_MCP_SLAVE_INTSTAT_ATTACHED;
1102 			goto update_status;
1103 		} else {
1104 			dev_err_ratelimited(cdns->dev,
1105 					    "Device0 detected after %d iterations\n",
1106 					    retry_count);
1107 		}
1108 	}
1109 
1110 	/* unmask Slave interrupt now */
1111 	cdns_updatel(cdns, CDNS_MCP_INTMASK,
1112 		     CDNS_MCP_INT_SLAVE_MASK, CDNS_MCP_INT_SLAVE_MASK);
1113 
1114 }
1115 
1116 /* paranoia check to make sure self-cleared bits are indeed cleared */
1117 void sdw_cdns_check_self_clearing_bits(struct sdw_cdns *cdns, const char *string,
1118 				       bool initial_delay, int reset_iterations)
1119 {
1120 	u32 ip_mcp_control;
1121 	u32 mcp_control;
1122 	u32 mcp_config_update;
1123 	int i;
1124 
1125 	if (initial_delay)
1126 		usleep_range(1000, 1500);
1127 
1128 	ip_mcp_control = cdns_ip_readl(cdns, CDNS_IP_MCP_CONTROL);
1129 
1130 	/* the following bits should be cleared immediately */
1131 	if (ip_mcp_control & CDNS_IP_MCP_CONTROL_SW_RST)
1132 		dev_err(cdns->dev, "%s failed: IP_MCP_CONTROL_SW_RST is not cleared\n", string);
1133 
1134 	mcp_control = cdns_readl(cdns, CDNS_MCP_CONTROL);
1135 
1136 	/* the following bits should be cleared immediately */
1137 	if (mcp_control & CDNS_MCP_CONTROL_CMD_RST)
1138 		dev_err(cdns->dev, "%s failed: MCP_CONTROL_CMD_RST is not cleared\n", string);
1139 	if (mcp_control & CDNS_MCP_CONTROL_SOFT_RST)
1140 		dev_err(cdns->dev, "%s failed: MCP_CONTROL_SOFT_RST is not cleared\n", string);
1141 	if (mcp_control & CDNS_MCP_CONTROL_CLK_STOP_CLR)
1142 		dev_err(cdns->dev, "%s failed: MCP_CONTROL_CLK_STOP_CLR is not cleared\n", string);
1143 
1144 	mcp_config_update = cdns_readl(cdns, CDNS_MCP_CONFIG_UPDATE);
1145 	if (mcp_config_update & CDNS_MCP_CONFIG_UPDATE_BIT)
1146 		dev_err(cdns->dev, "%s failed: MCP_CONFIG_UPDATE_BIT is not cleared\n", string);
1147 
1148 	i = 0;
1149 	while (mcp_control & CDNS_MCP_CONTROL_HW_RST) {
1150 		if (i == reset_iterations) {
1151 			dev_err(cdns->dev, "%s failed: MCP_CONTROL_HW_RST is not cleared\n", string);
1152 			break;
1153 		}
1154 
1155 		dev_dbg(cdns->dev, "%s: MCP_CONTROL_HW_RST is not cleared at iteration %d\n", string, i);
1156 		i++;
1157 
1158 		usleep_range(1000, 1500);
1159 		mcp_control = cdns_readl(cdns, CDNS_MCP_CONTROL);
1160 	}
1161 
1162 }
1163 EXPORT_SYMBOL(sdw_cdns_check_self_clearing_bits);
1164 
1165 /*
1166  * init routines
1167  */
1168 
1169 /**
1170  * sdw_cdns_exit_reset() - Program reset parameters and start bus operations
1171  * @cdns: Cadence instance
1172  */
1173 int sdw_cdns_exit_reset(struct sdw_cdns *cdns)
1174 {
1175 	/* keep reset delay unchanged to 4096 cycles */
1176 
1177 	/* use hardware generated reset */
1178 	cdns_updatel(cdns, CDNS_MCP_CONTROL,
1179 		     CDNS_MCP_CONTROL_HW_RST,
1180 		     CDNS_MCP_CONTROL_HW_RST);
1181 
1182 	/* commit changes */
1183 	return cdns_config_update(cdns);
1184 }
1185 EXPORT_SYMBOL(sdw_cdns_exit_reset);
1186 
1187 /**
1188  * cdns_enable_slave_interrupts() - Enable SDW slave interrupts
1189  * @cdns: Cadence instance
1190  * @state: boolean for true/false
1191  */
1192 static void cdns_enable_slave_interrupts(struct sdw_cdns *cdns, bool state)
1193 {
1194 	u32 mask;
1195 
1196 	mask = cdns_readl(cdns, CDNS_MCP_INTMASK);
1197 	if (state)
1198 		mask |= CDNS_MCP_INT_SLAVE_MASK;
1199 	else
1200 		mask &= ~CDNS_MCP_INT_SLAVE_MASK;
1201 
1202 	cdns_writel(cdns, CDNS_MCP_INTMASK, mask);
1203 }
1204 
1205 /**
1206  * sdw_cdns_enable_interrupt() - Enable SDW interrupts
1207  * @cdns: Cadence instance
1208  * @state: True if we are trying to enable interrupt.
1209  */
1210 int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns, bool state)
1211 {
1212 	u32 slave_intmask0 = 0;
1213 	u32 slave_intmask1 = 0;
1214 	u32 mask = 0;
1215 
1216 	if (!state)
1217 		goto update_masks;
1218 
1219 	slave_intmask0 = CDNS_MCP_SLAVE_INTMASK0_MASK;
1220 	slave_intmask1 = CDNS_MCP_SLAVE_INTMASK1_MASK;
1221 
1222 	/* enable detection of all slave state changes */
1223 	mask = CDNS_MCP_INT_SLAVE_MASK;
1224 
1225 	/* enable detection of bus issues */
1226 	mask |= CDNS_MCP_INT_CTRL_CLASH | CDNS_MCP_INT_DATA_CLASH |
1227 		CDNS_MCP_INT_PARITY;
1228 
1229 	/* port interrupt limited to test modes for now */
1230 	if (cdns->bus.params.m_data_mode != SDW_PORT_DATA_MODE_NORMAL)
1231 		mask |= CDNS_MCP_INT_DPINT;
1232 
1233 	/* enable detection of RX fifo level */
1234 	mask |= CDNS_MCP_INT_RX_WL;
1235 
1236 	/*
1237 	 * CDNS_MCP_INT_IRQ needs to be set otherwise all previous
1238 	 * settings are irrelevant
1239 	 */
1240 	mask |= CDNS_MCP_INT_IRQ;
1241 
1242 	if (interrupt_mask) /* parameter override */
1243 		mask = interrupt_mask;
1244 
1245 update_masks:
1246 	/* clear slave interrupt status before enabling interrupt */
1247 	if (state) {
1248 		u32 slave_state;
1249 
1250 		slave_state = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT0);
1251 		cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT0, slave_state);
1252 		slave_state = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT1);
1253 		cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT1, slave_state);
1254 	}
1255 	cdns->interrupt_enabled = state;
1256 
1257 	/*
1258 	 * Complete any on-going status updates before updating masks,
1259 	 * and cancel queued status updates.
1260 	 *
1261 	 * There could be a race with a new interrupt thrown before
1262 	 * the 3 mask updates below are complete, so in the interrupt
1263 	 * we use the 'interrupt_enabled' status to prevent new work
1264 	 * from being queued.
1265 	 */
1266 	if (!state)
1267 		cancel_work_sync(&cdns->work);
1268 
1269 	cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK0, slave_intmask0);
1270 	cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK1, slave_intmask1);
1271 	cdns_writel(cdns, CDNS_MCP_INTMASK, mask);
1272 
1273 	return 0;
1274 }
1275 EXPORT_SYMBOL(sdw_cdns_enable_interrupt);
1276 
1277 static int cdns_allocate_pdi(struct sdw_cdns *cdns,
1278 			     struct sdw_cdns_pdi **stream,
1279 			     u32 num)
1280 {
1281 	struct sdw_cdns_pdi *pdi;
1282 	int i;
1283 
1284 	if (!num)
1285 		return 0;
1286 
1287 	pdi = devm_kcalloc(cdns->dev, num, sizeof(*pdi), GFP_KERNEL);
1288 	if (!pdi)
1289 		return -ENOMEM;
1290 
1291 	for (i = 0; i < num; i++) {
1292 		pdi[i].num = i;
1293 	}
1294 
1295 	*stream = pdi;
1296 	return 0;
1297 }
1298 
1299 /**
1300  * sdw_cdns_pdi_init() - PDI initialization routine
1301  *
1302  * @cdns: Cadence instance
1303  * @config: Stream configurations
1304  */
1305 int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
1306 		      struct sdw_cdns_stream_config config)
1307 {
1308 	struct sdw_cdns_streams *stream;
1309 	int ret;
1310 
1311 	cdns->pcm.num_bd = config.pcm_bd;
1312 	cdns->pcm.num_in = config.pcm_in;
1313 	cdns->pcm.num_out = config.pcm_out;
1314 
1315 	/* Allocate PDIs for PCMs */
1316 	stream = &cdns->pcm;
1317 
1318 	/* we allocate PDI0 and PDI1 which are used for Bulk */
1319 	ret = cdns_allocate_pdi(cdns, &stream->bd, stream->num_bd);
1320 	if (ret)
1321 		return ret;
1322 
1323 	ret = cdns_allocate_pdi(cdns, &stream->in, stream->num_in);
1324 	if (ret)
1325 		return ret;
1326 
1327 	ret = cdns_allocate_pdi(cdns, &stream->out, stream->num_out);
1328 	if (ret)
1329 		return ret;
1330 
1331 	/* Update total number of PCM PDIs */
1332 	stream->num_pdi = stream->num_bd + stream->num_in + stream->num_out;
1333 	cdns->num_ports = stream->num_pdi;
1334 
1335 	return 0;
1336 }
1337 EXPORT_SYMBOL(sdw_cdns_pdi_init);
1338 
1339 static u32 cdns_set_initial_frame_shape(int n_rows, int n_cols)
1340 {
1341 	u32 val;
1342 	int c;
1343 	int r;
1344 
1345 	r = sdw_find_row_index(n_rows);
1346 	c = sdw_find_col_index(n_cols);
1347 
1348 	val = FIELD_PREP(CDNS_MCP_FRAME_SHAPE_ROW_MASK, r);
1349 	val |= FIELD_PREP(CDNS_MCP_FRAME_SHAPE_COL_MASK, c);
1350 
1351 	return val;
1352 }
1353 
1354 static int cdns_init_clock_ctrl(struct sdw_cdns *cdns)
1355 {
1356 	struct sdw_bus *bus = &cdns->bus;
1357 	struct sdw_master_prop *prop = &bus->prop;
1358 	u32 val;
1359 	u32 ssp_interval;
1360 	int divider;
1361 
1362 	dev_dbg(cdns->dev, "mclk %d max %d row %d col %d\n",
1363 		prop->mclk_freq,
1364 		prop->max_clk_freq,
1365 		prop->default_row,
1366 		prop->default_col);
1367 
1368 	if (!prop->default_frame_rate || !prop->default_row) {
1369 		dev_err(cdns->dev, "Default frame_rate %d or row %d is invalid\n",
1370 			prop->default_frame_rate, prop->default_row);
1371 		return -EINVAL;
1372 	}
1373 
1374 	/* Set clock divider */
1375 	divider	= (prop->mclk_freq * SDW_DOUBLE_RATE_FACTOR /
1376 		bus->params.curr_dr_freq) - 1;
1377 
1378 	cdns_updatel(cdns, CDNS_MCP_CLK_CTRL0,
1379 		     CDNS_MCP_CLK_MCLKD_MASK, divider);
1380 	cdns_updatel(cdns, CDNS_MCP_CLK_CTRL1,
1381 		     CDNS_MCP_CLK_MCLKD_MASK, divider);
1382 
1383 	/* Set frame shape base on the actual bus frequency. */
1384 	prop->default_col = bus->params.curr_dr_freq /
1385 			    prop->default_frame_rate / prop->default_row;
1386 
1387 	/*
1388 	 * Frame shape changes after initialization have to be done
1389 	 * with the bank switch mechanism
1390 	 */
1391 	val = cdns_set_initial_frame_shape(prop->default_row,
1392 					   prop->default_col);
1393 	cdns_writel(cdns, CDNS_MCP_FRAME_SHAPE_INIT, val);
1394 
1395 	/* Set SSP interval to default value */
1396 	ssp_interval = prop->default_frame_rate / SDW_CADENCE_GSYNC_HZ;
1397 	cdns_writel(cdns, CDNS_MCP_SSP_CTRL0, ssp_interval);
1398 	cdns_writel(cdns, CDNS_MCP_SSP_CTRL1, ssp_interval);
1399 
1400 	return 0;
1401 }
1402 
1403 /**
1404  * sdw_cdns_soft_reset() - Cadence soft-reset
1405  * @cdns: Cadence instance
1406  */
1407 int sdw_cdns_soft_reset(struct sdw_cdns *cdns)
1408 {
1409 	int ret;
1410 
1411 	cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_SOFT_RST,
1412 		     CDNS_MCP_CONTROL_SOFT_RST);
1413 
1414 	ret = cdns_config_update(cdns);
1415 	if (ret < 0) {
1416 		dev_err(cdns->dev, "%s: config update failed\n", __func__);
1417 		return ret;
1418 	}
1419 
1420 	ret = cdns_set_wait(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_SOFT_RST, 0);
1421 	if (ret < 0)
1422 		dev_err(cdns->dev, "%s: Soft Reset timed out\n", __func__);
1423 
1424 	return ret;
1425 }
1426 EXPORT_SYMBOL(sdw_cdns_soft_reset);
1427 
1428 /**
1429  * sdw_cdns_init() - Cadence initialization
1430  * @cdns: Cadence instance
1431  */
1432 int sdw_cdns_init(struct sdw_cdns *cdns)
1433 {
1434 	int ret;
1435 	u32 val;
1436 
1437 	ret = cdns_init_clock_ctrl(cdns);
1438 	if (ret)
1439 		return ret;
1440 
1441 	sdw_cdns_check_self_clearing_bits(cdns, __func__, false, 0);
1442 
1443 	/* reset msg_count to default value of FIFOLEVEL */
1444 	cdns->msg_count = cdns_readl(cdns, CDNS_MCP_FIFOLEVEL);
1445 
1446 	/* flush command FIFOs */
1447 	cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_RST,
1448 		     CDNS_MCP_CONTROL_CMD_RST);
1449 
1450 	/* Set cmd accept mode */
1451 	cdns_ip_updatel(cdns, CDNS_IP_MCP_CONTROL, CDNS_IP_MCP_CONTROL_CMD_ACCEPT,
1452 			CDNS_IP_MCP_CONTROL_CMD_ACCEPT);
1453 
1454 	/* disable wakeup */
1455 	cdns_ip_updatel(cdns, CDNS_IP_MCP_CONTROL,
1456 			CDNS_IP_MCP_CONTROL_BLOCK_WAKEUP,
1457 			0);
1458 
1459 	/* Configure mcp config */
1460 	val = cdns_readl(cdns, CDNS_MCP_CONFIG);
1461 
1462 	/* Disable auto bus release */
1463 	val &= ~CDNS_MCP_CONFIG_BUS_REL;
1464 
1465 	cdns_writel(cdns, CDNS_MCP_CONFIG, val);
1466 
1467 	/* Configure IP mcp config */
1468 	val = cdns_ip_readl(cdns, CDNS_IP_MCP_CONFIG);
1469 
1470 	/* enable bus operations with clock and data */
1471 	val &= ~CDNS_IP_MCP_CONFIG_OP;
1472 	val |= CDNS_IP_MCP_CONFIG_OP_NORMAL;
1473 
1474 	/* Set cmd mode for Tx and Rx cmds */
1475 	val &= ~CDNS_IP_MCP_CONFIG_CMD;
1476 
1477 	/* Disable sniffer mode */
1478 	val &= ~CDNS_IP_MCP_CONFIG_SNIFFER;
1479 
1480 	if (cdns->bus.multi_link)
1481 		/* Set Multi-master mode to take gsync into account */
1482 		val |= CDNS_IP_MCP_CONFIG_MMASTER;
1483 
1484 	/* leave frame delay to hardware default of 0x1F */
1485 
1486 	/* leave command retry to hardware default of 0 */
1487 
1488 	cdns_ip_writel(cdns, CDNS_IP_MCP_CONFIG, val);
1489 
1490 	/* changes will be committed later */
1491 	return 0;
1492 }
1493 EXPORT_SYMBOL(sdw_cdns_init);
1494 
1495 int cdns_bus_conf(struct sdw_bus *bus, struct sdw_bus_params *params)
1496 {
1497 	struct sdw_master_prop *prop = &bus->prop;
1498 	struct sdw_cdns *cdns = bus_to_cdns(bus);
1499 	int mcp_clkctrl_off;
1500 	int divider;
1501 
1502 	if (!params->curr_dr_freq) {
1503 		dev_err(cdns->dev, "NULL curr_dr_freq\n");
1504 		return -EINVAL;
1505 	}
1506 
1507 	divider	= prop->mclk_freq * SDW_DOUBLE_RATE_FACTOR /
1508 		params->curr_dr_freq;
1509 	divider--; /* divider is 1/(N+1) */
1510 
1511 	if (params->next_bank)
1512 		mcp_clkctrl_off = CDNS_MCP_CLK_CTRL1;
1513 	else
1514 		mcp_clkctrl_off = CDNS_MCP_CLK_CTRL0;
1515 
1516 	cdns_updatel(cdns, mcp_clkctrl_off, CDNS_MCP_CLK_MCLKD_MASK, divider);
1517 
1518 	return 0;
1519 }
1520 EXPORT_SYMBOL(cdns_bus_conf);
1521 
1522 static int cdns_port_params(struct sdw_bus *bus,
1523 			    struct sdw_port_params *p_params, unsigned int bank)
1524 {
1525 	struct sdw_cdns *cdns = bus_to_cdns(bus);
1526 	int dpn_config_off_source;
1527 	int dpn_config_off_target;
1528 	int target_num = p_params->num;
1529 	int source_num = p_params->num;
1530 	bool override = false;
1531 	int dpn_config;
1532 
1533 	if (target_num == cdns->pdi_loopback_target &&
1534 	    cdns->pdi_loopback_source != -1) {
1535 		source_num = cdns->pdi_loopback_source;
1536 		override = true;
1537 	}
1538 
1539 	if (bank) {
1540 		dpn_config_off_source = CDNS_DPN_B1_CONFIG(source_num);
1541 		dpn_config_off_target = CDNS_DPN_B1_CONFIG(target_num);
1542 	} else {
1543 		dpn_config_off_source = CDNS_DPN_B0_CONFIG(source_num);
1544 		dpn_config_off_target = CDNS_DPN_B0_CONFIG(target_num);
1545 	}
1546 
1547 	dpn_config = cdns_readl(cdns, dpn_config_off_source);
1548 
1549 	/* use port params if there is no loopback, otherwise use source as is */
1550 	if (!override) {
1551 		u32p_replace_bits(&dpn_config, p_params->bps - 1, CDNS_DPN_CONFIG_WL);
1552 		u32p_replace_bits(&dpn_config, p_params->flow_mode, CDNS_DPN_CONFIG_PORT_FLOW);
1553 		u32p_replace_bits(&dpn_config, p_params->data_mode, CDNS_DPN_CONFIG_PORT_DAT);
1554 	}
1555 
1556 	cdns_writel(cdns, dpn_config_off_target, dpn_config);
1557 
1558 	return 0;
1559 }
1560 
/*
 * Program the transport parameters (config, offsets, hstart/hstop,
 * sample interval) for a port/bank. When the PDI loopback is active,
 * the values are copied from the loopback source port registers
 * instead of being derived from @t_params.
 */
static int cdns_transport_params(struct sdw_bus *bus,
				 struct sdw_transport_params *t_params,
				 enum sdw_reg_bank bank)
{
	struct sdw_cdns *cdns = bus_to_cdns(bus);
	int dpn_config;
	int dpn_config_off_source;
	int dpn_config_off_target;
	int dpn_hctrl;
	int dpn_hctrl_off_source;
	int dpn_hctrl_off_target;
	int dpn_offsetctrl;
	int dpn_offsetctrl_off_source;
	int dpn_offsetctrl_off_target;
	int dpn_samplectrl;
	int dpn_samplectrl_off_source;
	int dpn_samplectrl_off_target;
	int source_num = t_params->port_num;
	int target_num = t_params->port_num;
	bool override = false;

	if (target_num == cdns->pdi_loopback_target &&
	    cdns->pdi_loopback_source != -1) {
		source_num = cdns->pdi_loopback_source;
		override = true;
	}

	/*
	 * Note: Only full data port is supported on the Master side for
	 * both PCM and PDM ports.
	 */

	/* select the register offsets for the bank being programmed */
	if (bank) {
		dpn_config_off_source = CDNS_DPN_B1_CONFIG(source_num);
		dpn_hctrl_off_source = CDNS_DPN_B1_HCTRL(source_num);
		dpn_offsetctrl_off_source = CDNS_DPN_B1_OFFSET_CTRL(source_num);
		dpn_samplectrl_off_source = CDNS_DPN_B1_SAMPLE_CTRL(source_num);

		dpn_config_off_target = CDNS_DPN_B1_CONFIG(target_num);
		dpn_hctrl_off_target = CDNS_DPN_B1_HCTRL(target_num);
		dpn_offsetctrl_off_target = CDNS_DPN_B1_OFFSET_CTRL(target_num);
		dpn_samplectrl_off_target = CDNS_DPN_B1_SAMPLE_CTRL(target_num);

	} else {
		dpn_config_off_source = CDNS_DPN_B0_CONFIG(source_num);
		dpn_hctrl_off_source = CDNS_DPN_B0_HCTRL(source_num);
		dpn_offsetctrl_off_source = CDNS_DPN_B0_OFFSET_CTRL(source_num);
		dpn_samplectrl_off_source = CDNS_DPN_B0_SAMPLE_CTRL(source_num);

		dpn_config_off_target = CDNS_DPN_B0_CONFIG(target_num);
		dpn_hctrl_off_target = CDNS_DPN_B0_HCTRL(target_num);
		dpn_offsetctrl_off_target = CDNS_DPN_B0_OFFSET_CTRL(target_num);
		dpn_samplectrl_off_target = CDNS_DPN_B0_SAMPLE_CTRL(target_num);
	}

	/* block group control / block packing mode */
	dpn_config = cdns_readl(cdns, dpn_config_off_source);
	if (!override) {
		u32p_replace_bits(&dpn_config, t_params->blk_grp_ctrl, CDNS_DPN_CONFIG_BGC);
		u32p_replace_bits(&dpn_config, t_params->blk_pkg_mode, CDNS_DPN_CONFIG_BPM);
	}
	cdns_writel(cdns, dpn_config_off_target, dpn_config);

	/* offset1/offset2 */
	if (!override) {
		dpn_offsetctrl = 0;
		u32p_replace_bits(&dpn_offsetctrl, t_params->offset1, CDNS_DPN_OFFSET_CTRL_1);
		u32p_replace_bits(&dpn_offsetctrl, t_params->offset2, CDNS_DPN_OFFSET_CTRL_2);
	} else {
		dpn_offsetctrl = cdns_readl(cdns, dpn_offsetctrl_off_source);
	}
	cdns_writel(cdns, dpn_offsetctrl_off_target,  dpn_offsetctrl);

	/* hstart/hstop and lane control */
	if (!override) {
		dpn_hctrl = 0;
		u32p_replace_bits(&dpn_hctrl, t_params->hstart, CDNS_DPN_HCTRL_HSTART);
		u32p_replace_bits(&dpn_hctrl, t_params->hstop, CDNS_DPN_HCTRL_HSTOP);
		u32p_replace_bits(&dpn_hctrl, t_params->lane_ctrl, CDNS_DPN_HCTRL_LCTRL);
	} else {
		dpn_hctrl = cdns_readl(cdns, dpn_hctrl_off_source);
	}
	cdns_writel(cdns, dpn_hctrl_off_target, dpn_hctrl);

	/* sample interval, programmed as N-1 */
	if (!override)
		dpn_samplectrl = t_params->sample_interval - 1;
	else
		dpn_samplectrl = cdns_readl(cdns, dpn_samplectrl_off_source);
	cdns_writel(cdns, dpn_samplectrl_off_target, dpn_samplectrl);

	return 0;
}
1650 
1651 static int cdns_port_enable(struct sdw_bus *bus,
1652 			    struct sdw_enable_ch *enable_ch, unsigned int bank)
1653 {
1654 	struct sdw_cdns *cdns = bus_to_cdns(bus);
1655 	int dpn_chnen_off, ch_mask;
1656 
1657 	if (bank)
1658 		dpn_chnen_off = CDNS_DPN_B1_CH_EN(enable_ch->port_num);
1659 	else
1660 		dpn_chnen_off = CDNS_DPN_B0_CH_EN(enable_ch->port_num);
1661 
1662 	ch_mask = enable_ch->ch_mask * enable_ch->enable;
1663 	cdns_writel(cdns, dpn_chnen_off, ch_mask);
1664 
1665 	return 0;
1666 }
1667 
/* master port callbacks registered with the SoundWire bus core */
static const struct sdw_master_port_ops cdns_port_ops = {
	.dpn_set_port_params = cdns_port_params,
	.dpn_set_port_transport_params = cdns_transport_params,
	.dpn_port_enable_ch = cdns_port_enable,
};
1673 
1674 /**
1675  * sdw_cdns_is_clock_stop: Check clock status
1676  *
1677  * @cdns: Cadence instance
1678  */
1679 bool sdw_cdns_is_clock_stop(struct sdw_cdns *cdns)
1680 {
1681 	return !!(cdns_readl(cdns, CDNS_MCP_STAT) & CDNS_MCP_STAT_CLK_STOP);
1682 }
1683 EXPORT_SYMBOL(sdw_cdns_is_clock_stop);
1684 
1685 /**
1686  * sdw_cdns_clock_stop: Cadence clock stop configuration routine
1687  *
1688  * @cdns: Cadence instance
1689  * @block_wake: prevent wakes if required by the platform
1690  */
1691 int sdw_cdns_clock_stop(struct sdw_cdns *cdns, bool block_wake)
1692 {
1693 	bool slave_present = false;
1694 	struct sdw_slave *slave;
1695 	int ret;
1696 
1697 	sdw_cdns_check_self_clearing_bits(cdns, __func__, false, 0);
1698 
1699 	/* Check suspend status */
1700 	if (sdw_cdns_is_clock_stop(cdns)) {
1701 		dev_dbg(cdns->dev, "Clock is already stopped\n");
1702 		return 0;
1703 	}
1704 
1705 	/*
1706 	 * Before entering clock stop we mask the Slave
1707 	 * interrupts. This helps avoid having to deal with e.g. a
1708 	 * Slave becoming UNATTACHED while the clock is being stopped
1709 	 */
1710 	cdns_enable_slave_interrupts(cdns, false);
1711 
1712 	/*
1713 	 * For specific platforms, it is required to be able to put
1714 	 * master into a state in which it ignores wake-up trials
1715 	 * in clock stop state
1716 	 */
1717 	if (block_wake)
1718 		cdns_ip_updatel(cdns, CDNS_IP_MCP_CONTROL,
1719 				CDNS_IP_MCP_CONTROL_BLOCK_WAKEUP,
1720 				CDNS_IP_MCP_CONTROL_BLOCK_WAKEUP);
1721 
1722 	list_for_each_entry(slave, &cdns->bus.slaves, node) {
1723 		if (slave->status == SDW_SLAVE_ATTACHED ||
1724 		    slave->status == SDW_SLAVE_ALERT) {
1725 			slave_present = true;
1726 			break;
1727 		}
1728 	}
1729 
1730 	/* commit changes */
1731 	ret = cdns_config_update(cdns);
1732 	if (ret < 0) {
1733 		dev_err(cdns->dev, "%s: config_update failed\n", __func__);
1734 		return ret;
1735 	}
1736 
1737 	/* Prepare slaves for clock stop */
1738 	if (slave_present) {
1739 		ret = sdw_bus_prep_clk_stop(&cdns->bus);
1740 		if (ret < 0 && ret != -ENODATA) {
1741 			dev_err(cdns->dev, "prepare clock stop failed %d\n", ret);
1742 			return ret;
1743 		}
1744 	}
1745 
1746 	/*
1747 	 * Enter clock stop mode and only report errors if there are
1748 	 * Slave devices present (ALERT or ATTACHED)
1749 	 */
1750 	ret = sdw_bus_clk_stop(&cdns->bus);
1751 	if (ret < 0 && slave_present && ret != -ENODATA) {
1752 		dev_err(cdns->dev, "bus clock stop failed %d\n", ret);
1753 		return ret;
1754 	}
1755 
1756 	ret = cdns_set_wait(cdns, CDNS_MCP_STAT,
1757 			    CDNS_MCP_STAT_CLK_STOP,
1758 			    CDNS_MCP_STAT_CLK_STOP);
1759 	if (ret < 0)
1760 		dev_err(cdns->dev, "Clock stop failed %d\n", ret);
1761 
1762 	return ret;
1763 }
1764 EXPORT_SYMBOL(sdw_cdns_clock_stop);
1765 
1766 /**
1767  * sdw_cdns_clock_restart: Cadence PM clock restart configuration routine
1768  *
1769  * @cdns: Cadence instance
1770  * @bus_reset: context may be lost while in low power modes and the bus
1771  * may require a Severe Reset and re-enumeration after a wake.
1772  */
1773 int sdw_cdns_clock_restart(struct sdw_cdns *cdns, bool bus_reset)
1774 {
1775 	int ret;
1776 
1777 	/* unmask Slave interrupts that were masked when stopping the clock */
1778 	cdns_enable_slave_interrupts(cdns, true);
1779 
1780 	ret = cdns_clear_bit(cdns, CDNS_MCP_CONTROL,
1781 			     CDNS_MCP_CONTROL_CLK_STOP_CLR);
1782 	if (ret < 0) {
1783 		dev_err(cdns->dev, "Couldn't exit from clock stop\n");
1784 		return ret;
1785 	}
1786 
1787 	ret = cdns_set_wait(cdns, CDNS_MCP_STAT, CDNS_MCP_STAT_CLK_STOP, 0);
1788 	if (ret < 0) {
1789 		dev_err(cdns->dev, "clock stop exit failed %d\n", ret);
1790 		return ret;
1791 	}
1792 
1793 	cdns_ip_updatel(cdns, CDNS_IP_MCP_CONTROL,
1794 			CDNS_IP_MCP_CONTROL_BLOCK_WAKEUP, 0);
1795 
1796 	cdns_ip_updatel(cdns, CDNS_IP_MCP_CONTROL, CDNS_IP_MCP_CONTROL_CMD_ACCEPT,
1797 			CDNS_IP_MCP_CONTROL_CMD_ACCEPT);
1798 
1799 	if (!bus_reset) {
1800 
1801 		/* enable bus operations with clock and data */
1802 		cdns_ip_updatel(cdns, CDNS_IP_MCP_CONFIG,
1803 				CDNS_IP_MCP_CONFIG_OP,
1804 				CDNS_IP_MCP_CONFIG_OP_NORMAL);
1805 
1806 		ret = cdns_config_update(cdns);
1807 		if (ret < 0) {
1808 			dev_err(cdns->dev, "%s: config_update failed\n", __func__);
1809 			return ret;
1810 		}
1811 
1812 		ret = sdw_bus_exit_clk_stop(&cdns->bus);
1813 		if (ret < 0)
1814 			dev_err(cdns->dev, "bus failed to exit clock stop %d\n", ret);
1815 	}
1816 
1817 	return ret;
1818 }
1819 EXPORT_SYMBOL(sdw_cdns_clock_restart);
1820 
1821 /**
1822  * sdw_cdns_probe() - Cadence probe routine
1823  * @cdns: Cadence instance
1824  */
1825 int sdw_cdns_probe(struct sdw_cdns *cdns)
1826 {
1827 	init_completion(&cdns->tx_complete);
1828 	cdns->bus.port_ops = &cdns_port_ops;
1829 
1830 	mutex_init(&cdns->status_update_lock);
1831 
1832 	INIT_WORK(&cdns->work, cdns_update_slave_status_work);
1833 	INIT_DELAYED_WORK(&cdns->attach_dwork, cdns_check_attached_status_dwork);
1834 
1835 	return 0;
1836 }
1837 EXPORT_SYMBOL(sdw_cdns_probe);
1838 
1839 int cdns_set_sdw_stream(struct snd_soc_dai *dai,
1840 			void *stream, int direction)
1841 {
1842 	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
1843 	struct sdw_cdns_dai_runtime *dai_runtime;
1844 
1845 	dai_runtime = cdns->dai_runtime_array[dai->id];
1846 
1847 	if (stream) {
1848 		/* first paranoia check */
1849 		if (dai_runtime) {
1850 			dev_err(dai->dev,
1851 				"dai_runtime already allocated for dai %s\n",
1852 				dai->name);
1853 			return -EINVAL;
1854 		}
1855 
1856 		/* allocate and set dai_runtime info */
1857 		dai_runtime = kzalloc_obj(*dai_runtime);
1858 		if (!dai_runtime)
1859 			return -ENOMEM;
1860 
1861 		dai_runtime->stream_type = SDW_STREAM_PCM;
1862 
1863 		dai_runtime->bus = &cdns->bus;
1864 		dai_runtime->link_id = cdns->instance;
1865 
1866 		dai_runtime->stream = stream;
1867 		dai_runtime->direction = direction;
1868 
1869 		cdns->dai_runtime_array[dai->id] = dai_runtime;
1870 	} else {
1871 		/* second paranoia check */
1872 		if (!dai_runtime) {
1873 			dev_err(dai->dev,
1874 				"dai_runtime not allocated for dai %s\n",
1875 				dai->name);
1876 			return -EINVAL;
1877 		}
1878 
1879 		/* for NULL stream we release allocated dai_runtime */
1880 		kfree(dai_runtime);
1881 		cdns->dai_runtime_array[dai->id] = NULL;
1882 	}
1883 	return 0;
1884 }
1885 EXPORT_SYMBOL(cdns_set_sdw_stream);
1886 
1887 /**
1888  * cdns_find_pdi() - Find a free PDI
1889  *
1890  * @cdns: Cadence instance
1891  * @num: Number of PDIs
1892  * @pdi: PDI instances
1893  * @dai_id: DAI id
1894  *
1895  * Find a PDI for a given PDI array. The PDI num and dai_id are
1896  * expected to match, return NULL otherwise.
1897  */
1898 static struct sdw_cdns_pdi *cdns_find_pdi(struct sdw_cdns *cdns,
1899 					  unsigned int num,
1900 					  struct sdw_cdns_pdi *pdi,
1901 					  int dai_id)
1902 {
1903 	int i;
1904 
1905 	for (i = 0; i < num; i++)
1906 		if (pdi[i].num == dai_id)
1907 			return &pdi[i];
1908 
1909 	return NULL;
1910 }
1911 
1912 /**
1913  * sdw_cdns_config_stream: Configure a stream
1914  *
1915  * @cdns: Cadence instance
1916  * @ch: Channel count
1917  * @dir: Data direction
1918  * @pdi: PDI to be used
1919  */
1920 void sdw_cdns_config_stream(struct sdw_cdns *cdns,
1921 			    u32 ch, u32 dir, struct sdw_cdns_pdi *pdi)
1922 {
1923 	u32 offset, val = 0;
1924 
1925 	if (dir == SDW_DATA_DIR_RX) {
1926 		val = CDNS_PORTCTRL_DIRN;
1927 
1928 		if (cdns->bus.params.m_data_mode != SDW_PORT_DATA_MODE_NORMAL)
1929 			val |= CDNS_PORTCTRL_TEST_FAILED;
1930 	} else if (pdi->num == 0 || pdi->num == 1) {
1931 		val |= CDNS_PORTCTRL_BULK_ENABLE;
1932 	}
1933 	offset = CDNS_PORTCTRL + pdi->num * CDNS_PORT_OFFSET;
1934 	cdns_updatel(cdns, offset,
1935 		     CDNS_PORTCTRL_DIRN | CDNS_PORTCTRL_TEST_FAILED |
1936 		     CDNS_PORTCTRL_BULK_ENABLE,
1937 		     val);
1938 
1939 	/* The DataPort0 needs to be mapped to both PDI0 and PDI1 ! */
1940 	if (pdi->num == 1)
1941 		val = 0;
1942 	else
1943 		val = pdi->num;
1944 	val |= CDNS_PDI_CONFIG_SOFT_RESET;
1945 	val |= FIELD_PREP(CDNS_PDI_CONFIG_CHANNEL, (1 << ch) - 1);
1946 	cdns_writel(cdns, CDNS_PDI_CONFIG(pdi->num), val);
1947 }
1948 EXPORT_SYMBOL(sdw_cdns_config_stream);
1949 
1950 /**
1951  * sdw_cdns_alloc_pdi() - Allocate a PDI
1952  *
1953  * @cdns: Cadence instance
1954  * @stream: Stream to be allocated
1955  * @ch: Channel count
1956  * @dir: Data direction
1957  * @dai_id: DAI id
1958  */
1959 struct sdw_cdns_pdi *sdw_cdns_alloc_pdi(struct sdw_cdns *cdns,
1960 					struct sdw_cdns_streams *stream,
1961 					u32 ch, u32 dir, int dai_id)
1962 {
1963 	struct sdw_cdns_pdi *pdi = NULL;
1964 
1965 	if (dir == SDW_DATA_DIR_RX)
1966 		pdi = cdns_find_pdi(cdns, stream->num_in, stream->in,
1967 				    dai_id);
1968 	else
1969 		pdi = cdns_find_pdi(cdns, stream->num_out, stream->out,
1970 				    dai_id);
1971 
1972 	/* check if we found a PDI, else find in bi-directional */
1973 	if (!pdi)
1974 		pdi = cdns_find_pdi(cdns, stream->num_bd, stream->bd,
1975 				    dai_id);
1976 
1977 	if (pdi) {
1978 		pdi->l_ch_num = 0;
1979 		pdi->h_ch_num = ch - 1;
1980 		pdi->dir = dir;
1981 		pdi->ch_count = ch;
1982 	}
1983 
1984 	return pdi;
1985 }
1986 EXPORT_SYMBOL(sdw_cdns_alloc_pdi);
1987 
1988 /*
1989  * the MIPI SoundWire CRC8 polynomial is X^8 + X^6 + X^3 + X^2 + 1, MSB first
1990  * The value is (1)01001101 = 0x4D
1991  *
1992  * the table below was generated with
1993  *
1994  *	u8 crc8_lookup_table[CRC8_TABLE_SIZE];
1995  *	crc8_populate_msb(crc8_lookup_table, SDW_CRC8_POLY);
1996  *
1997  */
1998 #define SDW_CRC8_SEED 0xFF
1999 #define SDW_CRC8_POLY 0x4D
2000 
/* MSB-first CRC8 lookup table for SDW_CRC8_POLY (0x4D), see comment above */
static const u8 sdw_crc8_lookup_msb[CRC8_TABLE_SIZE] = {
	0x00, 0x4d, 0x9a, 0xd7, 0x79, 0x34, 0xe3, 0xae, /* 0 - 7 */
	0xf2, 0xbf, 0x68, 0x25, 0x8b, 0xc6, 0x11, 0x5c, /* 8 -15 */
	0xa9, 0xe4, 0x33, 0x7e, 0xd0, 0x9d, 0x4a, 0x07, /* 16 - 23 */
	0x5b, 0x16, 0xc1, 0x8c, 0x22, 0x6f, 0xb8, 0xf5, /* 24 - 31 */
	0x1f, 0x52, 0x85, 0xc8, 0x66, 0x2b, 0xfc, 0xb1, /* 32 - 39 */
	0xed, 0xa0, 0x77, 0x3a, 0x94, 0xd9, 0x0e, 0x43, /* 40 - 47 */
	0xb6, 0xfb, 0x2c, 0x61, 0xcf, 0x82, 0x55, 0x18, /* 48 - 55 */
	0x44, 0x09, 0xde, 0x93, 0x3d, 0x70, 0xa7, 0xea, /* 56 - 63 */
	0x3e, 0x73, 0xa4, 0xe9, 0x47, 0x0a, 0xdd, 0x90, /* 64 - 71 */
	0xcc, 0x81, 0x56, 0x1b, 0xb5, 0xf8, 0x2f, 0x62, /* 72 - 79 */
	0x97, 0xda, 0x0d, 0x40, 0xee, 0xa3, 0x74, 0x39, /* 80 - 87 */
	0x65, 0x28, 0xff, 0xb2, 0x1c, 0x51, 0x86, 0xcb, /* 88 - 95 */
	0x21, 0x6c, 0xbb, 0xf6, 0x58, 0x15, 0xc2, 0x8f, /* 96 - 103 */
	0xd3, 0x9e, 0x49, 0x04, 0xaa, 0xe7, 0x30, 0x7d, /* 104 - 111 */
	0x88, 0xc5, 0x12, 0x5f, 0xf1, 0xbc, 0x6b, 0x26, /* 112 - 119 */
	0x7a, 0x37, 0xe0, 0xad, 0x03, 0x4e, 0x99, 0xd4, /* 120 - 127 */
	0x7c, 0x31, 0xe6, 0xab, 0x05, 0x48, 0x9f, 0xd2, /* 128 - 135 */
	0x8e, 0xc3, 0x14, 0x59, 0xf7, 0xba, 0x6d, 0x20, /* 136 - 143 */
	0xd5, 0x98, 0x4f, 0x02, 0xac, 0xe1, 0x36, 0x7b, /* 144 - 151 */
	0x27, 0x6a, 0xbd, 0xf0, 0x5e, 0x13, 0xc4, 0x89, /* 152 - 159 */
	0x63, 0x2e, 0xf9, 0xb4, 0x1a, 0x57, 0x80, 0xcd, /* 160 - 167 */
	0x91, 0xdc, 0x0b, 0x46, 0xe8, 0xa5, 0x72, 0x3f, /* 168 - 175 */
	0xca, 0x87, 0x50, 0x1d, 0xb3, 0xfe, 0x29, 0x64, /* 176 - 183 */
	0x38, 0x75, 0xa2, 0xef, 0x41, 0x0c, 0xdb, 0x96, /* 184 - 191 */
	0x42, 0x0f, 0xd8, 0x95, 0x3b, 0x76, 0xa1, 0xec, /* 192 - 199 */
	0xb0, 0xfd, 0x2a, 0x67, 0xc9, 0x84, 0x53, 0x1e, /* 200 - 207 */
	0xeb, 0xa6, 0x71, 0x3c, 0x92, 0xdf, 0x08, 0x45, /* 208 - 215 */
	0x19, 0x54, 0x83, 0xce, 0x60, 0x2d, 0xfa, 0xb7, /* 216 - 223 */
	0x5d, 0x10, 0xc7, 0x8a, 0x24, 0x69, 0xbe, 0xf3, /* 224 - 231 */
	0xaf, 0xe2, 0x35, 0x78, 0xd6, 0x9b, 0x4c, 0x01, /* 232 - 239 */
	0xf4, 0xb9, 0x6e, 0x23, 0x8d, 0xc0, 0x17, 0x5a, /* 240 - 247 */
	0x06, 0x4b, 0x9c, 0xd1, 0x7f, 0x32, 0xe5, 0xa8  /* 248 - 255 */
};
2035 
2036 /* BPT/BRA helpers */
2037 
2038 #define SDW_CDNS_BRA_HDR			6 /* defined by MIPI */
2039 #define SDW_CDNS_BRA_HDR_CRC			1 /* defined by MIPI */
2040 #define SDW_CDNS_BRA_HDR_CRC_PAD		1 /* Cadence only */
2041 #define SDW_CDNS_BRA_HDR_RESP			1 /* defined by MIPI */
2042 #define SDW_CDNS_BRA_HDR_RESP_PAD		1 /* Cadence only */
2043 
2044 #define SDW_CDNS_BRA_DATA_PAD			1 /* Cadence only */
2045 #define SDW_CDNS_BRA_DATA_CRC			1 /* defined by MIPI */
2046 #define SDW_CDNS_BRA_DATA_CRC_PAD		1 /* Cadence only */
2047 
2048 #define SDW_CDNS_BRA_FOOTER_RESP		1 /* defined by MIPI */
2049 #define SDW_CDNS_BRA_FOOTER_RESP_PAD		1 /* Cadence only */
2050 
2051 #define SDW_CDNS_WRITE_PDI1_BUFFER_SIZE							\
2052 	((SDW_CDNS_BRA_HDR_RESP + SDW_CDNS_BRA_HDR_RESP_PAD +				\
2053 	 SDW_CDNS_BRA_FOOTER_RESP + SDW_CDNS_BRA_FOOTER_RESP_PAD) * 2)
2054 
2055 #define SDW_CDNS_READ_PDI0_BUFFER_SIZE							\
2056 	((SDW_CDNS_BRA_HDR + SDW_CDNS_BRA_HDR_CRC + SDW_CDNS_BRA_HDR_CRC_PAD) * 2)
2057 
2058 static unsigned int sdw_cdns_bra_actual_data_size(unsigned int allocated_bytes_per_frame)
2059 {
2060 	unsigned int total;
2061 
2062 	if (allocated_bytes_per_frame < (SDW_CDNS_BRA_HDR + SDW_CDNS_BRA_HDR_CRC +
2063 					 SDW_CDNS_BRA_HDR_RESP + SDW_CDNS_BRA_DATA_CRC +
2064 					 SDW_CDNS_BRA_FOOTER_RESP))
2065 		return 0;
2066 
2067 	total = allocated_bytes_per_frame - SDW_CDNS_BRA_HDR - SDW_CDNS_BRA_HDR_CRC -
2068 		SDW_CDNS_BRA_HDR_RESP - SDW_CDNS_BRA_DATA_CRC - SDW_CDNS_BRA_FOOTER_RESP;
2069 
2070 	return total;
2071 }
2072 
2073 static unsigned int sdw_cdns_write_pdi0_buffer_size(unsigned int actual_data_size)
2074 {
2075 	unsigned int total;
2076 
2077 	total = SDW_CDNS_BRA_HDR + SDW_CDNS_BRA_HDR_CRC + SDW_CDNS_BRA_HDR_CRC_PAD;
2078 
2079 	total += actual_data_size;
2080 	if (actual_data_size & 1)
2081 		total += SDW_CDNS_BRA_DATA_PAD;
2082 
2083 	total += SDW_CDNS_BRA_DATA_CRC + SDW_CDNS_BRA_DATA_CRC_PAD;
2084 
2085 	return total * 2;
2086 }
2087 
2088 static unsigned int sdw_cdns_read_pdi1_buffer_size(unsigned int actual_data_size)
2089 {
2090 	unsigned int total;
2091 
2092 	total = SDW_CDNS_BRA_HDR_RESP + SDW_CDNS_BRA_HDR_RESP_PAD;
2093 
2094 	total += actual_data_size;
2095 	if (actual_data_size & 1)
2096 		total += SDW_CDNS_BRA_DATA_PAD;
2097 
2098 	total += SDW_CDNS_BRA_HDR_CRC +	SDW_CDNS_BRA_HDR_CRC_PAD;
2099 
2100 	total += SDW_CDNS_BRA_FOOTER_RESP + SDW_CDNS_BRA_FOOTER_RESP_PAD;
2101 
2102 	return total * 2;
2103 }
2104 
2105 int sdw_cdns_bpt_find_bandwidth(int command, /* 0: write, 1: read */
2106 				int row, int col, int frame_rate,
2107 				unsigned int *tx_dma_bandwidth,
2108 				unsigned int *rx_dma_bandwidth)
2109 {
2110 	unsigned int bpt_bits = row * (col - 1);
2111 	unsigned int bpt_bytes = bpt_bits >> 3;
2112 	unsigned int pdi0_buffer_size;
2113 	unsigned int pdi1_buffer_size;
2114 	unsigned int data_per_frame;
2115 
2116 	data_per_frame = sdw_cdns_bra_actual_data_size(bpt_bytes);
2117 	if (!data_per_frame)
2118 		return -EINVAL;
2119 
2120 	if (command == 0) {
2121 		pdi0_buffer_size = sdw_cdns_write_pdi0_buffer_size(data_per_frame);
2122 		pdi1_buffer_size = SDW_CDNS_WRITE_PDI1_BUFFER_SIZE;
2123 	} else {
2124 		pdi0_buffer_size = SDW_CDNS_READ_PDI0_BUFFER_SIZE;
2125 		pdi1_buffer_size = sdw_cdns_read_pdi1_buffer_size(data_per_frame);
2126 	}
2127 
2128 	*tx_dma_bandwidth = pdi0_buffer_size * 8 * frame_rate;
2129 	*rx_dma_bandwidth = pdi1_buffer_size * 8 * frame_rate;
2130 
2131 	return 0;
2132 }
2133 EXPORT_SYMBOL(sdw_cdns_bpt_find_bandwidth);
2134 
2135 int sdw_cdns_bpt_find_buffer_sizes(int command, /* 0: write, 1: read */
2136 				   int row, int col, unsigned int data_bytes,
2137 				   unsigned int requested_bytes_per_frame,
2138 				   unsigned int *data_per_frame, unsigned int *pdi0_buffer_size,
2139 				   unsigned int *pdi1_buffer_size, unsigned int *num_frames)
2140 {
2141 	unsigned int bpt_bits = row * (col - 1);
2142 	unsigned int bpt_bytes = bpt_bits >> 3;
2143 	unsigned int actual_bpt_bytes;
2144 	unsigned int pdi0_tx_size;
2145 	unsigned int pdi1_rx_size;
2146 	unsigned int remainder;
2147 
2148 	if (!data_bytes)
2149 		return -EINVAL;
2150 
2151 	actual_bpt_bytes = sdw_cdns_bra_actual_data_size(bpt_bytes);
2152 	if (!actual_bpt_bytes)
2153 		return -EINVAL;
2154 
2155 	/*
2156 	 * the caller may want to set the number of bytes per frame,
2157 	 * allow when possible
2158 	 */
2159 	if (requested_bytes_per_frame < actual_bpt_bytes)
2160 		actual_bpt_bytes = requested_bytes_per_frame;
2161 
2162 	*data_per_frame = actual_bpt_bytes;
2163 
2164 	if (data_bytes < actual_bpt_bytes)
2165 		actual_bpt_bytes = data_bytes;
2166 
2167 	if (command == 0) {
2168 		/*
2169 		 * for writes we need to send all the data_bytes per frame,
2170 		 * even for the last frame which may only transport fewer bytes
2171 		 */
2172 
2173 		*num_frames = DIV_ROUND_UP(data_bytes, actual_bpt_bytes);
2174 
2175 		pdi0_tx_size = sdw_cdns_write_pdi0_buffer_size(actual_bpt_bytes);
2176 		pdi1_rx_size = SDW_CDNS_WRITE_PDI1_BUFFER_SIZE;
2177 
2178 		*pdi0_buffer_size = pdi0_tx_size * *num_frames;
2179 		*pdi1_buffer_size = pdi1_rx_size * *num_frames;
2180 	} else {
2181 		/*
2182 		 * for reads we need to retrieve only what is requested in the BPT
2183 		 * header, so the last frame needs to be special-cased
2184 		 */
2185 		*num_frames = data_bytes / actual_bpt_bytes;
2186 
2187 		pdi0_tx_size = SDW_CDNS_READ_PDI0_BUFFER_SIZE;
2188 		pdi1_rx_size = sdw_cdns_read_pdi1_buffer_size(actual_bpt_bytes);
2189 
2190 		*pdi0_buffer_size = pdi0_tx_size * *num_frames;
2191 		*pdi1_buffer_size = pdi1_rx_size * *num_frames;
2192 
2193 		remainder = data_bytes % actual_bpt_bytes;
2194 		if (remainder) {
2195 			pdi0_tx_size = SDW_CDNS_READ_PDI0_BUFFER_SIZE;
2196 			pdi1_rx_size = sdw_cdns_read_pdi1_buffer_size(remainder);
2197 
2198 			*num_frames = *num_frames + 1;
2199 			*pdi0_buffer_size += pdi0_tx_size;
2200 			*pdi1_buffer_size += pdi1_rx_size;
2201 		}
2202 	}
2203 
2204 	return 0;
2205 }
2206 EXPORT_SYMBOL(sdw_cdns_bpt_find_buffer_sizes);
2207 
/*
 * Copy @data_size bytes from @data into @dma_buffer using the Cadence
 * BPT DMA layout: byte i is stored at offset 2*i - (i & 1), i.e. at
 * offsets 0,1,4,5,8,9,... — the two low bytes of each 32-bit PDI word.
 * The two high bytes of each word are not written here. An odd-sized
 * payload is padded with one zero byte, and the two trailing bytes of
 * the final word are skipped.
 *
 * Returns the number of dma_buffer bytes consumed (final offset + 1,
 * including the skipped bytes), or -EINVAL when @dma_buffer_size is
 * too small to hold the layout.
 */
static int sdw_cdns_copy_write_data(u8 *data, int data_size, u8 *dma_buffer, int dma_buffer_size)
{
	/*
	 * the implementation copies the data one byte at a time. Experiments with
	 * two bytes at a time did not seem to improve the performance
	 */
	int i, j;

	/*
	 * size check to prevent out of bounds access: compute the last
	 * offset that will be consumed before touching the buffer
	 */
	i = data_size - 1;
	j = (2 * i) - (i & 1);
	if (data_size & 1)
		j++;
	j += 2;
	if (j >= dma_buffer_size)
		return -EINVAL;

	/* copy data */
	for (i = 0; i < data_size; i++) {
		j = (2 * i) - (i & 1);
		dma_buffer[j] = data[i];
	}
	/* add required pad */
	if (data_size & 1)
		dma_buffer[++j] = 0;
	/* skip last two bytes */
	j += 2;

	/* offset and data are off-by-one */
	return j + 1;
}
2239 
/*
 * Serialize one BPT/BRA write frame into @dma_buffer: header, header
 * CRC8, payload, payload CRC8 — each copied with the two-bytes-per-word
 * layout of sdw_cdns_copy_write_data(). Byte 3 of the first 32-bit
 * word is overwritten with the frame_start marker (BIT(7)) and the
 * 4-bit rolling @frame_counter; the very last byte is tagged with
 * BIT(6) as the frame_end marker (these map to BIT(31)/BIT(30) of the
 * words checked by the response parsers on little-endian hosts —
 * NOTE(review): endianness assumption to confirm).
 *
 * On success returns 0 and *@dma_data_written holds the number of
 * bytes consumed; otherwise returns the negative error from the
 * failing copy.
 */
static int sdw_cdns_prepare_write_pd0_buffer(u8 *header, unsigned int header_size,
					     u8 *data, unsigned int data_size,
					     u8 *dma_buffer, unsigned int dma_buffer_size,
					     unsigned int *dma_data_written,
					     unsigned int frame_counter)
{
	int data_written;
	u8 *last_byte;
	u8 crc;

	*dma_data_written = 0;

	/* header bytes first; then tag the first word with start marker + counter */
	data_written = sdw_cdns_copy_write_data(header, header_size, dma_buffer, dma_buffer_size);
	if (data_written < 0)
		return data_written;
	dma_buffer[3] = BIT(7);
	dma_buffer[3] |= frame_counter & GENMASK(3, 0);

	dma_buffer += data_written;
	dma_buffer_size -= data_written;
	*dma_data_written += data_written;

	/* CRC8 over the header bytes */
	crc = SDW_CRC8_SEED;
	crc = crc8(sdw_crc8_lookup_msb, header, header_size, crc);

	data_written = sdw_cdns_copy_write_data(&crc, 1, dma_buffer, dma_buffer_size);
	if (data_written < 0)
		return data_written;
	dma_buffer += data_written;
	dma_buffer_size -= data_written;
	*dma_data_written += data_written;

	/* payload bytes */
	data_written = sdw_cdns_copy_write_data(data, data_size, dma_buffer, dma_buffer_size);
	if (data_written < 0)
		return data_written;
	dma_buffer += data_written;
	dma_buffer_size -= data_written;
	*dma_data_written += data_written;

	/* CRC8 over the payload bytes (fresh seed, independent of header CRC) */
	crc = SDW_CRC8_SEED;
	crc = crc8(sdw_crc8_lookup_msb, data, data_size, crc);
	data_written = sdw_cdns_copy_write_data(&crc, 1, dma_buffer, dma_buffer_size);
	if (data_written < 0)
		return data_written;
	dma_buffer += data_written;
	dma_buffer_size -= data_written;
	*dma_data_written += data_written;

	/* tag last byte */
	last_byte = dma_buffer - 1;
	last_byte[0] = BIT(6);

	return 0;
}
2294 
/*
 * Serialize one BPT/BRA read-request frame into @dma_buffer: header
 * plus header CRC8 only (no payload on the TX side of a read). As in
 * the write path, byte 3 of the first 32-bit word carries the
 * frame_start marker (BIT(7)) and the 4-bit rolling @frame_counter,
 * and the last byte is tagged with BIT(6) as the frame_end marker.
 *
 * On success returns 0 and *@dma_data_written holds the number of
 * bytes consumed; otherwise returns the negative error from the
 * failing copy.
 */
static int sdw_cdns_prepare_read_pd0_buffer(u8 *header, unsigned int header_size,
					    u8 *dma_buffer, unsigned int dma_buffer_size,
					    unsigned int *dma_data_written,
					    unsigned int frame_counter)
{
	int data_written;
	u8 *last_byte;
	u8 crc;

	*dma_data_written = 0;

	/* header bytes first; then tag the first word with start marker + counter */
	data_written = sdw_cdns_copy_write_data(header, header_size, dma_buffer, dma_buffer_size);
	if (data_written < 0)
		return data_written;
	dma_buffer[3] = BIT(7);
	dma_buffer[3] |= frame_counter & GENMASK(3, 0);

	dma_buffer += data_written;
	dma_buffer_size -= data_written;
	*dma_data_written += data_written;

	/* CRC8 over the header bytes */
	crc = SDW_CRC8_SEED;
	crc = crc8(sdw_crc8_lookup_msb, header, header_size, crc);

	data_written = sdw_cdns_copy_write_data(&crc, 1, dma_buffer, dma_buffer_size);
	if (data_written < 0)
		return data_written;
	dma_buffer += data_written;
	dma_buffer_size -= data_written;
	*dma_data_written += data_written;

	/* tag last byte */
	last_byte = dma_buffer - 1;
	last_byte[0] = BIT(6);

	return 0;
}
2332 
2333 #define CDNS_BPT_ROLLING_COUNTER_START 1
2334 
/*
 * Build the PDI0 (TX) DMA buffer for a BPT/BRA write transfer.
 *
 * Each section in @sec is split into frames carrying @data_per_frame
 * payload bytes, plus one final shorter frame for any remainder. Every
 * frame is serialized by sdw_cdns_prepare_write_pd0_buffer() with a
 * rolling frame counter starting at CDNS_BPT_ROLLING_COUNTER_START.
 *
 * Returns 0 on success (with *@dma_buffer_total_bytes set to the
 * number of bytes written into @dma_buffer), or the negative error
 * from frame serialization (e.g. -EINVAL when @dma_buffer is too small).
 */
int sdw_cdns_prepare_write_dma_buffer(u8 dev_num, struct sdw_bpt_section *sec, int num_sec,
				      int data_per_frame, u8 *dma_buffer,
				      int dma_buffer_size, int *dma_buffer_total_bytes)
{
	int total_dma_data_written = 0;
	u8 *p_dma_buffer = dma_buffer;
	u8 header[SDW_CDNS_BRA_HDR];
	unsigned int start_register;
	unsigned int section_size;
	int dma_data_written;
	u8 *p_data;
	u8 counter;
	int ret;
	int i;

	counter = CDNS_BPT_ROLLING_COUNTER_START;

	/* header byte 0 is identical for all frames of the transfer */
	header[0] = BIT(1);		/* write command: BIT(1) set */
	header[0] |= GENMASK(7, 6);	/* header is active */
	header[0] |= (dev_num << 2);

	for (i = 0; i < num_sec; i++) {
		start_register = sec[i].addr;
		section_size = sec[i].len;
		p_data = sec[i].buf;

		/* full frames of data_per_frame payload bytes */
		while (section_size >= data_per_frame) {
			/* byte 1: length, bytes 2-5: start register (big-endian) */
			header[1] = data_per_frame;
			header[2] = start_register >> 24 & 0xFF;
			header[3] = start_register >> 16 & 0xFF;
			header[4] = start_register >> 8 & 0xFF;
			header[5] = start_register >> 0 & 0xFF;

			ret = sdw_cdns_prepare_write_pd0_buffer(header, SDW_CDNS_BRA_HDR,
								p_data, data_per_frame,
								p_dma_buffer, dma_buffer_size,
								&dma_data_written, counter);
			if (ret < 0)
				return ret;

			counter++;

			p_data += data_per_frame;
			section_size -= data_per_frame;

			p_dma_buffer += dma_data_written;
			dma_buffer_size -= dma_data_written;
			total_dma_data_written += dma_data_written;

			start_register += data_per_frame;
		}

		/* final shorter frame with the section remainder, if any */
		if (section_size) {
			header[1] = section_size;
			header[2] = start_register >> 24 & 0xFF;
			header[3] = start_register >> 16 & 0xFF;
			header[4] = start_register >> 8 & 0xFF;
			header[5] = start_register >> 0 & 0xFF;

			ret = sdw_cdns_prepare_write_pd0_buffer(header, SDW_CDNS_BRA_HDR,
								p_data, section_size,
								p_dma_buffer, dma_buffer_size,
								&dma_data_written, counter);
			if (ret < 0)
				return ret;

			counter++;

			p_dma_buffer += dma_data_written;
			dma_buffer_size -= dma_data_written;
			total_dma_data_written += dma_data_written;
		}
	}

	*dma_buffer_total_bytes = total_dma_data_written;

	return 0;
}
EXPORT_SYMBOL(sdw_cdns_prepare_write_dma_buffer);
2414 
/*
 * Build the PDI0 (TX) DMA buffer for a BPT/BRA read transfer.
 *
 * Each section in @sec is split into read-request frames for
 * @data_per_frame bytes (plus a final shorter request for any
 * remainder), serialized by sdw_cdns_prepare_read_pd0_buffer() with a
 * rolling frame counter starting at CDNS_BPT_ROLLING_COUNTER_START.
 * After the real requests, @fake_size bytes worth of inactive padding
 * frames are appended (header "active" bits cleared); the counter
 * keeps incrementing across them.
 *
 * Returns 0 on success (with *@dma_buffer_total_bytes set to the
 * number of bytes written into @dma_buffer), or the negative error
 * from frame serialization (e.g. -EINVAL when @dma_buffer is too small).
 */
int sdw_cdns_prepare_read_dma_buffer(u8 dev_num, struct sdw_bpt_section *sec, int num_sec,
				     int data_per_frame, u8 *dma_buffer, int dma_buffer_size,
				     int *dma_buffer_total_bytes, unsigned int fake_size)
{
	int total_dma_data_written = 0;
	u8 *p_dma_buffer = dma_buffer;
	u8 header[SDW_CDNS_BRA_HDR];
	unsigned int start_register;
	unsigned int data_size;
	int dma_data_written;
	u8 counter;
	int ret;
	int i;

	counter = CDNS_BPT_ROLLING_COUNTER_START;

	/* header byte 0 is identical for all frames of the transfer */
	header[0] = 0;			/* read command: BIT(1) cleared */
	header[0] |= GENMASK(7, 6);	/* header is active */
	header[0] |= (dev_num << 2);

	for (i = 0; i < num_sec; i++) {
		start_register = sec[i].addr;
		data_size = sec[i].len;
		/* full read requests of data_per_frame bytes */
		while (data_size >= data_per_frame) {
			/* byte 1: length, bytes 2-5: start register (big-endian) */
			header[1] = data_per_frame;
			header[2] = start_register >> 24 & 0xFF;
			header[3] = start_register >> 16 & 0xFF;
			header[4] = start_register >> 8 & 0xFF;
			header[5] = start_register >> 0 & 0xFF;

			ret = sdw_cdns_prepare_read_pd0_buffer(header, SDW_CDNS_BRA_HDR,
							       p_dma_buffer, dma_buffer_size,
							       &dma_data_written, counter);
			if (ret < 0)
				return ret;

			counter++;

			data_size -= data_per_frame;

			p_dma_buffer += dma_data_written;
			dma_buffer_size -= dma_data_written;
			total_dma_data_written += dma_data_written;

			start_register += data_per_frame;
		}

		/* final shorter request with the section remainder, if any */
		if (data_size) {
			header[1] = data_size;
			header[2] = start_register >> 24 & 0xFF;
			header[3] = start_register >> 16 & 0xFF;
			header[4] = start_register >> 8 & 0xFF;
			header[5] = start_register >> 0 & 0xFF;

			ret = sdw_cdns_prepare_read_pd0_buffer(header, SDW_CDNS_BRA_HDR,
							       p_dma_buffer, dma_buffer_size,
							       &dma_data_written, counter);
			if (ret < 0)
				return ret;

			counter++;

			p_dma_buffer += dma_data_written;
			dma_buffer_size -= dma_data_written;
			total_dma_data_written += dma_data_written;
		}
	}

	/*
	 * Add fake frame: bytes 1-5 keep their last written values -
	 * presumably ignored by the IP since the frames are inactive;
	 * NOTE(review): confirm against hardware documentation.
	 */
	header[0] &= ~GENMASK(7, 6);	/* Set inactive flag in BPT/BRA frame header */
	while (fake_size >= data_per_frame) {
		header[1] = data_per_frame;
		ret = sdw_cdns_prepare_read_pd0_buffer(header, SDW_CDNS_BRA_HDR, p_dma_buffer,
						       dma_buffer_size, &dma_data_written,
						       counter);
		if (ret < 0)
			return ret;

		counter++;

		fake_size -= data_per_frame;
		p_dma_buffer += dma_data_written;
		dma_buffer_size -= dma_data_written;
		total_dma_data_written += dma_data_written;
	}

	/* final shorter fake frame, if any */
	if (fake_size) {
		header[1] = fake_size;
		ret = sdw_cdns_prepare_read_pd0_buffer(header, SDW_CDNS_BRA_HDR, p_dma_buffer,
						       dma_buffer_size, &dma_data_written,
						       counter);
		if (ret < 0)
			return ret;

		counter++;

		p_dma_buffer += dma_data_written;
		dma_buffer_size -= dma_data_written;
		total_dma_data_written += dma_data_written;
	}

	*dma_buffer_total_bytes = total_dma_data_written;

	return 0;
}
EXPORT_SYMBOL(sdw_cdns_prepare_read_dma_buffer);
2521 
2522 static int check_counter(u32 val, u8 counter)
2523 {
2524 	u8 frame;
2525 
2526 	frame = (val >> 24) & GENMASK(3, 0);
2527 	if (counter != frame)
2528 		return -EIO;
2529 	return 0;
2530 }
2531 
2532 static int check_response(u32 val)
2533 {
2534 	u8 response;
2535 
2536 	response = (val >> 3) & GENMASK(1, 0);
2537 	if (response == 0) /* Ignored */
2538 		return -ENODATA;
2539 	if (response != 1) /* ACK */
2540 		return -EIO;
2541 
2542 	return 0;
2543 }
2544 
2545 static int check_frame_start(u32 header, u8 counter)
2546 {
2547 	int ret;
2548 
2549 	/* check frame_start marker */
2550 	if (!(header & BIT(31)))
2551 		return -EIO;
2552 
2553 	ret = check_counter(header, counter);
2554 	if (ret < 0)
2555 		return ret;
2556 
2557 	return check_response(header);
2558 }
2559 
2560 static int check_frame_end(u32 footer)
2561 {
2562 	/* check frame_end marker */
2563 	if (!(footer & BIT(30)))
2564 		return -EIO;
2565 
2566 	return check_response(footer);
2567 }
2568 
2569 int sdw_cdns_check_write_response(struct device *dev, u8 *dma_buffer,
2570 				  int dma_buffer_size, int num_frames)
2571 {
2572 	u32 *p_data;
2573 	int counter;
2574 	u32 header;
2575 	u32 footer;
2576 	int ret;
2577 	int i;
2578 
2579 	/* paranoia check on buffer size */
2580 	if (dma_buffer_size != num_frames * 8)
2581 		return -EINVAL;
2582 
2583 	counter = CDNS_BPT_ROLLING_COUNTER_START;
2584 	p_data = (u32 *)dma_buffer;
2585 
2586 	for (i = 0; i < num_frames; i++) {
2587 		header = *p_data++;
2588 		footer = *p_data++;
2589 
2590 		ret = check_frame_start(header, counter);
2591 		if (ret < 0) {
2592 			dev_err(dev, "%s: bad frame %d/%d start header %x\n",
2593 				__func__, i + 1, num_frames, header);
2594 			return ret;
2595 		}
2596 
2597 		ret = check_frame_end(footer);
2598 		if (ret < 0) {
2599 			dev_err(dev, "%s: bad frame %d/%d end footer %x\n",
2600 				__func__, i + 1, num_frames, footer);
2601 			return ret;
2602 		}
2603 
2604 		counter++;
2605 		counter &= GENMASK(3, 0);
2606 	}
2607 	return 0;
2608 }
2609 EXPORT_SYMBOL(sdw_cdns_check_write_response);
2610 
2611 static u8 extract_read_data(u32 *data, int num_bytes, u8 *buffer)
2612 {
2613 	u32 val;
2614 	int i;
2615 	u8 crc;
2616 	u8 b0;
2617 	u8 b1;
2618 
2619 	crc = SDW_CRC8_SEED;
2620 
2621 	/* process two bytes at a time */
2622 	for (i = 0; i < num_bytes / 2; i++) {
2623 		val = *data++;
2624 
2625 		b0 = val & 0xff;
2626 		b1 = (val >> 8) & 0xff;
2627 
2628 		*buffer++ = b0;
2629 		crc = crc8(sdw_crc8_lookup_msb, &b0, 1, crc);
2630 
2631 		*buffer++ = b1;
2632 		crc = crc8(sdw_crc8_lookup_msb, &b1, 1, crc);
2633 	}
2634 	/* handle remaining byte if it exists */
2635 	if (num_bytes & 1) {
2636 		val = *data;
2637 
2638 		b0 = val & 0xff;
2639 
2640 		*buffer++ = b0;
2641 		crc = crc8(sdw_crc8_lookup_msb, &b0, 1, crc);
2642 	}
2643 	return crc;
2644 }
2645 
/*
 * Parse and validate the PDI1 (RX) DMA buffer of a BPT/BRA read
 * transfer, copying the payload back into the section buffers in @sec.
 *
 * Each frame is a sequence of 32-bit words: a header word (checked by
 * check_frame_start()), the payload words (two data bytes per word,
 * unpacked by extract_read_data()), a CRC word and a footer word
 * (checked by check_frame_end()). When a section buffer fills up, the
 * parser switches to the next section before the following frame.
 *
 * NOTE(review): @dma_buffer_size is currently unused — no bounds check
 * is performed while walking the buffer; frame count and sizes are
 * trusted to match what was programmed.
 *
 * Returns 0 on success, -EIO on a bad header/footer/CRC, -EINVAL when
 * the frames overrun the provided sections.
 */
int sdw_cdns_check_read_response(struct device *dev, u8 *dma_buffer, int dma_buffer_size,
				 struct sdw_bpt_section *sec, int num_sec, int num_frames,
				 int data_per_frame)
{
	int total_num_bytes = 0;
	int buffer_size = 0;
	int sec_index;
	u32 *p_data;
	u8 *p_buf;
	int counter;
	u32 header;
	u32 footer;
	u8 expected_crc;
	u8 crc;
	int len;
	int ret;
	int i;

	counter = CDNS_BPT_ROLLING_COUNTER_START;
	p_data = (u32 *)dma_buffer;

	/* start filling the first section buffer */
	sec_index = 0;
	p_buf = sec[sec_index].buf;
	buffer_size = sec[sec_index].len;

	for (i = 0; i < num_frames; i++) {
		header = *p_data++;

		ret = check_frame_start(header, counter);
		if (ret < 0) {
			dev_err(dev, "%s: bad frame %d/%d start header %x\n",
				__func__, i + 1, num_frames, header);
			return ret;
		}

		/* the last frame of a section may carry fewer bytes */
		len = data_per_frame;
		if (total_num_bytes + data_per_frame > buffer_size)
			len = buffer_size - total_num_bytes;

		crc = extract_read_data(p_data, len, p_buf);

		/* skip the payload words (two bytes per word), then read the CRC word */
		p_data += (len + 1) / 2;
		expected_crc = *p_data++ & 0xff;

		if (crc != expected_crc) {
			dev_err(dev, "%s: bad frame %d/%d crc %#x expected %#x\n",
				__func__, i + 1, num_frames, crc, expected_crc);
			return -EIO;
		}

		p_buf += len;
		total_num_bytes += len;

		footer = *p_data++;
		ret = check_frame_end(footer);
		if (ret < 0) {
			dev_err(dev, "%s: bad frame %d/%d end footer %x\n",
				__func__, i + 1, num_frames, footer);
			return ret;
		}

		/* 4-bit rolling counter */
		counter++;
		counter &= GENMASK(3, 0);

		/* current section complete: move to the next one if frames remain */
		if (buffer_size == total_num_bytes && (i + 1) < num_frames) {
			sec_index++;
			if (sec_index >= num_sec) {
				dev_err(dev, "%s: incorrect section index %d i %d\n",
					__func__, sec_index, i);
				return -EINVAL;
			}
			p_buf = sec[sec_index].buf;
			buffer_size = sec[sec_index].len;
			total_num_bytes = 0;
		}
	}
	return 0;
}
EXPORT_SYMBOL(sdw_cdns_check_read_response);
2725 
2726 MODULE_LICENSE("Dual BSD/GPL");
2727 MODULE_DESCRIPTION("Cadence Soundwire Library");
2728