/* xref: /linux/drivers/soc/fsl/qe/tsa.c (revision bfd4f092c49fe20a802f703e79df4926b70f3564) */
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * TSA driver
4  *
5  * Copyright 2022 CS GROUP France
6  *
7  * Author: Herve Codina <herve.codina@bootlin.com>
8  */
9 
10 #include "tsa.h"
11 #include <dt-bindings/soc/cpm1-fsl,tsa.h>
12 #include <linux/bitfield.h>
13 #include <linux/clk.h>
14 #include <linux/io.h>
15 #include <linux/module.h>
16 #include <linux/of.h>
17 #include <linux/of_platform.h>
18 #include <linux/platform_device.h>
19 #include <linux/slab.h>
20 
21 /* TSA SI RAM routing tables entry */
22 #define TSA_SIRAM_ENTRY_LAST		BIT(16)
23 #define TSA_SIRAM_ENTRY_BYTE		BIT(17)
24 #define TSA_SIRAM_ENTRY_CNT_MASK	GENMASK(21, 18)
25 #define TSA_SIRAM_ENTRY_CNT(x)		FIELD_PREP(TSA_SIRAM_ENTRY_CNT_MASK, x)
26 #define TSA_SIRAM_ENTRY_CSEL_MASK	GENMASK(24, 22)
27 #define TSA_SIRAM_ENTRY_CSEL_NU		FIELD_PREP_CONST(TSA_SIRAM_ENTRY_CSEL_MASK, 0x0)
28 #define TSA_SIRAM_ENTRY_CSEL_SCC2	FIELD_PREP_CONST(TSA_SIRAM_ENTRY_CSEL_MASK, 0x2)
29 #define TSA_SIRAM_ENTRY_CSEL_SCC3	FIELD_PREP_CONST(TSA_SIRAM_ENTRY_CSEL_MASK, 0x3)
30 #define TSA_SIRAM_ENTRY_CSEL_SCC4	FIELD_PREP_CONST(TSA_SIRAM_ENTRY_CSEL_MASK, 0x4)
31 #define TSA_SIRAM_ENTRY_CSEL_SMC1	FIELD_PREP_CONST(TSA_SIRAM_ENTRY_CSEL_MASK, 0x5)
32 #define TSA_SIRAM_ENTRY_CSEL_SMC2	FIELD_PREP_CONST(TSA_SIRAM_ENTRY_CSEL_MASK, 0x6)
33 
34 /* SI mode register (32 bits) */
35 #define TSA_SIMODE	0x00
36 #define   TSA_SIMODE_SMC2			BIT(31)
37 #define   TSA_SIMODE_SMC1			BIT(15)
38 #define   TSA_SIMODE_TDMA_MASK			GENMASK(11, 0)
39 #define   TSA_SIMODE_TDMA(x)			FIELD_PREP(TSA_SIMODE_TDMA_MASK, x)
40 #define   TSA_SIMODE_TDMB_MASK			GENMASK(27, 16)
41 #define   TSA_SIMODE_TDMB(x)			FIELD_PREP(TSA_SIMODE_TDMB_MASK, x)
42 #define     TSA_SIMODE_TDM_MASK			GENMASK(11, 0)
43 #define     TSA_SIMODE_TDM_SDM_MASK		GENMASK(11, 10)
44 #define       TSA_SIMODE_TDM_SDM_NORM		FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x0)
45 #define       TSA_SIMODE_TDM_SDM_ECHO		FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x1)
46 #define       TSA_SIMODE_TDM_SDM_INTL_LOOP	FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x2)
47 #define       TSA_SIMODE_TDM_SDM_LOOP_CTRL	FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x3)
48 #define     TSA_SIMODE_TDM_RFSD_MASK		GENMASK(9, 8)
49 #define     TSA_SIMODE_TDM_RFSD(x)		FIELD_PREP(TSA_SIMODE_TDM_RFSD_MASK, x)
50 #define     TSA_SIMODE_TDM_DSC			BIT(7)
51 #define     TSA_SIMODE_TDM_CRT			BIT(6)
52 #define     TSA_SIMODE_TDM_STZ			BIT(5)
53 #define     TSA_SIMODE_TDM_CE			BIT(4)
54 #define     TSA_SIMODE_TDM_FE			BIT(3)
55 #define     TSA_SIMODE_TDM_GM			BIT(2)
56 #define     TSA_SIMODE_TDM_TFSD_MASK		GENMASK(1, 0)
57 #define     TSA_SIMODE_TDM_TFSD(x)		FIELD_PREP(TSA_SIMODE_TDM_TFSD_MASK, x)
58 
59 /* SI global mode register (8 bits) */
60 #define TSA_SIGMR	0x04
61 #define TSA_SIGMR_ENB			BIT(3)
62 #define TSA_SIGMR_ENA			BIT(2)
63 #define TSA_SIGMR_RDM_MASK		GENMASK(1, 0)
64 #define   TSA_SIGMR_RDM_STATIC_TDMA	FIELD_PREP_CONST(TSA_SIGMR_RDM_MASK, 0x0)
65 #define   TSA_SIGMR_RDM_DYN_TDMA	FIELD_PREP_CONST(TSA_SIGMR_RDM_MASK, 0x1)
66 #define   TSA_SIGMR_RDM_STATIC_TDMAB	FIELD_PREP_CONST(TSA_SIGMR_RDM_MASK, 0x2)
67 #define   TSA_SIGMR_RDM_DYN_TDMAB	FIELD_PREP_CONST(TSA_SIGMR_RDM_MASK, 0x3)
68 
69 /* SI status register (8 bits) */
70 #define TSA_SISTR	0x06
71 
72 /* SI command register (8 bits) */
73 #define TSA_SICMR	0x07
74 
75 /* SI clock route register (32 bits) */
76 #define TSA_SICR	0x0C
77 #define   TSA_SICR_SCC2_MASK		GENMASK(15, 8)
78 #define   TSA_SICR_SCC2(x)		FIELD_PREP(TSA_SICR_SCC2_MASK, x)
79 #define   TSA_SICR_SCC3_MASK		GENMASK(23, 16)
80 #define   TSA_SICR_SCC3(x)		FIELD_PREP(TSA_SICR_SCC3_MASK, x)
81 #define   TSA_SICR_SCC4_MASK		GENMASK(31, 24)
82 #define   TSA_SICR_SCC4(x)		FIELD_PREP(TSA_SICR_SCC4_MASK, x)
83 #define     TSA_SICR_SCC_MASK		GENMASK(7, 0)
84 #define     TSA_SICR_SCC_GRX		BIT(7)
85 #define     TSA_SICR_SCC_SCX_TSA	BIT(6)
86 #define     TSA_SICR_SCC_RXCS_MASK	GENMASK(5, 3)
87 #define       TSA_SICR_SCC_RXCS_BRG1	FIELD_PREP_CONST(TSA_SICR_SCC_RXCS_MASK, 0x0)
88 #define       TSA_SICR_SCC_RXCS_BRG2	FIELD_PREP_CONST(TSA_SICR_SCC_RXCS_MASK, 0x1)
89 #define       TSA_SICR_SCC_RXCS_BRG3	FIELD_PREP_CONST(TSA_SICR_SCC_RXCS_MASK, 0x2)
90 #define       TSA_SICR_SCC_RXCS_BRG4	FIELD_PREP_CONST(TSA_SICR_SCC_RXCS_MASK, 0x3)
91 #define       TSA_SICR_SCC_RXCS_CLK15	FIELD_PREP_CONST(TSA_SICR_SCC_RXCS_MASK, 0x4)
92 #define       TSA_SICR_SCC_RXCS_CLK26	FIELD_PREP_CONST(TSA_SICR_SCC_RXCS_MASK, 0x5)
93 #define       TSA_SICR_SCC_RXCS_CLK37	FIELD_PREP_CONST(TSA_SICR_SCC_RXCS_MASK, 0x6)
94 #define       TSA_SICR_SCC_RXCS_CLK48	FIELD_PREP_CONST(TSA_SICR_SCC_RXCS_MASK, 0x7)
95 #define     TSA_SICR_SCC_TXCS_MASK	GENMASK(2, 0)
96 #define       TSA_SICR_SCC_TXCS_BRG1	FIELD_PREP_CONST(TSA_SICR_SCC_TXCS_MASK, 0x0)
97 #define       TSA_SICR_SCC_TXCS_BRG2	FIELD_PREP_CONST(TSA_SICR_SCC_TXCS_MASK, 0x1)
98 #define       TSA_SICR_SCC_TXCS_BRG3	FIELD_PREP_CONST(TSA_SICR_SCC_TXCS_MASK, 0x2)
99 #define       TSA_SICR_SCC_TXCS_BRG4	FIELD_PREP_CONST(TSA_SICR_SCC_TXCS_MASK, 0x3)
100 #define       TSA_SICR_SCC_TXCS_CLK15	FIELD_PREP_CONST(TSA_SICR_SCC_TXCS_MASK, 0x4)
101 #define       TSA_SICR_SCC_TXCS_CLK26	FIELD_PREP_CONST(TSA_SICR_SCC_TXCS_MASK, 0x5)
102 #define       TSA_SICR_SCC_TXCS_CLK37	FIELD_PREP_CONST(TSA_SICR_SCC_TXCS_MASK, 0x6)
103 #define       TSA_SICR_SCC_TXCS_CLK48	FIELD_PREP_CONST(TSA_SICR_SCC_TXCS_MASK, 0x7)
104 
105 /* Serial interface RAM pointer register (32 bits) */
106 #define TSA_SIRP	0x10
107 
/*
 * Region of the SI routing RAM assigned to one TDM direction (Rx or Tx)
 * while the routing entries are being built (see tsa_add_entry()).
 */
struct tsa_entries_area {
	void __iomem *entries_start; /* first entry of the area */
	void __iomem *entries_next;  /* first address past the area (exclusive bound) */
	void __iomem *last_entry;    /* last entry written so far, NULL if none yet */
};
113 
/* Per-TDM (TDMa/TDMb) configuration and clock handles */
struct tsa_tdm {
	bool is_enable;           /* set once the TDM is fully parsed and configured */
	struct clk *l1rclk_clk;   /* receive bit clock */
	struct clk *l1rsync_clk;  /* receive frame sync */
	struct clk *l1tclk_clk;   /* transmit bit clock (NULL with common rx/tx pins) */
	struct clk *l1tsync_clk;  /* transmit frame sync (NULL with common rx/tx pins) */
	u32 simode_tdm;           /* TSA_SIMODE_TDM_* value to program in SIMODE */
};
122 
/* Indexes in tsa->tdm[] and bit numbers in tsa->tdms */
#define TSA_TDMA	0
#define TSA_TDMB	1

/* Main driver instance */
struct tsa {
	struct device *dev;
	void __iomem *si_regs;     /* SI register block ("si_regs" resource) */
	void __iomem *si_ram;      /* SI routing RAM ("si_ram" resource) */
	resource_size_t si_ram_sz; /* size of si_ram, in bytes */
	spinlock_t	lock;      /* serializes read-modify-write of SI registers */
	int tdms; /* bitmask of enabled TDMs: BIT(TSA_TDMA) and/or BIT(TSA_TDMB) */
	struct tsa_tdm tdm[2]; /* TDMa and TDMb */
	/* One slot per serial, indexed by the FSL_CPM_TSA_* serial id */
	struct tsa_serial {
		unsigned int id;
		struct tsa_serial_info info;
	} serials[6];
};
139 
/*
 * Retrieve the parent tsa instance from one of its serials.
 * The serials[] table is indexed by the serial id (set at probe time), so
 * container_of() can be anchored on the serial's own slot.
 */
static inline struct tsa *tsa_serial_get_tsa(struct tsa_serial *tsa_serial)
{
	/* The serials table is indexed by the serial id */
	return container_of(tsa_serial, struct tsa, serials[tsa_serial->id]);
}
145 
/* Write a 32-bit SI register/RAM word (registers are big-endian) */
static inline void tsa_write32(void __iomem *addr, u32 val)
{
	iowrite32be(val, addr);
}
150 
/* Write an 8-bit SI register */
static inline void tsa_write8(void __iomem *addr, u8 val)
{
	iowrite8(val, addr);
}
155 
/* Read a 32-bit SI register/RAM word (registers are big-endian) */
static inline u32 tsa_read32(void __iomem *addr)
{
	return ioread32be(addr);
}
160 
161 static inline void tsa_clrbits32(void __iomem *addr, u32 clr)
162 {
163 	tsa_write32(addr, tsa_read32(addr) & ~clr);
164 }
165 
166 static inline void tsa_clrsetbits32(void __iomem *addr, u32 clr, u32 set)
167 {
168 	tsa_write32(addr, (tsa_read32(addr) & ~clr) | set);
169 }
170 
171 int tsa_serial_connect(struct tsa_serial *tsa_serial)
172 {
173 	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
174 	unsigned long flags;
175 	u32 clear;
176 	u32 set;
177 
178 	switch (tsa_serial->id) {
179 	case FSL_CPM_TSA_SCC2:
180 		clear = TSA_SICR_SCC2(TSA_SICR_SCC_MASK);
181 		set = TSA_SICR_SCC2(TSA_SICR_SCC_SCX_TSA);
182 		break;
183 	case FSL_CPM_TSA_SCC3:
184 		clear = TSA_SICR_SCC3(TSA_SICR_SCC_MASK);
185 		set = TSA_SICR_SCC3(TSA_SICR_SCC_SCX_TSA);
186 		break;
187 	case FSL_CPM_TSA_SCC4:
188 		clear = TSA_SICR_SCC4(TSA_SICR_SCC_MASK);
189 		set = TSA_SICR_SCC4(TSA_SICR_SCC_SCX_TSA);
190 		break;
191 	default:
192 		dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id);
193 		return -EINVAL;
194 	}
195 
196 	spin_lock_irqsave(&tsa->lock, flags);
197 	tsa_clrsetbits32(tsa->si_regs + TSA_SICR, clear, set);
198 	spin_unlock_irqrestore(&tsa->lock, flags);
199 
200 	return 0;
201 }
202 EXPORT_SYMBOL(tsa_serial_connect);
203 
204 int tsa_serial_disconnect(struct tsa_serial *tsa_serial)
205 {
206 	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
207 	unsigned long flags;
208 	u32 clear;
209 
210 	switch (tsa_serial->id) {
211 	case FSL_CPM_TSA_SCC2:
212 		clear = TSA_SICR_SCC2(TSA_SICR_SCC_MASK);
213 		break;
214 	case FSL_CPM_TSA_SCC3:
215 		clear = TSA_SICR_SCC3(TSA_SICR_SCC_MASK);
216 		break;
217 	case FSL_CPM_TSA_SCC4:
218 		clear = TSA_SICR_SCC4(TSA_SICR_SCC_MASK);
219 		break;
220 	default:
221 		dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id);
222 		return -EINVAL;
223 	}
224 
225 	spin_lock_irqsave(&tsa->lock, flags);
226 	tsa_clrsetbits32(tsa->si_regs + TSA_SICR, clear, 0);
227 	spin_unlock_irqrestore(&tsa->lock, flags);
228 
229 	return 0;
230 }
231 EXPORT_SYMBOL(tsa_serial_disconnect);
232 
233 int tsa_serial_get_info(struct tsa_serial *tsa_serial, struct tsa_serial_info *info)
234 {
235 	memcpy(info, &tsa_serial->info, sizeof(*info));
236 	return 0;
237 }
238 EXPORT_SYMBOL(tsa_serial_get_info);
239 
240 static void tsa_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
241 				  u32 tdms, u32 tdm_id, bool is_rx)
242 {
243 	resource_size_t quarter;
244 	resource_size_t half;
245 
246 	quarter = tsa->si_ram_sz / 4;
247 	half = tsa->si_ram_sz / 2;
248 
249 	if (tdms == BIT(TSA_TDMA)) {
250 		/* Only TDMA */
251 		if (is_rx) {
252 			/* First half of si_ram */
253 			area->entries_start = tsa->si_ram;
254 			area->entries_next = area->entries_start + half;
255 			area->last_entry = NULL;
256 		} else {
257 			/* Second half of si_ram */
258 			area->entries_start = tsa->si_ram + half;
259 			area->entries_next = area->entries_start + half;
260 			area->last_entry = NULL;
261 		}
262 	} else {
263 		/* Only TDMB or both TDMs */
264 		if (tdm_id == TSA_TDMA) {
265 			if (is_rx) {
266 				/* First half of first half of si_ram */
267 				area->entries_start = tsa->si_ram;
268 				area->entries_next = area->entries_start + quarter;
269 				area->last_entry = NULL;
270 			} else {
271 				/* First half of second half of si_ram */
272 				area->entries_start = tsa->si_ram + (2 * quarter);
273 				area->entries_next = area->entries_start + quarter;
274 				area->last_entry = NULL;
275 			}
276 		} else {
277 			if (is_rx) {
278 				/* Second half of first half of si_ram */
279 				area->entries_start = tsa->si_ram + quarter;
280 				area->entries_next = area->entries_start + quarter;
281 				area->last_entry = NULL;
282 			} else {
283 				/* Second half of second half of si_ram */
284 				area->entries_start = tsa->si_ram + (3 * quarter);
285 				area->entries_next = area->entries_start + quarter;
286 				area->last_entry = NULL;
287 			}
288 		}
289 	}
290 }
291 
292 static const char *tsa_serial_id2name(struct tsa *tsa, u32 serial_id)
293 {
294 	switch (serial_id) {
295 	case FSL_CPM_TSA_NU:	return "Not used";
296 	case FSL_CPM_TSA_SCC2:	return "SCC2";
297 	case FSL_CPM_TSA_SCC3:	return "SCC3";
298 	case FSL_CPM_TSA_SCC4:	return "SCC4";
299 	case FSL_CPM_TSA_SMC1:	return "SMC1";
300 	case FSL_CPM_TSA_SMC2:	return "SMC2";
301 	default:
302 		break;
303 	}
304 	return NULL;
305 }
306 
307 static u32 tsa_serial_id2csel(struct tsa *tsa, u32 serial_id)
308 {
309 	switch (serial_id) {
310 	case FSL_CPM_TSA_SCC2:	return TSA_SIRAM_ENTRY_CSEL_SCC2;
311 	case FSL_CPM_TSA_SCC3:	return TSA_SIRAM_ENTRY_CSEL_SCC3;
312 	case FSL_CPM_TSA_SCC4:	return TSA_SIRAM_ENTRY_CSEL_SCC4;
313 	case FSL_CPM_TSA_SMC1:	return TSA_SIRAM_ENTRY_CSEL_SMC1;
314 	case FSL_CPM_TSA_SMC2:	return TSA_SIRAM_ENTRY_CSEL_SMC2;
315 	default:
316 		break;
317 	}
318 	return TSA_SIRAM_ENTRY_CSEL_NU;
319 }
320 
321 static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
322 			 u32 count, u32 serial_id)
323 {
324 	void __iomem *addr;
325 	u32 left;
326 	u32 val;
327 	u32 cnt;
328 	u32 nb;
329 
330 	addr = area->last_entry ? area->last_entry + 4 : area->entries_start;
331 
332 	nb = DIV_ROUND_UP(count, 8);
333 	if ((addr + (nb * 4)) > area->entries_next) {
334 		dev_err(tsa->dev, "si ram area full\n");
335 		return -ENOSPC;
336 	}
337 
338 	if (area->last_entry) {
339 		/* Clear last flag */
340 		tsa_clrbits32(area->last_entry, TSA_SIRAM_ENTRY_LAST);
341 	}
342 
343 	left = count;
344 	while (left) {
345 		val = TSA_SIRAM_ENTRY_BYTE | tsa_serial_id2csel(tsa, serial_id);
346 
347 		if (left > 16) {
348 			cnt = 16;
349 		} else {
350 			cnt = left;
351 			val |= TSA_SIRAM_ENTRY_LAST;
352 			area->last_entry = addr;
353 		}
354 		val |= TSA_SIRAM_ENTRY_CNT(cnt - 1);
355 
356 		tsa_write32(addr, val);
357 		addr += 4;
358 		left -= cnt;
359 	}
360 
361 	return 0;
362 }
363 
/*
 * Parse one direction (Rx or Tx) of a TDM route from the device tree and
 * program the corresponding SI RAM entries.
 *
 * The "fsl,{rx,tx}-ts-routes" property is a list of (count, serial_id)
 * pairs: 'count' consecutive time slots routed to 'serial_id'. For each
 * pair, this also accumulates time-slot counts and records the clock
 * rates in the serial's tsa_serial_info.
 *
 * Returns 0 on success, a negative errno on bad/missing properties or
 * when the SI RAM area overflows.
 */
static int tsa_of_parse_tdm_route(struct tsa *tsa, struct device_node *tdm_np,
				  u32 tdms, u32 tdm_id, bool is_rx)
{
	struct tsa_entries_area area;
	const char *route_name;
	u32 serial_id;
	int len, i;
	u32 count;
	const char *serial_name;
	struct tsa_serial_info *serial_info;
	struct tsa_tdm *tdm;
	int ret;
	u32 ts;

	route_name = is_rx ? "fsl,rx-ts-routes" : "fsl,tx-ts-routes";

	len = of_property_count_u32_elems(tdm_np,  route_name);
	if (len < 0) {
		dev_err(tsa->dev, "%pOF: failed to read %s\n", tdm_np, route_name);
		return len;
	}
	/* Values come in (count, serial_id) pairs */
	if (len % 2 != 0) {
		dev_err(tsa->dev, "%pOF: wrong %s format\n", tdm_np, route_name);
		return -EINVAL;
	}

	tsa_init_entries_area(tsa, &area, tdms, tdm_id, is_rx);
	ts = 0; /* running first time-slot index, for diagnostics only */
	for (i = 0; i < len; i += 2) {
		of_property_read_u32_index(tdm_np, route_name, i, &count);
		of_property_read_u32_index(tdm_np, route_name, i + 1, &serial_id);

		if (serial_id >= ARRAY_SIZE(tsa->serials)) {
			dev_err(tsa->dev, "%pOF: invalid serial id (%u)\n",
				tdm_np, serial_id);
			return -EINVAL;
		}

		serial_name = tsa_serial_id2name(tsa, serial_id);
		if (!serial_name) {
			dev_err(tsa->dev, "%pOF: unsupported serial id (%u)\n",
				tdm_np, serial_id);
			return -EINVAL;
		}

		dev_dbg(tsa->dev, "tdm_id=%u, %s ts %u..%u -> %s\n",
			tdm_id, route_name, ts, ts + count - 1, serial_name);
		ts += count;

		ret = tsa_add_entry(tsa, &area, count, serial_id);
		if (ret)
			return ret;

		serial_info = &tsa->serials[serial_id].info;
		tdm = &tsa->tdm[tdm_id];
		if (is_rx) {
			serial_info->rx_fs_rate = clk_get_rate(tdm->l1rsync_clk);
			serial_info->rx_bit_rate = clk_get_rate(tdm->l1rclk_clk);
			serial_info->nb_rx_ts += count;
		} else {
			/* Fall back to the Rx clocks when Rx/Tx pins are common */
			serial_info->tx_fs_rate = tdm->l1tsync_clk ?
				clk_get_rate(tdm->l1tsync_clk) :
				clk_get_rate(tdm->l1rsync_clk);
			serial_info->tx_bit_rate = tdm->l1tclk_clk ?
				clk_get_rate(tdm->l1tclk_clk) :
				clk_get_rate(tdm->l1rclk_clk);
			serial_info->nb_tx_ts += count;
		}
	}
	return 0;
}
435 
/* Parse and program the Rx routes of a TDM node */
static inline int tsa_of_parse_tdm_rx_route(struct tsa *tsa,
					    struct device_node *tdm_np,
					    u32 tdms, u32 tdm_id)
{
	return tsa_of_parse_tdm_route(tsa, tdm_np, tdms, tdm_id, true);
}
442 
/* Parse and program the Tx routes of a TDM node */
static inline int tsa_of_parse_tdm_tx_route(struct tsa *tsa,
					    struct device_node *tdm_np,
					    u32 tdms, u32 tdm_id)
{
	return tsa_of_parse_tdm_route(tsa, tdm_np, tdms, tdm_id, false);
}
449 
450 static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
451 {
452 	struct device_node *tdm_np;
453 	struct tsa_tdm *tdm;
454 	struct clk *clk;
455 	u32 tdm_id, val;
456 	int ret;
457 	int i;
458 
459 	tsa->tdms = 0;
460 	tsa->tdm[0].is_enable = false;
461 	tsa->tdm[1].is_enable = false;
462 
463 	for_each_available_child_of_node(np, tdm_np) {
464 		ret = of_property_read_u32(tdm_np, "reg", &tdm_id);
465 		if (ret) {
466 			dev_err(tsa->dev, "%pOF: failed to read reg\n", tdm_np);
467 			of_node_put(tdm_np);
468 			return ret;
469 		}
470 		switch (tdm_id) {
471 		case 0:
472 			tsa->tdms |= BIT(TSA_TDMA);
473 			break;
474 		case 1:
475 			tsa->tdms |= BIT(TSA_TDMB);
476 			break;
477 		default:
478 			dev_err(tsa->dev, "%pOF: Invalid tdm_id (%u)\n", tdm_np,
479 				tdm_id);
480 			of_node_put(tdm_np);
481 			return -EINVAL;
482 		}
483 	}
484 
485 	for_each_available_child_of_node(np, tdm_np) {
486 		ret = of_property_read_u32(tdm_np, "reg", &tdm_id);
487 		if (ret) {
488 			dev_err(tsa->dev, "%pOF: failed to read reg\n", tdm_np);
489 			of_node_put(tdm_np);
490 			return ret;
491 		}
492 
493 		tdm = &tsa->tdm[tdm_id];
494 		tdm->simode_tdm = TSA_SIMODE_TDM_SDM_NORM;
495 
496 		val = 0;
497 		ret = of_property_read_u32(tdm_np, "fsl,rx-frame-sync-delay-bits",
498 					   &val);
499 		if (ret && ret != -EINVAL) {
500 			dev_err(tsa->dev,
501 				"%pOF: failed to read fsl,rx-frame-sync-delay-bits\n",
502 				tdm_np);
503 			of_node_put(tdm_np);
504 			return ret;
505 		}
506 		if (val > 3) {
507 			dev_err(tsa->dev,
508 				"%pOF: Invalid fsl,rx-frame-sync-delay-bits (%u)\n",
509 				tdm_np, val);
510 			of_node_put(tdm_np);
511 			return -EINVAL;
512 		}
513 		tdm->simode_tdm |= TSA_SIMODE_TDM_RFSD(val);
514 
515 		val = 0;
516 		ret = of_property_read_u32(tdm_np, "fsl,tx-frame-sync-delay-bits",
517 					   &val);
518 		if (ret && ret != -EINVAL) {
519 			dev_err(tsa->dev,
520 				"%pOF: failed to read fsl,tx-frame-sync-delay-bits\n",
521 				tdm_np);
522 			of_node_put(tdm_np);
523 			return ret;
524 		}
525 		if (val > 3) {
526 			dev_err(tsa->dev,
527 				"%pOF: Invalid fsl,tx-frame-sync-delay-bits (%u)\n",
528 				tdm_np, val);
529 			of_node_put(tdm_np);
530 			return -EINVAL;
531 		}
532 		tdm->simode_tdm |= TSA_SIMODE_TDM_TFSD(val);
533 
534 		if (of_property_read_bool(tdm_np, "fsl,common-rxtx-pins"))
535 			tdm->simode_tdm |= TSA_SIMODE_TDM_CRT;
536 
537 		if (of_property_read_bool(tdm_np, "fsl,clock-falling-edge"))
538 			tdm->simode_tdm |= TSA_SIMODE_TDM_CE;
539 
540 		if (of_property_read_bool(tdm_np, "fsl,fsync-rising-edge"))
541 			tdm->simode_tdm |= TSA_SIMODE_TDM_FE;
542 
543 		if (of_property_read_bool(tdm_np, "fsl,double-speed-clock"))
544 			tdm->simode_tdm |= TSA_SIMODE_TDM_DSC;
545 
546 		clk = of_clk_get_by_name(tdm_np, "l1rsync");
547 		if (IS_ERR(clk)) {
548 			ret = PTR_ERR(clk);
549 			of_node_put(tdm_np);
550 			goto err;
551 		}
552 		ret = clk_prepare_enable(clk);
553 		if (ret) {
554 			clk_put(clk);
555 			of_node_put(tdm_np);
556 			goto err;
557 		}
558 		tdm->l1rsync_clk = clk;
559 
560 		clk = of_clk_get_by_name(tdm_np, "l1rclk");
561 		if (IS_ERR(clk)) {
562 			ret = PTR_ERR(clk);
563 			of_node_put(tdm_np);
564 			goto err;
565 		}
566 		ret = clk_prepare_enable(clk);
567 		if (ret) {
568 			clk_put(clk);
569 			of_node_put(tdm_np);
570 			goto err;
571 		}
572 		tdm->l1rclk_clk = clk;
573 
574 		if (!(tdm->simode_tdm & TSA_SIMODE_TDM_CRT)) {
575 			clk = of_clk_get_by_name(tdm_np, "l1tsync");
576 			if (IS_ERR(clk)) {
577 				ret = PTR_ERR(clk);
578 				of_node_put(tdm_np);
579 				goto err;
580 			}
581 			ret = clk_prepare_enable(clk);
582 			if (ret) {
583 				clk_put(clk);
584 				of_node_put(tdm_np);
585 				goto err;
586 			}
587 			tdm->l1tsync_clk = clk;
588 
589 			clk = of_clk_get_by_name(tdm_np, "l1tclk");
590 			if (IS_ERR(clk)) {
591 				ret = PTR_ERR(clk);
592 				of_node_put(tdm_np);
593 				goto err;
594 			}
595 			ret = clk_prepare_enable(clk);
596 			if (ret) {
597 				clk_put(clk);
598 				of_node_put(tdm_np);
599 				goto err;
600 			}
601 			tdm->l1tclk_clk = clk;
602 		}
603 
604 		ret = tsa_of_parse_tdm_rx_route(tsa, tdm_np, tsa->tdms, tdm_id);
605 		if (ret) {
606 			of_node_put(tdm_np);
607 			goto err;
608 		}
609 
610 		ret = tsa_of_parse_tdm_tx_route(tsa, tdm_np, tsa->tdms, tdm_id);
611 		if (ret) {
612 			of_node_put(tdm_np);
613 			goto err;
614 		}
615 
616 		tdm->is_enable = true;
617 	}
618 	return 0;
619 
620 err:
621 	for (i = 0; i < 2; i++) {
622 		if (tsa->tdm[i].l1rsync_clk) {
623 			clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
624 			clk_put(tsa->tdm[i].l1rsync_clk);
625 		}
626 		if (tsa->tdm[i].l1rclk_clk) {
627 			clk_disable_unprepare(tsa->tdm[i].l1rclk_clk);
628 			clk_put(tsa->tdm[i].l1rclk_clk);
629 		}
630 		if (tsa->tdm[i].l1tsync_clk) {
631 			clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
632 			clk_put(tsa->tdm[i].l1rsync_clk);
633 		}
634 		if (tsa->tdm[i].l1tclk_clk) {
635 			clk_disable_unprepare(tsa->tdm[i].l1rclk_clk);
636 			clk_put(tsa->tdm[i].l1rclk_clk);
637 		}
638 	}
639 	return ret;
640 }
641 
642 static void tsa_init_si_ram(struct tsa *tsa)
643 {
644 	resource_size_t i;
645 
646 	/* Fill all entries as the last one */
647 	for (i = 0; i < tsa->si_ram_sz; i += 4)
648 		tsa_write32(tsa->si_ram + i, TSA_SIRAM_ENTRY_LAST);
649 }
650 
/*
 * Probe: map the SI registers and routing RAM, parse the TDM children,
 * then program SIMODE (per-TDM configuration) and SIGMR (global routing
 * mode and TDM enables).
 */
static int tsa_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct resource *res;
	struct tsa *tsa;
	unsigned int i;
	u32 val;
	int ret;

	tsa = devm_kzalloc(&pdev->dev, sizeof(*tsa), GFP_KERNEL);
	if (!tsa)
		return -ENOMEM;

	tsa->dev = &pdev->dev;

	/* serials[] is indexed by serial id; record the id in each slot */
	for (i = 0; i < ARRAY_SIZE(tsa->serials); i++)
		tsa->serials[i].id = i;

	spin_lock_init(&tsa->lock);

	tsa->si_regs = devm_platform_ioremap_resource_byname(pdev, "si_regs");
	if (IS_ERR(tsa->si_regs))
		return PTR_ERR(tsa->si_regs);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "si_ram");
	if (!res) {
		dev_err(tsa->dev, "si_ram resource missing\n");
		return -EINVAL;
	}
	tsa->si_ram_sz = resource_size(res);
	tsa->si_ram = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(tsa->si_ram))
		return PTR_ERR(tsa->si_ram);

	/* Start from a clean routing table before parsing the routes */
	tsa_init_si_ram(tsa);

	ret = tsa_of_parse_tdms(tsa, np);
	if (ret)
		return ret;

	/* Set SIMODE */
	val = 0;
	if (tsa->tdm[0].is_enable)
		val |= TSA_SIMODE_TDMA(tsa->tdm[0].simode_tdm);
	if (tsa->tdm[1].is_enable)
		val |= TSA_SIMODE_TDMB(tsa->tdm[1].simode_tdm);

	tsa_clrsetbits32(tsa->si_regs + TSA_SIMODE,
			 TSA_SIMODE_TDMA(TSA_SIMODE_TDM_MASK) |
			 TSA_SIMODE_TDMB(TSA_SIMODE_TDM_MASK),
			 val);

	/* Set SIGMR: RAM division matches tsa_init_entries_area()'s layout */
	val = (tsa->tdms == BIT(TSA_TDMA)) ?
		TSA_SIGMR_RDM_STATIC_TDMA : TSA_SIGMR_RDM_STATIC_TDMAB;
	if (tsa->tdms & BIT(TSA_TDMA))
		val |= TSA_SIGMR_ENA;
	if (tsa->tdms & BIT(TSA_TDMB))
		val |= TSA_SIGMR_ENB;
	tsa_write8(tsa->si_regs + TSA_SIGMR, val);

	platform_set_drvdata(pdev, tsa);

	return 0;
}
716 
717 static void tsa_remove(struct platform_device *pdev)
718 {
719 	struct tsa *tsa = platform_get_drvdata(pdev);
720 	int i;
721 
722 	for (i = 0; i < 2; i++) {
723 		if (tsa->tdm[i].l1rsync_clk) {
724 			clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
725 			clk_put(tsa->tdm[i].l1rsync_clk);
726 		}
727 		if (tsa->tdm[i].l1rclk_clk) {
728 			clk_disable_unprepare(tsa->tdm[i].l1rclk_clk);
729 			clk_put(tsa->tdm[i].l1rclk_clk);
730 		}
731 		if (tsa->tdm[i].l1tsync_clk) {
732 			clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
733 			clk_put(tsa->tdm[i].l1rsync_clk);
734 		}
735 		if (tsa->tdm[i].l1tclk_clk) {
736 			clk_disable_unprepare(tsa->tdm[i].l1rclk_clk);
737 			clk_put(tsa->tdm[i].l1rclk_clk);
738 		}
739 	}
740 }
741 
/* Device-tree match table */
static const struct of_device_id tsa_id_table[] = {
	{ .compatible = "fsl,cpm1-tsa" },
	{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, tsa_id_table);

static struct platform_driver tsa_driver = {
	.driver = {
		.name = "fsl-tsa",
		.of_match_table = of_match_ptr(tsa_id_table),
	},
	.probe = tsa_probe,
	.remove_new = tsa_remove,
};
module_platform_driver(tsa_driver);
757 
/*
 * Resolve a phandle + one argument (the serial id) into a tsa_serial.
 *
 * On success, a reference on the TSA platform device is kept; the caller
 * must release it with tsa_serial_put(). Returns an ERR_PTR on failure:
 * -EINVAL for bad phandle/argument, -ENODEV if no device is bound,
 * -EPROBE_DEFER if the TSA driver has not probed yet.
 */
struct tsa_serial *tsa_serial_get_byphandle(struct device_node *np,
					    const char *phandle_name)
{
	struct of_phandle_args out_args;
	struct platform_device *pdev;
	struct tsa_serial *tsa_serial;
	struct tsa *tsa;
	int ret;

	ret = of_parse_phandle_with_fixed_args(np, phandle_name, 1, 0, &out_args);
	if (ret < 0)
		return ERR_PTR(ret);

	/* Make sure the phandle really points to a TSA node */
	if (!of_match_node(tsa_driver.driver.of_match_table, out_args.np)) {
		of_node_put(out_args.np);
		return ERR_PTR(-EINVAL);
	}

	/* of_find_device_by_node() takes a device reference; the OF node
	 * reference is no longer needed after this point.
	 */
	pdev = of_find_device_by_node(out_args.np);
	of_node_put(out_args.np);
	if (!pdev)
		return ERR_PTR(-ENODEV);

	tsa = platform_get_drvdata(pdev);
	if (!tsa) {
		platform_device_put(pdev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	if (out_args.args_count != 1) {
		platform_device_put(pdev);
		return ERR_PTR(-EINVAL);
	}

	if (out_args.args[0] >= ARRAY_SIZE(tsa->serials)) {
		platform_device_put(pdev);
		return ERR_PTR(-EINVAL);
	}

	tsa_serial = &tsa->serials[out_args.args[0]];

	/*
	 * Be sure that the serial id matches the phandle arg.
	 * The tsa_serials table is indexed by serial ids. The serial id is set
	 * during the probe() call and needs to be coherent.
	 */
	if (WARN_ON(tsa_serial->id != out_args.args[0])) {
		platform_device_put(pdev);
		return ERR_PTR(-EINVAL);
	}

	return tsa_serial;
}
EXPORT_SYMBOL(tsa_serial_get_byphandle);
812 
/*
 * Release a serial obtained from tsa_serial_get_byphandle(): drop the
 * device reference taken there (tsa->dev is the TSA platform device).
 */
void tsa_serial_put(struct tsa_serial *tsa_serial)
{
	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);

	put_device(tsa->dev);
}
EXPORT_SYMBOL(tsa_serial_put);
820 
/* devres release callback: undo devm_tsa_serial_get_byphandle() */
static void devm_tsa_serial_release(struct device *dev, void *res)
{
	struct tsa_serial **tsa_serial = res;

	tsa_serial_put(*tsa_serial);
}
827 
828 struct tsa_serial *devm_tsa_serial_get_byphandle(struct device *dev,
829 						 struct device_node *np,
830 						 const char *phandle_name)
831 {
832 	struct tsa_serial *tsa_serial;
833 	struct tsa_serial **dr;
834 
835 	dr = devres_alloc(devm_tsa_serial_release, sizeof(*dr), GFP_KERNEL);
836 	if (!dr)
837 		return ERR_PTR(-ENOMEM);
838 
839 	tsa_serial = tsa_serial_get_byphandle(np, phandle_name);
840 	if (!IS_ERR(tsa_serial)) {
841 		*dr = tsa_serial;
842 		devres_add(dev, dr);
843 	} else {
844 		devres_free(dr);
845 	}
846 
847 	return tsa_serial;
848 }
849 EXPORT_SYMBOL(devm_tsa_serial_get_byphandle);
850 
851 MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
852 MODULE_DESCRIPTION("CPM TSA driver");
853 MODULE_LICENSE("GPL");
854