// SPDX-License-Identifier: GPL-2.0
/*
 * TSA driver
 *
 * Copyright 2022 CS GROUP France
 *
 * Author: Herve Codina <herve.codina@bootlin.com>
 */

#include "tsa.h"
#include <dt-bindings/soc/cpm1-fsl,tsa.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* TSA SI RAM routing tables entry (CPM1) */
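/*
 * Each 32-bit entry routes a group of consecutive time slots to one serial:
 * CSEL selects the destination controller (or "not used"), CNT holds the
 * number of routed time slots minus one, BYTE selects byte resolution and
 * LAST marks the final entry of a routing table.
 */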
#define TSA_CPM1_SIRAM_ENTRY_LAST	BIT(16)
#define TSA_CPM1_SIRAM_ENTRY_BYTE	BIT(17)
#define TSA_CPM1_SIRAM_ENTRY_CNT_MASK	GENMASK(21, 18)
#define TSA_CPM1_SIRAM_ENTRY_CNT(x)	FIELD_PREP(TSA_CPM1_SIRAM_ENTRY_CNT_MASK, x)
#define TSA_CPM1_SIRAM_ENTRY_CSEL_MASK	GENMASK(24, 22)
#define TSA_CPM1_SIRAM_ENTRY_CSEL_NU	FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x0)
#define TSA_CPM1_SIRAM_ENTRY_CSEL_SCC2	FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x2)
#define TSA_CPM1_SIRAM_ENTRY_CSEL_SCC3	FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x3)
#define TSA_CPM1_SIRAM_ENTRY_CSEL_SCC4	FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x4)
#define TSA_CPM1_SIRAM_ENTRY_CSEL_SMC1	FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x5)
#define TSA_CPM1_SIRAM_ENTRY_CSEL_SMC2	FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x6)

/* SI mode register (32 bits) */
#define TSA_SIMODE	0x00
#define   TSA_SIMODE_SMC2			BIT(31)
#define   TSA_SIMODE_SMC1			BIT(15)
#define   TSA_SIMODE_TDMA_MASK			GENMASK(11, 0)
#define   TSA_SIMODE_TDMA(x)			FIELD_PREP(TSA_SIMODE_TDMA_MASK, x)
#define   TSA_SIMODE_TDMB_MASK			GENMASK(27, 16)
#define   TSA_SIMODE_TDMB(x)			FIELD_PREP(TSA_SIMODE_TDMB_MASK, x)
#define     TSA_SIMODE_TDM_MASK			GENMASK(11, 0)
#define     TSA_SIMODE_TDM_SDM_MASK		GENMASK(11, 10)
#define       TSA_SIMODE_TDM_SDM_NORM		FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x0)
#define       TSA_SIMODE_TDM_SDM_ECHO		FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x1)
#define       TSA_SIMODE_TDM_SDM_INTL_LOOP	FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x2)
#define       TSA_SIMODE_TDM_SDM_LOOP_CTRL	FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x3)
#define     TSA_SIMODE_TDM_RFSD_MASK		GENMASK(9, 8)
#define     TSA_SIMODE_TDM_RFSD(x)		FIELD_PREP(TSA_SIMODE_TDM_RFSD_MASK, x)
#define     TSA_SIMODE_TDM_DSC			BIT(7)
#define     TSA_SIMODE_TDM_CRT			BIT(6)
#define     TSA_SIMODE_TDM_STZ			BIT(5)
#define     TSA_SIMODE_TDM_CE			BIT(4)
#define     TSA_SIMODE_TDM_FE			BIT(3)
#define     TSA_SIMODE_TDM_GM			BIT(2)
#define     TSA_SIMODE_TDM_TFSD_MASK		GENMASK(1, 0)
#define     TSA_SIMODE_TDM_TFSD(x)		FIELD_PREP(TSA_SIMODE_TDM_TFSD_MASK, x)

/* SI global mode register (8 bits) */
#define TSA_SIGMR	0x04
#define TSA_SIGMR_ENB			BIT(3)
#define TSA_SIGMR_ENA			BIT(2)
#define TSA_SIGMR_RDM_MASK		GENMASK(1, 0)
#define   TSA_SIGMR_RDM_STATIC_TDMA	FIELD_PREP_CONST(TSA_SIGMR_RDM_MASK, 0x0)
#define   TSA_SIGMR_RDM_DYN_TDMA	FIELD_PREP_CONST(TSA_SIGMR_RDM_MASK, 0x1)
#define   TSA_SIGMR_RDM_STATIC_TDMAB	FIELD_PREP_CONST(TSA_SIGMR_RDM_MASK, 0x2)
#define   TSA_SIGMR_RDM_DYN_TDMAB	FIELD_PREP_CONST(TSA_SIGMR_RDM_MASK, 0x3)

/* SI clock route register (32 bits) */
#define TSA_SICR	0x0C
#define   TSA_SICR_SCC2_MASK		GENMASK(15, 8)
#define   TSA_SICR_SCC2(x)		FIELD_PREP(TSA_SICR_SCC2_MASK, x)
#define   TSA_SICR_SCC3_MASK		GENMASK(23, 16)
#define   TSA_SICR_SCC3(x)		FIELD_PREP(TSA_SICR_SCC3_MASK, x)
#define   TSA_SICR_SCC4_MASK		GENMASK(31, 24)
#define   TSA_SICR_SCC4(x)		FIELD_PREP(TSA_SICR_SCC4_MASK, x)
#define     TSA_SICR_SCC_MASK		GENMASK(7, 0)
#define     TSA_SICR_SCC_GRX		BIT(7)
#define     TSA_SICR_SCC_SCX_TSA	BIT(6)
#define     TSA_SICR_SCC_RXCS_MASK	GENMASK(5, 3)
#define       TSA_SICR_SCC_RXCS_BRG1	FIELD_PREP_CONST(TSA_SICR_SCC_RXCS_MASK, 0x0)
#define       TSA_SICR_SCC_RXCS_BRG2	FIELD_PREP_CONST(TSA_SICR_SCC_RXCS_MASK, 0x1)
#define       TSA_SICR_SCC_RXCS_BRG3	FIELD_PREP_CONST(TSA_SICR_SCC_RXCS_MASK, 0x2)
#define       TSA_SICR_SCC_RXCS_BRG4	FIELD_PREP_CONST(TSA_SICR_SCC_RXCS_MASK, 0x3)
#define       TSA_SICR_SCC_RXCS_CLK15	FIELD_PREP_CONST(TSA_SICR_SCC_RXCS_MASK, 0x4)
#define       TSA_SICR_SCC_RXCS_CLK26	FIELD_PREP_CONST(TSA_SICR_SCC_RXCS_MASK, 0x5)
#define       TSA_SICR_SCC_RXCS_CLK37	FIELD_PREP_CONST(TSA_SICR_SCC_RXCS_MASK, 0x6)
#define       TSA_SICR_SCC_RXCS_CLK48	FIELD_PREP_CONST(TSA_SICR_SCC_RXCS_MASK, 0x7)
#define     TSA_SICR_SCC_TXCS_MASK	GENMASK(2, 0)
#define       TSA_SICR_SCC_TXCS_BRG1	FIELD_PREP_CONST(TSA_SICR_SCC_TXCS_MASK, 0x0)
#define       TSA_SICR_SCC_TXCS_BRG2	FIELD_PREP_CONST(TSA_SICR_SCC_TXCS_MASK, 0x1)
#define       TSA_SICR_SCC_TXCS_BRG3	FIELD_PREP_CONST(TSA_SICR_SCC_TXCS_MASK, 0x2)
#define       TSA_SICR_SCC_TXCS_BRG4	FIELD_PREP_CONST(TSA_SICR_SCC_TXCS_MASK, 0x3)
#define       TSA_SICR_SCC_TXCS_CLK15	FIELD_PREP_CONST(TSA_SICR_SCC_TXCS_MASK, 0x4)
#define       TSA_SICR_SCC_TXCS_CLK26	FIELD_PREP_CONST(TSA_SICR_SCC_TXCS_MASK, 0x5)
#define       TSA_SICR_SCC_TXCS_CLK37	FIELD_PREP_CONST(TSA_SICR_SCC_TXCS_MASK, 0x6)
#define       TSA_SICR_SCC_TXCS_CLK48	FIELD_PREP_CONST(TSA_SICR_SCC_TXCS_MASK, 0x7)

struct tsa_entries_area {
	void __iomem *entries_start;
	void __iomem *entries_next;
	void __iomem *last_entry;
};

struct tsa_tdm {
	bool is_enable;
	struct clk *l1rclk_clk;
	struct clk *l1rsync_clk;
	struct clk *l1tclk_clk;
	struct clk *l1tsync_clk;
	u32 simode_tdm;
};

#define TSA_TDMA	0
#define TSA_TDMB	1

struct tsa {
	struct device *dev;
	void __iomem *si_regs;
	void __iomem *si_ram;
	resource_size_t si_ram_sz;
	spinlock_t	lock; /* Lock for read/modify/write sequence */
	int tdms; /* TSA_TDMx ORed */
	struct tsa_tdm tdm[2]; /* TDMa and TDMb */
	struct tsa_serial {
		unsigned int id;
		struct tsa_serial_info info;
	} serials[6];
};

static inline struct tsa *tsa_serial_get_tsa(struct tsa_serial *tsa_serial)
{
	/* The serials table is indexed by the serial id */
	return container_of(tsa_serial, struct tsa, serials[tsa_serial->id]);
}

static inline void tsa_write32(void __iomem *addr, u32 val)
{
	iowrite32be(val, addr);
}

static inline void tsa_write8(void __iomem *addr, u8 val)
{
	iowrite8(val, addr);
}

static inline u32 tsa_read32(void __iomem *addr)
{
	return ioread32be(addr);
}

static inline void tsa_clrbits32(void __iomem *addr, u32 clr)
{
	tsa_write32(addr, tsa_read32(addr) & ~clr);
}

static inline void tsa_clrsetbits32(void __iomem *addr, u32 clr, u32 set)
{
	tsa_write32(addr, (tsa_read32(addr) & ~clr) | set);
}

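/*
 * Connect a serial to the TSA: clear its routing field in the SI clock route
 * register (SICR) and set the SCx_TSA bit so the SCC is fed by the TSA.
 */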
int tsa_serial_connect(struct tsa_serial *tsa_serial)
{
	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
	unsigned long flags;
	u32 clear;
	u32 set;

	switch (tsa_serial->id) {
	case FSL_CPM_TSA_SCC2:
		clear = TSA_SICR_SCC2(TSA_SICR_SCC_MASK);
		set = TSA_SICR_SCC2(TSA_SICR_SCC_SCX_TSA);
		break;
	case FSL_CPM_TSA_SCC3:
		clear = TSA_SICR_SCC3(TSA_SICR_SCC_MASK);
		set = TSA_SICR_SCC3(TSA_SICR_SCC_SCX_TSA);
		break;
	case FSL_CPM_TSA_SCC4:
		clear = TSA_SICR_SCC4(TSA_SICR_SCC_MASK);
		set = TSA_SICR_SCC4(TSA_SICR_SCC_SCX_TSA);
		break;
	default:
		dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id);
		return -EINVAL;
	}

	spin_lock_irqsave(&tsa->lock, flags);
	tsa_clrsetbits32(tsa->si_regs + TSA_SICR, clear, set);
	spin_unlock_irqrestore(&tsa->lock, flags);

	return 0;
}
EXPORT_SYMBOL(tsa_serial_connect);

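/*
 * Disconnect a serial from the TSA: clear its whole routing field
 * (SCx_TSA included) in SICR.
 */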
int tsa_serial_disconnect(struct tsa_serial *tsa_serial)
{
	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
	unsigned long flags;
	u32 clear;

	switch (tsa_serial->id) {
	case FSL_CPM_TSA_SCC2:
		clear = TSA_SICR_SCC2(TSA_SICR_SCC_MASK);
		break;
	case FSL_CPM_TSA_SCC3:
		clear = TSA_SICR_SCC3(TSA_SICR_SCC_MASK);
		break;
	case FSL_CPM_TSA_SCC4:
		clear = TSA_SICR_SCC4(TSA_SICR_SCC_MASK);
		break;
	default:
		dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id);
		return -EINVAL;
	}

	spin_lock_irqsave(&tsa->lock, flags);
	tsa_clrsetbits32(tsa->si_regs + TSA_SICR, clear, 0);
	spin_unlock_irqrestore(&tsa->lock, flags);

	return 0;
}
EXPORT_SYMBOL(tsa_serial_disconnect);

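/*
 * Return the routing information (number of RX/TX time slots and clock
 * rates) gathered for this serial while parsing the device tree routes.
 */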
int tsa_serial_get_info(struct tsa_serial *tsa_serial, struct tsa_serial_info *info)
{
	memcpy(info, &tsa_serial->info, sizeof(*info));
	return 0;
}
EXPORT_SYMBOL(tsa_serial_get_info);

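/*
 * Compute the SI RAM window available for a given TDM and direction. The RAM
 * holds the RX tables in its first half and the TX tables in its second
 * half; when TDMB is used (alone or with TDMA), each half is split again
 * between a TDMA area and a TDMB area.
 */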
static void tsa_cpm1_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
				       u32 tdms, u32 tdm_id, bool is_rx)
{
	resource_size_t quarter;
	resource_size_t half;

	quarter = tsa->si_ram_sz / 4;
	half = tsa->si_ram_sz / 2;

	if (tdms == BIT(TSA_TDMA)) {
		/* Only TDMA */
		if (is_rx) {
			/* First half of si_ram */
			area->entries_start = tsa->si_ram;
			area->entries_next = area->entries_start + half;
			area->last_entry = NULL;
		} else {
			/* Second half of si_ram */
			area->entries_start = tsa->si_ram + half;
			area->entries_next = area->entries_start + half;
			area->last_entry = NULL;
		}
	} else {
		/* Only TDMB or both TDMs */
		if (tdm_id == TSA_TDMA) {
			if (is_rx) {
				/* First half of first half of si_ram */
				area->entries_start = tsa->si_ram;
				area->entries_next = area->entries_start + quarter;
				area->last_entry = NULL;
			} else {
				/* First half of second half of si_ram */
				area->entries_start = tsa->si_ram + (2 * quarter);
				area->entries_next = area->entries_start + quarter;
				area->last_entry = NULL;
			}
		} else {
			if (is_rx) {
				/* Second half of first half of si_ram */
				area->entries_start = tsa->si_ram + quarter;
				area->entries_next = area->entries_start + quarter;
				area->last_entry = NULL;
			} else {
				/* Second half of second half of si_ram */
				area->entries_start = tsa->si_ram + (3 * quarter);
				area->entries_next = area->entries_start + quarter;
				area->last_entry = NULL;
			}
		}
	}
}

static void tsa_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
				  u32 tdms, u32 tdm_id, bool is_rx)
{
	tsa_cpm1_init_entries_area(tsa, area, tdms, tdm_id, is_rx);
}

static const char *tsa_cpm1_serial_id2name(struct tsa *tsa, u32 serial_id)
{
	switch (serial_id) {
	case FSL_CPM_TSA_NU:	return "Not used";
	case FSL_CPM_TSA_SCC2:	return "SCC2";
	case FSL_CPM_TSA_SCC3:	return "SCC3";
	case FSL_CPM_TSA_SCC4:	return "SCC4";
	case FSL_CPM_TSA_SMC1:	return "SMC1";
	case FSL_CPM_TSA_SMC2:	return "SMC2";
	default:
		break;
	}
	return NULL;
}

static const char *tsa_serial_id2name(struct tsa *tsa, u32 serial_id)
{
	return tsa_cpm1_serial_id2name(tsa, serial_id);
}

static u32 tsa_cpm1_serial_id2csel(struct tsa *tsa, u32 serial_id)
{
	switch (serial_id) {
	case FSL_CPM_TSA_SCC2:	return TSA_CPM1_SIRAM_ENTRY_CSEL_SCC2;
	case FSL_CPM_TSA_SCC3:	return TSA_CPM1_SIRAM_ENTRY_CSEL_SCC3;
	case FSL_CPM_TSA_SCC4:	return TSA_CPM1_SIRAM_ENTRY_CSEL_SCC4;
	case FSL_CPM_TSA_SMC1:	return TSA_CPM1_SIRAM_ENTRY_CSEL_SMC1;
	case FSL_CPM_TSA_SMC2:	return TSA_CPM1_SIRAM_ENTRY_CSEL_SMC2;
	default:
		break;
	}
	return TSA_CPM1_SIRAM_ENTRY_CSEL_NU;
}

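/*
 * Append routing entries covering 'count' time slots directed to
 * 'serial_id'. Each SI RAM entry handles at most 16 time slots; the LAST
 * flag is moved from the previous entry to the new final one.
 */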
static int tsa_cpm1_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
			      u32 count, u32 serial_id)
{
	void __iomem *addr;
	u32 left;
	u32 val;
	u32 cnt;
	u32 nb;

	addr = area->last_entry ? area->last_entry + 4 : area->entries_start;

	nb = DIV_ROUND_UP(count, 8);
	if ((addr + (nb * 4)) > area->entries_next) {
		dev_err(tsa->dev, "si ram area full\n");
		return -ENOSPC;
	}

	if (area->last_entry) {
		/* Clear last flag */
		tsa_clrbits32(area->last_entry, TSA_CPM1_SIRAM_ENTRY_LAST);
	}

	left = count;
	while (left) {
		val = TSA_CPM1_SIRAM_ENTRY_BYTE | tsa_cpm1_serial_id2csel(tsa, serial_id);

		if (left > 16) {
			cnt = 16;
		} else {
			cnt = left;
			val |= TSA_CPM1_SIRAM_ENTRY_LAST;
			area->last_entry = addr;
		}
		val |= TSA_CPM1_SIRAM_ENTRY_CNT(cnt - 1);

		tsa_write32(addr, val);
		addr += 4;
		left -= cnt;
	}

	return 0;
}

static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
			 u32 count, u32 serial_id)
{
	return tsa_cpm1_add_entry(tsa, area, count, serial_id);
}

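/*
 * Parse one "fsl,rx-ts-routes" or "fsl,tx-ts-routes" property: a list of
 * (count, serial) pairs describing consecutive time slots, for instance
 * (illustrative values only):
 *	fsl,rx-ts-routes = <2 FSL_CPM_TSA_SCC2>, <1 FSL_CPM_TSA_NU>;
 * routes time slots 0..1 to SCC2 and leaves time slot 2 unused. The
 * corresponding SI RAM entries are programmed and per-serial information
 * (time-slot counts, clock rates) is accumulated.
 */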
static int tsa_of_parse_tdm_route(struct tsa *tsa, struct device_node *tdm_np,
				  u32 tdms, u32 tdm_id, bool is_rx)
{
	struct tsa_entries_area area;
	const char *route_name;
	u32 serial_id;
	int len, i;
	u32 count;
	const char *serial_name;
	struct tsa_serial_info *serial_info;
	struct tsa_tdm *tdm;
	int ret;
	u32 ts;

	route_name = is_rx ? "fsl,rx-ts-routes" : "fsl,tx-ts-routes";

	len = of_property_count_u32_elems(tdm_np, route_name);
	if (len < 0) {
		dev_err(tsa->dev, "%pOF: failed to read %s\n", tdm_np, route_name);
		return len;
	}
	if (len % 2 != 0) {
		dev_err(tsa->dev, "%pOF: wrong %s format\n", tdm_np, route_name);
		return -EINVAL;
	}

	tsa_init_entries_area(tsa, &area, tdms, tdm_id, is_rx);
	ts = 0;
	for (i = 0; i < len; i += 2) {
		of_property_read_u32_index(tdm_np, route_name, i, &count);
		of_property_read_u32_index(tdm_np, route_name, i + 1, &serial_id);

		if (serial_id >= ARRAY_SIZE(tsa->serials)) {
			dev_err(tsa->dev, "%pOF: invalid serial id (%u)\n",
				tdm_np, serial_id);
			return -EINVAL;
		}

		serial_name = tsa_serial_id2name(tsa, serial_id);
		if (!serial_name) {
			dev_err(tsa->dev, "%pOF: unsupported serial id (%u)\n",
				tdm_np, serial_id);
			return -EINVAL;
		}

		dev_dbg(tsa->dev, "tdm_id=%u, %s ts %u..%u -> %s\n",
			tdm_id, route_name, ts, ts + count - 1, serial_name);
		ts += count;

		ret = tsa_add_entry(tsa, &area, count, serial_id);
		if (ret)
			return ret;

		serial_info = &tsa->serials[serial_id].info;
		tdm = &tsa->tdm[tdm_id];
		if (is_rx) {
			serial_info->rx_fs_rate = clk_get_rate(tdm->l1rsync_clk);
			serial_info->rx_bit_rate = clk_get_rate(tdm->l1rclk_clk);
			serial_info->nb_rx_ts += count;
		} else {
			serial_info->tx_fs_rate = tdm->l1tsync_clk ?
				clk_get_rate(tdm->l1tsync_clk) :
				clk_get_rate(tdm->l1rsync_clk);
			serial_info->tx_bit_rate = tdm->l1tclk_clk ?
				clk_get_rate(tdm->l1tclk_clk) :
				clk_get_rate(tdm->l1rclk_clk);
			serial_info->nb_tx_ts += count;
		}
	}
	return 0;
}

static inline int tsa_of_parse_tdm_rx_route(struct tsa *tsa,
					    struct device_node *tdm_np,
					    u32 tdms, u32 tdm_id)
{
	return tsa_of_parse_tdm_route(tsa, tdm_np, tdms, tdm_id, true);
}

static inline int tsa_of_parse_tdm_tx_route(struct tsa *tsa,
					    struct device_node *tdm_np,
					    u32 tdms, u32 tdm_id)
{
	return tsa_of_parse_tdm_route(tsa, tdm_np, tdms, tdm_id, false);
}

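/*
 * Parse the TDM child nodes. A first pass only records which TDMs are
 * present (needed to size the SI RAM areas), then a second pass configures
 * each TDM: SIMODE bits, L1 clocks and RX/TX time-slot routes.
 */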
static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
{
	struct device_node *tdm_np;
	struct tsa_tdm *tdm;
	struct clk *clk;
	u32 tdm_id, val;
	int ret;
	int i;

	tsa->tdms = 0;
	for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++)
		tsa->tdm[i].is_enable = false;

	for_each_available_child_of_node(np, tdm_np) {
		ret = of_property_read_u32(tdm_np, "reg", &tdm_id);
		if (ret) {
			dev_err(tsa->dev, "%pOF: failed to read reg\n", tdm_np);
			of_node_put(tdm_np);
			return ret;
		}
		switch (tdm_id) {
		case 0:
			tsa->tdms |= BIT(TSA_TDMA);
			break;
		case 1:
			tsa->tdms |= BIT(TSA_TDMB);
			break;
		default:
			dev_err(tsa->dev, "%pOF: Invalid tdm_id (%u)\n", tdm_np,
				tdm_id);
			of_node_put(tdm_np);
			return -EINVAL;
		}
	}

	for_each_available_child_of_node(np, tdm_np) {
		ret = of_property_read_u32(tdm_np, "reg", &tdm_id);
		if (ret) {
			dev_err(tsa->dev, "%pOF: failed to read reg\n", tdm_np);
			of_node_put(tdm_np);
			return ret;
		}

		tdm = &tsa->tdm[tdm_id];
		tdm->simode_tdm = TSA_SIMODE_TDM_SDM_NORM;

		val = 0;
		ret = of_property_read_u32(tdm_np, "fsl,rx-frame-sync-delay-bits",
					   &val);
		if (ret && ret != -EINVAL) {
			dev_err(tsa->dev,
				"%pOF: failed to read fsl,rx-frame-sync-delay-bits\n",
				tdm_np);
			of_node_put(tdm_np);
			return ret;
		}
		if (val > 3) {
			dev_err(tsa->dev,
				"%pOF: Invalid fsl,rx-frame-sync-delay-bits (%u)\n",
				tdm_np, val);
			of_node_put(tdm_np);
			return -EINVAL;
		}
		tdm->simode_tdm |= TSA_SIMODE_TDM_RFSD(val);

		val = 0;
		ret = of_property_read_u32(tdm_np, "fsl,tx-frame-sync-delay-bits",
					   &val);
		if (ret && ret != -EINVAL) {
			dev_err(tsa->dev,
				"%pOF: failed to read fsl,tx-frame-sync-delay-bits\n",
				tdm_np);
			of_node_put(tdm_np);
			return ret;
		}
		if (val > 3) {
			dev_err(tsa->dev,
				"%pOF: Invalid fsl,tx-frame-sync-delay-bits (%u)\n",
				tdm_np, val);
			of_node_put(tdm_np);
			return -EINVAL;
		}
		tdm->simode_tdm |= TSA_SIMODE_TDM_TFSD(val);

		if (of_property_read_bool(tdm_np, "fsl,common-rxtx-pins"))
			tdm->simode_tdm |= TSA_SIMODE_TDM_CRT;

		if (of_property_read_bool(tdm_np, "fsl,clock-falling-edge"))
			tdm->simode_tdm |= TSA_SIMODE_TDM_CE;

		if (of_property_read_bool(tdm_np, "fsl,fsync-rising-edge"))
			tdm->simode_tdm |= TSA_SIMODE_TDM_FE;

		if (of_property_read_bool(tdm_np, "fsl,double-speed-clock"))
			tdm->simode_tdm |= TSA_SIMODE_TDM_DSC;

		clk = of_clk_get_by_name(tdm_np, "l1rsync");
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			of_node_put(tdm_np);
			goto err;
		}
		ret = clk_prepare_enable(clk);
		if (ret) {
			clk_put(clk);
			of_node_put(tdm_np);
			goto err;
		}
		tdm->l1rsync_clk = clk;

		clk = of_clk_get_by_name(tdm_np, "l1rclk");
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			of_node_put(tdm_np);
			goto err;
		}
		ret = clk_prepare_enable(clk);
		if (ret) {
			clk_put(clk);
			of_node_put(tdm_np);
			goto err;
		}
		tdm->l1rclk_clk = clk;

		if (!(tdm->simode_tdm & TSA_SIMODE_TDM_CRT)) {
			clk = of_clk_get_by_name(tdm_np, "l1tsync");
			if (IS_ERR(clk)) {
				ret = PTR_ERR(clk);
				of_node_put(tdm_np);
				goto err;
			}
			ret = clk_prepare_enable(clk);
			if (ret) {
				clk_put(clk);
				of_node_put(tdm_np);
				goto err;
			}
			tdm->l1tsync_clk = clk;

			clk = of_clk_get_by_name(tdm_np, "l1tclk");
			if (IS_ERR(clk)) {
				ret = PTR_ERR(clk);
				of_node_put(tdm_np);
				goto err;
			}
			ret = clk_prepare_enable(clk);
			if (ret) {
				clk_put(clk);
				of_node_put(tdm_np);
				goto err;
			}
			tdm->l1tclk_clk = clk;
		}

		ret = tsa_of_parse_tdm_rx_route(tsa, tdm_np, tsa->tdms, tdm_id);
		if (ret) {
			of_node_put(tdm_np);
			goto err;
		}

		ret = tsa_of_parse_tdm_tx_route(tsa, tdm_np, tsa->tdms, tdm_id);
		if (ret) {
			of_node_put(tdm_np);
			goto err;
		}

		tdm->is_enable = true;
	}
	return 0;

err:
	for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++) {
		if (tsa->tdm[i].l1rsync_clk) {
			clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
			clk_put(tsa->tdm[i].l1rsync_clk);
		}
		if (tsa->tdm[i].l1rclk_clk) {
			clk_disable_unprepare(tsa->tdm[i].l1rclk_clk);
			clk_put(tsa->tdm[i].l1rclk_clk);
		}
		if (tsa->tdm[i].l1tsync_clk) {
			clk_disable_unprepare(tsa->tdm[i].l1tsync_clk);
			clk_put(tsa->tdm[i].l1tsync_clk);
		}
		if (tsa->tdm[i].l1tclk_clk) {
			clk_disable_unprepare(tsa->tdm[i].l1tclk_clk);
			clk_put(tsa->tdm[i].l1tclk_clk);
		}
	}
	return ret;
}

static void tsa_init_si_ram(struct tsa *tsa)
{
	resource_size_t i;

	/* Fill all entries as the last one */
	for (i = 0; i < tsa->si_ram_sz; i += 4)
		tsa_write32(tsa->si_ram + i, TSA_CPM1_SIRAM_ENTRY_LAST);
}

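/*
 * Probe: map the SI registers and SI RAM, mark every SI RAM entry as LAST,
 * parse the TDM nodes and finally program SIMODE and SIGMR to enable the
 * requested TDMs.
 */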
static int tsa_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct resource *res;
	struct tsa *tsa;
	unsigned int i;
	u32 val;
	int ret;

	tsa = devm_kzalloc(&pdev->dev, sizeof(*tsa), GFP_KERNEL);
	if (!tsa)
		return -ENOMEM;

	tsa->dev = &pdev->dev;

	for (i = 0; i < ARRAY_SIZE(tsa->serials); i++)
		tsa->serials[i].id = i;

	spin_lock_init(&tsa->lock);

	tsa->si_regs = devm_platform_ioremap_resource_byname(pdev, "si_regs");
	if (IS_ERR(tsa->si_regs))
		return PTR_ERR(tsa->si_regs);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "si_ram");
	if (!res) {
		dev_err(tsa->dev, "si_ram resource missing\n");
		return -EINVAL;
	}
	tsa->si_ram_sz = resource_size(res);
	tsa->si_ram = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(tsa->si_ram))
		return PTR_ERR(tsa->si_ram);

	tsa_init_si_ram(tsa);

	ret = tsa_of_parse_tdms(tsa, np);
	if (ret)
		return ret;

	/* Set SIMODE */
	val = 0;
	if (tsa->tdm[0].is_enable)
		val |= TSA_SIMODE_TDMA(tsa->tdm[0].simode_tdm);
	if (tsa->tdm[1].is_enable)
		val |= TSA_SIMODE_TDMB(tsa->tdm[1].simode_tdm);

	tsa_clrsetbits32(tsa->si_regs + TSA_SIMODE,
			 TSA_SIMODE_TDMA(TSA_SIMODE_TDM_MASK) |
			 TSA_SIMODE_TDMB(TSA_SIMODE_TDM_MASK),
			 val);

	/* Set SIGMR */
	val = (tsa->tdms == BIT(TSA_TDMA)) ?
		TSA_SIGMR_RDM_STATIC_TDMA : TSA_SIGMR_RDM_STATIC_TDMAB;
	if (tsa->tdms & BIT(TSA_TDMA))
		val |= TSA_SIGMR_ENA;
	if (tsa->tdms & BIT(TSA_TDMB))
		val |= TSA_SIGMR_ENB;
	tsa_write8(tsa->si_regs + TSA_SIGMR, val);

	platform_set_drvdata(pdev, tsa);

	return 0;
}

static void tsa_remove(struct platform_device *pdev)
{
	struct tsa *tsa = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++) {
		if (tsa->tdm[i].l1rsync_clk) {
			clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
			clk_put(tsa->tdm[i].l1rsync_clk);
		}
		if (tsa->tdm[i].l1rclk_clk) {
			clk_disable_unprepare(tsa->tdm[i].l1rclk_clk);
			clk_put(tsa->tdm[i].l1rclk_clk);
		}
		if (tsa->tdm[i].l1tsync_clk) {
			clk_disable_unprepare(tsa->tdm[i].l1tsync_clk);
			clk_put(tsa->tdm[i].l1tsync_clk);
		}
		if (tsa->tdm[i].l1tclk_clk) {
			clk_disable_unprepare(tsa->tdm[i].l1tclk_clk);
			clk_put(tsa->tdm[i].l1tclk_clk);
		}
	}
}

static const struct of_device_id tsa_id_table[] = {
	{ .compatible = "fsl,cpm1-tsa" },
	{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, tsa_id_table);

static struct platform_driver tsa_driver = {
	.driver = {
		.name = "fsl-tsa",
		.of_match_table = of_match_ptr(tsa_id_table),
	},
	.probe = tsa_probe,
	.remove_new = tsa_remove,
};
module_platform_driver(tsa_driver);

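/*
 * Resolve a one-cell phandle to a TSA serial. The single argument is the
 * FSL_CPM_TSA_* identifier used to index the serials table; a reference on
 * the TSA device is kept and must be dropped with tsa_serial_put().
 */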
struct tsa_serial *tsa_serial_get_byphandle(struct device_node *np,
					    const char *phandle_name)
{
	struct of_phandle_args out_args;
	struct platform_device *pdev;
	struct tsa_serial *tsa_serial;
	struct tsa *tsa;
	int ret;

	ret = of_parse_phandle_with_fixed_args(np, phandle_name, 1, 0, &out_args);
	if (ret < 0)
		return ERR_PTR(ret);

	if (!of_match_node(tsa_driver.driver.of_match_table, out_args.np)) {
		of_node_put(out_args.np);
		return ERR_PTR(-EINVAL);
	}

	pdev = of_find_device_by_node(out_args.np);
	of_node_put(out_args.np);
	if (!pdev)
		return ERR_PTR(-ENODEV);

	tsa = platform_get_drvdata(pdev);
	if (!tsa) {
		platform_device_put(pdev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	if (out_args.args_count != 1) {
		platform_device_put(pdev);
		return ERR_PTR(-EINVAL);
	}

	if (out_args.args[0] >= ARRAY_SIZE(tsa->serials)) {
		platform_device_put(pdev);
		return ERR_PTR(-EINVAL);
	}

	tsa_serial = &tsa->serials[out_args.args[0]];

	/*
	 * Be sure that the serial id matches the phandle arg.
	 * The tsa_serials table is indexed by serial ids. The serial id is set
	 * during the probe() call and needs to be coherent.
	 */
	if (WARN_ON(tsa_serial->id != out_args.args[0])) {
		platform_device_put(pdev);
		return ERR_PTR(-EINVAL);
	}

	return tsa_serial;
}
EXPORT_SYMBOL(tsa_serial_get_byphandle);

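/*
 * Release the device reference taken by tsa_serial_get_byphandle().
 */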
void tsa_serial_put(struct tsa_serial *tsa_serial)
{
	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);

	put_device(tsa->dev);
}
EXPORT_SYMBOL(tsa_serial_put);

static void devm_tsa_serial_release(struct device *dev, void *res)
{
	struct tsa_serial **tsa_serial = res;

	tsa_serial_put(*tsa_serial);
}

struct tsa_serial *devm_tsa_serial_get_byphandle(struct device *dev,
						 struct device_node *np,
						 const char *phandle_name)
{
	struct tsa_serial *tsa_serial;
	struct tsa_serial **dr;

	dr = devres_alloc(devm_tsa_serial_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return ERR_PTR(-ENOMEM);

	tsa_serial = tsa_serial_get_byphandle(np, phandle_name);
	if (!IS_ERR(tsa_serial)) {
		*dr = tsa_serial;
		devres_add(dev, dr);
	} else {
		devres_free(dr);
	}

	return tsa_serial;
}
EXPORT_SYMBOL(devm_tsa_serial_get_byphandle);
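/*
 * Usage sketch from a consumer driver (hypothetical node and property name,
 * assuming a one-cell "fsl,tsa-serial" phandle in the consumer node):
 *
 *	serial = devm_tsa_serial_get_byphandle(dev, np, "fsl,tsa-serial");
 *	if (IS_ERR(serial))
 *		return PTR_ERR(serial);
 *	ret = tsa_serial_connect(serial);
 */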

MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
MODULE_DESCRIPTION("CPM TSA driver");
MODULE_LICENSE("GPL");