// SPDX-License-Identifier: GPL-2.0
/*
 * TSA driver
 *
 * Copyright 2022 CS GROUP France
 *
 * Author: Herve Codina <herve.codina@bootlin.com>
 */

#include "tsa.h"
#include <dt-bindings/soc/cpm1-fsl,tsa.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* TSA SI RAM routing tables entry (CPM1) */
#define TSA_CPM1_SIRAM_ENTRY_LAST	BIT(16)
#define TSA_CPM1_SIRAM_ENTRY_BYTE	BIT(17)
#define TSA_CPM1_SIRAM_ENTRY_CNT_MASK	GENMASK(21, 18)
#define TSA_CPM1_SIRAM_ENTRY_CNT(x)	FIELD_PREP(TSA_CPM1_SIRAM_ENTRY_CNT_MASK, x)
#define TSA_CPM1_SIRAM_ENTRY_CSEL_MASK	GENMASK(24, 22)
#define TSA_CPM1_SIRAM_ENTRY_CSEL_NU	FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x0)
#define TSA_CPM1_SIRAM_ENTRY_CSEL_SCC2	FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x2)
#define TSA_CPM1_SIRAM_ENTRY_CSEL_SCC3	FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x3)
#define TSA_CPM1_SIRAM_ENTRY_CSEL_SCC4	FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x4)
#define TSA_CPM1_SIRAM_ENTRY_CSEL_SMC1	FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x5)
#define TSA_CPM1_SIRAM_ENTRY_CSEL_SMC2	FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x6)

/* SI mode register (32 bits) */
#define TSA_CPM1_SIMODE		0x00
#define   TSA_CPM1_SIMODE_SMC2			BIT(31)
#define   TSA_CPM1_SIMODE_SMC1			BIT(15)
#define   TSA_CPM1_SIMODE_TDMA_MASK		GENMASK(11, 0)
#define   TSA_CPM1_SIMODE_TDMA(x)		FIELD_PREP(TSA_CPM1_SIMODE_TDMA_MASK, x)
#define   TSA_CPM1_SIMODE_TDMB_MASK		GENMASK(27, 16)
#define   TSA_CPM1_SIMODE_TDMB(x)		FIELD_PREP(TSA_CPM1_SIMODE_TDMB_MASK, x)
#define     TSA_CPM1_SIMODE_TDM_MASK		GENMASK(11, 0)
#define     TSA_SIMODE_TDM_SDM_MASK		GENMASK(11, 10)
#define       TSA_SIMODE_TDM_SDM_NORM		FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x0)
#define       TSA_SIMODE_TDM_SDM_ECHO		FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x1)
#define       TSA_SIMODE_TDM_SDM_INTL_LOOP	FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x2)
#define       TSA_SIMODE_TDM_SDM_LOOP_CTRL	FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x3)
#define     TSA_SIMODE_TDM_RFSD_MASK		GENMASK(9, 8)
#define     TSA_SIMODE_TDM_RFSD(x)		FIELD_PREP(TSA_SIMODE_TDM_RFSD_MASK, x)
#define     TSA_SIMODE_TDM_DSC			BIT(7)
#define     TSA_SIMODE_TDM_CRT			BIT(6)
#define     TSA_CPM1_SIMODE_TDM_STZ		BIT(5)
#define     TSA_SIMODE_TDM_CE			BIT(4)
#define     TSA_SIMODE_TDM_FE			BIT(3)
#define     TSA_SIMODE_TDM_GM			BIT(2)
#define     TSA_SIMODE_TDM_TFSD_MASK		GENMASK(1, 0)
#define     TSA_SIMODE_TDM_TFSD(x)		FIELD_PREP(TSA_SIMODE_TDM_TFSD_MASK, x)

/* CPM SI global mode register (8 bits) */
#define TSA_CPM1_SIGMR	0x04
#define TSA_CPM1_SIGMR_ENB			BIT(3)
#define TSA_CPM1_SIGMR_ENA			BIT(2)
#define TSA_CPM1_SIGMR_RDM_MASK			GENMASK(1, 0)
#define   TSA_CPM1_SIGMR_RDM_STATIC_TDMA	FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x0)
#define   TSA_CPM1_SIGMR_RDM_DYN_TDMA		FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x1)
#define   TSA_CPM1_SIGMR_RDM_STATIC_TDMAB	FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x2)
#define   TSA_CPM1_SIGMR_RDM_DYN_TDMAB		FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x3)

/* SI clock route register (32 bits) */
#define TSA_CPM1_SICR	0x0C
#define   TSA_CPM1_SICR_SCC2_MASK		GENMASK(15, 8)
#define   TSA_CPM1_SICR_SCC2(x)			FIELD_PREP(TSA_CPM1_SICR_SCC2_MASK, x)
#define   TSA_CPM1_SICR_SCC3_MASK		GENMASK(23, 16)
#define   TSA_CPM1_SICR_SCC3(x)			FIELD_PREP(TSA_CPM1_SICR_SCC3_MASK, x)
#define   TSA_CPM1_SICR_SCC4_MASK		GENMASK(31, 24)
#define   TSA_CPM1_SICR_SCC4(x)			FIELD_PREP(TSA_CPM1_SICR_SCC4_MASK, x)
#define     TSA_CPM1_SICR_SCC_MASK		GENMASK(7, 0)
#define     TSA_CPM1_SICR_SCC_GRX		BIT(7)
#define     TSA_CPM1_SICR_SCC_SCX_TSA		BIT(6)
#define     TSA_CPM1_SICR_SCC_RXCS_MASK		GENMASK(5, 3)
#define       TSA_CPM1_SICR_SCC_RXCS_BRG1	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x0)
#define       TSA_CPM1_SICR_SCC_RXCS_BRG2	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x1)
#define       TSA_CPM1_SICR_SCC_RXCS_BRG3	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x2)
#define       TSA_CPM1_SICR_SCC_RXCS_BRG4	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x3)
#define       TSA_CPM1_SICR_SCC_RXCS_CLK15	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x4)
#define       TSA_CPM1_SICR_SCC_RXCS_CLK26	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x5)
#define       TSA_CPM1_SICR_SCC_RXCS_CLK37	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x6)
#define       TSA_CPM1_SICR_SCC_RXCS_CLK48	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x7)
#define     TSA_CPM1_SICR_SCC_TXCS_MASK		GENMASK(2, 0)
#define       TSA_CPM1_SICR_SCC_TXCS_BRG1	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x0)
#define       TSA_CPM1_SICR_SCC_TXCS_BRG2	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x1)
#define       TSA_CPM1_SICR_SCC_TXCS_BRG3	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x2)
#define       TSA_CPM1_SICR_SCC_TXCS_BRG4	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x3)
#define       TSA_CPM1_SICR_SCC_TXCS_CLK15	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x4)
#define       TSA_CPM1_SICR_SCC_TXCS_CLK26	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x5)
#define       TSA_CPM1_SICR_SCC_TXCS_CLK37	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x6)
#define       TSA_CPM1_SICR_SCC_TXCS_CLK48	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x7)

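/*
 * SI RAM area dedicated to one TDM direction:
 * [entries_start, entries_next) bounds the area and last_entry points to the
 * entry currently flagged as last (NULL while the area is still empty).
 */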
struct tsa_entries_area {
	void __iomem *entries_start;
	void __iomem *entries_next;
	void __iomem *last_entry;
};

struct tsa_tdm {
	bool is_enable;
	struct clk *l1rclk_clk;
	struct clk *l1rsync_clk;
	struct clk *l1tclk_clk;
	struct clk *l1tsync_clk;
	u32 simode_tdm;
};

#define TSA_TDMA	0
#define TSA_TDMB	1

enum tsa_version {
	TSA_CPM1 = 1, /* Avoid 0 value */
};

struct tsa {
	struct device *dev;
	void __iomem *si_regs;
	void __iomem *si_ram;
	resource_size_t si_ram_sz;
	spinlock_t	lock; /* Lock for read/modify/write sequence */
	enum tsa_version version;
	int tdms; /* TSA_TDMx ORed */
	struct tsa_tdm tdm[2]; /* TDMa and TDMb */
	struct tsa_serial {
		unsigned int id;
		struct tsa_serial_info info;
	} serials[6];
};

static inline struct tsa *tsa_serial_get_tsa(struct tsa_serial *tsa_serial)
{
	/* The serials table is indexed by the serial id */
	return container_of(tsa_serial, struct tsa, serials[tsa_serial->id]);
}

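/* SI register accessors: the SI memory map is big-endian */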
static inline void tsa_write32(void __iomem *addr, u32 val)
{
	iowrite32be(val, addr);
}

static inline void tsa_write8(void __iomem *addr, u8 val)
{
	iowrite8(val, addr);
}

static inline u32 tsa_read32(void __iomem *addr)
{
	return ioread32be(addr);
}

static inline void tsa_clrbits32(void __iomem *addr, u32 clr)
{
	tsa_write32(addr, tsa_read32(addr) & ~clr);
}

static inline void tsa_clrsetbits32(void __iomem *addr, u32 clr, u32 set)
{
	tsa_write32(addr, (tsa_read32(addr) & ~clr) | set);
}

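/*
 * Connect or disconnect a serial to/from the TSA by setting or clearing its
 * SCx_TSA bit in the SI clock route register (SICR). Only SCC2, SCC3 and
 * SCC4 can be routed here; other serial ids are rejected.
 */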
static int tsa_cpm1_serial_connect(struct tsa_serial *tsa_serial, bool connect)
{
	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
	unsigned long flags;
	u32 clear;
	u32 set;

	switch (tsa_serial->id) {
	case FSL_CPM_TSA_SCC2:
		clear = TSA_CPM1_SICR_SCC2(TSA_CPM1_SICR_SCC_MASK);
		set = TSA_CPM1_SICR_SCC2(TSA_CPM1_SICR_SCC_SCX_TSA);
		break;
	case FSL_CPM_TSA_SCC3:
		clear = TSA_CPM1_SICR_SCC3(TSA_CPM1_SICR_SCC_MASK);
		set = TSA_CPM1_SICR_SCC3(TSA_CPM1_SICR_SCC_SCX_TSA);
		break;
	case FSL_CPM_TSA_SCC4:
		clear = TSA_CPM1_SICR_SCC4(TSA_CPM1_SICR_SCC_MASK);
		set = TSA_CPM1_SICR_SCC4(TSA_CPM1_SICR_SCC_SCX_TSA);
		break;
	default:
		dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id);
		return -EINVAL;
	}

	spin_lock_irqsave(&tsa->lock, flags);
	tsa_clrsetbits32(tsa->si_regs + TSA_CPM1_SICR, clear,
			 connect ? set : 0);
	spin_unlock_irqrestore(&tsa->lock, flags);

	return 0;
}

int tsa_serial_connect(struct tsa_serial *tsa_serial)
{
	return tsa_cpm1_serial_connect(tsa_serial, true);
}
EXPORT_SYMBOL(tsa_serial_connect);

int tsa_serial_disconnect(struct tsa_serial *tsa_serial)
{
	return tsa_cpm1_serial_connect(tsa_serial, false);
}
EXPORT_SYMBOL(tsa_serial_disconnect);

int tsa_serial_get_info(struct tsa_serial *tsa_serial, struct tsa_serial_info *info)
{
	memcpy(info, &tsa_serial->info, sizeof(*info));
	return 0;
}
EXPORT_SYMBOL(tsa_serial_get_info);

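/*
 * Compute the SI RAM area available for one TDM direction.
 * With TDMA only, the RAM is split in two halves (TDMA RX, TDMA TX).
 * When TDMB is also used, each half is split again, giving one quarter per
 * TDM and direction (TDMA RX, TDMB RX, TDMA TX, TDMB TX).
 */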
static void tsa_cpm1_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
				       u32 tdms, u32 tdm_id, bool is_rx)
{
	resource_size_t quarter;
	resource_size_t half;

	quarter = tsa->si_ram_sz / 4;
	half = tsa->si_ram_sz / 2;

	if (tdms == BIT(TSA_TDMA)) {
		/* Only TDMA */
		if (is_rx) {
			/* First half of si_ram */
			area->entries_start = tsa->si_ram;
			area->entries_next = area->entries_start + half;
			area->last_entry = NULL;
		} else {
			/* Second half of si_ram */
			area->entries_start = tsa->si_ram + half;
			area->entries_next = area->entries_start + half;
			area->last_entry = NULL;
		}
	} else {
		/* Only TDMB or both TDMs */
		if (tdm_id == TSA_TDMA) {
			if (is_rx) {
				/* First half of first half of si_ram */
				area->entries_start = tsa->si_ram;
				area->entries_next = area->entries_start + quarter;
				area->last_entry = NULL;
			} else {
				/* First half of second half of si_ram */
				area->entries_start = tsa->si_ram + (2 * quarter);
				area->entries_next = area->entries_start + quarter;
				area->last_entry = NULL;
			}
		} else {
			if (is_rx) {
				/* Second half of first half of si_ram */
				area->entries_start = tsa->si_ram + quarter;
				area->entries_next = area->entries_start + quarter;
				area->last_entry = NULL;
			} else {
				/* Second half of second half of si_ram */
				area->entries_start = tsa->si_ram + (3 * quarter);
				area->entries_next = area->entries_start + quarter;
				area->last_entry = NULL;
			}
		}
	}
}

static void tsa_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
				  u32 tdms, u32 tdm_id, bool is_rx)
{
	tsa_cpm1_init_entries_area(tsa, area, tdms, tdm_id, is_rx);
}

static const char *tsa_cpm1_serial_id2name(struct tsa *tsa, u32 serial_id)
{
	switch (serial_id) {
	case FSL_CPM_TSA_NU:	return "Not used";
	case FSL_CPM_TSA_SCC2:	return "SCC2";
	case FSL_CPM_TSA_SCC3:	return "SCC3";
	case FSL_CPM_TSA_SCC4:	return "SCC4";
	case FSL_CPM_TSA_SMC1:	return "SMC1";
	case FSL_CPM_TSA_SMC2:	return "SMC2";
	default:
		break;
	}
	return NULL;
}

static const char *tsa_serial_id2name(struct tsa *tsa, u32 serial_id)
{
	return tsa_cpm1_serial_id2name(tsa, serial_id);
}

static u32 tsa_cpm1_serial_id2csel(struct tsa *tsa, u32 serial_id)
{
	switch (serial_id) {
	case FSL_CPM_TSA_SCC2:	return TSA_CPM1_SIRAM_ENTRY_CSEL_SCC2;
	case FSL_CPM_TSA_SCC3:	return TSA_CPM1_SIRAM_ENTRY_CSEL_SCC3;
	case FSL_CPM_TSA_SCC4:	return TSA_CPM1_SIRAM_ENTRY_CSEL_SCC4;
	case FSL_CPM_TSA_SMC1:	return TSA_CPM1_SIRAM_ENTRY_CSEL_SMC1;
	case FSL_CPM_TSA_SMC2:	return TSA_CPM1_SIRAM_ENTRY_CSEL_SMC2;
	default:
		break;
	}
	return TSA_CPM1_SIRAM_ENTRY_CSEL_NU;
}

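/*
 * Append SI RAM entries routing 'count' time slots to the given serial.
 * One 32-bit entry handles at most 16 byte-resolution time slots, so larger
 * routes are split across several entries, and the LAST flag is moved to the
 * final entry written in the area.
 */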
static int tsa_cpm1_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
			      u32 count, u32 serial_id)
{
	void __iomem *addr;
	u32 left;
	u32 val;
	u32 cnt;
	u32 nb;

	addr = area->last_entry ? area->last_entry + 4 : area->entries_start;

	nb = DIV_ROUND_UP(count, 8);
	if ((addr + (nb * 4)) > area->entries_next) {
		dev_err(tsa->dev, "si ram area full\n");
		return -ENOSPC;
	}

	if (area->last_entry) {
		/* Clear last flag */
		tsa_clrbits32(area->last_entry, TSA_CPM1_SIRAM_ENTRY_LAST);
	}

	left = count;
	while (left) {
		val = TSA_CPM1_SIRAM_ENTRY_BYTE | tsa_cpm1_serial_id2csel(tsa, serial_id);

		if (left > 16) {
			cnt = 16;
		} else {
			cnt = left;
			val |= TSA_CPM1_SIRAM_ENTRY_LAST;
			area->last_entry = addr;
		}
		val |= TSA_CPM1_SIRAM_ENTRY_CNT(cnt - 1);

		tsa_write32(addr, val);
		addr += 4;
		left -= cnt;
	}

	return 0;
}

static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
			 u32 count, u32 serial_id)
{
	return tsa_cpm1_add_entry(tsa, area, count, serial_id);
}

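/*
 * Parse one "fsl,rx-ts-routes" or "fsl,tx-ts-routes" property, made of
 * <count serial-id> pairs, program the corresponding SI RAM entries and
 * update the per-serial info (number of time slots, bit rate, frame-sync
 * rate).
 */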
static int tsa_of_parse_tdm_route(struct tsa *tsa, struct device_node *tdm_np,
				  u32 tdms, u32 tdm_id, bool is_rx)
{
	struct tsa_entries_area area;
	const char *route_name;
	u32 serial_id;
	int len, i;
	u32 count;
	const char *serial_name;
	struct tsa_serial_info *serial_info;
	struct tsa_tdm *tdm;
	int ret;
	u32 ts;

	route_name = is_rx ? "fsl,rx-ts-routes" : "fsl,tx-ts-routes";

	len = of_property_count_u32_elems(tdm_np, route_name);
	if (len < 0) {
		dev_err(tsa->dev, "%pOF: failed to read %s\n", tdm_np, route_name);
		return len;
	}
	if (len % 2 != 0) {
		dev_err(tsa->dev, "%pOF: wrong %s format\n", tdm_np, route_name);
		return -EINVAL;
	}

	tsa_init_entries_area(tsa, &area, tdms, tdm_id, is_rx);
	ts = 0;
	for (i = 0; i < len; i += 2) {
		of_property_read_u32_index(tdm_np, route_name, i, &count);
		of_property_read_u32_index(tdm_np, route_name, i + 1, &serial_id);

		if (serial_id >= ARRAY_SIZE(tsa->serials)) {
			dev_err(tsa->dev, "%pOF: invalid serial id (%u)\n",
				tdm_np, serial_id);
			return -EINVAL;
		}

		serial_name = tsa_serial_id2name(tsa, serial_id);
		if (!serial_name) {
			dev_err(tsa->dev, "%pOF: unsupported serial id (%u)\n",
				tdm_np, serial_id);
			return -EINVAL;
		}

		dev_dbg(tsa->dev, "tdm_id=%u, %s ts %u..%u -> %s\n",
			tdm_id, route_name, ts, ts + count - 1, serial_name);
		ts += count;

		ret = tsa_add_entry(tsa, &area, count, serial_id);
		if (ret)
			return ret;

		serial_info = &tsa->serials[serial_id].info;
		tdm = &tsa->tdm[tdm_id];
		if (is_rx) {
			serial_info->rx_fs_rate = clk_get_rate(tdm->l1rsync_clk);
			serial_info->rx_bit_rate = clk_get_rate(tdm->l1rclk_clk);
			serial_info->nb_rx_ts += count;
		} else {
			serial_info->tx_fs_rate = tdm->l1tsync_clk ?
				clk_get_rate(tdm->l1tsync_clk) :
				clk_get_rate(tdm->l1rsync_clk);
			serial_info->tx_bit_rate = tdm->l1tclk_clk ?
				clk_get_rate(tdm->l1tclk_clk) :
				clk_get_rate(tdm->l1rclk_clk);
			serial_info->nb_tx_ts += count;
		}
	}
	return 0;
}

static inline int tsa_of_parse_tdm_rx_route(struct tsa *tsa,
					    struct device_node *tdm_np,
					    u32 tdms, u32 tdm_id)
{
	return tsa_of_parse_tdm_route(tsa, tdm_np, tdms, tdm_id, true);
}

static inline int tsa_of_parse_tdm_tx_route(struct tsa *tsa,
					    struct device_node *tdm_np,
					    u32 tdms, u32 tdm_id)
{
	return tsa_of_parse_tdm_route(tsa, tdm_np, tdms, tdm_id, false);
}

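/*
 * Parse the TDM child nodes: TDM id, frame-sync delays, clock and sync
 * polarities, L1 clocks and RX/TX time-slot routes. Clocks are prepared and
 * enabled here and released on error or at remove time.
 */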
static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
{
	struct device_node *tdm_np;
	struct tsa_tdm *tdm;
	struct clk *clk;
	u32 tdm_id, val;
	int ret;
	int i;

	tsa->tdms = 0;
	for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++)
		tsa->tdm[i].is_enable = false;

	for_each_available_child_of_node(np, tdm_np) {
		ret = of_property_read_u32(tdm_np, "reg", &tdm_id);
		if (ret) {
			dev_err(tsa->dev, "%pOF: failed to read reg\n", tdm_np);
			of_node_put(tdm_np);
			return ret;
		}
		switch (tdm_id) {
		case 0:
			tsa->tdms |= BIT(TSA_TDMA);
			break;
		case 1:
			tsa->tdms |= BIT(TSA_TDMB);
			break;
		default:
			dev_err(tsa->dev, "%pOF: Invalid tdm_id (%u)\n", tdm_np,
				tdm_id);
			of_node_put(tdm_np);
			return -EINVAL;
		}
	}

	for_each_available_child_of_node(np, tdm_np) {
		ret = of_property_read_u32(tdm_np, "reg", &tdm_id);
		if (ret) {
			dev_err(tsa->dev, "%pOF: failed to read reg\n", tdm_np);
			of_node_put(tdm_np);
			return ret;
		}

		tdm = &tsa->tdm[tdm_id];
		tdm->simode_tdm = TSA_SIMODE_TDM_SDM_NORM;

		val = 0;
		ret = of_property_read_u32(tdm_np, "fsl,rx-frame-sync-delay-bits",
					   &val);
		if (ret && ret != -EINVAL) {
			dev_err(tsa->dev,
				"%pOF: failed to read fsl,rx-frame-sync-delay-bits\n",
				tdm_np);
			of_node_put(tdm_np);
			return ret;
		}
		if (val > 3) {
			dev_err(tsa->dev,
				"%pOF: Invalid fsl,rx-frame-sync-delay-bits (%u)\n",
				tdm_np, val);
			of_node_put(tdm_np);
			return -EINVAL;
		}
		tdm->simode_tdm |= TSA_SIMODE_TDM_RFSD(val);

		val = 0;
		ret = of_property_read_u32(tdm_np, "fsl,tx-frame-sync-delay-bits",
					   &val);
		if (ret && ret != -EINVAL) {
			dev_err(tsa->dev,
				"%pOF: failed to read fsl,tx-frame-sync-delay-bits\n",
				tdm_np);
			of_node_put(tdm_np);
			return ret;
		}
		if (val > 3) {
			dev_err(tsa->dev,
				"%pOF: Invalid fsl,tx-frame-sync-delay-bits (%u)\n",
				tdm_np, val);
			of_node_put(tdm_np);
			return -EINVAL;
		}
		tdm->simode_tdm |= TSA_SIMODE_TDM_TFSD(val);

		if (of_property_read_bool(tdm_np, "fsl,common-rxtx-pins"))
			tdm->simode_tdm |= TSA_SIMODE_TDM_CRT;

		if (of_property_read_bool(tdm_np, "fsl,clock-falling-edge"))
			tdm->simode_tdm |= TSA_SIMODE_TDM_CE;

		if (of_property_read_bool(tdm_np, "fsl,fsync-rising-edge"))
			tdm->simode_tdm |= TSA_SIMODE_TDM_FE;

		if (of_property_read_bool(tdm_np, "fsl,double-speed-clock"))
			tdm->simode_tdm |= TSA_SIMODE_TDM_DSC;

		clk = of_clk_get_by_name(tdm_np, "l1rsync");
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			of_node_put(tdm_np);
			goto err;
		}
		ret = clk_prepare_enable(clk);
		if (ret) {
			clk_put(clk);
			of_node_put(tdm_np);
			goto err;
		}
		tdm->l1rsync_clk = clk;

		clk = of_clk_get_by_name(tdm_np, "l1rclk");
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			of_node_put(tdm_np);
			goto err;
		}
		ret = clk_prepare_enable(clk);
		if (ret) {
			clk_put(clk);
			of_node_put(tdm_np);
			goto err;
		}
		tdm->l1rclk_clk = clk;

		if (!(tdm->simode_tdm & TSA_SIMODE_TDM_CRT)) {
			clk = of_clk_get_by_name(tdm_np, "l1tsync");
			if (IS_ERR(clk)) {
				ret = PTR_ERR(clk);
				of_node_put(tdm_np);
				goto err;
			}
			ret = clk_prepare_enable(clk);
			if (ret) {
				clk_put(clk);
				of_node_put(tdm_np);
				goto err;
			}
			tdm->l1tsync_clk = clk;

			clk = of_clk_get_by_name(tdm_np, "l1tclk");
			if (IS_ERR(clk)) {
				ret = PTR_ERR(clk);
				of_node_put(tdm_np);
				goto err;
			}
			ret = clk_prepare_enable(clk);
			if (ret) {
				clk_put(clk);
				of_node_put(tdm_np);
				goto err;
			}
			tdm->l1tclk_clk = clk;
		}

		ret = tsa_of_parse_tdm_rx_route(tsa, tdm_np, tsa->tdms, tdm_id);
		if (ret) {
			of_node_put(tdm_np);
			goto err;
		}

		ret = tsa_of_parse_tdm_tx_route(tsa, tdm_np, tsa->tdms, tdm_id);
		if (ret) {
			of_node_put(tdm_np);
			goto err;
		}

		tdm->is_enable = true;
	}
	return 0;

err:
	for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++) {
		if (tsa->tdm[i].l1rsync_clk) {
			clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
			clk_put(tsa->tdm[i].l1rsync_clk);
		}
		if (tsa->tdm[i].l1rclk_clk) {
			clk_disable_unprepare(tsa->tdm[i].l1rclk_clk);
			clk_put(tsa->tdm[i].l1rclk_clk);
		}
		if (tsa->tdm[i].l1tsync_clk) {
			clk_disable_unprepare(tsa->tdm[i].l1tsync_clk);
			clk_put(tsa->tdm[i].l1tsync_clk);
		}
		if (tsa->tdm[i].l1tclk_clk) {
			clk_disable_unprepare(tsa->tdm[i].l1tclk_clk);
			clk_put(tsa->tdm[i].l1tclk_clk);
		}
	}
	return ret;
}

static void tsa_init_si_ram(struct tsa *tsa)
{
	resource_size_t i;

	/* Fill all entries as the last one */
	for (i = 0; i < tsa->si_ram_sz; i += 4)
		tsa_write32(tsa->si_ram + i, TSA_CPM1_SIRAM_ENTRY_LAST);
}

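/*
 * Program SIMODE with the per-TDM configuration and enable the TDMs in SIGMR
 * using static (fixed) routing.
 */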
static int tsa_cpm1_setup(struct tsa *tsa)
{
	u32 val;

	/* Set SIMODE */
	val = 0;
	if (tsa->tdm[0].is_enable)
		val |= TSA_CPM1_SIMODE_TDMA(tsa->tdm[0].simode_tdm);
	if (tsa->tdm[1].is_enable)
		val |= TSA_CPM1_SIMODE_TDMB(tsa->tdm[1].simode_tdm);

	tsa_clrsetbits32(tsa->si_regs + TSA_CPM1_SIMODE,
			 TSA_CPM1_SIMODE_TDMA(TSA_CPM1_SIMODE_TDM_MASK) |
			 TSA_CPM1_SIMODE_TDMB(TSA_CPM1_SIMODE_TDM_MASK),
			 val);

	/* Set SIGMR */
	val = (tsa->tdms == BIT(TSA_TDMA)) ?
		TSA_CPM1_SIGMR_RDM_STATIC_TDMA : TSA_CPM1_SIGMR_RDM_STATIC_TDMAB;
	if (tsa->tdms & BIT(TSA_TDMA))
		val |= TSA_CPM1_SIGMR_ENA;
	if (tsa->tdms & BIT(TSA_TDMB))
		val |= TSA_CPM1_SIGMR_ENB;
	tsa_write8(tsa->si_regs + TSA_CPM1_SIGMR, val);

	return 0;
}

static int tsa_setup(struct tsa *tsa)
{
	return tsa_cpm1_setup(tsa);
}

static int tsa_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct resource *res;
	struct tsa *tsa;
	unsigned int i;
	int ret;

	tsa = devm_kzalloc(&pdev->dev, sizeof(*tsa), GFP_KERNEL);
	if (!tsa)
		return -ENOMEM;

	tsa->dev = &pdev->dev;
	tsa->version = (enum tsa_version)(uintptr_t)of_device_get_match_data(&pdev->dev);
	switch (tsa->version) {
	case TSA_CPM1:
		dev_info(tsa->dev, "CPM1 version\n");
		break;
	default:
		dev_err(tsa->dev, "Unknown version (%d)\n", tsa->version);
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(tsa->serials); i++)
		tsa->serials[i].id = i;

	spin_lock_init(&tsa->lock);

	tsa->si_regs = devm_platform_ioremap_resource_byname(pdev, "si_regs");
	if (IS_ERR(tsa->si_regs))
		return PTR_ERR(tsa->si_regs);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "si_ram");
	if (!res) {
		dev_err(tsa->dev, "si_ram resource missing\n");
		return -EINVAL;
	}
	tsa->si_ram_sz = resource_size(res);
	tsa->si_ram = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(tsa->si_ram))
		return PTR_ERR(tsa->si_ram);

	tsa_init_si_ram(tsa);

	ret = tsa_of_parse_tdms(tsa, np);
	if (ret)
		return ret;

	ret = tsa_setup(tsa);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, tsa);

	return 0;
}

static void tsa_remove(struct platform_device *pdev)
{
	struct tsa *tsa = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++) {
		if (tsa->tdm[i].l1rsync_clk) {
			clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
			clk_put(tsa->tdm[i].l1rsync_clk);
		}
		if (tsa->tdm[i].l1rclk_clk) {
			clk_disable_unprepare(tsa->tdm[i].l1rclk_clk);
			clk_put(tsa->tdm[i].l1rclk_clk);
		}
		if (tsa->tdm[i].l1tsync_clk) {
			clk_disable_unprepare(tsa->tdm[i].l1tsync_clk);
			clk_put(tsa->tdm[i].l1tsync_clk);
		}
		if (tsa->tdm[i].l1tclk_clk) {
			clk_disable_unprepare(tsa->tdm[i].l1tclk_clk);
			clk_put(tsa->tdm[i].l1tclk_clk);
		}
	}
}

static const struct of_device_id tsa_id_table[] = {
	{ .compatible = "fsl,cpm1-tsa", .data = (void *)TSA_CPM1 },
	{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, tsa_id_table);

static struct platform_driver tsa_driver = {
	.driver = {
		.name = "fsl-tsa",
		.of_match_table = of_match_ptr(tsa_id_table),
	},
	.probe = tsa_probe,
	.remove_new = tsa_remove,
};
module_platform_driver(tsa_driver);

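/*
 * Resolve a TSA serial from a phandle plus one argument (the serial id).
 * The target node must match this driver and the id must be consistent with
 * the probed device. The reference taken on the TSA device is released by
 * tsa_serial_put().
 */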
struct tsa_serial *tsa_serial_get_byphandle(struct device_node *np,
					    const char *phandle_name)
{
	struct of_phandle_args out_args;
	struct platform_device *pdev;
	struct tsa_serial *tsa_serial;
	struct tsa *tsa;
	int ret;

	ret = of_parse_phandle_with_fixed_args(np, phandle_name, 1, 0, &out_args);
	if (ret < 0)
		return ERR_PTR(ret);

	if (!of_match_node(tsa_driver.driver.of_match_table, out_args.np)) {
		of_node_put(out_args.np);
		return ERR_PTR(-EINVAL);
	}

	pdev = of_find_device_by_node(out_args.np);
	of_node_put(out_args.np);
	if (!pdev)
		return ERR_PTR(-ENODEV);

	tsa = platform_get_drvdata(pdev);
	if (!tsa) {
		platform_device_put(pdev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	if (out_args.args_count != 1) {
		platform_device_put(pdev);
		return ERR_PTR(-EINVAL);
	}

	if (out_args.args[0] >= ARRAY_SIZE(tsa->serials)) {
		platform_device_put(pdev);
		return ERR_PTR(-EINVAL);
	}

	tsa_serial = &tsa->serials[out_args.args[0]];

	/*
	 * Be sure that the serial id matches the phandle arg.
	 * The tsa_serials table is indexed by serial ids. The serial id is set
	 * during the probe() call and needs to be coherent.
	 */
	if (WARN_ON(tsa_serial->id != out_args.args[0])) {
		platform_device_put(pdev);
		return ERR_PTR(-EINVAL);
	}

	return tsa_serial;
}
EXPORT_SYMBOL(tsa_serial_get_byphandle);

void tsa_serial_put(struct tsa_serial *tsa_serial)
{
	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);

	put_device(tsa->dev);
}
EXPORT_SYMBOL(tsa_serial_put);

static void devm_tsa_serial_release(struct device *dev, void *res)
{
	struct tsa_serial **tsa_serial = res;

	tsa_serial_put(*tsa_serial);
}

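/*
 * Managed variant of tsa_serial_get_byphandle(): the serial is automatically
 * released when 'dev' is unbound.
 */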
struct tsa_serial *devm_tsa_serial_get_byphandle(struct device *dev,
						 struct device_node *np,
						 const char *phandle_name)
{
	struct tsa_serial *tsa_serial;
	struct tsa_serial **dr;

	dr = devres_alloc(devm_tsa_serial_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return ERR_PTR(-ENOMEM);

	tsa_serial = tsa_serial_get_byphandle(np, phandle_name);
	if (!IS_ERR(tsa_serial)) {
		*dr = tsa_serial;
		devres_add(dev, dr);
	} else {
		devres_free(dr);
	}

	return tsa_serial;
}
EXPORT_SYMBOL(devm_tsa_serial_get_byphandle);

MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
MODULE_DESCRIPTION("CPM TSA driver");
MODULE_LICENSE("GPL");