xref: /linux/drivers/soc/fsl/qe/tsa.c (revision 442f3799fa387ab4c0c9b2d20490d582b96532d1)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * TSA driver
4  *
5  * Copyright 2022 CS GROUP France
6  *
7  * Author: Herve Codina <herve.codina@bootlin.com>
8  */
9 
10 #include "tsa.h"
11 #include <dt-bindings/soc/cpm1-fsl,tsa.h>
12 #include <linux/bitfield.h>
13 #include <linux/clk.h>
14 #include <linux/io.h>
15 #include <linux/module.h>
16 #include <linux/of.h>
17 #include <linux/of_platform.h>
18 #include <linux/platform_device.h>
19 #include <linux/slab.h>
20 
21 /* TSA SI RAM routing tables entry */
22 #define TSA_SIRAM_ENTRY_LAST		BIT(16)
23 #define TSA_SIRAM_ENTRY_BYTE		BIT(17)
24 #define TSA_SIRAM_ENTRY_CNT_MASK	GENMASK(21, 18)
25 #define TSA_SIRAM_ENTRY_CNT(x)		FIELD_PREP(TSA_SIRAM_ENTRY_CNT_MASK, x)
26 #define TSA_SIRAM_ENTRY_CSEL_MASK	GENMASK(24, 22)
27 #define TSA_SIRAM_ENTRY_CSEL_NU		FIELD_PREP_CONST(TSA_SIRAM_ENTRY_CSEL_MASK, 0x0)
28 #define TSA_SIRAM_ENTRY_CSEL_SCC2	FIELD_PREP_CONST(TSA_SIRAM_ENTRY_CSEL_MASK, 0x2)
29 #define TSA_SIRAM_ENTRY_CSEL_SCC3	FIELD_PREP_CONST(TSA_SIRAM_ENTRY_CSEL_MASK, 0x3)
30 #define TSA_SIRAM_ENTRY_CSEL_SCC4	FIELD_PREP_CONST(TSA_SIRAM_ENTRY_CSEL_MASK, 0x4)
31 #define TSA_SIRAM_ENTRY_CSEL_SMC1	FIELD_PREP_CONST(TSA_SIRAM_ENTRY_CSEL_MASK, 0x5)
32 #define TSA_SIRAM_ENTRY_CSEL_SMC2	FIELD_PREP_CONST(TSA_SIRAM_ENTRY_CSEL_MASK, 0x6)
33 
34 /* SI mode register (32 bits) */
35 #define TSA_SIMODE	0x00
36 #define   TSA_SIMODE_SMC2			BIT(31)
37 #define   TSA_SIMODE_SMC1			BIT(15)
38 #define   TSA_SIMODE_TDMA_MASK			GENMASK(11, 0)
39 #define   TSA_SIMODE_TDMA(x)			FIELD_PREP(TSA_SIMODE_TDMA_MASK, x)
40 #define   TSA_SIMODE_TDMB_MASK			GENMASK(27, 16)
41 #define   TSA_SIMODE_TDMB(x)			FIELD_PREP(TSA_SIMODE_TDMB_MASK, x)
42 #define     TSA_SIMODE_TDM_MASK			GENMASK(11, 0)
43 #define     TSA_SIMODE_TDM_SDM_MASK		GENMASK(11, 10)
44 #define       TSA_SIMODE_TDM_SDM_NORM		FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x0)
45 #define       TSA_SIMODE_TDM_SDM_ECHO		FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x1)
46 #define       TSA_SIMODE_TDM_SDM_INTL_LOOP	FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x2)
47 #define       TSA_SIMODE_TDM_SDM_LOOP_CTRL	FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x3)
48 #define     TSA_SIMODE_TDM_RFSD_MASK		GENMASK(9, 8)
49 #define     TSA_SIMODE_TDM_RFSD(x)		FIELD_PREP(TSA_SIMODE_TDM_RFSD_MASK, x)
50 #define     TSA_SIMODE_TDM_DSC			BIT(7)
51 #define     TSA_SIMODE_TDM_CRT			BIT(6)
52 #define     TSA_SIMODE_TDM_STZ			BIT(5)
53 #define     TSA_SIMODE_TDM_CE			BIT(4)
54 #define     TSA_SIMODE_TDM_FE			BIT(3)
55 #define     TSA_SIMODE_TDM_GM			BIT(2)
56 #define     TSA_SIMODE_TDM_TFSD_MASK		GENMASK(1, 0)
57 #define     TSA_SIMODE_TDM_TFSD(x)		FIELD_PREP(TSA_SIMODE_TDM_TFSD_MASK, x)
58 
59 /* SI global mode register (8 bits) */
60 #define TSA_SIGMR	0x04
61 #define TSA_SIGMR_ENB			BIT(3)
62 #define TSA_SIGMR_ENA			BIT(2)
63 #define TSA_SIGMR_RDM_MASK		GENMASK(1, 0)
64 #define   TSA_SIGMR_RDM_STATIC_TDMA	FIELD_PREP_CONST(TSA_SIGMR_RDM_MASK, 0x0)
65 #define   TSA_SIGMR_RDM_DYN_TDMA	FIELD_PREP_CONST(TSA_SIGMR_RDM_MASK, 0x1)
66 #define   TSA_SIGMR_RDM_STATIC_TDMAB	FIELD_PREP_CONST(TSA_SIGMR_RDM_MASK, 0x2)
67 #define   TSA_SIGMR_RDM_DYN_TDMAB	FIELD_PREP_CONST(TSA_SIGMR_RDM_MASK, 0x3)
68 
69 /* SI clock route register (32 bits) */
70 #define TSA_SICR	0x0C
71 #define   TSA_SICR_SCC2_MASK		GENMASK(15, 8)
72 #define   TSA_SICR_SCC2(x)		FIELD_PREP(TSA_SICR_SCC2_MASK, x)
73 #define   TSA_SICR_SCC3_MASK		GENMASK(23, 16)
74 #define   TSA_SICR_SCC3(x)		FIELD_PREP(TSA_SICR_SCC3_MASK, x)
75 #define   TSA_SICR_SCC4_MASK		GENMASK(31, 24)
76 #define   TSA_SICR_SCC4(x)		FIELD_PREP(TSA_SICR_SCC4_MASK, x)
77 #define     TSA_SICR_SCC_MASK		GENMASK(7, 0)
78 #define     TSA_SICR_SCC_GRX		BIT(7)
79 #define     TSA_SICR_SCC_SCX_TSA	BIT(6)
80 #define     TSA_SICR_SCC_RXCS_MASK	GENMASK(5, 3)
81 #define       TSA_SICR_SCC_RXCS_BRG1	FIELD_PREP_CONST(TSA_SICR_SCC_RXCS_MASK, 0x0)
82 #define       TSA_SICR_SCC_RXCS_BRG2	FIELD_PREP_CONST(TSA_SICR_SCC_RXCS_MASK, 0x1)
83 #define       TSA_SICR_SCC_RXCS_BRG3	FIELD_PREP_CONST(TSA_SICR_SCC_RXCS_MASK, 0x2)
84 #define       TSA_SICR_SCC_RXCS_BRG4	FIELD_PREP_CONST(TSA_SICR_SCC_RXCS_MASK, 0x3)
85 #define       TSA_SICR_SCC_RXCS_CLK15	FIELD_PREP_CONST(TSA_SICR_SCC_RXCS_MASK, 0x4)
86 #define       TSA_SICR_SCC_RXCS_CLK26	FIELD_PREP_CONST(TSA_SICR_SCC_RXCS_MASK, 0x5)
87 #define       TSA_SICR_SCC_RXCS_CLK37	FIELD_PREP_CONST(TSA_SICR_SCC_RXCS_MASK, 0x6)
88 #define       TSA_SICR_SCC_RXCS_CLK48	FIELD_PREP_CONST(TSA_SICR_SCC_RXCS_MASK, 0x7)
89 #define     TSA_SICR_SCC_TXCS_MASK	GENMASK(2, 0)
90 #define       TSA_SICR_SCC_TXCS_BRG1	FIELD_PREP_CONST(TSA_SICR_SCC_TXCS_MASK, 0x0)
91 #define       TSA_SICR_SCC_TXCS_BRG2	FIELD_PREP_CONST(TSA_SICR_SCC_TXCS_MASK, 0x1)
92 #define       TSA_SICR_SCC_TXCS_BRG3	FIELD_PREP_CONST(TSA_SICR_SCC_TXCS_MASK, 0x2)
93 #define       TSA_SICR_SCC_TXCS_BRG4	FIELD_PREP_CONST(TSA_SICR_SCC_TXCS_MASK, 0x3)
94 #define       TSA_SICR_SCC_TXCS_CLK15	FIELD_PREP_CONST(TSA_SICR_SCC_TXCS_MASK, 0x4)
95 #define       TSA_SICR_SCC_TXCS_CLK26	FIELD_PREP_CONST(TSA_SICR_SCC_TXCS_MASK, 0x5)
96 #define       TSA_SICR_SCC_TXCS_CLK37	FIELD_PREP_CONST(TSA_SICR_SCC_TXCS_MASK, 0x6)
97 #define       TSA_SICR_SCC_TXCS_CLK48	FIELD_PREP_CONST(TSA_SICR_SCC_TXCS_MASK, 0x7)
98 
/*
 * Free-entry tracking for one window (Tx or Rx of one TDM) of the SI
 * routing RAM. Entries are appended between entries_start (inclusive)
 * and entries_next (exclusive).
 */
struct tsa_entries_area {
	void __iomem *entries_start;	/* first entry of the area */
	void __iomem *entries_next;	/* exclusive end of the area */
	void __iomem *last_entry;	/* last entry written, NULL if none yet */
};
104 
/* Per-TDM state: enablement, input clocks and SIMODE configuration bits */
struct tsa_tdm {
	bool is_enable;			/* TDM fully configured and routed */
	struct clk *l1rclk_clk;		/* Rx bit clock */
	struct clk *l1rsync_clk;	/* Rx frame-sync clock */
	struct clk *l1tclk_clk;		/* Tx bit clock (NULL with common Rx/Tx pins) */
	struct clk *l1tsync_clk;	/* Tx frame-sync clock (NULL with common Rx/Tx pins) */
	u32 simode_tdm;			/* SIMODE TDM field value to program */
};
113 
114 #define TSA_TDMA	0
115 #define TSA_TDMB	1
116 
/* TSA instance state */
struct tsa {
	struct device *dev;
	void __iomem *si_regs;		/* SI register block */
	void __iomem *si_ram;		/* SI routing RAM */
	resource_size_t si_ram_sz;	/* SI routing RAM size in bytes */
	spinlock_t	lock; /* Lock for read/modify/write sequence */
	int tdms; /* TSA_TDMx ORed */
	struct tsa_tdm tdm[2]; /* TDMa and TDMb */
	/* Indexed by serial id (FSL_CPM_TSA_*); id mirrors the index */
	struct tsa_serial {
		unsigned int id;
		struct tsa_serial_info info;
	} serials[6];
};
130 
/* Retrieve the TSA instance a given serial belongs to */
static inline struct tsa *tsa_serial_get_tsa(struct tsa_serial *tsa_serial)
{
	/* The serials table is indexed by the serial id */
	return container_of(tsa_serial, struct tsa, serials[tsa_serial->id]);
}
136 
/* SI registers are big-endian: accessors use the BE MMIO variants */
static inline void tsa_write32(void __iomem *addr, u32 val)
{
	iowrite32be(val, addr);
}
141 
/* Single-byte register write (endianness is irrelevant for one byte) */
static inline void tsa_write8(void __iomem *addr, u8 val)
{
	iowrite8(val, addr);
}
146 
/* Big-endian 32-bit register read */
static inline u32 tsa_read32(void __iomem *addr)
{
	return ioread32be(addr);
}
151 
152 static inline void tsa_clrbits32(void __iomem *addr, u32 clr)
153 {
154 	tsa_write32(addr, tsa_read32(addr) & ~clr);
155 }
156 
157 static inline void tsa_clrsetbits32(void __iomem *addr, u32 clr, u32 set)
158 {
159 	tsa_write32(addr, (tsa_read32(addr) & ~clr) | set);
160 }
161 
162 int tsa_serial_connect(struct tsa_serial *tsa_serial)
163 {
164 	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
165 	unsigned long flags;
166 	u32 clear;
167 	u32 set;
168 
169 	switch (tsa_serial->id) {
170 	case FSL_CPM_TSA_SCC2:
171 		clear = TSA_SICR_SCC2(TSA_SICR_SCC_MASK);
172 		set = TSA_SICR_SCC2(TSA_SICR_SCC_SCX_TSA);
173 		break;
174 	case FSL_CPM_TSA_SCC3:
175 		clear = TSA_SICR_SCC3(TSA_SICR_SCC_MASK);
176 		set = TSA_SICR_SCC3(TSA_SICR_SCC_SCX_TSA);
177 		break;
178 	case FSL_CPM_TSA_SCC4:
179 		clear = TSA_SICR_SCC4(TSA_SICR_SCC_MASK);
180 		set = TSA_SICR_SCC4(TSA_SICR_SCC_SCX_TSA);
181 		break;
182 	default:
183 		dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id);
184 		return -EINVAL;
185 	}
186 
187 	spin_lock_irqsave(&tsa->lock, flags);
188 	tsa_clrsetbits32(tsa->si_regs + TSA_SICR, clear, set);
189 	spin_unlock_irqrestore(&tsa->lock, flags);
190 
191 	return 0;
192 }
193 EXPORT_SYMBOL(tsa_serial_connect);
194 
195 int tsa_serial_disconnect(struct tsa_serial *tsa_serial)
196 {
197 	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
198 	unsigned long flags;
199 	u32 clear;
200 
201 	switch (tsa_serial->id) {
202 	case FSL_CPM_TSA_SCC2:
203 		clear = TSA_SICR_SCC2(TSA_SICR_SCC_MASK);
204 		break;
205 	case FSL_CPM_TSA_SCC3:
206 		clear = TSA_SICR_SCC3(TSA_SICR_SCC_MASK);
207 		break;
208 	case FSL_CPM_TSA_SCC4:
209 		clear = TSA_SICR_SCC4(TSA_SICR_SCC_MASK);
210 		break;
211 	default:
212 		dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id);
213 		return -EINVAL;
214 	}
215 
216 	spin_lock_irqsave(&tsa->lock, flags);
217 	tsa_clrsetbits32(tsa->si_regs + TSA_SICR, clear, 0);
218 	spin_unlock_irqrestore(&tsa->lock, flags);
219 
220 	return 0;
221 }
222 EXPORT_SYMBOL(tsa_serial_disconnect);
223 
224 int tsa_serial_get_info(struct tsa_serial *tsa_serial, struct tsa_serial_info *info)
225 {
226 	memcpy(info, &tsa_serial->info, sizeof(*info));
227 	return 0;
228 }
229 EXPORT_SYMBOL(tsa_serial_get_info);
230 
231 static void tsa_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
232 				  u32 tdms, u32 tdm_id, bool is_rx)
233 {
234 	resource_size_t quarter;
235 	resource_size_t half;
236 
237 	quarter = tsa->si_ram_sz / 4;
238 	half = tsa->si_ram_sz / 2;
239 
240 	if (tdms == BIT(TSA_TDMA)) {
241 		/* Only TDMA */
242 		if (is_rx) {
243 			/* First half of si_ram */
244 			area->entries_start = tsa->si_ram;
245 			area->entries_next = area->entries_start + half;
246 			area->last_entry = NULL;
247 		} else {
248 			/* Second half of si_ram */
249 			area->entries_start = tsa->si_ram + half;
250 			area->entries_next = area->entries_start + half;
251 			area->last_entry = NULL;
252 		}
253 	} else {
254 		/* Only TDMB or both TDMs */
255 		if (tdm_id == TSA_TDMA) {
256 			if (is_rx) {
257 				/* First half of first half of si_ram */
258 				area->entries_start = tsa->si_ram;
259 				area->entries_next = area->entries_start + quarter;
260 				area->last_entry = NULL;
261 			} else {
262 				/* First half of second half of si_ram */
263 				area->entries_start = tsa->si_ram + (2 * quarter);
264 				area->entries_next = area->entries_start + quarter;
265 				area->last_entry = NULL;
266 			}
267 		} else {
268 			if (is_rx) {
269 				/* Second half of first half of si_ram */
270 				area->entries_start = tsa->si_ram + quarter;
271 				area->entries_next = area->entries_start + quarter;
272 				area->last_entry = NULL;
273 			} else {
274 				/* Second half of second half of si_ram */
275 				area->entries_start = tsa->si_ram + (3 * quarter);
276 				area->entries_next = area->entries_start + quarter;
277 				area->last_entry = NULL;
278 			}
279 		}
280 	}
281 }
282 
283 static const char *tsa_serial_id2name(struct tsa *tsa, u32 serial_id)
284 {
285 	switch (serial_id) {
286 	case FSL_CPM_TSA_NU:	return "Not used";
287 	case FSL_CPM_TSA_SCC2:	return "SCC2";
288 	case FSL_CPM_TSA_SCC3:	return "SCC3";
289 	case FSL_CPM_TSA_SCC4:	return "SCC4";
290 	case FSL_CPM_TSA_SMC1:	return "SMC1";
291 	case FSL_CPM_TSA_SMC2:	return "SMC2";
292 	default:
293 		break;
294 	}
295 	return NULL;
296 }
297 
298 static u32 tsa_serial_id2csel(struct tsa *tsa, u32 serial_id)
299 {
300 	switch (serial_id) {
301 	case FSL_CPM_TSA_SCC2:	return TSA_SIRAM_ENTRY_CSEL_SCC2;
302 	case FSL_CPM_TSA_SCC3:	return TSA_SIRAM_ENTRY_CSEL_SCC3;
303 	case FSL_CPM_TSA_SCC4:	return TSA_SIRAM_ENTRY_CSEL_SCC4;
304 	case FSL_CPM_TSA_SMC1:	return TSA_SIRAM_ENTRY_CSEL_SMC1;
305 	case FSL_CPM_TSA_SMC2:	return TSA_SIRAM_ENTRY_CSEL_SMC2;
306 	default:
307 		break;
308 	}
309 	return TSA_SIRAM_ENTRY_CSEL_NU;
310 }
311 
312 static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
313 			 u32 count, u32 serial_id)
314 {
315 	void __iomem *addr;
316 	u32 left;
317 	u32 val;
318 	u32 cnt;
319 	u32 nb;
320 
321 	addr = area->last_entry ? area->last_entry + 4 : area->entries_start;
322 
323 	nb = DIV_ROUND_UP(count, 8);
324 	if ((addr + (nb * 4)) > area->entries_next) {
325 		dev_err(tsa->dev, "si ram area full\n");
326 		return -ENOSPC;
327 	}
328 
329 	if (area->last_entry) {
330 		/* Clear last flag */
331 		tsa_clrbits32(area->last_entry, TSA_SIRAM_ENTRY_LAST);
332 	}
333 
334 	left = count;
335 	while (left) {
336 		val = TSA_SIRAM_ENTRY_BYTE | tsa_serial_id2csel(tsa, serial_id);
337 
338 		if (left > 16) {
339 			cnt = 16;
340 		} else {
341 			cnt = left;
342 			val |= TSA_SIRAM_ENTRY_LAST;
343 			area->last_entry = addr;
344 		}
345 		val |= TSA_SIRAM_ENTRY_CNT(cnt - 1);
346 
347 		tsa_write32(addr, val);
348 		addr += 4;
349 		left -= cnt;
350 	}
351 
352 	return 0;
353 }
354 
/*
 * Parse one direction (Rx or Tx) of a TDM's time-slot routing.
 *
 * The "fsl,rx-ts-routes"/"fsl,tx-ts-routes" DT property is a flat list of
 * (count, serial_id) pairs: 'count' consecutive time slots are routed to
 * the given serial. For each pair, SI RAM entries are appended in the
 * TDM's area and the matching serial info (clock rates, time-slot counts)
 * is updated.
 *
 * Returns 0 on success, a negative errno on malformed DT content or SI
 * RAM exhaustion.
 */
static int tsa_of_parse_tdm_route(struct tsa *tsa, struct device_node *tdm_np,
				  u32 tdms, u32 tdm_id, bool is_rx)
{
	struct tsa_entries_area area;
	const char *route_name;
	u32 serial_id;
	int len, i;
	u32 count;
	const char *serial_name;
	struct tsa_serial_info *serial_info;
	struct tsa_tdm *tdm;
	int ret;
	u32 ts;

	route_name = is_rx ? "fsl,rx-ts-routes" : "fsl,tx-ts-routes";

	len = of_property_count_u32_elems(tdm_np,  route_name);
	if (len < 0) {
		dev_err(tsa->dev, "%pOF: failed to read %s\n", tdm_np, route_name);
		return len;
	}
	/* The property must hold complete (count, serial_id) pairs */
	if (len % 2 != 0) {
		dev_err(tsa->dev, "%pOF: wrong %s format\n", tdm_np, route_name);
		return -EINVAL;
	}

	tsa_init_entries_area(tsa, &area, tdms, tdm_id, is_rx);
	ts = 0;	/* first time slot of the current route chunk */
	for (i = 0; i < len; i += 2) {
		of_property_read_u32_index(tdm_np, route_name, i, &count);
		of_property_read_u32_index(tdm_np, route_name, i + 1, &serial_id);

		if (serial_id >= ARRAY_SIZE(tsa->serials)) {
			dev_err(tsa->dev, "%pOF: invalid serial id (%u)\n",
				tdm_np, serial_id);
			return -EINVAL;
		}

		serial_name = tsa_serial_id2name(tsa, serial_id);
		if (!serial_name) {
			dev_err(tsa->dev, "%pOF: unsupported serial id (%u)\n",
				tdm_np, serial_id);
			return -EINVAL;
		}

		dev_dbg(tsa->dev, "tdm_id=%u, %s ts %u..%u -> %s\n",
			tdm_id, route_name, ts, ts + count - 1, serial_name);
		ts += count;

		ret = tsa_add_entry(tsa, &area, count, serial_id);
		if (ret)
			return ret;

		serial_info = &tsa->serials[serial_id].info;
		tdm = &tsa->tdm[tdm_id];
		if (is_rx) {
			serial_info->rx_fs_rate = clk_get_rate(tdm->l1rsync_clk);
			serial_info->rx_bit_rate = clk_get_rate(tdm->l1rclk_clk);
			serial_info->nb_rx_ts += count;
		} else {
			/*
			 * With common Rx/Tx pins the Tx clocks are NULL and
			 * the Rx clocks drive the Tx side as well.
			 */
			serial_info->tx_fs_rate = tdm->l1tsync_clk ?
				clk_get_rate(tdm->l1tsync_clk) :
				clk_get_rate(tdm->l1rsync_clk);
			serial_info->tx_bit_rate = tdm->l1tclk_clk ?
				clk_get_rate(tdm->l1tclk_clk) :
				clk_get_rate(tdm->l1rclk_clk);
			serial_info->nb_tx_ts += count;
		}
	}
	return 0;
}
426 
/* Parse the Rx time-slot routes of one TDM */
static inline int tsa_of_parse_tdm_rx_route(struct tsa *tsa,
					    struct device_node *tdm_np,
					    u32 tdms, u32 tdm_id)
{
	return tsa_of_parse_tdm_route(tsa, tdm_np, tdms, tdm_id, true);
}
433 
/* Parse the Tx time-slot routes of one TDM */
static inline int tsa_of_parse_tdm_tx_route(struct tsa *tsa,
					    struct device_node *tdm_np,
					    u32 tdms, u32 tdm_id)
{
	return tsa_of_parse_tdm_route(tsa, tdm_np, tdms, tdm_id, false);
}
440 
441 static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
442 {
443 	struct device_node *tdm_np;
444 	struct tsa_tdm *tdm;
445 	struct clk *clk;
446 	u32 tdm_id, val;
447 	int ret;
448 	int i;
449 
450 	tsa->tdms = 0;
451 	tsa->tdm[0].is_enable = false;
452 	tsa->tdm[1].is_enable = false;
453 
454 	for_each_available_child_of_node(np, tdm_np) {
455 		ret = of_property_read_u32(tdm_np, "reg", &tdm_id);
456 		if (ret) {
457 			dev_err(tsa->dev, "%pOF: failed to read reg\n", tdm_np);
458 			of_node_put(tdm_np);
459 			return ret;
460 		}
461 		switch (tdm_id) {
462 		case 0:
463 			tsa->tdms |= BIT(TSA_TDMA);
464 			break;
465 		case 1:
466 			tsa->tdms |= BIT(TSA_TDMB);
467 			break;
468 		default:
469 			dev_err(tsa->dev, "%pOF: Invalid tdm_id (%u)\n", tdm_np,
470 				tdm_id);
471 			of_node_put(tdm_np);
472 			return -EINVAL;
473 		}
474 	}
475 
476 	for_each_available_child_of_node(np, tdm_np) {
477 		ret = of_property_read_u32(tdm_np, "reg", &tdm_id);
478 		if (ret) {
479 			dev_err(tsa->dev, "%pOF: failed to read reg\n", tdm_np);
480 			of_node_put(tdm_np);
481 			return ret;
482 		}
483 
484 		tdm = &tsa->tdm[tdm_id];
485 		tdm->simode_tdm = TSA_SIMODE_TDM_SDM_NORM;
486 
487 		val = 0;
488 		ret = of_property_read_u32(tdm_np, "fsl,rx-frame-sync-delay-bits",
489 					   &val);
490 		if (ret && ret != -EINVAL) {
491 			dev_err(tsa->dev,
492 				"%pOF: failed to read fsl,rx-frame-sync-delay-bits\n",
493 				tdm_np);
494 			of_node_put(tdm_np);
495 			return ret;
496 		}
497 		if (val > 3) {
498 			dev_err(tsa->dev,
499 				"%pOF: Invalid fsl,rx-frame-sync-delay-bits (%u)\n",
500 				tdm_np, val);
501 			of_node_put(tdm_np);
502 			return -EINVAL;
503 		}
504 		tdm->simode_tdm |= TSA_SIMODE_TDM_RFSD(val);
505 
506 		val = 0;
507 		ret = of_property_read_u32(tdm_np, "fsl,tx-frame-sync-delay-bits",
508 					   &val);
509 		if (ret && ret != -EINVAL) {
510 			dev_err(tsa->dev,
511 				"%pOF: failed to read fsl,tx-frame-sync-delay-bits\n",
512 				tdm_np);
513 			of_node_put(tdm_np);
514 			return ret;
515 		}
516 		if (val > 3) {
517 			dev_err(tsa->dev,
518 				"%pOF: Invalid fsl,tx-frame-sync-delay-bits (%u)\n",
519 				tdm_np, val);
520 			of_node_put(tdm_np);
521 			return -EINVAL;
522 		}
523 		tdm->simode_tdm |= TSA_SIMODE_TDM_TFSD(val);
524 
525 		if (of_property_read_bool(tdm_np, "fsl,common-rxtx-pins"))
526 			tdm->simode_tdm |= TSA_SIMODE_TDM_CRT;
527 
528 		if (of_property_read_bool(tdm_np, "fsl,clock-falling-edge"))
529 			tdm->simode_tdm |= TSA_SIMODE_TDM_CE;
530 
531 		if (of_property_read_bool(tdm_np, "fsl,fsync-rising-edge"))
532 			tdm->simode_tdm |= TSA_SIMODE_TDM_FE;
533 
534 		if (of_property_read_bool(tdm_np, "fsl,double-speed-clock"))
535 			tdm->simode_tdm |= TSA_SIMODE_TDM_DSC;
536 
537 		clk = of_clk_get_by_name(tdm_np, "l1rsync");
538 		if (IS_ERR(clk)) {
539 			ret = PTR_ERR(clk);
540 			of_node_put(tdm_np);
541 			goto err;
542 		}
543 		ret = clk_prepare_enable(clk);
544 		if (ret) {
545 			clk_put(clk);
546 			of_node_put(tdm_np);
547 			goto err;
548 		}
549 		tdm->l1rsync_clk = clk;
550 
551 		clk = of_clk_get_by_name(tdm_np, "l1rclk");
552 		if (IS_ERR(clk)) {
553 			ret = PTR_ERR(clk);
554 			of_node_put(tdm_np);
555 			goto err;
556 		}
557 		ret = clk_prepare_enable(clk);
558 		if (ret) {
559 			clk_put(clk);
560 			of_node_put(tdm_np);
561 			goto err;
562 		}
563 		tdm->l1rclk_clk = clk;
564 
565 		if (!(tdm->simode_tdm & TSA_SIMODE_TDM_CRT)) {
566 			clk = of_clk_get_by_name(tdm_np, "l1tsync");
567 			if (IS_ERR(clk)) {
568 				ret = PTR_ERR(clk);
569 				of_node_put(tdm_np);
570 				goto err;
571 			}
572 			ret = clk_prepare_enable(clk);
573 			if (ret) {
574 				clk_put(clk);
575 				of_node_put(tdm_np);
576 				goto err;
577 			}
578 			tdm->l1tsync_clk = clk;
579 
580 			clk = of_clk_get_by_name(tdm_np, "l1tclk");
581 			if (IS_ERR(clk)) {
582 				ret = PTR_ERR(clk);
583 				of_node_put(tdm_np);
584 				goto err;
585 			}
586 			ret = clk_prepare_enable(clk);
587 			if (ret) {
588 				clk_put(clk);
589 				of_node_put(tdm_np);
590 				goto err;
591 			}
592 			tdm->l1tclk_clk = clk;
593 		}
594 
595 		ret = tsa_of_parse_tdm_rx_route(tsa, tdm_np, tsa->tdms, tdm_id);
596 		if (ret) {
597 			of_node_put(tdm_np);
598 			goto err;
599 		}
600 
601 		ret = tsa_of_parse_tdm_tx_route(tsa, tdm_np, tsa->tdms, tdm_id);
602 		if (ret) {
603 			of_node_put(tdm_np);
604 			goto err;
605 		}
606 
607 		tdm->is_enable = true;
608 	}
609 	return 0;
610 
611 err:
612 	for (i = 0; i < 2; i++) {
613 		if (tsa->tdm[i].l1rsync_clk) {
614 			clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
615 			clk_put(tsa->tdm[i].l1rsync_clk);
616 		}
617 		if (tsa->tdm[i].l1rclk_clk) {
618 			clk_disable_unprepare(tsa->tdm[i].l1rclk_clk);
619 			clk_put(tsa->tdm[i].l1rclk_clk);
620 		}
621 		if (tsa->tdm[i].l1tsync_clk) {
622 			clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
623 			clk_put(tsa->tdm[i].l1rsync_clk);
624 		}
625 		if (tsa->tdm[i].l1tclk_clk) {
626 			clk_disable_unprepare(tsa->tdm[i].l1rclk_clk);
627 			clk_put(tsa->tdm[i].l1rclk_clk);
628 		}
629 	}
630 	return ret;
631 }
632 
633 static void tsa_init_si_ram(struct tsa *tsa)
634 {
635 	resource_size_t i;
636 
637 	/* Fill all entries as the last one */
638 	for (i = 0; i < tsa->si_ram_sz; i += 4)
639 		tsa_write32(tsa->si_ram + i, TSA_SIRAM_ENTRY_LAST);
640 }
641 
/*
 * Probe: map the SI registers and routing RAM, parse the TDM description
 * from the device tree, then program SIMODE and SIGMR accordingly.
 */
static int tsa_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct resource *res;
	struct tsa *tsa;
	unsigned int i;
	u32 val;
	int ret;

	tsa = devm_kzalloc(&pdev->dev, sizeof(*tsa), GFP_KERNEL);
	if (!tsa)
		return -ENOMEM;

	tsa->dev = &pdev->dev;

	/* serials[] is indexed by serial id; keep id coherent with the index */
	for (i = 0; i < ARRAY_SIZE(tsa->serials); i++)
		tsa->serials[i].id = i;

	spin_lock_init(&tsa->lock);

	tsa->si_regs = devm_platform_ioremap_resource_byname(pdev, "si_regs");
	if (IS_ERR(tsa->si_regs))
		return PTR_ERR(tsa->si_regs);

	/* The resource is fetched by hand to retrieve the RAM size */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "si_ram");
	if (!res) {
		dev_err(tsa->dev, "si_ram resource missing\n");
		return -EINVAL;
	}
	tsa->si_ram_sz = resource_size(res);
	tsa->si_ram = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(tsa->si_ram))
		return PTR_ERR(tsa->si_ram);

	tsa_init_si_ram(tsa);

	ret = tsa_of_parse_tdms(tsa, np);
	if (ret)
		return ret;

	/* Set SIMODE */
	val = 0;
	if (tsa->tdm[0].is_enable)
		val |= TSA_SIMODE_TDMA(tsa->tdm[0].simode_tdm);
	if (tsa->tdm[1].is_enable)
		val |= TSA_SIMODE_TDMB(tsa->tdm[1].simode_tdm);

	tsa_clrsetbits32(tsa->si_regs + TSA_SIMODE,
			 TSA_SIMODE_TDMA(TSA_SIMODE_TDM_MASK) |
			 TSA_SIMODE_TDMB(TSA_SIMODE_TDM_MASK),
			 val);

	/* Set SIGMR: static routing, SI RAM split only when TDMB is used */
	val = (tsa->tdms == BIT(TSA_TDMA)) ?
		TSA_SIGMR_RDM_STATIC_TDMA : TSA_SIGMR_RDM_STATIC_TDMAB;
	if (tsa->tdms & BIT(TSA_TDMA))
		val |= TSA_SIGMR_ENA;
	if (tsa->tdms & BIT(TSA_TDMB))
		val |= TSA_SIGMR_ENB;
	tsa_write8(tsa->si_regs + TSA_SIGMR, val);

	platform_set_drvdata(pdev, tsa);

	return 0;
}
707 
708 static void tsa_remove(struct platform_device *pdev)
709 {
710 	struct tsa *tsa = platform_get_drvdata(pdev);
711 	int i;
712 
713 	for (i = 0; i < 2; i++) {
714 		if (tsa->tdm[i].l1rsync_clk) {
715 			clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
716 			clk_put(tsa->tdm[i].l1rsync_clk);
717 		}
718 		if (tsa->tdm[i].l1rclk_clk) {
719 			clk_disable_unprepare(tsa->tdm[i].l1rclk_clk);
720 			clk_put(tsa->tdm[i].l1rclk_clk);
721 		}
722 		if (tsa->tdm[i].l1tsync_clk) {
723 			clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
724 			clk_put(tsa->tdm[i].l1rsync_clk);
725 		}
726 		if (tsa->tdm[i].l1tclk_clk) {
727 			clk_disable_unprepare(tsa->tdm[i].l1rclk_clk);
728 			clk_put(tsa->tdm[i].l1rclk_clk);
729 		}
730 	}
731 }
732 
/* Device-tree match table */
static const struct of_device_id tsa_id_table[] = {
	{ .compatible = "fsl,cpm1-tsa" },
	{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, tsa_id_table);
738 
/* Platform driver registration */
static struct platform_driver tsa_driver = {
	.driver = {
		.name = "fsl-tsa",
		.of_match_table = of_match_ptr(tsa_id_table),
	},
	.probe = tsa_probe,
	.remove_new = tsa_remove,
};
module_platform_driver(tsa_driver);
748 
/*
 * Resolve a TSA serial from a one-cell phandle property (phandle arg is
 * the serial id).
 *
 * On success, a reference on the TSA device is kept (taken via
 * of_find_device_by_node()); the caller must drop it with
 * tsa_serial_put(). Returns an ERR_PTR on failure, including
 * -EPROBE_DEFER when the TSA device exists but is not probed yet.
 */
struct tsa_serial *tsa_serial_get_byphandle(struct device_node *np,
					    const char *phandle_name)
{
	struct of_phandle_args out_args;
	struct platform_device *pdev;
	struct tsa_serial *tsa_serial;
	struct tsa *tsa;
	int ret;

	ret = of_parse_phandle_with_fixed_args(np, phandle_name, 1, 0, &out_args);
	if (ret < 0)
		return ERR_PTR(ret);

	/* Reject phandles that do not point at a TSA node */
	if (!of_match_node(tsa_driver.driver.of_match_table, out_args.np)) {
		of_node_put(out_args.np);
		return ERR_PTR(-EINVAL);
	}

	/* of_find_device_by_node() takes a device reference */
	pdev = of_find_device_by_node(out_args.np);
	of_node_put(out_args.np);
	if (!pdev)
		return ERR_PTR(-ENODEV);

	/* drvdata is set at the end of probe: NULL means not probed yet */
	tsa = platform_get_drvdata(pdev);
	if (!tsa) {
		platform_device_put(pdev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	if (out_args.args_count != 1) {
		platform_device_put(pdev);
		return ERR_PTR(-EINVAL);
	}

	if (out_args.args[0] >= ARRAY_SIZE(tsa->serials)) {
		platform_device_put(pdev);
		return ERR_PTR(-EINVAL);
	}

	tsa_serial = &tsa->serials[out_args.args[0]];

	/*
	 * Be sure that the serial id matches the phandle arg.
	 * The tsa_serials table is indexed by serial ids. The serial id is set
	 * during the probe() call and needs to be coherent.
	 */
	if (WARN_ON(tsa_serial->id != out_args.args[0])) {
		platform_device_put(pdev);
		return ERR_PTR(-EINVAL);
	}

	return tsa_serial;
}
EXPORT_SYMBOL(tsa_serial_get_byphandle);
803 
/* Release the device reference taken by tsa_serial_get_byphandle() */
void tsa_serial_put(struct tsa_serial *tsa_serial)
{
	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);

	put_device(tsa->dev);
}
EXPORT_SYMBOL(tsa_serial_put);
811 
/* devres release callback: drop the serial reference on device teardown */
static void devm_tsa_serial_release(struct device *dev, void *res)
{
	struct tsa_serial **tsa_serial = res;

	tsa_serial_put(*tsa_serial);
}
818 
819 struct tsa_serial *devm_tsa_serial_get_byphandle(struct device *dev,
820 						 struct device_node *np,
821 						 const char *phandle_name)
822 {
823 	struct tsa_serial *tsa_serial;
824 	struct tsa_serial **dr;
825 
826 	dr = devres_alloc(devm_tsa_serial_release, sizeof(*dr), GFP_KERNEL);
827 	if (!dr)
828 		return ERR_PTR(-ENOMEM);
829 
830 	tsa_serial = tsa_serial_get_byphandle(np, phandle_name);
831 	if (!IS_ERR(tsa_serial)) {
832 		*dr = tsa_serial;
833 		devres_add(dev, dr);
834 	} else {
835 		devres_free(dr);
836 	}
837 
838 	return tsa_serial;
839 }
840 EXPORT_SYMBOL(devm_tsa_serial_get_byphandle);
841 
842 MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
843 MODULE_DESCRIPTION("CPM TSA driver");
844 MODULE_LICENSE("GPL");
845