xref: /linux/drivers/soc/fsl/qe/tsa.c (revision 3fd6c59042dbba50391e30862beac979491145fe)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * TSA driver
4  *
5  * Copyright 2022 CS GROUP France
6  *
7  * Author: Herve Codina <herve.codina@bootlin.com>
8  */
9 
10 #include "tsa.h"
11 #include <dt-bindings/soc/cpm1-fsl,tsa.h>
12 #include <dt-bindings/soc/qe-fsl,tsa.h>
13 #include <linux/bitfield.h>
14 #include <linux/clk.h>
15 #include <linux/io.h>
16 #include <linux/module.h>
17 #include <linux/of.h>
18 #include <linux/of_platform.h>
19 #include <linux/platform_device.h>
20 #include <linux/slab.h>
21 #include <soc/fsl/qe/ucc.h>
22 
23 /* TSA SI RAM routing tables entry (CPM1) */
24 #define TSA_CPM1_SIRAM_ENTRY_LAST	BIT(16)
25 #define TSA_CPM1_SIRAM_ENTRY_BYTE	BIT(17)
26 #define TSA_CPM1_SIRAM_ENTRY_CNT_MASK	GENMASK(21, 18)
27 #define TSA_CPM1_SIRAM_ENTRY_CNT(x)	FIELD_PREP(TSA_CPM1_SIRAM_ENTRY_CNT_MASK, x)
28 #define TSA_CPM1_SIRAM_ENTRY_CSEL_MASK	GENMASK(24, 22)
29 #define TSA_CPM1_SIRAM_ENTRY_CSEL_NU	FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x0)
30 #define TSA_CPM1_SIRAM_ENTRY_CSEL_SCC2	FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x2)
31 #define TSA_CPM1_SIRAM_ENTRY_CSEL_SCC3	FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x3)
32 #define TSA_CPM1_SIRAM_ENTRY_CSEL_SCC4	FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x4)
33 #define TSA_CPM1_SIRAM_ENTRY_CSEL_SMC1	FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x5)
34 #define TSA_CPM1_SIRAM_ENTRY_CSEL_SMC2	FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x6)
35 
36 /* TSA SI RAM routing tables entry (QE) */
37 #define TSA_QE_SIRAM_ENTRY_LAST		BIT(0)
38 #define TSA_QE_SIRAM_ENTRY_BYTE		BIT(1)
39 #define TSA_QE_SIRAM_ENTRY_CNT_MASK	GENMASK(4, 2)
40 #define TSA_QE_SIRAM_ENTRY_CNT(x)	FIELD_PREP(TSA_QE_SIRAM_ENTRY_CNT_MASK, x)
41 #define TSA_QE_SIRAM_ENTRY_CSEL_MASK	GENMASK(8, 5)
42 #define TSA_QE_SIRAM_ENTRY_CSEL_NU	FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0x0)
43 #define TSA_QE_SIRAM_ENTRY_CSEL_UCC5	FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0x1)
44 #define TSA_QE_SIRAM_ENTRY_CSEL_UCC1	FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0x9)
45 #define TSA_QE_SIRAM_ENTRY_CSEL_UCC2	FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0xa)
46 #define TSA_QE_SIRAM_ENTRY_CSEL_UCC3	FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0xb)
47 #define TSA_QE_SIRAM_ENTRY_CSEL_UCC4	FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0xc)
48 
49 /*
50  * SI mode register :
51  * - CPM1: 32bit register split in 2*16bit (16bit TDM)
52  * - QE: 4x16bit registers, one per TDM
53  */
54 #define TSA_CPM1_SIMODE		0x00
55 #define TSA_QE_SIAMR		0x00
56 #define TSA_QE_SIBMR		0x02
57 #define TSA_QE_SICMR		0x04
58 #define TSA_QE_SIDMR		0x06
59 #define   TSA_CPM1_SIMODE_SMC2			BIT(31)
60 #define   TSA_CPM1_SIMODE_SMC1			BIT(15)
61 #define   TSA_CPM1_SIMODE_TDMA_MASK		GENMASK(11, 0)
62 #define   TSA_CPM1_SIMODE_TDMA(x)		FIELD_PREP(TSA_CPM1_SIMODE_TDMA_MASK, x)
63 #define   TSA_CPM1_SIMODE_TDMB_MASK		GENMASK(27, 16)
64 #define   TSA_CPM1_SIMODE_TDMB(x)		FIELD_PREP(TSA_CPM1_SIMODE_TDMB_MASK, x)
65 #define     TSA_QE_SIMODE_TDM_SAD_MASK		GENMASK(15, 12)
66 #define     TSA_QE_SIMODE_TDM_SAD(x)		FIELD_PREP(TSA_QE_SIMODE_TDM_SAD_MASK, x)
67 #define     TSA_CPM1_SIMODE_TDM_MASK		GENMASK(11, 0)
68 #define     TSA_SIMODE_TDM_SDM_MASK		GENMASK(11, 10)
69 #define       TSA_SIMODE_TDM_SDM_NORM		FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x0)
70 #define       TSA_SIMODE_TDM_SDM_ECHO		FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x1)
71 #define       TSA_SIMODE_TDM_SDM_INTL_LOOP	FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x2)
72 #define       TSA_SIMODE_TDM_SDM_LOOP_CTRL	FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x3)
73 #define     TSA_SIMODE_TDM_RFSD_MASK		GENMASK(9, 8)
74 #define     TSA_SIMODE_TDM_RFSD(x)		FIELD_PREP(TSA_SIMODE_TDM_RFSD_MASK, x)
75 #define     TSA_SIMODE_TDM_DSC			BIT(7)
76 #define     TSA_SIMODE_TDM_CRT			BIT(6)
77 #define     TSA_CPM1_SIMODE_TDM_STZ		BIT(5) /* bit 5: STZ in CPM1 */
78 #define     TSA_QE_SIMODE_TDM_SL		BIT(5) /* bit 5: SL in QE */
79 #define     TSA_SIMODE_TDM_CE			BIT(4)
80 #define     TSA_SIMODE_TDM_FE			BIT(3)
81 #define     TSA_SIMODE_TDM_GM			BIT(2)
82 #define     TSA_SIMODE_TDM_TFSD_MASK		GENMASK(1, 0)
83 #define     TSA_SIMODE_TDM_TFSD(x)		FIELD_PREP(TSA_SIMODE_TDM_TFSD_MASK, x)
84 
85 /* CPM SI global mode register (8 bits) */
86 #define TSA_CPM1_SIGMR	0x04
87 #define TSA_CPM1_SIGMR_ENB			BIT(3)
88 #define TSA_CPM1_SIGMR_ENA			BIT(2)
89 #define TSA_CPM1_SIGMR_RDM_MASK			GENMASK(1, 0)
90 #define   TSA_CPM1_SIGMR_RDM_STATIC_TDMA	FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x0)
91 #define   TSA_CPM1_SIGMR_RDM_DYN_TDMA		FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x1)
92 #define   TSA_CPM1_SIGMR_RDM_STATIC_TDMAB	FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x2)
93 #define   TSA_CPM1_SIGMR_RDM_DYN_TDMAB		FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x3)
94 
95 /* QE SI global mode register high (8 bits) */
96 #define TSA_QE_SIGLMRH	0x08
97 #define TSA_QE_SIGLMRH_END	BIT(3)
98 #define TSA_QE_SIGLMRH_ENC	BIT(2)
99 #define TSA_QE_SIGLMRH_ENB	BIT(1)
100 #define TSA_QE_SIGLMRH_ENA	BIT(0)
101 
102 /* SI clock route register (32 bits) */
103 #define TSA_CPM1_SICR	0x0C
104 #define   TSA_CPM1_SICR_SCC2_MASK		GENMASK(15, 8)
105 #define   TSA_CPM1_SICR_SCC2(x)			FIELD_PREP(TSA_CPM1_SICR_SCC2_MASK, x)
106 #define   TSA_CPM1_SICR_SCC3_MASK		GENMASK(23, 16)
107 #define   TSA_CPM1_SICR_SCC3(x)			FIELD_PREP(TSA_CPM1_SICR_SCC3_MASK, x)
108 #define   TSA_CPM1_SICR_SCC4_MASK		GENMASK(31, 24)
109 #define   TSA_CPM1_SICR_SCC4(x)			FIELD_PREP(TSA_CPM1_SICR_SCC4_MASK, x)
110 #define     TSA_CPM1_SICR_SCC_MASK		GENMASK(7, 0)
111 #define     TSA_CPM1_SICR_SCC_GRX		BIT(7)
112 #define     TSA_CPM1_SICR_SCC_SCX_TSA		BIT(6)
113 #define     TSA_CPM1_SICR_SCC_RXCS_MASK		GENMASK(5, 3)
114 #define       TSA_CPM1_SICR_SCC_RXCS_BRG1	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x0)
115 #define       TSA_CPM1_SICR_SCC_RXCS_BRG2	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x1)
116 #define       TSA_CPM1_SICR_SCC_RXCS_BRG3	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x2)
117 #define       TSA_CPM1_SICR_SCC_RXCS_BRG4	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x3)
118 #define       TSA_CPM1_SICR_SCC_RXCS_CLK15	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x4)
119 #define       TSA_CPM1_SICR_SCC_RXCS_CLK26	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x5)
120 #define       TSA_CPM1_SICR_SCC_RXCS_CLK37	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x6)
121 #define       TSA_CPM1_SICR_SCC_RXCS_CLK48	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x7)
122 #define     TSA_CPM1_SICR_SCC_TXCS_MASK		GENMASK(2, 0)
123 #define       TSA_CPM1_SICR_SCC_TXCS_BRG1	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x0)
124 #define       TSA_CPM1_SICR_SCC_TXCS_BRG2	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x1)
125 #define       TSA_CPM1_SICR_SCC_TXCS_BRG3	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x2)
126 #define       TSA_CPM1_SICR_SCC_TXCS_BRG4	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x3)
127 #define       TSA_CPM1_SICR_SCC_TXCS_CLK15	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x4)
128 #define       TSA_CPM1_SICR_SCC_TXCS_CLK26	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x5)
129 #define       TSA_CPM1_SICR_SCC_TXCS_CLK37	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x6)
130 #define       TSA_CPM1_SICR_SCC_TXCS_CLK48	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x7)
131 
/*
 * Area of the SI RAM assigned to one TDM direction (Rx or Tx).
 * Routing entries are written sequentially from entries_start.
 */
struct tsa_entries_area {
	void __iomem *entries_start; /* start of the assigned area */
	void __iomem *entries_next;  /* first byte after the assigned area */
	void __iomem *last_entry;    /* last entry written, NULL if none yet */
};
137 
/* Per-TDM state: clocks and the SI mode register value to program */
struct tsa_tdm {
	bool is_enable;          /* true once the TDM is parsed and configured */
	struct clk *l1rclk_clk;  /* Rx bit clock */
	struct clk *l1rsync_clk; /* Rx frame sync clock */
	struct clk *l1tclk_clk;  /* Tx bit clock (NULL when Rx/Tx pins are common) */
	struct clk *l1tsync_clk; /* Tx frame sync clock (NULL when Rx/Tx pins are common) */
	u32 simode_tdm;          /* SIMODE field value for this TDM */
};
146 
/* TDM identifiers, used as bit numbers in tsa->tdms */
#define TSA_TDMA	0
#define TSA_TDMB	1
#define TSA_TDMC	2 /* QE implementation only */
#define TSA_TDMD	3 /* QE implementation only */

/* Hardware flavor handled by this driver instance */
enum tsa_version {
	TSA_CPM1 = 1, /* Avoid 0 value */
	TSA_QE,
};
156 
/* TSA instance state */
struct tsa {
	struct device *dev;
	void __iomem *si_regs;     /* SI registers */
	void __iomem *si_ram;      /* SI routing RAM */
	resource_size_t si_ram_sz; /* SI routing RAM size, in bytes */
	spinlock_t	lock; /* Lock for read/modify/write sequence */
	enum tsa_version version;  /* TSA_CPM1 or TSA_QE */
	int tdms; /* TSA_TDMx ORed */
#if IS_ENABLED(CONFIG_QUICC_ENGINE)
	struct tsa_tdm tdm[4]; /* TDMa, TDMb, TDMc and TDMd */
#else
	struct tsa_tdm tdm[2]; /* TDMa and TDMb */
#endif
	/* Same number of serials for CPM1 and QE:
	 * CPM1: NU, 3 SCCs and 2 SMCs
	 * QE: NU and 5 UCCs
	 */
	struct tsa_serial {
		unsigned int id;             /* FSL_CPM_TSA_* or FSL_QE_TSA_* id */
		struct tsa_serial_info info; /* cached rates and time-slot counts */
	} serials[6];
};
179 
/* Retrieve the TSA instance a given serial belongs to */
static inline struct tsa *tsa_serial_get_tsa(struct tsa_serial *tsa_serial)
{
	/* The serials table is indexed by the serial id */
	return container_of(tsa_serial, struct tsa, serials[tsa_serial->id]);
}
185 
/* Big-endian 32-bit register write */
static inline void tsa_write32(void __iomem *addr, u32 val)
{
	iowrite32be(val, addr);
}
190 
/* Big-endian 16-bit register write */
static inline void tsa_write16(void __iomem *addr, u16 val)
{
	iowrite16be(val, addr);
}
195 
/* 8-bit register write (no byte order involved) */
static inline void tsa_write8(void __iomem *addr, u8 val)
{
	iowrite8(val, addr);
}
200 
/* Big-endian 32-bit register read */
static inline u32 tsa_read32(void __iomem *addr)
{
	return ioread32be(addr);
}
205 
/* Big-endian 16-bit register read */
static inline u16 tsa_read16(void __iomem *addr)
{
	return ioread16be(addr);
}
210 
/* Clear bits in a 32-bit register (read/modify/write, caller must lock) */
static inline void tsa_clrbits32(void __iomem *addr, u32 clr)
{
	tsa_write32(addr, tsa_read32(addr) & ~clr);
}
215 
/* Clear bits in a 16-bit register (read/modify/write, caller must lock) */
static inline void tsa_clrbits16(void __iomem *addr, u16 clr)
{
	tsa_write16(addr, tsa_read16(addr) & ~clr);
}
220 
/* Clear then set bits in a 32-bit register (read/modify/write, caller must lock) */
static inline void tsa_clrsetbits32(void __iomem *addr, u32 clr, u32 set)
{
	tsa_write32(addr, (tsa_read32(addr) & ~clr) | set);
}
225 
/*
 * Tell whether this instance is a QE one.
 * When both CPM and QUICC_ENGINE are compiled in, the runtime version
 * field decides; otherwise the answer is a compile-time constant, which
 * lets the compiler drop the unused code paths.
 */
static bool tsa_is_qe(const struct tsa *tsa)
{
	if (IS_ENABLED(CONFIG_QUICC_ENGINE) && IS_ENABLED(CONFIG_CPM))
		return tsa->version == TSA_QE;

	return IS_ENABLED(CONFIG_QUICC_ENGINE);
}
233 
tsa_qe_serial_get_num(struct tsa_serial * tsa_serial)234 static int tsa_qe_serial_get_num(struct tsa_serial *tsa_serial)
235 {
236 	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
237 
238 	switch (tsa_serial->id) {
239 	case FSL_QE_TSA_UCC1: return 0;
240 	case FSL_QE_TSA_UCC2: return 1;
241 	case FSL_QE_TSA_UCC3: return 2;
242 	case FSL_QE_TSA_UCC4: return 3;
243 	case FSL_QE_TSA_UCC5: return 4;
244 	default:
245 		break;
246 	}
247 
248 	dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id);
249 	return -EINVAL;
250 }
251 
tsa_serial_get_num(struct tsa_serial * tsa_serial)252 int tsa_serial_get_num(struct tsa_serial *tsa_serial)
253 {
254 	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
255 
256 	/*
257 	 * There is no need to get the serial num out of the TSA driver in the
258 	 * CPM case.
259 	 * Further more, in CPM, we can have 2 types of serial SCCs and FCCs.
260 	 * What kind of numbering to use that can be global to both SCCs and
261 	 * FCCs ?
262 	 */
263 	return tsa_is_qe(tsa) ? tsa_qe_serial_get_num(tsa_serial) : -EOPNOTSUPP;
264 }
265 EXPORT_SYMBOL(tsa_serial_get_num);
266 
/*
 * Connect (or disconnect) a CPM1 serial to/from the TSA.
 * Only SCC2, SCC3 and SCC4 can be routed through the TSA on CPM1.
 * Returns 0 on success, -EINVAL for an unsupported serial id.
 */
static int tsa_cpm1_serial_connect(struct tsa_serial *tsa_serial, bool connect)
{
	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
	unsigned long flags;
	u32 clear;
	u32 set;

	switch (tsa_serial->id) {
	case FSL_CPM_TSA_SCC2:
		clear = TSA_CPM1_SICR_SCC2(TSA_CPM1_SICR_SCC_MASK);
		set = TSA_CPM1_SICR_SCC2(TSA_CPM1_SICR_SCC_SCX_TSA);
		break;
	case FSL_CPM_TSA_SCC3:
		clear = TSA_CPM1_SICR_SCC3(TSA_CPM1_SICR_SCC_MASK);
		set = TSA_CPM1_SICR_SCC3(TSA_CPM1_SICR_SCC_SCX_TSA);
		break;
	case FSL_CPM_TSA_SCC4:
		clear = TSA_CPM1_SICR_SCC4(TSA_CPM1_SICR_SCC_MASK);
		set = TSA_CPM1_SICR_SCC4(TSA_CPM1_SICR_SCC_SCX_TSA);
		break;
	default:
		dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id);
		return -EINVAL;
	}

	/* Read/modify/write of SICR must be done under the lock */
	spin_lock_irqsave(&tsa->lock, flags);
	tsa_clrsetbits32(tsa->si_regs + TSA_CPM1_SICR, clear,
			 connect ? set : 0);
	spin_unlock_irqrestore(&tsa->lock, flags);

	return 0;
}
299 
/*
 * Connect (or disconnect) a QE serial (UCC) to/from the TSA through the
 * QE mux. Returns 0 on success or a negative error code.
 */
static int tsa_qe_serial_connect(struct tsa_serial *tsa_serial, bool connect)
{
	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
	unsigned long flags;
	int ucc_num;
	int ret;

	ucc_num = tsa_qe_serial_get_num(tsa_serial);
	if (ucc_num < 0)
		return ucc_num;

	/* Serialize the mux update with the other register RMW sequences */
	spin_lock_irqsave(&tsa->lock, flags);
	ret = ucc_set_qe_mux_tsa(ucc_num, connect);
	spin_unlock_irqrestore(&tsa->lock, flags);
	if (ret) {
		dev_err(tsa->dev, "Connect serial id %u to TSA failed (%d)\n",
			tsa_serial->id, ret);
		return ret;
	}
	return 0;
}
321 
tsa_serial_connect(struct tsa_serial * tsa_serial)322 int tsa_serial_connect(struct tsa_serial *tsa_serial)
323 {
324 	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
325 
326 	return tsa_is_qe(tsa) ?
327 		tsa_qe_serial_connect(tsa_serial, true) :
328 		tsa_cpm1_serial_connect(tsa_serial, true);
329 }
330 EXPORT_SYMBOL(tsa_serial_connect);
331 
tsa_serial_disconnect(struct tsa_serial * tsa_serial)332 int tsa_serial_disconnect(struct tsa_serial *tsa_serial)
333 {
334 	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
335 
336 	return tsa_is_qe(tsa) ?
337 		tsa_qe_serial_connect(tsa_serial, false) :
338 		tsa_cpm1_serial_connect(tsa_serial, false);
339 }
340 EXPORT_SYMBOL(tsa_serial_disconnect);
341 
tsa_serial_get_info(struct tsa_serial * tsa_serial,struct tsa_serial_info * info)342 int tsa_serial_get_info(struct tsa_serial *tsa_serial, struct tsa_serial_info *info)
343 {
344 	memcpy(info, &tsa_serial->info, sizeof(*info));
345 	return 0;
346 }
347 EXPORT_SYMBOL(tsa_serial_get_info);
348 
/*
 * Compute the SI RAM area assigned to one CPM1 TDM direction.
 *
 * With only TDMA enabled, the RAM is split in two halves: first half
 * for Rx, second half for Tx. When TDMB is used (alone or with TDMA),
 * each half is further split in two quarters, TDMA getting the first
 * quarter of each half and TDMB the second one.
 */
static void tsa_cpm1_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
				       u32 tdms, u32 tdm_id, bool is_rx)
{
	resource_size_t quarter = tsa->si_ram_sz / 4;
	resource_size_t half = tsa->si_ram_sz / 2;
	resource_size_t offset;
	resource_size_t size;

	if (tdms == BIT(TSA_TDMA)) {
		/* Only TDMA: one half per direction */
		size = half;
		offset = is_rx ? 0 : half;
	} else {
		/* Only TDMB or both TDMs: one quarter per TDM/direction */
		size = quarter;
		if (tdm_id == TSA_TDMA)
			offset = is_rx ? 0 : 2 * quarter;
		else
			offset = is_rx ? quarter : 3 * quarter;
	}

	area->entries_start = tsa->si_ram + offset;
	area->entries_next = area->entries_start + size;
	area->last_entry = NULL;
}
400 
/*
 * Compute the SI RAM area assigned to one QE TDM direction.
 *
 * One half of the SI RAM is used for Tx (first half) and the other one
 * for Rx (second half). In each half, 1/4 of the area is assigned to
 * each of the four TDMs.
 */
static void tsa_qe_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
				     u32 tdms, u32 tdm_id, bool is_rx)
{
	resource_size_t eighth = tsa->si_ram_sz / 8;
	resource_size_t half = tsa->si_ram_sz / 2;
	void __iomem *base;

	base = tsa->si_ram + (is_rx ? half : 0);
	area->entries_start = base + (eighth * tdm_id);
	area->entries_next = area->entries_start + eighth;
	area->last_entry = NULL;
}
426 
/* Initialize a SI RAM entries area for the current hardware flavor */
static void tsa_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
				  u32 tdms, u32 tdm_id, bool is_rx)
{
	if (!tsa_is_qe(tsa)) {
		tsa_cpm1_init_entries_area(tsa, area, tdms, tdm_id, is_rx);
		return;
	}

	tsa_qe_init_entries_area(tsa, area, tdms, tdm_id, is_rx);
}
435 
/* Human-readable name of a CPM1 serial id, NULL if unsupported */
static const char *tsa_cpm1_serial_id2name(struct tsa *tsa, u32 serial_id)
{
	const char *name = NULL;

	switch (serial_id) {
	case FSL_CPM_TSA_NU:
		name = "Not used";
		break;
	case FSL_CPM_TSA_SCC2:
		name = "SCC2";
		break;
	case FSL_CPM_TSA_SCC3:
		name = "SCC3";
		break;
	case FSL_CPM_TSA_SCC4:
		name = "SCC4";
		break;
	case FSL_CPM_TSA_SMC1:
		name = "SMC1";
		break;
	case FSL_CPM_TSA_SMC2:
		name = "SMC2";
		break;
	default:
		break;
	}

	return name;
}
450 
/* Human-readable name of a QE serial id, NULL if unsupported */
static const char *tsa_qe_serial_id2name(struct tsa *tsa, u32 serial_id)
{
	const char *name = NULL;

	switch (serial_id) {
	case FSL_QE_TSA_NU:
		name = "Not used";
		break;
	case FSL_QE_TSA_UCC1:
		name = "UCC1";
		break;
	case FSL_QE_TSA_UCC2:
		name = "UCC2";
		break;
	case FSL_QE_TSA_UCC3:
		name = "UCC3";
		break;
	case FSL_QE_TSA_UCC4:
		name = "UCC4";
		break;
	case FSL_QE_TSA_UCC5:
		name = "UCC5";
		break;
	default:
		break;
	}

	return name;
}
465 
/* Human-readable name of a serial id for the current hardware flavor */
static const char *tsa_serial_id2name(struct tsa *tsa, u32 serial_id)
{
	if (tsa_is_qe(tsa))
		return tsa_qe_serial_id2name(tsa, serial_id);

	return tsa_cpm1_serial_id2name(tsa, serial_id);
}
472 
/*
 * Map a CPM1 serial id to the CSEL field value of a SI RAM entry.
 * Unsupported ids map to the "not used" value.
 */
static u32 tsa_cpm1_serial_id2csel(struct tsa *tsa, u32 serial_id)
{
	u32 csel = TSA_CPM1_SIRAM_ENTRY_CSEL_NU;

	switch (serial_id) {
	case FSL_CPM_TSA_SCC2:
		csel = TSA_CPM1_SIRAM_ENTRY_CSEL_SCC2;
		break;
	case FSL_CPM_TSA_SCC3:
		csel = TSA_CPM1_SIRAM_ENTRY_CSEL_SCC3;
		break;
	case FSL_CPM_TSA_SCC4:
		csel = TSA_CPM1_SIRAM_ENTRY_CSEL_SCC4;
		break;
	case FSL_CPM_TSA_SMC1:
		csel = TSA_CPM1_SIRAM_ENTRY_CSEL_SMC1;
		break;
	case FSL_CPM_TSA_SMC2:
		csel = TSA_CPM1_SIRAM_ENTRY_CSEL_SMC2;
		break;
	default:
		break;
	}

	return csel;
}
486 
/*
 * Append CPM1 SI RAM entries routing 'count' time slots to 'serial_id'
 * in the given area. The previous last entry (if any) loses its LAST
 * flag and the newly written final entry becomes the last one.
 * Returns 0 on success, -ENOSPC if the area cannot hold the entries.
 */
static int tsa_cpm1_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
			      u32 count, u32 serial_id)
{
	void __iomem *addr;
	u32 left;
	u32 val;
	u32 cnt;
	u32 nb;

	/* Continue right after the last written entry, 4 bytes per entry */
	addr = area->last_entry ? area->last_entry + 4 : area->entries_start;

	/* Conservative estimate of the number of entries needed */
	nb = DIV_ROUND_UP(count, 8);
	if ((addr + (nb * 4)) > area->entries_next) {
		dev_err(tsa->dev, "si ram area full\n");
		return -ENOSPC;
	}

	if (area->last_entry) {
		/* Clear last flag */
		tsa_clrbits32(area->last_entry, TSA_CPM1_SIRAM_ENTRY_LAST);
	}

	/* Each entry routes up to 16 byte-sized time slots (CNT is cnt-1) */
	left = count;
	while (left) {
		val = TSA_CPM1_SIRAM_ENTRY_BYTE | tsa_cpm1_serial_id2csel(tsa, serial_id);

		if (left > 16) {
			cnt = 16;
		} else {
			cnt = left;
			val |= TSA_CPM1_SIRAM_ENTRY_LAST;
			area->last_entry = addr;
		}
		val |= TSA_CPM1_SIRAM_ENTRY_CNT(cnt - 1);

		tsa_write32(addr, val);
		addr += 4;
		left -= cnt;
	}

	return 0;
}
529 
/*
 * Map a QE serial id to the CSEL field value of a SI RAM entry.
 * Unsupported ids map to the "not used" value.
 */
static u32 tsa_qe_serial_id2csel(struct tsa *tsa, u32 serial_id)
{
	u32 csel = TSA_QE_SIRAM_ENTRY_CSEL_NU;

	switch (serial_id) {
	case FSL_QE_TSA_UCC1:
		csel = TSA_QE_SIRAM_ENTRY_CSEL_UCC1;
		break;
	case FSL_QE_TSA_UCC2:
		csel = TSA_QE_SIRAM_ENTRY_CSEL_UCC2;
		break;
	case FSL_QE_TSA_UCC3:
		csel = TSA_QE_SIRAM_ENTRY_CSEL_UCC3;
		break;
	case FSL_QE_TSA_UCC4:
		csel = TSA_QE_SIRAM_ENTRY_CSEL_UCC4;
		break;
	case FSL_QE_TSA_UCC5:
		csel = TSA_QE_SIRAM_ENTRY_CSEL_UCC5;
		break;
	default:
		break;
	}

	return csel;
}
543 
/*
 * Append QE SI RAM entries routing 'count' time slots to 'serial_id'
 * in the given area. The previous last entry (if any) loses its LAST
 * flag and the newly written final entry becomes the last one.
 * Returns 0 on success, -ENOSPC if the area cannot hold the entries.
 */
static int tsa_qe_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
			    u32 count, u32 serial_id)
{
	void __iomem *addr;
	u32 left;
	u32 val;
	u32 cnt;
	u32 nb;

	/* Continue right after the last written entry, 2 bytes per entry */
	addr = area->last_entry ? area->last_entry + 2 : area->entries_start;

	/* Number of entries needed (8 time slots max per entry) */
	nb = DIV_ROUND_UP(count, 8);
	if ((addr + (nb * 2)) > area->entries_next) {
		dev_err(tsa->dev, "si ram area full\n");
		return -ENOSPC;
	}

	if (area->last_entry) {
		/* Clear last flag */
		tsa_clrbits16(area->last_entry, TSA_QE_SIRAM_ENTRY_LAST);
	}

	/* Each entry routes up to 8 byte-sized time slots (CNT is cnt-1) */
	left = count;
	while (left) {
		val = TSA_QE_SIRAM_ENTRY_BYTE | tsa_qe_serial_id2csel(tsa, serial_id);

		if (left > 8) {
			cnt = 8;
		} else {
			cnt = left;
			val |= TSA_QE_SIRAM_ENTRY_LAST;
			area->last_entry = addr;
		}
		val |= TSA_QE_SIRAM_ENTRY_CNT(cnt - 1);

		tsa_write16(addr, val);
		addr += 2;
		left -= cnt;
	}

	return 0;
}
586 
/* Add a routing entry for the current hardware flavor */
static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
			 u32 count, u32 serial_id)
{
	if (tsa_is_qe(tsa))
		return tsa_qe_add_entry(tsa, area, count, serial_id);

	return tsa_cpm1_add_entry(tsa, area, count, serial_id);
}
594 
/*
 * Parse one direction (Rx or Tx) of a TDM route from the device tree.
 *
 * The "fsl,rx-ts-routes" / "fsl,tx-ts-routes" properties are lists of
 * (count, serial_id) pairs. For each pair, SI RAM entries are written
 * and the per-serial cached info (rates, time-slot counts) is updated.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int tsa_of_parse_tdm_route(struct tsa *tsa, struct device_node *tdm_np,
				  u32 tdms, u32 tdm_id, bool is_rx)
{
	struct tsa_entries_area area;
	const char *route_name;
	u32 serial_id;
	int len, i;
	u32 count;
	const char *serial_name;
	struct tsa_serial_info *serial_info;
	struct tsa_tdm *tdm;
	int ret;
	u32 ts;

	route_name = is_rx ? "fsl,rx-ts-routes" : "fsl,tx-ts-routes";

	len = of_property_count_u32_elems(tdm_np,  route_name);
	if (len < 0) {
		dev_err(tsa->dev, "%pOF: failed to read %s\n", tdm_np, route_name);
		return len;
	}
	/* The route list must be made of (count, serial_id) pairs */
	if (len % 2 != 0) {
		dev_err(tsa->dev, "%pOF: wrong %s format\n", tdm_np, route_name);
		return -EINVAL;
	}

	tsa_init_entries_area(tsa, &area, tdms, tdm_id, is_rx);
	ts = 0;
	for (i = 0; i < len; i += 2) {
		of_property_read_u32_index(tdm_np, route_name, i, &count);
		of_property_read_u32_index(tdm_np, route_name, i + 1, &serial_id);

		if (serial_id >= ARRAY_SIZE(tsa->serials)) {
			dev_err(tsa->dev, "%pOF: invalid serial id (%u)\n",
				tdm_np, serial_id);
			return -EINVAL;
		}

		serial_name = tsa_serial_id2name(tsa, serial_id);
		if (!serial_name) {
			dev_err(tsa->dev, "%pOF: unsupported serial id (%u)\n",
				tdm_np, serial_id);
			return -EINVAL;
		}

		dev_dbg(tsa->dev, "tdm_id=%u, %s ts %u..%u -> %s\n",
			tdm_id, route_name, ts, ts + count - 1, serial_name);
		ts += count;

		ret = tsa_add_entry(tsa, &area, count, serial_id);
		if (ret)
			return ret;

		/* Update the cached rates and time-slot counts of the serial */
		serial_info = &tsa->serials[serial_id].info;
		tdm = &tsa->tdm[tdm_id];
		if (is_rx) {
			serial_info->rx_fs_rate = clk_get_rate(tdm->l1rsync_clk);
			serial_info->rx_bit_rate = clk_get_rate(tdm->l1rclk_clk);
			serial_info->nb_rx_ts += count;
		} else {
			/* Tx clocks may be absent; fall back to the Rx ones */
			serial_info->tx_fs_rate = tdm->l1tsync_clk ?
				clk_get_rate(tdm->l1tsync_clk) :
				clk_get_rate(tdm->l1rsync_clk);
			serial_info->tx_bit_rate = tdm->l1tclk_clk ?
				clk_get_rate(tdm->l1tclk_clk) :
				clk_get_rate(tdm->l1rclk_clk);
			serial_info->nb_tx_ts += count;
		}
	}
	return 0;
}
666 
/* Parse the Rx time-slot routes of a TDM node */
static inline int tsa_of_parse_tdm_rx_route(struct tsa *tsa,
					    struct device_node *tdm_np,
					    u32 tdms, u32 tdm_id)
{
	return tsa_of_parse_tdm_route(tsa, tdm_np, tdms, tdm_id, true);
}
673 
/* Parse the Tx time-slot routes of a TDM node */
static inline int tsa_of_parse_tdm_tx_route(struct tsa *tsa,
					    struct device_node *tdm_np,
					    u32 tdms, u32 tdm_id)
{
	return tsa_of_parse_tdm_route(tsa, tdm_np, tdms, tdm_id, false);
}
680 
tsa_of_parse_tdms(struct tsa * tsa,struct device_node * np)681 static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
682 {
683 	struct tsa_tdm *tdm;
684 	struct clk *clk;
685 	u32 tdm_id, val;
686 	int ret;
687 	int i;
688 
689 	tsa->tdms = 0;
690 	for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++)
691 		tsa->tdm[i].is_enable = false;
692 
693 	for_each_available_child_of_node_scoped(np, tdm_np) {
694 		ret = of_property_read_u32(tdm_np, "reg", &tdm_id);
695 		if (ret) {
696 			dev_err(tsa->dev, "%pOF: failed to read reg\n", tdm_np);
697 			return ret;
698 		}
699 		switch (tdm_id) {
700 		case 0:
701 			tsa->tdms |= BIT(TSA_TDMA);
702 			break;
703 		case 1:
704 			tsa->tdms |= BIT(TSA_TDMB);
705 			break;
706 		case 2:
707 			if (!tsa_is_qe(tsa))
708 				goto invalid_tdm; /* Not available on CPM1 */
709 			tsa->tdms |= BIT(TSA_TDMC);
710 			break;
711 		case 3:
712 			if (!tsa_is_qe(tsa))
713 				goto invalid_tdm;  /* Not available on CPM1 */
714 			tsa->tdms |= BIT(TSA_TDMD);
715 			break;
716 		default:
717 invalid_tdm:
718 			dev_err(tsa->dev, "%pOF: Invalid tdm_id (%u)\n", tdm_np,
719 				tdm_id);
720 			return -EINVAL;
721 		}
722 	}
723 
724 	for_each_available_child_of_node_scoped(np, tdm_np) {
725 		ret = of_property_read_u32(tdm_np, "reg", &tdm_id);
726 		if (ret) {
727 			dev_err(tsa->dev, "%pOF: failed to read reg\n", tdm_np);
728 			return ret;
729 		}
730 
731 		tdm = &tsa->tdm[tdm_id];
732 		tdm->simode_tdm = TSA_SIMODE_TDM_SDM_NORM;
733 
734 		val = 0;
735 		ret = of_property_read_u32(tdm_np, "fsl,rx-frame-sync-delay-bits",
736 					   &val);
737 		if (ret && ret != -EINVAL) {
738 			dev_err(tsa->dev,
739 				"%pOF: failed to read fsl,rx-frame-sync-delay-bits\n",
740 				tdm_np);
741 			return ret;
742 		}
743 		if (val > 3) {
744 			dev_err(tsa->dev,
745 				"%pOF: Invalid fsl,rx-frame-sync-delay-bits (%u)\n",
746 				tdm_np, val);
747 			return -EINVAL;
748 		}
749 		tdm->simode_tdm |= TSA_SIMODE_TDM_RFSD(val);
750 
751 		val = 0;
752 		ret = of_property_read_u32(tdm_np, "fsl,tx-frame-sync-delay-bits",
753 					   &val);
754 		if (ret && ret != -EINVAL) {
755 			dev_err(tsa->dev,
756 				"%pOF: failed to read fsl,tx-frame-sync-delay-bits\n",
757 				tdm_np);
758 			return ret;
759 		}
760 		if (val > 3) {
761 			dev_err(tsa->dev,
762 				"%pOF: Invalid fsl,tx-frame-sync-delay-bits (%u)\n",
763 				tdm_np, val);
764 			return -EINVAL;
765 		}
766 		tdm->simode_tdm |= TSA_SIMODE_TDM_TFSD(val);
767 
768 		if (of_property_read_bool(tdm_np, "fsl,common-rxtx-pins"))
769 			tdm->simode_tdm |= TSA_SIMODE_TDM_CRT;
770 
771 		if (of_property_read_bool(tdm_np, "fsl,clock-falling-edge"))
772 			tdm->simode_tdm |= TSA_SIMODE_TDM_CE;
773 
774 		if (of_property_read_bool(tdm_np, "fsl,fsync-rising-edge"))
775 			tdm->simode_tdm |= TSA_SIMODE_TDM_FE;
776 
777 		if (tsa_is_qe(tsa) &&
778 		    of_property_read_bool(tdm_np, "fsl,fsync-active-low"))
779 			tdm->simode_tdm |= TSA_QE_SIMODE_TDM_SL;
780 
781 		if (of_property_read_bool(tdm_np, "fsl,double-speed-clock"))
782 			tdm->simode_tdm |= TSA_SIMODE_TDM_DSC;
783 
784 		clk = of_clk_get_by_name(tdm_np, tsa_is_qe(tsa) ? "rsync" : "l1rsync");
785 		if (IS_ERR(clk)) {
786 			ret = PTR_ERR(clk);
787 			goto err;
788 		}
789 		ret = clk_prepare_enable(clk);
790 		if (ret) {
791 			clk_put(clk);
792 			goto err;
793 		}
794 		tdm->l1rsync_clk = clk;
795 
796 		clk = of_clk_get_by_name(tdm_np, tsa_is_qe(tsa) ? "rclk" : "l1rclk");
797 		if (IS_ERR(clk)) {
798 			ret = PTR_ERR(clk);
799 			goto err;
800 		}
801 		ret = clk_prepare_enable(clk);
802 		if (ret) {
803 			clk_put(clk);
804 			goto err;
805 		}
806 		tdm->l1rclk_clk = clk;
807 
808 		if (!(tdm->simode_tdm & TSA_SIMODE_TDM_CRT)) {
809 			clk = of_clk_get_by_name(tdm_np, tsa_is_qe(tsa) ? "tsync" : "l1tsync");
810 			if (IS_ERR(clk)) {
811 				ret = PTR_ERR(clk);
812 				goto err;
813 			}
814 			ret = clk_prepare_enable(clk);
815 			if (ret) {
816 				clk_put(clk);
817 				goto err;
818 			}
819 			tdm->l1tsync_clk = clk;
820 
821 			clk = of_clk_get_by_name(tdm_np, tsa_is_qe(tsa) ? "tclk" : "l1tclk");
822 			if (IS_ERR(clk)) {
823 				ret = PTR_ERR(clk);
824 				goto err;
825 			}
826 			ret = clk_prepare_enable(clk);
827 			if (ret) {
828 				clk_put(clk);
829 				goto err;
830 			}
831 			tdm->l1tclk_clk = clk;
832 		}
833 
834 		if (tsa_is_qe(tsa)) {
835 			/*
836 			 * The starting address for TSA table must be set.
837 			 * 512 entries for Tx and 512 entries for Rx are
838 			 * available for 4 TDMs.
839 			 * We assign entries equally -> 128 Rx/Tx entries per
840 			 * TDM. In other words, 4 blocks of 32 entries per TDM.
841 			 */
842 			tdm->simode_tdm |= TSA_QE_SIMODE_TDM_SAD(4 * tdm_id);
843 		}
844 
845 		ret = tsa_of_parse_tdm_rx_route(tsa, tdm_np, tsa->tdms, tdm_id);
846 		if (ret)
847 			goto err;
848 
849 		ret = tsa_of_parse_tdm_tx_route(tsa, tdm_np, tsa->tdms, tdm_id);
850 		if (ret)
851 			goto err;
852 
853 		tdm->is_enable = true;
854 	}
855 	return 0;
856 
857 err:
858 	for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++) {
859 		if (tsa->tdm[i].l1rsync_clk) {
860 			clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
861 			clk_put(tsa->tdm[i].l1rsync_clk);
862 		}
863 		if (tsa->tdm[i].l1rclk_clk) {
864 			clk_disable_unprepare(tsa->tdm[i].l1rclk_clk);
865 			clk_put(tsa->tdm[i].l1rclk_clk);
866 		}
867 		if (tsa->tdm[i].l1tsync_clk) {
868 			clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
869 			clk_put(tsa->tdm[i].l1rsync_clk);
870 		}
871 		if (tsa->tdm[i].l1tclk_clk) {
872 			clk_disable_unprepare(tsa->tdm[i].l1rclk_clk);
873 			clk_put(tsa->tdm[i].l1rclk_clk);
874 		}
875 	}
876 	return ret;
877 }
878 
tsa_init_si_ram(struct tsa * tsa)879 static void tsa_init_si_ram(struct tsa *tsa)
880 {
881 	resource_size_t i;
882 
883 	/* Fill all entries as the last one */
884 	if (tsa_is_qe(tsa)) {
885 		for (i = 0; i < tsa->si_ram_sz; i += 2)
886 			tsa_write16(tsa->si_ram + i, TSA_QE_SIRAM_ENTRY_LAST);
887 	} else {
888 		for (i = 0; i < tsa->si_ram_sz; i += 4)
889 			tsa_write32(tsa->si_ram + i, TSA_CPM1_SIRAM_ENTRY_LAST);
890 	}
891 }
892 
/*
 * Program the CPM1 SI from the parsed TDM configurations:
 * SIMODE gets the per-TDM mode fields, SIGMR selects static routing
 * and enables the TDMs in use. Always returns 0.
 */
static int tsa_cpm1_setup(struct tsa *tsa)
{
	u32 val;

	/* Set SIMODE */
	val = 0;
	if (tsa->tdm[0].is_enable)
		val |= TSA_CPM1_SIMODE_TDMA(tsa->tdm[0].simode_tdm);
	if (tsa->tdm[1].is_enable)
		val |= TSA_CPM1_SIMODE_TDMB(tsa->tdm[1].simode_tdm);

	tsa_clrsetbits32(tsa->si_regs + TSA_CPM1_SIMODE,
			 TSA_CPM1_SIMODE_TDMA(TSA_CPM1_SIMODE_TDM_MASK) |
			 TSA_CPM1_SIMODE_TDMB(TSA_CPM1_SIMODE_TDM_MASK),
			 val);

	/* Set SIGMR */
	val = (tsa->tdms == BIT(TSA_TDMA)) ?
		TSA_CPM1_SIGMR_RDM_STATIC_TDMA : TSA_CPM1_SIGMR_RDM_STATIC_TDMAB;
	if (tsa->tdms & BIT(TSA_TDMA))
		val |= TSA_CPM1_SIGMR_ENA;
	if (tsa->tdms & BIT(TSA_TDMB))
		val |= TSA_CPM1_SIGMR_ENB;
	tsa_write8(tsa->si_regs + TSA_CPM1_SIGMR, val);

	return 0;
}
920 
tsa_qe_setup(struct tsa * tsa)921 static int tsa_qe_setup(struct tsa *tsa)
922 {
923 	unsigned int sixmr;
924 	u8 siglmrh = 0;
925 	unsigned int i;
926 
927 	for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++) {
928 		if (!tsa->tdm[i].is_enable)
929 			continue;
930 
931 		switch (i) {
932 		case 0:
933 			sixmr = TSA_QE_SIAMR;
934 			siglmrh |= TSA_QE_SIGLMRH_ENA;
935 			break;
936 		case 1:
937 			sixmr = TSA_QE_SIBMR;
938 			siglmrh |= TSA_QE_SIGLMRH_ENB;
939 			break;
940 		case 2:
941 			sixmr = TSA_QE_SICMR;
942 			siglmrh |= TSA_QE_SIGLMRH_ENC;
943 			break;
944 		case 3:
945 			sixmr = TSA_QE_SIDMR;
946 			siglmrh |= TSA_QE_SIGLMRH_END;
947 			break;
948 		default:
949 			return -EINVAL;
950 		}
951 
952 		/* Set SI mode register */
953 		tsa_write16(tsa->si_regs + sixmr, tsa->tdm[i].simode_tdm);
954 	}
955 
956 	/* Enable TDMs */
957 	tsa_write8(tsa->si_regs + TSA_QE_SIGLMRH, siglmrh);
958 
959 	return 0;
960 }
961 
/* Dispatch hardware setup to the variant-specific handler */
static int tsa_setup(struct tsa *tsa)
{
	if (tsa_is_qe(tsa))
		return tsa_qe_setup(tsa);

	return tsa_cpm1_setup(tsa);
}
966 
/*
 * Probe: identify the hardware variant, map the SI registers and SI RAM,
 * parse the TDM description from the device tree and program the hardware.
 */
static int tsa_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct resource *res;
	struct tsa *tsa;
	unsigned int i;
	int ret;

	tsa = devm_kzalloc(&pdev->dev, sizeof(*tsa), GFP_KERNEL);
	if (!tsa)
		return -ENOMEM;

	tsa->dev = &pdev->dev;
	/* The hardware variant (CPM1 or QE) is carried by the OF match data */
	tsa->version = (enum tsa_version)(uintptr_t)of_device_get_match_data(&pdev->dev);
	switch (tsa->version) {
	case TSA_CPM1:
		dev_info(tsa->dev, "CPM1 version\n");
		break;
	case TSA_QE:
		dev_info(tsa->dev, "QE version\n");
		break;
	default:
		dev_err(tsa->dev, "Unknown version (%d)\n", tsa->version);
		return -EINVAL;
	}

	/*
	 * Serial ids are the indexes in the serials[] table; consumers check
	 * this invariant in tsa_serial_get_byphandle().
	 */
	for (i = 0; i < ARRAY_SIZE(tsa->serials); i++)
		tsa->serials[i].id = i;

	spin_lock_init(&tsa->lock);

	tsa->si_regs = devm_platform_ioremap_resource_byname(pdev, "si_regs");
	if (IS_ERR(tsa->si_regs))
		return PTR_ERR(tsa->si_regs);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "si_ram");
	if (!res) {
		dev_err(tsa->dev, "si_ram resource missing\n");
		return -EINVAL;
	}
	tsa->si_ram_sz = resource_size(res);
	tsa->si_ram = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(tsa->si_ram))
		return PTR_ERR(tsa->si_ram);

	/* Mark all SI RAM entries as 'last' before any routing is programmed */
	tsa_init_si_ram(tsa);

	ret = tsa_of_parse_tdms(tsa, np);
	if (ret)
		return ret;

	ret = tsa_setup(tsa);
	if (ret)
		return ret;

	/* drvdata also signals "probe complete" to tsa_serial_get_byphandle() */
	platform_set_drvdata(pdev, tsa);

	return 0;
}
1026 
tsa_remove(struct platform_device * pdev)1027 static void tsa_remove(struct platform_device *pdev)
1028 {
1029 	struct tsa *tsa = platform_get_drvdata(pdev);
1030 	int i;
1031 
1032 	for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++) {
1033 		if (tsa->tdm[i].l1rsync_clk) {
1034 			clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
1035 			clk_put(tsa->tdm[i].l1rsync_clk);
1036 		}
1037 		if (tsa->tdm[i].l1rclk_clk) {
1038 			clk_disable_unprepare(tsa->tdm[i].l1rclk_clk);
1039 			clk_put(tsa->tdm[i].l1rclk_clk);
1040 		}
1041 		if (tsa->tdm[i].l1tsync_clk) {
1042 			clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
1043 			clk_put(tsa->tdm[i].l1rsync_clk);
1044 		}
1045 		if (tsa->tdm[i].l1tclk_clk) {
1046 			clk_disable_unprepare(tsa->tdm[i].l1rclk_clk);
1047 			clk_put(tsa->tdm[i].l1rclk_clk);
1048 		}
1049 	}
1050 }
1051 
/*
 * OF match table. The match data encodes the hardware variant
 * (enum tsa_version) retrieved in tsa_probe().
 */
static const struct of_device_id tsa_id_table[] = {
#if IS_ENABLED(CONFIG_CPM1)
	{ .compatible = "fsl,cpm1-tsa", .data = (void *)TSA_CPM1 },
#endif
#if IS_ENABLED(CONFIG_QUICC_ENGINE)
	{ .compatible = "fsl,qe-tsa", .data = (void *)TSA_QE },
#endif
	{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, tsa_id_table);
1062 
/* Platform driver registration; also referenced by tsa_serial_get_byphandle() */
static struct platform_driver tsa_driver = {
	.driver = {
		.name = "fsl-tsa",
		.of_match_table = of_match_ptr(tsa_id_table),
	},
	.probe = tsa_probe,
	.remove = tsa_remove,
};
module_platform_driver(tsa_driver);
1072 
tsa_serial_get_byphandle(struct device_node * np,const char * phandle_name)1073 struct tsa_serial *tsa_serial_get_byphandle(struct device_node *np,
1074 					    const char *phandle_name)
1075 {
1076 	struct of_phandle_args out_args;
1077 	struct platform_device *pdev;
1078 	struct tsa_serial *tsa_serial;
1079 	struct tsa *tsa;
1080 	int ret;
1081 
1082 	ret = of_parse_phandle_with_fixed_args(np, phandle_name, 1, 0, &out_args);
1083 	if (ret < 0)
1084 		return ERR_PTR(ret);
1085 
1086 	if (!of_match_node(tsa_driver.driver.of_match_table, out_args.np)) {
1087 		of_node_put(out_args.np);
1088 		return ERR_PTR(-EINVAL);
1089 	}
1090 
1091 	pdev = of_find_device_by_node(out_args.np);
1092 	of_node_put(out_args.np);
1093 	if (!pdev)
1094 		return ERR_PTR(-ENODEV);
1095 
1096 	tsa = platform_get_drvdata(pdev);
1097 	if (!tsa) {
1098 		platform_device_put(pdev);
1099 		return ERR_PTR(-EPROBE_DEFER);
1100 	}
1101 
1102 	if (out_args.args_count != 1) {
1103 		platform_device_put(pdev);
1104 		return ERR_PTR(-EINVAL);
1105 	}
1106 
1107 	if (out_args.args[0] >= ARRAY_SIZE(tsa->serials)) {
1108 		platform_device_put(pdev);
1109 		return ERR_PTR(-EINVAL);
1110 	}
1111 
1112 	tsa_serial = &tsa->serials[out_args.args[0]];
1113 
1114 	/*
1115 	 * Be sure that the serial id matches the phandle arg.
1116 	 * The tsa_serials table is indexed by serial ids. The serial id is set
1117 	 * during the probe() call and needs to be coherent.
1118 	 */
1119 	if (WARN_ON(tsa_serial->id != out_args.args[0])) {
1120 		platform_device_put(pdev);
1121 		return ERR_PTR(-EINVAL);
1122 	}
1123 
1124 	return tsa_serial;
1125 }
1126 EXPORT_SYMBOL(tsa_serial_get_byphandle);
1127 
tsa_serial_put(struct tsa_serial * tsa_serial)1128 void tsa_serial_put(struct tsa_serial *tsa_serial)
1129 {
1130 	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
1131 
1132 	put_device(tsa->dev);
1133 }
1134 EXPORT_SYMBOL(tsa_serial_put);
1135 
/* devres destructor: drop the reference held by a managed TSA serial */
static void devm_tsa_serial_release(struct device *dev, void *res)
{
	tsa_serial_put(*(struct tsa_serial **)res);
}
1142 
devm_tsa_serial_get_byphandle(struct device * dev,struct device_node * np,const char * phandle_name)1143 struct tsa_serial *devm_tsa_serial_get_byphandle(struct device *dev,
1144 						 struct device_node *np,
1145 						 const char *phandle_name)
1146 {
1147 	struct tsa_serial *tsa_serial;
1148 	struct tsa_serial **dr;
1149 
1150 	dr = devres_alloc(devm_tsa_serial_release, sizeof(*dr), GFP_KERNEL);
1151 	if (!dr)
1152 		return ERR_PTR(-ENOMEM);
1153 
1154 	tsa_serial = tsa_serial_get_byphandle(np, phandle_name);
1155 	if (!IS_ERR(tsa_serial)) {
1156 		*dr = tsa_serial;
1157 		devres_add(dev, dr);
1158 	} else {
1159 		devres_free(dr);
1160 	}
1161 
1162 	return tsa_serial;
1163 }
1164 EXPORT_SYMBOL(devm_tsa_serial_get_byphandle);
1165 
/* Module metadata */
MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
MODULE_DESCRIPTION("CPM/QE TSA driver");
MODULE_LICENSE("GPL");
1169