xref: /linux/drivers/media/i2c/ds90ub960.c (revision 8e1bb4a41aa78d6105e59186af3dcd545fc66e70)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Driver for the Texas Instruments DS90UB960-Q1 video deserializer
4  *
5  * Copyright (c) 2019 Luca Ceresoli <luca@lucaceresoli.net>
6  * Copyright (c) 2023 Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
7  */
8 
9 /*
10  * (Possible) TODOs:
11  *
12  * - PM for serializer and remote peripherals. We need to manage:
13  *   - VPOC
14  *     - Power domain? Regulator? Somehow any remote device should be able to
15  *       cause the VPOC to be turned on.
16  *   - Link between the deserializer and the serializer
17  *     - Related to VPOC management. We probably always want to turn on the VPOC
18  *       and then enable the link.
19  *   - Serializer's services: i2c, gpios, power
20  *     - The serializer needs to resume before the remote peripherals can
21  *       e.g. use the i2c.
22  *     - How to handle gpios? Reserving a gpio essentially keeps the provider
23  *       (serializer) always powered on.
24  * - Do we need a new bus for the FPD-Link? At the moment the serializers
25  *   are children of the same i2c-adapter where the deserializer resides.
26  * - i2c-atr could be made embeddable instead of allocatable.
27  */
28 
29 #include <linux/bitops.h>
30 #include <linux/clk.h>
31 #include <linux/delay.h>
32 #include <linux/fwnode.h>
33 #include <linux/gpio/consumer.h>
34 #include <linux/i2c-atr.h>
35 #include <linux/i2c.h>
36 #include <linux/init.h>
37 #include <linux/interrupt.h>
38 #include <linux/kernel.h>
39 #include <linux/kthread.h>
40 #include <linux/module.h>
41 #include <linux/mutex.h>
42 #include <linux/property.h>
43 #include <linux/regmap.h>
44 #include <linux/regulator/consumer.h>
45 #include <linux/slab.h>
46 #include <linux/workqueue.h>
47 
48 #include <media/i2c/ds90ub9xx.h>
49 #include <media/mipi-csi2.h>
50 #include <media/v4l2-ctrls.h>
51 #include <media/v4l2-event.h>
52 #include <media/v4l2-fwnode.h>
53 #include <media/v4l2-subdev.h>
54 
55 #define MHZ(v) ((u32)((v) * 1000000U))
56 
57 #define UB960_POLL_TIME_MS	500
58 
59 #define UB960_MAX_RX_NPORTS	4
60 #define UB960_MAX_TX_NPORTS	2
61 #define UB960_MAX_NPORTS	(UB960_MAX_RX_NPORTS + UB960_MAX_TX_NPORTS)
62 
63 #define UB960_MAX_PORT_ALIASES	8
64 
65 #define UB960_NUM_BC_GPIOS		4
66 
67 /*
68  * Register map
69  *
70  * 0x00-0x32   Shared (UB960_SR)
71  * 0x33-0x3a   CSI-2 TX (per-port paged on DS90UB960, shared on 954) (UB960_TR)
72  * 0x4c        Shared (UB960_SR)
73  * 0x4d-0x7f   FPD-Link RX, per-port paged (UB960_RR)
74  * 0xb0-0xbf   Shared (UB960_SR)
75  * 0xd0-0xdf   FPD-Link RX, per-port paged (UB960_RR)
76  * 0xf0-0xf5   Shared (UB960_SR)
77  * 0xf8-0xfb   Shared (UB960_SR)
78  * All others  Reserved
79  *
80  * Register prefixes:
81  * UB960_SR_* = Shared register
82  * UB960_RR_* = FPD-Link RX, per-port paged register
83  * UB960_TR_* = CSI-2 TX, per-port paged register
84  * UB960_XR_* = Reserved register
85  * UB960_IR_* = Indirect register
86  */
87 
88 #define UB960_SR_I2C_DEV_ID			0x00
89 #define UB960_SR_RESET				0x01
90 #define UB960_SR_RESET_DIGITAL_RESET1		BIT(1)
91 #define UB960_SR_RESET_DIGITAL_RESET0		BIT(0)
92 #define UB960_SR_RESET_GPIO_LOCK_RELEASE	BIT(5)
93 
94 #define UB960_SR_GEN_CONFIG			0x02
95 #define UB960_SR_REV_MASK			0x03
96 #define UB960_SR_DEVICE_STS			0x04
97 #define UB960_SR_PAR_ERR_THOLD_HI		0x05
98 #define UB960_SR_PAR_ERR_THOLD_LO		0x06
99 #define UB960_SR_BCC_WDOG_CTL			0x07
100 #define UB960_SR_I2C_CTL1			0x08
101 #define UB960_SR_I2C_CTL2			0x09
102 #define UB960_SR_SCL_HIGH_TIME			0x0a
103 #define UB960_SR_SCL_LOW_TIME			0x0b
104 #define UB960_SR_RX_PORT_CTL			0x0c
105 #define UB960_SR_IO_CTL				0x0d
106 #define UB960_SR_GPIO_PIN_STS			0x0e
107 #define UB960_SR_GPIO_INPUT_CTL			0x0f
108 #define UB960_SR_GPIO_PIN_CTL(n)		(0x10 + (n)) /* n < UB960_NUM_GPIOS */
109 #define UB960_SR_GPIO_PIN_CTL_GPIO_OUT_SEL		5
110 #define UB960_SR_GPIO_PIN_CTL_GPIO_OUT_SRC_SHIFT	2
111 #define UB960_SR_GPIO_PIN_CTL_GPIO_OUT_EN		BIT(0)
112 
113 #define UB960_SR_FS_CTL				0x18
114 #define UB960_SR_FS_HIGH_TIME_1			0x19
115 #define UB960_SR_FS_HIGH_TIME_0			0x1a
116 #define UB960_SR_FS_LOW_TIME_1			0x1b
117 #define UB960_SR_FS_LOW_TIME_0			0x1c
118 #define UB960_SR_MAX_FRM_HI			0x1d
119 #define UB960_SR_MAX_FRM_LO			0x1e
120 #define UB960_SR_CSI_PLL_CTL			0x1f
121 
122 #define UB960_SR_FWD_CTL1			0x20
123 #define UB960_SR_FWD_CTL1_PORT_DIS(n)		BIT((n) + 4)
124 
125 #define UB960_SR_FWD_CTL2			0x21
126 #define UB960_SR_FWD_STS			0x22
127 
128 #define UB960_SR_INTERRUPT_CTL			0x23
129 #define UB960_SR_INTERRUPT_CTL_INT_EN		BIT(7)
130 #define UB960_SR_INTERRUPT_CTL_IE_CSI_TX0	BIT(4)
131 #define UB960_SR_INTERRUPT_CTL_IE_RX(n)		BIT((n)) /* rxport[n] IRQ */
132 
133 #define UB960_SR_INTERRUPT_STS			0x24
134 #define UB960_SR_INTERRUPT_STS_INT		BIT(7)
135 #define UB960_SR_INTERRUPT_STS_IS_CSI_TX(n)	BIT(4 + (n)) /* txport[n] IRQ */
136 #define UB960_SR_INTERRUPT_STS_IS_RX(n)		BIT((n)) /* rxport[n] IRQ */
137 
138 #define UB960_SR_TS_CONFIG			0x25
139 #define UB960_SR_TS_CONTROL			0x26
140 #define UB960_SR_TS_LINE_HI			0x27
141 #define UB960_SR_TS_LINE_LO			0x28
142 #define UB960_SR_TS_STATUS			0x29
143 #define UB960_SR_TIMESTAMP_P0_HI		0x2a
144 #define UB960_SR_TIMESTAMP_P0_LO		0x2b
145 #define UB960_SR_TIMESTAMP_P1_HI		0x2c
146 #define UB960_SR_TIMESTAMP_P1_LO		0x2d
147 
148 #define UB960_SR_CSI_PORT_SEL			0x32
149 
150 #define UB960_TR_CSI_CTL			0x33
151 #define UB960_TR_CSI_CTL_CSI_CAL_EN		BIT(6)
152 #define UB960_TR_CSI_CTL_CSI_CONTS_CLOCK	BIT(1)
153 #define UB960_TR_CSI_CTL_CSI_ENABLE		BIT(0)
154 
155 #define UB960_TR_CSI_CTL2			0x34
156 #define UB960_TR_CSI_STS			0x35
157 #define UB960_TR_CSI_TX_ICR			0x36
158 
159 #define UB960_TR_CSI_TX_ISR			0x37
160 #define UB960_TR_CSI_TX_ISR_IS_CSI_SYNC_ERROR	BIT(3)
161 #define UB960_TR_CSI_TX_ISR_IS_CSI_PASS_ERROR	BIT(1)
162 
163 #define UB960_TR_CSI_TEST_CTL			0x38
164 #define UB960_TR_CSI_TEST_PATT_HI		0x39
165 #define UB960_TR_CSI_TEST_PATT_LO		0x3a
166 
167 #define UB960_XR_SFILTER_CFG			0x41
168 #define UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT	4
169 #define UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT	0
170 
171 #define UB960_XR_AEQ_CTL1			0x42
172 #define UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_FPD_CLK	BIT(6)
173 #define UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_ENCODING	BIT(5)
174 #define UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_PARITY	BIT(4)
175 #define UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_MASK        \
176 	(UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_FPD_CLK |  \
177 	 UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_ENCODING | \
178 	 UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_PARITY)
179 #define UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN	BIT(0)
180 
181 #define UB960_XR_AEQ_ERR_THOLD			0x43
182 
183 #define UB960_RR_BCC_ERR_CTL			0x46
184 #define UB960_RR_BCC_STATUS			0x47
185 #define UB960_RR_BCC_STATUS_SEQ_ERROR		BIT(5)
186 #define UB960_RR_BCC_STATUS_MASTER_ERR		BIT(4)
187 #define UB960_RR_BCC_STATUS_MASTER_TO		BIT(3)
188 #define UB960_RR_BCC_STATUS_SLAVE_ERR		BIT(2)
189 #define UB960_RR_BCC_STATUS_SLAVE_TO		BIT(1)
190 #define UB960_RR_BCC_STATUS_RESP_ERR		BIT(0)
191 #define UB960_RR_BCC_STATUS_ERROR_MASK                                    \
192 	(UB960_RR_BCC_STATUS_SEQ_ERROR | UB960_RR_BCC_STATUS_MASTER_ERR | \
193 	 UB960_RR_BCC_STATUS_MASTER_TO | UB960_RR_BCC_STATUS_SLAVE_ERR |  \
194 	 UB960_RR_BCC_STATUS_SLAVE_TO | UB960_RR_BCC_STATUS_RESP_ERR)
195 
196 #define UB960_RR_FPD3_CAP			0x4a
197 #define UB960_RR_RAW_EMBED_DTYPE		0x4b
198 #define UB960_RR_RAW_EMBED_DTYPE_LINES_SHIFT	6
199 
200 #define UB960_SR_FPD3_PORT_SEL			0x4c
201 
202 #define UB960_RR_RX_PORT_STS1			0x4d
203 #define UB960_RR_RX_PORT_STS1_BCC_CRC_ERROR	BIT(5)
204 #define UB960_RR_RX_PORT_STS1_LOCK_STS_CHG	BIT(4)
205 #define UB960_RR_RX_PORT_STS1_BCC_SEQ_ERROR	BIT(3)
206 #define UB960_RR_RX_PORT_STS1_PARITY_ERROR	BIT(2)
207 #define UB960_RR_RX_PORT_STS1_PORT_PASS		BIT(1)
208 #define UB960_RR_RX_PORT_STS1_LOCK_STS		BIT(0)
209 #define UB960_RR_RX_PORT_STS1_ERROR_MASK       \
210 	(UB960_RR_RX_PORT_STS1_BCC_CRC_ERROR | \
211 	 UB960_RR_RX_PORT_STS1_BCC_SEQ_ERROR | \
212 	 UB960_RR_RX_PORT_STS1_PARITY_ERROR)
213 
214 #define UB960_RR_RX_PORT_STS2			0x4e
215 #define UB960_RR_RX_PORT_STS2_LINE_LEN_UNSTABLE	BIT(7)
216 #define UB960_RR_RX_PORT_STS2_LINE_LEN_CHG	BIT(6)
217 #define UB960_RR_RX_PORT_STS2_FPD3_ENCODE_ERROR	BIT(5)
218 #define UB960_RR_RX_PORT_STS2_BUFFER_ERROR	BIT(4)
219 #define UB960_RR_RX_PORT_STS2_CSI_ERROR		BIT(3)
220 #define UB960_RR_RX_PORT_STS2_FREQ_STABLE	BIT(2)
221 #define UB960_RR_RX_PORT_STS2_CABLE_FAULT	BIT(1)
222 #define UB960_RR_RX_PORT_STS2_LINE_CNT_CHG	BIT(0)
223 #define UB960_RR_RX_PORT_STS2_ERROR_MASK       \
224 	UB960_RR_RX_PORT_STS2_BUFFER_ERROR
225 
226 #define UB960_RR_RX_FREQ_HIGH			0x4f
227 #define UB960_RR_RX_FREQ_LOW			0x50
228 #define UB960_RR_SENSOR_STS_0			0x51
229 #define UB960_RR_SENSOR_STS_1			0x52
230 #define UB960_RR_SENSOR_STS_2			0x53
231 #define UB960_RR_SENSOR_STS_3			0x54
232 #define UB960_RR_RX_PAR_ERR_HI			0x55
233 #define UB960_RR_RX_PAR_ERR_LO			0x56
234 #define UB960_RR_BIST_ERR_COUNT			0x57
235 
236 #define UB960_RR_BCC_CONFIG			0x58
237 #define UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH	BIT(6)
238 #define UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK	GENMASK(2, 0)
239 
240 #define UB960_RR_DATAPATH_CTL1			0x59
241 #define UB960_RR_DATAPATH_CTL2			0x5a
242 #define UB960_RR_SER_ID				0x5b
243 #define UB960_RR_SER_ALIAS_ID			0x5c
244 
245 /* For these two register sets: n < UB960_MAX_PORT_ALIASES */
246 #define UB960_RR_SLAVE_ID(n)			(0x5d + (n))
247 #define UB960_RR_SLAVE_ALIAS(n)			(0x65 + (n))
248 
249 #define UB960_RR_PORT_CONFIG			0x6d
250 #define UB960_RR_PORT_CONFIG_FPD3_MODE_MASK	GENMASK(1, 0)
251 
252 #define UB960_RR_BC_GPIO_CTL(n)			(0x6e + (n)) /* n < 2 */
253 #define UB960_RR_RAW10_ID			0x70
254 #define UB960_RR_RAW10_ID_VC_SHIFT		6
255 #define UB960_RR_RAW10_ID_DT_SHIFT		0
256 
257 #define UB960_RR_RAW12_ID			0x71
258 #define UB960_RR_CSI_VC_MAP			0x72
259 #define UB960_RR_CSI_VC_MAP_SHIFT(x)		((x) * 2)
260 
261 #define UB960_RR_LINE_COUNT_HI			0x73
262 #define UB960_RR_LINE_COUNT_LO			0x74
263 #define UB960_RR_LINE_LEN_1			0x75
264 #define UB960_RR_LINE_LEN_0			0x76
265 #define UB960_RR_FREQ_DET_CTL			0x77
266 #define UB960_RR_MAILBOX_1			0x78
267 #define UB960_RR_MAILBOX_2			0x79
268 
269 #define UB960_RR_CSI_RX_STS			0x7a
270 #define UB960_RR_CSI_RX_STS_LENGTH_ERR		BIT(3)
271 #define UB960_RR_CSI_RX_STS_CKSUM_ERR		BIT(2)
272 #define UB960_RR_CSI_RX_STS_ECC2_ERR		BIT(1)
273 #define UB960_RR_CSI_RX_STS_ECC1_ERR		BIT(0)
274 #define UB960_RR_CSI_RX_STS_ERROR_MASK                                    \
275 	(UB960_RR_CSI_RX_STS_LENGTH_ERR | UB960_RR_CSI_RX_STS_CKSUM_ERR | \
276 	 UB960_RR_CSI_RX_STS_ECC2_ERR | UB960_RR_CSI_RX_STS_ECC1_ERR)
277 
278 #define UB960_RR_CSI_ERR_COUNTER		0x7b
279 #define UB960_RR_PORT_CONFIG2			0x7c
280 #define UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_MASK GENMASK(7, 6)
281 #define UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_SHIFT 6
282 
283 #define UB960_RR_PORT_CONFIG2_LV_POL_LOW	BIT(1)
284 #define UB960_RR_PORT_CONFIG2_FV_POL_LOW	BIT(0)
285 
286 #define UB960_RR_PORT_PASS_CTL			0x7d
287 #define UB960_RR_SEN_INT_RISE_CTL		0x7e
288 #define UB960_RR_SEN_INT_FALL_CTL		0x7f
289 
290 #define UB960_SR_CSI_FRAME_COUNT_HI(n)		(0x90 + 8 * (n))
291 #define UB960_SR_CSI_FRAME_COUNT_LO(n)		(0x91 + 8 * (n))
292 #define UB960_SR_CSI_FRAME_ERR_COUNT_HI(n)	(0x92 + 8 * (n))
293 #define UB960_SR_CSI_FRAME_ERR_COUNT_LO(n)	(0x93 + 8 * (n))
294 #define UB960_SR_CSI_LINE_COUNT_HI(n)		(0x94 + 8 * (n))
295 #define UB960_SR_CSI_LINE_COUNT_LO(n)		(0x95 + 8 * (n))
296 #define UB960_SR_CSI_LINE_ERR_COUNT_HI(n)	(0x96 + 8 * (n))
297 #define UB960_SR_CSI_LINE_ERR_COUNT_LO(n)	(0x97 + 8 * (n))
298 
299 #define UB960_XR_REFCLK_FREQ			0xa5	/* UB960 */
300 
301 #define UB960_RR_VC_ID_MAP(x)			(0xa0 + (x)) /* UB9702 */
302 
303 #define UB960_SR_IND_ACC_CTL			0xb0
304 #define UB960_SR_IND_ACC_CTL_IA_AUTO_INC	BIT(1)
305 
306 #define UB960_SR_IND_ACC_ADDR			0xb1
307 #define UB960_SR_IND_ACC_DATA			0xb2
308 #define UB960_SR_BIST_CONTROL			0xb3
309 #define UB960_SR_MODE_IDX_STS			0xb8
310 #define UB960_SR_LINK_ERROR_COUNT		0xb9
311 #define UB960_SR_FPD3_ENC_CTL			0xba
312 #define UB960_SR_FV_MIN_TIME			0xbc
313 #define UB960_SR_GPIO_PD_CTL			0xbe
314 
315 #define UB960_SR_FPD_RATE_CFG			0xc2	/* UB9702 */
316 #define UB960_SR_CSI_PLL_DIV			0xc9	/* UB9702 */
317 
318 #define UB960_RR_PORT_DEBUG			0xd0
319 #define UB960_RR_AEQ_CTL2			0xd2
320 #define UB960_RR_AEQ_CTL2_SET_AEQ_FLOOR		BIT(2)
321 
322 #define UB960_RR_AEQ_STATUS			0xd3
323 #define UB960_RR_AEQ_STATUS_STATUS_2		GENMASK(5, 3)
324 #define UB960_RR_AEQ_STATUS_STATUS_1		GENMASK(2, 0)
325 
326 #define UB960_RR_AEQ_BYPASS			0xd4
327 #define UB960_RR_AEQ_BYPASS_EQ_STAGE1_VALUE_SHIFT	5
328 #define UB960_RR_AEQ_BYPASS_EQ_STAGE1_VALUE_MASK	GENMASK(7, 5)
329 #define UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_SHIFT	1
330 #define UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_MASK	GENMASK(3, 1)
331 #define UB960_RR_AEQ_BYPASS_ENABLE			BIT(0)
332 
333 #define UB960_RR_AEQ_MIN_MAX			0xd5
334 #define UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT	4
335 #define UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT	0
336 
337 #define UB960_RR_SFILTER_STS_0			0xd6
338 #define UB960_RR_SFILTER_STS_1			0xd7
339 #define UB960_RR_PORT_ICR_HI			0xd8
340 #define UB960_RR_PORT_ICR_LO			0xd9
341 #define UB960_RR_PORT_ISR_HI			0xda
342 #define UB960_RR_PORT_ISR_LO			0xdb
343 #define UB960_RR_FC_GPIO_STS			0xdc
344 #define UB960_RR_FC_GPIO_ICR			0xdd
345 #define UB960_RR_SEN_INT_RISE_STS		0xde
346 #define UB960_RR_SEN_INT_FALL_STS		0xdf
347 
348 #define UB960_RR_CHANNEL_MODE			0xe4	/* UB9702 */
349 
350 #define UB960_SR_FPD3_RX_ID(n)			(0xf0 + (n))
351 #define UB960_SR_FPD3_RX_ID_LEN			6
352 
353 #define UB960_SR_I2C_RX_ID(n)			(0xf8 + (n)) /* < UB960_FPD_RX_NPORTS */
354 
355 /* Indirect register blocks */
356 #define UB960_IND_TARGET_PAT_GEN		0x00
357 #define UB960_IND_TARGET_RX_ANA(n)		(0x01 + (n))
358 #define UB960_IND_TARGET_CSI_CSIPLL_REG_1	0x92	/* UB9702 */
359 #define UB960_IND_TARGET_CSI_ANA		0x07
360 
361 /* UB960_IR_PGEN_*: Indirect Registers for Test Pattern Generator */
362 
363 #define UB960_IR_PGEN_CTL			0x01
364 #define UB960_IR_PGEN_CTL_PGEN_ENABLE		BIT(0)
365 
366 #define UB960_IR_PGEN_CFG			0x02
367 #define UB960_IR_PGEN_CSI_DI			0x03
368 #define UB960_IR_PGEN_LINE_SIZE1		0x04
369 #define UB960_IR_PGEN_LINE_SIZE0		0x05
370 #define UB960_IR_PGEN_BAR_SIZE1			0x06
371 #define UB960_IR_PGEN_BAR_SIZE0			0x07
372 #define UB960_IR_PGEN_ACT_LPF1			0x08
373 #define UB960_IR_PGEN_ACT_LPF0			0x09
374 #define UB960_IR_PGEN_TOT_LPF1			0x0a
375 #define UB960_IR_PGEN_TOT_LPF0			0x0b
376 #define UB960_IR_PGEN_LINE_PD1			0x0c
377 #define UB960_IR_PGEN_LINE_PD0			0x0d
378 #define UB960_IR_PGEN_VBP			0x0e
379 #define UB960_IR_PGEN_VFP			0x0f
380 #define UB960_IR_PGEN_COLOR(n)			(0x10 + (n)) /* n < 15 */
381 
382 #define UB960_IR_RX_ANA_STROBE_SET_CLK		0x08
383 #define UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY	BIT(3)
384 #define UB960_IR_RX_ANA_STROBE_SET_CLK_DELAY_MASK	GENMASK(2, 0)
385 
386 #define UB960_IR_RX_ANA_STROBE_SET_DATA		0x09
387 #define UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY	BIT(3)
388 #define UB960_IR_RX_ANA_STROBE_SET_DATA_DELAY_MASK	GENMASK(2, 0)
389 
390 /* EQ related */
391 
392 #define UB960_MIN_AEQ_STROBE_POS -7
393 #define UB960_MAX_AEQ_STROBE_POS  7
394 
395 #define UB960_MANUAL_STROBE_EXTRA_DELAY 6
396 
397 #define UB960_MIN_MANUAL_STROBE_POS -(7 + UB960_MANUAL_STROBE_EXTRA_DELAY)
398 #define UB960_MAX_MANUAL_STROBE_POS  (7 + UB960_MANUAL_STROBE_EXTRA_DELAY)
399 #define UB960_NUM_MANUAL_STROBE_POS  (UB960_MAX_MANUAL_STROBE_POS - UB960_MIN_MANUAL_STROBE_POS + 1)
400 
401 #define UB960_MIN_EQ_LEVEL  0
402 #define UB960_MAX_EQ_LEVEL  14
403 #define UB960_NUM_EQ_LEVELS (UB960_MAX_EQ_LEVEL - UB960_MIN_EQ_LEVEL + 1)
404 
/* Static per-model capabilities, selected via the I2C device match data. */
struct ub960_hw_data {
	const char *model;	/* Model name used in log messages */
	u8 num_rxports;		/* Number of FPD-Link RX (input) ports */
	u8 num_txports;		/* Number of CSI-2 TX (output) ports */
	bool is_ub9702;		/* DS90UB9702 variant (uses UB9702-only regs) */
	bool is_fpdlink4;	/* Supports FPD-Link IV links */
};
412 
/*
 * Input mode of an FPD-Link RX port. The numeric values are written to
 * hardware configuration registers, so they must not be reordered.
 */
enum ub960_rxport_mode {
	RXPORT_MODE_RAW10 = 0,		/* RAW10 parallel input */
	RXPORT_MODE_RAW12_HF = 1,	/* RAW12, high frequency */
	RXPORT_MODE_RAW12_LF = 2,	/* RAW12, low frequency */
	RXPORT_MODE_CSI2_SYNC = 3,	/* CSI-2, synchronous mode */
	RXPORT_MODE_CSI2_NONSYNC = 4,	/* CSI-2, non-synchronous mode */
	RXPORT_MODE_LAST = RXPORT_MODE_CSI2_NONSYNC,
};
421 
/* Clock-and-data-recovery mode of an RX port: FPD-Link III or IV. */
enum ub960_rxport_cdr {
	RXPORT_CDR_FPD3 = 0,
	RXPORT_CDR_FPD4 = 1,
	RXPORT_CDR_LAST = RXPORT_CDR_FPD4,
};
427 
/* Per-FPD-Link-RX-port state. */
struct ub960_rxport {
	struct ub960_data      *priv;
	u8                      nport;	/* RX port number, and index in priv->rxport[] */

	/* The remote video source feeding this port */
	struct {
		struct v4l2_subdev *sd;		/* Source subdev */
		u16 pad;			/* Source pad connected to us */
		struct fwnode_handle *ep_fwnode; /* Source endpoint fwnode */
	} source;

	/* Serializer */
	struct {
		struct fwnode_handle *fwnode;
		struct i2c_client *client;
		unsigned short alias; /* I2C alias (lower 7 bits) */
		struct ds90ub9xx_platform_data pdata;
	} ser;

	enum ub960_rxport_mode  rx_mode;
	enum ub960_rxport_cdr	cdr_mode;

	u8			lv_fv_pol;	/* LV and FV polarities */

	struct regulator	*vpoc;		/* Power-over-coax supply */

	/* EQ settings */
	struct {
		bool manual_eq;		/* Manual EQ instead of adaptive EQ */

		s8 strobe_pos;

		/* Exactly one of these is valid, depending on manual_eq */
		union {
			struct {
				u8 eq_level_min;
				u8 eq_level_max;
			} aeq;

			struct {
				u8 eq_level;
			} manual;
		};
	} eq;

	/* I2C-ATR alias slots; NULL entries are free */
	const struct i2c_client *aliased_clients[UB960_MAX_PORT_ALIASES];
};
473 
/* Async connection wrapper linking a v4l2 async connection to its RX port. */
struct ub960_asd {
	struct v4l2_async_connection base;
	struct ub960_rxport *rxport;
};
478 
/* Get the ub960 wrapper from the embedded generic async connection. */
static inline struct ub960_asd *to_ub960_asd(struct v4l2_async_connection *asd)
{
	return container_of(asd, struct ub960_asd, base);
}
483 
/* Per-CSI-2-TX-port state. */
struct ub960_txport {
	struct ub960_data      *priv;
	u8                      nport;	/* TX port number, and index in priv->txport[] */

	u32 num_data_lanes;		/* CSI-2 data lanes in use */
	bool non_continous_clk;		/* Non-continuous CSI-2 clock (sic) */
};
491 
/* Driver state for one deserializer instance. */
struct ub960_data {
	const struct ub960_hw_data	*hw_data;
	struct i2c_client	*client; /* for shared local registers */
	struct regmap		*regmap;

	/* lock for register access */
	struct mutex		reg_lock;

	struct clk		*refclk;

	struct regulator	*vddio;

	struct gpio_desc	*pd_gpio;	/* Power-down GPIO */
	struct delayed_work	poll_work;	/* Periodic status polling */
	struct ub960_rxport	*rxports[UB960_MAX_RX_NPORTS];
	struct ub960_txport	*txports[UB960_MAX_TX_NPORTS];

	struct v4l2_subdev	sd;
	/* Sink pads first (RX ports), then source pads (TX ports) */
	struct media_pad	pads[UB960_MAX_NPORTS];

	struct v4l2_ctrl_handler   ctrl_handler;
	struct v4l2_async_notifier notifier;

	u32 tx_data_rate;		/* Nominal data rate (Gb/s) */
	s64 tx_link_freq[1];

	struct i2c_atr *atr;		/* I2C address translator */

	/* Cached register-page selections; guarded by reg_lock */
	struct {
		u8 rxport;
		u8 txport;
		u8 indirect_target;
	} reg_current;

	bool streaming;

	/* FWD_CTL1 value saved while streaming, for restore on stop */
	u8 stored_fwd_ctl;

	u64 stream_enable_mask[UB960_MAX_NPORTS];

	/* These are common to all ports */
	struct {
		bool manual;	/* Manual strobe position */

		s8 min;
		s8 max;
	} strobe;
};
540 
/* Get the driver state from the embedded V4L2 subdev. */
static inline struct ub960_data *sd_to_ub960(struct v4l2_subdev *sd)
{
	return container_of(sd, struct ub960_data, sd);
}
545 
/* Pads 0 .. num_rxports-1 are the FPD-Link RX (sink) pads. */
static inline bool ub960_pad_is_sink(struct ub960_data *priv, u32 pad)
{
	return pad < priv->hw_data->num_rxports;
}
550 
/* Pads num_rxports .. num_rxports+num_txports-1 are CSI-2 TX (source) pads. */
static inline bool ub960_pad_is_source(struct ub960_data *priv, u32 pad)
{
	return pad >= priv->hw_data->num_rxports;
}
555 
556 static inline unsigned int ub960_pad_to_port(struct ub960_data *priv, u32 pad)
557 {
558 	if (ub960_pad_is_sink(priv, pad))
559 		return pad;
560 	else
561 		return pad - priv->hw_data->num_rxports;
562 }
563 
/* Description of one supported media bus format. */
struct ub960_format_info {
	u32 code;	/* MEDIA_BUS_FMT_* code */
	u32 bpp;	/* Bits per pixel */
	u8 datatype;	/* CSI-2 data type (MIPI_CSI2_DT_*) */
	bool meta;	/* Metadata format */
};
570 
/*
 * Supported media bus formats: 16-bit YUV 4:2:2 variants and 12-bit
 * Bayer raw, each mapped to its CSI-2 data type.
 */
static const struct ub960_format_info ub960_formats[] = {
	{ .code = MEDIA_BUS_FMT_YUYV8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
	{ .code = MEDIA_BUS_FMT_UYVY8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
	{ .code = MEDIA_BUS_FMT_VYUY8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
	{ .code = MEDIA_BUS_FMT_YVYU8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },

	{ .code = MEDIA_BUS_FMT_SBGGR12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
	{ .code = MEDIA_BUS_FMT_SGBRG12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
	{ .code = MEDIA_BUS_FMT_SGRBG12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
	{ .code = MEDIA_BUS_FMT_SRGGB12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
};
582 
583 static const struct ub960_format_info *ub960_find_format(u32 code)
584 {
585 	unsigned int i;
586 
587 	for (i = 0; i < ARRAY_SIZE(ub960_formats); i++) {
588 		if (ub960_formats[i].code == code)
589 			return &ub960_formats[i];
590 	}
591 
592 	return NULL;
593 }
594 
595 /* -----------------------------------------------------------------------------
596  * Basic device access
597  */
598 
599 static int ub960_read(struct ub960_data *priv, u8 reg, u8 *val)
600 {
601 	struct device *dev = &priv->client->dev;
602 	unsigned int v;
603 	int ret;
604 
605 	mutex_lock(&priv->reg_lock);
606 
607 	ret = regmap_read(priv->regmap, reg, &v);
608 	if (ret) {
609 		dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
610 			__func__, reg, ret);
611 		goto out_unlock;
612 	}
613 
614 	*val = v;
615 
616 out_unlock:
617 	mutex_unlock(&priv->reg_lock);
618 
619 	return ret;
620 }
621 
622 static int ub960_write(struct ub960_data *priv, u8 reg, u8 val)
623 {
624 	struct device *dev = &priv->client->dev;
625 	int ret;
626 
627 	mutex_lock(&priv->reg_lock);
628 
629 	ret = regmap_write(priv->regmap, reg, val);
630 	if (ret)
631 		dev_err(dev, "%s: cannot write register 0x%02x (%d)!\n",
632 			__func__, reg, ret);
633 
634 	mutex_unlock(&priv->reg_lock);
635 
636 	return ret;
637 }
638 
639 static int ub960_update_bits(struct ub960_data *priv, u8 reg, u8 mask, u8 val)
640 {
641 	struct device *dev = &priv->client->dev;
642 	int ret;
643 
644 	mutex_lock(&priv->reg_lock);
645 
646 	ret = regmap_update_bits(priv->regmap, reg, mask, val);
647 	if (ret)
648 		dev_err(dev, "%s: cannot update register 0x%02x (%d)!\n",
649 			__func__, reg, ret);
650 
651 	mutex_unlock(&priv->reg_lock);
652 
653 	return ret;
654 }
655 
656 static int ub960_read16(struct ub960_data *priv, u8 reg, u16 *val)
657 {
658 	struct device *dev = &priv->client->dev;
659 	__be16 __v;
660 	int ret;
661 
662 	mutex_lock(&priv->reg_lock);
663 
664 	ret = regmap_bulk_read(priv->regmap, reg, &__v, sizeof(__v));
665 	if (ret) {
666 		dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
667 			__func__, reg, ret);
668 		goto out_unlock;
669 	}
670 
671 	*val = be16_to_cpu(__v);
672 
673 out_unlock:
674 	mutex_unlock(&priv->reg_lock);
675 
676 	return ret;
677 }
678 
/*
 * Select which RX port the paged UB960_RR_* registers refer to.
 *
 * FPD3_PORT_SEL holds the read-back port in the high nibble and a
 * per-port write-enable bit in the low nibble, so both are set to the
 * same port here. The selection is cached in reg_current to avoid
 * redundant I2C traffic; reg_lock must be held by the caller.
 */
static int ub960_rxport_select(struct ub960_data *priv, u8 nport)
{
	struct device *dev = &priv->client->dev;
	int ret;

	lockdep_assert_held(&priv->reg_lock);

	if (priv->reg_current.rxport == nport)
		return 0;

	ret = regmap_write(priv->regmap, UB960_SR_FPD3_PORT_SEL,
			   (nport << 4) | BIT(nport));
	if (ret) {
		dev_err(dev, "%s: cannot select rxport %d (%d)!\n", __func__,
			nport, ret);
		return ret;
	}

	priv->reg_current.rxport = nport;

	return 0;
}
701 
702 static int ub960_rxport_read(struct ub960_data *priv, u8 nport, u8 reg, u8 *val)
703 {
704 	struct device *dev = &priv->client->dev;
705 	unsigned int v;
706 	int ret;
707 
708 	mutex_lock(&priv->reg_lock);
709 
710 	ret = ub960_rxport_select(priv, nport);
711 	if (ret)
712 		goto out_unlock;
713 
714 	ret = regmap_read(priv->regmap, reg, &v);
715 	if (ret) {
716 		dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
717 			__func__, reg, ret);
718 		goto out_unlock;
719 	}
720 
721 	*val = v;
722 
723 out_unlock:
724 	mutex_unlock(&priv->reg_lock);
725 
726 	return ret;
727 }
728 
729 static int ub960_rxport_write(struct ub960_data *priv, u8 nport, u8 reg, u8 val)
730 {
731 	struct device *dev = &priv->client->dev;
732 	int ret;
733 
734 	mutex_lock(&priv->reg_lock);
735 
736 	ret = ub960_rxport_select(priv, nport);
737 	if (ret)
738 		goto out_unlock;
739 
740 	ret = regmap_write(priv->regmap, reg, val);
741 	if (ret)
742 		dev_err(dev, "%s: cannot write register 0x%02x (%d)!\n",
743 			__func__, reg, ret);
744 
745 out_unlock:
746 	mutex_unlock(&priv->reg_lock);
747 
748 	return ret;
749 }
750 
751 static int ub960_rxport_update_bits(struct ub960_data *priv, u8 nport, u8 reg,
752 				    u8 mask, u8 val)
753 {
754 	struct device *dev = &priv->client->dev;
755 	int ret;
756 
757 	mutex_lock(&priv->reg_lock);
758 
759 	ret = ub960_rxport_select(priv, nport);
760 	if (ret)
761 		goto out_unlock;
762 
763 	ret = regmap_update_bits(priv->regmap, reg, mask, val);
764 	if (ret)
765 		dev_err(dev, "%s: cannot update register 0x%02x (%d)!\n",
766 			__func__, reg, ret);
767 
768 out_unlock:
769 	mutex_unlock(&priv->reg_lock);
770 
771 	return ret;
772 }
773 
/*
 * Read a pair of consecutive per-RX-port paged registers as one
 * big-endian 16-bit value.
 */
static int ub960_rxport_read16(struct ub960_data *priv, u8 nport, u8 reg,
			       u16 *val)
{
	struct device *dev = &priv->client->dev;
	__be16 __v;
	int ret;

	mutex_lock(&priv->reg_lock);

	ret = ub960_rxport_select(priv, nport);
	if (ret)
		goto out_unlock;

	ret = regmap_bulk_read(priv->regmap, reg, &__v, sizeof(__v));
	if (ret) {
		dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
			__func__, reg, ret);
		goto out_unlock;
	}

	*val = be16_to_cpu(__v);

out_unlock:
	mutex_unlock(&priv->reg_lock);

	return ret;
}
801 
/*
 * Select which TX port the paged UB960_TR_* registers refer to.
 * Mirrors ub960_rxport_select(): selection is cached in reg_current and
 * reg_lock must be held by the caller.
 */
static int ub960_txport_select(struct ub960_data *priv, u8 nport)
{
	struct device *dev = &priv->client->dev;
	int ret;

	lockdep_assert_held(&priv->reg_lock);

	if (priv->reg_current.txport == nport)
		return 0;

	ret = regmap_write(priv->regmap, UB960_SR_CSI_PORT_SEL,
			   (nport << 4) | BIT(nport));
	if (ret) {
		dev_err(dev, "%s: cannot select tx port %d (%d)!\n", __func__,
			nport, ret);
		return ret;
	}

	priv->reg_current.txport = nport;

	return 0;
}
824 
825 static int ub960_txport_read(struct ub960_data *priv, u8 nport, u8 reg, u8 *val)
826 {
827 	struct device *dev = &priv->client->dev;
828 	unsigned int v;
829 	int ret;
830 
831 	mutex_lock(&priv->reg_lock);
832 
833 	ret = ub960_txport_select(priv, nport);
834 	if (ret)
835 		goto out_unlock;
836 
837 	ret = regmap_read(priv->regmap, reg, &v);
838 	if (ret) {
839 		dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
840 			__func__, reg, ret);
841 		goto out_unlock;
842 	}
843 
844 	*val = v;
845 
846 out_unlock:
847 	mutex_unlock(&priv->reg_lock);
848 
849 	return ret;
850 }
851 
/* Write an 8-bit per-TX-port paged register. */
static int ub960_txport_write(struct ub960_data *priv, u8 nport, u8 reg, u8 val)
{
	struct device *dev = &priv->client->dev;
	int ret;

	mutex_lock(&priv->reg_lock);

	ret = ub960_txport_select(priv, nport);
	if (ret)
		goto out_unlock;

	ret = regmap_write(priv->regmap, reg, val);
	if (ret)
		dev_err(dev, "%s: cannot write register 0x%02x (%d)!\n",
			__func__, reg, ret);

out_unlock:
	mutex_unlock(&priv->reg_lock);

	return ret;
}
873 
/* Read-modify-write masked bits of a per-TX-port paged register. */
static int ub960_txport_update_bits(struct ub960_data *priv, u8 nport, u8 reg,
				    u8 mask, u8 val)
{
	struct device *dev = &priv->client->dev;
	int ret;

	mutex_lock(&priv->reg_lock);

	ret = ub960_txport_select(priv, nport);
	if (ret)
		goto out_unlock;

	ret = regmap_update_bits(priv->regmap, reg, mask, val);
	if (ret)
		dev_err(dev, "%s: cannot update register 0x%02x (%d)!\n",
			__func__, reg, ret);

out_unlock:
	mutex_unlock(&priv->reg_lock);

	return ret;
}
896 
/*
 * Select the indirect register block accessed via the
 * IND_ACC_ADDR/IND_ACC_DATA pair. The block select lives in bits [4:2]
 * of IND_ACC_CTL (hence the << 2). The selection is cached in
 * reg_current; reg_lock must be held by the caller.
 */
static int ub960_select_ind_reg_block(struct ub960_data *priv, u8 block)
{
	struct device *dev = &priv->client->dev;
	int ret;

	lockdep_assert_held(&priv->reg_lock);

	if (priv->reg_current.indirect_target == block)
		return 0;

	ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_CTL, block << 2);
	if (ret) {
		dev_err(dev, "%s: cannot select indirect target %u (%d)!\n",
			__func__, block, ret);
		return ret;
	}

	priv->reg_current.indirect_target = block;

	return 0;
}
918 
919 static int ub960_read_ind(struct ub960_data *priv, u8 block, u8 reg, u8 *val)
920 {
921 	struct device *dev = &priv->client->dev;
922 	unsigned int v;
923 	int ret;
924 
925 	mutex_lock(&priv->reg_lock);
926 
927 	ret = ub960_select_ind_reg_block(priv, block);
928 	if (ret)
929 		goto out_unlock;
930 
931 	ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_ADDR, reg);
932 	if (ret) {
933 		dev_err(dev,
934 			"Write to IND_ACC_ADDR failed when reading %u:%x02x: %d\n",
935 			block, reg, ret);
936 		goto out_unlock;
937 	}
938 
939 	ret = regmap_read(priv->regmap, UB960_SR_IND_ACC_DATA, &v);
940 	if (ret) {
941 		dev_err(dev,
942 			"Write to IND_ACC_DATA failed when reading %u:%x02x: %d\n",
943 			block, reg, ret);
944 		goto out_unlock;
945 	}
946 
947 	*val = v;
948 
949 out_unlock:
950 	mutex_unlock(&priv->reg_lock);
951 
952 	return ret;
953 }
954 
955 static int ub960_write_ind(struct ub960_data *priv, u8 block, u8 reg, u8 val)
956 {
957 	struct device *dev = &priv->client->dev;
958 	int ret;
959 
960 	mutex_lock(&priv->reg_lock);
961 
962 	ret = ub960_select_ind_reg_block(priv, block);
963 	if (ret)
964 		goto out_unlock;
965 
966 	ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_ADDR, reg);
967 	if (ret) {
968 		dev_err(dev,
969 			"Write to IND_ACC_ADDR failed when writing %u:%x02x: %d\n",
970 			block, reg, ret);
971 		goto out_unlock;
972 	}
973 
974 	ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_DATA, val);
975 	if (ret) {
976 		dev_err(dev,
977 			"Write to IND_ACC_DATA failed when writing %u:%x02x: %d\n",
978 			block, reg, ret);
979 		goto out_unlock;
980 	}
981 
982 out_unlock:
983 	mutex_unlock(&priv->reg_lock);
984 
985 	return ret;
986 }
987 
988 static int ub960_ind_update_bits(struct ub960_data *priv, u8 block, u8 reg,
989 				 u8 mask, u8 val)
990 {
991 	struct device *dev = &priv->client->dev;
992 	int ret;
993 
994 	mutex_lock(&priv->reg_lock);
995 
996 	ret = ub960_select_ind_reg_block(priv, block);
997 	if (ret)
998 		goto out_unlock;
999 
1000 	ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_ADDR, reg);
1001 	if (ret) {
1002 		dev_err(dev,
1003 			"Write to IND_ACC_ADDR failed when updating %u:%x02x: %d\n",
1004 			block, reg, ret);
1005 		goto out_unlock;
1006 	}
1007 
1008 	ret = regmap_update_bits(priv->regmap, UB960_SR_IND_ACC_DATA, mask,
1009 				 val);
1010 	if (ret) {
1011 		dev_err(dev,
1012 			"Write to IND_ACC_DATA failed when updating %u:%x02x: %d\n",
1013 			block, reg, ret);
1014 		goto out_unlock;
1015 	}
1016 
1017 out_unlock:
1018 	mutex_unlock(&priv->reg_lock);
1019 
1020 	return ret;
1021 }
1022 
1023 /* -----------------------------------------------------------------------------
1024  * I2C-ATR (address translator)
1025  */
1026 
1027 static int ub960_atr_attach_client(struct i2c_atr *atr, u32 chan_id,
1028 				   const struct i2c_client *client, u16 alias)
1029 {
1030 	struct ub960_data *priv = i2c_atr_get_driver_data(atr);
1031 	struct ub960_rxport *rxport = priv->rxports[chan_id];
1032 	struct device *dev = &priv->client->dev;
1033 	unsigned int reg_idx;
1034 
1035 	for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_clients); reg_idx++) {
1036 		if (!rxport->aliased_clients[reg_idx])
1037 			break;
1038 	}
1039 
1040 	if (reg_idx == ARRAY_SIZE(rxport->aliased_clients)) {
1041 		dev_err(dev, "rx%u: alias pool exhausted\n", rxport->nport);
1042 		return -EADDRNOTAVAIL;
1043 	}
1044 
1045 	rxport->aliased_clients[reg_idx] = client;
1046 
1047 	ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ID(reg_idx),
1048 			   client->addr << 1);
1049 	ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ALIAS(reg_idx),
1050 			   alias << 1);
1051 
1052 	dev_dbg(dev, "rx%u: client 0x%02x assigned alias 0x%02x at slot %u\n",
1053 		rxport->nport, client->addr, alias, reg_idx);
1054 
1055 	return 0;
1056 }
1057 
1058 static void ub960_atr_detach_client(struct i2c_atr *atr, u32 chan_id,
1059 				    const struct i2c_client *client)
1060 {
1061 	struct ub960_data *priv = i2c_atr_get_driver_data(atr);
1062 	struct ub960_rxport *rxport = priv->rxports[chan_id];
1063 	struct device *dev = &priv->client->dev;
1064 	unsigned int reg_idx;
1065 
1066 	for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_clients); reg_idx++) {
1067 		if (rxport->aliased_clients[reg_idx] == client)
1068 			break;
1069 	}
1070 
1071 	if (reg_idx == ARRAY_SIZE(rxport->aliased_clients)) {
1072 		dev_err(dev, "rx%u: client 0x%02x is not mapped!\n",
1073 			rxport->nport, client->addr);
1074 		return;
1075 	}
1076 
1077 	rxport->aliased_clients[reg_idx] = NULL;
1078 
1079 	ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ALIAS(reg_idx), 0);
1080 
1081 	dev_dbg(dev, "rx%u: client 0x%02x released at slot %u\n", rxport->nport,
1082 		client->addr, reg_idx);
1083 }
1084 
/* Callbacks used by the i2c-atr core to map/unmap remote i2c clients. */
static const struct i2c_atr_ops ub960_atr_ops = {
	.attach_client = ub960_atr_attach_client,
	.detach_client = ub960_atr_detach_client,
};
1089 
1090 static int ub960_init_atr(struct ub960_data *priv)
1091 {
1092 	struct device *dev = &priv->client->dev;
1093 	struct i2c_adapter *parent_adap = priv->client->adapter;
1094 
1095 	priv->atr = i2c_atr_new(parent_adap, dev, &ub960_atr_ops,
1096 				priv->hw_data->num_rxports);
1097 	if (IS_ERR(priv->atr))
1098 		return PTR_ERR(priv->atr);
1099 
1100 	i2c_atr_set_driver_data(priv->atr, priv);
1101 
1102 	return 0;
1103 }
1104 
/* Tear down the address translator created by ub960_init_atr(). */
static void ub960_uninit_atr(struct ub960_data *priv)
{
	i2c_atr_delete(priv->atr);
	priv->atr = NULL;
}
1110 
1111 /* -----------------------------------------------------------------------------
1112  * TX ports
1113  */
1114 
1115 static int ub960_parse_dt_txport(struct ub960_data *priv,
1116 				 struct fwnode_handle *ep_fwnode,
1117 				 u8 nport)
1118 {
1119 	struct device *dev = &priv->client->dev;
1120 	struct v4l2_fwnode_endpoint vep = {};
1121 	struct ub960_txport *txport;
1122 	int ret;
1123 
1124 	txport = kzalloc(sizeof(*txport), GFP_KERNEL);
1125 	if (!txport)
1126 		return -ENOMEM;
1127 
1128 	txport->priv = priv;
1129 	txport->nport = nport;
1130 
1131 	vep.bus_type = V4L2_MBUS_CSI2_DPHY;
1132 	ret = v4l2_fwnode_endpoint_alloc_parse(ep_fwnode, &vep);
1133 	if (ret) {
1134 		dev_err(dev, "tx%u: failed to parse endpoint data\n", nport);
1135 		goto err_free_txport;
1136 	}
1137 
1138 	txport->non_continous_clk = vep.bus.mipi_csi2.flags &
1139 				    V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK;
1140 
1141 	txport->num_data_lanes = vep.bus.mipi_csi2.num_data_lanes;
1142 
1143 	if (vep.nr_of_link_frequencies != 1) {
1144 		ret = -EINVAL;
1145 		goto err_free_vep;
1146 	}
1147 
1148 	priv->tx_link_freq[0] = vep.link_frequencies[0];
1149 	priv->tx_data_rate = priv->tx_link_freq[0] * 2;
1150 
1151 	if (priv->tx_data_rate != MHZ(1600) &&
1152 	    priv->tx_data_rate != MHZ(1200) &&
1153 	    priv->tx_data_rate != MHZ(800) &&
1154 	    priv->tx_data_rate != MHZ(400)) {
1155 		dev_err(dev, "tx%u: invalid 'link-frequencies' value\n", nport);
1156 		ret = -EINVAL;
1157 		goto err_free_vep;
1158 	}
1159 
1160 	v4l2_fwnode_endpoint_free(&vep);
1161 
1162 	priv->txports[nport] = txport;
1163 
1164 	return 0;
1165 
1166 err_free_vep:
1167 	v4l2_fwnode_endpoint_free(&vep);
1168 err_free_txport:
1169 	kfree(txport);
1170 
1171 	return ret;
1172 }
1173 
1174 static void ub960_csi_handle_events(struct ub960_data *priv, u8 nport)
1175 {
1176 	struct device *dev = &priv->client->dev;
1177 	u8 csi_tx_isr;
1178 	int ret;
1179 
1180 	ret = ub960_txport_read(priv, nport, UB960_TR_CSI_TX_ISR, &csi_tx_isr);
1181 	if (ret)
1182 		return;
1183 
1184 	if (csi_tx_isr & UB960_TR_CSI_TX_ISR_IS_CSI_SYNC_ERROR)
1185 		dev_warn(dev, "TX%u: CSI_SYNC_ERROR\n", nport);
1186 
1187 	if (csi_tx_isr & UB960_TR_CSI_TX_ISR_IS_CSI_PASS_ERROR)
1188 		dev_warn(dev, "TX%u: CSI_PASS_ERROR\n", nport);
1189 }
1190 
1191 /* -----------------------------------------------------------------------------
1192  * RX ports
1193  */
1194 
1195 static int ub960_rxport_enable_vpocs(struct ub960_data *priv)
1196 {
1197 	unsigned int nport;
1198 	int ret;
1199 
1200 	for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
1201 		struct ub960_rxport *rxport = priv->rxports[nport];
1202 
1203 		if (!rxport || !rxport->vpoc)
1204 			continue;
1205 
1206 		ret = regulator_enable(rxport->vpoc);
1207 		if (ret)
1208 			goto err_disable_vpocs;
1209 	}
1210 
1211 	return 0;
1212 
1213 err_disable_vpocs:
1214 	while (nport--) {
1215 		struct ub960_rxport *rxport = priv->rxports[nport];
1216 
1217 		if (!rxport || !rxport->vpoc)
1218 			continue;
1219 
1220 		regulator_disable(rxport->vpoc);
1221 	}
1222 
1223 	return ret;
1224 }
1225 
1226 static void ub960_rxport_disable_vpocs(struct ub960_data *priv)
1227 {
1228 	unsigned int nport;
1229 
1230 	for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
1231 		struct ub960_rxport *rxport = priv->rxports[nport];
1232 
1233 		if (!rxport || !rxport->vpoc)
1234 			continue;
1235 
1236 		regulator_disable(rxport->vpoc);
1237 	}
1238 }
1239 
/*
 * Clear the latched error state of one RX port. The status registers are
 * read-to-clear; the values are deliberately discarded — only the clearing
 * side effect of the reads is wanted.
 */
static void ub960_rxport_clear_errors(struct ub960_data *priv,
				      unsigned int nport)
{
	u8 v;

	ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1, &v);
	ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2, &v);
	ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS, &v);
	ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS, &v);

	ub960_rxport_read(priv, nport, UB960_RR_RX_PAR_ERR_HI, &v);
	ub960_rxport_read(priv, nport, UB960_RR_RX_PAR_ERR_LO, &v);

	ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER, &v);
}
1255 
1256 static void ub960_clear_rx_errors(struct ub960_data *priv)
1257 {
1258 	unsigned int nport;
1259 
1260 	for (nport = 0; nport < priv->hw_data->num_rxports; nport++)
1261 		ub960_rxport_clear_errors(priv, nport);
1262 }
1263 
1264 static int ub960_rxport_get_strobe_pos(struct ub960_data *priv,
1265 				       unsigned int nport, s8 *strobe_pos)
1266 {
1267 	u8 v;
1268 	u8 clk_delay, data_delay;
1269 	int ret;
1270 
1271 	ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
1272 		       UB960_IR_RX_ANA_STROBE_SET_CLK, &v);
1273 
1274 	clk_delay = (v & UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY) ?
1275 			    0 : UB960_MANUAL_STROBE_EXTRA_DELAY;
1276 
1277 	ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
1278 		       UB960_IR_RX_ANA_STROBE_SET_DATA, &v);
1279 
1280 	data_delay = (v & UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY) ?
1281 			     0 : UB960_MANUAL_STROBE_EXTRA_DELAY;
1282 
1283 	ret = ub960_rxport_read(priv, nport, UB960_RR_SFILTER_STS_0, &v);
1284 	if (ret)
1285 		return ret;
1286 
1287 	clk_delay += v & UB960_IR_RX_ANA_STROBE_SET_CLK_DELAY_MASK;
1288 
1289 	ub960_rxport_read(priv, nport, UB960_RR_SFILTER_STS_1, &v);
1290 	if (ret)
1291 		return ret;
1292 
1293 	data_delay += v & UB960_IR_RX_ANA_STROBE_SET_DATA_DELAY_MASK;
1294 
1295 	*strobe_pos = data_delay - clk_delay;
1296 
1297 	return 0;
1298 }
1299 
/*
 * Program a manual strobe position for an RX port. Positions within the
 * normal AEQ range are encoded with the NO_EXTRA_DELAY flag set; positions
 * outside it drop the flag and subtract the fixed extra delay instead.
 * Negative positions go into the clock delay, positive ones into the data
 * delay.
 */
static void ub960_rxport_set_strobe_pos(struct ub960_data *priv,
					unsigned int nport, s8 strobe_pos)
{
	u8 clk_delay, data_delay;

	clk_delay = UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY;
	data_delay = UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY;

	if (strobe_pos < UB960_MIN_AEQ_STROBE_POS)
		clk_delay = abs(strobe_pos) - UB960_MANUAL_STROBE_EXTRA_DELAY;
	else if (strobe_pos > UB960_MAX_AEQ_STROBE_POS)
		data_delay = strobe_pos - UB960_MANUAL_STROBE_EXTRA_DELAY;
	else if (strobe_pos < 0)
		clk_delay = abs(strobe_pos) | UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY;
	else if (strobe_pos > 0)
		data_delay = strobe_pos | UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY;

	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB960_IR_RX_ANA_STROBE_SET_CLK, clk_delay);

	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB960_IR_RX_ANA_STROBE_SET_DATA, data_delay);
}
1323 
/*
 * Program the global (all-port) strobe filter min/max range. The signed
 * positions are rebased to zero before being packed into SFILTER_CFG.
 */
static void ub960_rxport_set_strobe_range(struct ub960_data *priv,
					  s8 strobe_min, s8 strobe_max)
{
	/* Convert the signed strobe pos to positive zero based value */
	strobe_min -= UB960_MIN_AEQ_STROBE_POS;
	strobe_max -= UB960_MIN_AEQ_STROBE_POS;

	ub960_write(priv, UB960_XR_SFILTER_CFG,
		    ((u8)strobe_min << UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT) |
		    ((u8)strobe_max << UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT));
}
1335 
1336 static int ub960_rxport_get_eq_level(struct ub960_data *priv,
1337 				     unsigned int nport, u8 *eq_level)
1338 {
1339 	int ret;
1340 	u8 v;
1341 
1342 	ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_STATUS, &v);
1343 	if (ret)
1344 		return ret;
1345 
1346 	*eq_level = (v & UB960_RR_AEQ_STATUS_STATUS_1) +
1347 		    (v & UB960_RR_AEQ_STATUS_STATUS_2);
1348 
1349 	return 0;
1350 }
1351 
1352 static void ub960_rxport_set_eq_level(struct ub960_data *priv,
1353 				      unsigned int nport, u8 eq_level)
1354 {
1355 	u8 eq_stage_1_select_value, eq_stage_2_select_value;
1356 	const unsigned int eq_stage_max = 7;
1357 	u8 v;
1358 
1359 	if (eq_level <= eq_stage_max) {
1360 		eq_stage_1_select_value = eq_level;
1361 		eq_stage_2_select_value = 0;
1362 	} else {
1363 		eq_stage_1_select_value = eq_stage_max;
1364 		eq_stage_2_select_value = eq_level - eq_stage_max;
1365 	}
1366 
1367 	ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v);
1368 
1369 	v &= ~(UB960_RR_AEQ_BYPASS_EQ_STAGE1_VALUE_MASK |
1370 	       UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_MASK);
1371 	v |= eq_stage_1_select_value << UB960_RR_AEQ_BYPASS_EQ_STAGE1_VALUE_SHIFT;
1372 	v |= eq_stage_2_select_value << UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_SHIFT;
1373 	v |= UB960_RR_AEQ_BYPASS_ENABLE;
1374 
1375 	ub960_rxport_write(priv, nport, UB960_RR_AEQ_BYPASS, v);
1376 }
1377 
1378 static void ub960_rxport_set_eq_range(struct ub960_data *priv,
1379 				      unsigned int nport, u8 eq_min, u8 eq_max)
1380 {
1381 	ub960_rxport_write(priv, nport, UB960_RR_AEQ_MIN_MAX,
1382 			   (eq_min << UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT) |
1383 			   (eq_max << UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT));
1384 
1385 	/* Enable AEQ min setting */
1386 	ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_CTL2,
1387 				 UB960_RR_AEQ_CTL2_SET_AEQ_FLOOR,
1388 				 UB960_RR_AEQ_CTL2_SET_AEQ_FLOOR);
1389 }
1390 
/*
 * Configure strobe and EQ settings for one RX port, choosing between
 * manual strobe/EQ values from DT and the hardware's adaptive modes.
 */
static void ub960_rxport_config_eq(struct ub960_data *priv, unsigned int nport)
{
	struct ub960_rxport *rxport = priv->rxports[nport];

	/* We also set common settings here. Should be moved elsewhere. */

	if (priv->strobe.manual) {
		/* Disable AEQ_SFILTER_EN */
		ub960_update_bits(priv, UB960_XR_AEQ_CTL1,
				  UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN, 0);
	} else {
		/* Enable SFILTER and error control */
		ub960_write(priv, UB960_XR_AEQ_CTL1,
			    UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_MASK |
				    UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN);

		/* Set AEQ strobe range */
		ub960_rxport_set_strobe_range(priv, priv->strobe.min,
					      priv->strobe.max);
	}

	/* The rest are port specific */

	if (priv->strobe.manual)
		ub960_rxport_set_strobe_pos(priv, nport, rxport->eq.strobe_pos);
	else
		ub960_rxport_set_strobe_pos(priv, nport, 0);

	if (rxport->eq.manual_eq) {
		ub960_rxport_set_eq_level(priv, nport,
					  rxport->eq.manual.eq_level);

		/* Enable AEQ Bypass */
		ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_BYPASS,
					 UB960_RR_AEQ_BYPASS_ENABLE,
					 UB960_RR_AEQ_BYPASS_ENABLE);
	} else {
		ub960_rxport_set_eq_range(priv, nport,
					  rxport->eq.aeq.eq_level_min,
					  rxport->eq.aeq.eq_level_max);

		/* Disable AEQ Bypass */
		ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_BYPASS,
					 UB960_RR_AEQ_BYPASS_ENABLE, 0);
	}
}
1437 
/*
 * Check whether an RX port link is locked and error-free. Sets *ok to
 * false if the port is not locked, or if any of the status registers,
 * the CSI error counter or the parity error counter indicate errors.
 * Note: reading these registers clears latched error state, so a single
 * call is not sufficient to judge long-term link health (see
 * ub960_rxport_wait_locks()).
 *
 * Returns 0 on success (with *ok valid) or a negative error code.
 */
static int ub960_rxport_link_ok(struct ub960_data *priv, unsigned int nport,
				bool *ok)
{
	u8 rx_port_sts1, rx_port_sts2;
	u16 parity_errors;
	u8 csi_rx_sts;
	u8 csi_err_cnt;
	u8 bcc_sts;
	int ret;
	bool errors;

	ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1,
				&rx_port_sts1);
	if (ret)
		return ret;

	/* No lock means no point looking at the error state. */
	if (!(rx_port_sts1 & UB960_RR_RX_PORT_STS1_LOCK_STS)) {
		*ok = false;
		return 0;
	}

	ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2,
				&rx_port_sts2);
	if (ret)
		return ret;

	ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS, &csi_rx_sts);
	if (ret)
		return ret;

	ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER,
				&csi_err_cnt);
	if (ret)
		return ret;

	ret = ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS, &bcc_sts);
	if (ret)
		return ret;

	ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_PAR_ERR_HI,
				  &parity_errors);
	if (ret)
		return ret;

	errors = (rx_port_sts1 & UB960_RR_RX_PORT_STS1_ERROR_MASK) ||
		 (rx_port_sts2 & UB960_RR_RX_PORT_STS2_ERROR_MASK) ||
		 (bcc_sts & UB960_RR_BCC_STATUS_ERROR_MASK) ||
		 (csi_rx_sts & UB960_RR_CSI_RX_STS_ERROR_MASK) || csi_err_cnt ||
		 parity_errors;

	*ok = !errors;

	return 0;
}
1492 
1493 /*
1494  * Wait for the RX ports to lock, have no errors and have stable strobe position
1495  * and EQ level.
1496  */
1497 static int ub960_rxport_wait_locks(struct ub960_data *priv,
1498 				   unsigned long port_mask,
1499 				   unsigned int *lock_mask)
1500 {
1501 	struct device *dev = &priv->client->dev;
1502 	unsigned long timeout;
1503 	unsigned int link_ok_mask;
1504 	unsigned int missing;
1505 	unsigned int loops;
1506 	u8 nport;
1507 	int ret;
1508 
1509 	if (port_mask == 0) {
1510 		if (lock_mask)
1511 			*lock_mask = 0;
1512 		return 0;
1513 	}
1514 
1515 	if (port_mask >= BIT(priv->hw_data->num_rxports))
1516 		return -EINVAL;
1517 
1518 	timeout = jiffies + msecs_to_jiffies(1000);
1519 	loops = 0;
1520 	link_ok_mask = 0;
1521 
1522 	while (time_before(jiffies, timeout)) {
1523 		missing = 0;
1524 
1525 		for_each_set_bit(nport, &port_mask,
1526 				 priv->hw_data->num_rxports) {
1527 			struct ub960_rxport *rxport = priv->rxports[nport];
1528 			bool ok;
1529 
1530 			if (!rxport)
1531 				continue;
1532 
1533 			ret = ub960_rxport_link_ok(priv, nport, &ok);
1534 			if (ret)
1535 				return ret;
1536 
1537 			/*
1538 			 * We want the link to be ok for two consecutive loops,
1539 			 * as a link could get established just before our test
1540 			 * and drop soon after.
1541 			 */
1542 			if (!ok || !(link_ok_mask & BIT(nport)))
1543 				missing++;
1544 
1545 			if (ok)
1546 				link_ok_mask |= BIT(nport);
1547 			else
1548 				link_ok_mask &= ~BIT(nport);
1549 		}
1550 
1551 		loops++;
1552 
1553 		if (missing == 0)
1554 			break;
1555 
1556 		msleep(50);
1557 	}
1558 
1559 	if (lock_mask)
1560 		*lock_mask = link_ok_mask;
1561 
1562 	dev_dbg(dev, "Wait locks done in %u loops\n", loops);
1563 	for_each_set_bit(nport, &port_mask, priv->hw_data->num_rxports) {
1564 		struct ub960_rxport *rxport = priv->rxports[nport];
1565 		s8 strobe_pos, eq_level;
1566 		u16 v;
1567 
1568 		if (!rxport)
1569 			continue;
1570 
1571 		if (!(link_ok_mask & BIT(nport))) {
1572 			dev_dbg(dev, "\trx%u: not locked\n", nport);
1573 			continue;
1574 		}
1575 
1576 		ub960_rxport_read16(priv, nport, UB960_RR_RX_FREQ_HIGH, &v);
1577 
1578 		ret = ub960_rxport_get_strobe_pos(priv, nport, &strobe_pos);
1579 		if (ret)
1580 			return ret;
1581 
1582 		ret = ub960_rxport_get_eq_level(priv, nport, &eq_level);
1583 		if (ret)
1584 			return ret;
1585 
1586 		dev_dbg(dev, "\trx%u: locked, SP: %d, EQ: %u, freq %llu Hz\n",
1587 			nport, strobe_pos, eq_level, (v * 1000000ULL) >> 8);
1588 	}
1589 
1590 	return 0;
1591 }
1592 
1593 static unsigned long ub960_calc_bc_clk_rate_ub960(struct ub960_data *priv,
1594 						  struct ub960_rxport *rxport)
1595 {
1596 	unsigned int mult;
1597 	unsigned int div;
1598 
1599 	switch (rxport->rx_mode) {
1600 	case RXPORT_MODE_RAW10:
1601 	case RXPORT_MODE_RAW12_HF:
1602 	case RXPORT_MODE_RAW12_LF:
1603 		mult = 1;
1604 		div = 10;
1605 		break;
1606 
1607 	case RXPORT_MODE_CSI2_SYNC:
1608 		mult = 2;
1609 		div = 1;
1610 		break;
1611 
1612 	case RXPORT_MODE_CSI2_NONSYNC:
1613 		mult = 2;
1614 		div = 5;
1615 		break;
1616 
1617 	default:
1618 		return 0;
1619 	}
1620 
1621 	return clk_get_rate(priv->refclk) * mult / div;
1622 }
1623 
1624 static unsigned long ub960_calc_bc_clk_rate_ub9702(struct ub960_data *priv,
1625 						   struct ub960_rxport *rxport)
1626 {
1627 	switch (rxport->rx_mode) {
1628 	case RXPORT_MODE_RAW10:
1629 	case RXPORT_MODE_RAW12_HF:
1630 	case RXPORT_MODE_RAW12_LF:
1631 		return 2359400;
1632 
1633 	case RXPORT_MODE_CSI2_SYNC:
1634 		return 47187500;
1635 
1636 	case RXPORT_MODE_CSI2_NONSYNC:
1637 		return 9437500;
1638 
1639 	default:
1640 		return 0;
1641 	}
1642 }
1643 
1644 static int ub960_rxport_add_serializer(struct ub960_data *priv, u8 nport)
1645 {
1646 	struct ub960_rxport *rxport = priv->rxports[nport];
1647 	struct device *dev = &priv->client->dev;
1648 	struct ds90ub9xx_platform_data *ser_pdata = &rxport->ser.pdata;
1649 	struct i2c_board_info ser_info = {
1650 		.of_node = to_of_node(rxport->ser.fwnode),
1651 		.fwnode = rxport->ser.fwnode,
1652 		.platform_data = ser_pdata,
1653 	};
1654 
1655 	ser_pdata->port = nport;
1656 	ser_pdata->atr = priv->atr;
1657 	if (priv->hw_data->is_ub9702)
1658 		ser_pdata->bc_rate = ub960_calc_bc_clk_rate_ub9702(priv, rxport);
1659 	else
1660 		ser_pdata->bc_rate = ub960_calc_bc_clk_rate_ub960(priv, rxport);
1661 
1662 	/*
1663 	 * The serializer is added under the same i2c adapter as the
1664 	 * deserializer. This is not quite right, as the serializer is behind
1665 	 * the FPD-Link.
1666 	 */
1667 	ser_info.addr = rxport->ser.alias;
1668 	rxport->ser.client =
1669 		i2c_new_client_device(priv->client->adapter, &ser_info);
1670 	if (IS_ERR(rxport->ser.client)) {
1671 		dev_err(dev, "rx%u: cannot add %s i2c device", nport,
1672 			ser_info.type);
1673 		return PTR_ERR(rxport->ser.client);
1674 	}
1675 
1676 	dev_dbg(dev, "rx%u: remote serializer at alias 0x%02x (%u-%04x)\n",
1677 		nport, rxport->ser.client->addr,
1678 		rxport->ser.client->adapter->nr, rxport->ser.client->addr);
1679 
1680 	return 0;
1681 }
1682 
/* Unregister the remote serializer i2c client of an RX port. */
static void ub960_rxport_remove_serializer(struct ub960_data *priv, u8 nport)
{
	struct ub960_rxport *rxport = priv->rxports[nport];

	i2c_unregister_device(rxport->ser.client);
	rxport->ser.client = NULL;
}
1690 
1691 /* Add serializer i2c devices for all initialized ports */
1692 static int ub960_rxport_add_serializers(struct ub960_data *priv)
1693 {
1694 	unsigned int nport;
1695 	int ret;
1696 
1697 	for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
1698 		struct ub960_rxport *rxport = priv->rxports[nport];
1699 
1700 		if (!rxport)
1701 			continue;
1702 
1703 		ret = ub960_rxport_add_serializer(priv, nport);
1704 		if (ret)
1705 			goto err_remove_sers;
1706 	}
1707 
1708 	return 0;
1709 
1710 err_remove_sers:
1711 	while (nport--) {
1712 		struct ub960_rxport *rxport = priv->rxports[nport];
1713 
1714 		if (!rxport)
1715 			continue;
1716 
1717 		ub960_rxport_remove_serializer(priv, nport);
1718 	}
1719 
1720 	return ret;
1721 }
1722 
1723 static void ub960_rxport_remove_serializers(struct ub960_data *priv)
1724 {
1725 	unsigned int nport;
1726 
1727 	for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
1728 		struct ub960_rxport *rxport = priv->rxports[nport];
1729 
1730 		if (!rxport)
1731 			continue;
1732 
1733 		ub960_rxport_remove_serializer(priv, nport);
1734 	}
1735 }
1736 
1737 static void ub960_init_tx_port(struct ub960_data *priv,
1738 			       struct ub960_txport *txport)
1739 {
1740 	unsigned int nport = txport->nport;
1741 	u8 csi_ctl = 0;
1742 
1743 	/*
1744 	 * From the datasheet: "initial CSI Skew-Calibration
1745 	 * sequence [...] should be set when operating at 1.6 Gbps"
1746 	 */
1747 	if (priv->tx_data_rate == MHZ(1600))
1748 		csi_ctl |= UB960_TR_CSI_CTL_CSI_CAL_EN;
1749 
1750 	csi_ctl |= (4 - txport->num_data_lanes) << 4;
1751 
1752 	if (!txport->non_continous_clk)
1753 		csi_ctl |= UB960_TR_CSI_CTL_CSI_CONTS_CLOCK;
1754 
1755 	ub960_txport_write(priv, nport, UB960_TR_CSI_CTL, csi_ctl);
1756 }
1757 
/*
 * Configure the shared CSI-2 TX PLL for the configured data rate, apply
 * UB9702-specific analog tuning, then initialize each TX port.
 * Always returns 0.
 */
static int ub960_init_tx_ports(struct ub960_data *priv)
{
	unsigned int nport;
	u8 speed_select;
	u8 pll_div;

	/* TX ports */

	/* Map the data rate to PLL speed-select and divider values. */
	switch (priv->tx_data_rate) {
	case MHZ(1600):
	default:
		speed_select = 0;
		pll_div = 0x10;
		break;
	case MHZ(1200):
		speed_select = 1;
		pll_div = 0x18;
		break;
	case MHZ(800):
		speed_select = 2;
		pll_div = 0x10;
		break;
	case MHZ(400):
		speed_select = 3;
		pll_div = 0x10;
		break;
	}

	ub960_write(priv, UB960_SR_CSI_PLL_CTL, speed_select);

	if (priv->hw_data->is_ub9702) {
		ub960_write(priv, UB960_SR_CSI_PLL_DIV, pll_div);

		/*
		 * Rate-specific writes to undocumented CSI analog registers;
		 * presumably from TI reference code — TODO confirm meaning
		 * against TI documentation.
		 */
		switch (priv->tx_data_rate) {
		case MHZ(1600):
		default:
			ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x92, 0x80);
			ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x4b, 0x2a);
			break;
		case MHZ(800):
			ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x92, 0x90);
			ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x4f, 0x2a);
			ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x4b, 0x2a);
			break;
		case MHZ(400):
			ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x92, 0xa0);
			break;
		}
	}

	for (nport = 0; nport < priv->hw_data->num_txports; nport++) {
		struct ub960_txport *txport = priv->txports[nport];

		if (!txport)
			continue;

		ub960_init_tx_port(priv, txport);
	}

	return 0;
}
1819 
/*
 * Initialize one RX port of the UB960: back-channel frequency, FPD-Link 3
 * port mode, polarities, interrupts, i2c pass-through, serializer alias
 * and EQ configuration, then enable the port. Returns silently for modes
 * that are not handled (RAW12 is not implemented).
 */
static void ub960_init_rx_port_ub960(struct ub960_data *priv,
				     struct ub960_rxport *rxport)
{
	unsigned int nport = rxport->nport;
	u32 bc_freq_val;

	/*
	 * Back channel frequency select.
	 * Override FREQ_SELECT from the strap.
	 * 0 - 2.5 Mbps (DS90UB913A-Q1 / DS90UB933-Q1)
	 * 2 - 10 Mbps
	 * 6 - 50 Mbps (DS90UB953-Q1)
	 *
	 * Note that changing this setting will result in some errors on the back
	 * channel for a short period of time.
	 */

	switch (rxport->rx_mode) {
	case RXPORT_MODE_RAW10:
	case RXPORT_MODE_RAW12_HF:
	case RXPORT_MODE_RAW12_LF:
		bc_freq_val = 0;
		break;

	case RXPORT_MODE_CSI2_NONSYNC:
		bc_freq_val = 2;
		break;

	case RXPORT_MODE_CSI2_SYNC:
		bc_freq_val = 6;
		break;

	default:
		return;
	}

	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
				 UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK,
				 bc_freq_val);

	switch (rxport->rx_mode) {
	case RXPORT_MODE_RAW10:
		/* FPD3_MODE = RAW10 Mode (DS90UB913A-Q1 / DS90UB933-Q1 compatible) */
		ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG,
					 UB960_RR_PORT_CONFIG_FPD3_MODE_MASK,
					 0x3);

		/*
		 * RAW10_8BIT_CTL = 0b10 : 8-bit processing using upper 8 bits
		 */
		ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2,
			UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_MASK,
			0x2 << UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_SHIFT);

		break;

	case RXPORT_MODE_RAW12_HF:
	case RXPORT_MODE_RAW12_LF:
		/* Not implemented */
		return;

	case RXPORT_MODE_CSI2_SYNC:
	case RXPORT_MODE_CSI2_NONSYNC:
		/* CSI-2 Mode (DS90UB953-Q1 compatible) */
		ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG, 0x3,
					 0x0);

		break;
	}

	/* LV_POLARITY & FV_POLARITY */
	ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2, 0x3,
				 rxport->lv_fv_pol);

	/* Enable all interrupt sources from this port */
	ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_HI, 0x07);
	ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_LO, 0x7f);

	/* Enable I2C_PASS_THROUGH */
	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
				 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
				 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH);

	/* Enable I2C communication to the serializer via the alias addr */
	ub960_rxport_write(priv, nport, UB960_RR_SER_ALIAS_ID,
			   rxport->ser.alias << 1);

	/* Configure EQ related settings */
	ub960_rxport_config_eq(priv, nport);

	/* Enable RX port */
	ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport));
}
1913 
/*
 * Initialize a UB9702 RX port operating in FPD-Link 3 (backward
 * compatible) CDR mode: back-channel rate, channel/function mode, serdes
 * analog setup, and half-rate configuration. The 0xNN analog register
 * offsets are undocumented; comments below reflect the presumed meaning.
 */
static void ub960_init_rx_port_ub9702_fpd3(struct ub960_data *priv,
					   struct ub960_rxport *rxport)
{
	unsigned int nport = rxport->nport;
	u8 bc_freq_val;
	u8 fpd_func_mode;

	switch (rxport->rx_mode) {
	case RXPORT_MODE_RAW10:
		bc_freq_val = 0;
		fpd_func_mode = 5;
		break;

	case RXPORT_MODE_RAW12_HF:
		bc_freq_val = 0;
		fpd_func_mode = 4;
		break;

	case RXPORT_MODE_RAW12_LF:
		bc_freq_val = 0;
		fpd_func_mode = 6;
		break;

	case RXPORT_MODE_CSI2_SYNC:
		bc_freq_val = 6;
		fpd_func_mode = 2;
		break;

	case RXPORT_MODE_CSI2_NONSYNC:
		bc_freq_val = 2;
		fpd_func_mode = 2;
		break;

	default:
		return;
	}

	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG, 0x7,
				 bc_freq_val);
	ub960_rxport_write(priv, nport, UB960_RR_CHANNEL_MODE, fpd_func_mode);

	/* set serdes_eq_mode = 1 */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0xa8, 0x80);

	/* enable serdes driver */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x0d, 0x7f);

	/* set serdes_eq_offset=4 */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x2b, 0x04);

	/* init default serdes_eq_max in 0xa9 */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0xa9, 0x23);

	/* init serdes_eq_min in 0xaa */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0xaa, 0);

	/* serdes_driver_ctl2 control: DS90UB953-Q1/DS90UB933-Q1/DS90UB913A-Q1 */
	ub960_ind_update_bits(priv, UB960_IND_TARGET_RX_ANA(nport), 0x1b,
			      BIT(3), BIT(3));

	/* RX port to half-rate */
	ub960_update_bits(priv, UB960_SR_FPD_RATE_CFG, 0x3 << (nport * 2),
			  BIT(nport * 2));
}
1978 
/*
 * Set up the adaptive EQ for a UB9702 RX port in FPD-Link 4 mode.
 * NOTE(review): first_time_power_up is a local that is always true, so the
 * "first time" branch is unconditionally taken — presumably a placeholder
 * for future PM support; verify intended behavior.
 */
static void ub960_init_rx_port_ub9702_fpd4_aeq(struct ub960_data *priv,
					       struct ub960_rxport *rxport)
{
	unsigned int nport = rxport->nport;
	bool first_time_power_up = true;

	if (first_time_power_up) {
		u8 v;

		/* AEQ init */
		ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x2c, &v);

		ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x27, v);
		ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x28, v + 1);

		ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x2b, 0x00);
	}

	/* enable serdes_eq_ctl2 */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x9e, 0x00);

	/* enable serdes_eq_ctl1 */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x90, 0x40);

	/* enable serdes_eq_en */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x2e, 0x40);

	/* disable serdes_eq_override */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0xf0, 0x00);

	/* disable serdes_gain_override */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x71, 0x00);
}
2012 
/*
 * Initialize a UB9702 RX port in native FPD-Link 4 CDR mode: back-channel
 * rate, sync mode, serdes analog setup, full-rate (7.55G) configuration
 * and AEQ initialization. The 0xNN analog offsets are undocumented.
 */
static void ub960_init_rx_port_ub9702_fpd4(struct ub960_data *priv,
					   struct ub960_rxport *rxport)
{
	unsigned int nport = rxport->nport;
	u8 bc_freq_val;

	switch (rxport->rx_mode) {
	case RXPORT_MODE_RAW10:
		bc_freq_val = 0;
		break;

	case RXPORT_MODE_RAW12_HF:
		bc_freq_val = 0;
		break;

	case RXPORT_MODE_RAW12_LF:
		bc_freq_val = 0;
		break;

	case RXPORT_MODE_CSI2_SYNC:
		bc_freq_val = 6;
		break;

	case RXPORT_MODE_CSI2_NONSYNC:
		bc_freq_val = 2;
		break;

	default:
		return;
	}

	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG, 0x7,
				 bc_freq_val);

	/* FPD4 Sync Mode */
	ub960_rxport_write(priv, nport, UB960_RR_CHANNEL_MODE, 0);

	/* add serdes_eq_offset of 4 */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x2b, 0x04);

	/* FPD4 serdes_start_eq in 0x27: assign default */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x27, 0x0);
	/* FPD4 serdes_end_eq in 0x28: assign default */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x28, 0x23);

	/* set serdes_driver_mode into FPD IV mode */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x04, 0x00);
	/* set FPD PBC drv into FPD IV mode */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x1b, 0x00);

	/* set serdes_system_init to 0x2f */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x21, 0x2f);
	/* set serdes_system_rst in reset mode */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x25, 0xc1);

	/* RX port to 7.55G mode */
	ub960_update_bits(priv, UB960_SR_FPD_RATE_CFG, 0x3 << (nport * 2),
			  0 << (nport * 2));

	ub960_init_rx_port_ub9702_fpd4_aeq(priv, rxport);
}
2074 
/*
 * Initialize one RX port of the UB9702: delegate to the FPD3 or FPD4 CDR
 * setup, then apply the mode-independent port configuration (polarities,
 * interrupts, i2c pass-through, serializer alias) and enable the port.
 */
static void ub960_init_rx_port_ub9702(struct ub960_data *priv,
				      struct ub960_rxport *rxport)
{
	unsigned int nport = rxport->nport;

	if (rxport->cdr_mode == RXPORT_CDR_FPD3)
		ub960_init_rx_port_ub9702_fpd3(priv, rxport);
	else /* RXPORT_CDR_FPD4 */
		ub960_init_rx_port_ub9702_fpd4(priv, rxport);

	switch (rxport->rx_mode) {
	case RXPORT_MODE_RAW10:
		/*
		 * RAW10_8BIT_CTL = 0b11 : 8-bit processing using lower 8 bits
		 * 0b10 : 8-bit processing using upper 8 bits
		 */
		ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2,
					 0x3 << 6, 0x2 << 6);

		break;

	case RXPORT_MODE_RAW12_HF:
	case RXPORT_MODE_RAW12_LF:
		/* Not implemented */
		return;

	case RXPORT_MODE_CSI2_SYNC:
	case RXPORT_MODE_CSI2_NONSYNC:

		break;
	}

	/* LV_POLARITY & FV_POLARITY */
	ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2, 0x3,
				 rxport->lv_fv_pol);

	/* Enable all interrupt sources from this port */
	ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_HI, 0x07);
	ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_LO, 0x7f);

	/* Enable I2C_PASS_THROUGH */
	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
				 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
				 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH);

	/* Enable I2C communication to the serializer via the alias addr */
	ub960_rxport_write(priv, nport, UB960_RR_SER_ALIAS_ID,
			   rxport->ser.alias << 1);

	/* Enable RX port */
	ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport));

	if (rxport->cdr_mode == RXPORT_CDR_FPD4) {
		/* unreset 960 AEQ */
		ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x25, 0x41);
	}
}
2132 
2133 static int ub960_init_rx_ports(struct ub960_data *priv)
2134 {
2135 	unsigned int nport;
2136 
2137 	for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
2138 		struct ub960_rxport *rxport = priv->rxports[nport];
2139 
2140 		if (!rxport)
2141 			continue;
2142 
2143 		if (priv->hw_data->is_ub9702)
2144 			ub960_init_rx_port_ub9702(priv, rxport);
2145 		else
2146 			ub960_init_rx_port_ub960(priv, rxport);
2147 	}
2148 
2149 	return 0;
2150 }
2151 
/*
 * Read and decode the per-RX-port interrupt status registers, logging any
 * reported error conditions. Called from the interrupt handler (or the poll
 * worker) for each port flagged in the global interrupt status. Reading the
 * status registers also clears most of the latched events.
 */
static void ub960_rxport_handle_events(struct ub960_data *priv, u8 nport)
{
	struct device *dev = &priv->client->dev;
	u8 rx_port_sts1;
	u8 rx_port_sts2;
	u8 csi_rx_sts;
	u8 bcc_sts;
	int ret = 0;

	/* Read interrupts (also clears most of them) */
	if (!ret)
		ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1,
					&rx_port_sts1);
	if (!ret)
		ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2,
					&rx_port_sts2);
	if (!ret)
		ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS,
					&csi_rx_sts);
	if (!ret)
		ret = ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS,
					&bcc_sts);

	/* If any of the reads failed the status bytes are unusable; bail out */
	if (ret)
		return;

	if (rx_port_sts1 & UB960_RR_RX_PORT_STS1_PARITY_ERROR) {
		u16 v;

		ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_PAR_ERR_HI,
					  &v);
		if (!ret)
			dev_err(dev, "rx%u parity errors: %u\n", nport, v);
	}

	if (rx_port_sts1 & UB960_RR_RX_PORT_STS1_BCC_CRC_ERROR)
		dev_err(dev, "rx%u BCC CRC error\n", nport);

	if (rx_port_sts1 & UB960_RR_RX_PORT_STS1_BCC_SEQ_ERROR)
		dev_err(dev, "rx%u BCC SEQ error\n", nport);

	if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_LINE_LEN_UNSTABLE)
		dev_err(dev, "rx%u line length unstable\n", nport);

	if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_FPD3_ENCODE_ERROR)
		dev_err(dev, "rx%u FPD3 encode error\n", nport);

	if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_BUFFER_ERROR)
		dev_err(dev, "rx%u buffer error\n", nport);

	if (csi_rx_sts)
		dev_err(dev, "rx%u CSI error: %#02x\n", nport, csi_rx_sts);

	if (csi_rx_sts & UB960_RR_CSI_RX_STS_ECC1_ERR)
		dev_err(dev, "rx%u CSI ECC1 error\n", nport);

	if (csi_rx_sts & UB960_RR_CSI_RX_STS_ECC2_ERR)
		dev_err(dev, "rx%u CSI ECC2 error\n", nport);

	if (csi_rx_sts & UB960_RR_CSI_RX_STS_CKSUM_ERR)
		dev_err(dev, "rx%u CSI checksum error\n", nport);

	if (csi_rx_sts & UB960_RR_CSI_RX_STS_LENGTH_ERR)
		dev_err(dev, "rx%u CSI length error\n", nport);

	if (bcc_sts)
		dev_err(dev, "rx%u BCC error: %#02x\n", nport, bcc_sts);

	if (bcc_sts & UB960_RR_BCC_STATUS_RESP_ERR)
		dev_err(dev, "rx%u BCC response error", nport);

	if (bcc_sts & UB960_RR_BCC_STATUS_SLAVE_TO)
		dev_err(dev, "rx%u BCC slave timeout", nport);

	if (bcc_sts & UB960_RR_BCC_STATUS_SLAVE_ERR)
		dev_err(dev, "rx%u BCC slave error", nport);

	if (bcc_sts & UB960_RR_BCC_STATUS_MASTER_TO)
		dev_err(dev, "rx%u BCC master timeout", nport);

	if (bcc_sts & UB960_RR_BCC_STATUS_MASTER_ERR)
		dev_err(dev, "rx%u BCC master error", nport);

	if (bcc_sts & UB960_RR_BCC_STATUS_SEQ_ERROR)
		dev_err(dev, "rx%u BCC sequence error", nport);

	/* Informational (debug-level) events below */

	if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_LINE_LEN_CHG) {
		u16 v;

		ret = ub960_rxport_read16(priv, nport, UB960_RR_LINE_LEN_1, &v);
		if (!ret)
			dev_dbg(dev, "rx%u line len changed: %u\n", nport, v);
	}

	if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_LINE_CNT_CHG) {
		u16 v;

		ret = ub960_rxport_read16(priv, nport, UB960_RR_LINE_COUNT_HI,
					  &v);
		if (!ret)
			dev_dbg(dev, "rx%u line count changed: %u\n", nport, v);
	}

	if (rx_port_sts1 & UB960_RR_RX_PORT_STS1_LOCK_STS_CHG) {
		dev_dbg(dev, "rx%u: %s, %s, %s, %s\n", nport,
			(rx_port_sts1 & UB960_RR_RX_PORT_STS1_LOCK_STS) ?
				"locked" :
				"unlocked",
			(rx_port_sts1 & UB960_RR_RX_PORT_STS1_PORT_PASS) ?
				"passed" :
				"not passed",
			(rx_port_sts2 & UB960_RR_RX_PORT_STS2_CABLE_FAULT) ?
				"no clock" :
				"clock ok",
			(rx_port_sts2 & UB960_RR_RX_PORT_STS2_FREQ_STABLE) ?
				"stable freq" :
				"unstable freq");
	}
}
2271 
2272 /* -----------------------------------------------------------------------------
2273  * V4L2
2274  */
2275 
2276 /*
2277  * The current implementation only supports a simple VC mapping, where all VCs
 * from one RX port will be mapped to the same VC. Also, the hardware
2279  * dictates that all streams from an RX port must go to a single TX port.
2280  *
2281  * This function decides the target VC numbers for each RX port with a simple
2282  * algorithm, so that for each TX port, we get VC numbers starting from 0,
2283  * and counting up.
2284  *
2285  * E.g. if all four RX ports are in use, of which the first two go to the
 * first TX port and the second two go to the second TX port, we would get
2287  * the following VCs for the four RX ports: 0, 1, 0, 1.
2288  *
2289  * TODO: implement a more sophisticated VC mapping. As the driver cannot know
2290  * what VCs the sinks expect (say, an FPGA with hardcoded VC routing), this
2291  * probably needs to be somehow configurable. Device tree?
2292  */
2293 static void ub960_get_vc_maps(struct ub960_data *priv,
2294 			      struct v4l2_subdev_state *state, u8 *vc)
2295 {
2296 	u8 cur_vc[UB960_MAX_TX_NPORTS] = {};
2297 	struct v4l2_subdev_route *route;
2298 	u8 handled_mask = 0;
2299 
2300 	for_each_active_route(&state->routing, route) {
2301 		unsigned int rx, tx;
2302 
2303 		rx = ub960_pad_to_port(priv, route->sink_pad);
2304 		if (BIT(rx) & handled_mask)
2305 			continue;
2306 
2307 		tx = ub960_pad_to_port(priv, route->source_pad);
2308 
2309 		vc[rx] = cur_vc[tx]++;
2310 		handled_mask |= BIT(rx);
2311 	}
2312 }
2313 
2314 static int ub960_enable_tx_port(struct ub960_data *priv, unsigned int nport)
2315 {
2316 	struct device *dev = &priv->client->dev;
2317 
2318 	dev_dbg(dev, "enable TX port %u\n", nport);
2319 
2320 	return ub960_txport_update_bits(priv, nport, UB960_TR_CSI_CTL,
2321 					UB960_TR_CSI_CTL_CSI_ENABLE,
2322 					UB960_TR_CSI_CTL_CSI_ENABLE);
2323 }
2324 
2325 static void ub960_disable_tx_port(struct ub960_data *priv, unsigned int nport)
2326 {
2327 	struct device *dev = &priv->client->dev;
2328 
2329 	dev_dbg(dev, "disable TX port %u\n", nport);
2330 
2331 	ub960_txport_update_bits(priv, nport, UB960_TR_CSI_CTL,
2332 				 UB960_TR_CSI_CTL_CSI_ENABLE, 0);
2333 }
2334 
2335 static int ub960_enable_rx_port(struct ub960_data *priv, unsigned int nport)
2336 {
2337 	struct device *dev = &priv->client->dev;
2338 
2339 	dev_dbg(dev, "enable RX port %u\n", nport);
2340 
2341 	/* Enable forwarding */
2342 	return ub960_update_bits(priv, UB960_SR_FWD_CTL1,
2343 				 UB960_SR_FWD_CTL1_PORT_DIS(nport), 0);
2344 }
2345 
2346 static void ub960_disable_rx_port(struct ub960_data *priv, unsigned int nport)
2347 {
2348 	struct device *dev = &priv->client->dev;
2349 
2350 	dev_dbg(dev, "disable RX port %u\n", nport);
2351 
2352 	/* Disable forwarding */
2353 	ub960_update_bits(priv, UB960_SR_FWD_CTL1,
2354 			  UB960_SR_FWD_CTL1_PORT_DIS(nport),
2355 			  UB960_SR_FWD_CTL1_PORT_DIS(nport));
2356 }
2357 
2358 /*
2359  * The driver only supports using a single VC for each source. This function
2360  * checks that each source only provides streams using a single VC.
2361  */
2362 static int ub960_validate_stream_vcs(struct ub960_data *priv)
2363 {
2364 	unsigned int nport;
2365 	unsigned int i;
2366 
2367 	for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
2368 		struct ub960_rxport *rxport = priv->rxports[nport];
2369 		struct v4l2_mbus_frame_desc desc;
2370 		int ret;
2371 		u8 vc;
2372 
2373 		if (!rxport)
2374 			continue;
2375 
2376 		ret = v4l2_subdev_call(rxport->source.sd, pad, get_frame_desc,
2377 				       rxport->source.pad, &desc);
2378 		if (ret)
2379 			return ret;
2380 
2381 		if (desc.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2)
2382 			continue;
2383 
2384 		if (desc.num_entries == 0)
2385 			continue;
2386 
2387 		vc = desc.entry[0].bus.csi2.vc;
2388 
2389 		for (i = 1; i < desc.num_entries; i++) {
2390 			if (vc == desc.entry[i].bus.csi2.vc)
2391 				continue;
2392 
2393 			dev_err(&priv->client->dev,
2394 				"rx%u: source with multiple virtual-channels is not supported\n",
2395 				nport);
2396 			return -ENODEV;
2397 		}
2398 	}
2399 
2400 	return 0;
2401 }
2402 
/*
 * Program the RX ports and forwarding control for the current routing table.
 *
 * First pass over the active routes collects, per RX port, the stream count,
 * the target TX port, and (for parallel RAW inputs) the pixel/metadata CSI-2
 * data types derived from the configured sink formats. Second pass writes the
 * per-port VC/datatype mapping registers and builds the FWD_CTL1 value.
 * Forwarding itself stays disabled here; it is enabled per port in
 * ub960_enable_rx_port().
 */
static int ub960_configure_ports_for_streaming(struct ub960_data *priv,
					       struct v4l2_subdev_state *state)
{
	u8 fwd_ctl;
	struct {
		u32 num_streams;
		u8 pixel_dt;
		u8 meta_dt;
		u32 meta_lines;
		u32 tx_port;
	} rx_data[UB960_MAX_RX_NPORTS] = {};
	u8 vc_map[UB960_MAX_RX_NPORTS] = {};
	struct v4l2_subdev_route *route;
	unsigned int nport;
	int ret;

	ret = ub960_validate_stream_vcs(priv);
	if (ret)
		return ret;

	ub960_get_vc_maps(priv, state, vc_map);

	for_each_active_route(&state->routing, route) {
		struct ub960_rxport *rxport;
		struct ub960_txport *txport;
		struct v4l2_mbus_framefmt *fmt;
		const struct ub960_format_info *ub960_fmt;
		unsigned int nport;

		nport = ub960_pad_to_port(priv, route->sink_pad);

		rxport = priv->rxports[nport];
		if (!rxport)
			return -EINVAL;

		txport = priv->txports[ub960_pad_to_port(priv, route->source_pad)];
		if (!txport)
			return -EINVAL;

		rx_data[nport].tx_port = ub960_pad_to_port(priv, route->source_pad);

		rx_data[nport].num_streams++;

		/* For the rest, we are only interested in parallel busses */
		if (rxport->rx_mode == RXPORT_MODE_CSI2_SYNC ||
		    rxport->rx_mode == RXPORT_MODE_CSI2_NONSYNC)
			continue;

		/* A parallel port carries at most pixel + metadata streams */
		if (rx_data[nport].num_streams > 2)
			return -EPIPE;

		fmt = v4l2_subdev_state_get_format(state, route->sink_pad,
						   route->sink_stream);
		if (!fmt)
			return -EPIPE;

		ub960_fmt = ub960_find_format(fmt->code);
		if (!ub960_fmt)
			return -EPIPE;

		if (ub960_fmt->meta) {
			/* Metadata height maps to RAW_EMBED_DTYPE lines (max 3) */
			if (fmt->height > 3) {
				dev_err(&priv->client->dev,
					"rx%u: unsupported metadata height %u\n",
					nport, fmt->height);
				return -EPIPE;
			}

			rx_data[nport].meta_dt = ub960_fmt->datatype;
			rx_data[nport].meta_lines = fmt->height;
		} else {
			rx_data[nport].pixel_dt = ub960_fmt->datatype;
		}
	}

	/* Configure RX ports */

	/*
	 * Keep all port forwardings disabled by default. Forwarding will be
	 * enabled in ub960_enable_rx_port.
	 */
	fwd_ctl = GENMASK(7, 4);

	for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
		struct ub960_rxport *rxport = priv->rxports[nport];
		u8 vc = vc_map[nport];

		if (rx_data[nport].num_streams == 0)
			continue;

		switch (rxport->rx_mode) {
		case RXPORT_MODE_RAW10:
			ub960_rxport_write(priv, nport, UB960_RR_RAW10_ID,
				rx_data[nport].pixel_dt | (vc << UB960_RR_RAW10_ID_VC_SHIFT));

			ub960_rxport_write(priv, rxport->nport,
				UB960_RR_RAW_EMBED_DTYPE,
				(rx_data[nport].meta_lines << UB960_RR_RAW_EMBED_DTYPE_LINES_SHIFT) |
					rx_data[nport].meta_dt);

			break;

		case RXPORT_MODE_RAW12_HF:
		case RXPORT_MODE_RAW12_LF:
			/* Not implemented */
			break;

		case RXPORT_MODE_CSI2_SYNC:
		case RXPORT_MODE_CSI2_NONSYNC:
			if (!priv->hw_data->is_ub9702) {
				/* Map all VCs from this port to the same VC */
				ub960_rxport_write(priv, nport, UB960_RR_CSI_VC_MAP,
						   (vc << UB960_RR_CSI_VC_MAP_SHIFT(3)) |
						   (vc << UB960_RR_CSI_VC_MAP_SHIFT(2)) |
						   (vc << UB960_RR_CSI_VC_MAP_SHIFT(1)) |
						   (vc << UB960_RR_CSI_VC_MAP_SHIFT(0)));
			} else {
				unsigned int i;

				/* Map all VCs from this port to VC(nport) */
				/*
				 * NOTE(review): this writes nport rather than
				 * the vc computed by ub960_get_vc_maps() —
				 * confirm this is intentional for UB9702, as
				 * get_frame_desc() reports vc_map[nport] as
				 * the VC.
				 */
				for (i = 0; i < 8; i++)
					ub960_rxport_write(priv, nport,
							   UB960_RR_VC_ID_MAP(i),
							   nport);
			}

			break;
		}

		if (rx_data[nport].tx_port == 1)
			fwd_ctl |= BIT(nport); /* forward to TX1 */
		else
			fwd_ctl &= ~BIT(nport); /* forward to TX0 */
	}

	ub960_write(priv, UB960_SR_FWD_CTL1, fwd_ctl);

	return 0;
}
2542 
2543 static void ub960_update_streaming_status(struct ub960_data *priv)
2544 {
2545 	unsigned int i;
2546 
2547 	for (i = 0; i < UB960_MAX_NPORTS; i++) {
2548 		if (priv->stream_enable_mask[i])
2549 			break;
2550 	}
2551 
2552 	priv->streaming = i < UB960_MAX_NPORTS;
2553 }
2554 
/*
 * Enable the given source (TX pad) streams: configure the ports on the first
 * enable, power up the TX port, then enable the routed RX ports and their
 * remote sources. On any failure, everything enabled so far is rolled back in
 * reverse, leaving the stream_enable_mask and hardware state unchanged.
 */
static int ub960_enable_streams(struct v4l2_subdev *sd,
				struct v4l2_subdev_state *state, u32 source_pad,
				u64 source_streams_mask)
{
	struct ub960_data *priv = sd_to_ub960(sd);
	struct device *dev = &priv->client->dev;
	u64 sink_streams[UB960_MAX_RX_NPORTS] = {};
	struct v4l2_subdev_route *route;
	unsigned int failed_port;
	unsigned int nport;
	int ret;

	if (!priv->streaming) {
		dev_dbg(dev, "Prepare for streaming\n");
		ret = ub960_configure_ports_for_streaming(priv, state);
		if (ret)
			return ret;
	}

	/* Enable TX port if not yet enabled */
	if (!priv->stream_enable_mask[source_pad]) {
		ret = ub960_enable_tx_port(priv,
					   ub960_pad_to_port(priv, source_pad));
		if (ret)
			return ret;
	}

	priv->stream_enable_mask[source_pad] |= source_streams_mask;

	/* Collect sink streams per pad which we need to enable */
	for_each_active_route(&state->routing, route) {
		if (route->source_pad != source_pad)
			continue;

		if (!(source_streams_mask & BIT_ULL(route->source_stream)))
			continue;

		nport = ub960_pad_to_port(priv, route->sink_pad);

		sink_streams[nport] |= BIT_ULL(route->sink_stream);
	}

	for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
		if (!sink_streams[nport])
			continue;

		/* Enable the RX port if not yet enabled */
		if (!priv->stream_enable_mask[nport]) {
			ret = ub960_enable_rx_port(priv, nport);
			if (ret) {
				failed_port = nport;
				goto err;
			}
		}

		priv->stream_enable_mask[nport] |= sink_streams[nport];

		dev_dbg(dev, "enable RX port %u streams %#llx\n", nport,
			sink_streams[nport]);

		ret = v4l2_subdev_enable_streams(
			priv->rxports[nport]->source.sd,
			priv->rxports[nport]->source.pad,
			sink_streams[nport]);
		if (ret) {
			/* The failing port is cleaned up here, not in err: */
			priv->stream_enable_mask[nport] &= ~sink_streams[nport];

			if (!priv->stream_enable_mask[nport])
				ub960_disable_rx_port(priv, nport);

			failed_port = nport;
			goto err;
		}
	}

	priv->streaming = true;

	return 0;

err:
	/* Roll back the ports enabled before failed_port (exclusive) */
	for (nport = 0; nport < failed_port; nport++) {
		if (!sink_streams[nport])
			continue;

		dev_dbg(dev, "disable RX port %u streams %#llx\n", nport,
			sink_streams[nport]);

		ret = v4l2_subdev_disable_streams(
			priv->rxports[nport]->source.sd,
			priv->rxports[nport]->source.pad,
			sink_streams[nport]);
		if (ret)
			dev_err(dev, "Failed to disable streams: %d\n", ret);

		priv->stream_enable_mask[nport] &= ~sink_streams[nport];

		/* Disable RX port if no active streams */
		if (!priv->stream_enable_mask[nport])
			ub960_disable_rx_port(priv, nport);
	}

	priv->stream_enable_mask[source_pad] &= ~source_streams_mask;

	if (!priv->stream_enable_mask[source_pad])
		ub960_disable_tx_port(priv,
				      ub960_pad_to_port(priv, source_pad));

	ub960_update_streaming_status(priv);

	return ret;
}
2666 
/*
 * Disable the given source (TX pad) streams: stop the routed remote sources,
 * turn off RX/TX ports that no longer carry any stream, and refresh the
 * global streaming flag. Remote disable failures are logged but do not abort
 * the teardown.
 */
static int ub960_disable_streams(struct v4l2_subdev *sd,
				 struct v4l2_subdev_state *state,
				 u32 source_pad, u64 source_streams_mask)
{
	struct ub960_data *priv = sd_to_ub960(sd);
	struct device *dev = &priv->client->dev;
	u64 sink_streams[UB960_MAX_RX_NPORTS] = {};
	struct v4l2_subdev_route *route;
	unsigned int nport;
	int ret;

	/* Collect sink streams per pad which we need to disable */
	for_each_active_route(&state->routing, route) {
		if (route->source_pad != source_pad)
			continue;

		if (!(source_streams_mask & BIT_ULL(route->source_stream)))
			continue;

		nport = ub960_pad_to_port(priv, route->sink_pad);

		sink_streams[nport] |= BIT_ULL(route->sink_stream);
	}

	for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
		if (!sink_streams[nport])
			continue;

		dev_dbg(dev, "disable RX port %u streams %#llx\n", nport,
			sink_streams[nport]);

		ret = v4l2_subdev_disable_streams(
			priv->rxports[nport]->source.sd,
			priv->rxports[nport]->source.pad,
			sink_streams[nport]);
		if (ret)
			dev_err(dev, "Failed to disable streams: %d\n", ret);

		priv->stream_enable_mask[nport] &= ~sink_streams[nport];

		/* Disable RX port if no active streams */
		if (!priv->stream_enable_mask[nport])
			ub960_disable_rx_port(priv, nport);
	}

	/* Disable TX port if no active streams */

	priv->stream_enable_mask[source_pad] &= ~source_streams_mask;

	if (!priv->stream_enable_mask[source_pad])
		ub960_disable_tx_port(priv,
				      ub960_pad_to_port(priv, source_pad));

	ub960_update_streaming_status(priv);

	return 0;
}
2724 
2725 static int _ub960_set_routing(struct v4l2_subdev *sd,
2726 			      struct v4l2_subdev_state *state,
2727 			      struct v4l2_subdev_krouting *routing)
2728 {
2729 	static const struct v4l2_mbus_framefmt format = {
2730 		.width = 640,
2731 		.height = 480,
2732 		.code = MEDIA_BUS_FMT_UYVY8_1X16,
2733 		.field = V4L2_FIELD_NONE,
2734 		.colorspace = V4L2_COLORSPACE_SRGB,
2735 		.ycbcr_enc = V4L2_YCBCR_ENC_601,
2736 		.quantization = V4L2_QUANTIZATION_LIM_RANGE,
2737 		.xfer_func = V4L2_XFER_FUNC_SRGB,
2738 	};
2739 	int ret;
2740 
2741 	/*
2742 	 * Note: we can only support up to V4L2_FRAME_DESC_ENTRY_MAX, until
2743 	 * frame desc is made dynamically allocated.
2744 	 */
2745 
2746 	if (routing->num_routes > V4L2_FRAME_DESC_ENTRY_MAX)
2747 		return -E2BIG;
2748 
2749 	ret = v4l2_subdev_routing_validate(sd, routing,
2750 					   V4L2_SUBDEV_ROUTING_ONLY_1_TO_1 |
2751 					   V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX);
2752 	if (ret)
2753 		return ret;
2754 
2755 	ret = v4l2_subdev_set_routing_with_fmt(sd, state, routing, &format);
2756 	if (ret)
2757 		return ret;
2758 
2759 	return 0;
2760 }
2761 
2762 static int ub960_set_routing(struct v4l2_subdev *sd,
2763 			     struct v4l2_subdev_state *state,
2764 			     enum v4l2_subdev_format_whence which,
2765 			     struct v4l2_subdev_krouting *routing)
2766 {
2767 	struct ub960_data *priv = sd_to_ub960(sd);
2768 
2769 	if (which == V4L2_SUBDEV_FORMAT_ACTIVE && priv->streaming)
2770 		return -EBUSY;
2771 
2772 	return _ub960_set_routing(sd, state, routing);
2773 }
2774 
2775 static int ub960_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
2776 				struct v4l2_mbus_frame_desc *fd)
2777 {
2778 	struct ub960_data *priv = sd_to_ub960(sd);
2779 	struct v4l2_subdev_route *route;
2780 	struct v4l2_subdev_state *state;
2781 	int ret = 0;
2782 	struct device *dev = &priv->client->dev;
2783 	u8 vc_map[UB960_MAX_RX_NPORTS] = {};
2784 
2785 	if (!ub960_pad_is_source(priv, pad))
2786 		return -EINVAL;
2787 
2788 	fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2;
2789 
2790 	state = v4l2_subdev_lock_and_get_active_state(&priv->sd);
2791 
2792 	ub960_get_vc_maps(priv, state, vc_map);
2793 
2794 	for_each_active_route(&state->routing, route) {
2795 		struct v4l2_mbus_frame_desc_entry *source_entry = NULL;
2796 		struct v4l2_mbus_frame_desc source_fd;
2797 		unsigned int nport;
2798 		unsigned int i;
2799 
2800 		if (route->source_pad != pad)
2801 			continue;
2802 
2803 		nport = ub960_pad_to_port(priv, route->sink_pad);
2804 
2805 		ret = v4l2_subdev_call(priv->rxports[nport]->source.sd, pad,
2806 				       get_frame_desc,
2807 				       priv->rxports[nport]->source.pad,
2808 				       &source_fd);
2809 		if (ret) {
2810 			dev_err(dev,
2811 				"Failed to get source frame desc for pad %u\n",
2812 				route->sink_pad);
2813 			goto out_unlock;
2814 		}
2815 
2816 		for (i = 0; i < source_fd.num_entries; i++) {
2817 			if (source_fd.entry[i].stream == route->sink_stream) {
2818 				source_entry = &source_fd.entry[i];
2819 				break;
2820 			}
2821 		}
2822 
2823 		if (!source_entry) {
2824 			dev_err(dev,
2825 				"Failed to find stream from source frame desc\n");
2826 			ret = -EPIPE;
2827 			goto out_unlock;
2828 		}
2829 
2830 		fd->entry[fd->num_entries].stream = route->source_stream;
2831 		fd->entry[fd->num_entries].flags = source_entry->flags;
2832 		fd->entry[fd->num_entries].length = source_entry->length;
2833 		fd->entry[fd->num_entries].pixelcode = source_entry->pixelcode;
2834 
2835 		fd->entry[fd->num_entries].bus.csi2.vc = vc_map[nport];
2836 
2837 		if (source_fd.type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2) {
2838 			fd->entry[fd->num_entries].bus.csi2.dt =
2839 				source_entry->bus.csi2.dt;
2840 		} else {
2841 			const struct ub960_format_info *ub960_fmt;
2842 			struct v4l2_mbus_framefmt *fmt;
2843 
2844 			fmt = v4l2_subdev_state_get_format(state, pad,
2845 							   route->source_stream);
2846 
2847 			if (!fmt) {
2848 				ret = -EINVAL;
2849 				goto out_unlock;
2850 			}
2851 
2852 			ub960_fmt = ub960_find_format(fmt->code);
2853 			if (!ub960_fmt) {
2854 				dev_err(dev, "Unable to find format\n");
2855 				ret = -EINVAL;
2856 				goto out_unlock;
2857 			}
2858 
2859 			fd->entry[fd->num_entries].bus.csi2.dt =
2860 				ub960_fmt->datatype;
2861 		}
2862 
2863 		fd->num_entries++;
2864 	}
2865 
2866 out_unlock:
2867 	v4l2_subdev_unlock_state(state);
2868 
2869 	return ret;
2870 }
2871 
2872 static int ub960_set_fmt(struct v4l2_subdev *sd,
2873 			 struct v4l2_subdev_state *state,
2874 			 struct v4l2_subdev_format *format)
2875 {
2876 	struct ub960_data *priv = sd_to_ub960(sd);
2877 	struct v4l2_mbus_framefmt *fmt;
2878 
2879 	if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE && priv->streaming)
2880 		return -EBUSY;
2881 
2882 	/* No transcoding, source and sink formats must match. */
2883 	if (ub960_pad_is_source(priv, format->pad))
2884 		return v4l2_subdev_get_fmt(sd, state, format);
2885 
2886 	/*
2887 	 * Default to the first format if the requested media bus code isn't
2888 	 * supported.
2889 	 */
2890 	if (!ub960_find_format(format->format.code))
2891 		format->format.code = ub960_formats[0].code;
2892 
2893 	fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream);
2894 	if (!fmt)
2895 		return -EINVAL;
2896 
2897 	*fmt = format->format;
2898 
2899 	fmt = v4l2_subdev_state_get_opposite_stream_format(state, format->pad,
2900 							   format->stream);
2901 	if (!fmt)
2902 		return -EINVAL;
2903 
2904 	*fmt = format->format;
2905 
2906 	return 0;
2907 }
2908 
2909 static int ub960_init_state(struct v4l2_subdev *sd,
2910 			    struct v4l2_subdev_state *state)
2911 {
2912 	struct ub960_data *priv = sd_to_ub960(sd);
2913 
2914 	struct v4l2_subdev_route routes[] = {
2915 		{
2916 			.sink_pad = 0,
2917 			.sink_stream = 0,
2918 			.source_pad = priv->hw_data->num_rxports,
2919 			.source_stream = 0,
2920 			.flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE,
2921 		},
2922 	};
2923 
2924 	struct v4l2_subdev_krouting routing = {
2925 		.num_routes = ARRAY_SIZE(routes),
2926 		.routes = routes,
2927 	};
2928 
2929 	return _ub960_set_routing(sd, state, &routing);
2930 }
2931 
/* Pad-level subdev operations (routing, formats, streams, frame desc). */
static const struct v4l2_subdev_pad_ops ub960_pad_ops = {
	.enable_streams = ub960_enable_streams,
	.disable_streams = ub960_disable_streams,

	.set_routing = ub960_set_routing,
	.get_frame_desc = ub960_get_frame_desc,

	.get_fmt = v4l2_subdev_get_fmt,
	.set_fmt = ub960_set_fmt,
};
2942 
/*
 * VIDIOC_LOG_STATUS handler: dump the device ID, per-TX-port CSI counters,
 * and per-RX-port lock/frequency/error status, strobe, EQ and back-channel
 * GPIO configuration to the kernel log. Read errors of individual registers
 * are ignored; stale values may then be printed.
 */
static int ub960_log_status(struct v4l2_subdev *sd)
{
	struct ub960_data *priv = sd_to_ub960(sd);
	struct device *dev = &priv->client->dev;
	struct v4l2_subdev_state *state;
	unsigned int nport;
	unsigned int i;
	u16 v16 = 0;
	u8 v = 0;
	u8 id[UB960_SR_FPD3_RX_ID_LEN];

	state = v4l2_subdev_lock_and_get_active_state(sd);

	for (i = 0; i < sizeof(id); i++)
		ub960_read(priv, UB960_SR_FPD3_RX_ID(i), &id[i]);

	dev_info(dev, "ID '%.*s'\n", (int)sizeof(id), id);

	for (nport = 0; nport < priv->hw_data->num_txports; nport++) {
		struct ub960_txport *txport = priv->txports[nport];

		dev_info(dev, "TX %u\n", nport);

		if (!txport) {
			dev_info(dev, "\tNot initialized\n");
			continue;
		}

		ub960_txport_read(priv, nport, UB960_TR_CSI_STS, &v);
		dev_info(dev, "\tsync %u, pass %u\n", v & (u8)BIT(1),
			 v & (u8)BIT(0));

		ub960_read16(priv, UB960_SR_CSI_FRAME_COUNT_HI(nport), &v16);
		dev_info(dev, "\tframe counter %u\n", v16);

		ub960_read16(priv, UB960_SR_CSI_FRAME_ERR_COUNT_HI(nport), &v16);
		dev_info(dev, "\tframe error counter %u\n", v16);

		ub960_read16(priv, UB960_SR_CSI_LINE_COUNT_HI(nport), &v16);
		dev_info(dev, "\tline counter %u\n", v16);

		ub960_read16(priv, UB960_SR_CSI_LINE_ERR_COUNT_HI(nport), &v16);
		dev_info(dev, "\tline error counter %u\n", v16);
	}

	for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
		struct ub960_rxport *rxport = priv->rxports[nport];
		u8 eq_level;
		s8 strobe_pos;
		unsigned int i;

		dev_info(dev, "RX %u\n", nport);

		if (!rxport) {
			dev_info(dev, "\tNot initialized\n");
			continue;
		}

		ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1, &v);

		if (v & UB960_RR_RX_PORT_STS1_LOCK_STS)
			dev_info(dev, "\tLocked\n");
		else
			dev_info(dev, "\tNot locked\n");

		dev_info(dev, "\trx_port_sts1 %#02x\n", v);
		ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2, &v);
		dev_info(dev, "\trx_port_sts2 %#02x\n", v);

		ub960_rxport_read16(priv, nport, UB960_RR_RX_FREQ_HIGH, &v16);
		dev_info(dev, "\tlink freq %llu Hz\n", (v16 * 1000000ULL) >> 8);

		ub960_rxport_read16(priv, nport, UB960_RR_RX_PAR_ERR_HI, &v16);
		dev_info(dev, "\tparity errors %u\n", v16);

		ub960_rxport_read16(priv, nport, UB960_RR_LINE_COUNT_HI, &v16);
		dev_info(dev, "\tlines per frame %u\n", v16);

		ub960_rxport_read16(priv, nport, UB960_RR_LINE_LEN_1, &v16);
		dev_info(dev, "\tbytes per line %u\n", v16);

		ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER, &v);
		dev_info(dev, "\tcsi_err_counter %u\n", v);

		/* Strobe */

		ub960_read(priv, UB960_XR_AEQ_CTL1, &v);

		dev_info(dev, "\t%s strobe\n",
			 (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) ? "Adaptive" :
								  "Manual");

		if (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) {
			ub960_read(priv, UB960_XR_SFILTER_CFG, &v);

			dev_info(dev, "\tStrobe range [%d, %d]\n",
				 ((v >> UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT) & 0xf) - 7,
				 ((v >> UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT) & 0xf) - 7);
		}

		ub960_rxport_get_strobe_pos(priv, nport, &strobe_pos);

		dev_info(dev, "\tStrobe pos %d\n", strobe_pos);

		/* EQ */

		ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v);

		dev_info(dev, "\t%s EQ\n",
			 (v & UB960_RR_AEQ_BYPASS_ENABLE) ? "Manual" :
							    "Adaptive");

		if (!(v & UB960_RR_AEQ_BYPASS_ENABLE)) {
			ub960_rxport_read(priv, nport, UB960_RR_AEQ_MIN_MAX, &v);

			dev_info(dev, "\tEQ range [%u, %u]\n",
				 (v >> UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT) & 0xf,
				 (v >> UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT) & 0xf);
		}

		if (ub960_rxport_get_eq_level(priv, nport, &eq_level) == 0)
			dev_info(dev, "\tEQ level %u\n", eq_level);

		/* GPIOs */
		for (i = 0; i < UB960_NUM_BC_GPIOS; i++) {
			u8 ctl_reg;
			u8 ctl_shift;

			/* Two GPIO mode fields (4 bits each) per register */
			ctl_reg = UB960_RR_BC_GPIO_CTL(i / 2);
			ctl_shift = (i % 2) * 4;

			ub960_rxport_read(priv, nport, ctl_reg, &v);

			dev_info(dev, "\tGPIO%u: mode %u\n", i,
				 (v >> ctl_shift) & 0xf);
		}
	}

	v4l2_subdev_unlock_state(state);

	return 0;
}
3085 
/* Core subdev operations: status dump and V4L2 event (un)subscription. */
static const struct v4l2_subdev_core_ops ub960_subdev_core_ops = {
	.log_status = ub960_log_status,
	.subscribe_event = v4l2_ctrl_subdev_subscribe_event,
	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
3091 
/* Internal ops: initialize the subdev state with the default routing. */
static const struct v4l2_subdev_internal_ops ub960_internal_ops = {
	.init_state = ub960_init_state,
};
3095 
/* Top-level subdev operations table. */
static const struct v4l2_subdev_ops ub960_subdev_ops = {
	.core = &ub960_subdev_core_ops,
	.pad = &ub960_pad_ops,
};
3100 
/* Media entity operations, all delegated to the generic subdev helpers. */
static const struct media_entity_operations ub960_entity_ops = {
	.get_fwnode_pad = v4l2_subdev_get_fwnode_pad_1_to_1,
	.link_validate = v4l2_subdev_link_validate,
	.has_pad_interdep = v4l2_subdev_has_pad_interdep,
};
3106 
3107 /* -----------------------------------------------------------------------------
3108  * Core
3109  */
3110 
/*
 * Threaded interrupt handler (also called from the poll worker with irq=0).
 * Reads the global interrupt status and dispatches to the per-TX and per-RX
 * port event handlers for every port with a pending interrupt bit.
 */
static irqreturn_t ub960_handle_events(int irq, void *arg)
{
	struct ub960_data *priv = arg;
	unsigned int i;
	u8 int_sts;
	u8 fwd_sts;
	int ret;

	ret = ub960_read(priv, UB960_SR_INTERRUPT_STS, &int_sts);
	if (ret || !int_sts)
		return IRQ_NONE;

	dev_dbg(&priv->client->dev, "INTERRUPT_STS %x\n", int_sts);

	ret = ub960_read(priv, UB960_SR_FWD_STS, &fwd_sts);
	if (ret)
		return IRQ_NONE;

	dev_dbg(&priv->client->dev, "FWD_STS %#02x\n", fwd_sts);

	for (i = 0; i < priv->hw_data->num_txports; i++) {
		if (int_sts & UB960_SR_INTERRUPT_STS_IS_CSI_TX(i))
			ub960_csi_handle_events(priv, i);
	}

	for (i = 0; i < priv->hw_data->num_rxports; i++) {
		if (!priv->rxports[i])
			continue;

		if (int_sts & UB960_SR_INTERRUPT_STS_IS_RX(i))
			ub960_rxport_handle_events(priv, i);
	}

	return IRQ_HANDLED;
}
3146 
3147 static void ub960_handler_work(struct work_struct *work)
3148 {
3149 	struct delayed_work *dwork = to_delayed_work(work);
3150 	struct ub960_data *priv =
3151 		container_of(dwork, struct ub960_data, poll_work);
3152 
3153 	ub960_handle_events(0, priv);
3154 
3155 	schedule_delayed_work(&priv->poll_work,
3156 			      msecs_to_jiffies(UB960_POLL_TIME_MS));
3157 }
3158 
3159 static void ub960_txport_free_ports(struct ub960_data *priv)
3160 {
3161 	unsigned int nport;
3162 
3163 	for (nport = 0; nport < priv->hw_data->num_txports; nport++) {
3164 		struct ub960_txport *txport = priv->txports[nport];
3165 
3166 		if (!txport)
3167 			continue;
3168 
3169 		kfree(txport);
3170 		priv->txports[nport] = NULL;
3171 	}
3172 }
3173 
3174 static void ub960_rxport_free_ports(struct ub960_data *priv)
3175 {
3176 	unsigned int nport;
3177 
3178 	for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
3179 		struct ub960_rxport *rxport = priv->rxports[nport];
3180 
3181 		if (!rxport)
3182 			continue;
3183 
3184 		fwnode_handle_put(rxport->source.ep_fwnode);
3185 		fwnode_handle_put(rxport->ser.fwnode);
3186 
3187 		kfree(rxport);
3188 		priv->rxports[nport] = NULL;
3189 	}
3190 }
3191 
/*
 * Parse the properties of one "link@N" DT node for an RX port: CDR mode,
 * FPD-Link RX mode, strobe position and EQ level tuning, the serializer's
 * i2c alias, and the "serializer" child node.
 *
 * On success a reference to the serializer child node is held in
 * rxport->ser.fwnode; the caller owns it and must put it eventually (see
 * ub960_rxport_free_ports()).
 *
 * Returns 0 on success or a negative error code.
 */
static int
ub960_parse_dt_rxport_link_properties(struct ub960_data *priv,
				      struct fwnode_handle *link_fwnode,
				      struct ub960_rxport *rxport)
{
	struct device *dev = &priv->client->dev;
	unsigned int nport = rxport->nport;
	u32 rx_mode;
	u32 cdr_mode;
	s32 strobe_pos;
	u32 eq_level;
	u32 ser_i2c_alias;
	int ret;

	/* 'ti,cdr-mode' is optional; default to FPD-Link III clock recovery */
	cdr_mode = RXPORT_CDR_FPD3;

	ret = fwnode_property_read_u32(link_fwnode, "ti,cdr-mode", &cdr_mode);
	/* -EINVAL from the read means the property is absent; keep default */
	if (ret < 0 && ret != -EINVAL) {
		dev_err(dev, "rx%u: failed to read '%s': %d\n", nport,
			"ti,cdr-mode", ret);
		return ret;
	}

	if (cdr_mode > RXPORT_CDR_LAST) {
		dev_err(dev, "rx%u: bad 'ti,cdr-mode' %u\n", nport, cdr_mode);
		return -EINVAL;
	}

	/* FPD-Link 4 CDR is only available on FPD-Link IV capable parts */
	if (!priv->hw_data->is_fpdlink4 && cdr_mode == RXPORT_CDR_FPD4) {
		dev_err(dev, "rx%u: FPD-Link 4 CDR not supported\n", nport);
		return -EINVAL;
	}

	rxport->cdr_mode = cdr_mode;

	/* 'ti,rx-mode' is mandatory */
	ret = fwnode_property_read_u32(link_fwnode, "ti,rx-mode", &rx_mode);
	if (ret < 0) {
		dev_err(dev, "rx%u: failed to read '%s': %d\n", nport,
			"ti,rx-mode", ret);
		return ret;
	}

	if (rx_mode > RXPORT_MODE_LAST) {
		dev_err(dev, "rx%u: bad 'ti,rx-mode' %u\n", nport, rx_mode);
		return -EINVAL;
	}

	/* RAW12 modes are valid DT values but not implemented by this driver */
	switch (rx_mode) {
	case RXPORT_MODE_RAW12_HF:
	case RXPORT_MODE_RAW12_LF:
		dev_err(dev, "rx%u: unsupported 'ti,rx-mode' %u\n", nport,
			rx_mode);
		return -EINVAL;
	default:
		break;
	}

	rxport->rx_mode = rx_mode;

	/* EQ & Strobe related */

	/* Defaults */
	rxport->eq.manual_eq = false;
	rxport->eq.aeq.eq_level_min = UB960_MIN_EQ_LEVEL;
	rxport->eq.aeq.eq_level_max = UB960_MAX_EQ_LEVEL;

	/*
	 * NOTE(review): strobe_pos is s32 but is passed to a u32 reader;
	 * negative DT values arrive via two's-complement u32 — presumably
	 * intentional, verify against the binding.
	 */
	ret = fwnode_property_read_u32(link_fwnode, "ti,strobe-pos",
				       &strobe_pos);
	if (ret) {
		/* -EINVAL means the optional property is absent */
		if (ret != -EINVAL) {
			dev_err(dev, "rx%u: failed to read '%s': %d\n", nport,
				"ti,strobe-pos", ret);
			return ret;
		}
	} else {
		if (strobe_pos < UB960_MIN_MANUAL_STROBE_POS ||
		    strobe_pos > UB960_MAX_MANUAL_STROBE_POS) {
			dev_err(dev, "rx%u: illegal 'strobe-pos' value: %d\n",
				nport, strobe_pos);
			return -EINVAL;
		}

		/* NOTE: ignored unless global manual strobe pos is also set */
		rxport->eq.strobe_pos = strobe_pos;
		if (!priv->strobe.manual)
			dev_warn(dev,
				 "rx%u: 'ti,strobe-pos' ignored as 'ti,manual-strobe' not set\n",
				 nport);
	}

	/* Optional manual EQ level; presence enables manual EQ for the port */
	ret = fwnode_property_read_u32(link_fwnode, "ti,eq-level", &eq_level);
	if (ret) {
		if (ret != -EINVAL) {
			dev_err(dev, "rx%u: failed to read '%s': %d\n", nport,
				"ti,eq-level", ret);
			return ret;
		}
	} else {
		if (eq_level > UB960_MAX_EQ_LEVEL) {
			dev_err(dev, "rx%u: illegal 'ti,eq-level' value: %d\n",
				nport, eq_level);
			return -EINVAL;
		}

		rxport->eq.manual_eq = true;
		rxport->eq.manual.eq_level = eq_level;
	}

	/* Mandatory i2c alias used to reach the remote serializer */
	ret = fwnode_property_read_u32(link_fwnode, "i2c-alias",
				       &ser_i2c_alias);
	if (ret) {
		dev_err(dev, "rx%u: failed to read '%s': %d\n", nport,
			"i2c-alias", ret);
		return ret;
	}
	rxport->ser.alias = ser_i2c_alias;

	/* Takes a reference; released in ub960_rxport_free_ports() */
	rxport->ser.fwnode = fwnode_get_named_child_node(link_fwnode, "serializer");
	if (!rxport->ser.fwnode) {
		dev_err(dev, "rx%u: missing 'serializer' node\n", nport);
		return -EINVAL;
	}

	return 0;
}
3317 
/*
 * Parse the endpoint for an RX port: take a reference to the remote
 * (source) endpoint and, for RAW parallel modes, derive the LV/FV polarity
 * bits from the endpoint's h/vsync flags.
 *
 * On success a reference is held in rxport->source.ep_fwnode; the caller
 * owns it and must put it eventually (see ub960_rxport_free_ports()).
 */
static int ub960_parse_dt_rxport_ep_properties(struct ub960_data *priv,
					       struct fwnode_handle *ep_fwnode,
					       struct ub960_rxport *rxport)
{
	struct device *dev = &priv->client->dev;
	struct v4l2_fwnode_endpoint vep = {};
	unsigned int nport = rxport->nport;
	bool hsync_hi;
	bool vsync_hi;
	int ret;

	rxport->source.ep_fwnode = fwnode_graph_get_remote_endpoint(ep_fwnode);
	if (!rxport->source.ep_fwnode) {
		dev_err(dev, "rx%u: no remote endpoint\n", nport);
		return -ENODEV;
	}

	/* We currently have properties only for RAW modes */

	switch (rxport->rx_mode) {
	case RXPORT_MODE_RAW10:
	case RXPORT_MODE_RAW12_HF:
	case RXPORT_MODE_RAW12_LF:
		break;
	default:
		/* Non-RAW modes: nothing more to parse, keep the reference */
		return 0;
	}

	vep.bus_type = V4L2_MBUS_PARALLEL;
	ret = v4l2_fwnode_endpoint_parse(ep_fwnode, &vep);
	if (ret) {
		dev_err(dev, "rx%u: failed to parse endpoint data\n", nport);
		goto err_put_source_ep_fwnode;
	}

	hsync_hi = !!(vep.bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH);
	vsync_hi = !!(vep.bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH);

	/* LineValid and FrameValid are inverse to the h/vsync active */
	rxport->lv_fv_pol = (hsync_hi ? UB960_RR_PORT_CONFIG2_LV_POL_LOW : 0) |
			    (vsync_hi ? UB960_RR_PORT_CONFIG2_FV_POL_LOW : 0);

	return 0;

err_put_source_ep_fwnode:
	/* Drop the remote endpoint reference taken above on failure */
	fwnode_handle_put(rxport->source.ep_fwnode);
	return ret;
}
3366 
/*
 * Allocate and populate the rxport structure for one RX port from its
 * "link@N" node and its endpoint. The optional per-port VPOC regulator is
 * looked up here as well ("vpoc<N>" supply).
 *
 * On failure all references and the allocation are released and
 * priv->rxports[nport] is left NULL.
 */
static int ub960_parse_dt_rxport(struct ub960_data *priv, unsigned int nport,
				 struct fwnode_handle *link_fwnode,
				 struct fwnode_handle *ep_fwnode)
{
	static const char *vpoc_names[UB960_MAX_RX_NPORTS] = {
		"vpoc0", "vpoc1", "vpoc2", "vpoc3"
	};
	struct device *dev = &priv->client->dev;
	struct ub960_rxport *rxport;
	int ret;

	rxport = kzalloc(sizeof(*rxport), GFP_KERNEL);
	if (!rxport)
		return -ENOMEM;

	priv->rxports[nport] = rxport;

	rxport->nport = nport;
	rxport->priv = priv;

	/* On success this holds a reference in rxport->ser.fwnode */
	ret = ub960_parse_dt_rxport_link_properties(priv, link_fwnode, rxport);
	if (ret)
		goto err_free_rxport;

	rxport->vpoc = devm_regulator_get_optional(dev, vpoc_names[nport]);
	if (IS_ERR(rxport->vpoc)) {
		ret = PTR_ERR(rxport->vpoc);
		if (ret == -ENODEV) {
			/* No VPOC supply described; that is fine */
			rxport->vpoc = NULL;
		} else {
			dev_err(dev, "rx%u: failed to get VPOC supply: %d\n",
				nport, ret);
			goto err_put_remote_fwnode;
		}
	}

	/* Cleans up its own endpoint reference on failure */
	ret = ub960_parse_dt_rxport_ep_properties(priv, ep_fwnode, rxport);
	if (ret)
		goto err_put_remote_fwnode;

	return 0;

err_put_remote_fwnode:
	fwnode_handle_put(rxport->ser.fwnode);
err_free_rxport:
	priv->rxports[nport] = NULL;
	kfree(rxport);
	return ret;
}
3416 
3417 static struct fwnode_handle *
3418 ub960_fwnode_get_link_by_regs(struct fwnode_handle *links_fwnode,
3419 			      unsigned int nport)
3420 {
3421 	struct fwnode_handle *link_fwnode;
3422 	int ret;
3423 
3424 	fwnode_for_each_child_node(links_fwnode, link_fwnode) {
3425 		u32 link_num;
3426 
3427 		if (!str_has_prefix(fwnode_get_name(link_fwnode), "link@"))
3428 			continue;
3429 
3430 		ret = fwnode_property_read_u32(link_fwnode, "reg", &link_num);
3431 		if (ret) {
3432 			fwnode_handle_put(link_fwnode);
3433 			return NULL;
3434 		}
3435 
3436 		if (nport == link_num)
3437 			return link_fwnode;
3438 	}
3439 
3440 	return NULL;
3441 }
3442 
/*
 * Parse the "links" DT node and all RX ports that have both a "link@N"
 * child and a matching endpoint. Ports missing either are silently
 * skipped; a parse failure on a present port aborts.
 *
 * Also initializes the global strobe defaults and the manual-strobe flag.
 */
static int ub960_parse_dt_rxports(struct ub960_data *priv)
{
	struct device *dev = &priv->client->dev;
	struct fwnode_handle *links_fwnode;
	unsigned int nport;
	int ret;

	links_fwnode = fwnode_get_named_child_node(dev_fwnode(dev), "links");
	if (!links_fwnode) {
		dev_err(dev, "'links' node missing\n");
		return -ENODEV;
	}

	/* Defaults, recommended by TI */
	priv->strobe.min = 2;
	priv->strobe.max = 3;

	priv->strobe.manual = fwnode_property_read_bool(links_fwnode, "ti,manual-strobe");

	for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
		struct fwnode_handle *link_fwnode;
		struct fwnode_handle *ep_fwnode;

		link_fwnode = ub960_fwnode_get_link_by_regs(links_fwnode, nport);
		if (!link_fwnode)
			continue;

		/* Port N's endpoint lives at graph port N, endpoint 0 */
		ep_fwnode = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev),
							    nport, 0, 0);
		if (!ep_fwnode) {
			fwnode_handle_put(link_fwnode);
			continue;
		}

		ret = ub960_parse_dt_rxport(priv, nport, link_fwnode,
					    ep_fwnode);

		fwnode_handle_put(link_fwnode);
		fwnode_handle_put(ep_fwnode);

		if (ret) {
			dev_err(dev, "rx%u: failed to parse RX port\n", nport);
			goto err_put_links;
		}
	}

	fwnode_handle_put(links_fwnode);

	return 0;

err_put_links:
	fwnode_handle_put(links_fwnode);

	return ret;
}
3498 
3499 static int ub960_parse_dt_txports(struct ub960_data *priv)
3500 {
3501 	struct device *dev = &priv->client->dev;
3502 	u32 nport;
3503 	int ret;
3504 
3505 	for (nport = 0; nport < priv->hw_data->num_txports; nport++) {
3506 		unsigned int port = nport + priv->hw_data->num_rxports;
3507 		struct fwnode_handle *ep_fwnode;
3508 
3509 		ep_fwnode = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev),
3510 							    port, 0, 0);
3511 		if (!ep_fwnode)
3512 			continue;
3513 
3514 		ret = ub960_parse_dt_txport(priv, ep_fwnode, nport);
3515 
3516 		fwnode_handle_put(ep_fwnode);
3517 
3518 		if (ret)
3519 			break;
3520 	}
3521 
3522 	return 0;
3523 }
3524 
/*
 * Parse all DT data for the deserializer: RX ports first, then TX ports.
 * If TX port parsing fails, the already-allocated RX ports are released.
 */
static int ub960_parse_dt(struct ub960_data *priv)
{
	int ret;

	ret = ub960_parse_dt_rxports(priv);
	if (ret)
		return ret;

	ret = ub960_parse_dt_txports(priv);
	if (ret) {
		ub960_rxport_free_ports(priv);
		return ret;
	}

	return 0;
}
3544 
/*
 * Async notifier .bound callback: link the newly bound source subdev to
 * the matching RX port's sink pad with an enabled, immutable media link.
 *
 * The trailing loop only reports (via dev_dbg) whether more subdevs are
 * still expected; the return value is 0 either way.
 */
static int ub960_notify_bound(struct v4l2_async_notifier *notifier,
			      struct v4l2_subdev *subdev,
			      struct v4l2_async_connection *asd)
{
	struct ub960_data *priv = sd_to_ub960(notifier->sd);
	struct ub960_rxport *rxport = to_ub960_asd(asd)->rxport;
	struct device *dev = &priv->client->dev;
	u8 nport = rxport->nport;
	unsigned int i;
	int ret;

	/* Resolve the source pad index on the remote subdev */
	ret = media_entity_get_fwnode_pad(&subdev->entity,
					  rxport->source.ep_fwnode,
					  MEDIA_PAD_FL_SOURCE);
	if (ret < 0) {
		dev_err(dev, "Failed to find pad for %s\n", subdev->name);
		return ret;
	}

	rxport->source.sd = subdev;
	rxport->source.pad = ret;

	ret = media_create_pad_link(&rxport->source.sd->entity,
				    rxport->source.pad, &priv->sd.entity, nport,
				    MEDIA_LNK_FL_ENABLED |
					    MEDIA_LNK_FL_IMMUTABLE);
	if (ret) {
		dev_err(dev, "Unable to link %s:%u -> %s:%u\n",
			rxport->source.sd->name, rxport->source.pad,
			priv->sd.name, nport);
		return ret;
	}

	for (i = 0; i < priv->hw_data->num_rxports; i++) {
		if (priv->rxports[i] && !priv->rxports[i]->source.sd) {
			dev_dbg(dev, "Waiting for more subdevs to be bound\n");
			return 0;
		}
	}

	return 0;
}
3587 
3588 static void ub960_notify_unbind(struct v4l2_async_notifier *notifier,
3589 				struct v4l2_subdev *subdev,
3590 				struct v4l2_async_connection *asd)
3591 {
3592 	struct ub960_rxport *rxport = to_ub960_asd(asd)->rxport;
3593 
3594 	rxport->source.sd = NULL;
3595 }
3596 
/* Async notifier callbacks for the remote source subdevs. */
static const struct v4l2_async_notifier_operations ub960_notify_ops = {
	.bound = ub960_notify_bound,
	.unbind = ub960_notify_unbind,
};
3601 
/*
 * Set up and register the async notifier with one fwnode connection per
 * populated RX port. On any failure the notifier is cleaned up before
 * returning, so the caller has nothing to undo.
 */
static int ub960_v4l2_notifier_register(struct ub960_data *priv)
{
	struct device *dev = &priv->client->dev;
	unsigned int i;
	int ret;

	v4l2_async_subdev_nf_init(&priv->notifier, &priv->sd);

	for (i = 0; i < priv->hw_data->num_rxports; i++) {
		struct ub960_rxport *rxport = priv->rxports[i];
		struct ub960_asd *asd;

		if (!rxport)
			continue;

		asd = v4l2_async_nf_add_fwnode(&priv->notifier,
					       rxport->source.ep_fwnode,
					       struct ub960_asd);
		if (IS_ERR(asd)) {
			dev_err(dev, "Failed to add subdev for source %u: %pe",
				i, asd);
			v4l2_async_nf_cleanup(&priv->notifier);
			return PTR_ERR(asd);
		}

		/* Back-pointer used by the bound/unbind callbacks */
		asd->rxport = rxport;
	}

	priv->notifier.ops = &ub960_notify_ops;

	ret = v4l2_async_nf_register(&priv->notifier);
	if (ret) {
		dev_err(dev, "Failed to register subdev_notifier");
		v4l2_async_nf_cleanup(&priv->notifier);
		return ret;
	}

	return 0;
}
3641 
/* Tear down the notifier set up in ub960_v4l2_notifier_register(). */
static void ub960_v4l2_notifier_unregister(struct ub960_data *priv)
{
	v4l2_async_nf_unregister(&priv->notifier);
	v4l2_async_nf_cleanup(&priv->notifier);
}
3647 
/*
 * Create and register the V4L2 subdev: control handler (LINK_FREQ only),
 * media pads (RX sinks followed by TX sources), subdev state, the async
 * notifier, and finally async registration. Each step is unwound in
 * reverse order on failure.
 */
static int ub960_create_subdev(struct ub960_data *priv)
{
	struct device *dev = &priv->client->dev;
	unsigned int i;
	int ret;

	v4l2_i2c_subdev_init(&priv->sd, priv->client, &ub960_subdev_ops);
	priv->sd.internal_ops = &ub960_internal_ops;

	v4l2_ctrl_handler_init(&priv->ctrl_handler, 1);
	priv->sd.ctrl_handler = &priv->ctrl_handler;

	v4l2_ctrl_new_int_menu(&priv->ctrl_handler, NULL, V4L2_CID_LINK_FREQ,
			       ARRAY_SIZE(priv->tx_link_freq) - 1, 0,
			       priv->tx_link_freq);

	/* Control creation errors accumulate in the handler; check once */
	if (priv->ctrl_handler.error) {
		ret = priv->ctrl_handler.error;
		goto err_free_ctrl;
	}

	priv->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
			  V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_STREAMS;
	priv->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
	priv->sd.entity.ops = &ub960_entity_ops;

	for (i = 0; i < priv->hw_data->num_rxports + priv->hw_data->num_txports; i++) {
		priv->pads[i].flags = ub960_pad_is_sink(priv, i) ?
					      MEDIA_PAD_FL_SINK :
					      MEDIA_PAD_FL_SOURCE;
	}

	ret = media_entity_pads_init(&priv->sd.entity,
				     priv->hw_data->num_rxports +
					     priv->hw_data->num_txports,
				     priv->pads);
	if (ret)
		goto err_free_ctrl;

	/* Share the control handler's lock for the subdev state */
	priv->sd.state_lock = priv->sd.ctrl_handler->lock;

	ret = v4l2_subdev_init_finalize(&priv->sd);
	if (ret)
		goto err_entity_cleanup;

	ret = ub960_v4l2_notifier_register(priv);
	if (ret) {
		dev_err(dev, "v4l2 subdev notifier register failed: %d\n", ret);
		goto err_subdev_cleanup;
	}

	ret = v4l2_async_register_subdev(&priv->sd);
	if (ret) {
		dev_err(dev, "v4l2_async_register_subdev error: %d\n", ret);
		goto err_unreg_notif;
	}

	return 0;

err_unreg_notif:
	ub960_v4l2_notifier_unregister(priv);
err_subdev_cleanup:
	v4l2_subdev_cleanup(&priv->sd);
err_entity_cleanup:
	media_entity_cleanup(&priv->sd.entity);
err_free_ctrl:
	v4l2_ctrl_handler_free(&priv->ctrl_handler);

	return ret;
}
3718 
/* Undo ub960_create_subdev(): unregister and release all v4l2 resources. */
static void ub960_destroy_subdev(struct ub960_data *priv)
{
	ub960_v4l2_notifier_unregister(priv);
	v4l2_async_unregister_subdev(&priv->sd);

	v4l2_subdev_cleanup(&priv->sd);

	media_entity_cleanup(&priv->sd.entity);
	v4l2_ctrl_handler_free(&priv->ctrl_handler);
}
3729 
/* 8-bit address / 8-bit value i2c regmap covering registers 0x00-0xff. */
static const struct regmap_config ub960_regmap_config = {
	.name = "ds90ub960",

	.reg_bits = 8,
	.val_bits = 8,

	.max_register = 0xff,

	/*
	 * We do locking in the driver to cover the TX/RX port selection and the
	 * indirect register access.
	 */
	.disable_locking = true,
};
3744 
/*
 * Soft-reset the chip and wait for the reset bit to self-clear.
 *
 * @reset_regs selects which reset bit is used: DIGITAL_RESET1 when true,
 * DIGITAL_RESET0 otherwise (per the parameter name, RESET1 also resets the
 * registers). A poll timeout is only logged; the function cannot fail.
 */
static void ub960_reset(struct ub960_data *priv, bool reset_regs)
{
	struct device *dev = &priv->client->dev;
	unsigned int v;
	int ret;
	u8 bit;

	bit = reset_regs ? UB960_SR_RESET_DIGITAL_RESET1 :
			   UB960_SR_RESET_DIGITAL_RESET0;

	ub960_write(priv, UB960_SR_RESET, bit);

	/*
	 * Raw regmap access below bypasses the ub960_read/write helpers, and
	 * regmap's own locking is disabled, so take the driver's reg_lock.
	 */
	mutex_lock(&priv->reg_lock);

	ret = regmap_read_poll_timeout(priv->regmap, UB960_SR_RESET, v,
				       (v & bit) == 0, 2000, 100000);

	mutex_unlock(&priv->reg_lock);

	if (ret)
		dev_err(dev, "reset failed: %d\n", ret);
}
3767 
/*
 * Acquire all devm-managed hardware resources: the i2c regmap, the VDDIO
 * supply, the optional powerdown GPIO and the reference clock. Nothing is
 * enabled here; see ub960_enable_core_hw().
 */
static int ub960_get_hw_resources(struct ub960_data *priv)
{
	struct device *dev = &priv->client->dev;

	priv->regmap = devm_regmap_init_i2c(priv->client, &ub960_regmap_config);
	if (IS_ERR(priv->regmap))
		return PTR_ERR(priv->regmap);

	priv->vddio = devm_regulator_get(dev, "vddio");
	if (IS_ERR(priv->vddio))
		return dev_err_probe(dev, PTR_ERR(priv->vddio),
				     "cannot get VDDIO regulator\n");

	/* get power-down pin from DT */
	priv->pd_gpio =
		devm_gpiod_get_optional(dev, "powerdown", GPIOD_OUT_HIGH);
	if (IS_ERR(priv->pd_gpio))
		return dev_err_probe(dev, PTR_ERR(priv->pd_gpio),
				     "Cannot get powerdown GPIO\n");

	priv->refclk = devm_clk_get(dev, "refclk");
	if (IS_ERR(priv->refclk))
		return dev_err_probe(dev, PTR_ERR(priv->refclk),
				     "Cannot get REFCLK\n");

	return 0;
}
3795 
/*
 * Power up and initialize the core hardware: enable VDDIO and the refclk,
 * pulse the powerdown GPIO (if present), reset the chip, verify register
 * access, disable all RX ports and, on UB9702, release the GPIO lock.
 * Everything is undone in reverse order on failure.
 */
static int ub960_enable_core_hw(struct ub960_data *priv)
{
	struct device *dev = &priv->client->dev;
	u8 rev_mask;
	int ret;
	u8 dev_sts;
	u8 refclk_freq;

	ret = regulator_enable(priv->vddio);
	if (ret)
		return dev_err_probe(dev, ret,
				     "failed to enable VDDIO regulator\n");

	ret = clk_prepare_enable(priv->refclk);
	if (ret) {
		dev_err_probe(dev, ret, "Failed to enable refclk\n");
		goto err_disable_vddio;
	}

	if (priv->pd_gpio) {
		gpiod_set_value_cansleep(priv->pd_gpio, 1);
		/* wait min 2 ms for reset to complete */
		fsleep(2000);
		gpiod_set_value_cansleep(priv->pd_gpio, 0);
		/* wait min 2 ms for power up to finish */
		fsleep(2000);
	}

	/* Full reset, including registers */
	ub960_reset(priv, true);

	/* Runtime check register accessibility */
	ret = ub960_read(priv, UB960_SR_REV_MASK, &rev_mask);
	if (ret) {
		dev_err_probe(dev, ret, "Cannot read first register, abort\n");
		goto err_pd_gpio;
	}

	dev_dbg(dev, "Found %s (rev/mask %#04x)\n", priv->hw_data->model,
		rev_mask);

	ret = ub960_read(priv, UB960_SR_DEVICE_STS, &dev_sts);
	if (ret)
		goto err_pd_gpio;

	ret = ub960_read(priv, UB960_XR_REFCLK_FREQ, &refclk_freq);
	if (ret)
		goto err_pd_gpio;

	/* BIT(4) of DEVICE_STS reports refclk validity (see debug output) */
	dev_dbg(dev, "refclk valid %u freq %u MHz (clk fw freq %lu MHz)\n",
		!!(dev_sts & BIT(4)), refclk_freq,
		clk_get_rate(priv->refclk) / 1000000);

	/* Disable all RX ports by default */
	ret = ub960_write(priv, UB960_SR_RX_PORT_CTL, 0);
	if (ret)
		goto err_pd_gpio;

	/* release GPIO lock */
	if (priv->hw_data->is_ub9702) {
		ret = ub960_update_bits(priv, UB960_SR_RESET,
					UB960_SR_RESET_GPIO_LOCK_RELEASE,
					UB960_SR_RESET_GPIO_LOCK_RELEASE);
		if (ret)
			goto err_pd_gpio;
	}

	return 0;

err_pd_gpio:
	gpiod_set_value_cansleep(priv->pd_gpio, 1);
	clk_disable_unprepare(priv->refclk);
err_disable_vddio:
	regulator_disable(priv->vddio);

	return ret;
}
3872 
/* Power down the core HW, reverse of ub960_enable_core_hw(). */
static void ub960_disable_core_hw(struct ub960_data *priv)
{
	gpiod_set_value_cansleep(priv->pd_gpio, 1);
	clk_disable_unprepare(priv->refclk);
	regulator_disable(priv->vddio);
}
3879 
/*
 * Probe: allocate driver data, acquire and power up the hardware, parse
 * the DT, initialize TX and RX ports, wait for all described RX ports to
 * lock, set up i2c address translation and the remote serializers,
 * register the V4L2 subdev and start the polling work.
 */
static int ub960_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct ub960_data *priv;
	unsigned int port_lock_mask;
	unsigned int port_mask;
	unsigned int nport;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->client = client;

	priv->hw_data = device_get_match_data(dev);

	mutex_init(&priv->reg_lock);

	INIT_DELAYED_WORK(&priv->poll_work, ub960_handler_work);

	/*
	 * Initialize these to invalid values so that the first reg writes will
	 * configure the target.
	 */
	priv->reg_current.indirect_target = 0xff;
	priv->reg_current.rxport = 0xff;
	priv->reg_current.txport = 0xff;

	ret = ub960_get_hw_resources(priv);
	if (ret)
		goto err_mutex_destroy;

	ret = ub960_enable_core_hw(priv);
	if (ret)
		goto err_mutex_destroy;

	ret = ub960_parse_dt(priv);
	if (ret)
		goto err_disable_core_hw;

	ret = ub960_init_tx_ports(priv);
	if (ret)
		goto err_free_ports;

	ret = ub960_rxport_enable_vpocs(priv);
	if (ret)
		goto err_free_ports;

	ret = ub960_init_rx_ports(priv);
	if (ret)
		goto err_disable_vpocs;

	/* Digital reset without clearing the registers just configured */
	ub960_reset(priv, false);

	/* Collect the mask of RX ports that were described in the DT */
	port_mask = 0;

	for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
		struct ub960_rxport *rxport = priv->rxports[nport];

		if (!rxport)
			continue;

		port_mask |= BIT(nport);
	}

	ret = ub960_rxport_wait_locks(priv, port_mask, &port_lock_mask);
	if (ret)
		goto err_disable_vpocs;

	if (port_mask != port_lock_mask) {
		ret = -EIO;
		dev_err_probe(dev, ret, "Failed to lock all RX ports\n");
		goto err_disable_vpocs;
	}

	/*
	 * Clear any errors caused by switching the RX port settings while
	 * probing.
	 */
	ub960_clear_rx_errors(priv);

	ret = ub960_init_atr(priv);
	if (ret)
		goto err_disable_vpocs;

	ret = ub960_rxport_add_serializers(priv);
	if (ret)
		goto err_uninit_atr;

	ret = ub960_create_subdev(priv);
	if (ret)
		goto err_free_sers;

	if (client->irq)
		dev_warn(dev, "irq support not implemented, using polling\n");

	schedule_delayed_work(&priv->poll_work,
			      msecs_to_jiffies(UB960_POLL_TIME_MS));

	return 0;

err_free_sers:
	ub960_rxport_remove_serializers(priv);
err_uninit_atr:
	ub960_uninit_atr(priv);
err_disable_vpocs:
	ub960_rxport_disable_vpocs(priv);
err_free_ports:
	ub960_rxport_free_ports(priv);
	ub960_txport_free_ports(priv);
err_disable_core_hw:
	ub960_disable_core_hw(priv);
err_mutex_destroy:
	mutex_destroy(&priv->reg_lock);
	return ret;
}
3997 
/* Remove: stop polling, then unwind probe in reverse order. */
static void ub960_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct ub960_data *priv = sd_to_ub960(sd);

	/* Stop the poll work before tearing anything down */
	cancel_delayed_work_sync(&priv->poll_work);

	ub960_destroy_subdev(priv);
	ub960_rxport_remove_serializers(priv);
	ub960_uninit_atr(priv);
	ub960_rxport_disable_vpocs(priv);
	ub960_rxport_free_ports(priv);
	ub960_txport_free_ports(priv);
	ub960_disable_core_hw(priv);
	mutex_destroy(&priv->reg_lock);
}
4014 
/* Per-model capabilities: DS90UB960 is FPD-Link III only. */
static const struct ub960_hw_data ds90ub960_hw = {
	.model = "ub960",
	.num_rxports = 4,
	.num_txports = 2,
};

/* DS90UB9702 additionally supports FPD-Link IV links. */
static const struct ub960_hw_data ds90ub9702_hw = {
	.model = "ub9702",
	.num_rxports = 4,
	.num_txports = 2,
	.is_ub9702 = true,
	.is_fpdlink4 = true,
};
4028 
/* I2C device IDs, with the matching hw data stashed in .driver_data. */
static const struct i2c_device_id ub960_id[] = {
	{ "ds90ub960-q1", (kernel_ulong_t)&ds90ub960_hw },
	{ "ds90ub9702-q1", (kernel_ulong_t)&ds90ub9702_hw },
	{}
};
MODULE_DEVICE_TABLE(i2c, ub960_id);
4035 
/* OF compatibles, with the matching hw data in .data. */
static const struct of_device_id ub960_dt_ids[] = {
	{ .compatible = "ti,ds90ub960-q1", .data = &ds90ub960_hw },
	{ .compatible = "ti,ds90ub9702-q1", .data = &ds90ub9702_hw },
	{}
};
MODULE_DEVICE_TABLE(of, ub960_dt_ids);
4042 
/* I2C driver glue; registered via module_i2c_driver(). */
static struct i2c_driver ds90ub960_driver = {
	.probe		= ub960_probe,
	.remove		= ub960_remove,
	.id_table	= ub960_id,
	.driver = {
		.name	= "ds90ub960",
		.of_match_table = ub960_dt_ids,
	},
};
module_i2c_driver(ds90ub960_driver);
4053 
4054 MODULE_LICENSE("GPL");
4055 MODULE_DESCRIPTION("Texas Instruments FPD-Link III/IV Deserializers Driver");
4056 MODULE_AUTHOR("Luca Ceresoli <luca@lucaceresoli.net>");
4057 MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>");
4058 MODULE_IMPORT_NS(I2C_ATR);
4059