xref: /linux/drivers/media/i2c/ds90ub960.c (revision 0707307f039813eed83e9e8d6cd70c02a6e86158)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Driver for the Texas Instruments DS90UB960-Q1 video deserializer
4  *
5  * Copyright (c) 2019 Luca Ceresoli <luca@lucaceresoli.net>
6  * Copyright (c) 2023 Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
7  */
8 
9 /*
10  * (Possible) TODOs:
11  *
12  * - PM for serializer and remote peripherals. We need to manage:
13  *   - VPOC
14  *     - Power domain? Regulator? Somehow any remote device should be able to
15  *       cause the VPOC to be turned on.
16  *   - Link between the deserializer and the serializer
17  *     - Related to VPOC management. We probably always want to turn on the VPOC
18  *       and then enable the link.
19  *   - Serializer's services: i2c, gpios, power
20  *     - The serializer needs to resume before the remote peripherals can
21  *       e.g. use the i2c.
22  *     - How to handle gpios? Reserving a gpio essentially keeps the provider
23  *       (serializer) always powered on.
24  * - Do we need a new bus for the FPD-Link? At the moment the serializers
25  *   are children of the same i2c-adapter where the deserializer resides.
26  * - i2c-atr could be made embeddable instead of allocatable.
27  */
28 
29 #include <linux/bitops.h>
30 #include <linux/cleanup.h>
31 #include <linux/clk.h>
32 #include <linux/delay.h>
33 #include <linux/fwnode.h>
34 #include <linux/gpio/consumer.h>
35 #include <linux/i2c-atr.h>
36 #include <linux/i2c.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/kernel.h>
40 #include <linux/kthread.h>
41 #include <linux/module.h>
42 #include <linux/mutex.h>
43 #include <linux/property.h>
44 #include <linux/regmap.h>
45 #include <linux/regulator/consumer.h>
46 #include <linux/slab.h>
47 #include <linux/units.h>
48 #include <linux/workqueue.h>
49 
50 #include <media/i2c/ds90ub9xx.h>
51 #include <media/mipi-csi2.h>
52 #include <media/v4l2-ctrls.h>
53 #include <media/v4l2-fwnode.h>
54 #include <media/v4l2-subdev.h>
55 
56 #include "ds90ub953.h"
57 
58 #define MHZ(v) ((u32)((v) * HZ_PER_MHZ))
59 
60 /*
61  * If this is defined, the i2c addresses from UB960_DEBUG_I2C_RX_ID to
62  * UB960_DEBUG_I2C_RX_ID + 3 can be used to access the paged RX port registers
63  * directly.
64  *
65  * Only for debug purposes.
66  */
67 /* #define UB960_DEBUG_I2C_RX_ID	0x40 */
68 
69 #define UB960_POLL_TIME_MS	500
70 
71 #define UB960_MAX_RX_NPORTS	4
72 #define UB960_MAX_TX_NPORTS	2
73 #define UB960_MAX_NPORTS	(UB960_MAX_RX_NPORTS + UB960_MAX_TX_NPORTS)
74 
75 #define UB960_MAX_PORT_ALIASES	8
76 
77 #define UB960_NUM_BC_GPIOS		4
78 
79 /*
80  * Register map
81  *
82  * 0x00-0x32   Shared (UB960_SR)
83  * 0x33-0x3a   CSI-2 TX (per-port paged on DS90UB960, shared on 954) (UB960_TR)
84  * 0x4c        Shared (UB960_SR)
85  * 0x4d-0x7f   FPD-Link RX, per-port paged (UB960_RR)
86  * 0xb0-0xbf   Shared (UB960_SR)
87  * 0xd0-0xdf   FPD-Link RX, per-port paged (UB960_RR)
88  * 0xf0-0xf5   Shared (UB960_SR)
89  * 0xf8-0xfb   Shared (UB960_SR)
90  * All others  Reserved
91  *
92  * Register prefixes:
93  * UB960_SR_* = Shared register
94  * UB960_RR_* = FPD-Link RX, per-port paged register
95  * UB960_TR_* = CSI-2 TX, per-port paged register
96  * UB960_XR_* = Reserved register
97  * UB960_IR_* = Indirect register
98  */
99 
100 #define UB960_SR_I2C_DEV_ID			0x00
101 #define UB960_SR_RESET				0x01
102 #define UB960_SR_RESET_DIGITAL_RESET1		BIT(1)
103 #define UB960_SR_RESET_DIGITAL_RESET0		BIT(0)
104 #define UB960_SR_RESET_GPIO_LOCK_RELEASE	BIT(5)
105 
106 #define UB960_SR_GEN_CONFIG			0x02
107 #define UB960_SR_REV_MASK			0x03
108 #define UB960_SR_DEVICE_STS			0x04
109 #define UB960_SR_PAR_ERR_THOLD_HI		0x05
110 #define UB960_SR_PAR_ERR_THOLD_LO		0x06
111 #define UB960_SR_BCC_WDOG_CTL			0x07
112 #define UB960_SR_I2C_CTL1			0x08
113 #define UB960_SR_I2C_CTL2			0x09
114 #define UB960_SR_SCL_HIGH_TIME			0x0a
115 #define UB960_SR_SCL_LOW_TIME			0x0b
116 #define UB960_SR_RX_PORT_CTL			0x0c
117 #define UB960_SR_IO_CTL				0x0d
118 #define UB960_SR_GPIO_PIN_STS			0x0e
119 #define UB960_SR_GPIO_INPUT_CTL			0x0f
120 #define UB960_SR_GPIO_PIN_CTL(n)		(0x10 + (n)) /* n < UB960_NUM_GPIOS */
121 #define UB960_SR_GPIO_PIN_CTL_GPIO_OUT_SEL		5
122 #define UB960_SR_GPIO_PIN_CTL_GPIO_OUT_SRC_SHIFT	2
123 #define UB960_SR_GPIO_PIN_CTL_GPIO_OUT_EN		BIT(0)
124 
125 #define UB960_SR_FS_CTL				0x18
126 #define UB960_SR_FS_HIGH_TIME_1			0x19
127 #define UB960_SR_FS_HIGH_TIME_0			0x1a
128 #define UB960_SR_FS_LOW_TIME_1			0x1b
129 #define UB960_SR_FS_LOW_TIME_0			0x1c
130 #define UB960_SR_MAX_FRM_HI			0x1d
131 #define UB960_SR_MAX_FRM_LO			0x1e
132 #define UB960_SR_CSI_PLL_CTL			0x1f
133 
134 #define UB960_SR_FWD_CTL1			0x20
135 #define UB960_SR_FWD_CTL1_PORT_DIS(n)		BIT((n) + 4)
136 
137 #define UB960_SR_FWD_CTL2			0x21
138 #define UB960_SR_FWD_STS			0x22
139 
140 #define UB960_SR_INTERRUPT_CTL			0x23
141 #define UB960_SR_INTERRUPT_CTL_INT_EN		BIT(7)
142 #define UB960_SR_INTERRUPT_CTL_IE_CSI_TX0	BIT(4)
143 #define UB960_SR_INTERRUPT_CTL_IE_RX(n)		BIT((n)) /* rxport[n] IRQ */
144 
145 #define UB960_SR_INTERRUPT_STS			0x24
146 #define UB960_SR_INTERRUPT_STS_INT		BIT(7)
147 #define UB960_SR_INTERRUPT_STS_IS_CSI_TX(n)	BIT(4 + (n)) /* txport[n] IRQ */
148 #define UB960_SR_INTERRUPT_STS_IS_RX(n)		BIT((n)) /* rxport[n] IRQ */
149 
150 #define UB960_SR_TS_CONFIG			0x25
151 #define UB960_SR_TS_CONTROL			0x26
152 #define UB960_SR_TS_LINE_HI			0x27
153 #define UB960_SR_TS_LINE_LO			0x28
154 #define UB960_SR_TS_STATUS			0x29
155 #define UB960_SR_TIMESTAMP_P0_HI		0x2a
156 #define UB960_SR_TIMESTAMP_P0_LO		0x2b
157 #define UB960_SR_TIMESTAMP_P1_HI		0x2c
158 #define UB960_SR_TIMESTAMP_P1_LO		0x2d
159 
160 #define UB960_SR_CSI_PORT_SEL			0x32
161 
162 #define UB960_TR_CSI_CTL			0x33
163 #define UB960_TR_CSI_CTL_CSI_CAL_EN		BIT(6)
164 #define UB960_TR_CSI_CTL_CSI_CONTS_CLOCK	BIT(1)
165 #define UB960_TR_CSI_CTL_CSI_ENABLE		BIT(0)
166 
167 #define UB960_TR_CSI_CTL2			0x34
168 #define UB960_TR_CSI_STS			0x35
169 #define UB960_TR_CSI_TX_ICR			0x36
170 
171 #define UB960_TR_CSI_TX_ISR			0x37
172 #define UB960_TR_CSI_TX_ISR_IS_CSI_SYNC_ERROR	BIT(3)
173 #define UB960_TR_CSI_TX_ISR_IS_CSI_PASS_ERROR	BIT(1)
174 
175 #define UB960_TR_CSI_TEST_CTL			0x38
176 #define UB960_TR_CSI_TEST_PATT_HI		0x39
177 #define UB960_TR_CSI_TEST_PATT_LO		0x3a
178 
179 #define UB960_XR_SFILTER_CFG			0x41
180 #define UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT	4
181 #define UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT	0
182 
183 #define UB960_XR_AEQ_CTL1			0x42
184 #define UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_FPD_CLK	BIT(6)
185 #define UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_ENCODING	BIT(5)
186 #define UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_PARITY	BIT(4)
187 #define UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_MASK        \
188 	(UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_FPD_CLK |  \
189 	 UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_ENCODING | \
190 	 UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_PARITY)
191 #define UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN	BIT(0)
192 
193 #define UB960_XR_AEQ_ERR_THOLD			0x43
194 
195 #define UB960_RR_BCC_ERR_CTL			0x46
196 #define UB960_RR_BCC_STATUS			0x47
197 #define UB960_RR_BCC_STATUS_SEQ_ERROR		BIT(5)
198 #define UB960_RR_BCC_STATUS_MASTER_ERR		BIT(4)
199 #define UB960_RR_BCC_STATUS_MASTER_TO		BIT(3)
200 #define UB960_RR_BCC_STATUS_SLAVE_ERR		BIT(2)
201 #define UB960_RR_BCC_STATUS_SLAVE_TO		BIT(1)
202 #define UB960_RR_BCC_STATUS_RESP_ERR		BIT(0)
203 #define UB960_RR_BCC_STATUS_ERROR_MASK                                    \
204 	(UB960_RR_BCC_STATUS_SEQ_ERROR | UB960_RR_BCC_STATUS_MASTER_ERR | \
205 	 UB960_RR_BCC_STATUS_MASTER_TO | UB960_RR_BCC_STATUS_SLAVE_ERR |  \
206 	 UB960_RR_BCC_STATUS_SLAVE_TO | UB960_RR_BCC_STATUS_RESP_ERR)
207 
208 #define UB960_RR_FPD3_CAP			0x4a
209 #define UB960_RR_RAW_EMBED_DTYPE		0x4b
210 #define UB960_RR_RAW_EMBED_DTYPE_LINES_SHIFT	6
211 
212 #define UB960_SR_FPD3_PORT_SEL			0x4c
213 
214 #define UB960_RR_RX_PORT_STS1			0x4d
215 #define UB960_RR_RX_PORT_STS1_BCC_CRC_ERROR	BIT(5)
216 #define UB960_RR_RX_PORT_STS1_LOCK_STS_CHG	BIT(4)
217 #define UB960_RR_RX_PORT_STS1_BCC_SEQ_ERROR	BIT(3)
218 #define UB960_RR_RX_PORT_STS1_PARITY_ERROR	BIT(2)
219 #define UB960_RR_RX_PORT_STS1_PORT_PASS		BIT(1)
220 #define UB960_RR_RX_PORT_STS1_LOCK_STS		BIT(0)
221 #define UB960_RR_RX_PORT_STS1_ERROR_MASK       \
222 	(UB960_RR_RX_PORT_STS1_BCC_CRC_ERROR | \
223 	 UB960_RR_RX_PORT_STS1_BCC_SEQ_ERROR | \
224 	 UB960_RR_RX_PORT_STS1_PARITY_ERROR)
225 
226 #define UB960_RR_RX_PORT_STS2			0x4e
227 #define UB960_RR_RX_PORT_STS2_LINE_LEN_UNSTABLE	BIT(7)
228 #define UB960_RR_RX_PORT_STS2_LINE_LEN_CHG	BIT(6)
229 #define UB960_RR_RX_PORT_STS2_FPD3_ENCODE_ERROR	BIT(5)
230 #define UB960_RR_RX_PORT_STS2_BUFFER_ERROR	BIT(4)
231 #define UB960_RR_RX_PORT_STS2_CSI_ERROR		BIT(3)
232 #define UB960_RR_RX_PORT_STS2_FREQ_STABLE	BIT(2)
233 #define UB960_RR_RX_PORT_STS2_CABLE_FAULT	BIT(1)
234 #define UB960_RR_RX_PORT_STS2_LINE_CNT_CHG	BIT(0)
235 #define UB960_RR_RX_PORT_STS2_ERROR_MASK       \
236 	UB960_RR_RX_PORT_STS2_BUFFER_ERROR
237 
238 #define UB960_RR_RX_FREQ_HIGH			0x4f
239 #define UB960_RR_RX_FREQ_LOW			0x50
240 #define UB960_RR_SENSOR_STS_0			0x51
241 #define UB960_RR_SENSOR_STS_1			0x52
242 #define UB960_RR_SENSOR_STS_2			0x53
243 #define UB960_RR_SENSOR_STS_3			0x54
244 #define UB960_RR_RX_PAR_ERR_HI			0x55
245 #define UB960_RR_RX_PAR_ERR_LO			0x56
246 #define UB960_RR_BIST_ERR_COUNT			0x57
247 
248 #define UB960_RR_BCC_CONFIG			0x58
249 #define UB960_RR_BCC_CONFIG_BC_ALWAYS_ON	BIT(4)
250 #define UB960_RR_BCC_CONFIG_AUTO_ACK_ALL	BIT(5)
251 #define UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH	BIT(6)
252 #define UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK	GENMASK(2, 0)
253 
254 #define UB960_RR_DATAPATH_CTL1			0x59
255 #define UB960_RR_DATAPATH_CTL2			0x5a
256 #define UB960_RR_SER_ID				0x5b
257 #define UB960_RR_SER_ID_FREEZE_DEVICE_ID	BIT(0)
258 #define UB960_RR_SER_ALIAS_ID			0x5c
259 #define UB960_RR_SER_ALIAS_ID_AUTO_ACK		BIT(0)
260 
261 /* For these two register sets: n < UB960_MAX_PORT_ALIASES */
262 #define UB960_RR_SLAVE_ID(n)			(0x5d + (n))
263 #define UB960_RR_SLAVE_ALIAS(n)			(0x65 + (n))
264 
265 #define UB960_RR_PORT_CONFIG			0x6d
266 #define UB960_RR_PORT_CONFIG_FPD3_MODE_MASK	GENMASK(1, 0)
267 
268 #define UB960_RR_BC_GPIO_CTL(n)			(0x6e + (n)) /* n < 2 */
269 #define UB960_RR_RAW10_ID			0x70
270 #define UB960_RR_RAW10_ID_VC_SHIFT		6
271 #define UB960_RR_RAW10_ID_DT_SHIFT		0
272 
273 #define UB960_RR_RAW12_ID			0x71
274 #define UB960_RR_CSI_VC_MAP			0x72
275 #define UB960_RR_CSI_VC_MAP_SHIFT(x)		((x) * 2)
276 
277 #define UB960_RR_LINE_COUNT_HI			0x73
278 #define UB960_RR_LINE_COUNT_LO			0x74
279 #define UB960_RR_LINE_LEN_1			0x75
280 #define UB960_RR_LINE_LEN_0			0x76
281 #define UB960_RR_FREQ_DET_CTL			0x77
282 #define UB960_RR_MAILBOX_1			0x78
283 #define UB960_RR_MAILBOX_2			0x79
284 
285 #define UB960_RR_CSI_RX_STS			0x7a
286 #define UB960_RR_CSI_RX_STS_LENGTH_ERR		BIT(3)
287 #define UB960_RR_CSI_RX_STS_CKSUM_ERR		BIT(2)
288 #define UB960_RR_CSI_RX_STS_ECC2_ERR		BIT(1)
289 #define UB960_RR_CSI_RX_STS_ECC1_ERR		BIT(0)
290 #define UB960_RR_CSI_RX_STS_ERROR_MASK                                    \
291 	(UB960_RR_CSI_RX_STS_LENGTH_ERR | UB960_RR_CSI_RX_STS_CKSUM_ERR | \
292 	 UB960_RR_CSI_RX_STS_ECC2_ERR | UB960_RR_CSI_RX_STS_ECC1_ERR)
293 
294 #define UB960_RR_CSI_ERR_COUNTER		0x7b
295 #define UB960_RR_PORT_CONFIG2			0x7c
296 #define UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_MASK GENMASK(7, 6)
297 #define UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_SHIFT 6
298 
299 #define UB960_RR_PORT_CONFIG2_LV_POL_LOW	BIT(1)
300 #define UB960_RR_PORT_CONFIG2_FV_POL_LOW	BIT(0)
301 
302 #define UB960_RR_PORT_PASS_CTL			0x7d
303 #define UB960_RR_SEN_INT_RISE_CTL		0x7e
304 #define UB960_RR_SEN_INT_FALL_CTL		0x7f
305 
306 #define UB960_SR_CSI_FRAME_COUNT_HI(n)		(0x90 + 8 * (n))
307 #define UB960_SR_CSI_FRAME_COUNT_LO(n)		(0x91 + 8 * (n))
308 #define UB960_SR_CSI_FRAME_ERR_COUNT_HI(n)	(0x92 + 8 * (n))
309 #define UB960_SR_CSI_FRAME_ERR_COUNT_LO(n)	(0x93 + 8 * (n))
310 #define UB960_SR_CSI_LINE_COUNT_HI(n)		(0x94 + 8 * (n))
311 #define UB960_SR_CSI_LINE_COUNT_LO(n)		(0x95 + 8 * (n))
312 #define UB960_SR_CSI_LINE_ERR_COUNT_HI(n)	(0x96 + 8 * (n))
313 #define UB960_SR_CSI_LINE_ERR_COUNT_LO(n)	(0x97 + 8 * (n))
314 
315 #define UB960_XR_REFCLK_FREQ			0xa5	/* UB960 */
316 
317 #define UB960_SR_IND_ACC_CTL			0xb0
318 #define UB960_SR_IND_ACC_CTL_IA_AUTO_INC	BIT(1)
319 
320 #define UB960_SR_IND_ACC_ADDR			0xb1
321 #define UB960_SR_IND_ACC_DATA			0xb2
322 #define UB960_SR_BIST_CONTROL			0xb3
323 #define UB960_SR_MODE_IDX_STS			0xb8
324 #define UB960_SR_LINK_ERROR_COUNT		0xb9
325 #define UB960_SR_FPD3_ENC_CTL			0xba
326 #define UB960_SR_FV_MIN_TIME			0xbc
327 #define UB960_SR_GPIO_PD_CTL			0xbe
328 
329 #define UB960_RR_PORT_DEBUG			0xd0
330 #define UB960_RR_AEQ_CTL2			0xd2
331 #define UB960_RR_AEQ_CTL2_SET_AEQ_FLOOR		BIT(2)
332 
333 #define UB960_RR_AEQ_STATUS			0xd3
334 #define UB960_RR_AEQ_STATUS_STATUS_2		GENMASK(5, 3)
335 #define UB960_RR_AEQ_STATUS_STATUS_1		GENMASK(2, 0)
336 
337 #define UB960_RR_AEQ_BYPASS			0xd4
338 #define UB960_RR_AEQ_BYPASS_EQ_STAGE1_VALUE_SHIFT	5
339 #define UB960_RR_AEQ_BYPASS_EQ_STAGE1_VALUE_MASK	GENMASK(7, 5)
340 #define UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_SHIFT	1
341 #define UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_MASK	GENMASK(3, 1)
342 #define UB960_RR_AEQ_BYPASS_ENABLE			BIT(0)
343 
344 #define UB960_RR_AEQ_MIN_MAX			0xd5
345 #define UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT	4
346 #define UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT	0
347 
348 #define UB960_RR_SFILTER_STS_0			0xd6
349 #define UB960_RR_SFILTER_STS_1			0xd7
350 #define UB960_RR_PORT_ICR_HI			0xd8
351 #define UB960_RR_PORT_ICR_LO			0xd9
352 #define UB960_RR_PORT_ISR_HI			0xda
353 #define UB960_RR_PORT_ISR_LO			0xdb
354 #define UB960_RR_FC_GPIO_STS			0xdc
355 #define UB960_RR_FC_GPIO_ICR			0xdd
356 #define UB960_RR_SEN_INT_RISE_STS		0xde
357 #define UB960_RR_SEN_INT_FALL_STS		0xdf
358 
359 
360 #define UB960_SR_FPD3_RX_ID(n)			(0xf0 + (n))
361 #define UB960_SR_FPD3_RX_ID_LEN			6
362 
363 #define UB960_SR_I2C_RX_ID(n)			(0xf8 + (n))
364 
365 /* Indirect register blocks */
366 #define UB960_IND_TARGET_PAT_GEN		0x00
367 #define UB960_IND_TARGET_RX_ANA(n)		(0x01 + (n))
368 #define UB960_IND_TARGET_CSI_ANA		0x07
369 
370 /* UB960_IR_PGEN_*: Indirect Registers for Test Pattern Generator */
371 
372 #define UB960_IR_PGEN_CTL			0x01
373 #define UB960_IR_PGEN_CTL_PGEN_ENABLE		BIT(0)
374 
375 #define UB960_IR_PGEN_CFG			0x02
376 #define UB960_IR_PGEN_CSI_DI			0x03
377 #define UB960_IR_PGEN_LINE_SIZE1		0x04
378 #define UB960_IR_PGEN_LINE_SIZE0		0x05
379 #define UB960_IR_PGEN_BAR_SIZE1			0x06
380 #define UB960_IR_PGEN_BAR_SIZE0			0x07
381 #define UB960_IR_PGEN_ACT_LPF1			0x08
382 #define UB960_IR_PGEN_ACT_LPF0			0x09
383 #define UB960_IR_PGEN_TOT_LPF1			0x0a
384 #define UB960_IR_PGEN_TOT_LPF0			0x0b
385 #define UB960_IR_PGEN_LINE_PD1			0x0c
386 #define UB960_IR_PGEN_LINE_PD0			0x0d
387 #define UB960_IR_PGEN_VBP			0x0e
388 #define UB960_IR_PGEN_VFP			0x0f
389 #define UB960_IR_PGEN_COLOR(n)			(0x10 + (n)) /* n < 15 */
390 
391 #define UB960_IR_RX_ANA_STROBE_SET_CLK		0x08
392 #define UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY	BIT(3)
393 #define UB960_IR_RX_ANA_STROBE_SET_CLK_DELAY_MASK	GENMASK(2, 0)
394 
395 #define UB960_IR_RX_ANA_STROBE_SET_DATA		0x09
396 #define UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY	BIT(3)
397 #define UB960_IR_RX_ANA_STROBE_SET_DATA_DELAY_MASK	GENMASK(2, 0)
398 
#define UB954_IR_RX_ANA_STROBE_SET_CLK_DATA		0x08
#define UB954_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY	BIT(3)
#define UB954_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY	BIT(7)
#define UB954_IR_RX_ANA_STROBE_SET_CLK_DELAY_MASK	GENMASK(2, 0)
/* 3-bit data delay field at bits 6:4; GENMASK() takes (high, low) */
#define UB954_IR_RX_ANA_STROBE_SET_DATA_DELAY_MASK	GENMASK(6, 4)
#define UB954_IR_RX_ANA_STROBE_SET_DATA_DELAY_SHIFT	4
405 
406 /* UB9702 Registers */
407 
408 #define UB9702_SR_CSI_EXCLUSIVE_FWD2		0x3c
409 #define UB9702_SR_REFCLK_FREQ			0x3d
410 #define UB9702_RR_RX_CTL_1			0x80
411 #define UB9702_RR_RX_CTL_2			0x87
412 #define UB9702_RR_VC_ID_MAP(x)			(0xa0 + (x))
413 #define UB9702_SR_FPD_RATE_CFG			0xc2
414 #define UB9702_SR_CSI_PLL_DIV			0xc9
415 #define UB9702_RR_RX_SM_SEL_2			0xd4
416 #define UB9702_RR_CHANNEL_MODE			0xe4
417 
418 #define UB9702_IND_TARGET_SAR_ADC		0x0a
419 
420 #define UB9702_IR_RX_ANA_FPD_BC_CTL0		0x04
421 #define UB9702_IR_RX_ANA_FPD_BC_CTL1		0x0d
422 #define UB9702_IR_RX_ANA_FPD_BC_CTL2		0x1b
423 #define UB9702_IR_RX_ANA_SYSTEM_INIT_REG0	0x21
424 #define UB9702_IR_RX_ANA_AEQ_ALP_SEL6		0x27
425 #define UB9702_IR_RX_ANA_AEQ_ALP_SEL7		0x28
426 #define UB9702_IR_RX_ANA_AEQ_ALP_SEL10		0x2b
427 #define UB9702_IR_RX_ANA_AEQ_ALP_SEL11		0x2c
428 #define UB9702_IR_RX_ANA_EQ_ADAPT_CTRL		0x2e
429 #define UB9702_IR_RX_ANA_AEQ_CFG_1		0x34
430 #define UB9702_IR_RX_ANA_AEQ_CFG_2		0x4d
#define UB9702_IR_RX_ANA_GAIN_CTRL_0		0x71
433 #define UB9702_IR_RX_ANA_VGA_CTRL_SEL_1		0x72
434 #define UB9702_IR_RX_ANA_VGA_CTRL_SEL_2		0x73
435 #define UB9702_IR_RX_ANA_VGA_CTRL_SEL_3		0x74
436 #define UB9702_IR_RX_ANA_VGA_CTRL_SEL_6		0x77
437 #define UB9702_IR_RX_ANA_AEQ_CFG_3		0x79
438 #define UB9702_IR_RX_ANA_AEQ_CFG_4		0x85
439 #define UB9702_IR_RX_ANA_EQ_CTRL_SEL_15		0x87
440 #define UB9702_IR_RX_ANA_EQ_CTRL_SEL_24		0x90
441 #define UB9702_IR_RX_ANA_EQ_CTRL_SEL_38		0x9e
442 #define UB9702_IR_RX_ANA_FPD3_CDR_CTRL_SEL_5	0xa5
443 #define UB9702_IR_RX_ANA_FPD3_AEQ_CTRL_SEL_1	0xa8
444 #define UB9702_IR_RX_ANA_EQ_OVERRIDE_CTRL	0xf0
445 #define UB9702_IR_RX_ANA_VGA_CTRL_SEL_8		0xf1
446 
447 #define UB9702_IR_CSI_ANA_CSIPLL_REG_1		0x92
448 
449 /* EQ related */
450 
451 #define UB960_MIN_AEQ_STROBE_POS -7
452 #define UB960_MAX_AEQ_STROBE_POS  7
453 
454 #define UB960_MANUAL_STROBE_EXTRA_DELAY 6
455 
456 #define UB960_MIN_MANUAL_STROBE_POS -(7 + UB960_MANUAL_STROBE_EXTRA_DELAY)
457 #define UB960_MAX_MANUAL_STROBE_POS  (7 + UB960_MANUAL_STROBE_EXTRA_DELAY)
458 #define UB960_NUM_MANUAL_STROBE_POS  (UB960_MAX_MANUAL_STROBE_POS - UB960_MIN_MANUAL_STROBE_POS + 1)
459 
460 #define UB960_MIN_EQ_LEVEL  0
461 #define UB960_MAX_EQ_LEVEL  14
462 #define UB960_NUM_EQ_LEVELS (UB960_MAX_EQ_LEVEL - UB960_MIN_EQ_LEVEL + 1)
463 
/* Chip models handled by this driver */
enum chip_type {
	UB954,
	UB960,
	UB9702,
};

/* FPD-Link generation the chip belongs to */
enum chip_family {
	FAMILY_FPD3,
	FAMILY_FPD4,
};

/* Static per-model description of the hardware */
struct ub960_hw_data {
	const char *model;		/* Human-readable model name */
	enum chip_type chip_type;
	enum chip_family chip_family;
	u8 num_rxports;			/* Number of FPD-Link RX ports */
	u8 num_txports;			/* Number of CSI-2 TX ports */
};
482 
/* Input mode of an FPD-Link RX port */
enum ub960_rxport_mode {
	RXPORT_MODE_RAW10 = 0,
	RXPORT_MODE_RAW12_HF = 1,
	RXPORT_MODE_RAW12_LF = 2,
	RXPORT_MODE_CSI2_SYNC = 3,
	RXPORT_MODE_CSI2_NONSYNC = 4,
	RXPORT_MODE_LAST = RXPORT_MODE_CSI2_NONSYNC,
};

/* Clock-data-recovery mode of an FPD-Link RX port (FPD-Link 3 vs 4) */
enum ub960_rxport_cdr {
	RXPORT_CDR_FPD3 = 0,
	RXPORT_CDR_FPD4 = 1,
	RXPORT_CDR_LAST = RXPORT_CDR_FPD4,
};
497 
/* Per-port state for one FPD-Link RX (deserializer input) port */
struct ub960_rxport {
	struct ub960_data      *priv;
	u8                      nport;	/* RX port number, and index in priv->rxport[] */

	/* The remote source subdev feeding this port */
	struct {
		struct v4l2_subdev *sd;			/* Source subdev */
		u16 pad;				/* Pad on the source subdev */
		struct fwnode_handle *ep_fwnode;	/* Source endpoint fwnode */
	} source;

	/* Serializer */
	struct {
		struct fwnode_handle *fwnode;
		struct i2c_client *client;
		unsigned short alias; /* I2C alias (lower 7 bits) */
		short addr; /* Local I2C address (lower 7 bits) */
		struct ds90ub9xx_platform_data pdata;
		struct regmap *regmap;
	} ser;

	enum ub960_rxport_mode  rx_mode;	/* Input data mode */
	enum ub960_rxport_cdr	cdr_mode;	/* FPD-Link 3 or 4 CDR mode */

	u8			lv_fv_pol;	/* LV and FV polarities */

	struct regulator	*vpoc;		/* Power-over-Coax supply */

	/* EQ settings */
	struct {
		bool manual_eq;		/* Manual vs. adaptive equalizer */

		s8 strobe_pos;

		union {
			/* Adaptive EQ level limits (used when !manual_eq) */
			struct {
				u8 eq_level_min;
				u8 eq_level_max;
			} aeq;

			/* Fixed EQ level (used when manual_eq) */
			struct {
				u8 eq_level;
			} manual;
		};
	} eq;

	/* lock for aliased_addrs and associated registers */
	struct mutex aliased_addrs_lock;
	u16 aliased_addrs[UB960_MAX_PORT_ALIASES];
};
547 
/* Async connection wrapper, associating a V4L2 async connection with its RX port */
struct ub960_asd {
	struct v4l2_async_connection base;
	struct ub960_rxport *rxport;
};

/* Retrieve the ub960_asd embedding the given async connection */
static inline struct ub960_asd *to_ub960_asd(struct v4l2_async_connection *asd)
{
	return container_of(asd, struct ub960_asd, base);
}
557 
/* Per-port state for one CSI-2 TX port */
struct ub960_txport {
	struct ub960_data      *priv;
	u8                      nport;	/* TX port number, and index in priv->txport[] */

	u32 num_data_lanes;		/* CSI-2 data lanes used on this port */
	bool non_continous_clk;		/* Non-continuous CSI-2 clock mode */
};
565 
/* Driver instance state, one per deserializer chip */
struct ub960_data {
	const struct ub960_hw_data	*hw_data; /* Model-specific data */
	struct i2c_client	*client; /* for shared local registers */
	struct regmap		*regmap;

	/* lock for register access */
	struct mutex		reg_lock;

	struct clk		*refclk;

	struct regulator	*vddio;

	struct gpio_desc	*pd_gpio;	/* Power-down GPIO */
	struct delayed_work	poll_work;
	struct ub960_rxport	*rxports[UB960_MAX_RX_NPORTS]; /* NULL if port unused */
	struct ub960_txport	*txports[UB960_MAX_TX_NPORTS];

	struct v4l2_subdev	sd;
	struct media_pad	pads[UB960_MAX_NPORTS]; /* RX pads first, then TX */

	struct v4l2_ctrl_handler   ctrl_handler;
	struct v4l2_async_notifier notifier;

	u32 tx_data_rate;		/* Nominal data rate (Gb/s) */
	s64 tx_link_freq[1];

	struct i2c_atr *atr;		/* I2C address translator */

	/*
	 * Ports currently selected in the paged register windows; cached to
	 * avoid redundant *_PORT_SEL writes. Protected by reg_lock.
	 */
	struct {
		u8 rxport;
		u8 txport;
		u8 indirect_target;
	} reg_current;

	bool streaming;

	u8 stored_fwd_ctl;

	u64 stream_enable_mask[UB960_MAX_NPORTS];

	/* These are common to all ports */
	struct {
		bool manual;

		s8 min;
		s8 max;
	} strobe;
};
614 
/* Retrieve the driver instance embedding the given subdev */
static inline struct ub960_data *sd_to_ub960(struct v4l2_subdev *sd)
{
	return container_of(sd, struct ub960_data, sd);
}
619 
/* Sink (FPD-Link RX) pads are numbered before the source (CSI-2 TX) pads */
static inline bool ub960_pad_is_sink(struct ub960_data *priv, u32 pad)
{
	return pad < priv->hw_data->num_rxports;
}
624 
625 static inline bool ub960_pad_is_source(struct ub960_data *priv, u32 pad)
626 {
627 	return pad >= priv->hw_data->num_rxports;
628 }
629 
630 static inline unsigned int ub960_pad_to_port(struct ub960_data *priv, u32 pad)
631 {
632 	if (ub960_pad_is_sink(priv, pad))
633 		return pad;
634 	else
635 		return pad - priv->hw_data->num_rxports;
636 }
637 
/* Properties of one supported media bus format */
struct ub960_format_info {
	u32 code;	/* Media bus code */
	u32 bpp;	/* Bits per pixel */
	u8 datatype;	/* CSI-2 data type */
	bool meta;	/* True for metadata formats */
};

/* All media bus formats supported by the driver */
static const struct ub960_format_info ub960_formats[] = {
	{ .code = MEDIA_BUS_FMT_RGB888_1X24, .bpp = 24, .datatype = MIPI_CSI2_DT_RGB888, },

	{ .code = MEDIA_BUS_FMT_YUYV8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
	{ .code = MEDIA_BUS_FMT_UYVY8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
	{ .code = MEDIA_BUS_FMT_VYUY8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
	{ .code = MEDIA_BUS_FMT_YVYU8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },

	{ .code = MEDIA_BUS_FMT_SBGGR8_1X8, .bpp = 8, .datatype = MIPI_CSI2_DT_RAW8, },
	{ .code = MEDIA_BUS_FMT_SGBRG8_1X8, .bpp = 8, .datatype = MIPI_CSI2_DT_RAW8, },
	{ .code = MEDIA_BUS_FMT_SGRBG8_1X8, .bpp = 8, .datatype = MIPI_CSI2_DT_RAW8, },
	{ .code = MEDIA_BUS_FMT_SRGGB8_1X8, .bpp = 8, .datatype = MIPI_CSI2_DT_RAW8, },

	{ .code = MEDIA_BUS_FMT_SBGGR10_1X10, .bpp = 10, .datatype = MIPI_CSI2_DT_RAW10, },
	{ .code = MEDIA_BUS_FMT_SGBRG10_1X10, .bpp = 10, .datatype = MIPI_CSI2_DT_RAW10, },
	{ .code = MEDIA_BUS_FMT_SGRBG10_1X10, .bpp = 10, .datatype = MIPI_CSI2_DT_RAW10, },
	{ .code = MEDIA_BUS_FMT_SRGGB10_1X10, .bpp = 10, .datatype = MIPI_CSI2_DT_RAW10, },

	{ .code = MEDIA_BUS_FMT_SBGGR12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
	{ .code = MEDIA_BUS_FMT_SGBRG12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
	{ .code = MEDIA_BUS_FMT_SGRBG12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
	{ .code = MEDIA_BUS_FMT_SRGGB12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
};
668 
669 static const struct ub960_format_info *ub960_find_format(u32 code)
670 {
671 	unsigned int i;
672 
673 	for (i = 0; i < ARRAY_SIZE(ub960_formats); i++) {
674 		if (ub960_formats[i].code == code)
675 			return &ub960_formats[i];
676 	}
677 
678 	return NULL;
679 }
680 
/* Iteration state for walking the RX ports */
struct ub960_rxport_iter {
	unsigned int nport;		/* Current RX port index */
	struct ub960_rxport *rxport;	/* Port at nport; may be NULL */
};

/* Filters applied by ub960_iter_rxport() */
enum ub960_iter_flags {
	UB960_ITER_ACTIVE_ONLY = BIT(0),	/* Skip unpopulated ports */
	UB960_ITER_FPD4_ONLY = BIT(1),		/* Skip ports not in FPD4 CDR mode */
};
690 
691 static struct ub960_rxport_iter ub960_iter_rxport(struct ub960_data *priv,
692 						  struct ub960_rxport_iter it,
693 						  enum ub960_iter_flags flags)
694 {
695 	for (; it.nport < priv->hw_data->num_rxports; it.nport++) {
696 		it.rxport = priv->rxports[it.nport];
697 
698 		if ((flags & UB960_ITER_ACTIVE_ONLY) && !it.rxport)
699 			continue;
700 
701 		if ((flags & UB960_ITER_FPD4_ONLY) &&
702 		    it.rxport->cdr_mode != RXPORT_CDR_FPD4)
703 			continue;
704 
705 		return it;
706 	}
707 
708 	it.rxport = NULL;
709 
710 	return it;
711 }
712 
/* Iterate over all RX ports, populated or not (it.rxport may be NULL) */
#define for_each_rxport(priv, it)                                             \
	for (struct ub960_rxport_iter it =                                    \
		     ub960_iter_rxport(priv, (struct ub960_rxport_iter){ 0 }, \
				       0);                                    \
	     it.nport < (priv)->hw_data->num_rxports;                         \
	     it.nport++, it = ub960_iter_rxport(priv, it, 0))

/* Iterate over the populated RX ports only */
#define for_each_active_rxport(priv, it)                                      \
	for (struct ub960_rxport_iter it =                                    \
		     ub960_iter_rxport(priv, (struct ub960_rxport_iter){ 0 }, \
				       UB960_ITER_ACTIVE_ONLY);               \
	     it.nport < (priv)->hw_data->num_rxports;                         \
	     it.nport++, it = ub960_iter_rxport(priv, it,                     \
						UB960_ITER_ACTIVE_ONLY))

/* Iterate over the populated RX ports whose CDR is in FPD-Link 4 mode */
#define for_each_active_rxport_fpd4(priv, it)                                 \
	for (struct ub960_rxport_iter it =                                    \
		     ub960_iter_rxport(priv, (struct ub960_rxport_iter){ 0 }, \
				       UB960_ITER_ACTIVE_ONLY |               \
					       UB960_ITER_FPD4_ONLY);         \
	     it.nport < (priv)->hw_data->num_rxports;                         \
	     it.nport++, it = ub960_iter_rxport(priv, it,                     \
						UB960_ITER_ACTIVE_ONLY |      \
							UB960_ITER_FPD4_ONLY))
737 
738 /* -----------------------------------------------------------------------------
739  * Basic device access
740  */
741 
742 static int ub960_read(struct ub960_data *priv, u8 reg, u8 *val, int *err)
743 {
744 	struct device *dev = &priv->client->dev;
745 	unsigned int v;
746 	int ret;
747 
748 	if (err && *err)
749 		return *err;
750 
751 	mutex_lock(&priv->reg_lock);
752 
753 	ret = regmap_read(priv->regmap, reg, &v);
754 	if (ret) {
755 		dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
756 			__func__, reg, ret);
757 		goto out_unlock;
758 	}
759 
760 	*val = v;
761 
762 out_unlock:
763 	mutex_unlock(&priv->reg_lock);
764 
765 	if (ret && err)
766 		*err = ret;
767 
768 	return ret;
769 }
770 
771 static int ub960_write(struct ub960_data *priv, u8 reg, u8 val, int *err)
772 {
773 	struct device *dev = &priv->client->dev;
774 	int ret;
775 
776 	if (err && *err)
777 		return *err;
778 
779 	mutex_lock(&priv->reg_lock);
780 
781 	ret = regmap_write(priv->regmap, reg, val);
782 	if (ret)
783 		dev_err(dev, "%s: cannot write register 0x%02x (%d)!\n",
784 			__func__, reg, ret);
785 
786 	mutex_unlock(&priv->reg_lock);
787 
788 	if (ret && err)
789 		*err = ret;
790 
791 	return ret;
792 }
793 
794 static int ub960_update_bits(struct ub960_data *priv, u8 reg, u8 mask, u8 val,
795 			     int *err)
796 {
797 	struct device *dev = &priv->client->dev;
798 	int ret;
799 
800 	if (err && *err)
801 		return *err;
802 
803 	mutex_lock(&priv->reg_lock);
804 
805 	ret = regmap_update_bits(priv->regmap, reg, mask, val);
806 	if (ret)
807 		dev_err(dev, "%s: cannot update register 0x%02x (%d)!\n",
808 			__func__, reg, ret);
809 
810 	mutex_unlock(&priv->reg_lock);
811 
812 	if (ret && err)
813 		*err = ret;
814 
815 	return ret;
816 }
817 
818 static int ub960_read16(struct ub960_data *priv, u8 reg, u16 *val, int *err)
819 {
820 	struct device *dev = &priv->client->dev;
821 	__be16 __v;
822 	int ret;
823 
824 	if (err && *err)
825 		return *err;
826 
827 	mutex_lock(&priv->reg_lock);
828 
829 	ret = regmap_bulk_read(priv->regmap, reg, &__v, sizeof(__v));
830 	if (ret) {
831 		dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
832 			__func__, reg, ret);
833 		goto out_unlock;
834 	}
835 
836 	*val = be16_to_cpu(__v);
837 
838 out_unlock:
839 	mutex_unlock(&priv->reg_lock);
840 
841 	if (ret && err)
842 		*err = ret;
843 
844 	return ret;
845 }
846 
/*
 * Select which RX port the paged UB960_RR_* registers access.
 *
 * Caller must hold reg_lock. The bus write is skipped when @nport is already
 * the selected port, using the value cached in priv->reg_current.
 */
static int ub960_rxport_select(struct ub960_data *priv, u8 nport)
{
	struct device *dev = &priv->client->dev;
	int ret;

	lockdep_assert_held(&priv->reg_lock);

	if (priv->reg_current.rxport == nport)
		return 0;

	ret = regmap_write(priv->regmap, UB960_SR_FPD3_PORT_SEL,
			   (nport << 4) | BIT(nport));
	if (ret) {
		dev_err(dev, "%s: cannot select rxport %d (%d)!\n", __func__,
			nport, ret);
		return ret;
	}

	priv->reg_current.rxport = nport;

	return 0;
}
869 
870 static int ub960_rxport_read(struct ub960_data *priv, u8 nport, u8 reg,
871 			     u8 *val, int *err)
872 {
873 	struct device *dev = &priv->client->dev;
874 	unsigned int v;
875 	int ret;
876 
877 	if (err && *err)
878 		return *err;
879 
880 	mutex_lock(&priv->reg_lock);
881 
882 	ret = ub960_rxport_select(priv, nport);
883 	if (ret)
884 		goto out_unlock;
885 
886 	ret = regmap_read(priv->regmap, reg, &v);
887 	if (ret) {
888 		dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
889 			__func__, reg, ret);
890 		goto out_unlock;
891 	}
892 
893 	*val = v;
894 
895 out_unlock:
896 	mutex_unlock(&priv->reg_lock);
897 
898 	if (ret && err)
899 		*err = ret;
900 
901 	return ret;
902 }
903 
904 static int ub960_rxport_write(struct ub960_data *priv, u8 nport, u8 reg,
905 			      u8 val, int *err)
906 {
907 	struct device *dev = &priv->client->dev;
908 	int ret;
909 
910 	if (err && *err)
911 		return *err;
912 
913 	mutex_lock(&priv->reg_lock);
914 
915 	ret = ub960_rxport_select(priv, nport);
916 	if (ret)
917 		goto out_unlock;
918 
919 	ret = regmap_write(priv->regmap, reg, val);
920 	if (ret)
921 		dev_err(dev, "%s: cannot write register 0x%02x (%d)!\n",
922 			__func__, reg, ret);
923 
924 out_unlock:
925 	mutex_unlock(&priv->reg_lock);
926 
927 	if (ret && err)
928 		*err = ret;
929 
930 	return ret;
931 }
932 
933 static int ub960_rxport_update_bits(struct ub960_data *priv, u8 nport, u8 reg,
934 				    u8 mask, u8 val, int *err)
935 {
936 	struct device *dev = &priv->client->dev;
937 	int ret;
938 
939 	if (err && *err)
940 		return *err;
941 
942 	mutex_lock(&priv->reg_lock);
943 
944 	ret = ub960_rxport_select(priv, nport);
945 	if (ret)
946 		goto out_unlock;
947 
948 	ret = regmap_update_bits(priv->regmap, reg, mask, val);
949 	if (ret)
950 		dev_err(dev, "%s: cannot update register 0x%02x (%d)!\n",
951 			__func__, reg, ret);
952 
953 out_unlock:
954 	mutex_unlock(&priv->reg_lock);
955 
956 	if (ret && err)
957 		*err = ret;
958 
959 	return ret;
960 }
961 
/*
 * Read a 16-bit big-endian register pair from RX port @nport into @val.
 * If @err is non-NULL and *err is already set, the access is skipped and
 * the earlier error is returned (chained-error convention).
 */
static int ub960_rxport_read16(struct ub960_data *priv, u8 nport, u8 reg,
			       u16 *val, int *err)
{
	struct device *dev = &priv->client->dev;
	__be16 __v;
	int ret;

	/* Short-circuit if a previous chained access already failed */
	if (err && *err)
		return *err;

	mutex_lock(&priv->reg_lock);

	/* Point the shared RX register page at the requested port */
	ret = ub960_rxport_select(priv, nport);
	if (ret)
		goto out_unlock;

	/* Bulk read fetches both bytes in a single transfer */
	ret = regmap_bulk_read(priv->regmap, reg, &__v, sizeof(__v));
	if (ret) {
		dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
			__func__, reg, ret);
		goto out_unlock;
	}

	*val = be16_to_cpu(__v);

out_unlock:
	mutex_unlock(&priv->reg_lock);

	if (ret && err)
		*err = ret;

	return ret;
}
995 
/*
 * Select TX port @nport as the target of subsequent paged TX register
 * accesses. Must be called with priv->reg_lock held. The last selection
 * is cached in priv->reg_current.txport to avoid redundant i2c writes.
 */
static int ub960_txport_select(struct ub960_data *priv, u8 nport)
{
	struct device *dev = &priv->client->dev;
	int ret;

	lockdep_assert_held(&priv->reg_lock);

	/* UB954 has only 1 CSI TX. Hence, no need to select */
	if (priv->hw_data->chip_type == UB954)
		return 0;

	/* Skip the write if this port is already selected */
	if (priv->reg_current.txport == nport)
		return 0;

	/* Port select field at bit 4, write-enable bit per port (TODO: confirm layout) */
	ret = regmap_write(priv->regmap, UB960_SR_CSI_PORT_SEL,
			   (nport << 4) | BIT(nport));
	if (ret) {
		dev_err(dev, "%s: cannot select tx port %d (%d)!\n", __func__,
			nport, ret);
		return ret;
	}

	priv->reg_current.txport = nport;

	return 0;
}
1022 
/*
 * Read an 8-bit TX port register into @val. If @err is non-NULL and
 * *err is already set, the access is skipped and the earlier error is
 * returned (chained-error convention).
 */
static int ub960_txport_read(struct ub960_data *priv, u8 nport, u8 reg,
			     u8 *val, int *err)
{
	struct device *dev = &priv->client->dev;
	unsigned int v;
	int ret;

	/* Short-circuit if a previous chained access already failed */
	if (err && *err)
		return *err;

	mutex_lock(&priv->reg_lock);

	/* Point the shared TX register page at the requested port */
	ret = ub960_txport_select(priv, nport);
	if (ret)
		goto out_unlock;

	ret = regmap_read(priv->regmap, reg, &v);
	if (ret) {
		dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
			__func__, reg, ret);
		goto out_unlock;
	}

	*val = v;

out_unlock:
	mutex_unlock(&priv->reg_lock);

	if (ret && err)
		*err = ret;

	return ret;
}
1056 
1057 static int ub960_txport_write(struct ub960_data *priv, u8 nport, u8 reg,
1058 			      u8 val, int *err)
1059 {
1060 	struct device *dev = &priv->client->dev;
1061 	int ret;
1062 
1063 	if (err && *err)
1064 		return *err;
1065 
1066 	mutex_lock(&priv->reg_lock);
1067 
1068 	ret = ub960_txport_select(priv, nport);
1069 	if (ret)
1070 		goto out_unlock;
1071 
1072 	ret = regmap_write(priv->regmap, reg, val);
1073 	if (ret)
1074 		dev_err(dev, "%s: cannot write register 0x%02x (%d)!\n",
1075 			__func__, reg, ret);
1076 
1077 out_unlock:
1078 	mutex_unlock(&priv->reg_lock);
1079 
1080 	if (ret && err)
1081 		*err = ret;
1082 
1083 	return ret;
1084 }
1085 
/*
 * Read-modify-write an 8-bit TX port register under @mask. Follows the
 * chained-@err convention used by the other register helpers.
 */
static int ub960_txport_update_bits(struct ub960_data *priv, u8 nport, u8 reg,
				    u8 mask, u8 val, int *err)
{
	struct device *dev = &priv->client->dev;
	int ret;

	/* Short-circuit if a previous chained access already failed */
	if (err && *err)
		return *err;

	mutex_lock(&priv->reg_lock);

	/* Point the shared TX register page at the requested port */
	ret = ub960_txport_select(priv, nport);
	if (ret)
		goto out_unlock;

	ret = regmap_update_bits(priv->regmap, reg, mask, val);
	if (ret)
		dev_err(dev, "%s: cannot update register 0x%02x (%d)!\n",
			__func__, reg, ret);

out_unlock:
	mutex_unlock(&priv->reg_lock);

	if (ret && err)
		*err = ret;

	return ret;
}
1114 
/*
 * Select the indirect register block targeted by subsequent
 * IND_ACC_ADDR/IND_ACC_DATA accesses. Must be called with
 * priv->reg_lock held; the selection is cached to avoid redundant
 * writes to IND_ACC_CTL.
 */
static int ub960_select_ind_reg_block(struct ub960_data *priv, u8 block)
{
	struct device *dev = &priv->client->dev;
	int ret;

	lockdep_assert_held(&priv->reg_lock);

	if (priv->reg_current.indirect_target == block)
		return 0;

	/* The block select field starts at bit 2 of IND_ACC_CTL */
	ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_CTL, block << 2);
	if (ret) {
		dev_err(dev, "%s: cannot select indirect target %u (%d)!\n",
			__func__, block, ret);
		return ret;
	}

	priv->reg_current.indirect_target = block;

	return 0;
}
1136 
1137 static int ub960_read_ind(struct ub960_data *priv, u8 block, u8 reg, u8 *val,
1138 			  int *err)
1139 {
1140 	struct device *dev = &priv->client->dev;
1141 	unsigned int v;
1142 	int ret;
1143 
1144 	if (err && *err)
1145 		return *err;
1146 
1147 	mutex_lock(&priv->reg_lock);
1148 
1149 	ret = ub960_select_ind_reg_block(priv, block);
1150 	if (ret)
1151 		goto out_unlock;
1152 
1153 	ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_ADDR, reg);
1154 	if (ret) {
1155 		dev_err(dev,
1156 			"Write to IND_ACC_ADDR failed when reading %u:%x02x: %d\n",
1157 			block, reg, ret);
1158 		goto out_unlock;
1159 	}
1160 
1161 	ret = regmap_read(priv->regmap, UB960_SR_IND_ACC_DATA, &v);
1162 	if (ret) {
1163 		dev_err(dev,
1164 			"Write to IND_ACC_DATA failed when reading %u:%x02x: %d\n",
1165 			block, reg, ret);
1166 		goto out_unlock;
1167 	}
1168 
1169 	*val = v;
1170 
1171 out_unlock:
1172 	mutex_unlock(&priv->reg_lock);
1173 
1174 	if (ret && err)
1175 		*err = ret;
1176 
1177 	return ret;
1178 }
1179 
1180 static int ub960_write_ind(struct ub960_data *priv, u8 block, u8 reg, u8 val,
1181 			   int *err)
1182 {
1183 	struct device *dev = &priv->client->dev;
1184 	int ret;
1185 
1186 	if (err && *err)
1187 		return *err;
1188 
1189 	mutex_lock(&priv->reg_lock);
1190 
1191 	ret = ub960_select_ind_reg_block(priv, block);
1192 	if (ret)
1193 		goto out_unlock;
1194 
1195 	ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_ADDR, reg);
1196 	if (ret) {
1197 		dev_err(dev,
1198 			"Write to IND_ACC_ADDR failed when writing %u:%x02x: %d\n",
1199 			block, reg, ret);
1200 		goto out_unlock;
1201 	}
1202 
1203 	ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_DATA, val);
1204 	if (ret) {
1205 		dev_err(dev,
1206 			"Write to IND_ACC_DATA failed when writing %u:%x02x: %d\n",
1207 			block, reg, ret);
1208 		goto out_unlock;
1209 	}
1210 
1211 out_unlock:
1212 	mutex_unlock(&priv->reg_lock);
1213 
1214 	if (ret && err)
1215 		*err = ret;
1216 
1217 	return ret;
1218 }
1219 
1220 static int ub960_ind_update_bits(struct ub960_data *priv, u8 block, u8 reg,
1221 				 u8 mask, u8 val, int *err)
1222 {
1223 	struct device *dev = &priv->client->dev;
1224 	int ret;
1225 
1226 	if (err && *err)
1227 		return *err;
1228 
1229 	mutex_lock(&priv->reg_lock);
1230 
1231 	ret = ub960_select_ind_reg_block(priv, block);
1232 	if (ret)
1233 		goto out_unlock;
1234 
1235 	ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_ADDR, reg);
1236 	if (ret) {
1237 		dev_err(dev,
1238 			"Write to IND_ACC_ADDR failed when updating %u:%x02x: %d\n",
1239 			block, reg, ret);
1240 		goto out_unlock;
1241 	}
1242 
1243 	ret = regmap_update_bits(priv->regmap, UB960_SR_IND_ACC_DATA, mask,
1244 				 val);
1245 	if (ret) {
1246 		dev_err(dev,
1247 			"Write to IND_ACC_DATA failed when updating %u:%x02x: %d\n",
1248 			block, reg, ret);
1249 		goto out_unlock;
1250 	}
1251 
1252 out_unlock:
1253 	mutex_unlock(&priv->reg_lock);
1254 
1255 	if (ret && err)
1256 		*err = ret;
1257 
1258 	return ret;
1259 }
1260 
/*
 * Reset the deserializer and wait for completion.
 * @reset_regs: select DIGITAL_RESET1 (register contents are reset too,
 *              per the parameter's intent) instead of DIGITAL_RESET0.
 *
 * The reset bit self-clears; we poll for it with a 100 ms timeout.
 */
static int ub960_reset(struct ub960_data *priv, bool reset_regs)
{
	struct device *dev = &priv->client->dev;
	unsigned int v;
	int ret;
	u8 bit;

	bit = reset_regs ? UB960_SR_RESET_DIGITAL_RESET1 :
			   UB960_SR_RESET_DIGITAL_RESET0;

	ret = ub960_write(priv, UB960_SR_RESET, bit, NULL);
	if (ret)
		return ret;

	mutex_lock(&priv->reg_lock);

	/* Wait for the reset bit to clear (2 ms poll interval) */
	ret = regmap_read_poll_timeout(priv->regmap, UB960_SR_RESET, v,
				       (v & bit) == 0, 2000, 100000);

	mutex_unlock(&priv->reg_lock);

	if (ret)
		dev_err(dev, "reset failed: %d\n", ret);

	return ret;
}
1287 
1288 /* -----------------------------------------------------------------------------
1289  * I2C-ATR (address translator)
1290  */
1291 
/*
 * i2c-atr callback: map remote client address @addr on FPD-Link channel
 * @chan_id to the local alias @alias, using a free SLAVE_ID/SLAVE_ALIAS
 * register slot of the RX port. Returns -EADDRNOTAVAIL when all slots
 * are in use.
 */
static int ub960_atr_attach_addr(struct i2c_atr *atr, u32 chan_id,
				 u16 addr, u16 alias)
{
	struct ub960_data *priv = i2c_atr_get_driver_data(atr);
	struct ub960_rxport *rxport = priv->rxports[chan_id];
	struct device *dev = &priv->client->dev;
	unsigned int reg_idx;
	int ret = 0;

	guard(mutex)(&rxport->aliased_addrs_lock);

	/* Find the first free slot; 0 marks an unused entry */
	for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_addrs); reg_idx++) {
		if (!rxport->aliased_addrs[reg_idx])
			break;
	}

	if (reg_idx == ARRAY_SIZE(rxport->aliased_addrs)) {
		dev_err(dev, "rx%u: alias pool exhausted\n", rxport->nport);
		return -EADDRNOTAVAIL;
	}

	rxport->aliased_addrs[reg_idx] = addr;

	/* The hardware registers take 8-bit (left-shifted) i2c addresses */
	ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ID(reg_idx),
			   addr << 1, &ret);
	ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ALIAS(reg_idx),
			   alias << 1, &ret);

	if (ret)
		return ret;

	dev_dbg(dev, "rx%u: client 0x%02x assigned alias 0x%02x at slot %u\n",
		rxport->nport, addr, alias, reg_idx);

	return 0;
}
1328 
/*
 * i2c-atr callback: release the alias mapping previously created for
 * remote client @addr on channel @chan_id. Clears the bookkeeping slot
 * and disables the hardware alias by writing 0 to SLAVE_ALIAS.
 */
static void ub960_atr_detach_addr(struct i2c_atr *atr, u32 chan_id,
				  u16 addr)
{
	struct ub960_data *priv = i2c_atr_get_driver_data(atr);
	struct ub960_rxport *rxport = priv->rxports[chan_id];
	struct device *dev = &priv->client->dev;
	unsigned int reg_idx;
	int ret;

	guard(mutex)(&rxport->aliased_addrs_lock);

	/* Locate the slot that holds this client address */
	for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_addrs); reg_idx++) {
		if (rxport->aliased_addrs[reg_idx] == addr)
			break;
	}

	if (reg_idx == ARRAY_SIZE(rxport->aliased_addrs)) {
		dev_err(dev, "rx%u: client 0x%02x is not mapped!\n",
			rxport->nport, addr);
		return;
	}

	rxport->aliased_addrs[reg_idx] = 0;

	ret = ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ALIAS(reg_idx),
				 0, NULL);
	if (ret) {
		dev_err(dev, "rx%u: unable to fully unmap client 0x%02x: %d\n",
			rxport->nport, addr, ret);
		return;
	}

	dev_dbg(dev, "rx%u: client 0x%02x released at slot %u\n", rxport->nport,
		addr, reg_idx);
}
1364 
/* i2c address translator callbacks for the remote FPD-Link busses */
static const struct i2c_atr_ops ub960_atr_ops = {
	.attach_addr = ub960_atr_attach_addr,
	.detach_addr = ub960_atr_detach_addr,
};
1369 
/*
 * Create the i2c address translator with one channel per RX port,
 * attached to the deserializer's parent i2c adapter.
 */
static int ub960_init_atr(struct ub960_data *priv)
{
	struct device *dev = &priv->client->dev;
	struct i2c_adapter *parent_adap = priv->client->adapter;

	priv->atr = i2c_atr_new(parent_adap, dev, &ub960_atr_ops,
				priv->hw_data->num_rxports, 0);
	if (IS_ERR(priv->atr))
		return PTR_ERR(priv->atr);

	i2c_atr_set_driver_data(priv->atr, priv);

	return 0;
}
1384 
/* Tear down the i2c address translator created by ub960_init_atr() */
static void ub960_uninit_atr(struct ub960_data *priv)
{
	i2c_atr_delete(priv->atr);
	priv->atr = NULL;
}
1390 
1391 /* -----------------------------------------------------------------------------
1392  * TX ports
1393  */
1394 
/*
 * Parse the DT endpoint of CSI-2 TX port @nport and allocate the
 * corresponding ub960_txport. Exactly one link frequency must be given,
 * and the resulting data rate (2 * link frequency) must be one of
 * 1600/1200/800/400 MHz; 1200 MHz is rejected on UB954.
 */
static int ub960_parse_dt_txport(struct ub960_data *priv,
				 struct fwnode_handle *ep_fwnode,
				 u8 nport)
{
	struct device *dev = &priv->client->dev;
	struct v4l2_fwnode_endpoint vep = {};
	struct ub960_txport *txport;
	int ret;

	txport = kzalloc_obj(*txport);
	if (!txport)
		return -ENOMEM;

	txport->priv = priv;
	txport->nport = nport;

	vep.bus_type = V4L2_MBUS_CSI2_DPHY;
	ret = v4l2_fwnode_endpoint_alloc_parse(ep_fwnode, &vep);
	if (ret) {
		dev_err(dev, "tx%u: failed to parse endpoint data\n", nport);
		goto err_free_txport;
	}

	txport->non_continous_clk = vep.bus.mipi_csi2.flags &
				    V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK;

	txport->num_data_lanes = vep.bus.mipi_csi2.num_data_lanes;

	/* Exactly one link frequency is expected in DT */
	if (vep.nr_of_link_frequencies != 1) {
		ret = -EINVAL;
		goto err_free_vep;
	}

	priv->tx_link_freq[0] = vep.link_frequencies[0];
	/* DDR signalling: data rate is twice the link frequency */
	priv->tx_data_rate = priv->tx_link_freq[0] * 2;

	if ((priv->tx_data_rate != MHZ(1600) &&
	     priv->tx_data_rate != MHZ(1200) &&
	     priv->tx_data_rate != MHZ(800) &&
	     priv->tx_data_rate != MHZ(400)) ||
	     (priv->hw_data->chip_type == UB954 && priv->tx_data_rate == MHZ(1200))) {
		dev_err(dev, "tx%u: invalid 'link-frequencies' value\n", nport);
		ret = -EINVAL;
		goto err_free_vep;
	}

	v4l2_fwnode_endpoint_free(&vep);

	priv->txports[nport] = txport;

	return 0;

err_free_vep:
	v4l2_fwnode_endpoint_free(&vep);
err_free_txport:
	kfree(txport);

	return ret;
}
1454 
1455 static int  ub960_csi_handle_events(struct ub960_data *priv, u8 nport)
1456 {
1457 	struct device *dev = &priv->client->dev;
1458 	u8 csi_tx_isr;
1459 	int ret;
1460 
1461 	ret = ub960_txport_read(priv, nport, UB960_TR_CSI_TX_ISR, &csi_tx_isr,
1462 				NULL);
1463 	if (ret)
1464 		return ret;
1465 
1466 	if (csi_tx_isr & UB960_TR_CSI_TX_ISR_IS_CSI_SYNC_ERROR)
1467 		dev_warn(dev, "TX%u: CSI_SYNC_ERROR\n", nport);
1468 
1469 	if (csi_tx_isr & UB960_TR_CSI_TX_ISR_IS_CSI_PASS_ERROR)
1470 		dev_warn(dev, "TX%u: CSI_PASS_ERROR\n", nport);
1471 
1472 	return 0;
1473 }
1474 
1475 /* -----------------------------------------------------------------------------
1476  * RX ports
1477  */
1478 
/*
 * Enable the VPOC regulator of every active RX port that has one. On
 * failure, the regulators enabled so far are disabled again before
 * returning the error.
 */
static int ub960_rxport_enable_vpocs(struct ub960_data *priv)
{
	unsigned int failed_nport;
	int ret;

	for_each_active_rxport(priv, it) {
		if (!it.rxport->vpoc)
			continue;

		ret = regulator_enable(it.rxport->vpoc);
		if (ret) {
			failed_nport = it.nport;
			goto err_disable_vpocs;
		}
	}

	return 0;

err_disable_vpocs:
	/* Unwind: disable the regulators of all ports before the failed one */
	while (failed_nport--) {
		struct ub960_rxport *rxport = priv->rxports[failed_nport];

		if (!rxport || !rxport->vpoc)
			continue;

		regulator_disable(rxport->vpoc);
	}

	return ret;
}
1509 
1510 static void ub960_rxport_disable_vpocs(struct ub960_data *priv)
1511 {
1512 	for_each_active_rxport(priv, it) {
1513 		if (!it.rxport->vpoc)
1514 			continue;
1515 
1516 		regulator_disable(it.rxport->vpoc);
1517 	}
1518 }
1519 
/*
 * Clear the latched error state of RX port @nport by reading its status
 * and error-counter registers (the reads themselves perform the clear,
 * per this function's intent). The chained @ret keeps the first error.
 */
static int ub960_rxport_clear_errors(struct ub960_data *priv,
				     unsigned int nport)
{
	int ret = 0;
	u8 v;

	ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1, &v, &ret);
	ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2, &v, &ret);
	ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS, &v, &ret);
	ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS, &v, &ret);

	ub960_rxport_read(priv, nport, UB960_RR_RX_PAR_ERR_HI, &v, &ret);
	ub960_rxport_read(priv, nport, UB960_RR_RX_PAR_ERR_LO, &v, &ret);

	ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER, &v, &ret);

	return ret;
}
1538 
1539 static int ub960_clear_rx_errors(struct ub960_data *priv)
1540 {
1541 	int ret;
1542 
1543 	for_each_rxport(priv, it) {
1544 		ret = ub960_rxport_clear_errors(priv, it.nport);
1545 		if (ret)
1546 			return ret;
1547 	}
1548 
1549 	return 0;
1550 }
1551 
/*
 * Read back the current strobe position of RX port @nport as the signed
 * difference data_delay - clk_delay. The UB954 packs the clk/data
 * "no extra delay" flags into a single analog register; other chips use
 * two separate registers. The fine delay fields come from the SFILTER
 * status registers.
 */
static int ub960_rxport_get_strobe_pos(struct ub960_data *priv,
				       unsigned int nport, s8 *strobe_pos)
{
	u8 v;
	u8 clk_delay, data_delay;
	int ret;

	if (priv->hw_data->chip_type == UB954) {
		/* UB954: single combined clk/data strobe register */
		ret = ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
				     UB954_IR_RX_ANA_STROBE_SET_CLK_DATA, &v, NULL);
		if (ret)
			return ret;

		clk_delay = (v & UB954_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY) ?
			     0 : UB960_MANUAL_STROBE_EXTRA_DELAY;

		data_delay = (v & UB954_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY) ?
			      0 : UB960_MANUAL_STROBE_EXTRA_DELAY;
	} else {
		/* Other chips: separate clk and data strobe registers */
		ret = ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
				     UB960_IR_RX_ANA_STROBE_SET_CLK, &v, NULL);
		if (ret)
			return ret;

		clk_delay = (v & UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY) ?
			     0 : UB960_MANUAL_STROBE_EXTRA_DELAY;

		ret = ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
				     UB960_IR_RX_ANA_STROBE_SET_DATA, &v, NULL);
		if (ret)
			return ret;

		data_delay = (v & UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY) ?
			      0 : UB960_MANUAL_STROBE_EXTRA_DELAY;
	}

	/* Add the fine delay fields from the SFILTER status registers */
	ret = ub960_rxport_read(priv, nport, UB960_RR_SFILTER_STS_0, &v, NULL);
	if (ret)
		return ret;

	clk_delay += v & UB960_IR_RX_ANA_STROBE_SET_CLK_DELAY_MASK;

	ret = ub960_rxport_read(priv, nport, UB960_RR_SFILTER_STS_1, &v, NULL);
	if (ret)
		return ret;

	data_delay += v & UB960_IR_RX_ANA_STROBE_SET_DATA_DELAY_MASK;

	*strobe_pos = data_delay - clk_delay;

	return 0;
}
1604 
/*
 * Program the strobe position of RX port @nport. Negative positions add
 * delay to the clock path, positive to the data path; positions outside
 * the AEQ range additionally use the "extra delay" encoding. The UB954
 * variant packs both paths into one register.
 */
static int ub960_rxport_set_strobe_pos(struct ub960_data *priv,
				       unsigned int nport, s8 strobe_pos)
{
	int ret = 0;

	if (priv->hw_data->chip_type == UB954) {
		u8 clk_data_delay;

		/* Default: no extra delay on either path */
		clk_data_delay = UB954_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY |
				 UB954_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY;

		if (strobe_pos < UB960_MIN_AEQ_STROBE_POS)
			clk_data_delay = abs(strobe_pos) - UB960_MANUAL_STROBE_EXTRA_DELAY;
		else if (strobe_pos > UB960_MAX_AEQ_STROBE_POS)
			clk_data_delay = (strobe_pos - UB960_MANUAL_STROBE_EXTRA_DELAY) <<
					  UB954_IR_RX_ANA_STROBE_SET_DATA_DELAY_SHIFT;
		else if (strobe_pos < 0)
			clk_data_delay = abs(strobe_pos) |
					 UB954_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY;
		else if (strobe_pos > 0)
			clk_data_delay = (strobe_pos |
					  UB954_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY) <<
					  UB954_IR_RX_ANA_STROBE_SET_DATA_DELAY_SHIFT;

		ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
				UB954_IR_RX_ANA_STROBE_SET_CLK_DATA, clk_data_delay, &ret);
	} else {
		u8 clk_delay, data_delay;

		/* Default: no extra delay on either path */
		clk_delay = UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY;
		data_delay = UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY;

		if (strobe_pos < UB960_MIN_AEQ_STROBE_POS)
			clk_delay = abs(strobe_pos) - UB960_MANUAL_STROBE_EXTRA_DELAY;
		else if (strobe_pos > UB960_MAX_AEQ_STROBE_POS)
			data_delay = strobe_pos - UB960_MANUAL_STROBE_EXTRA_DELAY;
		else if (strobe_pos < 0)
			clk_delay = abs(strobe_pos) | UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY;
		else if (strobe_pos > 0)
			data_delay = strobe_pos | UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY;

		ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
				UB960_IR_RX_ANA_STROBE_SET_CLK, clk_delay, &ret);
		ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
				UB960_IR_RX_ANA_STROBE_SET_DATA, data_delay, &ret);
	}

	return ret;
}
1654 
1655 static int ub960_rxport_set_strobe_range(struct ub960_data *priv, s8 strobe_min,
1656 					 s8 strobe_max)
1657 {
1658 	/* Convert the signed strobe pos to positive zero based value */
1659 	strobe_min -= UB960_MIN_AEQ_STROBE_POS;
1660 	strobe_max -= UB960_MIN_AEQ_STROBE_POS;
1661 
1662 	return ub960_write(priv, UB960_XR_SFILTER_CFG,
1663 			   ((u8)strobe_min << UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT) |
1664 			   ((u8)strobe_max << UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT),
1665 			   NULL);
1666 }
1667 
1668 static int ub960_rxport_get_eq_level(struct ub960_data *priv,
1669 				     unsigned int nport, u8 *eq_level)
1670 {
1671 	int ret;
1672 	u8 v;
1673 
1674 	ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_STATUS, &v, NULL);
1675 	if (ret)
1676 		return ret;
1677 
1678 	*eq_level = (v & UB960_RR_AEQ_STATUS_STATUS_1) +
1679 		    (v & UB960_RR_AEQ_STATUS_STATUS_2);
1680 
1681 	return 0;
1682 }
1683 
1684 static int ub960_rxport_set_eq_level(struct ub960_data *priv,
1685 				     unsigned int nport, u8 eq_level)
1686 {
1687 	u8 eq_stage_1_select_value, eq_stage_2_select_value;
1688 	const unsigned int eq_stage_max = 7;
1689 	int ret;
1690 	u8 v;
1691 
1692 	if (eq_level <= eq_stage_max) {
1693 		eq_stage_1_select_value = eq_level;
1694 		eq_stage_2_select_value = 0;
1695 	} else {
1696 		eq_stage_1_select_value = eq_stage_max;
1697 		eq_stage_2_select_value = eq_level - eq_stage_max;
1698 	}
1699 
1700 	ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v, NULL);
1701 	if (ret)
1702 		return ret;
1703 
1704 	v &= ~(UB960_RR_AEQ_BYPASS_EQ_STAGE1_VALUE_MASK |
1705 	       UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_MASK);
1706 	v |= eq_stage_1_select_value << UB960_RR_AEQ_BYPASS_EQ_STAGE1_VALUE_SHIFT;
1707 	v |= eq_stage_2_select_value << UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_SHIFT;
1708 	v |= UB960_RR_AEQ_BYPASS_ENABLE;
1709 
1710 	ret = ub960_rxport_write(priv, nport, UB960_RR_AEQ_BYPASS, v, NULL);
1711 	if (ret)
1712 		return ret;
1713 
1714 	return 0;
1715 }
1716 
/*
 * Constrain the adaptive EQ of RX port @nport to [eq_min, eq_max] and
 * enable the AEQ floor setting. Errors are chained through @ret; the
 * first error is returned.
 */
static int ub960_rxport_set_eq_range(struct ub960_data *priv,
				     unsigned int nport, u8 eq_min, u8 eq_max)
{
	int ret = 0;

	ub960_rxport_write(priv, nport, UB960_RR_AEQ_MIN_MAX,
			   (eq_min << UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT) |
			   (eq_max << UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT),
			   &ret);

	/* Enable AEQ min setting */
	ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_CTL2,
				 UB960_RR_AEQ_CTL2_SET_AEQ_FLOOR,
				 UB960_RR_AEQ_CTL2_SET_AEQ_FLOOR, &ret);

	return ret;
}
1734 
/*
 * Configure equalization for RX port @nport: strobe filtering (manual or
 * automatic with a strobe range) and EQ level (fixed via AEQ bypass, or
 * adaptive within a min/max range).
 */
static int ub960_rxport_config_eq(struct ub960_data *priv, unsigned int nport)
{
	struct ub960_rxport *rxport = priv->rxports[nport];
	int ret;

	/* We also set common settings here. Should be moved elsewhere. */

	if (priv->strobe.manual) {
		/* Disable AEQ_SFILTER_EN */
		ret = ub960_update_bits(priv, UB960_XR_AEQ_CTL1,
					UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN, 0,
					NULL);
		if (ret)
			return ret;
	} else {
		/* Enable SFILTER and error control */
		ret = ub960_write(priv, UB960_XR_AEQ_CTL1,
				  UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_MASK |
					  UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN,
				  NULL);

		if (ret)
			return ret;

		/* Set AEQ strobe range */
		ret = ub960_rxport_set_strobe_range(priv, priv->strobe.min,
						    priv->strobe.max);
		if (ret)
			return ret;
	}

	/* The rest are port specific */

	if (priv->strobe.manual)
		ret = ub960_rxport_set_strobe_pos(priv, nport,
						  rxport->eq.strobe_pos);
	else
		ret = ub960_rxport_set_strobe_pos(priv, nport, 0);

	if (ret)
		return ret;

	if (rxport->eq.manual_eq) {
		ret = ub960_rxport_set_eq_level(priv, nport,
						rxport->eq.manual.eq_level);
		if (ret)
			return ret;

		/* Enable AEQ Bypass */
		ret = ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_BYPASS,
					       UB960_RR_AEQ_BYPASS_ENABLE,
					       UB960_RR_AEQ_BYPASS_ENABLE,
					       NULL);
		if (ret)
			return ret;
	} else {
		ret = ub960_rxport_set_eq_range(priv, nport,
						rxport->eq.aeq.eq_level_min,
						rxport->eq.aeq.eq_level_max);
		if (ret)
			return ret;

		/* Disable AEQ Bypass */
		ret = ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_BYPASS,
					       UB960_RR_AEQ_BYPASS_ENABLE, 0,
					       NULL);
		if (ret)
			return ret;
	}

	return 0;
}
1807 
/*
 * Check whether the RX link of port @nport is locked and error free.
 * *@ok is set to false if the port has no lock, or if any of the status
 * registers, the parity-error counter or the CSI error counter report an
 * error. Returns a negative error code only on register-access failure.
 */
static int ub960_rxport_link_ok(struct ub960_data *priv, unsigned int nport,
				bool *ok)
{
	u8 rx_port_sts1, rx_port_sts2;
	u16 parity_errors;
	u8 csi_rx_sts;
	u8 csi_err_cnt;
	u8 bcc_sts;
	int ret;
	bool errors;

	ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1,
				&rx_port_sts1, NULL);
	if (ret)
		return ret;

	/* No lock means not ok; no point in checking the error state */
	if (!(rx_port_sts1 & UB960_RR_RX_PORT_STS1_LOCK_STS)) {
		*ok = false;
		return 0;
	}

	ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2,
				&rx_port_sts2, NULL);
	if (ret)
		return ret;

	ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS, &csi_rx_sts,
				NULL);
	if (ret)
		return ret;

	ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER,
				&csi_err_cnt, NULL);
	if (ret)
		return ret;

	ret = ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS, &bcc_sts,
				NULL);
	if (ret)
		return ret;

	ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_PAR_ERR_HI,
				  &parity_errors, NULL);
	if (ret)
		return ret;

	errors = (rx_port_sts1 & UB960_RR_RX_PORT_STS1_ERROR_MASK) ||
		 (rx_port_sts2 & UB960_RR_RX_PORT_STS2_ERROR_MASK) ||
		 (bcc_sts & UB960_RR_BCC_STATUS_ERROR_MASK) ||
		 (csi_rx_sts & UB960_RR_CSI_RX_STS_ERROR_MASK) || csi_err_cnt ||
		 parity_errors;

	*ok = !errors;

	return 0;
}
1864 
/*
 * Workaround for a possible FPD RX lockup on UB9702: toggle the PI_MODE
 * field (CHANNEL_MODE bits 4:3) to value 2 and back to 0, with a short
 * delay in between.
 */
static int ub960_rxport_lockup_wa_ub9702(struct ub960_data *priv)
{
	int ret;

	/* Toggle PI_MODE to avoid possible FPD RX lockup */

	ret = ub960_update_bits(priv, UB9702_RR_CHANNEL_MODE, GENMASK(4, 3),
				2 << 3, NULL);
	if (ret)
		return ret;

	usleep_range(1000, 5000);

	return ub960_update_bits(priv, UB9702_RR_CHANNEL_MODE, GENMASK(4, 3),
				 0, NULL);
}
1881 
1882 /*
1883  * Wait for the RX ports to lock, have no errors and have stable strobe position
1884  * and EQ level.
1885  */
static int ub960_rxport_wait_locks(struct ub960_data *priv,
				   unsigned long port_mask,
				   unsigned int *lock_mask)
{
	struct device *dev = &priv->client->dev;
	unsigned long timeout;
	unsigned int link_ok_mask;
	unsigned int missing;
	unsigned int loops;
	u8 nport;
	int ret;

	/* Nothing to wait for */
	if (port_mask == 0) {
		if (lock_mask)
			*lock_mask = 0;
		return 0;
	}

	if (port_mask >= BIT(priv->hw_data->num_rxports))
		return -EINVAL;

	/* Poll all requested ports for up to one second */
	timeout = jiffies + msecs_to_jiffies(1000);
	loops = 0;
	link_ok_mask = 0;

	while (time_before(jiffies, timeout)) {
		bool fpd4_wa = false;
		missing = 0;

		for_each_set_bit(nport, &port_mask,
				 priv->hw_data->num_rxports) {
			struct ub960_rxport *rxport = priv->rxports[nport];
			bool ok;

			if (!rxport)
				continue;

			ret = ub960_rxport_link_ok(priv, nport, &ok);
			if (ret)
				return ret;

			/* An unlocked FPD-Link 4 port may need the lockup WA */
			if (!ok && rxport->cdr_mode == RXPORT_CDR_FPD4)
				fpd4_wa = true;

			/*
			 * We want the link to be ok for two consecutive loops,
			 * as a link could get established just before our test
			 * and drop soon after.
			 */
			if (!ok || !(link_ok_mask & BIT(nport)))
				missing++;

			if (ok)
				link_ok_mask |= BIT(nport);
			else
				link_ok_mask &= ~BIT(nport);
		}

		loops++;

		if (missing == 0)
			break;

		if (fpd4_wa) {
			ret = ub960_rxport_lockup_wa_ub9702(priv);
			if (ret)
				return ret;
		}

		/*
		 * The sleep time of 10 ms was found by testing to give a lock
		 * with a few iterations. It can be decreased if on some setups
		 * the lock can be achieved much faster.
		 */
		fsleep(10 * USEC_PER_MSEC);
	}

	if (lock_mask)
		*lock_mask = link_ok_mask;

	/* Log per-port lock state, frequency and (non-UB9702) EQ details */
	dev_dbg(dev, "Wait locks done in %u loops\n", loops);
	for_each_set_bit(nport, &port_mask, priv->hw_data->num_rxports) {
		struct ub960_rxport *rxport = priv->rxports[nport];
		s8 strobe_pos, eq_level;
		u16 v;

		if (!rxport)
			continue;

		if (!(link_ok_mask & BIT(nport))) {
			dev_dbg(dev, "\trx%u: not locked\n", nport);
			continue;
		}

		ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_FREQ_HIGH,
					  &v, NULL);

		if (ret)
			return ret;

		if (priv->hw_data->chip_type == UB9702) {
			/* The frequency register is in 8.8 fixed-point MHz */
			dev_dbg(dev, "\trx%u: locked, freq %llu Hz\n",
				nport, ((u64)v * HZ_PER_MHZ) >> 8);
		} else {
			ret = ub960_rxport_get_strobe_pos(priv, nport,
							  &strobe_pos);
			if (ret)
				return ret;

			ret = ub960_rxport_get_eq_level(priv, nport, &eq_level);
			if (ret)
				return ret;

			dev_dbg(dev,
				"\trx%u: locked, SP: %d, EQ: %u, freq %llu Hz\n",
				nport, strobe_pos, eq_level,
				((u64)v * HZ_PER_MHZ) >> 8);
		}
	}

	return 0;
}
2008 
2009 static unsigned long ub960_calc_bc_clk_rate_ub960(struct ub960_data *priv,
2010 						  struct ub960_rxport *rxport)
2011 {
2012 	unsigned int mult;
2013 	unsigned int div;
2014 
2015 	switch (rxport->rx_mode) {
2016 	case RXPORT_MODE_RAW10:
2017 	case RXPORT_MODE_RAW12_HF:
2018 	case RXPORT_MODE_RAW12_LF:
2019 		mult = 1;
2020 		div = 10;
2021 		break;
2022 
2023 	case RXPORT_MODE_CSI2_SYNC:
2024 		mult = 2;
2025 		div = 1;
2026 		break;
2027 
2028 	case RXPORT_MODE_CSI2_NONSYNC:
2029 		mult = 2;
2030 		div = 5;
2031 		break;
2032 
2033 	default:
2034 		return 0;
2035 	}
2036 
2037 	return clk_get_rate(priv->refclk) * mult / div;
2038 }
2039 
2040 static unsigned long ub960_calc_bc_clk_rate_ub9702(struct ub960_data *priv,
2041 						   struct ub960_rxport *rxport)
2042 {
2043 	switch (rxport->rx_mode) {
2044 	case RXPORT_MODE_RAW10:
2045 	case RXPORT_MODE_RAW12_HF:
2046 	case RXPORT_MODE_RAW12_LF:
2047 		return 2359400;
2048 
2049 	case RXPORT_MODE_CSI2_SYNC:
2050 		return 47187500;
2051 
2052 	case RXPORT_MODE_CSI2_NONSYNC:
2053 		return 9437500;
2054 
2055 	default:
2056 		return 0;
2057 	}
2058 }
2059 
/*
 * Write an 8-bit register on the remote serializer through its i2c
 * alias, using a raw SMBus transfer. Follows the chained-@err
 * convention of the local register helpers.
 */
static int ub960_rxport_serializer_write(struct ub960_rxport *rxport, u8 reg,
					 u8 val, int *err)
{
	struct ub960_data *priv = rxport->priv;
	struct device *dev = &priv->client->dev;
	union i2c_smbus_data data;
	int ret;

	/* Short-circuit if a previous chained access already failed */
	if (err && *err)
		return *err;

	data.byte = val;

	ret = i2c_smbus_xfer(priv->client->adapter, rxport->ser.alias, 0,
			     I2C_SMBUS_WRITE, reg, I2C_SMBUS_BYTE_DATA, &data);
	if (ret)
		dev_err(dev,
			"rx%u: cannot write serializer register 0x%02x (%d)!\n",
			rxport->nport, reg, ret);

	if (ret && err)
		*err = ret;

	return ret;
}
2085 
/*
 * Read an 8-bit register from the remote serializer through its i2c
 * alias, using a raw SMBus transfer. Follows the chained-@err
 * convention of the local register helpers; *@val is only written on
 * success.
 */
static int ub960_rxport_serializer_read(struct ub960_rxport *rxport, u8 reg,
					u8 *val, int *err)
{
	struct ub960_data *priv = rxport->priv;
	struct device *dev = &priv->client->dev;
	union i2c_smbus_data data = { 0 };
	int ret;

	/* Short-circuit if a previous chained access already failed */
	if (err && *err)
		return *err;

	ret = i2c_smbus_xfer(priv->client->adapter, rxport->ser.alias,
			     priv->client->flags, I2C_SMBUS_READ, reg,
			     I2C_SMBUS_BYTE_DATA, &data);
	if (ret)
		dev_err(dev,
			"rx%u: cannot read serializer register 0x%02x (%d)!\n",
			rxport->nport, reg, ret);
	else
		*val = data.byte;

	if (ret && err)
		*err = ret;

	return ret;
}
2112 
2113 static int ub960_serializer_temp_ramp(struct ub960_rxport *rxport)
2114 {
2115 	struct ub960_data *priv = rxport->priv;
2116 	short temp_dynamic_offset[] = {-1, -1, 0, 0, 1, 1, 1, 3};
2117 	u8 temp_dynamic_cfg;
2118 	u8 nport = rxport->nport;
2119 	u8 ser_temp_code;
2120 	int ret = 0;
2121 
2122 	/* Configure temp ramp only on UB953 */
2123 	if (!fwnode_device_is_compatible(rxport->ser.fwnode, "ti,ds90ub953-q1"))
2124 		return 0;
2125 
2126 	/* Read current serializer die temperature */
2127 	ub960_rxport_read(priv, nport, UB960_RR_SENSOR_STS_2, &ser_temp_code,
2128 			  &ret);
2129 
2130 	/* Enable I2C passthrough on back channel */
2131 	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
2132 				 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
2133 				 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH, &ret);
2134 
2135 	if (ret)
2136 		return ret;
2137 
2138 	/* Select indirect page for analog regs on the serializer */
2139 	ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_CTL,
2140 				      UB953_IND_TARGET_ANALOG << 2, &ret);
2141 
2142 	/* Set temperature ramp dynamic and static config */
2143 	ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_ADDR,
2144 				      UB953_IND_ANA_TEMP_DYNAMIC_CFG, &ret);
2145 	ub960_rxport_serializer_read(rxport, UB953_REG_IND_ACC_DATA,
2146 				     &temp_dynamic_cfg, &ret);
2147 
2148 	if (ret)
2149 		return ret;
2150 
2151 	temp_dynamic_cfg |= UB953_IND_ANA_TEMP_DYNAMIC_CFG_OV;
2152 	temp_dynamic_cfg += temp_dynamic_offset[ser_temp_code];
2153 
2154 	/* Update temp static config */
2155 	ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_ADDR,
2156 				      UB953_IND_ANA_TEMP_STATIC_CFG, &ret);
2157 	ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_DATA,
2158 				      UB953_IND_ANA_TEMP_STATIC_CFG_MASK, &ret);
2159 
2160 	/* Update temperature ramp dynamic config */
2161 	ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_ADDR,
2162 				      UB953_IND_ANA_TEMP_DYNAMIC_CFG, &ret);
2163 
2164 	/* Enable I2C auto ack on BC before we set dynamic cfg and reset */
2165 	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
2166 				 UB960_RR_BCC_CONFIG_AUTO_ACK_ALL,
2167 				 UB960_RR_BCC_CONFIG_AUTO_ACK_ALL, &ret);
2168 
2169 	ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_DATA,
2170 				      temp_dynamic_cfg, &ret);
2171 
2172 	if (ret)
2173 		return ret;
2174 
2175 	/* Soft reset to apply PLL updates */
2176 	ub960_rxport_serializer_write(rxport, UB953_REG_RESET_CTL,
2177 				      UB953_REG_RESET_CTL_DIGITAL_RESET_0,
2178 				      &ret);
2179 	msleep(20);
2180 
2181 	/* Disable I2C passthrough and auto-ack on BC */
2182 	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
2183 				 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH |
2184 					 UB960_RR_BCC_CONFIG_AUTO_ACK_ALL,
2185 				 0x0, &ret);
2186 
2187 	return ret;
2188 }
2189 
/*
 * Configure back-channel related settings in the remote serializer.
 *
 * Temporarily enables I2C passthrough and auto-ack on the back channel so
 * the serializer registers can be written before the link is fully up, then
 * disables both again. Returns 0 on success (including when the serializer
 * address is unknown and the port is skipped) or a negative error code.
 */
static int ub960_rxport_bc_ser_config(struct ub960_rxport *rxport)
{
	struct ub960_data *priv = rxport->priv;
	struct device *dev = &priv->client->dev;
	u8 nport = rxport->nport;
	int ret = 0;

	/* Skip port if serializer's address is not known */
	if (rxport->ser.addr < 0) {
		dev_dbg(dev,
			"rx%u: serializer address missing, skip configuration\n",
			nport);
		return 0;
	}

	/*
	 * Note: the code here probably only works for CSI-2 serializers in
	 * sync mode. To support other serializers the BC related configuration
	 * should be done before calling this function.
	 */

	/* Enable I2C passthrough and auto-ack on BC */
	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
				 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH |
					 UB960_RR_BCC_CONFIG_AUTO_ACK_ALL,
				 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH |
					 UB960_RR_BCC_CONFIG_AUTO_ACK_ALL,
				 &ret);

	if (ret)
		return ret;

	/* Disable BC alternate mode auto detect */
	ub960_rxport_serializer_write(rxport, UB971_ENH_BC_CHK, 0x02, &ret);
	/* Decrease link detect timer */
	ub960_rxport_serializer_write(rxport, UB953_REG_BC_CTRL, 0x06, &ret);

	/* Disable I2C passthrough and auto-ack on BC */
	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
				 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH |
					 UB960_RR_BCC_CONFIG_AUTO_ACK_ALL,
				 0x0, &ret);

	return ret;
}
2235 
2236 static int ub960_rxport_add_serializer(struct ub960_data *priv, u8 nport)
2237 {
2238 	struct ub960_rxport *rxport = priv->rxports[nport];
2239 	struct device *dev = &priv->client->dev;
2240 	struct ds90ub9xx_platform_data *ser_pdata = &rxport->ser.pdata;
2241 	struct i2c_board_info ser_info = {
2242 		.fwnode = rxport->ser.fwnode,
2243 		.platform_data = ser_pdata,
2244 	};
2245 
2246 	ser_pdata->port = nport;
2247 	ser_pdata->atr = priv->atr;
2248 	if (priv->hw_data->chip_type == UB9702)
2249 		ser_pdata->bc_rate = ub960_calc_bc_clk_rate_ub9702(priv, rxport);
2250 	else
2251 		ser_pdata->bc_rate = ub960_calc_bc_clk_rate_ub960(priv, rxport);
2252 
2253 	/*
2254 	 * The serializer is added under the same i2c adapter as the
2255 	 * deserializer. This is not quite right, as the serializer is behind
2256 	 * the FPD-Link.
2257 	 */
2258 	ser_info.addr = rxport->ser.alias;
2259 	rxport->ser.client =
2260 		i2c_new_client_device(priv->client->adapter, &ser_info);
2261 	if (IS_ERR(rxport->ser.client)) {
2262 		dev_err(dev, "rx%u: cannot add %s i2c device", nport,
2263 			ser_info.type);
2264 		return PTR_ERR(rxport->ser.client);
2265 	}
2266 
2267 	dev_dbg(dev, "rx%u: remote serializer at alias 0x%02x (%u-%04x)\n",
2268 		nport, rxport->ser.client->addr,
2269 		rxport->ser.client->adapter->nr, rxport->ser.client->addr);
2270 
2271 	return 0;
2272 }
2273 
2274 static void ub960_rxport_remove_serializer(struct ub960_data *priv, u8 nport)
2275 {
2276 	struct ub960_rxport *rxport = priv->rxports[nport];
2277 
2278 	i2c_unregister_device(rxport->ser.client);
2279 	rxport->ser.client = NULL;
2280 }
2281 
2282 /* Add serializer i2c devices for all initialized ports */
2283 static int ub960_rxport_add_serializers(struct ub960_data *priv)
2284 {
2285 	unsigned int failed_nport;
2286 	int ret;
2287 
2288 	for_each_active_rxport(priv, it) {
2289 		ret = ub960_rxport_add_serializer(priv, it.nport);
2290 		if (ret) {
2291 			failed_nport = it.nport;
2292 			goto err_remove_sers;
2293 		}
2294 	}
2295 
2296 	return 0;
2297 
2298 err_remove_sers:
2299 	while (failed_nport--) {
2300 		struct ub960_rxport *rxport = priv->rxports[failed_nport];
2301 
2302 		if (!rxport)
2303 			continue;
2304 
2305 		ub960_rxport_remove_serializer(priv, failed_nport);
2306 	}
2307 
2308 	return ret;
2309 }
2310 
/* Remove the serializer i2c devices of all active RX ports */
static void ub960_rxport_remove_serializers(struct ub960_data *priv)
{
	for_each_active_rxport(priv, it)
		ub960_rxport_remove_serializer(priv, it.nport);
}
2316 
2317 static int ub960_init_tx_port(struct ub960_data *priv,
2318 			      struct ub960_txport *txport)
2319 {
2320 	unsigned int nport = txport->nport;
2321 	u8 csi_ctl = 0;
2322 
2323 	/*
2324 	 * From the datasheet: "initial CSI Skew-Calibration
2325 	 * sequence [...] should be set when operating at 1.6 Gbps"
2326 	 */
2327 	if (priv->tx_data_rate == MHZ(1600))
2328 		csi_ctl |= UB960_TR_CSI_CTL_CSI_CAL_EN;
2329 
2330 	csi_ctl |= (4 - txport->num_data_lanes) << 4;
2331 
2332 	if (!txport->non_continous_clk)
2333 		csi_ctl |= UB960_TR_CSI_CTL_CSI_CONTS_CLOCK;
2334 
2335 	return ub960_txport_write(priv, nport, UB960_TR_CSI_CTL, csi_ctl, NULL);
2336 }
2337 
2338 static int ub960_init_tx_ports_ub960(struct ub960_data *priv)
2339 {
2340 	u8 speed_select;
2341 
2342 	switch (priv->tx_data_rate) {
2343 	case MHZ(400):
2344 		speed_select = 3;
2345 		break;
2346 	case MHZ(800):
2347 		speed_select = 2;
2348 		break;
2349 	case MHZ(1200):
2350 		speed_select = 1;
2351 		break;
2352 	case MHZ(1600):
2353 	default:
2354 		speed_select = 0;
2355 		break;
2356 	}
2357 
2358 	return ub960_write(priv, UB960_SR_CSI_PLL_CTL, speed_select, NULL);
2359 }
2360 
2361 static int ub960_init_tx_ports_ub9702(struct ub960_data *priv)
2362 {
2363 	u8 speed_select;
2364 	u8 ana_pll_div;
2365 	u8 pll_div;
2366 	int ret = 0;
2367 
2368 	switch (priv->tx_data_rate) {
2369 	case MHZ(400):
2370 		speed_select = 3;
2371 		pll_div = 0x10;
2372 		ana_pll_div = 0xa2;
2373 		break;
2374 	case MHZ(800):
2375 		speed_select = 2;
2376 		pll_div = 0x10;
2377 		ana_pll_div = 0x92;
2378 		break;
2379 	case MHZ(1200):
2380 		speed_select = 1;
2381 		pll_div = 0x18;
2382 		ana_pll_div = 0x90;
2383 		break;
2384 	case MHZ(1500):
2385 		speed_select = 0;
2386 		pll_div = 0x0f;
2387 		ana_pll_div = 0x82;
2388 		break;
2389 	case MHZ(1600):
2390 	default:
2391 		speed_select = 0;
2392 		pll_div = 0x10;
2393 		ana_pll_div = 0x82;
2394 		break;
2395 	case MHZ(2500):
2396 		speed_select = 0x10;
2397 		pll_div = 0x19;
2398 		ana_pll_div = 0x80;
2399 		break;
2400 	}
2401 
2402 	ub960_write(priv, UB960_SR_CSI_PLL_CTL, speed_select, &ret);
2403 	ub960_write(priv, UB9702_SR_CSI_PLL_DIV, pll_div, &ret);
2404 	ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA,
2405 			UB9702_IR_CSI_ANA_CSIPLL_REG_1, ana_pll_div, &ret);
2406 
2407 	return ret;
2408 }
2409 
2410 static int ub960_init_tx_ports(struct ub960_data *priv)
2411 {
2412 	int ret;
2413 
2414 	if (priv->hw_data->chip_type == UB9702)
2415 		ret = ub960_init_tx_ports_ub9702(priv);
2416 	else
2417 		ret = ub960_init_tx_ports_ub960(priv);
2418 
2419 	if (ret)
2420 		return ret;
2421 
2422 	for (unsigned int nport = 0; nport < priv->hw_data->num_txports;
2423 	     nport++) {
2424 		struct ub960_txport *txport = priv->txports[nport];
2425 
2426 		if (!txport)
2427 			continue;
2428 
2429 		ret = ub960_init_tx_port(priv, txport);
2430 		if (ret)
2431 			return ret;
2432 	}
2433 
2434 	return 0;
2435 }
2436 
/*
 * Initial configuration of one UB960 RX port: back-channel frequency, FPD3
 * input mode, LV/FV polarities, interrupts, I2C passthrough and serializer
 * alias, EQ settings, and finally the port enable. Register-write errors are
 * accumulated in @ret and returned at the end.
 */
static int ub960_init_rx_port_ub960(struct ub960_data *priv,
				    struct ub960_rxport *rxport)
{
	unsigned int nport = rxport->nport;
	u32 bc_freq_val;
	int ret = 0;

	/*
	 * Back channel frequency select.
	 * Override FREQ_SELECT from the strap.
	 * 0 - 2.5 Mbps (DS90UB913A-Q1 / DS90UB933-Q1)
	 * 2 - 10 Mbps
	 * 6 - 50 Mbps (DS90UB953-Q1)
	 *
	 * Note that changing this setting will result in some errors on the back
	 * channel for a short period of time.
	 */

	switch (rxport->rx_mode) {
	case RXPORT_MODE_RAW10:
	case RXPORT_MODE_RAW12_HF:
	case RXPORT_MODE_RAW12_LF:
		bc_freq_val = 0;
		break;

	case RXPORT_MODE_CSI2_NONSYNC:
		bc_freq_val = 2;
		break;

	case RXPORT_MODE_CSI2_SYNC:
		bc_freq_val = 6;
		break;

	default:
		return -EINVAL;
	}

	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
				 UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK,
				 bc_freq_val, &ret);

	switch (rxport->rx_mode) {
	case RXPORT_MODE_RAW10:
		/* FPD3_MODE = RAW10 Mode (DS90UB913A-Q1 / DS90UB933-Q1 compatible) */
		ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG,
					 UB960_RR_PORT_CONFIG_FPD3_MODE_MASK,
					 0x3, &ret);

		/*
		 * RAW10_8BIT_CTL = 0b10 : 8-bit processing using upper 8 bits
		 */
		ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2,
			UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_MASK,
			0x2 << UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_SHIFT,
			&ret);

		break;

	case RXPORT_MODE_RAW12_HF:
	case RXPORT_MODE_RAW12_LF:
		/* Not implemented */
		return -EINVAL;

	case RXPORT_MODE_CSI2_SYNC:
	case RXPORT_MODE_CSI2_NONSYNC:
		/* CSI-2 Mode (DS90UB953-Q1 compatible) */
		ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG, 0x3,
					 0x0, &ret);

		break;
	}

	/* LV_POLARITY & FV_POLARITY */
	ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2, 0x3,
				 rxport->lv_fv_pol, &ret);

	/* Enable all interrupt sources from this port */
	ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_HI, 0x07, &ret);
	ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_LO, 0x7f, &ret);

	/* Enable I2C_PASS_THROUGH */
	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
				 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
				 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH, &ret);

	/* Enable I2C communication to the serializer via the alias addr */
	ub960_rxport_write(priv, nport, UB960_RR_SER_ALIAS_ID,
			   rxport->ser.alias << 1, &ret);

	/* Configure EQ related settings */
	/* NOTE(review): return value ignored here — confirm intentional */
	ub960_rxport_config_eq(priv, nport);

	/* Enable RX port */
	ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport),
			  &ret);

	return ret;
}
2535 
/*
 * Initialize all active UB960 RX ports: per-port register configuration, a
 * soft reset, waiting for every active port to lock, the serializer
 * temperature ramp, and finally clearing the RX errors accumulated while
 * reconfiguring. Returns 0 on success or a negative error code.
 */
static int ub960_init_rx_ports_ub960(struct ub960_data *priv)
{
	struct device *dev = &priv->client->dev;
	unsigned int port_lock_mask;
	unsigned int port_mask;
	int ret;

	for_each_active_rxport(priv, it) {
		ret = ub960_init_rx_port_ub960(priv, it.rxport);
		if (ret)
			return ret;
	}

	/* Soft reset to apply the new port configuration */
	ret = ub960_reset(priv, false);
	if (ret)
		return ret;

	port_mask = 0;

	for_each_active_rxport(priv, it)
		port_mask |= BIT(it.nport);

	/* All active ports must report lock before continuing */
	ret = ub960_rxport_wait_locks(priv, port_mask, &port_lock_mask);
	if (ret)
		return ret;

	if (port_mask != port_lock_mask) {
		ret = -EIO;
		dev_err_probe(dev, ret, "Failed to lock all RX ports\n");
		return ret;
	}

	/* Set temperature ramp on serializer */
	for_each_active_rxport(priv, it) {
		ret = ub960_serializer_temp_ramp(it.rxport);
		if (ret)
			return ret;

		/* Re-enable passthrough; the temp ramp sequence disabled it */
		ub960_rxport_update_bits(priv, it.nport, UB960_RR_BCC_CONFIG,
					 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
					 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
					 &ret);
		if (ret)
			return ret;
	}

	/*
	 * Clear any errors caused by switching the RX port settings while
	 * probing.
	 */
	ret = ub960_clear_rx_errors(priv);
	if (ret)
		return ret;

	return 0;
}
2592 
2593 /*
2594  * UB9702 specific initial RX port configuration
2595  */
2596 
/*
 * Fully power down an unused UB9702 RX port: disable the port itself, its
 * FPD receiver and back channel, the internal RX blocks, the AEQ/PI/oDAC
 * analog stages and the FPD3 CDR. Errors are accumulated in @ret.
 */
static int ub960_turn_off_rxport_ub9702(struct ub960_data *priv,
					unsigned int nport)
{
	int ret = 0;

	/* Disable RX port */
	ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), 0, &ret);

	/* Disable FPD Rx and FPD BC CMR */
	ub960_rxport_write(priv, nport, UB9702_RR_RX_CTL_2, 0x1b, &ret);

	/* Disable FPD BC Tx */
	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG, BIT(4), 0,
				 &ret);

	/* Disable internal RX blocks */
	ub960_rxport_write(priv, nport, UB9702_RR_RX_CTL_1, 0x15, &ret);

	/* Disable AEQ */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_AEQ_CFG_2, 0x03, &ret);

	/* PI disabled and oDAC disabled */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_AEQ_CFG_4, 0x09, &ret);

	/* AEQ configured for disabled link */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_AEQ_CFG_1, 0x20, &ret);

	/* disable AEQ clock and DFE */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_AEQ_CFG_3, 0x45, &ret);

	/* Powerdown FPD3 CDR */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_FPD3_CDR_CTRL_SEL_5, 0x82, &ret);

	return ret;
}
2637 
2638 static int ub960_set_bc_drv_config_ub9702(struct ub960_data *priv,
2639 					  unsigned int nport)
2640 {
2641 	u8 fpd_bc_ctl0;
2642 	u8 fpd_bc_ctl1;
2643 	u8 fpd_bc_ctl2;
2644 	int ret = 0;
2645 
2646 	if (priv->rxports[nport]->cdr_mode == RXPORT_CDR_FPD4) {
2647 		/* Set FPD PBC drv into FPD IV mode */
2648 
2649 		fpd_bc_ctl0 = 0;
2650 		fpd_bc_ctl1 = 0;
2651 		fpd_bc_ctl2 = 0;
2652 	} else {
2653 		/* Set FPD PBC drv into FPD III mode */
2654 
2655 		fpd_bc_ctl0 = 2;
2656 		fpd_bc_ctl1 = 1;
2657 		fpd_bc_ctl2 = 5;
2658 	}
2659 
2660 	ub960_ind_update_bits(priv, UB960_IND_TARGET_RX_ANA(nport),
2661 			      UB9702_IR_RX_ANA_FPD_BC_CTL0, GENMASK(7, 5),
2662 			      fpd_bc_ctl0 << 5, &ret);
2663 
2664 	ub960_ind_update_bits(priv, UB960_IND_TARGET_RX_ANA(nport),
2665 			      UB9702_IR_RX_ANA_FPD_BC_CTL1, BIT(6),
2666 			      fpd_bc_ctl1 << 6, &ret);
2667 
2668 	ub960_ind_update_bits(priv, UB960_IND_TARGET_RX_ANA(nport),
2669 			      UB9702_IR_RX_ANA_FPD_BC_CTL2, GENMASK(6, 3),
2670 			      fpd_bc_ctl2 << 3, &ret);
2671 
2672 	return ret;
2673 }
2674 
/*
 * Put RX port @nport into FPD-Link IV sync mode and enable it. FPD4 auto
 * recovery is disabled while the port is being enabled and re-enabled
 * afterwards.
 */
static int ub960_set_fpd4_sync_mode_ub9702(struct ub960_data *priv,
					   unsigned int nport)
{
	int ret = 0;

	/* FPD4 Sync Mode */
	ub960_rxport_write(priv, nport, UB9702_RR_CHANNEL_MODE, 0x0, &ret);

	/* BC_FREQ_SELECT = (PLL_FREQ/3200) Mbps */
	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
				 UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK, 6, &ret);

	if (ret)
		return ret;

	ret = ub960_set_bc_drv_config_ub9702(priv, nport);
	if (ret)
		return ret;

	/* Set AEQ timer to 400us/step */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_SYSTEM_INIT_REG0, 0x2f, &ret);

	/* Disable FPD4 Auto Recovery */
	ub960_update_bits(priv, UB9702_SR_CSI_EXCLUSIVE_FWD2, GENMASK(5, 4), 0,
			  &ret);

	/* Enable RX port */
	ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport),
			  &ret);

	/* Enable FPD4 Auto Recovery */
	ub960_update_bits(priv, UB9702_SR_CSI_EXCLUSIVE_FWD2, GENMASK(5, 4),
			  BIT(4), &ret);

	return ret;
}
2712 
/*
 * Put RX port @nport into FPD-Link IV non-sync (async) mode and enable it.
 * FPD4 auto recovery is disabled while the port is being enabled and
 * re-enabled afterwards.
 */
static int ub960_set_fpd4_async_mode_ub9702(struct ub960_data *priv,
					    unsigned int nport)
{
	int ret = 0;

	/* FPD4 ASync Mode */
	ub960_rxport_write(priv, nport, UB9702_RR_CHANNEL_MODE, 0x1, &ret);

	/* 10Mbps w/ BC enabled */
	/* BC_FREQ_SELECT=(PLL_FREQ/3200) Mbps */
	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
				 UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK, 2, &ret);

	if (ret)
		return ret;

	ret = ub960_set_bc_drv_config_ub9702(priv, nport);
	if (ret)
		return ret;

	/* Set AEQ timer to 400us/step */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_SYSTEM_INIT_REG0, 0x2f, &ret);

	/* Disable FPD4 Auto Recovery */
	ub960_update_bits(priv, UB9702_SR_CSI_EXCLUSIVE_FWD2, GENMASK(5, 4), 0,
			  &ret);

	/* Enable RX port */
	ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport),
			  &ret);

	/* Enable FPD4 Auto Recovery */
	ub960_update_bits(priv, UB9702_SR_CSI_EXCLUSIVE_FWD2, GENMASK(5, 4),
			  BIT(4), &ret);

	return ret;
}
2751 
/*
 * Put RX port @nport into FPD-Link III (CSI-2 compatible) sync mode and
 * enable it.
 */
static int ub960_set_fpd3_sync_mode_ub9702(struct ub960_data *priv,
					   unsigned int nport)
{
	int ret = 0;

	/* FPD3 Sync Mode */
	ub960_rxport_write(priv, nport, UB9702_RR_CHANNEL_MODE, 0x2, &ret);

	/* BC_FREQ_SELECT=(PLL_FREQ/3200) Mbps */
	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
				 UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK, 6, &ret);

	/* Set AEQ_LOCK_MODE = 1 */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_FPD3_AEQ_CTRL_SEL_1, BIT(7), &ret);

	if (ret)
		return ret;

	ret = ub960_set_bc_drv_config_ub9702(priv, nport);
	if (ret)
		return ret;

	/* Enable RX port */
	ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport),
			  &ret);

	return ret;
}
2781 
/*
 * Put RX port @nport into FPD-Link III RAW10 DVP mode (DS90UB913A/933
 * compatible) and enable it.
 */
static int ub960_set_raw10_dvp_mode_ub9702(struct ub960_data *priv,
					   unsigned int nport)
{
	int ret = 0;

	/* FPD3 RAW10 Mode */
	ub960_rxport_write(priv, nport, UB9702_RR_CHANNEL_MODE, 0x5, &ret);

	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
				 UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK, 0, &ret);

	/* Set AEQ_LOCK_MODE = 1 */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_FPD3_AEQ_CTRL_SEL_1, BIT(7), &ret);

	/*
	 * RAW10_8BIT_CTL: 0b10 = 8-bit processing using upper 8 bits,
	 * 0b11 = 8-bit processing using lower 8 bits. Upper 8 bits are
	 * selected here.
	 */
	ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2, 0x3 << 6,
				 0x2 << 6, &ret);

	/* LV_POLARITY & FV_POLARITY */
	ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2, 0x3,
				 priv->rxports[nport]->lv_fv_pol, &ret);

	if (ret)
		return ret;

	ret = ub960_set_bc_drv_config_ub9702(priv, nport);
	if (ret)
		return ret;

	/* Enable RX port */
	ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport),
			  &ret);

	return ret;
}
2821 
2822 static int ub960_configure_rx_port_ub9702(struct ub960_data *priv,
2823 					  unsigned int nport)
2824 {
2825 	struct device *dev = &priv->client->dev;
2826 	struct ub960_rxport *rxport = priv->rxports[nport];
2827 	int ret;
2828 
2829 	if (!rxport) {
2830 		ret = ub960_turn_off_rxport_ub9702(priv, nport);
2831 		if (ret)
2832 			return ret;
2833 
2834 		dev_dbg(dev, "rx%u: disabled\n", nport);
2835 		return 0;
2836 	}
2837 
2838 	switch (rxport->cdr_mode) {
2839 	case RXPORT_CDR_FPD4:
2840 		switch (rxport->rx_mode) {
2841 		case RXPORT_MODE_CSI2_SYNC:
2842 			ret = ub960_set_fpd4_sync_mode_ub9702(priv, nport);
2843 			if (ret)
2844 				return ret;
2845 
2846 			dev_dbg(dev, "rx%u: FPD-Link IV SYNC mode\n", nport);
2847 			break;
2848 		case RXPORT_MODE_CSI2_NONSYNC:
2849 			ret = ub960_set_fpd4_async_mode_ub9702(priv, nport);
2850 			if (ret)
2851 				return ret;
2852 
2853 			dev_dbg(dev, "rx%u: FPD-Link IV ASYNC mode\n", nport);
2854 			break;
2855 		default:
2856 			dev_err(dev, "rx%u: unsupported FPD4 mode %u\n", nport,
2857 				rxport->rx_mode);
2858 			return -EINVAL;
2859 		}
2860 		break;
2861 
2862 	case RXPORT_CDR_FPD3:
2863 		switch (rxport->rx_mode) {
2864 		case RXPORT_MODE_CSI2_SYNC:
2865 			ret = ub960_set_fpd3_sync_mode_ub9702(priv, nport);
2866 			if (ret)
2867 				return ret;
2868 
2869 			dev_dbg(dev, "rx%u: FPD-Link III SYNC mode\n", nport);
2870 			break;
2871 		case RXPORT_MODE_RAW10:
2872 			ret = ub960_set_raw10_dvp_mode_ub9702(priv, nport);
2873 			if (ret)
2874 				return ret;
2875 
2876 			dev_dbg(dev, "rx%u: FPD-Link III RAW10 DVP mode\n",
2877 				nport);
2878 			break;
2879 		default:
2880 			dev_err(&priv->client->dev,
2881 				"rx%u: unsupported FPD3 mode %u\n", nport,
2882 				rxport->rx_mode);
2883 			return -EINVAL;
2884 		}
2885 		break;
2886 
2887 	default:
2888 		dev_err(&priv->client->dev, "rx%u: unsupported CDR mode %u\n",
2889 			nport, rxport->cdr_mode);
2890 		return -EINVAL;
2891 	}
2892 
2893 	return 0;
2894 }
2895 
2896 static int ub960_lock_recovery_ub9702(struct ub960_data *priv,
2897 				      unsigned int nport)
2898 {
2899 	struct device *dev = &priv->client->dev;
2900 	/* Assumption that max AEQ should be under 16 */
2901 	const u8 rx_aeq_limit = 16;
2902 	u8 prev_aeq = 0xff;
2903 	bool rx_lock;
2904 
2905 	for (unsigned int retry = 0; retry < 3; ++retry) {
2906 		u8 port_sts1;
2907 		u8 rx_aeq;
2908 		int ret;
2909 
2910 		ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1,
2911 					&port_sts1, NULL);
2912 		if (ret)
2913 			return ret;
2914 
2915 		rx_lock = port_sts1 & UB960_RR_RX_PORT_STS1_PORT_PASS;
2916 
2917 		if (!rx_lock) {
2918 			ret = ub960_rxport_lockup_wa_ub9702(priv);
2919 			if (ret)
2920 				return ret;
2921 
2922 			/* Restart AEQ by changing max to 0 --> 0x23 */
2923 			ret = ub960_write_ind(priv,
2924 					      UB960_IND_TARGET_RX_ANA(nport),
2925 					      UB9702_IR_RX_ANA_AEQ_ALP_SEL7, 0,
2926 					      NULL);
2927 			if (ret)
2928 				return ret;
2929 
2930 			msleep(20);
2931 
2932 			/* AEQ Restart */
2933 			ret = ub960_write_ind(priv,
2934 					      UB960_IND_TARGET_RX_ANA(nport),
2935 					      UB9702_IR_RX_ANA_AEQ_ALP_SEL7,
2936 					      0x23, NULL);
2937 
2938 			if (ret)
2939 				return ret;
2940 
2941 			msleep(20);
2942 			dev_dbg(dev, "rx%u: no lock, retry = %u\n", nport,
2943 				retry);
2944 
2945 			continue;
2946 		}
2947 
2948 		ret = ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
2949 				     UB9702_IR_RX_ANA_AEQ_ALP_SEL11, &rx_aeq,
2950 				     NULL);
2951 		if (ret)
2952 			return ret;
2953 
2954 		if (rx_aeq < rx_aeq_limit) {
2955 			dev_dbg(dev,
2956 				"rx%u: locked and AEQ normal before setting AEQ window\n",
2957 				nport);
2958 			return 0;
2959 		}
2960 
2961 		if (rx_aeq != prev_aeq) {
2962 			ret = ub960_rxport_lockup_wa_ub9702(priv);
2963 			if (ret)
2964 				return ret;
2965 
2966 			/* Restart AEQ by changing max to 0 --> 0x23 */
2967 			ret = ub960_write_ind(priv,
2968 					      UB960_IND_TARGET_RX_ANA(nport),
2969 					      UB9702_IR_RX_ANA_AEQ_ALP_SEL7,
2970 					      0, NULL);
2971 			if (ret)
2972 				return ret;
2973 
2974 			msleep(20);
2975 
2976 			/* AEQ Restart */
2977 			ret = ub960_write_ind(priv,
2978 					      UB960_IND_TARGET_RX_ANA(nport),
2979 					      UB9702_IR_RX_ANA_AEQ_ALP_SEL7,
2980 					      0x23, NULL);
2981 			if (ret)
2982 				return ret;
2983 
2984 			msleep(20);
2985 
2986 			dev_dbg(dev,
2987 				"rx%u: high AEQ at initial check recovery loop, retry=%u\n",
2988 				nport, retry);
2989 
2990 			prev_aeq = rx_aeq;
2991 		} else {
2992 			dev_dbg(dev,
2993 				"rx%u: lossy cable detected, RX_AEQ %#x, RX_AEQ_LIMIT %#x, retry %u\n",
2994 				nport, rx_aeq, rx_aeq_limit, retry);
2995 			dev_dbg(dev,
2996 				"rx%u: will continue with initiation sequence but high AEQ\n",
2997 				nport);
2998 			return 0;
2999 		}
3000 	}
3001 
3002 	dev_err(dev, "rx%u: max number of retries: %s\n", nport,
3003 		rx_lock ? "unstable AEQ" : "no lock");
3004 
3005 	return -EIO;
3006 }
3007 
/*
 * Enable FPD-Link IV AEQ LMS adaptation on RX port @nport: pin the AEQ
 * window around the currently-adapted value (min = current, max = current+1),
 * release the VGA overrides, enable the VGA sweep and set its thresholds,
 * and program the AEQ timer. Errors are accumulated in @ret.
 */
static int ub960_enable_aeq_lms_ub9702(struct ub960_data *priv,
				       unsigned int nport)
{
	struct device *dev = &priv->client->dev;
	u8 read_aeq_init;
	int ret;

	ret = ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			     UB9702_IR_RX_ANA_AEQ_ALP_SEL11, &read_aeq_init,
			     NULL);
	if (ret)
		return ret;

	dev_dbg(dev, "rx%u: initial AEQ = %#x\n", nport, read_aeq_init);

	/* Set AEQ Min */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_AEQ_ALP_SEL6, read_aeq_init, &ret);
	/* Set AEQ Max */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_AEQ_ALP_SEL7, read_aeq_init + 1, &ret);
	/* Set AEQ offset to 0 */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_AEQ_ALP_SEL10, 0x0, &ret);

	/* Enable AEQ tap2 */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_EQ_CTRL_SEL_38, 0x00, &ret);
	/* Set VGA Gain 1 Gain 2 override to 0 */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_VGA_CTRL_SEL_8, 0x00, &ret);
	/* Set VGA Initial Sweep Gain to 0 */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_VGA_CTRL_SEL_6, 0x80, &ret);
	/* Set VGA_Adapt (VGA Gain) override to 0 (thermometer encoded) */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_VGA_CTRL_SEL_3, 0x00, &ret);
	/* Enable VGA_SWEEP */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_EQ_ADAPT_CTRL, 0x40, &ret);
	/* Disable VGA_SWEEP_GAIN_OV, disable VGA_TUNE_OV */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_EQ_OVERRIDE_CTRL, 0x00, &ret);

	/* Set VGA HIGH Threshold to 43 */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_VGA_CTRL_SEL_1, 0x2b, &ret);
	/* Set VGA LOW Threshold to 18 */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_VGA_CTRL_SEL_2, 0x12, &ret);
	/* Set vga_sweep_th to 32 */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_EQ_CTRL_SEL_15, 0x20, &ret);
	/* Set AEQ timer to 400us/step and parity threshold to 7 */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_SYSTEM_INIT_REG0, 0xef, &ret);

	if (ret)
		return ret;

	dev_dbg(dev, "rx%u: enable FPD-Link IV AEQ LMS\n", nport);

	return 0;
}
3072 
3073 static int ub960_enable_dfe_lms_ub9702(struct ub960_data *priv,
3074 				       unsigned int nport)
3075 {
3076 	struct device *dev = &priv->client->dev;
3077 	int ret = 0;
3078 
3079 	/* Enable DFE LMS */
3080 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
3081 			UB9702_IR_RX_ANA_EQ_CTRL_SEL_24, 0x40, &ret);
3082 	/* Disable VGA Gain1 override */
3083 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
3084 			UB9702_IR_RX_ANA_GAIN_CTRL_0, 0x20, &ret);
3085 
3086 	if (ret)
3087 		return ret;
3088 
3089 	usleep_range(1000, 5000);
3090 
3091 	/* Disable VGA Gain2 override */
3092 	ret = ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
3093 			      UB9702_IR_RX_ANA_GAIN_CTRL_0, 0x00, NULL);
3094 	if (ret)
3095 		return ret;
3096 
3097 	dev_dbg(dev, "rx%u: enabled FPD-Link IV DFE LMS", nport);
3098 
3099 	return 0;
3100 }
3101 
3102 static int ub960_init_rx_ports_ub9702(struct ub960_data *priv)
3103 {
3104 	struct device *dev = &priv->client->dev;
3105 	unsigned int port_lock_mask;
3106 	unsigned int port_mask = 0;
3107 	bool have_fpd4 = false;
3108 	int ret;
3109 
3110 	for_each_active_rxport(priv, it) {
3111 		ret = ub960_rxport_update_bits(priv, it.nport,
3112 					       UB960_RR_BCC_CONFIG,
3113 					       UB960_RR_BCC_CONFIG_BC_ALWAYS_ON,
3114 					       UB960_RR_BCC_CONFIG_BC_ALWAYS_ON,
3115 					       NULL);
3116 		if (ret)
3117 			return ret;
3118 	}
3119 
3120 	/* Disable FPD4 Auto Recovery */
3121 	ret = ub960_write(priv, UB9702_SR_CSI_EXCLUSIVE_FWD2, 0x0f, NULL);
3122 	if (ret)
3123 		return ret;
3124 
3125 	for_each_active_rxport(priv, it) {
3126 		if (it.rxport->ser.addr >= 0) {
3127 			/*
3128 			 * Set serializer's I2C address if set in the dts file,
3129 			 * and freeze it to prevent updates from the FC.
3130 			 */
3131 			ub960_rxport_write(priv, it.nport, UB960_RR_SER_ID,
3132 					   it.rxport->ser.addr << 1 |
3133 					   UB960_RR_SER_ID_FREEZE_DEVICE_ID,
3134 					   &ret);
3135 		}
3136 
3137 		/* Set serializer I2C alias with auto-ack */
3138 		ub960_rxport_write(priv, it.nport, UB960_RR_SER_ALIAS_ID,
3139 				   it.rxport->ser.alias << 1 |
3140 				   UB960_RR_SER_ALIAS_ID_AUTO_ACK, &ret);
3141 
3142 		if (ret)
3143 			return ret;
3144 	}
3145 
3146 	for_each_active_rxport(priv, it) {
3147 		if (fwnode_device_is_compatible(it.rxport->ser.fwnode,
3148 						"ti,ds90ub971-q1")) {
3149 			ret = ub960_rxport_bc_ser_config(it.rxport);
3150 			if (ret)
3151 				return ret;
3152 		}
3153 	}
3154 
3155 	for_each_active_rxport_fpd4(priv, it) {
3156 		/* Hold state machine in reset */
3157 		ub960_rxport_write(priv, it.nport, UB9702_RR_RX_SM_SEL_2, 0x10,
3158 				   &ret);
3159 
3160 		/* Set AEQ max to 0 */
3161 		ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(it.nport),
3162 				UB9702_IR_RX_ANA_AEQ_ALP_SEL7, 0, &ret);
3163 
3164 		if (ret)
3165 			return ret;
3166 
3167 		dev_dbg(dev,
3168 			"rx%u: holding state machine and adjusting AEQ max to 0",
3169 			it.nport);
3170 	}
3171 
3172 	for_each_active_rxport(priv, it) {
3173 		port_mask |= BIT(it.nport);
3174 
3175 		if (it.rxport->cdr_mode == RXPORT_CDR_FPD4)
3176 			have_fpd4 = true;
3177 	}
3178 
3179 	for_each_rxport(priv, it) {
3180 		ret = ub960_configure_rx_port_ub9702(priv, it.nport);
3181 		if (ret)
3182 			return ret;
3183 	}
3184 
3185 	ret = ub960_reset(priv, false);
3186 	if (ret)
3187 		return ret;
3188 
3189 	if (have_fpd4) {
3190 		for_each_active_rxport_fpd4(priv, it) {
3191 			/* Release state machine */
3192 			ret = ub960_rxport_write(priv, it.nport,
3193 						 UB9702_RR_RX_SM_SEL_2, 0x0,
3194 						 NULL);
3195 			if (ret)
3196 				return ret;
3197 
3198 			dev_dbg(dev, "rx%u: state machine released\n",
3199 				it.nport);
3200 		}
3201 
3202 		/* Wait for SM to resume */
3203 		fsleep(5000);
3204 
3205 		for_each_active_rxport_fpd4(priv, it) {
3206 			ret = ub960_write_ind(priv,
3207 					      UB960_IND_TARGET_RX_ANA(it.nport),
3208 					      UB9702_IR_RX_ANA_AEQ_ALP_SEL7,
3209 					      0x23, NULL);
3210 			if (ret)
3211 				return ret;
3212 
3213 			dev_dbg(dev, "rx%u: AEQ restart\n", it.nport);
3214 		}
3215 
3216 		/* Wait for lock */
3217 		fsleep(20000);
3218 
3219 		for_each_active_rxport_fpd4(priv, it) {
3220 			ret = ub960_lock_recovery_ub9702(priv, it.nport);
3221 			if (ret)
3222 				return ret;
3223 		}
3224 
3225 		for_each_active_rxport_fpd4(priv, it) {
3226 			ret = ub960_enable_aeq_lms_ub9702(priv, it.nport);
3227 			if (ret)
3228 				return ret;
3229 		}
3230 
3231 		for_each_active_rxport_fpd4(priv, it) {
3232 			/* Hold state machine in reset */
3233 			ret = ub960_rxport_write(priv, it.nport,
3234 						 UB9702_RR_RX_SM_SEL_2, 0x10,
3235 						 NULL);
3236 			if (ret)
3237 				return ret;
3238 		}
3239 
3240 		ret = ub960_reset(priv, false);
3241 		if (ret)
3242 			return ret;
3243 
3244 		for_each_active_rxport_fpd4(priv, it) {
3245 			/* Release state machine */
3246 			ret = ub960_rxport_write(priv, it.nport,
3247 						 UB9702_RR_RX_SM_SEL_2, 0,
3248 						 NULL);
3249 			if (ret)
3250 				return ret;
3251 		}
3252 	}
3253 
3254 	/* Wait time for stable lock */
3255 	fsleep(15000);
3256 
3257 	/* Set temperature ramp on serializer */
3258 	for_each_active_rxport(priv, it) {
3259 		ret = ub960_serializer_temp_ramp(it.rxport);
3260 		if (ret)
3261 			return ret;
3262 	}
3263 
3264 	for_each_active_rxport_fpd4(priv, it) {
3265 		ret = ub960_enable_dfe_lms_ub9702(priv, it.nport);
3266 		if (ret)
3267 			return ret;
3268 	}
3269 
3270 	/* Wait for DFE and LMS to adapt */
3271 	fsleep(5000);
3272 
3273 	ret = ub960_rxport_wait_locks(priv, port_mask, &port_lock_mask);
3274 	if (ret)
3275 		return ret;
3276 
3277 	if (port_mask != port_lock_mask) {
3278 		ret = -EIO;
3279 		dev_err_probe(dev, ret, "Failed to lock all RX ports\n");
3280 		return ret;
3281 	}
3282 
3283 	for_each_active_rxport(priv, it) {
3284 		/* Enable all interrupt sources from this port */
3285 		ub960_rxport_write(priv, it.nport, UB960_RR_PORT_ICR_HI, 0x07,
3286 				   &ret);
3287 		ub960_rxport_write(priv, it.nport, UB960_RR_PORT_ICR_LO, 0x7f,
3288 				   &ret);
3289 
3290 		/* Clear serializer I2C alias auto-ack */
3291 		ub960_rxport_update_bits(priv, it.nport, UB960_RR_SER_ALIAS_ID,
3292 					 UB960_RR_SER_ALIAS_ID_AUTO_ACK, 0,
3293 					 &ret);
3294 
3295 		/* Enable I2C_PASS_THROUGH */
3296 		ub960_rxport_update_bits(priv, it.nport, UB960_RR_BCC_CONFIG,
3297 					 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
3298 					 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
3299 					 &ret);
3300 
3301 		if (ret)
3302 			return ret;
3303 	}
3304 
3305 	/* Enable FPD4 Auto Recovery, Recovery loop active */
3306 	ret = ub960_write(priv, UB9702_SR_CSI_EXCLUSIVE_FWD2, 0x18, NULL);
3307 	if (ret)
3308 		return ret;
3309 
3310 	for_each_active_rxport_fpd4(priv, it) {
3311 		u8 final_aeq;
3312 
3313 		ret = ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(it.nport),
3314 				     UB9702_IR_RX_ANA_AEQ_ALP_SEL11, &final_aeq,
3315 				     NULL);
3316 		if (ret)
3317 			return ret;
3318 
3319 		dev_dbg(dev, "rx%u: final AEQ = %#x\n", it.nport, final_aeq);
3320 	}
3321 
3322 	/*
3323 	 * Clear any errors caused by switching the RX port settings while
3324 	 * probing.
3325 	 */
3326 
3327 	ret = ub960_clear_rx_errors(priv);
3328 	if (ret)
3329 		return ret;
3330 
3331 	return 0;
3332 }
3333 
3334 static int ub960_rxport_handle_events(struct ub960_data *priv, u8 nport)
3335 {
3336 	struct device *dev = &priv->client->dev;
3337 	u8 rx_port_sts1;
3338 	u8 rx_port_sts2;
3339 	u8 csi_rx_sts;
3340 	u8 bcc_sts;
3341 	int ret = 0;
3342 
3343 	/* Read interrupts (also clears most of them) */
3344 	ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1, &rx_port_sts1,
3345 			  &ret);
3346 	ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2, &rx_port_sts2,
3347 			  &ret);
3348 	ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS, &csi_rx_sts, &ret);
3349 	ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS, &bcc_sts, &ret);
3350 
3351 	if (ret)
3352 		return ret;
3353 
3354 	if (rx_port_sts1 & UB960_RR_RX_PORT_STS1_PARITY_ERROR) {
3355 		u16 v;
3356 
3357 		ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_PAR_ERR_HI,
3358 					  &v, NULL);
3359 		if (!ret)
3360 			dev_err(dev, "rx%u parity errors: %u\n", nport, v);
3361 	}
3362 
3363 	if (rx_port_sts1 & UB960_RR_RX_PORT_STS1_BCC_CRC_ERROR)
3364 		dev_err(dev, "rx%u BCC CRC error\n", nport);
3365 
3366 	if (rx_port_sts1 & UB960_RR_RX_PORT_STS1_BCC_SEQ_ERROR)
3367 		dev_err(dev, "rx%u BCC SEQ error\n", nport);
3368 
3369 	if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_LINE_LEN_UNSTABLE)
3370 		dev_err(dev, "rx%u line length unstable\n", nport);
3371 
3372 	if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_FPD3_ENCODE_ERROR)
3373 		dev_err(dev, "rx%u FPD3 encode error\n", nport);
3374 
3375 	if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_BUFFER_ERROR)
3376 		dev_err(dev, "rx%u buffer error\n", nport);
3377 
3378 	if (csi_rx_sts)
3379 		dev_err(dev, "rx%u CSI error: %#02x\n", nport, csi_rx_sts);
3380 
3381 	if (csi_rx_sts & UB960_RR_CSI_RX_STS_ECC1_ERR)
3382 		dev_err(dev, "rx%u CSI ECC1 error\n", nport);
3383 
3384 	if (csi_rx_sts & UB960_RR_CSI_RX_STS_ECC2_ERR)
3385 		dev_err(dev, "rx%u CSI ECC2 error\n", nport);
3386 
3387 	if (csi_rx_sts & UB960_RR_CSI_RX_STS_CKSUM_ERR)
3388 		dev_err(dev, "rx%u CSI checksum error\n", nport);
3389 
3390 	if (csi_rx_sts & UB960_RR_CSI_RX_STS_LENGTH_ERR)
3391 		dev_err(dev, "rx%u CSI length error\n", nport);
3392 
3393 	if (bcc_sts)
3394 		dev_err(dev, "rx%u BCC error: %#02x\n", nport, bcc_sts);
3395 
3396 	if (bcc_sts & UB960_RR_BCC_STATUS_RESP_ERR)
3397 		dev_err(dev, "rx%u BCC response error", nport);
3398 
3399 	if (bcc_sts & UB960_RR_BCC_STATUS_SLAVE_TO)
3400 		dev_err(dev, "rx%u BCC slave timeout", nport);
3401 
3402 	if (bcc_sts & UB960_RR_BCC_STATUS_SLAVE_ERR)
3403 		dev_err(dev, "rx%u BCC slave error", nport);
3404 
3405 	if (bcc_sts & UB960_RR_BCC_STATUS_MASTER_TO)
3406 		dev_err(dev, "rx%u BCC master timeout", nport);
3407 
3408 	if (bcc_sts & UB960_RR_BCC_STATUS_MASTER_ERR)
3409 		dev_err(dev, "rx%u BCC master error", nport);
3410 
3411 	if (bcc_sts & UB960_RR_BCC_STATUS_SEQ_ERROR)
3412 		dev_err(dev, "rx%u BCC sequence error", nport);
3413 
3414 	if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_LINE_LEN_CHG) {
3415 		u16 v;
3416 
3417 		ret = ub960_rxport_read16(priv, nport, UB960_RR_LINE_LEN_1,
3418 					  &v, NULL);
3419 		if (!ret)
3420 			dev_dbg(dev, "rx%u line len changed: %u\n", nport, v);
3421 	}
3422 
3423 	if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_LINE_CNT_CHG) {
3424 		u16 v;
3425 
3426 		ret = ub960_rxport_read16(priv, nport, UB960_RR_LINE_COUNT_HI,
3427 					  &v, NULL);
3428 		if (!ret)
3429 			dev_dbg(dev, "rx%u line count changed: %u\n", nport, v);
3430 	}
3431 
3432 	if (rx_port_sts1 & UB960_RR_RX_PORT_STS1_LOCK_STS_CHG) {
3433 		dev_dbg(dev, "rx%u: %s, %s, %s, %s\n", nport,
3434 			(rx_port_sts1 & UB960_RR_RX_PORT_STS1_LOCK_STS) ?
3435 				"locked" :
3436 				"unlocked",
3437 			(rx_port_sts1 & UB960_RR_RX_PORT_STS1_PORT_PASS) ?
3438 				"passed" :
3439 				"not passed",
3440 			(rx_port_sts2 & UB960_RR_RX_PORT_STS2_CABLE_FAULT) ?
3441 				"no clock" :
3442 				"clock ok",
3443 			(rx_port_sts2 & UB960_RR_RX_PORT_STS2_FREQ_STABLE) ?
3444 				"stable freq" :
3445 				"unstable freq");
3446 	}
3447 
3448 	return 0;
3449 }
3450 
3451 /* -----------------------------------------------------------------------------
3452  * V4L2
3453  */
3454 
3455 /*
3456  * The current implementation only supports a simple VC mapping, where all VCs
 * from one RX port will be mapped to the same VC. Also, the hardware
3458  * dictates that all streams from an RX port must go to a single TX port.
3459  *
3460  * This function decides the target VC numbers for each RX port with a simple
3461  * algorithm, so that for each TX port, we get VC numbers starting from 0,
3462  * and counting up.
3463  *
3464  * E.g. if all four RX ports are in use, of which the first two go to the
 * first TX port and the second two go to the second TX port, we would get
3466  * the following VCs for the four RX ports: 0, 1, 0, 1.
3467  *
3468  * TODO: implement a more sophisticated VC mapping. As the driver cannot know
3469  * what VCs the sinks expect (say, an FPGA with hardcoded VC routing), this
3470  * probably needs to be somehow configurable. Device tree?
3471  */
3472 static void ub960_get_vc_maps(struct ub960_data *priv,
3473 			      struct v4l2_subdev_state *state, u8 *vc)
3474 {
3475 	u8 cur_vc[UB960_MAX_TX_NPORTS] = {};
3476 	struct v4l2_subdev_route *route;
3477 	u8 handled_mask = 0;
3478 
3479 	for_each_active_route(&state->routing, route) {
3480 		unsigned int rx, tx;
3481 
3482 		rx = ub960_pad_to_port(priv, route->sink_pad);
3483 		if (BIT(rx) & handled_mask)
3484 			continue;
3485 
3486 		tx = ub960_pad_to_port(priv, route->source_pad);
3487 
3488 		vc[rx] = cur_vc[tx]++;
3489 		handled_mask |= BIT(rx);
3490 	}
3491 }
3492 
3493 static int ub960_enable_tx_port(struct ub960_data *priv, unsigned int nport)
3494 {
3495 	struct device *dev = &priv->client->dev;
3496 
3497 	dev_dbg(dev, "enable TX port %u\n", nport);
3498 
3499 	return ub960_txport_update_bits(priv, nport, UB960_TR_CSI_CTL,
3500 					UB960_TR_CSI_CTL_CSI_ENABLE,
3501 					UB960_TR_CSI_CTL_CSI_ENABLE, NULL);
3502 }
3503 
3504 static int ub960_disable_tx_port(struct ub960_data *priv, unsigned int nport)
3505 {
3506 	struct device *dev = &priv->client->dev;
3507 
3508 	dev_dbg(dev, "disable TX port %u\n", nport);
3509 
3510 	return ub960_txport_update_bits(priv, nport, UB960_TR_CSI_CTL,
3511 					UB960_TR_CSI_CTL_CSI_ENABLE, 0, NULL);
3512 }
3513 
3514 static int ub960_enable_rx_port(struct ub960_data *priv, unsigned int nport)
3515 {
3516 	struct device *dev = &priv->client->dev;
3517 
3518 	dev_dbg(dev, "enable RX port %u\n", nport);
3519 
3520 	/* Enable forwarding */
3521 	return ub960_update_bits(priv, UB960_SR_FWD_CTL1,
3522 				 UB960_SR_FWD_CTL1_PORT_DIS(nport), 0, NULL);
3523 }
3524 
3525 static int ub960_disable_rx_port(struct ub960_data *priv, unsigned int nport)
3526 {
3527 	struct device *dev = &priv->client->dev;
3528 
3529 	dev_dbg(dev, "disable RX port %u\n", nport);
3530 
3531 	/* Disable forwarding */
3532 	return ub960_update_bits(priv, UB960_SR_FWD_CTL1,
3533 				 UB960_SR_FWD_CTL1_PORT_DIS(nport),
3534 				 UB960_SR_FWD_CTL1_PORT_DIS(nport), NULL);
3535 }
3536 
3537 /*
3538  * The driver only supports using a single VC for each source. This function
3539  * checks that each source only provides streams using a single VC.
3540  */
3541 static int ub960_validate_stream_vcs(struct ub960_data *priv)
3542 {
3543 	for_each_active_rxport(priv, it) {
3544 		struct v4l2_mbus_frame_desc desc;
3545 		int ret;
3546 		u8 vc;
3547 
3548 		ret = v4l2_subdev_call(it.rxport->source.sd, pad,
3549 				       get_frame_desc, it.rxport->source.pad,
3550 				       &desc);
3551 		if (ret)
3552 			return ret;
3553 
3554 		if (desc.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2)
3555 			continue;
3556 
3557 		if (desc.num_entries == 0)
3558 			continue;
3559 
3560 		vc = desc.entry[0].bus.csi2.vc;
3561 
3562 		for (unsigned int i = 1; i < desc.num_entries; i++) {
3563 			if (vc == desc.entry[i].bus.csi2.vc)
3564 				continue;
3565 
3566 			dev_err(&priv->client->dev,
3567 				"rx%u: source with multiple virtual-channels is not supported\n",
3568 				it.nport);
3569 			return -ENODEV;
3570 		}
3571 	}
3572 
3573 	return 0;
3574 }
3575 
/*
 * ub960_configure_ports_for_streaming() - Program RX/TX port routing and
 * datatype registers according to the active routing table.
 * @priv: deserializer private data
 * @state: locked subdev state holding the active routing and formats
 *
 * Collects per-RX-port stream information from the active routes, then
 * writes the RAW datatype / VC-map registers for each active RX port and
 * finally the forward-control register selecting the TX port for each RX
 * port. Forwarding itself stays disabled here; it is enabled per port in
 * ub960_enable_rx_port().
 *
 * Return: 0 on success or a negative error code.
 */
static int ub960_configure_ports_for_streaming(struct ub960_data *priv,
					       struct v4l2_subdev_state *state)
{
	u8 fwd_ctl;
	/* Per-RX-port data gathered from the active routes */
	struct {
		u32 num_streams;
		u8 pixel_dt;	/* CSI-2 datatype of the pixel stream */
		u8 meta_dt;	/* CSI-2 datatype of the metadata stream */
		u32 meta_lines;	/* number of metadata lines (0-3) */
		u32 tx_port;	/* TX port this RX port forwards to */
	} rx_data[UB960_MAX_RX_NPORTS] = {};
	u8 vc_map[UB960_MAX_RX_NPORTS] = {};
	struct v4l2_subdev_route *route;
	int ret;

	ret = ub960_validate_stream_vcs(priv);
	if (ret)
		return ret;

	ub960_get_vc_maps(priv, state, vc_map);

	/* First pass: collect stream counts, datatypes and TX targets */
	for_each_active_route(&state->routing, route) {
		struct ub960_rxport *rxport;
		struct ub960_txport *txport;
		struct v4l2_mbus_framefmt *fmt;
		const struct ub960_format_info *ub960_fmt;
		unsigned int nport;

		nport = ub960_pad_to_port(priv, route->sink_pad);

		rxport = priv->rxports[nport];
		if (!rxport)
			return -EINVAL;

		txport = priv->txports[ub960_pad_to_port(priv, route->source_pad)];
		if (!txport)
			return -EINVAL;

		rx_data[nport].tx_port = ub960_pad_to_port(priv, route->source_pad);

		rx_data[nport].num_streams++;

		/* For the rest, we are only interested in parallel busses */
		if (rxport->rx_mode == RXPORT_MODE_CSI2_SYNC ||
		    rxport->rx_mode == RXPORT_MODE_CSI2_NONSYNC)
			continue;

		/* Parallel (RAW) ports support at most pixel + metadata */
		if (rx_data[nport].num_streams > 2)
			return -EPIPE;

		fmt = v4l2_subdev_state_get_format(state, route->sink_pad,
						   route->sink_stream);
		if (!fmt)
			return -EPIPE;

		ub960_fmt = ub960_find_format(fmt->code);
		if (!ub960_fmt)
			return -EPIPE;

		if (ub960_fmt->meta) {
			/* The metadata-lines register field holds 0-3 lines */
			if (fmt->height > 3) {
				dev_err(&priv->client->dev,
					"rx%u: unsupported metadata height %u\n",
					nport, fmt->height);
				return -EPIPE;
			}

			rx_data[nport].meta_dt = ub960_fmt->datatype;
			rx_data[nport].meta_lines = fmt->height;
		} else {
			rx_data[nport].pixel_dt = ub960_fmt->datatype;
		}
	}

	/* Configure RX ports */

	/*
	 * Keep all port forwardings disabled by default. Forwarding will be
	 * enabled in ub960_enable_rx_port.
	 */
	fwd_ctl = GENMASK(7, 4);

	/* Second pass: program the per-port datatype / VC-map registers */
	for_each_active_rxport(priv, it) {
		unsigned long nport = it.nport;

		u8 vc = vc_map[nport];

		if (rx_data[nport].num_streams == 0)
			continue;

		switch (it.rxport->rx_mode) {
		case RXPORT_MODE_RAW10:
			ub960_rxport_write(priv, nport, UB960_RR_RAW10_ID,
				rx_data[nport].pixel_dt | (vc << UB960_RR_RAW10_ID_VC_SHIFT),
				&ret);

			ub960_rxport_write(priv, nport,
				UB960_RR_RAW_EMBED_DTYPE,
				(rx_data[nport].meta_lines << UB960_RR_RAW_EMBED_DTYPE_LINES_SHIFT) |
					rx_data[nport].meta_dt, &ret);

			break;

		case RXPORT_MODE_RAW12_HF:
		case RXPORT_MODE_RAW12_LF:
			/* Not implemented */
			break;

		case RXPORT_MODE_CSI2_SYNC:
		case RXPORT_MODE_CSI2_NONSYNC:
			if (priv->hw_data->chip_type == UB960 ||
			    priv->hw_data->chip_type == UB954) {
				/* Map all VCs from this port to the same VC */
				ub960_rxport_write(priv, nport, UB960_RR_CSI_VC_MAP,
						   (vc << UB960_RR_CSI_VC_MAP_SHIFT(3)) |
						   (vc << UB960_RR_CSI_VC_MAP_SHIFT(2)) |
						   (vc << UB960_RR_CSI_VC_MAP_SHIFT(1)) |
						   (vc << UB960_RR_CSI_VC_MAP_SHIFT(0)),
						   &ret);
			} else {
				unsigned int i;

				/* Map all VCs from this port to VC(nport) */
				for (i = 0; i < 8; i++)
					ub960_rxport_write(priv, nport,
							   UB9702_RR_VC_ID_MAP(i),
							   (nport << 4) | nport,
							   &ret);
			}

			break;
		}

		if (rx_data[nport].tx_port == 1)
			fwd_ctl |= BIT(nport); /* forward to TX1 */
		else
			fwd_ctl &= ~BIT(nport); /* forward to TX0 */
	}

	ub960_write(priv, UB960_SR_FWD_CTL1, fwd_ctl, &ret);

	return ret;
}
3719 
3720 static void ub960_update_streaming_status(struct ub960_data *priv)
3721 {
3722 	unsigned int i;
3723 
3724 	for (i = 0; i < UB960_MAX_NPORTS; i++) {
3725 		if (priv->stream_enable_mask[i])
3726 			break;
3727 	}
3728 
3729 	priv->streaming = i < UB960_MAX_NPORTS;
3730 }
3731 
/*
 * ub960_enable_streams() - .enable_streams pad operation
 * @sd: the deserializer subdev
 * @state: locked subdev state
 * @source_pad: TX (source) pad to enable streams on
 * @source_streams_mask: streams on @source_pad to enable
 *
 * Configures the ports on the first enable, turns on the TX port if needed,
 * then enables the corresponding sink streams on each involved RX port's
 * remote source subdev. On failure, streams enabled so far are rolled back.
 *
 * Return: 0 on success or a negative error code.
 */
static int ub960_enable_streams(struct v4l2_subdev *sd,
				struct v4l2_subdev_state *state, u32 source_pad,
				u64 source_streams_mask)
{
	struct ub960_data *priv = sd_to_ub960(sd);
	struct device *dev = &priv->client->dev;
	u64 sink_streams[UB960_MAX_RX_NPORTS] = {};
	struct v4l2_subdev_route *route;
	unsigned int failed_port;
	int ret;

	/* Port configuration is done once, before the first stream starts */
	if (!priv->streaming) {
		dev_dbg(dev, "Prepare for streaming\n");
		ret = ub960_configure_ports_for_streaming(priv, state);
		if (ret)
			return ret;
	}

	/* Enable TX port if not yet enabled */
	if (!priv->stream_enable_mask[source_pad]) {
		ret = ub960_enable_tx_port(priv,
					   ub960_pad_to_port(priv, source_pad));
		if (ret)
			return ret;
	}

	priv->stream_enable_mask[source_pad] |= source_streams_mask;

	/* Collect sink streams per pad which we need to enable */
	for_each_active_route(&state->routing, route) {
		unsigned int nport;

		if (route->source_pad != source_pad)
			continue;

		if (!(source_streams_mask & BIT_ULL(route->source_stream)))
			continue;

		nport = ub960_pad_to_port(priv, route->sink_pad);

		sink_streams[nport] |= BIT_ULL(route->sink_stream);
	}

	for_each_rxport(priv, it) {
		unsigned int nport = it.nport;

		if (!sink_streams[nport])
			continue;

		/* Enable the RX port if not yet enabled */
		if (!priv->stream_enable_mask[nport]) {
			ret = ub960_enable_rx_port(priv, nport);
			if (ret) {
				failed_port = nport;
				goto err;
			}
		}

		priv->stream_enable_mask[nport] |= sink_streams[nport];

		dev_dbg(dev, "enable RX port %u streams %#llx\n", nport,
			sink_streams[nport]);

		ret = v4l2_subdev_enable_streams(
			priv->rxports[nport]->source.sd,
			priv->rxports[nport]->source.pad,
			sink_streams[nport]);
		if (ret) {
			/* Undo this port's bookkeeping before rolling back */
			priv->stream_enable_mask[nport] &= ~sink_streams[nport];

			if (!priv->stream_enable_mask[nport])
				ub960_disable_rx_port(priv, nport);

			failed_port = nport;
			goto err;
		}
	}

	priv->streaming = true;

	return 0;

err:
	/* Roll back the ports that were successfully enabled above */
	for (unsigned int nport = 0; nport < failed_port; nport++) {
		if (!sink_streams[nport])
			continue;

		dev_dbg(dev, "disable RX port %u streams %#llx\n", nport,
			sink_streams[nport]);

		ret = v4l2_subdev_disable_streams(
			priv->rxports[nport]->source.sd,
			priv->rxports[nport]->source.pad,
			sink_streams[nport]);
		if (ret)
			dev_err(dev, "Failed to disable streams: %d\n", ret);

		priv->stream_enable_mask[nport] &= ~sink_streams[nport];

		/* Disable RX port if no active streams */
		if (!priv->stream_enable_mask[nport])
			ub960_disable_rx_port(priv, nport);
	}

	priv->stream_enable_mask[source_pad] &= ~source_streams_mask;

	if (!priv->stream_enable_mask[source_pad])
		ub960_disable_tx_port(priv,
				      ub960_pad_to_port(priv, source_pad));

	ub960_update_streaming_status(priv);

	return ret;
}
3846 
/*
 * ub960_disable_streams() - .disable_streams pad operation
 * @sd: the deserializer subdev
 * @state: locked subdev state
 * @source_pad: TX (source) pad to disable streams on
 * @source_streams_mask: streams on @source_pad to disable
 *
 * Disables the corresponding sink streams on the remote source subdevs, and
 * turns off RX/TX ports that no longer have any active streams. Failures
 * from the remote subdevs are logged but do not abort the teardown.
 *
 * Return: 0 (teardown is best-effort).
 */
static int ub960_disable_streams(struct v4l2_subdev *sd,
				 struct v4l2_subdev_state *state,
				 u32 source_pad, u64 source_streams_mask)
{
	struct ub960_data *priv = sd_to_ub960(sd);
	struct device *dev = &priv->client->dev;
	u64 sink_streams[UB960_MAX_RX_NPORTS] = {};
	struct v4l2_subdev_route *route;
	int ret;

	/* Collect sink streams per pad which we need to disable */
	for_each_active_route(&state->routing, route) {
		unsigned int nport;

		if (route->source_pad != source_pad)
			continue;

		if (!(source_streams_mask & BIT_ULL(route->source_stream)))
			continue;

		nport = ub960_pad_to_port(priv, route->sink_pad);

		sink_streams[nport] |= BIT_ULL(route->sink_stream);
	}

	for_each_rxport(priv, it) {
		unsigned int nport = it.nport;

		if (!sink_streams[nport])
			continue;

		dev_dbg(dev, "disable RX port %u streams %#llx\n", nport,
			sink_streams[nport]);

		ret = v4l2_subdev_disable_streams(
			priv->rxports[nport]->source.sd,
			priv->rxports[nport]->source.pad,
			sink_streams[nport]);
		if (ret)
			dev_err(dev, "Failed to disable streams: %d\n", ret);

		priv->stream_enable_mask[nport] &= ~sink_streams[nport];

		/* Disable RX port if no active streams */
		if (!priv->stream_enable_mask[nport])
			ub960_disable_rx_port(priv, nport);
	}

	/* Disable TX port if no active streams */

	priv->stream_enable_mask[source_pad] &= ~source_streams_mask;

	if (!priv->stream_enable_mask[source_pad])
		ub960_disable_tx_port(priv,
				      ub960_pad_to_port(priv, source_pad));

	ub960_update_streaming_status(priv);

	return 0;
}
3907 
3908 static int _ub960_set_routing(struct v4l2_subdev *sd,
3909 			      struct v4l2_subdev_state *state,
3910 			      struct v4l2_subdev_krouting *routing)
3911 {
3912 	static const struct v4l2_mbus_framefmt format = {
3913 		.width = 640,
3914 		.height = 480,
3915 		.code = MEDIA_BUS_FMT_UYVY8_1X16,
3916 		.field = V4L2_FIELD_NONE,
3917 		.colorspace = V4L2_COLORSPACE_SRGB,
3918 		.ycbcr_enc = V4L2_YCBCR_ENC_601,
3919 		.quantization = V4L2_QUANTIZATION_LIM_RANGE,
3920 		.xfer_func = V4L2_XFER_FUNC_SRGB,
3921 	};
3922 	int ret;
3923 
3924 	ret = v4l2_subdev_routing_validate(sd, routing,
3925 					   V4L2_SUBDEV_ROUTING_ONLY_1_TO_1 |
3926 					   V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX);
3927 	if (ret)
3928 		return ret;
3929 
3930 	ret = v4l2_subdev_set_routing_with_fmt(sd, state, routing, &format);
3931 	if (ret)
3932 		return ret;
3933 
3934 	return 0;
3935 }
3936 
3937 static int ub960_set_routing(struct v4l2_subdev *sd,
3938 			     struct v4l2_subdev_state *state,
3939 			     enum v4l2_subdev_format_whence which,
3940 			     struct v4l2_subdev_krouting *routing)
3941 {
3942 	struct ub960_data *priv = sd_to_ub960(sd);
3943 
3944 	if (which == V4L2_SUBDEV_FORMAT_ACTIVE && priv->streaming)
3945 		return -EBUSY;
3946 
3947 	return _ub960_set_routing(sd, state, routing);
3948 }
3949 
/*
 * ub960_get_frame_desc() - .get_frame_desc pad operation
 * @sd: the deserializer subdev
 * @pad: a source (TX) pad
 * @fd: frame descriptor to fill in
 *
 * Builds the CSI-2 frame descriptor of a TX pad by querying each routed
 * remote source's frame descriptor and rewriting the virtual channel to the
 * one assigned by ub960_get_vc_maps(). For non-CSI-2 sources the datatype is
 * derived from the configured mbus format instead.
 *
 * Return: 0 on success or a negative error code.
 */
static int ub960_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
				struct v4l2_mbus_frame_desc *fd)
{
	struct ub960_data *priv = sd_to_ub960(sd);
	struct v4l2_subdev_route *route;
	struct v4l2_subdev_state *state;
	int ret = 0;
	struct device *dev = &priv->client->dev;
	u8 vc_map[UB960_MAX_RX_NPORTS] = {};

	if (!ub960_pad_is_source(priv, pad))
		return -EINVAL;

	fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2;

	/* State lock must be held while walking the routing table */
	state = v4l2_subdev_lock_and_get_active_state(&priv->sd);

	ub960_get_vc_maps(priv, state, vc_map);

	for_each_active_route(&state->routing, route) {
		struct v4l2_mbus_frame_desc_entry *source_entry = NULL;
		struct v4l2_mbus_frame_desc source_fd;
		unsigned int nport;
		unsigned int i;

		if (route->source_pad != pad)
			continue;

		nport = ub960_pad_to_port(priv, route->sink_pad);

		ret = v4l2_subdev_call(priv->rxports[nport]->source.sd, pad,
				       get_frame_desc,
				       priv->rxports[nport]->source.pad,
				       &source_fd);
		if (ret) {
			dev_err(dev,
				"Failed to get source frame desc for pad %u\n",
				route->sink_pad);
			goto out_unlock;
		}

		/* Find the source entry matching this route's sink stream */
		for (i = 0; i < source_fd.num_entries; i++) {
			if (source_fd.entry[i].stream == route->sink_stream) {
				source_entry = &source_fd.entry[i];
				break;
			}
		}

		if (!source_entry) {
			dev_err(dev,
				"Failed to find stream from source frame desc\n");
			ret = -EPIPE;
			goto out_unlock;
		}

		fd->entry[fd->num_entries].stream = route->source_stream;
		fd->entry[fd->num_entries].flags = source_entry->flags;
		fd->entry[fd->num_entries].length = source_entry->length;
		fd->entry[fd->num_entries].pixelcode = source_entry->pixelcode;

		/* Replace the source's VC with the one assigned by this driver */
		fd->entry[fd->num_entries].bus.csi2.vc = vc_map[nport];

		if (source_fd.type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2) {
			fd->entry[fd->num_entries].bus.csi2.dt =
				source_entry->bus.csi2.dt;
		} else {
			/* Non-CSI-2 source: derive datatype from the format */
			const struct ub960_format_info *ub960_fmt;
			struct v4l2_mbus_framefmt *fmt;

			fmt = v4l2_subdev_state_get_format(state, pad,
							   route->source_stream);

			if (!fmt) {
				ret = -EINVAL;
				goto out_unlock;
			}

			ub960_fmt = ub960_find_format(fmt->code);
			if (!ub960_fmt) {
				dev_err(dev, "Unable to find format\n");
				ret = -EINVAL;
				goto out_unlock;
			}

			fd->entry[fd->num_entries].bus.csi2.dt =
				ub960_fmt->datatype;
		}

		fd->num_entries++;
	}

out_unlock:
	v4l2_subdev_unlock_state(state);

	return ret;
}
4046 
4047 static int ub960_set_fmt(struct v4l2_subdev *sd,
4048 			 struct v4l2_subdev_state *state,
4049 			 struct v4l2_subdev_format *format)
4050 {
4051 	struct ub960_data *priv = sd_to_ub960(sd);
4052 	struct v4l2_mbus_framefmt *fmt;
4053 
4054 	if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE && priv->streaming)
4055 		return -EBUSY;
4056 
4057 	/* No transcoding, source and sink formats must match. */
4058 	if (ub960_pad_is_source(priv, format->pad))
4059 		return v4l2_subdev_get_fmt(sd, state, format);
4060 
4061 	/*
4062 	 * Default to the first format if the requested media bus code isn't
4063 	 * supported.
4064 	 */
4065 	if (!ub960_find_format(format->format.code))
4066 		format->format.code = ub960_formats[0].code;
4067 
4068 	fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream);
4069 	if (!fmt)
4070 		return -EINVAL;
4071 
4072 	*fmt = format->format;
4073 
4074 	fmt = v4l2_subdev_state_get_opposite_stream_format(state, format->pad,
4075 							   format->stream);
4076 	if (!fmt)
4077 		return -EINVAL;
4078 
4079 	*fmt = format->format;
4080 
4081 	return 0;
4082 }
4083 
4084 static int ub960_init_state(struct v4l2_subdev *sd,
4085 			    struct v4l2_subdev_state *state)
4086 {
4087 	struct ub960_data *priv = sd_to_ub960(sd);
4088 
4089 	struct v4l2_subdev_route routes[] = {
4090 		{
4091 			.sink_pad = 0,
4092 			.sink_stream = 0,
4093 			.source_pad = priv->hw_data->num_rxports,
4094 			.source_stream = 0,
4095 			.flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE,
4096 		},
4097 	};
4098 
4099 	struct v4l2_subdev_krouting routing = {
4100 		.num_routes = ARRAY_SIZE(routes),
4101 		.routes = routes,
4102 	};
4103 
4104 	return _ub960_set_routing(sd, state, &routing);
4105 }
4106 
/* Pad-level subdev operations exposed by the deserializer. */
static const struct v4l2_subdev_pad_ops ub960_pad_ops = {
	.enable_streams = ub960_enable_streams,
	.disable_streams = ub960_disable_streams,

	.set_routing = ub960_set_routing,
	.get_frame_desc = ub960_get_frame_desc,

	.get_fmt = v4l2_subdev_get_fmt,
	.set_fmt = ub960_set_fmt,
};
4117 
/*
 * ub960_log_status_ub960_sp_eq() - Log strobe and equalizer status
 * @priv: deserializer private data
 * @nport: RX port index
 *
 * Dumps the strobe position / range and equalizer level / range registers
 * for one RX port. Used from .log_status on UB960/UB954-type devices (the
 * caller restricts it to those chip types).
 *
 * Return: 0 on success or a negative error code from a register read.
 */
static int ub960_log_status_ub960_sp_eq(struct ub960_data *priv,
					unsigned int nport)
{
	struct device *dev = &priv->client->dev;
	u8 eq_level;
	s8 strobe_pos;
	int ret;
	u8 v;

	/* Strobe */

	ret = ub960_read(priv, UB960_XR_AEQ_CTL1, &v, NULL);
	if (ret)
		return ret;

	dev_info(dev, "\t%s strobe\n",
		 (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) ? "Adaptive" :
							  "Manual");

	if (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) {
		ret = ub960_read(priv, UB960_XR_SFILTER_CFG, &v, NULL);
		if (ret)
			return ret;

		/* Register fields are offset by 7 to express negative values */
		dev_info(dev, "\tStrobe range [%d, %d]\n",
			 ((v >> UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT) & 0xf) - 7,
			 ((v >> UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT) & 0xf) - 7);
	}

	ret = ub960_rxport_get_strobe_pos(priv, nport, &strobe_pos);
	if (ret)
		return ret;

	dev_info(dev, "\tStrobe pos %d\n", strobe_pos);

	/* EQ */

	ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v, NULL);
	if (ret)
		return ret;

	/* AEQ bypass set means the equalizer is under manual control */
	dev_info(dev, "\t%s EQ\n",
		 (v & UB960_RR_AEQ_BYPASS_ENABLE) ? "Manual" :
						    "Adaptive");

	if (!(v & UB960_RR_AEQ_BYPASS_ENABLE)) {
		ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_MIN_MAX, &v,
					NULL);
		if (ret)
			return ret;

		dev_info(dev, "\tEQ range [%u, %u]\n",
			 (v >> UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT) & 0xf,
			 (v >> UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT) & 0xf);
	}

	ret = ub960_rxport_get_eq_level(priv, nport, &eq_level);
	if (ret)
		return ret;

	dev_info(dev, "\tEQ level %u\n", eq_level);

	return 0;
}
4182 
4183 static int ub960_log_status(struct v4l2_subdev *sd)
4184 {
4185 	struct ub960_data *priv = sd_to_ub960(sd);
4186 	struct device *dev = &priv->client->dev;
4187 	struct v4l2_subdev_state *state;
4188 	u16 v16 = 0;
4189 	u8 v = 0;
4190 	u8 id[UB960_SR_FPD3_RX_ID_LEN];
4191 	int ret = 0;
4192 
4193 	state = v4l2_subdev_lock_and_get_active_state(sd);
4194 
4195 	for (unsigned int i = 0; i < sizeof(id); i++) {
4196 		ret = ub960_read(priv, UB960_SR_FPD3_RX_ID(i), &id[i], NULL);
4197 		if (ret)
4198 			return ret;
4199 	}
4200 
4201 	dev_info(dev, "ID '%.*s'\n", (int)sizeof(id), id);
4202 
4203 	for (unsigned int nport = 0; nport < priv->hw_data->num_txports;
4204 	     nport++) {
4205 		struct ub960_txport *txport = priv->txports[nport];
4206 
4207 		dev_info(dev, "TX %u\n", nport);
4208 
4209 		if (!txport) {
4210 			dev_info(dev, "\tNot initialized\n");
4211 			continue;
4212 		}
4213 
4214 		ret = ub960_txport_read(priv, nport, UB960_TR_CSI_STS, &v, NULL);
4215 		if (ret)
4216 			return ret;
4217 
4218 		dev_info(dev, "\tsync %u, pass %u\n", v & (u8)BIT(1),
4219 			 v & (u8)BIT(0));
4220 
4221 		/*
4222 		 * Frame counter, frame error counter, line counter and line error counter
4223 		 * registers are marked as reserved in the UB954 datasheet. Hence restrict
4224 		 * the following register reads only for UB960 and UB9702.
4225 		 */
4226 		if (priv->hw_data->chip_type == UB960 || priv->hw_data->chip_type == UB9702) {
4227 			ret = ub960_read16(priv, UB960_SR_CSI_FRAME_COUNT_HI(nport),
4228 					   &v16, NULL);
4229 			if (ret)
4230 				return ret;
4231 
4232 			dev_info(dev, "\tframe counter %u\n", v16);
4233 
4234 			ret = ub960_read16(priv, UB960_SR_CSI_FRAME_ERR_COUNT_HI(nport),
4235 					   &v16, NULL);
4236 			if (ret)
4237 				return ret;
4238 
4239 			dev_info(dev, "\tframe error counter %u\n", v16);
4240 
4241 			ret = ub960_read16(priv, UB960_SR_CSI_LINE_COUNT_HI(nport),
4242 					   &v16, NULL);
4243 			if (ret)
4244 				return ret;
4245 
4246 			dev_info(dev, "\tline counter %u\n", v16);
4247 
4248 			ret = ub960_read16(priv, UB960_SR_CSI_LINE_ERR_COUNT_HI(nport),
4249 					   &v16, NULL);
4250 			if (ret)
4251 				return ret;
4252 
4253 			dev_info(dev, "\tline error counter %u\n", v16);
4254 		}
4255 	}
4256 
4257 	for_each_rxport(priv, it) {
4258 		unsigned int nport = it.nport;
4259 
4260 		dev_info(dev, "RX %u\n", nport);
4261 
4262 		if (!it.rxport) {
4263 			dev_info(dev, "\tNot initialized\n");
4264 			continue;
4265 		}
4266 
4267 		ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1, &v,
4268 					NULL);
4269 		if (ret)
4270 			return ret;
4271 
4272 		if (v & UB960_RR_RX_PORT_STS1_LOCK_STS)
4273 			dev_info(dev, "\tLocked\n");
4274 		else
4275 			dev_info(dev, "\tNot locked\n");
4276 
4277 		dev_info(dev, "\trx_port_sts1 %#02x\n", v);
4278 		ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2, &v,
4279 					NULL);
4280 		if (ret)
4281 			return ret;
4282 
4283 		dev_info(dev, "\trx_port_sts2 %#02x\n", v);
4284 
4285 		ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_FREQ_HIGH,
4286 					  &v16, NULL);
4287 		if (ret)
4288 			return ret;
4289 
4290 		dev_info(dev, "\tlink freq %llu Hz\n", ((u64)v16 * HZ_PER_MHZ) >> 8);
4291 
4292 		ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_PAR_ERR_HI,
4293 					  &v16, NULL);
4294 		if (ret)
4295 			return ret;
4296 
4297 		dev_info(dev, "\tparity errors %u\n", v16);
4298 
4299 		ret = ub960_rxport_read16(priv, nport, UB960_RR_LINE_COUNT_HI,
4300 					  &v16, NULL);
4301 		if (ret)
4302 			return ret;
4303 
4304 		dev_info(dev, "\tlines per frame %u\n", v16);
4305 
4306 		ret = ub960_rxport_read16(priv, nport, UB960_RR_LINE_LEN_1,
4307 					  &v16, NULL);
4308 		if (ret)
4309 			return ret;
4310 
4311 		dev_info(dev, "\tbytes per line %u\n", v16);
4312 
4313 		ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER,
4314 					&v, NULL);
4315 		if (ret)
4316 			return ret;
4317 
4318 		dev_info(dev, "\tcsi_err_counter %u\n", v);
4319 
4320 		if (priv->hw_data->chip_type == UB960 || priv->hw_data->chip_type == UB954) {
4321 			ret = ub960_log_status_ub960_sp_eq(priv, nport);
4322 			if (ret)
4323 				return ret;
4324 		}
4325 
4326 		/* GPIOs */
4327 		for (unsigned int i = 0; i < UB960_NUM_BC_GPIOS; i++) {
4328 			u8 ctl_reg;
4329 			u8 ctl_shift;
4330 
4331 			ctl_reg = UB960_RR_BC_GPIO_CTL(i / 2);
4332 			ctl_shift = (i % 2) * 4;
4333 
4334 			ret = ub960_rxport_read(priv, nport, ctl_reg, &v, NULL);
4335 			if (ret)
4336 				return ret;
4337 
4338 			dev_info(dev, "\tGPIO%u: mode %u\n", i,
4339 				 (v >> ctl_shift) & 0xf);
4340 		}
4341 	}
4342 
4343 	v4l2_subdev_unlock_state(state);
4344 
4345 	return 0;
4346 }
4347 
/* V4L2 core ops: only .log_status (status dump to the kernel log) is implemented. */
static const struct v4l2_subdev_core_ops ub960_subdev_core_ops = {
	.log_status = ub960_log_status,
};
4351 
/* Internal ops: provide the initial subdev state when the state is allocated. */
static const struct v4l2_subdev_internal_ops ub960_internal_ops = {
	.init_state = ub960_init_state,
};
4355 
/* Top-level subdev ops table combining the core and pad op groups. */
static const struct v4l2_subdev_ops ub960_subdev_ops = {
	.core = &ub960_subdev_core_ops,
	.pad = &ub960_pad_ops,
};
4360 
/* Media entity ops: standard 1:1 fwnode-pad mapping and subdev link validation. */
static const struct media_entity_operations ub960_entity_ops = {
	.get_fwnode_pad = v4l2_subdev_get_fwnode_pad_1_to_1,
	.link_validate = v4l2_subdev_link_validate,
	.has_pad_interdep = v4l2_subdev_has_pad_interdep,
};
4366 
4367 /* -----------------------------------------------------------------------------
4368  * Core
4369  */
4370 
4371 static irqreturn_t ub960_handle_events(int irq, void *arg)
4372 {
4373 	struct ub960_data *priv = arg;
4374 	u8 int_sts;
4375 	u8 fwd_sts;
4376 	int ret;
4377 
4378 	ret = ub960_read(priv, UB960_SR_INTERRUPT_STS, &int_sts, NULL);
4379 	if (ret || !int_sts)
4380 		return IRQ_NONE;
4381 
4382 	dev_dbg(&priv->client->dev, "INTERRUPT_STS %x\n", int_sts);
4383 
4384 	ret = ub960_read(priv, UB960_SR_FWD_STS, &fwd_sts, NULL);
4385 	if (ret)
4386 		return IRQ_NONE;
4387 
4388 	dev_dbg(&priv->client->dev, "FWD_STS %#02x\n", fwd_sts);
4389 
4390 	for (unsigned int i = 0; i < priv->hw_data->num_txports; i++) {
4391 		if (int_sts & UB960_SR_INTERRUPT_STS_IS_CSI_TX(i)) {
4392 			ret = ub960_csi_handle_events(priv, i);
4393 			if (ret)
4394 				return IRQ_NONE;
4395 		}
4396 	}
4397 
4398 	for_each_active_rxport(priv, it) {
4399 		if (int_sts & UB960_SR_INTERRUPT_STS_IS_RX(it.nport)) {
4400 			ret = ub960_rxport_handle_events(priv, it.nport);
4401 			if (ret)
4402 				return IRQ_NONE;
4403 		}
4404 	}
4405 
4406 	return IRQ_HANDLED;
4407 }
4408 
4409 static void ub960_handler_work(struct work_struct *work)
4410 {
4411 	struct delayed_work *dwork = to_delayed_work(work);
4412 	struct ub960_data *priv =
4413 		container_of(dwork, struct ub960_data, poll_work);
4414 
4415 	ub960_handle_events(0, priv);
4416 
4417 	schedule_delayed_work(&priv->poll_work,
4418 			      msecs_to_jiffies(UB960_POLL_TIME_MS));
4419 }
4420 
4421 static void ub960_txport_free_ports(struct ub960_data *priv)
4422 {
4423 	unsigned int nport;
4424 
4425 	for (nport = 0; nport < priv->hw_data->num_txports; nport++) {
4426 		struct ub960_txport *txport = priv->txports[nport];
4427 
4428 		if (!txport)
4429 			continue;
4430 
4431 		kfree(txport);
4432 		priv->txports[nport] = NULL;
4433 	}
4434 }
4435 
4436 static void ub960_rxport_free_ports(struct ub960_data *priv)
4437 {
4438 	for_each_active_rxport(priv, it) {
4439 		fwnode_handle_put(it.rxport->source.ep_fwnode);
4440 		fwnode_handle_put(it.rxport->ser.fwnode);
4441 
4442 		mutex_destroy(&it.rxport->aliased_addrs_lock);
4443 
4444 		kfree(it.rxport);
4445 		priv->rxports[it.nport] = NULL;
4446 	}
4447 }
4448 
/*
 * Parse the properties of one "link@N" DT node: CDR mode, RX (FPD-Link
 * input) mode, strobe position, EQ level, and the serializer's i2c alias
 * and address.
 *
 * On success this holds a reference to the "serializer" child node in
 * rxport->ser.fwnode; the caller is responsible for putting it on its own
 * error paths and on port teardown.
 *
 * Returns 0 on success or a negative error code.
 */
static int
ub960_parse_dt_rxport_link_properties(struct ub960_data *priv,
				      struct fwnode_handle *link_fwnode,
				      struct ub960_rxport *rxport)
{
	struct device *dev = &priv->client->dev;
	unsigned int nport = rxport->nport;
	u32 rx_mode;
	u32 cdr_mode;
	s32 strobe_pos;
	u32 eq_level;
	u32 ser_i2c_alias;
	u32 ser_i2c_addr;
	int ret;

	/* FPD-Link 3 CDR is the default when 'ti,cdr-mode' is absent */
	cdr_mode = RXPORT_CDR_FPD3;

	/* -EINVAL from the property read means "property not present" */
	ret = fwnode_property_read_u32(link_fwnode, "ti,cdr-mode", &cdr_mode);
	if (ret < 0 && ret != -EINVAL) {
		dev_err(dev, "rx%u: failed to read '%s': %d\n", nport,
			"ti,cdr-mode", ret);
		return ret;
	}

	if (cdr_mode > RXPORT_CDR_LAST) {
		dev_err(dev, "rx%u: bad 'ti,cdr-mode' %u\n", nport, cdr_mode);
		return -EINVAL;
	}

	/* FPD-Link 4 CDR only exists on the FPD4 family (UB9702) */
	if (priv->hw_data->chip_family != FAMILY_FPD4 && cdr_mode == RXPORT_CDR_FPD4) {
		dev_err(dev, "rx%u: FPD-Link 4 CDR not supported\n", nport);
		return -EINVAL;
	}

	rxport->cdr_mode = cdr_mode;

	/* 'ti,rx-mode' is mandatory */
	ret = fwnode_property_read_u32(link_fwnode, "ti,rx-mode", &rx_mode);
	if (ret < 0) {
		dev_err(dev, "rx%u: failed to read '%s': %d\n", nport,
			"ti,rx-mode", ret);
		return ret;
	}

	if (rx_mode > RXPORT_MODE_LAST) {
		dev_err(dev, "rx%u: bad 'ti,rx-mode' %u\n", nport, rx_mode);
		return -EINVAL;
	}

	/* RAW12 modes are valid DT values but not implemented by the driver */
	switch (rx_mode) {
	case RXPORT_MODE_RAW12_HF:
	case RXPORT_MODE_RAW12_LF:
		dev_err(dev, "rx%u: unsupported 'ti,rx-mode' %u\n", nport,
			rx_mode);
		return -EINVAL;
	default:
		break;
	}

	rxport->rx_mode = rx_mode;

	/* EQ & Strobe related */

	/* Defaults: automatic EQ over the full supported range */
	rxport->eq.manual_eq = false;
	rxport->eq.aeq.eq_level_min = UB960_MIN_EQ_LEVEL;
	rxport->eq.aeq.eq_level_max = UB960_MAX_EQ_LEVEL;

	/*
	 * NOTE(review): strobe_pos is s32 but read with the u32 accessor;
	 * negative values come from reinterpreting the raw DT cell. Confirm
	 * this is the intended encoding for negative strobe positions.
	 */
	ret = fwnode_property_read_u32(link_fwnode, "ti,strobe-pos",
				       &strobe_pos);
	if (ret) {
		if (ret != -EINVAL) {
			dev_err(dev, "rx%u: failed to read '%s': %d\n", nport,
				"ti,strobe-pos", ret);
			return ret;
		}
	} else {
		if (strobe_pos < UB960_MIN_MANUAL_STROBE_POS ||
		    strobe_pos > UB960_MAX_MANUAL_STROBE_POS) {
			dev_err(dev, "rx%u: illegal 'strobe-pos' value: %d\n",
				nport, strobe_pos);
			return -EINVAL;
		}

		/* NOTE: ignored unless global manual strobe pos is also set */
		rxport->eq.strobe_pos = strobe_pos;
		if (!priv->strobe.manual)
			dev_warn(dev,
				 "rx%u: 'ti,strobe-pos' ignored as 'ti,manual-strobe' not set\n",
				 nport);
	}

	/* Optional manual EQ level; absence keeps automatic EQ enabled */
	ret = fwnode_property_read_u32(link_fwnode, "ti,eq-level", &eq_level);
	if (ret) {
		if (ret != -EINVAL) {
			dev_err(dev, "rx%u: failed to read '%s': %d\n", nport,
				"ti,eq-level", ret);
			return ret;
		}
	} else {
		if (eq_level > UB960_MAX_EQ_LEVEL) {
			dev_err(dev, "rx%u: illegal 'ti,eq-level' value: %d\n",
				nport, eq_level);
			return -EINVAL;
		}

		rxport->eq.manual_eq = true;
		rxport->eq.manual.eq_level = eq_level;
	}

	/* I2C alias under which the remote serializer will be reachable */
	ret = fwnode_property_read_u32(link_fwnode, "i2c-alias",
				       &ser_i2c_alias);
	if (ret) {
		dev_err(dev, "rx%u: failed to read '%s': %d\n", nport,
			"i2c-alias", ret);
		return ret;
	}
	rxport->ser.alias = ser_i2c_alias;

	/* Takes a reference; put by the caller on failure/teardown */
	rxport->ser.fwnode = fwnode_get_named_child_node(link_fwnode, "serializer");
	if (!rxport->ser.fwnode) {
		dev_err(dev, "rx%u: missing 'serializer' node\n", nport);
		return -EINVAL;
	}

	/* The serializer's own address ('reg') is optional */
	ret = fwnode_property_read_u32(rxport->ser.fwnode, "reg",
				       &ser_i2c_addr);
	if (ret)
		rxport->ser.addr = -EINVAL;
	else
		rxport->ser.addr = ser_i2c_addr;

	return 0;
}
4582 
4583 static int ub960_parse_dt_rxport_ep_properties(struct ub960_data *priv,
4584 					       struct fwnode_handle *ep_fwnode,
4585 					       struct ub960_rxport *rxport)
4586 {
4587 	struct device *dev = &priv->client->dev;
4588 	struct v4l2_fwnode_endpoint vep = {};
4589 	unsigned int nport = rxport->nport;
4590 	bool hsync_hi;
4591 	bool vsync_hi;
4592 	int ret;
4593 
4594 	rxport->source.ep_fwnode = fwnode_graph_get_remote_endpoint(ep_fwnode);
4595 	if (!rxport->source.ep_fwnode) {
4596 		dev_err(dev, "rx%u: no remote endpoint\n", nport);
4597 		return -ENODEV;
4598 	}
4599 
4600 	/* We currently have properties only for RAW modes */
4601 
4602 	switch (rxport->rx_mode) {
4603 	case RXPORT_MODE_RAW10:
4604 	case RXPORT_MODE_RAW12_HF:
4605 	case RXPORT_MODE_RAW12_LF:
4606 		break;
4607 	default:
4608 		return 0;
4609 	}
4610 
4611 	vep.bus_type = V4L2_MBUS_PARALLEL;
4612 	ret = v4l2_fwnode_endpoint_parse(ep_fwnode, &vep);
4613 	if (ret) {
4614 		dev_err(dev, "rx%u: failed to parse endpoint data\n", nport);
4615 		goto err_put_source_ep_fwnode;
4616 	}
4617 
4618 	hsync_hi = !!(vep.bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH);
4619 	vsync_hi = !!(vep.bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH);
4620 
4621 	/* LineValid and FrameValid are inverse to the h/vsync active */
4622 	rxport->lv_fv_pol = (hsync_hi ? UB960_RR_PORT_CONFIG2_LV_POL_LOW : 0) |
4623 			    (vsync_hi ? UB960_RR_PORT_CONFIG2_FV_POL_LOW : 0);
4624 
4625 	return 0;
4626 
4627 err_put_source_ep_fwnode:
4628 	fwnode_handle_put(rxport->source.ep_fwnode);
4629 	return ret;
4630 }
4631 
/*
 * Allocate and initialize one RX port from its DT link and endpoint nodes.
 *
 * On success priv->rxports[nport] points to the new port, which holds
 * references to the serializer and remote-endpoint fwnodes (released in
 * ub960_rxport_free_ports()). On failure everything is rolled back and
 * priv->rxports[nport] stays NULL.
 */
static int ub960_parse_dt_rxport(struct ub960_data *priv, unsigned int nport,
				 struct fwnode_handle *link_fwnode,
				 struct fwnode_handle *ep_fwnode)
{
	static const char *vpoc_names[UB960_MAX_RX_NPORTS] = {
		"vpoc0", "vpoc1", "vpoc2", "vpoc3"
	};
	struct device *dev = &priv->client->dev;
	struct ub960_rxport *rxport;
	int ret;

	rxport = kzalloc_obj(*rxport);
	if (!rxport)
		return -ENOMEM;

	priv->rxports[nport] = rxport;

	rxport->nport = nport;
	rxport->priv = priv;

	ret = ub960_parse_dt_rxport_link_properties(priv, link_fwnode, rxport);
	if (ret)
		goto err_free_rxport;

	/* The per-port VPOC supply is optional; -ENODEV means "not wired" */
	rxport->vpoc = devm_regulator_get_optional(dev, vpoc_names[nport]);
	if (IS_ERR(rxport->vpoc)) {
		ret = PTR_ERR(rxport->vpoc);
		if (ret == -ENODEV) {
			rxport->vpoc = NULL;
		} else {
			dev_err(dev, "rx%u: failed to get VPOC supply: %d\n",
				nport, ret);
			goto err_put_remote_fwnode;
		}
	}

	ret = ub960_parse_dt_rxport_ep_properties(priv, ep_fwnode, rxport);
	if (ret)
		goto err_put_remote_fwnode;

	mutex_init(&rxport->aliased_addrs_lock);

	return 0;

err_put_remote_fwnode:
	/* Drop the serializer node reference taken by the link parser */
	fwnode_handle_put(rxport->ser.fwnode);
err_free_rxport:
	priv->rxports[nport] = NULL;
	kfree(rxport);
	return ret;
}
4683 
4684 static struct fwnode_handle *
4685 ub960_fwnode_get_link_by_regs(struct fwnode_handle *links_fwnode,
4686 			      unsigned int nport)
4687 {
4688 	struct fwnode_handle *link_fwnode;
4689 	int ret;
4690 
4691 	fwnode_for_each_child_node(links_fwnode, link_fwnode) {
4692 		u32 link_num;
4693 
4694 		if (!str_has_prefix(fwnode_get_name(link_fwnode), "link@"))
4695 			continue;
4696 
4697 		ret = fwnode_property_read_u32(link_fwnode, "reg", &link_num);
4698 		if (ret) {
4699 			fwnode_handle_put(link_fwnode);
4700 			return NULL;
4701 		}
4702 
4703 		if (nport == link_num)
4704 			return link_fwnode;
4705 	}
4706 
4707 	return NULL;
4708 }
4709 
/*
 * Parse the "links" DT node and create RX port state for every port that
 * has both a "link@N" child and a matching graph endpoint. Ports missing
 * either are silently skipped (left unpopulated).
 *
 * Also initializes the global strobe configuration from the "links" node.
 */
static int ub960_parse_dt_rxports(struct ub960_data *priv)
{
	struct device *dev = &priv->client->dev;
	struct fwnode_handle *links_fwnode;
	int ret;

	links_fwnode = fwnode_get_named_child_node(dev_fwnode(dev), "links");
	if (!links_fwnode) {
		dev_err(dev, "'links' node missing\n");
		return -ENODEV;
	}

	/* Defaults, recommended by TI */
	priv->strobe.min = 2;
	priv->strobe.max = 3;

	priv->strobe.manual = fwnode_property_read_bool(links_fwnode, "ti,manual-strobe");

	for_each_rxport(priv, it) {
		struct fwnode_handle *link_fwnode;
		struct fwnode_handle *ep_fwnode;
		unsigned int nport = it.nport;

		link_fwnode = ub960_fwnode_get_link_by_regs(links_fwnode, nport);
		if (!link_fwnode)
			continue;

		/* Port N's endpoint lives on graph port N */
		ep_fwnode = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev),
							    nport, 0, 0);
		if (!ep_fwnode) {
			fwnode_handle_put(link_fwnode);
			continue;
		}

		ret = ub960_parse_dt_rxport(priv, nport, link_fwnode,
					    ep_fwnode);

		fwnode_handle_put(link_fwnode);
		fwnode_handle_put(ep_fwnode);

		if (ret) {
			dev_err(dev, "rx%u: failed to parse RX port\n", nport);
			goto err_put_links;
		}
	}

	fwnode_handle_put(links_fwnode);

	return 0;

err_put_links:
	fwnode_handle_put(links_fwnode);

	return ret;
}
4765 
4766 static int ub960_parse_dt_txports(struct ub960_data *priv)
4767 {
4768 	struct device *dev = &priv->client->dev;
4769 	u32 nport;
4770 	int ret;
4771 
4772 	for (nport = 0; nport < priv->hw_data->num_txports; nport++) {
4773 		unsigned int port = nport + priv->hw_data->num_rxports;
4774 		struct fwnode_handle *ep_fwnode;
4775 
4776 		ep_fwnode = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev),
4777 							    port, 0, 0);
4778 		if (!ep_fwnode)
4779 			continue;
4780 
4781 		ret = ub960_parse_dt_txport(priv, ep_fwnode, nport);
4782 
4783 		fwnode_handle_put(ep_fwnode);
4784 
4785 		if (ret)
4786 			break;
4787 	}
4788 
4789 	return 0;
4790 }
4791 
/*
 * Parse all device tree data: RX ports (links) first, then the TX port
 * endpoints. If TX parsing fails, the RX ports allocated so far are freed.
 */
static int ub960_parse_dt(struct ub960_data *priv)
{
	int ret;

	ret = ub960_parse_dt_rxports(priv);
	if (ret)
		return ret;

	ret = ub960_parse_dt_txports(priv);
	if (ret) {
		ub960_rxport_free_ports(priv);
		return ret;
	}

	return 0;
}
4811 
/*
 * v4l2-async "bound" callback: a remote source subdev has appeared.
 *
 * Finds the source pad matching the endpoint parsed from DT, records the
 * subdev and pad in the RX port state, and creates an immutable enabled
 * media link from that source pad to this deserializer's sink pad (the
 * sink pad index equals the RX port number).
 */
static int ub960_notify_bound(struct v4l2_async_notifier *notifier,
			      struct v4l2_subdev *subdev,
			      struct v4l2_async_connection *asd)
{
	struct ub960_data *priv = sd_to_ub960(notifier->sd);
	struct ub960_rxport *rxport = to_ub960_asd(asd)->rxport;
	struct device *dev = &priv->client->dev;
	u8 nport = rxport->nport;
	int ret;

	ret = media_entity_get_fwnode_pad(&subdev->entity,
					  rxport->source.ep_fwnode,
					  MEDIA_PAD_FL_SOURCE);
	if (ret < 0) {
		dev_err(dev, "Failed to find pad for %s\n", subdev->name);
		return ret;
	}

	rxport->source.sd = subdev;
	/* Non-negative return value is the pad index */
	rxport->source.pad = ret;

	ret = media_create_pad_link(&rxport->source.sd->entity,
				    rxport->source.pad, &priv->sd.entity, nport,
				    MEDIA_LNK_FL_ENABLED |
					    MEDIA_LNK_FL_IMMUTABLE);
	if (ret) {
		dev_err(dev, "Unable to link %s:%u -> %s:%u\n",
			rxport->source.sd->name, rxport->source.pad,
			priv->sd.name, nport);
		return ret;
	}

	/* Debug aid only: report if some active ports are still unbound */
	for_each_active_rxport(priv, it) {
		if (!it.rxport->source.sd) {
			dev_dbg(dev, "Waiting for more subdevs to be bound\n");
			return 0;
		}
	}

	return 0;
}
4853 
/* v4l2-async "unbind" callback: forget the reference to the departed subdev. */
static void ub960_notify_unbind(struct v4l2_async_notifier *notifier,
				struct v4l2_subdev *subdev,
				struct v4l2_async_connection *asd)
{
	struct ub960_rxport *rxport = to_ub960_asd(asd)->rxport;

	rxport->source.sd = NULL;
}
4862 
/* Async notifier callbacks for remote source subdevs coming and going. */
static const struct v4l2_async_notifier_operations ub960_notify_ops = {
	.bound = ub960_notify_bound,
	.unbind = ub960_notify_unbind,
};
4867 
4868 static int ub960_v4l2_notifier_register(struct ub960_data *priv)
4869 {
4870 	struct device *dev = &priv->client->dev;
4871 	int ret;
4872 
4873 	v4l2_async_subdev_nf_init(&priv->notifier, &priv->sd);
4874 
4875 	for_each_active_rxport(priv, it) {
4876 		struct ub960_asd *asd;
4877 
4878 		asd = v4l2_async_nf_add_fwnode(&priv->notifier,
4879 					       it.rxport->source.ep_fwnode,
4880 					       struct ub960_asd);
4881 		if (IS_ERR(asd)) {
4882 			dev_err(dev, "Failed to add subdev for source %u: %pe",
4883 				it.nport, asd);
4884 			v4l2_async_nf_cleanup(&priv->notifier);
4885 			return PTR_ERR(asd);
4886 		}
4887 
4888 		asd->rxport = it.rxport;
4889 	}
4890 
4891 	priv->notifier.ops = &ub960_notify_ops;
4892 
4893 	ret = v4l2_async_nf_register(&priv->notifier);
4894 	if (ret) {
4895 		dev_err(dev, "Failed to register subdev_notifier");
4896 		v4l2_async_nf_cleanup(&priv->notifier);
4897 		return ret;
4898 	}
4899 
4900 	return 0;
4901 }
4902 
/* Unregister and clean up the async notifier (reverse of _register). */
static void ub960_v4l2_notifier_unregister(struct ub960_data *priv)
{
	v4l2_async_nf_unregister(&priv->notifier);
	v4l2_async_nf_cleanup(&priv->notifier);
}
4908 
/*
 * Create and register the deserializer's v4l2 subdev: control handler
 * (link frequency), media pads (one sink per RX port followed by one
 * source per TX port), subdev state, async notifier, and finally the
 * async subdev registration. The goto chain unwinds in reverse order.
 */
static int ub960_create_subdev(struct ub960_data *priv)
{
	struct device *dev = &priv->client->dev;
	unsigned int i;
	int ret;

	v4l2_i2c_subdev_init(&priv->sd, priv->client, &ub960_subdev_ops);
	priv->sd.internal_ops = &ub960_internal_ops;

	v4l2_ctrl_handler_init(&priv->ctrl_handler, 1);
	priv->sd.ctrl_handler = &priv->ctrl_handler;

	v4l2_ctrl_new_int_menu(&priv->ctrl_handler, NULL, V4L2_CID_LINK_FREQ,
			       ARRAY_SIZE(priv->tx_link_freq) - 1, 0,
			       priv->tx_link_freq);

	if (priv->ctrl_handler.error) {
		ret = priv->ctrl_handler.error;
		goto err_free_ctrl;
	}

	priv->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
			  V4L2_SUBDEV_FL_STREAMS;
	priv->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
	priv->sd.entity.ops = &ub960_entity_ops;

	/* RX ports are sink pads, TX ports are source pads */
	for (i = 0; i < priv->hw_data->num_rxports + priv->hw_data->num_txports; i++) {
		priv->pads[i].flags = ub960_pad_is_sink(priv, i) ?
					      MEDIA_PAD_FL_SINK :
					      MEDIA_PAD_FL_SOURCE;
	}

	ret = media_entity_pads_init(&priv->sd.entity,
				     priv->hw_data->num_rxports +
					     priv->hw_data->num_txports,
				     priv->pads);
	if (ret)
		goto err_free_ctrl;

	/* Share the control handler's lock for the subdev state */
	priv->sd.state_lock = priv->sd.ctrl_handler->lock;

	ret = v4l2_subdev_init_finalize(&priv->sd);
	if (ret)
		goto err_entity_cleanup;

	ret = ub960_v4l2_notifier_register(priv);
	if (ret) {
		dev_err(dev, "v4l2 subdev notifier register failed: %d\n", ret);
		goto err_subdev_cleanup;
	}

	ret = v4l2_async_register_subdev(&priv->sd);
	if (ret) {
		dev_err(dev, "v4l2_async_register_subdev error: %d\n", ret);
		goto err_unreg_notif;
	}

	return 0;

err_unreg_notif:
	ub960_v4l2_notifier_unregister(priv);
err_subdev_cleanup:
	v4l2_subdev_cleanup(&priv->sd);
err_entity_cleanup:
	media_entity_cleanup(&priv->sd.entity);
err_free_ctrl:
	v4l2_ctrl_handler_free(&priv->ctrl_handler);

	return ret;
}
4979 
/* Tear down everything ub960_create_subdev() set up. */
static void ub960_destroy_subdev(struct ub960_data *priv)
{
	ub960_v4l2_notifier_unregister(priv);
	v4l2_async_unregister_subdev(&priv->sd);

	v4l2_subdev_cleanup(&priv->sd);

	media_entity_cleanup(&priv->sd.entity);
	v4l2_ctrl_handler_free(&priv->ctrl_handler);
}
4990 
/* 8-bit register address / 8-bit value I2C register map (full 0x00-0xff range). */
static const struct regmap_config ub960_regmap_config = {
	.name = "ds90ub960",

	.reg_bits = 8,
	.val_bits = 8,

	.max_register = 0xff,

	/*
	 * We do locking in the driver to cover the TX/RX port selection and the
	 * indirect register access.
	 */
	.disable_locking = true,
};
5005 
/*
 * Acquire all device-managed hardware resources: regmap, VDDIO regulator,
 * optional powerdown GPIO and the reference clock. All are devm-managed,
 * so no explicit release path is needed.
 */
static int ub960_get_hw_resources(struct ub960_data *priv)
{
	struct device *dev = &priv->client->dev;

	priv->regmap = devm_regmap_init_i2c(priv->client, &ub960_regmap_config);
	if (IS_ERR(priv->regmap))
		return PTR_ERR(priv->regmap);

	priv->vddio = devm_regulator_get(dev, "vddio");
	if (IS_ERR(priv->vddio))
		return dev_err_probe(dev, PTR_ERR(priv->vddio),
				     "cannot get VDDIO regulator\n");

	/* get power-down pin from DT */
	priv->pd_gpio =
		devm_gpiod_get_optional(dev, "powerdown", GPIOD_OUT_HIGH);
	if (IS_ERR(priv->pd_gpio))
		return dev_err_probe(dev, PTR_ERR(priv->pd_gpio),
				     "Cannot get powerdown GPIO\n");

	priv->refclk = devm_clk_get(dev, "refclk");
	if (IS_ERR(priv->refclk))
		return dev_err_probe(dev, PTR_ERR(priv->refclk),
				     "Cannot get REFCLK\n");

	return 0;
}
5033 
/*
 * Power up and initialize the core hardware: enable VDDIO and the refclk,
 * toggle the powerdown GPIO, soft-reset the chip, verify register access,
 * disable all RX ports, and (UB9702 only) release the GPIO lock.
 * On failure everything is powered back down in reverse order.
 */
static int ub960_enable_core_hw(struct ub960_data *priv)
{
	struct device *dev = &priv->client->dev;
	u8 rev_mask;
	int ret;
	u8 dev_sts;
	u8 refclk_freq;

	ret = regulator_enable(priv->vddio);
	if (ret)
		return dev_err_probe(dev, ret,
				     "failed to enable VDDIO regulator\n");

	ret = clk_prepare_enable(priv->refclk);
	if (ret) {
		dev_err_probe(dev, ret, "Failed to enable refclk\n");
		goto err_disable_vddio;
	}

	if (priv->pd_gpio) {
		gpiod_set_value_cansleep(priv->pd_gpio, 1);
		/* wait min 2 ms for reset to complete */
		fsleep(2000);
		gpiod_set_value_cansleep(priv->pd_gpio, 0);
		/* wait min 2 ms for power up to finish */
		fsleep(2000);
	}

	ret = ub960_reset(priv, true);
	if (ret)
		goto err_pd_gpio;

	/* Runtime check register accessibility */
	ret = ub960_read(priv, UB960_SR_REV_MASK, &rev_mask, NULL);
	if (ret) {
		dev_err_probe(dev, ret, "Cannot read first register, abort\n");
		goto err_pd_gpio;
	}

	dev_dbg(dev, "Found %s (rev/mask %#04x)\n", priv->hw_data->model,
		rev_mask);

	ret = ub960_read(priv, UB960_SR_DEVICE_STS, &dev_sts, NULL);
	if (ret)
		goto err_pd_gpio;

	/*
	 * UB954 REFCLK_FREQ is not synchronized, so multiple reads are recommended
	 * by the datasheet. However, a single read is practically seen to be
	 * sufficient and moreover it is only used for a debug print.
	 */
	if (priv->hw_data->chip_type == UB9702)
		ret = ub960_read(priv, UB9702_SR_REFCLK_FREQ, &refclk_freq,
				 NULL);
	else
		ret = ub960_read(priv, UB960_XR_REFCLK_FREQ, &refclk_freq,
				 NULL);
	if (ret)
		goto err_pd_gpio;

	dev_dbg(dev, "refclk valid %u freq %u MHz (clk fw freq %lu MHz)\n",
		!!(dev_sts & BIT(4)), refclk_freq,
		clk_get_rate(priv->refclk) / HZ_PER_MHZ);

	/* Disable all RX ports by default */
	ret = ub960_write(priv, UB960_SR_RX_PORT_CTL, 0, NULL);
	if (ret)
		goto err_pd_gpio;

	/* release GPIO lock */
	if (priv->hw_data->chip_type == UB9702) {
		ret = ub960_update_bits(priv, UB960_SR_RESET,
					UB960_SR_RESET_GPIO_LOCK_RELEASE,
					UB960_SR_RESET_GPIO_LOCK_RELEASE,
					NULL);
		if (ret)
			goto err_pd_gpio;
	}

	return 0;

err_pd_gpio:
	gpiod_set_value_cansleep(priv->pd_gpio, 1);
	clk_disable_unprepare(priv->refclk);
err_disable_vddio:
	regulator_disable(priv->vddio);

	return ret;
}
5123 
/* Power down the core hardware: assert powerdown, stop refclk, cut VDDIO. */
static void ub960_disable_core_hw(struct ub960_data *priv)
{
	gpiod_set_value_cansleep(priv->pd_gpio, 1);
	clk_disable_unprepare(priv->refclk);
	regulator_disable(priv->vddio);
}
5130 
/*
 * I2C probe: allocate driver state, acquire resources, power up the chip,
 * parse DT, initialize TX/RX ports, set up the I2C address translator and
 * remote serializers, register the v4l2 subdev, and start event polling.
 * The goto chain unwinds each step in reverse order on failure.
 */
static int ub960_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct ub960_data *priv;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->client = client;

	priv->hw_data = device_get_match_data(dev);

	mutex_init(&priv->reg_lock);

	INIT_DELAYED_WORK(&priv->poll_work, ub960_handler_work);

	/*
	 * Initialize these to invalid values so that the first reg writes will
	 * configure the target.
	 */
	priv->reg_current.indirect_target = 0xff;
	priv->reg_current.rxport = 0xff;
	priv->reg_current.txport = 0xff;

	ret = ub960_get_hw_resources(priv);
	if (ret)
		goto err_mutex_destroy;

	ret = ub960_enable_core_hw(priv);
	if (ret)
		goto err_mutex_destroy;

	ret = ub960_parse_dt(priv);
	if (ret)
		goto err_disable_core_hw;

	ret = ub960_init_tx_ports(priv);
	if (ret)
		goto err_free_ports;

	ret = ub960_rxport_enable_vpocs(priv);
	if (ret)
		goto err_free_ports;

	/* RX port init differs between the FPD-Link IV and III chips */
	if (priv->hw_data->chip_type == UB9702)
		ret = ub960_init_rx_ports_ub9702(priv);
	else
		ret = ub960_init_rx_ports_ub960(priv);

	if (ret)
		goto err_disable_vpocs;

	ret = ub960_init_atr(priv);
	if (ret)
		goto err_disable_vpocs;

	ret = ub960_rxport_add_serializers(priv);
	if (ret)
		goto err_uninit_atr;

	ret = ub960_create_subdev(priv);
	if (ret)
		goto err_free_sers;

	if (client->irq)
		dev_warn(dev, "irq support not implemented, using polling\n");

	/* Kick off the periodic event poll (re-arms itself) */
	schedule_delayed_work(&priv->poll_work,
			      msecs_to_jiffies(UB960_POLL_TIME_MS));

#ifdef UB960_DEBUG_I2C_RX_ID
	for_each_rxport(priv, it)
		ub960_write(priv, UB960_SR_I2C_RX_ID(it.nport),
			    (UB960_DEBUG_I2C_RX_ID + it.nport) << 1, NULL);
#endif

	return 0;

err_free_sers:
	ub960_rxport_remove_serializers(priv);
err_uninit_atr:
	ub960_uninit_atr(priv);
err_disable_vpocs:
	ub960_rxport_disable_vpocs(priv);
err_free_ports:
	ub960_rxport_free_ports(priv);
	ub960_txport_free_ports(priv);
err_disable_core_hw:
	ub960_disable_core_hw(priv);
err_mutex_destroy:
	mutex_destroy(&priv->reg_lock);
	return ret;
}
5226 
/* I2C remove: stop polling, then undo all probe steps in reverse order. */
static void ub960_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct ub960_data *priv = sd_to_ub960(sd);

	cancel_delayed_work_sync(&priv->poll_work);

	ub960_destroy_subdev(priv);
	ub960_rxport_remove_serializers(priv);
	ub960_uninit_atr(priv);
	ub960_rxport_disable_vpocs(priv);
	ub960_rxport_free_ports(priv);
	ub960_txport_free_ports(priv);
	ub960_disable_core_hw(priv);
	mutex_destroy(&priv->reg_lock);
}
5243 
/* DS90UB954-Q1: FPD-Link III, 2 RX ports, 1 CSI-2 TX port. */
static const struct ub960_hw_data ds90ub954_hw = {
	.model = "ub954",
	.chip_type = UB954,
	.chip_family = FAMILY_FPD3,
	.num_rxports = 2,
	.num_txports = 1,
};
5251 
/* DS90UB960-Q1: FPD-Link III, 4 RX ports, 2 CSI-2 TX ports. */
static const struct ub960_hw_data ds90ub960_hw = {
	.model = "ub960",
	.chip_type = UB960,
	.chip_family = FAMILY_FPD3,
	.num_rxports = 4,
	.num_txports = 2,
};
5259 
/* DS90UB9702-Q1: FPD-Link IV, 4 RX ports, 2 CSI-2 TX ports. */
static const struct ub960_hw_data ds90ub9702_hw = {
	.model = "ub9702",
	.chip_type = UB9702,
	.chip_family = FAMILY_FPD4,
	.num_rxports = 4,
	.num_txports = 2,
};
5267 
/* Legacy I2C device IDs; .driver_data points at the per-chip hw data. */
static const struct i2c_device_id ub960_id[] = {
	{ "ds90ub954-q1", (kernel_ulong_t)&ds90ub954_hw },
	{ "ds90ub960-q1", (kernel_ulong_t)&ds90ub960_hw },
	{ "ds90ub9702-q1", (kernel_ulong_t)&ds90ub9702_hw },
	{}
};
MODULE_DEVICE_TABLE(i2c, ub960_id);
5275 
/* Device tree match table; .data points at the per-chip hw data. */
static const struct of_device_id ub960_dt_ids[] = {
	{ .compatible = "ti,ds90ub954-q1", .data = &ds90ub954_hw },
	{ .compatible = "ti,ds90ub960-q1", .data = &ds90ub960_hw },
	{ .compatible = "ti,ds90ub9702-q1", .data = &ds90ub9702_hw },
	{}
};
MODULE_DEVICE_TABLE(of, ub960_dt_ids);
5283 
/* I2C driver registration (module init/exit generated by module_i2c_driver). */
static struct i2c_driver ds90ub960_driver = {
	.probe		= ub960_probe,
	.remove		= ub960_remove,
	.id_table	= ub960_id,
	.driver = {
		.name	= "ds90ub960",
		.of_match_table = ub960_dt_ids,
	},
};
module_i2c_driver(ds90ub960_driver);
5294 
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Texas Instruments FPD-Link III/IV Deserializers Driver");
MODULE_AUTHOR("Luca Ceresoli <luca@lucaceresoli.net>");
MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>");
/* The i2c-atr helper symbols used by this driver live in the I2C_ATR namespace */
MODULE_IMPORT_NS("I2C_ATR");