1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Driver for the Texas Instruments DS90UB960-Q1 video deserializer
4  *
5  * Copyright (c) 2019 Luca Ceresoli <luca@lucaceresoli.net>
6  * Copyright (c) 2023 Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
7  */
8 
9 /*
10  * (Possible) TODOs:
11  *
12  * - PM for serializer and remote peripherals. We need to manage:
13  *   - VPOC
14  *     - Power domain? Regulator? Somehow any remote device should be able to
15  *       cause the VPOC to be turned on.
16  *   - Link between the deserializer and the serializer
17  *     - Related to VPOC management. We probably always want to turn on the VPOC
18  *       and then enable the link.
19  *   - Serializer's services: i2c, gpios, power
20  *     - The serializer needs to resume before the remote peripherals can
21  *       e.g. use the i2c.
22  *     - How to handle gpios? Reserving a gpio essentially keeps the provider
23  *       (serializer) always powered on.
24  * - Do we need a new bus for the FPD-Link? At the moment the serializers
25  *   are children of the same i2c-adapter where the deserializer resides.
26  * - i2c-atr could be made embeddable instead of allocatable.
27  */
28 
29 #include <linux/bitops.h>
30 #include <linux/cleanup.h>
31 #include <linux/clk.h>
32 #include <linux/delay.h>
33 #include <linux/fwnode.h>
34 #include <linux/gpio/consumer.h>
35 #include <linux/i2c-atr.h>
36 #include <linux/i2c.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/kernel.h>
40 #include <linux/kthread.h>
41 #include <linux/module.h>
42 #include <linux/mutex.h>
43 #include <linux/property.h>
44 #include <linux/regmap.h>
45 #include <linux/regulator/consumer.h>
46 #include <linux/slab.h>
47 #include <linux/units.h>
48 #include <linux/workqueue.h>
49 
50 #include <media/i2c/ds90ub9xx.h>
51 #include <media/mipi-csi2.h>
52 #include <media/v4l2-ctrls.h>
53 #include <media/v4l2-fwnode.h>
54 #include <media/v4l2-subdev.h>
55 
56 #include "ds90ub953.h"
57 
58 #define MHZ(v) ((u32)((v) * HZ_PER_MHZ))
59 
60 /*
61  * If this is defined, the i2c addresses from UB960_DEBUG_I2C_RX_ID to
62  * UB960_DEBUG_I2C_RX_ID + 3 can be used to access the paged RX port registers
63  * directly.
64  *
65  * Only for debug purposes.
66  */
67 /* #define UB960_DEBUG_I2C_RX_ID	0x40 */
68 
69 #define UB960_POLL_TIME_MS	500
70 
71 #define UB960_MAX_RX_NPORTS	4
72 #define UB960_MAX_TX_NPORTS	2
73 #define UB960_MAX_NPORTS	(UB960_MAX_RX_NPORTS + UB960_MAX_TX_NPORTS)
74 
75 #define UB960_MAX_PORT_ALIASES	8
76 
77 #define UB960_NUM_BC_GPIOS		4
78 
79 /*
80  * Register map
81  *
82  * 0x00-0x32   Shared (UB960_SR)
83  * 0x33-0x3a   CSI-2 TX (per-port paged on DS90UB960, shared on 954) (UB960_TR)
84  * 0x4c        Shared (UB960_SR)
85  * 0x4d-0x7f   FPD-Link RX, per-port paged (UB960_RR)
86  * 0xb0-0xbf   Shared (UB960_SR)
87  * 0xd0-0xdf   FPD-Link RX, per-port paged (UB960_RR)
88  * 0xf0-0xf5   Shared (UB960_SR)
89  * 0xf8-0xfb   Shared (UB960_SR)
90  * All others  Reserved
91  *
92  * Register prefixes:
93  * UB960_SR_* = Shared register
94  * UB960_RR_* = FPD-Link RX, per-port paged register
95  * UB960_TR_* = CSI-2 TX, per-port paged register
96  * UB960_XR_* = Reserved register
97  * UB960_IR_* = Indirect register
98  */
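
/*
 * Example of the paged access pattern (a sketch of what the
 * ub960_rxport_*() helpers below implement): to access a UB960_RR_*
 * register, the RX port is first selected through UB960_SR_FPD3_PORT_SEL:
 *
 *	regmap_write(priv->regmap, UB960_SR_FPD3_PORT_SEL,
 *		     (nport << 4) | BIT(nport));
 *	regmap_read(priv->regmap, UB960_RR_RX_PORT_STS1, &v);
 */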
99 
100 #define UB960_SR_I2C_DEV_ID			0x00
101 #define UB960_SR_RESET				0x01
102 #define UB960_SR_RESET_DIGITAL_RESET1		BIT(1)
103 #define UB960_SR_RESET_DIGITAL_RESET0		BIT(0)
104 #define UB960_SR_RESET_GPIO_LOCK_RELEASE	BIT(5)
105 
106 #define UB960_SR_GEN_CONFIG			0x02
107 #define UB960_SR_REV_MASK			0x03
108 #define UB960_SR_DEVICE_STS			0x04
109 #define UB960_SR_PAR_ERR_THOLD_HI		0x05
110 #define UB960_SR_PAR_ERR_THOLD_LO		0x06
111 #define UB960_SR_BCC_WDOG_CTL			0x07
112 #define UB960_SR_I2C_CTL1			0x08
113 #define UB960_SR_I2C_CTL2			0x09
114 #define UB960_SR_SCL_HIGH_TIME			0x0a
115 #define UB960_SR_SCL_LOW_TIME			0x0b
116 #define UB960_SR_RX_PORT_CTL			0x0c
117 #define UB960_SR_IO_CTL				0x0d
118 #define UB960_SR_GPIO_PIN_STS			0x0e
119 #define UB960_SR_GPIO_INPUT_CTL			0x0f
120 #define UB960_SR_GPIO_PIN_CTL(n)		(0x10 + (n)) /* n < UB960_NUM_GPIOS */
121 #define UB960_SR_GPIO_PIN_CTL_GPIO_OUT_SEL		5
122 #define UB960_SR_GPIO_PIN_CTL_GPIO_OUT_SRC_SHIFT	2
123 #define UB960_SR_GPIO_PIN_CTL_GPIO_OUT_EN		BIT(0)
124 
125 #define UB960_SR_FS_CTL				0x18
126 #define UB960_SR_FS_HIGH_TIME_1			0x19
127 #define UB960_SR_FS_HIGH_TIME_0			0x1a
128 #define UB960_SR_FS_LOW_TIME_1			0x1b
129 #define UB960_SR_FS_LOW_TIME_0			0x1c
130 #define UB960_SR_MAX_FRM_HI			0x1d
131 #define UB960_SR_MAX_FRM_LO			0x1e
132 #define UB960_SR_CSI_PLL_CTL			0x1f
133 
134 #define UB960_SR_FWD_CTL1			0x20
135 #define UB960_SR_FWD_CTL1_PORT_DIS(n)		BIT((n) + 4)
136 
137 #define UB960_SR_FWD_CTL2			0x21
138 #define UB960_SR_FWD_STS			0x22
139 
140 #define UB960_SR_INTERRUPT_CTL			0x23
141 #define UB960_SR_INTERRUPT_CTL_INT_EN		BIT(7)
142 #define UB960_SR_INTERRUPT_CTL_IE_CSI_TX0	BIT(4)
143 #define UB960_SR_INTERRUPT_CTL_IE_RX(n)		BIT((n)) /* rxport[n] IRQ */
144 
145 #define UB960_SR_INTERRUPT_STS			0x24
146 #define UB960_SR_INTERRUPT_STS_INT		BIT(7)
147 #define UB960_SR_INTERRUPT_STS_IS_CSI_TX(n)	BIT(4 + (n)) /* txport[n] IRQ */
148 #define UB960_SR_INTERRUPT_STS_IS_RX(n)		BIT((n)) /* rxport[n] IRQ */
149 
150 #define UB960_SR_TS_CONFIG			0x25
151 #define UB960_SR_TS_CONTROL			0x26
152 #define UB960_SR_TS_LINE_HI			0x27
153 #define UB960_SR_TS_LINE_LO			0x28
154 #define UB960_SR_TS_STATUS			0x29
155 #define UB960_SR_TIMESTAMP_P0_HI		0x2a
156 #define UB960_SR_TIMESTAMP_P0_LO		0x2b
157 #define UB960_SR_TIMESTAMP_P1_HI		0x2c
158 #define UB960_SR_TIMESTAMP_P1_LO		0x2d
159 
160 #define UB960_SR_CSI_PORT_SEL			0x32
161 
162 #define UB960_TR_CSI_CTL			0x33
163 #define UB960_TR_CSI_CTL_CSI_CAL_EN		BIT(6)
164 #define UB960_TR_CSI_CTL_CSI_CONTS_CLOCK	BIT(1)
165 #define UB960_TR_CSI_CTL_CSI_ENABLE		BIT(0)
166 
167 #define UB960_TR_CSI_CTL2			0x34
168 #define UB960_TR_CSI_STS			0x35
169 #define UB960_TR_CSI_TX_ICR			0x36
170 
171 #define UB960_TR_CSI_TX_ISR			0x37
172 #define UB960_TR_CSI_TX_ISR_IS_CSI_SYNC_ERROR	BIT(3)
173 #define UB960_TR_CSI_TX_ISR_IS_CSI_PASS_ERROR	BIT(1)
174 
175 #define UB960_TR_CSI_TEST_CTL			0x38
176 #define UB960_TR_CSI_TEST_PATT_HI		0x39
177 #define UB960_TR_CSI_TEST_PATT_LO		0x3a
178 
179 #define UB960_XR_SFILTER_CFG			0x41
180 #define UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT	4
181 #define UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT	0
182 
183 #define UB960_XR_AEQ_CTL1			0x42
184 #define UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_FPD_CLK	BIT(6)
185 #define UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_ENCODING	BIT(5)
186 #define UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_PARITY	BIT(4)
187 #define UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_MASK        \
188 	(UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_FPD_CLK |  \
189 	 UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_ENCODING | \
190 	 UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_PARITY)
191 #define UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN	BIT(0)
192 
193 #define UB960_XR_AEQ_ERR_THOLD			0x43
194 
195 #define UB960_RR_BCC_ERR_CTL			0x46
196 #define UB960_RR_BCC_STATUS			0x47
197 #define UB960_RR_BCC_STATUS_SEQ_ERROR		BIT(5)
198 #define UB960_RR_BCC_STATUS_MASTER_ERR		BIT(4)
199 #define UB960_RR_BCC_STATUS_MASTER_TO		BIT(3)
200 #define UB960_RR_BCC_STATUS_SLAVE_ERR		BIT(2)
201 #define UB960_RR_BCC_STATUS_SLAVE_TO		BIT(1)
202 #define UB960_RR_BCC_STATUS_RESP_ERR		BIT(0)
203 #define UB960_RR_BCC_STATUS_ERROR_MASK                                    \
204 	(UB960_RR_BCC_STATUS_SEQ_ERROR | UB960_RR_BCC_STATUS_MASTER_ERR | \
205 	 UB960_RR_BCC_STATUS_MASTER_TO | UB960_RR_BCC_STATUS_SLAVE_ERR |  \
206 	 UB960_RR_BCC_STATUS_SLAVE_TO | UB960_RR_BCC_STATUS_RESP_ERR)
207 
208 #define UB960_RR_FPD3_CAP			0x4a
209 #define UB960_RR_RAW_EMBED_DTYPE		0x4b
210 #define UB960_RR_RAW_EMBED_DTYPE_LINES_SHIFT	6
211 
212 #define UB960_SR_FPD3_PORT_SEL			0x4c
213 
214 #define UB960_RR_RX_PORT_STS1			0x4d
215 #define UB960_RR_RX_PORT_STS1_BCC_CRC_ERROR	BIT(5)
216 #define UB960_RR_RX_PORT_STS1_LOCK_STS_CHG	BIT(4)
217 #define UB960_RR_RX_PORT_STS1_BCC_SEQ_ERROR	BIT(3)
218 #define UB960_RR_RX_PORT_STS1_PARITY_ERROR	BIT(2)
219 #define UB960_RR_RX_PORT_STS1_PORT_PASS		BIT(1)
220 #define UB960_RR_RX_PORT_STS1_LOCK_STS		BIT(0)
221 #define UB960_RR_RX_PORT_STS1_ERROR_MASK       \
222 	(UB960_RR_RX_PORT_STS1_BCC_CRC_ERROR | \
223 	 UB960_RR_RX_PORT_STS1_BCC_SEQ_ERROR | \
224 	 UB960_RR_RX_PORT_STS1_PARITY_ERROR)
225 
226 #define UB960_RR_RX_PORT_STS2			0x4e
227 #define UB960_RR_RX_PORT_STS2_LINE_LEN_UNSTABLE	BIT(7)
228 #define UB960_RR_RX_PORT_STS2_LINE_LEN_CHG	BIT(6)
229 #define UB960_RR_RX_PORT_STS2_FPD3_ENCODE_ERROR	BIT(5)
230 #define UB960_RR_RX_PORT_STS2_BUFFER_ERROR	BIT(4)
231 #define UB960_RR_RX_PORT_STS2_CSI_ERROR		BIT(3)
232 #define UB960_RR_RX_PORT_STS2_FREQ_STABLE	BIT(2)
233 #define UB960_RR_RX_PORT_STS2_CABLE_FAULT	BIT(1)
234 #define UB960_RR_RX_PORT_STS2_LINE_CNT_CHG	BIT(0)
235 #define UB960_RR_RX_PORT_STS2_ERROR_MASK       \
236 	UB960_RR_RX_PORT_STS2_BUFFER_ERROR
237 
238 #define UB960_RR_RX_FREQ_HIGH			0x4f
239 #define UB960_RR_RX_FREQ_LOW			0x50
240 #define UB960_RR_SENSOR_STS_0			0x51
241 #define UB960_RR_SENSOR_STS_1			0x52
242 #define UB960_RR_SENSOR_STS_2			0x53
243 #define UB960_RR_SENSOR_STS_3			0x54
244 #define UB960_RR_RX_PAR_ERR_HI			0x55
245 #define UB960_RR_RX_PAR_ERR_LO			0x56
246 #define UB960_RR_BIST_ERR_COUNT			0x57
247 
248 #define UB960_RR_BCC_CONFIG			0x58
249 #define UB960_RR_BCC_CONFIG_BC_ALWAYS_ON	BIT(4)
250 #define UB960_RR_BCC_CONFIG_AUTO_ACK_ALL	BIT(5)
251 #define UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH	BIT(6)
252 #define UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK	GENMASK(2, 0)
253 
254 #define UB960_RR_DATAPATH_CTL1			0x59
255 #define UB960_RR_DATAPATH_CTL2			0x5a
256 #define UB960_RR_SER_ID				0x5b
257 #define UB960_RR_SER_ID_FREEZE_DEVICE_ID	BIT(0)
258 #define UB960_RR_SER_ALIAS_ID			0x5c
259 #define UB960_RR_SER_ALIAS_ID_AUTO_ACK		BIT(0)
260 
261 /* For these two register sets: n < UB960_MAX_PORT_ALIASES */
262 #define UB960_RR_SLAVE_ID(n)			(0x5d + (n))
263 #define UB960_RR_SLAVE_ALIAS(n)			(0x65 + (n))
264 
265 #define UB960_RR_PORT_CONFIG			0x6d
266 #define UB960_RR_PORT_CONFIG_FPD3_MODE_MASK	GENMASK(1, 0)
267 
268 #define UB960_RR_BC_GPIO_CTL(n)			(0x6e + (n)) /* n < 2 */
269 #define UB960_RR_RAW10_ID			0x70
270 #define UB960_RR_RAW10_ID_VC_SHIFT		6
271 #define UB960_RR_RAW10_ID_DT_SHIFT		0
272 
273 #define UB960_RR_RAW12_ID			0x71
274 #define UB960_RR_CSI_VC_MAP			0x72
275 #define UB960_RR_CSI_VC_MAP_SHIFT(x)		((x) * 2)
276 
277 #define UB960_RR_LINE_COUNT_HI			0x73
278 #define UB960_RR_LINE_COUNT_LO			0x74
279 #define UB960_RR_LINE_LEN_1			0x75
280 #define UB960_RR_LINE_LEN_0			0x76
281 #define UB960_RR_FREQ_DET_CTL			0x77
282 #define UB960_RR_MAILBOX_1			0x78
283 #define UB960_RR_MAILBOX_2			0x79
284 
285 #define UB960_RR_CSI_RX_STS			0x7a
286 #define UB960_RR_CSI_RX_STS_LENGTH_ERR		BIT(3)
287 #define UB960_RR_CSI_RX_STS_CKSUM_ERR		BIT(2)
288 #define UB960_RR_CSI_RX_STS_ECC2_ERR		BIT(1)
289 #define UB960_RR_CSI_RX_STS_ECC1_ERR		BIT(0)
290 #define UB960_RR_CSI_RX_STS_ERROR_MASK                                    \
291 	(UB960_RR_CSI_RX_STS_LENGTH_ERR | UB960_RR_CSI_RX_STS_CKSUM_ERR | \
292 	 UB960_RR_CSI_RX_STS_ECC2_ERR | UB960_RR_CSI_RX_STS_ECC1_ERR)
293 
294 #define UB960_RR_CSI_ERR_COUNTER		0x7b
295 #define UB960_RR_PORT_CONFIG2			0x7c
296 #define UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_MASK GENMASK(7, 6)
297 #define UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_SHIFT 6
298 
299 #define UB960_RR_PORT_CONFIG2_LV_POL_LOW	BIT(1)
300 #define UB960_RR_PORT_CONFIG2_FV_POL_LOW	BIT(0)
301 
302 #define UB960_RR_PORT_PASS_CTL			0x7d
303 #define UB960_RR_SEN_INT_RISE_CTL		0x7e
304 #define UB960_RR_SEN_INT_FALL_CTL		0x7f
305 
306 #define UB960_SR_CSI_FRAME_COUNT_HI(n)		(0x90 + 8 * (n))
307 #define UB960_SR_CSI_FRAME_COUNT_LO(n)		(0x91 + 8 * (n))
308 #define UB960_SR_CSI_FRAME_ERR_COUNT_HI(n)	(0x92 + 8 * (n))
309 #define UB960_SR_CSI_FRAME_ERR_COUNT_LO(n)	(0x93 + 8 * (n))
310 #define UB960_SR_CSI_LINE_COUNT_HI(n)		(0x94 + 8 * (n))
311 #define UB960_SR_CSI_LINE_COUNT_LO(n)		(0x95 + 8 * (n))
312 #define UB960_SR_CSI_LINE_ERR_COUNT_HI(n)	(0x96 + 8 * (n))
313 #define UB960_SR_CSI_LINE_ERR_COUNT_LO(n)	(0x97 + 8 * (n))
314 
315 #define UB960_XR_REFCLK_FREQ			0xa5	/* UB960 */
316 
317 #define UB960_SR_IND_ACC_CTL			0xb0
318 #define UB960_SR_IND_ACC_CTL_IA_AUTO_INC	BIT(1)
319 
320 #define UB960_SR_IND_ACC_ADDR			0xb1
321 #define UB960_SR_IND_ACC_DATA			0xb2
322 #define UB960_SR_BIST_CONTROL			0xb3
323 #define UB960_SR_MODE_IDX_STS			0xb8
324 #define UB960_SR_LINK_ERROR_COUNT		0xb9
325 #define UB960_SR_FPD3_ENC_CTL			0xba
326 #define UB960_SR_FV_MIN_TIME			0xbc
327 #define UB960_SR_GPIO_PD_CTL			0xbe
328 
329 #define UB960_RR_PORT_DEBUG			0xd0
330 #define UB960_RR_AEQ_CTL2			0xd2
331 #define UB960_RR_AEQ_CTL2_SET_AEQ_FLOOR		BIT(2)
332 
333 #define UB960_RR_AEQ_STATUS			0xd3
334 #define UB960_RR_AEQ_STATUS_STATUS_2		GENMASK(5, 3)
335 #define UB960_RR_AEQ_STATUS_STATUS_1		GENMASK(2, 0)
336 
337 #define UB960_RR_AEQ_BYPASS			0xd4
338 #define UB960_RR_AEQ_BYPASS_EQ_STAGE1_VALUE_SHIFT	5
339 #define UB960_RR_AEQ_BYPASS_EQ_STAGE1_VALUE_MASK	GENMASK(7, 5)
340 #define UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_SHIFT	1
341 #define UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_MASK	GENMASK(3, 1)
342 #define UB960_RR_AEQ_BYPASS_ENABLE			BIT(0)
343 
344 #define UB960_RR_AEQ_MIN_MAX			0xd5
345 #define UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT	4
346 #define UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT	0
347 
348 #define UB960_RR_SFILTER_STS_0			0xd6
349 #define UB960_RR_SFILTER_STS_1			0xd7
350 #define UB960_RR_PORT_ICR_HI			0xd8
351 #define UB960_RR_PORT_ICR_LO			0xd9
352 #define UB960_RR_PORT_ISR_HI			0xda
353 #define UB960_RR_PORT_ISR_LO			0xdb
354 #define UB960_RR_FC_GPIO_STS			0xdc
355 #define UB960_RR_FC_GPIO_ICR			0xdd
356 #define UB960_RR_SEN_INT_RISE_STS		0xde
357 #define UB960_RR_SEN_INT_FALL_STS		0xdf
358 
359 
360 #define UB960_SR_FPD3_RX_ID(n)			(0xf0 + (n))
361 #define UB960_SR_FPD3_RX_ID_LEN			6
362 
363 #define UB960_SR_I2C_RX_ID(n)			(0xf8 + (n))
364 
365 /* Indirect register blocks */
366 #define UB960_IND_TARGET_PAT_GEN		0x00
367 #define UB960_IND_TARGET_RX_ANA(n)		(0x01 + (n))
368 #define UB960_IND_TARGET_CSI_ANA		0x07
369 
370 /* UB960_IR_PGEN_*: Indirect Registers for Test Pattern Generator */
371 
372 #define UB960_IR_PGEN_CTL			0x01
373 #define UB960_IR_PGEN_CTL_PGEN_ENABLE		BIT(0)
374 
375 #define UB960_IR_PGEN_CFG			0x02
376 #define UB960_IR_PGEN_CSI_DI			0x03
377 #define UB960_IR_PGEN_LINE_SIZE1		0x04
378 #define UB960_IR_PGEN_LINE_SIZE0		0x05
379 #define UB960_IR_PGEN_BAR_SIZE1			0x06
380 #define UB960_IR_PGEN_BAR_SIZE0			0x07
381 #define UB960_IR_PGEN_ACT_LPF1			0x08
382 #define UB960_IR_PGEN_ACT_LPF0			0x09
383 #define UB960_IR_PGEN_TOT_LPF1			0x0a
384 #define UB960_IR_PGEN_TOT_LPF0			0x0b
385 #define UB960_IR_PGEN_LINE_PD1			0x0c
386 #define UB960_IR_PGEN_LINE_PD0			0x0d
387 #define UB960_IR_PGEN_VBP			0x0e
388 #define UB960_IR_PGEN_VFP			0x0f
389 #define UB960_IR_PGEN_COLOR(n)			(0x10 + (n)) /* n < 15 */
390 
391 #define UB960_IR_RX_ANA_STROBE_SET_CLK		0x08
392 #define UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY	BIT(3)
393 #define UB960_IR_RX_ANA_STROBE_SET_CLK_DELAY_MASK	GENMASK(2, 0)
394 
395 #define UB960_IR_RX_ANA_STROBE_SET_DATA		0x09
396 #define UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY	BIT(3)
397 #define UB960_IR_RX_ANA_STROBE_SET_DATA_DELAY_MASK	GENMASK(2, 0)
398 
399 /* UB9702 Registers */
400 
401 #define UB9702_SR_CSI_EXCLUSIVE_FWD2		0x3c
402 #define UB9702_SR_REFCLK_FREQ			0x3d
403 #define UB9702_RR_RX_CTL_1			0x80
404 #define UB9702_RR_RX_CTL_2			0x87
405 #define UB9702_RR_VC_ID_MAP(x)			(0xa0 + (x))
406 #define UB9702_SR_FPD_RATE_CFG			0xc2
407 #define UB9702_SR_CSI_PLL_DIV			0xc9
408 #define UB9702_RR_RX_SM_SEL_2			0xd4
409 #define UB9702_RR_CHANNEL_MODE			0xe4
410 
411 #define UB9702_IND_TARGET_SAR_ADC		0x0a
412 
413 #define UB9702_IR_RX_ANA_FPD_BC_CTL0		0x04
414 #define UB9702_IR_RX_ANA_FPD_BC_CTL1		0x0d
415 #define UB9702_IR_RX_ANA_FPD_BC_CTL2		0x1b
416 #define UB9702_IR_RX_ANA_SYSTEM_INIT_REG0	0x21
417 #define UB9702_IR_RX_ANA_AEQ_ALP_SEL6		0x27
418 #define UB9702_IR_RX_ANA_AEQ_ALP_SEL7		0x28
419 #define UB9702_IR_RX_ANA_AEQ_ALP_SEL10		0x2b
420 #define UB9702_IR_RX_ANA_AEQ_ALP_SEL11		0x2c
421 #define UB9702_IR_RX_ANA_EQ_ADAPT_CTRL		0x2e
422 #define UB9702_IR_RX_ANA_AEQ_CFG_1		0x34
423 #define UB9702_IR_RX_ANA_AEQ_CFG_2		0x4d
424 #define UB9702_IR_RX_ANA_GAIN_CTRL_0		0x71
426 #define UB9702_IR_RX_ANA_VGA_CTRL_SEL_1		0x72
427 #define UB9702_IR_RX_ANA_VGA_CTRL_SEL_2		0x73
428 #define UB9702_IR_RX_ANA_VGA_CTRL_SEL_3		0x74
429 #define UB9702_IR_RX_ANA_VGA_CTRL_SEL_6		0x77
430 #define UB9702_IR_RX_ANA_AEQ_CFG_3		0x79
431 #define UB9702_IR_RX_ANA_AEQ_CFG_4		0x85
432 #define UB9702_IR_RX_ANA_EQ_CTRL_SEL_15		0x87
433 #define UB9702_IR_RX_ANA_EQ_CTRL_SEL_24		0x90
434 #define UB9702_IR_RX_ANA_EQ_CTRL_SEL_38		0x9e
435 #define UB9702_IR_RX_ANA_FPD3_CDR_CTRL_SEL_5	0xa5
436 #define UB9702_IR_RX_ANA_FPD3_AEQ_CTRL_SEL_1	0xa8
437 #define UB9702_IR_RX_ANA_EQ_OVERRIDE_CTRL	0xf0
438 #define UB9702_IR_RX_ANA_VGA_CTRL_SEL_8		0xf1
439 
440 #define UB9702_IR_CSI_ANA_CSIPLL_REG_1		0x92
441 
442 /* EQ related */
443 
444 #define UB960_MIN_AEQ_STROBE_POS -7
445 #define UB960_MAX_AEQ_STROBE_POS  7
446 
447 #define UB960_MANUAL_STROBE_EXTRA_DELAY 6
448 
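/*
 * In manual strobe mode a fixed extra delay of
 * UB960_MANUAL_STROBE_EXTRA_DELAY can be enabled on the clock or the data
 * side, extending the +-7 AEQ strobe range to +-13.
 */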
449 #define UB960_MIN_MANUAL_STROBE_POS -(7 + UB960_MANUAL_STROBE_EXTRA_DELAY)
450 #define UB960_MAX_MANUAL_STROBE_POS  (7 + UB960_MANUAL_STROBE_EXTRA_DELAY)
451 #define UB960_NUM_MANUAL_STROBE_POS  (UB960_MAX_MANUAL_STROBE_POS - UB960_MIN_MANUAL_STROBE_POS + 1)
452 
453 #define UB960_MIN_EQ_LEVEL  0
454 #define UB960_MAX_EQ_LEVEL  14
455 #define UB960_NUM_EQ_LEVELS (UB960_MAX_EQ_LEVEL - UB960_MIN_EQ_LEVEL + 1)
456 
457 struct ub960_hw_data {
458 	const char *model;
459 	u8 num_rxports;
460 	u8 num_txports;
461 	bool is_ub9702;
462 	bool is_fpdlink4;
463 };
464 
465 enum ub960_rxport_mode {
466 	RXPORT_MODE_RAW10 = 0,
467 	RXPORT_MODE_RAW12_HF = 1,
468 	RXPORT_MODE_RAW12_LF = 2,
469 	RXPORT_MODE_CSI2_SYNC = 3,
470 	RXPORT_MODE_CSI2_NONSYNC = 4,
471 	RXPORT_MODE_LAST = RXPORT_MODE_CSI2_NONSYNC,
472 };
473 
474 enum ub960_rxport_cdr {
475 	RXPORT_CDR_FPD3 = 0,
476 	RXPORT_CDR_FPD4 = 1,
477 	RXPORT_CDR_LAST = RXPORT_CDR_FPD4,
478 };
479 
480 struct ub960_rxport {
481 	struct ub960_data      *priv;
482 	u8                      nport;	/* RX port number, and index in priv->rxports[] */
483 
484 	struct {
485 		struct v4l2_subdev *sd;
486 		u16 pad;
487 		struct fwnode_handle *ep_fwnode;
488 	} source;
489 
490 	/* Serializer */
491 	struct {
492 		struct fwnode_handle *fwnode;
493 		struct i2c_client *client;
494 		unsigned short alias; /* I2C alias (lower 7 bits) */
495 		short addr; /* Local I2C address (lower 7 bits) */
496 		struct ds90ub9xx_platform_data pdata;
497 		struct regmap *regmap;
498 	} ser;
499 
500 	enum ub960_rxport_mode  rx_mode;
501 	enum ub960_rxport_cdr	cdr_mode;
502 
503 	u8			lv_fv_pol;	/* LV and FV polarities */
504 
505 	struct regulator	*vpoc;
506 
507 	/* EQ settings */
508 	struct {
509 		bool manual_eq;
510 
511 		s8 strobe_pos;
512 
513 		union {
514 			struct {
515 				u8 eq_level_min;
516 				u8 eq_level_max;
517 			} aeq;
518 
519 			struct {
520 				u8 eq_level;
521 			} manual;
522 		};
523 	} eq;
524 
525 	/* lock for aliased_addrs and associated registers */
526 	struct mutex aliased_addrs_lock;
527 	u16 aliased_addrs[UB960_MAX_PORT_ALIASES];
528 };
529 
530 struct ub960_asd {
531 	struct v4l2_async_connection base;
532 	struct ub960_rxport *rxport;
533 };
534 
535 static inline struct ub960_asd *to_ub960_asd(struct v4l2_async_connection *asd)
536 {
537 	return container_of(asd, struct ub960_asd, base);
538 }
539 
540 struct ub960_txport {
541 	struct ub960_data      *priv;
542 	u8                      nport;	/* TX port number, and index in priv->txports[] */
543 
544 	u32 num_data_lanes;
545 	bool non_continous_clk;
546 };
547 
548 struct ub960_data {
549 	const struct ub960_hw_data	*hw_data;
550 	struct i2c_client	*client; /* for shared local registers */
551 	struct regmap		*regmap;
552 
553 	/* lock for register access */
554 	struct mutex		reg_lock;
555 
556 	struct clk		*refclk;
557 
558 	struct regulator	*vddio;
559 
560 	struct gpio_desc	*pd_gpio;
561 	struct delayed_work	poll_work;
562 	struct ub960_rxport	*rxports[UB960_MAX_RX_NPORTS];
563 	struct ub960_txport	*txports[UB960_MAX_TX_NPORTS];
564 
565 	struct v4l2_subdev	sd;
566 	struct media_pad	pads[UB960_MAX_NPORTS];
567 
568 	struct v4l2_ctrl_handler   ctrl_handler;
569 	struct v4l2_async_notifier notifier;
570 
571 	u32 tx_data_rate;		/* Nominal data rate (bits per second) */
572 	s64 tx_link_freq[1];
573 
574 	struct i2c_atr *atr;
575 
576 	struct {
577 		u8 rxport;
578 		u8 txport;
579 		u8 indirect_target;
580 	} reg_current;
581 
582 	bool streaming;
583 
584 	u8 stored_fwd_ctl;
585 
586 	u64 stream_enable_mask[UB960_MAX_NPORTS];
587 
588 	/* These are common to all ports */
589 	struct {
590 		bool manual;
591 
592 		s8 min;
593 		s8 max;
594 	} strobe;
595 };
596 
597 static inline struct ub960_data *sd_to_ub960(struct v4l2_subdev *sd)
598 {
599 	return container_of(sd, struct ub960_data, sd);
600 }
601 
602 static inline bool ub960_pad_is_sink(struct ub960_data *priv, u32 pad)
603 {
604 	return pad < priv->hw_data->num_rxports;
605 }
606 
607 static inline bool ub960_pad_is_source(struct ub960_data *priv, u32 pad)
608 {
609 	return pad >= priv->hw_data->num_rxports;
610 }
611 
612 static inline unsigned int ub960_pad_to_port(struct ub960_data *priv, u32 pad)
613 {
614 	if (ub960_pad_is_sink(priv, pad))
615 		return pad;
616 	else
617 		return pad - priv->hw_data->num_rxports;
618 }
619 
620 struct ub960_format_info {
621 	u32 code;
622 	u32 bpp;
623 	u8 datatype;
624 	bool meta;
625 };
626 
627 static const struct ub960_format_info ub960_formats[] = {
628 	{ .code = MEDIA_BUS_FMT_RGB888_1X24, .bpp = 24, .datatype = MIPI_CSI2_DT_RGB888, },
629 
630 	{ .code = MEDIA_BUS_FMT_YUYV8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
631 	{ .code = MEDIA_BUS_FMT_UYVY8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
632 	{ .code = MEDIA_BUS_FMT_VYUY8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
633 	{ .code = MEDIA_BUS_FMT_YVYU8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
634 
635 	{ .code = MEDIA_BUS_FMT_SBGGR8_1X8, .bpp = 8, .datatype = MIPI_CSI2_DT_RAW8, },
636 	{ .code = MEDIA_BUS_FMT_SGBRG8_1X8, .bpp = 8, .datatype = MIPI_CSI2_DT_RAW8, },
637 	{ .code = MEDIA_BUS_FMT_SGRBG8_1X8, .bpp = 8, .datatype = MIPI_CSI2_DT_RAW8, },
638 	{ .code = MEDIA_BUS_FMT_SRGGB8_1X8, .bpp = 8, .datatype = MIPI_CSI2_DT_RAW8, },
639 
640 	{ .code = MEDIA_BUS_FMT_SBGGR10_1X10, .bpp = 10, .datatype = MIPI_CSI2_DT_RAW10, },
641 	{ .code = MEDIA_BUS_FMT_SGBRG10_1X10, .bpp = 10, .datatype = MIPI_CSI2_DT_RAW10, },
642 	{ .code = MEDIA_BUS_FMT_SGRBG10_1X10, .bpp = 10, .datatype = MIPI_CSI2_DT_RAW10, },
643 	{ .code = MEDIA_BUS_FMT_SRGGB10_1X10, .bpp = 10, .datatype = MIPI_CSI2_DT_RAW10, },
644 
645 	{ .code = MEDIA_BUS_FMT_SBGGR12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
646 	{ .code = MEDIA_BUS_FMT_SGBRG12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
647 	{ .code = MEDIA_BUS_FMT_SGRBG12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
648 	{ .code = MEDIA_BUS_FMT_SRGGB12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
649 };
650 
651 static const struct ub960_format_info *ub960_find_format(u32 code)
652 {
653 	unsigned int i;
654 
655 	for (i = 0; i < ARRAY_SIZE(ub960_formats); i++) {
656 		if (ub960_formats[i].code == code)
657 			return &ub960_formats[i];
658 	}
659 
660 	return NULL;
661 }
662 
663 struct ub960_rxport_iter {
664 	unsigned int nport;
665 	struct ub960_rxport *rxport;
666 };
667 
668 enum ub960_iter_flags {
669 	UB960_ITER_ACTIVE_ONLY = BIT(0),
670 	UB960_ITER_FPD4_ONLY = BIT(1),
671 };
672 
673 static struct ub960_rxport_iter ub960_iter_rxport(struct ub960_data *priv,
674 						  struct ub960_rxport_iter it,
675 						  enum ub960_iter_flags flags)
676 {
677 	for (; it.nport < priv->hw_data->num_rxports; it.nport++) {
678 		it.rxport = priv->rxports[it.nport];
679 
680 		if ((flags & UB960_ITER_ACTIVE_ONLY) && !it.rxport)
681 			continue;
682 
683 		if ((flags & UB960_ITER_FPD4_ONLY) &&
684 		    it.rxport->cdr_mode != RXPORT_CDR_FPD4)
685 			continue;
686 
687 		return it;
688 	}
689 
690 	it.rxport = NULL;
691 
692 	return it;
693 }
694 
695 #define for_each_rxport(priv, it)                                             \
696 	for (struct ub960_rxport_iter it =                                    \
697 		     ub960_iter_rxport(priv, (struct ub960_rxport_iter){ 0 }, \
698 				       0);                                    \
699 	     it.nport < (priv)->hw_data->num_rxports;                         \
700 	     it.nport++, it = ub960_iter_rxport(priv, it, 0))
701 
702 #define for_each_active_rxport(priv, it)                                      \
703 	for (struct ub960_rxport_iter it =                                    \
704 		     ub960_iter_rxport(priv, (struct ub960_rxport_iter){ 0 }, \
705 				       UB960_ITER_ACTIVE_ONLY);               \
706 	     it.nport < (priv)->hw_data->num_rxports;                         \
707 	     it.nport++, it = ub960_iter_rxport(priv, it,                     \
708 						UB960_ITER_ACTIVE_ONLY))
709 
710 #define for_each_active_rxport_fpd4(priv, it)                                 \
711 	for (struct ub960_rxport_iter it =                                    \
712 		     ub960_iter_rxport(priv, (struct ub960_rxport_iter){ 0 }, \
713 				       UB960_ITER_ACTIVE_ONLY |               \
714 					       UB960_ITER_FPD4_ONLY);         \
715 	     it.nport < (priv)->hw_data->num_rxports;                         \
716 	     it.nport++, it = ub960_iter_rxport(priv, it,                     \
717 						UB960_ITER_ACTIVE_ONLY |      \
718 							UB960_ITER_FPD4_ONLY))
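
/*
 * Usage sketch for the iterator macros above; the loop variable is
 * declared by the macro itself:
 *
 *	for_each_active_rxport(priv, it)
 *		dev_dbg(dev, "rx%u is active\n", it.nport);
 *
 * Note that with plain for_each_rxport() the it.rxport pointer may be
 * NULL and must be checked by the loop body.
 */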
719 
720 /* -----------------------------------------------------------------------------
721  * Basic device access
722  */
723 
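/*
 * The register access helpers below take an optional 'err' pointer to
 * support chained error handling: if *err is already non-zero the access
 * is skipped and the earlier error returned, and on failure the error is
 * stored to *err. This lets a caller issue a sequence of accesses and
 * check for failure only once at the end, e.g. (a sketch; REG_FOO and
 * REG_BAR are placeholders):
 *
 *	int ret = 0;
 *
 *	ub960_write(priv, REG_FOO, 0x01, &ret);
 *	ub960_write(priv, REG_BAR, 0x02, &ret);
 *	if (ret)
 *		return ret;
 *
 * See ub960_rxport_clear_errors() for a real user of this pattern.
 */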
724 static int ub960_read(struct ub960_data *priv, u8 reg, u8 *val, int *err)
725 {
726 	struct device *dev = &priv->client->dev;
727 	unsigned int v;
728 	int ret;
729 
730 	if (err && *err)
731 		return *err;
732 
733 	mutex_lock(&priv->reg_lock);
734 
735 	ret = regmap_read(priv->regmap, reg, &v);
736 	if (ret) {
737 		dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
738 			__func__, reg, ret);
739 		goto out_unlock;
740 	}
741 
742 	*val = v;
743 
744 out_unlock:
745 	mutex_unlock(&priv->reg_lock);
746 
747 	if (ret && err)
748 		*err = ret;
749 
750 	return ret;
751 }
752 
753 static int ub960_write(struct ub960_data *priv, u8 reg, u8 val, int *err)
754 {
755 	struct device *dev = &priv->client->dev;
756 	int ret;
757 
758 	if (err && *err)
759 		return *err;
760 
761 	mutex_lock(&priv->reg_lock);
762 
763 	ret = regmap_write(priv->regmap, reg, val);
764 	if (ret)
765 		dev_err(dev, "%s: cannot write register 0x%02x (%d)!\n",
766 			__func__, reg, ret);
767 
768 	mutex_unlock(&priv->reg_lock);
769 
770 	if (ret && err)
771 		*err = ret;
772 
773 	return ret;
774 }
775 
776 static int ub960_update_bits(struct ub960_data *priv, u8 reg, u8 mask, u8 val,
777 			     int *err)
778 {
779 	struct device *dev = &priv->client->dev;
780 	int ret;
781 
782 	if (err && *err)
783 		return *err;
784 
785 	mutex_lock(&priv->reg_lock);
786 
787 	ret = regmap_update_bits(priv->regmap, reg, mask, val);
788 	if (ret)
789 		dev_err(dev, "%s: cannot update register 0x%02x (%d)!\n",
790 			__func__, reg, ret);
791 
792 	mutex_unlock(&priv->reg_lock);
793 
794 	if (ret && err)
795 		*err = ret;
796 
797 	return ret;
798 }
799 
800 static int ub960_read16(struct ub960_data *priv, u8 reg, u16 *val, int *err)
801 {
802 	struct device *dev = &priv->client->dev;
803 	__be16 __v;
804 	int ret;
805 
806 	if (err && *err)
807 		return *err;
808 
809 	mutex_lock(&priv->reg_lock);
810 
811 	ret = regmap_bulk_read(priv->regmap, reg, &__v, sizeof(__v));
812 	if (ret) {
813 		dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
814 			__func__, reg, ret);
815 		goto out_unlock;
816 	}
817 
818 	*val = be16_to_cpu(__v);
819 
820 out_unlock:
821 	mutex_unlock(&priv->reg_lock);
822 
823 	if (ret && err)
824 		*err = ret;
825 
826 	return ret;
827 }
828 
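/*
 * Select the RX port whose paged (UB960_RR_*) registers are accessed
 * next. The written value sets the read-back port select in the high
 * nibble and the per-port write enable bit in the low nibble. The last
 * selection is cached in priv->reg_current to avoid redundant writes; the
 * cache is only coherent while priv->reg_lock is held.
 */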
829 static int ub960_rxport_select(struct ub960_data *priv, u8 nport)
830 {
831 	struct device *dev = &priv->client->dev;
832 	int ret;
833 
834 	lockdep_assert_held(&priv->reg_lock);
835 
836 	if (priv->reg_current.rxport == nport)
837 		return 0;
838 
839 	ret = regmap_write(priv->regmap, UB960_SR_FPD3_PORT_SEL,
840 			   (nport << 4) | BIT(nport));
841 	if (ret) {
842 		dev_err(dev, "%s: cannot select rxport %d (%d)!\n", __func__,
843 			nport, ret);
844 		return ret;
845 	}
846 
847 	priv->reg_current.rxport = nport;
848 
849 	return 0;
850 }
851 
852 static int ub960_rxport_read(struct ub960_data *priv, u8 nport, u8 reg,
853 			     u8 *val, int *err)
854 {
855 	struct device *dev = &priv->client->dev;
856 	unsigned int v;
857 	int ret;
858 
859 	if (err && *err)
860 		return *err;
861 
862 	mutex_lock(&priv->reg_lock);
863 
864 	ret = ub960_rxport_select(priv, nport);
865 	if (ret)
866 		goto out_unlock;
867 
868 	ret = regmap_read(priv->regmap, reg, &v);
869 	if (ret) {
870 		dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
871 			__func__, reg, ret);
872 		goto out_unlock;
873 	}
874 
875 	*val = v;
876 
877 out_unlock:
878 	mutex_unlock(&priv->reg_lock);
879 
880 	if (ret && err)
881 		*err = ret;
882 
883 	return ret;
884 }
885 
886 static int ub960_rxport_write(struct ub960_data *priv, u8 nport, u8 reg,
887 			      u8 val, int *err)
888 {
889 	struct device *dev = &priv->client->dev;
890 	int ret;
891 
892 	if (err && *err)
893 		return *err;
894 
895 	mutex_lock(&priv->reg_lock);
896 
897 	ret = ub960_rxport_select(priv, nport);
898 	if (ret)
899 		goto out_unlock;
900 
901 	ret = regmap_write(priv->regmap, reg, val);
902 	if (ret)
903 		dev_err(dev, "%s: cannot write register 0x%02x (%d)!\n",
904 			__func__, reg, ret);
905 
906 out_unlock:
907 	mutex_unlock(&priv->reg_lock);
908 
909 	if (ret && err)
910 		*err = ret;
911 
912 	return ret;
913 }
914 
915 static int ub960_rxport_update_bits(struct ub960_data *priv, u8 nport, u8 reg,
916 				    u8 mask, u8 val, int *err)
917 {
918 	struct device *dev = &priv->client->dev;
919 	int ret;
920 
921 	if (err && *err)
922 		return *err;
923 
924 	mutex_lock(&priv->reg_lock);
925 
926 	ret = ub960_rxport_select(priv, nport);
927 	if (ret)
928 		goto out_unlock;
929 
930 	ret = regmap_update_bits(priv->regmap, reg, mask, val);
931 	if (ret)
932 		dev_err(dev, "%s: cannot update register 0x%02x (%d)!\n",
933 			__func__, reg, ret);
934 
935 out_unlock:
936 	mutex_unlock(&priv->reg_lock);
937 
938 	if (ret && err)
939 		*err = ret;
940 
941 	return ret;
942 }
943 
944 static int ub960_rxport_read16(struct ub960_data *priv, u8 nport, u8 reg,
945 			       u16 *val, int *err)
946 {
947 	struct device *dev = &priv->client->dev;
948 	__be16 __v;
949 	int ret;
950 
951 	if (err && *err)
952 		return *err;
953 
954 	mutex_lock(&priv->reg_lock);
955 
956 	ret = ub960_rxport_select(priv, nport);
957 	if (ret)
958 		goto out_unlock;
959 
960 	ret = regmap_bulk_read(priv->regmap, reg, &__v, sizeof(__v));
961 	if (ret) {
962 		dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
963 			__func__, reg, ret);
964 		goto out_unlock;
965 	}
966 
967 	*val = be16_to_cpu(__v);
968 
969 out_unlock:
970 	mutex_unlock(&priv->reg_lock);
971 
972 	if (ret && err)
973 		*err = ret;
974 
975 	return ret;
976 }
977 
978 static int ub960_txport_select(struct ub960_data *priv, u8 nport)
979 {
980 	struct device *dev = &priv->client->dev;
981 	int ret;
982 
983 	lockdep_assert_held(&priv->reg_lock);
984 
985 	if (priv->reg_current.txport == nport)
986 		return 0;
987 
988 	ret = regmap_write(priv->regmap, UB960_SR_CSI_PORT_SEL,
989 			   (nport << 4) | BIT(nport));
990 	if (ret) {
991 		dev_err(dev, "%s: cannot select tx port %d (%d)!\n", __func__,
992 			nport, ret);
993 		return ret;
994 	}
995 
996 	priv->reg_current.txport = nport;
997 
998 	return 0;
999 }
1000 
1001 static int ub960_txport_read(struct ub960_data *priv, u8 nport, u8 reg,
1002 			     u8 *val, int *err)
1003 {
1004 	struct device *dev = &priv->client->dev;
1005 	unsigned int v;
1006 	int ret;
1007 
1008 	if (err && *err)
1009 		return *err;
1010 
1011 	mutex_lock(&priv->reg_lock);
1012 
1013 	ret = ub960_txport_select(priv, nport);
1014 	if (ret)
1015 		goto out_unlock;
1016 
1017 	ret = regmap_read(priv->regmap, reg, &v);
1018 	if (ret) {
1019 		dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
1020 			__func__, reg, ret);
1021 		goto out_unlock;
1022 	}
1023 
1024 	*val = v;
1025 
1026 out_unlock:
1027 	mutex_unlock(&priv->reg_lock);
1028 
1029 	if (ret && err)
1030 		*err = ret;
1031 
1032 	return ret;
1033 }
1034 
1035 static int ub960_txport_write(struct ub960_data *priv, u8 nport, u8 reg,
1036 			      u8 val, int *err)
1037 {
1038 	struct device *dev = &priv->client->dev;
1039 	int ret;
1040 
1041 	if (err && *err)
1042 		return *err;
1043 
1044 	mutex_lock(&priv->reg_lock);
1045 
1046 	ret = ub960_txport_select(priv, nport);
1047 	if (ret)
1048 		goto out_unlock;
1049 
1050 	ret = regmap_write(priv->regmap, reg, val);
1051 	if (ret)
1052 		dev_err(dev, "%s: cannot write register 0x%02x (%d)!\n",
1053 			__func__, reg, ret);
1054 
1055 out_unlock:
1056 	mutex_unlock(&priv->reg_lock);
1057 
1058 	if (ret && err)
1059 		*err = ret;
1060 
1061 	return ret;
1062 }
1063 
1064 static int ub960_txport_update_bits(struct ub960_data *priv, u8 nport, u8 reg,
1065 				    u8 mask, u8 val, int *err)
1066 {
1067 	struct device *dev = &priv->client->dev;
1068 	int ret;
1069 
1070 	if (err && *err)
1071 		return *err;
1072 
1073 	mutex_lock(&priv->reg_lock);
1074 
1075 	ret = ub960_txport_select(priv, nport);
1076 	if (ret)
1077 		goto out_unlock;
1078 
1079 	ret = regmap_update_bits(priv->regmap, reg, mask, val);
1080 	if (ret)
1081 		dev_err(dev, "%s: cannot update register 0x%02x (%d)!\n",
1082 			__func__, reg, ret);
1083 
1084 out_unlock:
1085 	mutex_unlock(&priv->reg_lock);
1086 
1087 	if (ret && err)
1088 		*err = ret;
1089 
1090 	return ret;
1091 }
1092 
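/*
 * Indirect register access: the target block is selected by writing its
 * index to the block select field of UB960_SR_IND_ACC_CTL (hence
 * 'block << 2'), after which the register offset and data are transferred
 * through UB960_SR_IND_ACC_ADDR and UB960_SR_IND_ACC_DATA.
 */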
1093 static int ub960_select_ind_reg_block(struct ub960_data *priv, u8 block)
1094 {
1095 	struct device *dev = &priv->client->dev;
1096 	int ret;
1097 
1098 	lockdep_assert_held(&priv->reg_lock);
1099 
1100 	if (priv->reg_current.indirect_target == block)
1101 		return 0;
1102 
1103 	ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_CTL, block << 2);
1104 	if (ret) {
1105 		dev_err(dev, "%s: cannot select indirect target %u (%d)!\n",
1106 			__func__, block, ret);
1107 		return ret;
1108 	}
1109 
1110 	priv->reg_current.indirect_target = block;
1111 
1112 	return 0;
1113 }
1114 
1115 static int ub960_read_ind(struct ub960_data *priv, u8 block, u8 reg, u8 *val,
1116 			  int *err)
1117 {
1118 	struct device *dev = &priv->client->dev;
1119 	unsigned int v;
1120 	int ret;
1121 
1122 	if (err && *err)
1123 		return *err;
1124 
1125 	mutex_lock(&priv->reg_lock);
1126 
1127 	ret = ub960_select_ind_reg_block(priv, block);
1128 	if (ret)
1129 		goto out_unlock;
1130 
1131 	ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_ADDR, reg);
1132 	if (ret) {
1133 		dev_err(dev,
1134 			"Write to IND_ACC_ADDR failed when reading %u:0x%02x: %d\n",
1135 			block, reg, ret);
1136 		goto out_unlock;
1137 	}
1138 
1139 	ret = regmap_read(priv->regmap, UB960_SR_IND_ACC_DATA, &v);
1140 	if (ret) {
1141 		dev_err(dev,
1142 			"Read from IND_ACC_DATA failed when reading %u:0x%02x: %d\n",
1143 			block, reg, ret);
1144 		goto out_unlock;
1145 	}
1146 
1147 	*val = v;
1148 
1149 out_unlock:
1150 	mutex_unlock(&priv->reg_lock);
1151 
1152 	if (ret && err)
1153 		*err = ret;
1154 
1155 	return ret;
1156 }
1157 
1158 static int ub960_write_ind(struct ub960_data *priv, u8 block, u8 reg, u8 val,
1159 			   int *err)
1160 {
1161 	struct device *dev = &priv->client->dev;
1162 	int ret;
1163 
1164 	if (err && *err)
1165 		return *err;
1166 
1167 	mutex_lock(&priv->reg_lock);
1168 
1169 	ret = ub960_select_ind_reg_block(priv, block);
1170 	if (ret)
1171 		goto out_unlock;
1172 
1173 	ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_ADDR, reg);
1174 	if (ret) {
1175 		dev_err(dev,
1176 			"Write to IND_ACC_ADDR failed when writing %u:0x%02x: %d\n",
1177 			block, reg, ret);
1178 		goto out_unlock;
1179 	}
1180 
1181 	ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_DATA, val);
1182 	if (ret) {
1183 		dev_err(dev,
1184 			"Write to IND_ACC_DATA failed when writing %u:0x%02x: %d\n",
1185 			block, reg, ret);
1186 		goto out_unlock;
1187 	}
1188 
1189 out_unlock:
1190 	mutex_unlock(&priv->reg_lock);
1191 
1192 	if (ret && err)
1193 		*err = ret;
1194 
1195 	return ret;
1196 }
1197 
1198 static int ub960_ind_update_bits(struct ub960_data *priv, u8 block, u8 reg,
1199 				 u8 mask, u8 val, int *err)
1200 {
1201 	struct device *dev = &priv->client->dev;
1202 	int ret;
1203 
1204 	if (err && *err)
1205 		return *err;
1206 
1207 	mutex_lock(&priv->reg_lock);
1208 
1209 	ret = ub960_select_ind_reg_block(priv, block);
1210 	if (ret)
1211 		goto out_unlock;
1212 
1213 	ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_ADDR, reg);
1214 	if (ret) {
1215 		dev_err(dev,
1216 			"Write to IND_ACC_ADDR failed when updating %u:0x%02x: %d\n",
1217 			block, reg, ret);
1218 		goto out_unlock;
1219 	}
1220 
1221 	ret = regmap_update_bits(priv->regmap, UB960_SR_IND_ACC_DATA, mask,
1222 				 val);
1223 	if (ret) {
1224 		dev_err(dev,
1225 			"Write to IND_ACC_DATA failed when updating %u:0x%02x: %d\n",
1226 			block, reg, ret);
1227 		goto out_unlock;
1228 	}
1229 
1230 out_unlock:
1231 	mutex_unlock(&priv->reg_lock);
1232 
1233 	if (ret && err)
1234 		*err = ret;
1235 
1236 	return ret;
1237 }
1238 
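/*
 * Soft reset the deserializer. With reset_regs == true, DIGITAL_RESET1 is
 * used, which (per the datasheet) also resets the register file, while
 * DIGITAL_RESET0 preserves the register contents. The reset bit
 * self-clears when the reset completes, which is what the poll below
 * waits for.
 */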
1239 static int ub960_reset(struct ub960_data *priv, bool reset_regs)
1240 {
1241 	struct device *dev = &priv->client->dev;
1242 	unsigned int v;
1243 	int ret;
1244 	u8 bit;
1245 
1246 	bit = reset_regs ? UB960_SR_RESET_DIGITAL_RESET1 :
1247 			   UB960_SR_RESET_DIGITAL_RESET0;
1248 
1249 	ret = ub960_write(priv, UB960_SR_RESET, bit, NULL);
1250 	if (ret)
1251 		return ret;
1252 
1253 	mutex_lock(&priv->reg_lock);
1254 
1255 	ret = regmap_read_poll_timeout(priv->regmap, UB960_SR_RESET, v,
1256 				       (v & bit) == 0, 2000, 100000);
1257 
1258 	mutex_unlock(&priv->reg_lock);
1259 
1260 	if (ret)
1261 		dev_err(dev, "reset failed: %d\n", ret);
1262 
1263 	return ret;
1264 }
1265 
1266 /* -----------------------------------------------------------------------------
1267  * I2C-ATR (address translator)
1268  */
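
/*
 * The deserializer can remap up to UB960_MAX_PORT_ALIASES remote I2C
 * peripherals per RX port: the real (remote) address is programmed into a
 * UB960_RR_SLAVE_ID(n) slot and the locally visible alias into the
 * matching UB960_RR_SLAVE_ALIAS(n) slot. The i2c-atr core chooses the
 * aliases; the callbacks below only program them into the hardware.
 */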
1269 
1270 static int ub960_atr_attach_addr(struct i2c_atr *atr, u32 chan_id,
1271 				 u16 addr, u16 alias)
1272 {
1273 	struct ub960_data *priv = i2c_atr_get_driver_data(atr);
1274 	struct ub960_rxport *rxport = priv->rxports[chan_id];
1275 	struct device *dev = &priv->client->dev;
1276 	unsigned int reg_idx;
1277 	int ret = 0;
1278 
1279 	guard(mutex)(&rxport->aliased_addrs_lock);
1280 
1281 	for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_addrs); reg_idx++) {
1282 		if (!rxport->aliased_addrs[reg_idx])
1283 			break;
1284 	}
1285 
1286 	if (reg_idx == ARRAY_SIZE(rxport->aliased_addrs)) {
1287 		dev_err(dev, "rx%u: alias pool exhausted\n", rxport->nport);
1288 		return -EADDRNOTAVAIL;
1289 	}
1290 
1291 	rxport->aliased_addrs[reg_idx] = addr;
1292 
1293 	ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ID(reg_idx),
1294 			   addr << 1, &ret);
1295 	ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ALIAS(reg_idx),
1296 			   alias << 1, &ret);
1297 
1298 	if (ret)
1299 		return ret;
1300 
1301 	dev_dbg(dev, "rx%u: client 0x%02x assigned alias 0x%02x at slot %u\n",
1302 		rxport->nport, addr, alias, reg_idx);
1303 
1304 	return 0;
1305 }
1306 
1307 static void ub960_atr_detach_addr(struct i2c_atr *atr, u32 chan_id,
1308 				  u16 addr)
1309 {
1310 	struct ub960_data *priv = i2c_atr_get_driver_data(atr);
1311 	struct ub960_rxport *rxport = priv->rxports[chan_id];
1312 	struct device *dev = &priv->client->dev;
1313 	unsigned int reg_idx;
1314 	int ret;
1315 
1316 	guard(mutex)(&rxport->aliased_addrs_lock);
1317 
1318 	for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_addrs); reg_idx++) {
1319 		if (rxport->aliased_addrs[reg_idx] == addr)
1320 			break;
1321 	}
1322 
1323 	if (reg_idx == ARRAY_SIZE(rxport->aliased_addrs)) {
1324 		dev_err(dev, "rx%u: client 0x%02x is not mapped!\n",
1325 			rxport->nport, addr);
1326 		return;
1327 	}
1328 
1329 	rxport->aliased_addrs[reg_idx] = 0;
1330 
1331 	ret = ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ALIAS(reg_idx),
1332 				 0, NULL);
1333 	if (ret) {
1334 		dev_err(dev, "rx%u: unable to fully unmap client 0x%02x: %d\n",
1335 			rxport->nport, addr, ret);
1336 		return;
1337 	}
1338 
1339 	dev_dbg(dev, "rx%u: client 0x%02x released at slot %u\n", rxport->nport,
1340 		addr, reg_idx);
1341 }
1342 
1343 static const struct i2c_atr_ops ub960_atr_ops = {
1344 	.attach_addr = ub960_atr_attach_addr,
1345 	.detach_addr = ub960_atr_detach_addr,
1346 };
1347 
1348 static int ub960_init_atr(struct ub960_data *priv)
1349 {
1350 	struct device *dev = &priv->client->dev;
1351 	struct i2c_adapter *parent_adap = priv->client->adapter;
1352 
1353 	priv->atr = i2c_atr_new(parent_adap, dev, &ub960_atr_ops,
1354 				priv->hw_data->num_rxports, 0);
1355 	if (IS_ERR(priv->atr))
1356 		return PTR_ERR(priv->atr);
1357 
1358 	i2c_atr_set_driver_data(priv->atr, priv);
1359 
1360 	return 0;
1361 }
1362 
1363 static void ub960_uninit_atr(struct ub960_data *priv)
1364 {
1365 	i2c_atr_delete(priv->atr);
1366 	priv->atr = NULL;
1367 }
1368 
1369 /* -----------------------------------------------------------------------------
1370  * TX ports
1371  */
1372 
1373 static int ub960_parse_dt_txport(struct ub960_data *priv,
1374 				 struct fwnode_handle *ep_fwnode,
1375 				 u8 nport)
1376 {
1377 	struct device *dev = &priv->client->dev;
1378 	struct v4l2_fwnode_endpoint vep = {};
1379 	struct ub960_txport *txport;
1380 	int ret;
1381 
1382 	txport = kzalloc(sizeof(*txport), GFP_KERNEL);
1383 	if (!txport)
1384 		return -ENOMEM;
1385 
1386 	txport->priv = priv;
1387 	txport->nport = nport;
1388 
1389 	vep.bus_type = V4L2_MBUS_CSI2_DPHY;
1390 	ret = v4l2_fwnode_endpoint_alloc_parse(ep_fwnode, &vep);
1391 	if (ret) {
1392 		dev_err(dev, "tx%u: failed to parse endpoint data\n", nport);
1393 		goto err_free_txport;
1394 	}
1395 
1396 	txport->non_continous_clk = vep.bus.mipi_csi2.flags &
1397 				    V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK;
1398 
1399 	txport->num_data_lanes = vep.bus.mipi_csi2.num_data_lanes;
1400 
1401 	if (vep.nr_of_link_frequencies != 1) {
1402 		ret = -EINVAL;
1403 		goto err_free_vep;
1404 	}
1405 
1406 	priv->tx_link_freq[0] = vep.link_frequencies[0];
1407 	priv->tx_data_rate = priv->tx_link_freq[0] * 2;
1408 
1409 	if (priv->tx_data_rate != MHZ(1600) &&
1410 	    priv->tx_data_rate != MHZ(1200) &&
1411 	    priv->tx_data_rate != MHZ(800) &&
1412 	    priv->tx_data_rate != MHZ(400)) {
1413 		dev_err(dev, "tx%u: invalid 'link-frequencies' value\n", nport);
1414 		ret = -EINVAL;
1415 		goto err_free_vep;
1416 	}
1417 
1418 	v4l2_fwnode_endpoint_free(&vep);
1419 
1420 	priv->txports[nport] = txport;
1421 
1422 	return 0;
1423 
1424 err_free_vep:
1425 	v4l2_fwnode_endpoint_free(&vep);
1426 err_free_txport:
1427 	kfree(txport);
1428 
1429 	return ret;
1430 }
1431 
1432 static int ub960_csi_handle_events(struct ub960_data *priv, u8 nport)
1433 {
1434 	struct device *dev = &priv->client->dev;
1435 	u8 csi_tx_isr;
1436 	int ret;
1437 
1438 	ret = ub960_txport_read(priv, nport, UB960_TR_CSI_TX_ISR, &csi_tx_isr,
1439 				NULL);
1440 	if (ret)
1441 		return ret;
1442 
1443 	if (csi_tx_isr & UB960_TR_CSI_TX_ISR_IS_CSI_SYNC_ERROR)
1444 		dev_warn(dev, "TX%u: CSI_SYNC_ERROR\n", nport);
1445 
1446 	if (csi_tx_isr & UB960_TR_CSI_TX_ISR_IS_CSI_PASS_ERROR)
1447 		dev_warn(dev, "TX%u: CSI_PASS_ERROR\n", nport);
1448 
1449 	return 0;
1450 }
1451 
1452 /* -----------------------------------------------------------------------------
1453  * RX ports
1454  */
1455 
1456 static int ub960_rxport_enable_vpocs(struct ub960_data *priv)
1457 {
1458 	unsigned int failed_nport;
1459 	int ret;
1460 
1461 	for_each_active_rxport(priv, it) {
1462 		if (!it.rxport->vpoc)
1463 			continue;
1464 
1465 		ret = regulator_enable(it.rxport->vpoc);
1466 		if (ret) {
1467 			failed_nport = it.nport;
1468 			goto err_disable_vpocs;
1469 		}
1470 	}
1471 
1472 	return 0;
1473 
1474 err_disable_vpocs:
1475 	while (failed_nport--) {
1476 		struct ub960_rxport *rxport = priv->rxports[failed_nport];
1477 
1478 		if (!rxport || !rxport->vpoc)
1479 			continue;
1480 
1481 		regulator_disable(rxport->vpoc);
1482 	}
1483 
1484 	return ret;
1485 }
1486 
1487 static void ub960_rxport_disable_vpocs(struct ub960_data *priv)
1488 {
1489 	for_each_active_rxport(priv, it) {
1490 		if (!it.rxport->vpoc)
1491 			continue;
1492 
1493 		regulator_disable(it.rxport->vpoc);
1494 	}
1495 }
1496 
1497 static int ub960_rxport_clear_errors(struct ub960_data *priv,
1498 				     unsigned int nport)
1499 {
1500 	int ret = 0;
1501 	u8 v;
1502 
1503 	ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1, &v, &ret);
1504 	ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2, &v, &ret);
1505 	ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS, &v, &ret);
1506 	ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS, &v, &ret);
1507 
1508 	ub960_rxport_read(priv, nport, UB960_RR_RX_PAR_ERR_HI, &v, &ret);
1509 	ub960_rxport_read(priv, nport, UB960_RR_RX_PAR_ERR_LO, &v, &ret);
1510 
1511 	ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER, &v, &ret);
1512 
1513 	return ret;
1514 }
1515 
1516 static int ub960_clear_rx_errors(struct ub960_data *priv)
1517 {
1518 	int ret;
1519 
1520 	for_each_rxport(priv, it) {
1521 		ret = ub960_rxport_clear_errors(priv, it.nport);
1522 		if (ret)
1523 			return ret;
1524 	}
1525 
1526 	return 0;
1527 }
1528 
1529 static int ub960_rxport_get_strobe_pos(struct ub960_data *priv,
1530 				       unsigned int nport, s8 *strobe_pos)
1531 {
1532 	u8 v;
1533 	u8 clk_delay, data_delay;
1534 	int ret;
1535 
1536 	ret = ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
1537 			     UB960_IR_RX_ANA_STROBE_SET_CLK, &v, NULL);
1538 	if (ret)
1539 		return ret;
1540 
1541 	clk_delay = (v & UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY) ?
1542 			    0 : UB960_MANUAL_STROBE_EXTRA_DELAY;
1543 
1544 	ret = ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
1545 			     UB960_IR_RX_ANA_STROBE_SET_DATA, &v, NULL);
1546 	if (ret)
1547 		return ret;
1548 
1549 	data_delay = (v & UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY) ?
1550 			     0 : UB960_MANUAL_STROBE_EXTRA_DELAY;
1551 
1552 	ret = ub960_rxport_read(priv, nport, UB960_RR_SFILTER_STS_0, &v, NULL);
1553 	if (ret)
1554 		return ret;
1555 
1556 	clk_delay += v & UB960_IR_RX_ANA_STROBE_SET_CLK_DELAY_MASK;
1557 
1558 	ret = ub960_rxport_read(priv, nport, UB960_RR_SFILTER_STS_1, &v, NULL);
1559 	if (ret)
1560 		return ret;
1561 
1562 	data_delay += v & UB960_IR_RX_ANA_STROBE_SET_DATA_DELAY_MASK;
1563 
1564 	*strobe_pos = data_delay - clk_delay;
1565 
1566 	return 0;
1567 }
1568 
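/*
 * Program a manual strobe position. The position is the data delay minus
 * the clock delay: negative values delay the clock, positive values delay
 * the data. Within the +-7 AEQ range the 3-bit delay field is used as-is
 * and the NO_EXTRA_DELAY bit is set; beyond that range the fixed
 * UB960_MANUAL_STROBE_EXTRA_DELAY is enabled and subtracted from the
 * programmed value.
 */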
1569 static int ub960_rxport_set_strobe_pos(struct ub960_data *priv,
1570 				       unsigned int nport, s8 strobe_pos)
1571 {
1572 	u8 clk_delay, data_delay;
1573 	int ret = 0;
1574 
1575 	clk_delay = UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY;
1576 	data_delay = UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY;
1577 
1578 	if (strobe_pos < UB960_MIN_AEQ_STROBE_POS)
1579 		clk_delay = abs(strobe_pos) - UB960_MANUAL_STROBE_EXTRA_DELAY;
1580 	else if (strobe_pos > UB960_MAX_AEQ_STROBE_POS)
1581 		data_delay = strobe_pos - UB960_MANUAL_STROBE_EXTRA_DELAY;
1582 	else if (strobe_pos < 0)
1583 		clk_delay = abs(strobe_pos) | UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY;
1584 	else if (strobe_pos > 0)
1585 		data_delay = strobe_pos | UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY;
1586 
1587 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
1588 			UB960_IR_RX_ANA_STROBE_SET_CLK, clk_delay, &ret);
1589 
1590 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
1591 			UB960_IR_RX_ANA_STROBE_SET_DATA, data_delay, &ret);
1592 
1593 	return ret;
1594 }
1595 
1596 static int ub960_rxport_set_strobe_range(struct ub960_data *priv, s8 strobe_min,
1597 					 s8 strobe_max)
1598 {
1599 	/* Convert the signed strobe pos to positive zero based value */
1600 	strobe_min -= UB960_MIN_AEQ_STROBE_POS;
1601 	strobe_max -= UB960_MIN_AEQ_STROBE_POS;
1602 
1603 	return ub960_write(priv, UB960_XR_SFILTER_CFG,
1604 			   ((u8)strobe_min << UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT) |
1605 			   ((u8)strobe_max << UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT),
1606 			   NULL);
1607 }
1608 
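/*
 * The total EQ level is the sum of the two 3-bit stage fields of
 * UB960_RR_AEQ_STATUS: stage 1 in bits 2:0 and stage 2 in bits 5:3 (hence
 * the shift below), giving at most UB960_MAX_EQ_LEVEL (7 + 7 = 14).
 */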
1609 static int ub960_rxport_get_eq_level(struct ub960_data *priv,
1610 				     unsigned int nport, u8 *eq_level)
1611 {
1612 	int ret;
1613 	u8 v;
1614 
1615 	ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_STATUS, &v, NULL);
1616 	if (ret)
1617 		return ret;
1618 
1619 	*eq_level = (v & UB960_RR_AEQ_STATUS_STATUS_1) +
1620 		    ((v & UB960_RR_AEQ_STATUS_STATUS_2) >> 3);
1621 
1622 	return 0;
1623 }
1624 
1625 static int ub960_rxport_set_eq_level(struct ub960_data *priv,
1626 				     unsigned int nport, u8 eq_level)
1627 {
1628 	u8 eq_stage_1_select_value, eq_stage_2_select_value;
1629 	const unsigned int eq_stage_max = 7;
1630 	int ret;
1631 	u8 v;
1632 
1633 	if (eq_level <= eq_stage_max) {
1634 		eq_stage_1_select_value = eq_level;
1635 		eq_stage_2_select_value = 0;
1636 	} else {
1637 		eq_stage_1_select_value = eq_stage_max;
1638 		eq_stage_2_select_value = eq_level - eq_stage_max;
1639 	}
1640 
1641 	ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v, NULL);
1642 	if (ret)
1643 		return ret;
1644 
1645 	v &= ~(UB960_RR_AEQ_BYPASS_EQ_STAGE1_VALUE_MASK |
1646 	       UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_MASK);
1647 	v |= eq_stage_1_select_value << UB960_RR_AEQ_BYPASS_EQ_STAGE1_VALUE_SHIFT;
1648 	v |= eq_stage_2_select_value << UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_SHIFT;
1649 	v |= UB960_RR_AEQ_BYPASS_ENABLE;
1650 
1651 	ret = ub960_rxport_write(priv, nport, UB960_RR_AEQ_BYPASS, v, NULL);
1652 	if (ret)
1653 		return ret;
1654 
1655 	return 0;
1656 }
1657 
1658 static int ub960_rxport_set_eq_range(struct ub960_data *priv,
1659 				     unsigned int nport, u8 eq_min, u8 eq_max)
1660 {
1661 	int ret = 0;
1662 
1663 	ub960_rxport_write(priv, nport, UB960_RR_AEQ_MIN_MAX,
1664 			   (eq_min << UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT) |
1665 			   (eq_max << UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT),
1666 			   &ret);
1667 
1668 	/* Enable AEQ min setting */
1669 	ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_CTL2,
1670 				 UB960_RR_AEQ_CTL2_SET_AEQ_FLOOR,
1671 				 UB960_RR_AEQ_CTL2_SET_AEQ_FLOOR, &ret);
1672 
1673 	return ret;
1674 }
1675 
1676 static int ub960_rxport_config_eq(struct ub960_data *priv, unsigned int nport)
1677 {
1678 	struct ub960_rxport *rxport = priv->rxports[nport];
1679 	int ret;
1680 
1681 	/* We also set common settings here. Should be moved elsewhere. */
1682 
1683 	if (priv->strobe.manual) {
1684 		/* Disable AEQ_SFILTER_EN */
1685 		ret = ub960_update_bits(priv, UB960_XR_AEQ_CTL1,
1686 					UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN, 0,
1687 					NULL);
1688 		if (ret)
1689 			return ret;
1690 	} else {
1691 		/* Enable SFILTER and error control */
1692 		ret = ub960_write(priv, UB960_XR_AEQ_CTL1,
1693 				  UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_MASK |
1694 					  UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN,
1695 				  NULL);
1696 
1697 		if (ret)
1698 			return ret;
1699 
1700 		/* Set AEQ strobe range */
1701 		ret = ub960_rxport_set_strobe_range(priv, priv->strobe.min,
1702 						    priv->strobe.max);
1703 		if (ret)
1704 			return ret;
1705 	}
1706 
1707 	/* The rest are port specific */
1708 
1709 	if (priv->strobe.manual)
1710 		ret = ub960_rxport_set_strobe_pos(priv, nport,
1711 						  rxport->eq.strobe_pos);
1712 	else
1713 		ret = ub960_rxport_set_strobe_pos(priv, nport, 0);
1714 
1715 	if (ret)
1716 		return ret;
1717 
1718 	if (rxport->eq.manual_eq) {
1719 		ret = ub960_rxport_set_eq_level(priv, nport,
1720 						rxport->eq.manual.eq_level);
1721 		if (ret)
1722 			return ret;
1723 
1724 		/* Enable AEQ Bypass */
1725 		ret = ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_BYPASS,
1726 					       UB960_RR_AEQ_BYPASS_ENABLE,
1727 					       UB960_RR_AEQ_BYPASS_ENABLE,
1728 					       NULL);
1729 		if (ret)
1730 			return ret;
1731 	} else {
1732 		ret = ub960_rxport_set_eq_range(priv, nport,
1733 						rxport->eq.aeq.eq_level_min,
1734 						rxport->eq.aeq.eq_level_max);
1735 		if (ret)
1736 			return ret;
1737 
1738 		/* Disable AEQ Bypass */
1739 		ret = ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_BYPASS,
1740 					       UB960_RR_AEQ_BYPASS_ENABLE, 0,
1741 					       NULL);
1742 		if (ret)
1743 			return ret;
1744 	}
1745 
1746 	return 0;
1747 }
1748 
1749 static int ub960_rxport_link_ok(struct ub960_data *priv, unsigned int nport,
1750 				bool *ok)
1751 {
1752 	u8 rx_port_sts1, rx_port_sts2;
1753 	u16 parity_errors;
1754 	u8 csi_rx_sts;
1755 	u8 csi_err_cnt;
1756 	u8 bcc_sts;
1757 	int ret;
1758 	bool errors;
1759 
1760 	ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1,
1761 				&rx_port_sts1, NULL);
1762 	if (ret)
1763 		return ret;
1764 
1765 	if (!(rx_port_sts1 & UB960_RR_RX_PORT_STS1_LOCK_STS)) {
1766 		*ok = false;
1767 		return 0;
1768 	}
1769 
1770 	ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2,
1771 				&rx_port_sts2, NULL);
1772 	if (ret)
1773 		return ret;
1774 
1775 	ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS, &csi_rx_sts,
1776 				NULL);
1777 	if (ret)
1778 		return ret;
1779 
1780 	ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER,
1781 				&csi_err_cnt, NULL);
1782 	if (ret)
1783 		return ret;
1784 
1785 	ret = ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS, &bcc_sts,
1786 				NULL);
1787 	if (ret)
1788 		return ret;
1789 
1790 	ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_PAR_ERR_HI,
1791 				  &parity_errors, NULL);
1792 	if (ret)
1793 		return ret;
1794 
1795 	errors = (rx_port_sts1 & UB960_RR_RX_PORT_STS1_ERROR_MASK) ||
1796 		 (rx_port_sts2 & UB960_RR_RX_PORT_STS2_ERROR_MASK) ||
1797 		 (bcc_sts & UB960_RR_BCC_STATUS_ERROR_MASK) ||
1798 		 (csi_rx_sts & UB960_RR_CSI_RX_STS_ERROR_MASK) || csi_err_cnt ||
1799 		 parity_errors;
1800 
1801 	*ok = !errors;
1802 
1803 	return 0;
1804 }
1805 
1806 static int ub960_rxport_lockup_wa_ub9702(struct ub960_data *priv)
1807 {
1808 	int ret;
1809 
1810 	/* Toggle PI_MODE to avoid possible FPD RX lockup */
1811 
1812 	ret = ub960_update_bits(priv, UB9702_RR_CHANNEL_MODE, GENMASK(4, 3),
1813 				2 << 3, NULL);
1814 	if (ret)
1815 		return ret;
1816 
1817 	usleep_range(1000, 5000);
1818 
1819 	return ub960_update_bits(priv, UB9702_RR_CHANNEL_MODE, GENMASK(4, 3),
1820 				 0, NULL);
1821 }
1822 
1823 /*
1824  * Wait for the RX ports to lock with no errors and with a stable strobe
1825  * position and EQ level.
1826  */
1827 static int ub960_rxport_wait_locks(struct ub960_data *priv,
1828 				   unsigned long port_mask,
1829 				   unsigned int *lock_mask)
1830 {
1831 	struct device *dev = &priv->client->dev;
1832 	unsigned long timeout;
1833 	unsigned int link_ok_mask;
1834 	unsigned int missing;
1835 	unsigned int loops;
1836 	u8 nport;
1837 	int ret;
1838 
1839 	if (port_mask == 0) {
1840 		if (lock_mask)
1841 			*lock_mask = 0;
1842 		return 0;
1843 	}
1844 
1845 	if (port_mask >= BIT(priv->hw_data->num_rxports))
1846 		return -EINVAL;
1847 
1848 	timeout = jiffies + msecs_to_jiffies(1000);
1849 	loops = 0;
1850 	link_ok_mask = 0;
1851 
1852 	while (time_before(jiffies, timeout)) {
1853 		bool fpd4_wa = false;
1854 		missing = 0;
1855 
1856 		for_each_set_bit(nport, &port_mask,
1857 				 priv->hw_data->num_rxports) {
1858 			struct ub960_rxport *rxport = priv->rxports[nport];
1859 			bool ok;
1860 
1861 			if (!rxport)
1862 				continue;
1863 
1864 			ret = ub960_rxport_link_ok(priv, nport, &ok);
1865 			if (ret)
1866 				return ret;
1867 
1868 			if (!ok && rxport->cdr_mode == RXPORT_CDR_FPD4)
1869 				fpd4_wa = true;
1870 
1871 			/*
1872 			 * We want the link to be ok for two consecutive loops,
1873 			 * as a link could get established just before our test
1874 			 * and drop soon after.
1875 			 */
1876 			if (!ok || !(link_ok_mask & BIT(nport)))
1877 				missing++;
1878 
1879 			if (ok)
1880 				link_ok_mask |= BIT(nport);
1881 			else
1882 				link_ok_mask &= ~BIT(nport);
1883 		}
1884 
1885 		loops++;
1886 
1887 		if (missing == 0)
1888 			break;
1889 
1890 		if (fpd4_wa) {
1891 			ret = ub960_rxport_lockup_wa_ub9702(priv);
1892 			if (ret)
1893 				return ret;
1894 		}
1895 
1896 		/*
1897 		 * The sleep time of 10 ms was found by testing to give a lock
1898 		 * with a few iterations. It can be decreased if on some setups
1899 		 * the lock can be achieved much faster.
1900 		 */
1901 		fsleep(10 * USEC_PER_MSEC);
1902 	}
1903 
1904 	if (lock_mask)
1905 		*lock_mask = link_ok_mask;
1906 
1907 	dev_dbg(dev, "Wait locks done in %u loops\n", loops);
1908 	for_each_set_bit(nport, &port_mask, priv->hw_data->num_rxports) {
1909 		struct ub960_rxport *rxport = priv->rxports[nport];
1910 		s8 strobe_pos, eq_level;
1911 		u16 v;
1912 
1913 		if (!rxport)
1914 			continue;
1915 
1916 		if (!(link_ok_mask & BIT(nport))) {
1917 			dev_dbg(dev, "\trx%u: not locked\n", nport);
1918 			continue;
1919 		}
1920 
1921 		ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_FREQ_HIGH,
1922 					  &v, NULL);
1923 
1924 		if (ret)
1925 			return ret;
1926 
1927 		if (priv->hw_data->is_ub9702) {
1928 			dev_dbg(dev, "\trx%u: locked, freq %llu Hz\n",
1929 				nport, ((u64)v * HZ_PER_MHZ) >> 8);
1930 		} else {
1931 			ret = ub960_rxport_get_strobe_pos(priv, nport,
1932 							  &strobe_pos);
1933 			if (ret)
1934 				return ret;
1935 
1936 			ret = ub960_rxport_get_eq_level(priv, nport, &eq_level);
1937 			if (ret)
1938 				return ret;
1939 
1940 			dev_dbg(dev,
1941 				"\trx%u: locked, SP: %d, EQ: %u, freq %llu Hz\n",
1942 				nport, strobe_pos, eq_level,
1943 				((u64)v * HZ_PER_MHZ) >> 8);
1944 		}
1945 	}
1946 
1947 	return 0;
1948 }
1949 
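/*
 * The UB960 back-channel bit rate is derived from REFCLK. As a worked
 * example, assuming a 25 MHz REFCLK (illustrative only; the real rate comes
 * from the "refclk" clock):
 *   RAW modes:       25 MHz * 1 / 10 = 2.5 Mbps
 *   CSI-2 sync:      25 MHz * 2 / 1  = 50 Mbps
 *   CSI-2 non-sync:  25 MHz * 2 / 5  = 10 Mbps
 * matching the 2.5/10/50 Mbps BC_FREQ_SELECT settings programmed in
 * ub960_init_rx_port_ub960().
 */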
1950 static unsigned long ub960_calc_bc_clk_rate_ub960(struct ub960_data *priv,
1951 						  struct ub960_rxport *rxport)
1952 {
1953 	unsigned int mult;
1954 	unsigned int div;
1955 
1956 	switch (rxport->rx_mode) {
1957 	case RXPORT_MODE_RAW10:
1958 	case RXPORT_MODE_RAW12_HF:
1959 	case RXPORT_MODE_RAW12_LF:
1960 		mult = 1;
1961 		div = 10;
1962 		break;
1963 
1964 	case RXPORT_MODE_CSI2_SYNC:
1965 		mult = 2;
1966 		div = 1;
1967 		break;
1968 
1969 	case RXPORT_MODE_CSI2_NONSYNC:
1970 		mult = 2;
1971 		div = 5;
1972 		break;
1973 
1974 	default:
1975 		return 0;
1976 	}
1977 
1978 	return clk_get_rate(priv->refclk) * mult / div;
1979 }
1980 
1981 static unsigned long ub960_calc_bc_clk_rate_ub9702(struct ub960_data *priv,
1982 						   struct ub960_rxport *rxport)
1983 {
1984 	switch (rxport->rx_mode) {
1985 	case RXPORT_MODE_RAW10:
1986 	case RXPORT_MODE_RAW12_HF:
1987 	case RXPORT_MODE_RAW12_LF:
1988 		return 2359400;
1989 
1990 	case RXPORT_MODE_CSI2_SYNC:
1991 		return 47187500;
1992 
1993 	case RXPORT_MODE_CSI2_NONSYNC:
1994 		return 9437500;
1995 
1996 	default:
1997 		return 0;
1998 	}
1999 }
2000 
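/*
 * The remote serializer's registers are accessed with raw SMBus byte
 * transfers to the serializer's alias address on the deserializer's I2C
 * adapter; the driver's regmap only maps the deserializer's own registers.
 */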
2001 static int ub960_rxport_serializer_write(struct ub960_rxport *rxport, u8 reg,
2002 					 u8 val, int *err)
2003 {
2004 	struct ub960_data *priv = rxport->priv;
2005 	struct device *dev = &priv->client->dev;
2006 	union i2c_smbus_data data;
2007 	int ret;
2008 
2009 	if (err && *err)
2010 		return *err;
2011 
2012 	data.byte = val;
2013 
2014 	ret = i2c_smbus_xfer(priv->client->adapter, rxport->ser.alias, 0,
2015 			     I2C_SMBUS_WRITE, reg, I2C_SMBUS_BYTE_DATA, &data);
2016 	if (ret)
2017 		dev_err(dev,
2018 			"rx%u: cannot write serializer register 0x%02x (%d)!\n",
2019 			rxport->nport, reg, ret);
2020 
2021 	if (ret && err)
2022 		*err = ret;
2023 
2024 	return ret;
2025 }
2026 
2027 static int ub960_rxport_serializer_read(struct ub960_rxport *rxport, u8 reg,
2028 					u8 *val, int *err)
2029 {
2030 	struct ub960_data *priv = rxport->priv;
2031 	struct device *dev = &priv->client->dev;
2032 	union i2c_smbus_data data = { 0 };
2033 	int ret;
2034 
2035 	if (err && *err)
2036 		return *err;
2037 
2038 	ret = i2c_smbus_xfer(priv->client->adapter, rxport->ser.alias,
2039 			     priv->client->flags, I2C_SMBUS_READ, reg,
2040 			     I2C_SMBUS_BYTE_DATA, &data);
2041 	if (ret)
2042 		dev_err(dev,
2043 			"rx%u: cannot read serializer register 0x%02x (%d)!\n",
2044 			rxport->nport, reg, ret);
2045 	else
2046 		*val = data.byte;
2047 
2048 	if (ret && err)
2049 		*err = ret;
2050 
2051 	return ret;
2052 }
2053 
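/*
 * UB953 temperature ramp sequence: read the serializer die temperature code
 * via the deserializer's sensor status register, adjust the serializer's
 * analog temperature-ramp configuration (through its indirect register page)
 * by a temperature-dependent offset, and soft-reset the serializer so the
 * PLL picks up the new values. I2C passthrough and auto-ack on the back
 * channel are enabled only for the duration of the sequence.
 */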
2054 static int ub960_serializer_temp_ramp(struct ub960_rxport *rxport)
2055 {
2056 	struct ub960_data *priv = rxport->priv;
2057 	short temp_dynamic_offset[] = {-1, -1, 0, 0, 1, 1, 1, 3};
2058 	u8 temp_dynamic_cfg;
2059 	u8 nport = rxport->nport;
2060 	u8 ser_temp_code;
2061 	int ret = 0;
2062 
2063 	/* Configure temp ramp only on UB953 */
2064 	if (!fwnode_device_is_compatible(rxport->ser.fwnode, "ti,ds90ub953-q1"))
2065 		return 0;
2066 
2067 	/* Read current serializer die temperature */
2068 	ub960_rxport_read(priv, nport, UB960_RR_SENSOR_STS_2, &ser_temp_code,
2069 			  &ret);
2070 
2071 	/* Enable I2C passthrough on back channel */
2072 	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
2073 				 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
2074 				 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH, &ret);
2075 
2076 	if (ret)
2077 		return ret;
2078 
2079 	/* Select indirect page for analog regs on the serializer */
2080 	ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_CTL,
2081 				      UB953_IND_TARGET_ANALOG << 2, &ret);
2082 
2083 	/* Set temperature ramp dynamic and static config */
2084 	ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_ADDR,
2085 				      UB953_IND_ANA_TEMP_DYNAMIC_CFG, &ret);
2086 	ub960_rxport_serializer_read(rxport, UB953_REG_IND_ACC_DATA,
2087 				     &temp_dynamic_cfg, &ret);
2088 
2089 	if (ret)
2090 		return ret;
2091 
2092 	temp_dynamic_cfg |= UB953_IND_ANA_TEMP_DYNAMIC_CFG_OV;
2093 	temp_dynamic_cfg += temp_dynamic_offset[ser_temp_code];
2094 
2095 	/* Update temp static config */
2096 	ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_ADDR,
2097 				      UB953_IND_ANA_TEMP_STATIC_CFG, &ret);
2098 	ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_DATA,
2099 				      UB953_IND_ANA_TEMP_STATIC_CFG_MASK, &ret);
2100 
2101 	/* Update temperature ramp dynamic config */
2102 	ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_ADDR,
2103 				      UB953_IND_ANA_TEMP_DYNAMIC_CFG, &ret);
2104 
2105 	/* Enable I2C auto ack on BC before we set dynamic cfg and reset */
2106 	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
2107 				 UB960_RR_BCC_CONFIG_AUTO_ACK_ALL,
2108 				 UB960_RR_BCC_CONFIG_AUTO_ACK_ALL, &ret);
2109 
2110 	ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_DATA,
2111 				      temp_dynamic_cfg, &ret);
2112 
2113 	if (ret)
2114 		return ret;
2115 
2116 	/* Soft reset to apply PLL updates */
2117 	ub960_rxport_serializer_write(rxport, UB953_REG_RESET_CTL,
2118 				      UB953_REG_RESET_CTL_DIGITAL_RESET_0,
2119 				      &ret);
2120 	msleep(20);
2121 
2122 	/* Disable I2C passthrough and auto-ack on BC */
2123 	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
2124 				 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH |
2125 					 UB960_RR_BCC_CONFIG_AUTO_ACK_ALL,
2126 				 0x0, &ret);
2127 
2128 	return ret;
2129 }
2130 
2131 static int ub960_rxport_bc_ser_config(struct ub960_rxport *rxport)
2132 {
2133 	struct ub960_data *priv = rxport->priv;
2134 	struct device *dev = &priv->client->dev;
2135 	u8 nport = rxport->nport;
2136 	int ret = 0;
2137 
2138 	/* Skip port if serializer's address is not known */
2139 	if (rxport->ser.addr < 0) {
2140 		dev_dbg(dev,
2141 			"rx%u: serializer address missing, skip configuration\n",
2142 			nport);
2143 		return 0;
2144 	}
2145 
2146 	/*
2147 	 * Note: the code here probably only works for CSI-2 serializers in
2148 	 * sync mode. To support other serializers the BC related configuration
2149 	 * should be done before calling this function.
2150 	 */
2151 
2152 	/* Enable I2C passthrough and auto-ack on BC */
2153 	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
2154 				 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH |
2155 					 UB960_RR_BCC_CONFIG_AUTO_ACK_ALL,
2156 				 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH |
2157 					 UB960_RR_BCC_CONFIG_AUTO_ACK_ALL,
2158 				 &ret);
2159 
2160 	if (ret)
2161 		return ret;
2162 
2163 	/* Disable BC alternate mode auto detect */
2164 	ub960_rxport_serializer_write(rxport, UB971_ENH_BC_CHK, 0x02, &ret);
2165 	/* Decrease link detect timer */
2166 	ub960_rxport_serializer_write(rxport, UB953_REG_BC_CTRL, 0x06, &ret);
2167 
2168 	/* Disable I2C passthrough and auto-ack on BC */
2169 	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
2170 				 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH |
2171 					 UB960_RR_BCC_CONFIG_AUTO_ACK_ALL,
2172 				 0x0, &ret);
2173 
2174 	return ret;
2175 }
2176 
2177 static int ub960_rxport_add_serializer(struct ub960_data *priv, u8 nport)
2178 {
2179 	struct ub960_rxport *rxport = priv->rxports[nport];
2180 	struct device *dev = &priv->client->dev;
2181 	struct ds90ub9xx_platform_data *ser_pdata = &rxport->ser.pdata;
2182 	struct i2c_board_info ser_info = {
2183 		.fwnode = rxport->ser.fwnode,
2184 		.platform_data = ser_pdata,
2185 	};
2186 
2187 	ser_pdata->port = nport;
2188 	ser_pdata->atr = priv->atr;
2189 	if (priv->hw_data->is_ub9702)
2190 		ser_pdata->bc_rate = ub960_calc_bc_clk_rate_ub9702(priv, rxport);
2191 	else
2192 		ser_pdata->bc_rate = ub960_calc_bc_clk_rate_ub960(priv, rxport);
2193 
2194 	/*
2195 	 * The serializer is added under the same i2c adapter as the
2196 	 * deserializer. This is not quite right, as the serializer is behind
2197 	 * the FPD-Link.
2198 	 */
2199 	ser_info.addr = rxport->ser.alias;
2200 	rxport->ser.client =
2201 		i2c_new_client_device(priv->client->adapter, &ser_info);
2202 	if (IS_ERR(rxport->ser.client)) {
2203 		dev_err(dev, "rx%u: cannot add %s i2c device", nport,
2204 			ser_info.type);
2205 		return PTR_ERR(rxport->ser.client);
2206 	}
2207 
2208 	dev_dbg(dev, "rx%u: remote serializer at alias 0x%02x (%u-%04x)\n",
2209 		nport, rxport->ser.client->addr,
2210 		rxport->ser.client->adapter->nr, rxport->ser.client->addr);
2211 
2212 	return 0;
2213 }
2214 
2215 static void ub960_rxport_remove_serializer(struct ub960_data *priv, u8 nport)
2216 {
2217 	struct ub960_rxport *rxport = priv->rxports[nport];
2218 
2219 	i2c_unregister_device(rxport->ser.client);
2220 	rxport->ser.client = NULL;
2221 }
2222 
2223 /* Add serializer i2c devices for all initialized ports */
2224 static int ub960_rxport_add_serializers(struct ub960_data *priv)
2225 {
2226 	unsigned int failed_nport;
2227 	int ret;
2228 
2229 	for_each_active_rxport(priv, it) {
2230 		ret = ub960_rxport_add_serializer(priv, it.nport);
2231 		if (ret) {
2232 			failed_nport = it.nport;
2233 			goto err_remove_sers;
2234 		}
2235 	}
2236 
2237 	return 0;
2238 
2239 err_remove_sers:
2240 	while (failed_nport--) {
2241 		struct ub960_rxport *rxport = priv->rxports[failed_nport];
2242 
2243 		if (!rxport)
2244 			continue;
2245 
2246 		ub960_rxport_remove_serializer(priv, failed_nport);
2247 	}
2248 
2249 	return ret;
2250 }
2251 
2252 static void ub960_rxport_remove_serializers(struct ub960_data *priv)
2253 {
2254 	for_each_active_rxport(priv, it)
2255 		ub960_rxport_remove_serializer(priv, it.nport);
2256 }
2257 
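/*
 * CSI_CTL bits 5:4 encode the lane count as (4 - lanes): 4 lanes -> 0b00,
 * 3 -> 0b01, 2 -> 0b10, 1 -> 0b11, hence the (4 - num_data_lanes) << 4
 * below.
 */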
2258 static int ub960_init_tx_port(struct ub960_data *priv,
2259 			      struct ub960_txport *txport)
2260 {
2261 	unsigned int nport = txport->nport;
2262 	u8 csi_ctl = 0;
2263 
2264 	/*
2265 	 * From the datasheet: "initial CSI Skew-Calibration
2266 	 * sequence [...] should be set when operating at 1.6 Gbps"
2267 	 */
2268 	if (priv->tx_data_rate == MHZ(1600))
2269 		csi_ctl |= UB960_TR_CSI_CTL_CSI_CAL_EN;
2270 
2271 	csi_ctl |= (4 - txport->num_data_lanes) << 4;
2272 
2273 	if (!txport->non_continous_clk)
2274 		csi_ctl |= UB960_TR_CSI_CTL_CSI_CONTS_CLOCK;
2275 
2276 	return ub960_txport_write(priv, nport, UB960_TR_CSI_CTL, csi_ctl, NULL);
2277 }
2278 
2279 static int ub960_init_tx_ports_ub960(struct ub960_data *priv)
2280 {
2281 	u8 speed_select;
2282 
2283 	switch (priv->tx_data_rate) {
2284 	case MHZ(400):
2285 		speed_select = 3;
2286 		break;
2287 	case MHZ(800):
2288 		speed_select = 2;
2289 		break;
2290 	case MHZ(1200):
2291 		speed_select = 1;
2292 		break;
2293 	case MHZ(1600):
2294 	default:
2295 		speed_select = 0;
2296 		break;
2297 	}
2298 
2299 	return ub960_write(priv, UB960_SR_CSI_PLL_CTL, speed_select, NULL);
2300 }
2301 
2302 static int ub960_init_tx_ports_ub9702(struct ub960_data *priv)
2303 {
2304 	u8 speed_select;
2305 	u8 ana_pll_div;
2306 	u8 pll_div;
2307 	int ret = 0;
2308 
2309 	switch (priv->tx_data_rate) {
2310 	case MHZ(400):
2311 		speed_select = 3;
2312 		pll_div = 0x10;
2313 		ana_pll_div = 0xa2;
2314 		break;
2315 	case MHZ(800):
2316 		speed_select = 2;
2317 		pll_div = 0x10;
2318 		ana_pll_div = 0x92;
2319 		break;
2320 	case MHZ(1200):
2321 		speed_select = 1;
2322 		pll_div = 0x18;
2323 		ana_pll_div = 0x90;
2324 		break;
2325 	case MHZ(1500):
2326 		speed_select = 0;
2327 		pll_div = 0x0f;
2328 		ana_pll_div = 0x82;
2329 		break;
2330 	case MHZ(1600):
2331 	default:
2332 		speed_select = 0;
2333 		pll_div = 0x10;
2334 		ana_pll_div = 0x82;
2335 		break;
2336 	case MHZ(2500):
2337 		speed_select = 0x10;
2338 		pll_div = 0x19;
2339 		ana_pll_div = 0x80;
2340 		break;
2341 	}
2342 
2343 	ub960_write(priv, UB960_SR_CSI_PLL_CTL, speed_select, &ret);
2344 	ub960_write(priv, UB9702_SR_CSI_PLL_DIV, pll_div, &ret);
2345 	ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA,
2346 			UB9702_IR_CSI_ANA_CSIPLL_REG_1, ana_pll_div, &ret);
2347 
2348 	return ret;
2349 }
2350 
2351 static int ub960_init_tx_ports(struct ub960_data *priv)
2352 {
2353 	int ret;
2354 
2355 	if (priv->hw_data->is_ub9702)
2356 		ret = ub960_init_tx_ports_ub9702(priv);
2357 	else
2358 		ret = ub960_init_tx_ports_ub960(priv);
2359 
2360 	if (ret)
2361 		return ret;
2362 
2363 	for (unsigned int nport = 0; nport < priv->hw_data->num_txports;
2364 	     nport++) {
2365 		struct ub960_txport *txport = priv->txports[nport];
2366 
2367 		if (!txport)
2368 			continue;
2369 
2370 		ret = ub960_init_tx_port(priv, txport);
2371 		if (ret)
2372 			return ret;
2373 	}
2374 
2375 	return 0;
2376 }
2377 
2378 static int ub960_init_rx_port_ub960(struct ub960_data *priv,
2379 				    struct ub960_rxport *rxport)
2380 {
2381 	unsigned int nport = rxport->nport;
2382 	u32 bc_freq_val;
2383 	int ret = 0;
2384 
2385 	/*
2386 	 * Back channel frequency select.
2387 	 * Override FREQ_SELECT from the strap.
2388 	 * 0 - 2.5 Mbps (DS90UB913A-Q1 / DS90UB933-Q1)
2389 	 * 2 - 10 Mbps
2390 	 * 6 - 50 Mbps (DS90UB953-Q1)
2391 	 *
2392 	 * Note that changing this setting will result in some errors on the back
2393 	 * channel for a short period of time.
2394 	 */
2395 
2396 	switch (rxport->rx_mode) {
2397 	case RXPORT_MODE_RAW10:
2398 	case RXPORT_MODE_RAW12_HF:
2399 	case RXPORT_MODE_RAW12_LF:
2400 		bc_freq_val = 0;
2401 		break;
2402 
2403 	case RXPORT_MODE_CSI2_NONSYNC:
2404 		bc_freq_val = 2;
2405 		break;
2406 
2407 	case RXPORT_MODE_CSI2_SYNC:
2408 		bc_freq_val = 6;
2409 		break;
2410 
2411 	default:
2412 		return -EINVAL;
2413 	}
2414 
2415 	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
2416 				 UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK,
2417 				 bc_freq_val, &ret);
2418 
2419 	switch (rxport->rx_mode) {
2420 	case RXPORT_MODE_RAW10:
2421 		/* FPD3_MODE = RAW10 Mode (DS90UB913A-Q1 / DS90UB933-Q1 compatible) */
2422 		ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG,
2423 					 UB960_RR_PORT_CONFIG_FPD3_MODE_MASK,
2424 					 0x3, &ret);
2425 
2426 		/*
2427 		 * RAW10_8BIT_CTL = 0b10 : 8-bit processing using upper 8 bits
2428 		 */
2429 		ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2,
2430 			UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_MASK,
2431 			0x2 << UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_SHIFT,
2432 			&ret);
2433 
2434 		break;
2435 
2436 	case RXPORT_MODE_RAW12_HF:
2437 	case RXPORT_MODE_RAW12_LF:
2438 		/* Not implemented */
2439 		return -EINVAL;
2440 
2441 	case RXPORT_MODE_CSI2_SYNC:
2442 	case RXPORT_MODE_CSI2_NONSYNC:
2443 		/* CSI-2 Mode (DS90UB953-Q1 compatible) */
2444 		ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG, 0x3,
2445 					 0x0, &ret);
2446 
2447 		break;
2448 	}
2449 
2450 	/* LV_POLARITY & FV_POLARITY */
2451 	ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2, 0x3,
2452 				 rxport->lv_fv_pol, &ret);
2453 
2454 	/* Enable all interrupt sources from this port */
2455 	ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_HI, 0x07, &ret);
2456 	ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_LO, 0x7f, &ret);
2457 
2458 	/* Enable I2C_PASS_THROUGH */
2459 	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
2460 				 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
2461 				 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH, &ret);
2462 
2463 	/* Enable I2C communication to the serializer via the alias addr */
2464 	ub960_rxport_write(priv, nport, UB960_RR_SER_ALIAS_ID,
2465 			   rxport->ser.alias << 1, &ret);
2466 
2467 	/* Configure EQ related settings */
2468 	ub960_rxport_config_eq(priv, nport);
2469 
2470 	/* Enable RX port */
2471 	ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport),
2472 			  &ret);
2473 
2474 	return ret;
2475 }
2476 
2477 static int ub960_init_rx_ports_ub960(struct ub960_data *priv)
2478 {
2479 	struct device *dev = &priv->client->dev;
2480 	unsigned int port_lock_mask;
2481 	unsigned int port_mask;
2482 	int ret;
2483 
2484 	for_each_active_rxport(priv, it) {
2485 		ret = ub960_init_rx_port_ub960(priv, it.rxport);
2486 		if (ret)
2487 			return ret;
2488 	}
2489 
2490 	ret = ub960_reset(priv, false);
2491 	if (ret)
2492 		return ret;
2493 
2494 	port_mask = 0;
2495 
2496 	for_each_active_rxport(priv, it)
2497 		port_mask |= BIT(it.nport);
2498 
2499 	ret = ub960_rxport_wait_locks(priv, port_mask, &port_lock_mask);
2500 	if (ret)
2501 		return ret;
2502 
2503 	if (port_mask != port_lock_mask) {
2504 		ret = -EIO;
2505 		dev_err_probe(dev, ret, "Failed to lock all RX ports\n");
2506 		return ret;
2507 	}
2508 
2509 	/* Set temperature ramp on serializer */
2510 	for_each_active_rxport(priv, it) {
2511 		ret = ub960_serializer_temp_ramp(it.rxport);
2512 		if (ret)
2513 			return ret;
2514 
2515 		ub960_rxport_update_bits(priv, it.nport, UB960_RR_BCC_CONFIG,
2516 					 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
2517 					 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
2518 					 &ret);
2519 		if (ret)
2520 			return ret;
2521 	}
2522 
2523 	/*
2524 	 * Clear any errors caused by switching the RX port settings while
2525 	 * probing.
2526 	 */
2527 	ret = ub960_clear_rx_errors(priv);
2528 	if (ret)
2529 		return ret;
2530 
2531 	return 0;
2532 }
2533 
2534 /*
2535  * UB9702 specific initial RX port configuration
2536  */
2537 
2538 static int ub960_turn_off_rxport_ub9702(struct ub960_data *priv,
2539 					unsigned int nport)
2540 {
2541 	int ret = 0;
2542 
2543 	/* Disable RX port */
2544 	ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), 0, &ret);
2545 
2546 	/* Disable FPD Rx and FPD BC CMR */
2547 	ub960_rxport_write(priv, nport, UB9702_RR_RX_CTL_2, 0x1b, &ret);
2548 
2549 	/* Disable FPD BC Tx */
2550 	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG, BIT(4), 0,
2551 				 &ret);
2552 
2553 	/* Disable internal RX blocks */
2554 	ub960_rxport_write(priv, nport, UB9702_RR_RX_CTL_1, 0x15, &ret);
2555 
2556 	/* Disable AEQ */
2557 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
2558 			UB9702_IR_RX_ANA_AEQ_CFG_2, 0x03, &ret);
2559 
2560 	/* PI disabled and oDAC disabled */
2561 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
2562 			UB9702_IR_RX_ANA_AEQ_CFG_4, 0x09, &ret);
2563 
2564 	/* AEQ configured for disabled link */
2565 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
2566 			UB9702_IR_RX_ANA_AEQ_CFG_1, 0x20, &ret);
2567 
2568 	/* disable AEQ clock and DFE */
2569 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
2570 			UB9702_IR_RX_ANA_AEQ_CFG_3, 0x45, &ret);
2571 
2572 	/* Powerdown FPD3 CDR */
2573 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
2574 			UB9702_IR_RX_ANA_FPD3_CDR_CTRL_SEL_5, 0x82, &ret);
2575 
2576 	return ret;
2577 }
2578 
2579 static int ub960_set_bc_drv_config_ub9702(struct ub960_data *priv,
2580 					  unsigned int nport)
2581 {
2582 	u8 fpd_bc_ctl0;
2583 	u8 fpd_bc_ctl1;
2584 	u8 fpd_bc_ctl2;
2585 	int ret = 0;
2586 
2587 	if (priv->rxports[nport]->cdr_mode == RXPORT_CDR_FPD4) {
2588 		/* Set FPD PBC drv into FPD IV mode */
2589 
2590 		fpd_bc_ctl0 = 0;
2591 		fpd_bc_ctl1 = 0;
2592 		fpd_bc_ctl2 = 0;
2593 	} else {
2594 		/* Set FPD PBC drv into FPD III mode */
2595 
2596 		fpd_bc_ctl0 = 2;
2597 		fpd_bc_ctl1 = 1;
2598 		fpd_bc_ctl2 = 5;
2599 	}
2600 
2601 	ub960_ind_update_bits(priv, UB960_IND_TARGET_RX_ANA(nport),
2602 			      UB9702_IR_RX_ANA_FPD_BC_CTL0, GENMASK(7, 5),
2603 			      fpd_bc_ctl0 << 5, &ret);
2604 
2605 	ub960_ind_update_bits(priv, UB960_IND_TARGET_RX_ANA(nport),
2606 			      UB9702_IR_RX_ANA_FPD_BC_CTL1, BIT(6),
2607 			      fpd_bc_ctl1 << 6, &ret);
2608 
2609 	ub960_ind_update_bits(priv, UB960_IND_TARGET_RX_ANA(nport),
2610 			      UB9702_IR_RX_ANA_FPD_BC_CTL2, GENMASK(6, 3),
2611 			      fpd_bc_ctl2 << 3, &ret);
2612 
2613 	return ret;
2614 }
2615 
2616 static int ub960_set_fpd4_sync_mode_ub9702(struct ub960_data *priv,
2617 					   unsigned int nport)
2618 {
2619 	int ret = 0;
2620 
2621 	/* FPD4 Sync Mode */
2622 	ub960_rxport_write(priv, nport, UB9702_RR_CHANNEL_MODE, 0x0, &ret);
2623 
2624 	/* BC_FREQ_SELECT = (PLL_FREQ/3200) Mbps */
2625 	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
2626 				 UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK, 6, &ret);
2627 
2628 	if (ret)
2629 		return ret;
2630 
2631 	ret = ub960_set_bc_drv_config_ub9702(priv, nport);
2632 	if (ret)
2633 		return ret;
2634 
2635 	/* Set AEQ timer to 400us/step */
2636 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
2637 			UB9702_IR_RX_ANA_SYSTEM_INIT_REG0, 0x2f, &ret);
2638 
2639 	/* Disable FPD4 Auto Recovery */
2640 	ub960_update_bits(priv, UB9702_SR_CSI_EXCLUSIVE_FWD2, GENMASK(5, 4), 0,
2641 			  &ret);
2642 
2643 	/* Enable RX port */
2644 	ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport),
2645 			  &ret);
2646 
2647 	/* Enable FPD4 Auto Recovery */
2648 	ub960_update_bits(priv, UB9702_SR_CSI_EXCLUSIVE_FWD2, GENMASK(5, 4),
2649 			  BIT(4), &ret);
2650 
2651 	return ret;
2652 }
2653 
2654 static int ub960_set_fpd4_async_mode_ub9702(struct ub960_data *priv,
2655 					    unsigned int nport)
2656 {
2657 	int ret = 0;
2658 
2659 	/* FPD4 ASync Mode */
2660 	ub960_rxport_write(priv, nport, UB9702_RR_CHANNEL_MODE, 0x1, &ret);
2661 
2662 	/* 10Mbps w/ BC enabled */
2663 	/* BC_FREQ_SELECT=(PLL_FREQ/3200) Mbps */
2664 	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
2665 				 UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK, 2, &ret);
2666 
2667 	if (ret)
2668 		return ret;
2669 
2670 	ret = ub960_set_bc_drv_config_ub9702(priv, nport);
2671 	if (ret)
2672 		return ret;
2673 
2674 	/* Set AEQ timer to 400us/step */
2675 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
2676 			UB9702_IR_RX_ANA_SYSTEM_INIT_REG0, 0x2f, &ret);
2677 
2678 	/* Disable FPD4 Auto Recover */
2679 	ub960_update_bits(priv, UB9702_SR_CSI_EXCLUSIVE_FWD2, GENMASK(5, 4), 0,
2680 			  &ret);
2681 
2682 	/* Enable RX port */
2683 	ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport),
2684 			  &ret);
2685 
2686 	/* Enable FPD4 Auto Recovery */
2687 	ub960_update_bits(priv, UB9702_SR_CSI_EXCLUSIVE_FWD2, GENMASK(5, 4),
2688 			  BIT(4), &ret);
2689 
2690 	return ret;
2691 }
2692 
2693 static int ub960_set_fpd3_sync_mode_ub9702(struct ub960_data *priv,
2694 					   unsigned int nport)
2695 {
2696 	int ret = 0;
2697 
2698 	/* FPD3 Sync Mode */
2699 	ub960_rxport_write(priv, nport, UB9702_RR_CHANNEL_MODE, 0x2, &ret);
2700 
2701 	/* BC_FREQ_SELECT=(PLL_FREQ/3200) Mbps */
2702 	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
2703 				 UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK, 6, &ret);
2704 
2705 	/* Set AEQ_LOCK_MODE = 1 */
2706 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
2707 			UB9702_IR_RX_ANA_FPD3_AEQ_CTRL_SEL_1, BIT(7), &ret);
2708 
2709 	if (ret)
2710 		return ret;
2711 
2712 	ret = ub960_set_bc_drv_config_ub9702(priv, nport);
2713 	if (ret)
2714 		return ret;
2715 
2716 	/* Enable RX port */
2717 	ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport),
2718 			  &ret);
2719 
2720 	return ret;
2721 }
2722 
2723 static int ub960_set_raw10_dvp_mode_ub9702(struct ub960_data *priv,
2724 					   unsigned int nport)
2725 {
2726 	int ret = 0;
2727 
2728 	/* FPD3 RAW10 Mode */
2729 	ub960_rxport_write(priv, nport, UB9702_RR_CHANNEL_MODE, 0x5, &ret);
2730 
2731 	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
2732 				 UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK, 0, &ret);
2733 
2734 	/* Set AEQ_LOCK_MODE = 1 */
2735 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
2736 			UB9702_IR_RX_ANA_FPD3_AEQ_CTRL_SEL_1, BIT(7), &ret);
2737 
2738 	/*
2739 	 * RAW10_8BIT_CTL: 0b10 = 8-bit processing using the upper 8 bits,
2740 	 * 0b11 = 8-bit processing using the lower 8 bits. 0b10 is used here.
2741 	 */
2742 	ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2, 0x3 << 6,
2743 				 0x2 << 6, &ret);
2744 
2745 	/* LV_POLARITY & FV_POLARITY */
2746 	ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2, 0x3,
2747 				 priv->rxports[nport]->lv_fv_pol, &ret);
2748 
2749 	if (ret)
2750 		return ret;
2751 
2752 	ret = ub960_set_bc_drv_config_ub9702(priv, nport);
2753 	if (ret)
2754 		return ret;
2755 
2756 	/* Enable RX port */
2757 	ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport),
2758 			  &ret);
2759 
2760 	return ret;
2761 }
2762 
2763 static int ub960_configure_rx_port_ub9702(struct ub960_data *priv,
2764 					  unsigned int nport)
2765 {
2766 	struct device *dev = &priv->client->dev;
2767 	struct ub960_rxport *rxport = priv->rxports[nport];
2768 	int ret;
2769 
2770 	if (!rxport) {
2771 		ret = ub960_turn_off_rxport_ub9702(priv, nport);
2772 		if (ret)
2773 			return ret;
2774 
2775 		dev_dbg(dev, "rx%u: disabled\n", nport);
2776 		return 0;
2777 	}
2778 
2779 	switch (rxport->cdr_mode) {
2780 	case RXPORT_CDR_FPD4:
2781 		switch (rxport->rx_mode) {
2782 		case RXPORT_MODE_CSI2_SYNC:
2783 			ret = ub960_set_fpd4_sync_mode_ub9702(priv, nport);
2784 			if (ret)
2785 				return ret;
2786 
2787 			dev_dbg(dev, "rx%u: FPD-Link IV SYNC mode\n", nport);
2788 			break;
2789 		case RXPORT_MODE_CSI2_NONSYNC:
2790 			ret = ub960_set_fpd4_async_mode_ub9702(priv, nport);
2791 			if (ret)
2792 				return ret;
2793 
2794 			dev_dbg(dev, "rx%u: FPD-Link IV ASYNC mode\n", nport);
2795 			break;
2796 		default:
2797 			dev_err(dev, "rx%u: unsupported FPD4 mode %u\n", nport,
2798 				rxport->rx_mode);
2799 			return -EINVAL;
2800 		}
2801 		break;
2802 
2803 	case RXPORT_CDR_FPD3:
2804 		switch (rxport->rx_mode) {
2805 		case RXPORT_MODE_CSI2_SYNC:
2806 			ret = ub960_set_fpd3_sync_mode_ub9702(priv, nport);
2807 			if (ret)
2808 				return ret;
2809 
2810 			dev_dbg(dev, "rx%u: FPD-Link III SYNC mode\n", nport);
2811 			break;
2812 		case RXPORT_MODE_RAW10:
2813 			ret = ub960_set_raw10_dvp_mode_ub9702(priv, nport);
2814 			if (ret)
2815 				return ret;
2816 
2817 			dev_dbg(dev, "rx%u: FPD-Link III RAW10 DVP mode\n",
2818 				nport);
2819 			break;
2820 		default:
2821 			dev_err(&priv->client->dev,
2822 				"rx%u: unsupported FPD3 mode %u\n", nport,
2823 				rxport->rx_mode);
2824 			return -EINVAL;
2825 		}
2826 		break;
2827 
2828 	default:
2829 		dev_err(&priv->client->dev, "rx%u: unsupported CDR mode %u\n",
2830 			nport, rxport->cdr_mode);
2831 		return -EINVAL;
2832 	}
2833 
2834 	return 0;
2835 }
2836 
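/*
 * Lock recovery retries up to three times. If the port has not passed,
 * toggle PI_MODE and restart AEQ adaption (AEQ max 0 -> 0x23). If the port
 * is locked but the AEQ value sits above the expected limit and is still
 * changing, restart AEQ as well; a stable high AEQ is taken as a lossy
 * cable and accepted.
 */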
2837 static int ub960_lock_recovery_ub9702(struct ub960_data *priv,
2838 				      unsigned int nport)
2839 {
2840 	struct device *dev = &priv->client->dev;
2841 	/* Assumption that max AEQ should be under 16 */
2842 	const u8 rx_aeq_limit = 16;
2843 	u8 prev_aeq = 0xff;
2844 	bool rx_lock;
2845 
2846 	for (unsigned int retry = 0; retry < 3; ++retry) {
2847 		u8 port_sts1;
2848 		u8 rx_aeq;
2849 		int ret;
2850 
2851 		ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1,
2852 					&port_sts1, NULL);
2853 		if (ret)
2854 			return ret;
2855 
2856 		rx_lock = port_sts1 & UB960_RR_RX_PORT_STS1_PORT_PASS;
2857 
2858 		if (!rx_lock) {
2859 			ret = ub960_rxport_lockup_wa_ub9702(priv);
2860 			if (ret)
2861 				return ret;
2862 
2863 			/* Restart AEQ by changing max to 0 --> 0x23 */
2864 			ret = ub960_write_ind(priv,
2865 					      UB960_IND_TARGET_RX_ANA(nport),
2866 					      UB9702_IR_RX_ANA_AEQ_ALP_SEL7, 0,
2867 					      NULL);
2868 			if (ret)
2869 				return ret;
2870 
2871 			msleep(20);
2872 
2873 			/* AEQ Restart */
2874 			ret = ub960_write_ind(priv,
2875 					      UB960_IND_TARGET_RX_ANA(nport),
2876 					      UB9702_IR_RX_ANA_AEQ_ALP_SEL7,
2877 					      0x23, NULL);
2878 
2879 			if (ret)
2880 				return ret;
2881 
2882 			msleep(20);
2883 			dev_dbg(dev, "rx%u: no lock, retry = %u\n", nport,
2884 				retry);
2885 
2886 			continue;
2887 		}
2888 
2889 		ret = ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
2890 				     UB9702_IR_RX_ANA_AEQ_ALP_SEL11, &rx_aeq,
2891 				     NULL);
2892 		if (ret)
2893 			return ret;
2894 
2895 		if (rx_aeq < rx_aeq_limit) {
2896 			dev_dbg(dev,
2897 				"rx%u: locked and AEQ normal before setting AEQ window\n",
2898 				nport);
2899 			return 0;
2900 		}
2901 
2902 		if (rx_aeq != prev_aeq) {
2903 			ret = ub960_rxport_lockup_wa_ub9702(priv);
2904 			if (ret)
2905 				return ret;
2906 
2907 			/* Restart AEQ by changing max to 0 --> 0x23 */
2908 			ret = ub960_write_ind(priv,
2909 					      UB960_IND_TARGET_RX_ANA(nport),
2910 					      UB9702_IR_RX_ANA_AEQ_ALP_SEL7,
2911 					      0, NULL);
2912 			if (ret)
2913 				return ret;
2914 
2915 			msleep(20);
2916 
2917 			/* AEQ Restart */
2918 			ret = ub960_write_ind(priv,
2919 					      UB960_IND_TARGET_RX_ANA(nport),
2920 					      UB9702_IR_RX_ANA_AEQ_ALP_SEL7,
2921 					      0x23, NULL);
2922 			if (ret)
2923 				return ret;
2924 
2925 			msleep(20);
2926 
2927 			dev_dbg(dev,
2928 				"rx%u: high AEQ at initial check recovery loop, retry=%u\n",
2929 				nport, retry);
2930 
2931 			prev_aeq = rx_aeq;
2932 		} else {
2933 			dev_dbg(dev,
2934 				"rx%u: lossy cable detected, RX_AEQ %#x, RX_AEQ_LIMIT %#x, retry %u\n",
2935 				nport, rx_aeq, rx_aeq_limit, retry);
2936 			dev_dbg(dev,
2937 				"rx%u: will continue with initiation sequence but high AEQ\n",
2938 				nport);
2939 			return 0;
2940 		}
2941 	}
2942 
2943 	dev_err(dev, "rx%u: max number of retries: %s\n", nport,
2944 		rx_lock ? "unstable AEQ" : "no lock");
2945 
2946 	return -EIO;
2947 }
2948 
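/*
 * Once the link is stable, the adapted AEQ value is pinned by narrowing the
 * AEQ window to [initial, initial + 1], and the VGA is moved from override
 * values to sweep mode with fixed high/low thresholds.
 */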
2949 static int ub960_enable_aeq_lms_ub9702(struct ub960_data *priv,
2950 				       unsigned int nport)
2951 {
2952 	struct device *dev = &priv->client->dev;
2953 	u8 read_aeq_init;
2954 	int ret;
2955 
2956 	ret = ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
2957 			     UB9702_IR_RX_ANA_AEQ_ALP_SEL11, &read_aeq_init,
2958 			     NULL);
2959 	if (ret)
2960 		return ret;
2961 
2962 	dev_dbg(dev, "rx%u: initial AEQ = %#x\n", nport, read_aeq_init);
2963 
2964 	/* Set AEQ Min */
2965 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
2966 			UB9702_IR_RX_ANA_AEQ_ALP_SEL6, read_aeq_init, &ret);
2967 	/* Set AEQ Max */
2968 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
2969 			UB9702_IR_RX_ANA_AEQ_ALP_SEL7, read_aeq_init + 1, &ret);
2970 	/* Set AEQ offset to 0 */
2971 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
2972 			UB9702_IR_RX_ANA_AEQ_ALP_SEL10, 0x0, &ret);
2973 
2974 	/* Enable AEQ tap2 */
2975 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
2976 			UB9702_IR_RX_ANA_EQ_CTRL_SEL_38, 0x00, &ret);
2977 	/* Set VGA Gain 1 Gain 2 override to 0 */
2978 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
2979 			UB9702_IR_RX_ANA_VGA_CTRL_SEL_8, 0x00, &ret);
2980 	/* Set VGA Initial Sweep Gain to 0 */
2981 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
2982 			UB9702_IR_RX_ANA_VGA_CTRL_SEL_6, 0x80, &ret);
2983 	/* Set VGA_Adapt (VGA Gain) override to 0 (thermometer encoded) */
2984 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
2985 			UB9702_IR_RX_ANA_VGA_CTRL_SEL_3, 0x00, &ret);
2986 	/* Enable VGA_SWEEP */
2987 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
2988 			UB9702_IR_RX_ANA_EQ_ADAPT_CTRL, 0x40, &ret);
2989 	/* Disable VGA_SWEEP_GAIN_OV, disable VGA_TUNE_OV */
2990 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
2991 			UB9702_IR_RX_ANA_EQ_OVERRIDE_CTRL, 0x00, &ret);
2992 
2993 	/* Set VGA HIGH Threshold to 43 */
2994 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
2995 			UB9702_IR_RX_ANA_VGA_CTRL_SEL_1, 0x2b, &ret);
2996 	/* Set VGA LOW Threshold to 18 */
2997 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
2998 			UB9702_IR_RX_ANA_VGA_CTRL_SEL_2, 0x12, &ret);
2999 	/* Set vga_sweep_th to 32 */
3000 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
3001 			UB9702_IR_RX_ANA_EQ_CTRL_SEL_15, 0x20, &ret);
3002 	/* Set AEQ timer to 400us/step and parity threshold to 7 */
3003 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
3004 			UB9702_IR_RX_ANA_SYSTEM_INIT_REG0, 0xef, &ret);
3005 
3006 	if (ret)
3007 		return ret;
3008 
3009 	dev_dbg(dev, "rx%u: enable FPD-Link IV AEQ LMS\n", nport);
3010 
3011 	return 0;
3012 }
3013 
3014 static int ub960_enable_dfe_lms_ub9702(struct ub960_data *priv,
3015 				       unsigned int nport)
3016 {
3017 	struct device *dev = &priv->client->dev;
3018 	int ret = 0;
3019 
3020 	/* Enable DFE LMS */
3021 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
3022 			UB9702_IR_RX_ANA_EQ_CTRL_SEL_24, 0x40, &ret);
3023 	/* Disable VGA Gain1 override */
3024 	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
3025 			UB9702_IR_RX_ANA_GAIN_CTRL_0, 0x20, &ret);
3026 
3027 	if (ret)
3028 		return ret;
3029 
3030 	usleep_range(1000, 5000);
3031 
3032 	/* Disable VGA Gain2 override */
3033 	ret = ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
3034 			      UB9702_IR_RX_ANA_GAIN_CTRL_0, 0x00, NULL);
3035 	if (ret)
3036 		return ret;
3037 
3038 	dev_dbg(dev, "rx%u: enabled FPD-Link IV DFE LMS", nport);
3039 
3040 	return 0;
3041 }
3042 
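/*
 * UB9702 RX bring-up, as implemented below:
 *  1. Enable BC_ALWAYS_ON and disable FPD4 auto recovery.
 *  2. Program the serializer ID/alias (with auto-ack) and, for UB971, the
 *     back-channel serializer config.
 *  3. Hold the FPD4 state machines in reset with AEQ max set to 0.
 *  4. Configure each port for its CDR/RX mode and soft-reset.
 *  5. Release the state machines, restart AEQ and run lock recovery.
 *  6. Pin the AEQ window (AEQ LMS), reset again, run the serializer
 *     temperature ramp, then enable DFE LMS.
 *  7. Wait for stable locks, enable interrupts and I2C passthrough, and
 *     re-enable FPD4 auto recovery.
 */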
3043 static int ub960_init_rx_ports_ub9702(struct ub960_data *priv)
3044 {
3045 	struct device *dev = &priv->client->dev;
3046 	unsigned int port_lock_mask;
3047 	unsigned int port_mask = 0;
3048 	bool have_fpd4 = false;
3049 	int ret;
3050 
3051 	for_each_active_rxport(priv, it) {
3052 		ret = ub960_rxport_update_bits(priv, it.nport,
3053 					       UB960_RR_BCC_CONFIG,
3054 					       UB960_RR_BCC_CONFIG_BC_ALWAYS_ON,
3055 					       UB960_RR_BCC_CONFIG_BC_ALWAYS_ON,
3056 					       NULL);
3057 		if (ret)
3058 			return ret;
3059 	}
3060 
3061 	/* Disable FPD4 Auto Recovery */
3062 	ret = ub960_write(priv, UB9702_SR_CSI_EXCLUSIVE_FWD2, 0x0f, NULL);
3063 	if (ret)
3064 		return ret;
3065 
3066 	for_each_active_rxport(priv, it) {
3067 		if (it.rxport->ser.addr >= 0) {
3068 			/*
3069 			 * Set serializer's I2C address if set in the dts file,
3070 			 * and freeze it to prevent updates from the FC.
3071 			 */
3072 			ub960_rxport_write(priv, it.nport, UB960_RR_SER_ID,
3073 					   it.rxport->ser.addr << 1 |
3074 					   UB960_RR_SER_ID_FREEZE_DEVICE_ID,
3075 					   &ret);
3076 		}
3077 
3078 		/* Set serializer I2C alias with auto-ack */
3079 		ub960_rxport_write(priv, it.nport, UB960_RR_SER_ALIAS_ID,
3080 				   it.rxport->ser.alias << 1 |
3081 				   UB960_RR_SER_ALIAS_ID_AUTO_ACK, &ret);
3082 
3083 		if (ret)
3084 			return ret;
3085 	}
3086 
3087 	for_each_active_rxport(priv, it) {
3088 		if (fwnode_device_is_compatible(it.rxport->ser.fwnode,
3089 						"ti,ds90ub971-q1")) {
3090 			ret = ub960_rxport_bc_ser_config(it.rxport);
3091 			if (ret)
3092 				return ret;
3093 		}
3094 	}
3095 
3096 	for_each_active_rxport_fpd4(priv, it) {
3097 		/* Hold state machine in reset */
3098 		ub960_rxport_write(priv, it.nport, UB9702_RR_RX_SM_SEL_2, 0x10,
3099 				   &ret);
3100 
3101 		/* Set AEQ max to 0 */
3102 		ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(it.nport),
3103 				UB9702_IR_RX_ANA_AEQ_ALP_SEL7, 0, &ret);
3104 
3105 		if (ret)
3106 			return ret;
3107 
3108 		dev_dbg(dev,
3109 			"rx%u: holding state machine and adjusting AEQ max to 0",
3110 			it.nport);
3111 	}
3112 
3113 	for_each_active_rxport(priv, it) {
3114 		port_mask |= BIT(it.nport);
3115 
3116 		if (it.rxport->cdr_mode == RXPORT_CDR_FPD4)
3117 			have_fpd4 = true;
3118 	}
3119 
3120 	for_each_rxport(priv, it) {
3121 		ret = ub960_configure_rx_port_ub9702(priv, it.nport);
3122 		if (ret)
3123 			return ret;
3124 	}
3125 
3126 	ret = ub960_reset(priv, false);
3127 	if (ret)
3128 		return ret;
3129 
3130 	if (have_fpd4) {
3131 		for_each_active_rxport_fpd4(priv, it) {
3132 			/* Release state machine */
3133 			ret = ub960_rxport_write(priv, it.nport,
3134 						 UB9702_RR_RX_SM_SEL_2, 0x0,
3135 						 NULL);
3136 			if (ret)
3137 				return ret;
3138 
3139 			dev_dbg(dev, "rx%u: state machine released\n",
3140 				it.nport);
3141 		}
3142 
3143 		/* Wait for SM to resume */
3144 		fsleep(5000);
3145 
3146 		for_each_active_rxport_fpd4(priv, it) {
3147 			ret = ub960_write_ind(priv,
3148 					      UB960_IND_TARGET_RX_ANA(it.nport),
3149 					      UB9702_IR_RX_ANA_AEQ_ALP_SEL7,
3150 					      0x23, NULL);
3151 			if (ret)
3152 				return ret;
3153 
3154 			dev_dbg(dev, "rx%u: AEQ restart\n", it.nport);
3155 		}
3156 
3157 		/* Wait for lock */
3158 		fsleep(20000);
3159 
3160 		for_each_active_rxport_fpd4(priv, it) {
3161 			ret = ub960_lock_recovery_ub9702(priv, it.nport);
3162 			if (ret)
3163 				return ret;
3164 		}
3165 
3166 		for_each_active_rxport_fpd4(priv, it) {
3167 			ret = ub960_enable_aeq_lms_ub9702(priv, it.nport);
3168 			if (ret)
3169 				return ret;
3170 		}
3171 
3172 		for_each_active_rxport_fpd4(priv, it) {
3173 			/* Hold state machine in reset */
3174 			ret = ub960_rxport_write(priv, it.nport,
3175 						 UB9702_RR_RX_SM_SEL_2, 0x10,
3176 						 NULL);
3177 			if (ret)
3178 				return ret;
3179 		}
3180 
3181 		ret = ub960_reset(priv, false);
3182 		if (ret)
3183 			return ret;
3184 
3185 		for_each_active_rxport_fpd4(priv, it) {
3186 			/* Release state machine */
3187 			ret = ub960_rxport_write(priv, it.nport,
3188 						 UB9702_RR_RX_SM_SEL_2, 0,
3189 						 NULL);
3190 			if (ret)
3191 				return ret;
3192 		}
3193 	}
3194 
3195 	/* Wait time for stable lock */
3196 	fsleep(15000);
3197 
3198 	/* Set temperature ramp on serializer */
3199 	for_each_active_rxport(priv, it) {
3200 		ret = ub960_serializer_temp_ramp(it.rxport);
3201 		if (ret)
3202 			return ret;
3203 	}
3204 
3205 	for_each_active_rxport_fpd4(priv, it) {
3206 		ret = ub960_enable_dfe_lms_ub9702(priv, it.nport);
3207 		if (ret)
3208 			return ret;
3209 	}
3210 
3211 	/* Wait for DFE and LMS to adapt */
3212 	fsleep(5000);
3213 
3214 	ret = ub960_rxport_wait_locks(priv, port_mask, &port_lock_mask);
3215 	if (ret)
3216 		return ret;
3217 
3218 	if (port_mask != port_lock_mask) {
3219 		ret = -EIO;
3220 		dev_err_probe(dev, ret, "Failed to lock all RX ports\n");
3221 		return ret;
3222 	}
3223 
3224 	for_each_active_rxport(priv, it) {
3225 		/* Enable all interrupt sources from this port */
3226 		ub960_rxport_write(priv, it.nport, UB960_RR_PORT_ICR_HI, 0x07,
3227 				   &ret);
3228 		ub960_rxport_write(priv, it.nport, UB960_RR_PORT_ICR_LO, 0x7f,
3229 				   &ret);
3230 
3231 		/* Clear serializer I2C alias auto-ack */
3232 		ub960_rxport_update_bits(priv, it.nport, UB960_RR_SER_ALIAS_ID,
3233 					 UB960_RR_SER_ALIAS_ID_AUTO_ACK, 0,
3234 					 &ret);
3235 
3236 		/* Enable I2C_PASS_THROUGH */
3237 		ub960_rxport_update_bits(priv, it.nport, UB960_RR_BCC_CONFIG,
3238 					 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
3239 					 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
3240 					 &ret);
3241 
3242 		if (ret)
3243 			return ret;
3244 	}
3245 
3246 	/* Enable FPD4 Auto Recovery, Recovery loop active */
3247 	ret = ub960_write(priv, UB9702_SR_CSI_EXCLUSIVE_FWD2, 0x18, NULL);
3248 	if (ret)
3249 		return ret;
3250 
3251 	for_each_active_rxport_fpd4(priv, it) {
3252 		u8 final_aeq;
3253 
3254 		ret = ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(it.nport),
3255 				     UB9702_IR_RX_ANA_AEQ_ALP_SEL11, &final_aeq,
3256 				     NULL);
3257 		if (ret)
3258 			return ret;
3259 
3260 		dev_dbg(dev, "rx%u: final AEQ = %#x\n", it.nport, final_aeq);
3261 	}
3262 
3263 	/*
3264 	 * Clear any errors caused by switching the RX port settings while
3265 	 * probing.
3266 	 */
3267 
3268 	ret = ub960_clear_rx_errors(priv);
3269 	if (ret)
3270 		return ret;
3271 
3272 	return 0;
3273 }
3274 
3275 static int ub960_rxport_handle_events(struct ub960_data *priv, u8 nport)
3276 {
3277 	struct device *dev = &priv->client->dev;
3278 	u8 rx_port_sts1;
3279 	u8 rx_port_sts2;
3280 	u8 csi_rx_sts;
3281 	u8 bcc_sts;
3282 	int ret = 0;
3283 
3284 	/* Read interrupts (also clears most of them) */
3285 	ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1, &rx_port_sts1,
3286 			  &ret);
3287 	ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2, &rx_port_sts2,
3288 			  &ret);
3289 	ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS, &csi_rx_sts, &ret);
3290 	ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS, &bcc_sts, &ret);
3291 
3292 	if (ret)
3293 		return ret;
3294 
3295 	if (rx_port_sts1 & UB960_RR_RX_PORT_STS1_PARITY_ERROR) {
3296 		u16 v;
3297 
3298 		ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_PAR_ERR_HI,
3299 					  &v, NULL);
3300 		if (!ret)
3301 			dev_err(dev, "rx%u parity errors: %u\n", nport, v);
3302 	}
3303 
3304 	if (rx_port_sts1 & UB960_RR_RX_PORT_STS1_BCC_CRC_ERROR)
3305 		dev_err(dev, "rx%u BCC CRC error\n", nport);
3306 
3307 	if (rx_port_sts1 & UB960_RR_RX_PORT_STS1_BCC_SEQ_ERROR)
3308 		dev_err(dev, "rx%u BCC SEQ error\n", nport);
3309 
3310 	if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_LINE_LEN_UNSTABLE)
3311 		dev_err(dev, "rx%u line length unstable\n", nport);
3312 
3313 	if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_FPD3_ENCODE_ERROR)
3314 		dev_err(dev, "rx%u FPD3 encode error\n", nport);
3315 
3316 	if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_BUFFER_ERROR)
3317 		dev_err(dev, "rx%u buffer error\n", nport);
3318 
3319 	if (csi_rx_sts)
3320 		dev_err(dev, "rx%u CSI error: %#02x\n", nport, csi_rx_sts);
3321 
3322 	if (csi_rx_sts & UB960_RR_CSI_RX_STS_ECC1_ERR)
3323 		dev_err(dev, "rx%u CSI ECC1 error\n", nport);
3324 
3325 	if (csi_rx_sts & UB960_RR_CSI_RX_STS_ECC2_ERR)
3326 		dev_err(dev, "rx%u CSI ECC2 error\n", nport);
3327 
3328 	if (csi_rx_sts & UB960_RR_CSI_RX_STS_CKSUM_ERR)
3329 		dev_err(dev, "rx%u CSI checksum error\n", nport);
3330 
3331 	if (csi_rx_sts & UB960_RR_CSI_RX_STS_LENGTH_ERR)
3332 		dev_err(dev, "rx%u CSI length error\n", nport);
3333 
3334 	if (bcc_sts)
3335 		dev_err(dev, "rx%u BCC error: %#02x\n", nport, bcc_sts);
3336 
3337 	if (bcc_sts & UB960_RR_BCC_STATUS_RESP_ERR)
3338 		dev_err(dev, "rx%u BCC response error\n", nport);
3339 
3340 	if (bcc_sts & UB960_RR_BCC_STATUS_SLAVE_TO)
3341 		dev_err(dev, "rx%u BCC slave timeout\n", nport);
3342 
3343 	if (bcc_sts & UB960_RR_BCC_STATUS_SLAVE_ERR)
3344 		dev_err(dev, "rx%u BCC slave error\n", nport);
3345 
3346 	if (bcc_sts & UB960_RR_BCC_STATUS_MASTER_TO)
3347 		dev_err(dev, "rx%u BCC master timeout\n", nport);
3348 
3349 	if (bcc_sts & UB960_RR_BCC_STATUS_MASTER_ERR)
3350 		dev_err(dev, "rx%u BCC master error\n", nport);
3351 
3352 	if (bcc_sts & UB960_RR_BCC_STATUS_SEQ_ERROR)
3353 		dev_err(dev, "rx%u BCC sequence error\n", nport);
3354 
3355 	if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_LINE_LEN_CHG) {
3356 		u16 v;
3357 
3358 		ret = ub960_rxport_read16(priv, nport, UB960_RR_LINE_LEN_1,
3359 					  &v, NULL);
3360 		if (!ret)
3361 			dev_dbg(dev, "rx%u line len changed: %u\n", nport, v);
3362 	}
3363 
3364 	if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_LINE_CNT_CHG) {
3365 		u16 v;
3366 
3367 		ret = ub960_rxport_read16(priv, nport, UB960_RR_LINE_COUNT_HI,
3368 					  &v, NULL);
3369 		if (!ret)
3370 			dev_dbg(dev, "rx%u line count changed: %u\n", nport, v);
3371 	}
3372 
3373 	if (rx_port_sts1 & UB960_RR_RX_PORT_STS1_LOCK_STS_CHG) {
3374 		dev_dbg(dev, "rx%u: %s, %s, %s, %s\n", nport,
3375 			(rx_port_sts1 & UB960_RR_RX_PORT_STS1_LOCK_STS) ?
3376 				"locked" :
3377 				"unlocked",
3378 			(rx_port_sts1 & UB960_RR_RX_PORT_STS1_PORT_PASS) ?
3379 				"passed" :
3380 				"not passed",
3381 			(rx_port_sts2 & UB960_RR_RX_PORT_STS2_CABLE_FAULT) ?
3382 				"no clock" :
3383 				"clock ok",
3384 			(rx_port_sts2 & UB960_RR_RX_PORT_STS2_FREQ_STABLE) ?
3385 				"stable freq" :
3386 				"unstable freq");
3387 	}
3388 
3389 	return 0;
3390 }
3391 
3392 /* -----------------------------------------------------------------------------
3393  * V4L2
3394  */
3395 
3396 /*
3397  * The current implementation only supports a simple VC mapping, where all VCs
3398  * from one RX port will be mapped to the same VC. Also, the hardware
3399  * dictates that all streams from an RX port must go to a single TX port.
3400  *
3401  * This function decides the target VC numbers for each RX port with a simple
3402  * algorithm, so that for each TX port, we get VC numbers starting from 0,
3403  * and counting up.
3404  *
3405  * E.g. if all four RX ports are in use, of which the first two go to the
3406  * first TX port and the second two go to the second TX port, we would get
3407  * the following VCs for the four RX ports: 0, 1, 0, 1.
3408  *
3409  * TODO: implement a more sophisticated VC mapping. As the driver cannot know
3410  * what VCs the sinks expect (say, an FPGA with hardcoded VC routing), this
3411  * probably needs to be somehow configurable. Device tree?
3412  */
3413 static void ub960_get_vc_maps(struct ub960_data *priv,
3414 			      struct v4l2_subdev_state *state, u8 *vc)
3415 {
3416 	u8 cur_vc[UB960_MAX_TX_NPORTS] = {};
3417 	struct v4l2_subdev_route *route;
3418 	u8 handled_mask = 0;
3419 
3420 	for_each_active_route(&state->routing, route) {
3421 		unsigned int rx, tx;
3422 
3423 		rx = ub960_pad_to_port(priv, route->sink_pad);
3424 		if (BIT(rx) & handled_mask)
3425 			continue;
3426 
3427 		tx = ub960_pad_to_port(priv, route->source_pad);
3428 
3429 		vc[rx] = cur_vc[tx]++;
3430 		handled_mask |= BIT(rx);
3431 	}
3432 }
3433 
3434 static int ub960_enable_tx_port(struct ub960_data *priv, unsigned int nport)
3435 {
3436 	struct device *dev = &priv->client->dev;
3437 
3438 	dev_dbg(dev, "enable TX port %u\n", nport);
3439 
3440 	return ub960_txport_update_bits(priv, nport, UB960_TR_CSI_CTL,
3441 					UB960_TR_CSI_CTL_CSI_ENABLE,
3442 					UB960_TR_CSI_CTL_CSI_ENABLE, NULL);
3443 }
3444 
3445 static int ub960_disable_tx_port(struct ub960_data *priv, unsigned int nport)
3446 {
3447 	struct device *dev = &priv->client->dev;
3448 
3449 	dev_dbg(dev, "disable TX port %u\n", nport);
3450 
3451 	return ub960_txport_update_bits(priv, nport, UB960_TR_CSI_CTL,
3452 					UB960_TR_CSI_CTL_CSI_ENABLE, 0, NULL);
3453 }
3454 
3455 static int ub960_enable_rx_port(struct ub960_data *priv, unsigned int nport)
3456 {
3457 	struct device *dev = &priv->client->dev;
3458 
3459 	dev_dbg(dev, "enable RX port %u\n", nport);
3460 
3461 	/* Enable forwarding */
3462 	return ub960_update_bits(priv, UB960_SR_FWD_CTL1,
3463 				 UB960_SR_FWD_CTL1_PORT_DIS(nport), 0, NULL);
3464 }
3465 
3466 static int ub960_disable_rx_port(struct ub960_data *priv, unsigned int nport)
3467 {
3468 	struct device *dev = &priv->client->dev;
3469 
3470 	dev_dbg(dev, "disable RX port %u\n", nport);
3471 
3472 	/* Disable forwarding */
3473 	return ub960_update_bits(priv, UB960_SR_FWD_CTL1,
3474 				 UB960_SR_FWD_CTL1_PORT_DIS(nport),
3475 				 UB960_SR_FWD_CTL1_PORT_DIS(nport), NULL);
3476 }
3477 
3478 /*
3479  * The driver only supports using a single VC for each source. This function
3480  * checks that each source only provides streams using a single VC.
3481  */
3482 static int ub960_validate_stream_vcs(struct ub960_data *priv)
3483 {
3484 	for_each_active_rxport(priv, it) {
3485 		struct v4l2_mbus_frame_desc desc;
3486 		int ret;
3487 		u8 vc;
3488 
3489 		ret = v4l2_subdev_call(it.rxport->source.sd, pad,
3490 				       get_frame_desc, it.rxport->source.pad,
3491 				       &desc);
3492 		if (ret)
3493 			return ret;
3494 
3495 		if (desc.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2)
3496 			continue;
3497 
3498 		if (desc.num_entries == 0)
3499 			continue;
3500 
3501 		vc = desc.entry[0].bus.csi2.vc;
3502 
3503 		for (unsigned int i = 1; i < desc.num_entries; i++) {
3504 			if (vc == desc.entry[i].bus.csi2.vc)
3505 				continue;
3506 
3507 			dev_err(&priv->client->dev,
3508 				"rx%u: source with multiple virtual-channels is not supported\n",
3509 				it.nport);
3510 			return -ENODEV;
3511 		}
3512 	}
3513 
3514 	return 0;
3515 }
3516 
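/*
 * FWD_CTL1 layout as used below: bits 7:4 disable forwarding for RX ports
 * 3..0 (UB960_SR_FWD_CTL1_PORT_DIS()), and bits 3:0 select the destination
 * TX port for the corresponding RX port (1 = TX1, 0 = TX0).
 */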
3517 static int ub960_configure_ports_for_streaming(struct ub960_data *priv,
3518 					       struct v4l2_subdev_state *state)
3519 {
3520 	u8 fwd_ctl;
3521 	struct {
3522 		u32 num_streams;
3523 		u8 pixel_dt;
3524 		u8 meta_dt;
3525 		u32 meta_lines;
3526 		u32 tx_port;
3527 	} rx_data[UB960_MAX_RX_NPORTS] = {};
3528 	u8 vc_map[UB960_MAX_RX_NPORTS] = {};
3529 	struct v4l2_subdev_route *route;
3530 	int ret;
3531 
3532 	ret = ub960_validate_stream_vcs(priv);
3533 	if (ret)
3534 		return ret;
3535 
3536 	ub960_get_vc_maps(priv, state, vc_map);
3537 
3538 	for_each_active_route(&state->routing, route) {
3539 		struct ub960_rxport *rxport;
3540 		struct ub960_txport *txport;
3541 		struct v4l2_mbus_framefmt *fmt;
3542 		const struct ub960_format_info *ub960_fmt;
3543 		unsigned int nport;
3544 
3545 		nport = ub960_pad_to_port(priv, route->sink_pad);
3546 
3547 		rxport = priv->rxports[nport];
3548 		if (!rxport)
3549 			return -EINVAL;
3550 
3551 		txport = priv->txports[ub960_pad_to_port(priv, route->source_pad)];
3552 		if (!txport)
3553 			return -EINVAL;
3554 
3555 		rx_data[nport].tx_port = ub960_pad_to_port(priv, route->source_pad);
3556 
3557 		rx_data[nport].num_streams++;
3558 
3559 		/* For the rest, we are only interested in parallel busses */
3560 		if (rxport->rx_mode == RXPORT_MODE_CSI2_SYNC ||
3561 		    rxport->rx_mode == RXPORT_MODE_CSI2_NONSYNC)
3562 			continue;
3563 
3564 		if (rx_data[nport].num_streams > 2)
3565 			return -EPIPE;
3566 
3567 		fmt = v4l2_subdev_state_get_format(state, route->sink_pad,
3568 						   route->sink_stream);
3569 		if (!fmt)
3570 			return -EPIPE;
3571 
3572 		ub960_fmt = ub960_find_format(fmt->code);
3573 		if (!ub960_fmt)
3574 			return -EPIPE;
3575 
3576 		if (ub960_fmt->meta) {
3577 			if (fmt->height > 3) {
3578 				dev_err(&priv->client->dev,
3579 					"rx%u: unsupported metadata height %u\n",
3580 					nport, fmt->height);
3581 				return -EPIPE;
3582 			}
3583 
3584 			rx_data[nport].meta_dt = ub960_fmt->datatype;
3585 			rx_data[nport].meta_lines = fmt->height;
3586 		} else {
3587 			rx_data[nport].pixel_dt = ub960_fmt->datatype;
3588 		}
3589 	}
3590 
3591 	/* Configure RX ports */
3592 
3593 	/*
3594 	 * Keep all port forwardings disabled by default. Forwarding will be
3595 	 * enabled in ub960_enable_rx_port.
3596 	 */
3597 	fwd_ctl = GENMASK(7, 4);
3598 
3599 	for_each_active_rxport(priv, it) {
3600 		unsigned long nport = it.nport;
3601 
3602 		u8 vc = vc_map[nport];
3603 
3604 		if (rx_data[nport].num_streams == 0)
3605 			continue;
3606 
3607 		switch (it.rxport->rx_mode) {
3608 		case RXPORT_MODE_RAW10:
3609 			ub960_rxport_write(priv, nport, UB960_RR_RAW10_ID,
3610 				rx_data[nport].pixel_dt | (vc << UB960_RR_RAW10_ID_VC_SHIFT),
3611 				&ret);
3612 
3613 			ub960_rxport_write(priv, nport,
3614 				UB960_RR_RAW_EMBED_DTYPE,
3615 				(rx_data[nport].meta_lines << UB960_RR_RAW_EMBED_DTYPE_LINES_SHIFT) |
3616 					rx_data[nport].meta_dt, &ret);
3617 
3618 			break;
3619 
3620 		case RXPORT_MODE_RAW12_HF:
3621 		case RXPORT_MODE_RAW12_LF:
3622 			/* Not implemented */
3623 			break;
3624 
3625 		case RXPORT_MODE_CSI2_SYNC:
3626 		case RXPORT_MODE_CSI2_NONSYNC:
3627 			if (!priv->hw_data->is_ub9702) {
3628 				/* Map all VCs from this port to the same VC */
3629 				ub960_rxport_write(priv, nport, UB960_RR_CSI_VC_MAP,
3630 						   (vc << UB960_RR_CSI_VC_MAP_SHIFT(3)) |
3631 						   (vc << UB960_RR_CSI_VC_MAP_SHIFT(2)) |
3632 						   (vc << UB960_RR_CSI_VC_MAP_SHIFT(1)) |
3633 						   (vc << UB960_RR_CSI_VC_MAP_SHIFT(0)),
3634 						   &ret);
3635 			} else {
3636 				unsigned int i;
3637 
3638 				/* Map all VCs from this port to VC(nport) */
3639 				for (i = 0; i < 8; i++)
3640 					ub960_rxport_write(priv, nport,
3641 							   UB9702_RR_VC_ID_MAP(i),
3642 							   (nport << 4) | nport,
3643 							   &ret);
3644 			}
3645 
3646 			break;
3647 		}
3648 
3649 		if (rx_data[nport].tx_port == 1)
3650 			fwd_ctl |= BIT(nport); /* forward to TX1 */
3651 		else
3652 			fwd_ctl &= ~BIT(nport); /* forward to TX0 */
3653 	}
3654 
3655 	ub960_write(priv, UB960_SR_FWD_CTL1, fwd_ctl, &ret);
3656 
3657 	return ret;
3658 }
3659 
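/*
 * Refresh priv->streaming: the device is considered streaming as long as at
 * least one stream is enabled on any of its ports.
 */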
3660 static void ub960_update_streaming_status(struct ub960_data *priv)
3661 {
3662 	unsigned int i;
3663 
3664 	for (i = 0; i < UB960_MAX_NPORTS; i++) {
3665 		if (priv->stream_enable_mask[i])
3666 			break;
3667 	}
3668 
3669 	priv->streaming = i < UB960_MAX_NPORTS;
3670 }
3671 
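/*
 * .enable_streams() handler. When nothing is streaming yet, the ports are
 * first configured for streaming. The TX port behind 'source_pad' is
 * enabled on its first enabled stream, the requested source streams are
 * translated into per-RX-port sink stream masks via the active routing,
 * and the matching RX ports and remote source subdevs are enabled. On
 * failure, everything enabled so far is unwound.
 */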
3672 static int ub960_enable_streams(struct v4l2_subdev *sd,
3673 				struct v4l2_subdev_state *state, u32 source_pad,
3674 				u64 source_streams_mask)
3675 {
3676 	struct ub960_data *priv = sd_to_ub960(sd);
3677 	struct device *dev = &priv->client->dev;
3678 	u64 sink_streams[UB960_MAX_RX_NPORTS] = {};
3679 	struct v4l2_subdev_route *route;
3680 	unsigned int failed_port;
3681 	int ret;
3682 
3683 	if (!priv->streaming) {
3684 		dev_dbg(dev, "Prepare for streaming\n");
3685 		ret = ub960_configure_ports_for_streaming(priv, state);
3686 		if (ret)
3687 			return ret;
3688 	}
3689 
3690 	/* Enable TX port if not yet enabled */
3691 	if (!priv->stream_enable_mask[source_pad]) {
3692 		ret = ub960_enable_tx_port(priv,
3693 					   ub960_pad_to_port(priv, source_pad));
3694 		if (ret)
3695 			return ret;
3696 	}
3697 
3698 	priv->stream_enable_mask[source_pad] |= source_streams_mask;
3699 
3700 	/* Collect sink streams per pad which we need to enable */
3701 	for_each_active_route(&state->routing, route) {
3702 		unsigned int nport;
3703 
3704 		if (route->source_pad != source_pad)
3705 			continue;
3706 
3707 		if (!(source_streams_mask & BIT_ULL(route->source_stream)))
3708 			continue;
3709 
3710 		nport = ub960_pad_to_port(priv, route->sink_pad);
3711 
3712 		sink_streams[nport] |= BIT_ULL(route->sink_stream);
3713 	}
3714 
3715 	for_each_rxport(priv, it) {
3716 		unsigned int nport = it.nport;
3717 
3718 		if (!sink_streams[nport])
3719 			continue;
3720 
3721 		/* Enable the RX port if not yet enabled */
3722 		if (!priv->stream_enable_mask[nport]) {
3723 			ret = ub960_enable_rx_port(priv, nport);
3724 			if (ret) {
3725 				failed_port = nport;
3726 				goto err;
3727 			}
3728 		}
3729 
3730 		priv->stream_enable_mask[nport] |= sink_streams[nport];
3731 
3732 		dev_dbg(dev, "enable RX port %u streams %#llx\n", nport,
3733 			sink_streams[nport]);
3734 
3735 		ret = v4l2_subdev_enable_streams(
3736 			priv->rxports[nport]->source.sd,
3737 			priv->rxports[nport]->source.pad,
3738 			sink_streams[nport]);
3739 		if (ret) {
3740 			priv->stream_enable_mask[nport] &= ~sink_streams[nport];
3741 
3742 			if (!priv->stream_enable_mask[nport])
3743 				ub960_disable_rx_port(priv, nport);
3744 
3745 			failed_port = nport;
3746 			goto err;
3747 		}
3748 	}
3749 
3750 	priv->streaming = true;
3751 
3752 	return 0;
3753 
3754 err:
3755 	for (unsigned int nport = 0; nport < failed_port; nport++) {
		int err_ret;

3756 		if (!sink_streams[nport])
3757 			continue;
3758 
3759 		dev_dbg(dev, "disable RX port %u streams %#llx\n", nport,
3760 			sink_streams[nport]);
3761 
		/* Use a separate variable so the original error is returned */
3762 		err_ret = v4l2_subdev_disable_streams(
3763 			priv->rxports[nport]->source.sd,
3764 			priv->rxports[nport]->source.pad,
3765 			sink_streams[nport]);
3766 		if (err_ret)
3767 			dev_err(dev, "Failed to disable streams: %d\n", err_ret);
3768 
3769 		priv->stream_enable_mask[nport] &= ~sink_streams[nport];
3770 
3771 		/* Disable RX port if no active streams */
3772 		if (!priv->stream_enable_mask[nport])
3773 			ub960_disable_rx_port(priv, nport);
3774 	}
3775 
3776 	priv->stream_enable_mask[source_pad] &= ~source_streams_mask;
3777 
3778 	if (!priv->stream_enable_mask[source_pad])
3779 		ub960_disable_tx_port(priv,
3780 				      ub960_pad_to_port(priv, source_pad));
3781 
3782 	ub960_update_streaming_status(priv);
3783 
3784 	return ret;
3785 }
3786 
3787 static int ub960_disable_streams(struct v4l2_subdev *sd,
3788 				 struct v4l2_subdev_state *state,
3789 				 u32 source_pad, u64 source_streams_mask)
3790 {
3791 	struct ub960_data *priv = sd_to_ub960(sd);
3792 	struct device *dev = &priv->client->dev;
3793 	u64 sink_streams[UB960_MAX_RX_NPORTS] = {};
3794 	struct v4l2_subdev_route *route;
3795 	int ret;
3796 
3797 	/* Collect sink streams per pad which we need to disable */
3798 	for_each_active_route(&state->routing, route) {
3799 		unsigned int nport;
3800 
3801 		if (route->source_pad != source_pad)
3802 			continue;
3803 
3804 		if (!(source_streams_mask & BIT_ULL(route->source_stream)))
3805 			continue;
3806 
3807 		nport = ub960_pad_to_port(priv, route->sink_pad);
3808 
3809 		sink_streams[nport] |= BIT_ULL(route->sink_stream);
3810 	}
3811 
3812 	for_each_rxport(priv, it) {
3813 		unsigned int nport = it.nport;
3814 
3815 		if (!sink_streams[nport])
3816 			continue;
3817 
3818 		dev_dbg(dev, "disable RX port %u streams %#llx\n", nport,
3819 			sink_streams[nport]);
3820 
3821 		ret = v4l2_subdev_disable_streams(
3822 			priv->rxports[nport]->source.sd,
3823 			priv->rxports[nport]->source.pad,
3824 			sink_streams[nport]);
3825 		if (ret)
3826 			dev_err(dev, "Failed to disable streams: %d\n", ret);
3827 
3828 		priv->stream_enable_mask[nport] &= ~sink_streams[nport];
3829 
3830 		/* Disable RX port if no active streams */
3831 		if (!priv->stream_enable_mask[nport])
3832 			ub960_disable_rx_port(priv, nport);
3833 	}
3834 
3835 	/* Disable TX port if no active streams */
3836 
3837 	priv->stream_enable_mask[source_pad] &= ~source_streams_mask;
3838 
3839 	if (!priv->stream_enable_mask[source_pad])
3840 		ub960_disable_tx_port(priv,
3841 				      ub960_pad_to_port(priv, source_pad));
3842 
3843 	ub960_update_streaming_status(priv);
3844 
3845 	return 0;
3846 }
3847 
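/*
 * Apply a new routing table. Only 1-to-1 routes without sink stream mixing
 * are accepted, and the number of routes is capped at
 * V4L2_FRAME_DESC_ENTRY_MAX so that a frame descriptor entry can be
 * generated for every route. All stream formats are reset to a 640x480
 * UYVY8 default.
 */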
3848 static int _ub960_set_routing(struct v4l2_subdev *sd,
3849 			      struct v4l2_subdev_state *state,
3850 			      struct v4l2_subdev_krouting *routing)
3851 {
3852 	static const struct v4l2_mbus_framefmt format = {
3853 		.width = 640,
3854 		.height = 480,
3855 		.code = MEDIA_BUS_FMT_UYVY8_1X16,
3856 		.field = V4L2_FIELD_NONE,
3857 		.colorspace = V4L2_COLORSPACE_SRGB,
3858 		.ycbcr_enc = V4L2_YCBCR_ENC_601,
3859 		.quantization = V4L2_QUANTIZATION_LIM_RANGE,
3860 		.xfer_func = V4L2_XFER_FUNC_SRGB,
3861 	};
3862 	int ret;
3863 
3864 	/*
3865 	 * Note: we can only support up to V4L2_FRAME_DESC_ENTRY_MAX routes
3866 	 * until the frame descriptor is made dynamically allocated.
3867 	 */
3868 
3869 	if (routing->num_routes > V4L2_FRAME_DESC_ENTRY_MAX)
3870 		return -E2BIG;
3871 
3872 	ret = v4l2_subdev_routing_validate(sd, routing,
3873 					   V4L2_SUBDEV_ROUTING_ONLY_1_TO_1 |
3874 					   V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX);
3875 	if (ret)
3876 		return ret;
3877 
3878 	ret = v4l2_subdev_set_routing_with_fmt(sd, state, routing, &format);
3879 	if (ret)
3880 		return ret;
3881 
3882 	return 0;
3883 }
3884 
3885 static int ub960_set_routing(struct v4l2_subdev *sd,
3886 			     struct v4l2_subdev_state *state,
3887 			     enum v4l2_subdev_format_whence which,
3888 			     struct v4l2_subdev_krouting *routing)
3889 {
3890 	struct ub960_data *priv = sd_to_ub960(sd);
3891 
3892 	if (which == V4L2_SUBDEV_FORMAT_ACTIVE && priv->streaming)
3893 		return -EBUSY;
3894 
3895 	return _ub960_set_routing(sd, state, routing);
3896 }
3897 
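/*
 * Build the CSI-2 frame descriptor for a source (TX) pad. Each entry is
 * copied from the corresponding remote source's frame descriptor, with the
 * virtual channel rewritten according to the deserializer's VC map. For
 * non-CSI-2 sources the data type is derived from the configured media bus
 * format instead.
 */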
3898 static int ub960_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
3899 				struct v4l2_mbus_frame_desc *fd)
3900 {
3901 	struct ub960_data *priv = sd_to_ub960(sd);
3902 	struct v4l2_subdev_route *route;
3903 	struct v4l2_subdev_state *state;
3904 	int ret = 0;
3905 	struct device *dev = &priv->client->dev;
3906 	u8 vc_map[UB960_MAX_RX_NPORTS] = {};
3907 
3908 	if (!ub960_pad_is_source(priv, pad))
3909 		return -EINVAL;
3910 
3911 	fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2;
3912 
3913 	state = v4l2_subdev_lock_and_get_active_state(&priv->sd);
3914 
3915 	ub960_get_vc_maps(priv, state, vc_map);
3916 
3917 	for_each_active_route(&state->routing, route) {
3918 		struct v4l2_mbus_frame_desc_entry *source_entry = NULL;
3919 		struct v4l2_mbus_frame_desc source_fd;
3920 		unsigned int nport;
3921 		unsigned int i;
3922 
3923 		if (route->source_pad != pad)
3924 			continue;
3925 
3926 		nport = ub960_pad_to_port(priv, route->sink_pad);
3927 
3928 		ret = v4l2_subdev_call(priv->rxports[nport]->source.sd, pad,
3929 				       get_frame_desc,
3930 				       priv->rxports[nport]->source.pad,
3931 				       &source_fd);
3932 		if (ret) {
3933 			dev_err(dev,
3934 				"Failed to get source frame desc for pad %u\n",
3935 				route->sink_pad);
3936 			goto out_unlock;
3937 		}
3938 
3939 		for (i = 0; i < source_fd.num_entries; i++) {
3940 			if (source_fd.entry[i].stream == route->sink_stream) {
3941 				source_entry = &source_fd.entry[i];
3942 				break;
3943 			}
3944 		}
3945 
3946 		if (!source_entry) {
3947 			dev_err(dev,
3948 				"Failed to find stream from source frame desc\n");
3949 			ret = -EPIPE;
3950 			goto out_unlock;
3951 		}
3952 
3953 		fd->entry[fd->num_entries].stream = route->source_stream;
3954 		fd->entry[fd->num_entries].flags = source_entry->flags;
3955 		fd->entry[fd->num_entries].length = source_entry->length;
3956 		fd->entry[fd->num_entries].pixelcode = source_entry->pixelcode;
3957 
3958 		fd->entry[fd->num_entries].bus.csi2.vc = vc_map[nport];
3959 
3960 		if (source_fd.type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2) {
3961 			fd->entry[fd->num_entries].bus.csi2.dt =
3962 				source_entry->bus.csi2.dt;
3963 		} else {
3964 			const struct ub960_format_info *ub960_fmt;
3965 			struct v4l2_mbus_framefmt *fmt;
3966 
3967 			fmt = v4l2_subdev_state_get_format(state, pad,
3968 							   route->source_stream);
3969 
3970 			if (!fmt) {
3971 				ret = -EINVAL;
3972 				goto out_unlock;
3973 			}
3974 
3975 			ub960_fmt = ub960_find_format(fmt->code);
3976 			if (!ub960_fmt) {
3977 				dev_err(dev, "Unable to find format\n");
3978 				ret = -EINVAL;
3979 				goto out_unlock;
3980 			}
3981 
3982 			fd->entry[fd->num_entries].bus.csi2.dt =
3983 				ub960_fmt->datatype;
3984 		}
3985 
3986 		fd->num_entries++;
3987 	}
3988 
3989 out_unlock:
3990 	v4l2_subdev_unlock_state(state);
3991 
3992 	return ret;
3993 }
3994 
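/*
 * The deserializer cannot transcode: on source pads set_fmt behaves as
 * get_fmt, while a format set on a sink pad is also copied to the opposite
 * end of the route so that the source format always matches.
 */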
3995 static int ub960_set_fmt(struct v4l2_subdev *sd,
3996 			 struct v4l2_subdev_state *state,
3997 			 struct v4l2_subdev_format *format)
3998 {
3999 	struct ub960_data *priv = sd_to_ub960(sd);
4000 	struct v4l2_mbus_framefmt *fmt;
4001 
4002 	if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE && priv->streaming)
4003 		return -EBUSY;
4004 
4005 	/* No transcoding, source and sink formats must match. */
4006 	if (ub960_pad_is_source(priv, format->pad))
4007 		return v4l2_subdev_get_fmt(sd, state, format);
4008 
4009 	/*
4010 	 * Default to the first format if the requested media bus code isn't
4011 	 * supported.
4012 	 */
4013 	if (!ub960_find_format(format->format.code))
4014 		format->format.code = ub960_formats[0].code;
4015 
4016 	fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream);
4017 	if (!fmt)
4018 		return -EINVAL;
4019 
4020 	*fmt = format->format;
4021 
4022 	fmt = v4l2_subdev_state_get_opposite_stream_format(state, format->pad,
4023 							   format->stream);
4024 	if (!fmt)
4025 		return -EINVAL;
4026 
4027 	*fmt = format->format;
4028 
4029 	return 0;
4030 }
4031 
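/*
 * Initial subdev state: a single active route from RX port 0, stream 0, to
 * the first TX source pad, stream 0.
 */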
4032 static int ub960_init_state(struct v4l2_subdev *sd,
4033 			    struct v4l2_subdev_state *state)
4034 {
4035 	struct ub960_data *priv = sd_to_ub960(sd);
4036 
4037 	struct v4l2_subdev_route routes[] = {
4038 		{
4039 			.sink_pad = 0,
4040 			.sink_stream = 0,
4041 			.source_pad = priv->hw_data->num_rxports,
4042 			.source_stream = 0,
4043 			.flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE,
4044 		},
4045 	};
4046 
4047 	struct v4l2_subdev_krouting routing = {
4048 		.num_routes = ARRAY_SIZE(routes),
4049 		.routes = routes,
4050 	};
4051 
4052 	return _ub960_set_routing(sd, state, &routing);
4053 }
4054 
4055 static const struct v4l2_subdev_pad_ops ub960_pad_ops = {
4056 	.enable_streams = ub960_enable_streams,
4057 	.disable_streams = ub960_disable_streams,
4058 
4059 	.set_routing = ub960_set_routing,
4060 	.get_frame_desc = ub960_get_frame_desc,
4061 
4062 	.get_fmt = v4l2_subdev_get_fmt,
4063 	.set_fmt = ub960_set_fmt,
4064 };
4065 
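/*
 * Log the strobe position and equalizer settings of an RX port. Only
 * called for the UB960; the UB9702 does not use this.
 */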
4066 static int ub960_log_status_ub960_sp_eq(struct ub960_data *priv,
4067 					unsigned int nport)
4068 {
4069 	struct device *dev = &priv->client->dev;
4070 	u8 eq_level;
4071 	s8 strobe_pos;
4072 	int ret;
4073 	u8 v;
4074 
4075 	/* Strobe */
4076 
4077 	ret = ub960_read(priv, UB960_XR_AEQ_CTL1, &v, NULL);
4078 	if (ret)
4079 		return ret;
4080 
4081 	dev_info(dev, "\t%s strobe\n",
4082 		 (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) ? "Adaptive" :
4083 							  "Manual");
4084 
4085 	if (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) {
4086 		ret = ub960_read(priv, UB960_XR_SFILTER_CFG, &v, NULL);
4087 		if (ret)
4088 			return ret;
4089 
4090 		dev_info(dev, "\tStrobe range [%d, %d]\n",
4091 			 ((v >> UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT) & 0xf) - 7,
4092 			 ((v >> UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT) & 0xf) - 7);
4093 	}
4094 
4095 	ret = ub960_rxport_get_strobe_pos(priv, nport, &strobe_pos);
4096 	if (ret)
4097 		return ret;
4098 
4099 	dev_info(dev, "\tStrobe pos %d\n", strobe_pos);
4100 
4101 	/* EQ */
4102 
4103 	ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v, NULL);
4104 	if (ret)
4105 		return ret;
4106 
4107 	dev_info(dev, "\t%s EQ\n",
4108 		 (v & UB960_RR_AEQ_BYPASS_ENABLE) ? "Manual" :
4109 						    "Adaptive");
4110 
4111 	if (!(v & UB960_RR_AEQ_BYPASS_ENABLE)) {
4112 		ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_MIN_MAX, &v,
4113 					NULL);
4114 		if (ret)
4115 			return ret;
4116 
4117 		dev_info(dev, "\tEQ range [%u, %u]\n",
4118 			 (v >> UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT) & 0xf,
4119 			 (v >> UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT) & 0xf);
4120 	}
4121 
4122 	ret = ub960_rxport_get_eq_level(priv, nport, &eq_level);
4123 	if (ret)
4124 		return ret;
4125 
4126 	dev_info(dev, "\tEQ level %u\n", eq_level);
4127 
4128 	return 0;
4129 }
4130 
4131 static int ub960_log_status(struct v4l2_subdev *sd)
4132 {
4133 	struct ub960_data *priv = sd_to_ub960(sd);
4134 	struct device *dev = &priv->client->dev;
4135 	struct v4l2_subdev_state *state;
4136 	u16 v16 = 0;
4137 	u8 v = 0;
4138 	u8 id[UB960_SR_FPD3_RX_ID_LEN];
4139 	int ret = 0;
4140 
4141 	state = v4l2_subdev_lock_and_get_active_state(sd);
4142 
4143 	for (unsigned int i = 0; i < sizeof(id); i++) {
4144 		ret = ub960_read(priv, UB960_SR_FPD3_RX_ID(i), &id[i], NULL);
4145 		if (ret)
4146 			goto out_unlock;
4147 	}
4148 
4149 	dev_info(dev, "ID '%.*s'\n", (int)sizeof(id), id);
4150 
4151 	for (unsigned int nport = 0; nport < priv->hw_data->num_txports;
4152 	     nport++) {
4153 		struct ub960_txport *txport = priv->txports[nport];
4154 
4155 		dev_info(dev, "TX %u\n", nport);
4156 
4157 		if (!txport) {
4158 			dev_info(dev, "\tNot initialized\n");
4159 			continue;
4160 		}
4161 
4162 		ret = ub960_txport_read(priv, nport, UB960_TR_CSI_STS, &v, NULL);
4163 		if (ret)
4164 			goto out_unlock;
4165 
4166 		dev_info(dev, "\tsync %u, pass %u\n", !!(v & BIT(1)),
4167 			 !!(v & BIT(0)));
4168 
4169 		ret = ub960_read16(priv, UB960_SR_CSI_FRAME_COUNT_HI(nport),
4170 				   &v16, NULL);
4171 		if (ret)
4172 			goto out_unlock;
4173 
4174 		dev_info(dev, "\tframe counter %u\n", v16);
4175 
4176 		ret = ub960_read16(priv, UB960_SR_CSI_FRAME_ERR_COUNT_HI(nport),
4177 				   &v16, NULL);
4178 		if (ret)
4179 			goto out_unlock;
4180 
4181 		dev_info(dev, "\tframe error counter %u\n", v16);
4182 
4183 		ret = ub960_read16(priv, UB960_SR_CSI_LINE_COUNT_HI(nport),
4184 				   &v16, NULL);
4185 		if (ret)
4186 			goto out_unlock;
4187 
4188 		dev_info(dev, "\tline counter %u\n", v16);
4189 
4190 		ret = ub960_read16(priv, UB960_SR_CSI_LINE_ERR_COUNT_HI(nport),
4191 				   &v16, NULL);
4192 		if (ret)
4193 			goto out_unlock;
4194 
4195 		dev_info(dev, "\tline error counter %u\n", v16);
4196 	}
4197 
4198 	for_each_rxport(priv, it) {
4199 		unsigned int nport = it.nport;
4200 
4201 		dev_info(dev, "RX %u\n", nport);
4202 
4203 		if (!it.rxport) {
4204 			dev_info(dev, "\tNot initialized\n");
4205 			continue;
4206 		}
4207 
4208 		ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1, &v,
4209 					NULL);
4210 		if (ret)
4211 			goto out_unlock;
4212 
4213 		if (v & UB960_RR_RX_PORT_STS1_LOCK_STS)
4214 			dev_info(dev, "\tLocked\n");
4215 		else
4216 			dev_info(dev, "\tNot locked\n");
4217 
4218 		dev_info(dev, "\trx_port_sts1 %#02x\n", v);
4219 		ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2, &v,
4220 					NULL);
4221 		if (ret)
4222 			goto out_unlock;
4223 
4224 		dev_info(dev, "\trx_port_sts2 %#02x\n", v);
4225 
4226 		ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_FREQ_HIGH,
4227 					  &v16, NULL);
4228 		if (ret)
4229 			goto out_unlock;
4230 
4231 		dev_info(dev, "\tlink freq %llu Hz\n", ((u64)v16 * HZ_PER_MHZ) >> 8);
4232 
4233 		ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_PAR_ERR_HI,
4234 					  &v16, NULL);
4235 		if (ret)
4236 			goto out_unlock;
4237 
4238 		dev_info(dev, "\tparity errors %u\n", v16);
4239 
4240 		ret = ub960_rxport_read16(priv, nport, UB960_RR_LINE_COUNT_HI,
4241 					  &v16, NULL);
4242 		if (ret)
4243 			goto out_unlock;
4244 
4245 		dev_info(dev, "\tlines per frame %u\n", v16);
4246 
4247 		ret = ub960_rxport_read16(priv, nport, UB960_RR_LINE_LEN_1,
4248 					  &v16, NULL);
4249 		if (ret)
4250 			goto out_unlock;
4251 
4252 		dev_info(dev, "\tbytes per line %u\n", v16);
4253 
4254 		ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER,
4255 					&v, NULL);
4256 		if (ret)
4257 			goto out_unlock;
4258 
4259 		dev_info(dev, "\tcsi_err_counter %u\n", v);
4260 
4261 		if (!priv->hw_data->is_ub9702) {
4262 			ret = ub960_log_status_ub960_sp_eq(priv, nport);
4263 			if (ret)
4264 				goto out_unlock;
4265 		}
4266 
4267 		/* GPIOs */
4268 		for (unsigned int i = 0; i < UB960_NUM_BC_GPIOS; i++) {
4269 			u8 ctl_reg;
4270 			u8 ctl_shift;
4271 
4272 			ctl_reg = UB960_RR_BC_GPIO_CTL(i / 2);
4273 			ctl_shift = (i % 2) * 4;
4274 
4275 			ret = ub960_rxport_read(priv, nport, ctl_reg, &v, NULL);
4276 			if (ret)
4277 				goto out_unlock;
4278 
4279 			dev_info(dev, "\tGPIO%u: mode %u\n", i,
4280 				 (v >> ctl_shift) & 0xf);
4281 		}
4282 	}
4283 
out_unlock:
4284 	v4l2_subdev_unlock_state(state);
4285 
4286 	return ret;
4287 }
4288 
4289 static const struct v4l2_subdev_core_ops ub960_subdev_core_ops = {
4290 	.log_status = ub960_log_status,
4291 };
4292 
4293 static const struct v4l2_subdev_internal_ops ub960_internal_ops = {
4294 	.init_state = ub960_init_state,
4295 };
4296 
4297 static const struct v4l2_subdev_ops ub960_subdev_ops = {
4298 	.core = &ub960_subdev_core_ops,
4299 	.pad = &ub960_pad_ops,
4300 };
4301 
4302 static const struct media_entity_operations ub960_entity_ops = {
4303 	.get_fwnode_pad = v4l2_subdev_get_fwnode_pad_1_to_1,
4304 	.link_validate = v4l2_subdev_link_validate,
4305 	.has_pad_interdep = v4l2_subdev_has_pad_interdep,
4306 };
4307 
4308 /* -----------------------------------------------------------------------------
4309  * Core
4310  */
4311 
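/*
 * Main event dispatcher: reads the global interrupt status and calls the
 * per-CSI-TX and per-RX-port event handlers for the ports that have an
 * interrupt pending. Also invoked from the polling work (with irq == 0),
 * as dedicated interrupt support is not implemented yet.
 */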
4312 static irqreturn_t ub960_handle_events(int irq, void *arg)
4313 {
4314 	struct ub960_data *priv = arg;
4315 	u8 int_sts;
4316 	u8 fwd_sts;
4317 	int ret;
4318 
4319 	ret = ub960_read(priv, UB960_SR_INTERRUPT_STS, &int_sts, NULL);
4320 	if (ret || !int_sts)
4321 		return IRQ_NONE;
4322 
4323 	dev_dbg(&priv->client->dev, "INTERRUPT_STS %x\n", int_sts);
4324 
4325 	ret = ub960_read(priv, UB960_SR_FWD_STS, &fwd_sts, NULL);
4326 	if (ret)
4327 		return IRQ_NONE;
4328 
4329 	dev_dbg(&priv->client->dev, "FWD_STS %#02x\n", fwd_sts);
4330 
4331 	for (unsigned int i = 0; i < priv->hw_data->num_txports; i++) {
4332 		if (int_sts & UB960_SR_INTERRUPT_STS_IS_CSI_TX(i)) {
4333 			ret = ub960_csi_handle_events(priv, i);
4334 			if (ret)
4335 				return IRQ_NONE;
4336 		}
4337 	}
4338 
4339 	for_each_active_rxport(priv, it) {
4340 		if (int_sts & UB960_SR_INTERRUPT_STS_IS_RX(it.nport)) {
4341 			ret = ub960_rxport_handle_events(priv, it.nport);
4342 			if (ret)
4343 				return IRQ_NONE;
4344 		}
4345 	}
4346 
4347 	return IRQ_HANDLED;
4348 }
4349 
4350 static void ub960_handler_work(struct work_struct *work)
4351 {
4352 	struct delayed_work *dwork = to_delayed_work(work);
4353 	struct ub960_data *priv =
4354 		container_of(dwork, struct ub960_data, poll_work);
4355 
4356 	ub960_handle_events(0, priv);
4357 
4358 	schedule_delayed_work(&priv->poll_work,
4359 			      msecs_to_jiffies(UB960_POLL_TIME_MS));
4360 }
4361 
4362 static void ub960_txport_free_ports(struct ub960_data *priv)
4363 {
4364 	unsigned int nport;
4365 
4366 	for (nport = 0; nport < priv->hw_data->num_txports; nport++) {
4367 		struct ub960_txport *txport = priv->txports[nport];
4368 
4369 		if (!txport)
4370 			continue;
4371 
4372 		kfree(txport);
4373 		priv->txports[nport] = NULL;
4374 	}
4375 }
4376 
4377 static void ub960_rxport_free_ports(struct ub960_data *priv)
4378 {
4379 	for_each_active_rxport(priv, it) {
4380 		fwnode_handle_put(it.rxport->source.ep_fwnode);
4381 		fwnode_handle_put(it.rxport->ser.fwnode);
4382 
4383 		mutex_destroy(&it.rxport->aliased_addrs_lock);
4384 
4385 		kfree(it.rxport);
4386 		priv->rxports[it.nport] = NULL;
4387 	}
4388 }
4389 
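/*
 * Parse one link@N node under the deserializer's 'links' node. An
 * illustrative fragment, with example-only values (see the ti,ds90ub960
 * devicetree bindings for the authoritative schema and property values):
 *
 *	link@0 {
 *		reg = <0>;
 *		i2c-alias = <0x44>;
 *		ti,rx-mode = <3>;
 *		serializer {
 *			reg = <0x30>;
 *		};
 *	};
 */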
4390 static int
4391 ub960_parse_dt_rxport_link_properties(struct ub960_data *priv,
4392 				      struct fwnode_handle *link_fwnode,
4393 				      struct ub960_rxport *rxport)
4394 {
4395 	struct device *dev = &priv->client->dev;
4396 	unsigned int nport = rxport->nport;
4397 	u32 rx_mode;
4398 	u32 cdr_mode;
4399 	s32 strobe_pos;
4400 	u32 eq_level;
4401 	u32 ser_i2c_alias;
4402 	u32 ser_i2c_addr;
4403 	int ret;
4404 
4405 	cdr_mode = RXPORT_CDR_FPD3;
4406 
4407 	ret = fwnode_property_read_u32(link_fwnode, "ti,cdr-mode", &cdr_mode);
4408 	if (ret < 0 && ret != -EINVAL) {
4409 		dev_err(dev, "rx%u: failed to read '%s': %d\n", nport,
4410 			"ti,cdr-mode", ret);
4411 		return ret;
4412 	}
4413 
4414 	if (cdr_mode > RXPORT_CDR_LAST) {
4415 		dev_err(dev, "rx%u: bad 'ti,cdr-mode' %u\n", nport, cdr_mode);
4416 		return -EINVAL;
4417 	}
4418 
4419 	if (!priv->hw_data->is_fpdlink4 && cdr_mode == RXPORT_CDR_FPD4) {
4420 		dev_err(dev, "rx%u: FPD-Link 4 CDR not supported\n", nport);
4421 		return -EINVAL;
4422 	}
4423 
4424 	rxport->cdr_mode = cdr_mode;
4425 
4426 	ret = fwnode_property_read_u32(link_fwnode, "ti,rx-mode", &rx_mode);
4427 	if (ret < 0) {
4428 		dev_err(dev, "rx%u: failed to read '%s': %d\n", nport,
4429 			"ti,rx-mode", ret);
4430 		return ret;
4431 	}
4432 
4433 	if (rx_mode > RXPORT_MODE_LAST) {
4434 		dev_err(dev, "rx%u: bad 'ti,rx-mode' %u\n", nport, rx_mode);
4435 		return -EINVAL;
4436 	}
4437 
4438 	switch (rx_mode) {
4439 	case RXPORT_MODE_RAW12_HF:
4440 	case RXPORT_MODE_RAW12_LF:
4441 		dev_err(dev, "rx%u: unsupported 'ti,rx-mode' %u\n", nport,
4442 			rx_mode);
4443 		return -EINVAL;
4444 	default:
4445 		break;
4446 	}
4447 
4448 	rxport->rx_mode = rx_mode;
4449 
4450 	/* EQ & Strobe related */
4451 
4452 	/* Defaults */
4453 	rxport->eq.manual_eq = false;
4454 	rxport->eq.aeq.eq_level_min = UB960_MIN_EQ_LEVEL;
4455 	rxport->eq.aeq.eq_level_max = UB960_MAX_EQ_LEVEL;
4456 
4457 	ret = fwnode_property_read_u32(link_fwnode, "ti,strobe-pos",
4458 				       &strobe_pos);
4459 	if (ret) {
4460 		if (ret != -EINVAL) {
4461 			dev_err(dev, "rx%u: failed to read '%s': %d\n", nport,
4462 				"ti,strobe-pos", ret);
4463 			return ret;
4464 		}
4465 	} else {
4466 		if (strobe_pos < UB960_MIN_MANUAL_STROBE_POS ||
4467 		    strobe_pos > UB960_MAX_MANUAL_STROBE_POS) {
4468 			dev_err(dev, "rx%u: illegal 'ti,strobe-pos' value: %d\n",
4469 				nport, strobe_pos);
4470 			return -EINVAL;
4471 		}
4472 
4473 		/* NOTE: ignored unless global manual strobe pos is also set */
4474 		rxport->eq.strobe_pos = strobe_pos;
4475 		if (!priv->strobe.manual)
4476 			dev_warn(dev,
4477 				 "rx%u: 'ti,strobe-pos' ignored as 'ti,manual-strobe' not set\n",
4478 				 nport);
4479 	}
4480 
4481 	ret = fwnode_property_read_u32(link_fwnode, "ti,eq-level", &eq_level);
4482 	if (ret) {
4483 		if (ret != -EINVAL) {
4484 			dev_err(dev, "rx%u: failed to read '%s': %d\n", nport,
4485 				"ti,eq-level", ret);
4486 			return ret;
4487 		}
4488 	} else {
4489 		if (eq_level > UB960_MAX_EQ_LEVEL) {
4490 			dev_err(dev, "rx%u: illegal 'ti,eq-level' value: %u\n",
4491 				nport, eq_level);
4492 			return -EINVAL;
4493 		}
4494 
4495 		rxport->eq.manual_eq = true;
4496 		rxport->eq.manual.eq_level = eq_level;
4497 	}
4498 
4499 	ret = fwnode_property_read_u32(link_fwnode, "i2c-alias",
4500 				       &ser_i2c_alias);
4501 	if (ret) {
4502 		dev_err(dev, "rx%u: failed to read '%s': %d\n", nport,
4503 			"i2c-alias", ret);
4504 		return ret;
4505 	}
4506 	rxport->ser.alias = ser_i2c_alias;
4507 
4508 	rxport->ser.fwnode = fwnode_get_named_child_node(link_fwnode, "serializer");
4509 	if (!rxport->ser.fwnode) {
4510 		dev_err(dev, "rx%u: missing 'serializer' node\n", nport);
4511 		return -EINVAL;
4512 	}
4513 
4514 	ret = fwnode_property_read_u32(rxport->ser.fwnode, "reg",
4515 				       &ser_i2c_addr);
4516 	if (ret)
4517 		rxport->ser.addr = -EINVAL;
4518 	else
4519 		rxport->ser.addr = ser_i2c_addr;
4520 
4521 	return 0;
4522 }
4523 
4524 static int ub960_parse_dt_rxport_ep_properties(struct ub960_data *priv,
4525 					       struct fwnode_handle *ep_fwnode,
4526 					       struct ub960_rxport *rxport)
4527 {
4528 	struct device *dev = &priv->client->dev;
4529 	struct v4l2_fwnode_endpoint vep = {};
4530 	unsigned int nport = rxport->nport;
4531 	bool hsync_hi;
4532 	bool vsync_hi;
4533 	int ret;
4534 
4535 	rxport->source.ep_fwnode = fwnode_graph_get_remote_endpoint(ep_fwnode);
4536 	if (!rxport->source.ep_fwnode) {
4537 		dev_err(dev, "rx%u: no remote endpoint\n", nport);
4538 		return -ENODEV;
4539 	}
4540 
4541 	/* We currently have properties only for RAW modes */
4542 
4543 	switch (rxport->rx_mode) {
4544 	case RXPORT_MODE_RAW10:
4545 	case RXPORT_MODE_RAW12_HF:
4546 	case RXPORT_MODE_RAW12_LF:
4547 		break;
4548 	default:
4549 		return 0;
4550 	}
4551 
4552 	vep.bus_type = V4L2_MBUS_PARALLEL;
4553 	ret = v4l2_fwnode_endpoint_parse(ep_fwnode, &vep);
4554 	if (ret) {
4555 		dev_err(dev, "rx%u: failed to parse endpoint data\n", nport);
4556 		goto err_put_source_ep_fwnode;
4557 	}
4558 
4559 	hsync_hi = !!(vep.bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH);
4560 	vsync_hi = !!(vep.bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH);
4561 
4562 	/* The LineValid and FrameValid polarities are the inverse of h/vsync */
4563 	rxport->lv_fv_pol = (hsync_hi ? UB960_RR_PORT_CONFIG2_LV_POL_LOW : 0) |
4564 			    (vsync_hi ? UB960_RR_PORT_CONFIG2_FV_POL_LOW : 0);
4565 
4566 	return 0;
4567 
4568 err_put_source_ep_fwnode:
4569 	fwnode_handle_put(rxport->source.ep_fwnode);
4570 	return ret;
4571 }
4572 
4573 static int ub960_parse_dt_rxport(struct ub960_data *priv, unsigned int nport,
4574 				 struct fwnode_handle *link_fwnode,
4575 				 struct fwnode_handle *ep_fwnode)
4576 {
4577 	static const char *vpoc_names[UB960_MAX_RX_NPORTS] = {
4578 		"vpoc0", "vpoc1", "vpoc2", "vpoc3"
4579 	};
4580 	struct device *dev = &priv->client->dev;
4581 	struct ub960_rxport *rxport;
4582 	int ret;
4583 
4584 	rxport = kzalloc(sizeof(*rxport), GFP_KERNEL);
4585 	if (!rxport)
4586 		return -ENOMEM;
4587 
4588 	priv->rxports[nport] = rxport;
4589 
4590 	rxport->nport = nport;
4591 	rxport->priv = priv;
4592 
4593 	ret = ub960_parse_dt_rxport_link_properties(priv, link_fwnode, rxport);
4594 	if (ret)
4595 		goto err_free_rxport;
4596 
4597 	rxport->vpoc = devm_regulator_get_optional(dev, vpoc_names[nport]);
4598 	if (IS_ERR(rxport->vpoc)) {
4599 		ret = PTR_ERR(rxport->vpoc);
4600 		if (ret == -ENODEV) {
4601 			rxport->vpoc = NULL;
4602 		} else {
4603 			dev_err(dev, "rx%u: failed to get VPOC supply: %d\n",
4604 				nport, ret);
4605 			goto err_put_remote_fwnode;
4606 		}
4607 	}
4608 
4609 	ret = ub960_parse_dt_rxport_ep_properties(priv, ep_fwnode, rxport);
4610 	if (ret)
4611 		goto err_put_remote_fwnode;
4612 
4613 	mutex_init(&rxport->aliased_addrs_lock);
4614 
4615 	return 0;
4616 
4617 err_put_remote_fwnode:
4618 	fwnode_handle_put(rxport->ser.fwnode);
4619 err_free_rxport:
4620 	priv->rxports[nport] = NULL;
4621 	kfree(rxport);
4622 	return ret;
4623 }
4624 
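/*
 * Look up the link@N child of 'links_fwnode' whose 'reg' property matches
 * 'nport'. The child is returned with an elevated refcount; the caller
 * must drop it with fwnode_handle_put().
 */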
4625 static struct fwnode_handle *
4626 ub960_fwnode_get_link_by_regs(struct fwnode_handle *links_fwnode,
4627 			      unsigned int nport)
4628 {
4629 	struct fwnode_handle *link_fwnode;
4630 	int ret;
4631 
4632 	fwnode_for_each_child_node(links_fwnode, link_fwnode) {
4633 		u32 link_num;
4634 
4635 		if (!str_has_prefix(fwnode_get_name(link_fwnode), "link@"))
4636 			continue;
4637 
4638 		ret = fwnode_property_read_u32(link_fwnode, "reg", &link_num);
4639 		if (ret) {
4640 			fwnode_handle_put(link_fwnode);
4641 			return NULL;
4642 		}
4643 
4644 		if (nport == link_num)
4645 			return link_fwnode;
4646 	}
4647 
4648 	return NULL;
4649 }
4650 
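/*
 * Parse the 'links' node: the global strobe settings plus, for every RX
 * port that has both a link@N description and a connected endpoint, the
 * per-port link and endpoint properties.
 */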
4651 static int ub960_parse_dt_rxports(struct ub960_data *priv)
4652 {
4653 	struct device *dev = &priv->client->dev;
4654 	struct fwnode_handle *links_fwnode;
4655 	int ret;
4656 
4657 	links_fwnode = fwnode_get_named_child_node(dev_fwnode(dev), "links");
4658 	if (!links_fwnode) {
4659 		dev_err(dev, "'links' node missing\n");
4660 		return -ENODEV;
4661 	}
4662 
4663 	/* Defaults, recommended by TI */
4664 	priv->strobe.min = 2;
4665 	priv->strobe.max = 3;
4666 
4667 	priv->strobe.manual = fwnode_property_read_bool(links_fwnode, "ti,manual-strobe");
4668 
4669 	for_each_rxport(priv, it) {
4670 		struct fwnode_handle *link_fwnode;
4671 		struct fwnode_handle *ep_fwnode;
4672 		unsigned int nport = it.nport;
4673 
4674 		link_fwnode = ub960_fwnode_get_link_by_regs(links_fwnode, nport);
4675 		if (!link_fwnode)
4676 			continue;
4677 
4678 		ep_fwnode = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev),
4679 							    nport, 0, 0);
4680 		if (!ep_fwnode) {
4681 			fwnode_handle_put(link_fwnode);
4682 			continue;
4683 		}
4684 
4685 		ret = ub960_parse_dt_rxport(priv, nport, link_fwnode,
4686 					    ep_fwnode);
4687 
4688 		fwnode_handle_put(link_fwnode);
4689 		fwnode_handle_put(ep_fwnode);
4690 
4691 		if (ret) {
4692 			dev_err(dev, "rx%u: failed to parse RX port\n", nport);
4693 			goto err_put_links;
4694 		}
4695 	}
4696 
4697 	fwnode_handle_put(links_fwnode);
4698 
4699 	return 0;
4700 
4701 err_put_links:
4702 	fwnode_handle_put(links_fwnode);
4703 
4704 	return ret;
4705 }
4706 
4707 static int ub960_parse_dt_txports(struct ub960_data *priv)
4708 {
4709 	struct device *dev = &priv->client->dev;
4710 	u32 nport;
4711 	int ret;
4712 
4713 	for (nport = 0; nport < priv->hw_data->num_txports; nport++) {
4714 		unsigned int port = nport + priv->hw_data->num_rxports;
4715 		struct fwnode_handle *ep_fwnode;
4716 
4717 		ep_fwnode = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev),
4718 							    port, 0, 0);
4719 		if (!ep_fwnode)
4720 			continue;
4721 
4722 		ret = ub960_parse_dt_txport(priv, ep_fwnode, nport);
4723 
4724 		fwnode_handle_put(ep_fwnode);
4725 
4726 		if (ret)
4727 			break;
4728 	}
4729 
4730 	return 0;
4731 }
4732 
4733 static int ub960_parse_dt(struct ub960_data *priv)
4734 {
4735 	int ret;
4736 
4737 	ret = ub960_parse_dt_rxports(priv);
4738 	if (ret)
4739 		return ret;
4740 
4741 	ret = ub960_parse_dt_txports(priv);
4742 	if (ret)
4743 		goto err_free_rxports;
4744 
4745 	return 0;
4746 
4747 err_free_rxports:
4748 	ub960_rxport_free_ports(priv);
4749 
4750 	return ret;
4751 }
4752 
4753 static int ub960_notify_bound(struct v4l2_async_notifier *notifier,
4754 			      struct v4l2_subdev *subdev,
4755 			      struct v4l2_async_connection *asd)
4756 {
4757 	struct ub960_data *priv = sd_to_ub960(notifier->sd);
4758 	struct ub960_rxport *rxport = to_ub960_asd(asd)->rxport;
4759 	struct device *dev = &priv->client->dev;
4760 	u8 nport = rxport->nport;
4761 	int ret;
4762 
4763 	ret = media_entity_get_fwnode_pad(&subdev->entity,
4764 					  rxport->source.ep_fwnode,
4765 					  MEDIA_PAD_FL_SOURCE);
4766 	if (ret < 0) {
4767 		dev_err(dev, "Failed to find pad for %s\n", subdev->name);
4768 		return ret;
4769 	}
4770 
4771 	rxport->source.sd = subdev;
4772 	rxport->source.pad = ret;
4773 
4774 	ret = media_create_pad_link(&rxport->source.sd->entity,
4775 				    rxport->source.pad, &priv->sd.entity, nport,
4776 				    MEDIA_LNK_FL_ENABLED |
4777 					    MEDIA_LNK_FL_IMMUTABLE);
4778 	if (ret) {
4779 		dev_err(dev, "Unable to link %s:%u -> %s:%u\n",
4780 			rxport->source.sd->name, rxport->source.pad,
4781 			priv->sd.name, nport);
4782 		return ret;
4783 	}
4784 
4785 	for_each_active_rxport(priv, it) {
4786 		if (!it.rxport->source.sd) {
4787 			dev_dbg(dev, "Waiting for more subdevs to be bound\n");
4788 			return 0;
4789 		}
4790 	}
4791 
4792 	return 0;
4793 }
4794 
4795 static void ub960_notify_unbind(struct v4l2_async_notifier *notifier,
4796 				struct v4l2_subdev *subdev,
4797 				struct v4l2_async_connection *asd)
4798 {
4799 	struct ub960_rxport *rxport = to_ub960_asd(asd)->rxport;
4800 
4801 	rxport->source.sd = NULL;
4802 }
4803 
4804 static const struct v4l2_async_notifier_operations ub960_notify_ops = {
4805 	.bound = ub960_notify_bound,
4806 	.unbind = ub960_notify_unbind,
4807 };
4808 
4809 static int ub960_v4l2_notifier_register(struct ub960_data *priv)
4810 {
4811 	struct device *dev = &priv->client->dev;
4812 	int ret;
4813 
4814 	v4l2_async_subdev_nf_init(&priv->notifier, &priv->sd);
4815 
4816 	for_each_active_rxport(priv, it) {
4817 		struct ub960_asd *asd;
4818 
4819 		asd = v4l2_async_nf_add_fwnode(&priv->notifier,
4820 					       it.rxport->source.ep_fwnode,
4821 					       struct ub960_asd);
4822 		if (IS_ERR(asd)) {
4823 			dev_err(dev, "Failed to add subdev for source %u: %pe\n",
4824 				it.nport, asd);
4825 			v4l2_async_nf_cleanup(&priv->notifier);
4826 			return PTR_ERR(asd);
4827 		}
4828 
4829 		asd->rxport = it.rxport;
4830 	}
4831 
4832 	priv->notifier.ops = &ub960_notify_ops;
4833 
4834 	ret = v4l2_async_nf_register(&priv->notifier);
4835 	if (ret) {
4836 		dev_err(dev, "Failed to register subdev_notifier\n");
4837 		v4l2_async_nf_cleanup(&priv->notifier);
4838 		return ret;
4839 	}
4840 
4841 	return 0;
4842 }
4843 
4844 static void ub960_v4l2_notifier_unregister(struct ub960_data *priv)
4845 {
4846 	v4l2_async_nf_unregister(&priv->notifier);
4847 	v4l2_async_nf_cleanup(&priv->notifier);
4848 }
4849 
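/*
 * Register the V4L2 subdevice: one sink pad per RX port followed by one
 * source pad per TX port, a LINK_FREQ control, the async notifier for the
 * remote sources, and finally the async subdev itself.
 */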
4850 static int ub960_create_subdev(struct ub960_data *priv)
4851 {
4852 	struct device *dev = &priv->client->dev;
4853 	unsigned int i;
4854 	int ret;
4855 
4856 	v4l2_i2c_subdev_init(&priv->sd, priv->client, &ub960_subdev_ops);
4857 	priv->sd.internal_ops = &ub960_internal_ops;
4858 
4859 	v4l2_ctrl_handler_init(&priv->ctrl_handler, 1);
4860 	priv->sd.ctrl_handler = &priv->ctrl_handler;
4861 
4862 	v4l2_ctrl_new_int_menu(&priv->ctrl_handler, NULL, V4L2_CID_LINK_FREQ,
4863 			       ARRAY_SIZE(priv->tx_link_freq) - 1, 0,
4864 			       priv->tx_link_freq);
4865 
4866 	if (priv->ctrl_handler.error) {
4867 		ret = priv->ctrl_handler.error;
4868 		goto err_free_ctrl;
4869 	}
4870 
4871 	priv->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
4872 			  V4L2_SUBDEV_FL_STREAMS;
4873 	priv->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
4874 	priv->sd.entity.ops = &ub960_entity_ops;
4875 
4876 	for (i = 0; i < priv->hw_data->num_rxports + priv->hw_data->num_txports; i++) {
4877 		priv->pads[i].flags = ub960_pad_is_sink(priv, i) ?
4878 					      MEDIA_PAD_FL_SINK :
4879 					      MEDIA_PAD_FL_SOURCE;
4880 	}
4881 
4882 	ret = media_entity_pads_init(&priv->sd.entity,
4883 				     priv->hw_data->num_rxports +
4884 					     priv->hw_data->num_txports,
4885 				     priv->pads);
4886 	if (ret)
4887 		goto err_free_ctrl;
4888 
4889 	priv->sd.state_lock = priv->sd.ctrl_handler->lock;
4890 
4891 	ret = v4l2_subdev_init_finalize(&priv->sd);
4892 	if (ret)
4893 		goto err_entity_cleanup;
4894 
4895 	ret = ub960_v4l2_notifier_register(priv);
4896 	if (ret) {
4897 		dev_err(dev, "v4l2 subdev notifier register failed: %d\n", ret);
4898 		goto err_subdev_cleanup;
4899 	}
4900 
4901 	ret = v4l2_async_register_subdev(&priv->sd);
4902 	if (ret) {
4903 		dev_err(dev, "v4l2_async_register_subdev error: %d\n", ret);
4904 		goto err_unreg_notif;
4905 	}
4906 
4907 	return 0;
4908 
4909 err_unreg_notif:
4910 	ub960_v4l2_notifier_unregister(priv);
4911 err_subdev_cleanup:
4912 	v4l2_subdev_cleanup(&priv->sd);
4913 err_entity_cleanup:
4914 	media_entity_cleanup(&priv->sd.entity);
4915 err_free_ctrl:
4916 	v4l2_ctrl_handler_free(&priv->ctrl_handler);
4917 
4918 	return ret;
4919 }
4920 
4921 static void ub960_destroy_subdev(struct ub960_data *priv)
4922 {
4923 	ub960_v4l2_notifier_unregister(priv);
4924 	v4l2_async_unregister_subdev(&priv->sd);
4925 
4926 	v4l2_subdev_cleanup(&priv->sd);
4927 
4928 	media_entity_cleanup(&priv->sd.entity);
4929 	v4l2_ctrl_handler_free(&priv->ctrl_handler);
4930 }
4931 
4932 static const struct regmap_config ub960_regmap_config = {
4933 	.name = "ds90ub960",
4934 
4935 	.reg_bits = 8,
4936 	.val_bits = 8,
4937 
4938 	.max_register = 0xff,
4939 
4940 	/*
4941 	 * We do locking in the driver to cover the TX/RX port selection and the
4942 	 * indirect register access.
4943 	 */
4944 	.disable_locking = true,
4945 };
4946 
4947 static int ub960_get_hw_resources(struct ub960_data *priv)
4948 {
4949 	struct device *dev = &priv->client->dev;
4950 
4951 	priv->regmap = devm_regmap_init_i2c(priv->client, &ub960_regmap_config);
4952 	if (IS_ERR(priv->regmap))
4953 		return PTR_ERR(priv->regmap);
4954 
4955 	priv->vddio = devm_regulator_get(dev, "vddio");
4956 	if (IS_ERR(priv->vddio))
4957 		return dev_err_probe(dev, PTR_ERR(priv->vddio),
4958 				     "cannot get VDDIO regulator\n");
4959 
4960 	/* get power-down pin from DT */
4961 	priv->pd_gpio =
4962 		devm_gpiod_get_optional(dev, "powerdown", GPIOD_OUT_HIGH);
4963 	if (IS_ERR(priv->pd_gpio))
4964 		return dev_err_probe(dev, PTR_ERR(priv->pd_gpio),
4965 				     "Cannot get powerdown GPIO\n");
4966 
4967 	priv->refclk = devm_clk_get(dev, "refclk");
4968 	if (IS_ERR(priv->refclk))
4969 		return dev_err_probe(dev, PTR_ERR(priv->refclk),
4970 				     "Cannot get REFCLK\n");
4971 
4972 	return 0;
4973 }
4974 
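/*
 * Power up the core: enable the VDDIO supply and REFCLK, toggle the
 * optional powerdown GPIO, soft-reset the chip and verify that its
 * registers can be read before applying the initial configuration.
 */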
4975 static int ub960_enable_core_hw(struct ub960_data *priv)
4976 {
4977 	struct device *dev = &priv->client->dev;
4978 	u8 rev_mask;
4979 	int ret;
4980 	u8 dev_sts;
4981 	u8 refclk_freq;
4982 
4983 	ret = regulator_enable(priv->vddio);
4984 	if (ret)
4985 		return dev_err_probe(dev, ret,
4986 				     "failed to enable VDDIO regulator\n");
4987 
4988 	ret = clk_prepare_enable(priv->refclk);
4989 	if (ret) {
4990 		dev_err_probe(dev, ret, "Failed to enable refclk\n");
4991 		goto err_disable_vddio;
4992 	}
4993 
4994 	if (priv->pd_gpio) {
4995 		gpiod_set_value_cansleep(priv->pd_gpio, 1);
4996 		/* wait min 2 ms for reset to complete */
4997 		fsleep(2000);
4998 		gpiod_set_value_cansleep(priv->pd_gpio, 0);
4999 		/* wait min 2 ms for power up to finish */
5000 		fsleep(2000);
5001 	}
5002 
5003 	ret = ub960_reset(priv, true);
5004 	if (ret)
5005 		goto err_pd_gpio;
5006 
5007 	/* Check at runtime that the registers are accessible */
5008 	ret = ub960_read(priv, UB960_SR_REV_MASK, &rev_mask, NULL);
5009 	if (ret) {
5010 		dev_err_probe(dev, ret, "Cannot read first register, abort\n");
5011 		goto err_pd_gpio;
5012 	}
5013 
5014 	dev_dbg(dev, "Found %s (rev/mask %#04x)\n", priv->hw_data->model,
5015 		rev_mask);
5016 
5017 	ret = ub960_read(priv, UB960_SR_DEVICE_STS, &dev_sts, NULL);
5018 	if (ret)
5019 		goto err_pd_gpio;
5020 
5021 	if (priv->hw_data->is_ub9702)
5022 		ret = ub960_read(priv, UB9702_SR_REFCLK_FREQ, &refclk_freq,
5023 				 NULL);
5024 	else
5025 		ret = ub960_read(priv, UB960_XR_REFCLK_FREQ, &refclk_freq,
5026 				 NULL);
5027 	if (ret)
5028 		goto err_pd_gpio;
5029 
5030 	dev_dbg(dev, "refclk valid %u freq %u MHz (clk fw freq %lu MHz)\n",
5031 		!!(dev_sts & BIT(4)), refclk_freq,
5032 		clk_get_rate(priv->refclk) / HZ_PER_MHZ);
5033 
5034 	/* Disable all RX ports by default */
5035 	ret = ub960_write(priv, UB960_SR_RX_PORT_CTL, 0, NULL);
5036 	if (ret)
5037 		goto err_pd_gpio;
5038 
5039 	/* release GPIO lock */
5040 	if (priv->hw_data->is_ub9702) {
5041 		ret = ub960_update_bits(priv, UB960_SR_RESET,
5042 					UB960_SR_RESET_GPIO_LOCK_RELEASE,
5043 					UB960_SR_RESET_GPIO_LOCK_RELEASE,
5044 					NULL);
5045 		if (ret)
5046 			goto err_pd_gpio;
5047 	}
5048 
5049 	return 0;
5050 
5051 err_pd_gpio:
5052 	gpiod_set_value_cansleep(priv->pd_gpio, 1);
5053 	clk_disable_unprepare(priv->refclk);
5054 err_disable_vddio:
5055 	regulator_disable(priv->vddio);
5056 
5057 	return ret;
5058 }
5059 
5060 static void ub960_disable_core_hw(struct ub960_data *priv)
5061 {
5062 	gpiod_set_value_cansleep(priv->pd_gpio, 1);
5063 	clk_disable_unprepare(priv->refclk);
5064 	regulator_disable(priv->vddio);
5065 }
5066 
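/*
 * Probe: acquire resources, power up the core, parse the firmware
 * description, bring up the TX/RX ports, VPOCs, the I2C address translator
 * and the serializers, and finally create the subdev. The error path
 * unwinds in reverse order, as does ub960_remove().
 */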
5067 static int ub960_probe(struct i2c_client *client)
5068 {
5069 	struct device *dev = &client->dev;
5070 	struct ub960_data *priv;
5071 	int ret;
5072 
5073 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
5074 	if (!priv)
5075 		return -ENOMEM;
5076 
5077 	priv->client = client;
5078 
5079 	priv->hw_data = device_get_match_data(dev);
5080 
5081 	mutex_init(&priv->reg_lock);
5082 
5083 	INIT_DELAYED_WORK(&priv->poll_work, ub960_handler_work);
5084 
5085 	/*
5086 	 * Initialize these to invalid values so that the first reg writes will
5087 	 * configure the target.
5088 	 */
5089 	priv->reg_current.indirect_target = 0xff;
5090 	priv->reg_current.rxport = 0xff;
5091 	priv->reg_current.txport = 0xff;
5092 
5093 	ret = ub960_get_hw_resources(priv);
5094 	if (ret)
5095 		goto err_mutex_destroy;
5096 
5097 	ret = ub960_enable_core_hw(priv);
5098 	if (ret)
5099 		goto err_mutex_destroy;
5100 
5101 	ret = ub960_parse_dt(priv);
5102 	if (ret)
5103 		goto err_disable_core_hw;
5104 
5105 	ret = ub960_init_tx_ports(priv);
5106 	if (ret)
5107 		goto err_free_ports;
5108 
5109 	ret = ub960_rxport_enable_vpocs(priv);
5110 	if (ret)
5111 		goto err_free_ports;
5112 
5113 	if (priv->hw_data->is_ub9702)
5114 		ret = ub960_init_rx_ports_ub9702(priv);
5115 	else
5116 		ret = ub960_init_rx_ports_ub960(priv);
5117 
5118 	if (ret)
5119 		goto err_disable_vpocs;
5120 
5121 	ret = ub960_init_atr(priv);
5122 	if (ret)
5123 		goto err_disable_vpocs;
5124 
5125 	ret = ub960_rxport_add_serializers(priv);
5126 	if (ret)
5127 		goto err_uninit_atr;
5128 
5129 	ret = ub960_create_subdev(priv);
5130 	if (ret)
5131 		goto err_free_sers;
5132 
5133 	if (client->irq)
5134 		dev_warn(dev, "irq support not implemented, using polling\n");
5135 
5136 	schedule_delayed_work(&priv->poll_work,
5137 			      msecs_to_jiffies(UB960_POLL_TIME_MS));
5138 
5139 #ifdef UB960_DEBUG_I2C_RX_ID
5140 	for_each_rxport(priv, it)
5141 		ub960_write(priv, UB960_SR_I2C_RX_ID(it.nport),
5142 			    (UB960_DEBUG_I2C_RX_ID + it.nport) << 1, NULL);
5143 #endif
5144 
5145 	return 0;
5146 
5147 err_free_sers:
5148 	ub960_rxport_remove_serializers(priv);
5149 err_uninit_atr:
5150 	ub960_uninit_atr(priv);
5151 err_disable_vpocs:
5152 	ub960_rxport_disable_vpocs(priv);
5153 err_free_ports:
5154 	ub960_rxport_free_ports(priv);
5155 	ub960_txport_free_ports(priv);
5156 err_disable_core_hw:
5157 	ub960_disable_core_hw(priv);
5158 err_mutex_destroy:
5159 	mutex_destroy(&priv->reg_lock);
5160 	return ret;
5161 }
5162 
5163 static void ub960_remove(struct i2c_client *client)
5164 {
5165 	struct v4l2_subdev *sd = i2c_get_clientdata(client);
5166 	struct ub960_data *priv = sd_to_ub960(sd);
5167 
5168 	cancel_delayed_work_sync(&priv->poll_work);
5169 
5170 	ub960_destroy_subdev(priv);
5171 	ub960_rxport_remove_serializers(priv);
5172 	ub960_uninit_atr(priv);
5173 	ub960_rxport_disable_vpocs(priv);
5174 	ub960_rxport_free_ports(priv);
5175 	ub960_txport_free_ports(priv);
5176 	ub960_disable_core_hw(priv);
5177 	mutex_destroy(&priv->reg_lock);
5178 }
5179 
5180 static const struct ub960_hw_data ds90ub960_hw = {
5181 	.model = "ub960",
5182 	.num_rxports = 4,
5183 	.num_txports = 2,
5184 };
5185 
5186 static const struct ub960_hw_data ds90ub9702_hw = {
5187 	.model = "ub9702",
5188 	.num_rxports = 4,
5189 	.num_txports = 2,
5190 	.is_ub9702 = true,
5191 	.is_fpdlink4 = true,
5192 };
5193 
5194 static const struct i2c_device_id ub960_id[] = {
5195 	{ "ds90ub960-q1", (kernel_ulong_t)&ds90ub960_hw },
5196 	{ "ds90ub9702-q1", (kernel_ulong_t)&ds90ub9702_hw },
5197 	{}
5198 };
5199 MODULE_DEVICE_TABLE(i2c, ub960_id);
5200 
5201 static const struct of_device_id ub960_dt_ids[] = {
5202 	{ .compatible = "ti,ds90ub960-q1", .data = &ds90ub960_hw },
5203 	{ .compatible = "ti,ds90ub9702-q1", .data = &ds90ub9702_hw },
5204 	{}
5205 };
5206 MODULE_DEVICE_TABLE(of, ub960_dt_ids);
5207 
5208 static struct i2c_driver ds90ub960_driver = {
5209 	.probe		= ub960_probe,
5210 	.remove		= ub960_remove,
5211 	.id_table	= ub960_id,
5212 	.driver = {
5213 		.name	= "ds90ub960",
5214 		.of_match_table = ub960_dt_ids,
5215 	},
5216 };
5217 module_i2c_driver(ds90ub960_driver);
5218 
5219 MODULE_LICENSE("GPL");
5220 MODULE_DESCRIPTION("Texas Instruments FPD-Link III/IV Deserializers Driver");
5221 MODULE_AUTHOR("Luca Ceresoli <luca@lucaceresoli.net>");
5222 MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>");
5223 MODULE_IMPORT_NS("I2C_ATR");
5224