1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Driver for the Texas Instruments DS90UB960-Q1 video deserializer 4 * 5 * Copyright (c) 2019 Luca Ceresoli <luca@lucaceresoli.net> 6 * Copyright (c) 2023 Tomi Valkeinen <tomi.valkeinen@ideasonboard.com> 7 */ 8 9 /* 10 * (Possible) TODOs: 11 * 12 * - PM for serializer and remote peripherals. We need to manage: 13 * - VPOC 14 * - Power domain? Regulator? Somehow any remote device should be able to 15 * cause the VPOC to be turned on. 16 * - Link between the deserializer and the serializer 17 * - Related to VPOC management. We probably always want to turn on the VPOC 18 * and then enable the link. 19 * - Serializer's services: i2c, gpios, power 20 * - The serializer needs to resume before the remote peripherals can 21 * e.g. use the i2c. 22 * - How to handle gpios? Reserving a gpio essentially keeps the provider 23 * (serializer) always powered on. 24 * - Do we need a new bus for the FPD-Link? At the moment the serializers 25 * are children of the same i2c-adapter where the deserializer resides. 26 * - i2c-atr could be made embeddable instead of allocatable. 
27 */ 28 29 #include <linux/bitops.h> 30 #include <linux/clk.h> 31 #include <linux/delay.h> 32 #include <linux/fwnode.h> 33 #include <linux/gpio/consumer.h> 34 #include <linux/i2c-atr.h> 35 #include <linux/i2c.h> 36 #include <linux/init.h> 37 #include <linux/interrupt.h> 38 #include <linux/kernel.h> 39 #include <linux/kthread.h> 40 #include <linux/module.h> 41 #include <linux/mutex.h> 42 #include <linux/property.h> 43 #include <linux/regmap.h> 44 #include <linux/regulator/consumer.h> 45 #include <linux/slab.h> 46 #include <linux/units.h> 47 #include <linux/workqueue.h> 48 49 #include <media/i2c/ds90ub9xx.h> 50 #include <media/mipi-csi2.h> 51 #include <media/v4l2-ctrls.h> 52 #include <media/v4l2-fwnode.h> 53 #include <media/v4l2-subdev.h> 54 55 #define MHZ(v) ((u32)((v) * HZ_PER_MHZ)) 56 57 /* 58 * If this is defined, the i2c addresses from UB960_DEBUG_I2C_RX_ID to 59 * UB960_DEBUG_I2C_RX_ID + 3 can be used to access the paged RX port registers 60 * directly. 61 * 62 * Only for debug purposes. 
63 */ 64 /* #define UB960_DEBUG_I2C_RX_ID 0x40 */ 65 66 #define UB960_POLL_TIME_MS 500 67 68 #define UB960_MAX_RX_NPORTS 4 69 #define UB960_MAX_TX_NPORTS 2 70 #define UB960_MAX_NPORTS (UB960_MAX_RX_NPORTS + UB960_MAX_TX_NPORTS) 71 72 #define UB960_MAX_PORT_ALIASES 8 73 74 #define UB960_NUM_BC_GPIOS 4 75 76 /* 77 * Register map 78 * 79 * 0x00-0x32 Shared (UB960_SR) 80 * 0x33-0x3a CSI-2 TX (per-port paged on DS90UB960, shared on 954) (UB960_TR) 81 * 0x4c Shared (UB960_SR) 82 * 0x4d-0x7f FPD-Link RX, per-port paged (UB960_RR) 83 * 0xb0-0xbf Shared (UB960_SR) 84 * 0xd0-0xdf FPD-Link RX, per-port paged (UB960_RR) 85 * 0xf0-0xf5 Shared (UB960_SR) 86 * 0xf8-0xfb Shared (UB960_SR) 87 * All others Reserved 88 * 89 * Register prefixes: 90 * UB960_SR_* = Shared register 91 * UB960_RR_* = FPD-Link RX, per-port paged register 92 * UB960_TR_* = CSI-2 TX, per-port paged register 93 * UB960_XR_* = Reserved register 94 * UB960_IR_* = Indirect register 95 */ 96 97 #define UB960_SR_I2C_DEV_ID 0x00 98 #define UB960_SR_RESET 0x01 99 #define UB960_SR_RESET_DIGITAL_RESET1 BIT(1) 100 #define UB960_SR_RESET_DIGITAL_RESET0 BIT(0) 101 #define UB960_SR_RESET_GPIO_LOCK_RELEASE BIT(5) 102 103 #define UB960_SR_GEN_CONFIG 0x02 104 #define UB960_SR_REV_MASK 0x03 105 #define UB960_SR_DEVICE_STS 0x04 106 #define UB960_SR_PAR_ERR_THOLD_HI 0x05 107 #define UB960_SR_PAR_ERR_THOLD_LO 0x06 108 #define UB960_SR_BCC_WDOG_CTL 0x07 109 #define UB960_SR_I2C_CTL1 0x08 110 #define UB960_SR_I2C_CTL2 0x09 111 #define UB960_SR_SCL_HIGH_TIME 0x0a 112 #define UB960_SR_SCL_LOW_TIME 0x0b 113 #define UB960_SR_RX_PORT_CTL 0x0c 114 #define UB960_SR_IO_CTL 0x0d 115 #define UB960_SR_GPIO_PIN_STS 0x0e 116 #define UB960_SR_GPIO_INPUT_CTL 0x0f 117 #define UB960_SR_GPIO_PIN_CTL(n) (0x10 + (n)) /* n < UB960_NUM_GPIOS */ 118 #define UB960_SR_GPIO_PIN_CTL_GPIO_OUT_SEL 5 119 #define UB960_SR_GPIO_PIN_CTL_GPIO_OUT_SRC_SHIFT 2 120 #define UB960_SR_GPIO_PIN_CTL_GPIO_OUT_EN BIT(0) 121 122 #define UB960_SR_FS_CTL 0x18 123 #define 
UB960_SR_FS_HIGH_TIME_1 0x19 124 #define UB960_SR_FS_HIGH_TIME_0 0x1a 125 #define UB960_SR_FS_LOW_TIME_1 0x1b 126 #define UB960_SR_FS_LOW_TIME_0 0x1c 127 #define UB960_SR_MAX_FRM_HI 0x1d 128 #define UB960_SR_MAX_FRM_LO 0x1e 129 #define UB960_SR_CSI_PLL_CTL 0x1f 130 131 #define UB960_SR_FWD_CTL1 0x20 132 #define UB960_SR_FWD_CTL1_PORT_DIS(n) BIT((n) + 4) 133 134 #define UB960_SR_FWD_CTL2 0x21 135 #define UB960_SR_FWD_STS 0x22 136 137 #define UB960_SR_INTERRUPT_CTL 0x23 138 #define UB960_SR_INTERRUPT_CTL_INT_EN BIT(7) 139 #define UB960_SR_INTERRUPT_CTL_IE_CSI_TX0 BIT(4) 140 #define UB960_SR_INTERRUPT_CTL_IE_RX(n) BIT((n)) /* rxport[n] IRQ */ 141 142 #define UB960_SR_INTERRUPT_STS 0x24 143 #define UB960_SR_INTERRUPT_STS_INT BIT(7) 144 #define UB960_SR_INTERRUPT_STS_IS_CSI_TX(n) BIT(4 + (n)) /* txport[n] IRQ */ 145 #define UB960_SR_INTERRUPT_STS_IS_RX(n) BIT((n)) /* rxport[n] IRQ */ 146 147 #define UB960_SR_TS_CONFIG 0x25 148 #define UB960_SR_TS_CONTROL 0x26 149 #define UB960_SR_TS_LINE_HI 0x27 150 #define UB960_SR_TS_LINE_LO 0x28 151 #define UB960_SR_TS_STATUS 0x29 152 #define UB960_SR_TIMESTAMP_P0_HI 0x2a 153 #define UB960_SR_TIMESTAMP_P0_LO 0x2b 154 #define UB960_SR_TIMESTAMP_P1_HI 0x2c 155 #define UB960_SR_TIMESTAMP_P1_LO 0x2d 156 157 #define UB960_SR_CSI_PORT_SEL 0x32 158 159 #define UB960_TR_CSI_CTL 0x33 160 #define UB960_TR_CSI_CTL_CSI_CAL_EN BIT(6) 161 #define UB960_TR_CSI_CTL_CSI_CONTS_CLOCK BIT(1) 162 #define UB960_TR_CSI_CTL_CSI_ENABLE BIT(0) 163 164 #define UB960_TR_CSI_CTL2 0x34 165 #define UB960_TR_CSI_STS 0x35 166 #define UB960_TR_CSI_TX_ICR 0x36 167 168 #define UB960_TR_CSI_TX_ISR 0x37 169 #define UB960_TR_CSI_TX_ISR_IS_CSI_SYNC_ERROR BIT(3) 170 #define UB960_TR_CSI_TX_ISR_IS_CSI_PASS_ERROR BIT(1) 171 172 #define UB960_TR_CSI_TEST_CTL 0x38 173 #define UB960_TR_CSI_TEST_PATT_HI 0x39 174 #define UB960_TR_CSI_TEST_PATT_LO 0x3a 175 176 #define UB960_XR_SFILTER_CFG 0x41 177 #define UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT 4 178 #define 
UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT 0 179 180 #define UB960_XR_AEQ_CTL1 0x42 181 #define UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_FPD_CLK BIT(6) 182 #define UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_ENCODING BIT(5) 183 #define UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_PARITY BIT(4) 184 #define UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_MASK \ 185 (UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_FPD_CLK | \ 186 UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_ENCODING | \ 187 UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_PARITY) 188 #define UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN BIT(0) 189 190 #define UB960_XR_AEQ_ERR_THOLD 0x43 191 192 #define UB960_RR_BCC_ERR_CTL 0x46 193 #define UB960_RR_BCC_STATUS 0x47 194 #define UB960_RR_BCC_STATUS_SEQ_ERROR BIT(5) 195 #define UB960_RR_BCC_STATUS_MASTER_ERR BIT(4) 196 #define UB960_RR_BCC_STATUS_MASTER_TO BIT(3) 197 #define UB960_RR_BCC_STATUS_SLAVE_ERR BIT(2) 198 #define UB960_RR_BCC_STATUS_SLAVE_TO BIT(1) 199 #define UB960_RR_BCC_STATUS_RESP_ERR BIT(0) 200 #define UB960_RR_BCC_STATUS_ERROR_MASK \ 201 (UB960_RR_BCC_STATUS_SEQ_ERROR | UB960_RR_BCC_STATUS_MASTER_ERR | \ 202 UB960_RR_BCC_STATUS_MASTER_TO | UB960_RR_BCC_STATUS_SLAVE_ERR | \ 203 UB960_RR_BCC_STATUS_SLAVE_TO | UB960_RR_BCC_STATUS_RESP_ERR) 204 205 #define UB960_RR_FPD3_CAP 0x4a 206 #define UB960_RR_RAW_EMBED_DTYPE 0x4b 207 #define UB960_RR_RAW_EMBED_DTYPE_LINES_SHIFT 6 208 209 #define UB960_SR_FPD3_PORT_SEL 0x4c 210 211 #define UB960_RR_RX_PORT_STS1 0x4d 212 #define UB960_RR_RX_PORT_STS1_BCC_CRC_ERROR BIT(5) 213 #define UB960_RR_RX_PORT_STS1_LOCK_STS_CHG BIT(4) 214 #define UB960_RR_RX_PORT_STS1_BCC_SEQ_ERROR BIT(3) 215 #define UB960_RR_RX_PORT_STS1_PARITY_ERROR BIT(2) 216 #define UB960_RR_RX_PORT_STS1_PORT_PASS BIT(1) 217 #define UB960_RR_RX_PORT_STS1_LOCK_STS BIT(0) 218 #define UB960_RR_RX_PORT_STS1_ERROR_MASK \ 219 (UB960_RR_RX_PORT_STS1_BCC_CRC_ERROR | \ 220 UB960_RR_RX_PORT_STS1_BCC_SEQ_ERROR | \ 221 UB960_RR_RX_PORT_STS1_PARITY_ERROR) 222 223 #define UB960_RR_RX_PORT_STS2 0x4e 224 #define UB960_RR_RX_PORT_STS2_LINE_LEN_UNSTABLE BIT(7) 225 #define 
UB960_RR_RX_PORT_STS2_LINE_LEN_CHG BIT(6) 226 #define UB960_RR_RX_PORT_STS2_FPD3_ENCODE_ERROR BIT(5) 227 #define UB960_RR_RX_PORT_STS2_BUFFER_ERROR BIT(4) 228 #define UB960_RR_RX_PORT_STS2_CSI_ERROR BIT(3) 229 #define UB960_RR_RX_PORT_STS2_FREQ_STABLE BIT(2) 230 #define UB960_RR_RX_PORT_STS2_CABLE_FAULT BIT(1) 231 #define UB960_RR_RX_PORT_STS2_LINE_CNT_CHG BIT(0) 232 #define UB960_RR_RX_PORT_STS2_ERROR_MASK \ 233 UB960_RR_RX_PORT_STS2_BUFFER_ERROR 234 235 #define UB960_RR_RX_FREQ_HIGH 0x4f 236 #define UB960_RR_RX_FREQ_LOW 0x50 237 #define UB960_RR_SENSOR_STS_0 0x51 238 #define UB960_RR_SENSOR_STS_1 0x52 239 #define UB960_RR_SENSOR_STS_2 0x53 240 #define UB960_RR_SENSOR_STS_3 0x54 241 #define UB960_RR_RX_PAR_ERR_HI 0x55 242 #define UB960_RR_RX_PAR_ERR_LO 0x56 243 #define UB960_RR_BIST_ERR_COUNT 0x57 244 245 #define UB960_RR_BCC_CONFIG 0x58 246 #define UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH BIT(6) 247 #define UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK GENMASK(2, 0) 248 249 #define UB960_RR_DATAPATH_CTL1 0x59 250 #define UB960_RR_DATAPATH_CTL2 0x5a 251 #define UB960_RR_SER_ID 0x5b 252 #define UB960_RR_SER_ALIAS_ID 0x5c 253 254 /* For these two register sets: n < UB960_MAX_PORT_ALIASES */ 255 #define UB960_RR_SLAVE_ID(n) (0x5d + (n)) 256 #define UB960_RR_SLAVE_ALIAS(n) (0x65 + (n)) 257 258 #define UB960_RR_PORT_CONFIG 0x6d 259 #define UB960_RR_PORT_CONFIG_FPD3_MODE_MASK GENMASK(1, 0) 260 261 #define UB960_RR_BC_GPIO_CTL(n) (0x6e + (n)) /* n < 2 */ 262 #define UB960_RR_RAW10_ID 0x70 263 #define UB960_RR_RAW10_ID_VC_SHIFT 6 264 #define UB960_RR_RAW10_ID_DT_SHIFT 0 265 266 #define UB960_RR_RAW12_ID 0x71 267 #define UB960_RR_CSI_VC_MAP 0x72 268 #define UB960_RR_CSI_VC_MAP_SHIFT(x) ((x) * 2) 269 270 #define UB960_RR_LINE_COUNT_HI 0x73 271 #define UB960_RR_LINE_COUNT_LO 0x74 272 #define UB960_RR_LINE_LEN_1 0x75 273 #define UB960_RR_LINE_LEN_0 0x76 274 #define UB960_RR_FREQ_DET_CTL 0x77 275 #define UB960_RR_MAILBOX_1 0x78 276 #define UB960_RR_MAILBOX_2 0x79 277 278 #define 
UB960_RR_CSI_RX_STS 0x7a 279 #define UB960_RR_CSI_RX_STS_LENGTH_ERR BIT(3) 280 #define UB960_RR_CSI_RX_STS_CKSUM_ERR BIT(2) 281 #define UB960_RR_CSI_RX_STS_ECC2_ERR BIT(1) 282 #define UB960_RR_CSI_RX_STS_ECC1_ERR BIT(0) 283 #define UB960_RR_CSI_RX_STS_ERROR_MASK \ 284 (UB960_RR_CSI_RX_STS_LENGTH_ERR | UB960_RR_CSI_RX_STS_CKSUM_ERR | \ 285 UB960_RR_CSI_RX_STS_ECC2_ERR | UB960_RR_CSI_RX_STS_ECC1_ERR) 286 287 #define UB960_RR_CSI_ERR_COUNTER 0x7b 288 #define UB960_RR_PORT_CONFIG2 0x7c 289 #define UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_MASK GENMASK(7, 6) 290 #define UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_SHIFT 6 291 292 #define UB960_RR_PORT_CONFIG2_LV_POL_LOW BIT(1) 293 #define UB960_RR_PORT_CONFIG2_FV_POL_LOW BIT(0) 294 295 #define UB960_RR_PORT_PASS_CTL 0x7d 296 #define UB960_RR_SEN_INT_RISE_CTL 0x7e 297 #define UB960_RR_SEN_INT_FALL_CTL 0x7f 298 299 #define UB960_SR_CSI_FRAME_COUNT_HI(n) (0x90 + 8 * (n)) 300 #define UB960_SR_CSI_FRAME_COUNT_LO(n) (0x91 + 8 * (n)) 301 #define UB960_SR_CSI_FRAME_ERR_COUNT_HI(n) (0x92 + 8 * (n)) 302 #define UB960_SR_CSI_FRAME_ERR_COUNT_LO(n) (0x93 + 8 * (n)) 303 #define UB960_SR_CSI_LINE_COUNT_HI(n) (0x94 + 8 * (n)) 304 #define UB960_SR_CSI_LINE_COUNT_LO(n) (0x95 + 8 * (n)) 305 #define UB960_SR_CSI_LINE_ERR_COUNT_HI(n) (0x96 + 8 * (n)) 306 #define UB960_SR_CSI_LINE_ERR_COUNT_LO(n) (0x97 + 8 * (n)) 307 308 #define UB960_XR_REFCLK_FREQ 0xa5 /* UB960 */ 309 310 #define UB960_RR_VC_ID_MAP(x) (0xa0 + (x)) /* UB9702 */ 311 312 #define UB960_SR_IND_ACC_CTL 0xb0 313 #define UB960_SR_IND_ACC_CTL_IA_AUTO_INC BIT(1) 314 315 #define UB960_SR_IND_ACC_ADDR 0xb1 316 #define UB960_SR_IND_ACC_DATA 0xb2 317 #define UB960_SR_BIST_CONTROL 0xb3 318 #define UB960_SR_MODE_IDX_STS 0xb8 319 #define UB960_SR_LINK_ERROR_COUNT 0xb9 320 #define UB960_SR_FPD3_ENC_CTL 0xba 321 #define UB960_SR_FV_MIN_TIME 0xbc 322 #define UB960_SR_GPIO_PD_CTL 0xbe 323 324 #define UB960_SR_FPD_RATE_CFG 0xc2 /* UB9702 */ 325 #define UB960_SR_CSI_PLL_DIV 0xc9 /* UB9702 */ 326 327 #define 
UB960_RR_PORT_DEBUG 0xd0 328 #define UB960_RR_AEQ_CTL2 0xd2 329 #define UB960_RR_AEQ_CTL2_SET_AEQ_FLOOR BIT(2) 330 331 #define UB960_RR_AEQ_STATUS 0xd3 332 #define UB960_RR_AEQ_STATUS_STATUS_2 GENMASK(5, 3) 333 #define UB960_RR_AEQ_STATUS_STATUS_1 GENMASK(2, 0) 334 335 #define UB960_RR_AEQ_BYPASS 0xd4 336 #define UB960_RR_AEQ_BYPASS_EQ_STAGE1_VALUE_SHIFT 5 337 #define UB960_RR_AEQ_BYPASS_EQ_STAGE1_VALUE_MASK GENMASK(7, 5) 338 #define UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_SHIFT 1 339 #define UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_MASK GENMASK(3, 1) 340 #define UB960_RR_AEQ_BYPASS_ENABLE BIT(0) 341 342 #define UB960_RR_AEQ_MIN_MAX 0xd5 343 #define UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT 4 344 #define UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT 0 345 346 #define UB960_RR_SFILTER_STS_0 0xd6 347 #define UB960_RR_SFILTER_STS_1 0xd7 348 #define UB960_RR_PORT_ICR_HI 0xd8 349 #define UB960_RR_PORT_ICR_LO 0xd9 350 #define UB960_RR_PORT_ISR_HI 0xda 351 #define UB960_RR_PORT_ISR_LO 0xdb 352 #define UB960_RR_FC_GPIO_STS 0xdc 353 #define UB960_RR_FC_GPIO_ICR 0xdd 354 #define UB960_RR_SEN_INT_RISE_STS 0xde 355 #define UB960_RR_SEN_INT_FALL_STS 0xdf 356 357 #define UB960_RR_CHANNEL_MODE 0xe4 /* UB9702 */ 358 359 #define UB960_SR_FPD3_RX_ID(n) (0xf0 + (n)) 360 #define UB960_SR_FPD3_RX_ID_LEN 6 361 362 #define UB960_SR_I2C_RX_ID(n) (0xf8 + (n)) 363 364 #define UB9702_SR_REFCLK_FREQ 0x3d 365 366 /* Indirect register blocks */ 367 #define UB960_IND_TARGET_PAT_GEN 0x00 368 #define UB960_IND_TARGET_RX_ANA(n) (0x01 + (n)) 369 #define UB960_IND_TARGET_CSI_ANA 0x07 370 371 /* UB960_IR_PGEN_*: Indirect Registers for Test Pattern Generator */ 372 373 #define UB960_IR_PGEN_CTL 0x01 374 #define UB960_IR_PGEN_CTL_PGEN_ENABLE BIT(0) 375 376 #define UB960_IR_PGEN_CFG 0x02 377 #define UB960_IR_PGEN_CSI_DI 0x03 378 #define UB960_IR_PGEN_LINE_SIZE1 0x04 379 #define UB960_IR_PGEN_LINE_SIZE0 0x05 380 #define UB960_IR_PGEN_BAR_SIZE1 0x06 381 #define UB960_IR_PGEN_BAR_SIZE0 0x07 382 #define UB960_IR_PGEN_ACT_LPF1 0x08 
#define UB960_IR_PGEN_ACT_LPF0			0x09
#define UB960_IR_PGEN_TOT_LPF1			0x0a
#define UB960_IR_PGEN_TOT_LPF0			0x0b
#define UB960_IR_PGEN_LINE_PD1			0x0c
#define UB960_IR_PGEN_LINE_PD0			0x0d
#define UB960_IR_PGEN_VBP			0x0e
#define UB960_IR_PGEN_VFP			0x0f
#define UB960_IR_PGEN_COLOR(n)			(0x10 + (n)) /* n < 15 */

#define UB960_IR_RX_ANA_STROBE_SET_CLK		0x08
#define UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY	BIT(3)
#define UB960_IR_RX_ANA_STROBE_SET_CLK_DELAY_MASK	GENMASK(2, 0)

#define UB960_IR_RX_ANA_STROBE_SET_DATA		0x09
#define UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY	BIT(3)
#define UB960_IR_RX_ANA_STROBE_SET_DATA_DELAY_MASK	GENMASK(2, 0)

/* EQ related */

#define UB960_MIN_AEQ_STROBE_POS	-7
#define UB960_MAX_AEQ_STROBE_POS	7

#define UB960_MANUAL_STROBE_EXTRA_DELAY	6

#define UB960_MIN_MANUAL_STROBE_POS	-(7 + UB960_MANUAL_STROBE_EXTRA_DELAY)
#define UB960_MAX_MANUAL_STROBE_POS	(7 + UB960_MANUAL_STROBE_EXTRA_DELAY)
#define UB960_NUM_MANUAL_STROBE_POS	(UB960_MAX_MANUAL_STROBE_POS - UB960_MIN_MANUAL_STROBE_POS + 1)

#define UB960_MIN_EQ_LEVEL	0
#define UB960_MAX_EQ_LEVEL	14
#define UB960_NUM_EQ_LEVELS	(UB960_MAX_EQ_LEVEL - UB960_MIN_EQ_LEVEL + 1)

/* Per-model capabilities, selected via the i2c device match data */
struct ub960_hw_data {
	const char *model;
	u8 num_rxports;
	u8 num_txports;
	bool is_ub9702;
	bool is_fpdlink4;
};

/* FPD-Link RX port input mode (raw parallel vs. CSI-2 forwarding) */
enum ub960_rxport_mode {
	RXPORT_MODE_RAW10 = 0,
	RXPORT_MODE_RAW12_HF = 1,
	RXPORT_MODE_RAW12_LF = 2,
	RXPORT_MODE_CSI2_SYNC = 3,
	RXPORT_MODE_CSI2_NONSYNC = 4,
	RXPORT_MODE_LAST = RXPORT_MODE_CSI2_NONSYNC,
};

/* Clock/data recovery mode of an RX port (FPD-Link III vs. IV) */
enum ub960_rxport_cdr {
	RXPORT_CDR_FPD3 = 0,
	RXPORT_CDR_FPD4 = 1,
	RXPORT_CDR_LAST = RXPORT_CDR_FPD4,
};

/* State of one FPD-Link RX port and its remote serializer */
struct ub960_rxport {
	struct ub960_data *priv;
	u8 nport;	/* RX port number, and index in priv->rxport[] */

	/* Remote source subdev feeding this RX port */
	struct {
		struct v4l2_subdev *sd;
		u16 pad;
		struct fwnode_handle *ep_fwnode;
	} source;

	/* Serializer */
	struct {
		struct fwnode_handle *fwnode;
		struct i2c_client *client;
		unsigned short alias; /* I2C alias (lower 7 bits) */
		struct ds90ub9xx_platform_data pdata;
	} ser;

	enum ub960_rxport_mode  rx_mode;
	enum ub960_rxport_cdr	cdr_mode;

	u8 lv_fv_pol;	/* LV and FV polarities */

	/* Optional power-over-coax supply for the remote side */
	struct regulator *vpoc;

	/* EQ settings */
	struct {
		bool manual_eq;

		s8 strobe_pos;

		/* aeq limits apply when manual_eq is false, manual otherwise */
		union {
			struct {
				u8 eq_level_min;
				u8 eq_level_max;
			} aeq;

			struct {
				u8 eq_level;
			} manual;
		};
	} eq;

	/* I2C clients currently mapped through the address translator */
	const struct i2c_client *aliased_clients[UB960_MAX_PORT_ALIASES];
};

/* Async connection wrapper, used to find the rxport for a bound subdev */
struct ub960_asd {
	struct v4l2_async_connection base;
	struct ub960_rxport *rxport;
};

static inline struct ub960_asd *to_ub960_asd(struct v4l2_async_connection *asd)
{
	return container_of(asd, struct ub960_asd, base);
}

/* State of one CSI-2 TX port */
struct ub960_txport {
	struct ub960_data *priv;
	u8 nport;	/* TX port number, and index in priv->txport[] */

	u32 num_data_lanes;
	bool non_continous_clk;
};

/* Main driver state, one instance per deserializer */
struct ub960_data {
	const struct ub960_hw_data *hw_data;
	struct i2c_client *client; /* for shared local registers */
	struct regmap *regmap;

	/* lock for register access */
	struct mutex reg_lock;

	struct clk *refclk;

	struct regulator *vddio;

	struct gpio_desc *pd_gpio;
	struct delayed_work poll_work;
	struct ub960_rxport *rxports[UB960_MAX_RX_NPORTS];
	struct ub960_txport *txports[UB960_MAX_TX_NPORTS];

	struct v4l2_subdev sd;
	struct media_pad pads[UB960_MAX_NPORTS];

	struct v4l2_ctrl_handler ctrl_handler;
	struct v4l2_async_notifier notifier;

	u32 tx_data_rate;		/* Nominal data rate (Gb/s) */
	s64 tx_link_freq[1];

	struct i2c_atr *atr;

	/*
	 * Cache of the currently selected paged-register targets, to avoid
	 * rewriting the page-select registers on every access. Protected by
	 * reg_lock.
	 */
	struct {
		u8 rxport;
		u8 txport;
		u8 indirect_target;
	} reg_current;

	bool streaming;

	u8 stored_fwd_ctl;

	u64 stream_enable_mask[UB960_MAX_NPORTS];

	/* These are common to all ports */
	struct {
		bool manual;

		s8 min;
		s8 max;
	} strobe;
};

static inline struct ub960_data *sd_to_ub960(struct v4l2_subdev *sd)
{
	return container_of(sd, struct ub960_data, sd);
}

/* Sink pads are the RX ports, numbered first */
static inline bool ub960_pad_is_sink(struct ub960_data *priv, u32 pad)
{
	return pad < priv->hw_data->num_rxports;
}

/* Source pads are the TX ports, numbered after the RX ports */
static inline bool ub960_pad_is_source(struct ub960_data *priv, u32 pad)
{
	return pad >= priv->hw_data->num_rxports;
}

/* Map a media pad index to the corresponding RX or TX port number */
static inline unsigned int ub960_pad_to_port(struct ub960_data *priv, u32 pad)
{
	if (ub960_pad_is_sink(priv, pad))
		return pad;
	else
		return pad - priv->hw_data->num_rxports;
}

/* One supported media bus format and its CSI-2 properties */
struct ub960_format_info {
	u32 code;
	u32 bpp;
	u8 datatype;
	bool meta;
};

/* Table of media bus codes the deserializer can forward */
static const struct ub960_format_info ub960_formats[] = {
	{ .code = MEDIA_BUS_FMT_RGB888_1X24, .bpp = 24, .datatype = MIPI_CSI2_DT_RGB888, },

	{ .code = MEDIA_BUS_FMT_YUYV8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
	{ .code = MEDIA_BUS_FMT_UYVY8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
	{ .code = MEDIA_BUS_FMT_VYUY8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
	{ .code = MEDIA_BUS_FMT_YVYU8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },

	{ .code = MEDIA_BUS_FMT_SBGGR8_1X8, .bpp = 8, .datatype = MIPI_CSI2_DT_RAW8, },
	{ .code = MEDIA_BUS_FMT_SGBRG8_1X8, .bpp = 8, .datatype = MIPI_CSI2_DT_RAW8, },
	{ .code = MEDIA_BUS_FMT_SGRBG8_1X8, .bpp = 8, .datatype = MIPI_CSI2_DT_RAW8, },
	{ .code = MEDIA_BUS_FMT_SRGGB8_1X8, .bpp = 8, .datatype = MIPI_CSI2_DT_RAW8, },

	{ .code = MEDIA_BUS_FMT_SBGGR10_1X10, .bpp = 10, .datatype = MIPI_CSI2_DT_RAW10, },
	{ .code = MEDIA_BUS_FMT_SGBRG10_1X10, .bpp = 10, .datatype = MIPI_CSI2_DT_RAW10, },
	{ .code = MEDIA_BUS_FMT_SGRBG10_1X10, .bpp = 10, .datatype = MIPI_CSI2_DT_RAW10, },
	{ .code = MEDIA_BUS_FMT_SRGGB10_1X10, .bpp = 10, .datatype = MIPI_CSI2_DT_RAW10, },

	{ .code = MEDIA_BUS_FMT_SBGGR12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
	{ .code = MEDIA_BUS_FMT_SGBRG12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
	{ .code = MEDIA_BUS_FMT_SGRBG12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
	{ .code = MEDIA_BUS_FMT_SRGGB12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
};
MEDIA_BUS_FMT_SGRBG10_1X10, .bpp = 10, .datatype = MIPI_CSI2_DT_RAW10, }, 597 { .code = MEDIA_BUS_FMT_SRGGB10_1X10, .bpp = 10, .datatype = MIPI_CSI2_DT_RAW10, }, 598 599 { .code = MEDIA_BUS_FMT_SBGGR12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, }, 600 { .code = MEDIA_BUS_FMT_SGBRG12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, }, 601 { .code = MEDIA_BUS_FMT_SGRBG12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, }, 602 { .code = MEDIA_BUS_FMT_SRGGB12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, }, 603 }; 604 605 static const struct ub960_format_info *ub960_find_format(u32 code) 606 { 607 unsigned int i; 608 609 for (i = 0; i < ARRAY_SIZE(ub960_formats); i++) { 610 if (ub960_formats[i].code == code) 611 return &ub960_formats[i]; 612 } 613 614 return NULL; 615 } 616 617 /* ----------------------------------------------------------------------------- 618 * Basic device access 619 */ 620 621 static int ub960_read(struct ub960_data *priv, u8 reg, u8 *val) 622 { 623 struct device *dev = &priv->client->dev; 624 unsigned int v; 625 int ret; 626 627 mutex_lock(&priv->reg_lock); 628 629 ret = regmap_read(priv->regmap, reg, &v); 630 if (ret) { 631 dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n", 632 __func__, reg, ret); 633 goto out_unlock; 634 } 635 636 *val = v; 637 638 out_unlock: 639 mutex_unlock(&priv->reg_lock); 640 641 return ret; 642 } 643 644 static int ub960_write(struct ub960_data *priv, u8 reg, u8 val) 645 { 646 struct device *dev = &priv->client->dev; 647 int ret; 648 649 mutex_lock(&priv->reg_lock); 650 651 ret = regmap_write(priv->regmap, reg, val); 652 if (ret) 653 dev_err(dev, "%s: cannot write register 0x%02x (%d)!\n", 654 __func__, reg, ret); 655 656 mutex_unlock(&priv->reg_lock); 657 658 return ret; 659 } 660 661 static int ub960_update_bits(struct ub960_data *priv, u8 reg, u8 mask, u8 val) 662 { 663 struct device *dev = &priv->client->dev; 664 int ret; 665 666 mutex_lock(&priv->reg_lock); 667 668 ret = 
regmap_update_bits(priv->regmap, reg, mask, val); 669 if (ret) 670 dev_err(dev, "%s: cannot update register 0x%02x (%d)!\n", 671 __func__, reg, ret); 672 673 mutex_unlock(&priv->reg_lock); 674 675 return ret; 676 } 677 678 static int ub960_read16(struct ub960_data *priv, u8 reg, u16 *val) 679 { 680 struct device *dev = &priv->client->dev; 681 __be16 __v; 682 int ret; 683 684 mutex_lock(&priv->reg_lock); 685 686 ret = regmap_bulk_read(priv->regmap, reg, &__v, sizeof(__v)); 687 if (ret) { 688 dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n", 689 __func__, reg, ret); 690 goto out_unlock; 691 } 692 693 *val = be16_to_cpu(__v); 694 695 out_unlock: 696 mutex_unlock(&priv->reg_lock); 697 698 return ret; 699 } 700 701 static int ub960_rxport_select(struct ub960_data *priv, u8 nport) 702 { 703 struct device *dev = &priv->client->dev; 704 int ret; 705 706 lockdep_assert_held(&priv->reg_lock); 707 708 if (priv->reg_current.rxport == nport) 709 return 0; 710 711 ret = regmap_write(priv->regmap, UB960_SR_FPD3_PORT_SEL, 712 (nport << 4) | BIT(nport)); 713 if (ret) { 714 dev_err(dev, "%s: cannot select rxport %d (%d)!\n", __func__, 715 nport, ret); 716 return ret; 717 } 718 719 priv->reg_current.rxport = nport; 720 721 return 0; 722 } 723 724 static int ub960_rxport_read(struct ub960_data *priv, u8 nport, u8 reg, u8 *val) 725 { 726 struct device *dev = &priv->client->dev; 727 unsigned int v; 728 int ret; 729 730 mutex_lock(&priv->reg_lock); 731 732 ret = ub960_rxport_select(priv, nport); 733 if (ret) 734 goto out_unlock; 735 736 ret = regmap_read(priv->regmap, reg, &v); 737 if (ret) { 738 dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n", 739 __func__, reg, ret); 740 goto out_unlock; 741 } 742 743 *val = v; 744 745 out_unlock: 746 mutex_unlock(&priv->reg_lock); 747 748 return ret; 749 } 750 751 static int ub960_rxport_write(struct ub960_data *priv, u8 nport, u8 reg, u8 val) 752 { 753 struct device *dev = &priv->client->dev; 754 int ret; 755 756 
mutex_lock(&priv->reg_lock); 757 758 ret = ub960_rxport_select(priv, nport); 759 if (ret) 760 goto out_unlock; 761 762 ret = regmap_write(priv->regmap, reg, val); 763 if (ret) 764 dev_err(dev, "%s: cannot write register 0x%02x (%d)!\n", 765 __func__, reg, ret); 766 767 out_unlock: 768 mutex_unlock(&priv->reg_lock); 769 770 return ret; 771 } 772 773 static int ub960_rxport_update_bits(struct ub960_data *priv, u8 nport, u8 reg, 774 u8 mask, u8 val) 775 { 776 struct device *dev = &priv->client->dev; 777 int ret; 778 779 mutex_lock(&priv->reg_lock); 780 781 ret = ub960_rxport_select(priv, nport); 782 if (ret) 783 goto out_unlock; 784 785 ret = regmap_update_bits(priv->regmap, reg, mask, val); 786 if (ret) 787 dev_err(dev, "%s: cannot update register 0x%02x (%d)!\n", 788 __func__, reg, ret); 789 790 out_unlock: 791 mutex_unlock(&priv->reg_lock); 792 793 return ret; 794 } 795 796 static int ub960_rxport_read16(struct ub960_data *priv, u8 nport, u8 reg, 797 u16 *val) 798 { 799 struct device *dev = &priv->client->dev; 800 __be16 __v; 801 int ret; 802 803 mutex_lock(&priv->reg_lock); 804 805 ret = ub960_rxport_select(priv, nport); 806 if (ret) 807 goto out_unlock; 808 809 ret = regmap_bulk_read(priv->regmap, reg, &__v, sizeof(__v)); 810 if (ret) { 811 dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n", 812 __func__, reg, ret); 813 goto out_unlock; 814 } 815 816 *val = be16_to_cpu(__v); 817 818 out_unlock: 819 mutex_unlock(&priv->reg_lock); 820 821 return ret; 822 } 823 824 static int ub960_txport_select(struct ub960_data *priv, u8 nport) 825 { 826 struct device *dev = &priv->client->dev; 827 int ret; 828 829 lockdep_assert_held(&priv->reg_lock); 830 831 if (priv->reg_current.txport == nport) 832 return 0; 833 834 ret = regmap_write(priv->regmap, UB960_SR_CSI_PORT_SEL, 835 (nport << 4) | BIT(nport)); 836 if (ret) { 837 dev_err(dev, "%s: cannot select tx port %d (%d)!\n", __func__, 838 nport, ret); 839 return ret; 840 } 841 842 priv->reg_current.txport = nport; 843 844 
return 0; 845 } 846 847 static int ub960_txport_read(struct ub960_data *priv, u8 nport, u8 reg, u8 *val) 848 { 849 struct device *dev = &priv->client->dev; 850 unsigned int v; 851 int ret; 852 853 mutex_lock(&priv->reg_lock); 854 855 ret = ub960_txport_select(priv, nport); 856 if (ret) 857 goto out_unlock; 858 859 ret = regmap_read(priv->regmap, reg, &v); 860 if (ret) { 861 dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n", 862 __func__, reg, ret); 863 goto out_unlock; 864 } 865 866 *val = v; 867 868 out_unlock: 869 mutex_unlock(&priv->reg_lock); 870 871 return ret; 872 } 873 874 static int ub960_txport_write(struct ub960_data *priv, u8 nport, u8 reg, u8 val) 875 { 876 struct device *dev = &priv->client->dev; 877 int ret; 878 879 mutex_lock(&priv->reg_lock); 880 881 ret = ub960_txport_select(priv, nport); 882 if (ret) 883 goto out_unlock; 884 885 ret = regmap_write(priv->regmap, reg, val); 886 if (ret) 887 dev_err(dev, "%s: cannot write register 0x%02x (%d)!\n", 888 __func__, reg, ret); 889 890 out_unlock: 891 mutex_unlock(&priv->reg_lock); 892 893 return ret; 894 } 895 896 static int ub960_txport_update_bits(struct ub960_data *priv, u8 nport, u8 reg, 897 u8 mask, u8 val) 898 { 899 struct device *dev = &priv->client->dev; 900 int ret; 901 902 mutex_lock(&priv->reg_lock); 903 904 ret = ub960_txport_select(priv, nport); 905 if (ret) 906 goto out_unlock; 907 908 ret = regmap_update_bits(priv->regmap, reg, mask, val); 909 if (ret) 910 dev_err(dev, "%s: cannot update register 0x%02x (%d)!\n", 911 __func__, reg, ret); 912 913 out_unlock: 914 mutex_unlock(&priv->reg_lock); 915 916 return ret; 917 } 918 919 static int ub960_select_ind_reg_block(struct ub960_data *priv, u8 block) 920 { 921 struct device *dev = &priv->client->dev; 922 int ret; 923 924 lockdep_assert_held(&priv->reg_lock); 925 926 if (priv->reg_current.indirect_target == block) 927 return 0; 928 929 ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_CTL, block << 2); 930 if (ret) { 931 dev_err(dev, "%s: 
cannot select indirect target %u (%d)!\n", 932 __func__, block, ret); 933 return ret; 934 } 935 936 priv->reg_current.indirect_target = block; 937 938 return 0; 939 } 940 941 static int ub960_read_ind(struct ub960_data *priv, u8 block, u8 reg, u8 *val) 942 { 943 struct device *dev = &priv->client->dev; 944 unsigned int v; 945 int ret; 946 947 mutex_lock(&priv->reg_lock); 948 949 ret = ub960_select_ind_reg_block(priv, block); 950 if (ret) 951 goto out_unlock; 952 953 ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_ADDR, reg); 954 if (ret) { 955 dev_err(dev, 956 "Write to IND_ACC_ADDR failed when reading %u:%x02x: %d\n", 957 block, reg, ret); 958 goto out_unlock; 959 } 960 961 ret = regmap_read(priv->regmap, UB960_SR_IND_ACC_DATA, &v); 962 if (ret) { 963 dev_err(dev, 964 "Write to IND_ACC_DATA failed when reading %u:%x02x: %d\n", 965 block, reg, ret); 966 goto out_unlock; 967 } 968 969 *val = v; 970 971 out_unlock: 972 mutex_unlock(&priv->reg_lock); 973 974 return ret; 975 } 976 977 static int ub960_write_ind(struct ub960_data *priv, u8 block, u8 reg, u8 val) 978 { 979 struct device *dev = &priv->client->dev; 980 int ret; 981 982 mutex_lock(&priv->reg_lock); 983 984 ret = ub960_select_ind_reg_block(priv, block); 985 if (ret) 986 goto out_unlock; 987 988 ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_ADDR, reg); 989 if (ret) { 990 dev_err(dev, 991 "Write to IND_ACC_ADDR failed when writing %u:%x02x: %d\n", 992 block, reg, ret); 993 goto out_unlock; 994 } 995 996 ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_DATA, val); 997 if (ret) { 998 dev_err(dev, 999 "Write to IND_ACC_DATA failed when writing %u:%x02x: %d\n", 1000 block, reg, ret); 1001 goto out_unlock; 1002 } 1003 1004 out_unlock: 1005 mutex_unlock(&priv->reg_lock); 1006 1007 return ret; 1008 } 1009 1010 static int ub960_ind_update_bits(struct ub960_data *priv, u8 block, u8 reg, 1011 u8 mask, u8 val) 1012 { 1013 struct device *dev = &priv->client->dev; 1014 int ret; 1015 1016 mutex_lock(&priv->reg_lock); 
1017 1018 ret = ub960_select_ind_reg_block(priv, block); 1019 if (ret) 1020 goto out_unlock; 1021 1022 ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_ADDR, reg); 1023 if (ret) { 1024 dev_err(dev, 1025 "Write to IND_ACC_ADDR failed when updating %u:%x02x: %d\n", 1026 block, reg, ret); 1027 goto out_unlock; 1028 } 1029 1030 ret = regmap_update_bits(priv->regmap, UB960_SR_IND_ACC_DATA, mask, 1031 val); 1032 if (ret) { 1033 dev_err(dev, 1034 "Write to IND_ACC_DATA failed when updating %u:%x02x: %d\n", 1035 block, reg, ret); 1036 goto out_unlock; 1037 } 1038 1039 out_unlock: 1040 mutex_unlock(&priv->reg_lock); 1041 1042 return ret; 1043 } 1044 1045 /* ----------------------------------------------------------------------------- 1046 * I2C-ATR (address translator) 1047 */ 1048 1049 static int ub960_atr_attach_client(struct i2c_atr *atr, u32 chan_id, 1050 const struct i2c_client *client, u16 alias) 1051 { 1052 struct ub960_data *priv = i2c_atr_get_driver_data(atr); 1053 struct ub960_rxport *rxport = priv->rxports[chan_id]; 1054 struct device *dev = &priv->client->dev; 1055 unsigned int reg_idx; 1056 1057 for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_clients); reg_idx++) { 1058 if (!rxport->aliased_clients[reg_idx]) 1059 break; 1060 } 1061 1062 if (reg_idx == ARRAY_SIZE(rxport->aliased_clients)) { 1063 dev_err(dev, "rx%u: alias pool exhausted\n", rxport->nport); 1064 return -EADDRNOTAVAIL; 1065 } 1066 1067 rxport->aliased_clients[reg_idx] = client; 1068 1069 ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ID(reg_idx), 1070 client->addr << 1); 1071 ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ALIAS(reg_idx), 1072 alias << 1); 1073 1074 dev_dbg(dev, "rx%u: client 0x%02x assigned alias 0x%02x at slot %u\n", 1075 rxport->nport, client->addr, alias, reg_idx); 1076 1077 return 0; 1078 } 1079 1080 static void ub960_atr_detach_client(struct i2c_atr *atr, u32 chan_id, 1081 const struct i2c_client *client) 1082 { 1083 struct ub960_data *priv = 
static const struct i2c_atr_ops ub960_atr_ops = {
	.attach_client = ub960_atr_attach_client,
	.detach_client = ub960_atr_detach_client,
};

/*
 * Create the I2C address translator on the parent adapter, with one
 * channel per RX port.
 */
static int ub960_init_atr(struct ub960_data *priv)
{
	struct device *dev = &priv->client->dev;
	struct i2c_adapter *parent_adap = priv->client->adapter;

	priv->atr = i2c_atr_new(parent_adap, dev, &ub960_atr_ops,
				priv->hw_data->num_rxports);
	if (IS_ERR(priv->atr))
		return PTR_ERR(priv->atr);

	i2c_atr_set_driver_data(priv->atr, priv);

	return 0;
}

static void ub960_uninit_atr(struct ub960_data *priv)
{
	i2c_atr_delete(priv->atr);
	priv->atr = NULL;
}

/* -----------------------------------------------------------------------------
 * TX ports
 */

/*
 * Parse the fwnode endpoint of TX port @nport and allocate its
 * ub960_txport. Validates that exactly one link frequency is given and
 * that the resulting data rate (2x the link frequency) is one of the
 * rates the device supports. On success the port is stored in
 * priv->txports[nport]; the caller owns freeing it later.
 */
static int ub960_parse_dt_txport(struct ub960_data *priv,
				 struct fwnode_handle *ep_fwnode,
				 u8 nport)
{
	struct device *dev = &priv->client->dev;
	struct v4l2_fwnode_endpoint vep = {};
	struct ub960_txport *txport;
	int ret;

	txport = kzalloc(sizeof(*txport), GFP_KERNEL);
	if (!txport)
		return -ENOMEM;

	txport->priv = priv;
	txport->nport = nport;

	vep.bus_type = V4L2_MBUS_CSI2_DPHY;
	ret = v4l2_fwnode_endpoint_alloc_parse(ep_fwnode, &vep);
	if (ret) {
		dev_err(dev, "tx%u: failed to parse endpoint data\n", nport);
		goto err_free_txport;
	}

	txport->non_continous_clk = vep.bus.mipi_csi2.flags &
				    V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK;

	txport->num_data_lanes = vep.bus.mipi_csi2.num_data_lanes;

	/* NOTE(review): fails silently here; a dev_err would aid debugging */
	if (vep.nr_of_link_frequencies != 1) {
		ret = -EINVAL;
		goto err_free_vep;
	}

	priv->tx_link_freq[0] = vep.link_frequencies[0];
	priv->tx_data_rate = priv->tx_link_freq[0] * 2;

	if (priv->tx_data_rate != MHZ(1600) &&
	    priv->tx_data_rate != MHZ(1200) &&
	    priv->tx_data_rate != MHZ(800) &&
	    priv->tx_data_rate != MHZ(400)) {
		dev_err(dev, "tx%u: invalid 'link-frequencies' value\n", nport);
		ret = -EINVAL;
		goto err_free_vep;
	}

	v4l2_fwnode_endpoint_free(&vep);

	priv->txports[nport] = txport;

	return 0;

err_free_vep:
	v4l2_fwnode_endpoint_free(&vep);
err_free_txport:
	kfree(txport);

	return ret;
}

/*
 * Read and warn about pending CSI-2 TX interrupt events. Reading
 * CSI_TX_ISR fetches the interrupt status for TX port @nport.
 */
static void ub960_csi_handle_events(struct ub960_data *priv, u8 nport)
{
	struct device *dev = &priv->client->dev;
	u8 csi_tx_isr;
	int ret;

	ret = ub960_txport_read(priv, nport, UB960_TR_CSI_TX_ISR, &csi_tx_isr);
	if (ret)
		return;

	if (csi_tx_isr & UB960_TR_CSI_TX_ISR_IS_CSI_SYNC_ERROR)
		dev_warn(dev, "TX%u: CSI_SYNC_ERROR\n", nport);

	if (csi_tx_isr & UB960_TR_CSI_TX_ISR_IS_CSI_PASS_ERROR)
		dev_warn(dev, "TX%u: CSI_PASS_ERROR\n", nport);
}

/* -----------------------------------------------------------------------------
 * RX ports
 */

/*
 * Enable the VPOC (power-over-coax) regulators of all RX ports that have
 * one. On failure, the regulators already enabled are disabled again
 * (the while loop walks back over ports < the failing one).
 */
static int ub960_rxport_enable_vpocs(struct ub960_data *priv)
{
	unsigned int nport;
	int ret;

	for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
		struct ub960_rxport *rxport = priv->rxports[nport];

		if (!rxport || !rxport->vpoc)
			continue;

		ret = regulator_enable(rxport->vpoc);
		if (ret)
			goto err_disable_vpocs;
	}

	return 0;

err_disable_vpocs:
	while (nport--) {
		struct ub960_rxport *rxport = priv->rxports[nport];

		if (!rxport || !rxport->vpoc)
			continue;

		regulator_disable(rxport->vpoc);
	}

	return ret;
}

/* Disable the VPOC regulators of all RX ports that have one. */
static void ub960_rxport_disable_vpocs(struct ub960_data *priv)
{
	unsigned int nport;

	for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
		struct ub960_rxport *rxport = priv->rxports[nport];

		if (!rxport || !rxport->vpoc)
			continue;

		regulator_disable(rxport->vpoc);
	}
}

/*
 * Clear latched RX-port error state by reading the status and error
 * counter registers; the values themselves are discarded (read-to-clear
 * usage, hence the ignored return values and scratch variable).
 */
static void ub960_rxport_clear_errors(struct ub960_data *priv,
				      unsigned int nport)
{
	u8 v;

	ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1, &v);
	ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2, &v);
	ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS, &v);
	ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS, &v);

	ub960_rxport_read(priv, nport, UB960_RR_RX_PAR_ERR_HI, &v);
	ub960_rxport_read(priv, nport, UB960_RR_RX_PAR_ERR_LO, &v);

	ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER, &v);
}

/* Clear latched errors on every RX port. */
static void ub960_clear_rx_errors(struct ub960_data *priv)
{
	unsigned int nport;

	for (nport = 0; nport < priv->hw_data->num_rxports; nport++)
		ub960_rxport_clear_errors(priv, nport);
}

static int ub960_rxport_get_strobe_pos(struct ub960_data *priv,
				       unsigned int nport, s8 *strobe_pos)
{
	u8 v;
	u8 clk_delay, data_delay;
	int ret;

	ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
		       UB960_IR_RX_ANA_STROBE_SET_CLK, &v);

	clk_delay = (v & UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY) ?
1297 0 : UB960_MANUAL_STROBE_EXTRA_DELAY; 1298 1299 ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 1300 UB960_IR_RX_ANA_STROBE_SET_DATA, &v); 1301 1302 data_delay = (v & UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY) ? 1303 0 : UB960_MANUAL_STROBE_EXTRA_DELAY; 1304 1305 ret = ub960_rxport_read(priv, nport, UB960_RR_SFILTER_STS_0, &v); 1306 if (ret) 1307 return ret; 1308 1309 clk_delay += v & UB960_IR_RX_ANA_STROBE_SET_CLK_DELAY_MASK; 1310 1311 ret = ub960_rxport_read(priv, nport, UB960_RR_SFILTER_STS_1, &v); 1312 if (ret) 1313 return ret; 1314 1315 data_delay += v & UB960_IR_RX_ANA_STROBE_SET_DATA_DELAY_MASK; 1316 1317 *strobe_pos = data_delay - clk_delay; 1318 1319 return 0; 1320 } 1321 1322 static void ub960_rxport_set_strobe_pos(struct ub960_data *priv, 1323 unsigned int nport, s8 strobe_pos) 1324 { 1325 u8 clk_delay, data_delay; 1326 1327 clk_delay = UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY; 1328 data_delay = UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY; 1329 1330 if (strobe_pos < UB960_MIN_AEQ_STROBE_POS) 1331 clk_delay = abs(strobe_pos) - UB960_MANUAL_STROBE_EXTRA_DELAY; 1332 else if (strobe_pos > UB960_MAX_AEQ_STROBE_POS) 1333 data_delay = strobe_pos - UB960_MANUAL_STROBE_EXTRA_DELAY; 1334 else if (strobe_pos < 0) 1335 clk_delay = abs(strobe_pos) | UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY; 1336 else if (strobe_pos > 0) 1337 data_delay = strobe_pos | UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY; 1338 1339 ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 1340 UB960_IR_RX_ANA_STROBE_SET_CLK, clk_delay); 1341 1342 ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 1343 UB960_IR_RX_ANA_STROBE_SET_DATA, data_delay); 1344 } 1345 1346 static void ub960_rxport_set_strobe_range(struct ub960_data *priv, 1347 s8 strobe_min, s8 strobe_max) 1348 { 1349 /* Convert the signed strobe pos to positive zero based value */ 1350 strobe_min -= UB960_MIN_AEQ_STROBE_POS; 1351 strobe_max -= UB960_MIN_AEQ_STROBE_POS; 1352 1353 ub960_write(priv, 
UB960_XR_SFILTER_CFG, 1354 ((u8)strobe_min << UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT) | 1355 ((u8)strobe_max << UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT)); 1356 } 1357 1358 static int ub960_rxport_get_eq_level(struct ub960_data *priv, 1359 unsigned int nport, u8 *eq_level) 1360 { 1361 int ret; 1362 u8 v; 1363 1364 ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_STATUS, &v); 1365 if (ret) 1366 return ret; 1367 1368 *eq_level = (v & UB960_RR_AEQ_STATUS_STATUS_1) + 1369 (v & UB960_RR_AEQ_STATUS_STATUS_2); 1370 1371 return 0; 1372 } 1373 1374 static void ub960_rxport_set_eq_level(struct ub960_data *priv, 1375 unsigned int nport, u8 eq_level) 1376 { 1377 u8 eq_stage_1_select_value, eq_stage_2_select_value; 1378 const unsigned int eq_stage_max = 7; 1379 u8 v; 1380 1381 if (eq_level <= eq_stage_max) { 1382 eq_stage_1_select_value = eq_level; 1383 eq_stage_2_select_value = 0; 1384 } else { 1385 eq_stage_1_select_value = eq_stage_max; 1386 eq_stage_2_select_value = eq_level - eq_stage_max; 1387 } 1388 1389 ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v); 1390 1391 v &= ~(UB960_RR_AEQ_BYPASS_EQ_STAGE1_VALUE_MASK | 1392 UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_MASK); 1393 v |= eq_stage_1_select_value << UB960_RR_AEQ_BYPASS_EQ_STAGE1_VALUE_SHIFT; 1394 v |= eq_stage_2_select_value << UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_SHIFT; 1395 v |= UB960_RR_AEQ_BYPASS_ENABLE; 1396 1397 ub960_rxport_write(priv, nport, UB960_RR_AEQ_BYPASS, v); 1398 } 1399 1400 static void ub960_rxport_set_eq_range(struct ub960_data *priv, 1401 unsigned int nport, u8 eq_min, u8 eq_max) 1402 { 1403 ub960_rxport_write(priv, nport, UB960_RR_AEQ_MIN_MAX, 1404 (eq_min << UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT) | 1405 (eq_max << UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT)); 1406 1407 /* Enable AEQ min setting */ 1408 ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_CTL2, 1409 UB960_RR_AEQ_CTL2_SET_AEQ_FLOOR, 1410 UB960_RR_AEQ_CTL2_SET_AEQ_FLOOR); 1411 } 1412 1413 static void ub960_rxport_config_eq(struct ub960_data 
*priv, unsigned int nport) 1414 { 1415 struct ub960_rxport *rxport = priv->rxports[nport]; 1416 1417 /* We also set common settings here. Should be moved elsewhere. */ 1418 1419 if (priv->strobe.manual) { 1420 /* Disable AEQ_SFILTER_EN */ 1421 ub960_update_bits(priv, UB960_XR_AEQ_CTL1, 1422 UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN, 0); 1423 } else { 1424 /* Enable SFILTER and error control */ 1425 ub960_write(priv, UB960_XR_AEQ_CTL1, 1426 UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_MASK | 1427 UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN); 1428 1429 /* Set AEQ strobe range */ 1430 ub960_rxport_set_strobe_range(priv, priv->strobe.min, 1431 priv->strobe.max); 1432 } 1433 1434 /* The rest are port specific */ 1435 1436 if (priv->strobe.manual) 1437 ub960_rxport_set_strobe_pos(priv, nport, rxport->eq.strobe_pos); 1438 else 1439 ub960_rxport_set_strobe_pos(priv, nport, 0); 1440 1441 if (rxport->eq.manual_eq) { 1442 ub960_rxport_set_eq_level(priv, nport, 1443 rxport->eq.manual.eq_level); 1444 1445 /* Enable AEQ Bypass */ 1446 ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_BYPASS, 1447 UB960_RR_AEQ_BYPASS_ENABLE, 1448 UB960_RR_AEQ_BYPASS_ENABLE); 1449 } else { 1450 ub960_rxport_set_eq_range(priv, nport, 1451 rxport->eq.aeq.eq_level_min, 1452 rxport->eq.aeq.eq_level_max); 1453 1454 /* Disable AEQ Bypass */ 1455 ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_BYPASS, 1456 UB960_RR_AEQ_BYPASS_ENABLE, 0); 1457 } 1458 } 1459 1460 static int ub960_rxport_link_ok(struct ub960_data *priv, unsigned int nport, 1461 bool *ok) 1462 { 1463 u8 rx_port_sts1, rx_port_sts2; 1464 u16 parity_errors; 1465 u8 csi_rx_sts; 1466 u8 csi_err_cnt; 1467 u8 bcc_sts; 1468 int ret; 1469 bool errors; 1470 1471 ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1, 1472 &rx_port_sts1); 1473 if (ret) 1474 return ret; 1475 1476 if (!(rx_port_sts1 & UB960_RR_RX_PORT_STS1_LOCK_STS)) { 1477 *ok = false; 1478 return 0; 1479 } 1480 1481 ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2, 1482 &rx_port_sts2); 1483 if (ret) 
1484 return ret; 1485 1486 ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS, &csi_rx_sts); 1487 if (ret) 1488 return ret; 1489 1490 ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER, 1491 &csi_err_cnt); 1492 if (ret) 1493 return ret; 1494 1495 ret = ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS, &bcc_sts); 1496 if (ret) 1497 return ret; 1498 1499 ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_PAR_ERR_HI, 1500 &parity_errors); 1501 if (ret) 1502 return ret; 1503 1504 errors = (rx_port_sts1 & UB960_RR_RX_PORT_STS1_ERROR_MASK) || 1505 (rx_port_sts2 & UB960_RR_RX_PORT_STS2_ERROR_MASK) || 1506 (bcc_sts & UB960_RR_BCC_STATUS_ERROR_MASK) || 1507 (csi_rx_sts & UB960_RR_CSI_RX_STS_ERROR_MASK) || csi_err_cnt || 1508 parity_errors; 1509 1510 *ok = !errors; 1511 1512 return 0; 1513 } 1514 1515 /* 1516 * Wait for the RX ports to lock, have no errors and have stable strobe position 1517 * and EQ level. 1518 */ 1519 static int ub960_rxport_wait_locks(struct ub960_data *priv, 1520 unsigned long port_mask, 1521 unsigned int *lock_mask) 1522 { 1523 struct device *dev = &priv->client->dev; 1524 unsigned long timeout; 1525 unsigned int link_ok_mask; 1526 unsigned int missing; 1527 unsigned int loops; 1528 u8 nport; 1529 int ret; 1530 1531 if (port_mask == 0) { 1532 if (lock_mask) 1533 *lock_mask = 0; 1534 return 0; 1535 } 1536 1537 if (port_mask >= BIT(priv->hw_data->num_rxports)) 1538 return -EINVAL; 1539 1540 timeout = jiffies + msecs_to_jiffies(1000); 1541 loops = 0; 1542 link_ok_mask = 0; 1543 1544 while (time_before(jiffies, timeout)) { 1545 missing = 0; 1546 1547 for_each_set_bit(nport, &port_mask, 1548 priv->hw_data->num_rxports) { 1549 struct ub960_rxport *rxport = priv->rxports[nport]; 1550 bool ok; 1551 1552 if (!rxport) 1553 continue; 1554 1555 ret = ub960_rxport_link_ok(priv, nport, &ok); 1556 if (ret) 1557 return ret; 1558 1559 /* 1560 * We want the link to be ok for two consecutive loops, 1561 * as a link could get established just before our 
test 1562 * and drop soon after. 1563 */ 1564 if (!ok || !(link_ok_mask & BIT(nport))) 1565 missing++; 1566 1567 if (ok) 1568 link_ok_mask |= BIT(nport); 1569 else 1570 link_ok_mask &= ~BIT(nport); 1571 } 1572 1573 loops++; 1574 1575 if (missing == 0) 1576 break; 1577 1578 /* 1579 * The sleep time of 10 ms was found by testing to give a lock 1580 * with a few iterations. It can be decreased if on some setups 1581 * the lock can be achieved much faster. 1582 */ 1583 fsleep(10 * USEC_PER_MSEC); 1584 } 1585 1586 if (lock_mask) 1587 *lock_mask = link_ok_mask; 1588 1589 dev_dbg(dev, "Wait locks done in %u loops\n", loops); 1590 for_each_set_bit(nport, &port_mask, priv->hw_data->num_rxports) { 1591 struct ub960_rxport *rxport = priv->rxports[nport]; 1592 s8 strobe_pos, eq_level; 1593 u16 v; 1594 1595 if (!rxport) 1596 continue; 1597 1598 if (!(link_ok_mask & BIT(nport))) { 1599 dev_dbg(dev, "\trx%u: not locked\n", nport); 1600 continue; 1601 } 1602 1603 ub960_rxport_read16(priv, nport, UB960_RR_RX_FREQ_HIGH, &v); 1604 1605 if (priv->hw_data->is_ub9702) { 1606 dev_dbg(dev, "\trx%u: locked, freq %llu Hz\n", 1607 nport, ((u64)v * HZ_PER_MHZ) >> 8); 1608 } else { 1609 ret = ub960_rxport_get_strobe_pos(priv, nport, 1610 &strobe_pos); 1611 if (ret) 1612 return ret; 1613 1614 ret = ub960_rxport_get_eq_level(priv, nport, &eq_level); 1615 if (ret) 1616 return ret; 1617 1618 dev_dbg(dev, 1619 "\trx%u: locked, SP: %d, EQ: %u, freq %llu Hz\n", 1620 nport, strobe_pos, eq_level, 1621 ((u64)v * HZ_PER_MHZ) >> 8); 1622 } 1623 } 1624 1625 return 0; 1626 } 1627 1628 static unsigned long ub960_calc_bc_clk_rate_ub960(struct ub960_data *priv, 1629 struct ub960_rxport *rxport) 1630 { 1631 unsigned int mult; 1632 unsigned int div; 1633 1634 switch (rxport->rx_mode) { 1635 case RXPORT_MODE_RAW10: 1636 case RXPORT_MODE_RAW12_HF: 1637 case RXPORT_MODE_RAW12_LF: 1638 mult = 1; 1639 div = 10; 1640 break; 1641 1642 case RXPORT_MODE_CSI2_SYNC: 1643 mult = 2; 1644 div = 1; 1645 break; 1646 1647 case 
RXPORT_MODE_CSI2_NONSYNC: 1648 mult = 2; 1649 div = 5; 1650 break; 1651 1652 default: 1653 return 0; 1654 } 1655 1656 return clk_get_rate(priv->refclk) * mult / div; 1657 } 1658 1659 static unsigned long ub960_calc_bc_clk_rate_ub9702(struct ub960_data *priv, 1660 struct ub960_rxport *rxport) 1661 { 1662 switch (rxport->rx_mode) { 1663 case RXPORT_MODE_RAW10: 1664 case RXPORT_MODE_RAW12_HF: 1665 case RXPORT_MODE_RAW12_LF: 1666 return 2359400; 1667 1668 case RXPORT_MODE_CSI2_SYNC: 1669 return 47187500; 1670 1671 case RXPORT_MODE_CSI2_NONSYNC: 1672 return 9437500; 1673 1674 default: 1675 return 0; 1676 } 1677 } 1678 1679 static int ub960_rxport_add_serializer(struct ub960_data *priv, u8 nport) 1680 { 1681 struct ub960_rxport *rxport = priv->rxports[nport]; 1682 struct device *dev = &priv->client->dev; 1683 struct ds90ub9xx_platform_data *ser_pdata = &rxport->ser.pdata; 1684 struct i2c_board_info ser_info = { 1685 .of_node = to_of_node(rxport->ser.fwnode), 1686 .fwnode = rxport->ser.fwnode, 1687 .platform_data = ser_pdata, 1688 }; 1689 1690 ser_pdata->port = nport; 1691 ser_pdata->atr = priv->atr; 1692 if (priv->hw_data->is_ub9702) 1693 ser_pdata->bc_rate = ub960_calc_bc_clk_rate_ub9702(priv, rxport); 1694 else 1695 ser_pdata->bc_rate = ub960_calc_bc_clk_rate_ub960(priv, rxport); 1696 1697 /* 1698 * The serializer is added under the same i2c adapter as the 1699 * deserializer. This is not quite right, as the serializer is behind 1700 * the FPD-Link. 
1701 */ 1702 ser_info.addr = rxport->ser.alias; 1703 rxport->ser.client = 1704 i2c_new_client_device(priv->client->adapter, &ser_info); 1705 if (IS_ERR(rxport->ser.client)) { 1706 dev_err(dev, "rx%u: cannot add %s i2c device", nport, 1707 ser_info.type); 1708 return PTR_ERR(rxport->ser.client); 1709 } 1710 1711 dev_dbg(dev, "rx%u: remote serializer at alias 0x%02x (%u-%04x)\n", 1712 nport, rxport->ser.client->addr, 1713 rxport->ser.client->adapter->nr, rxport->ser.client->addr); 1714 1715 return 0; 1716 } 1717 1718 static void ub960_rxport_remove_serializer(struct ub960_data *priv, u8 nport) 1719 { 1720 struct ub960_rxport *rxport = priv->rxports[nport]; 1721 1722 i2c_unregister_device(rxport->ser.client); 1723 rxport->ser.client = NULL; 1724 } 1725 1726 /* Add serializer i2c devices for all initialized ports */ 1727 static int ub960_rxport_add_serializers(struct ub960_data *priv) 1728 { 1729 unsigned int nport; 1730 int ret; 1731 1732 for (nport = 0; nport < priv->hw_data->num_rxports; nport++) { 1733 struct ub960_rxport *rxport = priv->rxports[nport]; 1734 1735 if (!rxport) 1736 continue; 1737 1738 ret = ub960_rxport_add_serializer(priv, nport); 1739 if (ret) 1740 goto err_remove_sers; 1741 } 1742 1743 return 0; 1744 1745 err_remove_sers: 1746 while (nport--) { 1747 struct ub960_rxport *rxport = priv->rxports[nport]; 1748 1749 if (!rxport) 1750 continue; 1751 1752 ub960_rxport_remove_serializer(priv, nport); 1753 } 1754 1755 return ret; 1756 } 1757 1758 static void ub960_rxport_remove_serializers(struct ub960_data *priv) 1759 { 1760 unsigned int nport; 1761 1762 for (nport = 0; nport < priv->hw_data->num_rxports; nport++) { 1763 struct ub960_rxport *rxport = priv->rxports[nport]; 1764 1765 if (!rxport) 1766 continue; 1767 1768 ub960_rxport_remove_serializer(priv, nport); 1769 } 1770 } 1771 1772 static void ub960_init_tx_port(struct ub960_data *priv, 1773 struct ub960_txport *txport) 1774 { 1775 unsigned int nport = txport->nport; 1776 u8 csi_ctl = 0; 1777 1778 /* 
1779 * From the datasheet: "initial CSI Skew-Calibration 1780 * sequence [...] should be set when operating at 1.6 Gbps" 1781 */ 1782 if (priv->tx_data_rate == MHZ(1600)) 1783 csi_ctl |= UB960_TR_CSI_CTL_CSI_CAL_EN; 1784 1785 csi_ctl |= (4 - txport->num_data_lanes) << 4; 1786 1787 if (!txport->non_continous_clk) 1788 csi_ctl |= UB960_TR_CSI_CTL_CSI_CONTS_CLOCK; 1789 1790 ub960_txport_write(priv, nport, UB960_TR_CSI_CTL, csi_ctl); 1791 } 1792 1793 static int ub960_init_tx_ports(struct ub960_data *priv) 1794 { 1795 unsigned int nport; 1796 u8 speed_select; 1797 u8 pll_div; 1798 1799 /* TX ports */ 1800 1801 switch (priv->tx_data_rate) { 1802 case MHZ(1600): 1803 default: 1804 speed_select = 0; 1805 pll_div = 0x10; 1806 break; 1807 case MHZ(1200): 1808 speed_select = 1; 1809 pll_div = 0x18; 1810 break; 1811 case MHZ(800): 1812 speed_select = 2; 1813 pll_div = 0x10; 1814 break; 1815 case MHZ(400): 1816 speed_select = 3; 1817 pll_div = 0x10; 1818 break; 1819 } 1820 1821 ub960_write(priv, UB960_SR_CSI_PLL_CTL, speed_select); 1822 1823 if (priv->hw_data->is_ub9702) { 1824 ub960_write(priv, UB960_SR_CSI_PLL_DIV, pll_div); 1825 1826 switch (priv->tx_data_rate) { 1827 case MHZ(1600): 1828 default: 1829 ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x92, 0x80); 1830 ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x4b, 0x2a); 1831 break; 1832 case MHZ(800): 1833 ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x92, 0x90); 1834 ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x4f, 0x2a); 1835 ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x4b, 0x2a); 1836 break; 1837 case MHZ(400): 1838 ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x92, 0xa0); 1839 break; 1840 } 1841 } 1842 1843 for (nport = 0; nport < priv->hw_data->num_txports; nport++) { 1844 struct ub960_txport *txport = priv->txports[nport]; 1845 1846 if (!txport) 1847 continue; 1848 1849 ub960_init_tx_port(priv, txport); 1850 } 1851 1852 return 0; 1853 } 1854 1855 static void ub960_init_rx_port_ub960(struct 
ub960_data *priv, 1856 struct ub960_rxport *rxport) 1857 { 1858 unsigned int nport = rxport->nport; 1859 u32 bc_freq_val; 1860 1861 /* 1862 * Back channel frequency select. 1863 * Override FREQ_SELECT from the strap. 1864 * 0 - 2.5 Mbps (DS90UB913A-Q1 / DS90UB933-Q1) 1865 * 2 - 10 Mbps 1866 * 6 - 50 Mbps (DS90UB953-Q1) 1867 * 1868 * Note that changing this setting will result in some errors on the back 1869 * channel for a short period of time. 1870 */ 1871 1872 switch (rxport->rx_mode) { 1873 case RXPORT_MODE_RAW10: 1874 case RXPORT_MODE_RAW12_HF: 1875 case RXPORT_MODE_RAW12_LF: 1876 bc_freq_val = 0; 1877 break; 1878 1879 case RXPORT_MODE_CSI2_NONSYNC: 1880 bc_freq_val = 2; 1881 break; 1882 1883 case RXPORT_MODE_CSI2_SYNC: 1884 bc_freq_val = 6; 1885 break; 1886 1887 default: 1888 return; 1889 } 1890 1891 ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG, 1892 UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK, 1893 bc_freq_val); 1894 1895 switch (rxport->rx_mode) { 1896 case RXPORT_MODE_RAW10: 1897 /* FPD3_MODE = RAW10 Mode (DS90UB913A-Q1 / DS90UB933-Q1 compatible) */ 1898 ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG, 1899 UB960_RR_PORT_CONFIG_FPD3_MODE_MASK, 1900 0x3); 1901 1902 /* 1903 * RAW10_8BIT_CTL = 0b10 : 8-bit processing using upper 8 bits 1904 */ 1905 ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2, 1906 UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_MASK, 1907 0x2 << UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_SHIFT); 1908 1909 break; 1910 1911 case RXPORT_MODE_RAW12_HF: 1912 case RXPORT_MODE_RAW12_LF: 1913 /* Not implemented */ 1914 return; 1915 1916 case RXPORT_MODE_CSI2_SYNC: 1917 case RXPORT_MODE_CSI2_NONSYNC: 1918 /* CSI-2 Mode (DS90UB953-Q1 compatible) */ 1919 ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG, 0x3, 1920 0x0); 1921 1922 break; 1923 } 1924 1925 /* LV_POLARITY & FV_POLARITY */ 1926 ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2, 0x3, 1927 rxport->lv_fv_pol); 1928 1929 /* Enable all interrupt sources from this 
port */ 1930 ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_HI, 0x07); 1931 ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_LO, 0x7f); 1932 1933 /* Enable I2C_PASS_THROUGH */ 1934 ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG, 1935 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH, 1936 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH); 1937 1938 /* Enable I2C communication to the serializer via the alias addr */ 1939 ub960_rxport_write(priv, nport, UB960_RR_SER_ALIAS_ID, 1940 rxport->ser.alias << 1); 1941 1942 /* Configure EQ related settings */ 1943 ub960_rxport_config_eq(priv, nport); 1944 1945 /* Enable RX port */ 1946 ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport)); 1947 } 1948 1949 static void ub960_init_rx_port_ub9702_fpd3(struct ub960_data *priv, 1950 struct ub960_rxport *rxport) 1951 { 1952 unsigned int nport = rxport->nport; 1953 u8 bc_freq_val; 1954 u8 fpd_func_mode; 1955 1956 switch (rxport->rx_mode) { 1957 case RXPORT_MODE_RAW10: 1958 bc_freq_val = 0; 1959 fpd_func_mode = 5; 1960 break; 1961 1962 case RXPORT_MODE_RAW12_HF: 1963 bc_freq_val = 0; 1964 fpd_func_mode = 4; 1965 break; 1966 1967 case RXPORT_MODE_RAW12_LF: 1968 bc_freq_val = 0; 1969 fpd_func_mode = 6; 1970 break; 1971 1972 case RXPORT_MODE_CSI2_SYNC: 1973 bc_freq_val = 6; 1974 fpd_func_mode = 2; 1975 break; 1976 1977 case RXPORT_MODE_CSI2_NONSYNC: 1978 bc_freq_val = 2; 1979 fpd_func_mode = 2; 1980 break; 1981 1982 default: 1983 return; 1984 } 1985 1986 ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG, 0x7, 1987 bc_freq_val); 1988 ub960_rxport_write(priv, nport, UB960_RR_CHANNEL_MODE, fpd_func_mode); 1989 1990 /* set serdes_eq_mode = 1 */ 1991 ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0xa8, 0x80); 1992 1993 /* enable serdes driver */ 1994 ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x0d, 0x7f); 1995 1996 /* set serdes_eq_offset=4 */ 1997 ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x2b, 0x04); 1998 1999 /* init default serdes_eq_max in 
0xa9 */ 2000 ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0xa9, 0x23); 2001 2002 /* init serdes_eq_min in 0xaa */ 2003 ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0xaa, 0); 2004 2005 /* serdes_driver_ctl2 control: DS90UB953-Q1/DS90UB933-Q1/DS90UB913A-Q1 */ 2006 ub960_ind_update_bits(priv, UB960_IND_TARGET_RX_ANA(nport), 0x1b, 2007 BIT(3), BIT(3)); 2008 2009 /* RX port to half-rate */ 2010 ub960_update_bits(priv, UB960_SR_FPD_RATE_CFG, 0x3 << (nport * 2), 2011 BIT(nport * 2)); 2012 } 2013 2014 static void ub960_init_rx_port_ub9702_fpd4_aeq(struct ub960_data *priv, 2015 struct ub960_rxport *rxport) 2016 { 2017 unsigned int nport = rxport->nport; 2018 bool first_time_power_up = true; 2019 2020 if (first_time_power_up) { 2021 u8 v; 2022 2023 /* AEQ init */ 2024 ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x2c, &v); 2025 2026 ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x27, v); 2027 ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x28, v + 1); 2028 2029 ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x2b, 0x00); 2030 } 2031 2032 /* enable serdes_eq_ctl2 */ 2033 ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x9e, 0x00); 2034 2035 /* enable serdes_eq_ctl1 */ 2036 ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x90, 0x40); 2037 2038 /* enable serdes_eq_en */ 2039 ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x2e, 0x40); 2040 2041 /* disable serdes_eq_override */ 2042 ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0xf0, 0x00); 2043 2044 /* disable serdes_gain_override */ 2045 ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x71, 0x00); 2046 } 2047 2048 static void ub960_init_rx_port_ub9702_fpd4(struct ub960_data *priv, 2049 struct ub960_rxport *rxport) 2050 { 2051 unsigned int nport = rxport->nport; 2052 u8 bc_freq_val; 2053 2054 switch (rxport->rx_mode) { 2055 case RXPORT_MODE_RAW10: 2056 bc_freq_val = 0; 2057 break; 2058 2059 case RXPORT_MODE_RAW12_HF: 2060 bc_freq_val = 0; 2061 
break; 2062 2063 case RXPORT_MODE_RAW12_LF: 2064 bc_freq_val = 0; 2065 break; 2066 2067 case RXPORT_MODE_CSI2_SYNC: 2068 bc_freq_val = 6; 2069 break; 2070 2071 case RXPORT_MODE_CSI2_NONSYNC: 2072 bc_freq_val = 2; 2073 break; 2074 2075 default: 2076 return; 2077 } 2078 2079 ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG, 0x7, 2080 bc_freq_val); 2081 2082 /* FPD4 Sync Mode */ 2083 ub960_rxport_write(priv, nport, UB960_RR_CHANNEL_MODE, 0); 2084 2085 /* add serdes_eq_offset of 4 */ 2086 ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x2b, 0x04); 2087 2088 /* FPD4 serdes_start_eq in 0x27: assign default */ 2089 ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x27, 0x0); 2090 /* FPD4 serdes_end_eq in 0x28: assign default */ 2091 ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x28, 0x23); 2092 2093 /* set serdes_driver_mode into FPD IV mode */ 2094 ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x04, 0x00); 2095 /* set FPD PBC drv into FPD IV mode */ 2096 ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x1b, 0x00); 2097 2098 /* set serdes_system_init to 0x2f */ 2099 ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x21, 0x2f); 2100 /* set serdes_system_rst in reset mode */ 2101 ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x25, 0xc1); 2102 2103 /* RX port to 7.55G mode */ 2104 ub960_update_bits(priv, UB960_SR_FPD_RATE_CFG, 0x3 << (nport * 2), 2105 0 << (nport * 2)); 2106 2107 ub960_init_rx_port_ub9702_fpd4_aeq(priv, rxport); 2108 } 2109 2110 static void ub960_init_rx_port_ub9702(struct ub960_data *priv, 2111 struct ub960_rxport *rxport) 2112 { 2113 unsigned int nport = rxport->nport; 2114 2115 if (rxport->cdr_mode == RXPORT_CDR_FPD3) 2116 ub960_init_rx_port_ub9702_fpd3(priv, rxport); 2117 else /* RXPORT_CDR_FPD4 */ 2118 ub960_init_rx_port_ub9702_fpd4(priv, rxport); 2119 2120 switch (rxport->rx_mode) { 2121 case RXPORT_MODE_RAW10: 2122 /* 2123 * RAW10_8BIT_CTL = 0b11 : 8-bit processing using lower 8 bits 2124 * 0b10 
: 8-bit processing using upper 8 bits 2125 */ 2126 ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2, 2127 0x3 << 6, 0x2 << 6); 2128 2129 break; 2130 2131 case RXPORT_MODE_RAW12_HF: 2132 case RXPORT_MODE_RAW12_LF: 2133 /* Not implemented */ 2134 return; 2135 2136 case RXPORT_MODE_CSI2_SYNC: 2137 case RXPORT_MODE_CSI2_NONSYNC: 2138 2139 break; 2140 } 2141 2142 /* LV_POLARITY & FV_POLARITY */ 2143 ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2, 0x3, 2144 rxport->lv_fv_pol); 2145 2146 /* Enable all interrupt sources from this port */ 2147 ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_HI, 0x07); 2148 ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_LO, 0x7f); 2149 2150 /* Enable I2C_PASS_THROUGH */ 2151 ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG, 2152 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH, 2153 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH); 2154 2155 /* Enable I2C communication to the serializer via the alias addr */ 2156 ub960_rxport_write(priv, nport, UB960_RR_SER_ALIAS_ID, 2157 rxport->ser.alias << 1); 2158 2159 /* Enable RX port */ 2160 ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport)); 2161 2162 if (rxport->cdr_mode == RXPORT_CDR_FPD4) { 2163 /* unreset 960 AEQ */ 2164 ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x25, 0x41); 2165 } 2166 } 2167 2168 static int ub960_init_rx_ports(struct ub960_data *priv) 2169 { 2170 unsigned int nport; 2171 2172 for (nport = 0; nport < priv->hw_data->num_rxports; nport++) { 2173 struct ub960_rxport *rxport = priv->rxports[nport]; 2174 2175 if (!rxport) 2176 continue; 2177 2178 if (priv->hw_data->is_ub9702) 2179 ub960_init_rx_port_ub9702(priv, rxport); 2180 else 2181 ub960_init_rx_port_ub960(priv, rxport); 2182 } 2183 2184 return 0; 2185 } 2186 2187 static void ub960_rxport_handle_events(struct ub960_data *priv, u8 nport) 2188 { 2189 struct device *dev = &priv->client->dev; 2190 u8 rx_port_sts1; 2191 u8 rx_port_sts2; 2192 u8 csi_rx_sts; 2193 u8 bcc_sts; 2194 int ret = 
0; 2195 2196 /* Read interrupts (also clears most of them) */ 2197 if (!ret) 2198 ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1, 2199 &rx_port_sts1); 2200 if (!ret) 2201 ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2, 2202 &rx_port_sts2); 2203 if (!ret) 2204 ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS, 2205 &csi_rx_sts); 2206 if (!ret) 2207 ret = ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS, 2208 &bcc_sts); 2209 2210 if (ret) 2211 return; 2212 2213 if (rx_port_sts1 & UB960_RR_RX_PORT_STS1_PARITY_ERROR) { 2214 u16 v; 2215 2216 ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_PAR_ERR_HI, 2217 &v); 2218 if (!ret) 2219 dev_err(dev, "rx%u parity errors: %u\n", nport, v); 2220 } 2221 2222 if (rx_port_sts1 & UB960_RR_RX_PORT_STS1_BCC_CRC_ERROR) 2223 dev_err(dev, "rx%u BCC CRC error\n", nport); 2224 2225 if (rx_port_sts1 & UB960_RR_RX_PORT_STS1_BCC_SEQ_ERROR) 2226 dev_err(dev, "rx%u BCC SEQ error\n", nport); 2227 2228 if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_LINE_LEN_UNSTABLE) 2229 dev_err(dev, "rx%u line length unstable\n", nport); 2230 2231 if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_FPD3_ENCODE_ERROR) 2232 dev_err(dev, "rx%u FPD3 encode error\n", nport); 2233 2234 if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_BUFFER_ERROR) 2235 dev_err(dev, "rx%u buffer error\n", nport); 2236 2237 if (csi_rx_sts) 2238 dev_err(dev, "rx%u CSI error: %#02x\n", nport, csi_rx_sts); 2239 2240 if (csi_rx_sts & UB960_RR_CSI_RX_STS_ECC1_ERR) 2241 dev_err(dev, "rx%u CSI ECC1 error\n", nport); 2242 2243 if (csi_rx_sts & UB960_RR_CSI_RX_STS_ECC2_ERR) 2244 dev_err(dev, "rx%u CSI ECC2 error\n", nport); 2245 2246 if (csi_rx_sts & UB960_RR_CSI_RX_STS_CKSUM_ERR) 2247 dev_err(dev, "rx%u CSI checksum error\n", nport); 2248 2249 if (csi_rx_sts & UB960_RR_CSI_RX_STS_LENGTH_ERR) 2250 dev_err(dev, "rx%u CSI length error\n", nport); 2251 2252 if (bcc_sts) 2253 dev_err(dev, "rx%u BCC error: %#02x\n", nport, bcc_sts); 2254 2255 if (bcc_sts & UB960_RR_BCC_STATUS_RESP_ERR) 
2256 dev_err(dev, "rx%u BCC response error", nport); 2257 2258 if (bcc_sts & UB960_RR_BCC_STATUS_SLAVE_TO) 2259 dev_err(dev, "rx%u BCC slave timeout", nport); 2260 2261 if (bcc_sts & UB960_RR_BCC_STATUS_SLAVE_ERR) 2262 dev_err(dev, "rx%u BCC slave error", nport); 2263 2264 if (bcc_sts & UB960_RR_BCC_STATUS_MASTER_TO) 2265 dev_err(dev, "rx%u BCC master timeout", nport); 2266 2267 if (bcc_sts & UB960_RR_BCC_STATUS_MASTER_ERR) 2268 dev_err(dev, "rx%u BCC master error", nport); 2269 2270 if (bcc_sts & UB960_RR_BCC_STATUS_SEQ_ERROR) 2271 dev_err(dev, "rx%u BCC sequence error", nport); 2272 2273 if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_LINE_LEN_CHG) { 2274 u16 v; 2275 2276 ret = ub960_rxport_read16(priv, nport, UB960_RR_LINE_LEN_1, &v); 2277 if (!ret) 2278 dev_dbg(dev, "rx%u line len changed: %u\n", nport, v); 2279 } 2280 2281 if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_LINE_CNT_CHG) { 2282 u16 v; 2283 2284 ret = ub960_rxport_read16(priv, nport, UB960_RR_LINE_COUNT_HI, 2285 &v); 2286 if (!ret) 2287 dev_dbg(dev, "rx%u line count changed: %u\n", nport, v); 2288 } 2289 2290 if (rx_port_sts1 & UB960_RR_RX_PORT_STS1_LOCK_STS_CHG) { 2291 dev_dbg(dev, "rx%u: %s, %s, %s, %s\n", nport, 2292 (rx_port_sts1 & UB960_RR_RX_PORT_STS1_LOCK_STS) ? 2293 "locked" : 2294 "unlocked", 2295 (rx_port_sts1 & UB960_RR_RX_PORT_STS1_PORT_PASS) ? 2296 "passed" : 2297 "not passed", 2298 (rx_port_sts2 & UB960_RR_RX_PORT_STS2_CABLE_FAULT) ? 2299 "no clock" : 2300 "clock ok", 2301 (rx_port_sts2 & UB960_RR_RX_PORT_STS2_FREQ_STABLE) ? 2302 "stable freq" : 2303 "unstable freq"); 2304 } 2305 } 2306 2307 /* ----------------------------------------------------------------------------- 2308 * V4L2 2309 */ 2310 2311 /* 2312 * The current implementation only supports a simple VC mapping, where all VCs 2313 * from a one RX port will be mapped to the same VC. Also, the hardware 2314 * dictates that all streams from an RX port must go to a single TX port. 
 *
 * This function decides the target VC numbers for each RX port with a simple
 * algorithm, so that for each TX port, we get VC numbers starting from 0,
 * and counting up.
 *
 * E.g. if all four RX ports are in use, of which the first two go to the
 * first TX port and the second two go to the second TX port, we would get
 * the following VCs for the four RX ports: 0, 1, 0, 1.
 *
 * TODO: implement a more sophisticated VC mapping. As the driver cannot know
 * what VCs the sinks expect (say, an FPGA with hardcoded VC routing), this
 * probably needs to be somehow configurable. Device tree?
 */
static void ub960_get_vc_maps(struct ub960_data *priv,
			      struct v4l2_subdev_state *state, u8 *vc)
{
	u8 cur_vc[UB960_MAX_TX_NPORTS] = {};
	struct v4l2_subdev_route *route;
	u8 handled_mask = 0;

	for_each_active_route(&state->routing, route) {
		unsigned int rx, tx;

		/* Only the first active route of an RX port decides its VC */
		rx = ub960_pad_to_port(priv, route->sink_pad);
		if (BIT(rx) & handled_mask)
			continue;

		tx = ub960_pad_to_port(priv, route->source_pad);

		/* Assign the next free VC number on the target TX port */
		vc[rx] = cur_vc[tx]++;
		handled_mask |= BIT(rx);
	}
}

/* Enable the CSI-2 transmitter of the given TX port. */
static int ub960_enable_tx_port(struct ub960_data *priv, unsigned int nport)
{
	struct device *dev = &priv->client->dev;

	dev_dbg(dev, "enable TX port %u\n", nport);

	return ub960_txport_update_bits(priv, nport, UB960_TR_CSI_CTL,
					UB960_TR_CSI_CTL_CSI_ENABLE,
					UB960_TR_CSI_CTL_CSI_ENABLE);
}

/* Disable the CSI-2 transmitter of the given TX port. */
static void ub960_disable_tx_port(struct ub960_data *priv, unsigned int nport)
{
	struct device *dev = &priv->client->dev;

	dev_dbg(dev, "disable TX port %u\n", nport);

	ub960_txport_update_bits(priv, nport, UB960_TR_CSI_CTL,
				 UB960_TR_CSI_CTL_CSI_ENABLE, 0);
}

/* Enable forwarding from the given RX port towards the CSI-2 TX side. */
static int ub960_enable_rx_port(struct ub960_data *priv, unsigned int nport)
{
	struct device *dev = &priv->client->dev;

	dev_dbg(dev, "enable RX port %u\n", nport);

	/* Enable forwarding */
	return ub960_update_bits(priv, UB960_SR_FWD_CTL1,
				 UB960_SR_FWD_CTL1_PORT_DIS(nport), 0);
}

/* Disable forwarding from the given RX port towards the CSI-2 TX side. */
static void ub960_disable_rx_port(struct ub960_data *priv, unsigned int nport)
{
	struct device *dev = &priv->client->dev;

	dev_dbg(dev, "disable RX port %u\n", nport);

	/* Disable forwarding */
	ub960_update_bits(priv, UB960_SR_FWD_CTL1,
			  UB960_SR_FWD_CTL1_PORT_DIS(nport),
			  UB960_SR_FWD_CTL1_PORT_DIS(nport));
}

/*
 * The driver only supports using a single VC for each source. This function
 * checks that each source only provides streams using a single VC.
 */
static int ub960_validate_stream_vcs(struct ub960_data *priv)
{
	unsigned int nport;
	unsigned int i;

	for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
		struct ub960_rxport *rxport = priv->rxports[nport];
		struct v4l2_mbus_frame_desc desc;
		int ret;
		u8 vc;

		if (!rxport)
			continue;

		ret = v4l2_subdev_call(rxport->source.sd, pad, get_frame_desc,
				       rxport->source.pad, &desc);
		if (ret)
			return ret;

		/* Only CSI-2 frame descriptors carry VC information */
		if (desc.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2)
			continue;

		if (desc.num_entries == 0)
			continue;

		/* Every entry must use the same VC as the first entry */
		vc = desc.entry[0].bus.csi2.vc;

		for (i = 1; i < desc.num_entries; i++) {
			if (vc == desc.entry[i].bus.csi2.vc)
				continue;

			dev_err(&priv->client->dev,
				"rx%u: source with multiple virtual-channels is not supported\n",
				nport);
			return -ENODEV;
		}
	}

	return 0;
}

/*
 * Program the per-RX-port VC mapping, RAW-mode datatypes and the RX->TX
 * forwarding selection according to the active routing in @state.
 */
static int ub960_configure_ports_for_streaming(struct ub960_data *priv,
					       struct v4l2_subdev_state *state)
{
	u8 fwd_ctl;
	struct {
		u32 num_streams;
		u8 pixel_dt;
		u8 meta_dt;
		u32 meta_lines;
		u32 tx_port;
	} rx_data[UB960_MAX_RX_NPORTS] = {};
	u8
vc_map[UB960_MAX_RX_NPORTS] = {};
	struct v4l2_subdev_route *route;
	int ret;

	ret = ub960_validate_stream_vcs(priv);
	if (ret)
		return ret;

	ub960_get_vc_maps(priv, state, vc_map);

	/* First pass: collect stream counts, datatypes and TX port per RX port */
	for_each_active_route(&state->routing, route) {
		struct ub960_rxport *rxport;
		struct ub960_txport *txport;
		struct v4l2_mbus_framefmt *fmt;
		const struct ub960_format_info *ub960_fmt;
		unsigned int nport;

		nport = ub960_pad_to_port(priv, route->sink_pad);

		rxport = priv->rxports[nport];
		if (!rxport)
			return -EINVAL;

		txport = priv->txports[ub960_pad_to_port(priv, route->source_pad)];
		if (!txport)
			return -EINVAL;

		rx_data[nport].tx_port = ub960_pad_to_port(priv, route->source_pad);

		rx_data[nport].num_streams++;

		/* For the rest, we are only interested in parallel busses */
		if (rxport->rx_mode == RXPORT_MODE_CSI2_SYNC ||
		    rxport->rx_mode == RXPORT_MODE_CSI2_NONSYNC)
			continue;

		/* A parallel input can carry at most a pixel and a meta stream */
		if (rx_data[nport].num_streams > 2)
			return -EPIPE;

		fmt = v4l2_subdev_state_get_format(state, route->sink_pad,
						   route->sink_stream);
		if (!fmt)
			return -EPIPE;

		ub960_fmt = ub960_find_format(fmt->code);
		if (!ub960_fmt)
			return -EPIPE;

		if (ub960_fmt->meta) {
			/* The embedded-data line count field holds at most 3 */
			if (fmt->height > 3) {
				dev_err(&priv->client->dev,
					"rx%u: unsupported metadata height %u\n",
					nport, fmt->height);
				return -EPIPE;
			}

			rx_data[nport].meta_dt = ub960_fmt->datatype;
			rx_data[nport].meta_lines = fmt->height;
		} else {
			rx_data[nport].pixel_dt = ub960_fmt->datatype;
		}
	}

	/* Configure RX ports */

	/*
	 * Keep all port forwardings disabled by default. Forwarding will be
	 * enabled in ub960_enable_rx_port.
	 */
	fwd_ctl = GENMASK(7, 4);

	for (unsigned int nport = 0; nport < priv->hw_data->num_rxports;
	     nport++) {
		struct ub960_rxport *rxport = priv->rxports[nport];
		u8 vc = vc_map[nport];

		if (rx_data[nport].num_streams == 0)
			continue;

		switch (rxport->rx_mode) {
		case RXPORT_MODE_RAW10:
			ub960_rxport_write(priv, nport, UB960_RR_RAW10_ID,
				rx_data[nport].pixel_dt | (vc << UB960_RR_RAW10_ID_VC_SHIFT));

			ub960_rxport_write(priv, rxport->nport,
				UB960_RR_RAW_EMBED_DTYPE,
				(rx_data[nport].meta_lines << UB960_RR_RAW_EMBED_DTYPE_LINES_SHIFT) |
					rx_data[nport].meta_dt);

			break;

		case RXPORT_MODE_RAW12_HF:
		case RXPORT_MODE_RAW12_LF:
			/* Not implemented */
			break;

		case RXPORT_MODE_CSI2_SYNC:
		case RXPORT_MODE_CSI2_NONSYNC:
			if (!priv->hw_data->is_ub9702) {
				/* Map all VCs from this port to the same VC */
				ub960_rxport_write(priv, nport, UB960_RR_CSI_VC_MAP,
					(vc << UB960_RR_CSI_VC_MAP_SHIFT(3)) |
					(vc << UB960_RR_CSI_VC_MAP_SHIFT(2)) |
					(vc << UB960_RR_CSI_VC_MAP_SHIFT(1)) |
					(vc << UB960_RR_CSI_VC_MAP_SHIFT(0)));
			} else {
				unsigned int i;

				/* Map all VCs from this port to VC(nport) */
				for (i = 0; i < 8; i++)
					ub960_rxport_write(priv, nport,
							   UB960_RR_VC_ID_MAP(i),
							   (nport << 4) | nport);
			}

			break;
		}

		if (rx_data[nport].tx_port == 1)
			fwd_ctl |= BIT(nport);		/* forward to TX1 */
		else
			fwd_ctl &= ~BIT(nport);		/* forward to TX0 */
	}

	ub960_write(priv, UB960_SR_FWD_CTL1, fwd_ctl);

	return 0;
}

/* Update priv->streaming: true iff any stream is enabled on any pad. */
static void ub960_update_streaming_status(struct ub960_data *priv)
{
	unsigned int i;

	for (i = 0; i < UB960_MAX_NPORTS; i++) {
		if (priv->stream_enable_mask[i])
			break;
	}

	priv->streaming = i < UB960_MAX_NPORTS;
}

static int ub960_enable_streams(struct v4l2_subdev *sd,
2591 struct v4l2_subdev_state *state, u32 source_pad, 2592 u64 source_streams_mask) 2593 { 2594 struct ub960_data *priv = sd_to_ub960(sd); 2595 struct device *dev = &priv->client->dev; 2596 u64 sink_streams[UB960_MAX_RX_NPORTS] = {}; 2597 struct v4l2_subdev_route *route; 2598 unsigned int failed_port; 2599 unsigned int nport; 2600 int ret; 2601 2602 if (!priv->streaming) { 2603 dev_dbg(dev, "Prepare for streaming\n"); 2604 ret = ub960_configure_ports_for_streaming(priv, state); 2605 if (ret) 2606 return ret; 2607 } 2608 2609 /* Enable TX port if not yet enabled */ 2610 if (!priv->stream_enable_mask[source_pad]) { 2611 ret = ub960_enable_tx_port(priv, 2612 ub960_pad_to_port(priv, source_pad)); 2613 if (ret) 2614 return ret; 2615 } 2616 2617 priv->stream_enable_mask[source_pad] |= source_streams_mask; 2618 2619 /* Collect sink streams per pad which we need to enable */ 2620 for_each_active_route(&state->routing, route) { 2621 if (route->source_pad != source_pad) 2622 continue; 2623 2624 if (!(source_streams_mask & BIT_ULL(route->source_stream))) 2625 continue; 2626 2627 nport = ub960_pad_to_port(priv, route->sink_pad); 2628 2629 sink_streams[nport] |= BIT_ULL(route->sink_stream); 2630 } 2631 2632 for (nport = 0; nport < priv->hw_data->num_rxports; nport++) { 2633 if (!sink_streams[nport]) 2634 continue; 2635 2636 /* Enable the RX port if not yet enabled */ 2637 if (!priv->stream_enable_mask[nport]) { 2638 ret = ub960_enable_rx_port(priv, nport); 2639 if (ret) { 2640 failed_port = nport; 2641 goto err; 2642 } 2643 } 2644 2645 priv->stream_enable_mask[nport] |= sink_streams[nport]; 2646 2647 dev_dbg(dev, "enable RX port %u streams %#llx\n", nport, 2648 sink_streams[nport]); 2649 2650 ret = v4l2_subdev_enable_streams( 2651 priv->rxports[nport]->source.sd, 2652 priv->rxports[nport]->source.pad, 2653 sink_streams[nport]); 2654 if (ret) { 2655 priv->stream_enable_mask[nport] &= ~sink_streams[nport]; 2656 2657 if (!priv->stream_enable_mask[nport]) 2658 
ub960_disable_rx_port(priv, nport); 2659 2660 failed_port = nport; 2661 goto err; 2662 } 2663 } 2664 2665 priv->streaming = true; 2666 2667 return 0; 2668 2669 err: 2670 for (nport = 0; nport < failed_port; nport++) { 2671 if (!sink_streams[nport]) 2672 continue; 2673 2674 dev_dbg(dev, "disable RX port %u streams %#llx\n", nport, 2675 sink_streams[nport]); 2676 2677 ret = v4l2_subdev_disable_streams( 2678 priv->rxports[nport]->source.sd, 2679 priv->rxports[nport]->source.pad, 2680 sink_streams[nport]); 2681 if (ret) 2682 dev_err(dev, "Failed to disable streams: %d\n", ret); 2683 2684 priv->stream_enable_mask[nport] &= ~sink_streams[nport]; 2685 2686 /* Disable RX port if no active streams */ 2687 if (!priv->stream_enable_mask[nport]) 2688 ub960_disable_rx_port(priv, nport); 2689 } 2690 2691 priv->stream_enable_mask[source_pad] &= ~source_streams_mask; 2692 2693 if (!priv->stream_enable_mask[source_pad]) 2694 ub960_disable_tx_port(priv, 2695 ub960_pad_to_port(priv, source_pad)); 2696 2697 ub960_update_streaming_status(priv); 2698 2699 return ret; 2700 } 2701 2702 static int ub960_disable_streams(struct v4l2_subdev *sd, 2703 struct v4l2_subdev_state *state, 2704 u32 source_pad, u64 source_streams_mask) 2705 { 2706 struct ub960_data *priv = sd_to_ub960(sd); 2707 struct device *dev = &priv->client->dev; 2708 u64 sink_streams[UB960_MAX_RX_NPORTS] = {}; 2709 struct v4l2_subdev_route *route; 2710 unsigned int nport; 2711 int ret; 2712 2713 /* Collect sink streams per pad which we need to disable */ 2714 for_each_active_route(&state->routing, route) { 2715 if (route->source_pad != source_pad) 2716 continue; 2717 2718 if (!(source_streams_mask & BIT_ULL(route->source_stream))) 2719 continue; 2720 2721 nport = ub960_pad_to_port(priv, route->sink_pad); 2722 2723 sink_streams[nport] |= BIT_ULL(route->sink_stream); 2724 } 2725 2726 for (nport = 0; nport < priv->hw_data->num_rxports; nport++) { 2727 if (!sink_streams[nport]) 2728 continue; 2729 2730 dev_dbg(dev, "disable RX port 
%u streams %#llx\n", nport,
			sink_streams[nport]);

		ret = v4l2_subdev_disable_streams(
			priv->rxports[nport]->source.sd,
			priv->rxports[nport]->source.pad,
			sink_streams[nport]);
		if (ret)
			dev_err(dev, "Failed to disable streams: %d\n", ret);

		priv->stream_enable_mask[nport] &= ~sink_streams[nport];

		/* Disable RX port if no active streams */
		if (!priv->stream_enable_mask[nport])
			ub960_disable_rx_port(priv, nport);
	}

	/* Disable TX port if no active streams */

	priv->stream_enable_mask[source_pad] &= ~source_streams_mask;

	if (!priv->stream_enable_mask[source_pad])
		ub960_disable_tx_port(priv,
				      ub960_pad_to_port(priv, source_pad));

	ub960_update_streaming_status(priv);

	return 0;
}

/*
 * Validate and apply @routing to @state, initializing the format of every
 * route to a default 640x480 UYVY frame.
 */
static int _ub960_set_routing(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *state,
			      struct v4l2_subdev_krouting *routing)
{
	static const struct v4l2_mbus_framefmt format = {
		.width = 640,
		.height = 480,
		.code = MEDIA_BUS_FMT_UYVY8_1X16,
		.field = V4L2_FIELD_NONE,
		.colorspace = V4L2_COLORSPACE_SRGB,
		.ycbcr_enc = V4L2_YCBCR_ENC_601,
		.quantization = V4L2_QUANTIZATION_LIM_RANGE,
		.xfer_func = V4L2_XFER_FUNC_SRGB,
	};
	int ret;

	/*
	 * Note: we can only support up to V4L2_FRAME_DESC_ENTRY_MAX, until
	 * frame desc is made dynamically allocated.
	 */

	if (routing->num_routes > V4L2_FRAME_DESC_ENTRY_MAX)
		return -E2BIG;

	ret = v4l2_subdev_routing_validate(sd, routing,
					   V4L2_SUBDEV_ROUTING_ONLY_1_TO_1 |
					   V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX);
	if (ret)
		return ret;

	ret = v4l2_subdev_set_routing_with_fmt(sd, state, routing, &format);
	if (ret)
		return ret;

	return 0;
}

static int ub960_set_routing(struct v4l2_subdev *sd,
			     struct v4l2_subdev_state *state,
			     enum v4l2_subdev_format_whence which,
			     struct v4l2_subdev_krouting *routing)
{
	struct ub960_data *priv = sd_to_ub960(sd);

	/* The active routing cannot be changed while streaming */
	if (which == V4L2_SUBDEV_FORMAT_ACTIVE && priv->streaming)
		return -EBUSY;

	return _ub960_set_routing(sd, state, routing);
}

/*
 * Build the frame descriptor of a source (TX) pad by merging the frame
 * descriptors of the routed sources, rewriting each entry's VC according to
 * the driver's VC map.
 */
static int ub960_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
				struct v4l2_mbus_frame_desc *fd)
{
	struct ub960_data *priv = sd_to_ub960(sd);
	struct v4l2_subdev_route *route;
	struct v4l2_subdev_state *state;
	int ret = 0;
	struct device *dev = &priv->client->dev;
	u8 vc_map[UB960_MAX_RX_NPORTS] = {};

	if (!ub960_pad_is_source(priv, pad))
		return -EINVAL;

	fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2;

	state = v4l2_subdev_lock_and_get_active_state(&priv->sd);

	ub960_get_vc_maps(priv, state, vc_map);

	for_each_active_route(&state->routing, route) {
		struct v4l2_mbus_frame_desc_entry *source_entry = NULL;
		struct v4l2_mbus_frame_desc source_fd;
		unsigned int nport;
		unsigned int i;

		if (route->source_pad != pad)
			continue;

		nport = ub960_pad_to_port(priv, route->sink_pad);

		ret = v4l2_subdev_call(priv->rxports[nport]->source.sd, pad,
				       get_frame_desc,
				       priv->rxports[nport]->source.pad,
				       &source_fd);
		if (ret) {
			dev_err(dev,
				"Failed to get source frame desc for pad %u\n",
				route->sink_pad);
			goto out_unlock;
		}

		/* Find the source entry that matches this route's sink stream */
		for (i = 0; i < source_fd.num_entries; i++) {
			if (source_fd.entry[i].stream == route->sink_stream) {
				source_entry = &source_fd.entry[i];
				break;
			}
		}

		if (!source_entry) {
			dev_err(dev,
				"Failed to find stream from source frame desc\n");
			ret = -EPIPE;
			goto out_unlock;
		}

		fd->entry[fd->num_entries].stream = route->source_stream;
		fd->entry[fd->num_entries].flags = source_entry->flags;
		fd->entry[fd->num_entries].length = source_entry->length;
		fd->entry[fd->num_entries].pixelcode = source_entry->pixelcode;

		/* The deserializer remaps the VC; report the mapped value */
		fd->entry[fd->num_entries].bus.csi2.vc = vc_map[nport];

		if (source_fd.type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2) {
			fd->entry[fd->num_entries].bus.csi2.dt =
				source_entry->bus.csi2.dt;
		} else {
			/* Parallel source: derive the datatype from the format */
			const struct ub960_format_info *ub960_fmt;
			struct v4l2_mbus_framefmt *fmt;

			fmt = v4l2_subdev_state_get_format(state, pad,
							   route->source_stream);

			if (!fmt) {
				ret = -EINVAL;
				goto out_unlock;
			}

			ub960_fmt = ub960_find_format(fmt->code);
			if (!ub960_fmt) {
				dev_err(dev, "Unable to find format\n");
				ret = -EINVAL;
				goto out_unlock;
			}

			fd->entry[fd->num_entries].bus.csi2.dt =
				ub960_fmt->datatype;
		}

		fd->num_entries++;
	}

out_unlock:
	v4l2_subdev_unlock_state(state);

	return ret;
}

static int ub960_set_fmt(struct v4l2_subdev *sd,
			 struct v4l2_subdev_state *state,
			 struct v4l2_subdev_format *format)
{
	struct ub960_data *priv = sd_to_ub960(sd);
	struct v4l2_mbus_framefmt *fmt;

	if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE && priv->streaming)
		return -EBUSY;

	/* No transcoding, source and sink formats must match.
 */
	if (ub960_pad_is_source(priv, format->pad))
		return v4l2_subdev_get_fmt(sd, state, format);

	/*
	 * Default to the first format if the requested media bus code isn't
	 * supported.
	 */
	if (!ub960_find_format(format->format.code))
		format->format.code = ub960_formats[0].code;

	fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream);
	if (!fmt)
		return -EINVAL;

	*fmt = format->format;

	/* Propagate the sink format to the opposite (source) stream */
	fmt = v4l2_subdev_state_get_opposite_stream_format(state, format->pad,
							   format->stream);
	if (!fmt)
		return -EINVAL;

	*fmt = format->format;

	return 0;
}

/* Initial state: one active route from RX port 0 to the first TX pad. */
static int ub960_init_state(struct v4l2_subdev *sd,
			    struct v4l2_subdev_state *state)
{
	struct ub960_data *priv = sd_to_ub960(sd);

	struct v4l2_subdev_route routes[] = {
		{
			.sink_pad = 0,
			.sink_stream = 0,
			.source_pad = priv->hw_data->num_rxports,
			.source_stream = 0,
			.flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE,
		},
	};

	struct v4l2_subdev_krouting routing = {
		.num_routes = ARRAY_SIZE(routes),
		.routes = routes,
	};

	return _ub960_set_routing(sd, state, &routing);
}

static const struct v4l2_subdev_pad_ops ub960_pad_ops = {
	.enable_streams = ub960_enable_streams,
	.disable_streams = ub960_disable_streams,

	.set_routing = ub960_set_routing,
	.get_frame_desc = ub960_get_frame_desc,

	.get_fmt = v4l2_subdev_get_fmt,
	.set_fmt = ub960_set_fmt,
};

/* Log strobe position and EQ status of one RX port (UB960-specific). */
static void ub960_log_status_ub960_sp_eq(struct ub960_data *priv,
					 unsigned int nport)
{
	struct device *dev = &priv->client->dev;
	u8 eq_level;
	s8 strobe_pos;
	int ret;
	u8 v;

	/* Strobe */

	ret = ub960_read(priv, UB960_XR_AEQ_CTL1, &v);
	if (ret)
		return;

	dev_info(dev, "\t%s strobe\n",
		 (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) ? "Adaptive" :
							  "Manual");

	if (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) {
		ret = ub960_read(priv, UB960_XR_SFILTER_CFG, &v);
		if (ret)
			return;

		/* The register fields are offset by 7 from the signed position */
		dev_info(dev, "\tStrobe range [%d, %d]\n",
			 ((v >> UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT) & 0xf) - 7,
			 ((v >> UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT) & 0xf) - 7);
	}

	ret = ub960_rxport_get_strobe_pos(priv, nport, &strobe_pos);
	if (ret)
		return;

	dev_info(dev, "\tStrobe pos %d\n", strobe_pos);

	/* EQ */

	ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v);
	if (ret)
		return;

	dev_info(dev, "\t%s EQ\n",
		 (v & UB960_RR_AEQ_BYPASS_ENABLE) ? "Manual" :
						    "Adaptive");

	if (!(v & UB960_RR_AEQ_BYPASS_ENABLE)) {
		ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_MIN_MAX, &v);
		if (ret)
			return;

		dev_info(dev, "\tEQ range [%u, %u]\n",
			 (v >> UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT) & 0xf,
			 (v >> UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT) & 0xf);
	}

	if (ub960_rxport_get_eq_level(priv, nport, &eq_level) == 0)
		dev_info(dev, "\tEQ level %u\n", eq_level);
}

/* .log_status: dump chip ID plus TX and RX port status to the kernel log. */
static int ub960_log_status(struct v4l2_subdev *sd)
{
	struct ub960_data *priv = sd_to_ub960(sd);
	struct device *dev = &priv->client->dev;
	struct v4l2_subdev_state *state;
	unsigned int nport;
	u16 v16 = 0;
	u8 v = 0;
	u8 id[UB960_SR_FPD3_RX_ID_LEN];

	state = v4l2_subdev_lock_and_get_active_state(sd);

	for (unsigned int i = 0; i < sizeof(id); i++)
		ub960_read(priv, UB960_SR_FPD3_RX_ID(i), &id[i]);

	dev_info(dev, "ID '%.*s'\n", (int)sizeof(id), id);

	for (nport = 0; nport < priv->hw_data->num_txports; nport++) {
		struct ub960_txport *txport = priv->txports[nport];

		dev_info(dev, "TX %u\n", nport);

		if (!txport) {
			dev_info(dev, "\tNot initialized\n");
			continue;
		}

		ub960_txport_read(priv, nport, UB960_TR_CSI_STS, &v);
		/* Prints the raw masked bit values, not normalized 0/1 */
		dev_info(dev, "\tsync %u, pass %u\n", v & (u8)BIT(1),
			 v & (u8)BIT(0));

		ub960_read16(priv, UB960_SR_CSI_FRAME_COUNT_HI(nport), &v16);
		dev_info(dev, "\tframe counter %u\n", v16);

		ub960_read16(priv, UB960_SR_CSI_FRAME_ERR_COUNT_HI(nport), &v16);
		dev_info(dev, "\tframe error counter %u\n", v16);

		ub960_read16(priv, UB960_SR_CSI_LINE_COUNT_HI(nport), &v16);
		dev_info(dev, "\tline counter %u\n", v16);

		ub960_read16(priv, UB960_SR_CSI_LINE_ERR_COUNT_HI(nport), &v16);
		dev_info(dev, "\tline error counter %u\n", v16);
	}

	for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
		struct ub960_rxport *rxport = priv->rxports[nport];

		dev_info(dev, "RX %u\n", nport);

		if (!rxport) {
			dev_info(dev, "\tNot initialized\n");
			continue;
		}

		ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1, &v);

		if (v & UB960_RR_RX_PORT_STS1_LOCK_STS)
			dev_info(dev, "\tLocked\n");
		else
			dev_info(dev, "\tNot locked\n");

		dev_info(dev, "\trx_port_sts1 %#02x\n", v);
		ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2, &v);
		dev_info(dev, "\trx_port_sts2 %#02x\n", v);

		/* The register value is in units of 1/256 MHz */
		ub960_rxport_read16(priv, nport, UB960_RR_RX_FREQ_HIGH, &v16);
		dev_info(dev, "\tlink freq %llu Hz\n", ((u64)v16 * HZ_PER_MHZ) >> 8);

		ub960_rxport_read16(priv, nport, UB960_RR_RX_PAR_ERR_HI, &v16);
		dev_info(dev, "\tparity errors %u\n", v16);

		ub960_rxport_read16(priv, nport, UB960_RR_LINE_COUNT_HI, &v16);
		dev_info(dev, "\tlines per frame %u\n", v16);

		ub960_rxport_read16(priv, nport, UB960_RR_LINE_LEN_1, &v16);
		dev_info(dev, "\tbytes per line %u\n", v16);

		ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER, &v);
		dev_info(dev, "\tcsi_err_counter %u\n", v);

		/* Strobe/EQ details exist only on the UB960, not the UB9702 */
		if (!priv->hw_data->is_ub9702)
			ub960_log_status_ub960_sp_eq(priv, nport);

		/* GPIOs */
		for (unsigned int i = 0; i < UB960_NUM_BC_GPIOS; i++) {
			u8 ctl_reg;
			u8 ctl_shift;

			/* Two 4-bit GPIO config fields per control register */
			ctl_reg = UB960_RR_BC_GPIO_CTL(i / 2);
			ctl_shift = (i % 2) * 4;

			ub960_rxport_read(priv, nport, ctl_reg, &v);

			dev_info(dev, "\tGPIO%u: mode %u\n", i,
				 (v >> ctl_shift) & 0xf);
		}
	}

	v4l2_subdev_unlock_state(state);

	return 0;
}

static const struct v4l2_subdev_core_ops ub960_subdev_core_ops = {
	.log_status = ub960_log_status,
};

static const struct v4l2_subdev_internal_ops ub960_internal_ops = {
	.init_state = ub960_init_state,
};

static const struct v4l2_subdev_ops ub960_subdev_ops = {
	.core = &ub960_subdev_core_ops,
	.pad = &ub960_pad_ops,
};

static const struct media_entity_operations ub960_entity_ops = {
	.get_fwnode_pad = v4l2_subdev_get_fwnode_pad_1_to_1,
	.link_validate = v4l2_subdev_link_validate,
	.has_pad_interdep = v4l2_subdev_has_pad_interdep,
};

/* -----------------------------------------------------------------------------
 * Core
 */

/*
 * Main interrupt handler (also called from the poll work): reads the global
 * interrupt status and dispatches to the per-TX and per-RX event handlers.
 */
static irqreturn_t ub960_handle_events(int irq, void *arg)
{
	struct ub960_data *priv = arg;
	unsigned int i;
	u8 int_sts;
	u8 fwd_sts;
	int ret;

	ret = ub960_read(priv, UB960_SR_INTERRUPT_STS, &int_sts);
	if (ret || !int_sts)
		return IRQ_NONE;

	dev_dbg(&priv->client->dev, "INTERRUPT_STS %x\n", int_sts);

	ret = ub960_read(priv, UB960_SR_FWD_STS, &fwd_sts);
	if (ret)
		return IRQ_NONE;

	dev_dbg(&priv->client->dev, "FWD_STS %#02x\n", fwd_sts);

	for (i = 0; i < priv->hw_data->num_txports; i++) {
		if (int_sts & UB960_SR_INTERRUPT_STS_IS_CSI_TX(i))
			ub960_csi_handle_events(priv, i);
	}

	for (i = 0; i < priv->hw_data->num_rxports; i++) {
		if (!priv->rxports[i])
			continue;

		if (int_sts & 
UB960_SR_INTERRUPT_STS_IS_RX(i))
			ub960_rxport_handle_events(priv, i);
	}

	return IRQ_HANDLED;
}

/* Polling fallback for the interrupt handler; re-arms itself. */
static void ub960_handler_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct ub960_data *priv =
		container_of(dwork, struct ub960_data, poll_work);

	ub960_handle_events(0, priv);

	schedule_delayed_work(&priv->poll_work,
			      msecs_to_jiffies(UB960_POLL_TIME_MS));
}

/* Free all allocated TX port structures. */
static void ub960_txport_free_ports(struct ub960_data *priv)
{
	unsigned int nport;

	for (nport = 0; nport < priv->hw_data->num_txports; nport++) {
		struct ub960_txport *txport = priv->txports[nport];

		if (!txport)
			continue;

		kfree(txport);
		priv->txports[nport] = NULL;
	}
}

/* Free all allocated RX port structures and drop their fwnode references. */
static void ub960_rxport_free_ports(struct ub960_data *priv)
{
	unsigned int nport;

	for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
		struct ub960_rxport *rxport = priv->rxports[nport];

		if (!rxport)
			continue;

		fwnode_handle_put(rxport->source.ep_fwnode);
		fwnode_handle_put(rxport->ser.fwnode);

		kfree(rxport);
		priv->rxports[nport] = NULL;
	}
}

/*
 * Parse the properties of one 'link' DT node: CDR mode, RX mode, strobe
 * position, EQ level, serializer i2c alias and the serializer child node.
 */
static int
ub960_parse_dt_rxport_link_properties(struct ub960_data *priv,
				      struct fwnode_handle *link_fwnode,
				      struct ub960_rxport *rxport)
{
	struct device *dev = &priv->client->dev;
	unsigned int nport = rxport->nport;
	u32 rx_mode;
	u32 cdr_mode;
	s32 strobe_pos;
	u32 eq_level;
	u32 ser_i2c_alias;
	int ret;

	cdr_mode = RXPORT_CDR_FPD3;

	/* 'ti,cdr-mode' is optional; -EINVAL means the property is absent */
	ret = fwnode_property_read_u32(link_fwnode, "ti,cdr-mode", &cdr_mode);
	if (ret < 0 && ret != -EINVAL) {
		dev_err(dev, "rx%u: failed to read '%s': %d\n", nport,
			"ti,cdr-mode", ret);
		return ret;
	}

	if (cdr_mode > RXPORT_CDR_LAST) {
		dev_err(dev, "rx%u: bad 'ti,cdr-mode' %u\n",
nport, cdr_mode);
		return -EINVAL;
	}

	if (!priv->hw_data->is_fpdlink4 && cdr_mode == RXPORT_CDR_FPD4) {
		dev_err(dev, "rx%u: FPD-Link 4 CDR not supported\n", nport);
		return -EINVAL;
	}

	rxport->cdr_mode = cdr_mode;

	/* 'ti,rx-mode' is mandatory */
	ret = fwnode_property_read_u32(link_fwnode, "ti,rx-mode", &rx_mode);
	if (ret < 0) {
		dev_err(dev, "rx%u: failed to read '%s': %d\n", nport,
			"ti,rx-mode", ret);
		return ret;
	}

	if (rx_mode > RXPORT_MODE_LAST) {
		dev_err(dev, "rx%u: bad 'ti,rx-mode' %u\n", nport, rx_mode);
		return -EINVAL;
	}

	switch (rx_mode) {
	case RXPORT_MODE_RAW12_HF:
	case RXPORT_MODE_RAW12_LF:
		/* RAW12 modes are not implemented by this driver */
		dev_err(dev, "rx%u: unsupported 'ti,rx-mode' %u\n", nport,
			rx_mode);
		return -EINVAL;
	default:
		break;
	}

	rxport->rx_mode = rx_mode;

	/* EQ & Strobe related */

	/* Defaults */
	rxport->eq.manual_eq = false;
	rxport->eq.aeq.eq_level_min = UB960_MIN_EQ_LEVEL;
	rxport->eq.aeq.eq_level_max = UB960_MAX_EQ_LEVEL;

	ret = fwnode_property_read_u32(link_fwnode, "ti,strobe-pos",
				       &strobe_pos);
	if (ret) {
		if (ret != -EINVAL) {
			dev_err(dev, "rx%u: failed to read '%s': %d\n", nport,
				"ti,strobe-pos", ret);
			return ret;
		}
	} else {
		if (strobe_pos < UB960_MIN_MANUAL_STROBE_POS ||
		    strobe_pos > UB960_MAX_MANUAL_STROBE_POS) {
			dev_err(dev, "rx%u: illegal 'strobe-pos' value: %d\n",
				nport, strobe_pos);
			return -EINVAL;
		}

		/* NOTE: ignored unless global manual strobe pos is also set */
		rxport->eq.strobe_pos = strobe_pos;
		if (!priv->strobe.manual)
			dev_warn(dev,
				 "rx%u: 'ti,strobe-pos' ignored as 'ti,manual-strobe' not set\n",
				 nport);
	}

	ret = fwnode_property_read_u32(link_fwnode, "ti,eq-level", &eq_level);
	if (ret) {
		if (ret != -EINVAL) {
			dev_err(dev, "rx%u: failed to read '%s': %d\n", nport,
				"ti,eq-level", ret);
			return ret;
		}
	} else {
		if (eq_level > UB960_MAX_EQ_LEVEL) {
			dev_err(dev, "rx%u: illegal 'ti,eq-level' value: %d\n",
				nport, eq_level);
			return -EINVAL;
		}

		rxport->eq.manual_eq = true;
		rxport->eq.manual.eq_level = eq_level;
	}

	/* The I2C alias address at which the remote serializer is reachable */
	ret = fwnode_property_read_u32(link_fwnode, "i2c-alias",
				       &ser_i2c_alias);
	if (ret) {
		dev_err(dev, "rx%u: failed to read '%s': %d\n", nport,
			"i2c-alias", ret);
		return ret;
	}
	rxport->ser.alias = ser_i2c_alias;

	rxport->ser.fwnode = fwnode_get_named_child_node(link_fwnode, "serializer");
	if (!rxport->ser.fwnode) {
		dev_err(dev, "rx%u: missing 'serializer' node\n", nport);
		return -EINVAL;
	}

	return 0;
}

/*
 * Parse the graph endpoint of an RX port: the remote endpoint and, for
 * parallel (RAW) modes, the sync polarities.
 */
static int ub960_parse_dt_rxport_ep_properties(struct ub960_data *priv,
					       struct fwnode_handle *ep_fwnode,
					       struct ub960_rxport *rxport)
{
	struct device *dev = &priv->client->dev;
	struct v4l2_fwnode_endpoint vep = {};
	unsigned int nport = rxport->nport;
	bool hsync_hi;
	bool vsync_hi;
	int ret;

	rxport->source.ep_fwnode = fwnode_graph_get_remote_endpoint(ep_fwnode);
	if (!rxport->source.ep_fwnode) {
		dev_err(dev, "rx%u: no remote endpoint\n", nport);
		return -ENODEV;
	}

	/* We currently have properties only for RAW modes */

	switch (rxport->rx_mode) {
	case RXPORT_MODE_RAW10:
	case RXPORT_MODE_RAW12_HF:
	case RXPORT_MODE_RAW12_LF:
		break;
	default:
		return 0;
	}

	vep.bus_type = V4L2_MBUS_PARALLEL;
	ret = v4l2_fwnode_endpoint_parse(ep_fwnode, &vep);
	if (ret) {
		dev_err(dev, "rx%u: failed to parse endpoint data\n", nport);
		goto err_put_source_ep_fwnode;
	}

	hsync_hi = !!(vep.bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH);
	vsync_hi = !!(vep.bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH);

	/* LineValid and 
FrameValid are inverse to the h/vsync active */
	rxport->lv_fv_pol = (hsync_hi ? UB960_RR_PORT_CONFIG2_LV_POL_LOW : 0) |
			    (vsync_hi ? UB960_RR_PORT_CONFIG2_FV_POL_LOW : 0);

	return 0;

err_put_source_ep_fwnode:
	fwnode_handle_put(rxport->source.ep_fwnode);
	return ret;
}

/*
 * Allocate and initialize one RX port from its DT 'link' node and graph
 * endpoint. On success the port is stored in priv->rxports[nport].
 */
static int ub960_parse_dt_rxport(struct ub960_data *priv, unsigned int nport,
				 struct fwnode_handle *link_fwnode,
				 struct fwnode_handle *ep_fwnode)
{
	static const char *vpoc_names[UB960_MAX_RX_NPORTS] = {
		"vpoc0", "vpoc1", "vpoc2", "vpoc3"
	};
	struct device *dev = &priv->client->dev;
	struct ub960_rxport *rxport;
	int ret;

	rxport = kzalloc(sizeof(*rxport), GFP_KERNEL);
	if (!rxport)
		return -ENOMEM;

	priv->rxports[nport] = rxport;

	rxport->nport = nport;
	rxport->priv = priv;

	ret = ub960_parse_dt_rxport_link_properties(priv, link_fwnode, rxport);
	if (ret)
		goto err_free_rxport;

	/* The VPOC (power-over-coax) supply is optional */
	rxport->vpoc = devm_regulator_get_optional(dev, vpoc_names[nport]);
	if (IS_ERR(rxport->vpoc)) {
		ret = PTR_ERR(rxport->vpoc);
		if (ret == -ENODEV) {
			rxport->vpoc = NULL;
		} else {
			dev_err(dev, "rx%u: failed to get VPOC supply: %d\n",
				nport, ret);
			goto err_put_remote_fwnode;
		}
	}

	ret = ub960_parse_dt_rxport_ep_properties(priv, ep_fwnode, rxport);
	if (ret)
		goto err_put_remote_fwnode;

	return 0;

err_put_remote_fwnode:
	fwnode_handle_put(rxport->ser.fwnode);
err_free_rxport:
	priv->rxports[nport] = NULL;
	kfree(rxport);
	return ret;
}

/*
 * Find the 'link@N' child node whose 'reg' equals @nport. Returns a new
 * fwnode reference, or NULL if not found or on a read error.
 */
static struct fwnode_handle *
ub960_fwnode_get_link_by_regs(struct fwnode_handle *links_fwnode,
			      unsigned int nport)
{
	struct fwnode_handle *link_fwnode;
	int ret;

	fwnode_for_each_child_node(links_fwnode, link_fwnode) {
		u32 link_num;

		if
(!str_has_prefix(fwnode_get_name(link_fwnode), "link@")) 3480 continue; 3481 3482 ret = fwnode_property_read_u32(link_fwnode, "reg", &link_num); 3483 if (ret) { 3484 fwnode_handle_put(link_fwnode); 3485 return NULL; 3486 } 3487 3488 if (nport == link_num) 3489 return link_fwnode; 3490 } 3491 3492 return NULL; 3493 } 3494 3495 static int ub960_parse_dt_rxports(struct ub960_data *priv) 3496 { 3497 struct device *dev = &priv->client->dev; 3498 struct fwnode_handle *links_fwnode; 3499 unsigned int nport; 3500 int ret; 3501 3502 links_fwnode = fwnode_get_named_child_node(dev_fwnode(dev), "links"); 3503 if (!links_fwnode) { 3504 dev_err(dev, "'links' node missing\n"); 3505 return -ENODEV; 3506 } 3507 3508 /* Defaults, recommended by TI */ 3509 priv->strobe.min = 2; 3510 priv->strobe.max = 3; 3511 3512 priv->strobe.manual = fwnode_property_read_bool(links_fwnode, "ti,manual-strobe"); 3513 3514 for (nport = 0; nport < priv->hw_data->num_rxports; nport++) { 3515 struct fwnode_handle *link_fwnode; 3516 struct fwnode_handle *ep_fwnode; 3517 3518 link_fwnode = ub960_fwnode_get_link_by_regs(links_fwnode, nport); 3519 if (!link_fwnode) 3520 continue; 3521 3522 ep_fwnode = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 3523 nport, 0, 0); 3524 if (!ep_fwnode) { 3525 fwnode_handle_put(link_fwnode); 3526 continue; 3527 } 3528 3529 ret = ub960_parse_dt_rxport(priv, nport, link_fwnode, 3530 ep_fwnode); 3531 3532 fwnode_handle_put(link_fwnode); 3533 fwnode_handle_put(ep_fwnode); 3534 3535 if (ret) { 3536 dev_err(dev, "rx%u: failed to parse RX port\n", nport); 3537 goto err_put_links; 3538 } 3539 } 3540 3541 fwnode_handle_put(links_fwnode); 3542 3543 return 0; 3544 3545 err_put_links: 3546 fwnode_handle_put(links_fwnode); 3547 3548 return ret; 3549 } 3550 3551 static int ub960_parse_dt_txports(struct ub960_data *priv) 3552 { 3553 struct device *dev = &priv->client->dev; 3554 u32 nport; 3555 int ret; 3556 3557 for (nport = 0; nport < priv->hw_data->num_txports; nport++) { 3558 unsigned 
int port = nport + priv->hw_data->num_rxports; 3559 struct fwnode_handle *ep_fwnode; 3560 3561 ep_fwnode = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 3562 port, 0, 0); 3563 if (!ep_fwnode) 3564 continue; 3565 3566 ret = ub960_parse_dt_txport(priv, ep_fwnode, nport); 3567 3568 fwnode_handle_put(ep_fwnode); 3569 3570 if (ret) 3571 break; 3572 } 3573 3574 return 0; 3575 } 3576 3577 static int ub960_parse_dt(struct ub960_data *priv) 3578 { 3579 int ret; 3580 3581 ret = ub960_parse_dt_rxports(priv); 3582 if (ret) 3583 return ret; 3584 3585 ret = ub960_parse_dt_txports(priv); 3586 if (ret) 3587 goto err_free_rxports; 3588 3589 return 0; 3590 3591 err_free_rxports: 3592 ub960_rxport_free_ports(priv); 3593 3594 return ret; 3595 } 3596 3597 static int ub960_notify_bound(struct v4l2_async_notifier *notifier, 3598 struct v4l2_subdev *subdev, 3599 struct v4l2_async_connection *asd) 3600 { 3601 struct ub960_data *priv = sd_to_ub960(notifier->sd); 3602 struct ub960_rxport *rxport = to_ub960_asd(asd)->rxport; 3603 struct device *dev = &priv->client->dev; 3604 u8 nport = rxport->nport; 3605 unsigned int i; 3606 int ret; 3607 3608 ret = media_entity_get_fwnode_pad(&subdev->entity, 3609 rxport->source.ep_fwnode, 3610 MEDIA_PAD_FL_SOURCE); 3611 if (ret < 0) { 3612 dev_err(dev, "Failed to find pad for %s\n", subdev->name); 3613 return ret; 3614 } 3615 3616 rxport->source.sd = subdev; 3617 rxport->source.pad = ret; 3618 3619 ret = media_create_pad_link(&rxport->source.sd->entity, 3620 rxport->source.pad, &priv->sd.entity, nport, 3621 MEDIA_LNK_FL_ENABLED | 3622 MEDIA_LNK_FL_IMMUTABLE); 3623 if (ret) { 3624 dev_err(dev, "Unable to link %s:%u -> %s:%u\n", 3625 rxport->source.sd->name, rxport->source.pad, 3626 priv->sd.name, nport); 3627 return ret; 3628 } 3629 3630 for (i = 0; i < priv->hw_data->num_rxports; i++) { 3631 if (priv->rxports[i] && !priv->rxports[i]->source.sd) { 3632 dev_dbg(dev, "Waiting for more subdevs to be bound\n"); 3633 return 0; 3634 } 3635 } 3636 3637 return 
0; 3638 } 3639 3640 static void ub960_notify_unbind(struct v4l2_async_notifier *notifier, 3641 struct v4l2_subdev *subdev, 3642 struct v4l2_async_connection *asd) 3643 { 3644 struct ub960_rxport *rxport = to_ub960_asd(asd)->rxport; 3645 3646 rxport->source.sd = NULL; 3647 } 3648 3649 static const struct v4l2_async_notifier_operations ub960_notify_ops = { 3650 .bound = ub960_notify_bound, 3651 .unbind = ub960_notify_unbind, 3652 }; 3653 3654 static int ub960_v4l2_notifier_register(struct ub960_data *priv) 3655 { 3656 struct device *dev = &priv->client->dev; 3657 unsigned int i; 3658 int ret; 3659 3660 v4l2_async_subdev_nf_init(&priv->notifier, &priv->sd); 3661 3662 for (i = 0; i < priv->hw_data->num_rxports; i++) { 3663 struct ub960_rxport *rxport = priv->rxports[i]; 3664 struct ub960_asd *asd; 3665 3666 if (!rxport) 3667 continue; 3668 3669 asd = v4l2_async_nf_add_fwnode(&priv->notifier, 3670 rxport->source.ep_fwnode, 3671 struct ub960_asd); 3672 if (IS_ERR(asd)) { 3673 dev_err(dev, "Failed to add subdev for source %u: %pe", 3674 i, asd); 3675 v4l2_async_nf_cleanup(&priv->notifier); 3676 return PTR_ERR(asd); 3677 } 3678 3679 asd->rxport = rxport; 3680 } 3681 3682 priv->notifier.ops = &ub960_notify_ops; 3683 3684 ret = v4l2_async_nf_register(&priv->notifier); 3685 if (ret) { 3686 dev_err(dev, "Failed to register subdev_notifier"); 3687 v4l2_async_nf_cleanup(&priv->notifier); 3688 return ret; 3689 } 3690 3691 return 0; 3692 } 3693 3694 static void ub960_v4l2_notifier_unregister(struct ub960_data *priv) 3695 { 3696 v4l2_async_nf_unregister(&priv->notifier); 3697 v4l2_async_nf_cleanup(&priv->notifier); 3698 } 3699 3700 static int ub960_create_subdev(struct ub960_data *priv) 3701 { 3702 struct device *dev = &priv->client->dev; 3703 unsigned int i; 3704 int ret; 3705 3706 v4l2_i2c_subdev_init(&priv->sd, priv->client, &ub960_subdev_ops); 3707 priv->sd.internal_ops = &ub960_internal_ops; 3708 3709 v4l2_ctrl_handler_init(&priv->ctrl_handler, 1); 3710 priv->sd.ctrl_handler = 
&priv->ctrl_handler; 3711 3712 v4l2_ctrl_new_int_menu(&priv->ctrl_handler, NULL, V4L2_CID_LINK_FREQ, 3713 ARRAY_SIZE(priv->tx_link_freq) - 1, 0, 3714 priv->tx_link_freq); 3715 3716 if (priv->ctrl_handler.error) { 3717 ret = priv->ctrl_handler.error; 3718 goto err_free_ctrl; 3719 } 3720 3721 priv->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | 3722 V4L2_SUBDEV_FL_STREAMS; 3723 priv->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE; 3724 priv->sd.entity.ops = &ub960_entity_ops; 3725 3726 for (i = 0; i < priv->hw_data->num_rxports + priv->hw_data->num_txports; i++) { 3727 priv->pads[i].flags = ub960_pad_is_sink(priv, i) ? 3728 MEDIA_PAD_FL_SINK : 3729 MEDIA_PAD_FL_SOURCE; 3730 } 3731 3732 ret = media_entity_pads_init(&priv->sd.entity, 3733 priv->hw_data->num_rxports + 3734 priv->hw_data->num_txports, 3735 priv->pads); 3736 if (ret) 3737 goto err_free_ctrl; 3738 3739 priv->sd.state_lock = priv->sd.ctrl_handler->lock; 3740 3741 ret = v4l2_subdev_init_finalize(&priv->sd); 3742 if (ret) 3743 goto err_entity_cleanup; 3744 3745 ret = ub960_v4l2_notifier_register(priv); 3746 if (ret) { 3747 dev_err(dev, "v4l2 subdev notifier register failed: %d\n", ret); 3748 goto err_subdev_cleanup; 3749 } 3750 3751 ret = v4l2_async_register_subdev(&priv->sd); 3752 if (ret) { 3753 dev_err(dev, "v4l2_async_register_subdev error: %d\n", ret); 3754 goto err_unreg_notif; 3755 } 3756 3757 return 0; 3758 3759 err_unreg_notif: 3760 ub960_v4l2_notifier_unregister(priv); 3761 err_subdev_cleanup: 3762 v4l2_subdev_cleanup(&priv->sd); 3763 err_entity_cleanup: 3764 media_entity_cleanup(&priv->sd.entity); 3765 err_free_ctrl: 3766 v4l2_ctrl_handler_free(&priv->ctrl_handler); 3767 3768 return ret; 3769 } 3770 3771 static void ub960_destroy_subdev(struct ub960_data *priv) 3772 { 3773 ub960_v4l2_notifier_unregister(priv); 3774 v4l2_async_unregister_subdev(&priv->sd); 3775 3776 v4l2_subdev_cleanup(&priv->sd); 3777 3778 media_entity_cleanup(&priv->sd.entity); 3779 v4l2_ctrl_handler_free(&priv->ctrl_handler); 3780 } 

static const struct regmap_config ub960_regmap_config = {
	.name = "ds90ub960",

	.reg_bits = 8,
	.val_bits = 8,

	.max_register = 0xff,

	/*
	 * We do locking in the driver to cover the TX/RX port selection and the
	 * indirect register access.
	 */
	.disable_locking = true,
};

/*
 * Soft-reset the chip via the RESET register. DIGITAL_RESET1 also resets
 * the registers, DIGITAL_RESET0 does not (reset_regs selects between them).
 * Polls until the hardware clears the reset bit; only logs on timeout as
 * the callers have no recovery path.
 */
static void ub960_reset(struct ub960_data *priv, bool reset_regs)
{
	struct device *dev = &priv->client->dev;
	unsigned int v;
	int ret;
	u8 bit;

	bit = reset_regs ? UB960_SR_RESET_DIGITAL_RESET1 :
			   UB960_SR_RESET_DIGITAL_RESET0;

	ub960_write(priv, UB960_SR_RESET, bit);

	/* regmap locking is disabled, so take the driver's own lock here */
	mutex_lock(&priv->reg_lock);

	/* Wait (2 ms poll, 100 ms timeout) for the reset bit to self-clear */
	ret = regmap_read_poll_timeout(priv->regmap, UB960_SR_RESET, v,
				       (v & bit) == 0, 2000, 100000);

	mutex_unlock(&priv->reg_lock);

	if (ret)
		dev_err(dev, "reset failed: %d\n", ret);
}

/*
 * Acquire all devm-managed hardware resources: regmap, VDDIO regulator,
 * optional power-down GPIO, and the reference clock. Nothing is enabled
 * here; that happens in ub960_enable_core_hw().
 */
static int ub960_get_hw_resources(struct ub960_data *priv)
{
	struct device *dev = &priv->client->dev;

	priv->regmap = devm_regmap_init_i2c(priv->client, &ub960_regmap_config);
	if (IS_ERR(priv->regmap))
		return PTR_ERR(priv->regmap);

	priv->vddio = devm_regulator_get(dev, "vddio");
	if (IS_ERR(priv->vddio))
		return dev_err_probe(dev, PTR_ERR(priv->vddio),
				     "cannot get VDDIO regulator\n");

	/* get power-down pin from DT */
	priv->pd_gpio =
		devm_gpiod_get_optional(dev, "powerdown", GPIOD_OUT_HIGH);
	if (IS_ERR(priv->pd_gpio))
		return dev_err_probe(dev, PTR_ERR(priv->pd_gpio),
				     "Cannot get powerdown GPIO\n");

	priv->refclk = devm_clk_get(dev, "refclk");
	if (IS_ERR(priv->refclk))
		return dev_err_probe(dev, PTR_ERR(priv->refclk),
				     "Cannot get REFCLK\n");

	return 0;
}

/*
 * Power up and initialize the core: VDDIO regulator on, refclk on,
 * power-down sequence (if the GPIO exists), full register reset, sanity
 * register reads, and disabling all RX ports. Unwinds fully on failure.
 */
static int ub960_enable_core_hw(struct ub960_data *priv)
{
	struct device *dev = &priv->client->dev;
	u8 rev_mask;
	int ret;
	u8 dev_sts;
	u8 refclk_freq;

	ret = regulator_enable(priv->vddio);
	if (ret)
		return dev_err_probe(dev, ret,
				     "failed to enable VDDIO regulator\n");

	ret = clk_prepare_enable(priv->refclk);
	if (ret) {
		dev_err_probe(dev, ret, "Failed to enable refclk\n");
		goto err_disable_vddio;
	}

	if (priv->pd_gpio) {
		gpiod_set_value_cansleep(priv->pd_gpio, 1);
		/* wait min 2 ms for reset to complete */
		fsleep(2000);
		gpiod_set_value_cansleep(priv->pd_gpio, 0);
		/* wait min 2 ms for power up to finish */
		fsleep(2000);
	}

	/* Full reset, including registers */
	ub960_reset(priv, true);

	/* Runtime check register accessibility */
	ret = ub960_read(priv, UB960_SR_REV_MASK, &rev_mask);
	if (ret) {
		dev_err_probe(dev, ret, "Cannot read first register, abort\n");
		goto err_pd_gpio;
	}

	dev_dbg(dev, "Found %s (rev/mask %#04x)\n", priv->hw_data->model,
		rev_mask);

	ret = ub960_read(priv, UB960_SR_DEVICE_STS, &dev_sts);
	if (ret)
		goto err_pd_gpio;

	/* The REFCLK frequency register lives at a different address on UB9702 */
	if (priv->hw_data->is_ub9702)
		ret = ub960_read(priv, UB9702_SR_REFCLK_FREQ, &refclk_freq);
	else
		ret = ub960_read(priv, UB960_XR_REFCLK_FREQ, &refclk_freq);
	if (ret)
		goto err_pd_gpio;

	/* BIT(4) of DEVICE_STS presumably flags a valid refclk — per datasheet */
	dev_dbg(dev, "refclk valid %u freq %u MHz (clk fw freq %lu MHz)\n",
		!!(dev_sts & BIT(4)), refclk_freq,
		clk_get_rate(priv->refclk) / HZ_PER_MHZ);

	/* Disable all RX ports by default */
	ret = ub960_write(priv, UB960_SR_RX_PORT_CTL, 0);
	if (ret)
		goto err_pd_gpio;

	/* release GPIO lock */
	if (priv->hw_data->is_ub9702) {
		ret = ub960_update_bits(priv, UB960_SR_RESET,
					UB960_SR_RESET_GPIO_LOCK_RELEASE,
					UB960_SR_RESET_GPIO_LOCK_RELEASE);
		if (ret)
			goto err_pd_gpio;
	}

	return 0;

err_pd_gpio:
	/* gpiod_set_value_cansleep() is a no-op for a NULL (absent) GPIO */
	gpiod_set_value_cansleep(priv->pd_gpio, 1);
	clk_disable_unprepare(priv->refclk);
err_disable_vddio:
	regulator_disable(priv->vddio);

	return ret;
}

/* Reverse of ub960_enable_core_hw(): power-down, clock off, regulator off. */
static void ub960_disable_core_hw(struct ub960_data *priv)
{
	gpiod_set_value_cansleep(priv->pd_gpio, 1);
	clk_disable_unprepare(priv->refclk);
	regulator_disable(priv->vddio);
}

/*
 * Probe: acquire resources, power up, parse DT, initialize TX then RX
 * ports, wait for all configured RX links to lock, set up the i2c-atr and
 * remote serializers, register the V4L2 subdev, and start the polling
 * work (no IRQ support yet). Error labels unwind in strict reverse order.
 */
static int ub960_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct ub960_data *priv;
	unsigned int port_lock_mask;
	unsigned int port_mask;
	unsigned int nport;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->client = client;

	priv->hw_data = device_get_match_data(dev);

	mutex_init(&priv->reg_lock);

	INIT_DELAYED_WORK(&priv->poll_work, ub960_handler_work);

	/*
	 * Initialize these to invalid values so that the first reg writes will
	 * configure the target.
	 */
	priv->reg_current.indirect_target = 0xff;
	priv->reg_current.rxport = 0xff;
	priv->reg_current.txport = 0xff;

	ret = ub960_get_hw_resources(priv);
	if (ret)
		goto err_mutex_destroy;

	ret = ub960_enable_core_hw(priv);
	if (ret)
		goto err_mutex_destroy;

	ret = ub960_parse_dt(priv);
	if (ret)
		goto err_disable_core_hw;

	ret = ub960_init_tx_ports(priv);
	if (ret)
		goto err_free_ports;

	ret = ub960_rxport_enable_vpocs(priv);
	if (ret)
		goto err_free_ports;

	ret = ub960_init_rx_ports(priv);
	if (ret)
		goto err_disable_vpocs;

	/* Digital reset without clearing the registers */
	ub960_reset(priv, false);

	/* Collect the mask of RX ports that were configured from DT */
	port_mask = 0;

	for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
		struct ub960_rxport *rxport = priv->rxports[nport];

		if (!rxport)
			continue;

		port_mask |= BIT(nport);
	}

	ret = ub960_rxport_wait_locks(priv, port_mask, &port_lock_mask);
	if (ret)
		goto err_disable_vpocs;

	if (port_mask != port_lock_mask) {
		ret = -EIO;
		dev_err_probe(dev, ret, "Failed to lock all RX ports\n");
		goto err_disable_vpocs;
	}

	/*
	 * Clear any errors caused by switching the RX port settings while
	 * probing.
	 */
	ub960_clear_rx_errors(priv);

	ret = ub960_init_atr(priv);
	if (ret)
		goto err_disable_vpocs;

	ret = ub960_rxport_add_serializers(priv);
	if (ret)
		goto err_uninit_atr;

	ret = ub960_create_subdev(priv);
	if (ret)
		goto err_free_sers;

	if (client->irq)
		dev_warn(dev, "irq support not implemented, using polling\n");

	schedule_delayed_work(&priv->poll_work,
			      msecs_to_jiffies(UB960_POLL_TIME_MS));

#ifdef UB960_DEBUG_I2C_RX_ID
	/* Debug aid: expose the paged RX port registers on fixed i2c addresses */
	for (unsigned int i = 0; i < priv->hw_data->num_rxports; i++)
		ub960_write(priv, UB960_SR_I2C_RX_ID(i),
			    (UB960_DEBUG_I2C_RX_ID + i) << 1);
#endif

	return 0;

err_free_sers:
	ub960_rxport_remove_serializers(priv);
err_uninit_atr:
	ub960_uninit_atr(priv);
err_disable_vpocs:
	ub960_rxport_disable_vpocs(priv);
err_free_ports:
	ub960_rxport_free_ports(priv);
	ub960_txport_free_ports(priv);
err_disable_core_hw:
	ub960_disable_core_hw(priv);
err_mutex_destroy:
	mutex_destroy(&priv->reg_lock);
	return ret;
}

/* Remove: stop polling first, then tear down in reverse order of probe. */
static void ub960_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct ub960_data *priv = sd_to_ub960(sd);

	cancel_delayed_work_sync(&priv->poll_work);

	ub960_destroy_subdev(priv);
	ub960_rxport_remove_serializers(priv);
	ub960_uninit_atr(priv);
	ub960_rxport_disable_vpocs(priv);
	ub960_rxport_free_ports(priv);
	ub960_txport_free_ports(priv);
	ub960_disable_core_hw(priv);
	mutex_destroy(&priv->reg_lock);
}

/* Per-model data: DS90UB960 (FPD-Link III) */
static const struct ub960_hw_data ds90ub960_hw = {
	.model = "ub960",
	.num_rxports = 4,
	.num_txports = 2,
};

/* Per-model data: DS90UB9702 (FPD-Link IV capable) */
static const struct ub960_hw_data ds90ub9702_hw = {
	.model = "ub9702",
	.num_rxports = 4,
	.num_txports = 2,
	.is_ub9702 = true,
	.is_fpdlink4 = true,
};

static const struct i2c_device_id ub960_id[] = {
	{ "ds90ub960-q1", (kernel_ulong_t)&ds90ub960_hw },
	{ "ds90ub9702-q1", (kernel_ulong_t)&ds90ub9702_hw },
	{}
};
MODULE_DEVICE_TABLE(i2c, ub960_id);

static const struct of_device_id ub960_dt_ids[] = {
	{ .compatible = "ti,ds90ub960-q1", .data = &ds90ub960_hw },
	{ .compatible = "ti,ds90ub9702-q1", .data = &ds90ub9702_hw },
	{}
};
MODULE_DEVICE_TABLE(of, ub960_dt_ids);

static struct i2c_driver ds90ub960_driver = {
	.probe = ub960_probe,
	.remove = ub960_remove,
	.id_table = ub960_id,
	.driver = {
		.name = "ds90ub960",
		.of_match_table = ub960_dt_ids,
	},
};
module_i2c_driver(ds90ub960_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Texas Instruments FPD-Link III/IV Deserializers Driver");
MODULE_AUTHOR("Luca Ceresoli <luca@lucaceresoli.net>");
MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>");
MODULE_IMPORT_NS("I2C_ATR");