Lines Matching +full:rx +full:- +full:eq

1 // SPDX-License-Identifier: GPL-2.0
3 * Driver for the Texas Instruments DS90UB960-Q1 video deserializer
12 * - PM for serializer and remote peripherals. We need to manage:
13 *   - VPOC
14 *     - Power domain? Regulator? Somehow any remote device should be able to
16 *   - Link between the deserializer and the serializer
17 *     - Related to VPOC management. We probably always want to turn on the VPOC
19 *   - Serializer's services: i2c, gpios, power
20 *     - The serializer needs to resume before the remote peripherals can
22 *     - How to handle gpios? Reserving a gpio essentially keeps the provider
24 * - Do we need a new bus for the FPD-Link? At the moment the serializers
25 *   are children of the same i2c-adapter where the deserializer resides.
26 * - i2c-atr could be made embeddable instead of allocatable.
34 #include <linux/i2c-atr.h>
49 #include <media/mipi-csi2.h>
50 #include <media/v4l2-ctrls.h>
51 #include <media/v4l2-fwnode.h>
52 #include <media/v4l2-subdev.h>
69 * 0x00-0x32 Shared (UB960_SR)
70 * 0x33-0x3a CSI-2 TX (per-port paged on DS90UB960, shared on 954) (UB960_TR)
72 * 0x4d-0x7f FPD-Link RX, per-port paged (UB960_RR)
73 * 0xb0-0xbf Shared (UB960_SR)
74 * 0xd0-0xdf FPD-Link RX, per-port paged (UB960_RR)
75 * 0xf0-0xf5 Shared (UB960_SR)
76 * 0xf8-0xfb Shared (UB960_SR)
81 * UB960_RR_* = FPD-Link RX, per-port paged register
82 * UB960_TR_* = CSI-2 TX, per-port paged register
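The split above between shared and per-port paged ranges means one register address can back several physical registers, one per RX (or TX) port; a shared port-select register picks which copy the next access hits, and the select helpers further down cache the current selection in priv->reg_current so repeated accesses to the same port skip the extra write. A minimal standalone model of that paging scheme, not the driver's code (the port-select address, its field layout and the 4-port count are assumptions made for the illustration):

/*
 * Standalone model of the paged register access scheme described above.
 * One register address maps onto a per-port copy; a shared port-select
 * write chooses which copy is visible, and the selection is cached.
 */
#include <stdio.h>
#include <stdint.h>

#define NUM_RXPORTS 4

static uint8_t shared_regs[256];		/* models the UB960_SR_* space */
static uint8_t paged_regs[NUM_RXPORTS][256];	/* models the UB960_RR_* space */
static int cur_rxport = -1;			/* mimics priv->reg_current.rxport */

static void rxport_select(int nport)
{
	if (cur_rxport == nport)
		return;				/* selection is cached */
	shared_regs[0x4c] = (uint8_t)nport;	/* port-select write; address/layout assumed */
	cur_rxport = nport;
}

static uint8_t rxport_read(int nport, uint8_t reg)
{
	rxport_select(nport);
	return paged_regs[cur_rxport][reg];
}

int main(void)
{
	paged_regs[1][0x4d] = 0xab;	/* pretend RX port 1 has data in a paged register */
	printf("rx1 reg 0x4d = %#x\n", rxport_read(1, 0x4d));
	return 0;
}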
389 /* EQ related */
391 #define UB960_MIN_AEQ_STROBE_POS -7
396 #define UB960_MIN_MANUAL_STROBE_POS -(7 + UB960_MANUAL_STROBE_EXTRA_DELAY)
398 #define UB960_NUM_MANUAL_STROBE_POS (UB960_MAX_MANUAL_STROBE_POS - UB960_MIN_MANUAL_STROBE_POS + 1)
402 #define UB960_NUM_EQ_LEVELS (UB960_MAX_EQ_LEVEL - UB960_MIN_EQ_LEVEL + 1)
429 u8 nport; /* RX port number, and index in priv->rxports[] */
452 /* EQ settings */
468 } eq; member
485 u8 nport; /* TX port number, and index in priv->txports[] */
547 return pad < priv->hw_data->num_rxports; in ub960_pad_is_sink()
552 return pad >= priv->hw_data->num_rxports; in ub960_pad_is_source()
560 return pad - priv->hw_data->num_rxports; in ub960_pad_to_port()
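The three pad helpers above imply the subdev's pad layout: sink pads 0..num_rxports-1 correspond one-to-one to RX ports, and the source pads that follow correspond to TX ports, so a source pad converts to a TX port number by subtracting the RX port count. A tiny standalone illustration (the 4-RX/2-TX counts are assumed for the example):

/*
 * Illustration of the pad numbering implied by ub960_pad_is_sink(),
 * ub960_pad_is_source() and ub960_pad_to_port() above.
 */
#include <stdio.h>

#define NUM_RXPORTS 4	/* assumed count for the example */

static unsigned int pad_to_port(unsigned int pad)
{
	/* sink pads map 1:1 to RX ports; source pads are offset by the RX count */
	return pad < NUM_RXPORTS ? pad : pad - NUM_RXPORTS;
}

int main(void)
{
	printf("pad 2 -> RX port %u\n", pad_to_port(2));
	printf("pad 5 -> TX port %u\n", pad_to_port(5));
	return 0;
}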
594 /* -----------------------------------------------------------------------------
600 struct device *dev = &priv->client->dev; in ub960_read()
604 mutex_lock(&priv->reg_lock); in ub960_read()
606 ret = regmap_read(priv->regmap, reg, &v); in ub960_read()
616 mutex_unlock(&priv->reg_lock); in ub960_read()
623 struct device *dev = &priv->client->dev; in ub960_write()
626 mutex_lock(&priv->reg_lock); in ub960_write()
628 ret = regmap_write(priv->regmap, reg, val); in ub960_write()
633 mutex_unlock(&priv->reg_lock); in ub960_write()
640 struct device *dev = &priv->client->dev; in ub960_update_bits()
643 mutex_lock(&priv->reg_lock); in ub960_update_bits()
645 ret = regmap_update_bits(priv->regmap, reg, mask, val); in ub960_update_bits()
650 mutex_unlock(&priv->reg_lock); in ub960_update_bits()
657 struct device *dev = &priv->client->dev; in ub960_read16()
661 mutex_lock(&priv->reg_lock); in ub960_read16()
663 ret = regmap_bulk_read(priv->regmap, reg, &__v, sizeof(__v)); in ub960_read16()
673 mutex_unlock(&priv->reg_lock); in ub960_read16()
680 struct device *dev = &priv->client->dev; in ub960_rxport_select()
683 lockdep_assert_held(&priv->reg_lock); in ub960_rxport_select()
685 if (priv->reg_current.rxport == nport) in ub960_rxport_select()
688 ret = regmap_write(priv->regmap, UB960_SR_FPD3_PORT_SEL, in ub960_rxport_select()
696 priv->reg_current.rxport = nport; in ub960_rxport_select()
703 struct device *dev = &priv->client->dev; in ub960_rxport_read()
707 mutex_lock(&priv->reg_lock); in ub960_rxport_read()
713 ret = regmap_read(priv->regmap, reg, &v); in ub960_rxport_read()
723 mutex_unlock(&priv->reg_lock); in ub960_rxport_read()
730 struct device *dev = &priv->client->dev; in ub960_rxport_write()
733 mutex_lock(&priv->reg_lock); in ub960_rxport_write()
739 ret = regmap_write(priv->regmap, reg, val); in ub960_rxport_write()
745 mutex_unlock(&priv->reg_lock); in ub960_rxport_write()
753 struct device *dev = &priv->client->dev; in ub960_rxport_update_bits()
756 mutex_lock(&priv->reg_lock); in ub960_rxport_update_bits()
762 ret = regmap_update_bits(priv->regmap, reg, mask, val); in ub960_rxport_update_bits()
768 mutex_unlock(&priv->reg_lock); in ub960_rxport_update_bits()
776 struct device *dev = &priv->client->dev; in ub960_rxport_read16()
780 mutex_lock(&priv->reg_lock); in ub960_rxport_read16()
786 ret = regmap_bulk_read(priv->regmap, reg, &__v, sizeof(__v)); in ub960_rxport_read16()
796 mutex_unlock(&priv->reg_lock); in ub960_rxport_read16()
803 struct device *dev = &priv->client->dev; in ub960_txport_select()
806 lockdep_assert_held(&priv->reg_lock); in ub960_txport_select()
808 if (priv->reg_current.txport == nport) in ub960_txport_select()
811 ret = regmap_write(priv->regmap, UB960_SR_CSI_PORT_SEL, in ub960_txport_select()
819 priv->reg_current.txport = nport; in ub960_txport_select()
826 struct device *dev = &priv->client->dev; in ub960_txport_read()
830 mutex_lock(&priv->reg_lock); in ub960_txport_read()
836 ret = regmap_read(priv->regmap, reg, &v); in ub960_txport_read()
846 mutex_unlock(&priv->reg_lock); in ub960_txport_read()
853 struct device *dev = &priv->client->dev; in ub960_txport_write()
856 mutex_lock(&priv->reg_lock); in ub960_txport_write()
862 ret = regmap_write(priv->regmap, reg, val); in ub960_txport_write()
868 mutex_unlock(&priv->reg_lock); in ub960_txport_write()
876 struct device *dev = &priv->client->dev; in ub960_txport_update_bits()
879 mutex_lock(&priv->reg_lock); in ub960_txport_update_bits()
885 ret = regmap_update_bits(priv->regmap, reg, mask, val); in ub960_txport_update_bits()
891 mutex_unlock(&priv->reg_lock); in ub960_txport_update_bits()
898 struct device *dev = &priv->client->dev; in ub960_select_ind_reg_block()
901 lockdep_assert_held(&priv->reg_lock); in ub960_select_ind_reg_block()
903 if (priv->reg_current.indirect_target == block) in ub960_select_ind_reg_block()
906 ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_CTL, block << 2); in ub960_select_ind_reg_block()
913 priv->reg_current.indirect_target = block; in ub960_select_ind_reg_block()
920 struct device *dev = &priv->client->dev; in ub960_read_ind()
924 mutex_lock(&priv->reg_lock); in ub960_read_ind()
930 ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_ADDR, reg); in ub960_read_ind()
938 ret = regmap_read(priv->regmap, UB960_SR_IND_ACC_DATA, &v); in ub960_read_ind()
949 mutex_unlock(&priv->reg_lock); in ub960_read_ind()
956 struct device *dev = &priv->client->dev; in ub960_write_ind()
959 mutex_lock(&priv->reg_lock); in ub960_write_ind()
965 ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_ADDR, reg); in ub960_write_ind()
973 ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_DATA, val); in ub960_write_ind()
982 mutex_unlock(&priv->reg_lock); in ub960_write_ind()
990 struct device *dev = &priv->client->dev; in ub960_ind_update_bits()
993 mutex_lock(&priv->reg_lock); in ub960_ind_update_bits()
999 ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_ADDR, reg); in ub960_ind_update_bits()
1007 ret = regmap_update_bits(priv->regmap, UB960_SR_IND_ACC_DATA, mask, in ub960_ind_update_bits()
1017 mutex_unlock(&priv->reg_lock); in ub960_ind_update_bits()
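The indirect-access helpers above all follow the same three-step sequence: select the target register block through UB960_SR_IND_ACC_CTL (block << 2), latch the register address in UB960_SR_IND_ACC_ADDR, then move the data through UB960_SR_IND_ACC_DATA. A simplified sketch of a read, reusing the driver's priv->regmap and priv->reg_lock as seen in the fragments; the reg_current.indirect_target cache and most error reporting are left out, so this is an illustration rather than the driver's exact code:

/*
 * Sketch of the indirect register read sequence used by the helpers above
 * (simplified illustration, compilable only in the driver's own context).
 */
static int ub960_ind_read_sketch(struct ub960_data *priv, u8 block, u8 reg,
				 u8 *val)
{
	unsigned int v;
	int ret;

	mutex_lock(&priv->reg_lock);

	/* 1. Select which indirect register block IND_ACC_DATA will reach */
	ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_CTL, block << 2);
	if (ret)
		goto out_unlock;

	/* 2. Latch the register address inside that block */
	ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_ADDR, reg);
	if (ret)
		goto out_unlock;

	/* 3. The data register now proxies the selected indirect register */
	ret = regmap_read(priv->regmap, UB960_SR_IND_ACC_DATA, &v);
	if (!ret)
		*val = v;

out_unlock:
	mutex_unlock(&priv->reg_lock);
	return ret;
}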
1022 /* -----------------------------------------------------------------------------
1023 * I2C-ATR (address translator)
1030 struct ub960_rxport *rxport = priv->rxports[chan_id]; in ub960_atr_attach_client()
1031 struct device *dev = &priv->client->dev; in ub960_atr_attach_client()
1034 for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_clients); reg_idx++) { in ub960_atr_attach_client()
1035 if (!rxport->aliased_clients[reg_idx]) in ub960_atr_attach_client()
1039 if (reg_idx == ARRAY_SIZE(rxport->aliased_clients)) { in ub960_atr_attach_client()
1040 dev_err(dev, "rx%u: alias pool exhausted\n", rxport->nport); in ub960_atr_attach_client()
1041 return -EADDRNOTAVAIL; in ub960_atr_attach_client()
1044 rxport->aliased_clients[reg_idx] = client; in ub960_atr_attach_client()
1047 client->addr << 1); in ub960_atr_attach_client()
1051 dev_dbg(dev, "rx%u: client 0x%02x assigned alias 0x%02x at slot %u\n", in ub960_atr_attach_client()
1052 rxport->nport, client->addr, alias, reg_idx); in ub960_atr_attach_client()
1061 struct ub960_rxport *rxport = priv->rxports[chan_id]; in ub960_atr_detach_client()
1062 struct device *dev = &priv->client->dev; in ub960_atr_detach_client()
1065 for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_clients); reg_idx++) { in ub960_atr_detach_client()
1066 if (rxport->aliased_clients[reg_idx] == client) in ub960_atr_detach_client()
1070 if (reg_idx == ARRAY_SIZE(rxport->aliased_clients)) { in ub960_atr_detach_client()
1071 dev_err(dev, "rx%u: client 0x%02x is not mapped!\n", in ub960_atr_detach_client()
1072 rxport->nport, client->addr); in ub960_atr_detach_client()
1076 rxport->aliased_clients[reg_idx] = NULL; in ub960_atr_detach_client()
1080 dev_dbg(dev, "rx%u: client 0x%02x released at slot %u\n", rxport->nport, in ub960_atr_detach_client()
1081 client->addr, reg_idx); in ub960_atr_detach_client()
1091 struct device *dev = &priv->client->dev; in ub960_init_atr()
1092 struct i2c_adapter *parent_adap = priv->client->adapter; in ub960_init_atr()
1094 priv->atr = i2c_atr_new(parent_adap, dev, &ub960_atr_ops, in ub960_init_atr()
1095 priv->hw_data->num_rxports); in ub960_init_atr()
1096 if (IS_ERR(priv->atr)) in ub960_init_atr()
1097 return PTR_ERR(priv->atr); in ub960_init_atr()
1099 i2c_atr_set_driver_data(priv->atr, priv); in ub960_init_atr()
1106 i2c_atr_delete(priv->atr); in ub960_uninit_atr()
1107 priv->atr = NULL; in ub960_uninit_atr()
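The attach callback above maintains a small alias pool per RX port: it scans rxport->aliased_clients[] for a free slot, records the client there and programs the deserializer so that transactions to the alias on the local bus reach the client's real address across the FPD-Link; detach finds the client's slot and clears it. A standalone model of just the slot bookkeeping (the per-slot slave-ID/alias register writes are not shown in this excerpt, and the pool size of 8 is an assumption):

/*
 * Standalone model of the per-port alias slot bookkeeping used by the
 * I2C-ATR attach/detach callbacks above (illustration only).
 */
#include <stdio.h>
#include <stddef.h>
#include <errno.h>

#define ALIAS_SLOTS 8	/* assumed pool size */

static unsigned short aliased_clients[ALIAS_SLOTS];	/* 0 = free slot */

static int atr_attach(unsigned short client_addr)
{
	for (size_t i = 0; i < ALIAS_SLOTS; i++) {
		if (!aliased_clients[i]) {
			aliased_clients[i] = client_addr;
			return (int)i;	/* slot index, as in the driver */
		}
	}
	return -EADDRNOTAVAIL;		/* "alias pool exhausted" */
}

static void atr_detach(unsigned short client_addr)
{
	for (size_t i = 0; i < ALIAS_SLOTS; i++) {
		if (aliased_clients[i] == client_addr) {
			aliased_clients[i] = 0;
			return;
		}
	}
}

int main(void)
{
	int slot = atr_attach(0x30);	/* e.g. a remote device at address 0x30 */

	printf("client 0x30 -> slot %d\n", slot);
	atr_detach(0x30);
	return 0;
}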
1110 /* -----------------------------------------------------------------------------
1118 struct device *dev = &priv->client->dev; in ub960_parse_dt_txport()
1125 return -ENOMEM; in ub960_parse_dt_txport()
1127 txport->priv = priv; in ub960_parse_dt_txport()
1128 txport->nport = nport; in ub960_parse_dt_txport()
1137 txport->non_continous_clk = vep.bus.mipi_csi2.flags & in ub960_parse_dt_txport()
1140 txport->num_data_lanes = vep.bus.mipi_csi2.num_data_lanes; in ub960_parse_dt_txport()
1143 ret = -EINVAL; in ub960_parse_dt_txport()
1147 priv->tx_link_freq[0] = vep.link_frequencies[0]; in ub960_parse_dt_txport()
1148 priv->tx_data_rate = priv->tx_link_freq[0] * 2; in ub960_parse_dt_txport()
1150 if (priv->tx_data_rate != MHZ(1600) && in ub960_parse_dt_txport()
1151 priv->tx_data_rate != MHZ(1200) && in ub960_parse_dt_txport()
1152 priv->tx_data_rate != MHZ(800) && in ub960_parse_dt_txport()
1153 priv->tx_data_rate != MHZ(400)) { in ub960_parse_dt_txport()
1154 dev_err(dev, "tx%u: invalid 'link-frequencies' value\n", nport); in ub960_parse_dt_txport()
1155 ret = -EINVAL; in ub960_parse_dt_txport()
1161 priv->txports[nport] = txport; in ub960_parse_dt_txport()
1175 struct device *dev = &priv->client->dev; in ub960_csi_handle_events()
1190 /* -----------------------------------------------------------------------------
1191 * RX ports
1199 for (nport = 0; nport < priv->hw_data->num_rxports; nport++) { in ub960_rxport_enable_vpocs()
1200 struct ub960_rxport *rxport = priv->rxports[nport]; in ub960_rxport_enable_vpocs()
1202 if (!rxport || !rxport->vpoc) in ub960_rxport_enable_vpocs()
1205 ret = regulator_enable(rxport->vpoc); in ub960_rxport_enable_vpocs()
1213 while (nport--) { in ub960_rxport_enable_vpocs()
1214 struct ub960_rxport *rxport = priv->rxports[nport]; in ub960_rxport_enable_vpocs()
1216 if (!rxport || !rxport->vpoc) in ub960_rxport_enable_vpocs()
1219 regulator_disable(rxport->vpoc); in ub960_rxport_enable_vpocs()
1229 for (nport = 0; nport < priv->hw_data->num_rxports; nport++) { in ub960_rxport_disable_vpocs()
1230 struct ub960_rxport *rxport = priv->rxports[nport]; in ub960_rxport_disable_vpocs()
1232 if (!rxport || !rxport->vpoc) in ub960_rxport_disable_vpocs()
1235 regulator_disable(rxport->vpoc); in ub960_rxport_disable_vpocs()
1259 for (nport = 0; nport < priv->hw_data->num_rxports; nport++) in ub960_clear_rx_errors()
1294 *strobe_pos = data_delay - clk_delay; in ub960_rxport_get_strobe_pos()
1308 clk_delay = abs(strobe_pos) - UB960_MANUAL_STROBE_EXTRA_DELAY; in ub960_rxport_set_strobe_pos()
1310 data_delay = strobe_pos - UB960_MANUAL_STROBE_EXTRA_DELAY; in ub960_rxport_set_strobe_pos()
1327 strobe_min -= UB960_MIN_AEQ_STROBE_POS; in ub960_rxport_set_strobe_range()
1328 strobe_max -= UB960_MIN_AEQ_STROBE_POS; in ub960_rxport_set_strobe_range()
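The fragments above give the strobe arithmetic: on read-back the position is the data delay minus the clock delay, and the range setter rebases the requested window by subtracting UB960_MIN_AEQ_STROBE_POS (-7), turning the signed -7..7 position range into a 0-based register value. A short standalone check of that arithmetic (the window values are made up for the example; the manual-strobe extra-delay handling is omitted since its value is not shown here):

/*
 * Standalone check of the strobe position / range arithmetic visible above.
 */
#include <stdio.h>

#define UB960_MIN_AEQ_STROBE_POS -7

int main(void)
{
	int clk_delay = 5, data_delay = 2;
	int strobe_pos = data_delay - clk_delay;	/* as in ub960_rxport_get_strobe_pos() */

	int strobe_min = -3, strobe_max = 2;		/* example window */

	strobe_min -= UB960_MIN_AEQ_STROBE_POS;		/* -3 -> 4 */
	strobe_max -= UB960_MIN_AEQ_STROBE_POS;		/*  2 -> 9 */

	printf("strobe_pos = %d, reg range = [%d, %d]\n",
	       strobe_pos, strobe_min, strobe_max);
	return 0;
}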
1363 eq_stage_2_select_value = eq_level - eq_stage_max; in ub960_rxport_set_eq_level()
1392 struct ub960_rxport *rxport = priv->rxports[nport]; in ub960_rxport_config_eq()
1396 if (priv->strobe.manual) { in ub960_rxport_config_eq()
1407 ub960_rxport_set_strobe_range(priv, priv->strobe.min, in ub960_rxport_config_eq()
1408 priv->strobe.max); in ub960_rxport_config_eq()
1413 if (priv->strobe.manual) in ub960_rxport_config_eq()
1414 ub960_rxport_set_strobe_pos(priv, nport, rxport->eq.strobe_pos); in ub960_rxport_config_eq()
1418 if (rxport->eq.manual_eq) { in ub960_rxport_config_eq()
1420 rxport->eq.manual.eq_level); in ub960_rxport_config_eq()
1428 rxport->eq.aeq.eq_level_min, in ub960_rxport_config_eq()
1429 rxport->eq.aeq.eq_level_max); in ub960_rxport_config_eq()
1493 * Wait for the RX ports to lock, have no errors and have stable strobe position
1494 * and EQ level.
1500 struct device *dev = &priv->client->dev; in ub960_rxport_wait_locks()
1514 if (port_mask >= BIT(priv->hw_data->num_rxports)) in ub960_rxport_wait_locks()
1515 return -EINVAL; in ub960_rxport_wait_locks()
1525 priv->hw_data->num_rxports) { in ub960_rxport_wait_locks()
1526 struct ub960_rxport *rxport = priv->rxports[nport]; in ub960_rxport_wait_locks()
1562 for_each_set_bit(nport, &port_mask, priv->hw_data->num_rxports) { in ub960_rxport_wait_locks()
1563 struct ub960_rxport *rxport = priv->rxports[nport]; in ub960_rxport_wait_locks()
1585 dev_dbg(dev, "\trx%u: locked, SP: %d, EQ: %u, freq %llu Hz\n", in ub960_rxport_wait_locks()
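ub960_rxport_wait_locks() above polls the RX ports named in port_mask until each reports lock, then logs the final strobe position, EQ level and measured link frequency. A minimal standalone sketch of such a poll loop against a stand-in status read (the real status register and lock bit are not visible in this excerpt, and the retry budget here is an arbitrary assumption):

/*
 * Minimal standalone sketch of a per-port lock polling loop in the spirit
 * of ub960_rxport_wait_locks() (illustration only).
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_RXPORTS 4

/* Stand-in for reading the per-port lock status from the hardware */
static bool rxport_is_locked(unsigned int nport)
{
	return nport != 2;	/* pretend port 2 never locks */
}

static unsigned long wait_locks(unsigned long port_mask)
{
	unsigned long missing = port_mask;

	for (int tries = 0; tries < 100 && missing; tries++) {
		for (unsigned int nport = 0; nport < NUM_RXPORTS; nport++) {
			if ((missing & (1UL << nport)) && rxport_is_locked(nport))
				missing &= ~(1UL << nport);
		}
		/* the driver sleeps between polls; omitted here */
	}

	return missing;	/* non-zero: these ports failed to lock */
}

int main(void)
{
	printf("unlocked ports mask: %#lx\n", wait_locks(0xf));
	return 0;
}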
1598 switch (rxport->rx_mode) { in ub960_calc_bc_clk_rate_ub960()
1620 return clk_get_rate(priv->refclk) * mult / div; in ub960_calc_bc_clk_rate_ub960()
1626 switch (rxport->rx_mode) { in ub960_calc_bc_clk_rate_ub9702()
1645 struct ub960_rxport *rxport = priv->rxports[nport]; in ub960_rxport_add_serializer()
1646 struct device *dev = &priv->client->dev; in ub960_rxport_add_serializer()
1647 struct ds90ub9xx_platform_data *ser_pdata = &rxport->ser.pdata; in ub960_rxport_add_serializer()
1649 .of_node = to_of_node(rxport->ser.fwnode), in ub960_rxport_add_serializer()
1650 .fwnode = rxport->ser.fwnode, in ub960_rxport_add_serializer()
1654 ser_pdata->port = nport; in ub960_rxport_add_serializer()
1655 ser_pdata->atr = priv->atr; in ub960_rxport_add_serializer()
1656 if (priv->hw_data->is_ub9702) in ub960_rxport_add_serializer()
1657 ser_pdata->bc_rate = ub960_calc_bc_clk_rate_ub9702(priv, rxport); in ub960_rxport_add_serializer()
1659 ser_pdata->bc_rate = ub960_calc_bc_clk_rate_ub960(priv, rxport); in ub960_rxport_add_serializer()
1664 * the FPD-Link. in ub960_rxport_add_serializer()
1666 ser_info.addr = rxport->ser.alias; in ub960_rxport_add_serializer()
1667 rxport->ser.client = in ub960_rxport_add_serializer()
1668 i2c_new_client_device(priv->client->adapter, &ser_info); in ub960_rxport_add_serializer()
1669 if (IS_ERR(rxport->ser.client)) { in ub960_rxport_add_serializer()
1670 dev_err(dev, "rx%u: cannot add %s i2c device", nport, in ub960_rxport_add_serializer()
1672 return PTR_ERR(rxport->ser.client); in ub960_rxport_add_serializer()
1675 dev_dbg(dev, "rx%u: remote serializer at alias 0x%02x (%u-%04x)\n", in ub960_rxport_add_serializer()
1676 nport, rxport->ser.client->addr, in ub960_rxport_add_serializer()
1677 rxport->ser.client->adapter->nr, rxport->ser.client->addr); in ub960_rxport_add_serializer()
1684 struct ub960_rxport *rxport = priv->rxports[nport]; in ub960_rxport_remove_serializer()
1686 i2c_unregister_device(rxport->ser.client); in ub960_rxport_remove_serializer()
1687 rxport->ser.client = NULL; in ub960_rxport_remove_serializer()
1696 for (nport = 0; nport < priv->hw_data->num_rxports; nport++) { in ub960_rxport_add_serializers()
1697 struct ub960_rxport *rxport = priv->rxports[nport]; in ub960_rxport_add_serializers()
1710 while (nport--) { in ub960_rxport_add_serializers()
1711 struct ub960_rxport *rxport = priv->rxports[nport]; in ub960_rxport_add_serializers()
1726 for (nport = 0; nport < priv->hw_data->num_rxports; nport++) { in ub960_rxport_remove_serializers()
1727 struct ub960_rxport *rxport = priv->rxports[nport]; in ub960_rxport_remove_serializers()
1739 unsigned int nport = txport->nport; in ub960_init_tx_port()
1743 * From the datasheet: "initial CSI Skew-Calibration in ub960_init_tx_port()
1746 if (priv->tx_data_rate == MHZ(1600)) in ub960_init_tx_port()
1749 csi_ctl |= (4 - txport->num_data_lanes) << 4; in ub960_init_tx_port()
1751 if (!txport->non_continous_clk) in ub960_init_tx_port()
1765 switch (priv->tx_data_rate) { in ub960_init_tx_ports()
1787 if (priv->hw_data->is_ub9702) { in ub960_init_tx_ports()
1790 switch (priv->tx_data_rate) { in ub960_init_tx_ports()
1807 for (nport = 0; nport < priv->hw_data->num_txports; nport++) { in ub960_init_tx_ports()
1808 struct ub960_txport *txport = priv->txports[nport]; in ub960_init_tx_ports()
1822 unsigned int nport = rxport->nport; in ub960_init_rx_port_ub960()
1828 * 0 - 2.5 Mbps (DS90UB913A-Q1 / DS90UB933-Q1) in ub960_init_rx_port_ub960()
1829 * 2 - 10 Mbps in ub960_init_rx_port_ub960()
1830 * 6 - 50 Mbps (DS90UB953-Q1) in ub960_init_rx_port_ub960()
1836 switch (rxport->rx_mode) { in ub960_init_rx_port_ub960()
1859 switch (rxport->rx_mode) { in ub960_init_rx_port_ub960()
1861 /* FPD3_MODE = RAW10 Mode (DS90UB913A-Q1 / DS90UB933-Q1 compatible) */ in ub960_init_rx_port_ub960()
1867 * RAW10_8BIT_CTL = 0b10 : 8-bit processing using upper 8 bits in ub960_init_rx_port_ub960()
1882 /* CSI-2 Mode (DS90UB953-Q1 compatible) */ in ub960_init_rx_port_ub960()
1891 rxport->lv_fv_pol); in ub960_init_rx_port_ub960()
1904 rxport->ser.alias << 1); in ub960_init_rx_port_ub960()
1906 /* Configure EQ related settings */ in ub960_init_rx_port_ub960()
1909 /* Enable RX port */ in ub960_init_rx_port_ub960()
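The bit rate table above documents the back-channel frequency select values: 0 for 2.5 Mbps (DS90UB913A-Q1/DS90UB933-Q1), 2 for 10 Mbps and 6 for 50 Mbps (DS90UB953-Q1). A plausible mapping from the RX mode to that field, consistent with the table, is sketched below; pairing the non-synchronous CSI-2 mode with value 2 is an assumption of this sketch, and the RXPORT_MODE_RAW10 name follows the RXPORT_MODE_* pattern used elsewhere in the listing but is not itself visible here:

/*
 * Illustrative mapping from RX mode to the back-channel frequency select
 * values in the table above (not the driver's exact code).
 */
static unsigned int ub960_bc_freq_val_sketch(unsigned int rx_mode)
{
	switch (rx_mode) {
	case RXPORT_MODE_RAW10:
		return 0;	/* 2.5 Mbps, DS90UB913A-Q1 / DS90UB933-Q1 */
	case RXPORT_MODE_CSI2_NONSYNC:
		return 2;	/* 10 Mbps (assumed pairing) */
	case RXPORT_MODE_CSI2_SYNC:
	default:
		return 6;	/* 50 Mbps, DS90UB953-Q1 */
	}
}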
1916 unsigned int nport = rxport->nport; in ub960_init_rx_port_ub9702_fpd3()
1920 switch (rxport->rx_mode) { in ub960_init_rx_port_ub9702_fpd3()
1969 /* serdes_driver_ctl2 control: DS90UB953-Q1/DS90UB933-Q1/DS90UB913A-Q1 */ in ub960_init_rx_port_ub9702_fpd3()
1973 /* RX port to half-rate */ in ub960_init_rx_port_ub9702_fpd3()
1981 unsigned int nport = rxport->nport; in ub960_init_rx_port_ub9702_fpd4_aeq()
2015 unsigned int nport = rxport->nport; in ub960_init_rx_port_ub9702_fpd4()
2018 switch (rxport->rx_mode) { in ub960_init_rx_port_ub9702_fpd4()
2067 /* RX port to 7.55G mode */ in ub960_init_rx_port_ub9702_fpd4()
2077 unsigned int nport = rxport->nport; in ub960_init_rx_port_ub9702()
2079 if (rxport->cdr_mode == RXPORT_CDR_FPD3) in ub960_init_rx_port_ub9702()
2084 switch (rxport->rx_mode) { in ub960_init_rx_port_ub9702()
2087 * RAW10_8BIT_CTL = 0b11 : 8-bit processing using lower 8 bits in ub960_init_rx_port_ub9702()
2088 * 0b10 : 8-bit processing using upper 8 bits in ub960_init_rx_port_ub9702()
2108 rxport->lv_fv_pol); in ub960_init_rx_port_ub9702()
2121 rxport->ser.alias << 1); in ub960_init_rx_port_ub9702()
2123 /* Enable RX port */ in ub960_init_rx_port_ub9702()
2126 if (rxport->cdr_mode == RXPORT_CDR_FPD4) { in ub960_init_rx_port_ub9702()
2136 for (nport = 0; nport < priv->hw_data->num_rxports; nport++) { in ub960_init_rx_ports()
2137 struct ub960_rxport *rxport = priv->rxports[nport]; in ub960_init_rx_ports()
2142 if (priv->hw_data->is_ub9702) in ub960_init_rx_ports()
2153 struct device *dev = &priv->client->dev; in ub960_rxport_handle_events()
2183 dev_err(dev, "rx%u parity errors: %u\n", nport, v); in ub960_rxport_handle_events()
2187 dev_err(dev, "rx%u BCC CRC error\n", nport); in ub960_rxport_handle_events()
2190 dev_err(dev, "rx%u BCC SEQ error\n", nport); in ub960_rxport_handle_events()
2193 dev_err(dev, "rx%u line length unstable\n", nport); in ub960_rxport_handle_events()
2196 dev_err(dev, "rx%u FPD3 encode error\n", nport); in ub960_rxport_handle_events()
2199 dev_err(dev, "rx%u buffer error\n", nport); in ub960_rxport_handle_events()
2202 dev_err(dev, "rx%u CSI error: %#02x\n", nport, csi_rx_sts); in ub960_rxport_handle_events()
2205 dev_err(dev, "rx%u CSI ECC1 error\n", nport); in ub960_rxport_handle_events()
2208 dev_err(dev, "rx%u CSI ECC2 error\n", nport); in ub960_rxport_handle_events()
2211 dev_err(dev, "rx%u CSI checksum error\n", nport); in ub960_rxport_handle_events()
2214 dev_err(dev, "rx%u CSI length error\n", nport); in ub960_rxport_handle_events()
2217 dev_err(dev, "rx%u BCC error: %#02x\n", nport, bcc_sts); in ub960_rxport_handle_events()
2220 dev_err(dev, "rx%u BCC response error", nport); in ub960_rxport_handle_events()
2223 dev_err(dev, "rx%u BCC slave timeout", nport); in ub960_rxport_handle_events()
2226 dev_err(dev, "rx%u BCC slave error", nport); in ub960_rxport_handle_events()
2229 dev_err(dev, "rx%u BCC master timeout", nport); in ub960_rxport_handle_events()
2232 dev_err(dev, "rx%u BCC master error", nport); in ub960_rxport_handle_events()
2235 dev_err(dev, "rx%u BCC sequence error", nport); in ub960_rxport_handle_events()
2242 dev_dbg(dev, "rx%u line len changed: %u\n", nport, v); in ub960_rxport_handle_events()
2251 dev_dbg(dev, "rx%u line count changed: %u\n", nport, v); in ub960_rxport_handle_events()
2255 dev_dbg(dev, "rx%u: %s, %s, %s, %s\n", nport, in ub960_rxport_handle_events()
2271 /* -----------------------------------------------------------------------------
2277 * from one RX port will be mapped to the same VC. Also, the hardware
2278 * dictates that all streams from an RX port must go to a single TX port.
2280 * This function decides the target VC numbers for each RX port with a simple
2284 * E.g. if all four RX ports are in use, of which the first two go to the
2286 * the following VCs for the four RX ports: 0, 1, 0, 1.
2299 for_each_active_route(&state->routing, route) { in ub960_get_vc_maps()
2300 unsigned int rx, tx; in ub960_get_vc_maps() local
2302 rx = ub960_pad_to_port(priv, route->sink_pad); in ub960_get_vc_maps()
2303 if (BIT(rx) & handled_mask) in ub960_get_vc_maps()
2306 tx = ub960_pad_to_port(priv, route->source_pad); in ub960_get_vc_maps()
2308 vc[rx] = cur_vc[tx]++; in ub960_get_vc_maps()
2309 handled_mask |= BIT(rx); in ub960_get_vc_maps()
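The comment above describes the VC allocation and ub960_get_vc_maps() implements it: each TX port hands out VC numbers in order, and every RX port routed to that TX port takes the next free one. A standalone run of that rule on the comment's example routing (RX0/RX1 to TX0, RX2/RX3 to TX1) reproduces the stated 0, 1, 0, 1 result:

/*
 * Standalone model of the round-robin VC allocation described above
 * (illustration only; routing is hard-coded for the example).
 */
#include <stdio.h>

#define NUM_RX 4
#define NUM_TX 2

int main(void)
{
	int rx_to_tx[NUM_RX] = { 0, 0, 1, 1 };	/* example routing from the comment */
	int cur_vc[NUM_TX] = { 0 };
	int vc[NUM_RX];

	for (int rx = 0; rx < NUM_RX; rx++)
		vc[rx] = cur_vc[rx_to_tx[rx]]++;	/* next free VC of the target TX port */

	for (int rx = 0; rx < NUM_RX; rx++)
		printf("rx%d -> tx%d, vc %d\n", rx, rx_to_tx[rx], vc[rx]);

	return 0;
}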
2315 struct device *dev = &priv->client->dev; in ub960_enable_tx_port()
2326 struct device *dev = &priv->client->dev; in ub960_disable_tx_port()
2336 struct device *dev = &priv->client->dev; in ub960_enable_rx_port()
2338 dev_dbg(dev, "enable RX port %u\n", nport); in ub960_enable_rx_port()
2347 struct device *dev = &priv->client->dev; in ub960_disable_rx_port()
2349 dev_dbg(dev, "disable RX port %u\n", nport); in ub960_disable_rx_port()
2366 for (nport = 0; nport < priv->hw_data->num_rxports; nport++) { in ub960_validate_stream_vcs()
2367 struct ub960_rxport *rxport = priv->rxports[nport]; in ub960_validate_stream_vcs()
2375 ret = v4l2_subdev_call(rxport->source.sd, pad, get_frame_desc, in ub960_validate_stream_vcs()
2376 rxport->source.pad, &desc); in ub960_validate_stream_vcs()
2392 dev_err(&priv->client->dev, in ub960_validate_stream_vcs()
2393 "rx%u: source with multiple virtual-channels is not supported\n", in ub960_validate_stream_vcs()
2395 return -ENODEV; in ub960_validate_stream_vcs()
2424 for_each_active_route(&state->routing, route) { in ub960_configure_ports_for_streaming()
2431 nport = ub960_pad_to_port(priv, route->sink_pad); in ub960_configure_ports_for_streaming()
2433 rxport = priv->rxports[nport]; in ub960_configure_ports_for_streaming()
2435 return -EINVAL; in ub960_configure_ports_for_streaming()
2437 txport = priv->txports[ub960_pad_to_port(priv, route->source_pad)]; in ub960_configure_ports_for_streaming()
2439 return -EINVAL; in ub960_configure_ports_for_streaming()
2441 rx_data[nport].tx_port = ub960_pad_to_port(priv, route->source_pad); in ub960_configure_ports_for_streaming()
2446 if (rxport->rx_mode == RXPORT_MODE_CSI2_SYNC || in ub960_configure_ports_for_streaming()
2447 rxport->rx_mode == RXPORT_MODE_CSI2_NONSYNC) in ub960_configure_ports_for_streaming()
2451 return -EPIPE; in ub960_configure_ports_for_streaming()
2453 fmt = v4l2_subdev_state_get_format(state, route->sink_pad, in ub960_configure_ports_for_streaming()
2454 route->sink_stream); in ub960_configure_ports_for_streaming()
2456 return -EPIPE; in ub960_configure_ports_for_streaming()
2458 ub960_fmt = ub960_find_format(fmt->code); in ub960_configure_ports_for_streaming()
2460 return -EPIPE; in ub960_configure_ports_for_streaming()
2462 if (ub960_fmt->meta) { in ub960_configure_ports_for_streaming()
2463 if (fmt->height > 3) { in ub960_configure_ports_for_streaming()
2464 dev_err(&priv->client->dev, in ub960_configure_ports_for_streaming()
2465 "rx%u: unsupported metadata height %u\n", in ub960_configure_ports_for_streaming()
2466 nport, fmt->height); in ub960_configure_ports_for_streaming()
2467 return -EPIPE; in ub960_configure_ports_for_streaming()
2470 rx_data[nport].meta_dt = ub960_fmt->datatype; in ub960_configure_ports_for_streaming()
2471 rx_data[nport].meta_lines = fmt->height; in ub960_configure_ports_for_streaming()
2473 rx_data[nport].pixel_dt = ub960_fmt->datatype; in ub960_configure_ports_for_streaming()
2477 /* Configure RX ports */ in ub960_configure_ports_for_streaming()
2485 for (nport = 0; nport < priv->hw_data->num_rxports; nport++) { in ub960_configure_ports_for_streaming()
2486 struct ub960_rxport *rxport = priv->rxports[nport]; in ub960_configure_ports_for_streaming()
2492 switch (rxport->rx_mode) { in ub960_configure_ports_for_streaming()
2497 ub960_rxport_write(priv, rxport->nport, in ub960_configure_ports_for_streaming()
2511 if (!priv->hw_data->is_ub9702) { in ub960_configure_ports_for_streaming()
2547 if (priv->stream_enable_mask[i]) in ub960_update_streaming_status()
2551 priv->streaming = i < UB960_MAX_NPORTS; in ub960_update_streaming_status()
2559 struct device *dev = &priv->client->dev; in ub960_enable_streams()
2566 if (!priv->streaming) { in ub960_enable_streams()
2574 if (!priv->stream_enable_mask[source_pad]) { in ub960_enable_streams()
2581 priv->stream_enable_mask[source_pad] |= source_streams_mask; in ub960_enable_streams()
2584 for_each_active_route(&state->routing, route) { in ub960_enable_streams()
2585 if (route->source_pad != source_pad) in ub960_enable_streams()
2588 if (!(source_streams_mask & BIT_ULL(route->source_stream))) in ub960_enable_streams()
2591 nport = ub960_pad_to_port(priv, route->sink_pad); in ub960_enable_streams()
2593 sink_streams[nport] |= BIT_ULL(route->sink_stream); in ub960_enable_streams()
2596 for (nport = 0; nport < priv->hw_data->num_rxports; nport++) { in ub960_enable_streams()
2600 /* Enable the RX port if not yet enabled */ in ub960_enable_streams()
2601 if (!priv->stream_enable_mask[nport]) { in ub960_enable_streams()
2609 priv->stream_enable_mask[nport] |= sink_streams[nport]; in ub960_enable_streams()
2611 dev_dbg(dev, "enable RX port %u streams %#llx\n", nport, in ub960_enable_streams()
2615 priv->rxports[nport]->source.sd, in ub960_enable_streams()
2616 priv->rxports[nport]->source.pad, in ub960_enable_streams()
2619 priv->stream_enable_mask[nport] &= ~sink_streams[nport]; in ub960_enable_streams()
2621 if (!priv->stream_enable_mask[nport]) in ub960_enable_streams()
2629 priv->streaming = true; in ub960_enable_streams()
2638 dev_dbg(dev, "disable RX port %u streams %#llx\n", nport, in ub960_enable_streams()
2642 priv->rxports[nport]->source.sd, in ub960_enable_streams()
2643 priv->rxports[nport]->source.pad, in ub960_enable_streams()
2648 priv->stream_enable_mask[nport] &= ~sink_streams[nport]; in ub960_enable_streams()
2650 /* Disable RX port if no active streams */ in ub960_enable_streams()
2651 if (!priv->stream_enable_mask[nport]) in ub960_enable_streams()
2655 priv->stream_enable_mask[source_pad] &= ~source_streams_mask; in ub960_enable_streams()
2657 if (!priv->stream_enable_mask[source_pad]) in ub960_enable_streams()
2671 struct device *dev = &priv->client->dev; in ub960_disable_streams()
2678 for_each_active_route(&state->routing, route) { in ub960_disable_streams()
2679 if (route->source_pad != source_pad) in ub960_disable_streams()
2682 if (!(source_streams_mask & BIT_ULL(route->source_stream))) in ub960_disable_streams()
2685 nport = ub960_pad_to_port(priv, route->sink_pad); in ub960_disable_streams()
2687 sink_streams[nport] |= BIT_ULL(route->sink_stream); in ub960_disable_streams()
2690 for (nport = 0; nport < priv->hw_data->num_rxports; nport++) { in ub960_disable_streams()
2694 dev_dbg(dev, "disable RX port %u streams %#llx\n", nport, in ub960_disable_streams()
2698 priv->rxports[nport]->source.sd, in ub960_disable_streams()
2699 priv->rxports[nport]->source.pad, in ub960_disable_streams()
2704 priv->stream_enable_mask[nport] &= ~sink_streams[nport]; in ub960_disable_streams()
2706 /* Disable RX port if no active streams */ in ub960_disable_streams()
2707 if (!priv->stream_enable_mask[nport]) in ub960_disable_streams()
2713 priv->stream_enable_mask[source_pad] &= ~source_streams_mask; in ub960_disable_streams()
2715 if (!priv->stream_enable_mask[source_pad]) in ub960_disable_streams()
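Both the enable and disable paths above keep a per-port bitmask of active streams in priv->stream_enable_mask[]: a TX or RX port is switched on only when its mask goes from zero to non-zero, and switched off again once the last stream bit is cleared, so ports are effectively refcounted by stream. A standalone model of that bookkeeping:

/*
 * Standalone model of the stream_enable_mask bookkeeping used above
 * (illustration only). Port hardware is enabled on the 0 -> non-zero
 * transition of its mask and disabled when the mask drops back to 0.
 */
#include <stdio.h>
#include <stdint.h>

#define NUM_PORTS 4

static uint64_t stream_enable_mask[NUM_PORTS];

static void port_hw_enable(unsigned int port, int on)
{
	printf("port %u: %s\n", port, on ? "enable" : "disable");
}

static void enable_streams(unsigned int port, uint64_t streams)
{
	if (!stream_enable_mask[port])
		port_hw_enable(port, 1);
	stream_enable_mask[port] |= streams;
}

static void disable_streams(unsigned int port, uint64_t streams)
{
	stream_enable_mask[port] &= ~streams;
	if (!stream_enable_mask[port])
		port_hw_enable(port, 0);
}

int main(void)
{
	enable_streams(0, 1 << 0);	/* first stream powers the port up */
	enable_streams(0, 1 << 1);	/* second stream: port already on */
	disable_streams(0, 1 << 0);
	disable_streams(0, 1 << 1);	/* last stream powers the port down */
	return 0;
}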
2745 if (routing->num_routes > V4L2_FRAME_DESC_ENTRY_MAX) in _ub960_set_routing()
2746 return -E2BIG; in _ub960_set_routing()
2768 if (which == V4L2_SUBDEV_FORMAT_ACTIVE && priv->streaming) in ub960_set_routing()
2769 return -EBUSY; in ub960_set_routing()
2781 struct device *dev = &priv->client->dev; in ub960_get_frame_desc()
2785 return -EINVAL; in ub960_get_frame_desc()
2787 fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2; in ub960_get_frame_desc()
2789 state = v4l2_subdev_lock_and_get_active_state(&priv->sd); in ub960_get_frame_desc()
2793 for_each_active_route(&state->routing, route) { in ub960_get_frame_desc()
2799 if (route->source_pad != pad) in ub960_get_frame_desc()
2802 nport = ub960_pad_to_port(priv, route->sink_pad); in ub960_get_frame_desc()
2804 ret = v4l2_subdev_call(priv->rxports[nport]->source.sd, pad, in ub960_get_frame_desc()
2806 priv->rxports[nport]->source.pad, in ub960_get_frame_desc()
2811 route->sink_pad); in ub960_get_frame_desc()
2816 if (source_fd.entry[i].stream == route->sink_stream) { in ub960_get_frame_desc()
2825 ret = -EPIPE; in ub960_get_frame_desc()
2829 fd->entry[fd->num_entries].stream = route->source_stream; in ub960_get_frame_desc()
2830 fd->entry[fd->num_entries].flags = source_entry->flags; in ub960_get_frame_desc()
2831 fd->entry[fd->num_entries].length = source_entry->length; in ub960_get_frame_desc()
2832 fd->entry[fd->num_entries].pixelcode = source_entry->pixelcode; in ub960_get_frame_desc()
2834 fd->entry[fd->num_entries].bus.csi2.vc = vc_map[nport]; in ub960_get_frame_desc()
2837 fd->entry[fd->num_entries].bus.csi2.dt = in ub960_get_frame_desc()
2838 source_entry->bus.csi2.dt; in ub960_get_frame_desc()
2844 route->source_stream); in ub960_get_frame_desc()
2847 ret = -EINVAL; in ub960_get_frame_desc()
2851 ub960_fmt = ub960_find_format(fmt->code); in ub960_get_frame_desc()
2854 ret = -EINVAL; in ub960_get_frame_desc()
2858 fd->entry[fd->num_entries].bus.csi2.dt = in ub960_get_frame_desc()
2859 ub960_fmt->datatype; in ub960_get_frame_desc()
2862 fd->num_entries++; in ub960_get_frame_desc()
2878 if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE && priv->streaming) in ub960_set_fmt()
2879 return -EBUSY; in ub960_set_fmt()
2882 if (ub960_pad_is_source(priv, format->pad)) in ub960_set_fmt()
2889 if (!ub960_find_format(format->format.code)) in ub960_set_fmt()
2890 format->format.code = ub960_formats[0].code; in ub960_set_fmt()
2892 fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream); in ub960_set_fmt()
2894 return -EINVAL; in ub960_set_fmt()
2896 *fmt = format->format; in ub960_set_fmt()
2898 fmt = v4l2_subdev_state_get_opposite_stream_format(state, format->pad, in ub960_set_fmt()
2899 format->stream); in ub960_set_fmt()
2901 return -EINVAL; in ub960_set_fmt()
2903 *fmt = format->format; in ub960_set_fmt()
2917 .source_pad = priv->hw_data->num_rxports, in ub960_init_state()
2945 struct device *dev = &priv->client->dev; in ub960_log_status()
2960 for (nport = 0; nport < priv->hw_data->num_txports; nport++) { in ub960_log_status()
2961 struct ub960_txport *txport = priv->txports[nport]; in ub960_log_status()
2987 for (nport = 0; nport < priv->hw_data->num_rxports; nport++) { in ub960_log_status()
2988 struct ub960_rxport *rxport = priv->rxports[nport]; in ub960_log_status()
2993 dev_info(dev, "RX %u\n", nport); in ub960_log_status()
3038 ((v >> UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT) & 0xf) - 7, in ub960_log_status()
3039 ((v >> UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT) & 0xf) - 7); in ub960_log_status()
3046 /* EQ */ in ub960_log_status()
3050 dev_info(dev, "\t%s EQ\n", in ub960_log_status()
3104 /* -----------------------------------------------------------------------------
3120 dev_dbg(&priv->client->dev, "INTERRUPT_STS %x\n", int_sts); in ub960_handle_events()
3126 dev_dbg(&priv->client->dev, "FWD_STS %#02x\n", fwd_sts); in ub960_handle_events()
3128 for (i = 0; i < priv->hw_data->num_txports; i++) { in ub960_handle_events()
3133 for (i = 0; i < priv->hw_data->num_rxports; i++) { in ub960_handle_events()
3134 if (!priv->rxports[i]) in ub960_handle_events()
3152 schedule_delayed_work(&priv->poll_work, in ub960_handler_work()
3160 for (nport = 0; nport < priv->hw_data->num_txports; nport++) { in ub960_txport_free_ports()
3161 struct ub960_txport *txport = priv->txports[nport]; in ub960_txport_free_ports()
3167 priv->txports[nport] = NULL; in ub960_txport_free_ports()
3175 for (nport = 0; nport < priv->hw_data->num_rxports; nport++) { in ub960_rxport_free_ports()
3176 struct ub960_rxport *rxport = priv->rxports[nport]; in ub960_rxport_free_ports()
3181 fwnode_handle_put(rxport->source.ep_fwnode); in ub960_rxport_free_ports()
3182 fwnode_handle_put(rxport->ser.fwnode); in ub960_rxport_free_ports()
3185 priv->rxports[nport] = NULL; in ub960_rxport_free_ports()
3194 struct device *dev = &priv->client->dev; in ub960_parse_dt_rxport_link_properties()
3195 unsigned int nport = rxport->nport; in ub960_parse_dt_rxport_link_properties()
3205 ret = fwnode_property_read_u32(link_fwnode, "ti,cdr-mode", &cdr_mode); in ub960_parse_dt_rxport_link_properties()
3206 if (ret < 0 && ret != -EINVAL) { in ub960_parse_dt_rxport_link_properties()
3207 dev_err(dev, "rx%u: failed to read '%s': %d\n", nport, in ub960_parse_dt_rxport_link_properties()
3208 "ti,cdr-mode", ret); in ub960_parse_dt_rxport_link_properties()
3213 dev_err(dev, "rx%u: bad 'ti,cdr-mode' %u\n", nport, cdr_mode); in ub960_parse_dt_rxport_link_properties()
3214 return -EINVAL; in ub960_parse_dt_rxport_link_properties()
3217 if (!priv->hw_data->is_fpdlink4 && cdr_mode == RXPORT_CDR_FPD4) { in ub960_parse_dt_rxport_link_properties()
3218 dev_err(dev, "rx%u: FPD-Link 4 CDR not supported\n", nport); in ub960_parse_dt_rxport_link_properties()
3219 return -EINVAL; in ub960_parse_dt_rxport_link_properties()
3222 rxport->cdr_mode = cdr_mode; in ub960_parse_dt_rxport_link_properties()
3224 ret = fwnode_property_read_u32(link_fwnode, "ti,rx-mode", &rx_mode); in ub960_parse_dt_rxport_link_properties()
3226 dev_err(dev, "rx%u: failed to read '%s': %d\n", nport, in ub960_parse_dt_rxport_link_properties()
3227 "ti,rx-mode", ret); in ub960_parse_dt_rxport_link_properties()
3232 dev_err(dev, "rx%u: bad 'ti,rx-mode' %u\n", nport, rx_mode); in ub960_parse_dt_rxport_link_properties()
3233 return -EINVAL; in ub960_parse_dt_rxport_link_properties()
3239 dev_err(dev, "rx%u: unsupported 'ti,rx-mode' %u\n", nport, in ub960_parse_dt_rxport_link_properties()
3241 return -EINVAL; in ub960_parse_dt_rxport_link_properties()
3246 rxport->rx_mode = rx_mode; in ub960_parse_dt_rxport_link_properties()
3248 /* EQ & Strobe related */ in ub960_parse_dt_rxport_link_properties()
3251 rxport->eq.manual_eq = false; in ub960_parse_dt_rxport_link_properties()
3252 rxport->eq.aeq.eq_level_min = UB960_MIN_EQ_LEVEL; in ub960_parse_dt_rxport_link_properties()
3253 rxport->eq.aeq.eq_level_max = UB960_MAX_EQ_LEVEL; in ub960_parse_dt_rxport_link_properties()
3255 ret = fwnode_property_read_u32(link_fwnode, "ti,strobe-pos", in ub960_parse_dt_rxport_link_properties()
3258 if (ret != -EINVAL) { in ub960_parse_dt_rxport_link_properties()
3259 dev_err(dev, "rx%u: failed to read '%s': %d\n", nport, in ub960_parse_dt_rxport_link_properties()
3260 "ti,strobe-pos", ret); in ub960_parse_dt_rxport_link_properties()
3266 dev_err(dev, "rx%u: illegal 'strobe-pos' value: %d\n", in ub960_parse_dt_rxport_link_properties()
3268 return -EINVAL; in ub960_parse_dt_rxport_link_properties()
3272 rxport->eq.strobe_pos = strobe_pos; in ub960_parse_dt_rxport_link_properties()
3273 if (!priv->strobe.manual) in ub960_parse_dt_rxport_link_properties()
3275 "rx%u: 'ti,strobe-pos' ignored as 'ti,manual-strobe' not set\n", in ub960_parse_dt_rxport_link_properties()
3279 ret = fwnode_property_read_u32(link_fwnode, "ti,eq-level", &eq_level); in ub960_parse_dt_rxport_link_properties()
3281 if (ret != -EINVAL) { in ub960_parse_dt_rxport_link_properties()
3282 dev_err(dev, "rx%u: failed to read '%s': %d\n", nport, in ub960_parse_dt_rxport_link_properties()
3283 "ti,eq-level", ret); in ub960_parse_dt_rxport_link_properties()
3288 dev_err(dev, "rx%u: illegal 'ti,eq-level' value: %d\n", in ub960_parse_dt_rxport_link_properties()
3290 return -EINVAL; in ub960_parse_dt_rxport_link_properties()
3293 rxport->eq.manual_eq = true; in ub960_parse_dt_rxport_link_properties()
3294 rxport->eq.manual.eq_level = eq_level; in ub960_parse_dt_rxport_link_properties()
3297 ret = fwnode_property_read_u32(link_fwnode, "i2c-alias", in ub960_parse_dt_rxport_link_properties()
3300 dev_err(dev, "rx%u: failed to read '%s': %d\n", nport, in ub960_parse_dt_rxport_link_properties()
3301 "i2c-alias", ret); in ub960_parse_dt_rxport_link_properties()
3304 rxport->ser.alias = ser_i2c_alias; in ub960_parse_dt_rxport_link_properties()
3306 rxport->ser.fwnode = fwnode_get_named_child_node(link_fwnode, "serializer"); in ub960_parse_dt_rxport_link_properties()
3307 if (!rxport->ser.fwnode) { in ub960_parse_dt_rxport_link_properties()
3308 dev_err(dev, "rx%u: missing 'serializer' node\n", nport); in ub960_parse_dt_rxport_link_properties()
3309 return -EINVAL; in ub960_parse_dt_rxport_link_properties()
3319 struct device *dev = &priv->client->dev; in ub960_parse_dt_rxport_ep_properties()
3321 unsigned int nport = rxport->nport; in ub960_parse_dt_rxport_ep_properties()
3326 rxport->source.ep_fwnode = fwnode_graph_get_remote_endpoint(ep_fwnode); in ub960_parse_dt_rxport_ep_properties()
3327 if (!rxport->source.ep_fwnode) { in ub960_parse_dt_rxport_ep_properties()
3328 dev_err(dev, "rx%u: no remote endpoint\n", nport); in ub960_parse_dt_rxport_ep_properties()
3329 return -ENODEV; in ub960_parse_dt_rxport_ep_properties()
3334 switch (rxport->rx_mode) { in ub960_parse_dt_rxport_ep_properties()
3346 dev_err(dev, "rx%u: failed to parse endpoint data\n", nport); in ub960_parse_dt_rxport_ep_properties()
3354 rxport->lv_fv_pol = (hsync_hi ? UB960_RR_PORT_CONFIG2_LV_POL_LOW : 0) | in ub960_parse_dt_rxport_ep_properties()
3360 fwnode_handle_put(rxport->source.ep_fwnode); in ub960_parse_dt_rxport_ep_properties()
3371 struct device *dev = &priv->client->dev; in ub960_parse_dt_rxport()
3377 return -ENOMEM; in ub960_parse_dt_rxport()
3379 priv->rxports[nport] = rxport; in ub960_parse_dt_rxport()
3381 rxport->nport = nport; in ub960_parse_dt_rxport()
3382 rxport->priv = priv; in ub960_parse_dt_rxport()
3388 rxport->vpoc = devm_regulator_get_optional(dev, vpoc_names[nport]); in ub960_parse_dt_rxport()
3389 if (IS_ERR(rxport->vpoc)) { in ub960_parse_dt_rxport()
3390 ret = PTR_ERR(rxport->vpoc); in ub960_parse_dt_rxport()
3391 if (ret == -ENODEV) { in ub960_parse_dt_rxport()
3392 rxport->vpoc = NULL; in ub960_parse_dt_rxport()
3394 dev_err(dev, "rx%u: failed to get VPOC supply: %d\n", in ub960_parse_dt_rxport()
3407 fwnode_handle_put(rxport->ser.fwnode); in ub960_parse_dt_rxport()
3409 priv->rxports[nport] = NULL; in ub960_parse_dt_rxport()
3442 struct device *dev = &priv->client->dev; in ub960_parse_dt_rxports()
3450 return -ENODEV; in ub960_parse_dt_rxports()
3454 priv->strobe.min = 2; in ub960_parse_dt_rxports()
3455 priv->strobe.max = 3; in ub960_parse_dt_rxports()
3457 priv->strobe.manual = fwnode_property_read_bool(links_fwnode, "ti,manual-strobe"); in ub960_parse_dt_rxports()
3459 for (nport = 0; nport < priv->hw_data->num_rxports; nport++) { in ub960_parse_dt_rxports()
3481 dev_err(dev, "rx%u: failed to parse RX port\n", nport); in ub960_parse_dt_rxports()
3498 struct device *dev = &priv->client->dev; in ub960_parse_dt_txports()
3502 for (nport = 0; nport < priv->hw_data->num_txports; nport++) { in ub960_parse_dt_txports()
3503 unsigned int port = nport + priv->hw_data->num_rxports; in ub960_parse_dt_txports()
3546 struct ub960_data *priv = sd_to_ub960(notifier->sd); in ub960_notify_bound()
3547 struct ub960_rxport *rxport = to_ub960_asd(asd)->rxport; in ub960_notify_bound()
3548 struct device *dev = &priv->client->dev; in ub960_notify_bound()
3549 u8 nport = rxport->nport; in ub960_notify_bound()
3553 ret = media_entity_get_fwnode_pad(&subdev->entity, in ub960_notify_bound()
3554 rxport->source.ep_fwnode, in ub960_notify_bound()
3557 dev_err(dev, "Failed to find pad for %s\n", subdev->name); in ub960_notify_bound()
3561 rxport->source.sd = subdev; in ub960_notify_bound()
3562 rxport->source.pad = ret; in ub960_notify_bound()
3564 ret = media_create_pad_link(&rxport->source.sd->entity, in ub960_notify_bound()
3565 rxport->source.pad, &priv->sd.entity, nport, in ub960_notify_bound()
3569 dev_err(dev, "Unable to link %s:%u -> %s:%u\n", in ub960_notify_bound()
3570 rxport->source.sd->name, rxport->source.pad, in ub960_notify_bound()
3571 priv->sd.name, nport); in ub960_notify_bound()
3575 for (i = 0; i < priv->hw_data->num_rxports; i++) { in ub960_notify_bound()
3576 if (priv->rxports[i] && !priv->rxports[i]->source.sd) { in ub960_notify_bound()
3589 struct ub960_rxport *rxport = to_ub960_asd(asd)->rxport; in ub960_notify_unbind()
3591 rxport->source.sd = NULL; in ub960_notify_unbind()
3601 struct device *dev = &priv->client->dev; in ub960_v4l2_notifier_register()
3605 v4l2_async_subdev_nf_init(&priv->notifier, &priv->sd); in ub960_v4l2_notifier_register()
3607 for (i = 0; i < priv->hw_data->num_rxports; i++) { in ub960_v4l2_notifier_register()
3608 struct ub960_rxport *rxport = priv->rxports[i]; in ub960_v4l2_notifier_register()
3614 asd = v4l2_async_nf_add_fwnode(&priv->notifier, in ub960_v4l2_notifier_register()
3615 rxport->source.ep_fwnode, in ub960_v4l2_notifier_register()
3620 v4l2_async_nf_cleanup(&priv->notifier); in ub960_v4l2_notifier_register()
3624 asd->rxport = rxport; in ub960_v4l2_notifier_register()
3627 priv->notifier.ops = &ub960_notify_ops; in ub960_v4l2_notifier_register()
3629 ret = v4l2_async_nf_register(&priv->notifier); in ub960_v4l2_notifier_register()
3632 v4l2_async_nf_cleanup(&priv->notifier); in ub960_v4l2_notifier_register()
3641 v4l2_async_nf_unregister(&priv->notifier); in ub960_v4l2_notifier_unregister()
3642 v4l2_async_nf_cleanup(&priv->notifier); in ub960_v4l2_notifier_unregister()
3647 struct device *dev = &priv->client->dev; in ub960_create_subdev()
3651 v4l2_i2c_subdev_init(&priv->sd, priv->client, &ub960_subdev_ops); in ub960_create_subdev()
3652 priv->sd.internal_ops = &ub960_internal_ops; in ub960_create_subdev()
3654 v4l2_ctrl_handler_init(&priv->ctrl_handler, 1); in ub960_create_subdev()
3655 priv->sd.ctrl_handler = &priv->ctrl_handler; in ub960_create_subdev()
3657 v4l2_ctrl_new_int_menu(&priv->ctrl_handler, NULL, V4L2_CID_LINK_FREQ, in ub960_create_subdev()
3658 ARRAY_SIZE(priv->tx_link_freq) - 1, 0, in ub960_create_subdev()
3659 priv->tx_link_freq); in ub960_create_subdev()
3661 if (priv->ctrl_handler.error) { in ub960_create_subdev()
3662 ret = priv->ctrl_handler.error; in ub960_create_subdev()
3666 priv->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | in ub960_create_subdev()
3668 priv->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE; in ub960_create_subdev()
3669 priv->sd.entity.ops = &ub960_entity_ops; in ub960_create_subdev()
3671 for (i = 0; i < priv->hw_data->num_rxports + priv->hw_data->num_txports; i++) { in ub960_create_subdev()
3672 priv->pads[i].flags = ub960_pad_is_sink(priv, i) ? in ub960_create_subdev()
3677 ret = media_entity_pads_init(&priv->sd.entity, in ub960_create_subdev()
3678 priv->hw_data->num_rxports + in ub960_create_subdev()
3679 priv->hw_data->num_txports, in ub960_create_subdev()
3680 priv->pads); in ub960_create_subdev()
3684 priv->sd.state_lock = priv->sd.ctrl_handler->lock; in ub960_create_subdev()
3686 ret = v4l2_subdev_init_finalize(&priv->sd); in ub960_create_subdev()
3696 ret = v4l2_async_register_subdev(&priv->sd); in ub960_create_subdev()
3707 v4l2_subdev_cleanup(&priv->sd); in ub960_create_subdev()
3709 media_entity_cleanup(&priv->sd.entity); in ub960_create_subdev()
3711 v4l2_ctrl_handler_free(&priv->ctrl_handler); in ub960_create_subdev()
3719 v4l2_async_unregister_subdev(&priv->sd); in ub960_destroy_subdev()
3721 v4l2_subdev_cleanup(&priv->sd); in ub960_destroy_subdev()
3723 media_entity_cleanup(&priv->sd.entity); in ub960_destroy_subdev()
3724 v4l2_ctrl_handler_free(&priv->ctrl_handler); in ub960_destroy_subdev()
3736 * We do locking in the driver to cover the TX/RX port selection and the
3744 struct device *dev = &priv->client->dev; in ub960_reset()
3754 mutex_lock(&priv->reg_lock); in ub960_reset()
3756 ret = regmap_read_poll_timeout(priv->regmap, UB960_SR_RESET, v, in ub960_reset()
3759 mutex_unlock(&priv->reg_lock); in ub960_reset()
3767 struct device *dev = &priv->client->dev; in ub960_get_hw_resources()
3769 priv->regmap = devm_regmap_init_i2c(priv->client, &ub960_regmap_config); in ub960_get_hw_resources()
3770 if (IS_ERR(priv->regmap)) in ub960_get_hw_resources()
3771 return PTR_ERR(priv->regmap); in ub960_get_hw_resources()
3773 priv->vddio = devm_regulator_get(dev, "vddio"); in ub960_get_hw_resources()
3774 if (IS_ERR(priv->vddio)) in ub960_get_hw_resources()
3775 return dev_err_probe(dev, PTR_ERR(priv->vddio), in ub960_get_hw_resources()
3778 /* get power-down pin from DT */ in ub960_get_hw_resources()
3779 priv->pd_gpio = in ub960_get_hw_resources()
3781 if (IS_ERR(priv->pd_gpio)) in ub960_get_hw_resources()
3782 return dev_err_probe(dev, PTR_ERR(priv->pd_gpio), in ub960_get_hw_resources()
3785 priv->refclk = devm_clk_get(dev, "refclk"); in ub960_get_hw_resources()
3786 if (IS_ERR(priv->refclk)) in ub960_get_hw_resources()
3787 return dev_err_probe(dev, PTR_ERR(priv->refclk), in ub960_get_hw_resources()
3795 struct device *dev = &priv->client->dev; in ub960_enable_core_hw()
3801 ret = regulator_enable(priv->vddio); in ub960_enable_core_hw()
3806 ret = clk_prepare_enable(priv->refclk); in ub960_enable_core_hw()
3812 if (priv->pd_gpio) { in ub960_enable_core_hw()
3813 gpiod_set_value_cansleep(priv->pd_gpio, 1); in ub960_enable_core_hw()
3816 gpiod_set_value_cansleep(priv->pd_gpio, 0); in ub960_enable_core_hw()
3830 dev_dbg(dev, "Found %s (rev/mask %#04x)\n", priv->hw_data->model, in ub960_enable_core_hw()
3843 clk_get_rate(priv->refclk) / 1000000); in ub960_enable_core_hw()
3845 /* Disable all RX ports by default */ in ub960_enable_core_hw()
3851 if (priv->hw_data->is_ub9702) { in ub960_enable_core_hw()
3862 gpiod_set_value_cansleep(priv->pd_gpio, 1); in ub960_enable_core_hw()
3863 clk_disable_unprepare(priv->refclk); in ub960_enable_core_hw()
3865 regulator_disable(priv->vddio); in ub960_enable_core_hw()
3872 gpiod_set_value_cansleep(priv->pd_gpio, 1); in ub960_disable_core_hw()
3873 clk_disable_unprepare(priv->refclk); in ub960_disable_core_hw()
3874 regulator_disable(priv->vddio); in ub960_disable_core_hw()
3879 struct device *dev = &client->dev; in ub960_probe()
3888 return -ENOMEM; in ub960_probe()
3890 priv->client = client; in ub960_probe()
3892 priv->hw_data = device_get_match_data(dev); in ub960_probe()
3894 mutex_init(&priv->reg_lock); in ub960_probe()
3896 INIT_DELAYED_WORK(&priv->poll_work, ub960_handler_work); in ub960_probe()
3902 priv->reg_current.indirect_target = 0xff; in ub960_probe()
3903 priv->reg_current.rxport = 0xff; in ub960_probe()
3904 priv->reg_current.txport = 0xff; in ub960_probe()
3934 for (nport = 0; nport < priv->hw_data->num_rxports; nport++) { in ub960_probe()
3935 struct ub960_rxport *rxport = priv->rxports[nport]; in ub960_probe()
3948 ret = -EIO; in ub960_probe()
3949 dev_err_probe(dev, ret, "Failed to lock all RX ports\n"); in ub960_probe()
3954 * Clear any errors caused by switching the RX port settings while in ub960_probe()
3971 if (client->irq) in ub960_probe()
3974 schedule_delayed_work(&priv->poll_work, in ub960_probe()
3991 mutex_destroy(&priv->reg_lock); in ub960_probe()
4000 cancel_delayed_work_sync(&priv->poll_work); in ub960_remove()
4009 mutex_destroy(&priv->reg_lock); in ub960_remove()
4027 { "ds90ub960-q1", (kernel_ulong_t)&ds90ub960_hw },
4028 { "ds90ub9702-q1", (kernel_ulong_t)&ds90ub9702_hw },
4034 { .compatible = "ti,ds90ub960-q1", .data = &ds90ub960_hw },
4035 { .compatible = "ti,ds90ub9702-q1", .data = &ds90ub9702_hw },
4052 MODULE_DESCRIPTION("Texas Instruments FPD-Link III/IV Deserializers Driver");