1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (C) 2021, Intel Corporation. */ 3 4 #include <linux/delay.h> 5 #include "ice_common.h" 6 #include "ice_ptp_hw.h" 7 #include "ice_ptp_consts.h" 8 #include "ice_cgu_regs.h" 9 10 static struct dpll_pin_frequency ice_cgu_pin_freq_common[] = { 11 DPLL_PIN_FREQUENCY_1PPS, 12 DPLL_PIN_FREQUENCY_10MHZ, 13 }; 14 15 static struct dpll_pin_frequency ice_cgu_pin_freq_1_hz[] = { 16 DPLL_PIN_FREQUENCY_1PPS, 17 }; 18 19 static struct dpll_pin_frequency ice_cgu_pin_freq_10_mhz[] = { 20 DPLL_PIN_FREQUENCY_10MHZ, 21 }; 22 23 static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_inputs[] = { 24 { "CVL-SDP22", ZL_REF0P, DPLL_PIN_TYPE_INT_OSCILLATOR, 25 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, 26 { "CVL-SDP20", ZL_REF0N, DPLL_PIN_TYPE_INT_OSCILLATOR, 27 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, 28 { "C827_0-RCLKA", ZL_REF1P, DPLL_PIN_TYPE_MUX, 0, }, 29 { "C827_0-RCLKB", ZL_REF1N, DPLL_PIN_TYPE_MUX, 0, }, 30 { "SMA1", ZL_REF3P, DPLL_PIN_TYPE_EXT, 31 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, 32 { "SMA2/U.FL2", ZL_REF3N, DPLL_PIN_TYPE_EXT, 33 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, 34 { "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS, 35 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, 36 { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, 0, }, 37 }; 38 39 static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_inputs[] = { 40 { "CVL-SDP22", ZL_REF0P, DPLL_PIN_TYPE_INT_OSCILLATOR, 41 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, 42 { "CVL-SDP20", ZL_REF0N, DPLL_PIN_TYPE_INT_OSCILLATOR, 43 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, 44 { "C827_0-RCLKA", ZL_REF1P, DPLL_PIN_TYPE_MUX, }, 45 { "C827_0-RCLKB", ZL_REF1N, DPLL_PIN_TYPE_MUX, }, 46 { "C827_1-RCLKA", ZL_REF2P, DPLL_PIN_TYPE_MUX, }, 47 { "C827_1-RCLKB", ZL_REF2N, DPLL_PIN_TYPE_MUX, }, 48 { "SMA1", ZL_REF3P, DPLL_PIN_TYPE_EXT, 49 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, 50 { "SMA2/U.FL2", ZL_REF3N, DPLL_PIN_TYPE_EXT, 51 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, 52 { "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS, 53 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, 54 { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, }, 55 }; 56 57 static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_outputs[] = { 58 { "REF-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT, 59 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, 60 { "REF-SMA2/U.FL2", ZL_OUT1, DPLL_PIN_TYPE_EXT, 61 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, 62 { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, }, 63 { "MAC-CLK", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, }, 64 { "CVL-SDP21", ZL_OUT4, DPLL_PIN_TYPE_EXT, 65 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, 66 { "CVL-SDP23", ZL_OUT5, DPLL_PIN_TYPE_EXT, 67 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, 68 }; 69 70 static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_outputs[] = { 71 { "REF-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT, 72 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, 73 { "REF-SMA2/U.FL2", ZL_OUT1, DPLL_PIN_TYPE_EXT, 74 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, 75 { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 }, 76 { "PHY2-CLK", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 }, 77 { "MAC-CLK", ZL_OUT4, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 }, 78 { "CVL-SDP21", ZL_OUT5, DPLL_PIN_TYPE_EXT, 79 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), 
ice_cgu_pin_freq_1_hz }, 80 { "CVL-SDP23", ZL_OUT6, DPLL_PIN_TYPE_EXT, 81 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, 82 }; 83 84 static const struct ice_cgu_pin_desc ice_e823_si_cgu_inputs[] = { 85 { "NONE", SI_REF0P, 0, 0 }, 86 { "NONE", SI_REF0N, 0, 0 }, 87 { "SYNCE0_DP", SI_REF1P, DPLL_PIN_TYPE_MUX, 0 }, 88 { "SYNCE0_DN", SI_REF1N, DPLL_PIN_TYPE_MUX, 0 }, 89 { "EXT_CLK_SYNC", SI_REF2P, DPLL_PIN_TYPE_EXT, 90 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, 91 { "NONE", SI_REF2N, 0, 0 }, 92 { "EXT_PPS_OUT", SI_REF3, DPLL_PIN_TYPE_EXT, 93 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, 94 { "INT_PPS_OUT", SI_REF4, DPLL_PIN_TYPE_EXT, 95 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, 96 }; 97 98 static const struct ice_cgu_pin_desc ice_e823_si_cgu_outputs[] = { 99 { "1588-TIME_SYNC", SI_OUT0, DPLL_PIN_TYPE_EXT, 100 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, 101 { "PHY-CLK", SI_OUT1, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 }, 102 { "10MHZ-SMA2", SI_OUT2, DPLL_PIN_TYPE_EXT, 103 ARRAY_SIZE(ice_cgu_pin_freq_10_mhz), ice_cgu_pin_freq_10_mhz }, 104 { "PPS-SMA1", SI_OUT3, DPLL_PIN_TYPE_EXT, 105 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, 106 }; 107 108 static const struct ice_cgu_pin_desc ice_e823_zl_cgu_inputs[] = { 109 { "NONE", ZL_REF0P, 0, 0 }, 110 { "INT_PPS_OUT", ZL_REF0N, DPLL_PIN_TYPE_EXT, 111 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, 112 { "SYNCE0_DP", ZL_REF1P, DPLL_PIN_TYPE_MUX, 0 }, 113 { "SYNCE0_DN", ZL_REF1N, DPLL_PIN_TYPE_MUX, 0 }, 114 { "NONE", ZL_REF2P, 0, 0 }, 115 { "NONE", ZL_REF2N, 0, 0 }, 116 { "EXT_CLK_SYNC", ZL_REF3P, DPLL_PIN_TYPE_EXT, 117 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, 118 { "NONE", ZL_REF3N, 0, 0 }, 119 { "EXT_PPS_OUT", ZL_REF4P, DPLL_PIN_TYPE_EXT, 120 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, 121 { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, 0 }, 122 }; 123 124 static const struct ice_cgu_pin_desc ice_e823_zl_cgu_outputs[] = { 125 { "PPS-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT, 126 ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, 127 { "10MHZ-SMA2", ZL_OUT1, DPLL_PIN_TYPE_EXT, 128 ARRAY_SIZE(ice_cgu_pin_freq_10_mhz), ice_cgu_pin_freq_10_mhz }, 129 { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 }, 130 { "1588-TIME_REF", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 }, 131 { "CPK-TIME_SYNC", ZL_OUT4, DPLL_PIN_TYPE_EXT, 132 ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, 133 { "NONE", ZL_OUT5, 0, 0 }, 134 }; 135 136 /* Low level functions for interacting with and managing the device clock used 137 * for the Precision Time Protocol. 138 * 139 * The ice hardware represents the current time using three registers: 140 * 141 * GLTSYN_TIME_H GLTSYN_TIME_L GLTSYN_TIME_R 142 * +---------------+ +---------------+ +---------------+ 143 * | 32 bits | | 32 bits | | 32 bits | 144 * +---------------+ +---------------+ +---------------+ 145 * 146 * The registers are incremented every clock tick using a 40bit increment 147 * value defined over two registers: 148 * 149 * GLTSYN_INCVAL_H GLTSYN_INCVAL_L 150 * +---------------+ +---------------+ 151 * | 8 bit s | | 32 bits | 152 * +---------------+ +---------------+ 153 * 154 * The increment value is added to the GLSTYN_TIME_R and GLSTYN_TIME_L 155 * registers every clock source tick. Depending on the specific device 156 * configuration, the clock source frequency could be one of a number of 157 * values. 
158 * 159 * For E810 devices, the increment frequency is 812.5 MHz 160 * 161 * For E822 devices the clock can be derived from different sources, and the 162 * increment has an effective frequency of one of the following: 163 * - 823.4375 MHz 164 * - 783.36 MHz 165 * - 796.875 MHz 166 * - 816 MHz 167 * - 830.078125 MHz 168 * - 783.36 MHz 169 * 170 * The hardware captures timestamps in the PHY for incoming packets, and for 171 * outgoing packets on request. To support this, the PHY maintains a timer 172 * that matches the lower 64 bits of the global source timer. 173 * 174 * In order to ensure that the PHY timers and the source timer are equivalent, 175 * shadow registers are used to prepare the desired initial values. A special 176 * sync command is issued to trigger copying from the shadow registers into 177 * the appropriate source and PHY registers simultaneously. 178 * 179 * The driver supports devices which have different PHYs with subtly different 180 * mechanisms to program and control the timers. We divide the devices into 181 * families named after the first major device, E810 and similar devices, and 182 * E822 and similar devices. 183 * 184 * - E822 based devices have additional support for fine grained Vernier 185 * calibration which requires significant setup 186 * - The layout of timestamp data in the PHY register blocks is different 187 * - The way timer synchronization commands are issued is different. 188 * 189 * To support this, very low level functions have an e810 or e822 suffix 190 * indicating what type of device they work on. Higher level abstractions for 191 * tasks that can be done on both devices do not have the suffix and will 192 * correctly look up the appropriate low level function when running. 193 * 194 * Functions which only make sense on a single device family may not have 195 * a suitable generic implementation 196 */ 197 198 /** 199 * ice_get_ptp_src_clock_index - determine source clock index 200 * @hw: pointer to HW struct 201 * 202 * Determine the source clock index currently in use, based on device 203 * capabilities reported during initialization. 204 */ 205 u8 ice_get_ptp_src_clock_index(struct ice_hw *hw) 206 { 207 return hw->func_caps.ts_func_info.tmr_index_assoc; 208 } 209 210 /** 211 * ice_ptp_read_src_incval - Read source timer increment value 212 * @hw: pointer to HW struct 213 * 214 * Read the increment value of the source timer and return it. 215 */ 216 static u64 ice_ptp_read_src_incval(struct ice_hw *hw) 217 { 218 u32 lo, hi; 219 u8 tmr_idx; 220 221 tmr_idx = ice_get_ptp_src_clock_index(hw); 222 223 lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx)); 224 hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx)); 225 226 return ((u64)(hi & INCVAL_HIGH_M) << 32) | lo; 227 } 228 229 /** 230 * ice_ptp_src_cmd - Prepare source timer for a timer command 231 * @hw: pointer to HW structure 232 * @cmd: Timer command 233 * 234 * Prepare the source timer for an upcoming timer sync command. 
235 */ 236 void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) 237 { 238 u32 cmd_val; 239 u8 tmr_idx; 240 241 tmr_idx = ice_get_ptp_src_clock_index(hw); 242 cmd_val = tmr_idx << SEL_CPK_SRC; 243 244 switch (cmd) { 245 case ICE_PTP_INIT_TIME: 246 cmd_val |= GLTSYN_CMD_INIT_TIME; 247 break; 248 case ICE_PTP_INIT_INCVAL: 249 cmd_val |= GLTSYN_CMD_INIT_INCVAL; 250 break; 251 case ICE_PTP_ADJ_TIME: 252 cmd_val |= GLTSYN_CMD_ADJ_TIME; 253 break; 254 case ICE_PTP_ADJ_TIME_AT_TIME: 255 cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME; 256 break; 257 case ICE_PTP_READ_TIME: 258 cmd_val |= GLTSYN_CMD_READ_TIME; 259 break; 260 case ICE_PTP_NOP: 261 break; 262 } 263 264 wr32(hw, GLTSYN_CMD, cmd_val); 265 } 266 267 /** 268 * ice_ptp_exec_tmr_cmd - Execute all prepared timer commands 269 * @hw: pointer to HW struct 270 * 271 * Write the SYNC_EXEC_CMD bit to the GLTSYN_CMD_SYNC register, and flush the 272 * write immediately. This triggers the hardware to begin executing all of the 273 * source and PHY timer commands synchronously. 274 */ 275 static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw) 276 { 277 struct ice_pf *pf = container_of(hw, struct ice_pf, hw); 278 279 guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock); 280 wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD); 281 ice_flush(hw); 282 } 283 284 /* E822 family functions 285 * 286 * The following functions operate on the E822 family of devices. 287 */ 288 289 /** 290 * ice_fill_phy_msg_e82x - Fill message data for a PHY register access 291 * @msg: the PHY message buffer to fill in 292 * @port: the port to access 293 * @offset: the register offset 294 */ 295 static void 296 ice_fill_phy_msg_e82x(struct ice_sbq_msg_input *msg, u8 port, u16 offset) 297 { 298 int phy_port, phy, quadtype; 299 300 phy_port = port % ICE_PORTS_PER_PHY_E82X; 301 phy = port / ICE_PORTS_PER_PHY_E82X; 302 quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E82X; 303 304 if (quadtype == 0) { 305 msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port); 306 msg->msg_addr_high = P_Q0_H(P_0_BASE + offset, phy_port); 307 } else { 308 msg->msg_addr_low = P_Q1_L(P_4_BASE + offset, phy_port); 309 msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port); 310 } 311 312 if (phy == 0) 313 msg->dest_dev = rmn_0; 314 else if (phy == 1) 315 msg->dest_dev = rmn_1; 316 else 317 msg->dest_dev = rmn_2; 318 } 319 320 /** 321 * ice_is_64b_phy_reg_e82x - Check if this is a 64bit PHY register 322 * @low_addr: the low address to check 323 * @high_addr: on return, contains the high address of the 64bit register 324 * 325 * Checks if the provided low address is one of the known 64bit PHY values 326 * represented as two 32bit registers. If it is, return the appropriate high 327 * register offset to use. 
328 */ 329 static bool ice_is_64b_phy_reg_e82x(u16 low_addr, u16 *high_addr) 330 { 331 switch (low_addr) { 332 case P_REG_PAR_PCS_TX_OFFSET_L: 333 *high_addr = P_REG_PAR_PCS_TX_OFFSET_U; 334 return true; 335 case P_REG_PAR_PCS_RX_OFFSET_L: 336 *high_addr = P_REG_PAR_PCS_RX_OFFSET_U; 337 return true; 338 case P_REG_PAR_TX_TIME_L: 339 *high_addr = P_REG_PAR_TX_TIME_U; 340 return true; 341 case P_REG_PAR_RX_TIME_L: 342 *high_addr = P_REG_PAR_RX_TIME_U; 343 return true; 344 case P_REG_TOTAL_TX_OFFSET_L: 345 *high_addr = P_REG_TOTAL_TX_OFFSET_U; 346 return true; 347 case P_REG_TOTAL_RX_OFFSET_L: 348 *high_addr = P_REG_TOTAL_RX_OFFSET_U; 349 return true; 350 case P_REG_UIX66_10G_40G_L: 351 *high_addr = P_REG_UIX66_10G_40G_U; 352 return true; 353 case P_REG_UIX66_25G_100G_L: 354 *high_addr = P_REG_UIX66_25G_100G_U; 355 return true; 356 case P_REG_TX_CAPTURE_L: 357 *high_addr = P_REG_TX_CAPTURE_U; 358 return true; 359 case P_REG_RX_CAPTURE_L: 360 *high_addr = P_REG_RX_CAPTURE_U; 361 return true; 362 case P_REG_TX_TIMER_INC_PRE_L: 363 *high_addr = P_REG_TX_TIMER_INC_PRE_U; 364 return true; 365 case P_REG_RX_TIMER_INC_PRE_L: 366 *high_addr = P_REG_RX_TIMER_INC_PRE_U; 367 return true; 368 default: 369 return false; 370 } 371 } 372 373 /** 374 * ice_is_40b_phy_reg_e82x - Check if this is a 40bit PHY register 375 * @low_addr: the low address to check 376 * @high_addr: on return, contains the high address of the 40bit value 377 * 378 * Checks if the provided low address is one of the known 40bit PHY values 379 * split into two registers with the lower 8 bits in the low register and the 380 * upper 32 bits in the high register. If it is, return the appropriate high 381 * register offset to use. 382 */ 383 static bool ice_is_40b_phy_reg_e82x(u16 low_addr, u16 *high_addr) 384 { 385 switch (low_addr) { 386 case P_REG_TIMETUS_L: 387 *high_addr = P_REG_TIMETUS_U; 388 return true; 389 case P_REG_PAR_RX_TUS_L: 390 *high_addr = P_REG_PAR_RX_TUS_U; 391 return true; 392 case P_REG_PAR_TX_TUS_L: 393 *high_addr = P_REG_PAR_TX_TUS_U; 394 return true; 395 case P_REG_PCS_RX_TUS_L: 396 *high_addr = P_REG_PCS_RX_TUS_U; 397 return true; 398 case P_REG_PCS_TX_TUS_L: 399 *high_addr = P_REG_PCS_TX_TUS_U; 400 return true; 401 case P_REG_DESK_PAR_RX_TUS_L: 402 *high_addr = P_REG_DESK_PAR_RX_TUS_U; 403 return true; 404 case P_REG_DESK_PAR_TX_TUS_L: 405 *high_addr = P_REG_DESK_PAR_TX_TUS_U; 406 return true; 407 case P_REG_DESK_PCS_RX_TUS_L: 408 *high_addr = P_REG_DESK_PCS_RX_TUS_U; 409 return true; 410 case P_REG_DESK_PCS_TX_TUS_L: 411 *high_addr = P_REG_DESK_PCS_TX_TUS_U; 412 return true; 413 default: 414 return false; 415 } 416 } 417 418 /** 419 * ice_read_phy_reg_e82x - Read a PHY register 420 * @hw: pointer to the HW struct 421 * @port: PHY port to read from 422 * @offset: PHY register offset to read 423 * @val: on return, the contents read from the PHY 424 * 425 * Read a PHY register for the given port over the device sideband queue. 
426 */ 427 static int 428 ice_read_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 *val) 429 { 430 struct ice_sbq_msg_input msg = {0}; 431 int err; 432 433 ice_fill_phy_msg_e82x(&msg, port, offset); 434 msg.opcode = ice_sbq_msg_rd; 435 436 err = ice_sbq_rw_reg(hw, &msg); 437 if (err) { 438 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", 439 err); 440 return err; 441 } 442 443 *val = msg.data; 444 445 return 0; 446 } 447 448 /** 449 * ice_read_64b_phy_reg_e82x - Read a 64bit value from PHY registers 450 * @hw: pointer to the HW struct 451 * @port: PHY port to read from 452 * @low_addr: offset of the lower register to read from 453 * @val: on return, the contents of the 64bit value from the PHY registers 454 * 455 * Reads the two registers associated with a 64bit value and returns it in the 456 * val pointer. The offset always specifies the lower register offset to use. 457 * The high offset is looked up. This function only operates on registers 458 * known to be two parts of a 64bit value. 459 */ 460 static int 461 ice_read_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val) 462 { 463 u32 low, high; 464 u16 high_addr; 465 int err; 466 467 /* Only operate on registers known to be split into two 32bit 468 * registers. 469 */ 470 if (!ice_is_64b_phy_reg_e82x(low_addr, &high_addr)) { 471 ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n", 472 low_addr); 473 return -EINVAL; 474 } 475 476 err = ice_read_phy_reg_e82x(hw, port, low_addr, &low); 477 if (err) { 478 ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x\n, err %d", 479 low_addr, err); 480 return err; 481 } 482 483 err = ice_read_phy_reg_e82x(hw, port, high_addr, &high); 484 if (err) { 485 ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x\n, err %d", 486 high_addr, err); 487 return err; 488 } 489 490 *val = (u64)high << 32 | low; 491 492 return 0; 493 } 494 495 /** 496 * ice_write_phy_reg_e82x - Write a PHY register 497 * @hw: pointer to the HW struct 498 * @port: PHY port to write to 499 * @offset: PHY register offset to write 500 * @val: The value to write to the register 501 * 502 * Write a PHY register for the given port over the device sideband queue. 503 */ 504 static int 505 ice_write_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 val) 506 { 507 struct ice_sbq_msg_input msg = {0}; 508 int err; 509 510 ice_fill_phy_msg_e82x(&msg, port, offset); 511 msg.opcode = ice_sbq_msg_wr; 512 msg.data = val; 513 514 err = ice_sbq_rw_reg(hw, &msg); 515 if (err) { 516 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", 517 err); 518 return err; 519 } 520 521 return 0; 522 } 523 524 /** 525 * ice_write_40b_phy_reg_e82x - Write a 40b value to the PHY 526 * @hw: pointer to the HW struct 527 * @port: port to write to 528 * @low_addr: offset of the low register 529 * @val: 40b value to write 530 * 531 * Write the provided 40b value to the two associated registers by splitting 532 * it up into two chunks, the lower 8 bits and the upper 32 bits. 533 */ 534 static int 535 ice_write_40b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) 536 { 537 u32 low, high; 538 u16 high_addr; 539 int err; 540 541 /* Only operate on registers known to be split into a lower 8 bit 542 * register and an upper 32 bit register. 
543 */ 544 if (!ice_is_40b_phy_reg_e82x(low_addr, &high_addr)) { 545 ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n", 546 low_addr); 547 return -EINVAL; 548 } 549 550 low = (u32)(val & P_REG_40B_LOW_M); 551 high = (u32)(val >> P_REG_40B_HIGH_S); 552 553 err = ice_write_phy_reg_e82x(hw, port, low_addr, low); 554 if (err) { 555 ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d", 556 low_addr, err); 557 return err; 558 } 559 560 err = ice_write_phy_reg_e82x(hw, port, high_addr, high); 561 if (err) { 562 ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d", 563 high_addr, err); 564 return err; 565 } 566 567 return 0; 568 } 569 570 /** 571 * ice_write_64b_phy_reg_e82x - Write a 64bit value to PHY registers 572 * @hw: pointer to the HW struct 573 * @port: PHY port to read from 574 * @low_addr: offset of the lower register to read from 575 * @val: the contents of the 64bit value to write to PHY 576 * 577 * Write the 64bit value to the two associated 32bit PHY registers. The offset 578 * is always specified as the lower register, and the high address is looked 579 * up. This function only operates on registers known to be two parts of 580 * a 64bit value. 581 */ 582 static int 583 ice_write_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) 584 { 585 u32 low, high; 586 u16 high_addr; 587 int err; 588 589 /* Only operate on registers known to be split into two 32bit 590 * registers. 591 */ 592 if (!ice_is_64b_phy_reg_e82x(low_addr, &high_addr)) { 593 ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n", 594 low_addr); 595 return -EINVAL; 596 } 597 598 low = lower_32_bits(val); 599 high = upper_32_bits(val); 600 601 err = ice_write_phy_reg_e82x(hw, port, low_addr, low); 602 if (err) { 603 ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d", 604 low_addr, err); 605 return err; 606 } 607 608 err = ice_write_phy_reg_e82x(hw, port, high_addr, high); 609 if (err) { 610 ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d", 611 high_addr, err); 612 return err; 613 } 614 615 return 0; 616 } 617 618 /** 619 * ice_fill_quad_msg_e82x - Fill message data for quad register access 620 * @msg: the PHY message buffer to fill in 621 * @quad: the quad to access 622 * @offset: the register offset 623 * 624 * Fill a message buffer for accessing a register in a quad shared between 625 * multiple PHYs. 626 */ 627 static int 628 ice_fill_quad_msg_e82x(struct ice_sbq_msg_input *msg, u8 quad, u16 offset) 629 { 630 u32 addr; 631 632 if (quad >= ICE_MAX_QUAD) 633 return -EINVAL; 634 635 msg->dest_dev = rmn_0; 636 637 if ((quad % ICE_QUADS_PER_PHY_E82X) == 0) 638 addr = Q_0_BASE + offset; 639 else 640 addr = Q_1_BASE + offset; 641 642 msg->msg_addr_low = lower_16_bits(addr); 643 msg->msg_addr_high = upper_16_bits(addr); 644 645 return 0; 646 } 647 648 /** 649 * ice_read_quad_reg_e82x - Read a PHY quad register 650 * @hw: pointer to the HW struct 651 * @quad: quad to read from 652 * @offset: quad register offset to read 653 * @val: on return, the contents read from the quad 654 * 655 * Read a quad register over the device sideband queue. Quad registers are 656 * shared between multiple PHYs. 
657 */ 658 int 659 ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val) 660 { 661 struct ice_sbq_msg_input msg = {0}; 662 int err; 663 664 err = ice_fill_quad_msg_e82x(&msg, quad, offset); 665 if (err) 666 return err; 667 668 msg.opcode = ice_sbq_msg_rd; 669 670 err = ice_sbq_rw_reg(hw, &msg); 671 if (err) { 672 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", 673 err); 674 return err; 675 } 676 677 *val = msg.data; 678 679 return 0; 680 } 681 682 /** 683 * ice_write_quad_reg_e82x - Write a PHY quad register 684 * @hw: pointer to the HW struct 685 * @quad: quad to write to 686 * @offset: quad register offset to write 687 * @val: The value to write to the register 688 * 689 * Write a quad register over the device sideband queue. Quad registers are 690 * shared between multiple PHYs. 691 */ 692 int 693 ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val) 694 { 695 struct ice_sbq_msg_input msg = {0}; 696 int err; 697 698 err = ice_fill_quad_msg_e82x(&msg, quad, offset); 699 if (err) 700 return err; 701 702 msg.opcode = ice_sbq_msg_wr; 703 msg.data = val; 704 705 err = ice_sbq_rw_reg(hw, &msg); 706 if (err) { 707 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", 708 err); 709 return err; 710 } 711 712 return 0; 713 } 714 715 /** 716 * ice_read_phy_tstamp_e82x - Read a PHY timestamp out of the quad block 717 * @hw: pointer to the HW struct 718 * @quad: the quad to read from 719 * @idx: the timestamp index to read 720 * @tstamp: on return, the 40bit timestamp value 721 * 722 * Read a 40bit timestamp value out of the two associated registers in the 723 * quad memory block that is shared between the internal PHYs of the E822 724 * family of devices. 725 */ 726 static int 727 ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) 728 { 729 u16 lo_addr, hi_addr; 730 u32 lo, hi; 731 int err; 732 733 lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx); 734 hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx); 735 736 err = ice_read_quad_reg_e82x(hw, quad, lo_addr, &lo); 737 if (err) { 738 ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n", 739 err); 740 return err; 741 } 742 743 err = ice_read_quad_reg_e82x(hw, quad, hi_addr, &hi); 744 if (err) { 745 ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n", 746 err); 747 return err; 748 } 749 750 /* For E822 based internal PHYs, the timestamp is reported with the 751 * lower 8 bits in the low register, and the upper 32 bits in the high 752 * register. 753 */ 754 *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M); 755 756 return 0; 757 } 758 759 /** 760 * ice_clear_phy_tstamp_e82x - Clear a timestamp from the quad block 761 * @hw: pointer to the HW struct 762 * @quad: the quad to read from 763 * @idx: the timestamp index to reset 764 * 765 * Read the timestamp out of the quad to clear its timestamp status bit from 766 * the PHY quad block that is shared between the internal PHYs of the E822 767 * devices. 768 * 769 * Note that unlike E810, software cannot directly write to the quad memory 770 * bank registers. E822 relies on the ice_get_phy_tx_tstamp_ready() function 771 * to determine which timestamps are valid. Reading a timestamp auto-clears 772 * the valid bit. 773 * 774 * To directly clear the contents of the timestamp block entirely, discarding 775 * all timestamp data at once, software should instead use 776 * ice_ptp_reset_ts_memory_quad_e82x(). 
777 * 778 * This function should only be called on an idx whose bit is set according to 779 * ice_get_phy_tx_tstamp_ready(). 780 */ 781 static int 782 ice_clear_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx) 783 { 784 u64 unused_tstamp; 785 int err; 786 787 err = ice_read_phy_tstamp_e82x(hw, quad, idx, &unused_tstamp); 788 if (err) { 789 ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for quad %u, idx %u, err %d\n", 790 quad, idx, err); 791 return err; 792 } 793 794 return 0; 795 } 796 797 /** 798 * ice_ptp_reset_ts_memory_quad_e82x - Clear all timestamps from the quad block 799 * @hw: pointer to the HW struct 800 * @quad: the quad to read from 801 * 802 * Clear all timestamps from the PHY quad block that is shared between the 803 * internal PHYs on the E822 devices. 804 */ 805 void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad) 806 { 807 ice_write_quad_reg_e82x(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M); 808 ice_write_quad_reg_e82x(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M); 809 } 810 811 /** 812 * ice_ptp_reset_ts_memory_e82x - Clear all timestamps from all quad blocks 813 * @hw: pointer to the HW struct 814 */ 815 static void ice_ptp_reset_ts_memory_e82x(struct ice_hw *hw) 816 { 817 unsigned int quad; 818 819 for (quad = 0; quad < ICE_MAX_QUAD; quad++) 820 ice_ptp_reset_ts_memory_quad_e82x(hw, quad); 821 } 822 823 /** 824 * ice_read_cgu_reg_e82x - Read a CGU register 825 * @hw: pointer to the HW struct 826 * @addr: Register address to read 827 * @val: storage for register value read 828 * 829 * Read the contents of a register of the Clock Generation Unit. Only 830 * applicable to E822 devices. 831 */ 832 static int 833 ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val) 834 { 835 struct ice_sbq_msg_input cgu_msg; 836 int err; 837 838 cgu_msg.opcode = ice_sbq_msg_rd; 839 cgu_msg.dest_dev = cgu; 840 cgu_msg.msg_addr_low = addr; 841 cgu_msg.msg_addr_high = 0x0; 842 843 err = ice_sbq_rw_reg(hw, &cgu_msg); 844 if (err) { 845 ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n", 846 addr, err); 847 return err; 848 } 849 850 *val = cgu_msg.data; 851 852 return err; 853 } 854 855 /** 856 * ice_write_cgu_reg_e82x - Write a CGU register 857 * @hw: pointer to the HW struct 858 * @addr: Register address to write 859 * @val: value to write into the register 860 * 861 * Write the specified value to a register of the Clock Generation Unit. Only 862 * applicable to E822 devices. 863 */ 864 static int 865 ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val) 866 { 867 struct ice_sbq_msg_input cgu_msg; 868 int err; 869 870 cgu_msg.opcode = ice_sbq_msg_wr; 871 cgu_msg.dest_dev = cgu; 872 cgu_msg.msg_addr_low = addr; 873 cgu_msg.msg_addr_high = 0x0; 874 cgu_msg.data = val; 875 876 err = ice_sbq_rw_reg(hw, &cgu_msg); 877 if (err) { 878 ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n", 879 addr, err); 880 return err; 881 } 882 883 return err; 884 } 885 886 /** 887 * ice_clk_freq_str - Convert time_ref_freq to string 888 * @clk_freq: Clock frequency 889 * 890 * Convert the specified TIME_REF clock frequency to a string. 
891 */ 892 static const char *ice_clk_freq_str(u8 clk_freq) 893 { 894 switch ((enum ice_time_ref_freq)clk_freq) { 895 case ICE_TIME_REF_FREQ_25_000: 896 return "25 MHz"; 897 case ICE_TIME_REF_FREQ_122_880: 898 return "122.88 MHz"; 899 case ICE_TIME_REF_FREQ_125_000: 900 return "125 MHz"; 901 case ICE_TIME_REF_FREQ_153_600: 902 return "153.6 MHz"; 903 case ICE_TIME_REF_FREQ_156_250: 904 return "156.25 MHz"; 905 case ICE_TIME_REF_FREQ_245_760: 906 return "245.76 MHz"; 907 default: 908 return "Unknown"; 909 } 910 } 911 912 /** 913 * ice_clk_src_str - Convert time_ref_src to string 914 * @clk_src: Clock source 915 * 916 * Convert the specified clock source to its string name. 917 */ 918 static const char *ice_clk_src_str(u8 clk_src) 919 { 920 switch ((enum ice_clk_src)clk_src) { 921 case ICE_CLK_SRC_TCX0: 922 return "TCX0"; 923 case ICE_CLK_SRC_TIME_REF: 924 return "TIME_REF"; 925 default: 926 return "Unknown"; 927 } 928 } 929 930 /** 931 * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit 932 * @hw: pointer to the HW struct 933 * @clk_freq: Clock frequency to program 934 * @clk_src: Clock source to select (TIME_REF, or TCX0) 935 * 936 * Configure the Clock Generation Unit with the desired clock frequency and 937 * time reference, enabling the PLL which drives the PTP hardware clock. 938 */ 939 static int 940 ice_cfg_cgu_pll_e82x(struct ice_hw *hw, enum ice_time_ref_freq clk_freq, 941 enum ice_clk_src clk_src) 942 { 943 union tspll_ro_bwm_lf bwm_lf; 944 union nac_cgu_dword19 dw19; 945 union nac_cgu_dword22 dw22; 946 union nac_cgu_dword24 dw24; 947 union nac_cgu_dword9 dw9; 948 int err; 949 950 if (clk_freq >= NUM_ICE_TIME_REF_FREQ) { 951 dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n", 952 clk_freq); 953 return -EINVAL; 954 } 955 956 if (clk_src >= NUM_ICE_CLK_SRC) { 957 dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n", 958 clk_src); 959 return -EINVAL; 960 } 961 962 if (clk_src == ICE_CLK_SRC_TCX0 && 963 clk_freq != ICE_TIME_REF_FREQ_25_000) { 964 dev_warn(ice_hw_to_dev(hw), 965 "TCX0 only supports 25 MHz frequency\n"); 966 return -EINVAL; 967 } 968 969 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val); 970 if (err) 971 return err; 972 973 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val); 974 if (err) 975 return err; 976 977 err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); 978 if (err) 979 return err; 980 981 /* Log the current clock configuration */ 982 ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", 983 dw24.field.ts_pll_enable ? "enabled" : "disabled", 984 ice_clk_src_str(dw24.field.time_ref_sel), 985 ice_clk_freq_str(dw9.field.time_ref_freq_sel), 986 bwm_lf.field.plllock_true_lock_cri ? 
"locked" : "unlocked"); 987 988 /* Disable the PLL before changing the clock source or frequency */ 989 if (dw24.field.ts_pll_enable) { 990 dw24.field.ts_pll_enable = 0; 991 992 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); 993 if (err) 994 return err; 995 } 996 997 /* Set the frequency */ 998 dw9.field.time_ref_freq_sel = clk_freq; 999 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val); 1000 if (err) 1001 return err; 1002 1003 /* Configure the TS PLL feedback divisor */ 1004 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val); 1005 if (err) 1006 return err; 1007 1008 dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div; 1009 dw19.field.tspll_ndivratio = 1; 1010 1011 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val); 1012 if (err) 1013 return err; 1014 1015 /* Configure the TS PLL post divisor */ 1016 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val); 1017 if (err) 1018 return err; 1019 1020 dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div; 1021 dw22.field.time1588clk_sel_div2 = 0; 1022 1023 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val); 1024 if (err) 1025 return err; 1026 1027 /* Configure the TS PLL pre divisor and clock source */ 1028 err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val); 1029 if (err) 1030 return err; 1031 1032 dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div; 1033 dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div; 1034 dw24.field.time_ref_sel = clk_src; 1035 1036 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); 1037 if (err) 1038 return err; 1039 1040 /* Finally, enable the PLL */ 1041 dw24.field.ts_pll_enable = 1; 1042 1043 err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); 1044 if (err) 1045 return err; 1046 1047 /* Wait to verify if the PLL locks */ 1048 usleep_range(1000, 5000); 1049 1050 err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); 1051 if (err) 1052 return err; 1053 1054 if (!bwm_lf.field.plllock_true_lock_cri) { 1055 dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n"); 1056 return -EBUSY; 1057 } 1058 1059 /* Log the current clock configuration */ 1060 ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", 1061 dw24.field.ts_pll_enable ? "enabled" : "disabled", 1062 ice_clk_src_str(dw24.field.time_ref_sel), 1063 ice_clk_freq_str(dw9.field.time_ref_freq_sel), 1064 bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked"); 1065 1066 return 0; 1067 } 1068 1069 /** 1070 * ice_init_cgu_e82x - Initialize CGU with settings from firmware 1071 * @hw: pointer to the HW structure 1072 * 1073 * Initialize the Clock Generation Unit of the E822 device. 1074 */ 1075 static int ice_init_cgu_e82x(struct ice_hw *hw) 1076 { 1077 struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info; 1078 union tspll_cntr_bist_settings cntr_bist; 1079 int err; 1080 1081 err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS, 1082 &cntr_bist.val); 1083 if (err) 1084 return err; 1085 1086 /* Disable sticky lock detection so lock err reported is accurate */ 1087 cntr_bist.field.i_plllock_sel_0 = 0; 1088 cntr_bist.field.i_plllock_sel_1 = 0; 1089 1090 err = ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS, 1091 cntr_bist.val); 1092 if (err) 1093 return err; 1094 1095 /* Configure the CGU PLL using the parameters from the function 1096 * capabilities. 
1097 */ 1098 err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref, 1099 (enum ice_clk_src)ts_info->clk_src); 1100 if (err) 1101 return err; 1102 1103 return 0; 1104 } 1105 1106 /** 1107 * ice_ptp_set_vernier_wl - Set the window length for vernier calibration 1108 * @hw: pointer to the HW struct 1109 * 1110 * Set the window length used for the vernier port calibration process. 1111 */ 1112 static int ice_ptp_set_vernier_wl(struct ice_hw *hw) 1113 { 1114 u8 port; 1115 1116 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { 1117 int err; 1118 1119 err = ice_write_phy_reg_e82x(hw, port, P_REG_WL, 1120 PTP_VERNIER_WL); 1121 if (err) { 1122 ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, err %d\n", 1123 port, err); 1124 return err; 1125 } 1126 } 1127 1128 return 0; 1129 } 1130 1131 /** 1132 * ice_ptp_init_phc_e82x - Perform E822 specific PHC initialization 1133 * @hw: pointer to HW struct 1134 * 1135 * Perform PHC initialization steps specific to E822 devices. 1136 */ 1137 static int ice_ptp_init_phc_e82x(struct ice_hw *hw) 1138 { 1139 int err; 1140 u32 regval; 1141 1142 /* Enable reading switch and PHY registers over the sideband queue */ 1143 #define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1) 1144 #define PF_SB_REM_DEV_CTL_PHY0 BIT(2) 1145 regval = rd32(hw, PF_SB_REM_DEV_CTL); 1146 regval |= (PF_SB_REM_DEV_CTL_SWITCH_READ | 1147 PF_SB_REM_DEV_CTL_PHY0); 1148 wr32(hw, PF_SB_REM_DEV_CTL, regval); 1149 1150 /* Initialize the Clock Generation Unit */ 1151 err = ice_init_cgu_e82x(hw); 1152 if (err) 1153 return err; 1154 1155 /* Set window length for all the ports */ 1156 return ice_ptp_set_vernier_wl(hw); 1157 } 1158 1159 /** 1160 * ice_ptp_prep_phy_time_e82x - Prepare PHY port with initial time 1161 * @hw: pointer to the HW struct 1162 * @time: Time to initialize the PHY port clocks to 1163 * 1164 * Program the PHY port registers with a new initial time value. The port 1165 * clock will be initialized once the driver issues an ICE_PTP_INIT_TIME sync 1166 * command. The time value is the upper 32 bits of the PHY timer, usually in 1167 * units of nominal nanoseconds. 1168 */ 1169 static int 1170 ice_ptp_prep_phy_time_e82x(struct ice_hw *hw, u32 time) 1171 { 1172 u64 phy_time; 1173 u8 port; 1174 int err; 1175 1176 /* The time represents the upper 32 bits of the PHY timer, so we need 1177 * to shift to account for this when programming. 1178 */ 1179 phy_time = (u64)time << 32; 1180 1181 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { 1182 /* Tx case */ 1183 err = ice_write_64b_phy_reg_e82x(hw, port, 1184 P_REG_TX_TIMER_INC_PRE_L, 1185 phy_time); 1186 if (err) 1187 goto exit_err; 1188 1189 /* Rx case */ 1190 err = ice_write_64b_phy_reg_e82x(hw, port, 1191 P_REG_RX_TIMER_INC_PRE_L, 1192 phy_time); 1193 if (err) 1194 goto exit_err; 1195 } 1196 1197 return 0; 1198 1199 exit_err: 1200 ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, err %d\n", 1201 port, err); 1202 1203 return err; 1204 } 1205 1206 /** 1207 * ice_ptp_prep_port_adj_e82x - Prepare a single port for time adjust 1208 * @hw: pointer to HW struct 1209 * @port: Port number to be programmed 1210 * @time: time in cycles to adjust the port Tx and Rx clocks 1211 * 1212 * Program the port for an atomic adjustment by writing the Tx and Rx timer 1213 * registers. The atomic adjustment won't be completed until the driver issues 1214 * an ICE_PTP_ADJ_TIME command. 1215 * 1216 * Note that time is not in units of nanoseconds. 
It is in clock time 1217 * including the lower sub-nanosecond portion of the port timer. 1218 * 1219 * Negative adjustments are supported using 2s complement arithmetic. 1220 */ 1221 static int 1222 ice_ptp_prep_port_adj_e82x(struct ice_hw *hw, u8 port, s64 time) 1223 { 1224 u32 l_time, u_time; 1225 int err; 1226 1227 l_time = lower_32_bits(time); 1228 u_time = upper_32_bits(time); 1229 1230 /* Tx case */ 1231 err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_L, 1232 l_time); 1233 if (err) 1234 goto exit_err; 1235 1236 err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_U, 1237 u_time); 1238 if (err) 1239 goto exit_err; 1240 1241 /* Rx case */ 1242 err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_L, 1243 l_time); 1244 if (err) 1245 goto exit_err; 1246 1247 err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_U, 1248 u_time); 1249 if (err) 1250 goto exit_err; 1251 1252 return 0; 1253 1254 exit_err: 1255 ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, err %d\n", 1256 port, err); 1257 return err; 1258 } 1259 1260 /** 1261 * ice_ptp_prep_phy_adj_e82x - Prep PHY ports for a time adjustment 1262 * @hw: pointer to HW struct 1263 * @adj: adjustment in nanoseconds 1264 * 1265 * Prepare the PHY ports for an atomic time adjustment by programming the PHY 1266 * Tx and Rx port registers. The actual adjustment is completed by issuing an 1267 * ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command. 1268 */ 1269 static int 1270 ice_ptp_prep_phy_adj_e82x(struct ice_hw *hw, s32 adj) 1271 { 1272 s64 cycles; 1273 u8 port; 1274 1275 /* The port clock supports adjustment of the sub-nanosecond portion of 1276 * the clock. We shift the provided adjustment in nanoseconds to 1277 * calculate the appropriate adjustment to program into the PHY ports. 1278 */ 1279 if (adj > 0) 1280 cycles = (s64)adj << 32; 1281 else 1282 cycles = -(((s64)-adj) << 32); 1283 1284 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { 1285 int err; 1286 1287 err = ice_ptp_prep_port_adj_e82x(hw, port, cycles); 1288 if (err) 1289 return err; 1290 } 1291 1292 return 0; 1293 } 1294 1295 /** 1296 * ice_ptp_prep_phy_incval_e82x - Prepare PHY ports for time adjustment 1297 * @hw: pointer to HW struct 1298 * @incval: new increment value to prepare 1299 * 1300 * Prepare each of the PHY ports for a new increment value by programming the 1301 * port's TIMETUS registers. The new increment value will be updated after 1302 * issuing an ICE_PTP_INIT_INCVAL command. 1303 */ 1304 static int 1305 ice_ptp_prep_phy_incval_e82x(struct ice_hw *hw, u64 incval) 1306 { 1307 int err; 1308 u8 port; 1309 1310 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { 1311 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L, 1312 incval); 1313 if (err) 1314 goto exit_err; 1315 } 1316 1317 return 0; 1318 1319 exit_err: 1320 ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, err %d\n", 1321 port, err); 1322 1323 return err; 1324 } 1325 1326 /** 1327 * ice_ptp_read_port_capture - Read a port's local time capture 1328 * @hw: pointer to HW struct 1329 * @port: Port number to read 1330 * @tx_ts: on return, the Tx port time capture 1331 * @rx_ts: on return, the Rx port time capture 1332 * 1333 * Read the port's Tx and Rx local time capture values. 1334 * 1335 * Note this has no equivalent for the E810 devices. 
1336 */ 1337 static int 1338 ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts) 1339 { 1340 int err; 1341 1342 /* Tx case */ 1343 err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_TX_CAPTURE_L, tx_ts); 1344 if (err) { 1345 ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n", 1346 err); 1347 return err; 1348 } 1349 1350 ice_debug(hw, ICE_DBG_PTP, "tx_init = 0x%016llx\n", 1351 (unsigned long long)*tx_ts); 1352 1353 /* Rx case */ 1354 err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_RX_CAPTURE_L, rx_ts); 1355 if (err) { 1356 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n", 1357 err); 1358 return err; 1359 } 1360 1361 ice_debug(hw, ICE_DBG_PTP, "rx_init = 0x%016llx\n", 1362 (unsigned long long)*rx_ts); 1363 1364 return 0; 1365 } 1366 1367 /** 1368 * ice_ptp_write_port_cmd_e82x - Prepare a single PHY port for a timer command 1369 * @hw: pointer to HW struct 1370 * @port: Port to which cmd has to be sent 1371 * @cmd: Command to be sent to the port 1372 * 1373 * Prepare the requested port for an upcoming timer sync command. 1374 * 1375 * Do not use this function directly. If you want to configure exactly one 1376 * port, use ice_ptp_one_port_cmd() instead. 1377 */ 1378 static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port, 1379 enum ice_ptp_tmr_cmd cmd) 1380 { 1381 u32 cmd_val, val; 1382 u8 tmr_idx; 1383 int err; 1384 1385 tmr_idx = ice_get_ptp_src_clock_index(hw); 1386 cmd_val = tmr_idx << SEL_PHY_SRC; 1387 switch (cmd) { 1388 case ICE_PTP_INIT_TIME: 1389 cmd_val |= PHY_CMD_INIT_TIME; 1390 break; 1391 case ICE_PTP_INIT_INCVAL: 1392 cmd_val |= PHY_CMD_INIT_INCVAL; 1393 break; 1394 case ICE_PTP_ADJ_TIME: 1395 cmd_val |= PHY_CMD_ADJ_TIME; 1396 break; 1397 case ICE_PTP_READ_TIME: 1398 cmd_val |= PHY_CMD_READ_TIME; 1399 break; 1400 case ICE_PTP_ADJ_TIME_AT_TIME: 1401 cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME; 1402 break; 1403 case ICE_PTP_NOP: 1404 break; 1405 } 1406 1407 /* Tx case */ 1408 /* Read, modify, write */ 1409 err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, &val); 1410 if (err) { 1411 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n", 1412 err); 1413 return err; 1414 } 1415 1416 /* Modify necessary bits only and perform write */ 1417 val &= ~TS_CMD_MASK; 1418 val |= cmd_val; 1419 1420 err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, val); 1421 if (err) { 1422 ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n", 1423 err); 1424 return err; 1425 } 1426 1427 /* Rx case */ 1428 /* Read, modify, write */ 1429 err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, &val); 1430 if (err) { 1431 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n", 1432 err); 1433 return err; 1434 } 1435 1436 /* Modify necessary bits only and perform write */ 1437 val &= ~TS_CMD_MASK; 1438 val |= cmd_val; 1439 1440 err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, val); 1441 if (err) { 1442 ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n", 1443 err); 1444 return err; 1445 } 1446 1447 return 0; 1448 } 1449 1450 /** 1451 * ice_ptp_one_port_cmd - Prepare one port for a timer command 1452 * @hw: pointer to the HW struct 1453 * @configured_port: the port to configure with configured_cmd 1454 * @configured_cmd: timer command to prepare on the configured_port 1455 * 1456 * Prepare the configured_port for the configured_cmd, and prepare all other 1457 * ports for ICE_PTP_NOP. 
This causes the configured_port to execute the 1458 * desired command while all other ports perform no operation. 1459 */ 1460 static int 1461 ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port, 1462 enum ice_ptp_tmr_cmd configured_cmd) 1463 { 1464 u8 port; 1465 1466 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { 1467 enum ice_ptp_tmr_cmd cmd; 1468 int err; 1469 1470 if (port == configured_port) 1471 cmd = configured_cmd; 1472 else 1473 cmd = ICE_PTP_NOP; 1474 1475 err = ice_ptp_write_port_cmd_e82x(hw, port, cmd); 1476 if (err) 1477 return err; 1478 } 1479 1480 return 0; 1481 } 1482 1483 /** 1484 * ice_ptp_port_cmd_e82x - Prepare all ports for a timer command 1485 * @hw: pointer to the HW struct 1486 * @cmd: timer command to prepare 1487 * 1488 * Prepare all ports connected to this device for an upcoming timer sync 1489 * command. 1490 */ 1491 static int 1492 ice_ptp_port_cmd_e82x(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) 1493 { 1494 u8 port; 1495 1496 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { 1497 int err; 1498 1499 err = ice_ptp_write_port_cmd_e82x(hw, port, cmd); 1500 if (err) 1501 return err; 1502 } 1503 1504 return 0; 1505 } 1506 1507 /* E822 Vernier calibration functions 1508 * 1509 * The following functions are used as part of the vernier calibration of 1510 * a port. This calibration increases the precision of the timestamps on the 1511 * port. 1512 */ 1513 1514 /** 1515 * ice_phy_get_speed_and_fec_e82x - Get link speed and FEC based on serdes mode 1516 * @hw: pointer to HW struct 1517 * @port: the port to read from 1518 * @link_out: if non-NULL, holds link speed on success 1519 * @fec_out: if non-NULL, holds FEC algorithm on success 1520 * 1521 * Read the serdes data for the PHY port and extract the link speed and FEC 1522 * algorithm. 
1523 */ 1524 static int 1525 ice_phy_get_speed_and_fec_e82x(struct ice_hw *hw, u8 port, 1526 enum ice_ptp_link_spd *link_out, 1527 enum ice_ptp_fec_mode *fec_out) 1528 { 1529 enum ice_ptp_link_spd link; 1530 enum ice_ptp_fec_mode fec; 1531 u32 serdes; 1532 int err; 1533 1534 err = ice_read_phy_reg_e82x(hw, port, P_REG_LINK_SPEED, &serdes); 1535 if (err) { 1536 ice_debug(hw, ICE_DBG_PTP, "Failed to read serdes info\n"); 1537 return err; 1538 } 1539 1540 /* Determine the FEC algorithm */ 1541 fec = (enum ice_ptp_fec_mode)P_REG_LINK_SPEED_FEC_MODE(serdes); 1542 1543 serdes &= P_REG_LINK_SPEED_SERDES_M; 1544 1545 /* Determine the link speed */ 1546 if (fec == ICE_PTP_FEC_MODE_RS_FEC) { 1547 switch (serdes) { 1548 case ICE_PTP_SERDES_25G: 1549 link = ICE_PTP_LNK_SPD_25G_RS; 1550 break; 1551 case ICE_PTP_SERDES_50G: 1552 link = ICE_PTP_LNK_SPD_50G_RS; 1553 break; 1554 case ICE_PTP_SERDES_100G: 1555 link = ICE_PTP_LNK_SPD_100G_RS; 1556 break; 1557 default: 1558 return -EIO; 1559 } 1560 } else { 1561 switch (serdes) { 1562 case ICE_PTP_SERDES_1G: 1563 link = ICE_PTP_LNK_SPD_1G; 1564 break; 1565 case ICE_PTP_SERDES_10G: 1566 link = ICE_PTP_LNK_SPD_10G; 1567 break; 1568 case ICE_PTP_SERDES_25G: 1569 link = ICE_PTP_LNK_SPD_25G; 1570 break; 1571 case ICE_PTP_SERDES_40G: 1572 link = ICE_PTP_LNK_SPD_40G; 1573 break; 1574 case ICE_PTP_SERDES_50G: 1575 link = ICE_PTP_LNK_SPD_50G; 1576 break; 1577 default: 1578 return -EIO; 1579 } 1580 } 1581 1582 if (link_out) 1583 *link_out = link; 1584 if (fec_out) 1585 *fec_out = fec; 1586 1587 return 0; 1588 } 1589 1590 /** 1591 * ice_phy_cfg_lane_e82x - Configure PHY quad for single/multi-lane timestamp 1592 * @hw: pointer to HW struct 1593 * @port: to configure the quad for 1594 */ 1595 static void ice_phy_cfg_lane_e82x(struct ice_hw *hw, u8 port) 1596 { 1597 enum ice_ptp_link_spd link_spd; 1598 int err; 1599 u32 val; 1600 u8 quad; 1601 1602 err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, NULL); 1603 if (err) { 1604 ice_debug(hw, ICE_DBG_PTP, "Failed to get PHY link speed, err %d\n", 1605 err); 1606 return; 1607 } 1608 1609 quad = port / ICE_PORTS_PER_QUAD; 1610 1611 err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val); 1612 if (err) { 1613 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEM_GLB_CFG, err %d\n", 1614 err); 1615 return; 1616 } 1617 1618 if (link_spd >= ICE_PTP_LNK_SPD_40G) 1619 val &= ~Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M; 1620 else 1621 val |= Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M; 1622 1623 err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, val); 1624 if (err) { 1625 ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_MEM_GBL_CFG, err %d\n", 1626 err); 1627 return; 1628 } 1629 } 1630 1631 /** 1632 * ice_phy_cfg_uix_e82x - Configure Serdes UI to TU conversion for E822 1633 * @hw: pointer to the HW structure 1634 * @port: the port to configure 1635 * 1636 * Program the conversion ration of Serdes clock "unit intervals" (UIs) to PHC 1637 * hardware clock time units (TUs). That is, determine the number of TUs per 1638 * serdes unit interval, and program the UIX registers with this conversion. 1639 * 1640 * This conversion is used as part of the calibration process when determining 1641 * the additional error of a timestamp vs the real time of transmission or 1642 * receipt of the packet. 1643 * 1644 * Hardware uses the number of TUs per 66 UIs, written to the UIX registers 1645 * for the two main serdes clock rates, 10G/40G and 25G/100G serdes clocks. 
1646 * 1647 * To calculate the conversion ratio, we use the following facts: 1648 * 1649 * a) the clock frequency in Hz (cycles per second) 1650 * b) the number of TUs per cycle (the increment value of the clock) 1651 * c) 1 second per 1 billion nanoseconds 1652 * d) the duration of 66 UIs in nanoseconds 1653 * 1654 * Given these facts, we can use the following table to work out what ratios 1655 * to multiply in order to get the number of TUs per 66 UIs: 1656 * 1657 * cycles | 1 second | incval (TUs) | nanoseconds 1658 * -------+--------------+--------------+------------- 1659 * second | 1 billion ns | cycle | 66 UIs 1660 * 1661 * To perform the multiplication using integers without too much loss of 1662 * precision, we can take use the following equation: 1663 * 1664 * (freq * incval * 6600 LINE_UI ) / ( 100 * 1 billion) 1665 * 1666 * We scale up to using 6600 UI instead of 66 in order to avoid fractional 1667 * nanosecond UIs (66 UI at 10G/40G is 6.4 ns) 1668 * 1669 * The increment value has a maximum expected range of about 34 bits, while 1670 * the frequency value is about 29 bits. Multiplying these values shouldn't 1671 * overflow the 64 bits. However, we must then further multiply them again by 1672 * the Serdes unit interval duration. To avoid overflow here, we split the 1673 * overall divide by 1e11 into a divide by 256 (shift down by 8 bits) and 1674 * a divide by 390,625,000. This does lose some precision, but avoids 1675 * miscalculation due to arithmetic overflow. 1676 */ 1677 static int ice_phy_cfg_uix_e82x(struct ice_hw *hw, u8 port) 1678 { 1679 u64 cur_freq, clk_incval, tu_per_sec, uix; 1680 int err; 1681 1682 cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw)); 1683 clk_incval = ice_ptp_read_src_incval(hw); 1684 1685 /* Calculate TUs per second divided by 256 */ 1686 tu_per_sec = (cur_freq * clk_incval) >> 8; 1687 1688 #define LINE_UI_10G_40G 640 /* 6600 UIs is 640 nanoseconds at 10Gb/40Gb */ 1689 #define LINE_UI_25G_100G 256 /* 6600 UIs is 256 nanoseconds at 25Gb/100Gb */ 1690 1691 /* Program the 10Gb/40Gb conversion ratio */ 1692 uix = div_u64(tu_per_sec * LINE_UI_10G_40G, 390625000); 1693 1694 err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_UIX66_10G_40G_L, 1695 uix); 1696 if (err) { 1697 ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_10G_40G, err %d\n", 1698 err); 1699 return err; 1700 } 1701 1702 /* Program the 25Gb/100Gb conversion ratio */ 1703 uix = div_u64(tu_per_sec * LINE_UI_25G_100G, 390625000); 1704 1705 err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_UIX66_25G_100G_L, 1706 uix); 1707 if (err) { 1708 ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_25G_100G, err %d\n", 1709 err); 1710 return err; 1711 } 1712 1713 return 0; 1714 } 1715 1716 /** 1717 * ice_phy_cfg_parpcs_e82x - Configure TUs per PAR/PCS clock cycle 1718 * @hw: pointer to the HW struct 1719 * @port: port to configure 1720 * 1721 * Configure the number of TUs for the PAR and PCS clocks used as part of the 1722 * timestamp calibration process. This depends on the link speed, as the PHY 1723 * uses different markers depending on the speed. 
1724 * 1725 * 1Gb/10Gb/25Gb: 1726 * - Tx/Rx PAR/PCS markers 1727 * 1728 * 25Gb RS: 1729 * - Tx/Rx Reed Solomon gearbox PAR/PCS markers 1730 * 1731 * 40Gb/50Gb: 1732 * - Tx/Rx PAR/PCS markers 1733 * - Rx Deskew PAR/PCS markers 1734 * 1735 * 50G RS and 100GB RS: 1736 * - Tx/Rx Reed Solomon gearbox PAR/PCS markers 1737 * - Rx Deskew PAR/PCS markers 1738 * - Tx PAR/PCS markers 1739 * 1740 * To calculate the conversion, we use the PHC clock frequency (cycles per 1741 * second), the increment value (TUs per cycle), and the related PHY clock 1742 * frequency to calculate the TUs per unit of the PHY link clock. The 1743 * following table shows how the units convert: 1744 * 1745 * cycles | TUs | second 1746 * -------+-------+-------- 1747 * second | cycle | cycles 1748 * 1749 * For each conversion register, look up the appropriate frequency from the 1750 * e822 PAR/PCS table and calculate the TUs per unit of that clock. Program 1751 * this to the appropriate register, preparing hardware to perform timestamp 1752 * calibration to calculate the total Tx or Rx offset to adjust the timestamp 1753 * in order to calibrate for the internal PHY delays. 1754 * 1755 * Note that the increment value ranges up to ~34 bits, and the clock 1756 * frequency is ~29 bits, so multiplying them together should fit within the 1757 * 64 bit arithmetic. 1758 */ 1759 static int ice_phy_cfg_parpcs_e82x(struct ice_hw *hw, u8 port) 1760 { 1761 u64 cur_freq, clk_incval, tu_per_sec, phy_tus; 1762 enum ice_ptp_link_spd link_spd; 1763 enum ice_ptp_fec_mode fec_mode; 1764 int err; 1765 1766 err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode); 1767 if (err) 1768 return err; 1769 1770 cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw)); 1771 clk_incval = ice_ptp_read_src_incval(hw); 1772 1773 /* Calculate TUs per cycle of the PHC clock */ 1774 tu_per_sec = cur_freq * clk_incval; 1775 1776 /* For each PHY conversion register, look up the appropriate link 1777 * speed frequency and determine the TUs per that clock's cycle time. 1778 * Split this into a high and low value and then program the 1779 * appropriate register. If that link speed does not use the 1780 * associated register, write zeros to clear it instead. 
1781 */ 1782 1783 /* P_REG_PAR_TX_TUS */ 1784 if (e822_vernier[link_spd].tx_par_clk) 1785 phy_tus = div_u64(tu_per_sec, 1786 e822_vernier[link_spd].tx_par_clk); 1787 else 1788 phy_tus = 0; 1789 1790 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PAR_TX_TUS_L, 1791 phy_tus); 1792 if (err) 1793 return err; 1794 1795 /* P_REG_PAR_RX_TUS */ 1796 if (e822_vernier[link_spd].rx_par_clk) 1797 phy_tus = div_u64(tu_per_sec, 1798 e822_vernier[link_spd].rx_par_clk); 1799 else 1800 phy_tus = 0; 1801 1802 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PAR_RX_TUS_L, 1803 phy_tus); 1804 if (err) 1805 return err; 1806 1807 /* P_REG_PCS_TX_TUS */ 1808 if (e822_vernier[link_spd].tx_pcs_clk) 1809 phy_tus = div_u64(tu_per_sec, 1810 e822_vernier[link_spd].tx_pcs_clk); 1811 else 1812 phy_tus = 0; 1813 1814 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PCS_TX_TUS_L, 1815 phy_tus); 1816 if (err) 1817 return err; 1818 1819 /* P_REG_PCS_RX_TUS */ 1820 if (e822_vernier[link_spd].rx_pcs_clk) 1821 phy_tus = div_u64(tu_per_sec, 1822 e822_vernier[link_spd].rx_pcs_clk); 1823 else 1824 phy_tus = 0; 1825 1826 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PCS_RX_TUS_L, 1827 phy_tus); 1828 if (err) 1829 return err; 1830 1831 /* P_REG_DESK_PAR_TX_TUS */ 1832 if (e822_vernier[link_spd].tx_desk_rsgb_par) 1833 phy_tus = div_u64(tu_per_sec, 1834 e822_vernier[link_spd].tx_desk_rsgb_par); 1835 else 1836 phy_tus = 0; 1837 1838 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PAR_TX_TUS_L, 1839 phy_tus); 1840 if (err) 1841 return err; 1842 1843 /* P_REG_DESK_PAR_RX_TUS */ 1844 if (e822_vernier[link_spd].rx_desk_rsgb_par) 1845 phy_tus = div_u64(tu_per_sec, 1846 e822_vernier[link_spd].rx_desk_rsgb_par); 1847 else 1848 phy_tus = 0; 1849 1850 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PAR_RX_TUS_L, 1851 phy_tus); 1852 if (err) 1853 return err; 1854 1855 /* P_REG_DESK_PCS_TX_TUS */ 1856 if (e822_vernier[link_spd].tx_desk_rsgb_pcs) 1857 phy_tus = div_u64(tu_per_sec, 1858 e822_vernier[link_spd].tx_desk_rsgb_pcs); 1859 else 1860 phy_tus = 0; 1861 1862 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PCS_TX_TUS_L, 1863 phy_tus); 1864 if (err) 1865 return err; 1866 1867 /* P_REG_DESK_PCS_RX_TUS */ 1868 if (e822_vernier[link_spd].rx_desk_rsgb_pcs) 1869 phy_tus = div_u64(tu_per_sec, 1870 e822_vernier[link_spd].rx_desk_rsgb_pcs); 1871 else 1872 phy_tus = 0; 1873 1874 return ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PCS_RX_TUS_L, 1875 phy_tus); 1876 } 1877 1878 /** 1879 * ice_calc_fixed_tx_offset_e82x - Calculated Fixed Tx offset for a port 1880 * @hw: pointer to the HW struct 1881 * @link_spd: the Link speed to calculate for 1882 * 1883 * Calculate the fixed offset due to known static latency data. 1884 */ 1885 static u64 1886 ice_calc_fixed_tx_offset_e82x(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) 1887 { 1888 u64 cur_freq, clk_incval, tu_per_sec, fixed_offset; 1889 1890 cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw)); 1891 clk_incval = ice_ptp_read_src_incval(hw); 1892 1893 /* Calculate TUs per second */ 1894 tu_per_sec = cur_freq * clk_incval; 1895 1896 /* Calculate number of TUs to add for the fixed Tx latency. Since the 1897 * latency measurement is in 1/100th of a nanosecond, we need to 1898 * multiply by tu_per_sec and then divide by 1e11. This calculation 1899 * overflows 64 bit integer arithmetic, so break it up into two 1900 * divisions by 1e4 first then by 1e7. 
1901 */
1902 fixed_offset = div_u64(tu_per_sec, 10000);
1903 fixed_offset *= e822_vernier[link_spd].tx_fixed_delay;
1904 fixed_offset = div_u64(fixed_offset, 10000000);
1905
1906 return fixed_offset;
1907 }
1908
1909 /**
1910 * ice_phy_cfg_tx_offset_e82x - Configure total Tx timestamp offset
1911 * @hw: pointer to the HW struct
1912 * @port: the PHY port to configure
1913 *
1914 * Program the P_REG_TOTAL_TX_OFFSET register with the total number of TUs to
1915 * adjust Tx timestamps by. This is calculated by combining some known static
1916 * latency along with the Vernier offset computations done by hardware.
1917 *
1918 * This function will not return successfully until the Tx offset calculations
1919 * have been completed, which requires waiting until at least one packet has
1920 * been transmitted by the device. It is safe to call this function
1921 * periodically until calibration succeeds, as it will only program the offset
1922 * once.
1923 *
1924 * To avoid overflow, when calculating the offset based on the known static
1925 * latency values, we use measurements in 1/100th of a nanosecond, and divide
1926 * the TUs per second up front. This avoids overflow while allowing
1927 * calculation of the adjustment using integer arithmetic.
1928 *
1929 * Returns zero on success, -EBUSY if the hardware vernier offset
1930 * calibration has not completed, or another error code on failure.
1931 */
1932 int ice_phy_cfg_tx_offset_e82x(struct ice_hw *hw, u8 port)
1933 {
1934 enum ice_ptp_link_spd link_spd;
1935 enum ice_ptp_fec_mode fec_mode;
1936 u64 total_offset, val;
1937 int err;
1938 u32 reg;
1939
1940 /* Nothing to do if we've already programmed the offset */
1941 err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_OR, &reg);
1942 if (err) {
1943 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OR for port %u, err %d\n",
1944 port, err);
1945 return err;
1946 }
1947
1948 if (reg)
1949 return 0;
1950
1951 err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_OV_STATUS, &reg);
1952 if (err) {
1953 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, err %d\n",
1954 port, err);
1955 return err;
1956 }
1957
1958 if (!(reg & P_REG_TX_OV_STATUS_OV_M))
1959 return -EBUSY;
1960
1961 err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode);
1962 if (err)
1963 return err;
1964
1965 total_offset = ice_calc_fixed_tx_offset_e82x(hw, link_spd);
1966
1967 /* Read the first Vernier offset from the PHY register and add it to
1968 * the total offset.
1969 */
1970 if (link_spd == ICE_PTP_LNK_SPD_1G ||
1971 link_spd == ICE_PTP_LNK_SPD_10G ||
1972 link_spd == ICE_PTP_LNK_SPD_25G ||
1973 link_spd == ICE_PTP_LNK_SPD_25G_RS ||
1974 link_spd == ICE_PTP_LNK_SPD_40G ||
1975 link_spd == ICE_PTP_LNK_SPD_50G) {
1976 err = ice_read_64b_phy_reg_e82x(hw, port,
1977 P_REG_PAR_PCS_TX_OFFSET_L,
1978 &val);
1979 if (err)
1980 return err;
1981
1982 total_offset += val;
1983 }
1984
1985 /* For Tx, we only need to use the second Vernier offset for
1986 * multi-lane link speeds with RS-FEC. The lanes will always be
1987 * aligned.
1988 */
1989 if (link_spd == ICE_PTP_LNK_SPD_50G_RS ||
1990 link_spd == ICE_PTP_LNK_SPD_100G_RS) {
1991 err = ice_read_64b_phy_reg_e82x(hw, port,
1992 P_REG_PAR_TX_TIME_L,
1993 &val);
1994 if (err)
1995 return err;
1996
1997 total_offset += val;
1998 }
1999
2000 /* Now that the total offset has been calculated, program it to the
2001 * PHY and indicate that the Tx offset is ready. After this,
2002 * timestamps will be enabled.
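 *
 * To recap what went into total_offset above: the fixed latency offset is
 * always included, the P_REG_PAR_PCS_TX_OFFSET value is added for the
 * non-RS speeds plus 25G-RS, and the P_REG_PAR_TX_TIME value is added only
 * for 50G-RS and 100G-RS, where the lanes are always aligned.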
2003 */ 2004 err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TOTAL_TX_OFFSET_L, 2005 total_offset); 2006 if (err) 2007 return err; 2008 2009 err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 1); 2010 if (err) 2011 return err; 2012 2013 dev_info(ice_hw_to_dev(hw), "Port=%d Tx vernier offset calibration complete\n", 2014 port); 2015 2016 return 0; 2017 } 2018 2019 /** 2020 * ice_phy_calc_pmd_adj_e82x - Calculate PMD adjustment for Rx 2021 * @hw: pointer to the HW struct 2022 * @port: the PHY port to adjust for 2023 * @link_spd: the current link speed of the PHY 2024 * @fec_mode: the current FEC mode of the PHY 2025 * @pmd_adj: on return, the amount to adjust the Rx total offset by 2026 * 2027 * Calculates the adjustment to Rx timestamps due to PMD alignment in the PHY. 2028 * This varies by link speed and FEC mode. The value calculated accounts for 2029 * various delays caused when receiving a packet. 2030 */ 2031 static int 2032 ice_phy_calc_pmd_adj_e82x(struct ice_hw *hw, u8 port, 2033 enum ice_ptp_link_spd link_spd, 2034 enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj) 2035 { 2036 u64 cur_freq, clk_incval, tu_per_sec, mult, adj; 2037 u8 pmd_align; 2038 u32 val; 2039 int err; 2040 2041 err = ice_read_phy_reg_e82x(hw, port, P_REG_PMD_ALIGNMENT, &val); 2042 if (err) { 2043 ice_debug(hw, ICE_DBG_PTP, "Failed to read PMD alignment, err %d\n", 2044 err); 2045 return err; 2046 } 2047 2048 pmd_align = (u8)val; 2049 2050 cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw)); 2051 clk_incval = ice_ptp_read_src_incval(hw); 2052 2053 /* Calculate TUs per second */ 2054 tu_per_sec = cur_freq * clk_incval; 2055 2056 /* The PMD alignment adjustment measurement depends on the link speed, 2057 * and whether FEC is enabled. For each link speed, the alignment 2058 * adjustment is calculated by dividing a value by the length of 2059 * a Time Unit in nanoseconds. 2060 * 2061 * 1G: align == 4 ? 10 * 0.8 : (align + 6 % 10) * 0.8 2062 * 10G: align == 65 ? 0 : (align * 0.1 * 32/33) 2063 * 10G w/FEC: align * 0.1 * 32/33 2064 * 25G: align == 65 ? 0 : (align * 0.4 * 32/33) 2065 * 25G w/FEC: align * 0.4 * 32/33 2066 * 40G: align == 65 ? 0 : (align * 0.1 * 32/33) 2067 * 40G w/FEC: align * 0.1 * 32/33 2068 * 50G: align == 65 ? 0 : (align * 0.4 * 32/33) 2069 * 50G w/FEC: align * 0.8 * 32/33 2070 * 2071 * For RS-FEC, if align is < 17 then we must also add 1.6 * 32/33. 2072 * 2073 * To allow for calculating this value using integer arithmetic, we 2074 * instead start with the number of TUs per second, (inverse of the 2075 * length of a Time Unit in nanoseconds), multiply by a value based 2076 * on the PMD alignment register, and then divide by the right value 2077 * calculated based on the table above. To avoid integer overflow this 2078 * division is broken up into a step of dividing by 125 first. 
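 *
 * Note that "(align + 6 % 10)" in the 1G row above is implemented below as
 * ((align + 6) % 10). As a purely illustrative example of the integer math
 * (hypothetical numbers, not a real divisor from the e822 table): with
 * tu_per_sec = 8e17, mult = 7 and a pmd_adj_divisor of 1e7, the adjustment
 * is (8e17 / 125) * 7 / 1e7 = 6.4e15 * 7 / 1e7 = 4.48e9 TUs.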
2079 */ 2080 if (link_spd == ICE_PTP_LNK_SPD_1G) { 2081 if (pmd_align == 4) 2082 mult = 10; 2083 else 2084 mult = (pmd_align + 6) % 10; 2085 } else if (link_spd == ICE_PTP_LNK_SPD_10G || 2086 link_spd == ICE_PTP_LNK_SPD_25G || 2087 link_spd == ICE_PTP_LNK_SPD_40G || 2088 link_spd == ICE_PTP_LNK_SPD_50G) { 2089 /* If Clause 74 FEC, always calculate PMD adjust */ 2090 if (pmd_align != 65 || fec_mode == ICE_PTP_FEC_MODE_CLAUSE74) 2091 mult = pmd_align; 2092 else 2093 mult = 0; 2094 } else if (link_spd == ICE_PTP_LNK_SPD_25G_RS || 2095 link_spd == ICE_PTP_LNK_SPD_50G_RS || 2096 link_spd == ICE_PTP_LNK_SPD_100G_RS) { 2097 if (pmd_align < 17) 2098 mult = pmd_align + 40; 2099 else 2100 mult = pmd_align; 2101 } else { 2102 ice_debug(hw, ICE_DBG_PTP, "Unknown link speed %d, skipping PMD adjustment\n", 2103 link_spd); 2104 mult = 0; 2105 } 2106 2107 /* In some cases, there's no need to adjust for the PMD alignment */ 2108 if (!mult) { 2109 *pmd_adj = 0; 2110 return 0; 2111 } 2112 2113 /* Calculate the adjustment by multiplying TUs per second by the 2114 * appropriate multiplier and divisor. To avoid overflow, we first 2115 * divide by 125, and then handle remaining divisor based on the link 2116 * speed pmd_adj_divisor value. 2117 */ 2118 adj = div_u64(tu_per_sec, 125); 2119 adj *= mult; 2120 adj = div_u64(adj, e822_vernier[link_spd].pmd_adj_divisor); 2121 2122 /* Finally, for 25G-RS and 50G-RS, a further adjustment for the Rx 2123 * cycle count is necessary. 2124 */ 2125 if (link_spd == ICE_PTP_LNK_SPD_25G_RS) { 2126 u64 cycle_adj; 2127 u8 rx_cycle; 2128 2129 err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_40_TO_160_CNT, 2130 &val); 2131 if (err) { 2132 ice_debug(hw, ICE_DBG_PTP, "Failed to read 25G-RS Rx cycle count, err %d\n", 2133 err); 2134 return err; 2135 } 2136 2137 rx_cycle = val & P_REG_RX_40_TO_160_CNT_RXCYC_M; 2138 if (rx_cycle) { 2139 mult = (4 - rx_cycle) * 40; 2140 2141 cycle_adj = div_u64(tu_per_sec, 125); 2142 cycle_adj *= mult; 2143 cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor); 2144 2145 adj += cycle_adj; 2146 } 2147 } else if (link_spd == ICE_PTP_LNK_SPD_50G_RS) { 2148 u64 cycle_adj; 2149 u8 rx_cycle; 2150 2151 err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_80_TO_160_CNT, 2152 &val); 2153 if (err) { 2154 ice_debug(hw, ICE_DBG_PTP, "Failed to read 50G-RS Rx cycle count, err %d\n", 2155 err); 2156 return err; 2157 } 2158 2159 rx_cycle = val & P_REG_RX_80_TO_160_CNT_RXCYC_M; 2160 if (rx_cycle) { 2161 mult = rx_cycle * 40; 2162 2163 cycle_adj = div_u64(tu_per_sec, 125); 2164 cycle_adj *= mult; 2165 cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor); 2166 2167 adj += cycle_adj; 2168 } 2169 } 2170 2171 /* Return the calculated adjustment */ 2172 *pmd_adj = adj; 2173 2174 return 0; 2175 } 2176 2177 /** 2178 * ice_calc_fixed_rx_offset_e82x - Calculated the fixed Rx offset for a port 2179 * @hw: pointer to HW struct 2180 * @link_spd: The Link speed to calculate for 2181 * 2182 * Determine the fixed Rx latency for a given link speed. 2183 */ 2184 static u64 2185 ice_calc_fixed_rx_offset_e82x(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) 2186 { 2187 u64 cur_freq, clk_incval, tu_per_sec, fixed_offset; 2188 2189 cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw)); 2190 clk_incval = ice_ptp_read_src_incval(hw); 2191 2192 /* Calculate TUs per second */ 2193 tu_per_sec = cur_freq * clk_incval; 2194 2195 /* Calculate number of TUs to add for the fixed Rx latency. 
Since the
2196 * latency measurement is in 1/100th of a nanosecond, we need to
2197 * multiply by tu_per_sec and then divide by 1e11. This calculation
2198 * overflows 64 bit integer arithmetic, so break it up into two
2199 * divisions by 1e4 first then by 1e7.
2200 */
2201 fixed_offset = div_u64(tu_per_sec, 10000);
2202 fixed_offset *= e822_vernier[link_spd].rx_fixed_delay;
2203 fixed_offset = div_u64(fixed_offset, 10000000);
2204
2205 return fixed_offset;
2206 }
2207
2208 /**
2209 * ice_phy_cfg_rx_offset_e82x - Configure total Rx timestamp offset
2210 * @hw: pointer to the HW struct
2211 * @port: the PHY port to configure
2212 *
2213 * Program the P_REG_TOTAL_RX_OFFSET register with the number of Time Units to
2214 * adjust Rx timestamps by. This combines calculations from the Vernier offset
2215 * measurements taken in hardware with some data about known fixed delay as
2216 * well as adjusting for multi-lane alignment delay.
2217 *
2218 * This function will not return successfully until the Rx offset calculations
2219 * have been completed, which requires waiting until at least one packet has
2220 * been received by the device. It is safe to call this function periodically
2221 * until calibration succeeds, as it will only program the offset once.
2222 *
2223 * This function must be called only after the offset registers are valid,
2224 * i.e. after the Vernier calibration wait has passed, to ensure that the PHY
2225 * has measured the offset.
2226 *
2227 * To avoid overflow, when calculating the offset based on the known static
2228 * latency values, we use measurements in 1/100th of a nanosecond, and divide
2229 * the TUs per second up front. This avoids overflow while allowing
2230 * calculation of the adjustment using integer arithmetic.
2231 *
2232 * Returns zero on success, -EBUSY if the hardware vernier offset
2233 * calibration has not completed, or another error code on failure.
2234 */
2235 int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port)
2236 {
2237 enum ice_ptp_link_spd link_spd;
2238 enum ice_ptp_fec_mode fec_mode;
2239 u64 total_offset, pmd, val;
2240 int err;
2241 u32 reg;
2242
2243 /* Nothing to do if we've already programmed the offset */
2244 err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_OR, &reg);
2245 if (err) {
2246 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OR for port %u, err %d\n",
2247 port, err);
2248 return err;
2249 }
2250
2251 if (reg)
2252 return 0;
2253
2254 err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_OV_STATUS, &reg);
2255 if (err) {
2256 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, err %d\n",
2257 port, err);
2258 return err;
2259 }
2260
2261 if (!(reg & P_REG_RX_OV_STATUS_OV_M))
2262 return -EBUSY;
2263
2264 err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode);
2265 if (err)
2266 return err;
2267
2268 total_offset = ice_calc_fixed_rx_offset_e82x(hw, link_spd);
2269
2270 /* Read the first Vernier offset from the PHY register and add it to
2271 * the total offset.
2272 */
2273 err = ice_read_64b_phy_reg_e82x(hw, port,
2274 P_REG_PAR_PCS_RX_OFFSET_L,
2275 &val);
2276 if (err)
2277 return err;
2278
2279 total_offset += val;
2280
2281 /* For Rx, all multi-lane link speeds include a second Vernier
2282 * calibration, because the lanes might not be aligned.
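 * In this driver that means 40G, 50G, 50G-RS and 100G-RS, as checked below;
 * this differs from the Tx path above, which only needs the second offset
 * for the RS-FEC multi-lane speeds.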
2283 */ 2284 if (link_spd == ICE_PTP_LNK_SPD_40G || 2285 link_spd == ICE_PTP_LNK_SPD_50G || 2286 link_spd == ICE_PTP_LNK_SPD_50G_RS || 2287 link_spd == ICE_PTP_LNK_SPD_100G_RS) { 2288 err = ice_read_64b_phy_reg_e82x(hw, port, 2289 P_REG_PAR_RX_TIME_L, 2290 &val); 2291 if (err) 2292 return err; 2293 2294 total_offset += val; 2295 } 2296 2297 /* In addition, Rx must account for the PMD alignment */ 2298 err = ice_phy_calc_pmd_adj_e82x(hw, port, link_spd, fec_mode, &pmd); 2299 if (err) 2300 return err; 2301 2302 /* For RS-FEC, this adjustment adds delay, but for other modes, it 2303 * subtracts delay. 2304 */ 2305 if (fec_mode == ICE_PTP_FEC_MODE_RS_FEC) 2306 total_offset += pmd; 2307 else 2308 total_offset -= pmd; 2309 2310 /* Now that the total offset has been calculated, program it to the 2311 * PHY and indicate that the Rx offset is ready. After this, 2312 * timestamps will be enabled. 2313 */ 2314 err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TOTAL_RX_OFFSET_L, 2315 total_offset); 2316 if (err) 2317 return err; 2318 2319 err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 1); 2320 if (err) 2321 return err; 2322 2323 dev_info(ice_hw_to_dev(hw), "Port=%d Rx vernier offset calibration complete\n", 2324 port); 2325 2326 return 0; 2327 } 2328 2329 /** 2330 * ice_read_phy_and_phc_time_e82x - Simultaneously capture PHC and PHY time 2331 * @hw: pointer to the HW struct 2332 * @port: the PHY port to read 2333 * @phy_time: on return, the 64bit PHY timer value 2334 * @phc_time: on return, the lower 64bits of PHC time 2335 * 2336 * Issue a ICE_PTP_READ_TIME timer command to simultaneously capture the PHY 2337 * and PHC timer values. 2338 */ 2339 static int 2340 ice_read_phy_and_phc_time_e82x(struct ice_hw *hw, u8 port, u64 *phy_time, 2341 u64 *phc_time) 2342 { 2343 u64 tx_time, rx_time; 2344 u32 zo, lo; 2345 u8 tmr_idx; 2346 int err; 2347 2348 tmr_idx = ice_get_ptp_src_clock_index(hw); 2349 2350 /* Prepare the PHC timer for a ICE_PTP_READ_TIME capture command */ 2351 ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME); 2352 2353 /* Prepare the PHY timer for a ICE_PTP_READ_TIME capture command */ 2354 err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_READ_TIME); 2355 if (err) 2356 return err; 2357 2358 /* Issue the sync to start the ICE_PTP_READ_TIME capture */ 2359 ice_ptp_exec_tmr_cmd(hw); 2360 2361 /* Read the captured PHC time from the shadow time registers */ 2362 zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx)); 2363 lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx)); 2364 *phc_time = (u64)lo << 32 | zo; 2365 2366 /* Read the captured PHY time from the PHY shadow registers */ 2367 err = ice_ptp_read_port_capture(hw, port, &tx_time, &rx_time); 2368 if (err) 2369 return err; 2370 2371 /* If the PHY Tx and Rx timers don't match, log a warning message. 2372 * Note that this should not happen in normal circumstances since the 2373 * driver always programs them together. 2374 */ 2375 if (tx_time != rx_time) 2376 dev_warn(ice_hw_to_dev(hw), 2377 "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n", 2378 port, (unsigned long long)tx_time, 2379 (unsigned long long)rx_time); 2380 2381 *phy_time = tx_time; 2382 2383 return 0; 2384 } 2385 2386 /** 2387 * ice_sync_phy_timer_e82x - Synchronize the PHY timer with PHC timer 2388 * @hw: pointer to the HW struct 2389 * @port: the PHY port to synchronize 2390 * 2391 * Perform an adjustment to ensure that the PHY and PHC timers are in sync. 
2392 * This is done by issuing a ICE_PTP_READ_TIME command which triggers a 2393 * simultaneous read of the PHY timer and PHC timer. Then we use the 2394 * difference to calculate an appropriate 2s complement addition to add 2395 * to the PHY timer in order to ensure it reads the same value as the 2396 * primary PHC timer. 2397 */ 2398 static int ice_sync_phy_timer_e82x(struct ice_hw *hw, u8 port) 2399 { 2400 u64 phc_time, phy_time, difference; 2401 int err; 2402 2403 if (!ice_ptp_lock(hw)) { 2404 ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n"); 2405 return -EBUSY; 2406 } 2407 2408 err = ice_read_phy_and_phc_time_e82x(hw, port, &phy_time, &phc_time); 2409 if (err) 2410 goto err_unlock; 2411 2412 /* Calculate the amount required to add to the port time in order for 2413 * it to match the PHC time. 2414 * 2415 * Note that the port adjustment is done using 2s complement 2416 * arithmetic. This is convenient since it means that we can simply 2417 * calculate the difference between the PHC time and the port time, 2418 * and it will be interpreted correctly. 2419 */ 2420 difference = phc_time - phy_time; 2421 2422 err = ice_ptp_prep_port_adj_e82x(hw, port, (s64)difference); 2423 if (err) 2424 goto err_unlock; 2425 2426 err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_ADJ_TIME); 2427 if (err) 2428 goto err_unlock; 2429 2430 /* Do not perform any action on the main timer */ 2431 ice_ptp_src_cmd(hw, ICE_PTP_NOP); 2432 2433 /* Issue the sync to activate the time adjustment */ 2434 ice_ptp_exec_tmr_cmd(hw); 2435 2436 /* Re-capture the timer values to flush the command registers and 2437 * verify that the time was properly adjusted. 2438 */ 2439 err = ice_read_phy_and_phc_time_e82x(hw, port, &phy_time, &phc_time); 2440 if (err) 2441 goto err_unlock; 2442 2443 dev_info(ice_hw_to_dev(hw), 2444 "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n", 2445 port, (unsigned long long)phy_time, 2446 (unsigned long long)phc_time); 2447 2448 ice_ptp_unlock(hw); 2449 2450 return 0; 2451 2452 err_unlock: 2453 ice_ptp_unlock(hw); 2454 return err; 2455 } 2456 2457 /** 2458 * ice_stop_phy_timer_e82x - Stop the PHY clock timer 2459 * @hw: pointer to the HW struct 2460 * @port: the PHY port to stop 2461 * @soft_reset: if true, hold the SOFT_RESET bit of P_REG_PS 2462 * 2463 * Stop the clock of a PHY port. This must be done as part of the flow to 2464 * re-calibrate Tx and Rx timestamping offsets whenever the clock time is 2465 * initialized or when link speed changes. 
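 *
 * Note that ice_start_phy_timer_e82x() below begins by calling this function
 * with soft_reset set to false, before re-programming the Vernier conversion
 * registers and re-synchronizing the port timer.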
2466 */ 2467 int 2468 ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset) 2469 { 2470 int err; 2471 u32 val; 2472 2473 err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 0); 2474 if (err) 2475 return err; 2476 2477 err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 0); 2478 if (err) 2479 return err; 2480 2481 err = ice_read_phy_reg_e82x(hw, port, P_REG_PS, &val); 2482 if (err) 2483 return err; 2484 2485 val &= ~P_REG_PS_START_M; 2486 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); 2487 if (err) 2488 return err; 2489 2490 val &= ~P_REG_PS_ENA_CLK_M; 2491 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); 2492 if (err) 2493 return err; 2494 2495 if (soft_reset) { 2496 val |= P_REG_PS_SFT_RESET_M; 2497 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); 2498 if (err) 2499 return err; 2500 } 2501 2502 ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port); 2503 2504 return 0; 2505 } 2506 2507 /** 2508 * ice_start_phy_timer_e82x - Start the PHY clock timer 2509 * @hw: pointer to the HW struct 2510 * @port: the PHY port to start 2511 * 2512 * Start the clock of a PHY port. This must be done as part of the flow to 2513 * re-calibrate Tx and Rx timestamping offsets whenever the clock time is 2514 * initialized or when link speed changes. 2515 * 2516 * Hardware will take Vernier measurements on Tx or Rx of packets. 2517 */ 2518 int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port) 2519 { 2520 u32 lo, hi, val; 2521 u64 incval; 2522 u8 tmr_idx; 2523 int err; 2524 2525 tmr_idx = ice_get_ptp_src_clock_index(hw); 2526 2527 err = ice_stop_phy_timer_e82x(hw, port, false); 2528 if (err) 2529 return err; 2530 2531 ice_phy_cfg_lane_e82x(hw, port); 2532 2533 err = ice_phy_cfg_uix_e82x(hw, port); 2534 if (err) 2535 return err; 2536 2537 err = ice_phy_cfg_parpcs_e82x(hw, port); 2538 if (err) 2539 return err; 2540 2541 lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx)); 2542 hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx)); 2543 incval = (u64)hi << 32 | lo; 2544 2545 err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L, incval); 2546 if (err) 2547 return err; 2548 2549 err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL); 2550 if (err) 2551 return err; 2552 2553 /* Do not perform any action on the main timer */ 2554 ice_ptp_src_cmd(hw, ICE_PTP_NOP); 2555 2556 ice_ptp_exec_tmr_cmd(hw); 2557 2558 err = ice_read_phy_reg_e82x(hw, port, P_REG_PS, &val); 2559 if (err) 2560 return err; 2561 2562 val |= P_REG_PS_SFT_RESET_M; 2563 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); 2564 if (err) 2565 return err; 2566 2567 val |= P_REG_PS_START_M; 2568 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); 2569 if (err) 2570 return err; 2571 2572 val &= ~P_REG_PS_SFT_RESET_M; 2573 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); 2574 if (err) 2575 return err; 2576 2577 err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL); 2578 if (err) 2579 return err; 2580 2581 ice_ptp_exec_tmr_cmd(hw); 2582 2583 val |= P_REG_PS_ENA_CLK_M; 2584 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); 2585 if (err) 2586 return err; 2587 2588 val |= P_REG_PS_LOAD_OFFSET_M; 2589 err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); 2590 if (err) 2591 return err; 2592 2593 ice_ptp_exec_tmr_cmd(hw); 2594 2595 err = ice_sync_phy_timer_e82x(hw, port); 2596 if (err) 2597 return err; 2598 2599 ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port); 2600 2601 return 0; 2602 } 2603 2604 /** 2605 * ice_get_phy_tx_tstamp_ready_e82x - Read Tx memory status register 2606 * @hw: pointer to 
the HW struct
2607 * @quad: the timestamp quad to read from
2608 * @tstamp_ready: contents of the Tx memory status register
2609 *
2610 * Read the Q_REG_TX_MEMORY_STATUS register indicating which timestamps in
2611 * the PHY are ready. A set bit means the corresponding timestamp is valid and
2612 * ready to be captured from the PHY timestamp block.
2613 */
2614 static int
2615 ice_get_phy_tx_tstamp_ready_e82x(struct ice_hw *hw, u8 quad, u64 *tstamp_ready)
2616 {
2617 u32 hi, lo;
2618 int err;
2619
2620 err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEMORY_STATUS_U, &hi);
2621 if (err) {
2622 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_U for quad %u, err %d\n",
2623 quad, err);
2624 return err;
2625 }
2626
2627 err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEMORY_STATUS_L, &lo);
2628 if (err) {
2629 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_L for quad %u, err %d\n",
2630 quad, err);
2631 return err;
2632 }
2633
2634 *tstamp_ready = (u64)hi << 32 | (u64)lo;
2635
2636 return 0;
2637 }
2638
2639 /* E810 functions
2640 *
2641 * The following functions operate on the E810 series devices which use
2642 * a separate external PHY.
2643 */
2644
2645 /**
2646 * ice_read_phy_reg_e810 - Read register from external PHY on E810
2647 * @hw: pointer to the HW struct
2648 * @addr: the address to read from
2649 * @val: On return, the value read from the PHY
2650 *
2651 * Read a register from the external PHY on the E810 device.
2652 */
2653 static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
2654 {
2655 struct ice_sbq_msg_input msg = {0};
2656 int err;
2657
2658 msg.msg_addr_low = lower_16_bits(addr);
2659 msg.msg_addr_high = upper_16_bits(addr);
2660 msg.opcode = ice_sbq_msg_rd;
2661 msg.dest_dev = rmn_0;
2662
2663 err = ice_sbq_rw_reg(hw, &msg);
2664 if (err) {
2665 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
2666 err);
2667 return err;
2668 }
2669
2670 *val = msg.data;
2671
2672 return 0;
2673 }
2674
2675 /**
2676 * ice_write_phy_reg_e810 - Write register on external PHY on E810
2677 * @hw: pointer to the HW struct
2678 * @addr: the address to write to
2679 * @val: the value to write to the PHY
2680 *
2681 * Write a value to a register of the external PHY on the E810 device.
2682 */
2683 static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
2684 {
2685 struct ice_sbq_msg_input msg = {0};
2686 int err;
2687
2688 msg.msg_addr_low = lower_16_bits(addr);
2689 msg.msg_addr_high = upper_16_bits(addr);
2690 msg.opcode = ice_sbq_msg_wr;
2691 msg.dest_dev = rmn_0;
2692 msg.data = val;
2693
2694 err = ice_sbq_rw_reg(hw, &msg);
2695 if (err) {
2696 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
2697 err);
2698 return err;
2699 }
2700
2701 return 0;
2702 }
2703
2704 /**
2705 * ice_read_phy_tstamp_ll_e810 - Read PHY timestamp registers through the FW
2706 * @hw: pointer to the HW struct
2707 * @idx: the timestamp index to read
2708 * @hi: 8 bit timestamp high value
2709 * @lo: 32 bit timestamp low value
2710 *
2711 * Read an 8 bit timestamp high value and 32 bit timestamp low value out of the
2712 * timestamp block of the external PHY on the E810 device using the low latency
2713 * timestamp read.
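 *
 * The helper below polls PF_SB_ATQBAL, waiting 10 microseconds between
 * reads, and gives up after TS_LL_READ_RETRIES attempts if firmware never
 * clears the request bit.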
2714 */
2715 static int
2716 ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo)
2717 {
2718 u32 val;
2719 u8 i;
2720
2721 /* Write TS index to read to the PF register so the FW can read it */
2722 val = FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS;
2723 wr32(hw, PF_SB_ATQBAL, val);
2724
2725 /* Read the register repeatedly until the FW provides us the TS */
2726 for (i = TS_LL_READ_RETRIES; i > 0; i--) {
2727 val = rd32(hw, PF_SB_ATQBAL);
2728
2729 /* When the bit is cleared, the TS is ready in the register */
2730 if (!(FIELD_GET(TS_LL_READ_TS, val))) {
2731 /* High 8 bit value of the TS is in bits 16:23 */
2732 *hi = FIELD_GET(TS_LL_READ_TS_HIGH, val);
2733
2734 /* Read the low 32 bit value and set the TS valid bit */
2735 *lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID;
2736 return 0;
2737 }
2738
2739 udelay(10);
2740 }
2741
2742 /* FW failed to provide the TS in time */
2743 ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
2744 return -EINVAL;
2745 }
2746
2747 /**
2748 * ice_read_phy_tstamp_sbq_e810 - Read PHY timestamp registers through the sbq
2749 * @hw: pointer to the HW struct
2750 * @lport: the lport to read from
2751 * @idx: the timestamp index to read
2752 * @hi: 8 bit timestamp high value
2753 * @lo: 32 bit timestamp low value
2754 *
2755 * Read an 8 bit timestamp high value and 32 bit timestamp low value out of the
2756 * timestamp block of the external PHY on the E810 device using sideband queue.
2757 */
2758 static int
2759 ice_read_phy_tstamp_sbq_e810(struct ice_hw *hw, u8 lport, u8 idx, u8 *hi,
2760 u32 *lo)
2761 {
2762 u32 hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
2763 u32 lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
2764 u32 lo_val, hi_val;
2765 int err;
2766
2767 err = ice_read_phy_reg_e810(hw, lo_addr, &lo_val);
2768 if (err) {
2769 ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
2770 err);
2771 return err;
2772 }
2773
2774 err = ice_read_phy_reg_e810(hw, hi_addr, &hi_val);
2775 if (err) {
2776 ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
2777 err);
2778 return err;
2779 }
2780
2781 *lo = lo_val;
2782 *hi = (u8)hi_val;
2783
2784 return 0;
2785 }
2786
2787 /**
2788 * ice_read_phy_tstamp_e810 - Read a PHY timestamp out of the external PHY
2789 * @hw: pointer to the HW struct
2790 * @lport: the lport to read from
2791 * @idx: the timestamp index to read
2792 * @tstamp: on return, the 40bit timestamp value
2793 *
2794 * Read a 40bit timestamp value out of the timestamp block of the external PHY
2795 * on the E810 device.
2796 */
2797 static int
2798 ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
2799 {
2800 u32 lo = 0;
2801 u8 hi = 0;
2802 int err;
2803
2804 if (hw->dev_caps.ts_dev_info.ts_ll_read)
2805 err = ice_read_phy_tstamp_ll_e810(hw, idx, &hi, &lo);
2806 else
2807 err = ice_read_phy_tstamp_sbq_e810(hw, lport, idx, &hi, &lo);
2808
2809 if (err)
2810 return err;
2811
2812 /* For E810 devices, the timestamp is reported with the lower 32 bits
2813 * in the low register, and the upper 8 bits in the high register.
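 * For instance, with illustrative values hi = 0x12 and lo = 0x34567890, and
 * assuming TS_HIGH_S shifts the high byte above the 32 bit low word while
 * TS_LOW_M keeps only the low 32 bits of lo, the combined 40 bit timestamp
 * would be 0x1234567890.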
2814 */ 2815 *tstamp = ((u64)hi) << TS_HIGH_S | ((u64)lo & TS_LOW_M); 2816 2817 return 0; 2818 } 2819 2820 /** 2821 * ice_clear_phy_tstamp_e810 - Clear a timestamp from the external PHY 2822 * @hw: pointer to the HW struct 2823 * @lport: the lport to read from 2824 * @idx: the timestamp index to reset 2825 * 2826 * Read the timestamp and then forcibly overwrite its value to clear the valid 2827 * bit from the timestamp block of the external PHY on the E810 device. 2828 * 2829 * This function should only be called on an idx whose bit is set according to 2830 * ice_get_phy_tx_tstamp_ready(). 2831 */ 2832 static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx) 2833 { 2834 u32 lo_addr, hi_addr; 2835 u64 unused_tstamp; 2836 int err; 2837 2838 err = ice_read_phy_tstamp_e810(hw, lport, idx, &unused_tstamp); 2839 if (err) { 2840 ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for lport %u, idx %u, err %d\n", 2841 lport, idx, err); 2842 return err; 2843 } 2844 2845 lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx); 2846 hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx); 2847 2848 err = ice_write_phy_reg_e810(hw, lo_addr, 0); 2849 if (err) { 2850 ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register for lport %u, idx %u, err %d\n", 2851 lport, idx, err); 2852 return err; 2853 } 2854 2855 err = ice_write_phy_reg_e810(hw, hi_addr, 0); 2856 if (err) { 2857 ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register for lport %u, idx %u, err %d\n", 2858 lport, idx, err); 2859 return err; 2860 } 2861 2862 return 0; 2863 } 2864 2865 /** 2866 * ice_ptp_init_phy_e810 - Enable PTP function on the external PHY 2867 * @hw: pointer to HW struct 2868 * 2869 * Enable the timesync PTP functionality for the external PHY connected to 2870 * this function. 2871 */ 2872 int ice_ptp_init_phy_e810(struct ice_hw *hw) 2873 { 2874 u8 tmr_idx; 2875 int err; 2876 2877 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 2878 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx), 2879 GLTSYN_ENA_TSYN_ENA_M); 2880 if (err) 2881 ice_debug(hw, ICE_DBG_PTP, "PTP failed in ena_phy_time_syn %d\n", 2882 err); 2883 2884 return err; 2885 } 2886 2887 /** 2888 * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization 2889 * @hw: pointer to HW struct 2890 * 2891 * Perform E810-specific PTP hardware clock initialization steps. 2892 */ 2893 static int ice_ptp_init_phc_e810(struct ice_hw *hw) 2894 { 2895 /* Ensure synchronization delay is zero */ 2896 wr32(hw, GLTSYN_SYNC_DLAY, 0); 2897 2898 /* Initialize the PHY */ 2899 return ice_ptp_init_phy_e810(hw); 2900 } 2901 2902 /** 2903 * ice_ptp_prep_phy_time_e810 - Prepare PHY port with initial time 2904 * @hw: Board private structure 2905 * @time: Time to initialize the PHY port clock to 2906 * 2907 * Program the PHY port ETH_GLTSYN_SHTIME registers in preparation setting the 2908 * initial clock time. The time will not actually be programmed until the 2909 * driver issues an ICE_PTP_INIT_TIME command. 2910 * 2911 * The time value is the upper 32 bits of the PHY timer, usually in units of 2912 * nominal nanoseconds. 
2913 */ 2914 static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time) 2915 { 2916 u8 tmr_idx; 2917 int err; 2918 2919 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 2920 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0); 2921 if (err) { 2922 ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_0, err %d\n", 2923 err); 2924 return err; 2925 } 2926 2927 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx), time); 2928 if (err) { 2929 ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_L, err %d\n", 2930 err); 2931 return err; 2932 } 2933 2934 return 0; 2935 } 2936 2937 /** 2938 * ice_ptp_prep_phy_adj_e810 - Prep PHY port for a time adjustment 2939 * @hw: pointer to HW struct 2940 * @adj: adjustment value to program 2941 * 2942 * Prepare the PHY port for an atomic adjustment by programming the PHY 2943 * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual adjustment 2944 * is completed by issuing an ICE_PTP_ADJ_TIME sync command. 2945 * 2946 * The adjustment value only contains the portion used for the upper 32bits of 2947 * the PHY timer, usually in units of nominal nanoseconds. Negative 2948 * adjustments are supported using 2s complement arithmetic. 2949 */ 2950 static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj) 2951 { 2952 u8 tmr_idx; 2953 int err; 2954 2955 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 2956 2957 /* Adjustments are represented as signed 2's complement values in 2958 * nanoseconds. Sub-nanosecond adjustment is not supported. 2959 */ 2960 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), 0); 2961 if (err) { 2962 ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_L, err %d\n", 2963 err); 2964 return err; 2965 } 2966 2967 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), adj); 2968 if (err) { 2969 ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_H, err %d\n", 2970 err); 2971 return err; 2972 } 2973 2974 return 0; 2975 } 2976 2977 /** 2978 * ice_ptp_prep_phy_incval_e810 - Prep PHY port increment value change 2979 * @hw: pointer to HW struct 2980 * @incval: The new 40bit increment value to prepare 2981 * 2982 * Prepare the PHY port for a new increment value by programming the PHY 2983 * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual change is 2984 * completed by issuing an ICE_PTP_INIT_INCVAL command. 2985 */ 2986 static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval) 2987 { 2988 u32 high, low; 2989 u8 tmr_idx; 2990 int err; 2991 2992 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 2993 low = lower_32_bits(incval); 2994 high = upper_32_bits(incval); 2995 2996 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), low); 2997 if (err) { 2998 ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_L, err %d\n", 2999 err); 3000 return err; 3001 } 3002 3003 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), high); 3004 if (err) { 3005 ice_debug(hw, ICE_DBG_PTP, "Failed to write incval PHY SHADJ_H, err %d\n", 3006 err); 3007 return err; 3008 } 3009 3010 return 0; 3011 } 3012 3013 /** 3014 * ice_ptp_port_cmd_e810 - Prepare all external PHYs for a timer command 3015 * @hw: pointer to HW struct 3016 * @cmd: Command to be sent to the port 3017 * 3018 * Prepare the external PHYs connected to this device for a timer sync 3019 * command. 
3020 */ 3021 static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) 3022 { 3023 u32 cmd_val, val; 3024 int err; 3025 3026 switch (cmd) { 3027 case ICE_PTP_INIT_TIME: 3028 cmd_val = GLTSYN_CMD_INIT_TIME; 3029 break; 3030 case ICE_PTP_INIT_INCVAL: 3031 cmd_val = GLTSYN_CMD_INIT_INCVAL; 3032 break; 3033 case ICE_PTP_ADJ_TIME: 3034 cmd_val = GLTSYN_CMD_ADJ_TIME; 3035 break; 3036 case ICE_PTP_READ_TIME: 3037 cmd_val = GLTSYN_CMD_READ_TIME; 3038 break; 3039 case ICE_PTP_ADJ_TIME_AT_TIME: 3040 cmd_val = GLTSYN_CMD_ADJ_INIT_TIME; 3041 break; 3042 case ICE_PTP_NOP: 3043 return 0; 3044 } 3045 3046 /* Read, modify, write */ 3047 err = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val); 3048 if (err) { 3049 ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, err %d\n", err); 3050 return err; 3051 } 3052 3053 /* Modify necessary bits only and perform write */ 3054 val &= ~TS_CMD_MASK_E810; 3055 val |= cmd_val; 3056 3057 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val); 3058 if (err) { 3059 ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, err %d\n", err); 3060 return err; 3061 } 3062 3063 return 0; 3064 } 3065 3066 /** 3067 * ice_get_phy_tx_tstamp_ready_e810 - Read Tx memory status register 3068 * @hw: pointer to the HW struct 3069 * @port: the PHY port to read 3070 * @tstamp_ready: contents of the Tx memory status register 3071 * 3072 * E810 devices do not use a Tx memory status register. Instead simply 3073 * indicate that all timestamps are currently ready. 3074 */ 3075 static int 3076 ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready) 3077 { 3078 *tstamp_ready = 0xFFFFFFFFFFFFFFFF; 3079 return 0; 3080 } 3081 3082 /* E810T SMA functions 3083 * 3084 * The following functions operate specifically on E810T hardware and are used 3085 * to access the extended GPIOs available. 3086 */ 3087 3088 /** 3089 * ice_get_pca9575_handle 3090 * @hw: pointer to the hw struct 3091 * @pca9575_handle: GPIO controller's handle 3092 * 3093 * Find and return the GPIO controller's handle in the netlist. 3094 * When found - the value will be cached in the hw structure and following calls 3095 * will return cached value 3096 */ 3097 static int 3098 ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle) 3099 { 3100 struct ice_aqc_get_link_topo *cmd; 3101 struct ice_aq_desc desc; 3102 int status; 3103 u8 idx; 3104 3105 /* If handle was read previously return cached value */ 3106 if (hw->io_expander_handle) { 3107 *pca9575_handle = hw->io_expander_handle; 3108 return 0; 3109 } 3110 3111 /* If handle was not detected read it from the netlist */ 3112 cmd = &desc.params.get_link_topo; 3113 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); 3114 3115 /* Set node type to GPIO controller */ 3116 cmd->addr.topo_params.node_type_ctx = 3117 (ICE_AQC_LINK_TOPO_NODE_TYPE_M & 3118 ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL); 3119 3120 #define SW_PCA9575_SFP_TOPO_IDX 2 3121 #define SW_PCA9575_QSFP_TOPO_IDX 1 3122 3123 /* Check if the SW IO expander controlling SMA exists in the netlist. 
*/ 3124 if (hw->device_id == ICE_DEV_ID_E810C_SFP) 3125 idx = SW_PCA9575_SFP_TOPO_IDX; 3126 else if (hw->device_id == ICE_DEV_ID_E810C_QSFP) 3127 idx = SW_PCA9575_QSFP_TOPO_IDX; 3128 else 3129 return -EOPNOTSUPP; 3130 3131 cmd->addr.topo_params.index = idx; 3132 3133 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 3134 if (status) 3135 return -EOPNOTSUPP; 3136 3137 /* Verify if we found the right IO expander type */ 3138 if (desc.params.get_link_topo.node_part_num != 3139 ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575) 3140 return -EOPNOTSUPP; 3141 3142 /* If present save the handle and return it */ 3143 hw->io_expander_handle = 3144 le16_to_cpu(desc.params.get_link_topo.addr.handle); 3145 *pca9575_handle = hw->io_expander_handle; 3146 3147 return 0; 3148 } 3149 3150 /** 3151 * ice_read_sma_ctrl_e810t 3152 * @hw: pointer to the hw struct 3153 * @data: pointer to data to be read from the GPIO controller 3154 * 3155 * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the 3156 * PCA9575 expander, so only bits 3-7 in data are valid. 3157 */ 3158 int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data) 3159 { 3160 int status; 3161 u16 handle; 3162 u8 i; 3163 3164 status = ice_get_pca9575_handle(hw, &handle); 3165 if (status) 3166 return status; 3167 3168 *data = 0; 3169 3170 for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) { 3171 bool pin; 3172 3173 status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET, 3174 &pin, NULL); 3175 if (status) 3176 break; 3177 *data |= (u8)(!pin) << i; 3178 } 3179 3180 return status; 3181 } 3182 3183 /** 3184 * ice_write_sma_ctrl_e810t 3185 * @hw: pointer to the hw struct 3186 * @data: data to be written to the GPIO controller 3187 * 3188 * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1 3189 * of the PCA9575 expander, so only bits 3-7 in data are valid. 3190 */ 3191 int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data) 3192 { 3193 int status; 3194 u16 handle; 3195 u8 i; 3196 3197 status = ice_get_pca9575_handle(hw, &handle); 3198 if (status) 3199 return status; 3200 3201 for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) { 3202 bool pin; 3203 3204 pin = !(data & (1 << i)); 3205 status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET, 3206 pin, NULL); 3207 if (status) 3208 break; 3209 } 3210 3211 return status; 3212 } 3213 3214 /** 3215 * ice_read_pca9575_reg_e810t 3216 * @hw: pointer to the hw struct 3217 * @offset: GPIO controller register offset 3218 * @data: pointer to data to be read from the GPIO controller 3219 * 3220 * Read the register from the GPIO controller 3221 */ 3222 int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data) 3223 { 3224 struct ice_aqc_link_topo_addr link_topo; 3225 __le16 addr; 3226 u16 handle; 3227 int err; 3228 3229 memset(&link_topo, 0, sizeof(link_topo)); 3230 3231 err = ice_get_pca9575_handle(hw, &handle); 3232 if (err) 3233 return err; 3234 3235 link_topo.handle = cpu_to_le16(handle); 3236 link_topo.topo_params.node_type_ctx = 3237 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, 3238 ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED); 3239 3240 addr = cpu_to_le16((u16)offset); 3241 3242 return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL); 3243 } 3244 3245 /* Device agnostic functions 3246 * 3247 * The following functions implement shared behavior common to both E822 and 3248 * E810 devices, possibly calling a device specific implementation where 3249 * necessary. 
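 *
 * For example, ice_read_phy_tstamp() later in this section simply switches
 * on hw->phy_model and dispatches to the E810 or E82X specific helper.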
3250 */ 3251 3252 /** 3253 * ice_ptp_lock - Acquire PTP global semaphore register lock 3254 * @hw: pointer to the HW struct 3255 * 3256 * Acquire the global PTP hardware semaphore lock. Returns true if the lock 3257 * was acquired, false otherwise. 3258 * 3259 * The PFTSYN_SEM register sets the busy bit on read, returning the previous 3260 * value. If software sees the busy bit cleared, this means that this function 3261 * acquired the lock (and the busy bit is now set). If software sees the busy 3262 * bit set, it means that another function acquired the lock. 3263 * 3264 * Software must clear the busy bit with a write to release the lock for other 3265 * functions when done. 3266 */ 3267 bool ice_ptp_lock(struct ice_hw *hw) 3268 { 3269 u32 hw_lock; 3270 int i; 3271 3272 #define MAX_TRIES 15 3273 3274 for (i = 0; i < MAX_TRIES; i++) { 3275 hw_lock = rd32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); 3276 hw_lock = hw_lock & PFTSYN_SEM_BUSY_M; 3277 if (hw_lock) { 3278 /* Somebody is holding the lock */ 3279 usleep_range(5000, 6000); 3280 continue; 3281 } 3282 3283 break; 3284 } 3285 3286 return !hw_lock; 3287 } 3288 3289 /** 3290 * ice_ptp_unlock - Release PTP global semaphore register lock 3291 * @hw: pointer to the HW struct 3292 * 3293 * Release the global PTP hardware semaphore lock. This is done by writing to 3294 * the PFTSYN_SEM register. 3295 */ 3296 void ice_ptp_unlock(struct ice_hw *hw) 3297 { 3298 wr32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), 0); 3299 } 3300 3301 /** 3302 * ice_ptp_init_phy_model - Initialize hw->phy_model based on device type 3303 * @hw: pointer to the HW structure 3304 * 3305 * Determine the PHY model for the device, and initialize hw->phy_model 3306 * for use by other functions. 3307 */ 3308 void ice_ptp_init_phy_model(struct ice_hw *hw) 3309 { 3310 if (ice_is_e810(hw)) 3311 hw->phy_model = ICE_PHY_E810; 3312 else 3313 hw->phy_model = ICE_PHY_E82X; 3314 } 3315 3316 /** 3317 * ice_ptp_tmr_cmd - Prepare and trigger a timer sync command 3318 * @hw: pointer to HW struct 3319 * @cmd: the command to issue 3320 * 3321 * Prepare the source timer and PHY timers and then trigger the requested 3322 * command. This causes the shadow registers previously written in preparation 3323 * for the command to be synchronously applied to both the source and PHY 3324 * timers. 3325 */ 3326 static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) 3327 { 3328 int err; 3329 3330 /* First, prepare the source timer */ 3331 ice_ptp_src_cmd(hw, cmd); 3332 3333 /* Next, prepare the ports */ 3334 switch (hw->phy_model) { 3335 case ICE_PHY_E810: 3336 err = ice_ptp_port_cmd_e810(hw, cmd); 3337 break; 3338 case ICE_PHY_E82X: 3339 err = ice_ptp_port_cmd_e82x(hw, cmd); 3340 break; 3341 default: 3342 err = -EOPNOTSUPP; 3343 } 3344 3345 if (err) { 3346 ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n", 3347 cmd, err); 3348 return err; 3349 } 3350 3351 /* Write the sync command register to drive both source and PHY timer 3352 * commands synchronously 3353 */ 3354 ice_ptp_exec_tmr_cmd(hw); 3355 3356 return 0; 3357 } 3358 3359 /** 3360 * ice_ptp_init_time - Initialize device time to provided value 3361 * @hw: pointer to HW struct 3362 * @time: 64bits of time (GLTSYN_TIME_L and GLTSYN_TIME_H) 3363 * 3364 * Initialize the device to the specified time provided. 
This requires a three 3365 * step process: 3366 * 3367 * 1) write the new init time to the source timer shadow registers 3368 * 2) write the new init time to the PHY timer shadow registers 3369 * 3) issue an init_time timer command to synchronously switch both the source 3370 * and port timers to the new init time value at the next clock cycle. 3371 */ 3372 int ice_ptp_init_time(struct ice_hw *hw, u64 time) 3373 { 3374 u8 tmr_idx; 3375 int err; 3376 3377 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 3378 3379 /* Source timers */ 3380 wr32(hw, GLTSYN_SHTIME_L(tmr_idx), lower_32_bits(time)); 3381 wr32(hw, GLTSYN_SHTIME_H(tmr_idx), upper_32_bits(time)); 3382 wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0); 3383 3384 /* PHY timers */ 3385 /* Fill Rx and Tx ports and send msg to PHY */ 3386 switch (hw->phy_model) { 3387 case ICE_PHY_E810: 3388 err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF); 3389 break; 3390 case ICE_PHY_E82X: 3391 err = ice_ptp_prep_phy_time_e82x(hw, time & 0xFFFFFFFF); 3392 break; 3393 default: 3394 err = -EOPNOTSUPP; 3395 } 3396 3397 if (err) 3398 return err; 3399 3400 return ice_ptp_tmr_cmd(hw, ICE_PTP_INIT_TIME); 3401 } 3402 3403 /** 3404 * ice_ptp_write_incval - Program PHC with new increment value 3405 * @hw: pointer to HW struct 3406 * @incval: Source timer increment value per clock cycle 3407 * 3408 * Program the PHC with a new increment value. This requires a three-step 3409 * process: 3410 * 3411 * 1) Write the increment value to the source timer shadow registers 3412 * 2) Write the increment value to the PHY timer shadow registers 3413 * 3) Issue an ICE_PTP_INIT_INCVAL timer command to synchronously switch both 3414 * the source and port timers to the new increment value at the next clock 3415 * cycle. 3416 */ 3417 int ice_ptp_write_incval(struct ice_hw *hw, u64 incval) 3418 { 3419 u8 tmr_idx; 3420 int err; 3421 3422 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 3423 3424 /* Shadow Adjust */ 3425 wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval)); 3426 wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval)); 3427 3428 switch (hw->phy_model) { 3429 case ICE_PHY_E810: 3430 err = ice_ptp_prep_phy_incval_e810(hw, incval); 3431 break; 3432 case ICE_PHY_E82X: 3433 err = ice_ptp_prep_phy_incval_e82x(hw, incval); 3434 break; 3435 default: 3436 err = -EOPNOTSUPP; 3437 } 3438 3439 if (err) 3440 return err; 3441 3442 return ice_ptp_tmr_cmd(hw, ICE_PTP_INIT_INCVAL); 3443 } 3444 3445 /** 3446 * ice_ptp_write_incval_locked - Program new incval while holding semaphore 3447 * @hw: pointer to HW struct 3448 * @incval: Source timer increment value per clock cycle 3449 * 3450 * Program a new PHC incval while holding the PTP semaphore. 3451 */ 3452 int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval) 3453 { 3454 int err; 3455 3456 if (!ice_ptp_lock(hw)) 3457 return -EBUSY; 3458 3459 err = ice_ptp_write_incval(hw, incval); 3460 3461 ice_ptp_unlock(hw); 3462 3463 return err; 3464 } 3465 3466 /** 3467 * ice_ptp_adj_clock - Adjust PHC clock time atomically 3468 * @hw: pointer to HW struct 3469 * @adj: Adjustment in nanoseconds 3470 * 3471 * Perform an atomic adjustment of the PHC time by the specified number of 3472 * nanoseconds. This requires a three-step process: 3473 * 3474 * 1) Write the adjustment to the source timer shadow registers 3475 * 2) Write the adjustment to the PHY timer shadow registers 3476 * 3) Issue an ICE_PTP_ADJ_TIME timer command to synchronously apply the 3477 * adjustment to both the source and port timers at the next clock cycle. 
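 *
 * For example, an adjustment of -10 ns is passed as adj = -10; written to the
 * shadow registers it is the 2's complement pattern 0xFFFFFFF6, which the
 * hardware interprets as subtracting 10 ns at the next clock cycle.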
3478 */
3479 int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
3480 {
3481 u8 tmr_idx;
3482 int err;
3483
3484 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3485
3486 /* Write the desired clock adjustment into the GLTSYN_SHADJ register.
3487 * For an ICE_PTP_ADJ_TIME command, this set of registers represents
3488 * the value to add to the clock time. It supports subtraction by
3489 * interpreting the value as a 2's complement integer.
3490 */
3491 wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
3492 wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
3493
3494 switch (hw->phy_model) {
3495 case ICE_PHY_E810:
3496 err = ice_ptp_prep_phy_adj_e810(hw, adj);
3497 break;
3498 case ICE_PHY_E82X:
3499 err = ice_ptp_prep_phy_adj_e82x(hw, adj);
3500 break;
3501 default:
3502 err = -EOPNOTSUPP;
3503 }
3504
3505 if (err)
3506 return err;
3507
3508 return ice_ptp_tmr_cmd(hw, ICE_PTP_ADJ_TIME);
3509 }
3510
3511 /**
3512 * ice_read_phy_tstamp - Read a PHY timestamp from the timestamp block
3513 * @hw: pointer to the HW struct
3514 * @block: the block to read from
3515 * @idx: the timestamp index to read
3516 * @tstamp: on return, the 40bit timestamp value
3517 *
3518 * Read a 40bit timestamp value out of the timestamp block. For E822 devices,
3519 * the block is the quad to read from. For E810 devices, the block is the
3520 * logical port to read from.
3521 */
3522 int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
3523 {
3524 switch (hw->phy_model) {
3525 case ICE_PHY_E810:
3526 return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
3527 case ICE_PHY_E82X:
3528 return ice_read_phy_tstamp_e82x(hw, block, idx, tstamp);
3529 default:
3530 return -EOPNOTSUPP;
3531 }
3532 }
3533
3534 /**
3535 * ice_clear_phy_tstamp - Clear a timestamp from the timestamp block
3536 * @hw: pointer to the HW struct
3537 * @block: the block to read from
3538 * @idx: the timestamp index to reset
3539 *
3540 * Clear a timestamp from the timestamp block, discarding its value without
3541 * returning it. This resets the memory status bit for the timestamp index
3542 * allowing it to be reused for another timestamp in the future.
3543 *
3544 * For E822 devices, the block number is the PHY quad to clear from. For E810
3545 * devices, the block number is the logical port to clear from.
3546 *
3547 * This function must only be called on a timestamp index whose valid bit is
3548 * set according to ice_get_phy_tx_tstamp_ready().
3549 */ 3550 int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx) 3551 { 3552 switch (hw->phy_model) { 3553 case ICE_PHY_E810: 3554 return ice_clear_phy_tstamp_e810(hw, block, idx); 3555 case ICE_PHY_E82X: 3556 return ice_clear_phy_tstamp_e82x(hw, block, idx); 3557 default: 3558 return -EOPNOTSUPP; 3559 } 3560 } 3561 3562 /** 3563 * ice_get_pf_c827_idx - find and return the C827 index for the current pf 3564 * @hw: pointer to the hw struct 3565 * @idx: index of the found C827 PHY 3566 * Return: 3567 * * 0 - success 3568 * * negative - failure 3569 */ 3570 static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx) 3571 { 3572 struct ice_aqc_get_link_topo cmd; 3573 u8 node_part_number; 3574 u16 node_handle; 3575 int status; 3576 u8 ctx; 3577 3578 if (hw->mac_type != ICE_MAC_E810) 3579 return -ENODEV; 3580 3581 if (hw->device_id != ICE_DEV_ID_E810C_QSFP) { 3582 *idx = C827_0; 3583 return 0; 3584 } 3585 3586 memset(&cmd, 0, sizeof(cmd)); 3587 3588 ctx = ICE_AQC_LINK_TOPO_NODE_TYPE_PHY << ICE_AQC_LINK_TOPO_NODE_TYPE_S; 3589 ctx |= ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S; 3590 cmd.addr.topo_params.node_type_ctx = ctx; 3591 3592 status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number, 3593 &node_handle); 3594 if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827) 3595 return -ENOENT; 3596 3597 if (node_handle == E810C_QSFP_C827_0_HANDLE) 3598 *idx = C827_0; 3599 else if (node_handle == E810C_QSFP_C827_1_HANDLE) 3600 *idx = C827_1; 3601 else 3602 return -EIO; 3603 3604 return 0; 3605 } 3606 3607 /** 3608 * ice_ptp_reset_ts_memory - Reset timestamp memory for all blocks 3609 * @hw: pointer to the HW struct 3610 */ 3611 void ice_ptp_reset_ts_memory(struct ice_hw *hw) 3612 { 3613 switch (hw->phy_model) { 3614 case ICE_PHY_E82X: 3615 ice_ptp_reset_ts_memory_e82x(hw); 3616 break; 3617 case ICE_PHY_E810: 3618 default: 3619 return; 3620 } 3621 } 3622 3623 /** 3624 * ice_ptp_init_phc - Initialize PTP hardware clock 3625 * @hw: pointer to the HW struct 3626 * 3627 * Perform the steps required to initialize the PTP hardware clock. 3628 */ 3629 int ice_ptp_init_phc(struct ice_hw *hw) 3630 { 3631 u8 src_idx = hw->func_caps.ts_func_info.tmr_index_owned; 3632 3633 /* Enable source clocks */ 3634 wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M); 3635 3636 /* Clear event err indications for auxiliary pins */ 3637 (void)rd32(hw, GLTSYN_STAT(src_idx)); 3638 3639 switch (hw->phy_model) { 3640 case ICE_PHY_E810: 3641 return ice_ptp_init_phc_e810(hw); 3642 case ICE_PHY_E82X: 3643 return ice_ptp_init_phc_e82x(hw); 3644 default: 3645 return -EOPNOTSUPP; 3646 } 3647 } 3648 3649 /** 3650 * ice_get_phy_tx_tstamp_ready - Read PHY Tx memory status indication 3651 * @hw: pointer to the HW struct 3652 * @block: the timestamp block to check 3653 * @tstamp_ready: storage for the PHY Tx memory status information 3654 * 3655 * Check the PHY for Tx timestamp memory status. This reports a 64 bit value 3656 * which indicates which timestamps in the block may be captured. A set bit 3657 * means the timestamp can be read. An unset bit means the timestamp is not 3658 * ready and software should avoid reading the register. 
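 *
 * Purely illustrative usage sketch (a hypothetical caller, not code from
 * this file), showing how the ready bitmap is typically consumed together
 * with ice_read_phy_tstamp() and ice_clear_phy_tstamp():
 *
 *    u64 ready, tstamp;
 *    u8 idx;
 *
 *    if (ice_get_phy_tx_tstamp_ready(hw, block, &ready))
 *        return;
 *    for (idx = 0; idx < 64; idx++) {
 *        if (!(ready & BIT_ULL(idx)))
 *            continue;
 *        if (!ice_read_phy_tstamp(hw, block, idx, &tstamp))
 *            ice_clear_phy_tstamp(hw, block, idx);
 *    }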
3659 */ 3660 int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready) 3661 { 3662 switch (hw->phy_model) { 3663 case ICE_PHY_E810: 3664 return ice_get_phy_tx_tstamp_ready_e810(hw, block, 3665 tstamp_ready); 3666 case ICE_PHY_E82X: 3667 return ice_get_phy_tx_tstamp_ready_e82x(hw, block, 3668 tstamp_ready); 3669 break; 3670 default: 3671 return -EOPNOTSUPP; 3672 } 3673 } 3674 3675 /** 3676 * ice_cgu_get_pin_desc_e823 - get pin description array 3677 * @hw: pointer to the hw struct 3678 * @input: if request is done against input or output pin 3679 * @size: number of inputs/outputs 3680 * 3681 * Return: pointer to pin description array associated to given hw. 3682 */ 3683 static const struct ice_cgu_pin_desc * 3684 ice_cgu_get_pin_desc_e823(struct ice_hw *hw, bool input, int *size) 3685 { 3686 static const struct ice_cgu_pin_desc *t; 3687 3688 if (hw->cgu_part_number == 3689 ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032) { 3690 if (input) { 3691 t = ice_e823_zl_cgu_inputs; 3692 *size = ARRAY_SIZE(ice_e823_zl_cgu_inputs); 3693 } else { 3694 t = ice_e823_zl_cgu_outputs; 3695 *size = ARRAY_SIZE(ice_e823_zl_cgu_outputs); 3696 } 3697 } else if (hw->cgu_part_number == 3698 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384) { 3699 if (input) { 3700 t = ice_e823_si_cgu_inputs; 3701 *size = ARRAY_SIZE(ice_e823_si_cgu_inputs); 3702 } else { 3703 t = ice_e823_si_cgu_outputs; 3704 *size = ARRAY_SIZE(ice_e823_si_cgu_outputs); 3705 } 3706 } else { 3707 t = NULL; 3708 *size = 0; 3709 } 3710 3711 return t; 3712 } 3713 3714 /** 3715 * ice_cgu_get_pin_desc - get pin description array 3716 * @hw: pointer to the hw struct 3717 * @input: if request is done against input or output pins 3718 * @size: size of array returned by function 3719 * 3720 * Return: pointer to pin description array associated to given hw. 3721 */ 3722 static const struct ice_cgu_pin_desc * 3723 ice_cgu_get_pin_desc(struct ice_hw *hw, bool input, int *size) 3724 { 3725 const struct ice_cgu_pin_desc *t = NULL; 3726 3727 switch (hw->device_id) { 3728 case ICE_DEV_ID_E810C_SFP: 3729 if (input) { 3730 t = ice_e810t_sfp_cgu_inputs; 3731 *size = ARRAY_SIZE(ice_e810t_sfp_cgu_inputs); 3732 } else { 3733 t = ice_e810t_sfp_cgu_outputs; 3734 *size = ARRAY_SIZE(ice_e810t_sfp_cgu_outputs); 3735 } 3736 break; 3737 case ICE_DEV_ID_E810C_QSFP: 3738 if (input) { 3739 t = ice_e810t_qsfp_cgu_inputs; 3740 *size = ARRAY_SIZE(ice_e810t_qsfp_cgu_inputs); 3741 } else { 3742 t = ice_e810t_qsfp_cgu_outputs; 3743 *size = ARRAY_SIZE(ice_e810t_qsfp_cgu_outputs); 3744 } 3745 break; 3746 case ICE_DEV_ID_E823L_10G_BASE_T: 3747 case ICE_DEV_ID_E823L_1GBE: 3748 case ICE_DEV_ID_E823L_BACKPLANE: 3749 case ICE_DEV_ID_E823L_QSFP: 3750 case ICE_DEV_ID_E823L_SFP: 3751 case ICE_DEV_ID_E823C_10G_BASE_T: 3752 case ICE_DEV_ID_E823C_BACKPLANE: 3753 case ICE_DEV_ID_E823C_QSFP: 3754 case ICE_DEV_ID_E823C_SFP: 3755 case ICE_DEV_ID_E823C_SGMII: 3756 t = ice_cgu_get_pin_desc_e823(hw, input, size); 3757 break; 3758 default: 3759 break; 3760 } 3761 3762 return t; 3763 } 3764 3765 /** 3766 * ice_cgu_get_pin_type - get pin's type 3767 * @hw: pointer to the hw struct 3768 * @pin: pin index 3769 * @input: if request is done against input or output pin 3770 * 3771 * Return: type of a pin. 
/**
 * ice_cgu_get_pin_type - get pin's type
 * @hw: pointer to the hw struct
 * @pin: pin index
 * @input: true for an input pin, false for an output pin
 *
 * Return: type of a pin.
 */
enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input)
{
	const struct ice_cgu_pin_desc *t;
	int t_size;

	t = ice_cgu_get_pin_desc(hw, input, &t_size);

	if (!t)
		return 0;

	if (pin >= t_size)
		return 0;

	return t[pin].type;
}

/**
 * ice_cgu_get_pin_freq_supp - get pin's supported frequencies
 * @hw: pointer to the hw struct
 * @pin: pin index
 * @input: true for an input pin, false for an output pin
 * @num: on return, the number of supported frequencies
 *
 * Look up the array of frequencies supported by the given pin and the number
 * of entries it contains.
 *
 * Return: array of supported frequencies for the given pin.
 */
struct dpll_pin_frequency *
ice_cgu_get_pin_freq_supp(struct ice_hw *hw, u8 pin, bool input, u8 *num)
{
	const struct ice_cgu_pin_desc *t;
	int t_size;

	*num = 0;
	t = ice_cgu_get_pin_desc(hw, input, &t_size);
	if (!t)
		return NULL;
	if (pin >= t_size)
		return NULL;
	*num = t[pin].freq_supp_num;

	return t[pin].freq_supp;
}

/**
 * ice_cgu_get_pin_name - get pin's name
 * @hw: pointer to the hw struct
 * @pin: pin index
 * @input: true for an input pin, false for an output pin
 *
 * Return:
 * * null terminated char array with name
 * * NULL in case of failure
 */
const char *ice_cgu_get_pin_name(struct ice_hw *hw, u8 pin, bool input)
{
	const struct ice_cgu_pin_desc *t;
	int t_size;

	t = ice_cgu_get_pin_desc(hw, input, &t_size);

	if (!t)
		return NULL;

	if (pin >= t_size)
		return NULL;

	return t[pin].name;
}
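/* Illustrative sketch, not part of the upstream driver: the DPLL registration
 * path (ice_dpll.c in the real driver) combines the name, type and supported
 * frequency helpers above when describing a pin to the dpll subsystem. The
 * structure and function below are hypothetical stand-ins for that consumer.
 */
struct ice_example_pin_info {
	const char *name;
	enum dpll_pin_type type;
	struct dpll_pin_frequency *freq_supp;
	u8 freq_supp_num;
};

static int __maybe_unused
ice_example_describe_pin(struct ice_hw *hw, u8 pin, bool input,
			 struct ice_example_pin_info *info)
{
	info->name = ice_cgu_get_pin_name(hw, pin, input);
	if (!info->name)
		return -ENOENT;

	info->type = ice_cgu_get_pin_type(hw, pin, input);
	info->freq_supp = ice_cgu_get_pin_freq_supp(hw, pin, input,
						    &info->freq_supp_num);

	return 0;
}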
/**
 * ice_get_cgu_state - get the state of the DPLL
 * @hw: pointer to the hw struct
 * @dpll_idx: Index of internal DPLL unit
 * @last_dpll_state: last known state of DPLL
 * @pin: pointer to a buffer for returning currently active pin
 * @ref_state: reference clock state
 * @eec_mode: eec mode of the DPLL
 * @phase_offset: pointer to a buffer for returning phase offset
 * @dpll_state: state of the DPLL (output)
 *
 * Read the state of the DPLL referenced by dpll_idx. The non-NULL 'pin',
 * 'ref_state', 'eec_mode' and 'phase_offset' parameters are used to retrieve
 * the currently active pin, reference state, mode and phase offset
 * respectively.
 *
 * Return:
 * * 0 - success
 * * negative - failure
 */
int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx,
		      enum dpll_lock_status last_dpll_state, u8 *pin,
		      u8 *ref_state, u8 *eec_mode, s64 *phase_offset,
		      enum dpll_lock_status *dpll_state)
{
	u8 hw_ref_state, hw_dpll_state, hw_eec_mode, hw_config;
	s64 hw_phase_offset;
	int status;

	status = ice_aq_get_cgu_dpll_status(hw, dpll_idx, &hw_ref_state,
					    &hw_dpll_state, &hw_config,
					    &hw_phase_offset, &hw_eec_mode);
	if (status)
		return status;

	if (pin)
		/* current ref pin in dpll_state_refsel_status_X register */
		*pin = hw_config & ICE_AQC_GET_CGU_DPLL_CONFIG_CLK_REF_SEL;
	if (phase_offset)
		*phase_offset = hw_phase_offset;
	if (ref_state)
		*ref_state = hw_ref_state;
	if (eec_mode)
		*eec_mode = hw_eec_mode;
	if (!dpll_state)
		return 0;

	/* According to the ZL DPLL documentation, once the state reaches
	 * LOCKED_HO_ACQ it never returns to FREERUN. This aligns with the
	 * ITU-T G.781 Recommendation. We cannot report HOLDOVER as the HO
	 * memory is cleared while switching to another reference.
	 * Only when the previous state was either "LOCKED without HO_ACQ"
	 * or "HOLDOVER" do we actually go back to FREERUN.
	 */
	if (hw_dpll_state & ICE_AQC_GET_CGU_DPLL_STATUS_STATE_LOCK) {
		if (hw_dpll_state & ICE_AQC_GET_CGU_DPLL_STATUS_STATE_HO_READY)
			*dpll_state = DPLL_LOCK_STATUS_LOCKED_HO_ACQ;
		else
			*dpll_state = DPLL_LOCK_STATUS_LOCKED;
	} else if (last_dpll_state == DPLL_LOCK_STATUS_LOCKED_HO_ACQ ||
		   last_dpll_state == DPLL_LOCK_STATUS_HOLDOVER) {
		*dpll_state = DPLL_LOCK_STATUS_HOLDOVER;
	} else {
		*dpll_state = DPLL_LOCK_STATUS_UNLOCKED;
	}

	return 0;
}

/**
 * ice_get_cgu_rclk_pin_info - get info on available recovered clock pins
 * @hw: pointer to the hw struct
 * @base_idx: returns index of first recovered clock pin on device
 * @pin_num: returns number of recovered clock pins available on device
 *
 * Based on the hw, provide the caller with information about the recovered
 * clock pins available on the board.
 *
 * Return:
 * * 0 - success, information is valid
 * * negative - failure, information is not valid
 */
int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num)
{
	u8 phy_idx;
	int ret;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810C_QSFP:
		ret = ice_get_pf_c827_idx(hw, &phy_idx);
		if (ret)
			return ret;
		*base_idx = E810T_CGU_INPUT_C827(phy_idx, ICE_RCLKA_PIN);
		*pin_num = ICE_E810_RCLK_PINS_NUM;
		ret = 0;
		break;
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
		*pin_num = ICE_E82X_RCLK_PINS_NUM;
		ret = 0;
		if (hw->cgu_part_number ==
		    ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032)
			*base_idx = ZL_REF1P;
		else if (hw->cgu_part_number ==
			 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384)
			*base_idx = SI_REF1P;
		else
			ret = -ENODEV;
		break;
	default:
		ret = -ENODEV;
		break;
	}

	return ret;
}
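/* Illustrative sketch, not part of the upstream driver: a caller tracking
 * lock state would feed the last reported state back into ice_get_cgu_state()
 * so that HOLDOVER can be emulated as described in the comment above. The
 * function name is a hypothetical example only.
 */
static void __maybe_unused ice_example_poll_dpll(struct ice_hw *hw, u8 dpll_idx,
						 enum dpll_lock_status *last)
{
	enum dpll_lock_status state;
	u8 pin, ref_state, eec_mode;
	s64 phase_offset;
	int err;

	err = ice_get_cgu_state(hw, dpll_idx, *last, &pin, &ref_state,
				&eec_mode, &phase_offset, &state);
	if (err)
		return;

	ice_debug(hw, ICE_DBG_PTP, "dpll %u: state %d, pin %u, offset %lld\n",
		  dpll_idx, state, pin, phase_offset);
	*last = state;
}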
/**
 * ice_cgu_get_output_pin_state_caps - get output pin state capabilities
 * @hw: pointer to the hw struct
 * @pin_id: id of a pin
 * @caps: capabilities to modify
 *
 * Return:
 * * 0 - success, state capabilities were modified
 * * negative - failure, capabilities were not modified
 */
int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
				      unsigned long *caps)
{
	bool can_change = true;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_SFP:
		if (pin_id == ZL_OUT2 || pin_id == ZL_OUT3)
			can_change = false;
		break;
	case ICE_DEV_ID_E810C_QSFP:
		if (pin_id == ZL_OUT2 || pin_id == ZL_OUT3 || pin_id == ZL_OUT4)
			can_change = false;
		break;
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
		if (hw->cgu_part_number ==
		    ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032 &&
		    pin_id == ZL_OUT2)
			can_change = false;
		else if (hw->cgu_part_number ==
			 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384 &&
			 pin_id == SI_OUT1)
			can_change = false;
		break;
	default:
		return -EINVAL;
	}

	if (can_change)
		*caps |= DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
	else
		*caps &= ~DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;

	return 0;
}
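/* Illustrative sketch, not part of the upstream driver: a DPLL pin ops
 * implementation could use ice_cgu_get_output_pin_state_caps() to decide
 * whether user space may toggle a given output pin. Seeding the caps with
 * the generic CAN_CHANGE bit is an assumption made for this example, and the
 * function name is hypothetical.
 */
static bool __maybe_unused
ice_example_output_pin_is_togglable(struct ice_hw *hw, u8 pin_id)
{
	unsigned long caps = DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;

	if (ice_cgu_get_output_pin_state_caps(hw, pin_id, &caps))
		return false;

	return !!(caps & DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE);
}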