// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_trace.h"

/* Propagation delay (in nanoseconds) of the E810 periodic output pin */
#define E810_OUT_PROP_DELAY_NS 1

/* Fallback clock increment value used when the E822 TIME_REF frequency is
 * not recognized; see ice_base_incval().
 */
#define UNKNOWN_INCVAL_E822 0x100000000ULL

/* Pin descriptions for the E810-T SMA/U.FL connectors. SMA1 and U.FL1
 * share channel 1; SMA2 and U.FL2 share channel 2.
 */
static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
	/* name        idx   func          chan */
	{ "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } },
	{ "SMA1", SMA1, PTP_PF_NONE, 1, { 0, } },
	{ "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } },
	{ "SMA2", SMA2, PTP_PF_NONE, 2, { 0, } },
	{ "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } },
};

/**
 * ice_get_sma_config_e810t - Read current SMA pin configuration
 * @hw: pointer to the hw struct
 * @ptp_pins: pointer to the ptp_pin_desc structure
 *
 * Read the configuration of the SMA control logic and put it into the
 * ptp_pin_desc structure.
 *
 * Return: 0 on success, or the error from reading the SMA control register.
 */
static int
ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
{
	u8 data, i;
	int status;

	/* Read initial pin state */
	status = ice_read_sma_ctrl_e810t(hw, &data);
	if (status)
		return status;

	/* initialize with defaults */
	for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
		snprintf(ptp_pins[i].name, sizeof(ptp_pins[i].name),
			 "%s", ice_pin_desc_e810t[i].name);
		ptp_pins[i].index = ice_pin_desc_e810t[i].index;
		ptp_pins[i].func = ice_pin_desc_e810t[i].func;
		ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
	}

	/* Parse SMA1/UFL1: decode the direction/TX-enable bits into the
	 * corresponding pin functions
	 */
	switch (data & ICE_SMA1_MASK_E810T) {
	case ICE_SMA1_MASK_E810T:
	default:
		ptp_pins[SMA1].func = PTP_PF_NONE;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case ICE_SMA1_DIR_EN_E810T:
		ptp_pins[SMA1].func = PTP_PF_PEROUT;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case ICE_SMA1_TX_EN_E810T:
		ptp_pins[SMA1].func = PTP_PF_EXTTS;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case 0:
		ptp_pins[SMA1].func = PTP_PF_EXTTS;
		ptp_pins[UFL1].func = PTP_PF_PEROUT;
		break;
	}

	/* Parse SMA2/UFL2 */
	switch (data & ICE_SMA2_MASK_E810T) {
	case ICE_SMA2_MASK_E810T:
	default:
		ptp_pins[SMA2].func = PTP_PF_NONE;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
		ptp_pins[SMA2].func = PTP_PF_EXTTS;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
		ptp_pins[SMA2].func = PTP_PF_PEROUT;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T):
		ptp_pins[SMA2].func = PTP_PF_NONE;
		ptp_pins[UFL2].func = PTP_PF_EXTTS;
		break;
	case ICE_SMA2_DIR_EN_E810T:
		ptp_pins[SMA2].func = PTP_PF_PEROUT;
		ptp_pins[UFL2].func = PTP_PF_EXTTS;
		break;
	}

	return 0;
}

/**
 * ice_ptp_set_sma_config_e810t - Write a new SMA pin configuration
 * @hw: pointer to the hw struct
 * @ptp_pins: pointer to the ptp_pin_desc structure
 *
 * Set the configuration of the SMA control logic based on the configuration
 * in the ptp_pins parameter.
 *
 * Return: 0 on success, -EINVAL for an inconsistent requested configuration,
 * or the error from accessing the SMA control register.
 */
static int
ice_ptp_set_sma_config_e810t(struct ice_hw *hw,
			     const struct ptp_pin_desc *ptp_pins)
{
	int status;
	u8 data;

	/* SMA1 and UFL1 cannot be set to TX at the same time */
	if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
	    ptp_pins[UFL1].func == PTP_PF_PEROUT)
		return -EINVAL;

	/* SMA2 and UFL2 cannot be set to RX at the same time */
	if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
	    ptp_pins[UFL2].func == PTP_PF_EXTTS)
		return -EINVAL;

	/* Read initial pin state value */
	status = ice_read_sma_ctrl_e810t(hw, &data);
	if (status)
		return status;

	/* Set the right state based on the desired configuration */
	data &= ~ICE_SMA1_MASK_E810T;
	if (ptp_pins[SMA1].func == PTP_PF_NONE &&
	    ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled");
		data |= ICE_SMA1_MASK_E810T;
	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
		   ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 RX");
		data |= ICE_SMA1_TX_EN_E810T;
	} else if (ptp_pins[SMA1].func == PTP_PF_NONE &&
		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
		/* U.FL 1 TX will always enable SMA 1 RX */
		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
	} else if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 TX");
		data |= ICE_SMA1_DIR_EN_E810T;
	}

	data &= ~ICE_SMA2_MASK_E810T;
	if (ptp_pins[SMA2].func == PTP_PF_NONE &&
	    ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled");
		data |= ICE_SMA2_MASK_E810T;
	} else if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
		   ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 RX");
		data |= (ICE_SMA2_TX_EN_E810T |
			 ICE_SMA2_UFL2_RX_DIS_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_NONE &&
		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
		dev_info(ice_hw_to_dev(hw), "UFL2 RX");
		data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 TX");
		data |= (ICE_SMA2_DIR_EN_E810T |
			 ICE_SMA2_UFL2_RX_DIS_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
		dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX");
		data |= ICE_SMA2_DIR_EN_E810T;
	}

	return ice_write_sma_ctrl_e810t(hw, data);
}

/**
 * ice_ptp_set_sma_e810t - Set the configuration of a single SMA pin
 * @info: the driver's PTP info structure
 * @pin: pin index in kernel structure
 * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT)
 *
 * Read the full SMA configuration, modify the single requested pin (clearing
 * a conflicting assignment on the pin sharing the same channel), and write
 * the result back to the SMA control logic.
 *
 * Return: 0 on success, -EOPNOTSUPP for unsupported pin/function, or an
 * error from reading/writing the SMA configuration.
 */
static int
ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin,
		      enum ptp_pin_function func)
{
	struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T];
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;
	int err;

	/* Only SMA/U.FL pins are configurable here (GNSS is fixed) */
	if (pin < SMA1 || func > PTP_PF_PEROUT)
		return -EOPNOTSUPP;

	err = ice_get_sma_config_e810t(hw, ptp_pins);
	if (err)
		return err;

	/* Disable the same function on the other pin sharing the channel */
	if (pin == SMA1 && ptp_pins[UFL1].func == func)
		ptp_pins[UFL1].func = PTP_PF_NONE;
	if (pin == UFL1 && ptp_pins[SMA1].func == func)
		ptp_pins[SMA1].func = PTP_PF_NONE;

	if (pin == SMA2 && ptp_pins[UFL2].func == func)
		ptp_pins[UFL2].func = PTP_PF_NONE;
	if (pin == UFL2 && ptp_pins[SMA2].func == func)
		ptp_pins[SMA2].func = PTP_PF_NONE;

	/* Set up new pin function in the temp table */
	ptp_pins[pin].func = func;

	return ice_ptp_set_sma_config_e810t(hw, ptp_pins);
}

/**
 * ice_verify_pin_e810t - Verify and apply a pin assignment
 * @info: the driver's PTP info structure
 * @pin: Pin index
 * @func: Assigned function
 * @chan: Assigned channel
 *
 * Verify that the pin supports the requested pin function and check pin
 * consistency.
 * Reconfigure the SMA logic attached to the given pin to enable its
 * desired functionality.
 *
 * Return: 0 on success, -EOPNOTSUPP when the pin cannot take the requested
 * function or channel, or an error from applying the SMA configuration.
 */
static int
ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
		     enum ptp_pin_function func, unsigned int chan)
{
	/* Don't allow channel reassignment */
	if (chan != ice_pin_desc_e810t[pin].chan)
		return -EOPNOTSUPP;

	/* Check if functions are properly assigned */
	switch (func) {
	case PTP_PF_NONE:
		break;
	case PTP_PF_EXTTS:
		/* U.FL1 is output-only */
		if (pin == UFL1)
			return -EOPNOTSUPP;
		break;
	case PTP_PF_PEROUT:
		/* U.FL2 and GNSS are input-only */
		if (pin == UFL2 || pin == GNSS)
			return -EOPNOTSUPP;
		break;
	case PTP_PF_PHYSYNC:
		return -EOPNOTSUPP;
	}

	return ice_ptp_set_sma_e810t(info, pin, func);
}

/**
 * ice_set_tx_tstamp - Enable or disable Tx timestamping
 * @pf: The PF pointer to search in
 * @on: bool value for whether timestamps are enabled or disabled
 *
 * Sets the per-ring ptp_tx flag on all Tx rings of the main VSI and
 * enables/disables the Tx timestamp interrupt cause accordingly.
 */
static void ice_set_tx_tstamp(struct ice_pf *pf, bool on)
{
	struct ice_vsi *vsi;
	u32 val;
	u16 i;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return;

	/* Set the timestamp enable flag for all the Tx rings */
	ice_for_each_txq(vsi, i) {
		if (!vsi->tx_rings[i])
			continue;
		vsi->tx_rings[i]->ptp_tx = on;
	}

	/* Configure the Tx timestamp interrupt */
	val = rd32(&pf->hw, PFINT_OICR_ENA);
	if (on)
		val |= PFINT_OICR_TSYN_TX_M;
	else
		val &= ~PFINT_OICR_TSYN_TX_M;
	wr32(&pf->hw, PFINT_OICR_ENA, val);

	/* Record the new state in the hwtstamp configuration reported to
	 * user space
	 */
	pf->ptp.tstamp_config.tx_type = on ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
}

/**
 * ice_set_rx_tstamp - Enable or disable Rx timestamping
 * @pf: The PF pointer to search in
 * @on: bool value for whether timestamps are enabled or disabled
 *
 * Sets the per-ring ptp_rx flag on all Rx rings of the main VSI and updates
 * the reported hwtstamp Rx filter.
 */
static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
{
	struct ice_vsi *vsi;
	u16 i;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return;

	/* Set the timestamp flag for all the Rx rings */
	ice_for_each_rxq(vsi, i) {
		if (!vsi->rx_rings[i])
			continue;
		vsi->rx_rings[i]->ptp_rx = on;
	}

	pf->ptp.tstamp_config.rx_filter = on ? HWTSTAMP_FILTER_ALL :
					       HWTSTAMP_FILTER_NONE;
}

/**
 * ice_ptp_cfg_timestamp - Configure timestamp for init/deinit
 * @pf: Board private structure
 * @ena: bool value to enable or disable time stamp
 *
 * This function will configure timestamping during PTP initialization
 * and deinitialization
 */
void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
{
	ice_set_tx_tstamp(pf, ena);
	ice_set_rx_tstamp(pf, ena);
}

/**
 * ice_get_ptp_clock_index - Get the PTP clock index
 * @pf: the PF pointer
 *
 * Determine the clock index of the PTP clock associated with this device. If
 * this is the PF controlling the clock, just use the local access to the
 * clock device pointer.
 *
 * Otherwise, read from the driver shared parameters to determine the clock
 * index value.
 *
 * Returns: the index of the PTP clock associated with this device, or -1 if
 * there is no associated clock.
 */
int ice_get_ptp_clock_index(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_aqc_driver_params param_idx;
	struct ice_hw *hw = &pf->hw;
	u8 tmr_idx;
	u32 value;
	int err;

	/* Use the ptp_clock structure if we're the main PF */
	if (pf->ptp.clock)
		return ptp_clock_index(pf->ptp.clock);

	/* Pick the shared parameter slot matching the associated timer */
	tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
	if (!tmr_idx)
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
	else
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;

	err = ice_aq_get_driver_param(hw, param_idx, &value, NULL);
	if (err) {
		dev_err(dev, "Failed to read PTP clock index parameter, err %d aq_err %s\n",
			err, ice_aq_str(hw->adminq.sq_last_status));
		return -1;
	}

	/* The PTP clock index is an integer, and will be between 0 and
	 * INT_MAX. The highest bit of the driver shared parameter is used to
	 * indicate whether or not the currently stored clock index is valid.
	 */
	if (!(value & PTP_SHARED_CLK_IDX_VALID))
		return -1;

	return value & ~PTP_SHARED_CLK_IDX_VALID;
}

/**
 * ice_set_ptp_clock_index - Set the PTP clock index
 * @pf: the PF pointer
 *
 * Set the PTP clock index for this device into the shared driver parameters,
 * so that other PFs associated with this device can read it.
 *
 * If the PF is unable to store the clock index, it will log an error, but
 * will continue operating PTP.
 */
static void ice_set_ptp_clock_index(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_aqc_driver_params param_idx;
	struct ice_hw *hw = &pf->hw;
	u8 tmr_idx;
	u32 value;
	int err;

	/* Nothing to publish if we do not own a clock */
	if (!pf->ptp.clock)
		return;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
	if (!tmr_idx)
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
	else
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;

	value = (u32)ptp_clock_index(pf->ptp.clock);
	if (value > INT_MAX) {
		/* The top bit is reserved as the validity flag, so an index
		 * above INT_MAX cannot be stored
		 */
		dev_err(dev, "PTP Clock index is too large to store\n");
		return;
	}
	value |= PTP_SHARED_CLK_IDX_VALID;

	err = ice_aq_set_driver_param(hw, param_idx, value, NULL);
	if (err) {
		dev_err(dev, "Failed to set PTP clock index parameter, err %d aq_err %s\n",
			err, ice_aq_str(hw->adminq.sq_last_status));
	}
}

/**
 * ice_clear_ptp_clock_index - Clear the PTP clock index
 * @pf: the PF pointer
 *
 * Clear the PTP clock index for this device. Must be called when
 * unregistering the PTP clock, in order to ensure other PFs stop reporting
 * a clock object that no longer exists.
 */
static void ice_clear_ptp_clock_index(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_aqc_driver_params param_idx;
	struct ice_hw *hw = &pf->hw;
	u8 tmr_idx;
	int err;

	/* Do not clear the index if we don't own the timer */
	if (!hw->func_caps.ts_func_info.src_tmr_owned)
		return;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
	if (!tmr_idx)
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
	else
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;

	err = ice_aq_set_driver_param(hw, param_idx, 0, NULL);
	if (err) {
		dev_dbg(dev, "Failed to clear PTP clock index parameter, err %d aq_err %s\n",
			err, ice_aq_str(hw->adminq.sq_last_status));
	}
}

/**
 * ice_ptp_read_src_clk_reg - Read the source clock register
 * @pf: Board private structure
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
 */
static u64
ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
{
	struct ice_hw *hw = &pf->hw;
	u32 hi, lo, lo2;
	u8 tmr_idx;

	tmr_idx = ice_get_ptp_src_clock_index(hw);
	/* Read the system timestamp pre PHC read */
	ptp_read_system_prets(sts);

	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	/* Read the system timestamp post PHC read */
	ptp_read_system_postts(sts);

	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	if (lo2 < lo) {
		/* if TIME_L rolled over read TIME_L again and update
		 * system timestamps
		 */
		ptp_read_system_prets(sts);
		lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
		ptp_read_system_postts(sts);
		hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	}

	return ((u64)hi << 32) | lo;
}

/**
 * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
 * @cached_phc_time: recently cached copy of PHC time
 * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
 *
 * Hardware captures timestamps which contain only 32 bits of nominal
 * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
 * Note that the captured timestamp values may be 40 bits, but the lower
 * 8 bits are sub-nanoseconds and generally discarded.
 *
 * Extend the 32bit nanosecond timestamp using the following algorithm and
 * assumptions:
 *
 * 1) have a recently cached copy of the PHC time
 * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1
 *    seconds) before or after the PHC time was captured.
 * 3) calculate the delta between the cached time and the timestamp
 * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
 *    captured after the PHC time. In this case, the full timestamp is just
 *    the cached PHC time plus the delta.
 * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
 *    timestamp was captured *before* the PHC time, i.e. because the PHC
 *    cache was updated after the timestamp was captured by hardware. In this
 *    case, the full timestamp is the cached time minus the inverse delta.
 *
 * This algorithm works even if the PHC time was updated after a Tx timestamp
 * was requested, but before the Tx timestamp event was reported from
 * hardware.
 *
 * This calculation primarily relies on keeping the cached PHC time up to
 * date. If the timestamp was captured more than 2^31 nanoseconds after the
 * PHC time, it is possible that the lower 32bits of PHC time have
 * overflowed more than once, and we might generate an incorrect timestamp.
 *
 * This is prevented by (a) periodically updating the cached PHC time once
 * a second, and (b) discarding any Tx timestamp packet if it has waited for
 * a timestamp for more than one second.
 */
static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
{
	u32 delta, phc_time_lo;
	u64 ns;

	/* Extract the lower 32 bits of the PHC time */
	phc_time_lo = (u32)cached_phc_time;

	/* Calculate the delta between the lower 32bits of the cached PHC
	 * time and the in_tstamp value
	 */
	delta = (in_tstamp - phc_time_lo);

	/* Do not assume that the in_tstamp is always more recent than the
	 * cached PHC time. If the delta is large, it indicates that the
	 * in_tstamp was taken in the past, and should be converted
	 * forward.
	 */
	if (delta > (U32_MAX / 2)) {
		/* reverse the delta calculation here */
		delta = (phc_time_lo - in_tstamp);
		ns = cached_phc_time - delta;
	} else {
		ns = cached_phc_time + delta;
	}

	return ns;
}

/**
 * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
 * @pf: Board private structure
 * @in_tstamp: Ingress/egress 40b timestamp value
 *
 * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
 * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
 *
 *  *--------------------------------------------------------------*
 *  | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
 *  *--------------------------------------------------------------*
 *
 * The low bit is an indicator of whether the timestamp is valid. The next
 * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
 * and the remaining 32 bits are the lower 32 bits of the PHC timer.
 *
 * It is assumed that the caller verifies the timestamp is valid prior to
 * calling this function.
 *
 * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
 * time stored in the device private PTP structure as the basis for timestamp
 * extension.
 *
 * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
 * algorithm.
 */
static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
{
	const u64 mask = GENMASK_ULL(31, 0);
	unsigned long discard_time;

	/* Discard the hardware timestamp if the cached PHC time is too old */
	discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
	if (time_is_before_jiffies(discard_time)) {
		pf->ptp.tx_hwtstamp_discarded++;
		return 0;
	}

	/* Shift out the 7 sub-ns bits plus the valid bit, keep the low 32
	 * nanosecond bits, and extend them against the cached PHC time
	 */
	return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
				     (in_tstamp >> 8) & mask);
}

/**
 * ice_ptp_tx_tstamp_work - Process Tx timestamps for a port
 * @work: pointer to the kthread_work struct
 *
 * Process timestamps captured by the PHY associated with this port. To do
 * this, loop over each index with a waiting skb.
 *
 * If a given index has a valid timestamp, perform the following steps:
 *
 * 1) copy the timestamp out of the PHY register
 * 2) clear the timestamp valid bit in the PHY register
 * 3) unlock the index by clearing the associated in_use bit
 * 4) extend the 40b timestamp value to get a 64bit timestamp
 * 5) send that timestamp to the stack
 *
 * After looping, if we still have waiting SKBs, then re-queue the work. This
 * may cause us to effectively poll even when not strictly necessary. We do
 * this because it's possible a new timestamp was requested around the same
 * time as the interrupt. In some cases hardware might not interrupt us again
 * when the timestamp is captured.
 *
 * Note that we only take the tracking lock when clearing the bit and when
 * checking if we need to re-queue this task. The only place where bits can be
 * set is the hard xmit routine where an SKB has a request flag set. The only
 * places where we clear bits are this work function, or the periodic cleanup
 * thread. If the cleanup thread clears a bit we're processing we catch it
 * when we lock to clear the bit and then grab the SKB pointer. If a Tx thread
 * starts a new timestamp, we might not begin processing it right away but we
 * will notice it at the end when we re-queue the work item. If a Tx thread
 * starts a new timestamp just after this function exits without re-queuing,
 * the interrupt when the timestamp finishes should trigger. Avoiding holding
 * the lock for the entire function is important in order to ensure that Tx
 * threads do not get blocked while waiting for the lock.
 */
static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
{
	struct ice_ptp_port *ptp_port;
	struct ice_ptp_tx *tx;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u8 idx;

	tx = container_of(work, struct ice_ptp_tx, work);
	if (!tx->init)
		return;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	hw = &pf->hw;

	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct skb_shared_hwtstamps shhwtstamps = {};
		u8 phy_idx = idx + tx->quad_offset;
		u64 raw_tstamp, tstamp;
		struct sk_buff *skb;
		int err;

		ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);

		err = ice_read_phy_tstamp(hw, tx->quad, phy_idx,
					  &raw_tstamp);
		if (err)
			continue;

		ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);

		/* Check if the timestamp is invalid or stale */
		if (!(raw_tstamp & ICE_PTP_TS_VALID) ||
		    raw_tstamp == tx->tstamps[idx].cached_tstamp)
			continue;

		/* The timestamp is valid, so we'll go ahead and clear this
		 * index and then send the timestamp up to the stack.
		 */
		spin_lock(&tx->lock);
		tx->tstamps[idx].cached_tstamp = raw_tstamp;
		clear_bit(idx, tx->in_use);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		spin_unlock(&tx->lock);

		/* it's (unlikely but) possible we raced with the cleanup
		 * thread for discarding old timestamp requests.
		 */
		if (!skb)
			continue;

		/* Extend the timestamp using cached PHC time */
		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
		if (tstamp) {
			shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
			ice_trace(tx_tstamp_complete, skb, idx);
		}

		skb_tstamp_tx(skb, &shhwtstamps);
		dev_kfree_skb_any(skb);
	}

	/* Check if we still have work to do. If so, re-queue this task to
	 * poll for remaining timestamps.
	 */
	spin_lock(&tx->lock);
	if (!bitmap_empty(tx->in_use, tx->len))
		kthread_queue_work(pf->ptp.kworker, &tx->work);
	spin_unlock(&tx->lock);
}

/**
 * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
 * @tx: Tx tracking structure to initialize
 *
 * Assumes that the length has already been initialized. Do not call directly,
 * use the ice_ptp_init_tx_e822 or ice_ptp_init_tx_e810 instead.
 *
 * Return: 0 on success, -ENOMEM if either allocation fails.
 */
static int
ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
{
	tx->tstamps = kcalloc(tx->len, sizeof(*tx->tstamps), GFP_KERNEL);
	if (!tx->tstamps)
		return -ENOMEM;

	tx->in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
	if (!tx->in_use) {
		kfree(tx->tstamps);
		tx->tstamps = NULL;
		return -ENOMEM;
	}

	spin_lock_init(&tx->lock);
	kthread_init_work(&tx->work, ice_ptp_tx_tstamp_work);

	tx->init = 1;

	return 0;
}

/**
 * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
 * @pf: Board private structure
 * @tx: the tracker to flush
 *
 * Free any skbs still waiting on a timestamp, clear their in_use bits, and
 * clear any residual timestamps left in the PHY block (skipped while a reset
 * is ongoing, since the PHY is not accessible then).
 */
static void
ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	u8 idx;

	for (idx = 0; idx < tx->len; idx++) {
		u8 phy_idx = idx + tx->quad_offset;

		spin_lock(&tx->lock);
		if (tx->tstamps[idx].skb) {
			dev_kfree_skb_any(tx->tstamps[idx].skb);
			tx->tstamps[idx].skb = NULL;
			pf->ptp.tx_hwtstamp_flushed++;
		}
		clear_bit(idx, tx->in_use);
		spin_unlock(&tx->lock);

		/* Clear any potential residual timestamp in the PHY block */
		if (!pf->hw.reset_ongoing)
			ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
	}
}

/**
 * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
 * @pf: Board private structure
 * @tx: Tx tracking structure to release
 *
 * Free memory associated with the Tx timestamp tracker.
 */
static void
ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	/* Mark the tracker inactive before cancelling the work so the work
	 * function bails out if it is already running
	 */
	tx->init = 0;

	kthread_cancel_work_sync(&tx->work);

	ice_ptp_flush_tx_tracker(pf, tx);

	kfree(tx->tstamps);
	tx->tstamps = NULL;

	bitmap_free(tx->in_use);
	tx->in_use = NULL;

	tx->len = 0;
}

/**
 * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps
 * @pf: Board private structure
 * @tx: the Tx tracking structure to initialize
 * @port: the port this structure tracks
 *
 * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
 * the timestamp block is shared for all ports in the same quad. To avoid
 * ports using the same timestamp index, logically break the block of
 * registers into chunks based on the port number.
 *
 * Return: 0 on success, -ENOMEM if tracker allocation fails.
 */
static int
ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
	tx->quad = port / ICE_PORTS_PER_QUAD;
	tx->quad_offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT;
	tx->len = INDEX_PER_PORT;

	return ice_ptp_alloc_tx_tracker(tx);
}

/**
 * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
 * @pf: Board private structure
 * @tx: the Tx tracking structure to initialize
 *
 * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
 * port has its own block of timestamps, independent of the other ports.
 *
 * Return: 0 on success, -ENOMEM if tracker allocation fails.
 */
static int
ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	tx->quad = pf->hw.port_info->lport;
	tx->quad_offset = 0;
	tx->len = INDEX_PER_QUAD;

	return ice_ptp_alloc_tx_tracker(tx);
}

/**
 * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
 * @pf: pointer to the PF struct
 * @tx: PTP Tx tracker to clean up
 *
 * Loop through the Tx timestamp requests and see if any of them have been
 * waiting for a long time. Discard any SKBs that have been waiting for more
 * than 2 seconds. This is long enough to be reasonably sure that the
 * timestamp will never be captured. This might happen if the packet gets
 * discarded before it reaches the PHY timestamping block.
 */
static void ice_ptp_tx_tstamp_cleanup(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	struct ice_hw *hw = &pf->hw;
	u8 idx;

	if (!tx->init)
		return;

	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct sk_buff *skb;
		u64 raw_tstamp;

		/* Check if this SKB has been waiting for too long */
		if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
			continue;

		/* Read tstamp to be able to use this register again
		 * (return value intentionally unchecked; the slot is being
		 * abandoned either way)
		 */
		ice_read_phy_tstamp(hw, tx->quad, idx + tx->quad_offset,
				    &raw_tstamp);

		spin_lock(&tx->lock);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		clear_bit(idx, tx->in_use);
		spin_unlock(&tx->lock);

		/* Count the number of Tx timestamps which have timed out */
		pf->ptp.tx_hwtstamp_timeouts++;

		/* Free the SKB after we've cleared the bit */
		dev_kfree_skb_any(skb);
	}
}

/**
 * ice_ptp_update_cached_phctime - Update the cached PHC time values
 * @pf: Board specific private structure
 *
 * This function updates the system time values which are cached in the PF
 * structure and the Rx rings.
 *
 * This function must be called periodically to ensure that the cached value
 * is never more than 2 seconds old.
 *
 * Note that the cached copy in the PF PTP structure is always updated, even
 * if we can't update the copy in the Rx rings.
 *
 * Return:
 * * 0 - OK, successfully updated
 * * -EAGAIN - PF was busy, need to reschedule the update
 */
static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	unsigned long update_before;
	u64 systime;
	int i;

	/* Warn (and count) if the previous cached value is older than the
	 * 2 second freshness requirement
	 */
	update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
	if (pf->ptp.cached_phc_time &&
	    time_is_before_jiffies(update_before)) {
		unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;

		dev_warn(dev, "%u msecs passed between update to cached PHC time\n",
			 jiffies_to_msecs(time_taken));
		pf->ptp.late_cached_phc_updates++;
	}

	/* Read the current PHC time */
	systime = ice_ptp_read_src_clk_reg(pf, NULL);

	/* Update the cached PHC time stored in the PF structure */
	WRITE_ONCE(pf->ptp.cached_phc_time, systime);
	WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);

	if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
		return -EAGAIN;

	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];
		int j;

		if (!vsi)
			continue;

		if (vsi->type != ICE_VSI_PF)
			continue;

		ice_for_each_rxq(vsi, j) {
			if (!vsi->rx_rings[j])
				continue;
			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
		}
	}
	clear_bit(ICE_CFG_BUSY, pf->state);

	return 0;
}

/**
 * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
 * @pf: Board specific private structure
 *
 * This function must be called when the cached PHC time is no longer valid,
 * such as after a time adjustment. It discards any outstanding Tx timestamps,
 * and updates the cached PHC time for both the PF and Rx rings. If updating
 * the PHC time cannot be done immediately, a warning message is logged and
 * the work item is scheduled.
 *
 * These steps are required in order to ensure that we do not accidentally
 * report a timestamp extended by the wrong PHC cached copy. Note that we
 * do not directly update the cached timestamp here because it is possible
 * this might produce an error when ICE_CFG_BUSY is set. If this occurred, we
 * would have to try again. During that time window, timestamps might be
 * requested and returned with an invalid extension. Thus, on failure to
 * immediately update the cached PHC time we would need to zero the value
 * anyways. For this reason, we just zero the value immediately and queue the
 * update work item.
 */
static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	/* Update the cached PHC time immediately if possible, otherwise
	 * schedule the work item to execute soon.
	 */
	err = ice_ptp_update_cached_phctime(pf);
	if (err) {
		/* If another thread is updating the Rx rings, we won't
		 * properly reset them here. This could lead to reporting of
		 * invalid timestamps, but there isn't much we can do.
		 */
		dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
			 __func__);

		/* Queue the work item to update the Rx rings when possible */
		kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
					   msecs_to_jiffies(10));
	}

	/* Flush any outstanding Tx timestamps */
	ice_ptp_flush_tx_tracker(pf, &pf->ptp.port.tx);
}

/**
 * ice_ptp_read_time - Read the time from the device
 * @pf: Board private structure
 * @ts: timespec structure to hold the current time value
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
 *
 * This function reads the source clock registers and stores them in a timespec.
 * However, since the registers are 64 bits of nanoseconds, we must convert the
 * result to a timespec before we can return.
 */
static void
ice_ptp_read_time(struct ice_pf *pf, struct timespec64 *ts,
		  struct ptp_system_timestamp *sts)
{
	u64 time_ns = ice_ptp_read_src_clk_reg(pf, sts);

	*ts = ns_to_timespec64(time_ns);
}

/**
 * ice_ptp_write_init - Set PHC time to provided value
 * @pf: Board private structure
 * @ts: timespec structure that holds the new time value
 *
 * Set the PHC time to the specified time provided in the timespec.
 *
 * Return: 0 on success, or the error from programming the timer.
 */
static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
{
	u64 ns = timespec64_to_ns(ts);
	struct ice_hw *hw = &pf->hw;

	return ice_ptp_init_time(hw, ns);
}

/**
 * ice_ptp_write_adj - Adjust PHC clock time atomically
 * @pf: Board private structure
 * @adj: Adjustment in nanoseconds
 *
 * Perform an atomic adjustment of the PHC time by the specified number of
 * nanoseconds.
 */
static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
{
	struct ice_hw *hw = &pf->hw;

	return ice_ptp_adj_clock(hw, adj);
}

/**
 * ice_base_incval - Get base timer increment value
 * @pf: Board private structure
 *
 * Look up the base timer increment value for this device. The base increment
 * value is used to define the nominal clock tick rate. This increment value
 * is programmed during device initialization. It is also used as the basis
 * for calculating adjustments using scaled_ppm.
 */
static u64 ice_base_incval(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u64 incval;

	if (ice_is_e810(hw))
		incval = ICE_PTP_NOMINAL_INCVAL_E810;
	else if (ice_e822_time_ref(hw) < NUM_ICE_TIME_REF_FREQ)
		incval = ice_e822_nominal_incval(ice_e822_time_ref(hw));
	else
		/* TIME_REF frequency out of the known range; fall back to a
		 * placeholder increment value.
		 */
		incval = UNKNOWN_INCVAL_E822;

	dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
		incval);

	return incval;
}

/**
 * ice_ptp_reset_ts_memory_quad - Reset timestamp memory for one quad
 * @pf: The PF private data structure
 * @quad: The quad (0-4)
 */
static void ice_ptp_reset_ts_memory_quad(struct ice_pf *pf, int quad)
{
	struct ice_hw *hw = &pf->hw;

	/* Toggle the timestamp memory reset bits: assert, then deassert */
	ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M);
	ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M);
}

/**
 * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state
 * @port: PTP port for which Tx FIFO is checked
 */
static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
{
	int quad = port->port_num / ICE_PORTS_PER_QUAD;
	int offs = port->port_num % ICE_PORTS_PER_QUAD;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u32 val, phy_sts;
	int err;

	pf = ptp_port_to_pf(port);
	hw = &pf->hw;

	/* FIFO_OK is a sentinel meaning the FIFO was already seen empty;
	 * skip the register read in that case.
	 */
	if (port->tx_fifo_busy_cnt == FIFO_OK)
		return 0;

	/* need to read FIFO state; ports 0/1 of the quad share FIFO01_STATUS,
	 * ports 2/3 share FIFO23_STATUS
	 */
	if (offs == 0 || offs == 1)
		err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO01_STATUS,
					     &val);
	else
		err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO23_STATUS,
					     &val);

	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
			port->port_num, err);
		return err;
	}

	/* odd port offsets use the FIFO13 field, even ones FIFO02 */
	if (offs & 0x1)
		phy_sts = (val & Q_REG_FIFO13_M) >> Q_REG_FIFO13_S;
	else
		phy_sts = (val & Q_REG_FIFO02_M) >> Q_REG_FIFO02_S;

	if (phy_sts & FIFO_EMPTY) {
		port->tx_fifo_busy_cnt = FIFO_OK;
		return 0;
	}

	port->tx_fifo_busy_cnt++;

	dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
		port->tx_fifo_busy_cnt, port->port_num);

	/* After repeated failures, reset the whole quad's timestamp memory
	 * and report success so callers can make forward progress.
	 */
	if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
		dev_dbg(ice_pf_to_dev(pf),
			"Port %d Tx FIFO still not empty; resetting quad %d\n",
			port->port_num, quad);
		ice_ptp_reset_ts_memory_quad(pf, quad);
		port->tx_fifo_busy_cnt = FIFO_OK;
		return 0;
	}

	return -EAGAIN;
}

/**
 * ice_ptp_check_tx_offset_valid - Check if the Tx PHY offset is valid
 * @port: the PTP port to check
 *
 * Checks whether the Tx offset for the PHY associated with this port is
 * valid. Returns 0 if the offset is valid, and a non-zero error code if it is
 * not.
 */
static int ice_ptp_check_tx_offset_valid(struct ice_ptp_port *port)
{
	struct ice_pf *pf = ptp_port_to_pf(port);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	u32 val;
	int err;

	/* The Tx offset is only meaningful once the Tx FIFO has drained */
	err = ice_ptp_check_tx_fifo(port);
	if (err)
		return err;

	err = ice_read_phy_reg_e822(hw, port->port_num, P_REG_TX_OV_STATUS,
				    &val);
	if (err) {
		dev_err(dev, "Failed to read TX_OV_STATUS for port %d, err %d\n",
			port->port_num, err);
		return -EAGAIN;
	}

	if (!(val & P_REG_TX_OV_STATUS_OV_M))
		return -EAGAIN;

	return 0;
}

/**
 * ice_ptp_check_rx_offset_valid - Check if the Rx PHY offset is valid
 * @port: the PTP port to check
 *
 * Checks whether the Rx offset for the PHY associated with this port is
 * valid. Returns 0 if the offset is valid, and a non-zero error code if it is
 * not.
 */
static int ice_ptp_check_rx_offset_valid(struct ice_ptp_port *port)
{
	struct ice_pf *pf = ptp_port_to_pf(port);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int err;
	u32 val;

	err = ice_read_phy_reg_e822(hw, port->port_num, P_REG_RX_OV_STATUS,
				    &val);
	if (err) {
		dev_err(dev, "Failed to read RX_OV_STATUS for port %d, err %d\n",
			port->port_num, err);
		return err;
	}

	if (!(val & P_REG_RX_OV_STATUS_OV_M))
		return -EAGAIN;

	return 0;
}

/**
 * ice_ptp_check_offset_valid - Check port offset valid bit
 * @port: Port for which offset valid bit is checked
 *
 * Returns 0 if both Tx and Rx offset are valid, and -EAGAIN if one of the
 * offset is not ready.
 */
static int ice_ptp_check_offset_valid(struct ice_ptp_port *port)
{
	int tx_err, rx_err;

	/* always check both Tx and Rx offset validity */
	tx_err = ice_ptp_check_tx_offset_valid(port);
	rx_err = ice_ptp_check_rx_offset_valid(port);

	if (tx_err || rx_err)
		return -EAGAIN;

	return 0;
}

/**
 * ice_ptp_wait_for_offset_valid - Check for valid Tx and Rx offsets
 * @work: Pointer to the kthread_work structure for this task
 *
 * Check whether both the Tx and Rx offsets are valid for enabling the vernier
 * calibration.
 *
 * Once we have valid offsets from hardware, update the total Tx and Rx
 * offsets, and exit bypass mode. This enables more precise timestamps using
 * the extra data measured during the vernier calibration process.
 */
static void ice_ptp_wait_for_offset_valid(struct kthread_work *work)
{
	struct ice_ptp_port *port;
	int err;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;

	port = container_of(work, struct ice_ptp_port, ov_work.work);
	pf = ptp_port_to_pf(port);
	hw = &pf->hw;
	dev = ice_pf_to_dev(pf);

	/* Do not touch the PHY while a reset is running */
	if (ice_is_reset_in_progress(pf->state))
		return;

	if (ice_ptp_check_offset_valid(port)) {
		/* Offsets not ready yet, try again later */
		kthread_queue_delayed_work(pf->ptp.kworker,
					   &port->ov_work,
					   msecs_to_jiffies(100));
		return;
	}

	/* Offsets are valid, so it is safe to exit bypass mode */
	err = ice_phy_exit_bypass_e822(hw, port->port_num);
	if (err) {
		dev_warn(dev, "Failed to exit bypass mode for PHY port %u, err %d\n",
			 port->port_num, err);
		return;
	}
}

/**
 * ice_ptp_port_phy_stop - Stop timestamping for a PHY port
 * @ptp_port: PTP port to stop
 */
static int
ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
{
	struct ice_pf *pf =
ptp_port_to_pf(ptp_port);
	u8 port = ptp_port->port_num;
	struct ice_hw *hw = &pf->hw;
	int err;

	/* Nothing to do for E810-based devices */
	if (ice_is_e810(hw))
		return 0;

	mutex_lock(&ptp_port->ps_lock);

	kthread_cancel_delayed_work_sync(&ptp_port->ov_work);

	err = ice_stop_phy_timer_e822(hw, port, true);
	if (err)
		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
			port, err);

	mutex_unlock(&ptp_port->ps_lock);

	return err;
}

/**
 * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping
 * @ptp_port: PTP port for which the PHY start is set
 *
 * Start the PHY timestamping block, and initiate Vernier timestamping
 * calibration. If timestamping cannot be calibrated (such as if link is down)
 * then disable the timestamping block instead.
 */
static int
ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
{
	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
	u8 port = ptp_port->port_num;
	struct ice_hw *hw = &pf->hw;
	int err;

	/* Nothing to do for E810-based devices */
	if (ice_is_e810(hw))
		return 0;

	if (!ptp_port->link_up)
		return ice_ptp_port_phy_stop(ptp_port);

	mutex_lock(&ptp_port->ps_lock);

	kthread_cancel_delayed_work_sync(&ptp_port->ov_work);

	/* temporarily disable Tx timestamps while calibrating PHY offset */
	ptp_port->tx.calibrating = true;
	ptp_port->tx_fifo_busy_cnt = 0;

	/* Start the PHY timer in bypass mode */
	err = ice_start_phy_timer_e822(hw, port, true);
	if (err)
		goto out_unlock;

	/* Enable Tx timestamps right away */
	ptp_port->tx.calibrating = false;

	/* Kick off the offset-valid polling that exits bypass mode */
	kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0);

out_unlock:
	if (err)
		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
			port, err);

	mutex_unlock(&ptp_port->ps_lock);

	return err;
}

/**
 * ice_ptp_link_change - Set or clear port registers for timestamping
 * @pf: Board private structure
 * @port: Port for which the PHY start is set
 * @linkup: Link is up or down
 */
int ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
{
	struct ice_ptp_port *ptp_port;

	if (!test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		return 0;

	if (port >= ICE_NUM_EXTERNAL_PORTS)
		return -EINVAL;

	/* Each PF tracks exactly one PTP port */
	ptp_port = &pf->ptp.port;
	if (ptp_port->port_num != port)
		return -EINVAL;

	/* Update cached link err for this port immediately */
	ptp_port->link_up = linkup;

	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		/* PTP is not setup */
		return -EAGAIN;

	return ice_ptp_port_phy_restart(ptp_port);
}

/**
 * ice_ptp_reset_ts_memory - Reset timestamp memory for all quads
 * @pf: The PF private data structure
 */
static void ice_ptp_reset_ts_memory(struct ice_pf *pf)
{
	int quad;

	quad = pf->hw.port_info->lport / ICE_PORTS_PER_QUAD;
	ice_ptp_reset_ts_memory_quad(pf, quad);
}

/**
 * ice_ptp_tx_ena_intr - Enable or disable the Tx timestamp interrupt
 * @pf: PF private structure
 * @ena: bool value to enable or disable interrupt
 * @threshold: Minimum number of packets at which intr is triggered
 *
 * Utility function to enable or disable Tx timestamp interrupt and threshold
 */
static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold)
{
	struct ice_hw *hw = &pf->hw;
	int err = 0;
	int quad;
	u32 val;

	ice_ptp_reset_ts_memory(pf);

	/* Apply the same interrupt configuration to every quad */
	for (quad = 0; quad < ICE_MAX_QUAD; quad++) {
		err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG,
					     &val);
		if (err)
			break;

		if (ena) {
			val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
			val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M;
			val |= ((threshold <<
Q_REG_TX_MEM_GBL_CFG_INTR_THR_S) & 1413 Q_REG_TX_MEM_GBL_CFG_INTR_THR_M); 1414 } else { 1415 val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; 1416 } 1417 1418 err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, 1419 val); 1420 if (err) 1421 break; 1422 } 1423 1424 if (err) 1425 dev_err(ice_pf_to_dev(pf), "PTP failed in intr ena, err %d\n", 1426 err); 1427 return err; 1428 } 1429 1430 /** 1431 * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block 1432 * @pf: Board private structure 1433 */ 1434 static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf) 1435 { 1436 ice_ptp_port_phy_restart(&pf->ptp.port); 1437 } 1438 1439 /** 1440 * ice_ptp_adjfine - Adjust clock increment rate 1441 * @info: the driver's PTP info structure 1442 * @scaled_ppm: Parts per million with 16-bit fractional field 1443 * 1444 * Adjust the frequency of the clock by the indicated scaled ppm from the 1445 * base frequency. 1446 */ 1447 static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm) 1448 { 1449 struct ice_pf *pf = ptp_info_to_pf(info); 1450 struct ice_hw *hw = &pf->hw; 1451 u64 incval, diff; 1452 int neg_adj = 0; 1453 int err; 1454 1455 incval = ice_base_incval(pf); 1456 1457 if (scaled_ppm < 0) { 1458 neg_adj = 1; 1459 scaled_ppm = -scaled_ppm; 1460 } 1461 1462 diff = mul_u64_u64_div_u64(incval, (u64)scaled_ppm, 1463 1000000ULL << 16); 1464 if (neg_adj) 1465 incval -= diff; 1466 else 1467 incval += diff; 1468 1469 err = ice_ptp_write_incval_locked(hw, incval); 1470 if (err) { 1471 dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n", 1472 err); 1473 return -EIO; 1474 } 1475 1476 return 0; 1477 } 1478 1479 /** 1480 * ice_ptp_extts_work - Workqueue task function 1481 * @work: external timestamp work structure 1482 * 1483 * Service for PTP external clock event 1484 */ 1485 static void ice_ptp_extts_work(struct kthread_work *work) 1486 { 1487 struct ice_ptp *ptp = container_of(work, struct ice_ptp, extts_work); 1488 struct ice_pf *pf = 
container_of(ptp, struct ice_pf, ptp); 1489 struct ptp_clock_event event; 1490 struct ice_hw *hw = &pf->hw; 1491 u8 chan, tmr_idx; 1492 u32 hi, lo; 1493 1494 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 1495 /* Event time is captured by one of the two matched registers 1496 * GLTSYN_EVNT_L: 32 LSB of sampled time event 1497 * GLTSYN_EVNT_H: 32 MSB of sampled time event 1498 * Event is defined in GLTSYN_EVNT_0 register 1499 */ 1500 for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) { 1501 /* Check if channel is enabled */ 1502 if (pf->ptp.ext_ts_irq & (1 << chan)) { 1503 lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx)); 1504 hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx)); 1505 event.timestamp = (((u64)hi) << 32) | lo; 1506 event.type = PTP_CLOCK_EXTTS; 1507 event.index = chan; 1508 1509 /* Fire event */ 1510 ptp_clock_event(pf->ptp.clock, &event); 1511 pf->ptp.ext_ts_irq &= ~(1 << chan); 1512 } 1513 } 1514 } 1515 1516 /** 1517 * ice_ptp_cfg_extts - Configure EXTTS pin and channel 1518 * @pf: Board private structure 1519 * @ena: true to enable; false to disable 1520 * @chan: GPIO channel (0-3) 1521 * @gpio_pin: GPIO pin 1522 * @extts_flags: request flags from the ptp_extts_request.flags 1523 */ 1524 static int 1525 ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin, 1526 unsigned int extts_flags) 1527 { 1528 u32 func, aux_reg, gpio_reg, irq_reg; 1529 struct ice_hw *hw = &pf->hw; 1530 u8 tmr_idx; 1531 1532 if (chan > (unsigned int)pf->ptp.info.n_ext_ts) 1533 return -EINVAL; 1534 1535 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 1536 1537 irq_reg = rd32(hw, PFINT_OICR_ENA); 1538 1539 if (ena) { 1540 /* Enable the interrupt */ 1541 irq_reg |= PFINT_OICR_TSYN_EVNT_M; 1542 aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M; 1543 1544 #define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE BIT(0) 1545 #define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE BIT(1) 1546 1547 /* set event level to requested edge */ 1548 if (extts_flags & PTP_FALLING_EDGE) 1549 aux_reg |= 
GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE; 1550 if (extts_flags & PTP_RISING_EDGE) 1551 aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE; 1552 1553 /* Write GPIO CTL reg. 1554 * 0x1 is input sampled by EVENT register(channel) 1555 * + num_in_channels * tmr_idx 1556 */ 1557 func = 1 + chan + (tmr_idx * 3); 1558 gpio_reg = ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & 1559 GLGEN_GPIO_CTL_PIN_FUNC_M); 1560 pf->ptp.ext_ts_chan |= (1 << chan); 1561 } else { 1562 /* clear the values we set to reset defaults */ 1563 aux_reg = 0; 1564 gpio_reg = 0; 1565 pf->ptp.ext_ts_chan &= ~(1 << chan); 1566 if (!pf->ptp.ext_ts_chan) 1567 irq_reg &= ~PFINT_OICR_TSYN_EVNT_M; 1568 } 1569 1570 wr32(hw, PFINT_OICR_ENA, irq_reg); 1571 wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg); 1572 wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg); 1573 1574 return 0; 1575 } 1576 1577 /** 1578 * ice_ptp_cfg_clkout - Configure clock to generate periodic wave 1579 * @pf: Board private structure 1580 * @chan: GPIO channel (0-3) 1581 * @config: desired periodic clk configuration. NULL will disable channel 1582 * @store: If set to true the values will be stored 1583 * 1584 * Configure the internal clock generator modules to generate the clock wave of 1585 * specified period. 1586 */ 1587 static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan, 1588 struct ice_perout_channel *config, bool store) 1589 { 1590 u64 current_time, period, start_time, phase; 1591 struct ice_hw *hw = &pf->hw; 1592 u32 func, val, gpio_pin; 1593 u8 tmr_idx; 1594 1595 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 1596 1597 /* 0. 
Reset mode & out_en in AUX_OUT */
	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);

	/* If we're disabling the output, clear out CLKO and TGT and keep
	 * output level low
	 */
	if (!config || !config->ena) {
		wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0);
		wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0);
		wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0);

		val = GLGEN_GPIO_CTL_PIN_DIR_M;
		gpio_pin = pf->ptp.perout_channels[chan].gpio_pin;
		wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);

		/* Store the value if requested */
		if (store)
			memset(&pf->ptp.perout_channels[chan], 0,
			       sizeof(struct ice_perout_channel));

		return 0;
	}
	period = config->period;
	start_time = config->start_time;
	/* phase = offset of the start time within one period */
	div64_u64_rem(start_time, period, &phase);
	gpio_pin = config->gpio_pin;

	/* 1. Write clkout with half of required period value */
	if (period & 0x1) {
		dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
		goto err;
	}

	period >>= 1;

	/* For proper operation, the GLTSYN_CLKO must be larger than clock tick
	 */
#define MIN_PULSE 3
	if (period <= MIN_PULSE || period > U32_MAX) {
		dev_err(ice_pf_to_dev(pf), "CLK Period must be > %d && < 2^33",
			MIN_PULSE * 2);
		goto err;
	}

	wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));

	/* Allow time for programming before start_time is hit */
	current_time = ice_ptp_read_src_clk_reg(pf, NULL);

	/* if start time is in the past start the timer at the nearest second
	 * maintaining phase
	 */
	if (start_time < current_time)
		start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
				       NSEC_PER_SEC) * NSEC_PER_SEC + phase;

	/* compensate for output propagation delay of the device */
	if (ice_is_e810(hw))
		start_time -= E810_OUT_PROP_DELAY_NS;
	else
		start_time -= ice_e822_pps_delay(ice_e822_time_ref(hw));

	/* 2. Write TARGET time */
	wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
	wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start_time));

	/* 3. Write AUX_OUT register */
	val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);

	/* 4. write GPIO CTL reg */
	func = 8 + chan + (tmr_idx * 4);
	val = GLGEN_GPIO_CTL_PIN_DIR_M |
	      ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & GLGEN_GPIO_CTL_PIN_FUNC_M);
	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);

	/* Store the value if requested */
	if (store) {
		memcpy(&pf->ptp.perout_channels[chan], config,
		       sizeof(struct ice_perout_channel));
		/* cache the phase so a re-enable keeps the same alignment */
		pf->ptp.perout_channels[chan].start_time = phase;
	}

	return 0;
err:
	dev_err(ice_pf_to_dev(pf), "PTP failed to cfg per_clk\n");
	return -EFAULT;
}

/**
 * ice_ptp_disable_all_clkout - Disable all currently configured outputs
 * @pf: pointer to the PF structure
 *
 * Disable all currently configured clock outputs. This is necessary before
 * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_clkout to
 * re-enable the clocks again.
 */
static void ice_ptp_disable_all_clkout(struct ice_pf *pf)
{
	uint i;

	for (i = 0; i < pf->ptp.info.n_per_out; i++)
		if (pf->ptp.perout_channels[i].ena)
			/* store=false keeps the saved config for re-enable */
			ice_ptp_cfg_clkout(pf, i, NULL, false);
}

/**
 * ice_ptp_enable_all_clkout - Enable all configured periodic clock outputs
 * @pf: pointer to the PF structure
 *
 * Enable all currently configured clock outputs. Use this after
 * ice_ptp_disable_all_clkout to reconfigure the output signals according to
 * their configuration.
1709 */ 1710 static void ice_ptp_enable_all_clkout(struct ice_pf *pf) 1711 { 1712 uint i; 1713 1714 for (i = 0; i < pf->ptp.info.n_per_out; i++) 1715 if (pf->ptp.perout_channels[i].ena) 1716 ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i], 1717 false); 1718 } 1719 1720 /** 1721 * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC 1722 * @info: the driver's PTP info structure 1723 * @rq: The requested feature to change 1724 * @on: Enable/disable flag 1725 */ 1726 static int 1727 ice_ptp_gpio_enable_e810(struct ptp_clock_info *info, 1728 struct ptp_clock_request *rq, int on) 1729 { 1730 struct ice_pf *pf = ptp_info_to_pf(info); 1731 struct ice_perout_channel clk_cfg = {0}; 1732 bool sma_pres = false; 1733 unsigned int chan; 1734 u32 gpio_pin; 1735 int err; 1736 1737 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) 1738 sma_pres = true; 1739 1740 switch (rq->type) { 1741 case PTP_CLK_REQ_PEROUT: 1742 chan = rq->perout.index; 1743 if (sma_pres) { 1744 if (chan == ice_pin_desc_e810t[SMA1].chan) 1745 clk_cfg.gpio_pin = GPIO_20; 1746 else if (chan == ice_pin_desc_e810t[SMA2].chan) 1747 clk_cfg.gpio_pin = GPIO_22; 1748 else 1749 return -1; 1750 } else if (ice_is_e810t(&pf->hw)) { 1751 if (chan == 0) 1752 clk_cfg.gpio_pin = GPIO_20; 1753 else 1754 clk_cfg.gpio_pin = GPIO_22; 1755 } else if (chan == PPS_CLK_GEN_CHAN) { 1756 clk_cfg.gpio_pin = PPS_PIN_INDEX; 1757 } else { 1758 clk_cfg.gpio_pin = chan; 1759 } 1760 1761 clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) + 1762 rq->perout.period.nsec); 1763 clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) + 1764 rq->perout.start.nsec); 1765 clk_cfg.ena = !!on; 1766 1767 err = ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true); 1768 break; 1769 case PTP_CLK_REQ_EXTTS: 1770 chan = rq->extts.index; 1771 if (sma_pres) { 1772 if (chan < ice_pin_desc_e810t[SMA2].chan) 1773 gpio_pin = GPIO_21; 1774 else 1775 gpio_pin = GPIO_23; 1776 } else if (ice_is_e810t(&pf->hw)) { 1777 if (chan == 0) 1778 gpio_pin 
= GPIO_21; 1779 else 1780 gpio_pin = GPIO_23; 1781 } else { 1782 gpio_pin = chan; 1783 } 1784 1785 err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin, 1786 rq->extts.flags); 1787 break; 1788 default: 1789 return -EOPNOTSUPP; 1790 } 1791 1792 return err; 1793 } 1794 1795 /** 1796 * ice_ptp_gettimex64 - Get the time of the clock 1797 * @info: the driver's PTP info structure 1798 * @ts: timespec64 structure to hold the current time value 1799 * @sts: Optional parameter for holding a pair of system timestamps from 1800 * the system clock. Will be ignored if NULL is given. 1801 * 1802 * Read the device clock and return the correct value on ns, after converting it 1803 * into a timespec struct. 1804 */ 1805 static int 1806 ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts, 1807 struct ptp_system_timestamp *sts) 1808 { 1809 struct ice_pf *pf = ptp_info_to_pf(info); 1810 struct ice_hw *hw = &pf->hw; 1811 1812 if (!ice_ptp_lock(hw)) { 1813 dev_err(ice_pf_to_dev(pf), "PTP failed to get time\n"); 1814 return -EBUSY; 1815 } 1816 1817 ice_ptp_read_time(pf, ts, sts); 1818 ice_ptp_unlock(hw); 1819 1820 return 0; 1821 } 1822 1823 /** 1824 * ice_ptp_settime64 - Set the time of the clock 1825 * @info: the driver's PTP info structure 1826 * @ts: timespec64 structure that holds the new time value 1827 * 1828 * Set the device clock to the user input value. The conversion from timespec 1829 * to ns happens in the write function. 
 */
static int
ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct timespec64 ts64 = *ts;
	struct ice_hw *hw = &pf->hw;
	int err;

	/* For Vernier mode, we need to recalibrate after new settime
	 * Start with disabling timestamp block
	 */
	if (pf->ptp.port.link_up)
		ice_ptp_port_phy_stop(&pf->ptp.port);

	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto exit;
	}

	/* Disable periodic outputs */
	ice_ptp_disable_all_clkout(pf);

	err = ice_ptp_write_init(pf, &ts64);
	ice_ptp_unlock(hw);

	/* Cached PHC values are stale after a time jump */
	if (!err)
		ice_ptp_reset_cached_phctime(pf);

	/* Reenable periodic outputs */
	ice_ptp_enable_all_clkout(pf);

	/* Recalibrate and re-enable timestamp block */
	if (pf->ptp.port.link_up)
		ice_ptp_port_phy_restart(&pf->ptp.port);
exit:
	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
		return err;
	}

	return 0;
}

/**
 * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
 * @info: the driver's PTP info structure
 * @delta: Offset in nanoseconds to adjust the time by
 */
static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
{
	struct timespec64 now, then;
	int ret;

	then = ns_to_timespec64(delta);
	ret = ice_ptp_gettimex64(info, &now, NULL);
	if (ret)
		return ret;
	now = timespec64_add(now, then);

	return ice_ptp_settime64(info, (const struct timespec64 *)&now);
}

/**
 * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta
 * @info: the driver's PTP info structure
 * @delta: Offset in nanoseconds to adjust the time by
 */
static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);

	/* Hardware only supports atomic adjustments using signed 32-bit
	 * integers. For any adjustment outside this range, perform
	 * a non-atomic get->adjust->set flow.
	 */
	if (delta > S32_MAX || delta < S32_MIN) {
		dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta);
		return ice_ptp_adjtime_nonatomic(info, delta);
	}

	if (!ice_ptp_lock(hw)) {
		dev_err(dev, "PTP failed to acquire semaphore in adjtime\n");
		return -EBUSY;
	}

	/* Disable periodic outputs */
	ice_ptp_disable_all_clkout(pf);

	err = ice_ptp_write_adj(pf, delta);

	/* Reenable periodic outputs */
	ice_ptp_enable_all_clkout(pf);

	ice_ptp_unlock(hw);

	if (err) {
		dev_err(dev, "PTP failed to adjust time, err %d\n", err);
		return err;
	}

	/* Cached PHC values are stale after the adjustment */
	ice_ptp_reset_cached_phctime(pf);

	return 0;
}

#ifdef CONFIG_ICE_HWTS
/**
 * ice_ptp_get_syncdevicetime - Get the cross time stamp info
 * @device: Current device time
 * @system: System counter value read synchronously with device time
 * @ctx: Context provided by timekeeping code
 *
 * Read device and system (ART) clock simultaneously and return the corrected
 * clock values in ns.
 */
static int
ice_ptp_get_syncdevicetime(ktime_t *device,
			   struct system_counterval_t *system,
			   void *ctx)
{
	struct ice_pf *pf = (struct ice_pf *)ctx;
	struct ice_hw *hw = &pf->hw;
	u32 hh_lock, hh_art_ctl;
	int i;

	/* Get the HW lock */
	hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
	if (hh_lock & PFHH_SEM_BUSY_M) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
		return -EFAULT;
	}

	/* Start the ART and device clock sync sequence */
	hh_art_ctl = rd32(hw, GLHH_ART_CTL);
	hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
	wr32(hw, GLHH_ART_CTL, hh_art_ctl);

#define MAX_HH_LOCK_TRIES 100

	for (i = 0; i < MAX_HH_LOCK_TRIES; i++) {
		/* Wait for sync to complete; hardware clears ACTIVE when the
		 * matched capture is latched
		 */
		hh_art_ctl = rd32(hw, GLHH_ART_CTL);
		if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
			udelay(1);
			continue;
		} else {
			u32 hh_ts_lo, hh_ts_hi, tmr_idx;
			u64 hh_ts;

			tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
			/* Read ART time */
			hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
			hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
			*system = convert_art_ns_to_tsc(hh_ts);
			/* Read Device source clock time */
			hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
			hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
			*device = ns_to_ktime(hh_ts);
			break;
		}
	}
	/* Release HW lock */
	hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
	hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
	wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);

	if (i == MAX_HH_LOCK_TRIES)
		return -ETIMEDOUT;

	return 0;
}

/**
 * ice_ptp_getcrosststamp_e822 - Capture a device cross timestamp
 * @info: the driver's PTP info structure
 * @cts: The memory to fill the cross timestamp info
 *
 * Capture a cross timestamp between the ART and the device PTP hardware
 * clock. Fill the cross timestamp information and report it back to the
 * caller.
 *
 * This is only valid for E822 devices which have support for generating the
 * cross timestamp via PCIe PTM.
 *
 * In order to correctly correlate the ART timestamp back to the TSC time, the
 * CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
 */
static int
ice_ptp_getcrosststamp_e822(struct ptp_clock_info *info,
			    struct system_device_crosststamp *cts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);

	return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
					     pf, NULL, cts);
}
#endif /* CONFIG_ICE_HWTS */

/**
 * ice_ptp_get_ts_config - ioctl interface to read the timestamping config
 * @pf: Board private structure
 * @ifr: ioctl data
 *
 * Copy the timestamping config to user buffer
 */
int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
	struct hwtstamp_config *config;

	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return -EIO;

	config = &pf->ptp.tstamp_config;

	return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
-EFAULT : 0;
}

/**
 * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
 * @pf: Board private structure
 * @config: hwtstamp settings requested or saved
 */
static int
ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
{
	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
		ice_set_tx_tstamp(pf, false);
		break;
	case HWTSTAMP_TX_ON:
		ice_set_tx_tstamp(pf, true);
		break;
	default:
		return -ERANGE;
	}

	/* Any PTP filter request enables Rx timestamping for all packets;
	 * only HWTSTAMP_FILTER_NONE turns it off.
	 */
	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		ice_set_rx_tstamp(pf, false);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
	case HWTSTAMP_FILTER_ALL:
		ice_set_rx_tstamp(pf, true);
		break;
	default:
		return -ERANGE;
	}

	return 0;
}

/**
 * ice_ptp_set_ts_config - ioctl interface to control the timestamping
 * @pf: Board private structure
 * @ifr: ioctl data
 *
 * Get the user config and store it
 */
int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return -EAGAIN;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = ice_ptp_set_timestamp_mode(pf, &config);
	if (err)
		return err;

	/* Return the actual configuration set */
	config = pf->ptp.tstamp_config;

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
			    -EFAULT : 0;
}

/**
 * ice_ptp_rx_hwtstamp - Check for an Rx timestamp
 * @rx_ring: Ring to get the VSI info
 * @rx_desc: Receive descriptor
 * @skb: Particular skb to send timestamp with
 *
 * The driver receives a notification in the receive descriptor with timestamp.
 * The timestamp is in ns, so we must convert the result first.
 */
void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
		    union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *hwtstamps;
	u64 ts_ns, cached_time;
	u32 ts_high;

	if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
		return;

	cached_time = READ_ONCE(rx_ring->cached_phctime);

	/* Do not report a timestamp if we don't have a cached PHC time */
	if (!cached_time)
		return;

	/* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
	 * PHC value, rather than accessing the PF. This also allows us to
	 * simply pass the upper 32bits of nanoseconds directly. Calling
	 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these
	 * bits itself.
	 */
	ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
	ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);

	hwtstamps = skb_hwtstamps(skb);
	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
}

/**
 * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins
 * @pf: pointer to the PF structure
 * @info: PTP clock info structure
 *
 * Disable the OS access to the SMA pins. Called to clear out the OS
 * indications of pin support when we fail to setup the E810-T SMA control
 * register.
 */
static void
ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
{
	struct device *dev = ice_pf_to_dev(pf);

	dev_warn(dev, "Failed to configure E810-T SMA pin control\n");

	/* Hide all pin functionality from userspace */
	info->enable = NULL;
	info->verify = NULL;
	info->n_pins = 0;
	info->n_ext_ts = 0;
	info->n_per_out = 0;
}

/**
 * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins
 * @pf: pointer to the PF structure
 * @info: PTP clock info structure
 *
 * Finish setting up the SMA pins by allocating pin_config, and setting it up
 * according to the current status of the SMA. On failure, disable all of the
 * extended SMA pin support.
 */
static void
ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	/* Allocate memory for kernel pins interface (device-managed, freed
	 * automatically when the device is released)
	 */
	info->pin_config = devm_kcalloc(dev, info->n_pins,
					sizeof(*info->pin_config), GFP_KERNEL);
	if (!info->pin_config) {
		ice_ptp_disable_sma_pins_e810t(pf, info);
		return;
	}

	/* Read current SMA status */
	err = ice_get_sma_config_e810t(&pf->hw, info->pin_config);
	if (err)
		ice_ptp_disable_sma_pins_e810t(pf, info);
}

/**
 * ice_ptp_setup_pins_e810t - Setup PTP pins in sysfs
 * @pf: pointer to the PF instance
 * @info: PTP clock capabilities
 */
static void
ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
{
	/* Check if SMA controller is in the netlist */
	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL) &&
	    !ice_is_pca9575_present(&pf->hw))
		ice_clear_feature_support(pf, ICE_F_SMA_CTRL);

	/* Without SMA control, expose only the reduced pin counts and skip
	 * the SMA pin_config setup entirely
	 */
	if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
		info->n_ext_ts = N_EXT_TS_E810_NO_SMA;
		info->n_per_out = N_PER_OUT_E810T_NO_SMA;
		return;
	}

	info->n_per_out = N_PER_OUT_E810T;

	if (ice_is_feature_supported(pf, ICE_F_PTP_EXTTS)) {
		info->n_ext_ts = N_EXT_TS_E810;
		info->n_pins = NUM_PTP_PINS_E810T;
		info->verify = ice_verify_pin_e810t;
	}

	/* Complete setup of the SMA pins */
	ice_ptp_setup_sma_pins_e810t(pf, info);
}

/**
 * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
 * @pf: pointer to the PF instance
 * @info: PTP clock capabilities
 */
static void ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
	info->n_per_out = N_PER_OUT_E810;

	/* External timestamp pins are only exposed when the feature bit is
	 * supported
	 */
	if (!ice_is_feature_supported(pf, ICE_F_PTP_EXTTS))
		return;

	info->n_ext_ts = N_EXT_TS_E810;
}

/**
 * ice_ptp_set_funcs_e822 - Set specialized functions for E822 support
 * @pf: Board private structure
 * @info: PTP info to fill
 *
 * Assign functions to the PTP capabilities structure for E822 devices.
 * Functions which operate across all device families should be set directly
 * in ice_ptp_set_caps. Only add functions here which are distinct for E822
 * devices.
 */
static void
ice_ptp_set_funcs_e822(struct ice_pf *pf, struct ptp_clock_info *info)
{
#ifdef CONFIG_ICE_HWTS
	/* PCIe PTM cross timestamping requires both ART and a known TSC
	 * frequency to correlate device time back to system time
	 */
	if (boot_cpu_has(X86_FEATURE_ART) &&
	    boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
		info->getcrosststamp = ice_ptp_getcrosststamp_e822;
#endif /* CONFIG_ICE_HWTS */
}

/**
 * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
 * @pf: Board private structure
 * @info: PTP info to fill
 *
 * Assign functions to the PTP capabilities structure for E810 devices.
 * Functions which operate across all device families should be set directly
 * in ice_ptp_set_caps. Only add functions here which are distinct for e810
 * devices.
 */
static void
ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
	info->enable = ice_ptp_gpio_enable_e810;

	/* E810-T adds SMA pin control on top of the base E810 pin setup */
	if (ice_is_e810t(&pf->hw))
		ice_ptp_setup_pins_e810t(pf, info);
	else
		ice_ptp_setup_pins_e810(pf, info);
}

/**
 * ice_ptp_set_caps - Set PTP capabilities
 * @pf: Board private structure
 */
static void ice_ptp_set_caps(struct ice_pf *pf)
{
	struct ptp_clock_info *info = &pf->ptp.info;
	struct device *dev = ice_pf_to_dev(pf);

	snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
		 dev_driver_string(dev), dev_name(dev));
	info->owner = THIS_MODULE;
	info->max_adj = 999999999;
	info->adjtime = ice_ptp_adjtime;
	info->adjfine = ice_ptp_adjfine;
	info->gettimex64 = ice_ptp_gettimex64;
	info->settime64 = ice_ptp_settime64;

	/* Device-family specific ops and pin setup */
	if (ice_is_e810(&pf->hw))
		ice_ptp_set_funcs_e810(pf, info);
	else
		ice_ptp_set_funcs_e822(pf, info);
}

/**
 * ice_ptp_create_clock - Create PTP clock device for userspace
 * @pf: Board private structure
 *
 * This function creates a new PTP clock device. It only creates one if we
 * don't already have one. Will return error if it can't create one, but success
 * if we already have a device. Should be used by ice_ptp_init to create clock
 * initially, and prevent global resets from creating new clock devices.
 */
static long ice_ptp_create_clock(struct ice_pf *pf)
{
	struct ptp_clock_info *info;
	struct ptp_clock *clock;
	struct device *dev;

	/* No need to create a clock device if we already have one */
	if (pf->ptp.clock)
		return 0;

	ice_ptp_set_caps(pf);

	info = &pf->ptp.info;
	dev = ice_pf_to_dev(pf);

	/* Attempt to register the clock before enabling the hardware. */
	clock = ptp_clock_register(info, dev);
	if (IS_ERR(clock))
		return PTR_ERR(clock);

	pf->ptp.clock = clock;

	return 0;
}

/**
 * ice_ptp_request_ts - Request an available Tx timestamp index
 * @tx: the PTP Tx timestamp tracker to request from
 * @skb: the SKB to associate with this timestamp request
 *
 * Returns the PHY timestamp register index to use, or -1 if the tracker is
 * not ready or no index is free.
 */
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
{
	u8 idx;

	/* Check if this tracker is initialized */
	if (!tx->init || tx->calibrating)
		return -1;

	spin_lock(&tx->lock);
	/* Find and set the first available index */
	idx = find_first_zero_bit(tx->in_use, tx->len);
	if (idx < tx->len) {
		/* We got a valid index that no other thread could have set. Store
		 * a reference to the skb and the start time to allow discarding old
		 * requests.
		 */
		set_bit(idx, tx->in_use);
		tx->tstamps[idx].start = jiffies;
		/* Hold a reference on the skb until the timestamp is reported */
		tx->tstamps[idx].skb = skb_get(skb);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		ice_trace(tx_tstamp_request, skb, idx);
	}

	spin_unlock(&tx->lock);

	/* return the appropriate PHY timestamp register index, -1 if no
	 * indexes were available.
	 */
	if (idx >= tx->len)
		return -1;
	else
		return idx + tx->quad_offset;
}

/**
 * ice_ptp_process_ts - Spawn kthread work to handle timestamps
 * @pf: Board private structure
 *
 * Queue work required to process the PTP Tx timestamps outside of interrupt
 * context.
 */
void ice_ptp_process_ts(struct ice_pf *pf)
{
	/* Only queue work once the Tx tracker has been initialized */
	if (pf->ptp.port.tx.init)
		kthread_queue_work(pf->ptp.kworker, &pf->ptp.port.tx.work);
}

/* Periodic work item: refreshes the cached PHC time used by Rx timestamp
 * extension and discards stale Tx timestamp requests, then re-arms itself.
 */
static void ice_ptp_periodic_work(struct kthread_work *work)
{
	struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
	int err;

	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return;

	err = ice_ptp_update_cached_phctime(pf);

	ice_ptp_tx_tstamp_cleanup(pf, &pf->ptp.port.tx);

	/* Run twice a second or reschedule if phc update failed */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work,
				   msecs_to_jiffies(err ? 10 : 500));
}

/**
 * ice_ptp_reset - Initialize PTP hardware clock support after reset
 * @pf: Board private structure
 */
void ice_ptp_reset(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	struct ice_hw *hw = &pf->hw;
	struct timespec64 ts;
	int err, itr = 1;
	u64 time_diff;

	/* A PF reset does not disturb the source timer; only re-init the
	 * Tx structures and restart periodic work
	 */
	if (test_bit(ICE_PFR_REQ, pf->state))
		goto pfr;

	/* Only the clock owner re-initializes the PHC itself */
	if (!hw->func_caps.ts_func_info.src_tmr_owned)
		goto reset_ts;

	err = ice_ptp_init_phc(hw);
	if (err)
		goto err;

	/* Acquire the global hardware lock */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto err;
	}

	/* Write the increment time value to PHY and LAN */
	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
	if (err) {
		ice_ptp_unlock(hw);
		goto err;
	}

	/* Write the initial Time value to PHY and LAN using the cached PHC
	 * time before the reset and time difference between stopping and
	 * starting the clock.
	 */
	if (ptp->cached_phc_time) {
		time_diff = ktime_get_real_ns() - ptp->reset_time;
		ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);
	} else {
		ts = ktime_to_timespec64(ktime_get_real());
	}
	err = ice_ptp_write_init(pf, &ts);
	if (err) {
		ice_ptp_unlock(hw);
		goto err;
	}

	/* Release the global hardware lock */
	ice_ptp_unlock(hw);

	if (!ice_is_e810(hw)) {
		/* Enable quad interrupts */
		err = ice_ptp_tx_ena_intr(pf, true, itr);
		if (err)
			goto err;
	}

reset_ts:
	/* Restart the PHY timestamping block */
	ice_ptp_reset_phy_timestamping(pf);

pfr:
	/* Init Tx structures */
	if (ice_is_e810(&pf->hw)) {
		err = ice_ptp_init_tx_e810(pf, &ptp->port.tx);
	} else {
		kthread_init_delayed_work(&ptp->port.ov_work,
					  ice_ptp_wait_for_offset_valid);
		err = ice_ptp_init_tx_e822(pf, &ptp->port.tx,
					   ptp->port.port_num);
	}
	if (err)
		goto err;

	set_bit(ICE_FLAG_PTP, pf->flags);

	/* Start periodic work going */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);

	dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
	return;

err:
	dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
}

/**
 * ice_ptp_prepare_for_reset - Prepare PTP for reset
 * @pf: Board private structure
 */
void ice_ptp_prepare_for_reset(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	u8 src_tmr;

	clear_bit(ICE_FLAG_PTP, pf->flags);

	/* Disable timestamping for both Tx and Rx */
	ice_ptp_cfg_timestamp(pf, false);

	kthread_cancel_delayed_work_sync(&ptp->work);
	kthread_cancel_work_sync(&ptp->extts_work);

	/* For a PF-only reset the remaining device-wide teardown below is
	 * skipped; ice_ptp_reset handles that case via its "pfr" path
	 */
	if (test_bit(ICE_PFR_REQ, pf->state))
		return;

	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);

	/* Disable periodic outputs */
	ice_ptp_disable_all_clkout(pf);

	src_tmr = ice_get_ptp_src_clock_index(&pf->hw);

	/* Disable source clock */
	wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);

	/* Acquire PHC and system timer to restore after reset */
	ptp->reset_time = ktime_get_real_ns();
}

/**
 * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
 * @pf: Board private structure
 *
 * Setup and initialize a PTP clock device that represents the device hardware
 * clock. Save the clock index for other functions connected to the same
 * hardware resource.
 */
static int ice_ptp_init_owner(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	struct timespec64 ts;
	int err, itr = 1;

	err = ice_ptp_init_phc(hw);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n",
			err);
		return err;
	}

	/* Acquire the global hardware lock */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto err_exit;
	}

	/* Write the increment time value to PHY and LAN */
	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
	if (err) {
		ice_ptp_unlock(hw);
		goto err_exit;
	}

	/* Seed the hardware clock from system wall-clock time */
	ts = ktime_to_timespec64(ktime_get_real());
	/* Write the initial Time value to PHY and LAN */
	err = ice_ptp_write_init(pf, &ts);
	if (err) {
		ice_ptp_unlock(hw);
		goto err_exit;
	}

	/* Release the global hardware lock */
	ice_ptp_unlock(hw);

	if (!ice_is_e810(hw)) {
		/* Enable quad interrupts */
		err = ice_ptp_tx_ena_intr(pf, true, itr);
		if (err)
			goto err_exit;
	}

	/* Ensure we have a clock device */
	err = ice_ptp_create_clock(pf);
	if (err)
		goto err_clk;

	/* Store the PTP clock index for other PFs */
	ice_set_ptp_clock_index(pf);

	return 0;

err_clk:
	pf->ptp.clock = NULL;
err_exit:
	return err;
}

/**
 *
ice_ptp_init_work - Initialize PTP work threads
 * @pf: Board private structure
 * @ptp: PF PTP structure
 *
 * Returns 0 on success, or a negative error code if the kthread worker
 * could not be created.
 */
static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
{
	struct kthread_worker *kworker;

	/* Initialize work functions */
	kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);
	kthread_init_work(&ptp->extts_work, ice_ptp_extts_work);

	/* Allocate a kworker for handling work required for the ports
	 * connected to the PTP hardware clock.
	 */
	kworker = kthread_create_worker(0, "ice-ptp-%s",
					dev_name(ice_pf_to_dev(pf)));
	if (IS_ERR(kworker))
		return PTR_ERR(kworker);

	ptp->kworker = kworker;

	/* Start periodic work going */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);

	return 0;
}

/**
 * ice_ptp_init_port - Initialize PTP port structure
 * @pf: Board private structure
 * @ptp_port: PTP port structure
 *
 * Returns 0 on success, or the error from the device-specific Tx tracker
 * initialization.
 */
static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
{
	mutex_init(&ptp_port->ps_lock);

	/* E810 does not use the offset-valid work item */
	if (ice_is_e810(&pf->hw))
		return ice_ptp_init_tx_e810(pf, &ptp_port->tx);

	kthread_init_delayed_work(&ptp_port->ov_work,
				  ice_ptp_wait_for_offset_valid);
	return ice_ptp_init_tx_e822(pf, &ptp_port->tx, ptp_port->port_num);
}

/**
 * ice_ptp_init - Initialize PTP hardware clock support
 * @pf: Board private structure
 *
 * Set up the device for interacting with the PTP hardware clock for all
 * functions, both the function that owns the clock hardware, and the
 * functions connected to the clock hardware.
 *
 * The clock owner will allocate and register a ptp_clock with the
 * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work
 * items used for asynchronous work such as Tx timestamps and periodic work.
 */
void ice_ptp_init(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	struct ice_hw *hw = &pf->hw;
	int err;

	/* If this function owns the clock hardware, it must allocate and
	 * configure the PTP clock device to represent it.
	 */
	if (hw->func_caps.ts_func_info.src_tmr_owned) {
		err = ice_ptp_init_owner(pf);
		if (err)
			goto err;
	}

	ptp->port.port_num = hw->pf_id;
	err = ice_ptp_init_port(pf, &ptp->port);
	if (err)
		goto err;

	/* Start the PHY timestamping block */
	ice_ptp_reset_phy_timestamping(pf);

	set_bit(ICE_FLAG_PTP, pf->flags);
	err = ice_ptp_init_work(pf, ptp);
	if (err)
		goto err;

	dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
	return;

err:
	/* If we registered a PTP clock, release it */
	if (pf->ptp.clock) {
		ptp_clock_unregister(ptp->clock);
		pf->ptp.clock = NULL;
	}
	clear_bit(ICE_FLAG_PTP, pf->flags);
	dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
}

/**
 * ice_ptp_release - Disable the driver/HW support and unregister the clock
 * @pf: Board private structure
 *
 * This function handles the cleanup work required from the initialization by
 * clearing out the important information and unregistering the clock
 */
void ice_ptp_release(struct ice_pf *pf)
{
	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return;

	/* Disable timestamping for both Tx and Rx */
	ice_ptp_cfg_timestamp(pf, false);

	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);

	clear_bit(ICE_FLAG_PTP, pf->flags);

	kthread_cancel_delayed_work_sync(&pf->ptp.work);

	ice_ptp_port_phy_stop(&pf->ptp.port);
	mutex_destroy(&pf->ptp.port.ps_lock);
	if (pf->ptp.kworker) {
		kthread_destroy_worker(pf->ptp.kworker);
		pf->ptp.kworker = NULL;
	}

	/* If no clock was ever registered, there is nothing left to do */
	if (!pf->ptp.clock)
		return;

	/* Disable periodic outputs */
	ice_ptp_disable_all_clkout(pf);

	ice_clear_ptp_clock_index(pf);
	ptp_clock_unregister(pf->ptp.clock);
	pf->ptp.clock = NULL;

	dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
}