// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_trace.h"

#define E810_OUT_PROP_DELAY_NS 1

#define UNKNOWN_INCVAL_E822 0x100000000ULL

static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
	/* name    idx   func          chan */
	{ "GNSS",  GNSS, PTP_PF_EXTTS, 0, { 0, } },
	{ "SMA1",  SMA1, PTP_PF_NONE,  1, { 0, } },
	{ "U.FL1", UFL1, PTP_PF_NONE,  1, { 0, } },
	{ "SMA2",  SMA2, PTP_PF_NONE,  2, { 0, } },
	{ "U.FL2", UFL2, PTP_PF_NONE,  2, { 0, } },
};

/**
 * ice_get_sma_config_e810t
 * @hw: pointer to the hw struct
 * @ptp_pins: pointer to the ptp_pin_desc structure
 *
 * Read the configuration of the SMA control logic and put it into the
 * ptp_pin_desc structure
 */
static int
ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
{
	u8 data, i;
	int status;

	/* Read initial pin state */
	status = ice_read_sma_ctrl_e810t(hw, &data);
	if (status)
		return status;

	/* initialize with defaults */
	for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
		strscpy(ptp_pins[i].name, ice_pin_desc_e810t[i].name,
			sizeof(ptp_pins[i].name));
		ptp_pins[i].index = ice_pin_desc_e810t[i].index;
		ptp_pins[i].func = ice_pin_desc_e810t[i].func;
		ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
	}

	/* Parse SMA1/UFL1 */
	switch (data & ICE_SMA1_MASK_E810T) {
	case ICE_SMA1_MASK_E810T:
	default:
		ptp_pins[SMA1].func = PTP_PF_NONE;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case ICE_SMA1_DIR_EN_E810T:
		ptp_pins[SMA1].func = PTP_PF_PEROUT;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case ICE_SMA1_TX_EN_E810T:
		ptp_pins[SMA1].func = PTP_PF_EXTTS;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case 0:
		ptp_pins[SMA1].func = PTP_PF_EXTTS;
		ptp_pins[UFL1].func = PTP_PF_PEROUT;
		break;
	}

	/* Parse SMA2/UFL2 */
	switch (data & ICE_SMA2_MASK_E810T) {
	case ICE_SMA2_MASK_E810T:
	default:
		ptp_pins[SMA2].func = PTP_PF_NONE;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
		ptp_pins[SMA2].func = PTP_PF_EXTTS;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
		ptp_pins[SMA2].func = PTP_PF_PEROUT;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T):
		ptp_pins[SMA2].func = PTP_PF_NONE;
		ptp_pins[UFL2].func = PTP_PF_EXTTS;
		break;
	case ICE_SMA2_DIR_EN_E810T:
		ptp_pins[SMA2].func = PTP_PF_PEROUT;
		ptp_pins[UFL2].func = PTP_PF_EXTTS;
		break;
	}

	return 0;
}

/**
 * ice_ptp_set_sma_config_e810t
 * @hw: pointer to the hw struct
 * @ptp_pins: pointer to the ptp_pin_desc structure
 *
 * Set the configuration of the SMA control logic based on the configuration
 * requested in the ptp_pins parameter
 */
static int
ice_ptp_set_sma_config_e810t(struct ice_hw *hw,
			     const struct ptp_pin_desc *ptp_pins)
{
	int status;
	u8 data;

	/* SMA1 and UFL1 cannot be set to TX at the same time */
	if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
	    ptp_pins[UFL1].func == PTP_PF_PEROUT)
		return -EINVAL;

	/* SMA2 and UFL2 cannot be set to RX at the same time */
	if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
	    ptp_pins[UFL2].func == PTP_PF_EXTTS)
		return -EINVAL;

	/* Read initial pin state value */
	status = ice_read_sma_ctrl_e810t(hw, &data);
	if (status)
		return status;

	/* Set the right state based on the desired configuration */
	data &= ~ICE_SMA1_MASK_E810T;
	if (ptp_pins[SMA1].func == PTP_PF_NONE &&
	    ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled");
		data |= ICE_SMA1_MASK_E810T;
	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
		   ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 RX");
		data |= ICE_SMA1_TX_EN_E810T;
	} else if (ptp_pins[SMA1].func == PTP_PF_NONE &&
		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
		/* U.FL 1 TX will always enable SMA 1 RX */
		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
	} else if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 TX");
		data |= ICE_SMA1_DIR_EN_E810T;
	}

	data &= ~ICE_SMA2_MASK_E810T;
	if (ptp_pins[SMA2].func == PTP_PF_NONE &&
	    ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled");
		data |= ICE_SMA2_MASK_E810T;
	} else if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
		   ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 RX");
		data |= (ICE_SMA2_TX_EN_E810T |
			 ICE_SMA2_UFL2_RX_DIS_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_NONE &&
		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
		dev_info(ice_hw_to_dev(hw), "UFL2 RX");
		data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 TX");
		data |= (ICE_SMA2_DIR_EN_E810T |
			 ICE_SMA2_UFL2_RX_DIS_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
		dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX");
		data |= ICE_SMA2_DIR_EN_E810T;
	}

	return ice_write_sma_ctrl_e810t(hw, data);
}

/**
 * ice_ptp_set_sma_e810t
 * @info: the driver's PTP info structure
 * @pin: pin index in kernel structure
 * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT)
 *
 * Set the configuration of a single SMA pin
 */
static int
ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin,
		      enum ptp_pin_function func)
{
	struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T];
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;
	int err;

	if (pin < SMA1 || func > PTP_PF_PEROUT)
		return -EOPNOTSUPP;

	err = ice_get_sma_config_e810t(hw, ptp_pins);
	if (err)
		return err;

	/* Disable the same function on the other pin sharing the channel */
	if (pin == SMA1 && ptp_pins[UFL1].func == func)
		ptp_pins[UFL1].func = PTP_PF_NONE;
	if (pin == UFL1 && ptp_pins[SMA1].func == func)
		ptp_pins[SMA1].func = PTP_PF_NONE;

	if (pin == SMA2 && ptp_pins[UFL2].func == func)
		ptp_pins[UFL2].func = PTP_PF_NONE;
	if (pin == UFL2 && ptp_pins[SMA2].func == func)
		ptp_pins[SMA2].func = PTP_PF_NONE;

	/* Set up new pin function in the temp table */
	ptp_pins[pin].func = func;

	return ice_ptp_set_sma_config_e810t(hw, ptp_pins);
}

/**
 * ice_verify_pin_e810t
 * @info: the driver's PTP info structure
 * @pin: Pin index
 * @func: Assigned function
 * @chan: Assigned channel
 *
 * Verify that the pin supports the requested function and that the pin
 * assignments are consistent. Reconfigure the SMA logic attached to the given
 * pin to enable its desired functionality.
 */
static int
ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
		     enum ptp_pin_function func, unsigned int chan)
{
	/* Don't allow channel reassignment */
	if (chan != ice_pin_desc_e810t[pin].chan)
		return -EOPNOTSUPP;

	/* Check if functions are properly assigned */
	switch (func) {
	case PTP_PF_NONE:
		break;
	case PTP_PF_EXTTS:
		if (pin == UFL1)
			return -EOPNOTSUPP;
		break;
	case PTP_PF_PEROUT:
		if (pin == UFL2 || pin == GNSS)
			return -EOPNOTSUPP;
		break;
	case PTP_PF_PHYSYNC:
		return -EOPNOTSUPP;
	}

	return ice_ptp_set_sma_e810t(info, pin, func);
}

/**
 * ice_ptp_configure_tx_tstamp - Enable or disable Tx timestamp interrupt
 * @pf: The PF pointer to search in
 * @on: bool value for whether timestamp interrupt is enabled or disabled
 */
static void ice_ptp_configure_tx_tstamp(struct ice_pf *pf, bool on)
{
	u32 val;

	/* Configure the Tx timestamp interrupt */
	val = rd32(&pf->hw, PFINT_OICR_ENA);
	if (on)
		val |= PFINT_OICR_TSYN_TX_M;
	else
		val &= ~PFINT_OICR_TSYN_TX_M;
	wr32(&pf->hw, PFINT_OICR_ENA, val);
}

/**
 * ice_set_tx_tstamp - Enable or disable Tx timestamping
 * @pf: The PF pointer to search in
 * @on: bool value for whether timestamps are enabled or disabled
 */
static void ice_set_tx_tstamp(struct ice_pf *pf, bool on)
{
	struct ice_vsi *vsi;
	u16 i;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return;

	/* Set the timestamp enable flag for all the Tx rings */
	ice_for_each_txq(vsi, i) {
		if (!vsi->tx_rings[i])
			continue;
		vsi->tx_rings[i]->ptp_tx = on;
	}

	if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_SELF)
		ice_ptp_configure_tx_tstamp(pf, on);

	pf->ptp.tstamp_config.tx_type = on ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
}

/**
 * ice_set_rx_tstamp - Enable or disable Rx timestamping
 * @pf: The PF pointer to search in
 * @on: bool value for whether timestamps are enabled or disabled
 */
static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
{
	struct ice_vsi *vsi;
	u16 i;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return;

	/* Set the timestamp flag for all the Rx rings */
	ice_for_each_rxq(vsi, i) {
		if (!vsi->rx_rings[i])
			continue;
		vsi->rx_rings[i]->ptp_rx = on;
	}

	pf->ptp.tstamp_config.rx_filter = on ? HWTSTAMP_FILTER_ALL :
					       HWTSTAMP_FILTER_NONE;
}

/**
 * ice_ptp_cfg_timestamp - Configure timestamp for init/deinit
 * @pf: Board private structure
 * @ena: bool value to enable or disable time stamp
 *
 * This function will configure timestamping during PTP initialization
 * and deinitialization
 */
void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
{
	ice_set_tx_tstamp(pf, ena);
	ice_set_rx_tstamp(pf, ena);
}

/**
 * ice_ptp_read_src_clk_reg - Read the source clock register
 * @pf: Board private structure
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
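 *
 * Return: the current 64-bit value of the source clock (GLTSYN_TIME)
 *         registers, in nanoseconds.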
 */
static u64
ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
{
	struct ice_hw *hw = &pf->hw;
	u32 hi, lo, lo2;
	u8 tmr_idx;

	tmr_idx = ice_get_ptp_src_clock_index(hw);
	/* Read the system timestamp pre PHC read */
	ptp_read_system_prets(sts);

	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	/* Read the system timestamp post PHC read */
	ptp_read_system_postts(sts);

	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	if (lo2 < lo) {
		/* if TIME_L rolled over read TIME_L again and update
		 * system timestamps
		 */
		ptp_read_system_prets(sts);
		lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
		ptp_read_system_postts(sts);
		hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	}

	return ((u64)hi << 32) | lo;
}

/**
 * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
 * @cached_phc_time: recently cached copy of PHC time
 * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
 *
 * Hardware captures timestamps which contain only 32 bits of nominal
 * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
 * Note that the captured timestamp values may be 40 bits, but the lower
 * 8 bits are sub-nanoseconds and generally discarded.
 *
 * Extend the 32bit nanosecond timestamp using the following algorithm and
 * assumptions:
 *
 * 1) have a recently cached copy of the PHC time
 * 2) assume that the in_tstamp was captured within 2^31 nanoseconds (~2.1
 *    seconds) before or after the PHC time was captured.
 * 3) calculate the delta between the cached time and the timestamp
 * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
 *    captured after the PHC time. In this case, the full timestamp is just
 *    the cached PHC time plus the delta.
 * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
 *    timestamp was captured *before* the PHC time, i.e. because the PHC
 *    cache was updated after the timestamp was captured by hardware. In this
 *    case, the full timestamp is the cached time minus the inverse delta.
 *
 * This algorithm works even if the PHC time was updated after a Tx timestamp
 * was requested, but before the Tx timestamp event was reported from
 * hardware.
 *
 * This calculation primarily relies on keeping the cached PHC time up to
 * date. If the timestamp was captured more than 2^31 nanoseconds after the
 * PHC time, it is possible that the lower 32bits of PHC time have
 * overflowed more than once, and we might generate an incorrect timestamp.
 *
 * This is prevented by (a) periodically updating the cached PHC time once
 * a second, and (b) discarding any Tx timestamp packet if it has waited for
 * a timestamp for more than one second.
 */
static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
{
	u32 delta, phc_time_lo;
	u64 ns;

	/* Extract the lower 32 bits of the PHC time */
	phc_time_lo = (u32)cached_phc_time;

	/* Calculate the delta between the lower 32bits of the cached PHC
	 * time and the in_tstamp value
	 */
	delta = (in_tstamp - phc_time_lo);

	/* Do not assume that the in_tstamp is always more recent than the
	 * cached PHC time. If the delta is large, it indicates that the
	 * in_tstamp was taken in the past, and should be converted
	 * forward.
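	 *
	 * As a purely illustrative example of this wrap-around case (values
	 * chosen for the example, not read from hardware): with
	 * cached_phc_time = 0x100000100 and in_tstamp = 0x00000050, the u32
	 * delta is 0x00000050 - 0x00000100 = 0xFFFFFF50, which is larger
	 * than U32_MAX / 2, so the timestamp is treated as captured before
	 * the cached time and the result is
	 * 0x100000100 - (0x00000100 - 0x00000050) = 0x100000050.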
435 */ 436 if (delta > (U32_MAX / 2)) { 437 /* reverse the delta calculation here */ 438 delta = (phc_time_lo - in_tstamp); 439 ns = cached_phc_time - delta; 440 } else { 441 ns = cached_phc_time + delta; 442 } 443 444 return ns; 445 } 446 447 /** 448 * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds 449 * @pf: Board private structure 450 * @in_tstamp: Ingress/egress 40b timestamp value 451 * 452 * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal 453 * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit. 454 * 455 * *--------------------------------------------------------------* 456 * | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v | 457 * *--------------------------------------------------------------* 458 * 459 * The low bit is an indicator of whether the timestamp is valid. The next 460 * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow, 461 * and the remaining 32 bits are the lower 32 bits of the PHC timer. 462 * 463 * It is assumed that the caller verifies the timestamp is valid prior to 464 * calling this function. 465 * 466 * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC 467 * time stored in the device private PTP structure as the basis for timestamp 468 * extension. 469 * 470 * See ice_ptp_extend_32b_ts for a detailed explanation of the extension 471 * algorithm. 472 */ 473 static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp) 474 { 475 const u64 mask = GENMASK_ULL(31, 0); 476 unsigned long discard_time; 477 478 /* Discard the hardware timestamp if the cached PHC time is too old */ 479 discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000); 480 if (time_is_before_jiffies(discard_time)) { 481 pf->ptp.tx_hwtstamp_discarded++; 482 return 0; 483 } 484 485 return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time, 486 (in_tstamp >> 8) & mask); 487 } 488 489 /** 490 * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps 491 * @tx: the PTP Tx timestamp tracker to check 492 * 493 * Check that a given PTP Tx timestamp tracker is up, i.e. that it is ready 494 * to accept new timestamp requests. 495 * 496 * Assumes the tx->lock spinlock is already held. 497 */ 498 static bool 499 ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx) 500 { 501 lockdep_assert_held(&tx->lock); 502 503 return tx->init && !tx->calibrating; 504 } 505 506 /** 507 * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port 508 * @tx: the PTP Tx timestamp tracker 509 * 510 * Process timestamps captured by the PHY associated with this port. To do 511 * this, loop over each index with a waiting skb. 512 * 513 * If a given index has a valid timestamp, perform the following steps: 514 * 515 * 1) check that the timestamp request is not stale 516 * 2) check that a timestamp is ready and available in the PHY memory bank 517 * 3) read and copy the timestamp out of the PHY register 518 * 4) unlock the index by clearing the associated in_use bit 519 * 5) check if the timestamp is stale, and discard if so 520 * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value 521 * 7) send this 64 bit timestamp to the stack 522 * 523 * Note that we do not hold the tracking lock while reading the Tx timestamp. 524 * This is because reading the timestamp requires taking a mutex that might 525 * sleep. 526 * 527 * The only place where we set in_use is when a new timestamp is initiated 528 * with a slot index. 
This is only called in the hard xmit routine where an 529 * SKB has a request flag set. The only places where we clear this bit is this 530 * function, or during teardown when the Tx timestamp tracker is being 531 * removed. A timestamp index will never be re-used until the in_use bit for 532 * that index is cleared. 533 * 534 * If a Tx thread starts a new timestamp, we might not begin processing it 535 * right away but we will notice it at the end when we re-queue the task. 536 * 537 * If a Tx thread starts a new timestamp just after this function exits, the 538 * interrupt for that timestamp should re-trigger this function once 539 * a timestamp is ready. 540 * 541 * In cases where the PTP hardware clock was directly adjusted, some 542 * timestamps may not be able to safely use the timestamp extension math. In 543 * this case, software will set the stale bit for any outstanding Tx 544 * timestamps when the clock is adjusted. Then this function will discard 545 * those captured timestamps instead of sending them to the stack. 546 * 547 * If a Tx packet has been waiting for more than 2 seconds, it is not possible 548 * to correctly extend the timestamp using the cached PHC time. It is 549 * extremely unlikely that a packet will ever take this long to timestamp. If 550 * we detect a Tx timestamp request that has waited for this long we assume 551 * the packet will never be sent by hardware and discard it without reading 552 * the timestamp register. 553 */ 554 static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx) 555 { 556 struct ice_ptp_port *ptp_port; 557 struct ice_pf *pf; 558 struct ice_hw *hw; 559 u64 tstamp_ready; 560 bool link_up; 561 int err; 562 u8 idx; 563 564 ptp_port = container_of(tx, struct ice_ptp_port, tx); 565 pf = ptp_port_to_pf(ptp_port); 566 hw = &pf->hw; 567 568 /* Read the Tx ready status first */ 569 err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready); 570 if (err) 571 return; 572 573 /* Drop packets if the link went down */ 574 link_up = ptp_port->link_up; 575 576 for_each_set_bit(idx, tx->in_use, tx->len) { 577 struct skb_shared_hwtstamps shhwtstamps = {}; 578 u8 phy_idx = idx + tx->offset; 579 u64 raw_tstamp = 0, tstamp; 580 bool drop_ts = !link_up; 581 struct sk_buff *skb; 582 583 /* Drop packets which have waited for more than 2 seconds */ 584 if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) { 585 drop_ts = true; 586 587 /* Count the number of Tx timestamps that timed out */ 588 pf->ptp.tx_hwtstamp_timeouts++; 589 } 590 591 /* Only read a timestamp from the PHY if its marked as ready 592 * by the tstamp_ready register. This avoids unnecessary 593 * reading of timestamps which are not yet valid. This is 594 * important as we must read all timestamps which are valid 595 * and only timestamps which are valid during each interrupt. 596 * If we do not, the hardware logic for generating a new 597 * interrupt can get stuck on some devices. 598 */ 599 if (!(tstamp_ready & BIT_ULL(phy_idx))) { 600 if (drop_ts) 601 goto skip_ts_read; 602 603 continue; 604 } 605 606 ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx); 607 608 err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp); 609 if (err && !drop_ts) 610 continue; 611 612 ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx); 613 614 /* For PHYs which don't implement a proper timestamp ready 615 * bitmap, verify that the timestamp value is different 616 * from the last cached timestamp. If it is not, skip this for 617 * now assuming it hasn't yet been captured by hardware. 
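		 * (The E810-style tracker is the one that sets verify_cached
		 * at init time; see ice_ptp_init_tx_e810 below.)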
618 */ 619 if (!drop_ts && tx->verify_cached && 620 raw_tstamp == tx->tstamps[idx].cached_tstamp) 621 continue; 622 623 /* Discard any timestamp value without the valid bit set */ 624 if (!(raw_tstamp & ICE_PTP_TS_VALID)) 625 drop_ts = true; 626 627 skip_ts_read: 628 spin_lock(&tx->lock); 629 if (tx->verify_cached && raw_tstamp) 630 tx->tstamps[idx].cached_tstamp = raw_tstamp; 631 clear_bit(idx, tx->in_use); 632 skb = tx->tstamps[idx].skb; 633 tx->tstamps[idx].skb = NULL; 634 if (test_and_clear_bit(idx, tx->stale)) 635 drop_ts = true; 636 spin_unlock(&tx->lock); 637 638 /* It is unlikely but possible that the SKB will have been 639 * flushed at this point due to link change or teardown. 640 */ 641 if (!skb) 642 continue; 643 644 if (drop_ts) { 645 dev_kfree_skb_any(skb); 646 continue; 647 } 648 649 /* Extend the timestamp using cached PHC time */ 650 tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp); 651 if (tstamp) { 652 shhwtstamps.hwtstamp = ns_to_ktime(tstamp); 653 ice_trace(tx_tstamp_complete, skb, idx); 654 } 655 656 skb_tstamp_tx(skb, &shhwtstamps); 657 dev_kfree_skb_any(skb); 658 } 659 } 660 661 /** 662 * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device 663 * @pf: Board private structure 664 */ 665 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf) 666 { 667 struct ice_ptp_port *port; 668 unsigned int i; 669 670 mutex_lock(&pf->ptp.ports_owner.lock); 671 list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member) { 672 struct ice_ptp_tx *tx = &port->tx; 673 674 if (!tx || !tx->init) 675 continue; 676 677 ice_ptp_process_tx_tstamp(tx); 678 } 679 mutex_unlock(&pf->ptp.ports_owner.lock); 680 681 for (i = 0; i < ICE_MAX_QUAD; i++) { 682 u64 tstamp_ready; 683 int err; 684 685 /* Read the Tx ready status first */ 686 err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready); 687 if (err || tstamp_ready) 688 return ICE_TX_TSTAMP_WORK_PENDING; 689 } 690 691 return ICE_TX_TSTAMP_WORK_DONE; 692 } 693 694 /** 695 * ice_ptp_tx_tstamp - Process Tx timestamps for this function. 696 * @tx: Tx tracking structure to initialize 697 * 698 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete 699 * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise. 700 */ 701 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx) 702 { 703 bool more_timestamps; 704 705 if (!tx->init) 706 return ICE_TX_TSTAMP_WORK_DONE; 707 708 /* Process the Tx timestamp tracker */ 709 ice_ptp_process_tx_tstamp(tx); 710 711 /* Check if there are outstanding Tx timestamps */ 712 spin_lock(&tx->lock); 713 more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len); 714 spin_unlock(&tx->lock); 715 716 if (more_timestamps) 717 return ICE_TX_TSTAMP_WORK_PENDING; 718 719 return ICE_TX_TSTAMP_WORK_DONE; 720 } 721 722 /** 723 * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps 724 * @tx: Tx tracking structure to initialize 725 * 726 * Assumes that the length has already been initialized. Do not call directly, 727 * use the ice_ptp_init_tx_* instead. 
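 *
 * Return: 0 on success, -ENOMEM if allocating the timestamp array or either
 *         of the tracking bitmaps fails.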
728 */ 729 static int 730 ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx) 731 { 732 unsigned long *in_use, *stale; 733 struct ice_tx_tstamp *tstamps; 734 735 tstamps = kcalloc(tx->len, sizeof(*tstamps), GFP_KERNEL); 736 in_use = bitmap_zalloc(tx->len, GFP_KERNEL); 737 stale = bitmap_zalloc(tx->len, GFP_KERNEL); 738 739 if (!tstamps || !in_use || !stale) { 740 kfree(tstamps); 741 bitmap_free(in_use); 742 bitmap_free(stale); 743 744 return -ENOMEM; 745 } 746 747 tx->tstamps = tstamps; 748 tx->in_use = in_use; 749 tx->stale = stale; 750 tx->init = 1; 751 752 spin_lock_init(&tx->lock); 753 754 return 0; 755 } 756 757 /** 758 * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker 759 * @pf: Board private structure 760 * @tx: the tracker to flush 761 * 762 * Called during teardown when a Tx tracker is being removed. 763 */ 764 static void 765 ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) 766 { 767 struct ice_hw *hw = &pf->hw; 768 u64 tstamp_ready; 769 int err; 770 u8 idx; 771 772 err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready); 773 if (err) { 774 dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n", 775 tx->block, err); 776 777 /* If we fail to read the Tx timestamp ready bitmap just 778 * skip clearing the PHY timestamps. 779 */ 780 tstamp_ready = 0; 781 } 782 783 for_each_set_bit(idx, tx->in_use, tx->len) { 784 u8 phy_idx = idx + tx->offset; 785 struct sk_buff *skb; 786 787 /* In case this timestamp is ready, we need to clear it. */ 788 if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx))) 789 ice_clear_phy_tstamp(hw, tx->block, phy_idx); 790 791 spin_lock(&tx->lock); 792 skb = tx->tstamps[idx].skb; 793 tx->tstamps[idx].skb = NULL; 794 clear_bit(idx, tx->in_use); 795 clear_bit(idx, tx->stale); 796 spin_unlock(&tx->lock); 797 798 /* Count the number of Tx timestamps flushed */ 799 pf->ptp.tx_hwtstamp_flushed++; 800 801 /* Free the SKB after we've cleared the bit */ 802 dev_kfree_skb_any(skb); 803 } 804 } 805 806 /** 807 * ice_ptp_mark_tx_tracker_stale - Mark unfinished timestamps as stale 808 * @tx: the tracker to mark 809 * 810 * Mark currently outstanding Tx timestamps as stale. This prevents sending 811 * their timestamp value to the stack. This is required to prevent extending 812 * the 40bit hardware timestamp incorrectly. 813 * 814 * This should be called when the PTP clock is modified such as after a set 815 * time request. 816 */ 817 static void 818 ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx) 819 { 820 spin_lock(&tx->lock); 821 bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len); 822 spin_unlock(&tx->lock); 823 } 824 825 /** 826 * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker 827 * @pf: Board private structure 828 * @tx: Tx tracking structure to release 829 * 830 * Free memory associated with the Tx timestamp tracker. 
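 *
 * The tracker is first marked uninitialized and any outstanding interrupt is
 * allowed to complete; remaining timestamps are then flushed before the
 * memory is freed.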
831 */ 832 static void 833 ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) 834 { 835 spin_lock(&tx->lock); 836 tx->init = 0; 837 spin_unlock(&tx->lock); 838 839 /* wait for potentially outstanding interrupt to complete */ 840 synchronize_irq(pf->oicr_irq.virq); 841 842 ice_ptp_flush_tx_tracker(pf, tx); 843 844 kfree(tx->tstamps); 845 tx->tstamps = NULL; 846 847 bitmap_free(tx->in_use); 848 tx->in_use = NULL; 849 850 bitmap_free(tx->stale); 851 tx->stale = NULL; 852 853 tx->len = 0; 854 } 855 856 /** 857 * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps 858 * @pf: Board private structure 859 * @tx: the Tx tracking structure to initialize 860 * @port: the port this structure tracks 861 * 862 * Initialize the Tx timestamp tracker for this port. For generic MAC devices, 863 * the timestamp block is shared for all ports in the same quad. To avoid 864 * ports using the same timestamp index, logically break the block of 865 * registers into chunks based on the port number. 866 */ 867 static int 868 ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) 869 { 870 tx->block = port / ICE_PORTS_PER_QUAD; 871 tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E822; 872 tx->len = INDEX_PER_PORT_E822; 873 tx->verify_cached = 0; 874 875 return ice_ptp_alloc_tx_tracker(tx); 876 } 877 878 /** 879 * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps 880 * @pf: Board private structure 881 * @tx: the Tx tracking structure to initialize 882 * 883 * Initialize the Tx timestamp tracker for this PF. For E810 devices, each 884 * port has its own block of timestamps, independent of the other ports. 885 */ 886 static int 887 ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx) 888 { 889 tx->block = pf->hw.port_info->lport; 890 tx->offset = 0; 891 tx->len = INDEX_PER_PORT_E810; 892 /* The E810 PHY does not provide a timestamp ready bitmap. Instead, 893 * verify new timestamps against cached copy of the last read 894 * timestamp. 895 */ 896 tx->verify_cached = 1; 897 898 return ice_ptp_alloc_tx_tracker(tx); 899 } 900 901 /** 902 * ice_ptp_update_cached_phctime - Update the cached PHC time values 903 * @pf: Board specific private structure 904 * 905 * This function updates the system time values which are cached in the PF 906 * structure and the Rx rings. 907 * 908 * This function must be called periodically to ensure that the cached value 909 * is never more than 2 seconds old. 910 * 911 * Note that the cached copy in the PF PTP structure is always updated, even 912 * if we can't update the copy in the Rx rings. 
913 * 914 * Return: 915 * * 0 - OK, successfully updated 916 * * -EAGAIN - PF was busy, need to reschedule the update 917 */ 918 static int ice_ptp_update_cached_phctime(struct ice_pf *pf) 919 { 920 struct device *dev = ice_pf_to_dev(pf); 921 unsigned long update_before; 922 u64 systime; 923 int i; 924 925 update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000); 926 if (pf->ptp.cached_phc_time && 927 time_is_before_jiffies(update_before)) { 928 unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies; 929 930 dev_warn(dev, "%u msecs passed between update to cached PHC time\n", 931 jiffies_to_msecs(time_taken)); 932 pf->ptp.late_cached_phc_updates++; 933 } 934 935 /* Read the current PHC time */ 936 systime = ice_ptp_read_src_clk_reg(pf, NULL); 937 938 /* Update the cached PHC time stored in the PF structure */ 939 WRITE_ONCE(pf->ptp.cached_phc_time, systime); 940 WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies); 941 942 if (test_and_set_bit(ICE_CFG_BUSY, pf->state)) 943 return -EAGAIN; 944 945 ice_for_each_vsi(pf, i) { 946 struct ice_vsi *vsi = pf->vsi[i]; 947 int j; 948 949 if (!vsi) 950 continue; 951 952 if (vsi->type != ICE_VSI_PF) 953 continue; 954 955 ice_for_each_rxq(vsi, j) { 956 if (!vsi->rx_rings[j]) 957 continue; 958 WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime); 959 } 960 } 961 clear_bit(ICE_CFG_BUSY, pf->state); 962 963 return 0; 964 } 965 966 /** 967 * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update 968 * @pf: Board specific private structure 969 * 970 * This function must be called when the cached PHC time is no longer valid, 971 * such as after a time adjustment. It marks any currently outstanding Tx 972 * timestamps as stale and updates the cached PHC time for both the PF and Rx 973 * rings. 974 * 975 * If updating the PHC time cannot be done immediately, a warning message is 976 * logged and the work item is scheduled immediately to minimize the window 977 * with a wrong cached timestamp. 978 */ 979 static void ice_ptp_reset_cached_phctime(struct ice_pf *pf) 980 { 981 struct device *dev = ice_pf_to_dev(pf); 982 int err; 983 984 /* Update the cached PHC time immediately if possible, otherwise 985 * schedule the work item to execute soon. 986 */ 987 err = ice_ptp_update_cached_phctime(pf); 988 if (err) { 989 /* If another thread is updating the Rx rings, we won't 990 * properly reset them here. This could lead to reporting of 991 * invalid timestamps, but there isn't much we can do. 992 */ 993 dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n", 994 __func__); 995 996 /* Queue the work item to update the Rx rings when possible */ 997 kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 998 msecs_to_jiffies(10)); 999 } 1000 1001 /* Mark any outstanding timestamps as stale, since they might have 1002 * been captured in hardware before the time update. This could lead 1003 * to us extending them with the wrong cached value resulting in 1004 * incorrect timestamp values. 1005 */ 1006 ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx); 1007 } 1008 1009 /** 1010 * ice_ptp_read_time - Read the time from the device 1011 * @pf: Board private structure 1012 * @ts: timespec structure to hold the current time value 1013 * @sts: Optional parameter for holding a pair of system timestamps from 1014 * the system clock. Will be ignored if NULL is given. 1015 * 1016 * This function reads the source clock registers and stores them in a timespec. 
1017 * However, since the registers are 64 bits of nanoseconds, we must convert the 1018 * result to a timespec before we can return. 1019 */ 1020 static void 1021 ice_ptp_read_time(struct ice_pf *pf, struct timespec64 *ts, 1022 struct ptp_system_timestamp *sts) 1023 { 1024 u64 time_ns = ice_ptp_read_src_clk_reg(pf, sts); 1025 1026 *ts = ns_to_timespec64(time_ns); 1027 } 1028 1029 /** 1030 * ice_ptp_write_init - Set PHC time to provided value 1031 * @pf: Board private structure 1032 * @ts: timespec structure that holds the new time value 1033 * 1034 * Set the PHC time to the specified time provided in the timespec. 1035 */ 1036 static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts) 1037 { 1038 u64 ns = timespec64_to_ns(ts); 1039 struct ice_hw *hw = &pf->hw; 1040 1041 return ice_ptp_init_time(hw, ns); 1042 } 1043 1044 /** 1045 * ice_ptp_write_adj - Adjust PHC clock time atomically 1046 * @pf: Board private structure 1047 * @adj: Adjustment in nanoseconds 1048 * 1049 * Perform an atomic adjustment of the PHC time by the specified number of 1050 * nanoseconds. 1051 */ 1052 static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj) 1053 { 1054 struct ice_hw *hw = &pf->hw; 1055 1056 return ice_ptp_adj_clock(hw, adj); 1057 } 1058 1059 /** 1060 * ice_base_incval - Get base timer increment value 1061 * @pf: Board private structure 1062 * 1063 * Look up the base timer increment value for this device. The base increment 1064 * value is used to define the nominal clock tick rate. This increment value 1065 * is programmed during device initialization. It is also used as the basis 1066 * for calculating adjustments using scaled_ppm. 1067 */ 1068 static u64 ice_base_incval(struct ice_pf *pf) 1069 { 1070 struct ice_hw *hw = &pf->hw; 1071 u64 incval; 1072 1073 if (ice_is_e810(hw)) 1074 incval = ICE_PTP_NOMINAL_INCVAL_E810; 1075 else if (ice_e822_time_ref(hw) < NUM_ICE_TIME_REF_FREQ) 1076 incval = ice_e822_nominal_incval(ice_e822_time_ref(hw)); 1077 else 1078 incval = UNKNOWN_INCVAL_E822; 1079 1080 dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n", 1081 incval); 1082 1083 return incval; 1084 } 1085 1086 /** 1087 * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state 1088 * @port: PTP port for which Tx FIFO is checked 1089 */ 1090 static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port) 1091 { 1092 int quad = port->port_num / ICE_PORTS_PER_QUAD; 1093 int offs = port->port_num % ICE_PORTS_PER_QUAD; 1094 struct ice_pf *pf; 1095 struct ice_hw *hw; 1096 u32 val, phy_sts; 1097 int err; 1098 1099 pf = ptp_port_to_pf(port); 1100 hw = &pf->hw; 1101 1102 if (port->tx_fifo_busy_cnt == FIFO_OK) 1103 return 0; 1104 1105 /* need to read FIFO state */ 1106 if (offs == 0 || offs == 1) 1107 err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO01_STATUS, 1108 &val); 1109 else 1110 err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO23_STATUS, 1111 &val); 1112 1113 if (err) { 1114 dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n", 1115 port->port_num, err); 1116 return err; 1117 } 1118 1119 if (offs & 0x1) 1120 phy_sts = (val & Q_REG_FIFO13_M) >> Q_REG_FIFO13_S; 1121 else 1122 phy_sts = (val & Q_REG_FIFO02_M) >> Q_REG_FIFO02_S; 1123 1124 if (phy_sts & FIFO_EMPTY) { 1125 port->tx_fifo_busy_cnt = FIFO_OK; 1126 return 0; 1127 } 1128 1129 port->tx_fifo_busy_cnt++; 1130 1131 dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n", 1132 port->tx_fifo_busy_cnt, port->port_num); 1133 1134 if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) { 1135 
dev_dbg(ice_pf_to_dev(pf), 1136 "Port %d Tx FIFO still not empty; resetting quad %d\n", 1137 port->port_num, quad); 1138 ice_ptp_reset_ts_memory_quad_e822(hw, quad); 1139 port->tx_fifo_busy_cnt = FIFO_OK; 1140 return 0; 1141 } 1142 1143 return -EAGAIN; 1144 } 1145 1146 /** 1147 * ice_ptp_wait_for_offsets - Check for valid Tx and Rx offsets 1148 * @work: Pointer to the kthread_work structure for this task 1149 * 1150 * Check whether hardware has completed measuring the Tx and Rx offset values 1151 * used to configure and enable vernier timestamp calibration. 1152 * 1153 * Once the offset in either direction is measured, configure the associated 1154 * registers with the calibrated offset values and enable timestamping. The Tx 1155 * and Rx directions are configured independently as soon as their associated 1156 * offsets are known. 1157 * 1158 * This function reschedules itself until both Tx and Rx calibration have 1159 * completed. 1160 */ 1161 static void ice_ptp_wait_for_offsets(struct kthread_work *work) 1162 { 1163 struct ice_ptp_port *port; 1164 struct ice_pf *pf; 1165 struct ice_hw *hw; 1166 int tx_err; 1167 int rx_err; 1168 1169 port = container_of(work, struct ice_ptp_port, ov_work.work); 1170 pf = ptp_port_to_pf(port); 1171 hw = &pf->hw; 1172 1173 if (ice_is_reset_in_progress(pf->state)) { 1174 /* wait for device driver to complete reset */ 1175 kthread_queue_delayed_work(pf->ptp.kworker, 1176 &port->ov_work, 1177 msecs_to_jiffies(100)); 1178 return; 1179 } 1180 1181 tx_err = ice_ptp_check_tx_fifo(port); 1182 if (!tx_err) 1183 tx_err = ice_phy_cfg_tx_offset_e822(hw, port->port_num); 1184 rx_err = ice_phy_cfg_rx_offset_e822(hw, port->port_num); 1185 if (tx_err || rx_err) { 1186 /* Tx and/or Rx offset not yet configured, try again later */ 1187 kthread_queue_delayed_work(pf->ptp.kworker, 1188 &port->ov_work, 1189 msecs_to_jiffies(100)); 1190 return; 1191 } 1192 } 1193 1194 /** 1195 * ice_ptp_port_phy_stop - Stop timestamping for a PHY port 1196 * @ptp_port: PTP port to stop 1197 */ 1198 static int 1199 ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port) 1200 { 1201 struct ice_pf *pf = ptp_port_to_pf(ptp_port); 1202 u8 port = ptp_port->port_num; 1203 struct ice_hw *hw = &pf->hw; 1204 int err; 1205 1206 if (ice_is_e810(hw)) 1207 return 0; 1208 1209 mutex_lock(&ptp_port->ps_lock); 1210 1211 kthread_cancel_delayed_work_sync(&ptp_port->ov_work); 1212 1213 err = ice_stop_phy_timer_e822(hw, port, true); 1214 if (err) 1215 dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n", 1216 port, err); 1217 1218 mutex_unlock(&ptp_port->ps_lock); 1219 1220 return err; 1221 } 1222 1223 /** 1224 * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping 1225 * @ptp_port: PTP port for which the PHY start is set 1226 * 1227 * Start the PHY timestamping block, and initiate Vernier timestamping 1228 * calibration. If timestamping cannot be calibrated (such as if link is down) 1229 * then disable the timestamping block instead. 
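 *
 * Return: 0 on success (including the E810 no-op case), otherwise a negative
 *         error code from stopping or starting the PHY timer.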
1230 */ 1231 static int 1232 ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port) 1233 { 1234 struct ice_pf *pf = ptp_port_to_pf(ptp_port); 1235 u8 port = ptp_port->port_num; 1236 struct ice_hw *hw = &pf->hw; 1237 int err; 1238 1239 if (ice_is_e810(hw)) 1240 return 0; 1241 1242 if (!ptp_port->link_up) 1243 return ice_ptp_port_phy_stop(ptp_port); 1244 1245 mutex_lock(&ptp_port->ps_lock); 1246 1247 kthread_cancel_delayed_work_sync(&ptp_port->ov_work); 1248 1249 /* temporarily disable Tx timestamps while calibrating PHY offset */ 1250 spin_lock(&ptp_port->tx.lock); 1251 ptp_port->tx.calibrating = true; 1252 spin_unlock(&ptp_port->tx.lock); 1253 ptp_port->tx_fifo_busy_cnt = 0; 1254 1255 /* Start the PHY timer in Vernier mode */ 1256 err = ice_start_phy_timer_e822(hw, port); 1257 if (err) 1258 goto out_unlock; 1259 1260 /* Enable Tx timestamps right away */ 1261 spin_lock(&ptp_port->tx.lock); 1262 ptp_port->tx.calibrating = false; 1263 spin_unlock(&ptp_port->tx.lock); 1264 1265 kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0); 1266 1267 out_unlock: 1268 if (err) 1269 dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n", 1270 port, err); 1271 1272 mutex_unlock(&ptp_port->ps_lock); 1273 1274 return err; 1275 } 1276 1277 /** 1278 * ice_ptp_link_change - Reconfigure PTP after link status change 1279 * @pf: Board private structure 1280 * @port: Port for which the PHY start is set 1281 * @linkup: Link is up or down 1282 */ 1283 void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) 1284 { 1285 struct ice_ptp_port *ptp_port; 1286 struct ice_hw *hw = &pf->hw; 1287 1288 if (!test_bit(ICE_FLAG_PTP, pf->flags)) 1289 return; 1290 1291 if (WARN_ON_ONCE(port >= ICE_NUM_EXTERNAL_PORTS)) 1292 return; 1293 1294 ptp_port = &pf->ptp.port; 1295 if (WARN_ON_ONCE(ptp_port->port_num != port)) 1296 return; 1297 1298 /* Update cached link status for this port immediately */ 1299 ptp_port->link_up = linkup; 1300 1301 switch (hw->phy_model) { 1302 case ICE_PHY_E810: 1303 /* Do not reconfigure E810 PHY */ 1304 return; 1305 case ICE_PHY_E822: 1306 ice_ptp_port_phy_restart(ptp_port); 1307 return; 1308 default: 1309 dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__); 1310 } 1311 } 1312 1313 /** 1314 * ice_ptp_tx_ena_intr - Enable or disable the Tx timestamp interrupt 1315 * @pf: PF private structure 1316 * @ena: bool value to enable or disable interrupt 1317 * @threshold: Minimum number of packets at which intr is triggered 1318 * 1319 * Utility function to enable or disable Tx timestamp interrupt and threshold 1320 */ 1321 static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold) 1322 { 1323 struct ice_hw *hw = &pf->hw; 1324 int err = 0; 1325 int quad; 1326 u32 val; 1327 1328 ice_ptp_reset_ts_memory(hw); 1329 1330 for (quad = 0; quad < ICE_MAX_QUAD; quad++) { 1331 err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, 1332 &val); 1333 if (err) 1334 break; 1335 1336 if (ena) { 1337 val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; 1338 val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M; 1339 val |= ((threshold << Q_REG_TX_MEM_GBL_CFG_INTR_THR_S) & 1340 Q_REG_TX_MEM_GBL_CFG_INTR_THR_M); 1341 } else { 1342 val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; 1343 } 1344 1345 err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, 1346 val); 1347 if (err) 1348 break; 1349 } 1350 1351 if (err) 1352 dev_err(ice_pf_to_dev(pf), "PTP failed in intr ena, err %d\n", 1353 err); 1354 return err; 1355 } 1356 1357 /** 1358 * ice_ptp_reset_phy_timestamping - Reset PHY timestamping 
block 1359 * @pf: Board private structure 1360 */ 1361 static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf) 1362 { 1363 ice_ptp_port_phy_restart(&pf->ptp.port); 1364 } 1365 1366 /** 1367 * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping 1368 * @pf: Board private structure 1369 */ 1370 static void ice_ptp_restart_all_phy(struct ice_pf *pf) 1371 { 1372 struct list_head *entry; 1373 1374 list_for_each(entry, &pf->ptp.ports_owner.ports) { 1375 struct ice_ptp_port *port = list_entry(entry, 1376 struct ice_ptp_port, 1377 list_member); 1378 1379 if (port->link_up) 1380 ice_ptp_port_phy_restart(port); 1381 } 1382 } 1383 1384 /** 1385 * ice_ptp_adjfine - Adjust clock increment rate 1386 * @info: the driver's PTP info structure 1387 * @scaled_ppm: Parts per million with 16-bit fractional field 1388 * 1389 * Adjust the frequency of the clock by the indicated scaled ppm from the 1390 * base frequency. 1391 */ 1392 static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm) 1393 { 1394 struct ice_pf *pf = ptp_info_to_pf(info); 1395 struct ice_hw *hw = &pf->hw; 1396 u64 incval; 1397 int err; 1398 1399 incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm); 1400 err = ice_ptp_write_incval_locked(hw, incval); 1401 if (err) { 1402 dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n", 1403 err); 1404 return -EIO; 1405 } 1406 1407 return 0; 1408 } 1409 1410 /** 1411 * ice_ptp_extts_event - Process PTP external clock event 1412 * @pf: Board private structure 1413 */ 1414 void ice_ptp_extts_event(struct ice_pf *pf) 1415 { 1416 struct ptp_clock_event event; 1417 struct ice_hw *hw = &pf->hw; 1418 u8 chan, tmr_idx; 1419 u32 hi, lo; 1420 1421 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 1422 /* Event time is captured by one of the two matched registers 1423 * GLTSYN_EVNT_L: 32 LSB of sampled time event 1424 * GLTSYN_EVNT_H: 32 MSB of sampled time event 1425 * Event is defined in GLTSYN_EVNT_0 register 1426 */ 1427 for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) { 1428 /* Check if channel is enabled */ 1429 if (pf->ptp.ext_ts_irq & (1 << chan)) { 1430 lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx)); 1431 hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx)); 1432 event.timestamp = (((u64)hi) << 32) | lo; 1433 event.type = PTP_CLOCK_EXTTS; 1434 event.index = chan; 1435 1436 /* Fire event */ 1437 ptp_clock_event(pf->ptp.clock, &event); 1438 pf->ptp.ext_ts_irq &= ~(1 << chan); 1439 } 1440 } 1441 } 1442 1443 /** 1444 * ice_ptp_cfg_extts - Configure EXTTS pin and channel 1445 * @pf: Board private structure 1446 * @ena: true to enable; false to disable 1447 * @chan: GPIO channel (0-3) 1448 * @gpio_pin: GPIO pin 1449 * @extts_flags: request flags from the ptp_extts_request.flags 1450 */ 1451 static int 1452 ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin, 1453 unsigned int extts_flags) 1454 { 1455 u32 func, aux_reg, gpio_reg, irq_reg; 1456 struct ice_hw *hw = &pf->hw; 1457 u8 tmr_idx; 1458 1459 if (chan > (unsigned int)pf->ptp.info.n_ext_ts) 1460 return -EINVAL; 1461 1462 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 1463 1464 irq_reg = rd32(hw, PFINT_OICR_ENA); 1465 1466 if (ena) { 1467 /* Enable the interrupt */ 1468 irq_reg |= PFINT_OICR_TSYN_EVNT_M; 1469 aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M; 1470 1471 #define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE BIT(0) 1472 #define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE BIT(1) 1473 1474 /* set event level to requested edge */ 1475 if (extts_flags & PTP_FALLING_EDGE) 1476 aux_reg |= 
GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE; 1477 if (extts_flags & PTP_RISING_EDGE) 1478 aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE; 1479 1480 /* Write GPIO CTL reg. 1481 * 0x1 is input sampled by EVENT register(channel) 1482 * + num_in_channels * tmr_idx 1483 */ 1484 func = 1 + chan + (tmr_idx * 3); 1485 gpio_reg = ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & 1486 GLGEN_GPIO_CTL_PIN_FUNC_M); 1487 pf->ptp.ext_ts_chan |= (1 << chan); 1488 } else { 1489 /* clear the values we set to reset defaults */ 1490 aux_reg = 0; 1491 gpio_reg = 0; 1492 pf->ptp.ext_ts_chan &= ~(1 << chan); 1493 if (!pf->ptp.ext_ts_chan) 1494 irq_reg &= ~PFINT_OICR_TSYN_EVNT_M; 1495 } 1496 1497 wr32(hw, PFINT_OICR_ENA, irq_reg); 1498 wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg); 1499 wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg); 1500 1501 return 0; 1502 } 1503 1504 /** 1505 * ice_ptp_cfg_clkout - Configure clock to generate periodic wave 1506 * @pf: Board private structure 1507 * @chan: GPIO channel (0-3) 1508 * @config: desired periodic clk configuration. NULL will disable channel 1509 * @store: If set to true the values will be stored 1510 * 1511 * Configure the internal clock generator modules to generate the clock wave of 1512 * specified period. 1513 */ 1514 static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan, 1515 struct ice_perout_channel *config, bool store) 1516 { 1517 u64 current_time, period, start_time, phase; 1518 struct ice_hw *hw = &pf->hw; 1519 u32 func, val, gpio_pin; 1520 u8 tmr_idx; 1521 1522 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 1523 1524 /* 0. Reset mode & out_en in AUX_OUT */ 1525 wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0); 1526 1527 /* If we're disabling the output, clear out CLKO and TGT and keep 1528 * output level low 1529 */ 1530 if (!config || !config->ena) { 1531 wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0); 1532 wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0); 1533 wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0); 1534 1535 val = GLGEN_GPIO_CTL_PIN_DIR_M; 1536 gpio_pin = pf->ptp.perout_channels[chan].gpio_pin; 1537 wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val); 1538 1539 /* Store the value if requested */ 1540 if (store) 1541 memset(&pf->ptp.perout_channels[chan], 0, 1542 sizeof(struct ice_perout_channel)); 1543 1544 return 0; 1545 } 1546 period = config->period; 1547 start_time = config->start_time; 1548 div64_u64_rem(start_time, period, &phase); 1549 gpio_pin = config->gpio_pin; 1550 1551 /* 1. Write clkout with half of required period value */ 1552 if (period & 0x1) { 1553 dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n"); 1554 goto err; 1555 } 1556 1557 period >>= 1; 1558 1559 /* For proper operation, the GLTSYN_CLKO must be larger than clock tick 1560 */ 1561 #define MIN_PULSE 3 1562 if (period <= MIN_PULSE || period > U32_MAX) { 1563 dev_err(ice_pf_to_dev(pf), "CLK Period must be > %d && < 2^33", 1564 MIN_PULSE * 2); 1565 goto err; 1566 } 1567 1568 wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period)); 1569 1570 /* Allow time for programming before start_time is hit */ 1571 current_time = ice_ptp_read_src_clk_reg(pf, NULL); 1572 1573 /* if start time is in the past start the timer at the nearest second 1574 * maintaining phase 1575 */ 1576 if (start_time < current_time) 1577 start_time = div64_u64(current_time + NSEC_PER_SEC - 1, 1578 NSEC_PER_SEC) * NSEC_PER_SEC + phase; 1579 1580 if (ice_is_e810(hw)) 1581 start_time -= E810_OUT_PROP_DELAY_NS; 1582 else 1583 start_time -= ice_e822_pps_delay(ice_e822_time_ref(hw)); 1584 1585 /* 2. 
Write TARGET time */ 1586 wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time)); 1587 wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start_time)); 1588 1589 /* 3. Write AUX_OUT register */ 1590 val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M; 1591 wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val); 1592 1593 /* 4. write GPIO CTL reg */ 1594 func = 8 + chan + (tmr_idx * 4); 1595 val = GLGEN_GPIO_CTL_PIN_DIR_M | 1596 ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & GLGEN_GPIO_CTL_PIN_FUNC_M); 1597 wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val); 1598 1599 /* Store the value if requested */ 1600 if (store) { 1601 memcpy(&pf->ptp.perout_channels[chan], config, 1602 sizeof(struct ice_perout_channel)); 1603 pf->ptp.perout_channels[chan].start_time = phase; 1604 } 1605 1606 return 0; 1607 err: 1608 dev_err(ice_pf_to_dev(pf), "PTP failed to cfg per_clk\n"); 1609 return -EFAULT; 1610 } 1611 1612 /** 1613 * ice_ptp_disable_all_clkout - Disable all currently configured outputs 1614 * @pf: pointer to the PF structure 1615 * 1616 * Disable all currently configured clock outputs. This is necessary before 1617 * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_clkout to 1618 * re-enable the clocks again. 1619 */ 1620 static void ice_ptp_disable_all_clkout(struct ice_pf *pf) 1621 { 1622 uint i; 1623 1624 for (i = 0; i < pf->ptp.info.n_per_out; i++) 1625 if (pf->ptp.perout_channels[i].ena) 1626 ice_ptp_cfg_clkout(pf, i, NULL, false); 1627 } 1628 1629 /** 1630 * ice_ptp_enable_all_clkout - Enable all configured periodic clock outputs 1631 * @pf: pointer to the PF structure 1632 * 1633 * Enable all currently configured clock outputs. Use this after 1634 * ice_ptp_disable_all_clkout to reconfigure the output signals according to 1635 * their configuration. 
1636 */ 1637 static void ice_ptp_enable_all_clkout(struct ice_pf *pf) 1638 { 1639 uint i; 1640 1641 for (i = 0; i < pf->ptp.info.n_per_out; i++) 1642 if (pf->ptp.perout_channels[i].ena) 1643 ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i], 1644 false); 1645 } 1646 1647 /** 1648 * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC 1649 * @info: the driver's PTP info structure 1650 * @rq: The requested feature to change 1651 * @on: Enable/disable flag 1652 */ 1653 static int 1654 ice_ptp_gpio_enable_e810(struct ptp_clock_info *info, 1655 struct ptp_clock_request *rq, int on) 1656 { 1657 struct ice_pf *pf = ptp_info_to_pf(info); 1658 struct ice_perout_channel clk_cfg = {0}; 1659 bool sma_pres = false; 1660 unsigned int chan; 1661 u32 gpio_pin; 1662 int err; 1663 1664 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) 1665 sma_pres = true; 1666 1667 switch (rq->type) { 1668 case PTP_CLK_REQ_PEROUT: 1669 chan = rq->perout.index; 1670 if (sma_pres) { 1671 if (chan == ice_pin_desc_e810t[SMA1].chan) 1672 clk_cfg.gpio_pin = GPIO_20; 1673 else if (chan == ice_pin_desc_e810t[SMA2].chan) 1674 clk_cfg.gpio_pin = GPIO_22; 1675 else 1676 return -1; 1677 } else if (ice_is_e810t(&pf->hw)) { 1678 if (chan == 0) 1679 clk_cfg.gpio_pin = GPIO_20; 1680 else 1681 clk_cfg.gpio_pin = GPIO_22; 1682 } else if (chan == PPS_CLK_GEN_CHAN) { 1683 clk_cfg.gpio_pin = PPS_PIN_INDEX; 1684 } else { 1685 clk_cfg.gpio_pin = chan; 1686 } 1687 1688 clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) + 1689 rq->perout.period.nsec); 1690 clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) + 1691 rq->perout.start.nsec); 1692 clk_cfg.ena = !!on; 1693 1694 err = ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true); 1695 break; 1696 case PTP_CLK_REQ_EXTTS: 1697 chan = rq->extts.index; 1698 if (sma_pres) { 1699 if (chan < ice_pin_desc_e810t[SMA2].chan) 1700 gpio_pin = GPIO_21; 1701 else 1702 gpio_pin = GPIO_23; 1703 } else if (ice_is_e810t(&pf->hw)) { 1704 if (chan == 0) 1705 gpio_pin = GPIO_21; 1706 else 1707 gpio_pin = GPIO_23; 1708 } else { 1709 gpio_pin = chan; 1710 } 1711 1712 err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin, 1713 rq->extts.flags); 1714 break; 1715 default: 1716 return -EOPNOTSUPP; 1717 } 1718 1719 return err; 1720 } 1721 1722 /** 1723 * ice_ptp_gpio_enable_e823 - Enable/disable ancillary features of PHC 1724 * @info: the driver's PTP info structure 1725 * @rq: The requested feature to change 1726 * @on: Enable/disable flag 1727 */ 1728 static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info, 1729 struct ptp_clock_request *rq, int on) 1730 { 1731 struct ice_pf *pf = ptp_info_to_pf(info); 1732 struct ice_perout_channel clk_cfg = {0}; 1733 int err; 1734 1735 switch (rq->type) { 1736 case PTP_CLK_REQ_PPS: 1737 clk_cfg.gpio_pin = PPS_PIN_INDEX; 1738 clk_cfg.period = NSEC_PER_SEC; 1739 clk_cfg.ena = !!on; 1740 1741 err = ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true); 1742 break; 1743 case PTP_CLK_REQ_EXTTS: 1744 err = ice_ptp_cfg_extts(pf, !!on, rq->extts.index, 1745 TIME_SYNC_PIN_INDEX, rq->extts.flags); 1746 break; 1747 default: 1748 return -EOPNOTSUPP; 1749 } 1750 1751 return err; 1752 } 1753 1754 /** 1755 * ice_ptp_gettimex64 - Get the time of the clock 1756 * @info: the driver's PTP info structure 1757 * @ts: timespec64 structure to hold the current time value 1758 * @sts: Optional parameter for holding a pair of system timestamps from 1759 * the system clock. Will be ignored if NULL is given. 
1760 * 1761 * Read the device clock and return the correct value on ns, after converting it 1762 * into a timespec struct. 1763 */ 1764 static int 1765 ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts, 1766 struct ptp_system_timestamp *sts) 1767 { 1768 struct ice_pf *pf = ptp_info_to_pf(info); 1769 struct ice_hw *hw = &pf->hw; 1770 1771 if (!ice_ptp_lock(hw)) { 1772 dev_err(ice_pf_to_dev(pf), "PTP failed to get time\n"); 1773 return -EBUSY; 1774 } 1775 1776 ice_ptp_read_time(pf, ts, sts); 1777 ice_ptp_unlock(hw); 1778 1779 return 0; 1780 } 1781 1782 /** 1783 * ice_ptp_settime64 - Set the time of the clock 1784 * @info: the driver's PTP info structure 1785 * @ts: timespec64 structure that holds the new time value 1786 * 1787 * Set the device clock to the user input value. The conversion from timespec 1788 * to ns happens in the write function. 1789 */ 1790 static int 1791 ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) 1792 { 1793 struct ice_pf *pf = ptp_info_to_pf(info); 1794 struct timespec64 ts64 = *ts; 1795 struct ice_hw *hw = &pf->hw; 1796 int err; 1797 1798 /* For Vernier mode, we need to recalibrate after new settime 1799 * Start with disabling timestamp block 1800 */ 1801 if (pf->ptp.port.link_up) 1802 ice_ptp_port_phy_stop(&pf->ptp.port); 1803 1804 if (!ice_ptp_lock(hw)) { 1805 err = -EBUSY; 1806 goto exit; 1807 } 1808 1809 /* Disable periodic outputs */ 1810 ice_ptp_disable_all_clkout(pf); 1811 1812 err = ice_ptp_write_init(pf, &ts64); 1813 ice_ptp_unlock(hw); 1814 1815 if (!err) 1816 ice_ptp_reset_cached_phctime(pf); 1817 1818 /* Reenable periodic outputs */ 1819 ice_ptp_enable_all_clkout(pf); 1820 1821 /* Recalibrate and re-enable timestamp blocks for E822/E823 */ 1822 if (hw->phy_model == ICE_PHY_E822) 1823 ice_ptp_restart_all_phy(pf); 1824 exit: 1825 if (err) { 1826 dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err); 1827 return err; 1828 } 1829 1830 return 0; 1831 } 1832 1833 /** 1834 * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment 1835 * @info: the driver's PTP info structure 1836 * @delta: Offset in nanoseconds to adjust the time by 1837 */ 1838 static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta) 1839 { 1840 struct timespec64 now, then; 1841 int ret; 1842 1843 then = ns_to_timespec64(delta); 1844 ret = ice_ptp_gettimex64(info, &now, NULL); 1845 if (ret) 1846 return ret; 1847 now = timespec64_add(now, then); 1848 1849 return ice_ptp_settime64(info, (const struct timespec64 *)&now); 1850 } 1851 1852 /** 1853 * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta 1854 * @info: the driver's PTP info structure 1855 * @delta: Offset in nanoseconds to adjust the time by 1856 */ 1857 static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta) 1858 { 1859 struct ice_pf *pf = ptp_info_to_pf(info); 1860 struct ice_hw *hw = &pf->hw; 1861 struct device *dev; 1862 int err; 1863 1864 dev = ice_pf_to_dev(pf); 1865 1866 /* Hardware only supports atomic adjustments using signed 32-bit 1867 * integers. For any adjustment outside this range, perform 1868 * a non-atomic get->adjust->set flow. 
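	 * (S32_MAX nanoseconds is roughly 2.1 seconds, so only adjustments
	 * within about +/- 2.1 s take the atomic path.)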
1869 */ 1870 if (delta > S32_MAX || delta < S32_MIN) { 1871 dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta); 1872 return ice_ptp_adjtime_nonatomic(info, delta); 1873 } 1874 1875 if (!ice_ptp_lock(hw)) { 1876 dev_err(dev, "PTP failed to acquire semaphore in adjtime\n"); 1877 return -EBUSY; 1878 } 1879 1880 /* Disable periodic outputs */ 1881 ice_ptp_disable_all_clkout(pf); 1882 1883 err = ice_ptp_write_adj(pf, delta); 1884 1885 /* Reenable periodic outputs */ 1886 ice_ptp_enable_all_clkout(pf); 1887 1888 ice_ptp_unlock(hw); 1889 1890 if (err) { 1891 dev_err(dev, "PTP failed to adjust time, err %d\n", err); 1892 return err; 1893 } 1894 1895 ice_ptp_reset_cached_phctime(pf); 1896 1897 return 0; 1898 } 1899 1900 #ifdef CONFIG_ICE_HWTS 1901 /** 1902 * ice_ptp_get_syncdevicetime - Get the cross time stamp info 1903 * @device: Current device time 1904 * @system: System counter value read synchronously with device time 1905 * @ctx: Context provided by timekeeping code 1906 * 1907 * Read device and system (ART) clock simultaneously and return the corrected 1908 * clock values in ns. 1909 */ 1910 static int 1911 ice_ptp_get_syncdevicetime(ktime_t *device, 1912 struct system_counterval_t *system, 1913 void *ctx) 1914 { 1915 struct ice_pf *pf = (struct ice_pf *)ctx; 1916 struct ice_hw *hw = &pf->hw; 1917 u32 hh_lock, hh_art_ctl; 1918 int i; 1919 1920 #define MAX_HH_HW_LOCK_TRIES 5 1921 #define MAX_HH_CTL_LOCK_TRIES 100 1922 1923 for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) { 1924 /* Get the HW lock */ 1925 hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); 1926 if (hh_lock & PFHH_SEM_BUSY_M) { 1927 usleep_range(10000, 15000); 1928 continue; 1929 } 1930 break; 1931 } 1932 if (hh_lock & PFHH_SEM_BUSY_M) { 1933 dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n"); 1934 return -EBUSY; 1935 } 1936 1937 /* Program cmd to master timer */ 1938 ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME); 1939 1940 /* Start the ART and device clock sync sequence */ 1941 hh_art_ctl = rd32(hw, GLHH_ART_CTL); 1942 hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M; 1943 wr32(hw, GLHH_ART_CTL, hh_art_ctl); 1944 1945 for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) { 1946 /* Wait for sync to complete */ 1947 hh_art_ctl = rd32(hw, GLHH_ART_CTL); 1948 if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) { 1949 udelay(1); 1950 continue; 1951 } else { 1952 u32 hh_ts_lo, hh_ts_hi, tmr_idx; 1953 u64 hh_ts; 1954 1955 tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; 1956 /* Read ART time */ 1957 hh_ts_lo = rd32(hw, GLHH_ART_TIME_L); 1958 hh_ts_hi = rd32(hw, GLHH_ART_TIME_H); 1959 hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo; 1960 *system = convert_art_ns_to_tsc(hh_ts); 1961 /* Read Device source clock time */ 1962 hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx)); 1963 hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx)); 1964 hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo; 1965 *device = ns_to_ktime(hh_ts); 1966 break; 1967 } 1968 } 1969 1970 /* Clear the master timer */ 1971 ice_ptp_src_cmd(hw, ICE_PTP_NOP); 1972 1973 /* Release HW lock */ 1974 hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); 1975 hh_lock = hh_lock & ~PFHH_SEM_BUSY_M; 1976 wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock); 1977 1978 if (i == MAX_HH_CTL_LOCK_TRIES) 1979 return -ETIMEDOUT; 1980 1981 return 0; 1982 } 1983 1984 /** 1985 * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp 1986 * @info: the driver's PTP info structure 1987 * @cts: The memory to fill the cross timestamp info 1988 * 1989 * Capture a cross timestamp between the ART and the 
device PTP hardware 1990 * clock. Fill the cross timestamp information and report it back to the 1991 * caller. 1992 * 1993 * This is only valid for E822 and E823 devices which have support for 1994 * generating the cross timestamp via PCIe PTM. 1995 * 1996 * In order to correctly correlate the ART timestamp back to the TSC time, the 1997 * CPU must have X86_FEATURE_TSC_KNOWN_FREQ. 1998 */ 1999 static int 2000 ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info, 2001 struct system_device_crosststamp *cts) 2002 { 2003 struct ice_pf *pf = ptp_info_to_pf(info); 2004 2005 return get_device_system_crosststamp(ice_ptp_get_syncdevicetime, 2006 pf, NULL, cts); 2007 } 2008 #endif /* CONFIG_ICE_HWTS */ 2009 2010 /** 2011 * ice_ptp_get_ts_config - ioctl interface to read the timestamping config 2012 * @pf: Board private structure 2013 * @ifr: ioctl data 2014 * 2015 * Copy the timestamping config to user buffer 2016 */ 2017 int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr) 2018 { 2019 struct hwtstamp_config *config; 2020 2021 if (!test_bit(ICE_FLAG_PTP, pf->flags)) 2022 return -EIO; 2023 2024 config = &pf->ptp.tstamp_config; 2025 2026 return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? 2027 -EFAULT : 0; 2028 } 2029 2030 /** 2031 * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode 2032 * @pf: Board private structure 2033 * @config: hwtstamp settings requested or saved 2034 */ 2035 static int 2036 ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config) 2037 { 2038 switch (config->tx_type) { 2039 case HWTSTAMP_TX_OFF: 2040 ice_set_tx_tstamp(pf, false); 2041 break; 2042 case HWTSTAMP_TX_ON: 2043 ice_set_tx_tstamp(pf, true); 2044 break; 2045 default: 2046 return -ERANGE; 2047 } 2048 2049 switch (config->rx_filter) { 2050 case HWTSTAMP_FILTER_NONE: 2051 ice_set_rx_tstamp(pf, false); 2052 break; 2053 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2054 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2055 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2056 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2057 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2058 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2059 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2060 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2061 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2062 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2063 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2064 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2065 case HWTSTAMP_FILTER_NTP_ALL: 2066 case HWTSTAMP_FILTER_ALL: 2067 ice_set_rx_tstamp(pf, true); 2068 break; 2069 default: 2070 return -ERANGE; 2071 } 2072 2073 return 0; 2074 } 2075 2076 /** 2077 * ice_ptp_set_ts_config - ioctl interface to control the timestamping 2078 * @pf: Board private structure 2079 * @ifr: ioctl data 2080 * 2081 * Get the user config and store it 2082 */ 2083 int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr) 2084 { 2085 struct hwtstamp_config config; 2086 int err; 2087 2088 if (!test_bit(ICE_FLAG_PTP, pf->flags)) 2089 return -EAGAIN; 2090 2091 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 2092 return -EFAULT; 2093 2094 err = ice_ptp_set_timestamp_mode(pf, &config); 2095 if (err) 2096 return err; 2097 2098 /* Return the actual configuration set */ 2099 config = pf->ptp.tstamp_config; 2100 2101 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
2102 -EFAULT : 0; 2103 } 2104 2105 /** 2106 * ice_ptp_rx_hwtstamp - Check for an Rx timestamp 2107 * @rx_ring: Ring to get the VSI info 2108 * @rx_desc: Receive descriptor 2109 * @skb: Particular skb to send timestamp with 2110 * 2111 * The driver receives a notification in the receive descriptor with timestamp. 2112 * The timestamp is in ns, so we must convert the result first. 2113 */ 2114 void 2115 ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring, 2116 union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) 2117 { 2118 struct skb_shared_hwtstamps *hwtstamps; 2119 u64 ts_ns, cached_time; 2120 u32 ts_high; 2121 2122 if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID)) 2123 return; 2124 2125 cached_time = READ_ONCE(rx_ring->cached_phctime); 2126 2127 /* Do not report a timestamp if we don't have a cached PHC time */ 2128 if (!cached_time) 2129 return; 2130 2131 /* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached 2132 * PHC value, rather than accessing the PF. This also allows us to 2133 * simply pass the upper 32bits of nanoseconds directly. Calling 2134 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these 2135 * bits itself. 2136 */ 2137 ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high); 2138 ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high); 2139 2140 hwtstamps = skb_hwtstamps(skb); 2141 memset(hwtstamps, 0, sizeof(*hwtstamps)); 2142 hwtstamps->hwtstamp = ns_to_ktime(ts_ns); 2143 } 2144 2145 /** 2146 * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins 2147 * @pf: pointer to the PF structure 2148 * @info: PTP clock info structure 2149 * 2150 * Disable the OS access to the SMA pins. Called to clear out the OS 2151 * indications of pin support when we fail to setup the E810-T SMA control 2152 * register. 2153 */ 2154 static void 2155 ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) 2156 { 2157 struct device *dev = ice_pf_to_dev(pf); 2158 2159 dev_warn(dev, "Failed to configure E810-T SMA pin control\n"); 2160 2161 info->enable = NULL; 2162 info->verify = NULL; 2163 info->n_pins = 0; 2164 info->n_ext_ts = 0; 2165 info->n_per_out = 0; 2166 } 2167 2168 /** 2169 * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins 2170 * @pf: pointer to the PF structure 2171 * @info: PTP clock info structure 2172 * 2173 * Finish setting up the SMA pins by allocating pin_config, and setting it up 2174 * according to the current status of the SMA. On failure, disable all of the 2175 * extended SMA pin support. 
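 * The pin_config table allocated here is what the PTP core exposes to user
 * space through the PTP_PIN_GETFUNC/PTP_PIN_SETFUNC ioctls and the per-pin
 * sysfs attributes, so it must describe all NUM_PTP_PINS_E810T pins.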
2176 */ 2177 static void 2178 ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) 2179 { 2180 struct device *dev = ice_pf_to_dev(pf); 2181 int err; 2182 2183 /* Allocate memory for kernel pins interface */ 2184 info->pin_config = devm_kcalloc(dev, info->n_pins, 2185 sizeof(*info->pin_config), GFP_KERNEL); 2186 if (!info->pin_config) { 2187 ice_ptp_disable_sma_pins_e810t(pf, info); 2188 return; 2189 } 2190 2191 /* Read current SMA status */ 2192 err = ice_get_sma_config_e810t(&pf->hw, info->pin_config); 2193 if (err) 2194 ice_ptp_disable_sma_pins_e810t(pf, info); 2195 } 2196 2197 /** 2198 * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs 2199 * @pf: pointer to the PF instance 2200 * @info: PTP clock capabilities 2201 */ 2202 static void 2203 ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info) 2204 { 2205 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { 2206 info->n_ext_ts = N_EXT_TS_E810; 2207 info->n_per_out = N_PER_OUT_E810T; 2208 info->n_pins = NUM_PTP_PINS_E810T; 2209 info->verify = ice_verify_pin_e810t; 2210 2211 /* Complete setup of the SMA pins */ 2212 ice_ptp_setup_sma_pins_e810t(pf, info); 2213 } else if (ice_is_e810t(&pf->hw)) { 2214 info->n_ext_ts = N_EXT_TS_NO_SMA_E810T; 2215 info->n_per_out = N_PER_OUT_NO_SMA_E810T; 2216 } else { 2217 info->n_per_out = N_PER_OUT_E810; 2218 info->n_ext_ts = N_EXT_TS_E810; 2219 } 2220 } 2221 2222 /** 2223 * ice_ptp_setup_pins_e823 - Setup PTP pins in sysfs 2224 * @pf: pointer to the PF instance 2225 * @info: PTP clock capabilities 2226 */ 2227 static void 2228 ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info) 2229 { 2230 info->pps = 1; 2231 info->n_per_out = 0; 2232 info->n_ext_ts = 1; 2233 } 2234 2235 /** 2236 * ice_ptp_set_funcs_e82x - Set specialized functions for E82x support 2237 * @pf: Board private structure 2238 * @info: PTP info to fill 2239 * 2240 * Assign functions to the PTP capabilities structure for E82x devices. 2241 * Functions which operate across all device families should be set directly 2242 * in ice_ptp_set_caps. Only add functions here which are distinct for E82x 2243 * devices. 2244 */ 2245 static void 2246 ice_ptp_set_funcs_e82x(struct ice_pf *pf, struct ptp_clock_info *info) 2247 { 2248 #ifdef CONFIG_ICE_HWTS 2249 if (boot_cpu_has(X86_FEATURE_ART) && 2250 boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) 2251 info->getcrosststamp = ice_ptp_getcrosststamp_e82x; 2252 #endif /* CONFIG_ICE_HWTS */ 2253 } 2254 2255 /** 2256 * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support 2257 * @pf: Board private structure 2258 * @info: PTP info to fill 2259 * 2260 * Assign functions to the PTP capabilities structure for E810 devices. 2261 * Functions which operate across all device families should be set directly 2262 * in ice_ptp_set_caps. Only add functions here which are distinct for E810 2263 * devices. 2264 */ 2265 static void 2266 ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info) 2267 { 2268 info->enable = ice_ptp_gpio_enable_e810; 2269 ice_ptp_setup_pins_e810(pf, info); 2270 } 2271 2272 /** 2273 * ice_ptp_set_funcs_e823 - Set specialized functions for E823 support 2274 * @pf: Board private structure 2275 * @info: PTP info to fill 2276 * 2277 * Assign functions to the PTP capabilities structure for E823 devices. 2278 * Functions which operate across all device families should be set directly 2279 * in ice_ptp_set_caps. Only add functions here which are distinct for E823 2280 * devices.
2281 */ 2282 static void 2283 ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info) 2284 { 2285 ice_ptp_set_funcs_e82x(pf, info); 2286 2287 info->enable = ice_ptp_gpio_enable_e823; 2288 ice_ptp_setup_pins_e823(pf, info); 2289 } 2290 2291 /** 2292 * ice_ptp_set_caps - Set PTP capabilities 2293 * @pf: Board private structure 2294 */ 2295 static void ice_ptp_set_caps(struct ice_pf *pf) 2296 { 2297 struct ptp_clock_info *info = &pf->ptp.info; 2298 struct device *dev = ice_pf_to_dev(pf); 2299 2300 snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk", 2301 dev_driver_string(dev), dev_name(dev)); 2302 info->owner = THIS_MODULE; 2303 info->max_adj = 100000000; 2304 info->adjtime = ice_ptp_adjtime; 2305 info->adjfine = ice_ptp_adjfine; 2306 info->gettimex64 = ice_ptp_gettimex64; 2307 info->settime64 = ice_ptp_settime64; 2308 2309 if (ice_is_e810(&pf->hw)) 2310 ice_ptp_set_funcs_e810(pf, info); 2311 else if (ice_is_e823(&pf->hw)) 2312 ice_ptp_set_funcs_e823(pf, info); 2313 else 2314 ice_ptp_set_funcs_e82x(pf, info); 2315 } 2316 2317 /** 2318 * ice_ptp_create_clock - Create PTP clock device for userspace 2319 * @pf: Board private structure 2320 * 2321 * This function creates a new PTP clock device. It only creates one if we 2322 * don't already have one. Will return error if it can't create one, but success 2323 * if we already have a device. Should be used by ice_ptp_init to create clock 2324 * initially, and prevent global resets from creating new clock devices. 2325 */ 2326 static long ice_ptp_create_clock(struct ice_pf *pf) 2327 { 2328 struct ptp_clock_info *info; 2329 struct device *dev; 2330 2331 /* No need to create a clock device if we already have one */ 2332 if (pf->ptp.clock) 2333 return 0; 2334 2335 ice_ptp_set_caps(pf); 2336 2337 info = &pf->ptp.info; 2338 dev = ice_pf_to_dev(pf); 2339 2340 /* Attempt to register the clock before enabling the hardware. */ 2341 pf->ptp.clock = ptp_clock_register(info, dev); 2342 if (IS_ERR(pf->ptp.clock)) { 2343 dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device"); 2344 return PTR_ERR(pf->ptp.clock); 2345 } 2346 2347 return 0; 2348 } 2349 2350 /** 2351 * ice_ptp_request_ts - Request an available Tx timestamp index 2352 * @tx: the PTP Tx timestamp tracker to request from 2353 * @skb: the SKB to associate with this timestamp request 2354 */ 2355 s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) 2356 { 2357 u8 idx; 2358 2359 spin_lock(&tx->lock); 2360 2361 /* Check that this tracker is accepting new timestamp requests */ 2362 if (!ice_ptp_is_tx_tracker_up(tx)) { 2363 spin_unlock(&tx->lock); 2364 return -1; 2365 } 2366 2367 /* Find and set the first available index */ 2368 idx = find_first_zero_bit(tx->in_use, tx->len); 2369 if (idx < tx->len) { 2370 /* We got a valid index that no other thread could have set. Store 2371 * a reference to the skb and the start time to allow discarding old 2372 * requests. 2373 */ 2374 set_bit(idx, tx->in_use); 2375 clear_bit(idx, tx->stale); 2376 tx->tstamps[idx].start = jiffies; 2377 tx->tstamps[idx].skb = skb_get(skb); 2378 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2379 ice_trace(tx_tstamp_request, skb, idx); 2380 } 2381 2382 spin_unlock(&tx->lock); 2383 2384 /* return the appropriate PHY timestamp register index, -1 if no 2385 * indexes were available. 
2386 */ 2387 if (idx >= tx->len) 2388 return -1; 2389 else 2390 return idx + tx->offset; 2391 } 2392 2393 /** 2394 * ice_ptp_process_ts - Process the PTP Tx timestamps 2395 * @pf: Board private structure 2396 * 2397 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx 2398 * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise. 2399 */ 2400 enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf) 2401 { 2402 switch (pf->ptp.tx_interrupt_mode) { 2403 case ICE_PTP_TX_INTERRUPT_NONE: 2404 /* This device has the clock owner handle timestamps for it */ 2405 return ICE_TX_TSTAMP_WORK_DONE; 2406 case ICE_PTP_TX_INTERRUPT_SELF: 2407 /* This device handles its own timestamps */ 2408 return ice_ptp_tx_tstamp(&pf->ptp.port.tx); 2409 case ICE_PTP_TX_INTERRUPT_ALL: 2410 /* This device handles timestamps for all ports */ 2411 return ice_ptp_tx_tstamp_owner(pf); 2412 default: 2413 WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n", 2414 pf->ptp.tx_interrupt_mode); 2415 return ICE_TX_TSTAMP_WORK_DONE; 2416 } 2417 } 2418 2419 static void ice_ptp_periodic_work(struct kthread_work *work) 2420 { 2421 struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work); 2422 struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp); 2423 int err; 2424 2425 if (!test_bit(ICE_FLAG_PTP, pf->flags)) 2426 return; 2427 2428 err = ice_ptp_update_cached_phctime(pf); 2429 2430 /* Run twice a second or reschedule if phc update failed */ 2431 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 2432 msecs_to_jiffies(err ? 10 : 500)); 2433 } 2434 2435 /** 2436 * ice_ptp_reset - Initialize PTP hardware clock support after reset 2437 * @pf: Board private structure 2438 */ 2439 void ice_ptp_reset(struct ice_pf *pf) 2440 { 2441 struct ice_ptp *ptp = &pf->ptp; 2442 struct ice_hw *hw = &pf->hw; 2443 struct timespec64 ts; 2444 int err, itr = 1; 2445 u64 time_diff; 2446 2447 if (test_bit(ICE_PFR_REQ, pf->state)) 2448 goto pfr; 2449 2450 if (!ice_pf_src_tmr_owned(pf)) 2451 goto reset_ts; 2452 2453 err = ice_ptp_init_phc(hw); 2454 if (err) 2455 goto err; 2456 2457 /* Acquire the global hardware lock */ 2458 if (!ice_ptp_lock(hw)) { 2459 err = -EBUSY; 2460 goto err; 2461 } 2462 2463 /* Write the increment time value to PHY and LAN */ 2464 err = ice_ptp_write_incval(hw, ice_base_incval(pf)); 2465 if (err) { 2466 ice_ptp_unlock(hw); 2467 goto err; 2468 } 2469 2470 /* Write the initial Time value to PHY and LAN using the cached PHC 2471 * time before the reset and time difference between stopping and 2472 * starting the clock. 
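 * That is, the clock resumes at cached_phc_time plus the wall-clock time
 * that elapsed while it was stopped (ktime_get_real_ns() - reset_time).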
2473 */ 2474 if (ptp->cached_phc_time) { 2475 time_diff = ktime_get_real_ns() - ptp->reset_time; 2476 ts = ns_to_timespec64(ptp->cached_phc_time + time_diff); 2477 } else { 2478 ts = ktime_to_timespec64(ktime_get_real()); 2479 } 2480 err = ice_ptp_write_init(pf, &ts); 2481 if (err) { 2482 ice_ptp_unlock(hw); 2483 goto err; 2484 } 2485 2486 /* Release the global hardware lock */ 2487 ice_ptp_unlock(hw); 2488 2489 if (!ice_is_e810(hw)) { 2490 /* Enable quad interrupts */ 2491 err = ice_ptp_tx_ena_intr(pf, true, itr); 2492 if (err) 2493 goto err; 2494 } 2495 2496 reset_ts: 2497 /* Restart the PHY timestamping block */ 2498 ice_ptp_reset_phy_timestamping(pf); 2499 2500 pfr: 2501 /* Init Tx structures */ 2502 if (ice_is_e810(&pf->hw)) { 2503 err = ice_ptp_init_tx_e810(pf, &ptp->port.tx); 2504 } else { 2505 kthread_init_delayed_work(&ptp->port.ov_work, 2506 ice_ptp_wait_for_offsets); 2507 err = ice_ptp_init_tx_e822(pf, &ptp->port.tx, 2508 ptp->port.port_num); 2509 } 2510 if (err) 2511 goto err; 2512 2513 set_bit(ICE_FLAG_PTP, pf->flags); 2514 2515 /* Start periodic work going */ 2516 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0); 2517 2518 dev_info(ice_pf_to_dev(pf), "PTP reset successful\n"); 2519 return; 2520 2521 err: 2522 dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err); 2523 } 2524 2525 /** 2526 * ice_ptp_aux_dev_to_aux_pf - Get auxiliary PF handle for the auxiliary device 2527 * @aux_dev: auxiliary device to get the auxiliary PF for 2528 */ 2529 static struct ice_pf * 2530 ice_ptp_aux_dev_to_aux_pf(struct auxiliary_device *aux_dev) 2531 { 2532 struct ice_ptp_port *aux_port; 2533 struct ice_ptp *aux_ptp; 2534 2535 aux_port = container_of(aux_dev, struct ice_ptp_port, aux_dev); 2536 aux_ptp = container_of(aux_port, struct ice_ptp, port); 2537 2538 return container_of(aux_ptp, struct ice_pf, ptp); 2539 } 2540 2541 /** 2542 * ice_ptp_aux_dev_to_owner_pf - Get PF handle for the auxiliary device 2543 * @aux_dev: auxiliary device to get the PF for 2544 */ 2545 static struct ice_pf * 2546 ice_ptp_aux_dev_to_owner_pf(struct auxiliary_device *aux_dev) 2547 { 2548 struct ice_ptp_port_owner *ports_owner; 2549 struct auxiliary_driver *aux_drv; 2550 struct ice_ptp *owner_ptp; 2551 2552 if (!aux_dev->dev.driver) 2553 return NULL; 2554 2555 aux_drv = to_auxiliary_drv(aux_dev->dev.driver); 2556 ports_owner = container_of(aux_drv, struct ice_ptp_port_owner, 2557 aux_driver); 2558 owner_ptp = container_of(ports_owner, struct ice_ptp, ports_owner); 2559 return container_of(owner_ptp, struct ice_pf, ptp); 2560 } 2561 2562 /** 2563 * ice_ptp_auxbus_probe - Probe auxiliary devices 2564 * @aux_dev: PF's auxiliary device 2565 * @id: Auxiliary device ID 2566 */ 2567 static int ice_ptp_auxbus_probe(struct auxiliary_device *aux_dev, 2568 const struct auxiliary_device_id *id) 2569 { 2570 struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev); 2571 struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev); 2572 2573 if (WARN_ON(!owner_pf)) 2574 return -ENODEV; 2575 2576 INIT_LIST_HEAD(&aux_pf->ptp.port.list_member); 2577 mutex_lock(&owner_pf->ptp.ports_owner.lock); 2578 list_add(&aux_pf->ptp.port.list_member, 2579 &owner_pf->ptp.ports_owner.ports); 2580 mutex_unlock(&owner_pf->ptp.ports_owner.lock); 2581 2582 return 0; 2583 } 2584 2585 /** 2586 * ice_ptp_auxbus_remove - Remove auxiliary devices from the bus 2587 * @aux_dev: PF's auxiliary device 2588 */ 2589 static void ice_ptp_auxbus_remove(struct auxiliary_device *aux_dev) 2590 { 2591 struct ice_pf *owner_pf = 
ice_ptp_aux_dev_to_owner_pf(aux_dev); 2592 struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev); 2593 2594 mutex_lock(&owner_pf->ptp.ports_owner.lock); 2595 list_del(&aux_pf->ptp.port.list_member); 2596 mutex_unlock(&owner_pf->ptp.ports_owner.lock); 2597 } 2598 2599 /** 2600 * ice_ptp_auxbus_shutdown 2601 * @aux_dev: PF's auxiliary device 2602 */ 2603 static void ice_ptp_auxbus_shutdown(struct auxiliary_device *aux_dev) 2604 { 2605 /* Doing nothing here, but handle to auxbus driver must be satisfied */ 2606 } 2607 2608 /** 2609 * ice_ptp_auxbus_suspend 2610 * @aux_dev: PF's auxiliary device 2611 * @state: power management state indicator 2612 */ 2613 static int 2614 ice_ptp_auxbus_suspend(struct auxiliary_device *aux_dev, pm_message_t state) 2615 { 2616 /* Doing nothing here, but handle to auxbus driver must be satisfied */ 2617 return 0; 2618 } 2619 2620 /** 2621 * ice_ptp_auxbus_resume 2622 * @aux_dev: PF's auxiliary device 2623 */ 2624 static int ice_ptp_auxbus_resume(struct auxiliary_device *aux_dev) 2625 { 2626 /* Doing nothing here, but handle to auxbus driver must be satisfied */ 2627 return 0; 2628 } 2629 2630 /** 2631 * ice_ptp_auxbus_create_id_table - Create auxiliary device ID table 2632 * @pf: Board private structure 2633 * @name: auxiliary bus driver name 2634 */ 2635 static struct auxiliary_device_id * 2636 ice_ptp_auxbus_create_id_table(struct ice_pf *pf, const char *name) 2637 { 2638 struct auxiliary_device_id *ids; 2639 2640 /* Second id left empty to terminate the array */ 2641 ids = devm_kcalloc(ice_pf_to_dev(pf), 2, 2642 sizeof(struct auxiliary_device_id), GFP_KERNEL); 2643 if (!ids) 2644 return NULL; 2645 2646 snprintf(ids[0].name, sizeof(ids[0].name), "ice.%s", name); 2647 2648 return ids; 2649 } 2650 2651 /** 2652 * ice_ptp_register_auxbus_driver - Register PTP auxiliary bus driver 2653 * @pf: Board private structure 2654 */ 2655 static int ice_ptp_register_auxbus_driver(struct ice_pf *pf) 2656 { 2657 struct auxiliary_driver *aux_driver; 2658 struct ice_ptp *ptp; 2659 struct device *dev; 2660 char *name; 2661 int err; 2662 2663 ptp = &pf->ptp; 2664 dev = ice_pf_to_dev(pf); 2665 aux_driver = &ptp->ports_owner.aux_driver; 2666 INIT_LIST_HEAD(&ptp->ports_owner.ports); 2667 mutex_init(&ptp->ports_owner.lock); 2668 name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u", 2669 pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn), 2670 ice_get_ptp_src_clock_index(&pf->hw)); 2671 2672 aux_driver->name = name; 2673 aux_driver->shutdown = ice_ptp_auxbus_shutdown; 2674 aux_driver->suspend = ice_ptp_auxbus_suspend; 2675 aux_driver->remove = ice_ptp_auxbus_remove; 2676 aux_driver->resume = ice_ptp_auxbus_resume; 2677 aux_driver->probe = ice_ptp_auxbus_probe; 2678 aux_driver->id_table = ice_ptp_auxbus_create_id_table(pf, name); 2679 if (!aux_driver->id_table) 2680 return -ENOMEM; 2681 2682 err = auxiliary_driver_register(aux_driver); 2683 if (err) { 2684 devm_kfree(dev, aux_driver->id_table); 2685 dev_err(dev, "Failed registering aux_driver, name <%s>\n", 2686 name); 2687 } 2688 2689 return err; 2690 } 2691 2692 /** 2693 * ice_ptp_unregister_auxbus_driver - Unregister PTP auxiliary bus driver 2694 * @pf: Board private structure 2695 */ 2696 static void ice_ptp_unregister_auxbus_driver(struct ice_pf *pf) 2697 { 2698 struct auxiliary_driver *aux_driver = &pf->ptp.ports_owner.aux_driver; 2699 2700 auxiliary_driver_unregister(aux_driver); 2701 devm_kfree(ice_pf_to_dev(pf), aux_driver->id_table); 2702 2703 mutex_destroy(&pf->ptp.ports_owner.lock); 2704 } 2705 2706 /** 2707 * 
ice_ptp_clock_index - Get the PTP clock index for this device 2708 * @pf: Board private structure 2709 * 2710 * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock 2711 * is associated. 2712 */ 2713 int ice_ptp_clock_index(struct ice_pf *pf) 2714 { 2715 struct auxiliary_device *aux_dev; 2716 struct ice_pf *owner_pf; 2717 struct ptp_clock *clock; 2718 2719 aux_dev = &pf->ptp.port.aux_dev; 2720 owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev); 2721 if (!owner_pf) 2722 return -1; 2723 clock = owner_pf->ptp.clock; 2724 2725 return clock ? ptp_clock_index(clock) : -1; 2726 } 2727 2728 /** 2729 * ice_ptp_prepare_for_reset - Prepare PTP for reset 2730 * @pf: Board private structure 2731 */ 2732 void ice_ptp_prepare_for_reset(struct ice_pf *pf) 2733 { 2734 struct ice_ptp *ptp = &pf->ptp; 2735 u8 src_tmr; 2736 2737 clear_bit(ICE_FLAG_PTP, pf->flags); 2738 2739 /* Disable timestamping for both Tx and Rx */ 2740 ice_ptp_cfg_timestamp(pf, false); 2741 2742 kthread_cancel_delayed_work_sync(&ptp->work); 2743 2744 if (test_bit(ICE_PFR_REQ, pf->state)) 2745 return; 2746 2747 ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); 2748 2749 /* Disable periodic outputs */ 2750 ice_ptp_disable_all_clkout(pf); 2751 2752 src_tmr = ice_get_ptp_src_clock_index(&pf->hw); 2753 2754 /* Disable source clock */ 2755 wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M); 2756 2757 /* Acquire PHC and system timer to restore after reset */ 2758 ptp->reset_time = ktime_get_real_ns(); 2759 } 2760 2761 /** 2762 * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device 2763 * @pf: Board private structure 2764 * 2765 * Setup and initialize a PTP clock device that represents the device hardware 2766 * clock. Save the clock index for other functions connected to the same 2767 * hardware resource. 2768 */ 2769 static int ice_ptp_init_owner(struct ice_pf *pf) 2770 { 2771 struct ice_hw *hw = &pf->hw; 2772 struct timespec64 ts; 2773 int err, itr = 1; 2774 2775 err = ice_ptp_init_phc(hw); 2776 if (err) { 2777 dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n", 2778 err); 2779 return err; 2780 } 2781 2782 /* Acquire the global hardware lock */ 2783 if (!ice_ptp_lock(hw)) { 2784 err = -EBUSY; 2785 goto err_exit; 2786 } 2787 2788 /* Write the increment time value to PHY and LAN */ 2789 err = ice_ptp_write_incval(hw, ice_base_incval(pf)); 2790 if (err) { 2791 ice_ptp_unlock(hw); 2792 goto err_exit; 2793 } 2794 2795 ts = ktime_to_timespec64(ktime_get_real()); 2796 /* Write the initial Time value to PHY and LAN */ 2797 err = ice_ptp_write_init(pf, &ts); 2798 if (err) { 2799 ice_ptp_unlock(hw); 2800 goto err_exit; 2801 } 2802 2803 /* Release the global hardware lock */ 2804 ice_ptp_unlock(hw); 2805 2806 if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_ALL) { 2807 /* The clock owner for this device type handles the timestamp 2808 * interrupt for all ports. 
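 * Non-owner PFs on these devices keep their Tx timestamp interrupt masked
 * (see ice_ptp_init_port()) and ice_ptp_process_ts() reports
 * ICE_TX_TSTAMP_WORK_DONE for them.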
2809 */ 2810 ice_ptp_configure_tx_tstamp(pf, true); 2811 2812 /* React on all quads interrupts for E82x */ 2813 wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f); 2814 2815 /* Enable quad interrupts */ 2816 err = ice_ptp_tx_ena_intr(pf, true, itr); 2817 if (err) 2818 goto err_exit; 2819 } 2820 2821 /* Ensure we have a clock device */ 2822 err = ice_ptp_create_clock(pf); 2823 if (err) 2824 goto err_clk; 2825 2826 err = ice_ptp_register_auxbus_driver(pf); 2827 if (err) { 2828 dev_err(ice_pf_to_dev(pf), "Failed to register PTP auxbus driver"); 2829 goto err_aux; 2830 } 2831 2832 return 0; 2833 err_aux: 2834 ptp_clock_unregister(pf->ptp.clock); 2835 err_clk: 2836 pf->ptp.clock = NULL; 2837 err_exit: 2838 return err; 2839 } 2840 2841 /** 2842 * ice_ptp_init_work - Initialize PTP work threads 2843 * @pf: Board private structure 2844 * @ptp: PF PTP structure 2845 */ 2846 static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp) 2847 { 2848 struct kthread_worker *kworker; 2849 2850 /* Initialize work functions */ 2851 kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work); 2852 2853 /* Allocate a kworker for handling work required for the ports 2854 * connected to the PTP hardware clock. 2855 */ 2856 kworker = kthread_create_worker(0, "ice-ptp-%s", 2857 dev_name(ice_pf_to_dev(pf))); 2858 if (IS_ERR(kworker)) 2859 return PTR_ERR(kworker); 2860 2861 ptp->kworker = kworker; 2862 2863 /* Start periodic work going */ 2864 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0); 2865 2866 return 0; 2867 } 2868 2869 /** 2870 * ice_ptp_init_port - Initialize PTP port structure 2871 * @pf: Board private structure 2872 * @ptp_port: PTP port structure 2873 */ 2874 static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) 2875 { 2876 struct ice_hw *hw = &pf->hw; 2877 2878 mutex_init(&ptp_port->ps_lock); 2879 2880 switch (hw->phy_model) { 2881 case ICE_PHY_E810: 2882 return ice_ptp_init_tx_e810(pf, &ptp_port->tx); 2883 case ICE_PHY_E822: 2884 /* Non-owner PFs don't react to any interrupts on E82x, 2885 * neither on own quad nor on others 2886 */ 2887 if (!ice_ptp_pf_handles_tx_interrupt(pf)) { 2888 ice_ptp_configure_tx_tstamp(pf, false); 2889 wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0); 2890 } 2891 kthread_init_delayed_work(&ptp_port->ov_work, 2892 ice_ptp_wait_for_offsets); 2893 2894 return ice_ptp_init_tx_e822(pf, &ptp_port->tx, 2895 ptp_port->port_num); 2896 default: 2897 return -ENODEV; 2898 } 2899 } 2900 2901 /** 2902 * ice_ptp_release_auxbus_device 2903 * @dev: device that utilizes the auxbus 2904 */ 2905 static void ice_ptp_release_auxbus_device(struct device *dev) 2906 { 2907 /* Doing nothing here, but handle to auxbux device must be satisfied */ 2908 } 2909 2910 /** 2911 * ice_ptp_create_auxbus_device - Create PTP auxiliary bus device 2912 * @pf: Board private structure 2913 */ 2914 static int ice_ptp_create_auxbus_device(struct ice_pf *pf) 2915 { 2916 struct auxiliary_device *aux_dev; 2917 struct ice_ptp *ptp; 2918 struct device *dev; 2919 char *name; 2920 int err; 2921 u32 id; 2922 2923 ptp = &pf->ptp; 2924 id = ptp->port.port_num; 2925 dev = ice_pf_to_dev(pf); 2926 2927 aux_dev = &ptp->port.aux_dev; 2928 2929 name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u", 2930 pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn), 2931 ice_get_ptp_src_clock_index(&pf->hw)); 2932 2933 aux_dev->name = name; 2934 aux_dev->id = id; 2935 aux_dev->dev.release = ice_ptp_release_auxbus_device; 2936 aux_dev->dev.parent = dev; 2937 2938 err = 
auxiliary_device_init(aux_dev); 2939 if (err) 2940 goto aux_err; 2941 2942 err = auxiliary_device_add(aux_dev); 2943 if (err) { 2944 auxiliary_device_uninit(aux_dev); 2945 goto aux_err; 2946 } 2947 2948 return 0; 2949 aux_err: 2950 dev_err(dev, "Failed to create PTP auxiliary bus device <%s>\n", name); 2951 devm_kfree(dev, name); 2952 return err; 2953 } 2954 2955 /** 2956 * ice_ptp_remove_auxbus_device - Remove PTP auxiliary bus device 2957 * @pf: Board private structure 2958 */ 2959 static void ice_ptp_remove_auxbus_device(struct ice_pf *pf) 2960 { 2961 struct auxiliary_device *aux_dev = &pf->ptp.port.aux_dev; 2962 2963 auxiliary_device_delete(aux_dev); 2964 auxiliary_device_uninit(aux_dev); 2965 2966 memset(aux_dev, 0, sizeof(*aux_dev)); 2967 } 2968 2969 /** 2970 * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode 2971 * @pf: Board private structure 2972 * 2973 * Initialize the Tx timestamp interrupt mode for this device. For most device 2974 * types, each PF processes the interrupt and manages its own timestamps. For 2975 * E822-based devices, only the clock owner processes the timestamps. Other 2976 * PFs disable the interrupt and do not process their own timestamps. 2977 */ 2978 static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf) 2979 { 2980 switch (pf->hw.phy_model) { 2981 case ICE_PHY_E822: 2982 /* E822 based PHY has the clock owner process the interrupt 2983 * for all ports. 2984 */ 2985 if (ice_pf_src_tmr_owned(pf)) 2986 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL; 2987 else 2988 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE; 2989 break; 2990 default: 2991 /* other PHY types handle their own Tx interrupt */ 2992 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF; 2993 } 2994 } 2995 2996 /** 2997 * ice_ptp_init - Initialize PTP hardware clock support 2998 * @pf: Board private structure 2999 * 3000 * Set up the device for interacting with the PTP hardware clock for all 3001 * functions, both the function that owns the clock hardware, and the 3002 * functions connected to the clock hardware. 3003 * 3004 * The clock owner will allocate and register a ptp_clock with the 3005 * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work 3006 * items used for asynchronous work such as Tx timestamps and periodic work. 3007 */ 3008 void ice_ptp_init(struct ice_pf *pf) 3009 { 3010 struct ice_ptp *ptp = &pf->ptp; 3011 struct ice_hw *hw = &pf->hw; 3012 int err; 3013 3014 ice_ptp_init_phy_model(hw); 3015 3016 ice_ptp_init_tx_interrupt_mode(pf); 3017 3018 /* If this function owns the clock hardware, it must allocate and 3019 * configure the PTP clock device to represent it. 
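 * Functions that do not own the clock skip this step and reach the owner's
 * clock through the PTP auxiliary bus (see ice_ptp_clock_index()).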
3020 */ 3021 if (ice_pf_src_tmr_owned(pf)) { 3022 err = ice_ptp_init_owner(pf); 3023 if (err) 3024 goto err; 3025 } 3026 3027 ptp->port.port_num = hw->pf_id; 3028 err = ice_ptp_init_port(pf, &ptp->port); 3029 if (err) 3030 goto err; 3031 3032 /* Start the PHY timestamping block */ 3033 ice_ptp_reset_phy_timestamping(pf); 3034 3035 set_bit(ICE_FLAG_PTP, pf->flags); 3036 err = ice_ptp_init_work(pf, ptp); 3037 if (err) 3038 goto err; 3039 3040 err = ice_ptp_create_auxbus_device(pf); 3041 if (err) 3042 goto err; 3043 3044 dev_info(ice_pf_to_dev(pf), "PTP init successful\n"); 3045 return; 3046 3047 err: 3048 /* If we registered a PTP clock, release it */ 3049 if (pf->ptp.clock) { 3050 ptp_clock_unregister(ptp->clock); 3051 pf->ptp.clock = NULL; 3052 } 3053 clear_bit(ICE_FLAG_PTP, pf->flags); 3054 dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err); 3055 } 3056 3057 /** 3058 * ice_ptp_release - Disable the driver/HW support and unregister the clock 3059 * @pf: Board private structure 3060 * 3061 * This function handles the cleanup work required from the initialization by 3062 * clearing out the important information and unregistering the clock 3063 */ 3064 void ice_ptp_release(struct ice_pf *pf) 3065 { 3066 if (!test_bit(ICE_FLAG_PTP, pf->flags)) 3067 return; 3068 3069 /* Disable timestamping for both Tx and Rx */ 3070 ice_ptp_cfg_timestamp(pf, false); 3071 3072 ice_ptp_remove_auxbus_device(pf); 3073 3074 ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); 3075 3076 clear_bit(ICE_FLAG_PTP, pf->flags); 3077 3078 kthread_cancel_delayed_work_sync(&pf->ptp.work); 3079 3080 ice_ptp_port_phy_stop(&pf->ptp.port); 3081 mutex_destroy(&pf->ptp.port.ps_lock); 3082 if (pf->ptp.kworker) { 3083 kthread_destroy_worker(pf->ptp.kworker); 3084 pf->ptp.kworker = NULL; 3085 } 3086 3087 if (!pf->ptp.clock) 3088 return; 3089 3090 /* Disable periodic outputs */ 3091 ice_ptp_disable_all_clkout(pf); 3092 3093 ptp_clock_unregister(pf->ptp.clock); 3094 pf->ptp.clock = NULL; 3095 3096 ice_ptp_unregister_auxbus_driver(pf); 3097 3098 dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n"); 3099 } 3100
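/* Illustrative userspace sketch (not part of the driver): once ice_ptp_init()
 * has registered the clock, an application can look up the clock index that
 * ice_ptp_clock_index() reports (e.g. via "ethtool -T <netdev>"), open the
 * matching /dev/ptpN character device, read the time that
 * ice_ptp_gettimex64() serves, and request a periodic output that is handled
 * by the driver's PTP_CLK_REQ_PEROUT path. The device path, channel and
 * period below are hypothetical, error handling is trimmed, and the snippet
 * builds against the standard <linux/ptp_clock.h> UAPI, so it is excluded
 * from the kernel build with #if 0.
 */
#if 0 /* userspace example only */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <time.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

/* Derive a dynamic POSIX clock id from an open /dev/ptpN descriptor */
#define CLOCKFD			3
#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	struct ptp_perout_request perout = { 0 };
	struct timespec ts;
	int fd;

	fd = open("/dev/ptp1", O_RDWR);	/* index from "ethtool -T <netdev>" */
	if (fd < 0)
		return 1;

	/* Serviced by ice_ptp_gettimex64() in the driver */
	if (!clock_gettime(FD_TO_CLOCKID(fd), &ts))
		printf("PHC time: %lld.%09ld\n",
		       (long long)ts.tv_sec, ts.tv_nsec);

	/* Request a 1 Hz periodic output on channel 0, starting two seconds
	 * from now; this reaches the driver's PTP_CLK_REQ_PEROUT handling.
	 */
	perout.index = 0;
	perout.period.sec = 1;
	perout.start.sec = ts.tv_sec + 2;
	if (ioctl(fd, PTP_PEROUT_REQUEST, &perout))
		perror("PTP_PEROUT_REQUEST");

	close(fd);
	return 0;
}
#endif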