// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_trace.h"

#define E810_OUT_PROP_DELAY_NS 1

#define UNKNOWN_INCVAL_E822 0x100000000ULL

static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
	/* name    idx   func          chan */
	{ "GNSS",  GNSS, PTP_PF_EXTTS, 0, { 0, } },
	{ "SMA1",  SMA1, PTP_PF_NONE,  1, { 0, } },
	{ "U.FL1", UFL1, PTP_PF_NONE,  1, { 0, } },
	{ "SMA2",  SMA2, PTP_PF_NONE,  2, { 0, } },
	{ "U.FL2", UFL2, PTP_PF_NONE,  2, { 0, } },
};

/**
 * ice_get_sma_config_e810t
 * @hw: pointer to the hw struct
 * @ptp_pins: pointer to the ptp_pin_desc structure
 *
 * Read the configuration of the SMA control logic and put it into the
 * ptp_pin_desc structure
 */
static int
ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
{
	u8 data, i;
	int status;

	/* Read initial pin state */
	status = ice_read_sma_ctrl_e810t(hw, &data);
	if (status)
		return status;

	/* initialize with defaults */
	for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
		strscpy(ptp_pins[i].name, ice_pin_desc_e810t[i].name,
			sizeof(ptp_pins[i].name));
		ptp_pins[i].index = ice_pin_desc_e810t[i].index;
		ptp_pins[i].func = ice_pin_desc_e810t[i].func;
		ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
	}

	/* Parse SMA1/UFL1 */
	switch (data & ICE_SMA1_MASK_E810T) {
	case ICE_SMA1_MASK_E810T:
	default:
		ptp_pins[SMA1].func = PTP_PF_NONE;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case ICE_SMA1_DIR_EN_E810T:
		ptp_pins[SMA1].func = PTP_PF_PEROUT;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case ICE_SMA1_TX_EN_E810T:
		ptp_pins[SMA1].func = PTP_PF_EXTTS;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case 0:
		ptp_pins[SMA1].func = PTP_PF_EXTTS;
		ptp_pins[UFL1].func = PTP_PF_PEROUT;
		break;
	}

	/* Parse SMA2/UFL2 */
	switch (data & ICE_SMA2_MASK_E810T) {
	case ICE_SMA2_MASK_E810T:
	default:
		ptp_pins[SMA2].func = PTP_PF_NONE;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
		ptp_pins[SMA2].func = PTP_PF_EXTTS;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
		ptp_pins[SMA2].func = PTP_PF_PEROUT;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T):
		ptp_pins[SMA2].func = PTP_PF_NONE;
		ptp_pins[UFL2].func = PTP_PF_EXTTS;
		break;
	case ICE_SMA2_DIR_EN_E810T:
		ptp_pins[SMA2].func = PTP_PF_PEROUT;
		ptp_pins[UFL2].func = PTP_PF_EXTTS;
		break;
	}

	return 0;
}

/**
 * ice_ptp_set_sma_config_e810t
 * @hw: pointer to the hw struct
 * @ptp_pins: pointer to the ptp_pin_desc structure
 *
 * Set the configuration of the SMA control logic based on the configuration
 * in the ptp_pins parameter
 */
static int
ice_ptp_set_sma_config_e810t(struct ice_hw *hw,
			     const struct ptp_pin_desc *ptp_pins)
{
	int status;
	u8 data;

	/* SMA1 and UFL1 cannot be set to TX at the same time */
	if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
	    ptp_pins[UFL1].func == PTP_PF_PEROUT)
		return -EINVAL;

	/* SMA2 and UFL2 cannot be set to RX at the same time */
	if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
	    ptp_pins[UFL2].func == PTP_PF_EXTTS)
		return -EINVAL;

	/* Read initial pin state value */
	status = ice_read_sma_ctrl_e810t(hw, &data);
	if (status)
		return status;

	/* Set the right state based on the desired configuration */
	data &= ~ICE_SMA1_MASK_E810T;
	if (ptp_pins[SMA1].func == PTP_PF_NONE &&
	    ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled");
		data |= ICE_SMA1_MASK_E810T;
	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
		   ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 RX");
		data |= ICE_SMA1_TX_EN_E810T;
	} else if (ptp_pins[SMA1].func == PTP_PF_NONE &&
		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
		/* U.FL 1 TX will always enable SMA 1 RX */
		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
	} else if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 TX");
		data |= ICE_SMA1_DIR_EN_E810T;
	}

	data &= ~ICE_SMA2_MASK_E810T;
	if (ptp_pins[SMA2].func == PTP_PF_NONE &&
	    ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled");
		data |= ICE_SMA2_MASK_E810T;
	} else if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
		   ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 RX");
		data |= (ICE_SMA2_TX_EN_E810T |
			 ICE_SMA2_UFL2_RX_DIS_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_NONE &&
		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
		dev_info(ice_hw_to_dev(hw), "UFL2 RX");
		data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 TX");
		data |= (ICE_SMA2_DIR_EN_E810T |
			 ICE_SMA2_UFL2_RX_DIS_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
		dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX");
		data |= ICE_SMA2_DIR_EN_E810T;
	}

	return ice_write_sma_ctrl_e810t(hw, data);
}

/**
 * ice_ptp_set_sma_e810t
 * @info: the driver's PTP info structure
 * @pin: pin index in kernel structure
 * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT)
 *
 * Set the configuration of a single SMA pin
 */
static int
ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin,
		      enum ptp_pin_function func)
{
	struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T];
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;
	int err;

	if (pin < SMA1 || func > PTP_PF_PEROUT)
		return -EOPNOTSUPP;

	err = ice_get_sma_config_e810t(hw, ptp_pins);
	if (err)
		return err;

	/* Disable the same function on the other pin sharing the channel */
	if (pin == SMA1 && ptp_pins[UFL1].func == func)
		ptp_pins[UFL1].func = PTP_PF_NONE;
	if (pin == UFL1 && ptp_pins[SMA1].func == func)
		ptp_pins[SMA1].func = PTP_PF_NONE;

	if (pin == SMA2 && ptp_pins[UFL2].func == func)
		ptp_pins[UFL2].func = PTP_PF_NONE;
	if (pin == UFL2 && ptp_pins[SMA2].func == func)
		ptp_pins[SMA2].func = PTP_PF_NONE;

	/* Set up new pin function in the temp table */
	ptp_pins[pin].func = func;

	return ice_ptp_set_sma_config_e810t(hw, ptp_pins);
}

/**
 * ice_verify_pin_e810t
 * @info: the driver's PTP info structure
 * @pin: Pin index
 * @func: Assigned function
 * @chan: Assigned channel
 *
 * Verify that the pin supports the requested function and check pin
 * consistency. Reconfigure the SMA logic attached to the given pin to enable
 * its desired functionality.
 */
static int
ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
		     enum ptp_pin_function func, unsigned int chan)
{
	/* Don't allow channel reassignment */
	if (chan != ice_pin_desc_e810t[pin].chan)
		return -EOPNOTSUPP;

	/* Check if functions are properly assigned */
	switch (func) {
	case PTP_PF_NONE:
		break;
	case PTP_PF_EXTTS:
		if (pin == UFL1)
			return -EOPNOTSUPP;
		break;
	case PTP_PF_PEROUT:
		if (pin == UFL2 || pin == GNSS)
			return -EOPNOTSUPP;
		break;
	case PTP_PF_PHYSYNC:
		return -EOPNOTSUPP;
	}

	return ice_ptp_set_sma_e810t(info, pin, func);
}

/**
 * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
 * @pf: Board private structure
 *
 * Program the device to respond appropriately to the Tx timestamp interrupt
 * cause.
 */
static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	bool enable;
	u32 val;

	switch (pf->ptp.tx_interrupt_mode) {
	case ICE_PTP_TX_INTERRUPT_ALL:
		/* React to interrupts across all quads. */
		wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
		enable = true;
		break;
	case ICE_PTP_TX_INTERRUPT_NONE:
		/* Do not react to interrupts on any quad. */
		wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
		enable = false;
		break;
	case ICE_PTP_TX_INTERRUPT_SELF:
	default:
		enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON;
		break;
	}

	/* Configure the Tx timestamp interrupt */
	val = rd32(hw, PFINT_OICR_ENA);
	if (enable)
		val |= PFINT_OICR_TSYN_TX_M;
	else
		val &= ~PFINT_OICR_TSYN_TX_M;
	wr32(hw, PFINT_OICR_ENA, val);
}

/**
 * ice_set_rx_tstamp - Enable or disable Rx timestamping
 * @pf: The PF pointer to search in
 * @on: bool value for whether timestamps are enabled or disabled
 */
static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
{
	struct ice_vsi *vsi;
	u16 i;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->rx_rings)
		return;

	/* Set the timestamp flag for all the Rx rings */
	ice_for_each_rxq(vsi, i) {
		if (!vsi->rx_rings[i])
			continue;
		vsi->rx_rings[i]->ptp_rx = on;
	}
}

/**
 * ice_ptp_disable_timestamp_mode - Disable current timestamp mode
 * @pf: Board private structure
 *
 * Called during preparation for reset to temporarily disable timestamping on
 * the device. Called during remove to disable timestamping while cleaning up
 * driver resources.
 */
static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 val;

	val = rd32(hw, PFINT_OICR_ENA);
	val &= ~PFINT_OICR_TSYN_TX_M;
	wr32(hw, PFINT_OICR_ENA, val);

	ice_set_rx_tstamp(pf, false);
}

/**
 * ice_ptp_restore_timestamp_mode - Restore timestamp configuration
 * @pf: Board private structure
 *
 * Called at the end of rebuild to restore timestamp configuration after
 * a device reset.
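 *
 * This is also called from ice_ptp_set_timestamp_mode() so that a newly
 * requested timestamp configuration takes effect immediately.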
 */
void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	bool enable_rx;

	ice_ptp_cfg_tx_interrupt(pf);

	enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
	ice_set_rx_tstamp(pf, enable_rx);

	/* Trigger an immediate software interrupt to ensure that timestamps
	 * which occurred during reset are handled now.
	 */
	wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
	ice_flush(hw);
}

/**
 * ice_ptp_read_src_clk_reg - Read the source clock register
 * @pf: Board private structure
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
 */
static u64
ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
{
	struct ice_hw *hw = &pf->hw;
	u32 hi, lo, lo2;
	u8 tmr_idx;

	tmr_idx = ice_get_ptp_src_clock_index(hw);
	/* Read the system timestamp pre PHC read */
	ptp_read_system_prets(sts);

	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	/* Read the system timestamp post PHC read */
	ptp_read_system_postts(sts);

	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	if (lo2 < lo) {
		/* if TIME_L rolled over read TIME_L again and update
		 * system timestamps
		 */
		ptp_read_system_prets(sts);
		lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
		ptp_read_system_postts(sts);
		hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	}

	return ((u64)hi << 32) | lo;
}

/**
 * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
 * @cached_phc_time: recently cached copy of PHC time
 * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
 *
 * Hardware captures timestamps which contain only 32 bits of nominal
 * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
 * Note that the captured timestamp values may be 40 bits, but the lower
 * 8 bits are sub-nanoseconds and generally discarded.
 *
 * Extend the 32bit nanosecond timestamp using the following algorithm and
 * assumptions:
 *
 * 1) have a recently cached copy of the PHC time
 * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1
 *    seconds) before or after the PHC time was captured.
 * 3) calculate the delta between the cached time and the timestamp
 * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
 *    captured after the PHC time. In this case, the full timestamp is just
 *    the cached PHC time plus the delta.
 * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
 *    timestamp was captured *before* the PHC time, i.e. because the PHC
 *    cache was updated after the timestamp was captured by hardware. In this
 *    case, the full timestamp is the cached time minus the inverse delta.
 *
 * This algorithm works even if the PHC time was updated after a Tx timestamp
 * was requested, but before the Tx timestamp event was reported from
 * hardware.
 *
 * This calculation primarily relies on keeping the cached PHC time up to
 * date. If the timestamp was captured more than 2^31 nanoseconds after the
 * PHC time, it is possible that the lower 32bits of PHC time have
 * overflowed more than once, and we might generate an incorrect timestamp.
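 *
 * For example (illustrative values only): with a cached PHC time of
 * 0x5_F0000000 ns, an in_tstamp of 0x10000000 yields a 32 bit delta of
 * 0x20000000 (below 2^31), so the extended timestamp is 0x6_10000000 ns.
 * An in_tstamp of 0xE0000000 instead yields a delta of 0xF0000000 (above
 * 2^31), so the result is the cached time minus the inverse delta of
 * 0x10000000, i.e. 0x5_E0000000 ns.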
 *
 * This is prevented by (a) periodically updating the cached PHC time once
 * a second, and (b) discarding any Tx timestamp packet if it has waited for
 * a timestamp for more than 2 seconds.
 */
static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
{
	u32 delta, phc_time_lo;
	u64 ns;

	/* Extract the lower 32 bits of the PHC time */
	phc_time_lo = (u32)cached_phc_time;

	/* Calculate the delta between the lower 32bits of the cached PHC
	 * time and the in_tstamp value
	 */
	delta = (in_tstamp - phc_time_lo);

	/* Do not assume that the in_tstamp is always more recent than the
	 * cached PHC time. If the delta is large, it indicates that the
	 * in_tstamp was taken in the past, and should be converted
	 * forward.
	 */
	if (delta > (U32_MAX / 2)) {
		/* reverse the delta calculation here */
		delta = (phc_time_lo - in_tstamp);
		ns = cached_phc_time - delta;
	} else {
		ns = cached_phc_time + delta;
	}

	return ns;
}

/**
 * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
 * @pf: Board private structure
 * @in_tstamp: Ingress/egress 40b timestamp value
 *
 * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
 * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
 *
 *  *--------------------------------------------------------------*
 *  | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
 *  *--------------------------------------------------------------*
 *
 * The low bit is an indicator of whether the timestamp is valid. The next
 * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
 * and the remaining 32 bits are the lower 32 bits of the PHC timer.
 *
 * It is assumed that the caller verifies the timestamp is valid prior to
 * calling this function.
 *
 * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
 * time stored in the device private PTP structure as the basis for timestamp
 * extension.
 *
 * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
 * algorithm.
 */
static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
{
	const u64 mask = GENMASK_ULL(31, 0);
	unsigned long discard_time;

	/* Discard the hardware timestamp if the cached PHC time is too old */
	discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
	if (time_is_before_jiffies(discard_time)) {
		pf->ptp.tx_hwtstamp_discarded++;
		return 0;
	}

	return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
				     (in_tstamp >> 8) & mask);
}

/**
 * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps
 * @tx: the PTP Tx timestamp tracker to check
 *
 * Check that a given PTP Tx timestamp tracker is up, i.e. that it is ready
 * to accept new timestamp requests.
 *
 * Assumes the tx->lock spinlock is already held.
 */
static bool
ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
{
	lockdep_assert_held(&tx->lock);

	return tx->init && !tx->calibrating;
}

/**
 * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port
 * @tx: the PTP Tx timestamp tracker
 *
 * Process timestamps captured by the PHY associated with this port. To do
 * this, loop over each index with a waiting skb.
 *
 * If a given index has a valid timestamp, perform the following steps:
 *
 * 1) check that the timestamp request is not stale
 * 2) check that a timestamp is ready and available in the PHY memory bank
 * 3) read and copy the timestamp out of the PHY register
 * 4) unlock the index by clearing the associated in_use bit
 * 5) check if the timestamp is stale, and discard if so
 * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value
 * 7) send this 64 bit timestamp to the stack
 *
 * Note that we do not hold the tracking lock while reading the Tx timestamp.
 * This is because reading the timestamp requires taking a mutex that might
 * sleep.
 *
 * The only place where we set in_use is when a new timestamp is initiated
 * with a slot index. This is only called in the hard xmit routine where an
 * SKB has a request flag set. The only places where we clear this bit are
 * this function, and during teardown when the Tx timestamp tracker is being
 * removed. A timestamp index will never be re-used until the in_use bit for
 * that index is cleared.
 *
 * If a Tx thread starts a new timestamp, we might not begin processing it
 * right away but we will notice it at the end when we re-queue the task.
 *
 * If a Tx thread starts a new timestamp just after this function exits, the
 * interrupt for that timestamp should re-trigger this function once
 * a timestamp is ready.
 *
 * In cases where the PTP hardware clock was directly adjusted, some
 * timestamps may not be able to safely use the timestamp extension math. In
 * this case, software will set the stale bit for any outstanding Tx
 * timestamps when the clock is adjusted. Then this function will discard
 * those captured timestamps instead of sending them to the stack.
 *
 * If a Tx packet has been waiting for more than 2 seconds, it is not possible
 * to correctly extend the timestamp using the cached PHC time. It is
 * extremely unlikely that a packet will ever take this long to timestamp. If
 * we detect a Tx timestamp request that has waited for this long we assume
 * the packet will never be sent by hardware and discard it without reading
 * the timestamp register.
 */
static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
{
	struct ice_ptp_port *ptp_port;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u64 tstamp_ready;
	bool link_up;
	int err;
	u8 idx;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	hw = &pf->hw;

	/* Read the Tx ready status first */
	err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
	if (err)
		return;

	/* Drop packets if the link went down */
	link_up = ptp_port->link_up;

	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct skb_shared_hwtstamps shhwtstamps = {};
		u8 phy_idx = idx + tx->offset;
		u64 raw_tstamp = 0, tstamp;
		bool drop_ts = !link_up;
		struct sk_buff *skb;

		/* Drop packets which have waited for more than 2 seconds */
		if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
			drop_ts = true;

			/* Count the number of Tx timestamps that timed out */
			pf->ptp.tx_hwtstamp_timeouts++;
		}

		/* Only read a timestamp from the PHY if it is marked as ready
		 * by the tstamp_ready register. This avoids unnecessary
		 * reading of timestamps which are not yet valid.
		 * This is important as we must read all timestamps which are
		 * valid and only timestamps which are valid during each
		 * interrupt. If we do not, the hardware logic for generating
		 * a new interrupt can get stuck on some devices.
		 */
		if (!(tstamp_ready & BIT_ULL(phy_idx))) {
			if (drop_ts)
				goto skip_ts_read;

			continue;
		}

		ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);

		err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp);
		if (err && !drop_ts)
			continue;

		ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);

		/* For PHYs which don't implement a proper timestamp ready
		 * bitmap, verify that the timestamp value is different
		 * from the last cached timestamp. If it is not, skip this for
		 * now assuming it hasn't yet been captured by hardware.
		 */
		if (!drop_ts && tx->verify_cached &&
		    raw_tstamp == tx->tstamps[idx].cached_tstamp)
			continue;

		/* Discard any timestamp value without the valid bit set */
		if (!(raw_tstamp & ICE_PTP_TS_VALID))
			drop_ts = true;

skip_ts_read:
		spin_lock(&tx->lock);
		if (tx->verify_cached && raw_tstamp)
			tx->tstamps[idx].cached_tstamp = raw_tstamp;
		clear_bit(idx, tx->in_use);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		if (test_and_clear_bit(idx, tx->stale))
			drop_ts = true;
		spin_unlock(&tx->lock);

		/* It is unlikely but possible that the SKB will have been
		 * flushed at this point due to link change or teardown.
		 */
		if (!skb)
			continue;

		if (drop_ts) {
			dev_kfree_skb_any(skb);
			continue;
		}

		/* Extend the timestamp using cached PHC time */
		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
		if (tstamp) {
			shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
			ice_trace(tx_tstamp_complete, skb, idx);
		}

		skb_tstamp_tx(skb, &shhwtstamps);
		dev_kfree_skb_any(skb);
	}
}

/**
 * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device
 * @pf: Board private structure
 */
static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
{
	struct ice_ptp_port *port;
	unsigned int i;

	mutex_lock(&pf->ptp.ports_owner.lock);
	list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member) {
		struct ice_ptp_tx *tx = &port->tx;

		if (!tx || !tx->init)
			continue;

		ice_ptp_process_tx_tstamp(tx);
	}
	mutex_unlock(&pf->ptp.ports_owner.lock);

	for (i = 0; i < ICE_MAX_QUAD; i++) {
		u64 tstamp_ready;
		int err;

		/* Read the Tx ready status first */
		err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
		if (err || tstamp_ready)
			return ICE_TX_TSTAMP_WORK_PENDING;
	}

	return ICE_TX_TSTAMP_WORK_DONE;
}

/**
 * ice_ptp_tx_tstamp - Process Tx timestamps for this function.
 * @tx: the Tx timestamp tracker to process
 *
 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete
 * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise.
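 *
 * A caller that sees ICE_TX_TSTAMP_WORK_PENDING is expected to invoke this
 * function again (for example by re-queueing its work item or re-triggering
 * the timestamp interrupt handling path) until all outstanding timestamps
 * have been processed.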
 */
static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
{
	bool more_timestamps;

	if (!tx->init)
		return ICE_TX_TSTAMP_WORK_DONE;

	/* Process the Tx timestamp tracker */
	ice_ptp_process_tx_tstamp(tx);

	/* Check if there are outstanding Tx timestamps */
	spin_lock(&tx->lock);
	more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);
	spin_unlock(&tx->lock);

	if (more_timestamps)
		return ICE_TX_TSTAMP_WORK_PENDING;

	return ICE_TX_TSTAMP_WORK_DONE;
}

/**
 * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
 * @tx: Tx tracking structure to initialize
 *
 * Assumes that the length has already been initialized. Do not call directly,
 * use one of the ice_ptp_init_tx_* functions instead.
 */
static int
ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
{
	unsigned long *in_use, *stale;
	struct ice_tx_tstamp *tstamps;

	tstamps = kcalloc(tx->len, sizeof(*tstamps), GFP_KERNEL);
	in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
	stale = bitmap_zalloc(tx->len, GFP_KERNEL);

	if (!tstamps || !in_use || !stale) {
		kfree(tstamps);
		bitmap_free(in_use);
		bitmap_free(stale);

		return -ENOMEM;
	}

	tx->tstamps = tstamps;
	tx->in_use = in_use;
	tx->stale = stale;
	tx->init = 1;

	spin_lock_init(&tx->lock);

	return 0;
}

/**
 * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
 * @pf: Board private structure
 * @tx: the tracker to flush
 *
 * Called during teardown when a Tx tracker is being removed.
 */
static void
ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	struct ice_hw *hw = &pf->hw;
	u64 tstamp_ready;
	int err;
	u8 idx;

	err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
	if (err) {
		dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n",
			tx->block, err);

		/* If we fail to read the Tx timestamp ready bitmap just
		 * skip clearing the PHY timestamps.
		 */
		tstamp_ready = 0;
	}

	for_each_set_bit(idx, tx->in_use, tx->len) {
		u8 phy_idx = idx + tx->offset;
		struct sk_buff *skb;

		/* In case this timestamp is ready, we need to clear it. */
		if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx)))
			ice_clear_phy_tstamp(hw, tx->block, phy_idx);

		spin_lock(&tx->lock);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		clear_bit(idx, tx->in_use);
		clear_bit(idx, tx->stale);
		spin_unlock(&tx->lock);

		/* Count the number of Tx timestamps flushed */
		pf->ptp.tx_hwtstamp_flushed++;

		/* Free the SKB after we've cleared the bit */
		dev_kfree_skb_any(skb);
	}
}

/**
 * ice_ptp_mark_tx_tracker_stale - Mark unfinished timestamps as stale
 * @tx: the tracker to mark
 *
 * Mark currently outstanding Tx timestamps as stale. This prevents sending
 * their timestamp value to the stack. This is required to prevent extending
 * the 40bit hardware timestamp incorrectly.
 *
 * This should be called when the PTP clock is modified such as after a set
 * time request.
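 *
 * In this driver that happens via ice_ptp_reset_cached_phctime(), which is
 * invoked from the settime and adjtime paths after the clock has been
 * modified.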
 */
static void
ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
{
	spin_lock(&tx->lock);
	bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len);
	spin_unlock(&tx->lock);
}

/**
 * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
 * @pf: Board private structure
 * @tx: Tx tracking structure to release
 *
 * Free memory associated with the Tx timestamp tracker.
 */
static void
ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	spin_lock(&tx->lock);
	tx->init = 0;
	spin_unlock(&tx->lock);

	/* wait for potentially outstanding interrupt to complete */
	synchronize_irq(pf->oicr_irq.virq);

	ice_ptp_flush_tx_tracker(pf, tx);

	kfree(tx->tstamps);
	tx->tstamps = NULL;

	bitmap_free(tx->in_use);
	tx->in_use = NULL;

	bitmap_free(tx->stale);
	tx->stale = NULL;

	tx->len = 0;
}

/**
 * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps
 * @pf: Board private structure
 * @tx: the Tx tracking structure to initialize
 * @port: the port this structure tracks
 *
 * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
 * the timestamp block is shared for all ports in the same quad. To avoid
 * ports using the same timestamp index, logically break the block of
 * registers into chunks based on the port number.
 */
static int
ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
	tx->block = port / ICE_PORTS_PER_QUAD;
	tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E822;
	tx->len = INDEX_PER_PORT_E822;
	tx->verify_cached = 0;

	return ice_ptp_alloc_tx_tracker(tx);
}

/**
 * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
 * @pf: Board private structure
 * @tx: the Tx tracking structure to initialize
 *
 * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
 * port has its own block of timestamps, independent of the other ports.
 */
static int
ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	tx->block = pf->hw.port_info->lport;
	tx->offset = 0;
	tx->len = INDEX_PER_PORT_E810;
	/* The E810 PHY does not provide a timestamp ready bitmap. Instead,
	 * verify new timestamps against cached copy of the last read
	 * timestamp.
	 */
	tx->verify_cached = 1;

	return ice_ptp_alloc_tx_tracker(tx);
}

/**
 * ice_ptp_update_cached_phctime - Update the cached PHC time values
 * @pf: Board specific private structure
 *
 * This function updates the system time values which are cached in the PF
 * structure and the Rx rings.
 *
 * This function must be called periodically to ensure that the cached value
 * is never more than 2 seconds old.
 *
 * Note that the cached copy in the PF PTP structure is always updated, even
 * if we can't update the copy in the Rx rings.
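 *
 * The 2 second limit matches the assumption in ice_ptp_extend_32b_ts() that
 * a captured timestamp lies within ~2.1 seconds (2^31 ns) of the cached PHC
 * time; letting the cache grow older than that could produce incorrectly
 * extended timestamps.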
 *
 * Return:
 * * 0 - OK, successfully updated
 * * -EAGAIN - PF was busy, need to reschedule the update
 */
static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	unsigned long update_before;
	u64 systime;
	int i;

	update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
	if (pf->ptp.cached_phc_time &&
	    time_is_before_jiffies(update_before)) {
		unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;

		dev_warn(dev, "%u msecs passed between updates to cached PHC time\n",
			 jiffies_to_msecs(time_taken));
		pf->ptp.late_cached_phc_updates++;
	}

	/* Read the current PHC time */
	systime = ice_ptp_read_src_clk_reg(pf, NULL);

	/* Update the cached PHC time stored in the PF structure */
	WRITE_ONCE(pf->ptp.cached_phc_time, systime);
	WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);

	if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
		return -EAGAIN;

	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];
		int j;

		if (!vsi)
			continue;

		if (vsi->type != ICE_VSI_PF)
			continue;

		ice_for_each_rxq(vsi, j) {
			if (!vsi->rx_rings[j])
				continue;
			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
		}
	}
	clear_bit(ICE_CFG_BUSY, pf->state);

	return 0;
}

/**
 * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
 * @pf: Board specific private structure
 *
 * This function must be called when the cached PHC time is no longer valid,
 * such as after a time adjustment. It marks any currently outstanding Tx
 * timestamps as stale and updates the cached PHC time for both the PF and Rx
 * rings.
 *
 * If updating the PHC time cannot be done immediately, a warning message is
 * logged and the work item is scheduled immediately to minimize the window
 * with a wrong cached timestamp.
 */
static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	/* Update the cached PHC time immediately if possible, otherwise
	 * schedule the work item to execute soon.
	 */
	err = ice_ptp_update_cached_phctime(pf);
	if (err) {
		/* If another thread is updating the Rx rings, we won't
		 * properly reset them here. This could lead to reporting of
		 * invalid timestamps, but there isn't much we can do.
		 */
		dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
			 __func__);

		/* Queue the work item to update the Rx rings when possible */
		kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
					   msecs_to_jiffies(10));
	}

	/* Mark any outstanding timestamps as stale, since they might have
	 * been captured in hardware before the time update. This could lead
	 * to us extending them with the wrong cached value resulting in
	 * incorrect timestamp values.
	 */
	ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx);
}

/**
 * ice_ptp_read_time - Read the time from the device
 * @pf: Board private structure
 * @ts: timespec structure to hold the current time value
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
 *
 * This function reads the source clock registers. Since the registers hold
 * the time as 64 bits of nanoseconds, the value is converted to a timespec64
 * before being returned in @ts.
 */
static void
ice_ptp_read_time(struct ice_pf *pf, struct timespec64 *ts,
		  struct ptp_system_timestamp *sts)
{
	u64 time_ns = ice_ptp_read_src_clk_reg(pf, sts);

	*ts = ns_to_timespec64(time_ns);
}

/**
 * ice_ptp_write_init - Set PHC time to provided value
 * @pf: Board private structure
 * @ts: timespec structure that holds the new time value
 *
 * Set the PHC time to the specified time provided in the timespec.
 */
static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
{
	u64 ns = timespec64_to_ns(ts);
	struct ice_hw *hw = &pf->hw;

	return ice_ptp_init_time(hw, ns);
}

/**
 * ice_ptp_write_adj - Adjust PHC clock time atomically
 * @pf: Board private structure
 * @adj: Adjustment in nanoseconds
 *
 * Perform an atomic adjustment of the PHC time by the specified number of
 * nanoseconds.
 */
static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
{
	struct ice_hw *hw = &pf->hw;

	return ice_ptp_adj_clock(hw, adj);
}

/**
 * ice_base_incval - Get base timer increment value
 * @pf: Board private structure
 *
 * Look up the base timer increment value for this device. The base increment
 * value is used to define the nominal clock tick rate. This increment value
 * is programmed during device initialization. It is also used as the basis
 * for calculating adjustments using scaled_ppm.
 */
static u64 ice_base_incval(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u64 incval;

	if (ice_is_e810(hw))
		incval = ICE_PTP_NOMINAL_INCVAL_E810;
	else if (ice_e822_time_ref(hw) < NUM_ICE_TIME_REF_FREQ)
		incval = ice_e822_nominal_incval(ice_e822_time_ref(hw));
	else
		incval = UNKNOWN_INCVAL_E822;

	dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
		incval);

	return incval;
}

/**
 * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state
 * @port: PTP port for which Tx FIFO is checked
 */
static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
{
	int quad = port->port_num / ICE_PORTS_PER_QUAD;
	int offs = port->port_num % ICE_PORTS_PER_QUAD;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u32 val, phy_sts;
	int err;

	pf = ptp_port_to_pf(port);
	hw = &pf->hw;

	if (port->tx_fifo_busy_cnt == FIFO_OK)
		return 0;

	/* need to read FIFO state */
	if (offs == 0 || offs == 1)
		err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO01_STATUS,
					     &val);
	else
		err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO23_STATUS,
					     &val);

	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
			port->port_num, err);
		return err;
	}

	if (offs & 0x1)
		phy_sts = (val & Q_REG_FIFO13_M) >> Q_REG_FIFO13_S;
	else
		phy_sts = (val & Q_REG_FIFO02_M) >> Q_REG_FIFO02_S;

	if (phy_sts & FIFO_EMPTY) {
		port->tx_fifo_busy_cnt = FIFO_OK;
		return 0;
	}

	port->tx_fifo_busy_cnt++;

	dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
		port->tx_fifo_busy_cnt, port->port_num);

	if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
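		/* The FIFO has not drained after the maximum number of
		 * checks; assume it is stuck, reset the quad's timestamp
		 * memory, and report the FIFO as OK so timestamping can
		 * continue.
		 */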
		dev_dbg(ice_pf_to_dev(pf),
			"Port %d Tx FIFO still not empty; resetting quad %d\n",
			port->port_num, quad);
		ice_ptp_reset_ts_memory_quad_e822(hw, quad);
		port->tx_fifo_busy_cnt = FIFO_OK;
		return 0;
	}

	return -EAGAIN;
}

/**
 * ice_ptp_wait_for_offsets - Check for valid Tx and Rx offsets
 * @work: Pointer to the kthread_work structure for this task
 *
 * Check whether hardware has completed measuring the Tx and Rx offset values
 * used to configure and enable vernier timestamp calibration.
 *
 * Once the offset in either direction is measured, configure the associated
 * registers with the calibrated offset values and enable timestamping. The Tx
 * and Rx directions are configured independently as soon as their associated
 * offsets are known.
 *
 * This function reschedules itself until both Tx and Rx calibration have
 * completed.
 */
static void ice_ptp_wait_for_offsets(struct kthread_work *work)
{
	struct ice_ptp_port *port;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int tx_err;
	int rx_err;

	port = container_of(work, struct ice_ptp_port, ov_work.work);
	pf = ptp_port_to_pf(port);
	hw = &pf->hw;

	if (ice_is_reset_in_progress(pf->state)) {
		/* wait for device driver to complete reset */
		kthread_queue_delayed_work(pf->ptp.kworker,
					   &port->ov_work,
					   msecs_to_jiffies(100));
		return;
	}

	tx_err = ice_ptp_check_tx_fifo(port);
	if (!tx_err)
		tx_err = ice_phy_cfg_tx_offset_e822(hw, port->port_num);
	rx_err = ice_phy_cfg_rx_offset_e822(hw, port->port_num);
	if (tx_err || rx_err) {
		/* Tx and/or Rx offset not yet configured, try again later */
		kthread_queue_delayed_work(pf->ptp.kworker,
					   &port->ov_work,
					   msecs_to_jiffies(100));
		return;
	}
}

/**
 * ice_ptp_port_phy_stop - Stop timestamping for a PHY port
 * @ptp_port: PTP port to stop
 */
static int
ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
{
	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
	u8 port = ptp_port->port_num;
	struct ice_hw *hw = &pf->hw;
	int err;

	if (ice_is_e810(hw))
		return 0;

	mutex_lock(&ptp_port->ps_lock);

	kthread_cancel_delayed_work_sync(&ptp_port->ov_work);

	err = ice_stop_phy_timer_e822(hw, port, true);
	if (err)
		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
			port, err);

	mutex_unlock(&ptp_port->ps_lock);

	return err;
}

/**
 * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping
 * @ptp_port: PTP port for which the PHY start is set
 *
 * Start the PHY timestamping block, and initiate Vernier timestamping
 * calibration. If timestamping cannot be calibrated (such as if link is down)
 * then disable the timestamping block instead.
 */
static int
ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
{
	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
	u8 port = ptp_port->port_num;
	struct ice_hw *hw = &pf->hw;
	int err;

	if (ice_is_e810(hw))
		return 0;

	if (!ptp_port->link_up)
		return ice_ptp_port_phy_stop(ptp_port);

	mutex_lock(&ptp_port->ps_lock);

	kthread_cancel_delayed_work_sync(&ptp_port->ov_work);

	/* temporarily disable Tx timestamps while calibrating PHY offset */
	spin_lock(&ptp_port->tx.lock);
	ptp_port->tx.calibrating = true;
	spin_unlock(&ptp_port->tx.lock);
	ptp_port->tx_fifo_busy_cnt = 0;

	/* Start the PHY timer in Vernier mode */
	err = ice_start_phy_timer_e822(hw, port);
	if (err)
		goto out_unlock;

	/* Enable Tx timestamps right away */
	spin_lock(&ptp_port->tx.lock);
	ptp_port->tx.calibrating = false;
	spin_unlock(&ptp_port->tx.lock);

	kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0);

out_unlock:
	if (err)
		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
			port, err);

	mutex_unlock(&ptp_port->ps_lock);

	return err;
}

/**
 * ice_ptp_link_change - Reconfigure PTP after link status change
 * @pf: Board private structure
 * @port: Port for which the PHY start is set
 * @linkup: Link is up or down
 */
void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
{
	struct ice_ptp_port *ptp_port;
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return;

	if (WARN_ON_ONCE(port >= ICE_NUM_EXTERNAL_PORTS))
		return;

	ptp_port = &pf->ptp.port;
	if (WARN_ON_ONCE(ptp_port->port_num != port))
		return;

	/* Update cached link status for this port immediately */
	ptp_port->link_up = linkup;

	switch (hw->phy_model) {
	case ICE_PHY_E810:
		/* Do not reconfigure E810 PHY */
		return;
	case ICE_PHY_E822:
		ice_ptp_port_phy_restart(ptp_port);
		return;
	default:
		dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__);
	}
}

/**
 * ice_ptp_tx_ena_intr - Enable or disable the Tx timestamp interrupt
 * @pf: PF private structure
 * @ena: bool value to enable or disable interrupt
 * @threshold: Minimum number of packets at which intr is triggered
 *
 * Utility function to enable or disable Tx timestamp interrupt and threshold
 */
static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold)
{
	struct ice_hw *hw = &pf->hw;
	int err = 0;
	int quad;
	u32 val;

	ice_ptp_reset_ts_memory(hw);

	for (quad = 0; quad < ICE_MAX_QUAD; quad++) {
		err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG,
					     &val);
		if (err)
			break;

		if (ena) {
			val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
			val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M;
			val |= ((threshold << Q_REG_TX_MEM_GBL_CFG_INTR_THR_S) &
				Q_REG_TX_MEM_GBL_CFG_INTR_THR_M);
		} else {
			val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
		}

		err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG,
					      val);
		if (err)
			break;
	}

	if (err)
		dev_err(ice_pf_to_dev(pf), "PTP failed in intr ena, err %d\n",
			err);
	return err;
}

/**
 * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block
 * @pf: Board private structure
 */
static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
{
	ice_ptp_port_phy_restart(&pf->ptp.port);
}

/**
 * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping
 * @pf: Board private structure
 */
static void ice_ptp_restart_all_phy(struct ice_pf *pf)
{
	struct list_head *entry;

	list_for_each(entry, &pf->ptp.ports_owner.ports) {
		struct ice_ptp_port *port = list_entry(entry,
						       struct ice_ptp_port,
						       list_member);

		if (port->link_up)
			ice_ptp_port_phy_restart(port);
	}
}

/**
 * ice_ptp_adjfine - Adjust clock increment rate
 * @info: the driver's PTP info structure
 * @scaled_ppm: Parts per million with 16-bit fractional field
 *
 * Adjust the frequency of the clock by the indicated scaled ppm from the
 * base frequency.
 */
static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;
	u64 incval;
	int err;

	incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm);
	err = ice_ptp_write_incval_locked(hw, incval);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
			err);
		return -EIO;
	}

	return 0;
}

/**
 * ice_ptp_extts_event - Process PTP external clock event
 * @pf: Board private structure
 */
void ice_ptp_extts_event(struct ice_pf *pf)
{
	struct ptp_clock_event event;
	struct ice_hw *hw = &pf->hw;
	u8 chan, tmr_idx;
	u32 hi, lo;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
	/* Event time is captured by one of the two matched registers
	 *   GLTSYN_EVNT_L: 32 LSB of sampled time event
	 *   GLTSYN_EVNT_H: 32 MSB of sampled time event
	 * Event is defined in GLTSYN_EVNT_0 register
	 */
	for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
		/* Check if channel is enabled */
		if (pf->ptp.ext_ts_irq & (1 << chan)) {
			lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
			hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
			event.timestamp = (((u64)hi) << 32) | lo;
			event.type = PTP_CLOCK_EXTTS;
			event.index = chan;

			/* Fire event */
			ptp_clock_event(pf->ptp.clock, &event);
			pf->ptp.ext_ts_irq &= ~(1 << chan);
		}
	}
}

/**
 * ice_ptp_cfg_extts - Configure EXTTS pin and channel
 * @pf: Board private structure
 * @ena: true to enable; false to disable
 * @chan: GPIO channel (0-3)
 * @gpio_pin: GPIO pin
 * @extts_flags: request flags from the ptp_extts_request.flags
 */
static int
ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
		  unsigned int extts_flags)
{
	u32 func, aux_reg, gpio_reg, irq_reg;
	struct ice_hw *hw = &pf->hw;
	u8 tmr_idx;

	if (chan > (unsigned int)pf->ptp.info.n_ext_ts)
		return -EINVAL;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;

	irq_reg = rd32(hw, PFINT_OICR_ENA);

	if (ena) {
		/* Enable the interrupt */
		irq_reg |= PFINT_OICR_TSYN_EVNT_M;
		aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;

#define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE	BIT(0)
#define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE	BIT(1)

		/* set event level to requested edge */
		if (extts_flags & PTP_FALLING_EDGE)
			aux_reg |=
				GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
		if (extts_flags & PTP_RISING_EDGE)
			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;

		/* Write GPIO CTL reg.
		 * 0x1 is input sampled by EVENT register (channel)
		 * + num_in_channels * tmr_idx
		 */
		func = 1 + chan + (tmr_idx * 3);
		gpio_reg = ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) &
			    GLGEN_GPIO_CTL_PIN_FUNC_M);
		pf->ptp.ext_ts_chan |= (1 << chan);
	} else {
		/* clear the values we set to reset defaults */
		aux_reg = 0;
		gpio_reg = 0;
		pf->ptp.ext_ts_chan &= ~(1 << chan);
		if (!pf->ptp.ext_ts_chan)
			irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
	}

	wr32(hw, PFINT_OICR_ENA, irq_reg);
	wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);

	return 0;
}

/**
 * ice_ptp_cfg_clkout - Configure clock to generate periodic wave
 * @pf: Board private structure
 * @chan: GPIO channel (0-3)
 * @config: desired periodic clk configuration. NULL will disable channel
 * @store: If set to true the values will be stored
 *
 * Configure the internal clock generator modules to generate the clock wave of
 * specified period.
 */
static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
			      struct ice_perout_channel *config, bool store)
{
	u64 current_time, period, start_time, phase;
	struct ice_hw *hw = &pf->hw;
	u32 func, val, gpio_pin;
	u8 tmr_idx;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;

	/* 0. Reset mode & out_en in AUX_OUT */
	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);

	/* If we're disabling the output, clear out CLKO and TGT and keep
	 * output level low
	 */
	if (!config || !config->ena) {
		wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0);
		wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0);
		wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0);

		val = GLGEN_GPIO_CTL_PIN_DIR_M;
		gpio_pin = pf->ptp.perout_channels[chan].gpio_pin;
		wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);

		/* Store the value if requested */
		if (store)
			memset(&pf->ptp.perout_channels[chan], 0,
			       sizeof(struct ice_perout_channel));

		return 0;
	}
	period = config->period;
	start_time = config->start_time;
	div64_u64_rem(start_time, period, &phase);
	gpio_pin = config->gpio_pin;

	/* 1. Write clkout with half of required period value */
	if (period & 0x1) {
		dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
		goto err;
	}

	period >>= 1;

	/* For proper operation, the GLTSYN_CLKO must be larger than clock tick
	 */
#define MIN_PULSE 3
	if (period <= MIN_PULSE || period > U32_MAX) {
		dev_err(ice_pf_to_dev(pf), "CLK Period must be > %d && < 2^33",
			MIN_PULSE * 2);
		goto err;
	}

	wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));

	/* Allow time for programming before start_time is hit */
	current_time = ice_ptp_read_src_clk_reg(pf, NULL);

	/* if start time is in the past start the timer at the nearest second
	 * maintaining phase
	 */
	if (start_time < current_time)
		start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
				       NSEC_PER_SEC) * NSEC_PER_SEC + phase;

	if (ice_is_e810(hw))
		start_time -= E810_OUT_PROP_DELAY_NS;
	else
		start_time -= ice_e822_pps_delay(ice_e822_time_ref(hw));

	/* 2. Write TARGET time */
	wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
	wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start_time));

	/* 3. Write AUX_OUT register */
	val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);

	/* 4. write GPIO CTL reg */
	func = 8 + chan + (tmr_idx * 4);
	val = GLGEN_GPIO_CTL_PIN_DIR_M |
	      ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & GLGEN_GPIO_CTL_PIN_FUNC_M);
	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);

	/* Store the value if requested */
	if (store) {
		memcpy(&pf->ptp.perout_channels[chan], config,
		       sizeof(struct ice_perout_channel));
		pf->ptp.perout_channels[chan].start_time = phase;
	}

	return 0;
err:
	dev_err(ice_pf_to_dev(pf), "PTP failed to cfg per_clk\n");
	return -EFAULT;
}

/**
 * ice_ptp_disable_all_clkout - Disable all currently configured outputs
 * @pf: pointer to the PF structure
 *
 * Disable all currently configured clock outputs. This is necessary before
 * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_clkout to
 * re-enable the clocks again.
 */
static void ice_ptp_disable_all_clkout(struct ice_pf *pf)
{
	uint i;

	for (i = 0; i < pf->ptp.info.n_per_out; i++)
		if (pf->ptp.perout_channels[i].ena)
			ice_ptp_cfg_clkout(pf, i, NULL, false);
}

/**
 * ice_ptp_enable_all_clkout - Enable all configured periodic clock outputs
 * @pf: pointer to the PF structure
 *
 * Enable all currently configured clock outputs. Use this after
 * ice_ptp_disable_all_clkout to reconfigure the output signals according to
 * their configuration.
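 *
 * In this file the disable/enable pair brackets operations that modify the
 * PHC, such as ice_ptp_settime64() and ice_ptp_adjtime().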
 */
static void ice_ptp_enable_all_clkout(struct ice_pf *pf)
{
	uint i;

	for (i = 0; i < pf->ptp.info.n_per_out; i++)
		if (pf->ptp.perout_channels[i].ena)
			ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i],
					   false);
}

/**
 * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC
 * @info: the driver's PTP info structure
 * @rq: The requested feature to change
 * @on: Enable/disable flag
 */
static int
ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
			 struct ptp_clock_request *rq, int on)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_perout_channel clk_cfg = {0};
	bool sma_pres = false;
	unsigned int chan;
	u32 gpio_pin;
	int err;

	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
		sma_pres = true;

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
		chan = rq->perout.index;
		if (sma_pres) {
			if (chan == ice_pin_desc_e810t[SMA1].chan)
				clk_cfg.gpio_pin = GPIO_20;
			else if (chan == ice_pin_desc_e810t[SMA2].chan)
				clk_cfg.gpio_pin = GPIO_22;
			else
				return -1;
		} else if (ice_is_e810t(&pf->hw)) {
			if (chan == 0)
				clk_cfg.gpio_pin = GPIO_20;
			else
				clk_cfg.gpio_pin = GPIO_22;
		} else if (chan == PPS_CLK_GEN_CHAN) {
			clk_cfg.gpio_pin = PPS_PIN_INDEX;
		} else {
			clk_cfg.gpio_pin = chan;
		}

		clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
				  rq->perout.period.nsec);
		clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) +
				      rq->perout.start.nsec);
		clk_cfg.ena = !!on;

		err = ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
		break;
	case PTP_CLK_REQ_EXTTS:
		chan = rq->extts.index;
		if (sma_pres) {
			if (chan < ice_pin_desc_e810t[SMA2].chan)
				gpio_pin = GPIO_21;
			else
				gpio_pin = GPIO_23;
		} else if (ice_is_e810t(&pf->hw)) {
			if (chan == 0)
				gpio_pin = GPIO_21;
			else
				gpio_pin = GPIO_23;
		} else {
			gpio_pin = chan;
		}

		err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin,
					rq->extts.flags);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

/**
 * ice_ptp_gpio_enable_e823 - Enable/disable ancillary features of PHC
 * @info: the driver's PTP info structure
 * @rq: The requested feature to change
 * @on: Enable/disable flag
 */
static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info,
				    struct ptp_clock_request *rq, int on)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_perout_channel clk_cfg = {0};
	int err;

	switch (rq->type) {
	case PTP_CLK_REQ_PPS:
		clk_cfg.gpio_pin = PPS_PIN_INDEX;
		clk_cfg.period = NSEC_PER_SEC;
		clk_cfg.ena = !!on;

		err = ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true);
		break;
	case PTP_CLK_REQ_EXTTS:
		err = ice_ptp_cfg_extts(pf, !!on, rq->extts.index,
					TIME_SYNC_PIN_INDEX, rq->extts.flags);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

/**
 * ice_ptp_gettimex64 - Get the time of the clock
 * @info: the driver's PTP info structure
 * @ts: timespec64 structure to hold the current time value
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
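 *       (A system timestamp is captured immediately before and immediately
 *       after the PHC register read; see ice_ptp_read_src_clk_reg().)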
 *
 * Read the device clock and return the correct value in ns, after converting
 * it into a timespec struct.
 */
static int
ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
		   struct ptp_system_timestamp *sts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;

	if (!ice_ptp_lock(hw)) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to get time\n");
		return -EBUSY;
	}

	ice_ptp_read_time(pf, ts, sts);
	ice_ptp_unlock(hw);

	return 0;
}

/**
 * ice_ptp_settime64 - Set the time of the clock
 * @info: the driver's PTP info structure
 * @ts: timespec64 structure that holds the new time value
 *
 * Set the device clock to the user input value. The conversion from timespec
 * to ns happens in the write function.
 */
static int
ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct timespec64 ts64 = *ts;
	struct ice_hw *hw = &pf->hw;
	int err;

	/* For Vernier mode, we need to recalibrate after new settime.
	 * Start with disabling timestamp block
	 */
	if (pf->ptp.port.link_up)
		ice_ptp_port_phy_stop(&pf->ptp.port);

	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto exit;
	}

	/* Disable periodic outputs */
	ice_ptp_disable_all_clkout(pf);

	err = ice_ptp_write_init(pf, &ts64);
	ice_ptp_unlock(hw);

	if (!err)
		ice_ptp_reset_cached_phctime(pf);

	/* Reenable periodic outputs */
	ice_ptp_enable_all_clkout(pf);

	/* Recalibrate and re-enable timestamp blocks for E822/E823 */
	if (hw->phy_model == ICE_PHY_E822)
		ice_ptp_restart_all_phy(pf);
exit:
	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
		return err;
	}

	return 0;
}

/**
 * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
 * @info: the driver's PTP info structure
 * @delta: Offset in nanoseconds to adjust the time by
 */
static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
{
	struct timespec64 now, then;
	int ret;

	then = ns_to_timespec64(delta);
	ret = ice_ptp_gettimex64(info, &now, NULL);
	if (ret)
		return ret;
	now = timespec64_add(now, then);

	return ice_ptp_settime64(info, (const struct timespec64 *)&now);
}

/**
 * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta
 * @info: the driver's PTP info structure
 * @delta: Offset in nanoseconds to adjust the time by
 */
static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);

	/* Hardware only supports atomic adjustments using signed 32-bit
	 * integers. For any adjustment outside this range, perform
	 * a non-atomic get->adjust->set flow.
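	 * For example, a 5 second adjustment (5,000,000,000 ns) exceeds
	 * S32_MAX (2,147,483,647 ns) and therefore takes the non-atomic path.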
	 */
	if (delta > S32_MAX || delta < S32_MIN) {
		dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta);
		return ice_ptp_adjtime_nonatomic(info, delta);
	}

	if (!ice_ptp_lock(hw)) {
		dev_err(dev, "PTP failed to acquire semaphore in adjtime\n");
		return -EBUSY;
	}

	/* Disable periodic outputs */
	ice_ptp_disable_all_clkout(pf);

	err = ice_ptp_write_adj(pf, delta);

	/* Reenable periodic outputs */
	ice_ptp_enable_all_clkout(pf);

	ice_ptp_unlock(hw);

	if (err) {
		dev_err(dev, "PTP failed to adjust time, err %d\n", err);
		return err;
	}

	ice_ptp_reset_cached_phctime(pf);

	return 0;
}

#ifdef CONFIG_ICE_HWTS
/**
 * ice_ptp_get_syncdevicetime - Get the cross time stamp info
 * @device: Current device time
 * @system: System counter value read synchronously with device time
 * @ctx: Context provided by timekeeping code
 *
 * Read device and system (ART) clock simultaneously and return the corrected
 * clock values in ns.
 */
static int
ice_ptp_get_syncdevicetime(ktime_t *device,
			   struct system_counterval_t *system,
			   void *ctx)
{
	struct ice_pf *pf = (struct ice_pf *)ctx;
	struct ice_hw *hw = &pf->hw;
	u32 hh_lock, hh_art_ctl;
	int i;

#define MAX_HH_HW_LOCK_TRIES 5
#define MAX_HH_CTL_LOCK_TRIES 100

	for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) {
		/* Get the HW lock */
		hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
		if (hh_lock & PFHH_SEM_BUSY_M) {
			usleep_range(10000, 15000);
			continue;
		}
		break;
	}
	if (hh_lock & PFHH_SEM_BUSY_M) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
		return -EBUSY;
	}

	/* Program cmd to master timer */
	ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);

	/* Start the ART and device clock sync sequence */
	hh_art_ctl = rd32(hw, GLHH_ART_CTL);
	hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
	wr32(hw, GLHH_ART_CTL, hh_art_ctl);

	for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) {
		/* Wait for sync to complete */
		hh_art_ctl = rd32(hw, GLHH_ART_CTL);
		if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
			udelay(1);
			continue;
		} else {
			u32 hh_ts_lo, hh_ts_hi, tmr_idx;
			u64 hh_ts;

			tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
			/* Read ART time */
			hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
			hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
			*system = convert_art_ns_to_tsc(hh_ts);
			/* Read Device source clock time */
			hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
			hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
			*device = ns_to_ktime(hh_ts);
			break;
		}
	}

	/* Clear the master timer */
	ice_ptp_src_cmd(hw, ICE_PTP_NOP);

	/* Release HW lock */
	hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
	hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
	wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);

	if (i == MAX_HH_CTL_LOCK_TRIES)
		return -ETIMEDOUT;

	return 0;
}

/**
 * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp
 * @info: the driver's PTP info structure
 * @cts: The memory to fill the cross timestamp info
 *
 * Capture a cross timestamp between the ART and the device PTP hardware
 * clock. Fill the cross timestamp information and report it back to the
 * caller.
 *
 * This is only valid for E822 and E823 devices which have support for
 * generating the cross timestamp via PCIe PTM.
 *
 * In order to correctly correlate the ART timestamp back to the TSC time, the
 * CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
 */
static int
ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info,
			    struct system_device_crosststamp *cts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);

	return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
					     pf, NULL, cts);
}
#endif /* CONFIG_ICE_HWTS */

/**
 * ice_ptp_get_ts_config - ioctl interface to read the timestamping config
 * @pf: Board private structure
 * @ifr: ioctl data
 *
 * Copy the timestamping config to user buffer
 */
int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
	struct hwtstamp_config *config;

	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return -EIO;

	config = &pf->ptp.tstamp_config;

	return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
		-EFAULT : 0;
}

/**
 * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
 * @pf: Board private structure
 * @config: hwtstamp settings requested or saved
 */
static int
ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
{
	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
		pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
		break;
	case HWTSTAMP_TX_ON:
		pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
		break;
	default:
		return -ERANGE;
	}

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
	case HWTSTAMP_FILTER_ALL:
		pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	/* Immediately update the device timestamping mode */
	ice_ptp_restore_timestamp_mode(pf);

	return 0;
}

/**
 * ice_ptp_set_ts_config - ioctl interface to control the timestamping
 * @pf: Board private structure
 * @ifr: ioctl data
 *
 * Get the user config and store it
 */
int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return -EAGAIN;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = ice_ptp_set_timestamp_mode(pf, &config);
	if (err)
		return err;

	/* Return the actual configuration set */
	config = pf->ptp.tstamp_config;

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

/**
 * ice_ptp_rx_hwtstamp - Check for an Rx timestamp
 * @rx_ring: Ring to get the VSI info
 * @rx_desc: Receive descriptor
 * @skb: Particular skb to send timestamp with
 *
 * The driver receives a notification in the receive descriptor with timestamp.
 * The timestamp is in ns, so we must convert the result first.
 */
void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
		    union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *hwtstamps;
	u64 ts_ns, cached_time;
	u32 ts_high;

	if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
		return;

	cached_time = READ_ONCE(rx_ring->cached_phctime);

	/* Do not report a timestamp if we don't have a cached PHC time */
	if (!cached_time)
		return;

	/* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
	 * PHC value, rather than accessing the PF. This also allows us to
	 * simply pass the upper 32bits of nanoseconds directly. Calling
	 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these
	 * bits itself.
	 */
	ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
	ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);

	hwtstamps = skb_hwtstamps(skb);
	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
}

/**
 * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins
 * @pf: pointer to the PF structure
 * @info: PTP clock info structure
 *
 * Disable the OS access to the SMA pins. Called to clear out the OS
 * indications of pin support when we fail to setup the E810-T SMA control
 * register.
 */
static void
ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
{
	struct device *dev = ice_pf_to_dev(pf);

	dev_warn(dev, "Failed to configure E810-T SMA pin control\n");

	info->enable = NULL;
	info->verify = NULL;
	info->n_pins = 0;
	info->n_ext_ts = 0;
	info->n_per_out = 0;
}

/**
 * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins
 * @pf: pointer to the PF structure
 * @info: PTP clock info structure
 *
 * Finish setting up the SMA pins by allocating pin_config, and setting it up
 * according to the current status of the SMA. On failure, disable all of the
 * extended SMA pin support.
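 *
 * Note: pin_config is devm-allocated against the PF's device, so it is
 * released automatically when the driver is detached.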
 */
static void
ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	/* Allocate memory for kernel pins interface */
	info->pin_config = devm_kcalloc(dev, info->n_pins,
					sizeof(*info->pin_config), GFP_KERNEL);
	if (!info->pin_config) {
		ice_ptp_disable_sma_pins_e810t(pf, info);
		return;
	}

	/* Read current SMA status */
	err = ice_get_sma_config_e810t(&pf->hw, info->pin_config);
	if (err)
		ice_ptp_disable_sma_pins_e810t(pf, info);
}

/**
 * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
 * @pf: pointer to the PF instance
 * @info: PTP clock capabilities
 */
static void
ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
		info->n_ext_ts = N_EXT_TS_E810;
		info->n_per_out = N_PER_OUT_E810T;
		info->n_pins = NUM_PTP_PINS_E810T;
		info->verify = ice_verify_pin_e810t;

		/* Complete setup of the SMA pins */
		ice_ptp_setup_sma_pins_e810t(pf, info);
	} else if (ice_is_e810t(&pf->hw)) {
		info->n_ext_ts = N_EXT_TS_NO_SMA_E810T;
		info->n_per_out = N_PER_OUT_NO_SMA_E810T;
	} else {
		info->n_per_out = N_PER_OUT_E810;
		info->n_ext_ts = N_EXT_TS_E810;
	}
}

/**
 * ice_ptp_setup_pins_e823 - Setup PTP pins in sysfs
 * @pf: pointer to the PF instance
 * @info: PTP clock capabilities
 */
static void
ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info)
{
	info->pps = 1;
	info->n_per_out = 0;
	info->n_ext_ts = 1;
}

/**
 * ice_ptp_set_funcs_e82x - Set specialized functions for E82x support
 * @pf: Board private structure
 * @info: PTP info to fill
 *
 * Assign functions to the PTP capabilities structure for E82x devices.
 * Functions which operate across all device families should be set directly
 * in ice_ptp_set_caps. Only add functions here which are distinct for E82x
 * devices.
 */
static void
ice_ptp_set_funcs_e82x(struct ice_pf *pf, struct ptp_clock_info *info)
{
#ifdef CONFIG_ICE_HWTS
	if (boot_cpu_has(X86_FEATURE_ART) &&
	    boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
		info->getcrosststamp = ice_ptp_getcrosststamp_e82x;
#endif /* CONFIG_ICE_HWTS */
}

/**
 * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
 * @pf: Board private structure
 * @info: PTP info to fill
 *
 * Assign functions to the PTP capabilities structure for E810 devices.
 * Functions which operate across all device families should be set directly
 * in ice_ptp_set_caps. Only add functions here which are distinct for e810
 * devices.
 */
static void
ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
	info->enable = ice_ptp_gpio_enable_e810;
	ice_ptp_setup_pins_e810(pf, info);
}

/**
 * ice_ptp_set_funcs_e823 - Set specialized functions for E823 support
 * @pf: Board private structure
 * @info: PTP info to fill
 *
 * Assign functions to the PTP capabilities structure for E823 devices.
 * Functions which operate across all device families should be set directly
 * in ice_ptp_set_caps. Only add functions here which are distinct for e823
 * devices.
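 *
 * E823 reuses the common E82x setup (cross timestamping via PCIe PTM when
 * available) and additionally wires up the PPS output and external timestamp
 * GPIO handler.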
 */
static void
ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info)
{
	ice_ptp_set_funcs_e82x(pf, info);

	info->enable = ice_ptp_gpio_enable_e823;
	ice_ptp_setup_pins_e823(pf, info);
}

/**
 * ice_ptp_set_caps - Set PTP capabilities
 * @pf: Board private structure
 */
static void ice_ptp_set_caps(struct ice_pf *pf)
{
	struct ptp_clock_info *info = &pf->ptp.info;
	struct device *dev = ice_pf_to_dev(pf);

	snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
		 dev_driver_string(dev), dev_name(dev));
	info->owner = THIS_MODULE;
	info->max_adj = 100000000;
	info->adjtime = ice_ptp_adjtime;
	info->adjfine = ice_ptp_adjfine;
	info->gettimex64 = ice_ptp_gettimex64;
	info->settime64 = ice_ptp_settime64;

	if (ice_is_e810(&pf->hw))
		ice_ptp_set_funcs_e810(pf, info);
	else if (ice_is_e823(&pf->hw))
		ice_ptp_set_funcs_e823(pf, info);
	else
		ice_ptp_set_funcs_e82x(pf, info);
}

/**
 * ice_ptp_create_clock - Create PTP clock device for userspace
 * @pf: Board private structure
 *
 * This function creates a new PTP clock device, but only if one does not
 * already exist. It returns an error if the clock cannot be created and
 * success if a device already exists. It should be used by ice_ptp_init to
 * create the clock initially, and prevents global resets from creating new
 * clock devices.
 */
static long ice_ptp_create_clock(struct ice_pf *pf)
{
	struct ptp_clock_info *info;
	struct device *dev;

	/* No need to create a clock device if we already have one */
	if (pf->ptp.clock)
		return 0;

	ice_ptp_set_caps(pf);

	info = &pf->ptp.info;
	dev = ice_pf_to_dev(pf);

	/* Attempt to register the clock before enabling the hardware. */
	pf->ptp.clock = ptp_clock_register(info, dev);
	if (IS_ERR(pf->ptp.clock)) {
		dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device");
		return PTR_ERR(pf->ptp.clock);
	}

	return 0;
}

/**
 * ice_ptp_request_ts - Request an available Tx timestamp index
 * @tx: the PTP Tx timestamp tracker to request from
 * @skb: the SKB to associate with this timestamp request
 */
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
{
	u8 idx;

	spin_lock(&tx->lock);

	/* Check that this tracker is accepting new timestamp requests */
	if (!ice_ptp_is_tx_tracker_up(tx)) {
		spin_unlock(&tx->lock);
		return -1;
	}

	/* Find and set the first available index */
	idx = find_first_zero_bit(tx->in_use, tx->len);
	if (idx < tx->len) {
		/* We got a valid index that no other thread could have set. Store
		 * a reference to the skb and the start time to allow discarding old
		 * requests.
		 */
		set_bit(idx, tx->in_use);
		clear_bit(idx, tx->stale);
		tx->tstamps[idx].start = jiffies;
		tx->tstamps[idx].skb = skb_get(skb);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		ice_trace(tx_tstamp_request, skb, idx);
	}

	spin_unlock(&tx->lock);

	/* return the appropriate PHY timestamp register index, -1 if no
	 * indexes were available.
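	 * The returned value is idx + tx->offset, i.e. the index within the
	 * PHY timestamp block that this tracker covers.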
	 */
	if (idx >= tx->len)
		return -1;
	else
		return idx + tx->offset;
}

/**
 * ice_ptp_process_ts - Process the PTP Tx timestamps
 * @pf: Board private structure
 *
 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx
 * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise.
 */
enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
{
	switch (pf->ptp.tx_interrupt_mode) {
	case ICE_PTP_TX_INTERRUPT_NONE:
		/* This device has the clock owner handle timestamps for it */
		return ICE_TX_TSTAMP_WORK_DONE;
	case ICE_PTP_TX_INTERRUPT_SELF:
		/* This device handles its own timestamps */
		return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
	case ICE_PTP_TX_INTERRUPT_ALL:
		/* This device handles timestamps for all ports */
		return ice_ptp_tx_tstamp_owner(pf);
	default:
		WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
			  pf->ptp.tx_interrupt_mode);
		return ICE_TX_TSTAMP_WORK_DONE;
	}
}

static void ice_ptp_periodic_work(struct kthread_work *work)
{
	struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
	int err;

	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return;

	err = ice_ptp_update_cached_phctime(pf);

	/* Run twice a second or reschedule if phc update failed */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work,
				   msecs_to_jiffies(err ? 10 : 500));
}

/**
 * ice_ptp_reset - Initialize PTP hardware clock support after reset
 * @pf: Board private structure
 */
void ice_ptp_reset(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	struct ice_hw *hw = &pf->hw;
	struct timespec64 ts;
	int err, itr = 1;
	u64 time_diff;

	if (test_bit(ICE_PFR_REQ, pf->state))
		goto pfr;

	if (!ice_pf_src_tmr_owned(pf))
		goto reset_ts;

	err = ice_ptp_init_phc(hw);
	if (err)
		goto err;

	/* Acquire the global hardware lock */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto err;
	}

	/* Write the increment time value to PHY and LAN */
	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
	if (err) {
		ice_ptp_unlock(hw);
		goto err;
	}

	/* Write the initial Time value to PHY and LAN using the cached PHC
	 * time before the reset and time difference between stopping and
	 * starting the clock.
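	 * That is, ts = cached_phc_time + (ktime_get_real_ns() - reset_time),
	 * falling back to the current system time when no PHC time was
	 * cached.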
	 */
	if (ptp->cached_phc_time) {
		time_diff = ktime_get_real_ns() - ptp->reset_time;
		ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);
	} else {
		ts = ktime_to_timespec64(ktime_get_real());
	}
	err = ice_ptp_write_init(pf, &ts);
	if (err) {
		ice_ptp_unlock(hw);
		goto err;
	}

	/* Release the global hardware lock */
	ice_ptp_unlock(hw);

	if (!ice_is_e810(hw)) {
		/* Enable quad interrupts */
		err = ice_ptp_tx_ena_intr(pf, true, itr);
		if (err)
			goto err;
	}

reset_ts:
	/* Restart the PHY timestamping block */
	ice_ptp_reset_phy_timestamping(pf);

pfr:
	/* Init Tx structures */
	if (ice_is_e810(&pf->hw)) {
		err = ice_ptp_init_tx_e810(pf, &ptp->port.tx);
	} else {
		kthread_init_delayed_work(&ptp->port.ov_work,
					  ice_ptp_wait_for_offsets);
		err = ice_ptp_init_tx_e822(pf, &ptp->port.tx,
					   ptp->port.port_num);
	}
	if (err)
		goto err;

	set_bit(ICE_FLAG_PTP, pf->flags);

	/* Start periodic work going */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);

	dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
	return;

err:
	dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
}

/**
 * ice_ptp_aux_dev_to_aux_pf - Get auxiliary PF handle for the auxiliary device
 * @aux_dev: auxiliary device to get the auxiliary PF for
 */
static struct ice_pf *
ice_ptp_aux_dev_to_aux_pf(struct auxiliary_device *aux_dev)
{
	struct ice_ptp_port *aux_port;
	struct ice_ptp *aux_ptp;

	aux_port = container_of(aux_dev, struct ice_ptp_port, aux_dev);
	aux_ptp = container_of(aux_port, struct ice_ptp, port);

	return container_of(aux_ptp, struct ice_pf, ptp);
}

/**
 * ice_ptp_aux_dev_to_owner_pf - Get PF handle for the auxiliary device
 * @aux_dev: auxiliary device to get the PF for
 */
static struct ice_pf *
ice_ptp_aux_dev_to_owner_pf(struct auxiliary_device *aux_dev)
{
	struct ice_ptp_port_owner *ports_owner;
	struct auxiliary_driver *aux_drv;
	struct ice_ptp *owner_ptp;

	if (!aux_dev->dev.driver)
		return NULL;

	aux_drv = to_auxiliary_drv(aux_dev->dev.driver);
	ports_owner = container_of(aux_drv, struct ice_ptp_port_owner,
				   aux_driver);
	owner_ptp = container_of(ports_owner, struct ice_ptp, ports_owner);
	return container_of(owner_ptp, struct ice_pf, ptp);
}

/**
 * ice_ptp_auxbus_probe - Probe auxiliary devices
 * @aux_dev: PF's auxiliary device
 * @id: Auxiliary device ID
 */
static int ice_ptp_auxbus_probe(struct auxiliary_device *aux_dev,
				const struct auxiliary_device_id *id)
{
	struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
	struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev);

	if (WARN_ON(!owner_pf))
		return -ENODEV;

	INIT_LIST_HEAD(&aux_pf->ptp.port.list_member);
	mutex_lock(&owner_pf->ptp.ports_owner.lock);
	list_add(&aux_pf->ptp.port.list_member,
		 &owner_pf->ptp.ports_owner.ports);
	mutex_unlock(&owner_pf->ptp.ports_owner.lock);

	return 0;
}

/**
 * ice_ptp_auxbus_remove - Remove auxiliary devices from the bus
 * @aux_dev: PF's auxiliary device
 */
static void ice_ptp_auxbus_remove(struct auxiliary_device *aux_dev)
{
	struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
	struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev);

	mutex_lock(&owner_pf->ptp.ports_owner.lock);
	list_del(&aux_pf->ptp.port.list_member);
	mutex_unlock(&owner_pf->ptp.ports_owner.lock);
}

/**
 * ice_ptp_auxbus_shutdown
 * @aux_dev: PF's auxiliary device
 */
static void ice_ptp_auxbus_shutdown(struct auxiliary_device *aux_dev)
{
	/* Doing nothing here, but handle to auxbus driver must be satisfied */
}

/**
 * ice_ptp_auxbus_suspend
 * @aux_dev: PF's auxiliary device
 * @state: power management state indicator
 */
static int
ice_ptp_auxbus_suspend(struct auxiliary_device *aux_dev, pm_message_t state)
{
	/* Doing nothing here, but handle to auxbus driver must be satisfied */
	return 0;
}

/**
 * ice_ptp_auxbus_resume
 * @aux_dev: PF's auxiliary device
 */
static int ice_ptp_auxbus_resume(struct auxiliary_device *aux_dev)
{
	/* Doing nothing here, but handle to auxbus driver must be satisfied */
	return 0;
}

/**
 * ice_ptp_auxbus_create_id_table - Create auxiliary device ID table
 * @pf: Board private structure
 * @name: auxiliary bus driver name
 */
static struct auxiliary_device_id *
ice_ptp_auxbus_create_id_table(struct ice_pf *pf, const char *name)
{
	struct auxiliary_device_id *ids;

	/* Second id left empty to terminate the array */
	ids = devm_kcalloc(ice_pf_to_dev(pf), 2,
			   sizeof(struct auxiliary_device_id), GFP_KERNEL);
	if (!ids)
		return NULL;

	snprintf(ids[0].name, sizeof(ids[0].name), "ice.%s", name);

	return ids;
}

/**
 * ice_ptp_register_auxbus_driver - Register PTP auxiliary bus driver
 * @pf: Board private structure
 */
static int ice_ptp_register_auxbus_driver(struct ice_pf *pf)
{
	struct auxiliary_driver *aux_driver;
	struct ice_ptp *ptp;
	struct device *dev;
	char *name;
	int err;

	ptp = &pf->ptp;
	dev = ice_pf_to_dev(pf);
	aux_driver = &ptp->ports_owner.aux_driver;
	INIT_LIST_HEAD(&ptp->ports_owner.ports);
	mutex_init(&ptp->ports_owner.lock);
	name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
			      pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
			      ice_get_ptp_src_clock_index(&pf->hw));

	aux_driver->name = name;
	aux_driver->shutdown = ice_ptp_auxbus_shutdown;
	aux_driver->suspend = ice_ptp_auxbus_suspend;
	aux_driver->remove = ice_ptp_auxbus_remove;
	aux_driver->resume = ice_ptp_auxbus_resume;
	aux_driver->probe = ice_ptp_auxbus_probe;
	aux_driver->id_table = ice_ptp_auxbus_create_id_table(pf, name);
	if (!aux_driver->id_table)
		return -ENOMEM;

	err = auxiliary_driver_register(aux_driver);
	if (err) {
		devm_kfree(dev, aux_driver->id_table);
		dev_err(dev, "Failed registering aux_driver, name <%s>\n",
			name);
	}

	return err;
}

/**
 * ice_ptp_unregister_auxbus_driver - Unregister PTP auxiliary bus driver
 * @pf: Board private structure
 */
static void ice_ptp_unregister_auxbus_driver(struct ice_pf *pf)
{
	struct auxiliary_driver *aux_driver = &pf->ptp.ports_owner.aux_driver;

	auxiliary_driver_unregister(aux_driver);
	devm_kfree(ice_pf_to_dev(pf), aux_driver->id_table);

	mutex_destroy(&pf->ptp.ports_owner.lock);
}

/**
 * ice_ptp_clock_index - Get the PTP clock index for this device
 * @pf: Board private structure
 *
 * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock
 * is associated.
 */
int ice_ptp_clock_index(struct ice_pf *pf)
{
	struct auxiliary_device *aux_dev;
	struct ice_pf *owner_pf;
	struct ptp_clock *clock;

	aux_dev = &pf->ptp.port.aux_dev;
	owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
	if (!owner_pf)
		return -1;
	clock = owner_pf->ptp.clock;

	return clock ? ptp_clock_index(clock) : -1;
}

/**
 * ice_ptp_prepare_for_reset - Prepare PTP for reset
 * @pf: Board private structure
 */
void ice_ptp_prepare_for_reset(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	u8 src_tmr;

	clear_bit(ICE_FLAG_PTP, pf->flags);

	/* Disable timestamping for both Tx and Rx */
	ice_ptp_disable_timestamp_mode(pf);

	kthread_cancel_delayed_work_sync(&ptp->work);

	if (test_bit(ICE_PFR_REQ, pf->state))
		return;

	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);

	/* Disable periodic outputs */
	ice_ptp_disable_all_clkout(pf);

	src_tmr = ice_get_ptp_src_clock_index(&pf->hw);

	/* Disable source clock */
	wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);

	/* Acquire PHC and system timer to restore after reset */
	ptp->reset_time = ktime_get_real_ns();
}

/**
 * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
 * @pf: Board private structure
 *
 * Setup and initialize a PTP clock device that represents the device hardware
 * clock. Save the clock index for other functions connected to the same
 * hardware resource.
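 *
 * Non-owner functions reach this clock through the PTP auxiliary bus and
 * resolve its index via ice_ptp_clock_index().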
 */
static int ice_ptp_init_owner(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	struct timespec64 ts;
	int err, itr = 1;

	err = ice_ptp_init_phc(hw);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n",
			err);
		return err;
	}

	/* Acquire the global hardware lock */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto err_exit;
	}

	/* Write the increment time value to PHY and LAN */
	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
	if (err) {
		ice_ptp_unlock(hw);
		goto err_exit;
	}

	ts = ktime_to_timespec64(ktime_get_real());
	/* Write the initial Time value to PHY and LAN */
	err = ice_ptp_write_init(pf, &ts);
	if (err) {
		ice_ptp_unlock(hw);
		goto err_exit;
	}

	/* Release the global hardware lock */
	ice_ptp_unlock(hw);

	if (!ice_is_e810(hw)) {
		/* Enable quad interrupts */
		err = ice_ptp_tx_ena_intr(pf, true, itr);
		if (err)
			goto err_exit;
	}

	/* Ensure we have a clock device */
	err = ice_ptp_create_clock(pf);
	if (err)
		goto err_clk;

	err = ice_ptp_register_auxbus_driver(pf);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "Failed to register PTP auxbus driver");
		goto err_aux;
	}

	return 0;
err_aux:
	ptp_clock_unregister(pf->ptp.clock);
err_clk:
	pf->ptp.clock = NULL;
err_exit:
	return err;
}

/**
 * ice_ptp_init_work - Initialize PTP work threads
 * @pf: Board private structure
 * @ptp: PF PTP structure
 */
static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
{
	struct kthread_worker *kworker;

	/* Initialize work functions */
	kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);

	/* Allocate a kworker for handling work required for the ports
	 * connected to the PTP hardware clock.
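	 * The delayed work scheduled on this kworker normally runs every
	 * 500 ms (see ice_ptp_periodic_work), retrying after 10 ms when a
	 * cached PHC time update fails.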
	 */
	kworker = kthread_create_worker(0, "ice-ptp-%s",
					dev_name(ice_pf_to_dev(pf)));
	if (IS_ERR(kworker))
		return PTR_ERR(kworker);

	ptp->kworker = kworker;

	/* Start periodic work going */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);

	return 0;
}

/**
 * ice_ptp_init_port - Initialize PTP port structure
 * @pf: Board private structure
 * @ptp_port: PTP port structure
 */
static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
{
	struct ice_hw *hw = &pf->hw;

	mutex_init(&ptp_port->ps_lock);

	switch (hw->phy_model) {
	case ICE_PHY_E810:
		return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
	case ICE_PHY_E822:
		kthread_init_delayed_work(&ptp_port->ov_work,
					  ice_ptp_wait_for_offsets);

		return ice_ptp_init_tx_e822(pf, &ptp_port->tx,
					    ptp_port->port_num);
	default:
		return -ENODEV;
	}
}

/**
 * ice_ptp_release_auxbus_device
 * @dev: device that utilizes the auxbus
 */
static void ice_ptp_release_auxbus_device(struct device *dev)
{
	/* Doing nothing here, but handle to auxbus device must be satisfied */
}

/**
 * ice_ptp_create_auxbus_device - Create PTP auxiliary bus device
 * @pf: Board private structure
 */
static int ice_ptp_create_auxbus_device(struct ice_pf *pf)
{
	struct auxiliary_device *aux_dev;
	struct ice_ptp *ptp;
	struct device *dev;
	char *name;
	int err;
	u32 id;

	ptp = &pf->ptp;
	id = ptp->port.port_num;
	dev = ice_pf_to_dev(pf);

	aux_dev = &ptp->port.aux_dev;

	name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
			      pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
			      ice_get_ptp_src_clock_index(&pf->hw));

	aux_dev->name = name;
	aux_dev->id = id;
	aux_dev->dev.release = ice_ptp_release_auxbus_device;
	aux_dev->dev.parent = dev;

	err = auxiliary_device_init(aux_dev);
	if (err)
		goto aux_err;

	err = auxiliary_device_add(aux_dev);
	if (err) {
		auxiliary_device_uninit(aux_dev);
		goto aux_err;
	}

	return 0;
aux_err:
	dev_err(dev, "Failed to create PTP auxiliary bus device <%s>\n", name);
	devm_kfree(dev, name);
	return err;
}

/**
 * ice_ptp_remove_auxbus_device - Remove PTP auxiliary bus device
 * @pf: Board private structure
 */
static void ice_ptp_remove_auxbus_device(struct ice_pf *pf)
{
	struct auxiliary_device *aux_dev = &pf->ptp.port.aux_dev;

	auxiliary_device_delete(aux_dev);
	auxiliary_device_uninit(aux_dev);

	memset(aux_dev, 0, sizeof(*aux_dev));
}

/**
 * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode
 * @pf: Board private structure
 *
 * Initialize the Tx timestamp interrupt mode for this device. For most device
 * types, each PF processes the interrupt and manages its own timestamps. For
 * E822-based devices, only the clock owner processes the timestamps. Other
 * PFs disable the interrupt and do not process their own timestamps.
 */
static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
{
	switch (pf->hw.phy_model) {
	case ICE_PHY_E822:
		/* E822 based PHY has the clock owner process the interrupt
		 * for all ports.
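		 * The owner therefore gets ICE_PTP_TX_INTERRUPT_ALL and every
		 * other PF gets ICE_PTP_TX_INTERRUPT_NONE, which is what
		 * ice_ptp_process_ts() dispatches on.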
		 */
		if (ice_pf_src_tmr_owned(pf))
			pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL;
		else
			pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE;
		break;
	default:
		/* other PHY types handle their own Tx interrupt */
		pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF;
	}
}

/**
 * ice_ptp_init - Initialize PTP hardware clock support
 * @pf: Board private structure
 *
 * Set up the device for interacting with the PTP hardware clock for all
 * functions, both the function that owns the clock hardware, and the
 * functions connected to the clock hardware.
 *
 * The clock owner will allocate and register a ptp_clock with the
 * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work
 * items used for asynchronous work such as Tx timestamps and periodic work.
 */
void ice_ptp_init(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	struct ice_hw *hw = &pf->hw;
	int err;

	ice_ptp_init_phy_model(hw);

	ice_ptp_init_tx_interrupt_mode(pf);

	/* If this function owns the clock hardware, it must allocate and
	 * configure the PTP clock device to represent it.
	 */
	if (ice_pf_src_tmr_owned(pf)) {
		err = ice_ptp_init_owner(pf);
		if (err)
			goto err;
	}

	ptp->port.port_num = hw->pf_id;
	err = ice_ptp_init_port(pf, &ptp->port);
	if (err)
		goto err;

	/* Start the PHY timestamping block */
	ice_ptp_reset_phy_timestamping(pf);

	/* Configure initial Tx interrupt settings */
	ice_ptp_cfg_tx_interrupt(pf);

	set_bit(ICE_FLAG_PTP, pf->flags);
	err = ice_ptp_init_work(pf, ptp);
	if (err)
		goto err;

	err = ice_ptp_create_auxbus_device(pf);
	if (err)
		goto err;

	dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
	return;

err:
	/* If we registered a PTP clock, release it */
	if (pf->ptp.clock) {
		ptp_clock_unregister(ptp->clock);
		pf->ptp.clock = NULL;
	}
	clear_bit(ICE_FLAG_PTP, pf->flags);
	dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
}

/**
 * ice_ptp_release - Disable the driver/HW support and unregister the clock
 * @pf: Board private structure
 *
 * This function handles the cleanup work required from the initialization by
 * clearing out the important information and unregistering the clock
 */
void ice_ptp_release(struct ice_pf *pf)
{
	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return;

	/* Disable timestamping for both Tx and Rx */
	ice_ptp_disable_timestamp_mode(pf);

	ice_ptp_remove_auxbus_device(pf);

	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);

	clear_bit(ICE_FLAG_PTP, pf->flags);

	kthread_cancel_delayed_work_sync(&pf->ptp.work);

	ice_ptp_port_phy_stop(&pf->ptp.port);
	mutex_destroy(&pf->ptp.port.ps_lock);
	if (pf->ptp.kworker) {
		kthread_destroy_worker(pf->ptp.kworker);
		pf->ptp.kworker = NULL;
	}

	if (!pf->ptp.clock)
		return;

	/* Disable periodic outputs */
	ice_ptp_disable_all_clkout(pf);

	ptp_clock_unregister(pf->ptp.clock);
	pf->ptp.clock = NULL;

	ice_ptp_unregister_auxbus_driver(pf);

	dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
}