// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_trace.h"

#define E810_OUT_PROP_DELAY_NS 1

#define UNKNOWN_INCVAL_E82X 0x100000000ULL

static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
	/* name    idx   func          chan */
	{ "GNSS",  GNSS, PTP_PF_EXTTS, 0, { 0, } },
	{ "SMA1",  SMA1, PTP_PF_NONE,  1, { 0, } },
	{ "U.FL1", UFL1, PTP_PF_NONE,  1, { 0, } },
	{ "SMA2",  SMA2, PTP_PF_NONE,  2, { 0, } },
	{ "U.FL2", UFL2, PTP_PF_NONE,  2, { 0, } },
};

/**
 * ice_get_sma_config_e810t
 * @hw: pointer to the hw struct
 * @ptp_pins: pointer to the ptp_pin_desc structure
 *
 * Read the configuration of the SMA control logic and put it into the
 * ptp_pin_desc structure
 */
static int
ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
{
	u8 data, i;
	int status;

	/* Read initial pin state */
	status = ice_read_sma_ctrl_e810t(hw, &data);
	if (status)
		return status;

	/* initialize with defaults */
	for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
		strscpy(ptp_pins[i].name, ice_pin_desc_e810t[i].name,
			sizeof(ptp_pins[i].name));
		ptp_pins[i].index = ice_pin_desc_e810t[i].index;
		ptp_pins[i].func = ice_pin_desc_e810t[i].func;
		ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
	}

	/* Parse SMA1/UFL1 */
	switch (data & ICE_SMA1_MASK_E810T) {
	case ICE_SMA1_MASK_E810T:
	default:
		ptp_pins[SMA1].func = PTP_PF_NONE;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case ICE_SMA1_DIR_EN_E810T:
		ptp_pins[SMA1].func = PTP_PF_PEROUT;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case ICE_SMA1_TX_EN_E810T:
		ptp_pins[SMA1].func = PTP_PF_EXTTS;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case 0:
		ptp_pins[SMA1].func = PTP_PF_EXTTS;
		ptp_pins[UFL1].func = PTP_PF_PEROUT;
		break;
	}

	/* Parse SMA2/UFL2 */
	switch (data & ICE_SMA2_MASK_E810T) {
	case ICE_SMA2_MASK_E810T:
	default:
		ptp_pins[SMA2].func = PTP_PF_NONE;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
		ptp_pins[SMA2].func = PTP_PF_EXTTS;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
		ptp_pins[SMA2].func = PTP_PF_PEROUT;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T):
		ptp_pins[SMA2].func = PTP_PF_NONE;
		ptp_pins[UFL2].func = PTP_PF_EXTTS;
		break;
	case ICE_SMA2_DIR_EN_E810T:
		ptp_pins[SMA2].func = PTP_PF_PEROUT;
		ptp_pins[UFL2].func = PTP_PF_EXTTS;
		break;
	}

	return 0;
}

/**
 * ice_ptp_set_sma_config_e810t
 * @hw: pointer to the hw struct
 * @ptp_pins: pointer to the ptp_pin_desc structure
 *
 * Set the configuration of the SMA control logic based on the configuration
 * requested in the ptp_pins parameter
 */
static int
ice_ptp_set_sma_config_e810t(struct ice_hw *hw,
			     const struct ptp_pin_desc *ptp_pins)
{
	int status;
	u8 data;

	/* SMA1 and UFL1 cannot be set to TX at the same time */
	if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
	    ptp_pins[UFL1].func == PTP_PF_PEROUT)
		return -EINVAL;

	/* SMA2 and UFL2 cannot be set to RX at the same time */
	if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
	    ptp_pins[UFL2].func == PTP_PF_EXTTS)
		return -EINVAL;

	/* Read initial pin state value */
	status = ice_read_sma_ctrl_e810t(hw, &data);
	if (status)
		return status;

	/* Set the right state based on the desired configuration */
	data &= ~ICE_SMA1_MASK_E810T;
	if (ptp_pins[SMA1].func == PTP_PF_NONE &&
	    ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled");
		data |= ICE_SMA1_MASK_E810T;
	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
		   ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 RX");
		data |= ICE_SMA1_TX_EN_E810T;
	} else if (ptp_pins[SMA1].func == PTP_PF_NONE &&
		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
		/* U.FL 1 TX will always enable SMA 1 RX */
		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
	} else if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 TX");
		data |= ICE_SMA1_DIR_EN_E810T;
	}

	data &= ~ICE_SMA2_MASK_E810T;
	if (ptp_pins[SMA2].func == PTP_PF_NONE &&
	    ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled");
		data |= ICE_SMA2_MASK_E810T;
	} else if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
		   ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 RX");
		data |= (ICE_SMA2_TX_EN_E810T |
			 ICE_SMA2_UFL2_RX_DIS_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_NONE &&
		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
		dev_info(ice_hw_to_dev(hw), "UFL2 RX");
		data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 TX");
		data |= (ICE_SMA2_DIR_EN_E810T |
			 ICE_SMA2_UFL2_RX_DIS_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
		dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX");
		data |= ICE_SMA2_DIR_EN_E810T;
	}

	return ice_write_sma_ctrl_e810t(hw, data);
}

/**
 * ice_ptp_set_sma_e810t
 * @info: the driver's PTP info structure
 * @pin: pin index in kernel structure
 * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT)
 *
 * Set the configuration of a single SMA pin
 */
static int
ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin,
		      enum ptp_pin_function func)
{
	struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T];
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;
	int err;

	if (pin < SMA1 || func > PTP_PF_PEROUT)
		return -EOPNOTSUPP;

	err = ice_get_sma_config_e810t(hw, ptp_pins);
	if (err)
		return err;

	/* Disable the same function on the other pin sharing the channel */
	if (pin == SMA1 && ptp_pins[UFL1].func == func)
		ptp_pins[UFL1].func = PTP_PF_NONE;
	if (pin == UFL1 && ptp_pins[SMA1].func == func)
		ptp_pins[SMA1].func = PTP_PF_NONE;

	if (pin == SMA2 && ptp_pins[UFL2].func == func)
		ptp_pins[UFL2].func = PTP_PF_NONE;
	if (pin == UFL2 && ptp_pins[SMA2].func == func)
		ptp_pins[SMA2].func = PTP_PF_NONE;

	/* Set up new pin function in the temp table */
	ptp_pins[pin].func = func;

	return ice_ptp_set_sma_config_e810t(hw, ptp_pins);
}

/**
 * ice_verify_pin_e810t
 * @info: the driver's PTP info structure
 * @pin: Pin index
 * @func: Assigned function
 * @chan: Assigned channel
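 *
 * From the checks below: GNSS and U.FL2 may only be assigned PTP_PF_EXTTS,
 * U.FL1 may only be assigned PTP_PF_PEROUT, SMA1 and SMA2 may take either
 * function, and PTP_PF_PHYSYNC is not supported on any pin.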
 *
 * Verify if the pin supports the requested pin function and check the pin
 * configuration for consistency. Reconfigure the SMA logic attached to the
 * given pin to enable its desired functionality.
 */
static int
ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
		     enum ptp_pin_function func, unsigned int chan)
{
	/* Don't allow channel reassignment */
	if (chan != ice_pin_desc_e810t[pin].chan)
		return -EOPNOTSUPP;

	/* Check if functions are properly assigned */
	switch (func) {
	case PTP_PF_NONE:
		break;
	case PTP_PF_EXTTS:
		if (pin == UFL1)
			return -EOPNOTSUPP;
		break;
	case PTP_PF_PEROUT:
		if (pin == UFL2 || pin == GNSS)
			return -EOPNOTSUPP;
		break;
	case PTP_PF_PHYSYNC:
		return -EOPNOTSUPP;
	}

	return ice_ptp_set_sma_e810t(info, pin, func);
}

/**
 * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
 * @pf: Board private structure
 *
 * Program the device to respond appropriately to the Tx timestamp interrupt
 * cause.
 */
static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	bool enable;
	u32 val;

	switch (pf->ptp.tx_interrupt_mode) {
	case ICE_PTP_TX_INTERRUPT_ALL:
		/* React to interrupts across all quads. */
		wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
		enable = true;
		break;
	case ICE_PTP_TX_INTERRUPT_NONE:
		/* Do not react to interrupts on any quad. */
		wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
		enable = false;
		break;
	case ICE_PTP_TX_INTERRUPT_SELF:
	default:
		enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON;
		break;
	}

	/* Configure the Tx timestamp interrupt */
	val = rd32(hw, PFINT_OICR_ENA);
	if (enable)
		val |= PFINT_OICR_TSYN_TX_M;
	else
		val &= ~PFINT_OICR_TSYN_TX_M;
	wr32(hw, PFINT_OICR_ENA, val);
}

/**
 * ice_set_rx_tstamp - Enable or disable Rx timestamping
 * @pf: The PF pointer to search in
 * @on: bool value for whether timestamps are enabled or disabled
 */
static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
{
	struct ice_vsi *vsi;
	u16 i;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->rx_rings)
		return;

	/* Set the timestamp flag for all the Rx rings */
	ice_for_each_rxq(vsi, i) {
		if (!vsi->rx_rings[i])
			continue;
		vsi->rx_rings[i]->ptp_rx = on;
	}
}

/**
 * ice_ptp_disable_timestamp_mode - Disable current timestamp mode
 * @pf: Board private structure
 *
 * Called during preparation for reset to temporarily disable timestamping on
 * the device. Called during remove to disable timestamping while cleaning up
 * driver resources.
 */
static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 val;

	val = rd32(hw, PFINT_OICR_ENA);
	val &= ~PFINT_OICR_TSYN_TX_M;
	wr32(hw, PFINT_OICR_ENA, val);

	ice_set_rx_tstamp(pf, false);
}

/**
 * ice_ptp_restore_timestamp_mode - Restore timestamp configuration
 * @pf: Board private structure
 *
 * Called at the end of rebuild to restore timestamp configuration after
 * a device reset.
 */
void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	bool enable_rx;

	ice_ptp_cfg_tx_interrupt(pf);

	enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
	ice_set_rx_tstamp(pf, enable_rx);

	/* Trigger an immediate software interrupt to ensure that timestamps
	 * which occurred during reset are handled now.
	 */
	wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
	ice_flush(hw);
}

/**
 * ice_ptp_read_src_clk_reg - Read the source clock register
 * @pf: Board private structure
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
 */
static u64
ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
{
	struct ice_hw *hw = &pf->hw;
	u32 hi, lo, lo2;
	u8 tmr_idx;

	tmr_idx = ice_get_ptp_src_clock_index(hw);
	/* Read the system timestamp pre PHC read */
	ptp_read_system_prets(sts);

	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	/* Read the system timestamp post PHC read */
	ptp_read_system_postts(sts);

	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	if (lo2 < lo) {
		/* if TIME_L rolled over read TIME_L again and update
		 * system timestamps
		 */
		ptp_read_system_prets(sts);
		lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
		ptp_read_system_postts(sts);
		hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	}

	return ((u64)hi << 32) | lo;
}

/**
 * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
 * @cached_phc_time: recently cached copy of PHC time
 * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
 *
 * Hardware captures timestamps which contain only 32 bits of nominal
 * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
 * Note that the captured timestamp values may be 40 bits, but the lower
 * 8 bits are sub-nanoseconds and generally discarded.
 *
 * Extend the 32bit nanosecond timestamp using the following algorithm and
 * assumptions:
 *
 * 1) have a recently cached copy of the PHC time
 * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1
 *    seconds) before or after the PHC time was captured.
 * 3) calculate the delta between the cached time and the timestamp
 * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
 *    captured after the PHC time. In this case, the full timestamp is just
 *    the cached PHC time plus the delta.
 * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
 *    timestamp was captured *before* the PHC time, i.e. because the PHC
 *    cache was updated after the timestamp was captured by hardware. In this
 *    case, the full timestamp is the cached time minus the inverse delta.
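 *
 * For example, with illustrative values: if the cached PHC time is
 * 0x2_0000_1000 ns and in_tstamp is 0x0000_0F00, the 32 bit delta
 * 0x0000_0F00 - 0x0000_1000 wraps to 0xFFFF_FF00, which is larger than 2^31.
 * The timestamp is therefore treated as captured before the cached time and
 * is extended to 0x2_0000_0F00, i.e. the cached time minus the reversed
 * delta of 0x100.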
 *
 * This algorithm works even if the PHC time was updated after a Tx timestamp
 * was requested, but before the Tx timestamp event was reported from
 * hardware.
 *
 * This calculation primarily relies on keeping the cached PHC time up to
 * date. If the timestamp was captured more than 2^31 nanoseconds after the
 * PHC time, it is possible that the lower 32bits of PHC time have
 * overflowed more than once, and we might generate an incorrect timestamp.
 *
 * This is prevented by (a) periodically updating the cached PHC time once
 * a second, and (b) discarding any Tx timestamp packet if it has waited for
 * a timestamp for more than one second.
 */
static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
{
	u32 delta, phc_time_lo;
	u64 ns;

	/* Extract the lower 32 bits of the PHC time */
	phc_time_lo = (u32)cached_phc_time;

	/* Calculate the delta between the lower 32bits of the cached PHC
	 * time and the in_tstamp value
	 */
	delta = (in_tstamp - phc_time_lo);

	/* Do not assume that the in_tstamp is always more recent than the
	 * cached PHC time. If the delta is large, it indicates that the
	 * in_tstamp was taken in the past, and should be converted
	 * forward.
	 */
	if (delta > (U32_MAX / 2)) {
		/* reverse the delta calculation here */
		delta = (phc_time_lo - in_tstamp);
		ns = cached_phc_time - delta;
	} else {
		ns = cached_phc_time + delta;
	}

	return ns;
}

/**
 * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
 * @pf: Board private structure
 * @in_tstamp: Ingress/egress 40b timestamp value
 *
 * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
 * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
 *
 *  *--------------------------------------------------------------*
 *  | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
 *  *--------------------------------------------------------------*
 *
 * The low bit is an indicator of whether the timestamp is valid. The next
 * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
 * and the remaining 32 bits are the lower 32 bits of the PHC timer.
 *
 * It is assumed that the caller verifies the timestamp is valid prior to
 * calling this function.
 *
 * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
 * time stored in the device private PTP structure as the basis for timestamp
 * extension.
 *
 * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
 * algorithm.
 */
static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
{
	const u64 mask = GENMASK_ULL(31, 0);
	unsigned long discard_time;

	/* Discard the hardware timestamp if the cached PHC time is too old */
	discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
	if (time_is_before_jiffies(discard_time)) {
		pf->ptp.tx_hwtstamp_discarded++;
		return 0;
	}

	return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
				     (in_tstamp >> 8) & mask);
}

/**
 * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps
 * @tx: the PTP Tx timestamp tracker to check
 *
 * Check that a given PTP Tx timestamp tracker is up, i.e. that it is ready
 * to accept new timestamp requests.
 *
 * Assumes the tx->lock spinlock is already held.
 */
static bool
ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
{
	lockdep_assert_held(&tx->lock);

	return tx->init && !tx->calibrating;
}

/**
 * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port
 * @tx: the PTP Tx timestamp tracker
 *
 * Process timestamps captured by the PHY associated with this port. To do
 * this, loop over each index with a waiting skb.
 *
 * If a given index has a valid timestamp, perform the following steps:
 *
 * 1) check that the timestamp request is not stale
 * 2) check that a timestamp is ready and available in the PHY memory bank
 * 3) read and copy the timestamp out of the PHY register
 * 4) unlock the index by clearing the associated in_use bit
 * 5) check if the timestamp is stale, and discard if so
 * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value
 * 7) send this 64 bit timestamp to the stack
 *
 * Note that we do not hold the tracking lock while reading the Tx timestamp.
 * This is because reading the timestamp requires taking a mutex that might
 * sleep.
 *
 * The only place where we set in_use is when a new timestamp is initiated
 * with a slot index. This is only called in the hard xmit routine where an
 * SKB has a request flag set. The only places where we clear this bit is this
 * function, or during teardown when the Tx timestamp tracker is being
 * removed. A timestamp index will never be re-used until the in_use bit for
 * that index is cleared.
 *
 * If a Tx thread starts a new timestamp, we might not begin processing it
 * right away but we will notice it at the end when we re-queue the task.
 *
 * If a Tx thread starts a new timestamp just after this function exits, the
 * interrupt for that timestamp should re-trigger this function once
 * a timestamp is ready.
 *
 * In cases where the PTP hardware clock was directly adjusted, some
 * timestamps may not be able to safely use the timestamp extension math. In
 * this case, software will set the stale bit for any outstanding Tx
 * timestamps when the clock is adjusted. Then this function will discard
 * those captured timestamps instead of sending them to the stack.
 *
 * If a Tx packet has been waiting for more than 2 seconds, it is not possible
 * to correctly extend the timestamp using the cached PHC time. It is
 * extremely unlikely that a packet will ever take this long to timestamp. If
 * we detect a Tx timestamp request that has waited for this long we assume
 * the packet will never be sent by hardware and discard it without reading
 * the timestamp register.
 */
static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
{
	struct ice_ptp_port *ptp_port;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u64 tstamp_ready;
	bool link_up;
	int err;
	u8 idx;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	hw = &pf->hw;

	/* Read the Tx ready status first */
	err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
	if (err)
		return;

	/* Drop packets if the link went down */
	link_up = ptp_port->link_up;

	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct skb_shared_hwtstamps shhwtstamps = {};
		u8 phy_idx = idx + tx->offset;
		u64 raw_tstamp = 0, tstamp;
		bool drop_ts = !link_up;
		struct sk_buff *skb;

		/* Drop packets which have waited for more than 2 seconds */
		if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
			drop_ts = true;

			/* Count the number of Tx timestamps that timed out */
			pf->ptp.tx_hwtstamp_timeouts++;
		}

		/* Only read a timestamp from the PHY if it's marked as ready
		 * by the tstamp_ready register. This avoids unnecessary
		 * reading of timestamps which are not yet valid. This is
		 * important as we must read all timestamps which are valid
		 * and only timestamps which are valid during each interrupt.
		 * If we do not, the hardware logic for generating a new
		 * interrupt can get stuck on some devices.
		 */
		if (!(tstamp_ready & BIT_ULL(phy_idx))) {
			if (drop_ts)
				goto skip_ts_read;

			continue;
		}

		ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);

		err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp);
		if (err && !drop_ts)
			continue;

		ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);

		/* For PHYs which don't implement a proper timestamp ready
		 * bitmap, verify that the timestamp value is different
		 * from the last cached timestamp. If it is not, skip this for
		 * now assuming it hasn't yet been captured by hardware.
		 */
		if (!drop_ts && tx->verify_cached &&
		    raw_tstamp == tx->tstamps[idx].cached_tstamp)
			continue;

		/* Discard any timestamp value without the valid bit set */
		if (!(raw_tstamp & ICE_PTP_TS_VALID))
			drop_ts = true;

skip_ts_read:
		spin_lock(&tx->lock);
		if (tx->verify_cached && raw_tstamp)
			tx->tstamps[idx].cached_tstamp = raw_tstamp;
		clear_bit(idx, tx->in_use);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		if (test_and_clear_bit(idx, tx->stale))
			drop_ts = true;
		spin_unlock(&tx->lock);

		/* It is unlikely but possible that the SKB will have been
		 * flushed at this point due to link change or teardown.
		 */
		if (!skb)
			continue;

		if (drop_ts) {
			dev_kfree_skb_any(skb);
			continue;
		}

		/* Extend the timestamp using cached PHC time */
		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
		if (tstamp) {
			shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
			ice_trace(tx_tstamp_complete, skb, idx);
		}

		skb_tstamp_tx(skb, &shhwtstamps);
		dev_kfree_skb_any(skb);
	}
}

/**
 * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device
 * @pf: Board private structure
 */
static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
{
	struct ice_ptp_port *port;
	unsigned int i;

	mutex_lock(&pf->ptp.ports_owner.lock);
	list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member) {
		struct ice_ptp_tx *tx = &port->tx;

		if (!tx || !tx->init)
			continue;

		ice_ptp_process_tx_tstamp(tx);
	}
	mutex_unlock(&pf->ptp.ports_owner.lock);

	for (i = 0; i < ICE_MAX_QUAD; i++) {
		u64 tstamp_ready;
		int err;

		/* Read the Tx ready status first */
		err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
		if (err)
			break;
		else if (tstamp_ready)
			return ICE_TX_TSTAMP_WORK_PENDING;
	}

	return ICE_TX_TSTAMP_WORK_DONE;
}

/**
 * ice_ptp_tx_tstamp - Process Tx timestamps for this function.
 * @tx: Tx tracking structure to process
 *
 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete
 * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise.
 */
static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
{
	bool more_timestamps;

	if (!tx->init)
		return ICE_TX_TSTAMP_WORK_DONE;

	/* Process the Tx timestamp tracker */
	ice_ptp_process_tx_tstamp(tx);

	/* Check if there are outstanding Tx timestamps */
	spin_lock(&tx->lock);
	more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);
	spin_unlock(&tx->lock);

	if (more_timestamps)
		return ICE_TX_TSTAMP_WORK_PENDING;

	return ICE_TX_TSTAMP_WORK_DONE;
}

/**
 * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
 * @tx: Tx tracking structure to initialize
 *
 * Assumes that the length has already been initialized. Do not call directly,
 * use the ice_ptp_init_tx_* instead.
 */
static int
ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
{
	unsigned long *in_use, *stale;
	struct ice_tx_tstamp *tstamps;

	tstamps = kcalloc(tx->len, sizeof(*tstamps), GFP_KERNEL);
	in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
	stale = bitmap_zalloc(tx->len, GFP_KERNEL);

	if (!tstamps || !in_use || !stale) {
		kfree(tstamps);
		bitmap_free(in_use);
		bitmap_free(stale);

		return -ENOMEM;
	}

	tx->tstamps = tstamps;
	tx->in_use = in_use;
	tx->stale = stale;
	tx->init = 1;

	spin_lock_init(&tx->lock);

	return 0;
}

/**
 * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
 * @pf: Board private structure
 * @tx: the tracker to flush
 *
 * Called during teardown when a Tx tracker is being removed.
 */
static void
ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	struct ice_hw *hw = &pf->hw;
	u64 tstamp_ready;
	int err;
	u8 idx;

	err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
	if (err) {
		dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n",
			tx->block, err);

		/* If we fail to read the Tx timestamp ready bitmap just
		 * skip clearing the PHY timestamps.
		 */
		tstamp_ready = 0;
	}

	for_each_set_bit(idx, tx->in_use, tx->len) {
		u8 phy_idx = idx + tx->offset;
		struct sk_buff *skb;

		/* In case this timestamp is ready, we need to clear it. */
		if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx)))
			ice_clear_phy_tstamp(hw, tx->block, phy_idx);

		spin_lock(&tx->lock);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		clear_bit(idx, tx->in_use);
		clear_bit(idx, tx->stale);
		spin_unlock(&tx->lock);

		/* Count the number of Tx timestamps flushed */
		pf->ptp.tx_hwtstamp_flushed++;

		/* Free the SKB after we've cleared the bit */
		dev_kfree_skb_any(skb);
	}
}

/**
 * ice_ptp_mark_tx_tracker_stale - Mark unfinished timestamps as stale
 * @tx: the tracker to mark
 *
 * Mark currently outstanding Tx timestamps as stale. This prevents sending
 * their timestamp value to the stack. This is required to prevent extending
 * the 40bit hardware timestamp incorrectly.
 *
 * This should be called when the PTP clock is modified such as after a set
 * time request.
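 * In this driver that path is ice_ptp_reset_cached_phctime(), which is
 * invoked from the settime and adjtime callbacks below.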
 */
static void
ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
{
	spin_lock(&tx->lock);
	bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len);
	spin_unlock(&tx->lock);
}

/**
 * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
 * @pf: Board private structure
 * @tx: Tx tracking structure to release
 *
 * Free memory associated with the Tx timestamp tracker.
 */
static void
ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	spin_lock(&tx->lock);
	tx->init = 0;
	spin_unlock(&tx->lock);

	/* wait for potentially outstanding interrupt to complete */
	synchronize_irq(pf->oicr_irq.virq);

	ice_ptp_flush_tx_tracker(pf, tx);

	kfree(tx->tstamps);
	tx->tstamps = NULL;

	bitmap_free(tx->in_use);
	tx->in_use = NULL;

	bitmap_free(tx->stale);
	tx->stale = NULL;

	tx->len = 0;
}

/**
 * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
 * @pf: Board private structure
 * @tx: the Tx tracking structure to initialize
 * @port: the port this structure tracks
 *
 * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
 * the timestamp block is shared for all ports in the same quad. To avoid
 * ports using the same timestamp index, logically break the block of
 * registers into chunks based on the port number.
 */
static int
ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
	tx->block = port / ICE_PORTS_PER_QUAD;
	tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
	tx->len = INDEX_PER_PORT_E82X;
	tx->verify_cached = 0;

	return ice_ptp_alloc_tx_tracker(tx);
}

/**
 * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
 * @pf: Board private structure
 * @tx: the Tx tracking structure to initialize
 *
 * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
 * port has its own block of timestamps, independent of the other ports.
 */
static int
ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	tx->block = pf->hw.port_info->lport;
	tx->offset = 0;
	tx->len = INDEX_PER_PORT_E810;
	/* The E810 PHY does not provide a timestamp ready bitmap. Instead,
	 * verify new timestamps against cached copy of the last read
	 * timestamp.
	 */
	tx->verify_cached = 1;

	return ice_ptp_alloc_tx_tracker(tx);
}

/**
 * ice_ptp_update_cached_phctime - Update the cached PHC time values
 * @pf: Board specific private structure
 *
 * This function updates the system time values which are cached in the PF
 * structure and the Rx rings.
 *
 * This function must be called periodically to ensure that the cached value
 * is never more than 2 seconds old.
 *
 * Note that the cached copy in the PF PTP structure is always updated, even
 * if we can't update the copy in the Rx rings.
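 *
 * The 2 second bound follows from the timestamp extension algorithm: a
 * captured timestamp can only be extended correctly while the cached PHC
 * time is within 2^31 nanoseconds (~2.1 seconds) of it, see
 * ice_ptp_extend_32b_ts().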
 *
 * Return:
 * * 0 - OK, successfully updated
 * * -EAGAIN - PF was busy, need to reschedule the update
 */
static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	unsigned long update_before;
	u64 systime;
	int i;

	update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
	if (pf->ptp.cached_phc_time &&
	    time_is_before_jiffies(update_before)) {
		unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;

		dev_warn(dev, "%u msecs passed between updates to cached PHC time\n",
			 jiffies_to_msecs(time_taken));
		pf->ptp.late_cached_phc_updates++;
	}

	/* Read the current PHC time */
	systime = ice_ptp_read_src_clk_reg(pf, NULL);

	/* Update the cached PHC time stored in the PF structure */
	WRITE_ONCE(pf->ptp.cached_phc_time, systime);
	WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);

	if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
		return -EAGAIN;

	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];
		int j;

		if (!vsi)
			continue;

		if (vsi->type != ICE_VSI_PF)
			continue;

		ice_for_each_rxq(vsi, j) {
			if (!vsi->rx_rings[j])
				continue;
			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
		}
	}
	clear_bit(ICE_CFG_BUSY, pf->state);

	return 0;
}

/**
 * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
 * @pf: Board specific private structure
 *
 * This function must be called when the cached PHC time is no longer valid,
 * such as after a time adjustment. It marks any currently outstanding Tx
 * timestamps as stale and updates the cached PHC time for both the PF and Rx
 * rings.
 *
 * If updating the PHC time cannot be done immediately, a warning message is
 * logged and the work item is scheduled immediately to minimize the window
 * with a wrong cached timestamp.
 */
static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	/* Update the cached PHC time immediately if possible, otherwise
	 * schedule the work item to execute soon.
	 */
	err = ice_ptp_update_cached_phctime(pf);
	if (err) {
		/* If another thread is updating the Rx rings, we won't
		 * properly reset them here. This could lead to reporting of
		 * invalid timestamps, but there isn't much we can do.
		 */
		dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
			 __func__);

		/* Queue the work item to update the Rx rings when possible */
		kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
					   msecs_to_jiffies(10));
	}

	/* Mark any outstanding timestamps as stale, since they might have
	 * been captured in hardware before the time update. This could lead
	 * to us extending them with the wrong cached value resulting in
	 * incorrect timestamp values.
	 */
	ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx);
}

/**
 * ice_ptp_read_time - Read the time from the device
 * @pf: Board private structure
 * @ts: timespec structure to hold the current time value
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
 *
 * This function reads the source clock registers. Since the registers hold
 * the time as 64 bits of nanoseconds, the value is converted to a timespec
 * before returning.
 */
static void
ice_ptp_read_time(struct ice_pf *pf, struct timespec64 *ts,
		  struct ptp_system_timestamp *sts)
{
	u64 time_ns = ice_ptp_read_src_clk_reg(pf, sts);

	*ts = ns_to_timespec64(time_ns);
}

/**
 * ice_ptp_write_init - Set PHC time to provided value
 * @pf: Board private structure
 * @ts: timespec structure that holds the new time value
 *
 * Set the PHC time to the specified time provided in the timespec.
 */
static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
{
	u64 ns = timespec64_to_ns(ts);
	struct ice_hw *hw = &pf->hw;

	return ice_ptp_init_time(hw, ns);
}

/**
 * ice_ptp_write_adj - Adjust PHC clock time atomically
 * @pf: Board private structure
 * @adj: Adjustment in nanoseconds
 *
 * Perform an atomic adjustment of the PHC time by the specified number of
 * nanoseconds.
 */
static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
{
	struct ice_hw *hw = &pf->hw;

	return ice_ptp_adj_clock(hw, adj);
}

/**
 * ice_base_incval - Get base timer increment value
 * @pf: Board private structure
 *
 * Look up the base timer increment value for this device. The base increment
 * value is used to define the nominal clock tick rate. This increment value
 * is programmed during device initialization. It is also used as the basis
 * for calculating adjustments using scaled_ppm.
 */
static u64 ice_base_incval(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u64 incval;

	if (ice_is_e810(hw))
		incval = ICE_PTP_NOMINAL_INCVAL_E810;
	else if (ice_e82x_time_ref(hw) < NUM_ICE_TIME_REF_FREQ)
		incval = ice_e82x_nominal_incval(ice_e82x_time_ref(hw));
	else
		incval = UNKNOWN_INCVAL_E82X;

	dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
		incval);

	return incval;
}

/**
 * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state
 * @port: PTP port for which Tx FIFO is checked
 */
static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
{
	int quad = port->port_num / ICE_PORTS_PER_QUAD;
	int offs = port->port_num % ICE_PORTS_PER_QUAD;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u32 val, phy_sts;
	int err;

	pf = ptp_port_to_pf(port);
	hw = &pf->hw;

	if (port->tx_fifo_busy_cnt == FIFO_OK)
		return 0;

	/* need to read FIFO state */
	if (offs == 0 || offs == 1)
		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS,
					     &val);
	else
		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS,
					     &val);

	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
			port->port_num, err);
		return err;
	}

	if (offs & 0x1)
		phy_sts = FIELD_GET(Q_REG_FIFO13_M, val);
	else
		phy_sts = FIELD_GET(Q_REG_FIFO02_M, val);

	if (phy_sts & FIFO_EMPTY) {
		port->tx_fifo_busy_cnt = FIFO_OK;
		return 0;
	}

	port->tx_fifo_busy_cnt++;

	dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
		port->tx_fifo_busy_cnt, port->port_num);

	if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
		dev_dbg(ice_pf_to_dev(pf),
			"Port %d Tx FIFO still not empty; resetting quad %d\n",
			port->port_num, quad);
		ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
		port->tx_fifo_busy_cnt = FIFO_OK;
		return 0;
	}

	return -EAGAIN;
}

/**
 * ice_ptp_wait_for_offsets - Check for valid Tx and Rx offsets
 * @work: Pointer to the kthread_work structure for this task
 *
 * Check whether hardware has completed measuring the Tx and Rx offset values
 * used to configure and enable vernier timestamp calibration.
 *
 * Once the offset in either direction is measured, configure the associated
 * registers with the calibrated offset values and enable timestamping. The Tx
 * and Rx directions are configured independently as soon as their associated
 * offsets are known.
 *
 * This function reschedules itself until both Tx and Rx calibration have
 * completed.
 */
static void ice_ptp_wait_for_offsets(struct kthread_work *work)
{
	struct ice_ptp_port *port;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int tx_err;
	int rx_err;

	port = container_of(work, struct ice_ptp_port, ov_work.work);
	pf = ptp_port_to_pf(port);
	hw = &pf->hw;

	if (ice_is_reset_in_progress(pf->state)) {
		/* wait for device driver to complete reset */
		kthread_queue_delayed_work(pf->ptp.kworker,
					   &port->ov_work,
					   msecs_to_jiffies(100));
		return;
	}

	tx_err = ice_ptp_check_tx_fifo(port);
	if (!tx_err)
		tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num);
	rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num);
	if (tx_err || rx_err) {
		/* Tx and/or Rx offset not yet configured, try again later */
		kthread_queue_delayed_work(pf->ptp.kworker,
					   &port->ov_work,
					   msecs_to_jiffies(100));
		return;
	}
}

/**
 * ice_ptp_port_phy_stop - Stop timestamping for a PHY port
 * @ptp_port: PTP port to stop
 */
static int
ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
{
	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
	u8 port = ptp_port->port_num;
	struct ice_hw *hw = &pf->hw;
	int err;

	if (ice_is_e810(hw))
		return 0;

	mutex_lock(&ptp_port->ps_lock);

	kthread_cancel_delayed_work_sync(&ptp_port->ov_work);

	err = ice_stop_phy_timer_e82x(hw, port, true);
	if (err)
		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
			port, err);

	mutex_unlock(&ptp_port->ps_lock);

	return err;
}

/**
 * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping
 * @ptp_port: PTP port for which the PHY start is set
 *
 * Start the PHY timestamping block, and initiate Vernier timestamping
 * calibration. If timestamping cannot be calibrated (such as if link is down)
 * then disable the timestamping block instead.
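 *
 * Tx timestamps are temporarily disabled via the tx.calibrating flag (checked
 * by ice_ptp_is_tx_tracker_up()) while the PHY timer is restarted, and are
 * re-enabled once the timer is running again.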
 */
static int
ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
{
	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
	u8 port = ptp_port->port_num;
	struct ice_hw *hw = &pf->hw;
	int err;

	if (ice_is_e810(hw))
		return 0;

	if (!ptp_port->link_up)
		return ice_ptp_port_phy_stop(ptp_port);

	mutex_lock(&ptp_port->ps_lock);

	kthread_cancel_delayed_work_sync(&ptp_port->ov_work);

	/* temporarily disable Tx timestamps while calibrating PHY offset */
	spin_lock(&ptp_port->tx.lock);
	ptp_port->tx.calibrating = true;
	spin_unlock(&ptp_port->tx.lock);
	ptp_port->tx_fifo_busy_cnt = 0;

	/* Start the PHY timer in Vernier mode */
	err = ice_start_phy_timer_e82x(hw, port);
	if (err)
		goto out_unlock;

	/* Enable Tx timestamps right away */
	spin_lock(&ptp_port->tx.lock);
	ptp_port->tx.calibrating = false;
	spin_unlock(&ptp_port->tx.lock);

	kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0);

out_unlock:
	if (err)
		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
			port, err);

	mutex_unlock(&ptp_port->ps_lock);

	return err;
}

/**
 * ice_ptp_link_change - Reconfigure PTP after link status change
 * @pf: Board private structure
 * @port: Port for which the PHY start is set
 * @linkup: Link is up or down
 */
void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
{
	struct ice_ptp_port *ptp_port;
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return;

	if (WARN_ON_ONCE(port >= ICE_NUM_EXTERNAL_PORTS))
		return;

	ptp_port = &pf->ptp.port;
	if (WARN_ON_ONCE(ptp_port->port_num != port))
		return;

	/* Update cached link status for this port immediately */
	ptp_port->link_up = linkup;

	switch (hw->phy_model) {
	case ICE_PHY_E810:
		/* Do not reconfigure E810 PHY */
		return;
	case ICE_PHY_E82X:
		ice_ptp_port_phy_restart(ptp_port);
		return;
	default:
		dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__);
	}
}

/**
 * ice_ptp_tx_ena_intr - Enable or disable the Tx timestamp interrupt
 * @pf: PF private structure
 * @ena: bool value to enable or disable interrupt
 * @threshold: Minimum number of packets at which intr is triggered
 *
 * Utility function to enable or disable Tx timestamp interrupt and threshold
 */
static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold)
{
	struct ice_hw *hw = &pf->hw;
	int err = 0;
	int quad;
	u32 val;

	ice_ptp_reset_ts_memory(hw);

	for (quad = 0; quad < ICE_MAX_QUAD; quad++) {
		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG,
					     &val);
		if (err)
			break;

		if (ena) {
			val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
			val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M;
			val |= FIELD_PREP(Q_REG_TX_MEM_GBL_CFG_INTR_THR_M,
					  threshold);
		} else {
			val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
		}

		err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG,
					      val);
		if (err)
			break;
	}

	if (err)
		dev_err(ice_pf_to_dev(pf), "PTP failed in intr ena, err %d\n",
			err);
	return err;
}

/**
 * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block
 * @pf: Board private structure
 */
static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
{
	ice_ptp_port_phy_restart(&pf->ptp.port);
}

/**
 * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping
 * @pf: Board private structure
 */
static void ice_ptp_restart_all_phy(struct ice_pf *pf)
{
	struct list_head *entry;

	list_for_each(entry, &pf->ptp.ports_owner.ports) {
		struct ice_ptp_port *port = list_entry(entry,
						       struct ice_ptp_port,
						       list_member);

		if (port->link_up)
			ice_ptp_port_phy_restart(port);
	}
}

/**
 * ice_ptp_adjfine - Adjust clock increment rate
 * @info: the driver's PTP info structure
 * @scaled_ppm: Parts per million with 16-bit fractional field
 *
 * Adjust the frequency of the clock by the indicated scaled ppm from the
 * base frequency.
 */
static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;
	u64 incval;
	int err;

	incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm);
	err = ice_ptp_write_incval_locked(hw, incval);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
			err);
		return -EIO;
	}

	return 0;
}

/**
 * ice_ptp_extts_event - Process PTP external clock event
 * @pf: Board private structure
 */
void ice_ptp_extts_event(struct ice_pf *pf)
{
	struct ptp_clock_event event;
	struct ice_hw *hw = &pf->hw;
	u8 chan, tmr_idx;
	u32 hi, lo;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
	/* Event time is captured by one of the two matched registers
	 *   GLTSYN_EVNT_L: 32 LSB of sampled time event
	 *   GLTSYN_EVNT_H: 32 MSB of sampled time event
	 * Event is defined in GLTSYN_EVNT_0 register
	 */
	for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
		/* Check if channel is enabled */
		if (pf->ptp.ext_ts_irq & (1 << chan)) {
			lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
			hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
			event.timestamp = (((u64)hi) << 32) | lo;
			event.type = PTP_CLOCK_EXTTS;
			event.index = chan;

			/* Fire event */
			ptp_clock_event(pf->ptp.clock, &event);
			pf->ptp.ext_ts_irq &= ~(1 << chan);
		}
	}
}

/**
 * ice_ptp_cfg_extts - Configure EXTTS pin and channel
 * @pf: Board private structure
 * @ena: true to enable; false to disable
 * @chan: GPIO channel (0-3)
 * @gpio_pin: GPIO pin
 * @extts_flags: request flags from the ptp_extts_request.flags
 */
static int
ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
		  unsigned int extts_flags)
{
	u32 func, aux_reg, gpio_reg, irq_reg;
	struct ice_hw *hw = &pf->hw;
	u8 tmr_idx;

	if (chan > (unsigned int)pf->ptp.info.n_ext_ts)
		return -EINVAL;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;

	irq_reg = rd32(hw, PFINT_OICR_ENA);

	if (ena) {
		/* Enable the interrupt */
		irq_reg |= PFINT_OICR_TSYN_EVNT_M;
		aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;

#define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE	BIT(0)
#define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE	BIT(1)

		/* set event level to requested edge */
		if (extts_flags & PTP_FALLING_EDGE)
			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
		if (extts_flags & PTP_RISING_EDGE)
			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;

		/* Write GPIO CTL reg.
		 * 0x1 is input sampled by EVENT register (channel)
		 * + num_in_channels * tmr_idx
		 */
		func = 1 + chan + (tmr_idx * 3);
		gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func);
		pf->ptp.ext_ts_chan |= (1 << chan);
	} else {
		/* clear the values we set to reset defaults */
		aux_reg = 0;
		gpio_reg = 0;
		pf->ptp.ext_ts_chan &= ~(1 << chan);
		if (!pf->ptp.ext_ts_chan)
			irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
	}

	wr32(hw, PFINT_OICR_ENA, irq_reg);
	wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);

	return 0;
}

/**
 * ice_ptp_cfg_clkout - Configure clock to generate periodic wave
 * @pf: Board private structure
 * @chan: GPIO channel (0-3)
 * @config: desired periodic clk configuration. NULL will disable channel
 * @store: If set to true the values will be stored
 *
 * Configure the internal clock generator modules to generate the clock wave of
 * specified period.
 */
static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
			      struct ice_perout_channel *config, bool store)
{
	u64 current_time, period, start_time, phase;
	struct ice_hw *hw = &pf->hw;
	u32 func, val, gpio_pin;
	u8 tmr_idx;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;

	/* 0. Reset mode & out_en in AUX_OUT */
	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);

	/* If we're disabling the output, clear out CLKO and TGT and keep
	 * output level low
	 */
	if (!config || !config->ena) {
		wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0);
		wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0);
		wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0);

		val = GLGEN_GPIO_CTL_PIN_DIR_M;
		gpio_pin = pf->ptp.perout_channels[chan].gpio_pin;
		wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);

		/* Store the value if requested */
		if (store)
			memset(&pf->ptp.perout_channels[chan], 0,
			       sizeof(struct ice_perout_channel));

		return 0;
	}
	period = config->period;
	start_time = config->start_time;
	div64_u64_rem(start_time, period, &phase);
	gpio_pin = config->gpio_pin;

	/* 1. Write clkout with half of required period value */
	if (period & 0x1) {
		dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
		goto err;
	}

	period >>= 1;

	/* For proper operation, the GLTSYN_CLKO must be larger than clock tick
	 */
#define MIN_PULSE 3
	if (period <= MIN_PULSE || period > U32_MAX) {
		dev_err(ice_pf_to_dev(pf), "CLK Period must be > %d && < 2^33",
			MIN_PULSE * 2);
		goto err;
	}

	wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));

	/* Allow time for programming before start_time is hit */
	current_time = ice_ptp_read_src_clk_reg(pf, NULL);

	/* if start time is in the past start the timer at the nearest second
	 * maintaining phase
	 */
	if (start_time < current_time)
		start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
				       NSEC_PER_SEC) * NSEC_PER_SEC + phase;

	if (ice_is_e810(hw))
		start_time -= E810_OUT_PROP_DELAY_NS;
	else
		start_time -= ice_e82x_pps_delay(ice_e82x_time_ref(hw));

	/* 2. Write TARGET time */
	wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
	wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start_time));

	/* 3. Write AUX_OUT register */
	val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);

	/* 4. write GPIO CTL reg */
	func = 8 + chan + (tmr_idx * 4);
	val = GLGEN_GPIO_CTL_PIN_DIR_M |
	      FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func);
	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);

	/* Store the value if requested */
	if (store) {
		memcpy(&pf->ptp.perout_channels[chan], config,
		       sizeof(struct ice_perout_channel));
		pf->ptp.perout_channels[chan].start_time = phase;
	}

	return 0;
err:
	dev_err(ice_pf_to_dev(pf), "PTP failed to cfg per_clk\n");
	return -EFAULT;
}

/**
 * ice_ptp_disable_all_clkout - Disable all currently configured outputs
 * @pf: pointer to the PF structure
 *
 * Disable all currently configured clock outputs. This is necessary before
 * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_clkout to
 * re-enable the clocks again.
 */
static void ice_ptp_disable_all_clkout(struct ice_pf *pf)
{
	uint i;

	for (i = 0; i < pf->ptp.info.n_per_out; i++)
		if (pf->ptp.perout_channels[i].ena)
			ice_ptp_cfg_clkout(pf, i, NULL, false);
}

/**
 * ice_ptp_enable_all_clkout - Enable all configured periodic clock outputs
 * @pf: pointer to the PF structure
 *
 * Enable all currently configured clock outputs. Use this after
 * ice_ptp_disable_all_clkout to reconfigure the output signals according to
 * their configuration.
 */
static void ice_ptp_enable_all_clkout(struct ice_pf *pf)
{
	uint i;

	for (i = 0; i < pf->ptp.info.n_per_out; i++)
		if (pf->ptp.perout_channels[i].ena)
			ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i],
					   false);
}

/**
 * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC
 * @info: the driver's PTP info structure
 * @rq: The requested feature to change
 * @on: Enable/disable flag
 */
static int
ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
			 struct ptp_clock_request *rq, int on)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_perout_channel clk_cfg = {0};
	bool sma_pres = false;
	unsigned int chan;
	u32 gpio_pin;
	int err;

	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
		sma_pres = true;

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
		chan = rq->perout.index;
		if (sma_pres) {
			if (chan == ice_pin_desc_e810t[SMA1].chan)
				clk_cfg.gpio_pin = GPIO_20;
			else if (chan == ice_pin_desc_e810t[SMA2].chan)
				clk_cfg.gpio_pin = GPIO_22;
			else
				return -1;
		} else if (ice_is_e810t(&pf->hw)) {
			if (chan == 0)
				clk_cfg.gpio_pin = GPIO_20;
			else
				clk_cfg.gpio_pin = GPIO_22;
		} else if (chan == PPS_CLK_GEN_CHAN) {
			clk_cfg.gpio_pin = PPS_PIN_INDEX;
		} else {
			clk_cfg.gpio_pin = chan;
		}

		clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
				   rq->perout.period.nsec);
		clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) +
				       rq->perout.start.nsec);
		clk_cfg.ena = !!on;

		err = ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
		break;
	case PTP_CLK_REQ_EXTTS:
		chan = rq->extts.index;
		if (sma_pres) {
			if (chan < ice_pin_desc_e810t[SMA2].chan)
				gpio_pin = GPIO_21;
			else
				gpio_pin = GPIO_23;
		} else if (ice_is_e810t(&pf->hw)) {
			if (chan == 0)
				gpio_pin = GPIO_21;
			else
				gpio_pin = GPIO_23;
		} else {
			gpio_pin = chan;
		}

		err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin,
					rq->extts.flags);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

/**
 * ice_ptp_gpio_enable_e823 - Enable/disable ancillary features of PHC
 * @info: the driver's PTP info structure
 * @rq: The requested feature to change
 * @on: Enable/disable flag
 */
static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info,
				    struct ptp_clock_request *rq, int on)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_perout_channel clk_cfg = {0};
	int err;

	switch (rq->type) {
	case PTP_CLK_REQ_PPS:
		clk_cfg.gpio_pin = PPS_PIN_INDEX;
		clk_cfg.period = NSEC_PER_SEC;
		clk_cfg.ena = !!on;

		err = ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true);
		break;
	case PTP_CLK_REQ_EXTTS:
		err = ice_ptp_cfg_extts(pf, !!on, rq->extts.index,
					TIME_SYNC_PIN_INDEX, rq->extts.flags);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

/**
 * ice_ptp_gettimex64 - Get the time of the clock
 * @info: the driver's PTP info structure
 * @ts: timespec64 structure to hold the current time value
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
 *
 * Read the device clock and return the correct value in ns, after converting
 * it into a timespec struct.
 */
static int
ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
		   struct ptp_system_timestamp *sts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;

	if (!ice_ptp_lock(hw)) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to get time\n");
		return -EBUSY;
	}

	ice_ptp_read_time(pf, ts, sts);
	ice_ptp_unlock(hw);

	return 0;
}

/**
 * ice_ptp_settime64 - Set the time of the clock
 * @info: the driver's PTP info structure
 * @ts: timespec64 structure that holds the new time value
 *
 * Set the device clock to the user input value. The conversion from timespec
 * to ns happens in the write function.
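 *
 * Periodic outputs are disabled around the write, the cached PHC time is
 * reset afterwards, and on E82X PHYs the per-port Vernier calibration is
 * restarted since the PHY must be recalibrated after a settime.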
1811 */ 1812 static int 1813 ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) 1814 { 1815 struct ice_pf *pf = ptp_info_to_pf(info); 1816 struct timespec64 ts64 = *ts; 1817 struct ice_hw *hw = &pf->hw; 1818 int err; 1819 1820 /* For Vernier mode, we need to recalibrate after new settime 1821 * Start with disabling timestamp block 1822 */ 1823 if (pf->ptp.port.link_up) 1824 ice_ptp_port_phy_stop(&pf->ptp.port); 1825 1826 if (!ice_ptp_lock(hw)) { 1827 err = -EBUSY; 1828 goto exit; 1829 } 1830 1831 /* Disable periodic outputs */ 1832 ice_ptp_disable_all_clkout(pf); 1833 1834 err = ice_ptp_write_init(pf, &ts64); 1835 ice_ptp_unlock(hw); 1836 1837 if (!err) 1838 ice_ptp_reset_cached_phctime(pf); 1839 1840 /* Reenable periodic outputs */ 1841 ice_ptp_enable_all_clkout(pf); 1842 1843 /* Recalibrate and re-enable timestamp blocks for E822/E823 */ 1844 if (hw->phy_model == ICE_PHY_E82X) 1845 ice_ptp_restart_all_phy(pf); 1846 exit: 1847 if (err) { 1848 dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err); 1849 return err; 1850 } 1851 1852 return 0; 1853 } 1854 1855 /** 1856 * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment 1857 * @info: the driver's PTP info structure 1858 * @delta: Offset in nanoseconds to adjust the time by 1859 */ 1860 static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta) 1861 { 1862 struct timespec64 now, then; 1863 int ret; 1864 1865 then = ns_to_timespec64(delta); 1866 ret = ice_ptp_gettimex64(info, &now, NULL); 1867 if (ret) 1868 return ret; 1869 now = timespec64_add(now, then); 1870 1871 return ice_ptp_settime64(info, (const struct timespec64 *)&now); 1872 } 1873 1874 /** 1875 * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta 1876 * @info: the driver's PTP info structure 1877 * @delta: Offset in nanoseconds to adjust the time by 1878 */ 1879 static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta) 1880 { 1881 struct ice_pf *pf = ptp_info_to_pf(info); 1882 struct ice_hw *hw = &pf->hw; 1883 struct device *dev; 1884 int err; 1885 1886 dev = ice_pf_to_dev(pf); 1887 1888 /* Hardware only supports atomic adjustments using signed 32-bit 1889 * integers. For any adjustment outside this range, perform 1890 * a non-atomic get->adjust->set flow. 1891 */ 1892 if (delta > S32_MAX || delta < S32_MIN) { 1893 dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta); 1894 return ice_ptp_adjtime_nonatomic(info, delta); 1895 } 1896 1897 if (!ice_ptp_lock(hw)) { 1898 dev_err(dev, "PTP failed to acquire semaphore in adjtime\n"); 1899 return -EBUSY; 1900 } 1901 1902 /* Disable periodic outputs */ 1903 ice_ptp_disable_all_clkout(pf); 1904 1905 err = ice_ptp_write_adj(pf, delta); 1906 1907 /* Reenable periodic outputs */ 1908 ice_ptp_enable_all_clkout(pf); 1909 1910 ice_ptp_unlock(hw); 1911 1912 if (err) { 1913 dev_err(dev, "PTP failed to adjust time, err %d\n", err); 1914 return err; 1915 } 1916 1917 ice_ptp_reset_cached_phctime(pf); 1918 1919 return 0; 1920 } 1921 1922 #ifdef CONFIG_ICE_HWTS 1923 /** 1924 * ice_ptp_get_syncdevicetime - Get the cross time stamp info 1925 * @device: Current device time 1926 * @system: System counter value read synchronously with device time 1927 * @ctx: Context provided by timekeeping code 1928 * 1929 * Read device and system (ART) clock simultaneously and return the corrected 1930 * clock values in ns. 
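 *
 * This routine is not called directly; it is passed as the device time
 * callback to get_device_system_crosststamp() by ice_ptp_getcrosststamp_e82x()
 * below.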
1931 */ 1932 static int 1933 ice_ptp_get_syncdevicetime(ktime_t *device, 1934 struct system_counterval_t *system, 1935 void *ctx) 1936 { 1937 struct ice_pf *pf = (struct ice_pf *)ctx; 1938 struct ice_hw *hw = &pf->hw; 1939 u32 hh_lock, hh_art_ctl; 1940 int i; 1941 1942 #define MAX_HH_HW_LOCK_TRIES 5 1943 #define MAX_HH_CTL_LOCK_TRIES 100 1944 1945 for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) { 1946 /* Get the HW lock */ 1947 hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); 1948 if (hh_lock & PFHH_SEM_BUSY_M) { 1949 usleep_range(10000, 15000); 1950 continue; 1951 } 1952 break; 1953 } 1954 if (hh_lock & PFHH_SEM_BUSY_M) { 1955 dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n"); 1956 return -EBUSY; 1957 } 1958 1959 /* Program cmd to master timer */ 1960 ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME); 1961 1962 /* Start the ART and device clock sync sequence */ 1963 hh_art_ctl = rd32(hw, GLHH_ART_CTL); 1964 hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M; 1965 wr32(hw, GLHH_ART_CTL, hh_art_ctl); 1966 1967 for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) { 1968 /* Wait for sync to complete */ 1969 hh_art_ctl = rd32(hw, GLHH_ART_CTL); 1970 if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) { 1971 udelay(1); 1972 continue; 1973 } else { 1974 u32 hh_ts_lo, hh_ts_hi, tmr_idx; 1975 u64 hh_ts; 1976 1977 tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; 1978 /* Read ART time */ 1979 hh_ts_lo = rd32(hw, GLHH_ART_TIME_L); 1980 hh_ts_hi = rd32(hw, GLHH_ART_TIME_H); 1981 hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo; 1982 *system = convert_art_ns_to_tsc(hh_ts); 1983 /* Read Device source clock time */ 1984 hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx)); 1985 hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx)); 1986 hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo; 1987 *device = ns_to_ktime(hh_ts); 1988 break; 1989 } 1990 } 1991 1992 /* Clear the master timer */ 1993 ice_ptp_src_cmd(hw, ICE_PTP_NOP); 1994 1995 /* Release HW lock */ 1996 hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); 1997 hh_lock = hh_lock & ~PFHH_SEM_BUSY_M; 1998 wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock); 1999 2000 if (i == MAX_HH_CTL_LOCK_TRIES) 2001 return -ETIMEDOUT; 2002 2003 return 0; 2004 } 2005 2006 /** 2007 * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp 2008 * @info: the driver's PTP info structure 2009 * @cts: The memory to fill the cross timestamp info 2010 * 2011 * Capture a cross timestamp between the ART and the device PTP hardware 2012 * clock. Fill the cross timestamp information and report it back to the 2013 * caller. 2014 * 2015 * This is only valid for E822 and E823 devices which have support for 2016 * generating the cross timestamp via PCIe PTM. 2017 * 2018 * In order to correctly correlate the ART timestamp back to the TSC time, the 2019 * CPU must have X86_FEATURE_TSC_KNOWN_FREQ. 
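 *
 * Userspace requests such a cross timestamp through the PTP_SYS_OFFSET_PRECISE
 * ioctl on the PHC character device. A rough sketch of that side (hedged
 * example: fd is an assumed descriptor for the clock device and error handling
 * is omitted):
 *
 *      struct ptp_sys_offset_precise precise = { 0 };
 *
 *      ioctl(fd, PTP_SYS_OFFSET_PRECISE, &precise);
 *
 * after which precise.device, precise.sys_realtime and precise.sys_monoraw
 * hold the correlated device and system timestamps.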
2020 */ 2021 static int 2022 ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info, 2023 struct system_device_crosststamp *cts) 2024 { 2025 struct ice_pf *pf = ptp_info_to_pf(info); 2026 2027 return get_device_system_crosststamp(ice_ptp_get_syncdevicetime, 2028 pf, NULL, cts); 2029 } 2030 #endif /* CONFIG_ICE_HWTS */ 2031 2032 /** 2033 * ice_ptp_get_ts_config - ioctl interface to read the timestamping config 2034 * @pf: Board private structure 2035 * @ifr: ioctl data 2036 * 2037 * Copy the timestamping config to user buffer 2038 */ 2039 int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr) 2040 { 2041 struct hwtstamp_config *config; 2042 2043 if (!test_bit(ICE_FLAG_PTP, pf->flags)) 2044 return -EIO; 2045 2046 config = &pf->ptp.tstamp_config; 2047 2048 return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? 2049 -EFAULT : 0; 2050 } 2051 2052 /** 2053 * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode 2054 * @pf: Board private structure 2055 * @config: hwtstamp settings requested or saved 2056 */ 2057 static int 2058 ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config) 2059 { 2060 switch (config->tx_type) { 2061 case HWTSTAMP_TX_OFF: 2062 pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF; 2063 break; 2064 case HWTSTAMP_TX_ON: 2065 pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON; 2066 break; 2067 default: 2068 return -ERANGE; 2069 } 2070 2071 switch (config->rx_filter) { 2072 case HWTSTAMP_FILTER_NONE: 2073 pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; 2074 break; 2075 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2076 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2077 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2078 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2079 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2080 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2081 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2082 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2083 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2084 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2085 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2086 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2087 case HWTSTAMP_FILTER_NTP_ALL: 2088 case HWTSTAMP_FILTER_ALL: 2089 pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL; 2090 break; 2091 default: 2092 return -ERANGE; 2093 } 2094 2095 /* Immediately update the device timestamping mode */ 2096 ice_ptp_restore_timestamp_mode(pf); 2097 2098 return 0; 2099 } 2100 2101 /** 2102 * ice_ptp_set_ts_config - ioctl interface to control the timestamping 2103 * @pf: Board private structure 2104 * @ifr: ioctl data 2105 * 2106 * Get the user config and store it 2107 */ 2108 int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr) 2109 { 2110 struct hwtstamp_config config; 2111 int err; 2112 2113 if (!test_bit(ICE_FLAG_PTP, pf->flags)) 2114 return -EAGAIN; 2115 2116 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 2117 return -EFAULT; 2118 2119 err = ice_ptp_set_timestamp_mode(pf, &config); 2120 if (err) 2121 return err; 2122 2123 /* Return the actual configuration set */ 2124 config = pf->ptp.tstamp_config; 2125 2126 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 2127 -EFAULT : 0; 2128 } 2129 2130 /** 2131 * ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns 2132 * @rx_desc: Receive descriptor 2133 * @pkt_ctx: Packet context to get the cached time 2134 * 2135 * The driver receives a notification in the receive descriptor with timestamp. 
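 *
 * The return value is a full 64-bit timestamp in nanoseconds, or 0 when no
 * valid timestamp is available. A typical Rx-path caller would consume it
 * roughly as follows (illustrative sketch only; skb and the local names are
 * assumed):
 *
 *      u64 ts_ns = ice_ptp_get_rx_hwts(rx_desc, pkt_ctx);
 *
 *      if (ts_ns)
 *              skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ts_ns);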
2136 */ 2137 u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc, 2138 const struct ice_pkt_ctx *pkt_ctx) 2139 { 2140 u64 ts_ns, cached_time; 2141 u32 ts_high; 2142 2143 if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID)) 2144 return 0; 2145 2146 cached_time = READ_ONCE(pkt_ctx->cached_phctime); 2147 2148 /* Do not report a timestamp if we don't have a cached PHC time */ 2149 if (!cached_time) 2150 return 0; 2151 2152 /* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached 2153 * PHC value, rather than accessing the PF. This also allows us to 2154 * simply pass the upper 32bits of nanoseconds directly. Calling 2155 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these 2156 * bits itself. 2157 */ 2158 ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high); 2159 ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high); 2160 2161 return ts_ns; 2162 } 2163 2164 /** 2165 * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins 2166 * @pf: pointer to the PF structure 2167 * @info: PTP clock info structure 2168 * 2169 * Disable the OS access to the SMA pins. Called to clear out the OS 2170 * indications of pin support when we fail to setup the E810-T SMA control 2171 * register. 2172 */ 2173 static void 2174 ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) 2175 { 2176 struct device *dev = ice_pf_to_dev(pf); 2177 2178 dev_warn(dev, "Failed to configure E810-T SMA pin control\n"); 2179 2180 info->enable = NULL; 2181 info->verify = NULL; 2182 info->n_pins = 0; 2183 info->n_ext_ts = 0; 2184 info->n_per_out = 0; 2185 } 2186 2187 /** 2188 * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins 2189 * @pf: pointer to the PF structure 2190 * @info: PTP clock info structure 2191 * 2192 * Finish setting up the SMA pins by allocating pin_config, and setting it up 2193 * according to the current status of the SMA. On failure, disable all of the 2194 * extended SMA pin support. 
2195 */ 2196 static void 2197 ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) 2198 { 2199 struct device *dev = ice_pf_to_dev(pf); 2200 int err; 2201 2202 /* Allocate memory for kernel pins interface */ 2203 info->pin_config = devm_kcalloc(dev, info->n_pins, 2204 sizeof(*info->pin_config), GFP_KERNEL); 2205 if (!info->pin_config) { 2206 ice_ptp_disable_sma_pins_e810t(pf, info); 2207 return; 2208 } 2209 2210 /* Read current SMA status */ 2211 err = ice_get_sma_config_e810t(&pf->hw, info->pin_config); 2212 if (err) 2213 ice_ptp_disable_sma_pins_e810t(pf, info); 2214 } 2215 2216 /** 2217 * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs 2218 * @pf: pointer to the PF instance 2219 * @info: PTP clock capabilities 2220 */ 2221 static void 2222 ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info) 2223 { 2224 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { 2225 info->n_ext_ts = N_EXT_TS_E810; 2226 info->n_per_out = N_PER_OUT_E810T; 2227 info->n_pins = NUM_PTP_PINS_E810T; 2228 info->verify = ice_verify_pin_e810t; 2229 2230 /* Complete setup of the SMA pins */ 2231 ice_ptp_setup_sma_pins_e810t(pf, info); 2232 } else if (ice_is_e810t(&pf->hw)) { 2233 info->n_ext_ts = N_EXT_TS_NO_SMA_E810T; 2234 info->n_per_out = N_PER_OUT_NO_SMA_E810T; 2235 } else { 2236 info->n_per_out = N_PER_OUT_E810; 2237 info->n_ext_ts = N_EXT_TS_E810; 2238 } 2239 } 2240 2241 /** 2242 * ice_ptp_setup_pins_e823 - Setup PTP pins in sysfs 2243 * @pf: pointer to the PF instance 2244 * @info: PTP clock capabilities 2245 */ 2246 static void 2247 ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info) 2248 { 2249 info->pps = 1; 2250 info->n_per_out = 0; 2251 info->n_ext_ts = 1; 2252 } 2253 2254 /** 2255 * ice_ptp_set_funcs_e82x - Set specialized functions for E82x support 2256 * @pf: Board private structure 2257 * @info: PTP info to fill 2258 * 2259 * Assign functions to the PTP capabilities structure for E82x devices. 2260 * Functions which operate across all device families should be set directly 2261 * in ice_ptp_set_caps. Only add functions here which are distinct for E82x 2262 * devices. 2263 */ 2264 static void 2265 ice_ptp_set_funcs_e82x(struct ice_pf *pf, struct ptp_clock_info *info) 2266 { 2267 #ifdef CONFIG_ICE_HWTS 2268 if (boot_cpu_has(X86_FEATURE_ART) && 2269 boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) 2270 info->getcrosststamp = ice_ptp_getcrosststamp_e82x; 2271 #endif /* CONFIG_ICE_HWTS */ 2272 } 2273 2274 /** 2275 * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support 2276 * @pf: Board private structure 2277 * @info: PTP info to fill 2278 * 2279 * Assign functions to the PTP capabilities structure for E810 devices. 2280 * Functions which operate across all device families should be set directly 2281 * in ice_ptp_set_caps. Only add functions here which are distinct for e810 2282 * devices. 2283 */ 2284 static void 2285 ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info) 2286 { 2287 info->enable = ice_ptp_gpio_enable_e810; 2288 ice_ptp_setup_pins_e810(pf, info); 2289 } 2290 2291 /** 2292 * ice_ptp_set_funcs_e823 - Set specialized functions for E823 support 2293 * @pf: Board private structure 2294 * @info: PTP info to fill 2295 * 2296 * Assign functions to the PTP capabilities structure for E823 devices. 2297 * Functions which operate across all device families should be set directly 2298 * in ice_ptp_set_caps. Only add functions here which are distinct for e823 2299 * devices.
2300 */ 2301 static void 2302 ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info) 2303 { 2304 ice_ptp_set_funcs_e82x(pf, info); 2305 2306 info->enable = ice_ptp_gpio_enable_e823; 2307 ice_ptp_setup_pins_e823(pf, info); 2308 } 2309 2310 /** 2311 * ice_ptp_set_caps - Set PTP capabilities 2312 * @pf: Board private structure 2313 */ 2314 static void ice_ptp_set_caps(struct ice_pf *pf) 2315 { 2316 struct ptp_clock_info *info = &pf->ptp.info; 2317 struct device *dev = ice_pf_to_dev(pf); 2318 2319 snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk", 2320 dev_driver_string(dev), dev_name(dev)); 2321 info->owner = THIS_MODULE; 2322 info->max_adj = 100000000; 2323 info->adjtime = ice_ptp_adjtime; 2324 info->adjfine = ice_ptp_adjfine; 2325 info->gettimex64 = ice_ptp_gettimex64; 2326 info->settime64 = ice_ptp_settime64; 2327 2328 if (ice_is_e810(&pf->hw)) 2329 ice_ptp_set_funcs_e810(pf, info); 2330 else if (ice_is_e823(&pf->hw)) 2331 ice_ptp_set_funcs_e823(pf, info); 2332 else 2333 ice_ptp_set_funcs_e82x(pf, info); 2334 } 2335 2336 /** 2337 * ice_ptp_create_clock - Create PTP clock device for userspace 2338 * @pf: Board private structure 2339 * 2340 * This function creates a new PTP clock device. It only creates one if we 2341 * don't already have one. Will return error if it can't create one, but success 2342 * if we already have a device. Should be used by ice_ptp_init to create clock 2343 * initially, and prevent global resets from creating new clock devices. 2344 */ 2345 static long ice_ptp_create_clock(struct ice_pf *pf) 2346 { 2347 struct ptp_clock_info *info; 2348 struct device *dev; 2349 2350 /* No need to create a clock device if we already have one */ 2351 if (pf->ptp.clock) 2352 return 0; 2353 2354 ice_ptp_set_caps(pf); 2355 2356 info = &pf->ptp.info; 2357 dev = ice_pf_to_dev(pf); 2358 2359 /* Attempt to register the clock before enabling the hardware. */ 2360 pf->ptp.clock = ptp_clock_register(info, dev); 2361 if (IS_ERR(pf->ptp.clock)) { 2362 dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device"); 2363 return PTR_ERR(pf->ptp.clock); 2364 } 2365 2366 return 0; 2367 } 2368 2369 /** 2370 * ice_ptp_request_ts - Request an available Tx timestamp index 2371 * @tx: the PTP Tx timestamp tracker to request from 2372 * @skb: the SKB to associate with this timestamp request 2373 */ 2374 s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) 2375 { 2376 u8 idx; 2377 2378 spin_lock(&tx->lock); 2379 2380 /* Check that this tracker is accepting new timestamp requests */ 2381 if (!ice_ptp_is_tx_tracker_up(tx)) { 2382 spin_unlock(&tx->lock); 2383 return -1; 2384 } 2385 2386 /* Find and set the first available index */ 2387 idx = find_first_zero_bit(tx->in_use, tx->len); 2388 if (idx < tx->len) { 2389 /* We got a valid index that no other thread could have set. Store 2390 * a reference to the skb and the start time to allow discarding old 2391 * requests. 2392 */ 2393 set_bit(idx, tx->in_use); 2394 clear_bit(idx, tx->stale); 2395 tx->tstamps[idx].start = jiffies; 2396 tx->tstamps[idx].skb = skb_get(skb); 2397 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2398 ice_trace(tx_tstamp_request, skb, idx); 2399 } 2400 2401 spin_unlock(&tx->lock); 2402 2403 /* return the appropriate PHY timestamp register index, -1 if no 2404 * indexes were available. 
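	 * A non-negative index is expected to be programmed by the caller into
	 * the Tx context descriptor so that hardware latches the completed
	 * timestamp into the matching PHY register (hedged note; the Tx hot
	 * path lives outside this file).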
2405 */ 2406 if (idx >= tx->len) 2407 return -1; 2408 else 2409 return idx + tx->offset; 2410 } 2411 2412 /** 2413 * ice_ptp_process_ts - Process the PTP Tx timestamps 2414 * @pf: Board private structure 2415 * 2416 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx 2417 * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise. 2418 */ 2419 enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf) 2420 { 2421 switch (pf->ptp.tx_interrupt_mode) { 2422 case ICE_PTP_TX_INTERRUPT_NONE: 2423 /* This device has the clock owner handle timestamps for it */ 2424 return ICE_TX_TSTAMP_WORK_DONE; 2425 case ICE_PTP_TX_INTERRUPT_SELF: 2426 /* This device handles its own timestamps */ 2427 return ice_ptp_tx_tstamp(&pf->ptp.port.tx); 2428 case ICE_PTP_TX_INTERRUPT_ALL: 2429 /* This device handles timestamps for all ports */ 2430 return ice_ptp_tx_tstamp_owner(pf); 2431 default: 2432 WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n", 2433 pf->ptp.tx_interrupt_mode); 2434 return ICE_TX_TSTAMP_WORK_DONE; 2435 } 2436 } 2437 2438 /** 2439 * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt 2440 * @pf: Board private structure 2441 * 2442 * The device PHY issues Tx timestamp interrupts to the driver for processing 2443 * timestamp data from the PHY. It will not interrupt again until all 2444 * current timestamp data is read. In rare circumstances, it is possible that 2445 * the driver fails to read all outstanding data. 2446 * 2447 * To avoid getting permanently stuck, periodically check if the PHY has 2448 * outstanding timestamp data. If so, trigger an interrupt from software to 2449 * process this data. 2450 */ 2451 static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf) 2452 { 2453 struct device *dev = ice_pf_to_dev(pf); 2454 struct ice_hw *hw = &pf->hw; 2455 bool trigger_oicr = false; 2456 unsigned int i; 2457 2458 if (ice_is_e810(hw)) 2459 return; 2460 2461 if (!ice_pf_src_tmr_owned(pf)) 2462 return; 2463 2464 for (i = 0; i < ICE_MAX_QUAD; i++) { 2465 u64 tstamp_ready; 2466 int err; 2467 2468 err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready); 2469 if (!err && tstamp_ready) { 2470 trigger_oicr = true; 2471 break; 2472 } 2473 } 2474 2475 if (trigger_oicr) { 2476 /* Trigger a software interrupt, to ensure this data 2477 * gets processed. 2478 */ 2479 dev_dbg(dev, "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n"); 2480 2481 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); 2482 ice_flush(hw); 2483 } 2484 } 2485 2486 static void ice_ptp_periodic_work(struct kthread_work *work) 2487 { 2488 struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work); 2489 struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp); 2490 int err; 2491 2492 if (!test_bit(ICE_FLAG_PTP, pf->flags)) 2493 return; 2494 2495 err = ice_ptp_update_cached_phctime(pf); 2496 2497 ice_ptp_maybe_trigger_tx_interrupt(pf); 2498 2499 /* Run twice a second or reschedule if phc update failed */ 2500 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 2501 msecs_to_jiffies(err ?
10 : 500)); 2502 } 2503 2504 /** 2505 * ice_ptp_reset - Initialize PTP hardware clock support after reset 2506 * @pf: Board private structure 2507 */ 2508 void ice_ptp_reset(struct ice_pf *pf) 2509 { 2510 struct ice_ptp *ptp = &pf->ptp; 2511 struct ice_hw *hw = &pf->hw; 2512 struct timespec64 ts; 2513 int err, itr = 1; 2514 u64 time_diff; 2515 2516 if (test_bit(ICE_PFR_REQ, pf->state) || 2517 !ice_pf_src_tmr_owned(pf)) 2518 goto pfr; 2519 2520 err = ice_ptp_init_phc(hw); 2521 if (err) 2522 goto err; 2523 2524 /* Acquire the global hardware lock */ 2525 if (!ice_ptp_lock(hw)) { 2526 err = -EBUSY; 2527 goto err; 2528 } 2529 2530 /* Write the increment time value to PHY and LAN */ 2531 err = ice_ptp_write_incval(hw, ice_base_incval(pf)); 2532 if (err) { 2533 ice_ptp_unlock(hw); 2534 goto err; 2535 } 2536 2537 /* Write the initial Time value to PHY and LAN using the cached PHC 2538 * time before the reset and time difference between stopping and 2539 * starting the clock. 2540 */ 2541 if (ptp->cached_phc_time) { 2542 time_diff = ktime_get_real_ns() - ptp->reset_time; 2543 ts = ns_to_timespec64(ptp->cached_phc_time + time_diff); 2544 } else { 2545 ts = ktime_to_timespec64(ktime_get_real()); 2546 } 2547 err = ice_ptp_write_init(pf, &ts); 2548 if (err) { 2549 ice_ptp_unlock(hw); 2550 goto err; 2551 } 2552 2553 /* Release the global hardware lock */ 2554 ice_ptp_unlock(hw); 2555 2556 if (!ice_is_e810(hw)) { 2557 /* Enable quad interrupts */ 2558 err = ice_ptp_tx_ena_intr(pf, true, itr); 2559 if (err) 2560 goto err; 2561 } 2562 2563 pfr: 2564 /* Init Tx structures */ 2565 if (ice_is_e810(&pf->hw)) { 2566 err = ice_ptp_init_tx_e810(pf, &ptp->port.tx); 2567 } else { 2568 kthread_init_delayed_work(&ptp->port.ov_work, 2569 ice_ptp_wait_for_offsets); 2570 err = ice_ptp_init_tx_e82x(pf, &ptp->port.tx, 2571 ptp->port.port_num); 2572 } 2573 if (err) 2574 goto err; 2575 2576 set_bit(ICE_FLAG_PTP, pf->flags); 2577 2578 /* Restart the PHY timestamping block */ 2579 if (!test_bit(ICE_PFR_REQ, pf->state) && 2580 ice_pf_src_tmr_owned(pf)) 2581 ice_ptp_restart_all_phy(pf); 2582 2583 /* Start periodic work going */ 2584 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0); 2585 2586 dev_info(ice_pf_to_dev(pf), "PTP reset successful\n"); 2587 return; 2588 2589 err: 2590 dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err); 2591 } 2592 2593 /** 2594 * ice_ptp_aux_dev_to_aux_pf - Get auxiliary PF handle for the auxiliary device 2595 * @aux_dev: auxiliary device to get the auxiliary PF for 2596 */ 2597 static struct ice_pf * 2598 ice_ptp_aux_dev_to_aux_pf(struct auxiliary_device *aux_dev) 2599 { 2600 struct ice_ptp_port *aux_port; 2601 struct ice_ptp *aux_ptp; 2602 2603 aux_port = container_of(aux_dev, struct ice_ptp_port, aux_dev); 2604 aux_ptp = container_of(aux_port, struct ice_ptp, port); 2605 2606 return container_of(aux_ptp, struct ice_pf, ptp); 2607 } 2608 2609 /** 2610 * ice_ptp_aux_dev_to_owner_pf - Get PF handle for the auxiliary device 2611 * @aux_dev: auxiliary device to get the PF for 2612 */ 2613 static struct ice_pf * 2614 ice_ptp_aux_dev_to_owner_pf(struct auxiliary_device *aux_dev) 2615 { 2616 struct ice_ptp_port_owner *ports_owner; 2617 struct auxiliary_driver *aux_drv; 2618 struct ice_ptp *owner_ptp; 2619 2620 if (!aux_dev->dev.driver) 2621 return NULL; 2622 2623 aux_drv = to_auxiliary_drv(aux_dev->dev.driver); 2624 ports_owner = container_of(aux_drv, struct ice_ptp_port_owner, 2625 aux_driver); 2626 owner_ptp = container_of(ports_owner, struct ice_ptp, ports_owner); 2627 return 
container_of(owner_ptp, struct ice_pf, ptp); 2628 } 2629 2630 /** 2631 * ice_ptp_auxbus_probe - Probe auxiliary devices 2632 * @aux_dev: PF's auxiliary device 2633 * @id: Auxiliary device ID 2634 */ 2635 static int ice_ptp_auxbus_probe(struct auxiliary_device *aux_dev, 2636 const struct auxiliary_device_id *id) 2637 { 2638 struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev); 2639 struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev); 2640 2641 if (WARN_ON(!owner_pf)) 2642 return -ENODEV; 2643 2644 INIT_LIST_HEAD(&aux_pf->ptp.port.list_member); 2645 mutex_lock(&owner_pf->ptp.ports_owner.lock); 2646 list_add(&aux_pf->ptp.port.list_member, 2647 &owner_pf->ptp.ports_owner.ports); 2648 mutex_unlock(&owner_pf->ptp.ports_owner.lock); 2649 2650 return 0; 2651 } 2652 2653 /** 2654 * ice_ptp_auxbus_remove - Remove auxiliary devices from the bus 2655 * @aux_dev: PF's auxiliary device 2656 */ 2657 static void ice_ptp_auxbus_remove(struct auxiliary_device *aux_dev) 2658 { 2659 struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev); 2660 struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev); 2661 2662 mutex_lock(&owner_pf->ptp.ports_owner.lock); 2663 list_del(&aux_pf->ptp.port.list_member); 2664 mutex_unlock(&owner_pf->ptp.ports_owner.lock); 2665 } 2666 2667 /** 2668 * ice_ptp_auxbus_shutdown 2669 * @aux_dev: PF's auxiliary device 2670 */ 2671 static void ice_ptp_auxbus_shutdown(struct auxiliary_device *aux_dev) 2672 { 2673 /* Doing nothing here, but handle to auxbus driver must be satisfied */ 2674 } 2675 2676 /** 2677 * ice_ptp_auxbus_suspend 2678 * @aux_dev: PF's auxiliary device 2679 * @state: power management state indicator 2680 */ 2681 static int 2682 ice_ptp_auxbus_suspend(struct auxiliary_device *aux_dev, pm_message_t state) 2683 { 2684 /* Doing nothing here, but handle to auxbus driver must be satisfied */ 2685 return 0; 2686 } 2687 2688 /** 2689 * ice_ptp_auxbus_resume 2690 * @aux_dev: PF's auxiliary device 2691 */ 2692 static int ice_ptp_auxbus_resume(struct auxiliary_device *aux_dev) 2693 { 2694 /* Doing nothing here, but handle to auxbus driver must be satisfied */ 2695 return 0; 2696 } 2697 2698 /** 2699 * ice_ptp_auxbus_create_id_table - Create auxiliary device ID table 2700 * @pf: Board private structure 2701 * @name: auxiliary bus driver name 2702 */ 2703 static struct auxiliary_device_id * 2704 ice_ptp_auxbus_create_id_table(struct ice_pf *pf, const char *name) 2705 { 2706 struct auxiliary_device_id *ids; 2707 2708 /* Second id left empty to terminate the array */ 2709 ids = devm_kcalloc(ice_pf_to_dev(pf), 2, 2710 sizeof(struct auxiliary_device_id), GFP_KERNEL); 2711 if (!ids) 2712 return NULL; 2713 2714 snprintf(ids[0].name, sizeof(ids[0].name), "ice.%s", name); 2715 2716 return ids; 2717 } 2718 2719 /** 2720 * ice_ptp_register_auxbus_driver - Register PTP auxiliary bus driver 2721 * @pf: Board private structure 2722 */ 2723 static int ice_ptp_register_auxbus_driver(struct ice_pf *pf) 2724 { 2725 struct auxiliary_driver *aux_driver; 2726 struct ice_ptp *ptp; 2727 struct device *dev; 2728 char *name; 2729 int err; 2730 2731 ptp = &pf->ptp; 2732 dev = ice_pf_to_dev(pf); 2733 aux_driver = &ptp->ports_owner.aux_driver; 2734 INIT_LIST_HEAD(&ptp->ports_owner.ports); 2735 mutex_init(&ptp->ports_owner.lock); 2736 name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u", 2737 pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn), 2738 ice_get_ptp_src_clock_index(&pf->hw)); 2739 2740 aux_driver->name = name; 2741 aux_driver->shutdown = ice_ptp_auxbus_shutdown; 
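	/* probe/remove maintain the clock owner's list of ports;
	 * shutdown/suspend/resume are intentionally no-op stubs kept only to
	 * satisfy the auxiliary bus driver interface (see the stubs above).
	 */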
2742 aux_driver->suspend = ice_ptp_auxbus_suspend; 2743 aux_driver->remove = ice_ptp_auxbus_remove; 2744 aux_driver->resume = ice_ptp_auxbus_resume; 2745 aux_driver->probe = ice_ptp_auxbus_probe; 2746 aux_driver->id_table = ice_ptp_auxbus_create_id_table(pf, name); 2747 if (!aux_driver->id_table) 2748 return -ENOMEM; 2749 2750 err = auxiliary_driver_register(aux_driver); 2751 if (err) { 2752 devm_kfree(dev, aux_driver->id_table); 2753 dev_err(dev, "Failed registering aux_driver, name <%s>\n", 2754 name); 2755 } 2756 2757 return err; 2758 } 2759 2760 /** 2761 * ice_ptp_unregister_auxbus_driver - Unregister PTP auxiliary bus driver 2762 * @pf: Board private structure 2763 */ 2764 static void ice_ptp_unregister_auxbus_driver(struct ice_pf *pf) 2765 { 2766 struct auxiliary_driver *aux_driver = &pf->ptp.ports_owner.aux_driver; 2767 2768 auxiliary_driver_unregister(aux_driver); 2769 devm_kfree(ice_pf_to_dev(pf), aux_driver->id_table); 2770 2771 mutex_destroy(&pf->ptp.ports_owner.lock); 2772 } 2773 2774 /** 2775 * ice_ptp_clock_index - Get the PTP clock index for this device 2776 * @pf: Board private structure 2777 * 2778 * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock 2779 * is associated. 2780 */ 2781 int ice_ptp_clock_index(struct ice_pf *pf) 2782 { 2783 struct auxiliary_device *aux_dev; 2784 struct ice_pf *owner_pf; 2785 struct ptp_clock *clock; 2786 2787 aux_dev = &pf->ptp.port.aux_dev; 2788 owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev); 2789 if (!owner_pf) 2790 return -1; 2791 clock = owner_pf->ptp.clock; 2792 2793 return clock ? ptp_clock_index(clock) : -1; 2794 } 2795 2796 /** 2797 * ice_ptp_prepare_for_reset - Prepare PTP for reset 2798 * @pf: Board private structure 2799 */ 2800 void ice_ptp_prepare_for_reset(struct ice_pf *pf) 2801 { 2802 struct ice_ptp *ptp = &pf->ptp; 2803 u8 src_tmr; 2804 2805 clear_bit(ICE_FLAG_PTP, pf->flags); 2806 2807 /* Disable timestamping for both Tx and Rx */ 2808 ice_ptp_disable_timestamp_mode(pf); 2809 2810 kthread_cancel_delayed_work_sync(&ptp->work); 2811 2812 if (test_bit(ICE_PFR_REQ, pf->state)) 2813 return; 2814 2815 ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); 2816 2817 /* Disable periodic outputs */ 2818 ice_ptp_disable_all_clkout(pf); 2819 2820 src_tmr = ice_get_ptp_src_clock_index(&pf->hw); 2821 2822 /* Disable source clock */ 2823 wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M); 2824 2825 /* Acquire PHC and system timer to restore after reset */ 2826 ptp->reset_time = ktime_get_real_ns(); 2827 } 2828 2829 /** 2830 * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device 2831 * @pf: Board private structure 2832 * 2833 * Setup and initialize a PTP clock device that represents the device hardware 2834 * clock. Save the clock index for other functions connected to the same 2835 * hardware resource. 
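 *
 * Called from ice_ptp_init() only when this PF owns the source timer
 * (ice_pf_src_tmr_owned()); non-owner PFs skip straight to the per-port
 * setup.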
2836 */ 2837 static int ice_ptp_init_owner(struct ice_pf *pf) 2838 { 2839 struct ice_hw *hw = &pf->hw; 2840 struct timespec64 ts; 2841 int err, itr = 1; 2842 2843 err = ice_ptp_init_phc(hw); 2844 if (err) { 2845 dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n", 2846 err); 2847 return err; 2848 } 2849 2850 /* Acquire the global hardware lock */ 2851 if (!ice_ptp_lock(hw)) { 2852 err = -EBUSY; 2853 goto err_exit; 2854 } 2855 2856 /* Write the increment time value to PHY and LAN */ 2857 err = ice_ptp_write_incval(hw, ice_base_incval(pf)); 2858 if (err) { 2859 ice_ptp_unlock(hw); 2860 goto err_exit; 2861 } 2862 2863 ts = ktime_to_timespec64(ktime_get_real()); 2864 /* Write the initial Time value to PHY and LAN */ 2865 err = ice_ptp_write_init(pf, &ts); 2866 if (err) { 2867 ice_ptp_unlock(hw); 2868 goto err_exit; 2869 } 2870 2871 /* Release the global hardware lock */ 2872 ice_ptp_unlock(hw); 2873 2874 if (!ice_is_e810(hw)) { 2875 /* Enable quad interrupts */ 2876 err = ice_ptp_tx_ena_intr(pf, true, itr); 2877 if (err) 2878 goto err_exit; 2879 } 2880 2881 /* Ensure we have a clock device */ 2882 err = ice_ptp_create_clock(pf); 2883 if (err) 2884 goto err_clk; 2885 2886 err = ice_ptp_register_auxbus_driver(pf); 2887 if (err) { 2888 dev_err(ice_pf_to_dev(pf), "Failed to register PTP auxbus driver"); 2889 goto err_aux; 2890 } 2891 2892 return 0; 2893 err_aux: 2894 ptp_clock_unregister(pf->ptp.clock); 2895 err_clk: 2896 pf->ptp.clock = NULL; 2897 err_exit: 2898 return err; 2899 } 2900 2901 /** 2902 * ice_ptp_init_work - Initialize PTP work threads 2903 * @pf: Board private structure 2904 * @ptp: PF PTP structure 2905 */ 2906 static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp) 2907 { 2908 struct kthread_worker *kworker; 2909 2910 /* Initialize work functions */ 2911 kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work); 2912 2913 /* Allocate a kworker for handling work required for the ports 2914 * connected to the PTP hardware clock. 
2915 */ 2916 kworker = kthread_create_worker(0, "ice-ptp-%s", 2917 dev_name(ice_pf_to_dev(pf))); 2918 if (IS_ERR(kworker)) 2919 return PTR_ERR(kworker); 2920 2921 ptp->kworker = kworker; 2922 2923 /* Start periodic work going */ 2924 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0); 2925 2926 return 0; 2927 } 2928 2929 /** 2930 * ice_ptp_init_port - Initialize PTP port structure 2931 * @pf: Board private structure 2932 * @ptp_port: PTP port structure 2933 */ 2934 static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) 2935 { 2936 struct ice_hw *hw = &pf->hw; 2937 2938 mutex_init(&ptp_port->ps_lock); 2939 2940 switch (hw->phy_model) { 2941 case ICE_PHY_E810: 2942 return ice_ptp_init_tx_e810(pf, &ptp_port->tx); 2943 case ICE_PHY_E82X: 2944 kthread_init_delayed_work(&ptp_port->ov_work, 2945 ice_ptp_wait_for_offsets); 2946 2947 return ice_ptp_init_tx_e82x(pf, &ptp_port->tx, 2948 ptp_port->port_num); 2949 default: 2950 return -ENODEV; 2951 } 2952 } 2953 2954 /** 2955 * ice_ptp_release_auxbus_device 2956 * @dev: device that utilizes the auxbus 2957 */ 2958 static void ice_ptp_release_auxbus_device(struct device *dev) 2959 { 2960 /* Doing nothing here, but handle to auxbus device must be satisfied */ 2961 } 2962 2963 /** 2964 * ice_ptp_create_auxbus_device - Create PTP auxiliary bus device 2965 * @pf: Board private structure 2966 */ 2967 static int ice_ptp_create_auxbus_device(struct ice_pf *pf) 2968 { 2969 struct auxiliary_device *aux_dev; 2970 struct ice_ptp *ptp; 2971 struct device *dev; 2972 char *name; 2973 int err; 2974 u32 id; 2975 2976 ptp = &pf->ptp; 2977 id = ptp->port.port_num; 2978 dev = ice_pf_to_dev(pf); 2979 2980 aux_dev = &ptp->port.aux_dev; 2981 2982 name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u", 2983 pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn), 2984 ice_get_ptp_src_clock_index(&pf->hw)); 2985 2986 aux_dev->name = name; 2987 aux_dev->id = id; 2988 aux_dev->dev.release = ice_ptp_release_auxbus_device; 2989 aux_dev->dev.parent = dev; 2990 2991 err = auxiliary_device_init(aux_dev); 2992 if (err) 2993 goto aux_err; 2994 2995 err = auxiliary_device_add(aux_dev); 2996 if (err) { 2997 auxiliary_device_uninit(aux_dev); 2998 goto aux_err; 2999 } 3000 3001 return 0; 3002 aux_err: 3003 dev_err(dev, "Failed to create PTP auxiliary bus device <%s>\n", name); 3004 devm_kfree(dev, name); 3005 return err; 3006 } 3007 3008 /** 3009 * ice_ptp_remove_auxbus_device - Remove PTP auxiliary bus device 3010 * @pf: Board private structure 3011 */ 3012 static void ice_ptp_remove_auxbus_device(struct ice_pf *pf) 3013 { 3014 struct auxiliary_device *aux_dev = &pf->ptp.port.aux_dev; 3015 3016 auxiliary_device_delete(aux_dev); 3017 auxiliary_device_uninit(aux_dev); 3018 3019 memset(aux_dev, 0, sizeof(*aux_dev)); 3020 } 3021 3022 /** 3023 * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode 3024 * @pf: Board private structure 3025 * 3026 * Initialize the Tx timestamp interrupt mode for this device. For most device 3027 * types, each PF processes the interrupt and manages its own timestamps. For 3028 * E822-based devices, only the clock owner processes the timestamps. Other 3029 * PFs disable the interrupt and do not process their own timestamps. 3030 */ 3031 static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf) 3032 { 3033 switch (pf->hw.phy_model) { 3034 case ICE_PHY_E82X: 3035 /* E822 based PHY has the clock owner process the interrupt 3036 * for all ports.
3037 */ 3038 if (ice_pf_src_tmr_owned(pf)) 3039 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL; 3040 else 3041 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE; 3042 break; 3043 default: 3044 /* other PHY types handle their own Tx interrupt */ 3045 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF; 3046 } 3047 } 3048 3049 /** 3050 * ice_ptp_init - Initialize PTP hardware clock support 3051 * @pf: Board private structure 3052 * 3053 * Set up the device for interacting with the PTP hardware clock for all 3054 * functions, both the function that owns the clock hardware, and the 3055 * functions connected to the clock hardware. 3056 * 3057 * The clock owner will allocate and register a ptp_clock with the 3058 * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work 3059 * items used for asynchronous work such as Tx timestamps and periodic work. 3060 */ 3061 void ice_ptp_init(struct ice_pf *pf) 3062 { 3063 struct ice_ptp *ptp = &pf->ptp; 3064 struct ice_hw *hw = &pf->hw; 3065 int err; 3066 3067 ice_ptp_init_phy_model(hw); 3068 3069 ice_ptp_init_tx_interrupt_mode(pf); 3070 3071 /* If this function owns the clock hardware, it must allocate and 3072 * configure the PTP clock device to represent it. 3073 */ 3074 if (ice_pf_src_tmr_owned(pf)) { 3075 err = ice_ptp_init_owner(pf); 3076 if (err) 3077 goto err; 3078 } 3079 3080 ptp->port.port_num = hw->pf_id; 3081 err = ice_ptp_init_port(pf, &ptp->port); 3082 if (err) 3083 goto err; 3084 3085 /* Start the PHY timestamping block */ 3086 ice_ptp_reset_phy_timestamping(pf); 3087 3088 /* Configure initial Tx interrupt settings */ 3089 ice_ptp_cfg_tx_interrupt(pf); 3090 3091 set_bit(ICE_FLAG_PTP, pf->flags); 3092 err = ice_ptp_init_work(pf, ptp); 3093 if (err) 3094 goto err; 3095 3096 err = ice_ptp_create_auxbus_device(pf); 3097 if (err) 3098 goto err; 3099 3100 dev_info(ice_pf_to_dev(pf), "PTP init successful\n"); 3101 return; 3102 3103 err: 3104 /* If we registered a PTP clock, release it */ 3105 if (pf->ptp.clock) { 3106 ptp_clock_unregister(ptp->clock); 3107 pf->ptp.clock = NULL; 3108 } 3109 clear_bit(ICE_FLAG_PTP, pf->flags); 3110 dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err); 3111 } 3112 3113 /** 3114 * ice_ptp_release - Disable the driver/HW support and unregister the clock 3115 * @pf: Board private structure 3116 * 3117 * This function handles the cleanup work required from the initialization by 3118 * clearing out the important information and unregistering the clock 3119 */ 3120 void ice_ptp_release(struct ice_pf *pf) 3121 { 3122 if (!test_bit(ICE_FLAG_PTP, pf->flags)) 3123 return; 3124 3125 /* Disable timestamping for both Tx and Rx */ 3126 ice_ptp_disable_timestamp_mode(pf); 3127 3128 ice_ptp_remove_auxbus_device(pf); 3129 3130 ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); 3131 3132 clear_bit(ICE_FLAG_PTP, pf->flags); 3133 3134 kthread_cancel_delayed_work_sync(&pf->ptp.work); 3135 3136 ice_ptp_port_phy_stop(&pf->ptp.port); 3137 mutex_destroy(&pf->ptp.port.ps_lock); 3138 if (pf->ptp.kworker) { 3139 kthread_destroy_worker(pf->ptp.kworker); 3140 pf->ptp.kworker = NULL; 3141 } 3142 3143 if (!pf->ptp.clock) 3144 return; 3145 3146 /* Disable periodic outputs */ 3147 ice_ptp_disable_all_clkout(pf); 3148 3149 ptp_clock_unregister(pf->ptp.clock); 3150 pf->ptp.clock = NULL; 3151 3152 ice_ptp_unregister_auxbus_driver(pf); 3153 3154 dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n"); 3155 } 3156