// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_trace.h"

#define E810_OUT_PROP_DELAY_NS 1

#define UNKNOWN_INCVAL_E82X 0x100000000ULL

static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
        /* name    idx   func          chan */
        { "GNSS",  GNSS, PTP_PF_EXTTS, 0, { 0, } },
        { "SMA1",  SMA1, PTP_PF_NONE,  1, { 0, } },
        { "U.FL1", UFL1, PTP_PF_NONE,  1, { 0, } },
        { "SMA2",  SMA2, PTP_PF_NONE,  2, { 0, } },
        { "U.FL2", UFL2, PTP_PF_NONE,  2, { 0, } },
};

/**
 * ice_get_sma_config_e810t
 * @hw: pointer to the hw struct
 * @ptp_pins: pointer to the ptp_pin_desc structure
 *
 * Read the configuration of the SMA control logic and put it into the
 * ptp_pin_desc structure
 */
static int
ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
{
        u8 data, i;
        int status;

        /* Read initial pin state */
        status = ice_read_sma_ctrl_e810t(hw, &data);
        if (status)
                return status;

        /* initialize with defaults */
        for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
                strscpy(ptp_pins[i].name, ice_pin_desc_e810t[i].name,
                        sizeof(ptp_pins[i].name));
                ptp_pins[i].index = ice_pin_desc_e810t[i].index;
                ptp_pins[i].func = ice_pin_desc_e810t[i].func;
                ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
        }

        /* Parse SMA1/UFL1 */
        switch (data & ICE_SMA1_MASK_E810T) {
        case ICE_SMA1_MASK_E810T:
        default:
                ptp_pins[SMA1].func = PTP_PF_NONE;
                ptp_pins[UFL1].func = PTP_PF_NONE;
                break;
        case ICE_SMA1_DIR_EN_E810T:
                ptp_pins[SMA1].func = PTP_PF_PEROUT;
                ptp_pins[UFL1].func = PTP_PF_NONE;
                break;
        case ICE_SMA1_TX_EN_E810T:
                ptp_pins[SMA1].func = PTP_PF_EXTTS;
                ptp_pins[UFL1].func = PTP_PF_NONE;
                break;
        case 0:
                ptp_pins[SMA1].func = PTP_PF_EXTTS;
                ptp_pins[UFL1].func = PTP_PF_PEROUT;
                break;
        }

        /* Parse SMA2/UFL2 */
        switch (data & ICE_SMA2_MASK_E810T) {
        case ICE_SMA2_MASK_E810T:
        default:
                ptp_pins[SMA2].func = PTP_PF_NONE;
                ptp_pins[UFL2].func = PTP_PF_NONE;
                break;
        case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
                ptp_pins[SMA2].func = PTP_PF_EXTTS;
                ptp_pins[UFL2].func = PTP_PF_NONE;
                break;
        case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
                ptp_pins[SMA2].func = PTP_PF_PEROUT;
                ptp_pins[UFL2].func = PTP_PF_NONE;
                break;
        case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T):
                ptp_pins[SMA2].func = PTP_PF_NONE;
                ptp_pins[UFL2].func = PTP_PF_EXTTS;
                break;
        case ICE_SMA2_DIR_EN_E810T:
                ptp_pins[SMA2].func = PTP_PF_PEROUT;
                ptp_pins[UFL2].func = PTP_PF_EXTTS;
                break;
        }

        return 0;
}

/**
 * ice_ptp_set_sma_config_e810t
 * @hw: pointer to the hw struct
 * @ptp_pins: pointer to the ptp_pin_desc structure
 *
 * Set the configuration of the SMA control logic based on the configuration
 * in the ptp_pins parameter
 */
static int
ice_ptp_set_sma_config_e810t(struct ice_hw *hw,
                             const struct ptp_pin_desc *ptp_pins)
{
        int status;
        u8 data;

        /* SMA1 and UFL1 cannot be set to TX at the same time */
        if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
            ptp_pins[UFL1].func == PTP_PF_PEROUT)
                return -EINVAL;

        /* SMA2 and UFL2 cannot be set to RX at the same time */
        if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
            ptp_pins[UFL2].func == PTP_PF_EXTTS)
                return -EINVAL;

        /* Read initial pin state value */
        status = ice_read_sma_ctrl_e810t(hw, &data);
        if (status)
                return status;

        /* Set the right state based on the desired configuration */
        data &= ~ICE_SMA1_MASK_E810T;
        if (ptp_pins[SMA1].func == PTP_PF_NONE &&
            ptp_pins[UFL1].func == PTP_PF_NONE) {
                dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled");
                data |= ICE_SMA1_MASK_E810T;
        } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
                   ptp_pins[UFL1].func == PTP_PF_NONE) {
                dev_info(ice_hw_to_dev(hw), "SMA1 RX");
                data |= ICE_SMA1_TX_EN_E810T;
        } else if (ptp_pins[SMA1].func == PTP_PF_NONE &&
                   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
                /* U.FL 1 TX will always enable SMA 1 RX */
                dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
        } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
                   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
                dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
        } else if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
                   ptp_pins[UFL1].func == PTP_PF_NONE) {
                dev_info(ice_hw_to_dev(hw), "SMA1 TX");
                data |= ICE_SMA1_DIR_EN_E810T;
        }

        data &= ~ICE_SMA2_MASK_E810T;
        if (ptp_pins[SMA2].func == PTP_PF_NONE &&
            ptp_pins[UFL2].func == PTP_PF_NONE) {
                dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled");
                data |= ICE_SMA2_MASK_E810T;
        } else if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
                   ptp_pins[UFL2].func == PTP_PF_NONE) {
                dev_info(ice_hw_to_dev(hw), "SMA2 RX");
                data |= (ICE_SMA2_TX_EN_E810T |
                         ICE_SMA2_UFL2_RX_DIS_E810T);
        } else if (ptp_pins[SMA2].func == PTP_PF_NONE &&
                   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
                dev_info(ice_hw_to_dev(hw), "UFL2 RX");
                data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T);
        } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
                   ptp_pins[UFL2].func == PTP_PF_NONE) {
                dev_info(ice_hw_to_dev(hw), "SMA2 TX");
                data |= (ICE_SMA2_DIR_EN_E810T |
                         ICE_SMA2_UFL2_RX_DIS_E810T);
        } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
                   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
                dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX");
                data |= ICE_SMA2_DIR_EN_E810T;
        }

        return ice_write_sma_ctrl_e810t(hw, data);
}

/**
 * ice_ptp_set_sma_e810t
 * @info: the driver's PTP info structure
 * @pin: pin index in kernel structure
 * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT)
 *
 * Set the configuration of a single SMA pin
 */
static int
ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin,
                      enum ptp_pin_function func)
{
        struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T];
        struct ice_pf *pf = ptp_info_to_pf(info);
        struct ice_hw *hw = &pf->hw;
        int err;

        if (pin < SMA1 || func > PTP_PF_PEROUT)
                return -EOPNOTSUPP;

        err = ice_get_sma_config_e810t(hw, ptp_pins);
        if (err)
                return err;

        /* Disable the same function on the other pin sharing the channel */
        if (pin == SMA1 && ptp_pins[UFL1].func == func)
                ptp_pins[UFL1].func = PTP_PF_NONE;
        if (pin == UFL1 && ptp_pins[SMA1].func == func)
                ptp_pins[SMA1].func = PTP_PF_NONE;

        if (pin == SMA2 && ptp_pins[UFL2].func == func)
                ptp_pins[UFL2].func = PTP_PF_NONE;
        if (pin == UFL2 && ptp_pins[SMA2].func == func)
                ptp_pins[SMA2].func = PTP_PF_NONE;

        /* Set up new pin function in the temp table */
        ptp_pins[pin].func = func;

        return ice_ptp_set_sma_config_e810t(hw, ptp_pins);
}

/**
 * ice_verify_pin_e810t
 * @info: the driver's PTP info structure
 * @pin: Pin index
 * @func: Assigned function
 * @chan: Assigned channel
 *
 * Verify that the pin supports the requested function and that the pin
 * assignments are consistent. Reconfigure the SMA logic attached to the
 * given pin to enable its desired functionality
 */
static int
ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
                     enum ptp_pin_function func, unsigned int chan)
{
        /* Don't allow channel reassignment */
        if (chan != ice_pin_desc_e810t[pin].chan)
                return -EOPNOTSUPP;

        /* Check if functions are properly assigned */
        switch (func) {
        case PTP_PF_NONE:
                break;
        case PTP_PF_EXTTS:
                if (pin == UFL1)
                        return -EOPNOTSUPP;
                break;
        case PTP_PF_PEROUT:
                if (pin == UFL2 || pin == GNSS)
                        return -EOPNOTSUPP;
                break;
        case PTP_PF_PHYSYNC:
                return -EOPNOTSUPP;
        }

        return ice_ptp_set_sma_e810t(info, pin, func);
}

/**
 * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
 * @pf: Board private structure
 *
 * Program the device to respond appropriately to the Tx timestamp interrupt
 * cause.
 */
static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf)
{
        struct ice_hw *hw = &pf->hw;
        bool enable;
        u32 val;

        switch (pf->ptp.tx_interrupt_mode) {
        case ICE_PTP_TX_INTERRUPT_ALL:
                /* React to interrupts across all quads. */
                wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
                enable = true;
                break;
        case ICE_PTP_TX_INTERRUPT_NONE:
                /* Do not react to interrupts on any quad. */
                wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
                enable = false;
                break;
        case ICE_PTP_TX_INTERRUPT_SELF:
        default:
                enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON;
                break;
        }

        /* Configure the Tx timestamp interrupt */
        val = rd32(hw, PFINT_OICR_ENA);
        if (enable)
                val |= PFINT_OICR_TSYN_TX_M;
        else
                val &= ~PFINT_OICR_TSYN_TX_M;
        wr32(hw, PFINT_OICR_ENA, val);
}

/**
 * ice_set_rx_tstamp - Enable or disable Rx timestamping
 * @pf: The PF pointer to search in
 * @on: bool value for whether timestamps are enabled or disabled
 */
static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
{
        struct ice_vsi *vsi;
        u16 i;

        vsi = ice_get_main_vsi(pf);
        if (!vsi || !vsi->rx_rings)
                return;

        /* Set the timestamp flag for all the Rx rings */
        ice_for_each_rxq(vsi, i) {
                if (!vsi->rx_rings[i])
                        continue;
                vsi->rx_rings[i]->ptp_rx = on;
        }
}

/**
 * ice_ptp_disable_timestamp_mode - Disable current timestamp mode
 * @pf: Board private structure
 *
 * Called during preparation for reset to temporarily disable timestamping on
 * the device. Called during remove to disable timestamping while cleaning up
 * driver resources.
 */
static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf)
{
        struct ice_hw *hw = &pf->hw;
        u32 val;

        val = rd32(hw, PFINT_OICR_ENA);
        val &= ~PFINT_OICR_TSYN_TX_M;
        wr32(hw, PFINT_OICR_ENA, val);

        ice_set_rx_tstamp(pf, false);
}

/**
 * ice_ptp_restore_timestamp_mode - Restore timestamp configuration
 * @pf: Board private structure
 *
 * Called at the end of rebuild to restore timestamp configuration after
 * a device reset.
345 */ 346 void ice_ptp_restore_timestamp_mode(struct ice_pf *pf) 347 { 348 struct ice_hw *hw = &pf->hw; 349 bool enable_rx; 350 351 ice_ptp_cfg_tx_interrupt(pf); 352 353 enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL; 354 ice_set_rx_tstamp(pf, enable_rx); 355 356 /* Trigger an immediate software interrupt to ensure that timestamps 357 * which occurred during reset are handled now. 358 */ 359 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); 360 ice_flush(hw); 361 } 362 363 /** 364 * ice_ptp_read_src_clk_reg - Read the source clock register 365 * @pf: Board private structure 366 * @sts: Optional parameter for holding a pair of system timestamps from 367 * the system clock. Will be ignored if NULL is given. 368 */ 369 static u64 370 ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts) 371 { 372 struct ice_hw *hw = &pf->hw; 373 u32 hi, lo, lo2; 374 u8 tmr_idx; 375 376 tmr_idx = ice_get_ptp_src_clock_index(hw); 377 /* Read the system timestamp pre PHC read */ 378 ptp_read_system_prets(sts); 379 380 lo = rd32(hw, GLTSYN_TIME_L(tmr_idx)); 381 382 /* Read the system timestamp post PHC read */ 383 ptp_read_system_postts(sts); 384 385 hi = rd32(hw, GLTSYN_TIME_H(tmr_idx)); 386 lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx)); 387 388 if (lo2 < lo) { 389 /* if TIME_L rolled over read TIME_L again and update 390 * system timestamps 391 */ 392 ptp_read_system_prets(sts); 393 lo = rd32(hw, GLTSYN_TIME_L(tmr_idx)); 394 ptp_read_system_postts(sts); 395 hi = rd32(hw, GLTSYN_TIME_H(tmr_idx)); 396 } 397 398 return ((u64)hi << 32) | lo; 399 } 400 401 /** 402 * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b 403 * @cached_phc_time: recently cached copy of PHC time 404 * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value 405 * 406 * Hardware captures timestamps which contain only 32 bits of nominal 407 * nanoseconds, as opposed to the 64bit timestamps that the stack expects. 408 * Note that the captured timestamp values may be 40 bits, but the lower 409 * 8 bits are sub-nanoseconds and generally discarded. 410 * 411 * Extend the 32bit nanosecond timestamp using the following algorithm and 412 * assumptions: 413 * 414 * 1) have a recently cached copy of the PHC time 415 * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1 416 * seconds) before or after the PHC time was captured. 417 * 3) calculate the delta between the cached time and the timestamp 418 * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was 419 * captured after the PHC time. In this case, the full timestamp is just 420 * the cached PHC time plus the delta. 421 * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the 422 * timestamp was captured *before* the PHC time, i.e. because the PHC 423 * cache was updated after the timestamp was captured by hardware. In this 424 * case, the full timestamp is the cached time minus the inverse delta. 425 * 426 * This algorithm works even if the PHC time was updated after a Tx timestamp 427 * was requested, but before the Tx timestamp event was reported from 428 * hardware. 429 * 430 * This calculation primarily relies on keeping the cached PHC time up to 431 * date. If the timestamp was captured more than 2^31 nanoseconds after the 432 * PHC time, it is possible that the lower 32bits of PHC time have 433 * overflowed more than once, and we might generate an incorrect timestamp. 
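 *
 * As a worked illustration (values made up for this comment only): suppose
 * the cached PHC time is 0x5_90000000 ns, so its lower 32 bits are
 * 0x90000000. An in_tstamp of 0x90001000 gives delta = 0x1000, which is
 * less than 2^31, so the extended timestamp is the cached time plus delta,
 * 0x5_90001000. An in_tstamp of 0x8ffff000 gives a delta that wraps to
 * 0xfffff000, which is greater than 2^31, so the timestamp was captured
 * before the cache update and the result is the cached time minus the
 * inverse delta of 0x1000, i.e. 0x5_8ffff000.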
434 * 435 * This is prevented by (a) periodically updating the cached PHC time once 436 * a second, and (b) discarding any Tx timestamp packet if it has waited for 437 * a timestamp for more than one second. 438 */ 439 static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp) 440 { 441 u32 delta, phc_time_lo; 442 u64 ns; 443 444 /* Extract the lower 32 bits of the PHC time */ 445 phc_time_lo = (u32)cached_phc_time; 446 447 /* Calculate the delta between the lower 32bits of the cached PHC 448 * time and the in_tstamp value 449 */ 450 delta = (in_tstamp - phc_time_lo); 451 452 /* Do not assume that the in_tstamp is always more recent than the 453 * cached PHC time. If the delta is large, it indicates that the 454 * in_tstamp was taken in the past, and should be converted 455 * forward. 456 */ 457 if (delta > (U32_MAX / 2)) { 458 /* reverse the delta calculation here */ 459 delta = (phc_time_lo - in_tstamp); 460 ns = cached_phc_time - delta; 461 } else { 462 ns = cached_phc_time + delta; 463 } 464 465 return ns; 466 } 467 468 /** 469 * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds 470 * @pf: Board private structure 471 * @in_tstamp: Ingress/egress 40b timestamp value 472 * 473 * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal 474 * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit. 475 * 476 * *--------------------------------------------------------------* 477 * | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v | 478 * *--------------------------------------------------------------* 479 * 480 * The low bit is an indicator of whether the timestamp is valid. The next 481 * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow, 482 * and the remaining 32 bits are the lower 32 bits of the PHC timer. 483 * 484 * It is assumed that the caller verifies the timestamp is valid prior to 485 * calling this function. 486 * 487 * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC 488 * time stored in the device private PTP structure as the basis for timestamp 489 * extension. 490 * 491 * See ice_ptp_extend_32b_ts for a detailed explanation of the extension 492 * algorithm. 493 */ 494 static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp) 495 { 496 const u64 mask = GENMASK_ULL(31, 0); 497 unsigned long discard_time; 498 499 /* Discard the hardware timestamp if the cached PHC time is too old */ 500 discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000); 501 if (time_is_before_jiffies(discard_time)) { 502 pf->ptp.tx_hwtstamp_discarded++; 503 return 0; 504 } 505 506 return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time, 507 (in_tstamp >> 8) & mask); 508 } 509 510 /** 511 * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps 512 * @tx: the PTP Tx timestamp tracker to check 513 * 514 * Check that a given PTP Tx timestamp tracker is up, i.e. that it is ready 515 * to accept new timestamp requests. 516 * 517 * Assumes the tx->lock spinlock is already held. 518 */ 519 static bool 520 ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx) 521 { 522 lockdep_assert_held(&tx->lock); 523 524 return tx->init && !tx->calibrating; 525 } 526 527 /** 528 * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port 529 * @tx: the PTP Tx timestamp tracker 530 * 531 * Process timestamps captured by the PHY associated with this port. To do 532 * this, loop over each index with a waiting skb. 
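 *
 * Each index corresponds to one entry of the tx->tstamps[] array allocated
 * by ice_ptp_alloc_tx_tracker(), with the tx->in_use and tx->stale bitmaps
 * tracking which slots currently have an outstanding request.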
533 * 534 * If a given index has a valid timestamp, perform the following steps: 535 * 536 * 1) check that the timestamp request is not stale 537 * 2) check that a timestamp is ready and available in the PHY memory bank 538 * 3) read and copy the timestamp out of the PHY register 539 * 4) unlock the index by clearing the associated in_use bit 540 * 5) check if the timestamp is stale, and discard if so 541 * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value 542 * 7) send this 64 bit timestamp to the stack 543 * 544 * Note that we do not hold the tracking lock while reading the Tx timestamp. 545 * This is because reading the timestamp requires taking a mutex that might 546 * sleep. 547 * 548 * The only place where we set in_use is when a new timestamp is initiated 549 * with a slot index. This is only called in the hard xmit routine where an 550 * SKB has a request flag set. The only places where we clear this bit is this 551 * function, or during teardown when the Tx timestamp tracker is being 552 * removed. A timestamp index will never be re-used until the in_use bit for 553 * that index is cleared. 554 * 555 * If a Tx thread starts a new timestamp, we might not begin processing it 556 * right away but we will notice it at the end when we re-queue the task. 557 * 558 * If a Tx thread starts a new timestamp just after this function exits, the 559 * interrupt for that timestamp should re-trigger this function once 560 * a timestamp is ready. 561 * 562 * In cases where the PTP hardware clock was directly adjusted, some 563 * timestamps may not be able to safely use the timestamp extension math. In 564 * this case, software will set the stale bit for any outstanding Tx 565 * timestamps when the clock is adjusted. Then this function will discard 566 * those captured timestamps instead of sending them to the stack. 567 * 568 * If a Tx packet has been waiting for more than 2 seconds, it is not possible 569 * to correctly extend the timestamp using the cached PHC time. It is 570 * extremely unlikely that a packet will ever take this long to timestamp. If 571 * we detect a Tx timestamp request that has waited for this long we assume 572 * the packet will never be sent by hardware and discard it without reading 573 * the timestamp register. 574 */ 575 static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx) 576 { 577 struct ice_ptp_port *ptp_port; 578 struct ice_pf *pf; 579 struct ice_hw *hw; 580 u64 tstamp_ready; 581 bool link_up; 582 int err; 583 u8 idx; 584 585 ptp_port = container_of(tx, struct ice_ptp_port, tx); 586 pf = ptp_port_to_pf(ptp_port); 587 hw = &pf->hw; 588 589 /* Read the Tx ready status first */ 590 err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready); 591 if (err) 592 return; 593 594 /* Drop packets if the link went down */ 595 link_up = ptp_port->link_up; 596 597 for_each_set_bit(idx, tx->in_use, tx->len) { 598 struct skb_shared_hwtstamps shhwtstamps = {}; 599 u8 phy_idx = idx + tx->offset; 600 u64 raw_tstamp = 0, tstamp; 601 bool drop_ts = !link_up; 602 struct sk_buff *skb; 603 604 /* Drop packets which have waited for more than 2 seconds */ 605 if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) { 606 drop_ts = true; 607 608 /* Count the number of Tx timestamps that timed out */ 609 pf->ptp.tx_hwtstamp_timeouts++; 610 } 611 612 /* Only read a timestamp from the PHY if its marked as ready 613 * by the tstamp_ready register. This avoids unnecessary 614 * reading of timestamps which are not yet valid. 
This is 615 * important as we must read all timestamps which are valid 616 * and only timestamps which are valid during each interrupt. 617 * If we do not, the hardware logic for generating a new 618 * interrupt can get stuck on some devices. 619 */ 620 if (!(tstamp_ready & BIT_ULL(phy_idx))) { 621 if (drop_ts) 622 goto skip_ts_read; 623 624 continue; 625 } 626 627 ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx); 628 629 err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp); 630 if (err && !drop_ts) 631 continue; 632 633 ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx); 634 635 /* For PHYs which don't implement a proper timestamp ready 636 * bitmap, verify that the timestamp value is different 637 * from the last cached timestamp. If it is not, skip this for 638 * now assuming it hasn't yet been captured by hardware. 639 */ 640 if (!drop_ts && tx->verify_cached && 641 raw_tstamp == tx->tstamps[idx].cached_tstamp) 642 continue; 643 644 /* Discard any timestamp value without the valid bit set */ 645 if (!(raw_tstamp & ICE_PTP_TS_VALID)) 646 drop_ts = true; 647 648 skip_ts_read: 649 spin_lock(&tx->lock); 650 if (tx->verify_cached && raw_tstamp) 651 tx->tstamps[idx].cached_tstamp = raw_tstamp; 652 clear_bit(idx, tx->in_use); 653 skb = tx->tstamps[idx].skb; 654 tx->tstamps[idx].skb = NULL; 655 if (test_and_clear_bit(idx, tx->stale)) 656 drop_ts = true; 657 spin_unlock(&tx->lock); 658 659 /* It is unlikely but possible that the SKB will have been 660 * flushed at this point due to link change or teardown. 661 */ 662 if (!skb) 663 continue; 664 665 if (drop_ts) { 666 dev_kfree_skb_any(skb); 667 continue; 668 } 669 670 /* Extend the timestamp using cached PHC time */ 671 tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp); 672 if (tstamp) { 673 shhwtstamps.hwtstamp = ns_to_ktime(tstamp); 674 ice_trace(tx_tstamp_complete, skb, idx); 675 } 676 677 skb_tstamp_tx(skb, &shhwtstamps); 678 dev_kfree_skb_any(skb); 679 } 680 } 681 682 /** 683 * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device 684 * @pf: Board private structure 685 */ 686 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf) 687 { 688 struct ice_ptp_port *port; 689 unsigned int i; 690 691 mutex_lock(&pf->ptp.ports_owner.lock); 692 list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member) { 693 struct ice_ptp_tx *tx = &port->tx; 694 695 if (!tx || !tx->init) 696 continue; 697 698 ice_ptp_process_tx_tstamp(tx); 699 } 700 mutex_unlock(&pf->ptp.ports_owner.lock); 701 702 for (i = 0; i < ICE_MAX_QUAD; i++) { 703 u64 tstamp_ready; 704 int err; 705 706 /* Read the Tx ready status first */ 707 err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready); 708 if (err) 709 break; 710 else if (tstamp_ready) 711 return ICE_TX_TSTAMP_WORK_PENDING; 712 } 713 714 return ICE_TX_TSTAMP_WORK_DONE; 715 } 716 717 /** 718 * ice_ptp_tx_tstamp - Process Tx timestamps for this function. 719 * @tx: Tx tracking structure to initialize 720 * 721 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete 722 * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise. 
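 *
 * A hypothetical caller in the Tx timestamp interrupt path might use the
 * return value to decide whether processing needs to be retriggered, for
 * example (reschedule_tstamp_work() is a made-up helper, not part of this
 * driver):
 *
 *      if (ice_ptp_tx_tstamp(tx) == ICE_TX_TSTAMP_WORK_PENDING)
 *              reschedule_tstamp_work();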
723 */ 724 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx) 725 { 726 bool more_timestamps; 727 728 if (!tx->init) 729 return ICE_TX_TSTAMP_WORK_DONE; 730 731 /* Process the Tx timestamp tracker */ 732 ice_ptp_process_tx_tstamp(tx); 733 734 /* Check if there are outstanding Tx timestamps */ 735 spin_lock(&tx->lock); 736 more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len); 737 spin_unlock(&tx->lock); 738 739 if (more_timestamps) 740 return ICE_TX_TSTAMP_WORK_PENDING; 741 742 return ICE_TX_TSTAMP_WORK_DONE; 743 } 744 745 /** 746 * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps 747 * @tx: Tx tracking structure to initialize 748 * 749 * Assumes that the length has already been initialized. Do not call directly, 750 * use the ice_ptp_init_tx_* instead. 751 */ 752 static int 753 ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx) 754 { 755 unsigned long *in_use, *stale; 756 struct ice_tx_tstamp *tstamps; 757 758 tstamps = kcalloc(tx->len, sizeof(*tstamps), GFP_KERNEL); 759 in_use = bitmap_zalloc(tx->len, GFP_KERNEL); 760 stale = bitmap_zalloc(tx->len, GFP_KERNEL); 761 762 if (!tstamps || !in_use || !stale) { 763 kfree(tstamps); 764 bitmap_free(in_use); 765 bitmap_free(stale); 766 767 return -ENOMEM; 768 } 769 770 tx->tstamps = tstamps; 771 tx->in_use = in_use; 772 tx->stale = stale; 773 tx->init = 1; 774 775 spin_lock_init(&tx->lock); 776 777 return 0; 778 } 779 780 /** 781 * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker 782 * @pf: Board private structure 783 * @tx: the tracker to flush 784 * 785 * Called during teardown when a Tx tracker is being removed. 786 */ 787 static void 788 ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) 789 { 790 struct ice_hw *hw = &pf->hw; 791 u64 tstamp_ready; 792 int err; 793 u8 idx; 794 795 err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready); 796 if (err) { 797 dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n", 798 tx->block, err); 799 800 /* If we fail to read the Tx timestamp ready bitmap just 801 * skip clearing the PHY timestamps. 802 */ 803 tstamp_ready = 0; 804 } 805 806 for_each_set_bit(idx, tx->in_use, tx->len) { 807 u8 phy_idx = idx + tx->offset; 808 struct sk_buff *skb; 809 810 /* In case this timestamp is ready, we need to clear it. */ 811 if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx))) 812 ice_clear_phy_tstamp(hw, tx->block, phy_idx); 813 814 spin_lock(&tx->lock); 815 skb = tx->tstamps[idx].skb; 816 tx->tstamps[idx].skb = NULL; 817 clear_bit(idx, tx->in_use); 818 clear_bit(idx, tx->stale); 819 spin_unlock(&tx->lock); 820 821 /* Count the number of Tx timestamps flushed */ 822 pf->ptp.tx_hwtstamp_flushed++; 823 824 /* Free the SKB after we've cleared the bit */ 825 dev_kfree_skb_any(skb); 826 } 827 } 828 829 /** 830 * ice_ptp_mark_tx_tracker_stale - Mark unfinished timestamps as stale 831 * @tx: the tracker to mark 832 * 833 * Mark currently outstanding Tx timestamps as stale. This prevents sending 834 * their timestamp value to the stack. This is required to prevent extending 835 * the 40bit hardware timestamp incorrectly. 836 * 837 * This should be called when the PTP clock is modified such as after a set 838 * time request. 
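 *
 * In this file that happens via ice_ptp_reset_cached_phctime(), which the
 * settime and adjtime paths invoke after writing the PHC.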
839 */ 840 static void 841 ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx) 842 { 843 spin_lock(&tx->lock); 844 bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len); 845 spin_unlock(&tx->lock); 846 } 847 848 /** 849 * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker 850 * @pf: Board private structure 851 * @tx: Tx tracking structure to release 852 * 853 * Free memory associated with the Tx timestamp tracker. 854 */ 855 static void 856 ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) 857 { 858 spin_lock(&tx->lock); 859 tx->init = 0; 860 spin_unlock(&tx->lock); 861 862 /* wait for potentially outstanding interrupt to complete */ 863 synchronize_irq(pf->oicr_irq.virq); 864 865 ice_ptp_flush_tx_tracker(pf, tx); 866 867 kfree(tx->tstamps); 868 tx->tstamps = NULL; 869 870 bitmap_free(tx->in_use); 871 tx->in_use = NULL; 872 873 bitmap_free(tx->stale); 874 tx->stale = NULL; 875 876 tx->len = 0; 877 } 878 879 /** 880 * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps 881 * @pf: Board private structure 882 * @tx: the Tx tracking structure to initialize 883 * @port: the port this structure tracks 884 * 885 * Initialize the Tx timestamp tracker for this port. For generic MAC devices, 886 * the timestamp block is shared for all ports in the same quad. To avoid 887 * ports using the same timestamp index, logically break the block of 888 * registers into chunks based on the port number. 889 */ 890 static int 891 ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) 892 { 893 tx->block = port / ICE_PORTS_PER_QUAD; 894 tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X; 895 tx->len = INDEX_PER_PORT_E82X; 896 tx->verify_cached = 0; 897 898 return ice_ptp_alloc_tx_tracker(tx); 899 } 900 901 /** 902 * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps 903 * @pf: Board private structure 904 * @tx: the Tx tracking structure to initialize 905 * 906 * Initialize the Tx timestamp tracker for this PF. For E810 devices, each 907 * port has its own block of timestamps, independent of the other ports. 908 */ 909 static int 910 ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx) 911 { 912 tx->block = pf->hw.port_info->lport; 913 tx->offset = 0; 914 tx->len = INDEX_PER_PORT_E810; 915 /* The E810 PHY does not provide a timestamp ready bitmap. Instead, 916 * verify new timestamps against cached copy of the last read 917 * timestamp. 918 */ 919 tx->verify_cached = 1; 920 921 return ice_ptp_alloc_tx_tracker(tx); 922 } 923 924 /** 925 * ice_ptp_update_cached_phctime - Update the cached PHC time values 926 * @pf: Board specific private structure 927 * 928 * This function updates the system time values which are cached in the PF 929 * structure and the Rx rings. 930 * 931 * This function must be called periodically to ensure that the cached value 932 * is never more than 2 seconds old. 933 * 934 * Note that the cached copy in the PF PTP structure is always updated, even 935 * if we can't update the copy in the Rx rings. 
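 *
 * The 2 second bound follows from the timestamp extension scheme: 2^31 ns is
 * roughly 2.1 s, so a cache older than that can no longer disambiguate the
 * upper bits of a captured timestamp (see ice_ptp_extend_32b_ts()).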
936 * 937 * Return: 938 * * 0 - OK, successfully updated 939 * * -EAGAIN - PF was busy, need to reschedule the update 940 */ 941 static int ice_ptp_update_cached_phctime(struct ice_pf *pf) 942 { 943 struct device *dev = ice_pf_to_dev(pf); 944 unsigned long update_before; 945 u64 systime; 946 int i; 947 948 update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000); 949 if (pf->ptp.cached_phc_time && 950 time_is_before_jiffies(update_before)) { 951 unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies; 952 953 dev_warn(dev, "%u msecs passed between update to cached PHC time\n", 954 jiffies_to_msecs(time_taken)); 955 pf->ptp.late_cached_phc_updates++; 956 } 957 958 /* Read the current PHC time */ 959 systime = ice_ptp_read_src_clk_reg(pf, NULL); 960 961 /* Update the cached PHC time stored in the PF structure */ 962 WRITE_ONCE(pf->ptp.cached_phc_time, systime); 963 WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies); 964 965 if (test_and_set_bit(ICE_CFG_BUSY, pf->state)) 966 return -EAGAIN; 967 968 ice_for_each_vsi(pf, i) { 969 struct ice_vsi *vsi = pf->vsi[i]; 970 int j; 971 972 if (!vsi) 973 continue; 974 975 if (vsi->type != ICE_VSI_PF) 976 continue; 977 978 ice_for_each_rxq(vsi, j) { 979 if (!vsi->rx_rings[j]) 980 continue; 981 WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime); 982 } 983 } 984 clear_bit(ICE_CFG_BUSY, pf->state); 985 986 return 0; 987 } 988 989 /** 990 * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update 991 * @pf: Board specific private structure 992 * 993 * This function must be called when the cached PHC time is no longer valid, 994 * such as after a time adjustment. It marks any currently outstanding Tx 995 * timestamps as stale and updates the cached PHC time for both the PF and Rx 996 * rings. 997 * 998 * If updating the PHC time cannot be done immediately, a warning message is 999 * logged and the work item is scheduled immediately to minimize the window 1000 * with a wrong cached timestamp. 1001 */ 1002 static void ice_ptp_reset_cached_phctime(struct ice_pf *pf) 1003 { 1004 struct device *dev = ice_pf_to_dev(pf); 1005 int err; 1006 1007 /* Update the cached PHC time immediately if possible, otherwise 1008 * schedule the work item to execute soon. 1009 */ 1010 err = ice_ptp_update_cached_phctime(pf); 1011 if (err) { 1012 /* If another thread is updating the Rx rings, we won't 1013 * properly reset them here. This could lead to reporting of 1014 * invalid timestamps, but there isn't much we can do. 1015 */ 1016 dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n", 1017 __func__); 1018 1019 /* Queue the work item to update the Rx rings when possible */ 1020 kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 1021 msecs_to_jiffies(10)); 1022 } 1023 1024 /* Mark any outstanding timestamps as stale, since they might have 1025 * been captured in hardware before the time update. This could lead 1026 * to us extending them with the wrong cached value resulting in 1027 * incorrect timestamp values. 1028 */ 1029 ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx); 1030 } 1031 1032 /** 1033 * ice_ptp_read_time - Read the time from the device 1034 * @pf: Board private structure 1035 * @ts: timespec structure to hold the current time value 1036 * @sts: Optional parameter for holding a pair of system timestamps from 1037 * the system clock. Will be ignored if NULL is given. 1038 * 1039 * This function reads the source clock registers and stores them in a timespec. 
1040 * However, since the registers are 64 bits of nanoseconds, we must convert the 1041 * result to a timespec before we can return. 1042 */ 1043 static void 1044 ice_ptp_read_time(struct ice_pf *pf, struct timespec64 *ts, 1045 struct ptp_system_timestamp *sts) 1046 { 1047 u64 time_ns = ice_ptp_read_src_clk_reg(pf, sts); 1048 1049 *ts = ns_to_timespec64(time_ns); 1050 } 1051 1052 /** 1053 * ice_ptp_write_init - Set PHC time to provided value 1054 * @pf: Board private structure 1055 * @ts: timespec structure that holds the new time value 1056 * 1057 * Set the PHC time to the specified time provided in the timespec. 1058 */ 1059 static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts) 1060 { 1061 u64 ns = timespec64_to_ns(ts); 1062 struct ice_hw *hw = &pf->hw; 1063 1064 return ice_ptp_init_time(hw, ns); 1065 } 1066 1067 /** 1068 * ice_ptp_write_adj - Adjust PHC clock time atomically 1069 * @pf: Board private structure 1070 * @adj: Adjustment in nanoseconds 1071 * 1072 * Perform an atomic adjustment of the PHC time by the specified number of 1073 * nanoseconds. 1074 */ 1075 static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj) 1076 { 1077 struct ice_hw *hw = &pf->hw; 1078 1079 return ice_ptp_adj_clock(hw, adj); 1080 } 1081 1082 /** 1083 * ice_base_incval - Get base timer increment value 1084 * @pf: Board private structure 1085 * 1086 * Look up the base timer increment value for this device. The base increment 1087 * value is used to define the nominal clock tick rate. This increment value 1088 * is programmed during device initialization. It is also used as the basis 1089 * for calculating adjustments using scaled_ppm. 1090 */ 1091 static u64 ice_base_incval(struct ice_pf *pf) 1092 { 1093 struct ice_hw *hw = &pf->hw; 1094 u64 incval; 1095 1096 if (ice_is_e810(hw)) 1097 incval = ICE_PTP_NOMINAL_INCVAL_E810; 1098 else if (ice_e82x_time_ref(hw) < NUM_ICE_TIME_REF_FREQ) 1099 incval = ice_e82x_nominal_incval(ice_e82x_time_ref(hw)); 1100 else 1101 incval = UNKNOWN_INCVAL_E82X; 1102 1103 dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n", 1104 incval); 1105 1106 return incval; 1107 } 1108 1109 /** 1110 * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state 1111 * @port: PTP port for which Tx FIFO is checked 1112 */ 1113 static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port) 1114 { 1115 int quad = port->port_num / ICE_PORTS_PER_QUAD; 1116 int offs = port->port_num % ICE_PORTS_PER_QUAD; 1117 struct ice_pf *pf; 1118 struct ice_hw *hw; 1119 u32 val, phy_sts; 1120 int err; 1121 1122 pf = ptp_port_to_pf(port); 1123 hw = &pf->hw; 1124 1125 if (port->tx_fifo_busy_cnt == FIFO_OK) 1126 return 0; 1127 1128 /* need to read FIFO state */ 1129 if (offs == 0 || offs == 1) 1130 err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS, 1131 &val); 1132 else 1133 err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS, 1134 &val); 1135 1136 if (err) { 1137 dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n", 1138 port->port_num, err); 1139 return err; 1140 } 1141 1142 if (offs & 0x1) 1143 phy_sts = (val & Q_REG_FIFO13_M) >> Q_REG_FIFO13_S; 1144 else 1145 phy_sts = (val & Q_REG_FIFO02_M) >> Q_REG_FIFO02_S; 1146 1147 if (phy_sts & FIFO_EMPTY) { 1148 port->tx_fifo_busy_cnt = FIFO_OK; 1149 return 0; 1150 } 1151 1152 port->tx_fifo_busy_cnt++; 1153 1154 dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n", 1155 port->tx_fifo_busy_cnt, port->port_num); 1156 1157 if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) { 1158 
dev_dbg(ice_pf_to_dev(pf), 1159 "Port %d Tx FIFO still not empty; resetting quad %d\n", 1160 port->port_num, quad); 1161 ice_ptp_reset_ts_memory_quad_e82x(hw, quad); 1162 port->tx_fifo_busy_cnt = FIFO_OK; 1163 return 0; 1164 } 1165 1166 return -EAGAIN; 1167 } 1168 1169 /** 1170 * ice_ptp_wait_for_offsets - Check for valid Tx and Rx offsets 1171 * @work: Pointer to the kthread_work structure for this task 1172 * 1173 * Check whether hardware has completed measuring the Tx and Rx offset values 1174 * used to configure and enable vernier timestamp calibration. 1175 * 1176 * Once the offset in either direction is measured, configure the associated 1177 * registers with the calibrated offset values and enable timestamping. The Tx 1178 * and Rx directions are configured independently as soon as their associated 1179 * offsets are known. 1180 * 1181 * This function reschedules itself until both Tx and Rx calibration have 1182 * completed. 1183 */ 1184 static void ice_ptp_wait_for_offsets(struct kthread_work *work) 1185 { 1186 struct ice_ptp_port *port; 1187 struct ice_pf *pf; 1188 struct ice_hw *hw; 1189 int tx_err; 1190 int rx_err; 1191 1192 port = container_of(work, struct ice_ptp_port, ov_work.work); 1193 pf = ptp_port_to_pf(port); 1194 hw = &pf->hw; 1195 1196 if (ice_is_reset_in_progress(pf->state)) { 1197 /* wait for device driver to complete reset */ 1198 kthread_queue_delayed_work(pf->ptp.kworker, 1199 &port->ov_work, 1200 msecs_to_jiffies(100)); 1201 return; 1202 } 1203 1204 tx_err = ice_ptp_check_tx_fifo(port); 1205 if (!tx_err) 1206 tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num); 1207 rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num); 1208 if (tx_err || rx_err) { 1209 /* Tx and/or Rx offset not yet configured, try again later */ 1210 kthread_queue_delayed_work(pf->ptp.kworker, 1211 &port->ov_work, 1212 msecs_to_jiffies(100)); 1213 return; 1214 } 1215 } 1216 1217 /** 1218 * ice_ptp_port_phy_stop - Stop timestamping for a PHY port 1219 * @ptp_port: PTP port to stop 1220 */ 1221 static int 1222 ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port) 1223 { 1224 struct ice_pf *pf = ptp_port_to_pf(ptp_port); 1225 u8 port = ptp_port->port_num; 1226 struct ice_hw *hw = &pf->hw; 1227 int err; 1228 1229 if (ice_is_e810(hw)) 1230 return 0; 1231 1232 mutex_lock(&ptp_port->ps_lock); 1233 1234 kthread_cancel_delayed_work_sync(&ptp_port->ov_work); 1235 1236 err = ice_stop_phy_timer_e82x(hw, port, true); 1237 if (err) 1238 dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n", 1239 port, err); 1240 1241 mutex_unlock(&ptp_port->ps_lock); 1242 1243 return err; 1244 } 1245 1246 /** 1247 * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping 1248 * @ptp_port: PTP port for which the PHY start is set 1249 * 1250 * Start the PHY timestamping block, and initiate Vernier timestamping 1251 * calibration. If timestamping cannot be calibrated (such as if link is down) 1252 * then disable the timestamping block instead. 
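 *
 * Within this file it is called from ice_ptp_link_change() when an E82X link
 * comes up and from ice_ptp_restart_all_phy() after the clock is modified.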
1253 */ 1254 static int 1255 ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port) 1256 { 1257 struct ice_pf *pf = ptp_port_to_pf(ptp_port); 1258 u8 port = ptp_port->port_num; 1259 struct ice_hw *hw = &pf->hw; 1260 int err; 1261 1262 if (ice_is_e810(hw)) 1263 return 0; 1264 1265 if (!ptp_port->link_up) 1266 return ice_ptp_port_phy_stop(ptp_port); 1267 1268 mutex_lock(&ptp_port->ps_lock); 1269 1270 kthread_cancel_delayed_work_sync(&ptp_port->ov_work); 1271 1272 /* temporarily disable Tx timestamps while calibrating PHY offset */ 1273 spin_lock(&ptp_port->tx.lock); 1274 ptp_port->tx.calibrating = true; 1275 spin_unlock(&ptp_port->tx.lock); 1276 ptp_port->tx_fifo_busy_cnt = 0; 1277 1278 /* Start the PHY timer in Vernier mode */ 1279 err = ice_start_phy_timer_e82x(hw, port); 1280 if (err) 1281 goto out_unlock; 1282 1283 /* Enable Tx timestamps right away */ 1284 spin_lock(&ptp_port->tx.lock); 1285 ptp_port->tx.calibrating = false; 1286 spin_unlock(&ptp_port->tx.lock); 1287 1288 kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0); 1289 1290 out_unlock: 1291 if (err) 1292 dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n", 1293 port, err); 1294 1295 mutex_unlock(&ptp_port->ps_lock); 1296 1297 return err; 1298 } 1299 1300 /** 1301 * ice_ptp_link_change - Reconfigure PTP after link status change 1302 * @pf: Board private structure 1303 * @port: Port for which the PHY start is set 1304 * @linkup: Link is up or down 1305 */ 1306 void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) 1307 { 1308 struct ice_ptp_port *ptp_port; 1309 struct ice_hw *hw = &pf->hw; 1310 1311 if (!test_bit(ICE_FLAG_PTP, pf->flags)) 1312 return; 1313 1314 if (WARN_ON_ONCE(port >= ICE_NUM_EXTERNAL_PORTS)) 1315 return; 1316 1317 ptp_port = &pf->ptp.port; 1318 if (WARN_ON_ONCE(ptp_port->port_num != port)) 1319 return; 1320 1321 /* Update cached link status for this port immediately */ 1322 ptp_port->link_up = linkup; 1323 1324 switch (hw->phy_model) { 1325 case ICE_PHY_E810: 1326 /* Do not reconfigure E810 PHY */ 1327 return; 1328 case ICE_PHY_E82X: 1329 ice_ptp_port_phy_restart(ptp_port); 1330 return; 1331 default: 1332 dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__); 1333 } 1334 } 1335 1336 /** 1337 * ice_ptp_tx_ena_intr - Enable or disable the Tx timestamp interrupt 1338 * @pf: PF private structure 1339 * @ena: bool value to enable or disable interrupt 1340 * @threshold: Minimum number of packets at which intr is triggered 1341 * 1342 * Utility function to enable or disable Tx timestamp interrupt and threshold 1343 */ 1344 static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold) 1345 { 1346 struct ice_hw *hw = &pf->hw; 1347 int err = 0; 1348 int quad; 1349 u32 val; 1350 1351 ice_ptp_reset_ts_memory(hw); 1352 1353 for (quad = 0; quad < ICE_MAX_QUAD; quad++) { 1354 err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, 1355 &val); 1356 if (err) 1357 break; 1358 1359 if (ena) { 1360 val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; 1361 val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M; 1362 val |= ((threshold << Q_REG_TX_MEM_GBL_CFG_INTR_THR_S) & 1363 Q_REG_TX_MEM_GBL_CFG_INTR_THR_M); 1364 } else { 1365 val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; 1366 } 1367 1368 err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, 1369 val); 1370 if (err) 1371 break; 1372 } 1373 1374 if (err) 1375 dev_err(ice_pf_to_dev(pf), "PTP failed in intr ena, err %d\n", 1376 err); 1377 return err; 1378 } 1379 1380 /** 1381 * ice_ptp_reset_phy_timestamping - Reset PHY timestamping 
block 1382 * @pf: Board private structure 1383 */ 1384 static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf) 1385 { 1386 ice_ptp_port_phy_restart(&pf->ptp.port); 1387 } 1388 1389 /** 1390 * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping 1391 * @pf: Board private structure 1392 */ 1393 static void ice_ptp_restart_all_phy(struct ice_pf *pf) 1394 { 1395 struct list_head *entry; 1396 1397 list_for_each(entry, &pf->ptp.ports_owner.ports) { 1398 struct ice_ptp_port *port = list_entry(entry, 1399 struct ice_ptp_port, 1400 list_member); 1401 1402 if (port->link_up) 1403 ice_ptp_port_phy_restart(port); 1404 } 1405 } 1406 1407 /** 1408 * ice_ptp_adjfine - Adjust clock increment rate 1409 * @info: the driver's PTP info structure 1410 * @scaled_ppm: Parts per million with 16-bit fractional field 1411 * 1412 * Adjust the frequency of the clock by the indicated scaled ppm from the 1413 * base frequency. 1414 */ 1415 static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm) 1416 { 1417 struct ice_pf *pf = ptp_info_to_pf(info); 1418 struct ice_hw *hw = &pf->hw; 1419 u64 incval; 1420 int err; 1421 1422 incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm); 1423 err = ice_ptp_write_incval_locked(hw, incval); 1424 if (err) { 1425 dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n", 1426 err); 1427 return -EIO; 1428 } 1429 1430 return 0; 1431 } 1432 1433 /** 1434 * ice_ptp_extts_event - Process PTP external clock event 1435 * @pf: Board private structure 1436 */ 1437 void ice_ptp_extts_event(struct ice_pf *pf) 1438 { 1439 struct ptp_clock_event event; 1440 struct ice_hw *hw = &pf->hw; 1441 u8 chan, tmr_idx; 1442 u32 hi, lo; 1443 1444 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 1445 /* Event time is captured by one of the two matched registers 1446 * GLTSYN_EVNT_L: 32 LSB of sampled time event 1447 * GLTSYN_EVNT_H: 32 MSB of sampled time event 1448 * Event is defined in GLTSYN_EVNT_0 register 1449 */ 1450 for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) { 1451 /* Check if channel is enabled */ 1452 if (pf->ptp.ext_ts_irq & (1 << chan)) { 1453 lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx)); 1454 hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx)); 1455 event.timestamp = (((u64)hi) << 32) | lo; 1456 event.type = PTP_CLOCK_EXTTS; 1457 event.index = chan; 1458 1459 /* Fire event */ 1460 ptp_clock_event(pf->ptp.clock, &event); 1461 pf->ptp.ext_ts_irq &= ~(1 << chan); 1462 } 1463 } 1464 } 1465 1466 /** 1467 * ice_ptp_cfg_extts - Configure EXTTS pin and channel 1468 * @pf: Board private structure 1469 * @ena: true to enable; false to disable 1470 * @chan: GPIO channel (0-3) 1471 * @gpio_pin: GPIO pin 1472 * @extts_flags: request flags from the ptp_extts_request.flags 1473 */ 1474 static int 1475 ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin, 1476 unsigned int extts_flags) 1477 { 1478 u32 func, aux_reg, gpio_reg, irq_reg; 1479 struct ice_hw *hw = &pf->hw; 1480 u8 tmr_idx; 1481 1482 if (chan > (unsigned int)pf->ptp.info.n_ext_ts) 1483 return -EINVAL; 1484 1485 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 1486 1487 irq_reg = rd32(hw, PFINT_OICR_ENA); 1488 1489 if (ena) { 1490 /* Enable the interrupt */ 1491 irq_reg |= PFINT_OICR_TSYN_EVNT_M; 1492 aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M; 1493 1494 #define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE BIT(0) 1495 #define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE BIT(1) 1496 1497 /* set event level to requested edge */ 1498 if (extts_flags & PTP_FALLING_EDGE) 1499 aux_reg |= 
GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE; 1500 if (extts_flags & PTP_RISING_EDGE) 1501 aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE; 1502 1503 /* Write GPIO CTL reg. 1504 * 0x1 is input sampled by EVENT register(channel) 1505 * + num_in_channels * tmr_idx 1506 */ 1507 func = 1 + chan + (tmr_idx * 3); 1508 gpio_reg = ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & 1509 GLGEN_GPIO_CTL_PIN_FUNC_M); 1510 pf->ptp.ext_ts_chan |= (1 << chan); 1511 } else { 1512 /* clear the values we set to reset defaults */ 1513 aux_reg = 0; 1514 gpio_reg = 0; 1515 pf->ptp.ext_ts_chan &= ~(1 << chan); 1516 if (!pf->ptp.ext_ts_chan) 1517 irq_reg &= ~PFINT_OICR_TSYN_EVNT_M; 1518 } 1519 1520 wr32(hw, PFINT_OICR_ENA, irq_reg); 1521 wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg); 1522 wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg); 1523 1524 return 0; 1525 } 1526 1527 /** 1528 * ice_ptp_cfg_clkout - Configure clock to generate periodic wave 1529 * @pf: Board private structure 1530 * @chan: GPIO channel (0-3) 1531 * @config: desired periodic clk configuration. NULL will disable channel 1532 * @store: If set to true the values will be stored 1533 * 1534 * Configure the internal clock generator modules to generate the clock wave of 1535 * specified period. 1536 */ 1537 static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan, 1538 struct ice_perout_channel *config, bool store) 1539 { 1540 u64 current_time, period, start_time, phase; 1541 struct ice_hw *hw = &pf->hw; 1542 u32 func, val, gpio_pin; 1543 u8 tmr_idx; 1544 1545 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 1546 1547 /* 0. Reset mode & out_en in AUX_OUT */ 1548 wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0); 1549 1550 /* If we're disabling the output, clear out CLKO and TGT and keep 1551 * output level low 1552 */ 1553 if (!config || !config->ena) { 1554 wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0); 1555 wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0); 1556 wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0); 1557 1558 val = GLGEN_GPIO_CTL_PIN_DIR_M; 1559 gpio_pin = pf->ptp.perout_channels[chan].gpio_pin; 1560 wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val); 1561 1562 /* Store the value if requested */ 1563 if (store) 1564 memset(&pf->ptp.perout_channels[chan], 0, 1565 sizeof(struct ice_perout_channel)); 1566 1567 return 0; 1568 } 1569 period = config->period; 1570 start_time = config->start_time; 1571 div64_u64_rem(start_time, period, &phase); 1572 gpio_pin = config->gpio_pin; 1573 1574 /* 1. Write clkout with half of required period value */ 1575 if (period & 0x1) { 1576 dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n"); 1577 goto err; 1578 } 1579 1580 period >>= 1; 1581 1582 /* For proper operation, the GLTSYN_CLKO must be larger than clock tick 1583 */ 1584 #define MIN_PULSE 3 1585 if (period <= MIN_PULSE || period > U32_MAX) { 1586 dev_err(ice_pf_to_dev(pf), "CLK Period must be > %d && < 2^33", 1587 MIN_PULSE * 2); 1588 goto err; 1589 } 1590 1591 wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period)); 1592 1593 /* Allow time for programming before start_time is hit */ 1594 current_time = ice_ptp_read_src_clk_reg(pf, NULL); 1595 1596 /* if start time is in the past start the timer at the nearest second 1597 * maintaining phase 1598 */ 1599 if (start_time < current_time) 1600 start_time = div64_u64(current_time + NSEC_PER_SEC - 1, 1601 NSEC_PER_SEC) * NSEC_PER_SEC + phase; 1602 1603 if (ice_is_e810(hw)) 1604 start_time -= E810_OUT_PROP_DELAY_NS; 1605 else 1606 start_time -= ice_e82x_pps_delay(ice_e82x_time_ref(hw)); 1607 1608 /* 2. 
Write TARGET time */ 1609 wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time)); 1610 wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start_time)); 1611 1612 /* 3. Write AUX_OUT register */ 1613 val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M; 1614 wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val); 1615 1616 /* 4. write GPIO CTL reg */ 1617 func = 8 + chan + (tmr_idx * 4); 1618 val = GLGEN_GPIO_CTL_PIN_DIR_M | 1619 ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & GLGEN_GPIO_CTL_PIN_FUNC_M); 1620 wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val); 1621 1622 /* Store the value if requested */ 1623 if (store) { 1624 memcpy(&pf->ptp.perout_channels[chan], config, 1625 sizeof(struct ice_perout_channel)); 1626 pf->ptp.perout_channels[chan].start_time = phase; 1627 } 1628 1629 return 0; 1630 err: 1631 dev_err(ice_pf_to_dev(pf), "PTP failed to cfg per_clk\n"); 1632 return -EFAULT; 1633 } 1634 1635 /** 1636 * ice_ptp_disable_all_clkout - Disable all currently configured outputs 1637 * @pf: pointer to the PF structure 1638 * 1639 * Disable all currently configured clock outputs. This is necessary before 1640 * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_clkout to 1641 * re-enable the clocks again. 1642 */ 1643 static void ice_ptp_disable_all_clkout(struct ice_pf *pf) 1644 { 1645 uint i; 1646 1647 for (i = 0; i < pf->ptp.info.n_per_out; i++) 1648 if (pf->ptp.perout_channels[i].ena) 1649 ice_ptp_cfg_clkout(pf, i, NULL, false); 1650 } 1651 1652 /** 1653 * ice_ptp_enable_all_clkout - Enable all configured periodic clock outputs 1654 * @pf: pointer to the PF structure 1655 * 1656 * Enable all currently configured clock outputs. Use this after 1657 * ice_ptp_disable_all_clkout to reconfigure the output signals according to 1658 * their configuration. 
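 *
 * A simplified sketch of the bracket used around a clock write (see
 * ice_ptp_settime64(), which additionally takes the PTP semaphore):
 *
 *      ice_ptp_disable_all_clkout(pf);
 *      err = ice_ptp_write_init(pf, &ts64);
 *      ice_ptp_enable_all_clkout(pf);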
1659 */ 1660 static void ice_ptp_enable_all_clkout(struct ice_pf *pf) 1661 { 1662 uint i; 1663 1664 for (i = 0; i < pf->ptp.info.n_per_out; i++) 1665 if (pf->ptp.perout_channels[i].ena) 1666 ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i], 1667 false); 1668 } 1669 1670 /** 1671 * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC 1672 * @info: the driver's PTP info structure 1673 * @rq: The requested feature to change 1674 * @on: Enable/disable flag 1675 */ 1676 static int 1677 ice_ptp_gpio_enable_e810(struct ptp_clock_info *info, 1678 struct ptp_clock_request *rq, int on) 1679 { 1680 struct ice_pf *pf = ptp_info_to_pf(info); 1681 struct ice_perout_channel clk_cfg = {0}; 1682 bool sma_pres = false; 1683 unsigned int chan; 1684 u32 gpio_pin; 1685 int err; 1686 1687 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) 1688 sma_pres = true; 1689 1690 switch (rq->type) { 1691 case PTP_CLK_REQ_PEROUT: 1692 chan = rq->perout.index; 1693 if (sma_pres) { 1694 if (chan == ice_pin_desc_e810t[SMA1].chan) 1695 clk_cfg.gpio_pin = GPIO_20; 1696 else if (chan == ice_pin_desc_e810t[SMA2].chan) 1697 clk_cfg.gpio_pin = GPIO_22; 1698 else 1699 return -1; 1700 } else if (ice_is_e810t(&pf->hw)) { 1701 if (chan == 0) 1702 clk_cfg.gpio_pin = GPIO_20; 1703 else 1704 clk_cfg.gpio_pin = GPIO_22; 1705 } else if (chan == PPS_CLK_GEN_CHAN) { 1706 clk_cfg.gpio_pin = PPS_PIN_INDEX; 1707 } else { 1708 clk_cfg.gpio_pin = chan; 1709 } 1710 1711 clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) + 1712 rq->perout.period.nsec); 1713 clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) + 1714 rq->perout.start.nsec); 1715 clk_cfg.ena = !!on; 1716 1717 err = ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true); 1718 break; 1719 case PTP_CLK_REQ_EXTTS: 1720 chan = rq->extts.index; 1721 if (sma_pres) { 1722 if (chan < ice_pin_desc_e810t[SMA2].chan) 1723 gpio_pin = GPIO_21; 1724 else 1725 gpio_pin = GPIO_23; 1726 } else if (ice_is_e810t(&pf->hw)) { 1727 if (chan == 0) 1728 gpio_pin = GPIO_21; 1729 else 1730 gpio_pin = GPIO_23; 1731 } else { 1732 gpio_pin = chan; 1733 } 1734 1735 err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin, 1736 rq->extts.flags); 1737 break; 1738 default: 1739 return -EOPNOTSUPP; 1740 } 1741 1742 return err; 1743 } 1744 1745 /** 1746 * ice_ptp_gpio_enable_e823 - Enable/disable ancillary features of PHC 1747 * @info: the driver's PTP info structure 1748 * @rq: The requested feature to change 1749 * @on: Enable/disable flag 1750 */ 1751 static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info, 1752 struct ptp_clock_request *rq, int on) 1753 { 1754 struct ice_pf *pf = ptp_info_to_pf(info); 1755 struct ice_perout_channel clk_cfg = {0}; 1756 int err; 1757 1758 switch (rq->type) { 1759 case PTP_CLK_REQ_PPS: 1760 clk_cfg.gpio_pin = PPS_PIN_INDEX; 1761 clk_cfg.period = NSEC_PER_SEC; 1762 clk_cfg.ena = !!on; 1763 1764 err = ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true); 1765 break; 1766 case PTP_CLK_REQ_EXTTS: 1767 err = ice_ptp_cfg_extts(pf, !!on, rq->extts.index, 1768 TIME_SYNC_PIN_INDEX, rq->extts.flags); 1769 break; 1770 default: 1771 return -EOPNOTSUPP; 1772 } 1773 1774 return err; 1775 } 1776 1777 /** 1778 * ice_ptp_gettimex64 - Get the time of the clock 1779 * @info: the driver's PTP info structure 1780 * @ts: timespec64 structure to hold the current time value 1781 * @sts: Optional parameter for holding a pair of system timestamps from 1782 * the system clock. Will be ignored if NULL is given. 
 *
 * Read the device clock and return the correct value in ns, after converting
 * it into a timespec struct.
 */
static int
ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
                   struct ptp_system_timestamp *sts)
{
        struct ice_pf *pf = ptp_info_to_pf(info);
        struct ice_hw *hw = &pf->hw;

        if (!ice_ptp_lock(hw)) {
                dev_err(ice_pf_to_dev(pf), "PTP failed to get time\n");
                return -EBUSY;
        }

        ice_ptp_read_time(pf, ts, sts);
        ice_ptp_unlock(hw);

        return 0;
}

/**
 * ice_ptp_settime64 - Set the time of the clock
 * @info: the driver's PTP info structure
 * @ts: timespec64 structure that holds the new time value
 *
 * Set the device clock to the user input value. The conversion from timespec
 * to ns happens in the write function.
 */
static int
ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
{
        struct ice_pf *pf = ptp_info_to_pf(info);
        struct timespec64 ts64 = *ts;
        struct ice_hw *hw = &pf->hw;
        int err;

        /* For Vernier mode, we need to recalibrate after new settime.
         * Start with disabling timestamp block
         */
        if (pf->ptp.port.link_up)
                ice_ptp_port_phy_stop(&pf->ptp.port);

        if (!ice_ptp_lock(hw)) {
                err = -EBUSY;
                goto exit;
        }

        /* Disable periodic outputs */
        ice_ptp_disable_all_clkout(pf);

        err = ice_ptp_write_init(pf, &ts64);
        ice_ptp_unlock(hw);

        if (!err)
                ice_ptp_reset_cached_phctime(pf);

        /* Reenable periodic outputs */
        ice_ptp_enable_all_clkout(pf);

        /* Recalibrate and re-enable timestamp blocks for E822/E823 */
        if (hw->phy_model == ICE_PHY_E82X)
                ice_ptp_restart_all_phy(pf);
exit:
        if (err) {
                dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
                return err;
        }

        return 0;
}

/**
 * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
 * @info: the driver's PTP info structure
 * @delta: Offset in nanoseconds to adjust the time by
 */
static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
{
        struct timespec64 now, then;
        int ret;

        then = ns_to_timespec64(delta);
        ret = ice_ptp_gettimex64(info, &now, NULL);
        if (ret)
                return ret;
        now = timespec64_add(now, then);

        return ice_ptp_settime64(info, (const struct timespec64 *)&now);
}

/**
 * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta
 * @info: the driver's PTP info structure
 * @delta: Offset in nanoseconds to adjust the time by
 */
static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
{
        struct ice_pf *pf = ptp_info_to_pf(info);
        struct ice_hw *hw = &pf->hw;
        struct device *dev;
        int err;

        dev = ice_pf_to_dev(pf);

        /* Hardware only supports atomic adjustments using signed 32-bit
         * integers. For any adjustment outside this range, perform
         * a non-atomic get->adjust->set flow.
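         *
         * For example (illustrative values only), delta = 3 * NSEC_PER_SEC
         * (+3 s) exceeds S32_MAX (about 2.147 s worth of nanoseconds) and
         * takes the non-atomic path below, while delta = -500000 (-0.5 ms)
         * is within range and is applied atomically.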
1892 */ 1893 if (delta > S32_MAX || delta < S32_MIN) { 1894 dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta); 1895 return ice_ptp_adjtime_nonatomic(info, delta); 1896 } 1897 1898 if (!ice_ptp_lock(hw)) { 1899 dev_err(dev, "PTP failed to acquire semaphore in adjtime\n"); 1900 return -EBUSY; 1901 } 1902 1903 /* Disable periodic outputs */ 1904 ice_ptp_disable_all_clkout(pf); 1905 1906 err = ice_ptp_write_adj(pf, delta); 1907 1908 /* Reenable periodic outputs */ 1909 ice_ptp_enable_all_clkout(pf); 1910 1911 ice_ptp_unlock(hw); 1912 1913 if (err) { 1914 dev_err(dev, "PTP failed to adjust time, err %d\n", err); 1915 return err; 1916 } 1917 1918 ice_ptp_reset_cached_phctime(pf); 1919 1920 return 0; 1921 } 1922 1923 #ifdef CONFIG_ICE_HWTS 1924 /** 1925 * ice_ptp_get_syncdevicetime - Get the cross time stamp info 1926 * @device: Current device time 1927 * @system: System counter value read synchronously with device time 1928 * @ctx: Context provided by timekeeping code 1929 * 1930 * Read device and system (ART) clock simultaneously and return the corrected 1931 * clock values in ns. 1932 */ 1933 static int 1934 ice_ptp_get_syncdevicetime(ktime_t *device, 1935 struct system_counterval_t *system, 1936 void *ctx) 1937 { 1938 struct ice_pf *pf = (struct ice_pf *)ctx; 1939 struct ice_hw *hw = &pf->hw; 1940 u32 hh_lock, hh_art_ctl; 1941 int i; 1942 1943 #define MAX_HH_HW_LOCK_TRIES 5 1944 #define MAX_HH_CTL_LOCK_TRIES 100 1945 1946 for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) { 1947 /* Get the HW lock */ 1948 hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); 1949 if (hh_lock & PFHH_SEM_BUSY_M) { 1950 usleep_range(10000, 15000); 1951 continue; 1952 } 1953 break; 1954 } 1955 if (hh_lock & PFHH_SEM_BUSY_M) { 1956 dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n"); 1957 return -EBUSY; 1958 } 1959 1960 /* Program cmd to master timer */ 1961 ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME); 1962 1963 /* Start the ART and device clock sync sequence */ 1964 hh_art_ctl = rd32(hw, GLHH_ART_CTL); 1965 hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M; 1966 wr32(hw, GLHH_ART_CTL, hh_art_ctl); 1967 1968 for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) { 1969 /* Wait for sync to complete */ 1970 hh_art_ctl = rd32(hw, GLHH_ART_CTL); 1971 if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) { 1972 udelay(1); 1973 continue; 1974 } else { 1975 u32 hh_ts_lo, hh_ts_hi, tmr_idx; 1976 u64 hh_ts; 1977 1978 tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; 1979 /* Read ART time */ 1980 hh_ts_lo = rd32(hw, GLHH_ART_TIME_L); 1981 hh_ts_hi = rd32(hw, GLHH_ART_TIME_H); 1982 hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo; 1983 *system = convert_art_ns_to_tsc(hh_ts); 1984 /* Read Device source clock time */ 1985 hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx)); 1986 hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx)); 1987 hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo; 1988 *device = ns_to_ktime(hh_ts); 1989 break; 1990 } 1991 } 1992 1993 /* Clear the master timer */ 1994 ice_ptp_src_cmd(hw, ICE_PTP_NOP); 1995 1996 /* Release HW lock */ 1997 hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); 1998 hh_lock = hh_lock & ~PFHH_SEM_BUSY_M; 1999 wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock); 2000 2001 if (i == MAX_HH_CTL_LOCK_TRIES) 2002 return -ETIMEDOUT; 2003 2004 return 0; 2005 } 2006 2007 /** 2008 * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp 2009 * @info: the driver's PTP info structure 2010 * @cts: The memory to fill the cross timestamp info 2011 * 2012 * Capture a cross timestamp between the ART and the 
device PTP hardware 2013 * clock. Fill the cross timestamp information and report it back to the 2014 * caller. 2015 * 2016 * This is only valid for E822 and E823 devices which have support for 2017 * generating the cross timestamp via PCIe PTM. 2018 * 2019 * In order to correctly correlate the ART timestamp back to the TSC time, the 2020 * CPU must have X86_FEATURE_TSC_KNOWN_FREQ. 2021 */ 2022 static int 2023 ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info, 2024 struct system_device_crosststamp *cts) 2025 { 2026 struct ice_pf *pf = ptp_info_to_pf(info); 2027 2028 return get_device_system_crosststamp(ice_ptp_get_syncdevicetime, 2029 pf, NULL, cts); 2030 } 2031 #endif /* CONFIG_ICE_HWTS */ 2032 2033 /** 2034 * ice_ptp_get_ts_config - ioctl interface to read the timestamping config 2035 * @pf: Board private structure 2036 * @ifr: ioctl data 2037 * 2038 * Copy the timestamping config to user buffer 2039 */ 2040 int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr) 2041 { 2042 struct hwtstamp_config *config; 2043 2044 if (!test_bit(ICE_FLAG_PTP, pf->flags)) 2045 return -EIO; 2046 2047 config = &pf->ptp.tstamp_config; 2048 2049 return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? 2050 -EFAULT : 0; 2051 } 2052 2053 /** 2054 * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode 2055 * @pf: Board private structure 2056 * @config: hwtstamp settings requested or saved 2057 */ 2058 static int 2059 ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config) 2060 { 2061 switch (config->tx_type) { 2062 case HWTSTAMP_TX_OFF: 2063 pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF; 2064 break; 2065 case HWTSTAMP_TX_ON: 2066 pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON; 2067 break; 2068 default: 2069 return -ERANGE; 2070 } 2071 2072 switch (config->rx_filter) { 2073 case HWTSTAMP_FILTER_NONE: 2074 pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; 2075 break; 2076 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2077 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2078 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2079 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2080 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2081 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2082 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2083 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2084 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2085 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2086 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2087 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2088 case HWTSTAMP_FILTER_NTP_ALL: 2089 case HWTSTAMP_FILTER_ALL: 2090 pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL; 2091 break; 2092 default: 2093 return -ERANGE; 2094 } 2095 2096 /* Immediately update the device timestamping mode */ 2097 ice_ptp_restore_timestamp_mode(pf); 2098 2099 return 0; 2100 } 2101 2102 /** 2103 * ice_ptp_set_ts_config - ioctl interface to control the timestamping 2104 * @pf: Board private structure 2105 * @ifr: ioctl data 2106 * 2107 * Get the user config and store it 2108 */ 2109 int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr) 2110 { 2111 struct hwtstamp_config config; 2112 int err; 2113 2114 if (!test_bit(ICE_FLAG_PTP, pf->flags)) 2115 return -EAGAIN; 2116 2117 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 2118 return -EFAULT; 2119 2120 err = ice_ptp_set_timestamp_mode(pf, &config); 2121 if (err) 2122 return err; 2123 2124 /* Return the actual configuration set */ 2125 config = pf->ptp.tstamp_config; 2126 2127 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
2128 -EFAULT : 0; 2129 } 2130 2131 /** 2132 * ice_ptp_rx_hwtstamp - Check for an Rx timestamp 2133 * @rx_ring: Ring to get the VSI info 2134 * @rx_desc: Receive descriptor 2135 * @skb: Particular skb to send timestamp with 2136 * 2137 * The driver receives a notification in the receive descriptor with timestamp. 2138 * The timestamp is in ns, so we must convert the result first. 2139 */ 2140 void 2141 ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring, 2142 union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) 2143 { 2144 struct skb_shared_hwtstamps *hwtstamps; 2145 u64 ts_ns, cached_time; 2146 u32 ts_high; 2147 2148 if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID)) 2149 return; 2150 2151 cached_time = READ_ONCE(rx_ring->cached_phctime); 2152 2153 /* Do not report a timestamp if we don't have a cached PHC time */ 2154 if (!cached_time) 2155 return; 2156 2157 /* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached 2158 * PHC value, rather than accessing the PF. This also allows us to 2159 * simply pass the upper 32bits of nanoseconds directly. Calling 2160 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these 2161 * bits itself. 2162 */ 2163 ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high); 2164 ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high); 2165 2166 hwtstamps = skb_hwtstamps(skb); 2167 memset(hwtstamps, 0, sizeof(*hwtstamps)); 2168 hwtstamps->hwtstamp = ns_to_ktime(ts_ns); 2169 } 2170 2171 /** 2172 * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins 2173 * @pf: pointer to the PF structure 2174 * @info: PTP clock info structure 2175 * 2176 * Disable the OS access to the SMA pins. Called to clear out the OS 2177 * indications of pin support when we fail to setup the E810-T SMA control 2178 * register. 2179 */ 2180 static void 2181 ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) 2182 { 2183 struct device *dev = ice_pf_to_dev(pf); 2184 2185 dev_warn(dev, "Failed to configure E810-T SMA pin control\n"); 2186 2187 info->enable = NULL; 2188 info->verify = NULL; 2189 info->n_pins = 0; 2190 info->n_ext_ts = 0; 2191 info->n_per_out = 0; 2192 } 2193 2194 /** 2195 * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins 2196 * @pf: pointer to the PF structure 2197 * @info: PTP clock info structure 2198 * 2199 * Finish setting up the SMA pins by allocating pin_config, and setting it up 2200 * according to the current status of the SMA. On failure, disable all of the 2201 * extended SMA pin support. 
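 *
 * Once the pins are published, user space can re-target them through the
 * standard PTP pin ioctl, which reaches ice_verify_pin_e810t() and the SMA
 * helpers above. A minimal sketch (the device path, pin index and channel
 * values are illustrative assumptions):
 *
 *	struct ptp_pin_desc desc = { 0 };
 *	int fd = open("/dev/ptp0", O_RDWR);
 *
 *	desc.index = 1;			// e.g. SMA1
 *	desc.func = PTP_PF_PEROUT;	// drive a periodic output on the pin
 *	desc.chan = 1;
 *	if (ioctl(fd, PTP_PIN_SETFUNC, &desc))
 *		perror("PTP_PIN_SETFUNC");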
 */
static void
ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	/* Allocate memory for kernel pins interface */
	info->pin_config = devm_kcalloc(dev, info->n_pins,
					sizeof(*info->pin_config), GFP_KERNEL);
	if (!info->pin_config) {
		ice_ptp_disable_sma_pins_e810t(pf, info);
		return;
	}

	/* Read current SMA status */
	err = ice_get_sma_config_e810t(&pf->hw, info->pin_config);
	if (err)
		ice_ptp_disable_sma_pins_e810t(pf, info);
}

/**
 * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
 * @pf: pointer to the PF instance
 * @info: PTP clock capabilities
 */
static void
ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
		info->n_ext_ts = N_EXT_TS_E810;
		info->n_per_out = N_PER_OUT_E810T;
		info->n_pins = NUM_PTP_PINS_E810T;
		info->verify = ice_verify_pin_e810t;

		/* Complete setup of the SMA pins */
		ice_ptp_setup_sma_pins_e810t(pf, info);
	} else if (ice_is_e810t(&pf->hw)) {
		info->n_ext_ts = N_EXT_TS_NO_SMA_E810T;
		info->n_per_out = N_PER_OUT_NO_SMA_E810T;
	} else {
		info->n_per_out = N_PER_OUT_E810;
		info->n_ext_ts = N_EXT_TS_E810;
	}
}

/**
 * ice_ptp_setup_pins_e823 - Setup PTP pins in sysfs
 * @pf: pointer to the PF instance
 * @info: PTP clock capabilities
 */
static void
ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info)
{
	info->pps = 1;
	info->n_per_out = 0;
	info->n_ext_ts = 1;
}

/**
 * ice_ptp_set_funcs_e82x - Set specialized functions for E82x support
 * @pf: Board private structure
 * @info: PTP info to fill
 *
 * Assign functions to the PTP capabilities structure for E82x devices.
 * Functions which operate across all device families should be set directly
 * in ice_ptp_set_caps. Only add functions here which are distinct for E82x
 * devices.
 */
static void
ice_ptp_set_funcs_e82x(struct ice_pf *pf, struct ptp_clock_info *info)
{
#ifdef CONFIG_ICE_HWTS
	if (boot_cpu_has(X86_FEATURE_ART) &&
	    boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
		info->getcrosststamp = ice_ptp_getcrosststamp_e82x;
#endif /* CONFIG_ICE_HWTS */
}

/**
 * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
 * @pf: Board private structure
 * @info: PTP info to fill
 *
 * Assign functions to the PTP capabilities structure for E810 devices.
 * Functions which operate across all device families should be set directly
 * in ice_ptp_set_caps. Only add functions here which are distinct for E810
 * devices.
 */
static void
ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
	info->enable = ice_ptp_gpio_enable_e810;
	ice_ptp_setup_pins_e810(pf, info);
}

/**
 * ice_ptp_set_funcs_e823 - Set specialized functions for E823 support
 * @pf: Board private structure
 * @info: PTP info to fill
 *
 * Assign functions to the PTP capabilities structure for E823 devices.
 * Functions which operate across all device families should be set directly
 * in ice_ptp_set_caps. Only add functions here which are distinct for E823
 * devices.
 */
static void
ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info)
{
	ice_ptp_set_funcs_e82x(pf, info);

	info->enable = ice_ptp_gpio_enable_e823;
	ice_ptp_setup_pins_e823(pf, info);
}

/**
 * ice_ptp_set_caps - Set PTP capabilities
 * @pf: Board private structure
 */
static void ice_ptp_set_caps(struct ice_pf *pf)
{
	struct ptp_clock_info *info = &pf->ptp.info;
	struct device *dev = ice_pf_to_dev(pf);

	snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
		 dev_driver_string(dev), dev_name(dev));
	info->owner = THIS_MODULE;
	info->max_adj = 100000000;
	info->adjtime = ice_ptp_adjtime;
	info->adjfine = ice_ptp_adjfine;
	info->gettimex64 = ice_ptp_gettimex64;
	info->settime64 = ice_ptp_settime64;

	if (ice_is_e810(&pf->hw))
		ice_ptp_set_funcs_e810(pf, info);
	else if (ice_is_e823(&pf->hw))
		ice_ptp_set_funcs_e823(pf, info);
	else
		ice_ptp_set_funcs_e82x(pf, info);
}

/**
 * ice_ptp_create_clock - Create PTP clock device for userspace
 * @pf: Board private structure
 *
 * This function creates a new PTP clock device if one does not already exist.
 * It returns an error if the clock cannot be created, but returns success if
 * a device already exists. It should be used by ice_ptp_init to create the
 * clock initially, and to prevent global resets from creating new clock
 * devices.
 */
static long ice_ptp_create_clock(struct ice_pf *pf)
{
	struct ptp_clock_info *info;
	struct device *dev;

	/* No need to create a clock device if we already have one */
	if (pf->ptp.clock)
		return 0;

	ice_ptp_set_caps(pf);

	info = &pf->ptp.info;
	dev = ice_pf_to_dev(pf);

	/* Attempt to register the clock before enabling the hardware. */
	pf->ptp.clock = ptp_clock_register(info, dev);
	if (IS_ERR(pf->ptp.clock)) {
		dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device");
		return PTR_ERR(pf->ptp.clock);
	}

	return 0;
}

/**
 * ice_ptp_request_ts - Request an available Tx timestamp index
 * @tx: the PTP Tx timestamp tracker to request from
 * @skb: the SKB to associate with this timestamp request
 */
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
{
	u8 idx;

	spin_lock(&tx->lock);

	/* Check that this tracker is accepting new timestamp requests */
	if (!ice_ptp_is_tx_tracker_up(tx)) {
		spin_unlock(&tx->lock);
		return -1;
	}

	/* Find and set the first available index */
	idx = find_first_zero_bit(tx->in_use, tx->len);
	if (idx < tx->len) {
		/* We got a valid index that no other thread could have set. Store
		 * a reference to the skb and the start time to allow discarding old
		 * requests.
		 */
		set_bit(idx, tx->in_use);
		clear_bit(idx, tx->stale);
		tx->tstamps[idx].start = jiffies;
		tx->tstamps[idx].skb = skb_get(skb);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		ice_trace(tx_tstamp_request, skb, idx);
	}

	spin_unlock(&tx->lock);

	/* return the appropriate PHY timestamp register index, -1 if no
	 * indexes were available.
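	 *
	 * A caller on the Tx hot path is expected to consume the result along
	 * these lines (a sketch only; the descriptor programming is
	 * paraphrased rather than the exact field layout):
	 *
	 *	s8 idx = ice_ptp_request_ts(&pf->ptp.port.tx, skb);
	 *
	 *	if (idx < 0) {
	 *		// no free slot: transmit without a hardware timestamp
	 *	} else {
	 *		// program idx into the Tx context descriptor so the
	 *		// PHY captures the transmit time in that register
	 *	}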
2412 */ 2413 if (idx >= tx->len) 2414 return -1; 2415 else 2416 return idx + tx->offset; 2417 } 2418 2419 /** 2420 * ice_ptp_process_ts - Process the PTP Tx timestamps 2421 * @pf: Board private structure 2422 * 2423 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx 2424 * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise. 2425 */ 2426 enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf) 2427 { 2428 switch (pf->ptp.tx_interrupt_mode) { 2429 case ICE_PTP_TX_INTERRUPT_NONE: 2430 /* This device has the clock owner handle timestamps for it */ 2431 return ICE_TX_TSTAMP_WORK_DONE; 2432 case ICE_PTP_TX_INTERRUPT_SELF: 2433 /* This device handles its own timestamps */ 2434 return ice_ptp_tx_tstamp(&pf->ptp.port.tx); 2435 case ICE_PTP_TX_INTERRUPT_ALL: 2436 /* This device handles timestamps for all ports */ 2437 return ice_ptp_tx_tstamp_owner(pf); 2438 default: 2439 WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n", 2440 pf->ptp.tx_interrupt_mode); 2441 return ICE_TX_TSTAMP_WORK_DONE; 2442 } 2443 } 2444 2445 /** 2446 * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timstamp interrupt 2447 * @pf: Board private structure 2448 * 2449 * The device PHY issues Tx timestamp interrupts to the driver for processing 2450 * timestamp data from the PHY. It will not interrupt again until all 2451 * current timestamp data is read. In rare circumstances, it is possible that 2452 * the driver fails to read all outstanding data. 2453 * 2454 * To avoid getting permanently stuck, periodically check if the PHY has 2455 * outstanding timestamp data. If so, trigger an interrupt from software to 2456 * process this data. 2457 */ 2458 static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf) 2459 { 2460 struct device *dev = ice_pf_to_dev(pf); 2461 struct ice_hw *hw = &pf->hw; 2462 bool trigger_oicr = false; 2463 unsigned int i; 2464 2465 if (ice_is_e810(hw)) 2466 return; 2467 2468 if (!ice_pf_src_tmr_owned(pf)) 2469 return; 2470 2471 for (i = 0; i < ICE_MAX_QUAD; i++) { 2472 u64 tstamp_ready; 2473 int err; 2474 2475 err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready); 2476 if (!err && tstamp_ready) { 2477 trigger_oicr = true; 2478 break; 2479 } 2480 } 2481 2482 if (trigger_oicr) { 2483 /* Trigger a software interrupt, to ensure this data 2484 * gets processed. 2485 */ 2486 dev_dbg(dev, "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n"); 2487 2488 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); 2489 ice_flush(hw); 2490 } 2491 } 2492 2493 static void ice_ptp_periodic_work(struct kthread_work *work) 2494 { 2495 struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work); 2496 struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp); 2497 int err; 2498 2499 if (!test_bit(ICE_FLAG_PTP, pf->flags)) 2500 return; 2501 2502 err = ice_ptp_update_cached_phctime(pf); 2503 2504 ice_ptp_maybe_trigger_tx_interrupt(pf); 2505 2506 /* Run twice a second or reschedule if phc update failed */ 2507 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 2508 msecs_to_jiffies(err ? 
10 : 500)); 2509 } 2510 2511 /** 2512 * ice_ptp_reset - Initialize PTP hardware clock support after reset 2513 * @pf: Board private structure 2514 */ 2515 void ice_ptp_reset(struct ice_pf *pf) 2516 { 2517 struct ice_ptp *ptp = &pf->ptp; 2518 struct ice_hw *hw = &pf->hw; 2519 struct timespec64 ts; 2520 int err, itr = 1; 2521 u64 time_diff; 2522 2523 if (test_bit(ICE_PFR_REQ, pf->state) || 2524 !ice_pf_src_tmr_owned(pf)) 2525 goto pfr; 2526 2527 err = ice_ptp_init_phc(hw); 2528 if (err) 2529 goto err; 2530 2531 /* Acquire the global hardware lock */ 2532 if (!ice_ptp_lock(hw)) { 2533 err = -EBUSY; 2534 goto err; 2535 } 2536 2537 /* Write the increment time value to PHY and LAN */ 2538 err = ice_ptp_write_incval(hw, ice_base_incval(pf)); 2539 if (err) { 2540 ice_ptp_unlock(hw); 2541 goto err; 2542 } 2543 2544 /* Write the initial Time value to PHY and LAN using the cached PHC 2545 * time before the reset and time difference between stopping and 2546 * starting the clock. 2547 */ 2548 if (ptp->cached_phc_time) { 2549 time_diff = ktime_get_real_ns() - ptp->reset_time; 2550 ts = ns_to_timespec64(ptp->cached_phc_time + time_diff); 2551 } else { 2552 ts = ktime_to_timespec64(ktime_get_real()); 2553 } 2554 err = ice_ptp_write_init(pf, &ts); 2555 if (err) { 2556 ice_ptp_unlock(hw); 2557 goto err; 2558 } 2559 2560 /* Release the global hardware lock */ 2561 ice_ptp_unlock(hw); 2562 2563 if (!ice_is_e810(hw)) { 2564 /* Enable quad interrupts */ 2565 err = ice_ptp_tx_ena_intr(pf, true, itr); 2566 if (err) 2567 goto err; 2568 } 2569 2570 pfr: 2571 /* Init Tx structures */ 2572 if (ice_is_e810(&pf->hw)) { 2573 err = ice_ptp_init_tx_e810(pf, &ptp->port.tx); 2574 } else { 2575 kthread_init_delayed_work(&ptp->port.ov_work, 2576 ice_ptp_wait_for_offsets); 2577 err = ice_ptp_init_tx_e82x(pf, &ptp->port.tx, 2578 ptp->port.port_num); 2579 } 2580 if (err) 2581 goto err; 2582 2583 set_bit(ICE_FLAG_PTP, pf->flags); 2584 2585 /* Restart the PHY timestamping block */ 2586 if (!test_bit(ICE_PFR_REQ, pf->state) && 2587 ice_pf_src_tmr_owned(pf)) 2588 ice_ptp_restart_all_phy(pf); 2589 2590 /* Start periodic work going */ 2591 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0); 2592 2593 dev_info(ice_pf_to_dev(pf), "PTP reset successful\n"); 2594 return; 2595 2596 err: 2597 dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err); 2598 } 2599 2600 /** 2601 * ice_ptp_aux_dev_to_aux_pf - Get auxiliary PF handle for the auxiliary device 2602 * @aux_dev: auxiliary device to get the auxiliary PF for 2603 */ 2604 static struct ice_pf * 2605 ice_ptp_aux_dev_to_aux_pf(struct auxiliary_device *aux_dev) 2606 { 2607 struct ice_ptp_port *aux_port; 2608 struct ice_ptp *aux_ptp; 2609 2610 aux_port = container_of(aux_dev, struct ice_ptp_port, aux_dev); 2611 aux_ptp = container_of(aux_port, struct ice_ptp, port); 2612 2613 return container_of(aux_ptp, struct ice_pf, ptp); 2614 } 2615 2616 /** 2617 * ice_ptp_aux_dev_to_owner_pf - Get PF handle for the auxiliary device 2618 * @aux_dev: auxiliary device to get the PF for 2619 */ 2620 static struct ice_pf * 2621 ice_ptp_aux_dev_to_owner_pf(struct auxiliary_device *aux_dev) 2622 { 2623 struct ice_ptp_port_owner *ports_owner; 2624 struct auxiliary_driver *aux_drv; 2625 struct ice_ptp *owner_ptp; 2626 2627 if (!aux_dev->dev.driver) 2628 return NULL; 2629 2630 aux_drv = to_auxiliary_drv(aux_dev->dev.driver); 2631 ports_owner = container_of(aux_drv, struct ice_ptp_port_owner, 2632 aux_driver); 2633 owner_ptp = container_of(ports_owner, struct ice_ptp, ports_owner); 2634 return 
container_of(owner_ptp, struct ice_pf, ptp); 2635 } 2636 2637 /** 2638 * ice_ptp_auxbus_probe - Probe auxiliary devices 2639 * @aux_dev: PF's auxiliary device 2640 * @id: Auxiliary device ID 2641 */ 2642 static int ice_ptp_auxbus_probe(struct auxiliary_device *aux_dev, 2643 const struct auxiliary_device_id *id) 2644 { 2645 struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev); 2646 struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev); 2647 2648 if (WARN_ON(!owner_pf)) 2649 return -ENODEV; 2650 2651 INIT_LIST_HEAD(&aux_pf->ptp.port.list_member); 2652 mutex_lock(&owner_pf->ptp.ports_owner.lock); 2653 list_add(&aux_pf->ptp.port.list_member, 2654 &owner_pf->ptp.ports_owner.ports); 2655 mutex_unlock(&owner_pf->ptp.ports_owner.lock); 2656 2657 return 0; 2658 } 2659 2660 /** 2661 * ice_ptp_auxbus_remove - Remove auxiliary devices from the bus 2662 * @aux_dev: PF's auxiliary device 2663 */ 2664 static void ice_ptp_auxbus_remove(struct auxiliary_device *aux_dev) 2665 { 2666 struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev); 2667 struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev); 2668 2669 mutex_lock(&owner_pf->ptp.ports_owner.lock); 2670 list_del(&aux_pf->ptp.port.list_member); 2671 mutex_unlock(&owner_pf->ptp.ports_owner.lock); 2672 } 2673 2674 /** 2675 * ice_ptp_auxbus_shutdown 2676 * @aux_dev: PF's auxiliary device 2677 */ 2678 static void ice_ptp_auxbus_shutdown(struct auxiliary_device *aux_dev) 2679 { 2680 /* Doing nothing here, but handle to auxbus driver must be satisfied */ 2681 } 2682 2683 /** 2684 * ice_ptp_auxbus_suspend 2685 * @aux_dev: PF's auxiliary device 2686 * @state: power management state indicator 2687 */ 2688 static int 2689 ice_ptp_auxbus_suspend(struct auxiliary_device *aux_dev, pm_message_t state) 2690 { 2691 /* Doing nothing here, but handle to auxbus driver must be satisfied */ 2692 return 0; 2693 } 2694 2695 /** 2696 * ice_ptp_auxbus_resume 2697 * @aux_dev: PF's auxiliary device 2698 */ 2699 static int ice_ptp_auxbus_resume(struct auxiliary_device *aux_dev) 2700 { 2701 /* Doing nothing here, but handle to auxbus driver must be satisfied */ 2702 return 0; 2703 } 2704 2705 /** 2706 * ice_ptp_auxbus_create_id_table - Create auxiliary device ID table 2707 * @pf: Board private structure 2708 * @name: auxiliary bus driver name 2709 */ 2710 static struct auxiliary_device_id * 2711 ice_ptp_auxbus_create_id_table(struct ice_pf *pf, const char *name) 2712 { 2713 struct auxiliary_device_id *ids; 2714 2715 /* Second id left empty to terminate the array */ 2716 ids = devm_kcalloc(ice_pf_to_dev(pf), 2, 2717 sizeof(struct auxiliary_device_id), GFP_KERNEL); 2718 if (!ids) 2719 return NULL; 2720 2721 snprintf(ids[0].name, sizeof(ids[0].name), "ice.%s", name); 2722 2723 return ids; 2724 } 2725 2726 /** 2727 * ice_ptp_register_auxbus_driver - Register PTP auxiliary bus driver 2728 * @pf: Board private structure 2729 */ 2730 static int ice_ptp_register_auxbus_driver(struct ice_pf *pf) 2731 { 2732 struct auxiliary_driver *aux_driver; 2733 struct ice_ptp *ptp; 2734 struct device *dev; 2735 char *name; 2736 int err; 2737 2738 ptp = &pf->ptp; 2739 dev = ice_pf_to_dev(pf); 2740 aux_driver = &ptp->ports_owner.aux_driver; 2741 INIT_LIST_HEAD(&ptp->ports_owner.ports); 2742 mutex_init(&ptp->ports_owner.lock); 2743 name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u", 2744 pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn), 2745 ice_get_ptp_src_clock_index(&pf->hw)); 2746 2747 aux_driver->name = name; 2748 aux_driver->shutdown = ice_ptp_auxbus_shutdown; 
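	/* The auxiliary bus matches devices to drivers by name: the device
	 * name is prefixed with the module name and suffixed with the id,
	 * and a driver id_table entry must match everything before the id.
	 * With the id table built below, the pairing looks roughly like this
	 * (bus/slot/clock numbers are illustrative assumptions):
	 *
	 *	device: "ice.ptp_aux_dev_3_0_clk0.1"
	 *	driver: ids[0].name = "ice.ptp_aux_dev_3_0_clk0"
	 *
	 * so only ports sharing this PF's source clock bind to this driver
	 * and land on the ports_owner.ports list.
	 */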
2749 aux_driver->suspend = ice_ptp_auxbus_suspend; 2750 aux_driver->remove = ice_ptp_auxbus_remove; 2751 aux_driver->resume = ice_ptp_auxbus_resume; 2752 aux_driver->probe = ice_ptp_auxbus_probe; 2753 aux_driver->id_table = ice_ptp_auxbus_create_id_table(pf, name); 2754 if (!aux_driver->id_table) 2755 return -ENOMEM; 2756 2757 err = auxiliary_driver_register(aux_driver); 2758 if (err) { 2759 devm_kfree(dev, aux_driver->id_table); 2760 dev_err(dev, "Failed registering aux_driver, name <%s>\n", 2761 name); 2762 } 2763 2764 return err; 2765 } 2766 2767 /** 2768 * ice_ptp_unregister_auxbus_driver - Unregister PTP auxiliary bus driver 2769 * @pf: Board private structure 2770 */ 2771 static void ice_ptp_unregister_auxbus_driver(struct ice_pf *pf) 2772 { 2773 struct auxiliary_driver *aux_driver = &pf->ptp.ports_owner.aux_driver; 2774 2775 auxiliary_driver_unregister(aux_driver); 2776 devm_kfree(ice_pf_to_dev(pf), aux_driver->id_table); 2777 2778 mutex_destroy(&pf->ptp.ports_owner.lock); 2779 } 2780 2781 /** 2782 * ice_ptp_clock_index - Get the PTP clock index for this device 2783 * @pf: Board private structure 2784 * 2785 * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock 2786 * is associated. 2787 */ 2788 int ice_ptp_clock_index(struct ice_pf *pf) 2789 { 2790 struct auxiliary_device *aux_dev; 2791 struct ice_pf *owner_pf; 2792 struct ptp_clock *clock; 2793 2794 aux_dev = &pf->ptp.port.aux_dev; 2795 owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev); 2796 if (!owner_pf) 2797 return -1; 2798 clock = owner_pf->ptp.clock; 2799 2800 return clock ? ptp_clock_index(clock) : -1; 2801 } 2802 2803 /** 2804 * ice_ptp_prepare_for_reset - Prepare PTP for reset 2805 * @pf: Board private structure 2806 */ 2807 void ice_ptp_prepare_for_reset(struct ice_pf *pf) 2808 { 2809 struct ice_ptp *ptp = &pf->ptp; 2810 u8 src_tmr; 2811 2812 clear_bit(ICE_FLAG_PTP, pf->flags); 2813 2814 /* Disable timestamping for both Tx and Rx */ 2815 ice_ptp_disable_timestamp_mode(pf); 2816 2817 kthread_cancel_delayed_work_sync(&ptp->work); 2818 2819 if (test_bit(ICE_PFR_REQ, pf->state)) 2820 return; 2821 2822 ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); 2823 2824 /* Disable periodic outputs */ 2825 ice_ptp_disable_all_clkout(pf); 2826 2827 src_tmr = ice_get_ptp_src_clock_index(&pf->hw); 2828 2829 /* Disable source clock */ 2830 wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M); 2831 2832 /* Acquire PHC and system timer to restore after reset */ 2833 ptp->reset_time = ktime_get_real_ns(); 2834 } 2835 2836 /** 2837 * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device 2838 * @pf: Board private structure 2839 * 2840 * Setup and initialize a PTP clock device that represents the device hardware 2841 * clock. Save the clock index for other functions connected to the same 2842 * hardware resource. 
2843 */ 2844 static int ice_ptp_init_owner(struct ice_pf *pf) 2845 { 2846 struct ice_hw *hw = &pf->hw; 2847 struct timespec64 ts; 2848 int err, itr = 1; 2849 2850 err = ice_ptp_init_phc(hw); 2851 if (err) { 2852 dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n", 2853 err); 2854 return err; 2855 } 2856 2857 /* Acquire the global hardware lock */ 2858 if (!ice_ptp_lock(hw)) { 2859 err = -EBUSY; 2860 goto err_exit; 2861 } 2862 2863 /* Write the increment time value to PHY and LAN */ 2864 err = ice_ptp_write_incval(hw, ice_base_incval(pf)); 2865 if (err) { 2866 ice_ptp_unlock(hw); 2867 goto err_exit; 2868 } 2869 2870 ts = ktime_to_timespec64(ktime_get_real()); 2871 /* Write the initial Time value to PHY and LAN */ 2872 err = ice_ptp_write_init(pf, &ts); 2873 if (err) { 2874 ice_ptp_unlock(hw); 2875 goto err_exit; 2876 } 2877 2878 /* Release the global hardware lock */ 2879 ice_ptp_unlock(hw); 2880 2881 if (!ice_is_e810(hw)) { 2882 /* Enable quad interrupts */ 2883 err = ice_ptp_tx_ena_intr(pf, true, itr); 2884 if (err) 2885 goto err_exit; 2886 } 2887 2888 /* Ensure we have a clock device */ 2889 err = ice_ptp_create_clock(pf); 2890 if (err) 2891 goto err_clk; 2892 2893 err = ice_ptp_register_auxbus_driver(pf); 2894 if (err) { 2895 dev_err(ice_pf_to_dev(pf), "Failed to register PTP auxbus driver"); 2896 goto err_aux; 2897 } 2898 2899 return 0; 2900 err_aux: 2901 ptp_clock_unregister(pf->ptp.clock); 2902 err_clk: 2903 pf->ptp.clock = NULL; 2904 err_exit: 2905 return err; 2906 } 2907 2908 /** 2909 * ice_ptp_init_work - Initialize PTP work threads 2910 * @pf: Board private structure 2911 * @ptp: PF PTP structure 2912 */ 2913 static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp) 2914 { 2915 struct kthread_worker *kworker; 2916 2917 /* Initialize work functions */ 2918 kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work); 2919 2920 /* Allocate a kworker for handling work required for the ports 2921 * connected to the PTP hardware clock. 
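	 *
	 * The delayed work queued on this worker (ice_ptp_periodic_work)
	 * refreshes the cached PHC time that the Rx and Tx paths use to
	 * extend truncated hardware timestamps. Rough arithmetic for why the
	 * 500 ms cadence is sufficient (a sketch, not an exact bound):
	 *
	 *	2^32 ns ~= 4.295 s	(span of a 32-bit nanosecond field)
	 *	a 0.5 s refresh keeps the cached value well inside the
	 *	half-span (~2.1 s) needed to disambiguate such a timestamp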
	 */
	kworker = kthread_create_worker(0, "ice-ptp-%s",
					dev_name(ice_pf_to_dev(pf)));
	if (IS_ERR(kworker))
		return PTR_ERR(kworker);

	ptp->kworker = kworker;

	/* Start periodic work going */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);

	return 0;
}

/**
 * ice_ptp_init_port - Initialize PTP port structure
 * @pf: Board private structure
 * @ptp_port: PTP port structure
 */
static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
{
	struct ice_hw *hw = &pf->hw;

	mutex_init(&ptp_port->ps_lock);

	switch (hw->phy_model) {
	case ICE_PHY_E810:
		return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
	case ICE_PHY_E82X:
		kthread_init_delayed_work(&ptp_port->ov_work,
					  ice_ptp_wait_for_offsets);

		return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
					    ptp_port->port_num);
	default:
		return -ENODEV;
	}
}

/**
 * ice_ptp_release_auxbus_device - Release the PTP auxiliary bus device
 * @dev: device that utilizes the auxbus
 */
static void ice_ptp_release_auxbus_device(struct device *dev)
{
	/* Doing nothing here, but handle to auxbus device must be satisfied */
}

/**
 * ice_ptp_create_auxbus_device - Create PTP auxiliary bus device
 * @pf: Board private structure
 */
static int ice_ptp_create_auxbus_device(struct ice_pf *pf)
{
	struct auxiliary_device *aux_dev;
	struct ice_ptp *ptp;
	struct device *dev;
	char *name;
	int err;
	u32 id;

	ptp = &pf->ptp;
	id = ptp->port.port_num;
	dev = ice_pf_to_dev(pf);

	aux_dev = &ptp->port.aux_dev;

	name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
			      pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
			      ice_get_ptp_src_clock_index(&pf->hw));

	aux_dev->name = name;
	aux_dev->id = id;
	aux_dev->dev.release = ice_ptp_release_auxbus_device;
	aux_dev->dev.parent = dev;

	err = auxiliary_device_init(aux_dev);
	if (err)
		goto aux_err;

	err = auxiliary_device_add(aux_dev);
	if (err) {
		auxiliary_device_uninit(aux_dev);
		goto aux_err;
	}

	return 0;
aux_err:
	dev_err(dev, "Failed to create PTP auxiliary bus device <%s>\n", name);
	devm_kfree(dev, name);
	return err;
}

/**
 * ice_ptp_remove_auxbus_device - Remove PTP auxiliary bus device
 * @pf: Board private structure
 */
static void ice_ptp_remove_auxbus_device(struct ice_pf *pf)
{
	struct auxiliary_device *aux_dev = &pf->ptp.port.aux_dev;

	auxiliary_device_delete(aux_dev);
	auxiliary_device_uninit(aux_dev);

	memset(aux_dev, 0, sizeof(*aux_dev));
}

/**
 * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode
 * @pf: Board private structure
 *
 * Initialize the Tx timestamp interrupt mode for this device. For most device
 * types, each PF processes the interrupt and manages its own timestamps. For
 * E822-based devices, only the clock owner processes the timestamps. Other
 * PFs disable the interrupt and do not process their own timestamps.
 */
static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
{
	switch (pf->hw.phy_model) {
	case ICE_PHY_E82X:
		/* E822 based PHY has the clock owner process the interrupt
		 * for all ports.
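		 *
		 * For example, on an E822 adapter where PF0 owns the source
		 * timer (the PF numbering is an illustrative assumption):
		 *
		 *	PF0: ICE_PTP_TX_INTERRUPT_ALL - ice_ptp_process_ts()
		 *	     completes timestamps for every port
		 *	PF1: ICE_PTP_TX_INTERRUPT_NONE - relies on PF0 and
		 *	     returns ICE_TX_TSTAMP_WORK_DONE immediately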
3044 */ 3045 if (ice_pf_src_tmr_owned(pf)) 3046 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL; 3047 else 3048 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE; 3049 break; 3050 default: 3051 /* other PHY types handle their own Tx interrupt */ 3052 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF; 3053 } 3054 } 3055 3056 /** 3057 * ice_ptp_init - Initialize PTP hardware clock support 3058 * @pf: Board private structure 3059 * 3060 * Set up the device for interacting with the PTP hardware clock for all 3061 * functions, both the function that owns the clock hardware, and the 3062 * functions connected to the clock hardware. 3063 * 3064 * The clock owner will allocate and register a ptp_clock with the 3065 * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work 3066 * items used for asynchronous work such as Tx timestamps and periodic work. 3067 */ 3068 void ice_ptp_init(struct ice_pf *pf) 3069 { 3070 struct ice_ptp *ptp = &pf->ptp; 3071 struct ice_hw *hw = &pf->hw; 3072 int err; 3073 3074 ice_ptp_init_phy_model(hw); 3075 3076 ice_ptp_init_tx_interrupt_mode(pf); 3077 3078 /* If this function owns the clock hardware, it must allocate and 3079 * configure the PTP clock device to represent it. 3080 */ 3081 if (ice_pf_src_tmr_owned(pf)) { 3082 err = ice_ptp_init_owner(pf); 3083 if (err) 3084 goto err; 3085 } 3086 3087 ptp->port.port_num = hw->pf_id; 3088 err = ice_ptp_init_port(pf, &ptp->port); 3089 if (err) 3090 goto err; 3091 3092 /* Start the PHY timestamping block */ 3093 ice_ptp_reset_phy_timestamping(pf); 3094 3095 /* Configure initial Tx interrupt settings */ 3096 ice_ptp_cfg_tx_interrupt(pf); 3097 3098 set_bit(ICE_FLAG_PTP, pf->flags); 3099 err = ice_ptp_init_work(pf, ptp); 3100 if (err) 3101 goto err; 3102 3103 err = ice_ptp_create_auxbus_device(pf); 3104 if (err) 3105 goto err; 3106 3107 dev_info(ice_pf_to_dev(pf), "PTP init successful\n"); 3108 return; 3109 3110 err: 3111 /* If we registered a PTP clock, release it */ 3112 if (pf->ptp.clock) { 3113 ptp_clock_unregister(ptp->clock); 3114 pf->ptp.clock = NULL; 3115 } 3116 clear_bit(ICE_FLAG_PTP, pf->flags); 3117 dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err); 3118 } 3119 3120 /** 3121 * ice_ptp_release - Disable the driver/HW support and unregister the clock 3122 * @pf: Board private structure 3123 * 3124 * This function handles the cleanup work required from the initialization by 3125 * clearing out the important information and unregistering the clock 3126 */ 3127 void ice_ptp_release(struct ice_pf *pf) 3128 { 3129 if (!test_bit(ICE_FLAG_PTP, pf->flags)) 3130 return; 3131 3132 /* Disable timestamping for both Tx and Rx */ 3133 ice_ptp_disable_timestamp_mode(pf); 3134 3135 ice_ptp_remove_auxbus_device(pf); 3136 3137 ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); 3138 3139 clear_bit(ICE_FLAG_PTP, pf->flags); 3140 3141 kthread_cancel_delayed_work_sync(&pf->ptp.work); 3142 3143 ice_ptp_port_phy_stop(&pf->ptp.port); 3144 mutex_destroy(&pf->ptp.port.ps_lock); 3145 if (pf->ptp.kworker) { 3146 kthread_destroy_worker(pf->ptp.kworker); 3147 pf->ptp.kworker = NULL; 3148 } 3149 3150 if (!pf->ptp.clock) 3151 return; 3152 3153 /* Disable periodic outputs */ 3154 ice_ptp_disable_all_clkout(pf); 3155 3156 ptp_clock_unregister(pf->ptp.clock); 3157 pf->ptp.clock = NULL; 3158 3159 ice_ptp_unregister_auxbus_driver(pf); 3160 3161 dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n"); 3162 } 3163
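/*
 * Example (user space, illustrative only): the clock registered by this file
 * is exposed as /dev/ptp<N>, where N matches ice_ptp_clock_index() and the
 * PHC index reported through ethtool's timestamping info for the netdev. A
 * minimal reader, assuming the device node path and the usual
 * FD_TO_CLOCKID() helper from the kernel's testptp tool:
 *
 *	int fd = open("/dev/ptp0", O_RDONLY);
 *	struct timespec ts;
 *
 *	if (fd >= 0 && !clock_gettime(FD_TO_CLOCKID(fd), &ts))
 *		printf("PHC time: %lld.%09ld\n",
 *		       (long long)ts.tv_sec, ts.tv_nsec);
 */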