// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_trace.h"
#include "ice_cgu_regs.h"

static const char ice_pin_names[][64] = {
	"SDP0",
	"SDP1",
	"SDP2",
	"SDP3",
	"TIME_SYNC",
	"1PPS"
};

static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = {
	/* name, gpio */
	{ TIME_SYNC, { 4, -1 }},
	{ ONE_PPS,   { -1, 5 }},
};

static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = {
	/* name, gpio */
	{ SDP0,      { 0, 0 }},
	{ SDP1,      { 1, 1 }},
	{ SDP2,      { 2, 2 }},
	{ SDP3,      { 3, 3 }},
	{ TIME_SYNC, { 4, -1 }},
	{ ONE_PPS,   { -1, 5 }},
};

static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = {
	/* name, gpio */
	{ SDP0,    { 0, 0 }},
	{ SDP1,    { 1, 1 }},
	{ SDP2,    { 2, 2 }},
	{ SDP3,    { 3, 3 }},
	{ ONE_PPS, { -1, 5 }},
};

static const char ice_pin_names_nvm[][64] = {
	"GNSS",
	"SMA1",
	"U.FL1",
	"SMA2",
	"U.FL2",
};

static const struct ice_ptp_pin_desc ice_pin_desc_e810_sma[] = {
	/* name, gpio */
	{ GNSS, { 1, -1 }},
	{ SMA1, { 1, 0 }},
	{ UFL1, { -1, 0 }},
	{ SMA2, { 3, 2 }},
	{ UFL2, { 3, -1 }},
};

static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf)
{
	return !pf->adapter ? NULL : pf->adapter->ctrl_pf;
}

static struct ice_ptp *ice_get_ctrl_ptp(struct ice_pf *pf)
{
	struct ice_pf *ctrl_pf = ice_get_ctrl_pf(pf);

	return !ctrl_pf ? NULL : &ctrl_pf->ptp;
}

/**
 * ice_ptp_find_pin_idx - Find pin index in ptp_pin_desc
 * @pf: Board private structure
 * @func: Pin function
 * @chan: GPIO channel
 *
 * Return: positive pin number when pin is present, -1 otherwise
 */
static int ice_ptp_find_pin_idx(struct ice_pf *pf, enum ptp_pin_function func,
				unsigned int chan)
{
	const struct ptp_clock_info *info = &pf->ptp.info;
	int i;

	for (i = 0; i < info->n_pins; i++) {
		if (info->pin_config[i].func == func &&
		    info->pin_config[i].chan == chan)
			return i;
	}

	return -1;
}

/**
 * ice_ptp_update_sma_data - update SMA pins data according to pins setup
 * @pf: Board private structure
 * @sma_pins: parsed SMA pins status
 * @data: SMA data to update
 */
static void ice_ptp_update_sma_data(struct ice_pf *pf, unsigned int sma_pins[],
				    u8 *data)
{
	const char *state1, *state2;

	/* Set the right state based on the desired configuration.
	 * When bit is set, functionality is disabled.
	 */
	*data &= ~ICE_ALL_SMA_MASK;
	if (!sma_pins[UFL1 - 1]) {
		if (sma_pins[SMA1 - 1] == PTP_PF_EXTTS) {
			state1 = "SMA1 Rx, U.FL1 disabled";
			*data |= ICE_SMA1_TX_EN;
		} else if (sma_pins[SMA1 - 1] == PTP_PF_PEROUT) {
			state1 = "SMA1 Tx, U.FL1 disabled";
			*data |= ICE_SMA1_DIR_EN;
		} else {
			state1 = "SMA1 disabled, U.FL1 disabled";
			*data |= ICE_SMA1_MASK;
		}
	} else {
		/* U.FL1 Tx will always enable SMA1 Rx */
		state1 = "SMA1 Rx, U.FL1 Tx";
	}

	if (!sma_pins[UFL2 - 1]) {
		if (sma_pins[SMA2 - 1] == PTP_PF_EXTTS) {
			state2 = "SMA2 Rx, U.FL2 disabled";
			*data |= ICE_SMA2_TX_EN | ICE_SMA2_UFL2_RX_DIS;
		} else if (sma_pins[SMA2 - 1] == PTP_PF_PEROUT) {
			state2 = "SMA2 Tx, U.FL2 disabled";
			*data |= ICE_SMA2_DIR_EN | ICE_SMA2_UFL2_RX_DIS;
		} else {
			state2 = "SMA2 disabled, U.FL2 disabled";
			*data |= ICE_SMA2_MASK;
		}
	} else {
		if (!sma_pins[SMA2 - 1]) {
			state2 = "SMA2 disabled, U.FL2 Rx";
			*data |= ICE_SMA2_DIR_EN | ICE_SMA2_TX_EN;
		} else {
			state2 = "SMA2 Tx, U.FL2 Rx";
			*data |= ICE_SMA2_DIR_EN;
		}
	}

	dev_dbg(ice_pf_to_dev(pf), "%s, %s\n", state1, state2);
}

/**
 * ice_ptp_set_sma_cfg - set the configuration of the SMA control logic
 * @pf: Board private structure
 *
 * Return: 0 on success, negative error code otherwise
 */
static int ice_ptp_set_sma_cfg(struct ice_pf *pf)
{
	const struct ice_ptp_pin_desc *ice_pins = pf->ptp.ice_pin_desc;
	struct ptp_pin_desc *pins = pf->ptp.pin_desc;
	unsigned int sma_pins[ICE_SMA_PINS_NUM] = {};
	int err;
	u8 data;

	/* Read initial pin state value */
	err = ice_read_sma_ctrl(&pf->hw, &data);
	if (err)
		return err;

	/* Get SMA/U.FL pins states */
	for (int i = 0; i < pf->ptp.info.n_pins; i++)
		if (pins[i].func) {
			int name_idx = ice_pins[i].name_idx;

			switch (name_idx) {
			case SMA1:
			case UFL1:
			case SMA2:
			case UFL2:
				sma_pins[name_idx - 1] = pins[i].func;
				break;
			default:
				continue;
			}
		}

	ice_ptp_update_sma_data(pf, sma_pins, &data);
	return ice_write_sma_ctrl(&pf->hw, data);
}

/**
 * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
 * @pf: Board private structure
 *
 * Program the device to respond appropriately to the Tx timestamp interrupt
 * cause.
 */
static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	bool enable;
	u32 val;

	switch (pf->ptp.tx_interrupt_mode) {
	case ICE_PTP_TX_INTERRUPT_ALL:
		/* React to interrupts across all quads. */
		wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
		enable = true;
		break;
	case ICE_PTP_TX_INTERRUPT_NONE:
		/* Do not react to interrupts on any quad.
		 */
		wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
		enable = false;
		break;
	case ICE_PTP_TX_INTERRUPT_SELF:
	default:
		enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON;
		break;
	}

	/* Configure the Tx timestamp interrupt */
	val = rd32(hw, PFINT_OICR_ENA);
	if (enable)
		val |= PFINT_OICR_TSYN_TX_M;
	else
		val &= ~PFINT_OICR_TSYN_TX_M;
	wr32(hw, PFINT_OICR_ENA, val);
}

/**
 * ice_set_rx_tstamp - Enable or disable Rx timestamping
 * @pf: The PF pointer to search in
 * @on: bool value for whether timestamps are enabled or disabled
 */
static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
{
	struct ice_vsi *vsi;
	u16 i;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->rx_rings)
		return;

	/* Set the timestamp flag for all the Rx rings */
	ice_for_each_rxq(vsi, i) {
		if (!vsi->rx_rings[i])
			continue;
		vsi->rx_rings[i]->ptp_rx = on;
	}
}

/**
 * ice_ptp_disable_timestamp_mode - Disable current timestamp mode
 * @pf: Board private structure
 *
 * Called during preparation for reset to temporarily disable timestamping on
 * the device. Called during remove to disable timestamping while cleaning up
 * driver resources.
 */
static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 val;

	val = rd32(hw, PFINT_OICR_ENA);
	val &= ~PFINT_OICR_TSYN_TX_M;
	wr32(hw, PFINT_OICR_ENA, val);

	ice_set_rx_tstamp(pf, false);
}

/**
 * ice_ptp_restore_timestamp_mode - Restore timestamp configuration
 * @pf: Board private structure
 *
 * Called at the end of rebuild to restore timestamp configuration after
 * a device reset.
 */
void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	bool enable_rx;

	ice_ptp_cfg_tx_interrupt(pf);

	enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
	ice_set_rx_tstamp(pf, enable_rx);

	/* Trigger an immediate software interrupt to ensure that timestamps
	 * which occurred during reset are handled now.
	 */
	wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
	ice_flush(hw);
}

/**
 * ice_ptp_read_src_clk_reg - Read the source clock register
 * @pf: Board private structure
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
 */
static u64
ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
{
	struct ice_hw *hw = &pf->hw;
	u32 hi, lo, lo2;
	u8 tmr_idx;

	tmr_idx = ice_get_ptp_src_clock_index(hw);
	guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
	/* Read the system timestamp pre PHC read */
	ptp_read_system_prets(sts);

	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	/* Read the system timestamp post PHC read */
	ptp_read_system_postts(sts);

	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	if (lo2 < lo) {
		/* if TIME_L rolled over read TIME_L again and update
		 * system timestamps
		 */
		ptp_read_system_prets(sts);
		lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
		ptp_read_system_postts(sts);
		hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	}

	return ((u64)hi << 32) | lo;
}

/**
 * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
 * @cached_phc_time: recently cached copy of PHC time
 * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
 *
 * Hardware captures timestamps which contain only 32 bits of nominal
 * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
 * Note that the captured timestamp values may be 40 bits, but the lower
 * 8 bits are sub-nanoseconds and generally discarded.
 *
 * Extend the 32bit nanosecond timestamp using the following algorithm and
 * assumptions:
 *
 * 1) have a recently cached copy of the PHC time
 * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1
 *    seconds) before or after the PHC time was captured.
 * 3) calculate the delta between the cached time and the timestamp
 * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
 *    captured after the PHC time. In this case, the full timestamp is just
 *    the cached PHC time plus the delta.
 * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
 *    timestamp was captured *before* the PHC time, i.e. because the PHC
 *    cache was updated after the timestamp was captured by hardware. In this
 *    case, the full timestamp is the cached time minus the inverse delta.
 *
 * This algorithm works even if the PHC time was updated after a Tx timestamp
 * was requested, but before the Tx timestamp event was reported from
 * hardware.
 *
 * This calculation primarily relies on keeping the cached PHC time up to
 * date. If the timestamp was captured more than 2^31 nanoseconds after the
 * PHC time, it is possible that the lower 32bits of PHC time have
 * overflowed more than once, and we might generate an incorrect timestamp.
 *
 * This is prevented by (a) periodically updating the cached PHC time once
 * a second, and (b) discarding any Tx timestamp packet if it has waited for
 * a timestamp for more than one second.
 */
static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
{
	u32 delta, phc_time_lo;
	u64 ns;

	/* Extract the lower 32 bits of the PHC time */
	phc_time_lo = (u32)cached_phc_time;

	/* Calculate the delta between the lower 32bits of the cached PHC
	 * time and the in_tstamp value
	 */
	delta = (in_tstamp - phc_time_lo);

	/* Do not assume that the in_tstamp is always more recent than the
	 * cached PHC time. If the delta is large, it indicates that the
	 * in_tstamp was taken in the past, and should be converted forward.
	 */
	if (delta > (U32_MAX / 2)) {
		/* reverse the delta calculation here */
		delta = (phc_time_lo - in_tstamp);
		ns = cached_phc_time - delta;
	} else {
		ns = cached_phc_time + delta;
	}

	return ns;
}
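
/* Worked example of the extension above, with illustrative values: if
 * cached_phc_time = 0x0000000580000000, then phc_time_lo = 0x80000000.
 *   - in_tstamp = 0x80001000: delta = 0x1000 (< 2^31), so the extended
 *     timestamp is cached_phc_time + delta = 0x0000000580001000.
 *   - in_tstamp = 0x7ffff000: delta wraps to 0xfffff000 (> 2^31), so the
 *     inverse delta 0x1000 is subtracted instead, giving 0x000000057ffff000.
 */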

/**
 * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
 * @pf: Board private structure
 * @in_tstamp: Ingress/egress 40b timestamp value
 *
 * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
 * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
 *
 *  *--------------------------------------------------------------*
 *  | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
 *  *--------------------------------------------------------------*
 *
 * The low bit is an indicator of whether the timestamp is valid. The next
 * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
 * and the remaining 32 bits are the lower 32 bits of the PHC timer.
 *
 * It is assumed that the caller verifies the timestamp is valid prior to
 * calling this function.
 *
 * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
 * time stored in the device private PTP structure as the basis for timestamp
 * extension.
 *
 * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
 * algorithm.
 */
static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
{
	const u64 mask = GENMASK_ULL(31, 0);
	unsigned long discard_time;

	/* Discard the hardware timestamp if the cached PHC time is too old */
	discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
	if (time_is_before_jiffies(discard_time)) {
		pf->ptp.tx_hwtstamp_discarded++;
		return 0;
	}

	return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
				     (in_tstamp >> 8) & mask);
}

/**
 * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps
 * @tx: the PTP Tx timestamp tracker to check
 *
 * Check that a given PTP Tx timestamp tracker is up, i.e. that it is ready
 * to accept new timestamp requests.
 *
 * Assumes the tx->lock spinlock is already held.
 */
static bool
ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
{
	lockdep_assert_held(&tx->lock);

	return tx->init && !tx->calibrating;
}

/**
 * ice_ptp_req_tx_single_tstamp - Request Tx timestamp for a port from FW
 * @tx: the PTP Tx timestamp tracker
 * @idx: index of the timestamp to request
 */
void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
{
	struct ice_ptp_port *ptp_port;
	struct sk_buff *skb;
	struct ice_pf *pf;

	if (!tx->init)
		return;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);

	/* Drop packets which have waited for more than 2 seconds */
	if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
		/* Count the number of Tx timestamps that timed out */
		pf->ptp.tx_hwtstamp_timeouts++;

		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		clear_bit(idx, tx->in_use);

		dev_kfree_skb_any(skb);
		return;
	}

	ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);

	/* Write TS index to read to the PF register so the FW can read it */
	wr32(&pf->hw, PF_SB_ATQBAL,
	     TS_LL_READ_TS_INTR | FIELD_PREP(TS_LL_READ_TS_IDX, idx) |
	     TS_LL_READ_TS);
	tx->last_ll_ts_idx_read = idx;
}

/**
 * ice_ptp_complete_tx_single_tstamp - Complete Tx timestamp for a port
 * @tx: the PTP Tx timestamp tracker
 */
void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
{
	struct skb_shared_hwtstamps shhwtstamps = {};
	u8 idx = tx->last_ll_ts_idx_read;
	struct ice_ptp_port *ptp_port;
	u64 raw_tstamp, tstamp;
	bool drop_ts = false;
	struct sk_buff *skb;
	struct ice_pf *pf;
	u32 val;

	if (!tx->init || tx->last_ll_ts_idx_read < 0)
		return;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);

	ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);

	val = rd32(&pf->hw, PF_SB_ATQBAL);

	/* When the bit is cleared, the TS is ready in the register */
	if (val & TS_LL_READ_TS) {
		dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready");
		return;
	}

	/* High 8 bit value of the TS is in bits 16:23 */
	raw_tstamp = FIELD_GET(TS_LL_READ_TS_HIGH, val);
	raw_tstamp <<= 32;

	/* Read the low 32 bit value */
	raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH);

	/* Devices using this interface always verify the timestamp differs
	 * relative to the last cached timestamp value.
	 */
	if (raw_tstamp == tx->tstamps[idx].cached_tstamp)
		return;

	tx->tstamps[idx].cached_tstamp = raw_tstamp;
	clear_bit(idx, tx->in_use);
	skb = tx->tstamps[idx].skb;
	tx->tstamps[idx].skb = NULL;
	if (test_and_clear_bit(idx, tx->stale))
		drop_ts = true;

	if (!skb)
		return;

	if (drop_ts) {
		dev_kfree_skb_any(skb);
		return;
	}

	/* Extend the timestamp using cached PHC time */
	tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
	if (tstamp) {
		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
		ice_trace(tx_tstamp_complete, skb, idx);
	}

	skb_tstamp_tx(skb, &shhwtstamps);
	dev_kfree_skb_any(skb);
}

/**
 * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port
 * @tx: the PTP Tx timestamp tracker
 *
 * Process timestamps captured by the PHY associated with this port. To do
 * this, loop over each index with a waiting skb.
 *
 * If a given index has a valid timestamp, perform the following steps:
 *
 * 1) check that the timestamp request is not stale
 * 2) check that a timestamp is ready and available in the PHY memory bank
 * 3) read and copy the timestamp out of the PHY register
 * 4) unlock the index by clearing the associated in_use bit
 * 5) check if the timestamp is stale, and discard if so
 * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value
 * 7) send this 64 bit timestamp to the stack
 *
 * Note that we do not hold the tracking lock while reading the Tx timestamp.
 * This is because reading the timestamp requires taking a mutex that might
 * sleep.
 *
 * The only place where we set in_use is when a new timestamp is initiated
 * with a slot index. This is only called in the hard xmit routine where an
 * SKB has a request flag set. The only places where we clear this bit are
 * this function, or during teardown when the Tx timestamp tracker is being
 * removed. A timestamp index will never be re-used until the in_use bit for
 * that index is cleared.
 *
 * If a Tx thread starts a new timestamp, we might not begin processing it
 * right away but we will notice it at the end when we re-queue the task.
 *
 * If a Tx thread starts a new timestamp just after this function exits, the
 * interrupt for that timestamp should re-trigger this function once
 * a timestamp is ready.
 *
 * In cases where the PTP hardware clock was directly adjusted, some
 * timestamps may not be able to safely use the timestamp extension math. In
 * this case, software will set the stale bit for any outstanding Tx
 * timestamps when the clock is adjusted. Then this function will discard
 * those captured timestamps instead of sending them to the stack.
 *
 * If a Tx packet has been waiting for more than 2 seconds, it is not possible
 * to correctly extend the timestamp using the cached PHC time. It is
 * extremely unlikely that a packet will ever take this long to timestamp. If
 * we detect a Tx timestamp request that has waited for this long we assume
 * the packet will never be sent by hardware and discard it without reading
 * the timestamp register.
 */
static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
{
	struct ice_ptp_port *ptp_port;
	unsigned long flags;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u64 tstamp_ready;
	bool link_up;
	int err;
	u8 idx;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	hw = &pf->hw;

	/* Read the Tx ready status first */
	if (tx->has_ready_bitmap) {
		err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
		if (err)
			return;
	}

	/* Drop packets if the link went down */
	link_up = ptp_port->link_up;

	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct skb_shared_hwtstamps shhwtstamps = {};
		u8 phy_idx = idx + tx->offset;
		u64 raw_tstamp = 0, tstamp;
		bool drop_ts = !link_up;
		struct sk_buff *skb;

		/* Drop packets which have waited for more than 2 seconds */
		if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
			drop_ts = true;

			/* Count the number of Tx timestamps that timed out */
			pf->ptp.tx_hwtstamp_timeouts++;
		}

		/* Only read a timestamp from the PHY if it's marked as ready
		 * by the tstamp_ready register.
		 * This avoids unnecessary reading of timestamps which are not
		 * yet valid. This is important as we must read all timestamps
		 * which are valid and only timestamps which are valid during
		 * each interrupt. If we do not, the hardware logic for
		 * generating a new interrupt can get stuck on some devices.
		 */
		if (tx->has_ready_bitmap &&
		    !(tstamp_ready & BIT_ULL(phy_idx))) {
			if (drop_ts)
				goto skip_ts_read;

			continue;
		}

		ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);

		err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp);
		if (err && !drop_ts)
			continue;

		ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);

		/* For PHYs which don't implement a proper timestamp ready
		 * bitmap, verify that the timestamp value is different
		 * from the last cached timestamp. If it is not, skip this for
		 * now assuming it hasn't yet been captured by hardware.
		 */
		if (!drop_ts && !tx->has_ready_bitmap &&
		    raw_tstamp == tx->tstamps[idx].cached_tstamp)
			continue;

		/* Discard any timestamp value without the valid bit set */
		if (!(raw_tstamp & ICE_PTP_TS_VALID))
			drop_ts = true;

skip_ts_read:
		spin_lock_irqsave(&tx->lock, flags);
		if (!tx->has_ready_bitmap && raw_tstamp)
			tx->tstamps[idx].cached_tstamp = raw_tstamp;
		clear_bit(idx, tx->in_use);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		if (test_and_clear_bit(idx, tx->stale))
			drop_ts = true;
		spin_unlock_irqrestore(&tx->lock, flags);

		/* It is unlikely but possible that the SKB will have been
		 * flushed at this point due to link change or teardown.
		 */
		if (!skb)
			continue;

		if (drop_ts) {
			dev_kfree_skb_any(skb);
			continue;
		}

		/* Extend the timestamp using cached PHC time */
		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
		if (tstamp) {
			shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
			ice_trace(tx_tstamp_complete, skb, idx);
		}

		skb_tstamp_tx(skb, &shhwtstamps);
		dev_kfree_skb_any(skb);
	}
}

/**
 * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device
 * @pf: Board private structure
 */
static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
{
	struct ice_ptp_port *port;
	unsigned int i;

	mutex_lock(&pf->adapter->ports.lock);
	list_for_each_entry(port, &pf->adapter->ports.ports, list_node) {
		struct ice_ptp_tx *tx = &port->tx;

		if (!tx || !tx->init)
			continue;

		ice_ptp_process_tx_tstamp(tx);
	}
	mutex_unlock(&pf->adapter->ports.lock);

	for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) {
		u64 tstamp_ready;
		int err;

		/* Read the Tx ready status first */
		err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
		if (err)
			break;
		else if (tstamp_ready)
			return ICE_TX_TSTAMP_WORK_PENDING;
	}

	return ICE_TX_TSTAMP_WORK_DONE;
}

/**
 * ice_ptp_tx_tstamp - Process Tx timestamps for this function.
 * @tx: Tx tracking structure to process
 *
 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete
 * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise.
 */
static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
{
	bool more_timestamps;
	unsigned long flags;

	if (!tx->init)
		return ICE_TX_TSTAMP_WORK_DONE;

	/* Process the Tx timestamp tracker */
	ice_ptp_process_tx_tstamp(tx);

	/* Check if there are outstanding Tx timestamps */
	spin_lock_irqsave(&tx->lock, flags);
	more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (more_timestamps)
		return ICE_TX_TSTAMP_WORK_PENDING;

	return ICE_TX_TSTAMP_WORK_DONE;
}

/**
 * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
 * @tx: Tx tracking structure to initialize
 *
 * Assumes that the length has already been initialized. Do not call directly,
 * use the ice_ptp_init_tx_* instead.
 */
static int
ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
{
	unsigned long *in_use, *stale;
	struct ice_tx_tstamp *tstamps;

	tstamps = kcalloc(tx->len, sizeof(*tstamps), GFP_KERNEL);
	in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
	stale = bitmap_zalloc(tx->len, GFP_KERNEL);

	if (!tstamps || !in_use || !stale) {
		kfree(tstamps);
		bitmap_free(in_use);
		bitmap_free(stale);

		return -ENOMEM;
	}

	tx->tstamps = tstamps;
	tx->in_use = in_use;
	tx->stale = stale;
	tx->init = 1;
	tx->last_ll_ts_idx_read = -1;

	spin_lock_init(&tx->lock);

	return 0;
}

/**
 * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
 * @pf: Board private structure
 * @tx: the tracker to flush
 *
 * Called during teardown when a Tx tracker is being removed.
 */
static void
ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	struct ice_hw *hw = &pf->hw;
	unsigned long flags;
	u64 tstamp_ready;
	int err;
	u8 idx;

	err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
	if (err) {
		dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n",
			tx->block, err);

		/* If we fail to read the Tx timestamp ready bitmap just
		 * skip clearing the PHY timestamps.
		 */
		tstamp_ready = 0;
	}

	for_each_set_bit(idx, tx->in_use, tx->len) {
		u8 phy_idx = idx + tx->offset;
		struct sk_buff *skb;

		/* In case this timestamp is ready, we need to clear it. */
		if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx)))
			ice_clear_phy_tstamp(hw, tx->block, phy_idx);

		spin_lock_irqsave(&tx->lock, flags);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		clear_bit(idx, tx->in_use);
		clear_bit(idx, tx->stale);
		spin_unlock_irqrestore(&tx->lock, flags);

		/* Count the number of Tx timestamps flushed */
		pf->ptp.tx_hwtstamp_flushed++;

		/* Free the SKB after we've cleared the bit */
		dev_kfree_skb_any(skb);
	}
}

/**
 * ice_ptp_mark_tx_tracker_stale - Mark unfinished timestamps as stale
 * @tx: the tracker to mark
 *
 * Mark currently outstanding Tx timestamps as stale. This prevents sending
 * their timestamp value to the stack. This is required to prevent extending
 * the 40bit hardware timestamp incorrectly.
 *
 * This should be called when the PTP clock is modified such as after a set
 * time request.
 */
static void
ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
{
	unsigned long flags;

	spin_lock_irqsave(&tx->lock, flags);
	bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len);
	spin_unlock_irqrestore(&tx->lock, flags);
}

/**
 * ice_ptp_flush_all_tx_tracker - Flush all timestamp trackers on this clock
 * @pf: Board private structure
 *
 * Called by the clock owner to flush all the Tx timestamp trackers associated
 * with the clock.
 */
static void
ice_ptp_flush_all_tx_tracker(struct ice_pf *pf)
{
	struct ice_ptp_port *port;

	list_for_each_entry(port, &pf->adapter->ports.ports, list_node)
		ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx);
}

/**
 * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
 * @pf: Board private structure
 * @tx: Tx tracking structure to release
 *
 * Free memory associated with the Tx timestamp tracker.
 */
static void
ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	unsigned long flags;

	spin_lock_irqsave(&tx->lock, flags);
	tx->init = 0;
	spin_unlock_irqrestore(&tx->lock, flags);

	/* wait for potentially outstanding interrupt to complete */
	synchronize_irq(pf->oicr_irq.virq);

	ice_ptp_flush_tx_tracker(pf, tx);

	kfree(tx->tstamps);
	tx->tstamps = NULL;

	bitmap_free(tx->in_use);
	tx->in_use = NULL;

	bitmap_free(tx->stale);
	tx->stale = NULL;

	tx->len = 0;
}

/**
 * ice_ptp_init_tx_eth56g - Initialize tracking for Tx timestamps
 * @pf: Board private structure
 * @tx: the Tx tracking structure to initialize
 * @port: the port this structure tracks
 *
 * Initialize the Tx timestamp tracker for this port. ETH56G PHYs
 * have independent memory blocks for all ports.
 *
 * Return: 0 for success, -ENOMEM when failed to allocate Tx tracker
 */
static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx,
				  u8 port)
{
	tx->block = port;
	tx->offset = 0;
	tx->len = INDEX_PER_PORT_ETH56G;
	tx->has_ready_bitmap = 1;

	return ice_ptp_alloc_tx_tracker(tx);
}

/**
 * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
 * @pf: Board private structure
 * @tx: the Tx tracking structure to initialize
 * @port: the port this structure tracks
 *
 * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
 * the timestamp block is shared for all ports in the same quad. To avoid
 * ports using the same timestamp index, logically break the block of
 * registers into chunks based on the port number.
 */
static int
ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
	tx->block = ICE_GET_QUAD_NUM(port);
	tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
	tx->len = INDEX_PER_PORT_E82X;
	tx->has_ready_bitmap = 1;

	return ice_ptp_alloc_tx_tracker(tx);
}
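
/* Example of the resulting layout, assuming ICE_PORTS_PER_QUAD is 4 and
 * INDEX_PER_PORT_E82X is 16 (values defined elsewhere in the driver): port 6
 * lands in quad 1 and owns timestamp indexes 32-47 of that quad's shared
 * block, so no two ports ever hand out the same index.
 */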

/**
 * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
 * @pf: Board private structure
 * @tx: the Tx tracking structure to initialize
 *
 * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
 * port has its own block of timestamps, independent of the other ports.
 */
static int
ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	tx->block = pf->hw.port_info->lport;
	tx->offset = 0;
	tx->len = INDEX_PER_PORT_E810;
	/* The E810 PHY does not provide a timestamp ready bitmap. Instead,
	 * verify new timestamps against cached copy of the last read
	 * timestamp.
	 */
	tx->has_ready_bitmap = 0;

	return ice_ptp_alloc_tx_tracker(tx);
}

/**
 * ice_ptp_update_cached_phctime - Update the cached PHC time values
 * @pf: Board specific private structure
 *
 * This function updates the system time values which are cached in the PF
 * structure and the Rx rings.
 *
 * This function must be called periodically to ensure that the cached value
 * is never more than 2 seconds old.
 *
 * Note that the cached copy in the PF PTP structure is always updated, even
 * if we can't update the copy in the Rx rings.
 *
 * Return:
 * * 0 - OK, successfully updated
 * * -EAGAIN - PF was busy, need to reschedule the update
 */
static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	unsigned long update_before;
	u64 systime;
	int i;

	update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
	if (pf->ptp.cached_phc_time &&
	    time_is_before_jiffies(update_before)) {
		unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;

		dev_warn(dev, "%u msecs passed between updates to cached PHC time\n",
			 jiffies_to_msecs(time_taken));
		pf->ptp.late_cached_phc_updates++;
	}

	/* Read the current PHC time */
	systime = ice_ptp_read_src_clk_reg(pf, NULL);

	/* Update the cached PHC time stored in the PF structure */
	WRITE_ONCE(pf->ptp.cached_phc_time, systime);
	WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);

	if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
		return -EAGAIN;

	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];
		int j;

		if (!vsi)
			continue;

		if (vsi->type != ICE_VSI_PF)
			continue;

		ice_for_each_rxq(vsi, j) {
			if (!vsi->rx_rings[j])
				continue;
			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
		}
	}
	clear_bit(ICE_CFG_BUSY, pf->state);

	return 0;
}

/**
 * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
 * @pf: Board specific private structure
 *
 * This function must be called when the cached PHC time is no longer valid,
 * such as after a time adjustment. It marks any currently outstanding Tx
 * timestamps as stale and updates the cached PHC time for both the PF and Rx
 * rings.
 *
 * If updating the PHC time cannot be done immediately, a warning message is
 * logged and the work item is scheduled immediately to minimize the window
 * with a wrong cached timestamp.
 */
static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	/* Update the cached PHC time immediately if possible, otherwise
	 * schedule the work item to execute soon.
	 */
	err = ice_ptp_update_cached_phctime(pf);
	if (err) {
		/* If another thread is updating the Rx rings, we won't
		 * properly reset them here. This could lead to reporting of
		 * invalid timestamps, but there isn't much we can do.
		 */
		dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
			 __func__);

		/* Queue the work item to update the Rx rings when possible */
		kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
					   msecs_to_jiffies(10));
	}

	/* Mark any outstanding timestamps as stale, since they might have
	 * been captured in hardware before the time update. This could lead
	 * to us extending them with the wrong cached value resulting in
	 * incorrect timestamp values.
	 */
	ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx);
}

/**
 * ice_ptp_write_init - Set PHC time to provided value
 * @pf: Board private structure
 * @ts: timespec structure that holds the new time value
 *
 * Set the PHC time to the specified time provided in the timespec.
 */
static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
{
	u64 ns = timespec64_to_ns(ts);
	struct ice_hw *hw = &pf->hw;

	return ice_ptp_init_time(hw, ns);
}

/**
 * ice_ptp_write_adj - Adjust PHC clock time atomically
 * @pf: Board private structure
 * @adj: Adjustment in nanoseconds
 *
 * Perform an atomic adjustment of the PHC time by the specified number of
 * nanoseconds.
 */
static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
{
	struct ice_hw *hw = &pf->hw;

	return ice_ptp_adj_clock(hw, adj);
}

/**
 * ice_base_incval - Get base timer increment value
 * @pf: Board private structure
 *
 * Look up the base timer increment value for this device. The base increment
 * value is used to define the nominal clock tick rate. This increment value
 * is programmed during device initialization. It is also used as the basis
 * for calculating adjustments using scaled_ppm.
 */
static u64 ice_base_incval(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u64 incval;

	incval = ice_get_base_incval(hw);

	dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
		incval);

	return incval;
}

/**
 * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state
 * @port: PTP port for which Tx FIFO is checked
 */
static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
{
	int offs = port->port_num % ICE_PORTS_PER_QUAD;
	int quad = ICE_GET_QUAD_NUM(port->port_num);
	struct ice_pf *pf;
	struct ice_hw *hw;
	u32 val, phy_sts;
	int err;

	pf = ptp_port_to_pf(port);
	hw = &pf->hw;

	if (port->tx_fifo_busy_cnt == FIFO_OK)
		return 0;

	/* need to read FIFO state */
	if (offs == 0 || offs == 1)
		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS,
					     &val);
	else
		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS,
					     &val);

	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
			port->port_num, err);
		return err;
	}

	if (offs & 0x1)
		phy_sts = FIELD_GET(Q_REG_FIFO13_M, val);
	else
		phy_sts = FIELD_GET(Q_REG_FIFO02_M, val);

	if (phy_sts & FIFO_EMPTY) {
		port->tx_fifo_busy_cnt = FIFO_OK;
		return 0;
	}

	port->tx_fifo_busy_cnt++;

	dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
		port->tx_fifo_busy_cnt, port->port_num);

	if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
		dev_dbg(ice_pf_to_dev(pf),
			"Port %d Tx FIFO still not empty; resetting quad %d\n",
			port->port_num, quad);
		ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
		port->tx_fifo_busy_cnt = FIFO_OK;
		return 0;
	}

	return -EAGAIN;
}

/**
 * ice_ptp_wait_for_offsets - Check for valid Tx and Rx offsets
 * @work: Pointer to the kthread_work structure for this task
 *
 * Check whether hardware has completed measuring the Tx and Rx offset values
 * used to configure and enable vernier timestamp calibration.
 *
 * Once the offset in either direction is measured, configure the associated
 * registers with the calibrated offset values and enable timestamping. The Tx
 * and Rx directions are configured independently as soon as their associated
 * offsets are known.
 *
 * This function reschedules itself until both Tx and Rx calibration have
 * completed.
 */
static void ice_ptp_wait_for_offsets(struct kthread_work *work)
{
	struct ice_ptp_port *port;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int tx_err;
	int rx_err;

	port = container_of(work, struct ice_ptp_port, ov_work.work);
	pf = ptp_port_to_pf(port);
	hw = &pf->hw;

	if (ice_is_reset_in_progress(pf->state)) {
		/* wait for device driver to complete reset */
		kthread_queue_delayed_work(pf->ptp.kworker,
					   &port->ov_work,
					   msecs_to_jiffies(100));
		return;
	}

	tx_err = ice_ptp_check_tx_fifo(port);
	if (!tx_err)
		tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num);
	rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num);
	if (tx_err || rx_err) {
		/* Tx and/or Rx offset not yet configured, try again later */
		kthread_queue_delayed_work(pf->ptp.kworker,
					   &port->ov_work,
					   msecs_to_jiffies(100));
		return;
	}
}

/**
 * ice_ptp_port_phy_stop - Stop timestamping for a PHY port
 * @ptp_port: PTP port to stop
 */
static int
ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
{
	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
	u8 port = ptp_port->port_num;
	struct ice_hw *hw = &pf->hw;
	int err;

	if (ice_is_e810(hw))
		return 0;

	mutex_lock(&ptp_port->ps_lock);

	switch (ice_get_phy_model(hw)) {
	case ICE_PHY_ETH56G:
		err = ice_stop_phy_timer_eth56g(hw, port, true);
		break;
	case ICE_PHY_E82X:
		kthread_cancel_delayed_work_sync(&ptp_port->ov_work);

		err = ice_stop_phy_timer_e82x(hw, port, true);
		break;
	default:
		err = -ENODEV;
	}
	if (err && err != -EBUSY)
		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
			port, err);

	mutex_unlock(&ptp_port->ps_lock);

	return err;
}

/**
 * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping
 * @ptp_port: PTP port for which the PHY start is set
 *
 * Start the PHY timestamping block, and initiate Vernier timestamping
 * calibration. If timestamping cannot be calibrated (such as if link is down)
 * then disable the timestamping block instead.
 */
static int
ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
{
	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
	u8 port = ptp_port->port_num;
	struct ice_hw *hw = &pf->hw;
	unsigned long flags;
	int err;

	if (ice_is_e810(hw))
		return 0;

	if (!ptp_port->link_up)
		return ice_ptp_port_phy_stop(ptp_port);

	mutex_lock(&ptp_port->ps_lock);

	switch (ice_get_phy_model(hw)) {
	case ICE_PHY_ETH56G:
		err = ice_start_phy_timer_eth56g(hw, port);
		break;
	case ICE_PHY_E82X:
		/* Start the PHY timer in Vernier mode */
		kthread_cancel_delayed_work_sync(&ptp_port->ov_work);

		/* temporarily disable Tx timestamps while calibrating
		 * PHY offset
		 */
		spin_lock_irqsave(&ptp_port->tx.lock, flags);
		ptp_port->tx.calibrating = true;
		spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
		ptp_port->tx_fifo_busy_cnt = 0;

		/* Start the PHY timer in Vernier mode */
		err = ice_start_phy_timer_e82x(hw, port);
		if (err)
			break;

		/* Enable Tx timestamps right away */
		spin_lock_irqsave(&ptp_port->tx.lock, flags);
		ptp_port->tx.calibrating = false;
		spin_unlock_irqrestore(&ptp_port->tx.lock, flags);

		kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
					   0);
		break;
	default:
		err = -ENODEV;
	}

	if (err)
		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
			port, err);

	mutex_unlock(&ptp_port->ps_lock);

	return err;
}

/**
 * ice_ptp_link_change - Reconfigure PTP after link status change
 * @pf: Board private structure
 * @port: Port for which the PHY start is set
 * @linkup: Link is up or down
 */
void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
{
	struct ice_ptp_port *ptp_port;
	struct ice_hw *hw = &pf->hw;

	if (pf->ptp.state != ICE_PTP_READY)
		return;

	if (WARN_ON_ONCE(port >= hw->ptp.num_lports))
		return;

	ptp_port = &pf->ptp.port;
	if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
		port *= 2;
	if (WARN_ON_ONCE(ptp_port->port_num != port))
		return;

	/* Update cached link status for this port immediately */
	ptp_port->link_up = linkup;

	/* Skip HW writes if reset is in progress */
	if (pf->hw.reset_ongoing)
		return;
	switch (ice_get_phy_model(hw)) {
	case ICE_PHY_E810:
		/* Do not reconfigure E810 PHY */
		return;
	case ICE_PHY_ETH56G:
	case ICE_PHY_E82X:
		ice_ptp_port_phy_restart(ptp_port);
		return;
	default:
		dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__);
	}
}

/**
 * ice_ptp_cfg_phy_interrupt - Configure PHY interrupt settings
 * @pf: PF private structure
 * @ena: bool value to enable or disable interrupt
 * @threshold: Minimum number of packets at which intr is triggered
 *
 * Utility function to configure all the PHY interrupt settings, including
 * whether the PHY interrupt is enabled, and what threshold to use. Also
 * configures the E82X timestamp owner to react to interrupts from all PHYs.
 *
 * Return: 0 on success, -EOPNOTSUPP when PHY model incorrect, other error codes
 * when failed to configure PHY interrupt for E82X
 */
static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	ice_ptp_reset_ts_memory(hw);

	switch (ice_get_phy_model(hw)) {
	case ICE_PHY_ETH56G: {
		int port;

		for (port = 0; port < hw->ptp.num_lports; port++) {
			int err;

			err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
			if (err) {
				dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
					port, err);
				return err;
			}
		}

		return 0;
	}
	case ICE_PHY_E82X: {
		int quad;

		for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
		     quad++) {
			int err;

			err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
			if (err) {
				dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
					quad, err);
				return err;
			}
		}

		return 0;
	}
	case ICE_PHY_E810:
		return 0;
	case ICE_PHY_UNSUP:
	default:
		dev_warn(dev, "%s: Unexpected PHY model %d\n", __func__,
			 ice_get_phy_model(hw));
		return -EOPNOTSUPP;
	}
}

/**
 * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block
 * @pf: Board private structure
 */
static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
{
	ice_ptp_port_phy_restart(&pf->ptp.port);
}

/**
 * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping
 * @pf: Board private structure
 */
static void ice_ptp_restart_all_phy(struct ice_pf *pf)
{
	struct list_head *entry;

	list_for_each(entry, &pf->adapter->ports.ports) {
		struct ice_ptp_port *port = list_entry(entry,
						       struct ice_ptp_port,
						       list_node);

		if (port->link_up)
			ice_ptp_port_phy_restart(port);
	}
}

/**
 * ice_ptp_adjfine - Adjust clock increment rate
 * @info: the driver's PTP info structure
 * @scaled_ppm: Parts per million with 16-bit fractional field
 *
 * Adjust the frequency of the clock by the indicated scaled ppm from the
 * base frequency.
 */
static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;
	u64 incval;
	int err;

	incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm);
	err = ice_ptp_write_incval_locked(hw, incval);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
			err);
		return -EIO;
	}

	return 0;
}

/**
 * ice_ptp_extts_event - Process PTP external clock event
 * @pf: Board private structure
 */
void ice_ptp_extts_event(struct ice_pf *pf)
{
	struct ptp_clock_event event;
	struct ice_hw *hw = &pf->hw;
	u8 chan, tmr_idx;
	u32 hi, lo;

	/* Don't process timestamp events if PTP is not ready */
	if (pf->ptp.state != ICE_PTP_READY)
		return;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
	/* Event time is captured by one of the two matched registers
	 *   GLTSYN_EVNT_L: 32 LSB of sampled time event
	 *   GLTSYN_EVNT_H: 32 MSB of sampled time event
	 * Event is defined in GLTSYN_EVNT_0 register
	 */
	for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
		/* Check if channel is enabled */
		if (pf->ptp.ext_ts_irq & (1 << chan)) {
			lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
			hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
			event.timestamp = (((u64)hi) << 32) | lo;
			event.type = PTP_CLOCK_EXTTS;
			event.index = chan;

			/* Fire event */
			ptp_clock_event(pf->ptp.clock, &event);
			pf->ptp.ext_ts_irq &= ~(1 << chan);
		}
	}
}

/**
 * ice_ptp_cfg_extts - Configure EXTTS pin and channel
 * @pf: Board private structure
 * @rq: External timestamp request
 * @on: Enable/disable flag
 *
 * Configure an external timestamp event on the requested channel.
 *
 * Return: 0 on success, negative error code otherwise
 */
static int ice_ptp_cfg_extts(struct ice_pf *pf, struct ptp_extts_request *rq,
			     int on)
{
	u32 aux_reg, gpio_reg, irq_reg;
	struct ice_hw *hw = &pf->hw;
	unsigned int chan, gpio_pin;
	int pin_desc_idx;
	u8 tmr_idx;

	/* Reject requests with unsupported flags */
	if (rq->flags & ~(PTP_ENABLE_FEATURE |
			  PTP_RISING_EDGE |
			  PTP_FALLING_EDGE |
			  PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
	chan = rq->index;

	pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
	if (pin_desc_idx < 0)
		return -EIO;

	gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[0];
	irq_reg = rd32(hw, PFINT_OICR_ENA);

	if (on) {
		/* Enable the interrupt */
		irq_reg |= PFINT_OICR_TSYN_EVNT_M;
		aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;

#define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE	BIT(0)
#define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE	BIT(1)

		/* set event level to requested edge */
		if (rq->flags & PTP_FALLING_EDGE)
			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
		if (rq->flags & PTP_RISING_EDGE)
			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;

		/* Write GPIO CTL reg.
		 * 0x1 is input sampled by EVENT register(channel)
		 * + num_in_channels * tmr_idx
		 */
		gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M,
				      1 + chan + (tmr_idx * 3));
	} else {
		bool last_enabled = true;

		/* clear the values we set to reset defaults */
		aux_reg = 0;
		gpio_reg = 0;

		for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++)
			if ((pf->ptp.extts_rqs[i].flags &
			     PTP_ENABLE_FEATURE) &&
			    i != chan) {
				last_enabled = false;
			}

		if (last_enabled)
			irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
	}

	wr32(hw, PFINT_OICR_ENA, irq_reg);
	wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);

	return 0;
}

/**
 * ice_ptp_disable_all_extts - Disable all EXTTS channels
 * @pf: Board private structure
 */
static void ice_ptp_disable_all_extts(struct ice_pf *pf)
{
	for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++)
		if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE)
			ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i],
					  false);

	synchronize_irq(pf->oicr_irq.virq);
}

/**
 * ice_ptp_enable_all_extts - Enable all EXTTS channels
 * @pf: Board private structure
 *
 * Called during reset to restore user configuration.
 */
static void ice_ptp_enable_all_extts(struct ice_pf *pf)
{
	for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++)
		if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE)
			ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i],
					  true);
}

/**
 * ice_ptp_write_perout - Write periodic wave parameters to HW
 * @hw: pointer to the HW struct
 * @chan: target channel
 * @gpio_pin: target GPIO pin
 * @start: target time to start periodic output
 * @period: target period
 *
 * Return: 0 on success, negative error code otherwise
 */
static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
				unsigned int gpio_pin, u64 start, u64 period)
{
	u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
	u32 val = 0;

	/* 0. Reset mode & out_en in AUX_OUT */
	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);

	if (ice_is_e825c(hw)) {
		int err;

		/* Enable/disable CGU 1PPS output for E825C */
		err = ice_cgu_cfg_pps_out(hw, !!period);
		if (err)
			return err;
	}

	/* 1. Write perout with half of required period value.
	 * HW toggles output when source clock hits the TGT and then adds
	 * GLTSYN_CLKO value to the target, so it ends up with 50% duty cycle.
	 */
	period >>= 1;

	/* For proper operation, GLTSYN_CLKO must be larger than clock tick and
	 * period has to fit in 32 bit register.
	 */
#define MIN_PULSE 3
	if (!!period && (period <= MIN_PULSE || period > U32_MAX)) {
		dev_err(ice_hw_to_dev(hw), "CLK period ticks must be >= %d && <= 2^32",
			MIN_PULSE);
		return -EIO;
	}

	wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));

	/* 2. Write TARGET time */
	wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start));
	wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start));

	/* 3. Write AUX_OUT register */
	if (!!period)
		val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);

	/* 4. write GPIO CTL reg */
	val = GLGEN_GPIO_CTL_PIN_DIR_M;
	if (!!period)
		val |= FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M,
				  8 + chan + (tmr_idx * 4));

	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);

	return 0;
}

/**
 * ice_ptp_cfg_perout - Configure clock to generate periodic wave
 * @pf: Board private structure
 * @rq: Periodic output request
 * @on: Enable/disable flag
 *
 * Configure the internal clock generator modules to generate the clock wave of
 * specified period.
 *
 * Return: 0 on success, negative error code otherwise
 */
static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
			      int on)
{
	u64 clk, period, start, phase;
	struct ice_hw *hw = &pf->hw;
	unsigned int gpio_pin;
	int pin_desc_idx;

	if (rq->flags & ~PTP_PEROUT_PHASE)
		return -EOPNOTSUPP;

	pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_PEROUT, rq->index);
	if (pin_desc_idx < 0)
		return -EIO;

	gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1];
	period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec;

	/* If we're disabling the output or period is 0, clear out CLKO and TGT
	 * and keep output level low.
	 */
	if (!on || !period)
		return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0);

	if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 &&
	    period != NSEC_PER_SEC && hw->ptp.phy_model == ICE_PHY_E82X) {
		dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n");
		return -EOPNOTSUPP;
	}

	if (period & 0x1) {
		dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
		return -EIO;
	}

	start = rq->start.sec * NSEC_PER_SEC + rq->start.nsec;

	/* If PTP_PEROUT_PHASE is set, rq has phase instead of start time */
	if (rq->flags & PTP_PEROUT_PHASE)
		phase = start;
	else
		div64_u64_rem(start, period, &phase);

	/* If we have only phase or start time is in the past, start the timer
	 * at the next multiple of period, maintaining phase.
	 */
	clk = ice_ptp_read_src_clk_reg(pf, NULL);
	if (rq->flags & PTP_PEROUT_PHASE || start <= clk - ice_prop_delay(hw))
		start = div64_u64(clk + period - 1, period) * period + phase;

	/* Compensate for propagation delay from the generator to the pin. */
	start -= ice_prop_delay(hw);

	return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period);
}
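
/* Example of the start rounding above, with illustrative values: with
 * PTP_PEROUT_PHASE set, period = 1 s (1000000000 ns), phase = 250 ms and a
 * current PHC time of 5.4 s, div64_u64(clk + period - 1, period) * period +
 * phase yields 6.25 s; ice_prop_delay() is then subtracted so that the edge
 * appears on the pin at approximately 6.25 s.
 */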
 */
static void ice_ptp_enable_all_perout(struct ice_pf *pf)
{
	for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++)
		if (pf->ptp.perout_rqs[i].period.sec ||
		    pf->ptp.perout_rqs[i].period.nsec)
			ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i],
					   true);
}

/**
 * ice_ptp_disable_shared_pin - Disable enabled pin that shares GPIO
 * @pf: Board private structure
 * @pin: Pin index
 * @func: Assigned function
 *
 * Return: 0 on success, negative error code otherwise
 */
static int ice_ptp_disable_shared_pin(struct ice_pf *pf, unsigned int pin,
				      enum ptp_pin_function func)
{
	unsigned int gpio_pin;

	switch (func) {
	case PTP_PF_PEROUT:
		gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[1];
		break;
	case PTP_PF_EXTTS:
		gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[0];
		break;
	default:
		return -EOPNOTSUPP;
	}

	for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) {
		struct ptp_pin_desc *pin_desc = &pf->ptp.pin_desc[i];
		unsigned int chan = pin_desc->chan;

		/* Skip pin idx from the request */
		if (i == pin)
			continue;

		if (pin_desc->func == PTP_PF_PEROUT &&
		    pf->ptp.ice_pin_desc[i].gpio[1] == gpio_pin) {
			pf->ptp.perout_rqs[chan].period.sec = 0;
			pf->ptp.perout_rqs[chan].period.nsec = 0;
			pin_desc->func = PTP_PF_NONE;
			pin_desc->chan = 0;
			dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared output GPIO pin %u\n",
				i, gpio_pin);
			return ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[chan],
						  false);
		} else if (pin_desc->func == PTP_PF_EXTTS &&
			   pf->ptp.ice_pin_desc[i].gpio[0] == gpio_pin) {
			pf->ptp.extts_rqs[chan].flags &= ~PTP_ENABLE_FEATURE;
			pin_desc->func = PTP_PF_NONE;
			pin_desc->chan = 0;
			dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared input GPIO pin %u\n",
				i, gpio_pin);
			return ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[chan],
						 false);
		}
	}

	return 0;
}

/**
 * ice_verify_pin - verify if pin supports requested pin function
 * @info: the driver's PTP info structure
 * @pin: Pin index
 * @func: Assigned function
 * @chan: Assigned channel
 *
 * Return: 0 on success, -EOPNOTSUPP when function is not supported.
 */
static int ice_verify_pin(struct ptp_clock_info *info, unsigned int pin,
			  enum ptp_pin_function func, unsigned int chan)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	const struct ice_ptp_pin_desc *pin_desc;

	pin_desc = &pf->ptp.ice_pin_desc[pin];

	/* Is assigned function allowed?
	 */
	switch (func) {
	case PTP_PF_EXTTS:
		if (pin_desc->gpio[0] < 0)
			return -EOPNOTSUPP;
		break;
	case PTP_PF_PEROUT:
		if (pin_desc->gpio[1] < 0)
			return -EOPNOTSUPP;
		break;
	case PTP_PF_NONE:
		break;
	case PTP_PF_PHYSYNC:
	default:
		return -EOPNOTSUPP;
	}

	/* On adapters with SMA_CTRL, disable other pins that share the same GPIO */
	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
		ice_ptp_disable_shared_pin(pf, pin, func);
		pf->ptp.pin_desc[pin].func = func;
		pf->ptp.pin_desc[pin].chan = chan;
		return ice_ptp_set_sma_cfg(pf);
	}

	return 0;
}

/**
 * ice_ptp_gpio_enable - Enable/disable ancillary features of PHC
 * @info: The driver's PTP info structure
 * @rq: The requested feature to change
 * @on: Enable/disable flag
 *
 * Return: 0 on success, negative error code otherwise
 */
static int ice_ptp_gpio_enable(struct ptp_clock_info *info,
			       struct ptp_clock_request *rq, int on)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	int err;

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
	{
		struct ptp_perout_request *cached =
			&pf->ptp.perout_rqs[rq->perout.index];

		err = ice_ptp_cfg_perout(pf, &rq->perout, on);
		if (!err) {
			*cached = rq->perout;
		} else {
			cached->period.sec = 0;
			cached->period.nsec = 0;
		}
		return err;
	}
	case PTP_CLK_REQ_EXTTS:
	{
		struct ptp_extts_request *cached =
			&pf->ptp.extts_rqs[rq->extts.index];

		err = ice_ptp_cfg_extts(pf, &rq->extts, on);
		if (!err)
			*cached = rq->extts;
		else
			cached->flags &= ~PTP_ENABLE_FEATURE;
		return err;
	}
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * ice_ptp_gettimex64 - Get the time of the clock
 * @info: the driver's PTP info structure
 * @ts: timespec64 structure to hold the current time value
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
 *
 * Read the device clock and return the correct value in ns, after converting
 * it into a timespec struct.
 */
static int
ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
		   struct ptp_system_timestamp *sts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	u64 time_ns;

	time_ns = ice_ptp_read_src_clk_reg(pf, sts);
	*ts = ns_to_timespec64(time_ns);
	return 0;
}

/**
 * ice_ptp_settime64 - Set the time of the clock
 * @info: the driver's PTP info structure
 * @ts: timespec64 structure that holds the new time value
 *
 * Set the device clock to the user input value. The conversion from timespec
 * to ns happens in the write function.
 */
static int
ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct timespec64 ts64 = *ts;
	struct ice_hw *hw = &pf->hw;
	int err;

	/* For Vernier mode on E82X, we need to recalibrate after new settime.
	 * Start with marking timestamps as invalid.
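	 * The timestamp blocks are restarted and their offsets recalibrated
	 * below, once the new time has been written.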
2050 */ 2051 if (ice_get_phy_model(hw) == ICE_PHY_E82X) { 2052 err = ice_ptp_clear_phy_offset_ready_e82x(hw); 2053 if (err) 2054 dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n"); 2055 } 2056 2057 if (!ice_ptp_lock(hw)) { 2058 err = -EBUSY; 2059 goto exit; 2060 } 2061 2062 /* Disable periodic outputs */ 2063 ice_ptp_disable_all_perout(pf); 2064 2065 err = ice_ptp_write_init(pf, &ts64); 2066 ice_ptp_unlock(hw); 2067 2068 if (!err) 2069 ice_ptp_reset_cached_phctime(pf); 2070 2071 /* Reenable periodic outputs */ 2072 ice_ptp_enable_all_perout(pf); 2073 2074 /* Recalibrate and re-enable timestamp blocks for E822/E823 */ 2075 if (ice_get_phy_model(hw) == ICE_PHY_E82X) 2076 ice_ptp_restart_all_phy(pf); 2077 exit: 2078 if (err) { 2079 dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err); 2080 return err; 2081 } 2082 2083 return 0; 2084 } 2085 2086 /** 2087 * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment 2088 * @info: the driver's PTP info structure 2089 * @delta: Offset in nanoseconds to adjust the time by 2090 */ 2091 static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta) 2092 { 2093 struct timespec64 now, then; 2094 int ret; 2095 2096 then = ns_to_timespec64(delta); 2097 ret = ice_ptp_gettimex64(info, &now, NULL); 2098 if (ret) 2099 return ret; 2100 now = timespec64_add(now, then); 2101 2102 return ice_ptp_settime64(info, (const struct timespec64 *)&now); 2103 } 2104 2105 /** 2106 * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta 2107 * @info: the driver's PTP info structure 2108 * @delta: Offset in nanoseconds to adjust the time by 2109 */ 2110 static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta) 2111 { 2112 struct ice_pf *pf = ptp_info_to_pf(info); 2113 struct ice_hw *hw = &pf->hw; 2114 struct device *dev; 2115 int err; 2116 2117 dev = ice_pf_to_dev(pf); 2118 2119 /* Hardware only supports atomic adjustments using signed 32-bit 2120 * integers. For any adjustment outside this range, perform 2121 * a non-atomic get->adjust->set flow. 2122 */ 2123 if (delta > S32_MAX || delta < S32_MIN) { 2124 dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta); 2125 return ice_ptp_adjtime_nonatomic(info, delta); 2126 } 2127 2128 if (!ice_ptp_lock(hw)) { 2129 dev_err(dev, "PTP failed to acquire semaphore in adjtime\n"); 2130 return -EBUSY; 2131 } 2132 2133 /* Disable periodic outputs */ 2134 ice_ptp_disable_all_perout(pf); 2135 2136 err = ice_ptp_write_adj(pf, delta); 2137 2138 /* Reenable periodic outputs */ 2139 ice_ptp_enable_all_perout(pf); 2140 2141 ice_ptp_unlock(hw); 2142 2143 if (err) { 2144 dev_err(dev, "PTP failed to adjust time, err %d\n", err); 2145 return err; 2146 } 2147 2148 ice_ptp_reset_cached_phctime(pf); 2149 2150 return 0; 2151 } 2152 2153 #ifdef CONFIG_ICE_HWTS 2154 /** 2155 * ice_ptp_get_syncdevicetime - Get the cross time stamp info 2156 * @device: Current device time 2157 * @system: System counter value read synchronously with device time 2158 * @ctx: Context provided by timekeeping code 2159 * 2160 * Read device and system (ART) clock simultaneously and return the corrected 2161 * clock values in ns. 
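 *
 * Return: 0 on success, -EBUSY if the hardware semaphore could not be
 * acquired, or -ETIMEDOUT if the ART/device clock sync did not complete.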
2162 */ 2163 static int 2164 ice_ptp_get_syncdevicetime(ktime_t *device, 2165 struct system_counterval_t *system, 2166 void *ctx) 2167 { 2168 struct ice_pf *pf = (struct ice_pf *)ctx; 2169 struct ice_hw *hw = &pf->hw; 2170 u32 hh_lock, hh_art_ctl; 2171 int i; 2172 2173 #define MAX_HH_HW_LOCK_TRIES 5 2174 #define MAX_HH_CTL_LOCK_TRIES 100 2175 2176 for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) { 2177 /* Get the HW lock */ 2178 hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); 2179 if (hh_lock & PFHH_SEM_BUSY_M) { 2180 usleep_range(10000, 15000); 2181 continue; 2182 } 2183 break; 2184 } 2185 if (hh_lock & PFHH_SEM_BUSY_M) { 2186 dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n"); 2187 return -EBUSY; 2188 } 2189 2190 /* Program cmd to master timer */ 2191 ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME); 2192 2193 /* Start the ART and device clock sync sequence */ 2194 hh_art_ctl = rd32(hw, GLHH_ART_CTL); 2195 hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M; 2196 wr32(hw, GLHH_ART_CTL, hh_art_ctl); 2197 2198 for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) { 2199 /* Wait for sync to complete */ 2200 hh_art_ctl = rd32(hw, GLHH_ART_CTL); 2201 if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) { 2202 udelay(1); 2203 continue; 2204 } else { 2205 u32 hh_ts_lo, hh_ts_hi, tmr_idx; 2206 u64 hh_ts; 2207 2208 tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; 2209 /* Read ART time */ 2210 hh_ts_lo = rd32(hw, GLHH_ART_TIME_L); 2211 hh_ts_hi = rd32(hw, GLHH_ART_TIME_H); 2212 hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo; 2213 system->cycles = hh_ts; 2214 system->cs_id = CSID_X86_ART; 2215 /* Read Device source clock time */ 2216 hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx)); 2217 hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx)); 2218 hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo; 2219 *device = ns_to_ktime(hh_ts); 2220 break; 2221 } 2222 } 2223 2224 /* Clear the master timer */ 2225 ice_ptp_src_cmd(hw, ICE_PTP_NOP); 2226 2227 /* Release HW lock */ 2228 hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); 2229 hh_lock = hh_lock & ~PFHH_SEM_BUSY_M; 2230 wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock); 2231 2232 if (i == MAX_HH_CTL_LOCK_TRIES) 2233 return -ETIMEDOUT; 2234 2235 return 0; 2236 } 2237 2238 /** 2239 * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp 2240 * @info: the driver's PTP info structure 2241 * @cts: The memory to fill the cross timestamp info 2242 * 2243 * Capture a cross timestamp between the ART and the device PTP hardware 2244 * clock. Fill the cross timestamp information and report it back to the 2245 * caller. 2246 * 2247 * This is only valid for E822 and E823 devices which have support for 2248 * generating the cross timestamp via PCIe PTM. 2249 * 2250 * In order to correctly correlate the ART timestamp back to the TSC time, the 2251 * CPU must have X86_FEATURE_TSC_KNOWN_FREQ. 
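 *
 * Return: 0 on success, or a negative error code propagated from
 * get_device_system_crosststamp() otherwise.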
2252 */ 2253 static int 2254 ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info, 2255 struct system_device_crosststamp *cts) 2256 { 2257 struct ice_pf *pf = ptp_info_to_pf(info); 2258 2259 return get_device_system_crosststamp(ice_ptp_get_syncdevicetime, 2260 pf, NULL, cts); 2261 } 2262 #endif /* CONFIG_ICE_HWTS */ 2263 2264 /** 2265 * ice_ptp_get_ts_config - ioctl interface to read the timestamping config 2266 * @pf: Board private structure 2267 * @ifr: ioctl data 2268 * 2269 * Copy the timestamping config to user buffer 2270 */ 2271 int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr) 2272 { 2273 struct hwtstamp_config *config; 2274 2275 if (pf->ptp.state != ICE_PTP_READY) 2276 return -EIO; 2277 2278 config = &pf->ptp.tstamp_config; 2279 2280 return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? 2281 -EFAULT : 0; 2282 } 2283 2284 /** 2285 * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode 2286 * @pf: Board private structure 2287 * @config: hwtstamp settings requested or saved 2288 */ 2289 static int 2290 ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config) 2291 { 2292 switch (config->tx_type) { 2293 case HWTSTAMP_TX_OFF: 2294 pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF; 2295 break; 2296 case HWTSTAMP_TX_ON: 2297 pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON; 2298 break; 2299 default: 2300 return -ERANGE; 2301 } 2302 2303 switch (config->rx_filter) { 2304 case HWTSTAMP_FILTER_NONE: 2305 pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; 2306 break; 2307 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2308 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2309 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2310 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2311 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2312 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2313 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2314 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2315 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2316 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2317 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2318 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2319 case HWTSTAMP_FILTER_NTP_ALL: 2320 case HWTSTAMP_FILTER_ALL: 2321 pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL; 2322 break; 2323 default: 2324 return -ERANGE; 2325 } 2326 2327 /* Immediately update the device timestamping mode */ 2328 ice_ptp_restore_timestamp_mode(pf); 2329 2330 return 0; 2331 } 2332 2333 /** 2334 * ice_ptp_set_ts_config - ioctl interface to control the timestamping 2335 * @pf: Board private structure 2336 * @ifr: ioctl data 2337 * 2338 * Get the user config and store it 2339 */ 2340 int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr) 2341 { 2342 struct hwtstamp_config config; 2343 int err; 2344 2345 if (pf->ptp.state != ICE_PTP_READY) 2346 return -EAGAIN; 2347 2348 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 2349 return -EFAULT; 2350 2351 err = ice_ptp_set_timestamp_mode(pf, &config); 2352 if (err) 2353 return err; 2354 2355 /* Return the actual configuration set */ 2356 config = pf->ptp.tstamp_config; 2357 2358 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 2359 -EFAULT : 0; 2360 } 2361 2362 /** 2363 * ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns 2364 * @rx_desc: Receive descriptor 2365 * @pkt_ctx: Packet context to get the cached time 2366 * 2367 * The driver receives a notification in the receive descriptor with timestamp. 
2368 */ 2369 u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc, 2370 const struct ice_pkt_ctx *pkt_ctx) 2371 { 2372 u64 ts_ns, cached_time; 2373 u32 ts_high; 2374 2375 if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID)) 2376 return 0; 2377 2378 cached_time = READ_ONCE(pkt_ctx->cached_phctime); 2379 2380 /* Do not report a timestamp if we don't have a cached PHC time */ 2381 if (!cached_time) 2382 return 0; 2383 2384 /* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached 2385 * PHC value, rather than accessing the PF. This also allows us to 2386 * simply pass the upper 32bits of nanoseconds directly. Calling 2387 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these 2388 * bits itself. 2389 */ 2390 ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high); 2391 ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high); 2392 2393 return ts_ns; 2394 } 2395 2396 /** 2397 * ice_ptp_setup_pin_cfg - setup PTP pin_config structure 2398 * @pf: Board private structure 2399 */ 2400 static void ice_ptp_setup_pin_cfg(struct ice_pf *pf) 2401 { 2402 for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) { 2403 const struct ice_ptp_pin_desc *desc = &pf->ptp.ice_pin_desc[i]; 2404 struct ptp_pin_desc *pin = &pf->ptp.pin_desc[i]; 2405 const char *name = NULL; 2406 2407 if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) 2408 name = ice_pin_names[desc->name_idx]; 2409 else if (desc->name_idx != GPIO_NA) 2410 name = ice_pin_names_nvm[desc->name_idx]; 2411 if (name) 2412 strscpy(pin->name, name, sizeof(pin->name)); 2413 2414 pin->index = i; 2415 } 2416 2417 pf->ptp.info.pin_config = pf->ptp.pin_desc; 2418 } 2419 2420 /** 2421 * ice_ptp_disable_pins - Disable PTP pins 2422 * @pf: pointer to the PF structure 2423 * 2424 * Disable the OS access to the SMA pins. Called to clear out the OS 2425 * indications of pin support when we fail to setup the SMA control register. 2426 */ 2427 static void ice_ptp_disable_pins(struct ice_pf *pf) 2428 { 2429 struct ptp_clock_info *info = &pf->ptp.info; 2430 2431 dev_warn(ice_pf_to_dev(pf), "Failed to configure PTP pin control\n"); 2432 2433 info->enable = NULL; 2434 info->verify = NULL; 2435 info->n_pins = 0; 2436 info->n_ext_ts = 0; 2437 info->n_per_out = 0; 2438 } 2439 2440 /** 2441 * ice_ptp_parse_sdp_entries - update ice_ptp_pin_desc structure from NVM 2442 * @pf: pointer to the PF structure 2443 * @entries: SDP connection section from NVM 2444 * @num_entries: number of valid entries in sdp_entries 2445 * @pins: PTP pins array to update 2446 * 2447 * Return: 0 on success, negative error code otherwise. 
 */
static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries,
				     unsigned int num_entries,
				     struct ice_ptp_pin_desc *pins)
{
	unsigned int n_pins = 0;
	unsigned int i;

	/* Setup ice_pin_desc array */
	for (i = 0; i < ICE_N_PINS_MAX; i++) {
		pins[i].name_idx = -1;
		pins[i].gpio[0] = -1;
		pins[i].gpio[1] = -1;
	}

	for (i = 0; i < num_entries; i++) {
		u16 entry = le16_to_cpu(entries[i]);
		DECLARE_BITMAP(bitmap, GPIO_NA);
		unsigned int bitmap_idx;
		bool dir;
		u16 gpio;

		*bitmap = FIELD_GET(ICE_AQC_NVM_SDP_AC_PIN_M, entry);
		dir = !!FIELD_GET(ICE_AQC_NVM_SDP_AC_DIR_M, entry);
		gpio = FIELD_GET(ICE_AQC_NVM_SDP_AC_SDP_NUM_M, entry);
		for_each_set_bit(bitmap_idx, bitmap, GPIO_NA + 1) {
			unsigned int idx;

			/* Check if entry's pin bit is valid */
			if (bitmap_idx >= NUM_PTP_PINS_NVM &&
			    bitmap_idx != GPIO_NA)
				continue;

			/* Check if pin already exists */
			for (idx = 0; idx < ICE_N_PINS_MAX; idx++)
				if (pins[idx].name_idx == bitmap_idx)
					break;

			if (idx == ICE_N_PINS_MAX) {
				/* Pin not found, setup its entry and name */
				idx = n_pins++;
				pins[idx].name_idx = bitmap_idx;
				if (bitmap_idx == GPIO_NA)
					strscpy(pf->ptp.pin_desc[idx].name,
						ice_pin_names[gpio],
						sizeof(pf->ptp.pin_desc[idx].name));
			}

			/* Setup in/out GPIO number */
			pins[idx].gpio[dir] = gpio;
		}
	}

	for (i = 0; i < n_pins; i++) {
		dev_dbg(ice_pf_to_dev(pf),
			"NVM pin entry[%d] : name_idx %d gpio_out %d gpio_in %d\n",
			i, pins[i].name_idx, pins[i].gpio[1], pins[i].gpio[0]);
	}

	pf->ptp.info.n_pins = n_pins;
	return 0;
}

/**
 * ice_ptp_set_funcs_e82x - Set specialized functions for E82X support
 * @pf: Board private structure
 *
 * Assign functions to the PTP capabilities structure for E82X devices.
 * Functions which operate across all device families should be set directly
 * in ice_ptp_set_caps. Only add functions here which are distinct for E82X
 * devices.
 */
static void ice_ptp_set_funcs_e82x(struct ice_pf *pf)
{
#ifdef CONFIG_ICE_HWTS
	if (boot_cpu_has(X86_FEATURE_ART) &&
	    boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
		pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp_e82x;

#endif /* CONFIG_ICE_HWTS */
	if (ice_is_e825c(&pf->hw)) {
		pf->ptp.ice_pin_desc = ice_pin_desc_e825c;
		pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e825c);
	} else {
		pf->ptp.ice_pin_desc = ice_pin_desc_e82x;
		pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e82x);
	}
	ice_ptp_setup_pin_cfg(pf);
}

/**
 * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
 * @pf: Board private structure
 *
 * Assign functions to the PTP capabilities structure for E810 devices.
 * Functions which operate across all device families should be set directly
 * in ice_ptp_set_caps. Only add functions here which are distinct for E810
 * devices.
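 *
 * If the SDP section cannot be read from the NVM, fall back to the default
 * SMA or E810 pin table.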
2547 */ 2548 static void ice_ptp_set_funcs_e810(struct ice_pf *pf) 2549 { 2550 __le16 entries[ICE_AQC_NVM_SDP_AC_MAX_SIZE]; 2551 struct ice_ptp_pin_desc *desc = NULL; 2552 struct ice_ptp *ptp = &pf->ptp; 2553 unsigned int num_entries; 2554 int err; 2555 2556 err = ice_ptp_read_sdp_ac(&pf->hw, entries, &num_entries); 2557 if (err) { 2558 /* SDP section does not exist in NVM or is corrupted */ 2559 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { 2560 ptp->ice_pin_desc = ice_pin_desc_e810_sma; 2561 ptp->info.n_pins = 2562 ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810_sma); 2563 } else { 2564 pf->ptp.ice_pin_desc = ice_pin_desc_e810; 2565 pf->ptp.info.n_pins = 2566 ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810); 2567 err = 0; 2568 } 2569 } else { 2570 desc = devm_kcalloc(ice_pf_to_dev(pf), ICE_N_PINS_MAX, 2571 sizeof(struct ice_ptp_pin_desc), 2572 GFP_KERNEL); 2573 if (!desc) 2574 goto err; 2575 2576 err = ice_ptp_parse_sdp_entries(pf, entries, num_entries, desc); 2577 if (err) 2578 goto err; 2579 2580 ptp->ice_pin_desc = (const struct ice_ptp_pin_desc *)desc; 2581 } 2582 2583 ptp->info.pin_config = ptp->pin_desc; 2584 ice_ptp_setup_pin_cfg(pf); 2585 2586 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) 2587 err = ice_ptp_set_sma_cfg(pf); 2588 err: 2589 if (err) { 2590 devm_kfree(ice_pf_to_dev(pf), desc); 2591 ice_ptp_disable_pins(pf); 2592 } 2593 } 2594 2595 /** 2596 * ice_ptp_set_caps - Set PTP capabilities 2597 * @pf: Board private structure 2598 */ 2599 static void ice_ptp_set_caps(struct ice_pf *pf) 2600 { 2601 struct ptp_clock_info *info = &pf->ptp.info; 2602 struct device *dev = ice_pf_to_dev(pf); 2603 2604 snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk", 2605 dev_driver_string(dev), dev_name(dev)); 2606 info->owner = THIS_MODULE; 2607 info->max_adj = 100000000; 2608 info->adjtime = ice_ptp_adjtime; 2609 info->adjfine = ice_ptp_adjfine; 2610 info->gettimex64 = ice_ptp_gettimex64; 2611 info->settime64 = ice_ptp_settime64; 2612 info->n_per_out = GLTSYN_TGT_H_IDX_MAX; 2613 info->n_ext_ts = GLTSYN_EVNT_H_IDX_MAX; 2614 info->enable = ice_ptp_gpio_enable; 2615 info->verify = ice_verify_pin; 2616 2617 if (ice_is_e810(&pf->hw)) 2618 ice_ptp_set_funcs_e810(pf); 2619 else 2620 ice_ptp_set_funcs_e82x(pf); 2621 } 2622 2623 /** 2624 * ice_ptp_create_clock - Create PTP clock device for userspace 2625 * @pf: Board private structure 2626 * 2627 * This function creates a new PTP clock device. It only creates one if we 2628 * don't already have one. Will return error if it can't create one, but success 2629 * if we already have a device. Should be used by ice_ptp_init to create clock 2630 * initially, and prevent global resets from creating new clock devices. 2631 */ 2632 static long ice_ptp_create_clock(struct ice_pf *pf) 2633 { 2634 struct ptp_clock_info *info; 2635 struct device *dev; 2636 2637 /* No need to create a clock device if we already have one */ 2638 if (pf->ptp.clock) 2639 return 0; 2640 2641 ice_ptp_set_caps(pf); 2642 2643 info = &pf->ptp.info; 2644 dev = ice_pf_to_dev(pf); 2645 2646 /* Attempt to register the clock before enabling the hardware. 
	 */
	pf->ptp.clock = ptp_clock_register(info, dev);
	if (IS_ERR(pf->ptp.clock)) {
		dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device");
		return PTR_ERR(pf->ptp.clock);
	}

	return 0;
}

/**
 * ice_ptp_request_ts - Request an available Tx timestamp index
 * @tx: the PTP Tx timestamp tracker to request from
 * @skb: the SKB to associate with this timestamp request
 */
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
{
	unsigned long flags;
	u8 idx;

	spin_lock_irqsave(&tx->lock, flags);

	/* Check that this tracker is accepting new timestamp requests */
	if (!ice_ptp_is_tx_tracker_up(tx)) {
		spin_unlock_irqrestore(&tx->lock, flags);
		return -1;
	}

	/* Find and set the first available index */
	idx = find_next_zero_bit(tx->in_use, tx->len,
				 tx->last_ll_ts_idx_read + 1);
	if (idx == tx->len)
		idx = find_first_zero_bit(tx->in_use, tx->len);

	if (idx < tx->len) {
		/* We got a valid index that no other thread could have set. Store
		 * a reference to the skb and the start time to allow discarding old
		 * requests.
		 */
		set_bit(idx, tx->in_use);
		clear_bit(idx, tx->stale);
		tx->tstamps[idx].start = jiffies;
		tx->tstamps[idx].skb = skb_get(skb);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		ice_trace(tx_tstamp_request, skb, idx);
	}

	spin_unlock_irqrestore(&tx->lock, flags);

	/* return the appropriate PHY timestamp register index, -1 if no
	 * indexes were available.
	 */
	if (idx >= tx->len)
		return -1;
	else
		return idx + tx->offset;
}

/**
 * ice_ptp_process_ts - Process the PTP Tx timestamps
 * @pf: Board private structure
 *
 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx
 * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise.
 */
enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
{
	switch (pf->ptp.tx_interrupt_mode) {
	case ICE_PTP_TX_INTERRUPT_NONE:
		/* This device has the clock owner handle timestamps for it */
		return ICE_TX_TSTAMP_WORK_DONE;
	case ICE_PTP_TX_INTERRUPT_SELF:
		/* This device handles its own timestamps */
		return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
	case ICE_PTP_TX_INTERRUPT_ALL:
		/* This device handles timestamps for all ports */
		return ice_ptp_tx_tstamp_owner(pf);
	default:
		WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
			  pf->ptp.tx_interrupt_mode);
		return ICE_TX_TSTAMP_WORK_DONE;
	}
}

/**
 * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt
 * @pf: Board private structure
 *
 * The device PHY issues Tx timestamp interrupts to the driver for processing
 * timestamp data from the PHY. It will not interrupt again until all
 * current timestamp data is read. In rare circumstances, it is possible that
 * the driver fails to read all outstanding data.
 *
 * To avoid getting permanently stuck, periodically check if the PHY has
 * outstanding timestamp data. If so, trigger an interrupt from software to
 * process this data.
2742 */ 2743 static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf) 2744 { 2745 struct device *dev = ice_pf_to_dev(pf); 2746 struct ice_hw *hw = &pf->hw; 2747 bool trigger_oicr = false; 2748 unsigned int i; 2749 2750 if (ice_is_e810(hw)) 2751 return; 2752 2753 if (!ice_pf_src_tmr_owned(pf)) 2754 return; 2755 2756 for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) { 2757 u64 tstamp_ready; 2758 int err; 2759 2760 err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready); 2761 if (!err && tstamp_ready) { 2762 trigger_oicr = true; 2763 break; 2764 } 2765 } 2766 2767 if (trigger_oicr) { 2768 /* Trigger a software interrupt, to ensure this data 2769 * gets processed. 2770 */ 2771 dev_dbg(dev, "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n"); 2772 2773 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); 2774 ice_flush(hw); 2775 } 2776 } 2777 2778 static void ice_ptp_periodic_work(struct kthread_work *work) 2779 { 2780 struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work); 2781 struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp); 2782 int err; 2783 2784 if (pf->ptp.state != ICE_PTP_READY) 2785 return; 2786 2787 err = ice_ptp_update_cached_phctime(pf); 2788 2789 ice_ptp_maybe_trigger_tx_interrupt(pf); 2790 2791 /* Run twice a second or reschedule if phc update failed */ 2792 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 2793 msecs_to_jiffies(err ? 10 : 500)); 2794 } 2795 2796 /** 2797 * ice_ptp_prepare_for_reset - Prepare PTP for reset 2798 * @pf: Board private structure 2799 * @reset_type: the reset type being performed 2800 */ 2801 void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) 2802 { 2803 struct ice_ptp *ptp = &pf->ptp; 2804 u8 src_tmr; 2805 2806 if (ptp->state != ICE_PTP_READY) 2807 return; 2808 2809 ptp->state = ICE_PTP_RESETTING; 2810 2811 /* Disable timestamping for both Tx and Rx */ 2812 ice_ptp_disable_timestamp_mode(pf); 2813 2814 kthread_cancel_delayed_work_sync(&ptp->work); 2815 2816 if (reset_type == ICE_RESET_PFR) 2817 return; 2818 2819 ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); 2820 2821 /* Disable periodic outputs */ 2822 ice_ptp_disable_all_perout(pf); 2823 2824 src_tmr = ice_get_ptp_src_clock_index(&pf->hw); 2825 2826 /* Disable source clock */ 2827 wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M); 2828 2829 /* Acquire PHC and system timer to restore after reset */ 2830 ptp->reset_time = ktime_get_real_ns(); 2831 } 2832 2833 /** 2834 * ice_ptp_rebuild_owner - Initialize PTP clock owner after reset 2835 * @pf: Board private structure 2836 * 2837 * Companion function for ice_ptp_rebuild() which handles tasks that only the 2838 * PTP clock owner instance should perform. 2839 */ 2840 static int ice_ptp_rebuild_owner(struct ice_pf *pf) 2841 { 2842 struct ice_ptp *ptp = &pf->ptp; 2843 struct ice_hw *hw = &pf->hw; 2844 struct timespec64 ts; 2845 u64 time_diff; 2846 int err; 2847 2848 err = ice_ptp_init_phc(hw); 2849 if (err) 2850 return err; 2851 2852 /* Acquire the global hardware lock */ 2853 if (!ice_ptp_lock(hw)) { 2854 err = -EBUSY; 2855 return err; 2856 } 2857 2858 /* Write the increment time value to PHY and LAN */ 2859 err = ice_ptp_write_incval(hw, ice_base_incval(pf)); 2860 if (err) 2861 goto err_unlock; 2862 2863 /* Write the initial Time value to PHY and LAN using the cached PHC 2864 * time before the reset and time difference between stopping and 2865 * starting the clock. 
2866 */ 2867 if (ptp->cached_phc_time) { 2868 time_diff = ktime_get_real_ns() - ptp->reset_time; 2869 ts = ns_to_timespec64(ptp->cached_phc_time + time_diff); 2870 } else { 2871 ts = ktime_to_timespec64(ktime_get_real()); 2872 } 2873 err = ice_ptp_write_init(pf, &ts); 2874 if (err) 2875 goto err_unlock; 2876 2877 /* Release the global hardware lock */ 2878 ice_ptp_unlock(hw); 2879 2880 /* Flush software tracking of any outstanding timestamps since we're 2881 * about to flush the PHY timestamp block. 2882 */ 2883 ice_ptp_flush_all_tx_tracker(pf); 2884 2885 if (!ice_is_e810(hw)) { 2886 /* Enable quad interrupts */ 2887 err = ice_ptp_cfg_phy_interrupt(pf, true, 1); 2888 if (err) 2889 return err; 2890 2891 ice_ptp_restart_all_phy(pf); 2892 } 2893 2894 /* Re-enable all periodic outputs and external timestamp events */ 2895 ice_ptp_enable_all_perout(pf); 2896 ice_ptp_enable_all_extts(pf); 2897 2898 return 0; 2899 2900 err_unlock: 2901 ice_ptp_unlock(hw); 2902 return err; 2903 } 2904 2905 /** 2906 * ice_ptp_rebuild - Initialize PTP hardware clock support after reset 2907 * @pf: Board private structure 2908 * @reset_type: the reset type being performed 2909 */ 2910 void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) 2911 { 2912 struct ice_ptp *ptp = &pf->ptp; 2913 int err; 2914 2915 if (ptp->state == ICE_PTP_READY) { 2916 ice_ptp_prepare_for_reset(pf, reset_type); 2917 } else if (ptp->state != ICE_PTP_RESETTING) { 2918 err = -EINVAL; 2919 dev_err(ice_pf_to_dev(pf), "PTP was not initialized\n"); 2920 goto err; 2921 } 2922 2923 if (ice_pf_src_tmr_owned(pf) && reset_type != ICE_RESET_PFR) { 2924 err = ice_ptp_rebuild_owner(pf); 2925 if (err) 2926 goto err; 2927 } 2928 2929 ptp->state = ICE_PTP_READY; 2930 2931 /* Start periodic work going */ 2932 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0); 2933 2934 dev_info(ice_pf_to_dev(pf), "PTP reset successful\n"); 2935 return; 2936 2937 err: 2938 ptp->state = ICE_PTP_ERROR; 2939 dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err); 2940 } 2941 2942 static bool ice_is_primary(struct ice_hw *hw) 2943 { 2944 return ice_is_e825c(hw) && ice_is_dual(hw) ? 2945 !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) : true; 2946 } 2947 2948 static int ice_ptp_setup_adapter(struct ice_pf *pf) 2949 { 2950 if (!ice_pf_src_tmr_owned(pf) || !ice_is_primary(&pf->hw)) 2951 return -EPERM; 2952 2953 pf->adapter->ctrl_pf = pf; 2954 2955 return 0; 2956 } 2957 2958 static int ice_ptp_setup_pf(struct ice_pf *pf) 2959 { 2960 struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf); 2961 struct ice_ptp *ptp = &pf->ptp; 2962 2963 if (WARN_ON(!ctrl_ptp) || ice_get_phy_model(&pf->hw) == ICE_PHY_UNSUP) 2964 return -ENODEV; 2965 2966 INIT_LIST_HEAD(&ptp->port.list_node); 2967 mutex_lock(&pf->adapter->ports.lock); 2968 2969 list_add(&ptp->port.list_node, 2970 &pf->adapter->ports.ports); 2971 mutex_unlock(&pf->adapter->ports.lock); 2972 2973 return 0; 2974 } 2975 2976 static void ice_ptp_cleanup_pf(struct ice_pf *pf) 2977 { 2978 struct ice_ptp *ptp = &pf->ptp; 2979 2980 if (ice_get_phy_model(&pf->hw) != ICE_PHY_UNSUP) { 2981 mutex_lock(&pf->adapter->ports.lock); 2982 list_del(&ptp->port.list_node); 2983 mutex_unlock(&pf->adapter->ports.lock); 2984 } 2985 } 2986 2987 /** 2988 * ice_ptp_clock_index - Get the PTP clock index for this device 2989 * @pf: Board private structure 2990 * 2991 * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock 2992 * is associated. 
2993 */ 2994 int ice_ptp_clock_index(struct ice_pf *pf) 2995 { 2996 struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf); 2997 struct ptp_clock *clock; 2998 2999 if (!ctrl_ptp) 3000 return -1; 3001 clock = ctrl_ptp->clock; 3002 3003 return clock ? ptp_clock_index(clock) : -1; 3004 } 3005 3006 /** 3007 * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device 3008 * @pf: Board private structure 3009 * 3010 * Setup and initialize a PTP clock device that represents the device hardware 3011 * clock. Save the clock index for other functions connected to the same 3012 * hardware resource. 3013 */ 3014 static int ice_ptp_init_owner(struct ice_pf *pf) 3015 { 3016 struct ice_hw *hw = &pf->hw; 3017 struct timespec64 ts; 3018 int err; 3019 3020 err = ice_ptp_init_phc(hw); 3021 if (err) { 3022 dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n", 3023 err); 3024 return err; 3025 } 3026 3027 /* Acquire the global hardware lock */ 3028 if (!ice_ptp_lock(hw)) { 3029 err = -EBUSY; 3030 goto err_exit; 3031 } 3032 3033 /* Write the increment time value to PHY and LAN */ 3034 err = ice_ptp_write_incval(hw, ice_base_incval(pf)); 3035 if (err) 3036 goto err_unlock; 3037 3038 ts = ktime_to_timespec64(ktime_get_real()); 3039 /* Write the initial Time value to PHY and LAN */ 3040 err = ice_ptp_write_init(pf, &ts); 3041 if (err) 3042 goto err_unlock; 3043 3044 /* Release the global hardware lock */ 3045 ice_ptp_unlock(hw); 3046 3047 /* Configure PHY interrupt settings */ 3048 err = ice_ptp_cfg_phy_interrupt(pf, true, 1); 3049 if (err) 3050 goto err_exit; 3051 3052 /* Ensure we have a clock device */ 3053 err = ice_ptp_create_clock(pf); 3054 if (err) 3055 goto err_clk; 3056 3057 return 0; 3058 err_clk: 3059 pf->ptp.clock = NULL; 3060 err_exit: 3061 return err; 3062 3063 err_unlock: 3064 ice_ptp_unlock(hw); 3065 return err; 3066 } 3067 3068 /** 3069 * ice_ptp_init_work - Initialize PTP work threads 3070 * @pf: Board private structure 3071 * @ptp: PF PTP structure 3072 */ 3073 static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp) 3074 { 3075 struct kthread_worker *kworker; 3076 3077 /* Initialize work functions */ 3078 kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work); 3079 3080 /* Allocate a kworker for handling work required for the ports 3081 * connected to the PTP hardware clock. 
3082 */ 3083 kworker = kthread_create_worker(0, "ice-ptp-%s", 3084 dev_name(ice_pf_to_dev(pf))); 3085 if (IS_ERR(kworker)) 3086 return PTR_ERR(kworker); 3087 3088 ptp->kworker = kworker; 3089 3090 /* Start periodic work going */ 3091 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0); 3092 3093 return 0; 3094 } 3095 3096 /** 3097 * ice_ptp_init_port - Initialize PTP port structure 3098 * @pf: Board private structure 3099 * @ptp_port: PTP port structure 3100 */ 3101 static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) 3102 { 3103 struct ice_hw *hw = &pf->hw; 3104 3105 mutex_init(&ptp_port->ps_lock); 3106 3107 switch (ice_get_phy_model(hw)) { 3108 case ICE_PHY_ETH56G: 3109 return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx, 3110 ptp_port->port_num); 3111 case ICE_PHY_E810: 3112 return ice_ptp_init_tx_e810(pf, &ptp_port->tx); 3113 case ICE_PHY_E82X: 3114 kthread_init_delayed_work(&ptp_port->ov_work, 3115 ice_ptp_wait_for_offsets); 3116 3117 return ice_ptp_init_tx_e82x(pf, &ptp_port->tx, 3118 ptp_port->port_num); 3119 default: 3120 return -ENODEV; 3121 } 3122 } 3123 3124 /** 3125 * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode 3126 * @pf: Board private structure 3127 * 3128 * Initialize the Tx timestamp interrupt mode for this device. For most device 3129 * types, each PF processes the interrupt and manages its own timestamps. For 3130 * E822-based devices, only the clock owner processes the timestamps. Other 3131 * PFs disable the interrupt and do not process their own timestamps. 3132 */ 3133 static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf) 3134 { 3135 switch (ice_get_phy_model(&pf->hw)) { 3136 case ICE_PHY_E82X: 3137 /* E822 based PHY has the clock owner process the interrupt 3138 * for all ports. 3139 */ 3140 if (ice_pf_src_tmr_owned(pf)) 3141 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL; 3142 else 3143 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE; 3144 break; 3145 default: 3146 /* other PHY types handle their own Tx interrupt */ 3147 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF; 3148 } 3149 } 3150 3151 /** 3152 * ice_ptp_init - Initialize PTP hardware clock support 3153 * @pf: Board private structure 3154 * 3155 * Set up the device for interacting with the PTP hardware clock for all 3156 * functions, both the function that owns the clock hardware, and the 3157 * functions connected to the clock hardware. 3158 * 3159 * The clock owner will allocate and register a ptp_clock with the 3160 * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work 3161 * items used for asynchronous work such as Tx timestamps and periodic work. 3162 */ 3163 void ice_ptp_init(struct ice_pf *pf) 3164 { 3165 struct ice_ptp *ptp = &pf->ptp; 3166 struct ice_hw *hw = &pf->hw; 3167 int err; 3168 3169 ptp->state = ICE_PTP_INITIALIZING; 3170 3171 ice_ptp_init_hw(hw); 3172 3173 ice_ptp_init_tx_interrupt_mode(pf); 3174 3175 /* If this function owns the clock hardware, it must allocate and 3176 * configure the PTP clock device to represent it. 
3177 */ 3178 if (ice_pf_src_tmr_owned(pf) && ice_is_primary(hw)) { 3179 err = ice_ptp_setup_adapter(pf); 3180 if (err) 3181 goto err_exit; 3182 err = ice_ptp_init_owner(pf); 3183 if (err) 3184 goto err_exit; 3185 } 3186 3187 err = ice_ptp_setup_pf(pf); 3188 if (err) 3189 goto err_exit; 3190 3191 ptp->port.port_num = hw->pf_id; 3192 if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo) 3193 ptp->port.port_num = hw->pf_id * 2; 3194 3195 err = ice_ptp_init_port(pf, &ptp->port); 3196 if (err) 3197 goto err_exit; 3198 3199 /* Start the PHY timestamping block */ 3200 ice_ptp_reset_phy_timestamping(pf); 3201 3202 /* Configure initial Tx interrupt settings */ 3203 ice_ptp_cfg_tx_interrupt(pf); 3204 3205 ptp->state = ICE_PTP_READY; 3206 3207 err = ice_ptp_init_work(pf, ptp); 3208 if (err) 3209 goto err_exit; 3210 3211 dev_info(ice_pf_to_dev(pf), "PTP init successful\n"); 3212 return; 3213 3214 err_exit: 3215 /* If we registered a PTP clock, release it */ 3216 if (pf->ptp.clock) { 3217 ptp_clock_unregister(ptp->clock); 3218 pf->ptp.clock = NULL; 3219 } 3220 ptp->state = ICE_PTP_ERROR; 3221 dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err); 3222 } 3223 3224 /** 3225 * ice_ptp_release - Disable the driver/HW support and unregister the clock 3226 * @pf: Board private structure 3227 * 3228 * This function handles the cleanup work required from the initialization by 3229 * clearing out the important information and unregistering the clock 3230 */ 3231 void ice_ptp_release(struct ice_pf *pf) 3232 { 3233 if (pf->ptp.state != ICE_PTP_READY) 3234 return; 3235 3236 pf->ptp.state = ICE_PTP_UNINIT; 3237 3238 /* Disable timestamping for both Tx and Rx */ 3239 ice_ptp_disable_timestamp_mode(pf); 3240 3241 ice_ptp_cleanup_pf(pf); 3242 3243 ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); 3244 3245 ice_ptp_disable_all_extts(pf); 3246 3247 kthread_cancel_delayed_work_sync(&pf->ptp.work); 3248 3249 ice_ptp_port_phy_stop(&pf->ptp.port); 3250 mutex_destroy(&pf->ptp.port.ps_lock); 3251 if (pf->ptp.kworker) { 3252 kthread_destroy_worker(pf->ptp.kworker); 3253 pf->ptp.kworker = NULL; 3254 } 3255 3256 if (!pf->ptp.clock) 3257 return; 3258 3259 /* Disable periodic outputs */ 3260 ice_ptp_disable_all_perout(pf); 3261 3262 ptp_clock_unregister(pf->ptp.clock); 3263 pf->ptp.clock = NULL; 3264 3265 dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n"); 3266 } 3267