1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (C) 2021, Intel Corporation. */ 3 4 #include "ice.h" 5 #include "ice_lib.h" 6 #include "ice_trace.h" 7 #include "ice_cgu_regs.h" 8 9 static const char ice_pin_names[][64] = { 10 "SDP0", 11 "SDP1", 12 "SDP2", 13 "SDP3", 14 "TIME_SYNC", 15 "1PPS" 16 }; 17 18 static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = { 19 /* name, gpio, delay */ 20 { TIME_SYNC, { 4, -1 }, { 0, 0 }}, 21 { ONE_PPS, { -1, 5 }, { 0, 11 }}, 22 }; 23 24 static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = { 25 /* name, gpio, delay */ 26 { SDP0, { 0, 0 }, { 15, 14 }}, 27 { SDP1, { 1, 1 }, { 15, 14 }}, 28 { SDP2, { 2, 2 }, { 15, 14 }}, 29 { SDP3, { 3, 3 }, { 15, 14 }}, 30 { TIME_SYNC, { 4, -1 }, { 11, 0 }}, 31 { ONE_PPS, { -1, 5 }, { 0, 9 }}, 32 }; 33 34 static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = { 35 /* name, gpio, delay */ 36 { SDP0, { 0, 0 }, { 0, 1 }}, 37 { SDP1, { 1, 1 }, { 0, 1 }}, 38 { SDP2, { 2, 2 }, { 0, 1 }}, 39 { SDP3, { 3, 3 }, { 0, 1 }}, 40 { ONE_PPS, { -1, 5 }, { 0, 1 }}, 41 }; 42 43 static const char ice_pin_names_nvm[][64] = { 44 "GNSS", 45 "SMA1", 46 "U.FL1", 47 "SMA2", 48 "U.FL2", 49 }; 50 51 static const struct ice_ptp_pin_desc ice_pin_desc_e810_sma[] = { 52 /* name, gpio, delay */ 53 { GNSS, { 1, -1 }, { 0, 0 }}, 54 { SMA1, { 1, 0 }, { 0, 1 }}, 55 { UFL1, { -1, 0 }, { 0, 1 }}, 56 { SMA2, { 3, 2 }, { 0, 1 }}, 57 { UFL2, { 3, -1 }, { 0, 0 }}, 58 }; 59 60 static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf) 61 { 62 return !pf->adapter ? NULL : pf->adapter->ctrl_pf; 63 } 64 65 static struct ice_ptp *ice_get_ctrl_ptp(struct ice_pf *pf) 66 { 67 struct ice_pf *ctrl_pf = ice_get_ctrl_pf(pf); 68 69 return !ctrl_pf ? NULL : &ctrl_pf->ptp; 70 } 71 72 /** 73 * ice_ptp_find_pin_idx - Find pin index in ptp_pin_desc 74 * @pf: Board private structure 75 * @func: Pin function 76 * @chan: GPIO channel 77 * 78 * Return: positive pin number when pin is present, -1 otherwise 79 */ 80 static int ice_ptp_find_pin_idx(struct ice_pf *pf, enum ptp_pin_function func, 81 unsigned int chan) 82 { 83 const struct ptp_clock_info *info = &pf->ptp.info; 84 int i; 85 86 for (i = 0; i < info->n_pins; i++) { 87 if (info->pin_config[i].func == func && 88 info->pin_config[i].chan == chan) 89 return i; 90 } 91 92 return -1; 93 } 94 95 /** 96 * ice_ptp_update_sma_data - update SMA pins data according to pins setup 97 * @pf: Board private structure 98 * @sma_pins: parsed SMA pins status 99 * @data: SMA data to update 100 */ 101 static void ice_ptp_update_sma_data(struct ice_pf *pf, unsigned int sma_pins[], 102 u8 *data) 103 { 104 const char *state1, *state2; 105 106 /* Set the right state based on the desired configuration. 107 * When bit is set, functionality is disabled. 
108 */ 109 *data &= ~ICE_ALL_SMA_MASK; 110 if (!sma_pins[UFL1 - 1]) { 111 if (sma_pins[SMA1 - 1] == PTP_PF_EXTTS) { 112 state1 = "SMA1 Rx, U.FL1 disabled"; 113 *data |= ICE_SMA1_TX_EN; 114 } else if (sma_pins[SMA1 - 1] == PTP_PF_PEROUT) { 115 state1 = "SMA1 Tx U.FL1 disabled"; 116 *data |= ICE_SMA1_DIR_EN; 117 } else { 118 state1 = "SMA1 disabled, U.FL1 disabled"; 119 *data |= ICE_SMA1_MASK; 120 } 121 } else { 122 /* U.FL1 Tx will always enable SMA1 Rx */ 123 state1 = "SMA1 Rx, U.FL1 Tx"; 124 } 125 126 if (!sma_pins[UFL2 - 1]) { 127 if (sma_pins[SMA2 - 1] == PTP_PF_EXTTS) { 128 state2 = "SMA2 Rx, U.FL2 disabled"; 129 *data |= ICE_SMA2_TX_EN | ICE_SMA2_UFL2_RX_DIS; 130 } else if (sma_pins[SMA2 - 1] == PTP_PF_PEROUT) { 131 state2 = "SMA2 Tx, U.FL2 disabled"; 132 *data |= ICE_SMA2_DIR_EN | ICE_SMA2_UFL2_RX_DIS; 133 } else { 134 state2 = "SMA2 disabled, U.FL2 disabled"; 135 *data |= ICE_SMA2_MASK; 136 } 137 } else { 138 if (!sma_pins[SMA2 - 1]) { 139 state2 = "SMA2 disabled, U.FL2 Rx"; 140 *data |= ICE_SMA2_DIR_EN | ICE_SMA2_TX_EN; 141 } else { 142 state2 = "SMA2 Tx, U.FL2 Rx"; 143 *data |= ICE_SMA2_DIR_EN; 144 } 145 } 146 147 dev_dbg(ice_pf_to_dev(pf), "%s, %s\n", state1, state2); 148 } 149 150 /** 151 * ice_ptp_set_sma_cfg - set the configuration of the SMA control logic 152 * @pf: Board private structure 153 * 154 * Return: 0 on success, negative error code otherwise 155 */ 156 static int ice_ptp_set_sma_cfg(struct ice_pf *pf) 157 { 158 const struct ice_ptp_pin_desc *ice_pins = pf->ptp.ice_pin_desc; 159 struct ptp_pin_desc *pins = pf->ptp.pin_desc; 160 unsigned int sma_pins[ICE_SMA_PINS_NUM] = {}; 161 int err; 162 u8 data; 163 164 /* Read initial pin state value */ 165 err = ice_read_sma_ctrl(&pf->hw, &data); 166 if (err) 167 return err; 168 169 /* Get SMA/U.FL pins states */ 170 for (int i = 0; i < pf->ptp.info.n_pins; i++) 171 if (pins[i].func) { 172 int name_idx = ice_pins[i].name_idx; 173 174 switch (name_idx) { 175 case SMA1: 176 case UFL1: 177 case SMA2: 178 case UFL2: 179 sma_pins[name_idx - 1] = pins[i].func; 180 break; 181 default: 182 continue; 183 } 184 } 185 186 ice_ptp_update_sma_data(pf, sma_pins, &data); 187 return ice_write_sma_ctrl(&pf->hw, data); 188 } 189 190 /** 191 * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device 192 * @pf: Board private structure 193 * 194 * Program the device to respond appropriately to the Tx timestamp interrupt 195 * cause. 196 */ 197 static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf) 198 { 199 struct ice_hw *hw = &pf->hw; 200 bool enable; 201 u32 val; 202 203 switch (pf->ptp.tx_interrupt_mode) { 204 case ICE_PTP_TX_INTERRUPT_ALL: 205 /* React to interrupts across all quads. */ 206 wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f); 207 enable = true; 208 break; 209 case ICE_PTP_TX_INTERRUPT_NONE: 210 /* Do not react to interrupts on any quad. 
*/ 211 wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0); 212 enable = false; 213 break; 214 case ICE_PTP_TX_INTERRUPT_SELF: 215 default: 216 enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON; 217 break; 218 } 219 220 /* Configure the Tx timestamp interrupt */ 221 val = rd32(hw, PFINT_OICR_ENA); 222 if (enable) 223 val |= PFINT_OICR_TSYN_TX_M; 224 else 225 val &= ~PFINT_OICR_TSYN_TX_M; 226 wr32(hw, PFINT_OICR_ENA, val); 227 } 228 229 /** 230 * ice_set_rx_tstamp - Enable or disable Rx timestamping 231 * @pf: The PF pointer to search in 232 * @on: bool value for whether timestamps are enabled or disabled 233 */ 234 static void ice_set_rx_tstamp(struct ice_pf *pf, bool on) 235 { 236 struct ice_vsi *vsi; 237 u16 i; 238 239 vsi = ice_get_main_vsi(pf); 240 if (!vsi || !vsi->rx_rings) 241 return; 242 243 /* Set the timestamp flag for all the Rx rings */ 244 ice_for_each_rxq(vsi, i) { 245 if (!vsi->rx_rings[i]) 246 continue; 247 vsi->rx_rings[i]->ptp_rx = on; 248 } 249 } 250 251 /** 252 * ice_ptp_disable_timestamp_mode - Disable current timestamp mode 253 * @pf: Board private structure 254 * 255 * Called during preparation for reset to temporarily disable timestamping on 256 * the device. Called during remove to disable timestamping while cleaning up 257 * driver resources. 258 */ 259 static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf) 260 { 261 struct ice_hw *hw = &pf->hw; 262 u32 val; 263 264 val = rd32(hw, PFINT_OICR_ENA); 265 val &= ~PFINT_OICR_TSYN_TX_M; 266 wr32(hw, PFINT_OICR_ENA, val); 267 268 ice_set_rx_tstamp(pf, false); 269 } 270 271 /** 272 * ice_ptp_restore_timestamp_mode - Restore timestamp configuration 273 * @pf: Board private structure 274 * 275 * Called at the end of rebuild to restore timestamp configuration after 276 * a device reset. 277 */ 278 void ice_ptp_restore_timestamp_mode(struct ice_pf *pf) 279 { 280 struct ice_hw *hw = &pf->hw; 281 bool enable_rx; 282 283 ice_ptp_cfg_tx_interrupt(pf); 284 285 enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL; 286 ice_set_rx_tstamp(pf, enable_rx); 287 288 /* Trigger an immediate software interrupt to ensure that timestamps 289 * which occurred during reset are handled now. 290 */ 291 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); 292 ice_flush(hw); 293 } 294 295 /** 296 * ice_ptp_read_src_clk_reg - Read the source clock register 297 * @pf: Board private structure 298 * @sts: Optional parameter for holding a pair of system timestamps from 299 * the system clock. Will be ignored if NULL is given. 
300 */ 301 u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf, 302 struct ptp_system_timestamp *sts) 303 { 304 struct ice_hw *hw = &pf->hw; 305 u32 hi, lo, lo2; 306 u8 tmr_idx; 307 308 if (!ice_is_primary(hw)) 309 hw = ice_get_primary_hw(pf); 310 311 tmr_idx = ice_get_ptp_src_clock_index(hw); 312 guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock); 313 /* Read the system timestamp pre PHC read */ 314 ptp_read_system_prets(sts); 315 316 if (hw->mac_type == ICE_MAC_E830) { 317 u64 clk_time = rd64(hw, E830_GLTSYN_TIME_L(tmr_idx)); 318 319 /* Read the system timestamp post PHC read */ 320 ptp_read_system_postts(sts); 321 322 return clk_time; 323 } 324 325 lo = rd32(hw, GLTSYN_TIME_L(tmr_idx)); 326 327 /* Read the system timestamp post PHC read */ 328 ptp_read_system_postts(sts); 329 330 hi = rd32(hw, GLTSYN_TIME_H(tmr_idx)); 331 lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx)); 332 333 if (lo2 < lo) { 334 /* if TIME_L rolled over read TIME_L again and update 335 * system timestamps 336 */ 337 ptp_read_system_prets(sts); 338 lo = rd32(hw, GLTSYN_TIME_L(tmr_idx)); 339 ptp_read_system_postts(sts); 340 hi = rd32(hw, GLTSYN_TIME_H(tmr_idx)); 341 } 342 343 return ((u64)hi << 32) | lo; 344 } 345 346 /** 347 * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b 348 * @cached_phc_time: recently cached copy of PHC time 349 * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value 350 * 351 * Hardware captures timestamps which contain only 32 bits of nominal 352 * nanoseconds, as opposed to the 64bit timestamps that the stack expects. 353 * Note that the captured timestamp values may be 40 bits, but the lower 354 * 8 bits are sub-nanoseconds and generally discarded. 355 * 356 * Extend the 32bit nanosecond timestamp using the following algorithm and 357 * assumptions: 358 * 359 * 1) have a recently cached copy of the PHC time 360 * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1 361 * seconds) before or after the PHC time was captured. 362 * 3) calculate the delta between the cached time and the timestamp 363 * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was 364 * captured after the PHC time. In this case, the full timestamp is just 365 * the cached PHC time plus the delta. 366 * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the 367 * timestamp was captured *before* the PHC time, i.e. because the PHC 368 * cache was updated after the timestamp was captured by hardware. In this 369 * case, the full timestamp is the cached time minus the inverse delta. 370 * 371 * This algorithm works even if the PHC time was updated after a Tx timestamp 372 * was requested, but before the Tx timestamp event was reported from 373 * hardware. 374 * 375 * This calculation primarily relies on keeping the cached PHC time up to 376 * date. If the timestamp was captured more than 2^31 nanoseconds after the 377 * PHC time, it is possible that the lower 32bits of PHC time have 378 * overflowed more than once, and we might generate an incorrect timestamp. 379 * 380 * This is prevented by (a) periodically updating the cached PHC time once 381 * a second, and (b) discarding any Tx timestamp packet if it has waited for 382 * a timestamp for more than one second. 
383 */ 384 static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp) 385 { 386 u32 delta, phc_time_lo; 387 u64 ns; 388 389 /* Extract the lower 32 bits of the PHC time */ 390 phc_time_lo = (u32)cached_phc_time; 391 392 /* Calculate the delta between the lower 32bits of the cached PHC 393 * time and the in_tstamp value 394 */ 395 delta = (in_tstamp - phc_time_lo); 396 397 /* Do not assume that the in_tstamp is always more recent than the 398 * cached PHC time. If the delta is large, it indicates that the 399 * in_tstamp was taken in the past, and should be converted 400 * forward. 401 */ 402 if (delta > (U32_MAX / 2)) { 403 /* reverse the delta calculation here */ 404 delta = (phc_time_lo - in_tstamp); 405 ns = cached_phc_time - delta; 406 } else { 407 ns = cached_phc_time + delta; 408 } 409 410 return ns; 411 } 412 413 /** 414 * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds 415 * @pf: Board private structure 416 * @in_tstamp: Ingress/egress 40b timestamp value 417 * 418 * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal 419 * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit. 420 * 421 * *--------------------------------------------------------------* 422 * | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v | 423 * *--------------------------------------------------------------* 424 * 425 * The low bit is an indicator of whether the timestamp is valid. The next 426 * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow, 427 * and the remaining 32 bits are the lower 32 bits of the PHC timer. 428 * 429 * It is assumed that the caller verifies the timestamp is valid prior to 430 * calling this function. 431 * 432 * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC 433 * time stored in the device private PTP structure as the basis for timestamp 434 * extension. 435 * 436 * See ice_ptp_extend_32b_ts for a detailed explanation of the extension 437 * algorithm. 438 */ 439 static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp) 440 { 441 const u64 mask = GENMASK_ULL(31, 0); 442 unsigned long discard_time; 443 444 /* Discard the hardware timestamp if the cached PHC time is too old */ 445 discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000); 446 if (time_is_before_jiffies(discard_time)) { 447 pf->ptp.tx_hwtstamp_discarded++; 448 return 0; 449 } 450 451 return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time, 452 (in_tstamp >> 8) & mask); 453 } 454 455 /** 456 * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps 457 * @tx: the PTP Tx timestamp tracker to check 458 * 459 * Check that a given PTP Tx timestamp tracker is up, i.e. that it is ready 460 * to accept new timestamp requests. 461 * 462 * Assumes the tx->lock spinlock is already held. 
463 */ 464 static bool 465 ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx) 466 { 467 lockdep_assert_held(&tx->lock); 468 469 return tx->init && !tx->calibrating; 470 } 471 472 /** 473 * ice_ptp_req_tx_single_tstamp - Request Tx timestamp for a port from FW 474 * @tx: the PTP Tx timestamp tracker 475 * @idx: index of the timestamp to request 476 */ 477 void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx) 478 { 479 struct ice_e810_params *params; 480 struct ice_ptp_port *ptp_port; 481 unsigned long flags; 482 struct sk_buff *skb; 483 struct ice_pf *pf; 484 485 if (!tx->init) 486 return; 487 488 ptp_port = container_of(tx, struct ice_ptp_port, tx); 489 pf = ptp_port_to_pf(ptp_port); 490 params = &pf->hw.ptp.phy.e810; 491 492 /* Drop packets which have waited for more than 2 seconds */ 493 if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) { 494 /* Count the number of Tx timestamps that timed out */ 495 pf->ptp.tx_hwtstamp_timeouts++; 496 497 skb = tx->tstamps[idx].skb; 498 tx->tstamps[idx].skb = NULL; 499 clear_bit(idx, tx->in_use); 500 501 dev_kfree_skb_any(skb); 502 return; 503 } 504 505 ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx); 506 507 spin_lock_irqsave(¶ms->atqbal_wq.lock, flags); 508 509 params->atqbal_flags |= ATQBAL_FLAGS_INTR_IN_PROGRESS; 510 511 /* Write TS index to read to the PF register so the FW can read it */ 512 wr32(&pf->hw, REG_LL_PROXY_H, 513 REG_LL_PROXY_H_TS_INTR_ENA | FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) | 514 REG_LL_PROXY_H_EXEC); 515 tx->last_ll_ts_idx_read = idx; 516 517 spin_unlock_irqrestore(¶ms->atqbal_wq.lock, flags); 518 } 519 520 /** 521 * ice_ptp_complete_tx_single_tstamp - Complete Tx timestamp for a port 522 * @tx: the PTP Tx timestamp tracker 523 */ 524 void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx) 525 { 526 struct skb_shared_hwtstamps shhwtstamps = {}; 527 u8 idx = tx->last_ll_ts_idx_read; 528 struct ice_e810_params *params; 529 struct ice_ptp_port *ptp_port; 530 u64 raw_tstamp, tstamp; 531 bool drop_ts = false; 532 struct sk_buff *skb; 533 unsigned long flags; 534 struct device *dev; 535 struct ice_pf *pf; 536 u32 reg_ll_high; 537 538 if (!tx->init || tx->last_ll_ts_idx_read < 0) 539 return; 540 541 ptp_port = container_of(tx, struct ice_ptp_port, tx); 542 pf = ptp_port_to_pf(ptp_port); 543 dev = ice_pf_to_dev(pf); 544 params = &pf->hw.ptp.phy.e810; 545 546 ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx); 547 548 spin_lock_irqsave(¶ms->atqbal_wq.lock, flags); 549 550 if (!(params->atqbal_flags & ATQBAL_FLAGS_INTR_IN_PROGRESS)) 551 dev_dbg(dev, "%s: low latency interrupt request not in progress?\n", 552 __func__); 553 554 /* Read the low 32 bit value */ 555 raw_tstamp = rd32(&pf->hw, REG_LL_PROXY_L); 556 /* Read the status together with high TS part */ 557 reg_ll_high = rd32(&pf->hw, REG_LL_PROXY_H); 558 559 /* Wake up threads waiting on low latency interface */ 560 params->atqbal_flags &= ~ATQBAL_FLAGS_INTR_IN_PROGRESS; 561 562 wake_up_locked(¶ms->atqbal_wq); 563 564 spin_unlock_irqrestore(¶ms->atqbal_wq.lock, flags); 565 566 /* When the bit is cleared, the TS is ready in the register */ 567 if (reg_ll_high & REG_LL_PROXY_H_EXEC) { 568 dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready"); 569 return; 570 } 571 572 /* High 8 bit value of the TS is on the bits 16:23 */ 573 raw_tstamp |= ((u64)FIELD_GET(REG_LL_PROXY_H_TS_HIGH, reg_ll_high)) << 32; 574 575 /* Devices using this interface always verify the timestamp differs 576 * relative to the last cached timestamp value. 
577 */ 578 if (raw_tstamp == tx->tstamps[idx].cached_tstamp) 579 return; 580 581 tx->tstamps[idx].cached_tstamp = raw_tstamp; 582 clear_bit(idx, tx->in_use); 583 skb = tx->tstamps[idx].skb; 584 tx->tstamps[idx].skb = NULL; 585 if (test_and_clear_bit(idx, tx->stale)) 586 drop_ts = true; 587 588 if (!skb) 589 return; 590 591 if (drop_ts) { 592 dev_kfree_skb_any(skb); 593 return; 594 } 595 596 /* Extend the timestamp using cached PHC time */ 597 tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp); 598 if (tstamp) { 599 shhwtstamps.hwtstamp = ns_to_ktime(tstamp); 600 ice_trace(tx_tstamp_complete, skb, idx); 601 } 602 603 skb_tstamp_tx(skb, &shhwtstamps); 604 dev_kfree_skb_any(skb); 605 } 606 607 /** 608 * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port 609 * @tx: the PTP Tx timestamp tracker 610 * 611 * Process timestamps captured by the PHY associated with this port. To do 612 * this, loop over each index with a waiting skb. 613 * 614 * If a given index has a valid timestamp, perform the following steps: 615 * 616 * 1) check that the timestamp request is not stale 617 * 2) check that a timestamp is ready and available in the PHY memory bank 618 * 3) read and copy the timestamp out of the PHY register 619 * 4) unlock the index by clearing the associated in_use bit 620 * 5) check if the timestamp is stale, and discard if so 621 * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value 622 * 7) send this 64 bit timestamp to the stack 623 * 624 * Note that we do not hold the tracking lock while reading the Tx timestamp. 625 * This is because reading the timestamp requires taking a mutex that might 626 * sleep. 627 * 628 * The only place where we set in_use is when a new timestamp is initiated 629 * with a slot index. This is only called in the hard xmit routine where an 630 * SKB has a request flag set. The only places where we clear this bit is this 631 * function, or during teardown when the Tx timestamp tracker is being 632 * removed. A timestamp index will never be re-used until the in_use bit for 633 * that index is cleared. 634 * 635 * If a Tx thread starts a new timestamp, we might not begin processing it 636 * right away but we will notice it at the end when we re-queue the task. 637 * 638 * If a Tx thread starts a new timestamp just after this function exits, the 639 * interrupt for that timestamp should re-trigger this function once 640 * a timestamp is ready. 641 * 642 * In cases where the PTP hardware clock was directly adjusted, some 643 * timestamps may not be able to safely use the timestamp extension math. In 644 * this case, software will set the stale bit for any outstanding Tx 645 * timestamps when the clock is adjusted. Then this function will discard 646 * those captured timestamps instead of sending them to the stack. 647 * 648 * If a Tx packet has been waiting for more than 2 seconds, it is not possible 649 * to correctly extend the timestamp using the cached PHC time. It is 650 * extremely unlikely that a packet will ever take this long to timestamp. If 651 * we detect a Tx timestamp request that has waited for this long we assume 652 * the packet will never be sent by hardware and discard it without reading 653 * the timestamp register. 
654 */ 655 static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx) 656 { 657 struct ice_ptp_port *ptp_port; 658 unsigned long flags; 659 struct ice_pf *pf; 660 struct ice_hw *hw; 661 u64 tstamp_ready; 662 bool link_up; 663 int err; 664 u8 idx; 665 666 ptp_port = container_of(tx, struct ice_ptp_port, tx); 667 pf = ptp_port_to_pf(ptp_port); 668 hw = &pf->hw; 669 670 /* Read the Tx ready status first */ 671 if (tx->has_ready_bitmap) { 672 err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready); 673 if (err) 674 return; 675 } 676 677 /* Drop packets if the link went down */ 678 link_up = ptp_port->link_up; 679 680 for_each_set_bit(idx, tx->in_use, tx->len) { 681 struct skb_shared_hwtstamps shhwtstamps = {}; 682 u8 phy_idx = idx + tx->offset; 683 u64 raw_tstamp = 0, tstamp; 684 bool drop_ts = !link_up; 685 struct sk_buff *skb; 686 687 /* Drop packets which have waited for more than 2 seconds */ 688 if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) { 689 drop_ts = true; 690 691 /* Count the number of Tx timestamps that timed out */ 692 pf->ptp.tx_hwtstamp_timeouts++; 693 } 694 695 /* Only read a timestamp from the PHY if its marked as ready 696 * by the tstamp_ready register. This avoids unnecessary 697 * reading of timestamps which are not yet valid. This is 698 * important as we must read all timestamps which are valid 699 * and only timestamps which are valid during each interrupt. 700 * If we do not, the hardware logic for generating a new 701 * interrupt can get stuck on some devices. 702 */ 703 if (tx->has_ready_bitmap && 704 !(tstamp_ready & BIT_ULL(phy_idx))) { 705 if (drop_ts) 706 goto skip_ts_read; 707 708 continue; 709 } 710 711 ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx); 712 713 err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp); 714 if (err && !drop_ts) 715 continue; 716 717 ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx); 718 719 /* For PHYs which don't implement a proper timestamp ready 720 * bitmap, verify that the timestamp value is different 721 * from the last cached timestamp. If it is not, skip this for 722 * now assuming it hasn't yet been captured by hardware. 723 */ 724 if (!drop_ts && !tx->has_ready_bitmap && 725 raw_tstamp == tx->tstamps[idx].cached_tstamp) 726 continue; 727 728 /* Discard any timestamp value without the valid bit set */ 729 if (!(raw_tstamp & ICE_PTP_TS_VALID)) 730 drop_ts = true; 731 732 skip_ts_read: 733 spin_lock_irqsave(&tx->lock, flags); 734 if (!tx->has_ready_bitmap && raw_tstamp) 735 tx->tstamps[idx].cached_tstamp = raw_tstamp; 736 clear_bit(idx, tx->in_use); 737 skb = tx->tstamps[idx].skb; 738 tx->tstamps[idx].skb = NULL; 739 if (test_and_clear_bit(idx, tx->stale)) 740 drop_ts = true; 741 spin_unlock_irqrestore(&tx->lock, flags); 742 743 /* It is unlikely but possible that the SKB will have been 744 * flushed at this point due to link change or teardown. 
745 */ 746 if (!skb) 747 continue; 748 749 if (drop_ts) { 750 dev_kfree_skb_any(skb); 751 continue; 752 } 753 754 /* Extend the timestamp using cached PHC time */ 755 tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp); 756 if (tstamp) { 757 shhwtstamps.hwtstamp = ns_to_ktime(tstamp); 758 ice_trace(tx_tstamp_complete, skb, idx); 759 } 760 761 skb_tstamp_tx(skb, &shhwtstamps); 762 dev_kfree_skb_any(skb); 763 } 764 } 765 766 /** 767 * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device 768 * @pf: Board private structure 769 */ 770 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf) 771 { 772 struct ice_ptp_port *port; 773 unsigned int i; 774 775 mutex_lock(&pf->adapter->ports.lock); 776 list_for_each_entry(port, &pf->adapter->ports.ports, list_node) { 777 struct ice_ptp_tx *tx = &port->tx; 778 779 if (!tx || !tx->init) 780 continue; 781 782 ice_ptp_process_tx_tstamp(tx); 783 } 784 mutex_unlock(&pf->adapter->ports.lock); 785 786 for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) { 787 u64 tstamp_ready; 788 int err; 789 790 /* Read the Tx ready status first */ 791 err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready); 792 if (err) 793 break; 794 else if (tstamp_ready) 795 return ICE_TX_TSTAMP_WORK_PENDING; 796 } 797 798 return ICE_TX_TSTAMP_WORK_DONE; 799 } 800 801 /** 802 * ice_ptp_tx_tstamp - Process Tx timestamps for this function. 803 * @tx: Tx tracking structure to initialize 804 * 805 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete 806 * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise. 807 */ 808 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx) 809 { 810 bool more_timestamps; 811 unsigned long flags; 812 813 if (!tx->init) 814 return ICE_TX_TSTAMP_WORK_DONE; 815 816 /* Process the Tx timestamp tracker */ 817 ice_ptp_process_tx_tstamp(tx); 818 819 /* Check if there are outstanding Tx timestamps */ 820 spin_lock_irqsave(&tx->lock, flags); 821 more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len); 822 spin_unlock_irqrestore(&tx->lock, flags); 823 824 if (more_timestamps) 825 return ICE_TX_TSTAMP_WORK_PENDING; 826 827 return ICE_TX_TSTAMP_WORK_DONE; 828 } 829 830 /** 831 * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps 832 * @tx: Tx tracking structure to initialize 833 * 834 * Assumes that the length has already been initialized. Do not call directly, 835 * use the ice_ptp_init_tx_* instead. 836 */ 837 static int 838 ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx) 839 { 840 unsigned long *in_use, *stale; 841 struct ice_tx_tstamp *tstamps; 842 843 tstamps = kcalloc(tx->len, sizeof(*tstamps), GFP_KERNEL); 844 in_use = bitmap_zalloc(tx->len, GFP_KERNEL); 845 stale = bitmap_zalloc(tx->len, GFP_KERNEL); 846 847 if (!tstamps || !in_use || !stale) { 848 kfree(tstamps); 849 bitmap_free(in_use); 850 bitmap_free(stale); 851 852 return -ENOMEM; 853 } 854 855 tx->tstamps = tstamps; 856 tx->in_use = in_use; 857 tx->stale = stale; 858 tx->init = 1; 859 tx->last_ll_ts_idx_read = -1; 860 861 spin_lock_init(&tx->lock); 862 863 return 0; 864 } 865 866 /** 867 * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker 868 * @pf: Board private structure 869 * @tx: the tracker to flush 870 * 871 * Called during teardown when a Tx tracker is being removed. 
872 */ 873 static void 874 ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) 875 { 876 struct ice_hw *hw = &pf->hw; 877 unsigned long flags; 878 u64 tstamp_ready; 879 int err; 880 u8 idx; 881 882 err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready); 883 if (err) { 884 dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n", 885 tx->block, err); 886 887 /* If we fail to read the Tx timestamp ready bitmap just 888 * skip clearing the PHY timestamps. 889 */ 890 tstamp_ready = 0; 891 } 892 893 for_each_set_bit(idx, tx->in_use, tx->len) { 894 u8 phy_idx = idx + tx->offset; 895 struct sk_buff *skb; 896 897 /* In case this timestamp is ready, we need to clear it. */ 898 if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx))) 899 ice_clear_phy_tstamp(hw, tx->block, phy_idx); 900 901 spin_lock_irqsave(&tx->lock, flags); 902 skb = tx->tstamps[idx].skb; 903 tx->tstamps[idx].skb = NULL; 904 clear_bit(idx, tx->in_use); 905 clear_bit(idx, tx->stale); 906 spin_unlock_irqrestore(&tx->lock, flags); 907 908 /* Count the number of Tx timestamps flushed */ 909 pf->ptp.tx_hwtstamp_flushed++; 910 911 /* Free the SKB after we've cleared the bit */ 912 dev_kfree_skb_any(skb); 913 } 914 } 915 916 /** 917 * ice_ptp_mark_tx_tracker_stale - Mark unfinished timestamps as stale 918 * @tx: the tracker to mark 919 * 920 * Mark currently outstanding Tx timestamps as stale. This prevents sending 921 * their timestamp value to the stack. This is required to prevent extending 922 * the 40bit hardware timestamp incorrectly. 923 * 924 * This should be called when the PTP clock is modified such as after a set 925 * time request. 926 */ 927 static void 928 ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx) 929 { 930 unsigned long flags; 931 932 spin_lock_irqsave(&tx->lock, flags); 933 bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len); 934 spin_unlock_irqrestore(&tx->lock, flags); 935 } 936 937 /** 938 * ice_ptp_flush_all_tx_tracker - Flush all timestamp trackers on this clock 939 * @pf: Board private structure 940 * 941 * Called by the clock owner to flush all the Tx timestamp trackers associated 942 * with the clock. 943 */ 944 static void 945 ice_ptp_flush_all_tx_tracker(struct ice_pf *pf) 946 { 947 struct ice_ptp_port *port; 948 949 list_for_each_entry(port, &pf->adapter->ports.ports, list_node) 950 ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx); 951 } 952 953 /** 954 * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker 955 * @pf: Board private structure 956 * @tx: Tx tracking structure to release 957 * 958 * Free memory associated with the Tx timestamp tracker. 959 */ 960 static void 961 ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) 962 { 963 unsigned long flags; 964 965 spin_lock_irqsave(&tx->lock, flags); 966 tx->init = 0; 967 spin_unlock_irqrestore(&tx->lock, flags); 968 969 /* wait for potentially outstanding interrupt to complete */ 970 synchronize_irq(pf->oicr_irq.virq); 971 972 ice_ptp_flush_tx_tracker(pf, tx); 973 974 kfree(tx->tstamps); 975 tx->tstamps = NULL; 976 977 bitmap_free(tx->in_use); 978 tx->in_use = NULL; 979 980 bitmap_free(tx->stale); 981 tx->stale = NULL; 982 983 tx->len = 0; 984 } 985 986 /** 987 * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps 988 * @pf: Board private structure 989 * @tx: the Tx tracking structure to initialize 990 * @port: the port this structure tracks 991 * 992 * Initialize the Tx timestamp tracker for this port. 
For generic MAC devices, 993 * the timestamp block is shared for all ports in the same quad. To avoid 994 * ports using the same timestamp index, logically break the block of 995 * registers into chunks based on the port number. 996 * 997 * Return: 0 on success, -ENOMEM when out of memory 998 */ 999 static int ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, 1000 u8 port) 1001 { 1002 tx->block = ICE_GET_QUAD_NUM(port); 1003 tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X; 1004 tx->len = INDEX_PER_PORT_E82X; 1005 tx->has_ready_bitmap = 1; 1006 1007 return ice_ptp_alloc_tx_tracker(tx); 1008 } 1009 1010 /** 1011 * ice_ptp_init_tx - Initialize tracking for Tx timestamps 1012 * @pf: Board private structure 1013 * @tx: the Tx tracking structure to initialize 1014 * @port: the port this structure tracks 1015 * 1016 * Initialize the Tx timestamp tracker for this PF. For all PHYs except E82X, 1017 * each port has its own block of timestamps, independent of the other ports. 1018 * 1019 * Return: 0 on success, -ENOMEM when out of memory 1020 */ 1021 static int ice_ptp_init_tx(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) 1022 { 1023 tx->block = port; 1024 tx->offset = 0; 1025 tx->len = INDEX_PER_PORT; 1026 1027 /* The E810 PHY does not provide a timestamp ready bitmap. Instead, 1028 * verify new timestamps against cached copy of the last read 1029 * timestamp. 1030 */ 1031 tx->has_ready_bitmap = pf->hw.mac_type != ICE_MAC_E810; 1032 1033 return ice_ptp_alloc_tx_tracker(tx); 1034 } 1035 1036 /** 1037 * ice_ptp_update_cached_phctime - Update the cached PHC time values 1038 * @pf: Board specific private structure 1039 * 1040 * This function updates the system time values which are cached in the PF 1041 * structure and the Rx rings. 1042 * 1043 * This function must be called periodically to ensure that the cached value 1044 * is never more than 2 seconds old. 1045 * 1046 * Note that the cached copy in the PF PTP structure is always updated, even 1047 * if we can't update the copy in the Rx rings. 
1048 * 1049 * Return: 1050 * * 0 - OK, successfully updated 1051 * * -EAGAIN - PF was busy, need to reschedule the update 1052 */ 1053 static int ice_ptp_update_cached_phctime(struct ice_pf *pf) 1054 { 1055 struct device *dev = ice_pf_to_dev(pf); 1056 unsigned long update_before; 1057 u64 systime; 1058 int i; 1059 1060 update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000); 1061 if (pf->ptp.cached_phc_time && 1062 time_is_before_jiffies(update_before)) { 1063 unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies; 1064 1065 dev_warn(dev, "%u msecs passed between update to cached PHC time\n", 1066 jiffies_to_msecs(time_taken)); 1067 pf->ptp.late_cached_phc_updates++; 1068 } 1069 1070 /* Read the current PHC time */ 1071 systime = ice_ptp_read_src_clk_reg(pf, NULL); 1072 1073 /* Update the cached PHC time stored in the PF structure */ 1074 WRITE_ONCE(pf->ptp.cached_phc_time, systime); 1075 WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies); 1076 1077 if (test_and_set_bit(ICE_CFG_BUSY, pf->state)) 1078 return -EAGAIN; 1079 1080 ice_for_each_vsi(pf, i) { 1081 struct ice_vsi *vsi = pf->vsi[i]; 1082 int j; 1083 1084 if (!vsi) 1085 continue; 1086 1087 if (vsi->type != ICE_VSI_PF) 1088 continue; 1089 1090 ice_for_each_rxq(vsi, j) { 1091 if (!vsi->rx_rings[j]) 1092 continue; 1093 WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime); 1094 } 1095 } 1096 clear_bit(ICE_CFG_BUSY, pf->state); 1097 1098 return 0; 1099 } 1100 1101 /** 1102 * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update 1103 * @pf: Board specific private structure 1104 * 1105 * This function must be called when the cached PHC time is no longer valid, 1106 * such as after a time adjustment. It marks any currently outstanding Tx 1107 * timestamps as stale and updates the cached PHC time for both the PF and Rx 1108 * rings. 1109 * 1110 * If updating the PHC time cannot be done immediately, a warning message is 1111 * logged and the work item is scheduled immediately to minimize the window 1112 * with a wrong cached timestamp. 1113 */ 1114 static void ice_ptp_reset_cached_phctime(struct ice_pf *pf) 1115 { 1116 struct device *dev = ice_pf_to_dev(pf); 1117 int err; 1118 1119 /* Update the cached PHC time immediately if possible, otherwise 1120 * schedule the work item to execute soon. 1121 */ 1122 err = ice_ptp_update_cached_phctime(pf); 1123 if (err) { 1124 /* If another thread is updating the Rx rings, we won't 1125 * properly reset them here. This could lead to reporting of 1126 * invalid timestamps, but there isn't much we can do. 1127 */ 1128 dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n", 1129 __func__); 1130 1131 /* Queue the work item to update the Rx rings when possible */ 1132 kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 1133 msecs_to_jiffies(10)); 1134 } 1135 1136 /* Mark any outstanding timestamps as stale, since they might have 1137 * been captured in hardware before the time update. This could lead 1138 * to us extending them with the wrong cached value resulting in 1139 * incorrect timestamp values. 1140 */ 1141 ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx); 1142 } 1143 1144 /** 1145 * ice_ptp_write_init - Set PHC time to provided value 1146 * @pf: Board private structure 1147 * @ts: timespec structure that holds the new time value 1148 * 1149 * Set the PHC time to the specified time provided in the timespec. 
1150 */ 1151 static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts) 1152 { 1153 u64 ns = timespec64_to_ns(ts); 1154 struct ice_hw *hw = &pf->hw; 1155 1156 return ice_ptp_init_time(hw, ns); 1157 } 1158 1159 /** 1160 * ice_ptp_write_adj - Adjust PHC clock time atomically 1161 * @pf: Board private structure 1162 * @adj: Adjustment in nanoseconds 1163 * 1164 * Perform an atomic adjustment of the PHC time by the specified number of 1165 * nanoseconds. 1166 */ 1167 static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj) 1168 { 1169 struct ice_hw *hw = &pf->hw; 1170 1171 return ice_ptp_adj_clock(hw, adj); 1172 } 1173 1174 /** 1175 * ice_base_incval - Get base timer increment value 1176 * @pf: Board private structure 1177 * 1178 * Look up the base timer increment value for this device. The base increment 1179 * value is used to define the nominal clock tick rate. This increment value 1180 * is programmed during device initialization. It is also used as the basis 1181 * for calculating adjustments using scaled_ppm. 1182 */ 1183 static u64 ice_base_incval(struct ice_pf *pf) 1184 { 1185 struct ice_hw *hw = &pf->hw; 1186 u64 incval; 1187 1188 incval = ice_get_base_incval(hw); 1189 1190 dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n", 1191 incval); 1192 1193 return incval; 1194 } 1195 1196 /** 1197 * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state 1198 * @port: PTP port for which Tx FIFO is checked 1199 */ 1200 static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port) 1201 { 1202 int offs = port->port_num % ICE_PORTS_PER_QUAD; 1203 int quad = ICE_GET_QUAD_NUM(port->port_num); 1204 struct ice_pf *pf; 1205 struct ice_hw *hw; 1206 u32 val, phy_sts; 1207 int err; 1208 1209 pf = ptp_port_to_pf(port); 1210 hw = &pf->hw; 1211 1212 if (port->tx_fifo_busy_cnt == FIFO_OK) 1213 return 0; 1214 1215 /* need to read FIFO state */ 1216 if (offs == 0 || offs == 1) 1217 err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS, 1218 &val); 1219 else 1220 err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS, 1221 &val); 1222 1223 if (err) { 1224 dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n", 1225 port->port_num, err); 1226 return err; 1227 } 1228 1229 if (offs & 0x1) 1230 phy_sts = FIELD_GET(Q_REG_FIFO13_M, val); 1231 else 1232 phy_sts = FIELD_GET(Q_REG_FIFO02_M, val); 1233 1234 if (phy_sts & FIFO_EMPTY) { 1235 port->tx_fifo_busy_cnt = FIFO_OK; 1236 return 0; 1237 } 1238 1239 port->tx_fifo_busy_cnt++; 1240 1241 dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n", 1242 port->tx_fifo_busy_cnt, port->port_num); 1243 1244 if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) { 1245 dev_dbg(ice_pf_to_dev(pf), 1246 "Port %d Tx FIFO still not empty; resetting quad %d\n", 1247 port->port_num, quad); 1248 ice_ptp_reset_ts_memory_quad_e82x(hw, quad); 1249 port->tx_fifo_busy_cnt = FIFO_OK; 1250 return 0; 1251 } 1252 1253 return -EAGAIN; 1254 } 1255 1256 /** 1257 * ice_ptp_wait_for_offsets - Check for valid Tx and Rx offsets 1258 * @work: Pointer to the kthread_work structure for this task 1259 * 1260 * Check whether hardware has completed measuring the Tx and Rx offset values 1261 * used to configure and enable vernier timestamp calibration. 1262 * 1263 * Once the offset in either direction is measured, configure the associated 1264 * registers with the calibrated offset values and enable timestamping. The Tx 1265 * and Rx directions are configured independently as soon as their associated 1266 * offsets are known. 
1267 * 1268 * This function reschedules itself until both Tx and Rx calibration have 1269 * completed. 1270 */ 1271 static void ice_ptp_wait_for_offsets(struct kthread_work *work) 1272 { 1273 struct ice_ptp_port *port; 1274 struct ice_pf *pf; 1275 struct ice_hw *hw; 1276 int tx_err; 1277 int rx_err; 1278 1279 port = container_of(work, struct ice_ptp_port, ov_work.work); 1280 pf = ptp_port_to_pf(port); 1281 hw = &pf->hw; 1282 1283 if (ice_is_reset_in_progress(pf->state)) { 1284 /* wait for device driver to complete reset */ 1285 kthread_queue_delayed_work(pf->ptp.kworker, 1286 &port->ov_work, 1287 msecs_to_jiffies(100)); 1288 return; 1289 } 1290 1291 tx_err = ice_ptp_check_tx_fifo(port); 1292 if (!tx_err) 1293 tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num); 1294 rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num); 1295 if (tx_err || rx_err) { 1296 /* Tx and/or Rx offset not yet configured, try again later */ 1297 kthread_queue_delayed_work(pf->ptp.kworker, 1298 &port->ov_work, 1299 msecs_to_jiffies(100)); 1300 return; 1301 } 1302 } 1303 1304 /** 1305 * ice_ptp_port_phy_stop - Stop timestamping for a PHY port 1306 * @ptp_port: PTP port to stop 1307 */ 1308 static int 1309 ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port) 1310 { 1311 struct ice_pf *pf = ptp_port_to_pf(ptp_port); 1312 u8 port = ptp_port->port_num; 1313 struct ice_hw *hw = &pf->hw; 1314 int err; 1315 1316 mutex_lock(&ptp_port->ps_lock); 1317 1318 switch (hw->mac_type) { 1319 case ICE_MAC_E810: 1320 case ICE_MAC_E830: 1321 err = 0; 1322 break; 1323 case ICE_MAC_GENERIC: 1324 kthread_cancel_delayed_work_sync(&ptp_port->ov_work); 1325 1326 err = ice_stop_phy_timer_e82x(hw, port, true); 1327 break; 1328 case ICE_MAC_GENERIC_3K_E825: 1329 err = ice_stop_phy_timer_eth56g(hw, port, true); 1330 break; 1331 default: 1332 err = -ENODEV; 1333 } 1334 if (err && err != -EBUSY) 1335 dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n", 1336 port, err); 1337 1338 mutex_unlock(&ptp_port->ps_lock); 1339 1340 return err; 1341 } 1342 1343 /** 1344 * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping 1345 * @ptp_port: PTP port for which the PHY start is set 1346 * 1347 * Start the PHY timestamping block, and initiate Vernier timestamping 1348 * calibration. If timestamping cannot be calibrated (such as if link is down) 1349 * then disable the timestamping block instead. 
1350 */ 1351 static int 1352 ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port) 1353 { 1354 struct ice_pf *pf = ptp_port_to_pf(ptp_port); 1355 u8 port = ptp_port->port_num; 1356 struct ice_hw *hw = &pf->hw; 1357 unsigned long flags; 1358 int err; 1359 1360 if (!ptp_port->link_up) 1361 return ice_ptp_port_phy_stop(ptp_port); 1362 1363 mutex_lock(&ptp_port->ps_lock); 1364 1365 switch (hw->mac_type) { 1366 case ICE_MAC_E810: 1367 case ICE_MAC_E830: 1368 err = 0; 1369 break; 1370 case ICE_MAC_GENERIC: 1371 /* Start the PHY timer in Vernier mode */ 1372 kthread_cancel_delayed_work_sync(&ptp_port->ov_work); 1373 1374 /* temporarily disable Tx timestamps while calibrating 1375 * PHY offset 1376 */ 1377 spin_lock_irqsave(&ptp_port->tx.lock, flags); 1378 ptp_port->tx.calibrating = true; 1379 spin_unlock_irqrestore(&ptp_port->tx.lock, flags); 1380 ptp_port->tx_fifo_busy_cnt = 0; 1381 1382 /* Start the PHY timer in Vernier mode */ 1383 err = ice_start_phy_timer_e82x(hw, port); 1384 if (err) 1385 break; 1386 1387 /* Enable Tx timestamps right away */ 1388 spin_lock_irqsave(&ptp_port->tx.lock, flags); 1389 ptp_port->tx.calibrating = false; 1390 spin_unlock_irqrestore(&ptp_port->tx.lock, flags); 1391 1392 kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 1393 0); 1394 break; 1395 case ICE_MAC_GENERIC_3K_E825: 1396 err = ice_start_phy_timer_eth56g(hw, port); 1397 break; 1398 default: 1399 err = -ENODEV; 1400 } 1401 1402 if (err) 1403 dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n", 1404 port, err); 1405 1406 mutex_unlock(&ptp_port->ps_lock); 1407 1408 return err; 1409 } 1410 1411 /** 1412 * ice_ptp_link_change - Reconfigure PTP after link status change 1413 * @pf: Board private structure 1414 * @linkup: Link is up or down 1415 */ 1416 void ice_ptp_link_change(struct ice_pf *pf, bool linkup) 1417 { 1418 struct ice_ptp_port *ptp_port; 1419 struct ice_hw *hw = &pf->hw; 1420 1421 if (pf->ptp.state != ICE_PTP_READY) 1422 return; 1423 1424 ptp_port = &pf->ptp.port; 1425 1426 /* Update cached link status for this port immediately */ 1427 ptp_port->link_up = linkup; 1428 1429 /* Skip HW writes if reset is in progress */ 1430 if (pf->hw.reset_ongoing) 1431 return; 1432 1433 switch (hw->mac_type) { 1434 case ICE_MAC_E810: 1435 case ICE_MAC_E830: 1436 /* Do not reconfigure E810 or E830 PHY */ 1437 return; 1438 case ICE_MAC_GENERIC: 1439 case ICE_MAC_GENERIC_3K_E825: 1440 ice_ptp_port_phy_restart(ptp_port); 1441 return; 1442 default: 1443 dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__); 1444 } 1445 } 1446 1447 /** 1448 * ice_ptp_cfg_phy_interrupt - Configure PHY interrupt settings 1449 * @pf: PF private structure 1450 * @ena: bool value to enable or disable interrupt 1451 * @threshold: Minimum number of packets at which intr is triggered 1452 * 1453 * Utility function to configure all the PHY interrupt settings, including 1454 * whether the PHY interrupt is enabled, and what threshold to use. Also 1455 * configures The E82X timestamp owner to react to interrupts from all PHYs. 
1456 * 1457 * Return: 0 on success, -EOPNOTSUPP when PHY model incorrect, other error codes 1458 * when failed to configure PHY interrupt for E82X 1459 */ 1460 static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold) 1461 { 1462 struct device *dev = ice_pf_to_dev(pf); 1463 struct ice_hw *hw = &pf->hw; 1464 1465 ice_ptp_reset_ts_memory(hw); 1466 1467 switch (hw->mac_type) { 1468 case ICE_MAC_E810: 1469 case ICE_MAC_E830: 1470 return 0; 1471 case ICE_MAC_GENERIC: { 1472 int quad; 1473 1474 for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports); 1475 quad++) { 1476 int err; 1477 1478 err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold); 1479 if (err) { 1480 dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n", 1481 quad, err); 1482 return err; 1483 } 1484 } 1485 1486 return 0; 1487 } 1488 case ICE_MAC_GENERIC_3K_E825: { 1489 int port; 1490 1491 for (port = 0; port < hw->ptp.num_lports; port++) { 1492 int err; 1493 1494 err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold); 1495 if (err) { 1496 dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n", 1497 port, err); 1498 return err; 1499 } 1500 } 1501 1502 return 0; 1503 } 1504 case ICE_MAC_UNKNOWN: 1505 default: 1506 return -EOPNOTSUPP; 1507 } 1508 } 1509 1510 /** 1511 * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block 1512 * @pf: Board private structure 1513 */ 1514 static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf) 1515 { 1516 ice_ptp_port_phy_restart(&pf->ptp.port); 1517 } 1518 1519 /** 1520 * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping 1521 * @pf: Board private structure 1522 */ 1523 static void ice_ptp_restart_all_phy(struct ice_pf *pf) 1524 { 1525 struct list_head *entry; 1526 1527 list_for_each(entry, &pf->adapter->ports.ports) { 1528 struct ice_ptp_port *port = list_entry(entry, 1529 struct ice_ptp_port, 1530 list_node); 1531 1532 if (port->link_up) 1533 ice_ptp_port_phy_restart(port); 1534 } 1535 } 1536 1537 /** 1538 * ice_ptp_adjfine - Adjust clock increment rate 1539 * @info: the driver's PTP info structure 1540 * @scaled_ppm: Parts per million with 16-bit fractional field 1541 * 1542 * Adjust the frequency of the clock by the indicated scaled ppm from the 1543 * base frequency. 
1544 */ 1545 static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm) 1546 { 1547 struct ice_pf *pf = ptp_info_to_pf(info); 1548 struct ice_hw *hw = &pf->hw; 1549 u64 incval; 1550 int err; 1551 1552 incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm); 1553 err = ice_ptp_write_incval_locked(hw, incval); 1554 if (err) { 1555 dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n", 1556 err); 1557 return -EIO; 1558 } 1559 1560 return 0; 1561 } 1562 1563 /** 1564 * ice_ptp_extts_event - Process PTP external clock event 1565 * @pf: Board private structure 1566 */ 1567 void ice_ptp_extts_event(struct ice_pf *pf) 1568 { 1569 struct ptp_clock_event event; 1570 struct ice_hw *hw = &pf->hw; 1571 u8 chan, tmr_idx; 1572 u32 hi, lo; 1573 1574 /* Don't process timestamp events if PTP is not ready */ 1575 if (pf->ptp.state != ICE_PTP_READY) 1576 return; 1577 1578 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 1579 /* Event time is captured by one of the two matched registers 1580 * GLTSYN_EVNT_L: 32 LSB of sampled time event 1581 * GLTSYN_EVNT_H: 32 MSB of sampled time event 1582 * Event is defined in GLTSYN_EVNT_0 register 1583 */ 1584 for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) { 1585 int pin_desc_idx; 1586 1587 /* Check if channel is enabled */ 1588 if (!(pf->ptp.ext_ts_irq & (1 << chan))) 1589 continue; 1590 1591 lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx)); 1592 hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx)); 1593 event.timestamp = (u64)hi << 32 | lo; 1594 1595 /* Add delay compensation */ 1596 pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan); 1597 if (pin_desc_idx >= 0) { 1598 const struct ice_ptp_pin_desc *desc; 1599 1600 desc = &pf->ptp.ice_pin_desc[pin_desc_idx]; 1601 event.timestamp -= desc->delay[0]; 1602 } 1603 1604 event.type = PTP_CLOCK_EXTTS; 1605 event.index = chan; 1606 pf->ptp.ext_ts_irq &= ~(1 << chan); 1607 ptp_clock_event(pf->ptp.clock, &event); 1608 } 1609 } 1610 1611 /** 1612 * ice_ptp_cfg_extts - Configure EXTTS pin and channel 1613 * @pf: Board private structure 1614 * @rq: External timestamp request 1615 * @on: Enable/disable flag 1616 * 1617 * Configure an external timestamp event on the requested channel. 
1618 * 1619 * Return: 0 on success, negative error code otherwise 1620 */ 1621 static int ice_ptp_cfg_extts(struct ice_pf *pf, struct ptp_extts_request *rq, 1622 int on) 1623 { 1624 u32 aux_reg, gpio_reg, irq_reg; 1625 struct ice_hw *hw = &pf->hw; 1626 unsigned int chan, gpio_pin; 1627 int pin_desc_idx; 1628 u8 tmr_idx; 1629 1630 /* Reject requests with unsupported flags */ 1631 1632 if (rq->flags & ~(PTP_ENABLE_FEATURE | 1633 PTP_RISING_EDGE | 1634 PTP_FALLING_EDGE | 1635 PTP_STRICT_FLAGS)) 1636 return -EOPNOTSUPP; 1637 1638 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 1639 chan = rq->index; 1640 1641 pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan); 1642 if (pin_desc_idx < 0) 1643 return -EIO; 1644 1645 gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[0]; 1646 irq_reg = rd32(hw, PFINT_OICR_ENA); 1647 1648 if (on) { 1649 /* Enable the interrupt */ 1650 irq_reg |= PFINT_OICR_TSYN_EVNT_M; 1651 aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M; 1652 1653 #define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE BIT(0) 1654 #define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE BIT(1) 1655 1656 /* set event level to requested edge */ 1657 if (rq->flags & PTP_FALLING_EDGE) 1658 aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE; 1659 if (rq->flags & PTP_RISING_EDGE) 1660 aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE; 1661 1662 /* Write GPIO CTL reg. 1663 * 0x1 is input sampled by EVENT register(channel) 1664 * + num_in_channels * tmr_idx 1665 */ 1666 gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, 1667 1 + chan + (tmr_idx * 3)); 1668 } else { 1669 bool last_enabled = true; 1670 1671 /* clear the values we set to reset defaults */ 1672 aux_reg = 0; 1673 gpio_reg = 0; 1674 1675 for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++) 1676 if ((pf->ptp.extts_rqs[i].flags & 1677 PTP_ENABLE_FEATURE) && 1678 i != chan) { 1679 last_enabled = false; 1680 } 1681 1682 if (last_enabled) 1683 irq_reg &= ~PFINT_OICR_TSYN_EVNT_M; 1684 } 1685 1686 wr32(hw, PFINT_OICR_ENA, irq_reg); 1687 wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg); 1688 wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg); 1689 1690 return 0; 1691 } 1692 1693 /** 1694 * ice_ptp_disable_all_extts - Disable all EXTTS channels 1695 * @pf: Board private structure 1696 */ 1697 static void ice_ptp_disable_all_extts(struct ice_pf *pf) 1698 { 1699 for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts ; i++) 1700 if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE) 1701 ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i], 1702 false); 1703 1704 synchronize_irq(pf->oicr_irq.virq); 1705 } 1706 1707 /** 1708 * ice_ptp_enable_all_extts - Enable all EXTTS channels 1709 * @pf: Board private structure 1710 * 1711 * Called during reset to restore user configuration. 1712 */ 1713 static void ice_ptp_enable_all_extts(struct ice_pf *pf) 1714 { 1715 for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts ; i++) 1716 if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE) 1717 ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i], 1718 true); 1719 } 1720 1721 /** 1722 * ice_ptp_write_perout - Write periodic wave parameters to HW 1723 * @hw: pointer to the HW struct 1724 * @chan: target channel 1725 * @gpio_pin: target GPIO pin 1726 * @start: target time to start periodic output 1727 * @period: target period 1728 * 1729 * Return: 0 on success, negative error code otherwise 1730 */ 1731 static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan, 1732 unsigned int gpio_pin, u64 start, u64 period) 1733 { 1734 1735 u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 1736 u32 val = 0; 1737 1738 /* 0. 
Reset mode & out_en in AUX_OUT */ 1739 wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0); 1740 1741 if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) { 1742 int err; 1743 1744 /* Enable/disable CGU 1PPS output for E825C */ 1745 err = ice_cgu_cfg_pps_out(hw, !!period); 1746 if (err) 1747 return err; 1748 } 1749 1750 /* 1. Write perout with half of required period value. 1751 * HW toggles output when source clock hits the TGT and then adds 1752 * GLTSYN_CLKO value to the target, so it ends up with 50% duty cycle. 1753 */ 1754 period >>= 1; 1755 1756 /* For proper operation, GLTSYN_CLKO must be larger than clock tick and 1757 * period has to fit in 32 bit register. 1758 */ 1759 #define MIN_PULSE 3 1760 if (!!period && (period <= MIN_PULSE || period > U32_MAX)) { 1761 dev_err(ice_hw_to_dev(hw), "CLK period ticks must be >= %d && <= 2^32", 1762 MIN_PULSE); 1763 return -EIO; 1764 } 1765 1766 wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period)); 1767 1768 /* 2. Write TARGET time */ 1769 wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start)); 1770 wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start)); 1771 1772 /* 3. Write AUX_OUT register */ 1773 if (!!period) 1774 val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M; 1775 wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val); 1776 1777 /* 4. write GPIO CTL reg */ 1778 val = GLGEN_GPIO_CTL_PIN_DIR_M; 1779 if (!!period) 1780 val |= FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, 1781 8 + chan + (tmr_idx * 4)); 1782 1783 wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val); 1784 ice_flush(hw); 1785 1786 return 0; 1787 } 1788 1789 /** 1790 * ice_ptp_cfg_perout - Configure clock to generate periodic wave 1791 * @pf: Board private structure 1792 * @rq: Periodic output request 1793 * @on: Enable/disable flag 1794 * 1795 * Configure the internal clock generator modules to generate the clock wave of 1796 * specified period. 1797 * 1798 * Return: 0 on success, negative error code otherwise 1799 */ 1800 static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq, 1801 int on) 1802 { 1803 unsigned int gpio_pin, prop_delay_ns; 1804 u64 clk, period, start, phase; 1805 struct ice_hw *hw = &pf->hw; 1806 int pin_desc_idx; 1807 1808 if (rq->flags & ~PTP_PEROUT_PHASE) 1809 return -EOPNOTSUPP; 1810 1811 pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_PEROUT, rq->index); 1812 if (pin_desc_idx < 0) 1813 return -EIO; 1814 1815 gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1]; 1816 prop_delay_ns = pf->ptp.ice_pin_desc[pin_desc_idx].delay[1]; 1817 period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec; 1818 1819 /* If we're disabling the output or period is 0, clear out CLKO and TGT 1820 * and keep output level low. 
1821 */ 1822 if (!on || !period) 1823 return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0); 1824 1825 if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 && 1826 period != NSEC_PER_SEC && hw->mac_type == ICE_MAC_GENERIC) { 1827 dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n"); 1828 return -EOPNOTSUPP; 1829 } 1830 1831 if (period & 0x1) { 1832 dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n"); 1833 return -EIO; 1834 } 1835 1836 start = rq->start.sec * NSEC_PER_SEC + rq->start.nsec; 1837 1838 /* If PTP_PEROUT_PHASE is set, rq has phase instead of start time */ 1839 if (rq->flags & PTP_PEROUT_PHASE) 1840 phase = start; 1841 else 1842 div64_u64_rem(start, period, &phase); 1843 1844 /* If we have only phase or start time is in the past, start the timer 1845 * at the next multiple of period, maintaining phase at least 0.5 second 1846 * from now, so we have time to write it to HW. 1847 */ 1848 clk = ice_ptp_read_src_clk_reg(pf, NULL) + NSEC_PER_MSEC * 500; 1849 if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns) 1850 start = div64_u64(clk + period - 1, period) * period + phase; 1851 1852 /* Compensate for propagation delay from the generator to the pin. */ 1853 start -= prop_delay_ns; 1854 1855 return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period); 1856 } 1857 1858 /** 1859 * ice_ptp_disable_all_perout - Disable all currently configured outputs 1860 * @pf: Board private structure 1861 * 1862 * Disable all currently configured clock outputs. This is necessary before 1863 * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_perout to 1864 * re-enable the clocks again. 1865 */ 1866 static void ice_ptp_disable_all_perout(struct ice_pf *pf) 1867 { 1868 for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++) 1869 if (pf->ptp.perout_rqs[i].period.sec || 1870 pf->ptp.perout_rqs[i].period.nsec) 1871 ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i], 1872 false); 1873 } 1874 1875 /** 1876 * ice_ptp_enable_all_perout - Enable all configured periodic clock outputs 1877 * @pf: Board private structure 1878 * 1879 * Enable all currently configured clock outputs. Use this after 1880 * ice_ptp_disable_all_perout to reconfigure the output signals according to 1881 * their configuration. 
1882  */
1883 static void ice_ptp_enable_all_perout(struct ice_pf *pf)
1884 {
1885 	for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++)
1886 		if (pf->ptp.perout_rqs[i].period.sec ||
1887 		    pf->ptp.perout_rqs[i].period.nsec)
1888 			ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i],
1889 					   true);
1890 }
1891 
1892 /**
1893  * ice_ptp_disable_shared_pin - Disable enabled pin that shares GPIO
1894  * @pf: Board private structure
1895  * @pin: Pin index
1896  * @func: Assigned function
1897  *
1898  * Return: 0 on success, negative error code otherwise
1899  */
1900 static int ice_ptp_disable_shared_pin(struct ice_pf *pf, unsigned int pin,
1901 				      enum ptp_pin_function func)
1902 {
1903 	unsigned int gpio_pin;
1904 
1905 	switch (func) {
1906 	case PTP_PF_PEROUT:
1907 		gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[1];
1908 		break;
1909 	case PTP_PF_EXTTS:
1910 		gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[0];
1911 		break;
1912 	default:
1913 		return -EOPNOTSUPP;
1914 	}
1915 
1916 	for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) {
1917 		struct ptp_pin_desc *pin_desc = &pf->ptp.pin_desc[i];
1918 		unsigned int chan = pin_desc->chan;
1919 
1920 		/* Skip pin idx from the request */
1921 		if (i == pin)
1922 			continue;
1923 
1924 		if (pin_desc->func == PTP_PF_PEROUT &&
1925 		    pf->ptp.ice_pin_desc[i].gpio[1] == gpio_pin) {
1926 			pf->ptp.perout_rqs[chan].period.sec = 0;
1927 			pf->ptp.perout_rqs[chan].period.nsec = 0;
1928 			pin_desc->func = PTP_PF_NONE;
1929 			pin_desc->chan = 0;
1930 			dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared output GPIO pin %u\n",
1931 				i, gpio_pin);
1932 			return ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[chan],
1933 						  false);
1934 		} else if (pin_desc->func == PTP_PF_EXTTS &&
1935 			   pf->ptp.ice_pin_desc[i].gpio[0] == gpio_pin) {
1936 			pf->ptp.extts_rqs[chan].flags &= ~PTP_ENABLE_FEATURE;
1937 			pin_desc->func = PTP_PF_NONE;
1938 			pin_desc->chan = 0;
1939 			dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared input GPIO pin %u\n",
1940 				i, gpio_pin);
1941 			return ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[chan],
1942 						 false);
1943 		}
1944 	}
1945 
1946 	return 0;
1947 }
1948 
1949 /**
1950  * ice_verify_pin - verify if pin supports requested pin function
1951  * @info: the driver's PTP info structure
1952  * @pin: Pin index
1953  * @func: Assigned function
1954  * @chan: Assigned channel
1955  *
1956  * Return: 0 on success, -EOPNOTSUPP when function is not supported.
1957  */
1958 static int ice_verify_pin(struct ptp_clock_info *info, unsigned int pin,
1959 			  enum ptp_pin_function func, unsigned int chan)
1960 {
1961 	struct ice_pf *pf = ptp_info_to_pf(info);
1962 	const struct ice_ptp_pin_desc *pin_desc;
1963 
1964 	pin_desc = &pf->ptp.ice_pin_desc[pin];
1965 
1966 	/* Is assigned function allowed?
 */
1967 	switch (func) {
1968 	case PTP_PF_EXTTS:
1969 		if (pin_desc->gpio[0] < 0)
1970 			return -EOPNOTSUPP;
1971 		break;
1972 	case PTP_PF_PEROUT:
1973 		if (pin_desc->gpio[1] < 0)
1974 			return -EOPNOTSUPP;
1975 		break;
1976 	case PTP_PF_NONE:
1977 		break;
1978 	case PTP_PF_PHYSYNC:
1979 	default:
1980 		return -EOPNOTSUPP;
1981 	}
1982 
1983 	/* On adapters with SMA_CTRL disable other pins that share same GPIO */
1984 	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
1985 		ice_ptp_disable_shared_pin(pf, pin, func);
1986 		pf->ptp.pin_desc[pin].func = func;
1987 		pf->ptp.pin_desc[pin].chan = chan;
1988 		return ice_ptp_set_sma_cfg(pf);
1989 	}
1990 
1991 	return 0;
1992 }
1993 
1994 /**
1995  * ice_ptp_gpio_enable - Enable/disable ancillary features of PHC
1996  * @info: The driver's PTP info structure
1997  * @rq: The requested feature to change
1998  * @on: Enable/disable flag
1999  *
2000  * Return: 0 on success, negative error code otherwise
2001  */
2002 static int ice_ptp_gpio_enable(struct ptp_clock_info *info,
2003 			       struct ptp_clock_request *rq, int on)
2004 {
2005 	struct ice_pf *pf = ptp_info_to_pf(info);
2006 	int err;
2007 
2008 	switch (rq->type) {
2009 	case PTP_CLK_REQ_PEROUT:
2010 	{
2011 		struct ptp_perout_request *cached =
2012 			&pf->ptp.perout_rqs[rq->perout.index];
2013 
2014 		err = ice_ptp_cfg_perout(pf, &rq->perout, on);
2015 		if (!err) {
2016 			*cached = rq->perout;
2017 		} else {
2018 			cached->period.sec = 0;
2019 			cached->period.nsec = 0;
2020 		}
2021 		return err;
2022 	}
2023 	case PTP_CLK_REQ_EXTTS:
2024 	{
2025 		struct ptp_extts_request *cached =
2026 			&pf->ptp.extts_rqs[rq->extts.index];
2027 
2028 		err = ice_ptp_cfg_extts(pf, &rq->extts, on);
2029 		if (!err)
2030 			*cached = rq->extts;
2031 		else
2032 			cached->flags &= ~PTP_ENABLE_FEATURE;
2033 		return err;
2034 	}
2035 	default:
2036 		return -EOPNOTSUPP;
2037 	}
2038 }
2039 
2040 /**
2041  * ice_ptp_gettimex64 - Get the time of the clock
2042  * @info: the driver's PTP info structure
2043  * @ts: timespec64 structure to hold the current time value
2044  * @sts: Optional parameter for holding a pair of system timestamps from
2045  *       the system clock. Will be ignored if NULL is given.
2046  *
2047  * Read the device clock and return the correct value in ns, after converting it
2048  * into a timespec struct.
2049  */
2050 static int
2051 ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
2052 		   struct ptp_system_timestamp *sts)
2053 {
2054 	struct ice_pf *pf = ptp_info_to_pf(info);
2055 	u64 time_ns;
2056 
2057 	time_ns = ice_ptp_read_src_clk_reg(pf, sts);
2058 	*ts = ns_to_timespec64(time_ns);
2059 	return 0;
2060 }
2061 
2062 /**
2063  * ice_ptp_settime64 - Set the time of the clock
2064  * @info: the driver's PTP info structure
2065  * @ts: timespec64 structure that holds the new time value
2066  *
2067  * Set the device clock to the user input value. The conversion from timespec
2068  * to ns happens in the write function.
2069  */
2070 static int
2071 ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
2072 {
2073 	struct ice_pf *pf = ptp_info_to_pf(info);
2074 	struct timespec64 ts64 = *ts;
2075 	struct ice_hw *hw = &pf->hw;
2076 	int err;
2077 
2078 	/* For Vernier mode on E82X, we need to recalibrate after new settime.
2079 	 * Start with marking timestamps as invalid.
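	 * (Vernier time stamping on E82X calibrates per-port offsets against
	 * the PHC, and a step change in the clock invalidates that
	 * calibration; ice_ptp_restart_all_phy() below re-runs it once the
	 * new time has been written.)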
2080 */ 2081 if (hw->mac_type == ICE_MAC_GENERIC) { 2082 err = ice_ptp_clear_phy_offset_ready_e82x(hw); 2083 if (err) 2084 dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n"); 2085 } 2086 2087 if (!ice_ptp_lock(hw)) { 2088 err = -EBUSY; 2089 goto exit; 2090 } 2091 2092 /* Disable periodic outputs */ 2093 ice_ptp_disable_all_perout(pf); 2094 2095 err = ice_ptp_write_init(pf, &ts64); 2096 ice_ptp_unlock(hw); 2097 2098 if (!err) 2099 ice_ptp_reset_cached_phctime(pf); 2100 2101 /* Reenable periodic outputs */ 2102 ice_ptp_enable_all_perout(pf); 2103 2104 /* Recalibrate and re-enable timestamp blocks for E822/E823 */ 2105 if (hw->mac_type == ICE_MAC_GENERIC) 2106 ice_ptp_restart_all_phy(pf); 2107 exit: 2108 if (err) { 2109 dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err); 2110 return err; 2111 } 2112 2113 return 0; 2114 } 2115 2116 /** 2117 * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment 2118 * @info: the driver's PTP info structure 2119 * @delta: Offset in nanoseconds to adjust the time by 2120 */ 2121 static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta) 2122 { 2123 struct timespec64 now, then; 2124 int ret; 2125 2126 then = ns_to_timespec64(delta); 2127 ret = ice_ptp_gettimex64(info, &now, NULL); 2128 if (ret) 2129 return ret; 2130 now = timespec64_add(now, then); 2131 2132 return ice_ptp_settime64(info, (const struct timespec64 *)&now); 2133 } 2134 2135 /** 2136 * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta 2137 * @info: the driver's PTP info structure 2138 * @delta: Offset in nanoseconds to adjust the time by 2139 */ 2140 static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta) 2141 { 2142 struct ice_pf *pf = ptp_info_to_pf(info); 2143 struct ice_hw *hw = &pf->hw; 2144 struct device *dev; 2145 int err; 2146 2147 dev = ice_pf_to_dev(pf); 2148 2149 /* Hardware only supports atomic adjustments using signed 32-bit 2150 * integers. For any adjustment outside this range, perform 2151 * a non-atomic get->adjust->set flow. 
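	 * S32_MAX nanoseconds is roughly +/- 2.147 s, so, for example, a
	 * 5 second step takes the non-atomic path; that path reads the
	 * clock, adds the delta and writes the result back, so time keeps
	 * advancing between the read and the write.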
2152 */ 2153 if (delta > S32_MAX || delta < S32_MIN) { 2154 dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta); 2155 return ice_ptp_adjtime_nonatomic(info, delta); 2156 } 2157 2158 if (!ice_ptp_lock(hw)) { 2159 dev_err(dev, "PTP failed to acquire semaphore in adjtime\n"); 2160 return -EBUSY; 2161 } 2162 2163 /* Disable periodic outputs */ 2164 ice_ptp_disable_all_perout(pf); 2165 2166 err = ice_ptp_write_adj(pf, delta); 2167 2168 /* Reenable periodic outputs */ 2169 ice_ptp_enable_all_perout(pf); 2170 2171 ice_ptp_unlock(hw); 2172 2173 if (err) { 2174 dev_err(dev, "PTP failed to adjust time, err %d\n", err); 2175 return err; 2176 } 2177 2178 ice_ptp_reset_cached_phctime(pf); 2179 2180 return 0; 2181 } 2182 2183 /** 2184 * struct ice_crosststamp_cfg - Device cross timestamp configuration 2185 * @lock_reg: The hardware semaphore lock to use 2186 * @lock_busy: Bit in the semaphore lock indicating the lock is busy 2187 * @ctl_reg: The hardware register to request cross timestamp 2188 * @ctl_active: Bit in the control register to request cross timestamp 2189 * @art_time_l: Lower 32-bits of ART system time 2190 * @art_time_h: Upper 32-bits of ART system time 2191 * @dev_time_l: Lower 32-bits of device time (per timer index) 2192 * @dev_time_h: Upper 32-bits of device time (per timer index) 2193 */ 2194 struct ice_crosststamp_cfg { 2195 /* HW semaphore lock register */ 2196 u32 lock_reg; 2197 u32 lock_busy; 2198 2199 /* Capture control register */ 2200 u32 ctl_reg; 2201 u32 ctl_active; 2202 2203 /* Time storage */ 2204 u32 art_time_l; 2205 u32 art_time_h; 2206 u32 dev_time_l[2]; 2207 u32 dev_time_h[2]; 2208 }; 2209 2210 static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e82x = { 2211 .lock_reg = PFHH_SEM, 2212 .lock_busy = PFHH_SEM_BUSY_M, 2213 .ctl_reg = GLHH_ART_CTL, 2214 .ctl_active = GLHH_ART_CTL_ACTIVE_M, 2215 .art_time_l = GLHH_ART_TIME_L, 2216 .art_time_h = GLHH_ART_TIME_H, 2217 .dev_time_l[0] = GLTSYN_HHTIME_L(0), 2218 .dev_time_h[0] = GLTSYN_HHTIME_H(0), 2219 .dev_time_l[1] = GLTSYN_HHTIME_L(1), 2220 .dev_time_h[1] = GLTSYN_HHTIME_H(1), 2221 }; 2222 2223 #ifdef CONFIG_ICE_HWTS 2224 static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e830 = { 2225 .lock_reg = E830_PFPTM_SEM, 2226 .lock_busy = E830_PFPTM_SEM_BUSY_M, 2227 .ctl_reg = E830_GLPTM_ART_CTL, 2228 .ctl_active = E830_GLPTM_ART_CTL_ACTIVE_M, 2229 .art_time_l = E830_GLPTM_ART_TIME_L, 2230 .art_time_h = E830_GLPTM_ART_TIME_H, 2231 .dev_time_l[0] = E830_GLTSYN_PTMTIME_L(0), 2232 .dev_time_h[0] = E830_GLTSYN_PTMTIME_H(0), 2233 .dev_time_l[1] = E830_GLTSYN_PTMTIME_L(1), 2234 .dev_time_h[1] = E830_GLTSYN_PTMTIME_H(1), 2235 }; 2236 2237 #endif /* CONFIG_ICE_HWTS */ 2238 /** 2239 * struct ice_crosststamp_ctx - Device cross timestamp context 2240 * @snapshot: snapshot of system clocks for historic interpolation 2241 * @pf: pointer to the PF private structure 2242 * @cfg: pointer to hardware configuration for cross timestamp 2243 */ 2244 struct ice_crosststamp_ctx { 2245 struct system_time_snapshot snapshot; 2246 struct ice_pf *pf; 2247 const struct ice_crosststamp_cfg *cfg; 2248 }; 2249 2250 /** 2251 * ice_capture_crosststamp - Capture a device/system cross timestamp 2252 * @device: Current device time 2253 * @system: System counter value read synchronously with device time 2254 * @__ctx: Context passed from ice_ptp_getcrosststamp 2255 * 2256 * Read device and system (ART) clock simultaneously and return the corrected 2257 * clock values in ns. 
2258 * 2259 * Return: zero on success, or a negative error code on failure. 2260 */ 2261 static int ice_capture_crosststamp(ktime_t *device, 2262 struct system_counterval_t *system, 2263 void *__ctx) 2264 { 2265 struct ice_crosststamp_ctx *ctx = __ctx; 2266 const struct ice_crosststamp_cfg *cfg; 2267 u32 lock, ctl, ts_lo, ts_hi, tmr_idx; 2268 struct ice_pf *pf; 2269 struct ice_hw *hw; 2270 int err; 2271 u64 ts; 2272 2273 cfg = ctx->cfg; 2274 pf = ctx->pf; 2275 hw = &pf->hw; 2276 2277 tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; 2278 if (tmr_idx > 1) 2279 return -EINVAL; 2280 2281 /* Poll until we obtain the cross-timestamp hardware semaphore */ 2282 err = rd32_poll_timeout(hw, cfg->lock_reg, lock, 2283 !(lock & cfg->lock_busy), 2284 10 * USEC_PER_MSEC, 50 * USEC_PER_MSEC); 2285 if (err) { 2286 dev_err(ice_pf_to_dev(pf), "PTP failed to get cross timestamp lock\n"); 2287 return -EBUSY; 2288 } 2289 2290 /* Snapshot system time for historic interpolation */ 2291 ktime_get_snapshot(&ctx->snapshot); 2292 2293 /* Program cmd to master timer */ 2294 ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME); 2295 2296 /* Start the ART and device clock sync sequence */ 2297 ctl = rd32(hw, cfg->ctl_reg); 2298 ctl |= cfg->ctl_active; 2299 wr32(hw, cfg->ctl_reg, ctl); 2300 2301 /* Poll until hardware completes the capture */ 2302 err = rd32_poll_timeout(hw, cfg->ctl_reg, ctl, !(ctl & cfg->ctl_active), 2303 5, 20 * USEC_PER_MSEC); 2304 if (err) 2305 goto err_timeout; 2306 2307 /* Read ART system time */ 2308 ts_lo = rd32(hw, cfg->art_time_l); 2309 ts_hi = rd32(hw, cfg->art_time_h); 2310 ts = ((u64)ts_hi << 32) | ts_lo; 2311 system->cycles = ts; 2312 system->cs_id = CSID_X86_ART; 2313 2314 /* Read Device source clock time */ 2315 ts_lo = rd32(hw, cfg->dev_time_l[tmr_idx]); 2316 ts_hi = rd32(hw, cfg->dev_time_h[tmr_idx]); 2317 ts = ((u64)ts_hi << 32) | ts_lo; 2318 *device = ns_to_ktime(ts); 2319 2320 err_timeout: 2321 /* Clear the master timer */ 2322 ice_ptp_src_cmd(hw, ICE_PTP_NOP); 2323 2324 /* Release HW lock */ 2325 lock = rd32(hw, cfg->lock_reg); 2326 lock &= ~cfg->lock_busy; 2327 wr32(hw, cfg->lock_reg, lock); 2328 2329 return err; 2330 } 2331 2332 /** 2333 * ice_ptp_getcrosststamp - Capture a device cross timestamp 2334 * @info: the driver's PTP info structure 2335 * @cts: The memory to fill the cross timestamp info 2336 * 2337 * Capture a cross timestamp between the ART and the device PTP hardware 2338 * clock. Fill the cross timestamp information and report it back to the 2339 * caller. 2340 * 2341 * In order to correctly correlate the ART timestamp back to the TSC time, the 2342 * CPU must have X86_FEATURE_TSC_KNOWN_FREQ. 2343 * 2344 * Return: zero on success, or a negative error code on failure. 
2345 */ 2346 static int ice_ptp_getcrosststamp(struct ptp_clock_info *info, 2347 struct system_device_crosststamp *cts) 2348 { 2349 struct ice_pf *pf = ptp_info_to_pf(info); 2350 struct ice_crosststamp_ctx ctx = { 2351 .pf = pf, 2352 }; 2353 2354 switch (pf->hw.mac_type) { 2355 case ICE_MAC_GENERIC: 2356 case ICE_MAC_GENERIC_3K_E825: 2357 ctx.cfg = &ice_crosststamp_cfg_e82x; 2358 break; 2359 #ifdef CONFIG_ICE_HWTS 2360 case ICE_MAC_E830: 2361 ctx.cfg = &ice_crosststamp_cfg_e830; 2362 break; 2363 #endif /* CONFIG_ICE_HWTS */ 2364 default: 2365 return -EOPNOTSUPP; 2366 } 2367 2368 return get_device_system_crosststamp(ice_capture_crosststamp, &ctx, 2369 &ctx.snapshot, cts); 2370 } 2371 2372 /** 2373 * ice_ptp_get_ts_config - ioctl interface to read the timestamping config 2374 * @pf: Board private structure 2375 * @ifr: ioctl data 2376 * 2377 * Copy the timestamping config to user buffer 2378 */ 2379 int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr) 2380 { 2381 struct hwtstamp_config *config; 2382 2383 if (pf->ptp.state != ICE_PTP_READY) 2384 return -EIO; 2385 2386 config = &pf->ptp.tstamp_config; 2387 2388 return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? 2389 -EFAULT : 0; 2390 } 2391 2392 /** 2393 * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode 2394 * @pf: Board private structure 2395 * @config: hwtstamp settings requested or saved 2396 */ 2397 static int 2398 ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config) 2399 { 2400 switch (config->tx_type) { 2401 case HWTSTAMP_TX_OFF: 2402 pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF; 2403 break; 2404 case HWTSTAMP_TX_ON: 2405 pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON; 2406 break; 2407 default: 2408 return -ERANGE; 2409 } 2410 2411 switch (config->rx_filter) { 2412 case HWTSTAMP_FILTER_NONE: 2413 pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; 2414 break; 2415 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2416 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2417 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2418 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2419 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2420 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2421 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2422 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2423 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2424 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2425 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2426 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2427 case HWTSTAMP_FILTER_NTP_ALL: 2428 case HWTSTAMP_FILTER_ALL: 2429 pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL; 2430 break; 2431 default: 2432 return -ERANGE; 2433 } 2434 2435 /* Immediately update the device timestamping mode */ 2436 ice_ptp_restore_timestamp_mode(pf); 2437 2438 return 0; 2439 } 2440 2441 /** 2442 * ice_ptp_set_ts_config - ioctl interface to control the timestamping 2443 * @pf: Board private structure 2444 * @ifr: ioctl data 2445 * 2446 * Get the user config and store it 2447 */ 2448 int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr) 2449 { 2450 struct hwtstamp_config config; 2451 int err; 2452 2453 if (pf->ptp.state != ICE_PTP_READY) 2454 return -EAGAIN; 2455 2456 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 2457 return -EFAULT; 2458 2459 err = ice_ptp_set_timestamp_mode(pf, &config); 2460 if (err) 2461 return err; 2462 2463 /* Return the actual configuration set */ 2464 config = pf->ptp.tstamp_config; 2465 2466 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
2467 -EFAULT : 0; 2468 } 2469 2470 /** 2471 * ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns 2472 * @rx_desc: Receive descriptor 2473 * @pkt_ctx: Packet context to get the cached time 2474 * 2475 * The driver receives a notification in the receive descriptor with timestamp. 2476 */ 2477 u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc, 2478 const struct ice_pkt_ctx *pkt_ctx) 2479 { 2480 u64 ts_ns, cached_time; 2481 u32 ts_high; 2482 2483 if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID)) 2484 return 0; 2485 2486 cached_time = READ_ONCE(pkt_ctx->cached_phctime); 2487 2488 /* Do not report a timestamp if we don't have a cached PHC time */ 2489 if (!cached_time) 2490 return 0; 2491 2492 /* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached 2493 * PHC value, rather than accessing the PF. This also allows us to 2494 * simply pass the upper 32bits of nanoseconds directly. Calling 2495 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these 2496 * bits itself. 2497 */ 2498 ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high); 2499 ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high); 2500 2501 return ts_ns; 2502 } 2503 2504 /** 2505 * ice_ptp_setup_pin_cfg - setup PTP pin_config structure 2506 * @pf: Board private structure 2507 */ 2508 static void ice_ptp_setup_pin_cfg(struct ice_pf *pf) 2509 { 2510 for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) { 2511 const struct ice_ptp_pin_desc *desc = &pf->ptp.ice_pin_desc[i]; 2512 struct ptp_pin_desc *pin = &pf->ptp.pin_desc[i]; 2513 const char *name = NULL; 2514 2515 if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) 2516 name = ice_pin_names[desc->name_idx]; 2517 else if (desc->name_idx != GPIO_NA) 2518 name = ice_pin_names_nvm[desc->name_idx]; 2519 if (name) 2520 strscpy(pin->name, name, sizeof(pin->name)); 2521 2522 pin->index = i; 2523 } 2524 2525 pf->ptp.info.pin_config = pf->ptp.pin_desc; 2526 } 2527 2528 /** 2529 * ice_ptp_disable_pins - Disable PTP pins 2530 * @pf: pointer to the PF structure 2531 * 2532 * Disable the OS access to the SMA pins. Called to clear out the OS 2533 * indications of pin support when we fail to setup the SMA control register. 2534 */ 2535 static void ice_ptp_disable_pins(struct ice_pf *pf) 2536 { 2537 struct ptp_clock_info *info = &pf->ptp.info; 2538 2539 dev_warn(ice_pf_to_dev(pf), "Failed to configure PTP pin control\n"); 2540 2541 info->enable = NULL; 2542 info->verify = NULL; 2543 info->n_pins = 0; 2544 info->n_ext_ts = 0; 2545 info->n_per_out = 0; 2546 } 2547 2548 /** 2549 * ice_ptp_parse_sdp_entries - update ice_ptp_pin_desc structure from NVM 2550 * @pf: pointer to the PF structure 2551 * @entries: SDP connection section from NVM 2552 * @num_entries: number of valid entries in sdp_entries 2553 * @pins: PTP pins array to update 2554 * 2555 * Return: 0 on success, negative error code otherwise. 
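 *
 * Each SDP section entry encodes a bitmap of the logical pins it applies
 * to, a direction flag and the SDP/GPIO number; input entries populate
 * gpio[0] (used for EXTTS) and output entries gpio[1] (used for PEROUT),
 * so a single named pin may collect both directions from separate entries.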
2556  */
2557 static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries,
2558 				     unsigned int num_entries,
2559 				     struct ice_ptp_pin_desc *pins)
2560 {
2561 	unsigned int n_pins = 0;
2562 	unsigned int i;
2563 
2564 	/* Setup ice_pin_desc array */
2565 	for (i = 0; i < ICE_N_PINS_MAX; i++) {
2566 		pins[i].name_idx = -1;
2567 		pins[i].gpio[0] = -1;
2568 		pins[i].gpio[1] = -1;
2569 	}
2570 
2571 	for (i = 0; i < num_entries; i++) {
2572 		u16 entry = le16_to_cpu(entries[i]);
2573 		DECLARE_BITMAP(bitmap, GPIO_NA);
2574 		unsigned int bitmap_idx;
2575 		bool dir;
2576 		u16 gpio;
2577 
2578 		*bitmap = FIELD_GET(ICE_AQC_NVM_SDP_AC_PIN_M, entry);
2579 		dir = !!FIELD_GET(ICE_AQC_NVM_SDP_AC_DIR_M, entry);
2580 		gpio = FIELD_GET(ICE_AQC_NVM_SDP_AC_SDP_NUM_M, entry);
2581 		for_each_set_bit(bitmap_idx, bitmap, GPIO_NA + 1) {
2582 			unsigned int idx;
2583 
2584 			/* Check if entry's pin bit is valid */
2585 			if (bitmap_idx >= NUM_PTP_PINS_NVM &&
2586 			    bitmap_idx != GPIO_NA)
2587 				continue;
2588 
2589 			/* Check if pin already exists */
2590 			for (idx = 0; idx < ICE_N_PINS_MAX; idx++)
2591 				if (pins[idx].name_idx == bitmap_idx)
2592 					break;
2593 
2594 			if (idx == ICE_N_PINS_MAX) {
2595 				/* Pin not found, setup its entry and name */
2596 				idx = n_pins++;
2597 				pins[idx].name_idx = bitmap_idx;
2598 				if (bitmap_idx == GPIO_NA)
2599 					strscpy(pf->ptp.pin_desc[idx].name,
2600 						ice_pin_names[gpio],
2601 						sizeof(pf->ptp.pin_desc[idx]
2602 						       .name));
2603 			}
2604 
2605 			/* Setup in/out GPIO number */
2606 			pins[idx].gpio[dir] = gpio;
2607 		}
2608 	}
2609 
2610 	for (i = 0; i < n_pins; i++) {
2611 		dev_dbg(ice_pf_to_dev(pf),
2612 			"NVM pin entry[%d] : name_idx %d gpio_out %d gpio_in %d\n",
2613 			i, pins[i].name_idx, pins[i].gpio[1], pins[i].gpio[0]);
2614 	}
2615 
2616 	pf->ptp.info.n_pins = n_pins;
2617 	return 0;
2618 }
2619 
2620 /**
2621  * ice_ptp_set_funcs_e82x - Set specialized functions for E82X support
2622  * @pf: Board private structure
2623  *
2624  * Assign functions to the PTP capabilities structure for E82X devices.
2625  * Functions which operate across all device families should be set directly
2626  * in ice_ptp_set_caps. Only add functions here which are distinct for E82X
2627  * devices.
2628  */
2629 static void ice_ptp_set_funcs_e82x(struct ice_pf *pf)
2630 {
2631 	pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
2632 
2633 	if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825) {
2634 		pf->ptp.ice_pin_desc = ice_pin_desc_e825c;
2635 		pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e825c);
2636 	} else {
2637 		pf->ptp.ice_pin_desc = ice_pin_desc_e82x;
2638 		pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e82x);
2639 	}
2640 	ice_ptp_setup_pin_cfg(pf);
2641 }
2642 
2643 /**
2644  * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
2645  * @pf: Board private structure
2646  *
2647  * Assign functions to the PTP capabilities structure for E810 devices.
2648  * Functions which operate across all device families should be set directly
2649  * in ice_ptp_set_caps. Only add functions here which are distinct for E810
2650  * devices.
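 *
 * The E810 pin layout is taken from the NVM SDP connection section when it
 * is present; otherwise the driver falls back to the static
 * ice_pin_desc_e810_sma or ice_pin_desc_e810 tables.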
2651  */
2652 static void ice_ptp_set_funcs_e810(struct ice_pf *pf)
2653 {
2654 	__le16 entries[ICE_AQC_NVM_SDP_AC_MAX_SIZE];
2655 	struct ice_ptp_pin_desc *desc = NULL;
2656 	struct ice_ptp *ptp = &pf->ptp;
2657 	unsigned int num_entries;
2658 	int err;
2659 
2660 	err = ice_ptp_read_sdp_ac(&pf->hw, entries, &num_entries);
2661 	if (err) {
2662 		/* SDP section does not exist in NVM or is corrupted */
2663 		if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
2664 			ptp->ice_pin_desc = ice_pin_desc_e810_sma;
2665 			ptp->info.n_pins =
2666 				ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810_sma);
2667 		} else {
2668 			pf->ptp.ice_pin_desc = ice_pin_desc_e810;
2669 			pf->ptp.info.n_pins =
2670 				ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810);
2671 			err = 0;
2672 		}
2673 	} else {
2674 		desc = devm_kcalloc(ice_pf_to_dev(pf), ICE_N_PINS_MAX,
2675 				    sizeof(struct ice_ptp_pin_desc),
2676 				    GFP_KERNEL);
2677 		if (!desc)
2678 			goto err;
2679 
2680 		err = ice_ptp_parse_sdp_entries(pf, entries, num_entries, desc);
2681 		if (err)
2682 			goto err;
2683 
2684 		ptp->ice_pin_desc = (const struct ice_ptp_pin_desc *)desc;
2685 	}
2686 
2687 	ptp->info.pin_config = ptp->pin_desc;
2688 	ice_ptp_setup_pin_cfg(pf);
2689 
2690 	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
2691 		err = ice_ptp_set_sma_cfg(pf);
2692 err:
2693 	if (err) {
2694 		devm_kfree(ice_pf_to_dev(pf), desc);
2695 		ice_ptp_disable_pins(pf);
2696 	}
2697 }
2698 
2699 /**
2700  * ice_ptp_set_funcs_e830 - Set specialized functions for E830 support
2701  * @pf: Board private structure
2702  *
2703  * Assign functions to the PTP capabilities structure for E830 devices.
2704  * Functions which operate across all device families should be set directly
2705  * in ice_ptp_set_caps. Only add functions here which are distinct for E830
2706  * devices.
2707  */
2708 static void ice_ptp_set_funcs_e830(struct ice_pf *pf)
2709 {
2710 #ifdef CONFIG_ICE_HWTS
2711 	if (pcie_ptm_enabled(pf->pdev) && boot_cpu_has(X86_FEATURE_ART))
2712 		pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
2713 
2714 #endif /* CONFIG_ICE_HWTS */
2715 	/* Rest of the config is the same as base E810 */
2716 	pf->ptp.ice_pin_desc = ice_pin_desc_e810;
2717 	pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810);
2718 	ice_ptp_setup_pin_cfg(pf);
2719 }
2720 
2721 /**
2722  * ice_ptp_set_caps - Set PTP capabilities
2723  * @pf: Board private structure
2724  */
2725 static void ice_ptp_set_caps(struct ice_pf *pf)
2726 {
2727 	struct ptp_clock_info *info = &pf->ptp.info;
2728 	struct device *dev = ice_pf_to_dev(pf);
2729 
2730 	snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
2731 		 dev_driver_string(dev), dev_name(dev));
2732 	info->owner = THIS_MODULE;
2733 	info->max_adj = 100000000;
2734 	info->adjtime = ice_ptp_adjtime;
2735 	info->adjfine = ice_ptp_adjfine;
2736 	info->gettimex64 = ice_ptp_gettimex64;
2737 	info->settime64 = ice_ptp_settime64;
2738 	info->n_per_out = GLTSYN_TGT_H_IDX_MAX;
2739 	info->n_ext_ts = GLTSYN_EVNT_H_IDX_MAX;
2740 	info->enable = ice_ptp_gpio_enable;
2741 	info->verify = ice_verify_pin;
2742 
2743 	switch (pf->hw.mac_type) {
2744 	case ICE_MAC_E810:
2745 		ice_ptp_set_funcs_e810(pf);
2746 		return;
2747 	case ICE_MAC_E830:
2748 		ice_ptp_set_funcs_e830(pf);
2749 		return;
2750 	case ICE_MAC_GENERIC:
2751 	case ICE_MAC_GENERIC_3K_E825:
2752 		ice_ptp_set_funcs_e82x(pf);
2753 		return;
2754 	default:
2755 		return;
2756 	}
2757 }
2758 
2759 /**
2760  * ice_ptp_create_clock - Create PTP clock device for userspace
2761  * @pf: Board private structure
2762  *
2763  * This function creates a new PTP clock device. It only creates one if we
2764  * don't already have one.
Will return error if it can't create one, but success 2765 * if we already have a device. Should be used by ice_ptp_init to create clock 2766 * initially, and prevent global resets from creating new clock devices. 2767 */ 2768 static long ice_ptp_create_clock(struct ice_pf *pf) 2769 { 2770 struct ptp_clock_info *info; 2771 struct device *dev; 2772 2773 /* No need to create a clock device if we already have one */ 2774 if (pf->ptp.clock) 2775 return 0; 2776 2777 ice_ptp_set_caps(pf); 2778 2779 info = &pf->ptp.info; 2780 dev = ice_pf_to_dev(pf); 2781 2782 /* Attempt to register the clock before enabling the hardware. */ 2783 pf->ptp.clock = ptp_clock_register(info, dev); 2784 if (IS_ERR(pf->ptp.clock)) { 2785 dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device"); 2786 return PTR_ERR(pf->ptp.clock); 2787 } 2788 2789 return 0; 2790 } 2791 2792 /** 2793 * ice_ptp_request_ts - Request an available Tx timestamp index 2794 * @tx: the PTP Tx timestamp tracker to request from 2795 * @skb: the SKB to associate with this timestamp request 2796 */ 2797 s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) 2798 { 2799 unsigned long flags; 2800 u8 idx; 2801 2802 spin_lock_irqsave(&tx->lock, flags); 2803 2804 /* Check that this tracker is accepting new timestamp requests */ 2805 if (!ice_ptp_is_tx_tracker_up(tx)) { 2806 spin_unlock_irqrestore(&tx->lock, flags); 2807 return -1; 2808 } 2809 2810 /* Find and set the first available index */ 2811 idx = find_next_zero_bit(tx->in_use, tx->len, 2812 tx->last_ll_ts_idx_read + 1); 2813 if (idx == tx->len) 2814 idx = find_first_zero_bit(tx->in_use, tx->len); 2815 2816 if (idx < tx->len) { 2817 /* We got a valid index that no other thread could have set. Store 2818 * a reference to the skb and the start time to allow discarding old 2819 * requests. 2820 */ 2821 set_bit(idx, tx->in_use); 2822 clear_bit(idx, tx->stale); 2823 tx->tstamps[idx].start = jiffies; 2824 tx->tstamps[idx].skb = skb_get(skb); 2825 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2826 ice_trace(tx_tstamp_request, skb, idx); 2827 } 2828 2829 spin_unlock_irqrestore(&tx->lock, flags); 2830 2831 /* return the appropriate PHY timestamp register index, -1 if no 2832 * indexes were available. 2833 */ 2834 if (idx >= tx->len) 2835 return -1; 2836 else 2837 return idx + tx->offset; 2838 } 2839 2840 /** 2841 * ice_ptp_process_ts - Process the PTP Tx timestamps 2842 * @pf: Board private structure 2843 * 2844 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx 2845 * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise. 
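 *
 * Called from the Tx timestamp interrupt handling paths; when
 * ICE_TX_TSTAMP_WORK_PENDING is returned, the caller typically re-fires the
 * interrupt (see the E830 handling in ice_ptp_ts_irq()) so the remaining
 * timestamps get read.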
2846  */
2847 enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
2848 {
2849 	switch (pf->ptp.tx_interrupt_mode) {
2850 	case ICE_PTP_TX_INTERRUPT_NONE:
2851 		/* This device has the clock owner handle timestamps for it */
2852 		return ICE_TX_TSTAMP_WORK_DONE;
2853 	case ICE_PTP_TX_INTERRUPT_SELF:
2854 		/* This device handles its own timestamps */
2855 		return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
2856 	case ICE_PTP_TX_INTERRUPT_ALL:
2857 		/* This device handles timestamps for all ports */
2858 		return ice_ptp_tx_tstamp_owner(pf);
2859 	default:
2860 		WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
2861 			  pf->ptp.tx_interrupt_mode);
2862 		return ICE_TX_TSTAMP_WORK_DONE;
2863 	}
2864 }
2865 
2866 /**
2867  * ice_ptp_ts_irq - Process the PTP Tx timestamps in IRQ context
2868  * @pf: Board private structure
2869  *
2870  * Return: IRQ_WAKE_THREAD if Tx timestamp read has to be handled in the bottom
2871  * half of the interrupt and IRQ_HANDLED otherwise.
2872  */
2873 irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
2874 {
2875 	struct ice_hw *hw = &pf->hw;
2876 
2877 	switch (hw->mac_type) {
2878 	case ICE_MAC_E810:
2879 		/* E810 capable of low latency timestamping with interrupt can
2880 		 * request a single timestamp in the top half and wait for
2881 		 * a second LL TS interrupt from the FW when it's ready.
2882 		 */
2883 		if (hw->dev_caps.ts_dev_info.ts_ll_int_read) {
2884 			struct ice_ptp_tx *tx = &pf->ptp.port.tx;
2885 			u8 idx;
2886 
2887 			if (!ice_pf_state_is_nominal(pf))
2888 				return IRQ_HANDLED;
2889 
2890 			spin_lock(&tx->lock);
2891 			idx = find_next_bit_wrap(tx->in_use, tx->len,
2892 						 tx->last_ll_ts_idx_read + 1);
2893 			if (idx != tx->len)
2894 				ice_ptp_req_tx_single_tstamp(tx, idx);
2895 			spin_unlock(&tx->lock);
2896 
2897 			return IRQ_HANDLED;
2898 		}
2899 		fallthrough; /* non-LL_TS E810 */
2900 	case ICE_MAC_GENERIC:
2901 	case ICE_MAC_GENERIC_3K_E825:
2902 		/* All other devices process timestamps in the bottom half due
2903 		 * to sleeping or polling.
2904 		 */
2905 		if (!ice_ptp_pf_handles_tx_interrupt(pf))
2906 			return IRQ_HANDLED;
2907 
2908 		set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
2909 		return IRQ_WAKE_THREAD;
2910 	case ICE_MAC_E830:
2911 		/* E830 can read timestamps in the top half using rd32() */
2912 		if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
2913 			/* Process outstanding Tx timestamps. If there
2914 			 * is more work, re-arm the interrupt to trigger again.
2915 			 */
2916 			wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
2917 			ice_flush(hw);
2918 		}
2919 		return IRQ_HANDLED;
2920 	default:
2921 		return IRQ_HANDLED;
2922 	}
2923 }
2924 
2925 /**
2926  * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt
2927  * @pf: Board private structure
2928  *
2929  * The device PHY issues Tx timestamp interrupts to the driver for processing
2930  * timestamp data from the PHY. It will not interrupt again until all
2931  * current timestamp data is read. In rare circumstances, it is possible that
2932  * the driver fails to read all outstanding data.
2933  *
2934  * To avoid getting permanently stuck, periodically check if the PHY has
2935  * outstanding timestamp data. If so, trigger an interrupt from software to
2936  * process this data.
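 *
 * The software trigger writes PFINT_OICR_TSYN_TX_M to PFINT_OICR, raising
 * the same interrupt cause the hardware would set.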
2937 */ 2938 static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf) 2939 { 2940 struct device *dev = ice_pf_to_dev(pf); 2941 struct ice_hw *hw = &pf->hw; 2942 bool trigger_oicr = false; 2943 unsigned int i; 2944 2945 if (!pf->ptp.port.tx.has_ready_bitmap) 2946 return; 2947 2948 if (!ice_pf_src_tmr_owned(pf)) 2949 return; 2950 2951 for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) { 2952 u64 tstamp_ready; 2953 int err; 2954 2955 err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready); 2956 if (!err && tstamp_ready) { 2957 trigger_oicr = true; 2958 break; 2959 } 2960 } 2961 2962 if (trigger_oicr) { 2963 /* Trigger a software interrupt, to ensure this data 2964 * gets processed. 2965 */ 2966 dev_dbg(dev, "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n"); 2967 2968 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); 2969 ice_flush(hw); 2970 } 2971 } 2972 2973 static void ice_ptp_periodic_work(struct kthread_work *work) 2974 { 2975 struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work); 2976 struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp); 2977 int err; 2978 2979 if (pf->ptp.state != ICE_PTP_READY) 2980 return; 2981 2982 err = ice_ptp_update_cached_phctime(pf); 2983 2984 ice_ptp_maybe_trigger_tx_interrupt(pf); 2985 2986 /* Run twice a second or reschedule if phc update failed */ 2987 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 2988 msecs_to_jiffies(err ? 10 : 500)); 2989 } 2990 2991 /** 2992 * ice_ptp_prepare_rebuild_sec - Prepare second NAC for PTP reset or rebuild 2993 * @pf: Board private structure 2994 * @rebuild: rebuild if true, prepare if false 2995 * @reset_type: the reset type being performed 2996 */ 2997 static void ice_ptp_prepare_rebuild_sec(struct ice_pf *pf, bool rebuild, 2998 enum ice_reset_req reset_type) 2999 { 3000 struct list_head *entry; 3001 3002 list_for_each(entry, &pf->adapter->ports.ports) { 3003 struct ice_ptp_port *port = list_entry(entry, 3004 struct ice_ptp_port, 3005 list_node); 3006 struct ice_pf *peer_pf = ptp_port_to_pf(port); 3007 3008 if (!ice_is_primary(&peer_pf->hw)) { 3009 if (rebuild) 3010 ice_ptp_rebuild(peer_pf, reset_type); 3011 else 3012 ice_ptp_prepare_for_reset(peer_pf, reset_type); 3013 } 3014 } 3015 } 3016 3017 /** 3018 * ice_ptp_prepare_for_reset - Prepare PTP for reset 3019 * @pf: Board private structure 3020 * @reset_type: the reset type being performed 3021 */ 3022 void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) 3023 { 3024 struct ice_ptp *ptp = &pf->ptp; 3025 struct ice_hw *hw = &pf->hw; 3026 u8 src_tmr; 3027 3028 if (ptp->state != ICE_PTP_READY) 3029 return; 3030 3031 ptp->state = ICE_PTP_RESETTING; 3032 3033 /* Disable timestamping for both Tx and Rx */ 3034 ice_ptp_disable_timestamp_mode(pf); 3035 3036 kthread_cancel_delayed_work_sync(&ptp->work); 3037 3038 if (reset_type == ICE_RESET_PFR) 3039 return; 3040 3041 if (ice_pf_src_tmr_owned(pf) && hw->mac_type == ICE_MAC_GENERIC_3K_E825) 3042 ice_ptp_prepare_rebuild_sec(pf, false, reset_type); 3043 3044 ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); 3045 3046 /* Disable periodic outputs */ 3047 ice_ptp_disable_all_perout(pf); 3048 3049 src_tmr = ice_get_ptp_src_clock_index(&pf->hw); 3050 3051 /* Disable source clock */ 3052 wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M); 3053 3054 /* Acquire PHC and system timer to restore after reset */ 3055 ptp->reset_time = ktime_get_real_ns(); 3056 } 3057 3058 /** 3059 * ice_ptp_rebuild_owner - Initialize PTP clock 
owner after reset 3060 * @pf: Board private structure 3061 * 3062 * Companion function for ice_ptp_rebuild() which handles tasks that only the 3063 * PTP clock owner instance should perform. 3064 */ 3065 static int ice_ptp_rebuild_owner(struct ice_pf *pf) 3066 { 3067 struct ice_ptp *ptp = &pf->ptp; 3068 struct ice_hw *hw = &pf->hw; 3069 struct timespec64 ts; 3070 u64 time_diff; 3071 int err; 3072 3073 err = ice_ptp_init_phc(hw); 3074 if (err) 3075 return err; 3076 3077 /* Acquire the global hardware lock */ 3078 if (!ice_ptp_lock(hw)) { 3079 err = -EBUSY; 3080 return err; 3081 } 3082 3083 /* Write the increment time value to PHY and LAN */ 3084 err = ice_ptp_write_incval(hw, ice_base_incval(pf)); 3085 if (err) 3086 goto err_unlock; 3087 3088 /* Write the initial Time value to PHY and LAN using the cached PHC 3089 * time before the reset and time difference between stopping and 3090 * starting the clock. 3091 */ 3092 if (ptp->cached_phc_time) { 3093 time_diff = ktime_get_real_ns() - ptp->reset_time; 3094 ts = ns_to_timespec64(ptp->cached_phc_time + time_diff); 3095 } else { 3096 ts = ktime_to_timespec64(ktime_get_real()); 3097 } 3098 err = ice_ptp_write_init(pf, &ts); 3099 if (err) 3100 goto err_unlock; 3101 3102 /* Release the global hardware lock */ 3103 ice_ptp_unlock(hw); 3104 3105 /* Flush software tracking of any outstanding timestamps since we're 3106 * about to flush the PHY timestamp block. 3107 */ 3108 ice_ptp_flush_all_tx_tracker(pf); 3109 3110 /* Enable quad interrupts */ 3111 err = ice_ptp_cfg_phy_interrupt(pf, true, 1); 3112 if (err) 3113 return err; 3114 3115 ice_ptp_restart_all_phy(pf); 3116 3117 /* Re-enable all periodic outputs and external timestamp events */ 3118 ice_ptp_enable_all_perout(pf); 3119 ice_ptp_enable_all_extts(pf); 3120 3121 return 0; 3122 3123 err_unlock: 3124 ice_ptp_unlock(hw); 3125 return err; 3126 } 3127 3128 /** 3129 * ice_ptp_rebuild - Initialize PTP hardware clock support after reset 3130 * @pf: Board private structure 3131 * @reset_type: the reset type being performed 3132 */ 3133 void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) 3134 { 3135 struct ice_ptp *ptp = &pf->ptp; 3136 int err; 3137 3138 if (ptp->state == ICE_PTP_READY) { 3139 ice_ptp_prepare_for_reset(pf, reset_type); 3140 } else if (ptp->state != ICE_PTP_RESETTING) { 3141 err = -EINVAL; 3142 dev_err(ice_pf_to_dev(pf), "PTP was not initialized\n"); 3143 goto err; 3144 } 3145 3146 if (ice_pf_src_tmr_owned(pf) && reset_type != ICE_RESET_PFR) { 3147 err = ice_ptp_rebuild_owner(pf); 3148 if (err) 3149 goto err; 3150 } 3151 3152 ptp->state = ICE_PTP_READY; 3153 3154 /* Start periodic work going */ 3155 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0); 3156 3157 dev_info(ice_pf_to_dev(pf), "PTP reset successful\n"); 3158 return; 3159 3160 err: 3161 ptp->state = ICE_PTP_ERROR; 3162 dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err); 3163 } 3164 3165 static int ice_ptp_setup_adapter(struct ice_pf *pf) 3166 { 3167 if (!ice_pf_src_tmr_owned(pf) || !ice_is_primary(&pf->hw)) 3168 return -EPERM; 3169 3170 pf->adapter->ctrl_pf = pf; 3171 3172 return 0; 3173 } 3174 3175 static int ice_ptp_setup_pf(struct ice_pf *pf) 3176 { 3177 struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf); 3178 struct ice_ptp *ptp = &pf->ptp; 3179 3180 if (WARN_ON(!ctrl_ptp) || pf->hw.mac_type == ICE_MAC_UNKNOWN) 3181 return -ENODEV; 3182 3183 INIT_LIST_HEAD(&ptp->port.list_node); 3184 mutex_lock(&pf->adapter->ports.lock); 3185 3186 list_add(&ptp->port.list_node, 3187 &pf->adapter->ports.ports); 3188 
mutex_unlock(&pf->adapter->ports.lock); 3189 3190 return 0; 3191 } 3192 3193 static void ice_ptp_cleanup_pf(struct ice_pf *pf) 3194 { 3195 struct ice_ptp *ptp = &pf->ptp; 3196 3197 if (pf->hw.mac_type != ICE_MAC_UNKNOWN) { 3198 mutex_lock(&pf->adapter->ports.lock); 3199 list_del(&ptp->port.list_node); 3200 mutex_unlock(&pf->adapter->ports.lock); 3201 } 3202 } 3203 3204 /** 3205 * ice_ptp_clock_index - Get the PTP clock index for this device 3206 * @pf: Board private structure 3207 * 3208 * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock 3209 * is associated. 3210 */ 3211 int ice_ptp_clock_index(struct ice_pf *pf) 3212 { 3213 struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf); 3214 struct ptp_clock *clock; 3215 3216 if (!ctrl_ptp) 3217 return -1; 3218 clock = ctrl_ptp->clock; 3219 3220 return clock ? ptp_clock_index(clock) : -1; 3221 } 3222 3223 /** 3224 * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device 3225 * @pf: Board private structure 3226 * 3227 * Setup and initialize a PTP clock device that represents the device hardware 3228 * clock. Save the clock index for other functions connected to the same 3229 * hardware resource. 3230 */ 3231 static int ice_ptp_init_owner(struct ice_pf *pf) 3232 { 3233 struct ice_hw *hw = &pf->hw; 3234 struct timespec64 ts; 3235 int err; 3236 3237 err = ice_ptp_init_phc(hw); 3238 if (err) { 3239 dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n", 3240 err); 3241 return err; 3242 } 3243 3244 /* Acquire the global hardware lock */ 3245 if (!ice_ptp_lock(hw)) { 3246 err = -EBUSY; 3247 goto err_exit; 3248 } 3249 3250 /* Write the increment time value to PHY and LAN */ 3251 err = ice_ptp_write_incval(hw, ice_base_incval(pf)); 3252 if (err) 3253 goto err_unlock; 3254 3255 ts = ktime_to_timespec64(ktime_get_real()); 3256 /* Write the initial Time value to PHY and LAN */ 3257 err = ice_ptp_write_init(pf, &ts); 3258 if (err) 3259 goto err_unlock; 3260 3261 /* Release the global hardware lock */ 3262 ice_ptp_unlock(hw); 3263 3264 /* Configure PHY interrupt settings */ 3265 err = ice_ptp_cfg_phy_interrupt(pf, true, 1); 3266 if (err) 3267 goto err_exit; 3268 3269 /* Ensure we have a clock device */ 3270 err = ice_ptp_create_clock(pf); 3271 if (err) 3272 goto err_clk; 3273 3274 return 0; 3275 err_clk: 3276 pf->ptp.clock = NULL; 3277 err_exit: 3278 return err; 3279 3280 err_unlock: 3281 ice_ptp_unlock(hw); 3282 return err; 3283 } 3284 3285 /** 3286 * ice_ptp_init_work - Initialize PTP work threads 3287 * @pf: Board private structure 3288 * @ptp: PF PTP structure 3289 */ 3290 static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp) 3291 { 3292 struct kthread_worker *kworker; 3293 3294 /* Initialize work functions */ 3295 kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work); 3296 3297 /* Allocate a kworker for handling work required for the ports 3298 * connected to the PTP hardware clock. 3299 */ 3300 kworker = kthread_run_worker(0, "ice-ptp-%s", 3301 dev_name(ice_pf_to_dev(pf))); 3302 if (IS_ERR(kworker)) 3303 return PTR_ERR(kworker); 3304 3305 ptp->kworker = kworker; 3306 3307 /* Start periodic work going */ 3308 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0); 3309 3310 return 0; 3311 } 3312 3313 /** 3314 * ice_ptp_init_port - Initialize PTP port structure 3315 * @pf: Board private structure 3316 * @ptp_port: PTP port structure 3317 * 3318 * Return: 0 on success, -ENODEV on invalid MAC type, -ENOMEM on failed alloc. 
3319 */ 3320 static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) 3321 { 3322 struct ice_hw *hw = &pf->hw; 3323 3324 mutex_init(&ptp_port->ps_lock); 3325 3326 switch (hw->mac_type) { 3327 case ICE_MAC_E810: 3328 case ICE_MAC_E830: 3329 case ICE_MAC_GENERIC_3K_E825: 3330 return ice_ptp_init_tx(pf, &ptp_port->tx, ptp_port->port_num); 3331 case ICE_MAC_GENERIC: 3332 kthread_init_delayed_work(&ptp_port->ov_work, 3333 ice_ptp_wait_for_offsets); 3334 return ice_ptp_init_tx_e82x(pf, &ptp_port->tx, 3335 ptp_port->port_num); 3336 default: 3337 return -ENODEV; 3338 } 3339 } 3340 3341 /** 3342 * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode 3343 * @pf: Board private structure 3344 * 3345 * Initialize the Tx timestamp interrupt mode for this device. For most device 3346 * types, each PF processes the interrupt and manages its own timestamps. For 3347 * E822-based devices, only the clock owner processes the timestamps. Other 3348 * PFs disable the interrupt and do not process their own timestamps. 3349 */ 3350 static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf) 3351 { 3352 switch (pf->hw.mac_type) { 3353 case ICE_MAC_GENERIC: 3354 /* E822 based PHY has the clock owner process the interrupt 3355 * for all ports. 3356 */ 3357 if (ice_pf_src_tmr_owned(pf)) 3358 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL; 3359 else 3360 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE; 3361 break; 3362 default: 3363 /* other PHY types handle their own Tx interrupt */ 3364 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF; 3365 } 3366 } 3367 3368 /** 3369 * ice_ptp_init - Initialize PTP hardware clock support 3370 * @pf: Board private structure 3371 * 3372 * Set up the device for interacting with the PTP hardware clock for all 3373 * functions, both the function that owns the clock hardware, and the 3374 * functions connected to the clock hardware. 3375 * 3376 * The clock owner will allocate and register a ptp_clock with the 3377 * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work 3378 * items used for asynchronous work such as Tx timestamps and periodic work. 3379 */ 3380 void ice_ptp_init(struct ice_pf *pf) 3381 { 3382 struct ice_ptp *ptp = &pf->ptp; 3383 struct ice_hw *hw = &pf->hw; 3384 int err; 3385 3386 ptp->state = ICE_PTP_INITIALIZING; 3387 3388 if (hw->lane_num < 0) { 3389 err = hw->lane_num; 3390 goto err_exit; 3391 } 3392 ptp->port.port_num = hw->lane_num; 3393 3394 ice_ptp_init_hw(hw); 3395 3396 ice_ptp_init_tx_interrupt_mode(pf); 3397 3398 /* If this function owns the clock hardware, it must allocate and 3399 * configure the PTP clock device to represent it. 
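	 * Non-owner functions skip this and instead locate the shared clock
	 * through the adapter's ctrl_pf pointer (see ice_get_ctrl_ptp()).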
3400 */ 3401 if (ice_pf_src_tmr_owned(pf) && ice_is_primary(hw)) { 3402 err = ice_ptp_setup_adapter(pf); 3403 if (err) 3404 goto err_exit; 3405 err = ice_ptp_init_owner(pf); 3406 if (err) 3407 goto err_exit; 3408 } 3409 3410 err = ice_ptp_setup_pf(pf); 3411 if (err) 3412 goto err_exit; 3413 3414 err = ice_ptp_init_port(pf, &ptp->port); 3415 if (err) 3416 goto err_exit; 3417 3418 /* Start the PHY timestamping block */ 3419 ice_ptp_reset_phy_timestamping(pf); 3420 3421 /* Configure initial Tx interrupt settings */ 3422 ice_ptp_cfg_tx_interrupt(pf); 3423 3424 ptp->state = ICE_PTP_READY; 3425 3426 err = ice_ptp_init_work(pf, ptp); 3427 if (err) 3428 goto err_exit; 3429 3430 dev_info(ice_pf_to_dev(pf), "PTP init successful\n"); 3431 return; 3432 3433 err_exit: 3434 /* If we registered a PTP clock, release it */ 3435 if (pf->ptp.clock) { 3436 ptp_clock_unregister(ptp->clock); 3437 pf->ptp.clock = NULL; 3438 } 3439 ptp->state = ICE_PTP_ERROR; 3440 dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err); 3441 } 3442 3443 /** 3444 * ice_ptp_release - Disable the driver/HW support and unregister the clock 3445 * @pf: Board private structure 3446 * 3447 * This function handles the cleanup work required from the initialization by 3448 * clearing out the important information and unregistering the clock 3449 */ 3450 void ice_ptp_release(struct ice_pf *pf) 3451 { 3452 if (pf->ptp.state != ICE_PTP_READY) 3453 return; 3454 3455 pf->ptp.state = ICE_PTP_UNINIT; 3456 3457 /* Disable timestamping for both Tx and Rx */ 3458 ice_ptp_disable_timestamp_mode(pf); 3459 3460 ice_ptp_cleanup_pf(pf); 3461 3462 ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); 3463 3464 ice_ptp_disable_all_extts(pf); 3465 3466 kthread_cancel_delayed_work_sync(&pf->ptp.work); 3467 3468 ice_ptp_port_phy_stop(&pf->ptp.port); 3469 mutex_destroy(&pf->ptp.port.ps_lock); 3470 if (pf->ptp.kworker) { 3471 kthread_destroy_worker(pf->ptp.kworker); 3472 pf->ptp.kworker = NULL; 3473 } 3474 3475 if (!pf->ptp.clock) 3476 return; 3477 3478 /* Disable periodic outputs */ 3479 ice_ptp_disable_all_perout(pf); 3480 3481 ptp_clock_unregister(pf->ptp.clock); 3482 pf->ptp.clock = NULL; 3483 3484 dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n"); 3485 } 3486