// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_trace.h"
#include "ice_cgu_regs.h"

static const char ice_pin_names[][64] = {
	"SDP0",
	"SDP1",
	"SDP2",
	"SDP3",
	"TIME_SYNC",
	"1PPS"
};

static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = {
	/* name, gpio, delay */
	{ TIME_SYNC, { 4, -1 }, { 0, 0 }},
	{ ONE_PPS, { -1, 5 }, { 0, 11 }},
};

static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = {
	/* name, gpio, delay */
	{ SDP0, { 0, 0 }, { 15, 14 }},
	{ SDP1, { 1, 1 }, { 15, 14 }},
	{ SDP2, { 2, 2 }, { 15, 14 }},
	{ SDP3, { 3, 3 }, { 15, 14 }},
	{ TIME_SYNC, { 4, -1 }, { 11, 0 }},
	{ ONE_PPS, { -1, 5 }, { 0, 9 }},
};

static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = {
	/* name, gpio, delay */
	{ SDP0, { 0, 0 }, { 0, 1 }},
	{ SDP1, { 1, 1 }, { 0, 1 }},
	{ SDP2, { 2, 2 }, { 0, 1 }},
	{ SDP3, { 3, 3 }, { 0, 1 }},
	{ ONE_PPS, { -1, 5 }, { 0, 1 }},
};

static const char ice_pin_names_nvm[][64] = {
	"GNSS",
	"SMA1",
	"U.FL1",
	"SMA2",
	"U.FL2",
};

static const struct ice_ptp_pin_desc ice_pin_desc_e810_sma[] = {
	/* name, gpio, delay */
	{ GNSS, { 1, -1 }, { 0, 0 }},
	{ SMA1, { 1, 0 }, { 0, 1 }},
	{ UFL1, { -1, 0 }, { 0, 1 }},
	{ SMA2, { 3, 2 }, { 0, 1 }},
	{ UFL2, { 3, -1 }, { 0, 0 }},
};

static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf)
{
	return !pf->adapter ? NULL : pf->adapter->ctrl_pf;
}

static struct ice_ptp *ice_get_ctrl_ptp(struct ice_pf *pf)
{
	struct ice_pf *ctrl_pf = ice_get_ctrl_pf(pf);

	return !ctrl_pf ? NULL : &ctrl_pf->ptp;
}

/**
 * ice_ptp_find_pin_idx - Find pin index in ptp_pin_desc
 * @pf: Board private structure
 * @func: Pin function
 * @chan: GPIO channel
 *
 * Return: index of the pin in ptp_pin_desc when present, -1 otherwise
 */
static int ice_ptp_find_pin_idx(struct ice_pf *pf, enum ptp_pin_function func,
				unsigned int chan)
{
	const struct ptp_clock_info *info = &pf->ptp.info;
	int i;

	for (i = 0; i < info->n_pins; i++) {
		if (info->pin_config[i].func == func &&
		    info->pin_config[i].chan == chan)
			return i;
	}

	return -1;
}

/**
 * ice_ptp_update_sma_data - update SMA pins data according to pins setup
 * @pf: Board private structure
 * @sma_pins: parsed SMA pins status
 * @data: SMA data to update
 */
static void ice_ptp_update_sma_data(struct ice_pf *pf, unsigned int sma_pins[],
				    u8 *data)
{
	const char *state1, *state2;

	/* Set the right state based on the desired configuration.
	 * When bit is set, functionality is disabled.
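	 * SMA1 and U.FL1 share an output GPIO, and SMA2 and U.FL2 share an
	 * input GPIO (see ice_pin_desc_e810_sma above), so each pair is
	 * resolved together.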
108 */ 109 *data &= ~ICE_ALL_SMA_MASK; 110 if (!sma_pins[UFL1 - 1]) { 111 if (sma_pins[SMA1 - 1] == PTP_PF_EXTTS) { 112 state1 = "SMA1 Rx, U.FL1 disabled"; 113 *data |= ICE_SMA1_TX_EN; 114 } else if (sma_pins[SMA1 - 1] == PTP_PF_PEROUT) { 115 state1 = "SMA1 Tx U.FL1 disabled"; 116 *data |= ICE_SMA1_DIR_EN; 117 } else { 118 state1 = "SMA1 disabled, U.FL1 disabled"; 119 *data |= ICE_SMA1_MASK; 120 } 121 } else { 122 /* U.FL1 Tx will always enable SMA1 Rx */ 123 state1 = "SMA1 Rx, U.FL1 Tx"; 124 } 125 126 if (!sma_pins[UFL2 - 1]) { 127 if (sma_pins[SMA2 - 1] == PTP_PF_EXTTS) { 128 state2 = "SMA2 Rx, U.FL2 disabled"; 129 *data |= ICE_SMA2_TX_EN | ICE_SMA2_UFL2_RX_DIS; 130 } else if (sma_pins[SMA2 - 1] == PTP_PF_PEROUT) { 131 state2 = "SMA2 Tx, U.FL2 disabled"; 132 *data |= ICE_SMA2_DIR_EN | ICE_SMA2_UFL2_RX_DIS; 133 } else { 134 state2 = "SMA2 disabled, U.FL2 disabled"; 135 *data |= ICE_SMA2_MASK; 136 } 137 } else { 138 if (!sma_pins[SMA2 - 1]) { 139 state2 = "SMA2 disabled, U.FL2 Rx"; 140 *data |= ICE_SMA2_DIR_EN | ICE_SMA2_TX_EN; 141 } else { 142 state2 = "SMA2 Tx, U.FL2 Rx"; 143 *data |= ICE_SMA2_DIR_EN; 144 } 145 } 146 147 dev_dbg(ice_pf_to_dev(pf), "%s, %s\n", state1, state2); 148 } 149 150 /** 151 * ice_ptp_set_sma_cfg - set the configuration of the SMA control logic 152 * @pf: Board private structure 153 * 154 * Return: 0 on success, negative error code otherwise 155 */ 156 static int ice_ptp_set_sma_cfg(struct ice_pf *pf) 157 { 158 const struct ice_ptp_pin_desc *ice_pins = pf->ptp.ice_pin_desc; 159 struct ptp_pin_desc *pins = pf->ptp.pin_desc; 160 unsigned int sma_pins[ICE_SMA_PINS_NUM] = {}; 161 int err; 162 u8 data; 163 164 /* Read initial pin state value */ 165 err = ice_read_sma_ctrl(&pf->hw, &data); 166 if (err) 167 return err; 168 169 /* Get SMA/U.FL pins states */ 170 for (int i = 0; i < pf->ptp.info.n_pins; i++) 171 if (pins[i].func) { 172 int name_idx = ice_pins[i].name_idx; 173 174 switch (name_idx) { 175 case SMA1: 176 case UFL1: 177 case SMA2: 178 case UFL2: 179 sma_pins[name_idx - 1] = pins[i].func; 180 break; 181 default: 182 continue; 183 } 184 } 185 186 ice_ptp_update_sma_data(pf, sma_pins, &data); 187 return ice_write_sma_ctrl(&pf->hw, data); 188 } 189 190 /** 191 * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device 192 * @pf: Board private structure 193 * 194 * Program the device to respond appropriately to the Tx timestamp interrupt 195 * cause. 196 */ 197 static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf) 198 { 199 struct ice_hw *hw = &pf->hw; 200 bool enable; 201 u32 val; 202 203 switch (pf->ptp.tx_interrupt_mode) { 204 case ICE_PTP_TX_INTERRUPT_ALL: 205 /* React to interrupts across all quads. */ 206 wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f); 207 enable = true; 208 break; 209 case ICE_PTP_TX_INTERRUPT_NONE: 210 /* Do not react to interrupts on any quad. 
*/ 211 wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0); 212 enable = false; 213 break; 214 case ICE_PTP_TX_INTERRUPT_SELF: 215 default: 216 enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON; 217 break; 218 } 219 220 /* Configure the Tx timestamp interrupt */ 221 val = rd32(hw, PFINT_OICR_ENA); 222 if (enable) 223 val |= PFINT_OICR_TSYN_TX_M; 224 else 225 val &= ~PFINT_OICR_TSYN_TX_M; 226 wr32(hw, PFINT_OICR_ENA, val); 227 } 228 229 /** 230 * ice_set_rx_tstamp - Enable or disable Rx timestamping 231 * @pf: The PF pointer to search in 232 * @on: bool value for whether timestamps are enabled or disabled 233 */ 234 static void ice_set_rx_tstamp(struct ice_pf *pf, bool on) 235 { 236 struct ice_vsi *vsi; 237 u16 i; 238 239 vsi = ice_get_main_vsi(pf); 240 if (!vsi || !vsi->rx_rings) 241 return; 242 243 /* Set the timestamp flag for all the Rx rings */ 244 ice_for_each_rxq(vsi, i) { 245 if (!vsi->rx_rings[i]) 246 continue; 247 vsi->rx_rings[i]->ptp_rx = on; 248 } 249 } 250 251 /** 252 * ice_ptp_disable_timestamp_mode - Disable current timestamp mode 253 * @pf: Board private structure 254 * 255 * Called during preparation for reset to temporarily disable timestamping on 256 * the device. Called during remove to disable timestamping while cleaning up 257 * driver resources. 258 */ 259 static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf) 260 { 261 struct ice_hw *hw = &pf->hw; 262 u32 val; 263 264 val = rd32(hw, PFINT_OICR_ENA); 265 val &= ~PFINT_OICR_TSYN_TX_M; 266 wr32(hw, PFINT_OICR_ENA, val); 267 268 ice_set_rx_tstamp(pf, false); 269 } 270 271 /** 272 * ice_ptp_restore_timestamp_mode - Restore timestamp configuration 273 * @pf: Board private structure 274 * 275 * Called at the end of rebuild to restore timestamp configuration after 276 * a device reset. 277 */ 278 void ice_ptp_restore_timestamp_mode(struct ice_pf *pf) 279 { 280 struct ice_hw *hw = &pf->hw; 281 bool enable_rx; 282 283 ice_ptp_cfg_tx_interrupt(pf); 284 285 enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL; 286 ice_set_rx_tstamp(pf, enable_rx); 287 288 /* Trigger an immediate software interrupt to ensure that timestamps 289 * which occurred during reset are handled now. 290 */ 291 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); 292 ice_flush(hw); 293 } 294 295 /** 296 * ice_ptp_read_src_clk_reg - Read the source clock register 297 * @pf: Board private structure 298 * @sts: Optional parameter for holding a pair of system timestamps from 299 * the system clock. Will be ignored if NULL is given. 
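 *
 * Return: the 64-bit value of the source clock (GLTSYN_TIME), in nanoseconds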
300 */ 301 u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf, 302 struct ptp_system_timestamp *sts) 303 { 304 struct ice_hw *hw = &pf->hw; 305 u32 hi, lo, lo2; 306 u8 tmr_idx; 307 308 tmr_idx = ice_get_ptp_src_clock_index(hw); 309 guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock); 310 /* Read the system timestamp pre PHC read */ 311 ptp_read_system_prets(sts); 312 313 if (hw->mac_type == ICE_MAC_E830) { 314 u64 clk_time = rd64(hw, E830_GLTSYN_TIME_L(tmr_idx)); 315 316 /* Read the system timestamp post PHC read */ 317 ptp_read_system_postts(sts); 318 319 return clk_time; 320 } 321 322 lo = rd32(hw, GLTSYN_TIME_L(tmr_idx)); 323 324 /* Read the system timestamp post PHC read */ 325 ptp_read_system_postts(sts); 326 327 hi = rd32(hw, GLTSYN_TIME_H(tmr_idx)); 328 lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx)); 329 330 if (lo2 < lo) { 331 /* if TIME_L rolled over read TIME_L again and update 332 * system timestamps 333 */ 334 ptp_read_system_prets(sts); 335 lo = rd32(hw, GLTSYN_TIME_L(tmr_idx)); 336 ptp_read_system_postts(sts); 337 hi = rd32(hw, GLTSYN_TIME_H(tmr_idx)); 338 } 339 340 return ((u64)hi << 32) | lo; 341 } 342 343 /** 344 * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b 345 * @cached_phc_time: recently cached copy of PHC time 346 * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value 347 * 348 * Hardware captures timestamps which contain only 32 bits of nominal 349 * nanoseconds, as opposed to the 64bit timestamps that the stack expects. 350 * Note that the captured timestamp values may be 40 bits, but the lower 351 * 8 bits are sub-nanoseconds and generally discarded. 352 * 353 * Extend the 32bit nanosecond timestamp using the following algorithm and 354 * assumptions: 355 * 356 * 1) have a recently cached copy of the PHC time 357 * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1 358 * seconds) before or after the PHC time was captured. 359 * 3) calculate the delta between the cached time and the timestamp 360 * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was 361 * captured after the PHC time. In this case, the full timestamp is just 362 * the cached PHC time plus the delta. 363 * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the 364 * timestamp was captured *before* the PHC time, i.e. because the PHC 365 * cache was updated after the timestamp was captured by hardware. In this 366 * case, the full timestamp is the cached time minus the inverse delta. 367 * 368 * This algorithm works even if the PHC time was updated after a Tx timestamp 369 * was requested, but before the Tx timestamp event was reported from 370 * hardware. 371 * 372 * This calculation primarily relies on keeping the cached PHC time up to 373 * date. If the timestamp was captured more than 2^31 nanoseconds after the 374 * PHC time, it is possible that the lower 32bits of PHC time have 375 * overflowed more than once, and we might generate an incorrect timestamp. 376 * 377 * This is prevented by (a) periodically updating the cached PHC time once 378 * a second, and (b) discarding any Tx timestamp packet if it has waited for 379 * a timestamp for more than one second. 
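 *
 * As an illustrative example, if the cached PHC time is 0x1B_0000_0200 and
 * in_tstamp is 0x0000_0300, the delta is 0x100 (less than 2^31), so the
 * extended timestamp is 0x1B_0000_0300. If in_tstamp is 0x0000_0100
 * instead, the delta wraps to 0xFFFF_FF00 (greater than 2^31), so the
 * result is the cached time minus 0x100, i.e. 0x1B_0000_0100.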
 */
static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
{
	u32 delta, phc_time_lo;
	u64 ns;

	/* Extract the lower 32 bits of the PHC time */
	phc_time_lo = (u32)cached_phc_time;

	/* Calculate the delta between the lower 32bits of the cached PHC
	 * time and the in_tstamp value
	 */
	delta = (in_tstamp - phc_time_lo);

	/* Do not assume that the in_tstamp is always more recent than the
	 * cached PHC time. If the delta is large, it indicates that the
	 * in_tstamp was taken in the past, and should be converted
	 * forward.
	 */
	if (delta > (U32_MAX / 2)) {
		/* reverse the delta calculation here */
		delta = (phc_time_lo - in_tstamp);
		ns = cached_phc_time - delta;
	} else {
		ns = cached_phc_time + delta;
	}

	return ns;
}

/**
 * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
 * @pf: Board private structure
 * @in_tstamp: Ingress/egress 40b timestamp value
 *
 * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
 * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
 *
 *  *--------------------------------------------------------------*
 *  | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
 *  *--------------------------------------------------------------*
 *
 * The low bit is an indicator of whether the timestamp is valid. The next
 * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
 * and the remaining 32 bits are the lower 32 bits of the PHC timer.
 *
 * It is assumed that the caller verifies the timestamp is valid prior to
 * calling this function.
 *
 * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
 * time stored in the device private PTP structure as the basis for timestamp
 * extension.
 *
 * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
 * algorithm.
 */
static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
{
	const u64 mask = GENMASK_ULL(31, 0);
	unsigned long discard_time;

	/* Discard the hardware timestamp if the cached PHC time is too old */
	discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
	if (time_is_before_jiffies(discard_time)) {
		pf->ptp.tx_hwtstamp_discarded++;
		return 0;
	}

	return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
				     (in_tstamp >> 8) & mask);
}

/**
 * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps
 * @tx: the PTP Tx timestamp tracker to check
 *
 * Check that a given PTP Tx timestamp tracker is up, i.e. that it is ready
 * to accept new timestamp requests.
 *
 * Assumes the tx->lock spinlock is already held.
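 *
 * Return: true if the tracker is initialized and not calibrating, false
 *         otherwise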
460 */ 461 static bool 462 ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx) 463 { 464 lockdep_assert_held(&tx->lock); 465 466 return tx->init && !tx->calibrating; 467 } 468 469 /** 470 * ice_ptp_req_tx_single_tstamp - Request Tx timestamp for a port from FW 471 * @tx: the PTP Tx timestamp tracker 472 * @idx: index of the timestamp to request 473 */ 474 void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx) 475 { 476 struct ice_e810_params *params; 477 struct ice_ptp_port *ptp_port; 478 unsigned long flags; 479 struct sk_buff *skb; 480 struct ice_pf *pf; 481 482 if (!tx->init) 483 return; 484 485 ptp_port = container_of(tx, struct ice_ptp_port, tx); 486 pf = ptp_port_to_pf(ptp_port); 487 params = &pf->hw.ptp.phy.e810; 488 489 /* Drop packets which have waited for more than 2 seconds */ 490 if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) { 491 /* Count the number of Tx timestamps that timed out */ 492 pf->ptp.tx_hwtstamp_timeouts++; 493 494 skb = tx->tstamps[idx].skb; 495 tx->tstamps[idx].skb = NULL; 496 clear_bit(idx, tx->in_use); 497 498 dev_kfree_skb_any(skb); 499 return; 500 } 501 502 ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx); 503 504 spin_lock_irqsave(¶ms->atqbal_wq.lock, flags); 505 506 params->atqbal_flags |= ATQBAL_FLAGS_INTR_IN_PROGRESS; 507 508 /* Write TS index to read to the PF register so the FW can read it */ 509 wr32(&pf->hw, REG_LL_PROXY_H, 510 REG_LL_PROXY_H_TS_INTR_ENA | FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) | 511 REG_LL_PROXY_H_EXEC); 512 tx->last_ll_ts_idx_read = idx; 513 514 spin_unlock_irqrestore(¶ms->atqbal_wq.lock, flags); 515 } 516 517 /** 518 * ice_ptp_complete_tx_single_tstamp - Complete Tx timestamp for a port 519 * @tx: the PTP Tx timestamp tracker 520 */ 521 void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx) 522 { 523 struct skb_shared_hwtstamps shhwtstamps = {}; 524 u8 idx = tx->last_ll_ts_idx_read; 525 struct ice_e810_params *params; 526 struct ice_ptp_port *ptp_port; 527 u64 raw_tstamp, tstamp; 528 bool drop_ts = false; 529 struct sk_buff *skb; 530 unsigned long flags; 531 struct device *dev; 532 struct ice_pf *pf; 533 u32 reg_ll_high; 534 535 if (!tx->init || tx->last_ll_ts_idx_read < 0) 536 return; 537 538 ptp_port = container_of(tx, struct ice_ptp_port, tx); 539 pf = ptp_port_to_pf(ptp_port); 540 dev = ice_pf_to_dev(pf); 541 params = &pf->hw.ptp.phy.e810; 542 543 ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx); 544 545 spin_lock_irqsave(¶ms->atqbal_wq.lock, flags); 546 547 if (!(params->atqbal_flags & ATQBAL_FLAGS_INTR_IN_PROGRESS)) 548 dev_dbg(dev, "%s: low latency interrupt request not in progress?\n", 549 __func__); 550 551 /* Read the low 32 bit value */ 552 raw_tstamp = rd32(&pf->hw, REG_LL_PROXY_L); 553 /* Read the status together with high TS part */ 554 reg_ll_high = rd32(&pf->hw, REG_LL_PROXY_H); 555 556 /* Wake up threads waiting on low latency interface */ 557 params->atqbal_flags &= ~ATQBAL_FLAGS_INTR_IN_PROGRESS; 558 559 wake_up_locked(¶ms->atqbal_wq); 560 561 spin_unlock_irqrestore(¶ms->atqbal_wq.lock, flags); 562 563 /* When the bit is cleared, the TS is ready in the register */ 564 if (reg_ll_high & REG_LL_PROXY_H_EXEC) { 565 dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready"); 566 return; 567 } 568 569 /* High 8 bit value of the TS is on the bits 16:23 */ 570 raw_tstamp |= ((u64)FIELD_GET(REG_LL_PROXY_H_TS_HIGH, reg_ll_high)) << 32; 571 572 /* Devices using this interface always verify the timestamp differs 573 * relative to the last cached timestamp value. 
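	 * An unchanged value means the hardware has not captured a new
	 * timestamp yet, so bail out and leave the skb in place.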
574 */ 575 if (raw_tstamp == tx->tstamps[idx].cached_tstamp) 576 return; 577 578 tx->tstamps[idx].cached_tstamp = raw_tstamp; 579 clear_bit(idx, tx->in_use); 580 skb = tx->tstamps[idx].skb; 581 tx->tstamps[idx].skb = NULL; 582 if (test_and_clear_bit(idx, tx->stale)) 583 drop_ts = true; 584 585 if (!skb) 586 return; 587 588 if (drop_ts) { 589 dev_kfree_skb_any(skb); 590 return; 591 } 592 593 /* Extend the timestamp using cached PHC time */ 594 tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp); 595 if (tstamp) { 596 shhwtstamps.hwtstamp = ns_to_ktime(tstamp); 597 ice_trace(tx_tstamp_complete, skb, idx); 598 } 599 600 skb_tstamp_tx(skb, &shhwtstamps); 601 dev_kfree_skb_any(skb); 602 } 603 604 /** 605 * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port 606 * @tx: the PTP Tx timestamp tracker 607 * 608 * Process timestamps captured by the PHY associated with this port. To do 609 * this, loop over each index with a waiting skb. 610 * 611 * If a given index has a valid timestamp, perform the following steps: 612 * 613 * 1) check that the timestamp request is not stale 614 * 2) check that a timestamp is ready and available in the PHY memory bank 615 * 3) read and copy the timestamp out of the PHY register 616 * 4) unlock the index by clearing the associated in_use bit 617 * 5) check if the timestamp is stale, and discard if so 618 * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value 619 * 7) send this 64 bit timestamp to the stack 620 * 621 * Note that we do not hold the tracking lock while reading the Tx timestamp. 622 * This is because reading the timestamp requires taking a mutex that might 623 * sleep. 624 * 625 * The only place where we set in_use is when a new timestamp is initiated 626 * with a slot index. This is only called in the hard xmit routine where an 627 * SKB has a request flag set. The only places where we clear this bit is this 628 * function, or during teardown when the Tx timestamp tracker is being 629 * removed. A timestamp index will never be re-used until the in_use bit for 630 * that index is cleared. 631 * 632 * If a Tx thread starts a new timestamp, we might not begin processing it 633 * right away but we will notice it at the end when we re-queue the task. 634 * 635 * If a Tx thread starts a new timestamp just after this function exits, the 636 * interrupt for that timestamp should re-trigger this function once 637 * a timestamp is ready. 638 * 639 * In cases where the PTP hardware clock was directly adjusted, some 640 * timestamps may not be able to safely use the timestamp extension math. In 641 * this case, software will set the stale bit for any outstanding Tx 642 * timestamps when the clock is adjusted. Then this function will discard 643 * those captured timestamps instead of sending them to the stack. 644 * 645 * If a Tx packet has been waiting for more than 2 seconds, it is not possible 646 * to correctly extend the timestamp using the cached PHC time. It is 647 * extremely unlikely that a packet will ever take this long to timestamp. If 648 * we detect a Tx timestamp request that has waited for this long we assume 649 * the packet will never be sent by hardware and discard it without reading 650 * the timestamp register. 
651 */ 652 static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx) 653 { 654 struct ice_ptp_port *ptp_port; 655 unsigned long flags; 656 struct ice_pf *pf; 657 struct ice_hw *hw; 658 u64 tstamp_ready; 659 bool link_up; 660 int err; 661 u8 idx; 662 663 ptp_port = container_of(tx, struct ice_ptp_port, tx); 664 pf = ptp_port_to_pf(ptp_port); 665 hw = &pf->hw; 666 667 /* Read the Tx ready status first */ 668 if (tx->has_ready_bitmap) { 669 err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready); 670 if (err) 671 return; 672 } 673 674 /* Drop packets if the link went down */ 675 link_up = ptp_port->link_up; 676 677 for_each_set_bit(idx, tx->in_use, tx->len) { 678 struct skb_shared_hwtstamps shhwtstamps = {}; 679 u8 phy_idx = idx + tx->offset; 680 u64 raw_tstamp = 0, tstamp; 681 bool drop_ts = !link_up; 682 struct sk_buff *skb; 683 684 /* Drop packets which have waited for more than 2 seconds */ 685 if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) { 686 drop_ts = true; 687 688 /* Count the number of Tx timestamps that timed out */ 689 pf->ptp.tx_hwtstamp_timeouts++; 690 } 691 692 /* Only read a timestamp from the PHY if its marked as ready 693 * by the tstamp_ready register. This avoids unnecessary 694 * reading of timestamps which are not yet valid. This is 695 * important as we must read all timestamps which are valid 696 * and only timestamps which are valid during each interrupt. 697 * If we do not, the hardware logic for generating a new 698 * interrupt can get stuck on some devices. 699 */ 700 if (tx->has_ready_bitmap && 701 !(tstamp_ready & BIT_ULL(phy_idx))) { 702 if (drop_ts) 703 goto skip_ts_read; 704 705 continue; 706 } 707 708 ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx); 709 710 err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp); 711 if (err && !drop_ts) 712 continue; 713 714 ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx); 715 716 /* For PHYs which don't implement a proper timestamp ready 717 * bitmap, verify that the timestamp value is different 718 * from the last cached timestamp. If it is not, skip this for 719 * now assuming it hasn't yet been captured by hardware. 720 */ 721 if (!drop_ts && !tx->has_ready_bitmap && 722 raw_tstamp == tx->tstamps[idx].cached_tstamp) 723 continue; 724 725 /* Discard any timestamp value without the valid bit set */ 726 if (!(raw_tstamp & ICE_PTP_TS_VALID)) 727 drop_ts = true; 728 729 skip_ts_read: 730 spin_lock_irqsave(&tx->lock, flags); 731 if (!tx->has_ready_bitmap && raw_tstamp) 732 tx->tstamps[idx].cached_tstamp = raw_tstamp; 733 clear_bit(idx, tx->in_use); 734 skb = tx->tstamps[idx].skb; 735 tx->tstamps[idx].skb = NULL; 736 if (test_and_clear_bit(idx, tx->stale)) 737 drop_ts = true; 738 spin_unlock_irqrestore(&tx->lock, flags); 739 740 /* It is unlikely but possible that the SKB will have been 741 * flushed at this point due to link change or teardown. 
742 */ 743 if (!skb) 744 continue; 745 746 if (drop_ts) { 747 dev_kfree_skb_any(skb); 748 continue; 749 } 750 751 /* Extend the timestamp using cached PHC time */ 752 tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp); 753 if (tstamp) { 754 shhwtstamps.hwtstamp = ns_to_ktime(tstamp); 755 ice_trace(tx_tstamp_complete, skb, idx); 756 } 757 758 skb_tstamp_tx(skb, &shhwtstamps); 759 dev_kfree_skb_any(skb); 760 } 761 } 762 763 /** 764 * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device 765 * @pf: Board private structure 766 */ 767 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf) 768 { 769 struct ice_ptp_port *port; 770 unsigned int i; 771 772 mutex_lock(&pf->adapter->ports.lock); 773 list_for_each_entry(port, &pf->adapter->ports.ports, list_node) { 774 struct ice_ptp_tx *tx = &port->tx; 775 776 if (!tx || !tx->init) 777 continue; 778 779 ice_ptp_process_tx_tstamp(tx); 780 } 781 mutex_unlock(&pf->adapter->ports.lock); 782 783 for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) { 784 u64 tstamp_ready; 785 int err; 786 787 /* Read the Tx ready status first */ 788 err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready); 789 if (err) 790 break; 791 else if (tstamp_ready) 792 return ICE_TX_TSTAMP_WORK_PENDING; 793 } 794 795 return ICE_TX_TSTAMP_WORK_DONE; 796 } 797 798 /** 799 * ice_ptp_tx_tstamp - Process Tx timestamps for this function. 800 * @tx: Tx tracking structure to initialize 801 * 802 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete 803 * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise. 804 */ 805 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx) 806 { 807 bool more_timestamps; 808 unsigned long flags; 809 810 if (!tx->init) 811 return ICE_TX_TSTAMP_WORK_DONE; 812 813 /* Process the Tx timestamp tracker */ 814 ice_ptp_process_tx_tstamp(tx); 815 816 /* Check if there are outstanding Tx timestamps */ 817 spin_lock_irqsave(&tx->lock, flags); 818 more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len); 819 spin_unlock_irqrestore(&tx->lock, flags); 820 821 if (more_timestamps) 822 return ICE_TX_TSTAMP_WORK_PENDING; 823 824 return ICE_TX_TSTAMP_WORK_DONE; 825 } 826 827 /** 828 * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps 829 * @tx: Tx tracking structure to initialize 830 * 831 * Assumes that the length has already been initialized. Do not call directly, 832 * use the ice_ptp_init_tx_* instead. 833 */ 834 static int 835 ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx) 836 { 837 unsigned long *in_use, *stale; 838 struct ice_tx_tstamp *tstamps; 839 840 tstamps = kcalloc(tx->len, sizeof(*tstamps), GFP_KERNEL); 841 in_use = bitmap_zalloc(tx->len, GFP_KERNEL); 842 stale = bitmap_zalloc(tx->len, GFP_KERNEL); 843 844 if (!tstamps || !in_use || !stale) { 845 kfree(tstamps); 846 bitmap_free(in_use); 847 bitmap_free(stale); 848 849 return -ENOMEM; 850 } 851 852 tx->tstamps = tstamps; 853 tx->in_use = in_use; 854 tx->stale = stale; 855 tx->init = 1; 856 tx->last_ll_ts_idx_read = -1; 857 858 spin_lock_init(&tx->lock); 859 860 return 0; 861 } 862 863 /** 864 * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker 865 * @pf: Board private structure 866 * @tx: the tracker to flush 867 * 868 * Called during teardown when a Tx tracker is being removed. 
869 */ 870 static void 871 ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) 872 { 873 struct ice_hw *hw = &pf->hw; 874 unsigned long flags; 875 u64 tstamp_ready; 876 int err; 877 u8 idx; 878 879 err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready); 880 if (err) { 881 dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n", 882 tx->block, err); 883 884 /* If we fail to read the Tx timestamp ready bitmap just 885 * skip clearing the PHY timestamps. 886 */ 887 tstamp_ready = 0; 888 } 889 890 for_each_set_bit(idx, tx->in_use, tx->len) { 891 u8 phy_idx = idx + tx->offset; 892 struct sk_buff *skb; 893 894 /* In case this timestamp is ready, we need to clear it. */ 895 if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx))) 896 ice_clear_phy_tstamp(hw, tx->block, phy_idx); 897 898 spin_lock_irqsave(&tx->lock, flags); 899 skb = tx->tstamps[idx].skb; 900 tx->tstamps[idx].skb = NULL; 901 clear_bit(idx, tx->in_use); 902 clear_bit(idx, tx->stale); 903 spin_unlock_irqrestore(&tx->lock, flags); 904 905 /* Count the number of Tx timestamps flushed */ 906 pf->ptp.tx_hwtstamp_flushed++; 907 908 /* Free the SKB after we've cleared the bit */ 909 dev_kfree_skb_any(skb); 910 } 911 } 912 913 /** 914 * ice_ptp_mark_tx_tracker_stale - Mark unfinished timestamps as stale 915 * @tx: the tracker to mark 916 * 917 * Mark currently outstanding Tx timestamps as stale. This prevents sending 918 * their timestamp value to the stack. This is required to prevent extending 919 * the 40bit hardware timestamp incorrectly. 920 * 921 * This should be called when the PTP clock is modified such as after a set 922 * time request. 923 */ 924 static void 925 ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx) 926 { 927 unsigned long flags; 928 929 spin_lock_irqsave(&tx->lock, flags); 930 bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len); 931 spin_unlock_irqrestore(&tx->lock, flags); 932 } 933 934 /** 935 * ice_ptp_flush_all_tx_tracker - Flush all timestamp trackers on this clock 936 * @pf: Board private structure 937 * 938 * Called by the clock owner to flush all the Tx timestamp trackers associated 939 * with the clock. 940 */ 941 static void 942 ice_ptp_flush_all_tx_tracker(struct ice_pf *pf) 943 { 944 struct ice_ptp_port *port; 945 946 list_for_each_entry(port, &pf->adapter->ports.ports, list_node) 947 ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx); 948 } 949 950 /** 951 * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker 952 * @pf: Board private structure 953 * @tx: Tx tracking structure to release 954 * 955 * Free memory associated with the Tx timestamp tracker. 956 */ 957 static void 958 ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) 959 { 960 unsigned long flags; 961 962 spin_lock_irqsave(&tx->lock, flags); 963 tx->init = 0; 964 spin_unlock_irqrestore(&tx->lock, flags); 965 966 /* wait for potentially outstanding interrupt to complete */ 967 synchronize_irq(pf->oicr_irq.virq); 968 969 ice_ptp_flush_tx_tracker(pf, tx); 970 971 kfree(tx->tstamps); 972 tx->tstamps = NULL; 973 974 bitmap_free(tx->in_use); 975 tx->in_use = NULL; 976 977 bitmap_free(tx->stale); 978 tx->stale = NULL; 979 980 tx->len = 0; 981 } 982 983 /** 984 * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps 985 * @pf: Board private structure 986 * @tx: the Tx tracking structure to initialize 987 * @port: the port this structure tracks 988 * 989 * Initialize the Tx timestamp tracker for this port. 
For generic MAC devices, 990 * the timestamp block is shared for all ports in the same quad. To avoid 991 * ports using the same timestamp index, logically break the block of 992 * registers into chunks based on the port number. 993 * 994 * Return: 0 on success, -ENOMEM when out of memory 995 */ 996 static int ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, 997 u8 port) 998 { 999 tx->block = ICE_GET_QUAD_NUM(port); 1000 tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X; 1001 tx->len = INDEX_PER_PORT_E82X; 1002 tx->has_ready_bitmap = 1; 1003 1004 return ice_ptp_alloc_tx_tracker(tx); 1005 } 1006 1007 /** 1008 * ice_ptp_init_tx - Initialize tracking for Tx timestamps 1009 * @pf: Board private structure 1010 * @tx: the Tx tracking structure to initialize 1011 * @port: the port this structure tracks 1012 * 1013 * Initialize the Tx timestamp tracker for this PF. For all PHYs except E82X, 1014 * each port has its own block of timestamps, independent of the other ports. 1015 * 1016 * Return: 0 on success, -ENOMEM when out of memory 1017 */ 1018 static int ice_ptp_init_tx(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) 1019 { 1020 tx->block = port; 1021 tx->offset = 0; 1022 tx->len = INDEX_PER_PORT; 1023 1024 /* The E810 PHY does not provide a timestamp ready bitmap. Instead, 1025 * verify new timestamps against cached copy of the last read 1026 * timestamp. 1027 */ 1028 tx->has_ready_bitmap = pf->hw.mac_type != ICE_MAC_E810; 1029 1030 return ice_ptp_alloc_tx_tracker(tx); 1031 } 1032 1033 /** 1034 * ice_ptp_update_cached_phctime - Update the cached PHC time values 1035 * @pf: Board specific private structure 1036 * 1037 * This function updates the system time values which are cached in the PF 1038 * structure and the Rx rings. 1039 * 1040 * This function must be called periodically to ensure that the cached value 1041 * is never more than 2 seconds old. 1042 * 1043 * Note that the cached copy in the PF PTP structure is always updated, even 1044 * if we can't update the copy in the Rx rings. 
1045 * 1046 * Return: 1047 * * 0 - OK, successfully updated 1048 * * -EAGAIN - PF was busy, need to reschedule the update 1049 */ 1050 static int ice_ptp_update_cached_phctime(struct ice_pf *pf) 1051 { 1052 struct device *dev = ice_pf_to_dev(pf); 1053 unsigned long update_before; 1054 u64 systime; 1055 int i; 1056 1057 update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000); 1058 if (pf->ptp.cached_phc_time && 1059 time_is_before_jiffies(update_before)) { 1060 unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies; 1061 1062 dev_warn(dev, "%u msecs passed between update to cached PHC time\n", 1063 jiffies_to_msecs(time_taken)); 1064 pf->ptp.late_cached_phc_updates++; 1065 } 1066 1067 /* Read the current PHC time */ 1068 systime = ice_ptp_read_src_clk_reg(pf, NULL); 1069 1070 /* Update the cached PHC time stored in the PF structure */ 1071 WRITE_ONCE(pf->ptp.cached_phc_time, systime); 1072 WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies); 1073 1074 if (test_and_set_bit(ICE_CFG_BUSY, pf->state)) 1075 return -EAGAIN; 1076 1077 ice_for_each_vsi(pf, i) { 1078 struct ice_vsi *vsi = pf->vsi[i]; 1079 int j; 1080 1081 if (!vsi) 1082 continue; 1083 1084 if (vsi->type != ICE_VSI_PF) 1085 continue; 1086 1087 ice_for_each_rxq(vsi, j) { 1088 if (!vsi->rx_rings[j]) 1089 continue; 1090 WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime); 1091 } 1092 } 1093 clear_bit(ICE_CFG_BUSY, pf->state); 1094 1095 return 0; 1096 } 1097 1098 /** 1099 * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update 1100 * @pf: Board specific private structure 1101 * 1102 * This function must be called when the cached PHC time is no longer valid, 1103 * such as after a time adjustment. It marks any currently outstanding Tx 1104 * timestamps as stale and updates the cached PHC time for both the PF and Rx 1105 * rings. 1106 * 1107 * If updating the PHC time cannot be done immediately, a warning message is 1108 * logged and the work item is scheduled immediately to minimize the window 1109 * with a wrong cached timestamp. 1110 */ 1111 static void ice_ptp_reset_cached_phctime(struct ice_pf *pf) 1112 { 1113 struct device *dev = ice_pf_to_dev(pf); 1114 int err; 1115 1116 /* Update the cached PHC time immediately if possible, otherwise 1117 * schedule the work item to execute soon. 1118 */ 1119 err = ice_ptp_update_cached_phctime(pf); 1120 if (err) { 1121 /* If another thread is updating the Rx rings, we won't 1122 * properly reset them here. This could lead to reporting of 1123 * invalid timestamps, but there isn't much we can do. 1124 */ 1125 dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n", 1126 __func__); 1127 1128 /* Queue the work item to update the Rx rings when possible */ 1129 kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 1130 msecs_to_jiffies(10)); 1131 } 1132 1133 /* Mark any outstanding timestamps as stale, since they might have 1134 * been captured in hardware before the time update. This could lead 1135 * to us extending them with the wrong cached value resulting in 1136 * incorrect timestamp values. 1137 */ 1138 ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx); 1139 } 1140 1141 /** 1142 * ice_ptp_write_init - Set PHC time to provided value 1143 * @pf: Board private structure 1144 * @ts: timespec structure that holds the new time value 1145 * 1146 * Set the PHC time to the specified time provided in the timespec. 
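 *
 * Return: 0 on success, or a negative error code on failure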
1147 */ 1148 static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts) 1149 { 1150 u64 ns = timespec64_to_ns(ts); 1151 struct ice_hw *hw = &pf->hw; 1152 1153 return ice_ptp_init_time(hw, ns); 1154 } 1155 1156 /** 1157 * ice_ptp_write_adj - Adjust PHC clock time atomically 1158 * @pf: Board private structure 1159 * @adj: Adjustment in nanoseconds 1160 * 1161 * Perform an atomic adjustment of the PHC time by the specified number of 1162 * nanoseconds. 1163 */ 1164 static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj) 1165 { 1166 struct ice_hw *hw = &pf->hw; 1167 1168 return ice_ptp_adj_clock(hw, adj); 1169 } 1170 1171 /** 1172 * ice_base_incval - Get base timer increment value 1173 * @pf: Board private structure 1174 * 1175 * Look up the base timer increment value for this device. The base increment 1176 * value is used to define the nominal clock tick rate. This increment value 1177 * is programmed during device initialization. It is also used as the basis 1178 * for calculating adjustments using scaled_ppm. 1179 */ 1180 static u64 ice_base_incval(struct ice_pf *pf) 1181 { 1182 struct ice_hw *hw = &pf->hw; 1183 u64 incval; 1184 1185 incval = ice_get_base_incval(hw); 1186 1187 dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n", 1188 incval); 1189 1190 return incval; 1191 } 1192 1193 /** 1194 * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state 1195 * @port: PTP port for which Tx FIFO is checked 1196 */ 1197 static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port) 1198 { 1199 int offs = port->port_num % ICE_PORTS_PER_QUAD; 1200 int quad = ICE_GET_QUAD_NUM(port->port_num); 1201 struct ice_pf *pf; 1202 struct ice_hw *hw; 1203 u32 val, phy_sts; 1204 int err; 1205 1206 pf = ptp_port_to_pf(port); 1207 hw = &pf->hw; 1208 1209 if (port->tx_fifo_busy_cnt == FIFO_OK) 1210 return 0; 1211 1212 /* need to read FIFO state */ 1213 if (offs == 0 || offs == 1) 1214 err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS, 1215 &val); 1216 else 1217 err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS, 1218 &val); 1219 1220 if (err) { 1221 dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n", 1222 port->port_num, err); 1223 return err; 1224 } 1225 1226 if (offs & 0x1) 1227 phy_sts = FIELD_GET(Q_REG_FIFO13_M, val); 1228 else 1229 phy_sts = FIELD_GET(Q_REG_FIFO02_M, val); 1230 1231 if (phy_sts & FIFO_EMPTY) { 1232 port->tx_fifo_busy_cnt = FIFO_OK; 1233 return 0; 1234 } 1235 1236 port->tx_fifo_busy_cnt++; 1237 1238 dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n", 1239 port->tx_fifo_busy_cnt, port->port_num); 1240 1241 if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) { 1242 dev_dbg(ice_pf_to_dev(pf), 1243 "Port %d Tx FIFO still not empty; resetting quad %d\n", 1244 port->port_num, quad); 1245 ice_ptp_reset_ts_memory_quad_e82x(hw, quad); 1246 port->tx_fifo_busy_cnt = FIFO_OK; 1247 return 0; 1248 } 1249 1250 return -EAGAIN; 1251 } 1252 1253 /** 1254 * ice_ptp_wait_for_offsets - Check for valid Tx and Rx offsets 1255 * @work: Pointer to the kthread_work structure for this task 1256 * 1257 * Check whether hardware has completed measuring the Tx and Rx offset values 1258 * used to configure and enable vernier timestamp calibration. 1259 * 1260 * Once the offset in either direction is measured, configure the associated 1261 * registers with the calibrated offset values and enable timestamping. The Tx 1262 * and Rx directions are configured independently as soon as their associated 1263 * offsets are known. 
1264 * 1265 * This function reschedules itself until both Tx and Rx calibration have 1266 * completed. 1267 */ 1268 static void ice_ptp_wait_for_offsets(struct kthread_work *work) 1269 { 1270 struct ice_ptp_port *port; 1271 struct ice_pf *pf; 1272 struct ice_hw *hw; 1273 int tx_err; 1274 int rx_err; 1275 1276 port = container_of(work, struct ice_ptp_port, ov_work.work); 1277 pf = ptp_port_to_pf(port); 1278 hw = &pf->hw; 1279 1280 if (ice_is_reset_in_progress(pf->state)) { 1281 /* wait for device driver to complete reset */ 1282 kthread_queue_delayed_work(pf->ptp.kworker, 1283 &port->ov_work, 1284 msecs_to_jiffies(100)); 1285 return; 1286 } 1287 1288 tx_err = ice_ptp_check_tx_fifo(port); 1289 if (!tx_err) 1290 tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num); 1291 rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num); 1292 if (tx_err || rx_err) { 1293 /* Tx and/or Rx offset not yet configured, try again later */ 1294 kthread_queue_delayed_work(pf->ptp.kworker, 1295 &port->ov_work, 1296 msecs_to_jiffies(100)); 1297 return; 1298 } 1299 } 1300 1301 /** 1302 * ice_ptp_port_phy_stop - Stop timestamping for a PHY port 1303 * @ptp_port: PTP port to stop 1304 */ 1305 static int 1306 ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port) 1307 { 1308 struct ice_pf *pf = ptp_port_to_pf(ptp_port); 1309 u8 port = ptp_port->port_num; 1310 struct ice_hw *hw = &pf->hw; 1311 int err; 1312 1313 mutex_lock(&ptp_port->ps_lock); 1314 1315 switch (hw->mac_type) { 1316 case ICE_MAC_E810: 1317 case ICE_MAC_E830: 1318 err = 0; 1319 break; 1320 case ICE_MAC_GENERIC: 1321 kthread_cancel_delayed_work_sync(&ptp_port->ov_work); 1322 1323 err = ice_stop_phy_timer_e82x(hw, port, true); 1324 break; 1325 case ICE_MAC_GENERIC_3K_E825: 1326 err = ice_stop_phy_timer_eth56g(hw, port, true); 1327 break; 1328 default: 1329 err = -ENODEV; 1330 } 1331 if (err && err != -EBUSY) 1332 dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n", 1333 port, err); 1334 1335 mutex_unlock(&ptp_port->ps_lock); 1336 1337 return err; 1338 } 1339 1340 /** 1341 * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping 1342 * @ptp_port: PTP port for which the PHY start is set 1343 * 1344 * Start the PHY timestamping block, and initiate Vernier timestamping 1345 * calibration. If timestamping cannot be calibrated (such as if link is down) 1346 * then disable the timestamping block instead. 
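 *
 * Return: 0 on success, negative error code otherwise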
1347 */ 1348 static int 1349 ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port) 1350 { 1351 struct ice_pf *pf = ptp_port_to_pf(ptp_port); 1352 u8 port = ptp_port->port_num; 1353 struct ice_hw *hw = &pf->hw; 1354 unsigned long flags; 1355 int err; 1356 1357 if (!ptp_port->link_up) 1358 return ice_ptp_port_phy_stop(ptp_port); 1359 1360 mutex_lock(&ptp_port->ps_lock); 1361 1362 switch (hw->mac_type) { 1363 case ICE_MAC_E810: 1364 case ICE_MAC_E830: 1365 err = 0; 1366 break; 1367 case ICE_MAC_GENERIC: 1368 /* Start the PHY timer in Vernier mode */ 1369 kthread_cancel_delayed_work_sync(&ptp_port->ov_work); 1370 1371 /* temporarily disable Tx timestamps while calibrating 1372 * PHY offset 1373 */ 1374 spin_lock_irqsave(&ptp_port->tx.lock, flags); 1375 ptp_port->tx.calibrating = true; 1376 spin_unlock_irqrestore(&ptp_port->tx.lock, flags); 1377 ptp_port->tx_fifo_busy_cnt = 0; 1378 1379 /* Start the PHY timer in Vernier mode */ 1380 err = ice_start_phy_timer_e82x(hw, port); 1381 if (err) 1382 break; 1383 1384 /* Enable Tx timestamps right away */ 1385 spin_lock_irqsave(&ptp_port->tx.lock, flags); 1386 ptp_port->tx.calibrating = false; 1387 spin_unlock_irqrestore(&ptp_port->tx.lock, flags); 1388 1389 kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 1390 0); 1391 break; 1392 case ICE_MAC_GENERIC_3K_E825: 1393 err = ice_start_phy_timer_eth56g(hw, port); 1394 break; 1395 default: 1396 err = -ENODEV; 1397 } 1398 1399 if (err) 1400 dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n", 1401 port, err); 1402 1403 mutex_unlock(&ptp_port->ps_lock); 1404 1405 return err; 1406 } 1407 1408 /** 1409 * ice_ptp_link_change - Reconfigure PTP after link status change 1410 * @pf: Board private structure 1411 * @linkup: Link is up or down 1412 */ 1413 void ice_ptp_link_change(struct ice_pf *pf, bool linkup) 1414 { 1415 struct ice_ptp_port *ptp_port; 1416 struct ice_hw *hw = &pf->hw; 1417 1418 if (pf->ptp.state != ICE_PTP_READY) 1419 return; 1420 1421 ptp_port = &pf->ptp.port; 1422 1423 /* Update cached link status for this port immediately */ 1424 ptp_port->link_up = linkup; 1425 1426 /* Skip HW writes if reset is in progress */ 1427 if (pf->hw.reset_ongoing) 1428 return; 1429 1430 switch (hw->mac_type) { 1431 case ICE_MAC_E810: 1432 case ICE_MAC_E830: 1433 /* Do not reconfigure E810 or E830 PHY */ 1434 return; 1435 case ICE_MAC_GENERIC: 1436 case ICE_MAC_GENERIC_3K_E825: 1437 ice_ptp_port_phy_restart(ptp_port); 1438 return; 1439 default: 1440 dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__); 1441 } 1442 } 1443 1444 /** 1445 * ice_ptp_cfg_phy_interrupt - Configure PHY interrupt settings 1446 * @pf: PF private structure 1447 * @ena: bool value to enable or disable interrupt 1448 * @threshold: Minimum number of packets at which intr is triggered 1449 * 1450 * Utility function to configure all the PHY interrupt settings, including 1451 * whether the PHY interrupt is enabled, and what threshold to use. Also 1452 * configures The E82X timestamp owner to react to interrupts from all PHYs. 
1453 * 1454 * Return: 0 on success, -EOPNOTSUPP when PHY model incorrect, other error codes 1455 * when failed to configure PHY interrupt for E82X 1456 */ 1457 static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold) 1458 { 1459 struct device *dev = ice_pf_to_dev(pf); 1460 struct ice_hw *hw = &pf->hw; 1461 1462 ice_ptp_reset_ts_memory(hw); 1463 1464 switch (hw->mac_type) { 1465 case ICE_MAC_E810: 1466 case ICE_MAC_E830: 1467 return 0; 1468 case ICE_MAC_GENERIC: { 1469 int quad; 1470 1471 for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports); 1472 quad++) { 1473 int err; 1474 1475 err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold); 1476 if (err) { 1477 dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n", 1478 quad, err); 1479 return err; 1480 } 1481 } 1482 1483 return 0; 1484 } 1485 case ICE_MAC_GENERIC_3K_E825: { 1486 int port; 1487 1488 for (port = 0; port < hw->ptp.num_lports; port++) { 1489 int err; 1490 1491 err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold); 1492 if (err) { 1493 dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n", 1494 port, err); 1495 return err; 1496 } 1497 } 1498 1499 return 0; 1500 } 1501 case ICE_MAC_UNKNOWN: 1502 default: 1503 return -EOPNOTSUPP; 1504 } 1505 } 1506 1507 /** 1508 * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block 1509 * @pf: Board private structure 1510 */ 1511 static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf) 1512 { 1513 ice_ptp_port_phy_restart(&pf->ptp.port); 1514 } 1515 1516 /** 1517 * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping 1518 * @pf: Board private structure 1519 */ 1520 static void ice_ptp_restart_all_phy(struct ice_pf *pf) 1521 { 1522 struct list_head *entry; 1523 1524 list_for_each(entry, &pf->adapter->ports.ports) { 1525 struct ice_ptp_port *port = list_entry(entry, 1526 struct ice_ptp_port, 1527 list_node); 1528 1529 if (port->link_up) 1530 ice_ptp_port_phy_restart(port); 1531 } 1532 } 1533 1534 /** 1535 * ice_ptp_adjfine - Adjust clock increment rate 1536 * @info: the driver's PTP info structure 1537 * @scaled_ppm: Parts per million with 16-bit fractional field 1538 * 1539 * Adjust the frequency of the clock by the indicated scaled ppm from the 1540 * base frequency. 
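 * (A scaled_ppm of 65536 corresponds to 1 ppm.)
 *
 * Return: 0 on success, -EIO if the new increment value cannot be written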
1541 */ 1542 static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm) 1543 { 1544 struct ice_pf *pf = ptp_info_to_pf(info); 1545 struct ice_hw *hw = &pf->hw; 1546 u64 incval; 1547 int err; 1548 1549 incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm); 1550 err = ice_ptp_write_incval_locked(hw, incval); 1551 if (err) { 1552 dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n", 1553 err); 1554 return -EIO; 1555 } 1556 1557 return 0; 1558 } 1559 1560 /** 1561 * ice_ptp_extts_event - Process PTP external clock event 1562 * @pf: Board private structure 1563 */ 1564 void ice_ptp_extts_event(struct ice_pf *pf) 1565 { 1566 struct ptp_clock_event event; 1567 struct ice_hw *hw = &pf->hw; 1568 u8 chan, tmr_idx; 1569 u32 hi, lo; 1570 1571 /* Don't process timestamp events if PTP is not ready */ 1572 if (pf->ptp.state != ICE_PTP_READY) 1573 return; 1574 1575 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 1576 /* Event time is captured by one of the two matched registers 1577 * GLTSYN_EVNT_L: 32 LSB of sampled time event 1578 * GLTSYN_EVNT_H: 32 MSB of sampled time event 1579 * Event is defined in GLTSYN_EVNT_0 register 1580 */ 1581 for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) { 1582 int pin_desc_idx; 1583 1584 /* Check if channel is enabled */ 1585 if (!(pf->ptp.ext_ts_irq & (1 << chan))) 1586 continue; 1587 1588 lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx)); 1589 hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx)); 1590 event.timestamp = (u64)hi << 32 | lo; 1591 1592 /* Add delay compensation */ 1593 pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan); 1594 if (pin_desc_idx >= 0) { 1595 const struct ice_ptp_pin_desc *desc; 1596 1597 desc = &pf->ptp.ice_pin_desc[pin_desc_idx]; 1598 event.timestamp -= desc->delay[0]; 1599 } 1600 1601 event.type = PTP_CLOCK_EXTTS; 1602 event.index = chan; 1603 pf->ptp.ext_ts_irq &= ~(1 << chan); 1604 ptp_clock_event(pf->ptp.clock, &event); 1605 } 1606 } 1607 1608 /** 1609 * ice_ptp_cfg_extts - Configure EXTTS pin and channel 1610 * @pf: Board private structure 1611 * @rq: External timestamp request 1612 * @on: Enable/disable flag 1613 * 1614 * Configure an external timestamp event on the requested channel. 
1615 * 1616 * Return: 0 on success, negative error code otherwise 1617 */ 1618 static int ice_ptp_cfg_extts(struct ice_pf *pf, struct ptp_extts_request *rq, 1619 int on) 1620 { 1621 u32 aux_reg, gpio_reg, irq_reg; 1622 struct ice_hw *hw = &pf->hw; 1623 unsigned int chan, gpio_pin; 1624 int pin_desc_idx; 1625 u8 tmr_idx; 1626 1627 /* Reject requests with unsupported flags */ 1628 1629 if (rq->flags & ~(PTP_ENABLE_FEATURE | 1630 PTP_RISING_EDGE | 1631 PTP_FALLING_EDGE | 1632 PTP_STRICT_FLAGS)) 1633 return -EOPNOTSUPP; 1634 1635 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 1636 chan = rq->index; 1637 1638 pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan); 1639 if (pin_desc_idx < 0) 1640 return -EIO; 1641 1642 gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[0]; 1643 irq_reg = rd32(hw, PFINT_OICR_ENA); 1644 1645 if (on) { 1646 /* Enable the interrupt */ 1647 irq_reg |= PFINT_OICR_TSYN_EVNT_M; 1648 aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M; 1649 1650 #define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE BIT(0) 1651 #define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE BIT(1) 1652 1653 /* set event level to requested edge */ 1654 if (rq->flags & PTP_FALLING_EDGE) 1655 aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE; 1656 if (rq->flags & PTP_RISING_EDGE) 1657 aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE; 1658 1659 /* Write GPIO CTL reg. 1660 * 0x1 is input sampled by EVENT register(channel) 1661 * + num_in_channels * tmr_idx 1662 */ 1663 gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, 1664 1 + chan + (tmr_idx * 3)); 1665 } else { 1666 bool last_enabled = true; 1667 1668 /* clear the values we set to reset defaults */ 1669 aux_reg = 0; 1670 gpio_reg = 0; 1671 1672 for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++) 1673 if ((pf->ptp.extts_rqs[i].flags & 1674 PTP_ENABLE_FEATURE) && 1675 i != chan) { 1676 last_enabled = false; 1677 } 1678 1679 if (last_enabled) 1680 irq_reg &= ~PFINT_OICR_TSYN_EVNT_M; 1681 } 1682 1683 wr32(hw, PFINT_OICR_ENA, irq_reg); 1684 wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg); 1685 wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg); 1686 1687 return 0; 1688 } 1689 1690 /** 1691 * ice_ptp_disable_all_extts - Disable all EXTTS channels 1692 * @pf: Board private structure 1693 */ 1694 static void ice_ptp_disable_all_extts(struct ice_pf *pf) 1695 { 1696 for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts ; i++) 1697 if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE) 1698 ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i], 1699 false); 1700 1701 synchronize_irq(pf->oicr_irq.virq); 1702 } 1703 1704 /** 1705 * ice_ptp_enable_all_extts - Enable all EXTTS channels 1706 * @pf: Board private structure 1707 * 1708 * Called during reset to restore user configuration. 1709 */ 1710 static void ice_ptp_enable_all_extts(struct ice_pf *pf) 1711 { 1712 for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts ; i++) 1713 if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE) 1714 ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i], 1715 true); 1716 } 1717 1718 /** 1719 * ice_ptp_write_perout - Write periodic wave parameters to HW 1720 * @hw: pointer to the HW struct 1721 * @chan: target channel 1722 * @gpio_pin: target GPIO pin 1723 * @start: target time to start periodic output 1724 * @period: target period 1725 * 1726 * Return: 0 on success, negative error code otherwise 1727 */ 1728 static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan, 1729 unsigned int gpio_pin, u64 start, u64 period) 1730 { 1731 1732 u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 1733 u32 val = 0; 1734 1735 /* 0. 
Reset mode & out_en in AUX_OUT */ 1736 wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0); 1737 1738 if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) { 1739 int err; 1740 1741 /* Enable/disable CGU 1PPS output for E825C */ 1742 err = ice_cgu_cfg_pps_out(hw, !!period); 1743 if (err) 1744 return err; 1745 } 1746 1747 /* 1. Write perout with half of required period value. 1748 * HW toggles output when source clock hits the TGT and then adds 1749 * GLTSYN_CLKO value to the target, so it ends up with 50% duty cycle. 1750 */ 1751 period >>= 1; 1752 1753 /* For proper operation, GLTSYN_CLKO must be larger than clock tick and 1754 * period has to fit in 32 bit register. 1755 */ 1756 #define MIN_PULSE 3 1757 if (!!period && (period <= MIN_PULSE || period > U32_MAX)) { 1758 dev_err(ice_hw_to_dev(hw), "CLK period ticks must be >= %d && <= 2^32", 1759 MIN_PULSE); 1760 return -EIO; 1761 } 1762 1763 wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period)); 1764 1765 /* 2. Write TARGET time */ 1766 wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start)); 1767 wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start)); 1768 1769 /* 3. Write AUX_OUT register */ 1770 if (!!period) 1771 val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M; 1772 wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val); 1773 1774 /* 4. write GPIO CTL reg */ 1775 val = GLGEN_GPIO_CTL_PIN_DIR_M; 1776 if (!!period) 1777 val |= FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, 1778 8 + chan + (tmr_idx * 4)); 1779 1780 wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val); 1781 1782 return 0; 1783 } 1784 1785 /** 1786 * ice_ptp_cfg_perout - Configure clock to generate periodic wave 1787 * @pf: Board private structure 1788 * @rq: Periodic output request 1789 * @on: Enable/disable flag 1790 * 1791 * Configure the internal clock generator modules to generate the clock wave of 1792 * specified period. 1793 * 1794 * Return: 0 on success, negative error code otherwise 1795 */ 1796 static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq, 1797 int on) 1798 { 1799 unsigned int gpio_pin, prop_delay_ns; 1800 u64 clk, period, start, phase; 1801 struct ice_hw *hw = &pf->hw; 1802 int pin_desc_idx; 1803 1804 if (rq->flags & ~PTP_PEROUT_PHASE) 1805 return -EOPNOTSUPP; 1806 1807 pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_PEROUT, rq->index); 1808 if (pin_desc_idx < 0) 1809 return -EIO; 1810 1811 gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1]; 1812 prop_delay_ns = pf->ptp.ice_pin_desc[pin_desc_idx].delay[1]; 1813 period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec; 1814 1815 /* If we're disabling the output or period is 0, clear out CLKO and TGT 1816 * and keep output level low. 1817 */ 1818 if (!on || !period) 1819 return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0); 1820 1821 if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 && 1822 period != NSEC_PER_SEC && hw->mac_type == ICE_MAC_GENERIC) { 1823 dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n"); 1824 return -EOPNOTSUPP; 1825 } 1826 1827 if (period & 0x1) { 1828 dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n"); 1829 return -EIO; 1830 } 1831 1832 start = rq->start.sec * NSEC_PER_SEC + rq->start.nsec; 1833 1834 /* If PTP_PEROUT_PHASE is set, rq has phase instead of start time */ 1835 if (rq->flags & PTP_PEROUT_PHASE) 1836 phase = start; 1837 else 1838 div64_u64_rem(start, period, &phase); 1839 1840 /* If we have only phase or start time is in the past, start the timer 1841 * at the next multiple of period, maintaining phase. 
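	 * For example (illustrative values): with a 1 s period, a 0.25 s
	 * phase and a current clock time of 10.6 s, the start is rounded up
	 * to the 11 s boundary and the phase added, giving 11.25 s, before
	 * the propagation delay is subtracted below.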
1842 */ 1843 clk = ice_ptp_read_src_clk_reg(pf, NULL); 1844 if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns) 1845 start = div64_u64(clk + period - 1, period) * period + phase; 1846 1847 /* Compensate for propagation delay from the generator to the pin. */ 1848 start -= prop_delay_ns; 1849 1850 return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period); 1851 } 1852 1853 /** 1854 * ice_ptp_disable_all_perout - Disable all currently configured outputs 1855 * @pf: Board private structure 1856 * 1857 * Disable all currently configured clock outputs. This is necessary before 1858 * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_perout to 1859 * re-enable the clocks again. 1860 */ 1861 static void ice_ptp_disable_all_perout(struct ice_pf *pf) 1862 { 1863 for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++) 1864 if (pf->ptp.perout_rqs[i].period.sec || 1865 pf->ptp.perout_rqs[i].period.nsec) 1866 ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i], 1867 false); 1868 } 1869 1870 /** 1871 * ice_ptp_enable_all_perout - Enable all configured periodic clock outputs 1872 * @pf: Board private structure 1873 * 1874 * Enable all currently configured clock outputs. Use this after 1875 * ice_ptp_disable_all_perout to reconfigure the output signals according to 1876 * their configuration. 1877 */ 1878 static void ice_ptp_enable_all_perout(struct ice_pf *pf) 1879 { 1880 for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++) 1881 if (pf->ptp.perout_rqs[i].period.sec || 1882 pf->ptp.perout_rqs[i].period.nsec) 1883 ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i], 1884 true); 1885 } 1886 1887 /** 1888 * ice_ptp_disable_shared_pin - Disable enabled pin that shares GPIO 1889 * @pf: Board private structure 1890 * @pin: Pin index 1891 * @func: Assigned function 1892 * 1893 * Return: 0 on success, negative error code otherwise 1894 */ 1895 static int ice_ptp_disable_shared_pin(struct ice_pf *pf, unsigned int pin, 1896 enum ptp_pin_function func) 1897 { 1898 unsigned int gpio_pin; 1899 1900 switch (func) { 1901 case PTP_PF_PEROUT: 1902 gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[1]; 1903 break; 1904 case PTP_PF_EXTTS: 1905 gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[0]; 1906 break; 1907 default: 1908 return -EOPNOTSUPP; 1909 } 1910 1911 for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) { 1912 struct ptp_pin_desc *pin_desc = &pf->ptp.pin_desc[i]; 1913 unsigned int chan = pin_desc->chan; 1914 1915 /* Skip pin idx from the request */ 1916 if (i == pin) 1917 continue; 1918 1919 if (pin_desc->func == PTP_PF_PEROUT && 1920 pf->ptp.ice_pin_desc[i].gpio[1] == gpio_pin) { 1921 pf->ptp.perout_rqs[chan].period.sec = 0; 1922 pf->ptp.perout_rqs[chan].period.nsec = 0; 1923 pin_desc->func = PTP_PF_NONE; 1924 pin_desc->chan = 0; 1925 dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared output GPIO pin %u\n", 1926 i, gpio_pin); 1927 return ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[chan], 1928 false); 1929 } else if (pf->ptp.pin_desc->func == PTP_PF_EXTTS && 1930 pf->ptp.ice_pin_desc[i].gpio[0] == gpio_pin) { 1931 pf->ptp.extts_rqs[chan].flags &= ~PTP_ENABLE_FEATURE; 1932 pin_desc->func = PTP_PF_NONE; 1933 pin_desc->chan = 0; 1934 dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared input GPIO pin %u\n", 1935 i, gpio_pin); 1936 return ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[chan], 1937 false); 1938 } 1939 } 1940 1941 return 0; 1942 } 1943 1944 /** 1945 * ice_verify_pin - verify if pin supports requested pin function 1946 * @info: the driver's PTP info structure 1947 * @pin: Pin index 
1948 * @func: Assigned function 1949 * @chan: Assigned channel 1950 * 1951 * Return: 0 on success, -EOPNOTSUPP when function is not supported. 1952 */ 1953 static int ice_verify_pin(struct ptp_clock_info *info, unsigned int pin, 1954 enum ptp_pin_function func, unsigned int chan) 1955 { 1956 struct ice_pf *pf = ptp_info_to_pf(info); 1957 const struct ice_ptp_pin_desc *pin_desc; 1958 1959 pin_desc = &pf->ptp.ice_pin_desc[pin]; 1960 1961 /* Is assigned function allowed? */ 1962 switch (func) { 1963 case PTP_PF_EXTTS: 1964 if (pin_desc->gpio[0] < 0) 1965 return -EOPNOTSUPP; 1966 break; 1967 case PTP_PF_PEROUT: 1968 if (pin_desc->gpio[1] < 0) 1969 return -EOPNOTSUPP; 1970 break; 1971 case PTP_PF_NONE: 1972 break; 1973 case PTP_PF_PHYSYNC: 1974 default: 1975 return -EOPNOTSUPP; 1976 } 1977 1978 /* On adapters with SMA_CTRL disable other pins that share same GPIO */ 1979 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { 1980 ice_ptp_disable_shared_pin(pf, pin, func); 1981 pf->ptp.pin_desc[pin].func = func; 1982 pf->ptp.pin_desc[pin].chan = chan; 1983 return ice_ptp_set_sma_cfg(pf); 1984 } 1985 1986 return 0; 1987 } 1988 1989 /** 1990 * ice_ptp_gpio_enable - Enable/disable ancillary features of PHC 1991 * @info: The driver's PTP info structure 1992 * @rq: The requested feature to change 1993 * @on: Enable/disable flag 1994 * 1995 * Return: 0 on success, negative error code otherwise 1996 */ 1997 static int ice_ptp_gpio_enable(struct ptp_clock_info *info, 1998 struct ptp_clock_request *rq, int on) 1999 { 2000 struct ice_pf *pf = ptp_info_to_pf(info); 2001 int err; 2002 2003 switch (rq->type) { 2004 case PTP_CLK_REQ_PEROUT: 2005 { 2006 struct ptp_perout_request *cached = 2007 &pf->ptp.perout_rqs[rq->perout.index]; 2008 2009 err = ice_ptp_cfg_perout(pf, &rq->perout, on); 2010 if (!err) { 2011 *cached = rq->perout; 2012 } else { 2013 cached->period.sec = 0; 2014 cached->period.nsec = 0; 2015 } 2016 return err; 2017 } 2018 case PTP_CLK_REQ_EXTTS: 2019 { 2020 struct ptp_extts_request *cached = 2021 &pf->ptp.extts_rqs[rq->extts.index]; 2022 2023 err = ice_ptp_cfg_extts(pf, &rq->extts, on); 2024 if (!err) 2025 *cached = rq->extts; 2026 else 2027 cached->flags &= ~PTP_ENABLE_FEATURE; 2028 return err; 2029 } 2030 default: 2031 return -EOPNOTSUPP; 2032 } 2033 } 2034 2035 /** 2036 * ice_ptp_gettimex64 - Get the time of the clock 2037 * @info: the driver's PTP info structure 2038 * @ts: timespec64 structure to hold the current time value 2039 * @sts: Optional parameter for holding a pair of system timestamps from 2040 * the system clock. Will be ignored if NULL is given. 2041 * 2042 * Read the device clock and return the correct value on ns, after converting it 2043 * into a timespec struct. 2044 */ 2045 static int 2046 ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts, 2047 struct ptp_system_timestamp *sts) 2048 { 2049 struct ice_pf *pf = ptp_info_to_pf(info); 2050 u64 time_ns; 2051 2052 time_ns = ice_ptp_read_src_clk_reg(pf, sts); 2053 *ts = ns_to_timespec64(time_ns); 2054 return 0; 2055 } 2056 2057 /** 2058 * ice_ptp_settime64 - Set the time of the clock 2059 * @info: the driver's PTP info structure 2060 * @ts: timespec64 structure that holds the new time value 2061 * 2062 * Set the device clock to the user input value. The conversion from timespec 2063 * to ns happens in the write function. 
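*
* Periodic outputs are disabled for the duration of the write and re-enabled
* afterwards. On E82X (Vernier mode) devices the PHY timestamp blocks are also
* marked invalid before the write and restarted once the new time has been
* programmed.
*
* Return: 0 on success, -EBUSY if the PTP hardware semaphore could not be
* taken, or another negative error code on failure.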
2064 */ 2065 static int 2066 ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) 2067 { 2068 struct ice_pf *pf = ptp_info_to_pf(info); 2069 struct timespec64 ts64 = *ts; 2070 struct ice_hw *hw = &pf->hw; 2071 int err; 2072 2073 /* For Vernier mode on E82X, we need to recalibrate after new settime. 2074 * Start with marking timestamps as invalid. 2075 */ 2076 if (hw->mac_type == ICE_MAC_GENERIC) { 2077 err = ice_ptp_clear_phy_offset_ready_e82x(hw); 2078 if (err) 2079 dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n"); 2080 } 2081 2082 if (!ice_ptp_lock(hw)) { 2083 err = -EBUSY; 2084 goto exit; 2085 } 2086 2087 /* Disable periodic outputs */ 2088 ice_ptp_disable_all_perout(pf); 2089 2090 err = ice_ptp_write_init(pf, &ts64); 2091 ice_ptp_unlock(hw); 2092 2093 if (!err) 2094 ice_ptp_reset_cached_phctime(pf); 2095 2096 /* Reenable periodic outputs */ 2097 ice_ptp_enable_all_perout(pf); 2098 2099 /* Recalibrate and re-enable timestamp blocks for E822/E823 */ 2100 if (hw->mac_type == ICE_MAC_GENERIC) 2101 ice_ptp_restart_all_phy(pf); 2102 exit: 2103 if (err) { 2104 dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err); 2105 return err; 2106 } 2107 2108 return 0; 2109 } 2110 2111 /** 2112 * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment 2113 * @info: the driver's PTP info structure 2114 * @delta: Offset in nanoseconds to adjust the time by 2115 */ 2116 static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta) 2117 { 2118 struct timespec64 now, then; 2119 int ret; 2120 2121 then = ns_to_timespec64(delta); 2122 ret = ice_ptp_gettimex64(info, &now, NULL); 2123 if (ret) 2124 return ret; 2125 now = timespec64_add(now, then); 2126 2127 return ice_ptp_settime64(info, (const struct timespec64 *)&now); 2128 } 2129 2130 /** 2131 * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta 2132 * @info: the driver's PTP info structure 2133 * @delta: Offset in nanoseconds to adjust the time by 2134 */ 2135 static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta) 2136 { 2137 struct ice_pf *pf = ptp_info_to_pf(info); 2138 struct ice_hw *hw = &pf->hw; 2139 struct device *dev; 2140 int err; 2141 2142 dev = ice_pf_to_dev(pf); 2143 2144 /* Hardware only supports atomic adjustments using signed 32-bit 2145 * integers. For any adjustment outside this range, perform 2146 * a non-atomic get->adjust->set flow. 
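* The non-atomic path reads the current time, adds the delta in software and
* writes the result back with ice_ptp_settime64(), so the adjustment is not
* applied in a single hardware operation.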
2147 */ 2148 if (delta > S32_MAX || delta < S32_MIN) { 2149 dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta); 2150 return ice_ptp_adjtime_nonatomic(info, delta); 2151 } 2152 2153 if (!ice_ptp_lock(hw)) { 2154 dev_err(dev, "PTP failed to acquire semaphore in adjtime\n"); 2155 return -EBUSY; 2156 } 2157 2158 /* Disable periodic outputs */ 2159 ice_ptp_disable_all_perout(pf); 2160 2161 err = ice_ptp_write_adj(pf, delta); 2162 2163 /* Reenable periodic outputs */ 2164 ice_ptp_enable_all_perout(pf); 2165 2166 ice_ptp_unlock(hw); 2167 2168 if (err) { 2169 dev_err(dev, "PTP failed to adjust time, err %d\n", err); 2170 return err; 2171 } 2172 2173 ice_ptp_reset_cached_phctime(pf); 2174 2175 return 0; 2176 } 2177 2178 /** 2179 * struct ice_crosststamp_cfg - Device cross timestamp configuration 2180 * @lock_reg: The hardware semaphore lock to use 2181 * @lock_busy: Bit in the semaphore lock indicating the lock is busy 2182 * @ctl_reg: The hardware register to request cross timestamp 2183 * @ctl_active: Bit in the control register to request cross timestamp 2184 * @art_time_l: Lower 32-bits of ART system time 2185 * @art_time_h: Upper 32-bits of ART system time 2186 * @dev_time_l: Lower 32-bits of device time (per timer index) 2187 * @dev_time_h: Upper 32-bits of device time (per timer index) 2188 */ 2189 struct ice_crosststamp_cfg { 2190 /* HW semaphore lock register */ 2191 u32 lock_reg; 2192 u32 lock_busy; 2193 2194 /* Capture control register */ 2195 u32 ctl_reg; 2196 u32 ctl_active; 2197 2198 /* Time storage */ 2199 u32 art_time_l; 2200 u32 art_time_h; 2201 u32 dev_time_l[2]; 2202 u32 dev_time_h[2]; 2203 }; 2204 2205 static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e82x = { 2206 .lock_reg = PFHH_SEM, 2207 .lock_busy = PFHH_SEM_BUSY_M, 2208 .ctl_reg = GLHH_ART_CTL, 2209 .ctl_active = GLHH_ART_CTL_ACTIVE_M, 2210 .art_time_l = GLHH_ART_TIME_L, 2211 .art_time_h = GLHH_ART_TIME_H, 2212 .dev_time_l[0] = GLTSYN_HHTIME_L(0), 2213 .dev_time_h[0] = GLTSYN_HHTIME_H(0), 2214 .dev_time_l[1] = GLTSYN_HHTIME_L(1), 2215 .dev_time_h[1] = GLTSYN_HHTIME_H(1), 2216 }; 2217 2218 #ifdef CONFIG_ICE_HWTS 2219 static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e830 = { 2220 .lock_reg = E830_PFPTM_SEM, 2221 .lock_busy = E830_PFPTM_SEM_BUSY_M, 2222 .ctl_reg = E830_GLPTM_ART_CTL, 2223 .ctl_active = E830_GLPTM_ART_CTL_ACTIVE_M, 2224 .art_time_l = E830_GLPTM_ART_TIME_L, 2225 .art_time_h = E830_GLPTM_ART_TIME_H, 2226 .dev_time_l[0] = E830_GLTSYN_PTMTIME_L(0), 2227 .dev_time_h[0] = E830_GLTSYN_PTMTIME_H(0), 2228 .dev_time_l[1] = E830_GLTSYN_PTMTIME_L(1), 2229 .dev_time_h[1] = E830_GLTSYN_PTMTIME_H(1), 2230 }; 2231 2232 #endif /* CONFIG_ICE_HWTS */ 2233 /** 2234 * struct ice_crosststamp_ctx - Device cross timestamp context 2235 * @snapshot: snapshot of system clocks for historic interpolation 2236 * @pf: pointer to the PF private structure 2237 * @cfg: pointer to hardware configuration for cross timestamp 2238 */ 2239 struct ice_crosststamp_ctx { 2240 struct system_time_snapshot snapshot; 2241 struct ice_pf *pf; 2242 const struct ice_crosststamp_cfg *cfg; 2243 }; 2244 2245 /** 2246 * ice_capture_crosststamp - Capture a device/system cross timestamp 2247 * @device: Current device time 2248 * @system: System counter value read synchronously with device time 2249 * @__ctx: Context passed from ice_ptp_getcrosststamp 2250 * 2251 * Read device and system (ART) clock simultaneously and return the corrected 2252 * clock values in ns. 
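* The capture sequence takes the cross-timestamp hardware semaphore, snapshots
* the system clocks for historic interpolation, arms the capture through the
* control register, and then polls until hardware has latched both the ART and
* the device time. The semaphore is released before returning.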
2253 * 2254 * Return: zero on success, or a negative error code on failure. 2255 */ 2256 static int ice_capture_crosststamp(ktime_t *device, 2257 struct system_counterval_t *system, 2258 void *__ctx) 2259 { 2260 struct ice_crosststamp_ctx *ctx = __ctx; 2261 const struct ice_crosststamp_cfg *cfg; 2262 u32 lock, ctl, ts_lo, ts_hi, tmr_idx; 2263 struct ice_pf *pf; 2264 struct ice_hw *hw; 2265 int err; 2266 u64 ts; 2267 2268 cfg = ctx->cfg; 2269 pf = ctx->pf; 2270 hw = &pf->hw; 2271 2272 tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; 2273 if (tmr_idx > 1) 2274 return -EINVAL; 2275 2276 /* Poll until we obtain the cross-timestamp hardware semaphore */ 2277 err = rd32_poll_timeout(hw, cfg->lock_reg, lock, 2278 !(lock & cfg->lock_busy), 2279 10 * USEC_PER_MSEC, 50 * USEC_PER_MSEC); 2280 if (err) { 2281 dev_err(ice_pf_to_dev(pf), "PTP failed to get cross timestamp lock\n"); 2282 return -EBUSY; 2283 } 2284 2285 /* Snapshot system time for historic interpolation */ 2286 ktime_get_snapshot(&ctx->snapshot); 2287 2288 /* Program cmd to master timer */ 2289 ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME); 2290 2291 /* Start the ART and device clock sync sequence */ 2292 ctl = rd32(hw, cfg->ctl_reg); 2293 ctl |= cfg->ctl_active; 2294 wr32(hw, cfg->ctl_reg, ctl); 2295 2296 /* Poll until hardware completes the capture */ 2297 err = rd32_poll_timeout(hw, cfg->ctl_reg, ctl, !(ctl & cfg->ctl_active), 2298 5, 20 * USEC_PER_MSEC); 2299 if (err) 2300 goto err_timeout; 2301 2302 /* Read ART system time */ 2303 ts_lo = rd32(hw, cfg->art_time_l); 2304 ts_hi = rd32(hw, cfg->art_time_h); 2305 ts = ((u64)ts_hi << 32) | ts_lo; 2306 system->cycles = ts; 2307 system->cs_id = CSID_X86_ART; 2308 2309 /* Read Device source clock time */ 2310 ts_lo = rd32(hw, cfg->dev_time_l[tmr_idx]); 2311 ts_hi = rd32(hw, cfg->dev_time_h[tmr_idx]); 2312 ts = ((u64)ts_hi << 32) | ts_lo; 2313 *device = ns_to_ktime(ts); 2314 2315 err_timeout: 2316 /* Clear the master timer */ 2317 ice_ptp_src_cmd(hw, ICE_PTP_NOP); 2318 2319 /* Release HW lock */ 2320 lock = rd32(hw, cfg->lock_reg); 2321 lock &= ~cfg->lock_busy; 2322 wr32(hw, cfg->lock_reg, lock); 2323 2324 return err; 2325 } 2326 2327 /** 2328 * ice_ptp_getcrosststamp - Capture a device cross timestamp 2329 * @info: the driver's PTP info structure 2330 * @cts: The memory to fill the cross timestamp info 2331 * 2332 * Capture a cross timestamp between the ART and the device PTP hardware 2333 * clock. Fill the cross timestamp information and report it back to the 2334 * caller. 2335 * 2336 * In order to correctly correlate the ART timestamp back to the TSC time, the 2337 * CPU must have X86_FEATURE_TSC_KNOWN_FREQ. 2338 * 2339 * Return: zero on success, or a negative error code on failure. 
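*
* This callback backs the PTP_SYS_OFFSET_PRECISE ioctl. It is only wired up
* for E82X/E825 devices, and for E830 when CONFIG_ICE_HWTS is enabled and the
* platform supports PCIe PTM and the ART counter.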
2340 */ 2341 static int ice_ptp_getcrosststamp(struct ptp_clock_info *info, 2342 struct system_device_crosststamp *cts) 2343 { 2344 struct ice_pf *pf = ptp_info_to_pf(info); 2345 struct ice_crosststamp_ctx ctx = { 2346 .pf = pf, 2347 }; 2348 2349 switch (pf->hw.mac_type) { 2350 case ICE_MAC_GENERIC: 2351 case ICE_MAC_GENERIC_3K_E825: 2352 ctx.cfg = &ice_crosststamp_cfg_e82x; 2353 break; 2354 #ifdef CONFIG_ICE_HWTS 2355 case ICE_MAC_E830: 2356 ctx.cfg = &ice_crosststamp_cfg_e830; 2357 break; 2358 #endif /* CONFIG_ICE_HWTS */ 2359 default: 2360 return -EOPNOTSUPP; 2361 } 2362 2363 return get_device_system_crosststamp(ice_capture_crosststamp, &ctx, 2364 &ctx.snapshot, cts); 2365 } 2366 2367 /** 2368 * ice_ptp_get_ts_config - ioctl interface to read the timestamping config 2369 * @pf: Board private structure 2370 * @ifr: ioctl data 2371 * 2372 * Copy the timestamping config to user buffer 2373 */ 2374 int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr) 2375 { 2376 struct hwtstamp_config *config; 2377 2378 if (pf->ptp.state != ICE_PTP_READY) 2379 return -EIO; 2380 2381 config = &pf->ptp.tstamp_config; 2382 2383 return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? 2384 -EFAULT : 0; 2385 } 2386 2387 /** 2388 * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode 2389 * @pf: Board private structure 2390 * @config: hwtstamp settings requested or saved 2391 */ 2392 static int 2393 ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config) 2394 { 2395 switch (config->tx_type) { 2396 case HWTSTAMP_TX_OFF: 2397 pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF; 2398 break; 2399 case HWTSTAMP_TX_ON: 2400 pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON; 2401 break; 2402 default: 2403 return -ERANGE; 2404 } 2405 2406 switch (config->rx_filter) { 2407 case HWTSTAMP_FILTER_NONE: 2408 pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; 2409 break; 2410 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2411 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2412 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2413 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2414 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2415 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2416 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2417 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2418 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2419 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2420 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2421 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2422 case HWTSTAMP_FILTER_NTP_ALL: 2423 case HWTSTAMP_FILTER_ALL: 2424 pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL; 2425 break; 2426 default: 2427 return -ERANGE; 2428 } 2429 2430 /* Immediately update the device timestamping mode */ 2431 ice_ptp_restore_timestamp_mode(pf); 2432 2433 return 0; 2434 } 2435 2436 /** 2437 * ice_ptp_set_ts_config - ioctl interface to control the timestamping 2438 * @pf: Board private structure 2439 * @ifr: ioctl data 2440 * 2441 * Get the user config and store it 2442 */ 2443 int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr) 2444 { 2445 struct hwtstamp_config config; 2446 int err; 2447 2448 if (pf->ptp.state != ICE_PTP_READY) 2449 return -EAGAIN; 2450 2451 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 2452 return -EFAULT; 2453 2454 err = ice_ptp_set_timestamp_mode(pf, &config); 2455 if (err) 2456 return err; 2457 2458 /* Return the actual configuration set */ 2459 config = pf->ptp.tstamp_config; 2460 2461 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
2462 -EFAULT : 0; 2463 } 2464 2465 /** 2466 * ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns 2467 * @rx_desc: Receive descriptor 2468 * @pkt_ctx: Packet context to get the cached time 2469 * 2470 * The driver receives a notification in the receive descriptor with timestamp. 2471 */ 2472 u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc, 2473 const struct ice_pkt_ctx *pkt_ctx) 2474 { 2475 u64 ts_ns, cached_time; 2476 u32 ts_high; 2477 2478 if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID)) 2479 return 0; 2480 2481 cached_time = READ_ONCE(pkt_ctx->cached_phctime); 2482 2483 /* Do not report a timestamp if we don't have a cached PHC time */ 2484 if (!cached_time) 2485 return 0; 2486 2487 /* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached 2488 * PHC value, rather than accessing the PF. This also allows us to 2489 * simply pass the upper 32bits of nanoseconds directly. Calling 2490 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these 2491 * bits itself. 2492 */ 2493 ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high); 2494 ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high); 2495 2496 return ts_ns; 2497 } 2498 2499 /** 2500 * ice_ptp_setup_pin_cfg - setup PTP pin_config structure 2501 * @pf: Board private structure 2502 */ 2503 static void ice_ptp_setup_pin_cfg(struct ice_pf *pf) 2504 { 2505 for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) { 2506 const struct ice_ptp_pin_desc *desc = &pf->ptp.ice_pin_desc[i]; 2507 struct ptp_pin_desc *pin = &pf->ptp.pin_desc[i]; 2508 const char *name = NULL; 2509 2510 if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) 2511 name = ice_pin_names[desc->name_idx]; 2512 else if (desc->name_idx != GPIO_NA) 2513 name = ice_pin_names_nvm[desc->name_idx]; 2514 if (name) 2515 strscpy(pin->name, name, sizeof(pin->name)); 2516 2517 pin->index = i; 2518 } 2519 2520 pf->ptp.info.pin_config = pf->ptp.pin_desc; 2521 } 2522 2523 /** 2524 * ice_ptp_disable_pins - Disable PTP pins 2525 * @pf: pointer to the PF structure 2526 * 2527 * Disable the OS access to the SMA pins. Called to clear out the OS 2528 * indications of pin support when we fail to setup the SMA control register. 2529 */ 2530 static void ice_ptp_disable_pins(struct ice_pf *pf) 2531 { 2532 struct ptp_clock_info *info = &pf->ptp.info; 2533 2534 dev_warn(ice_pf_to_dev(pf), "Failed to configure PTP pin control\n"); 2535 2536 info->enable = NULL; 2537 info->verify = NULL; 2538 info->n_pins = 0; 2539 info->n_ext_ts = 0; 2540 info->n_per_out = 0; 2541 } 2542 2543 /** 2544 * ice_ptp_parse_sdp_entries - update ice_ptp_pin_desc structure from NVM 2545 * @pf: pointer to the PF structure 2546 * @entries: SDP connection section from NVM 2547 * @num_entries: number of valid entries in sdp_entries 2548 * @pins: PTP pins array to update 2549 * 2550 * Return: 0 on success, negative error code otherwise. 
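*
* Each SDP entry encodes a pin bitmap, a direction bit and a GPIO (SDP)
* number. Entries that refer to the same pin are merged, so a single pin may
* end up with both an input and an output GPIO assignment.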
2551 */
2552 static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries,
2553 unsigned int num_entries,
2554 struct ice_ptp_pin_desc *pins)
2555 {
2556 unsigned int n_pins = 0;
2557 unsigned int i;
2558
2559 /* Setup ice_pin_desc array */
2560 for (i = 0; i < ICE_N_PINS_MAX; i++) {
2561 pins[i].name_idx = -1;
2562 pins[i].gpio[0] = -1;
2563 pins[i].gpio[1] = -1;
2564 }
2565
2566 for (i = 0; i < num_entries; i++) {
2567 u16 entry = le16_to_cpu(entries[i]);
2568 DECLARE_BITMAP(bitmap, GPIO_NA);
2569 unsigned int bitmap_idx;
2570 bool dir;
2571 u16 gpio;
2572
2573 *bitmap = FIELD_GET(ICE_AQC_NVM_SDP_AC_PIN_M, entry);
2574 dir = !!FIELD_GET(ICE_AQC_NVM_SDP_AC_DIR_M, entry);
2575 gpio = FIELD_GET(ICE_AQC_NVM_SDP_AC_SDP_NUM_M, entry);
2576 for_each_set_bit(bitmap_idx, bitmap, GPIO_NA + 1) {
2577 unsigned int idx;
2578
2579 /* Check if entry's pin bit is valid */
2580 if (bitmap_idx >= NUM_PTP_PINS_NVM &&
2581 bitmap_idx != GPIO_NA)
2582 continue;
2583
2584 /* Check if pin already exists */
2585 for (idx = 0; idx < ICE_N_PINS_MAX; idx++)
2586 if (pins[idx].name_idx == bitmap_idx)
2587 break;
2588
2589 if (idx == ICE_N_PINS_MAX) {
2590 /* Pin not found, setup its entry and name */
2591 idx = n_pins++;
2592 pins[idx].name_idx = bitmap_idx;
2593 if (bitmap_idx == GPIO_NA)
2594 strscpy(pf->ptp.pin_desc[idx].name,
2595 ice_pin_names[gpio],
2596 sizeof(pf->ptp.pin_desc[idx]
2597 .name));
2598 }
2599
2600 /* Setup in/out GPIO number */
2601 pins[idx].gpio[dir] = gpio;
2602 }
2603 }
2604
2605 for (i = 0; i < n_pins; i++) {
2606 dev_dbg(ice_pf_to_dev(pf),
2607 "NVM pin entry[%d]: name_idx %d gpio_out %d gpio_in %d\n",
2608 i, pins[i].name_idx, pins[i].gpio[1], pins[i].gpio[0]);
2609 }
2610
2611 pf->ptp.info.n_pins = n_pins;
2612 return 0;
2613 }
2614
2615 /**
2616 * ice_ptp_set_funcs_e82x - Set specialized functions for E82X support
2617 * @pf: Board private structure
2618 *
2619 * Assign functions to the PTP capabilities structure for E82X devices.
2620 * Functions which operate across all device families should be set directly
2621 * in ice_ptp_set_caps. Only add functions here which are distinct for E82X
2622 * devices.
2623 */
2624 static void ice_ptp_set_funcs_e82x(struct ice_pf *pf)
2625 {
2626 pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
2627
2628 if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825) {
2629 pf->ptp.ice_pin_desc = ice_pin_desc_e825c;
2630 pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e825c);
2631 } else {
2632 pf->ptp.ice_pin_desc = ice_pin_desc_e82x;
2633 pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e82x);
2634 }
2635 ice_ptp_setup_pin_cfg(pf);
2636 }
2637
2638 /**
2639 * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
2640 * @pf: Board private structure
2641 *
2642 * Assign functions to the PTP capabilities structure for E810 devices.
2643 * Functions which operate across all device families should be set directly
2644 * in ice_ptp_set_caps. Only add functions here which are distinct for E810
2645 * devices.
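*
* Pin descriptions are read from the NVM SDP section when it is available;
* otherwise the driver falls back to the static ice_pin_desc_e810_sma or
* ice_pin_desc_e810 tables, depending on ICE_F_SMA_CTRL support.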
2646 */
2647 static void ice_ptp_set_funcs_e810(struct ice_pf *pf)
2648 {
2649 __le16 entries[ICE_AQC_NVM_SDP_AC_MAX_SIZE];
2650 struct ice_ptp_pin_desc *desc = NULL;
2651 struct ice_ptp *ptp = &pf->ptp;
2652 unsigned int num_entries;
2653 int err;
2654
2655 err = ice_ptp_read_sdp_ac(&pf->hw, entries, &num_entries);
2656 if (err) {
2657 /* SDP section does not exist in NVM or is corrupted */
2658 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
2659 ptp->ice_pin_desc = ice_pin_desc_e810_sma;
2660 ptp->info.n_pins =
2661 ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810_sma);
2662 } else {
2663 pf->ptp.ice_pin_desc = ice_pin_desc_e810;
2664 pf->ptp.info.n_pins =
2665 ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810);
2666 err = 0;
2667 }
2668 } else {
2669 desc = devm_kcalloc(ice_pf_to_dev(pf), ICE_N_PINS_MAX,
2670 sizeof(struct ice_ptp_pin_desc),
2671 GFP_KERNEL);
2672 if (!desc) {
err = -ENOMEM;
2673 goto err;
}
2674
2675 err = ice_ptp_parse_sdp_entries(pf, entries, num_entries, desc);
2676 if (err)
2677 goto err;
2678
2679 ptp->ice_pin_desc = (const struct ice_ptp_pin_desc *)desc;
2680 }
2681
2682 ptp->info.pin_config = ptp->pin_desc;
2683 ice_ptp_setup_pin_cfg(pf);
2684
2685 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
2686 err = ice_ptp_set_sma_cfg(pf);
2687 err:
2688 if (err) {
2689 devm_kfree(ice_pf_to_dev(pf), desc);
2690 ice_ptp_disable_pins(pf);
2691 }
2692 }
2693
2694 /**
2695 * ice_ptp_set_funcs_e830 - Set specialized functions for E830 support
2696 * @pf: Board private structure
2697 *
2698 * Assign functions to the PTP capabilities structure for E830 devices.
2699 * Functions which operate across all device families should be set directly
2700 * in ice_ptp_set_caps. Only add functions here which are distinct for E830
2701 * devices.
2702 */
2703 static void ice_ptp_set_funcs_e830(struct ice_pf *pf)
2704 {
2705 #ifdef CONFIG_ICE_HWTS
2706 if (pcie_ptm_enabled(pf->pdev) && boot_cpu_has(X86_FEATURE_ART))
2707 pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
2708
2709 #endif /* CONFIG_ICE_HWTS */
2710 /* Rest of the config is the same as base E810 */
2711 pf->ptp.ice_pin_desc = ice_pin_desc_e810;
2712 pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810);
2713 ice_ptp_setup_pin_cfg(pf);
2714 }
2715
2716 /**
2717 * ice_ptp_set_caps - Set PTP capabilities
2718 * @pf: Board private structure
2719 */
2720 static void ice_ptp_set_caps(struct ice_pf *pf)
2721 {
2722 struct ptp_clock_info *info = &pf->ptp.info;
2723 struct device *dev = ice_pf_to_dev(pf);
2724
2725 snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
2726 dev_driver_string(dev), dev_name(dev));
2727 info->owner = THIS_MODULE;
2728 info->max_adj = 100000000;
2729 info->adjtime = ice_ptp_adjtime;
2730 info->adjfine = ice_ptp_adjfine;
2731 info->gettimex64 = ice_ptp_gettimex64;
2732 info->settime64 = ice_ptp_settime64;
2733 info->n_per_out = GLTSYN_TGT_H_IDX_MAX;
2734 info->n_ext_ts = GLTSYN_EVNT_H_IDX_MAX;
2735 info->enable = ice_ptp_gpio_enable;
2736 info->verify = ice_verify_pin;
2737
2738 switch (pf->hw.mac_type) {
2739 case ICE_MAC_E810:
2740 ice_ptp_set_funcs_e810(pf);
2741 return;
2742 case ICE_MAC_E830:
2743 ice_ptp_set_funcs_e830(pf);
2744 return;
2745 case ICE_MAC_GENERIC:
2746 case ICE_MAC_GENERIC_3K_E825:
2747 ice_ptp_set_funcs_e82x(pf);
2748 return;
2749 default:
2750 return;
2751 }
2752 }
2753
2754 /**
2755 * ice_ptp_create_clock - Create PTP clock device for userspace
2756 * @pf: Board private structure
2757 *
2758 * This function creates a new PTP clock device. It only creates one if we
2759 * don't already have one.
Will return error if it can't create one, but success 2760 * if we already have a device. Should be used by ice_ptp_init to create clock 2761 * initially, and prevent global resets from creating new clock devices. 2762 */ 2763 static long ice_ptp_create_clock(struct ice_pf *pf) 2764 { 2765 struct ptp_clock_info *info; 2766 struct device *dev; 2767 2768 /* No need to create a clock device if we already have one */ 2769 if (pf->ptp.clock) 2770 return 0; 2771 2772 ice_ptp_set_caps(pf); 2773 2774 info = &pf->ptp.info; 2775 dev = ice_pf_to_dev(pf); 2776 2777 /* Attempt to register the clock before enabling the hardware. */ 2778 pf->ptp.clock = ptp_clock_register(info, dev); 2779 if (IS_ERR(pf->ptp.clock)) { 2780 dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device"); 2781 return PTR_ERR(pf->ptp.clock); 2782 } 2783 2784 return 0; 2785 } 2786 2787 /** 2788 * ice_ptp_request_ts - Request an available Tx timestamp index 2789 * @tx: the PTP Tx timestamp tracker to request from 2790 * @skb: the SKB to associate with this timestamp request 2791 */ 2792 s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) 2793 { 2794 unsigned long flags; 2795 u8 idx; 2796 2797 spin_lock_irqsave(&tx->lock, flags); 2798 2799 /* Check that this tracker is accepting new timestamp requests */ 2800 if (!ice_ptp_is_tx_tracker_up(tx)) { 2801 spin_unlock_irqrestore(&tx->lock, flags); 2802 return -1; 2803 } 2804 2805 /* Find and set the first available index */ 2806 idx = find_next_zero_bit(tx->in_use, tx->len, 2807 tx->last_ll_ts_idx_read + 1); 2808 if (idx == tx->len) 2809 idx = find_first_zero_bit(tx->in_use, tx->len); 2810 2811 if (idx < tx->len) { 2812 /* We got a valid index that no other thread could have set. Store 2813 * a reference to the skb and the start time to allow discarding old 2814 * requests. 2815 */ 2816 set_bit(idx, tx->in_use); 2817 clear_bit(idx, tx->stale); 2818 tx->tstamps[idx].start = jiffies; 2819 tx->tstamps[idx].skb = skb_get(skb); 2820 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2821 ice_trace(tx_tstamp_request, skb, idx); 2822 } 2823 2824 spin_unlock_irqrestore(&tx->lock, flags); 2825 2826 /* return the appropriate PHY timestamp register index, -1 if no 2827 * indexes were available. 2828 */ 2829 if (idx >= tx->len) 2830 return -1; 2831 else 2832 return idx + tx->offset; 2833 } 2834 2835 /** 2836 * ice_ptp_process_ts - Process the PTP Tx timestamps 2837 * @pf: Board private structure 2838 * 2839 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx 2840 * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise. 
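*
* The amount of work done depends on the Tx interrupt mode:
* ICE_PTP_TX_INTERRUPT_NONE means the clock owner handles this PF's
* timestamps, ICE_PTP_TX_INTERRUPT_SELF processes only the local port, and
* ICE_PTP_TX_INTERRUPT_ALL has the clock owner process every port.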
2841 */
2842 enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
2843 {
2844 switch (pf->ptp.tx_interrupt_mode) {
2845 case ICE_PTP_TX_INTERRUPT_NONE:
2846 /* This device has the clock owner handle timestamps for it */
2847 return ICE_TX_TSTAMP_WORK_DONE;
2848 case ICE_PTP_TX_INTERRUPT_SELF:
2849 /* This device handles its own timestamps */
2850 return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
2851 case ICE_PTP_TX_INTERRUPT_ALL:
2852 /* This device handles timestamps for all ports */
2853 return ice_ptp_tx_tstamp_owner(pf);
2854 default:
2855 WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
2856 pf->ptp.tx_interrupt_mode);
2857 return ICE_TX_TSTAMP_WORK_DONE;
2858 }
2859 }
2860
2861 /**
2862 * ice_ptp_ts_irq - Process the PTP Tx timestamps in IRQ context
2863 * @pf: Board private structure
2864 *
2865 * Return: IRQ_WAKE_THREAD if Tx timestamp read has to be handled in the bottom
2866 * half of the interrupt and IRQ_HANDLED otherwise.
2867 */
2868 irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
2869 {
2870 struct ice_hw *hw = &pf->hw;
2871
2872 switch (hw->mac_type) {
2873 case ICE_MAC_E810:
2874 /* An E810 capable of low latency timestamping with interrupt
2875 * can request a single timestamp in the top half and wait for
2876 * a second LL TS interrupt from the FW when it's ready.
2877 */
2878 if (hw->dev_caps.ts_dev_info.ts_ll_int_read) {
2879 struct ice_ptp_tx *tx = &pf->ptp.port.tx;
2880 u8 idx;
2881
2882 if (!ice_pf_state_is_nominal(pf))
2883 return IRQ_HANDLED;
2884
2885 spin_lock(&tx->lock);
2886 idx = find_next_bit_wrap(tx->in_use, tx->len,
2887 tx->last_ll_ts_idx_read + 1);
2888 if (idx != tx->len)
2889 ice_ptp_req_tx_single_tstamp(tx, idx);
2890 spin_unlock(&tx->lock);
2891
2892 return IRQ_HANDLED;
2893 }
2894 fallthrough; /* non-LL_TS E810 */
2895 case ICE_MAC_GENERIC:
2896 case ICE_MAC_GENERIC_3K_E825:
2897 /* All other devices process timestamps in the bottom half due
2898 * to sleeping or polling.
2899 */
2900 if (!ice_ptp_pf_handles_tx_interrupt(pf))
2901 return IRQ_HANDLED;
2902
2903 set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
2904 return IRQ_WAKE_THREAD;
2905 case ICE_MAC_E830:
2906 /* E830 can read timestamps in the top half using rd32() */
2907 if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
2908 /* Process outstanding Tx timestamps. If there
2909 * is more work, re-arm the interrupt to trigger again.
2910 */
2911 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
2912 ice_flush(hw);
2913 }
2914 return IRQ_HANDLED;
2915 default:
2916 return IRQ_HANDLED;
2917 }
2918 }
2919
2920 /**
2921 * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt
2922 * @pf: Board private structure
2923 *
2924 * The device PHY issues Tx timestamp interrupts to the driver for processing
2925 * timestamp data from the PHY. It will not interrupt again until all
2926 * current timestamp data is read. In rare circumstances, it is possible that
2927 * the driver fails to read all outstanding data.
2928 *
2929 * To avoid getting permanently stuck, periodically check if the PHY has
2930 * outstanding timestamp data. If so, trigger an interrupt from software to
2931 * process this data.
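*
* Only the source timer owner performs this check. If any quad still reports
* ready timestamps, the Tx timestamp interrupt is triggered from software by
* writing PFINT_OICR_TSYN_TX_M to PFINT_OICR.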
2932 */ 2933 static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf) 2934 { 2935 struct device *dev = ice_pf_to_dev(pf); 2936 struct ice_hw *hw = &pf->hw; 2937 bool trigger_oicr = false; 2938 unsigned int i; 2939 2940 if (!pf->ptp.port.tx.has_ready_bitmap) 2941 return; 2942 2943 if (!ice_pf_src_tmr_owned(pf)) 2944 return; 2945 2946 for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) { 2947 u64 tstamp_ready; 2948 int err; 2949 2950 err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready); 2951 if (!err && tstamp_ready) { 2952 trigger_oicr = true; 2953 break; 2954 } 2955 } 2956 2957 if (trigger_oicr) { 2958 /* Trigger a software interrupt, to ensure this data 2959 * gets processed. 2960 */ 2961 dev_dbg(dev, "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n"); 2962 2963 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); 2964 ice_flush(hw); 2965 } 2966 } 2967 2968 static void ice_ptp_periodic_work(struct kthread_work *work) 2969 { 2970 struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work); 2971 struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp); 2972 int err; 2973 2974 if (pf->ptp.state != ICE_PTP_READY) 2975 return; 2976 2977 err = ice_ptp_update_cached_phctime(pf); 2978 2979 ice_ptp_maybe_trigger_tx_interrupt(pf); 2980 2981 /* Run twice a second or reschedule if phc update failed */ 2982 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 2983 msecs_to_jiffies(err ? 10 : 500)); 2984 } 2985 2986 /** 2987 * ice_ptp_prepare_for_reset - Prepare PTP for reset 2988 * @pf: Board private structure 2989 * @reset_type: the reset type being performed 2990 */ 2991 void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) 2992 { 2993 struct ice_ptp *ptp = &pf->ptp; 2994 u8 src_tmr; 2995 2996 if (ptp->state != ICE_PTP_READY) 2997 return; 2998 2999 ptp->state = ICE_PTP_RESETTING; 3000 3001 /* Disable timestamping for both Tx and Rx */ 3002 ice_ptp_disable_timestamp_mode(pf); 3003 3004 kthread_cancel_delayed_work_sync(&ptp->work); 3005 3006 if (reset_type == ICE_RESET_PFR) 3007 return; 3008 3009 ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); 3010 3011 /* Disable periodic outputs */ 3012 ice_ptp_disable_all_perout(pf); 3013 3014 src_tmr = ice_get_ptp_src_clock_index(&pf->hw); 3015 3016 /* Disable source clock */ 3017 wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M); 3018 3019 /* Acquire PHC and system timer to restore after reset */ 3020 ptp->reset_time = ktime_get_real_ns(); 3021 } 3022 3023 /** 3024 * ice_ptp_rebuild_owner - Initialize PTP clock owner after reset 3025 * @pf: Board private structure 3026 * 3027 * Companion function for ice_ptp_rebuild() which handles tasks that only the 3028 * PTP clock owner instance should perform. 3029 */ 3030 static int ice_ptp_rebuild_owner(struct ice_pf *pf) 3031 { 3032 struct ice_ptp *ptp = &pf->ptp; 3033 struct ice_hw *hw = &pf->hw; 3034 struct timespec64 ts; 3035 u64 time_diff; 3036 int err; 3037 3038 err = ice_ptp_init_phc(hw); 3039 if (err) 3040 return err; 3041 3042 /* Acquire the global hardware lock */ 3043 if (!ice_ptp_lock(hw)) { 3044 err = -EBUSY; 3045 return err; 3046 } 3047 3048 /* Write the increment time value to PHY and LAN */ 3049 err = ice_ptp_write_incval(hw, ice_base_incval(pf)); 3050 if (err) 3051 goto err_unlock; 3052 3053 /* Write the initial Time value to PHY and LAN using the cached PHC 3054 * time before the reset and time difference between stopping and 3055 * starting the clock. 
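* If no PHC time was cached, fall back to the current system (wall clock) time
* instead.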
3056 */ 3057 if (ptp->cached_phc_time) { 3058 time_diff = ktime_get_real_ns() - ptp->reset_time; 3059 ts = ns_to_timespec64(ptp->cached_phc_time + time_diff); 3060 } else { 3061 ts = ktime_to_timespec64(ktime_get_real()); 3062 } 3063 err = ice_ptp_write_init(pf, &ts); 3064 if (err) 3065 goto err_unlock; 3066 3067 /* Release the global hardware lock */ 3068 ice_ptp_unlock(hw); 3069 3070 /* Flush software tracking of any outstanding timestamps since we're 3071 * about to flush the PHY timestamp block. 3072 */ 3073 ice_ptp_flush_all_tx_tracker(pf); 3074 3075 /* Enable quad interrupts */ 3076 err = ice_ptp_cfg_phy_interrupt(pf, true, 1); 3077 if (err) 3078 return err; 3079 3080 ice_ptp_restart_all_phy(pf); 3081 3082 /* Re-enable all periodic outputs and external timestamp events */ 3083 ice_ptp_enable_all_perout(pf); 3084 ice_ptp_enable_all_extts(pf); 3085 3086 return 0; 3087 3088 err_unlock: 3089 ice_ptp_unlock(hw); 3090 return err; 3091 } 3092 3093 /** 3094 * ice_ptp_rebuild - Initialize PTP hardware clock support after reset 3095 * @pf: Board private structure 3096 * @reset_type: the reset type being performed 3097 */ 3098 void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) 3099 { 3100 struct ice_ptp *ptp = &pf->ptp; 3101 int err; 3102 3103 if (ptp->state == ICE_PTP_READY) { 3104 ice_ptp_prepare_for_reset(pf, reset_type); 3105 } else if (ptp->state != ICE_PTP_RESETTING) { 3106 err = -EINVAL; 3107 dev_err(ice_pf_to_dev(pf), "PTP was not initialized\n"); 3108 goto err; 3109 } 3110 3111 if (ice_pf_src_tmr_owned(pf) && reset_type != ICE_RESET_PFR) { 3112 err = ice_ptp_rebuild_owner(pf); 3113 if (err) 3114 goto err; 3115 } 3116 3117 ptp->state = ICE_PTP_READY; 3118 3119 /* Start periodic work going */ 3120 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0); 3121 3122 dev_info(ice_pf_to_dev(pf), "PTP reset successful\n"); 3123 return; 3124 3125 err: 3126 ptp->state = ICE_PTP_ERROR; 3127 dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err); 3128 } 3129 3130 static bool ice_is_primary(struct ice_hw *hw) 3131 { 3132 return hw->mac_type == ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) ? 3133 !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) : 3134 true; 3135 } 3136 3137 static int ice_ptp_setup_adapter(struct ice_pf *pf) 3138 { 3139 if (!ice_pf_src_tmr_owned(pf) || !ice_is_primary(&pf->hw)) 3140 return -EPERM; 3141 3142 pf->adapter->ctrl_pf = pf; 3143 3144 return 0; 3145 } 3146 3147 static int ice_ptp_setup_pf(struct ice_pf *pf) 3148 { 3149 struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf); 3150 struct ice_ptp *ptp = &pf->ptp; 3151 3152 if (WARN_ON(!ctrl_ptp) || pf->hw.mac_type == ICE_MAC_UNKNOWN) 3153 return -ENODEV; 3154 3155 INIT_LIST_HEAD(&ptp->port.list_node); 3156 mutex_lock(&pf->adapter->ports.lock); 3157 3158 list_add(&ptp->port.list_node, 3159 &pf->adapter->ports.ports); 3160 mutex_unlock(&pf->adapter->ports.lock); 3161 3162 return 0; 3163 } 3164 3165 static void ice_ptp_cleanup_pf(struct ice_pf *pf) 3166 { 3167 struct ice_ptp *ptp = &pf->ptp; 3168 3169 if (pf->hw.mac_type != ICE_MAC_UNKNOWN) { 3170 mutex_lock(&pf->adapter->ports.lock); 3171 list_del(&ptp->port.list_node); 3172 mutex_unlock(&pf->adapter->ports.lock); 3173 } 3174 } 3175 3176 /** 3177 * ice_ptp_clock_index - Get the PTP clock index for this device 3178 * @pf: Board private structure 3179 * 3180 * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock 3181 * is associated. 
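*
* The clock device is registered by the control PF, so the index is looked up
* through the adapter's control PF rather than through this PF's own PTP
* structure.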
3182 */ 3183 int ice_ptp_clock_index(struct ice_pf *pf) 3184 { 3185 struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf); 3186 struct ptp_clock *clock; 3187 3188 if (!ctrl_ptp) 3189 return -1; 3190 clock = ctrl_ptp->clock; 3191 3192 return clock ? ptp_clock_index(clock) : -1; 3193 } 3194 3195 /** 3196 * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device 3197 * @pf: Board private structure 3198 * 3199 * Setup and initialize a PTP clock device that represents the device hardware 3200 * clock. Save the clock index for other functions connected to the same 3201 * hardware resource. 3202 */ 3203 static int ice_ptp_init_owner(struct ice_pf *pf) 3204 { 3205 struct ice_hw *hw = &pf->hw; 3206 struct timespec64 ts; 3207 int err; 3208 3209 err = ice_ptp_init_phc(hw); 3210 if (err) { 3211 dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n", 3212 err); 3213 return err; 3214 } 3215 3216 /* Acquire the global hardware lock */ 3217 if (!ice_ptp_lock(hw)) { 3218 err = -EBUSY; 3219 goto err_exit; 3220 } 3221 3222 /* Write the increment time value to PHY and LAN */ 3223 err = ice_ptp_write_incval(hw, ice_base_incval(pf)); 3224 if (err) 3225 goto err_unlock; 3226 3227 ts = ktime_to_timespec64(ktime_get_real()); 3228 /* Write the initial Time value to PHY and LAN */ 3229 err = ice_ptp_write_init(pf, &ts); 3230 if (err) 3231 goto err_unlock; 3232 3233 /* Release the global hardware lock */ 3234 ice_ptp_unlock(hw); 3235 3236 /* Configure PHY interrupt settings */ 3237 err = ice_ptp_cfg_phy_interrupt(pf, true, 1); 3238 if (err) 3239 goto err_exit; 3240 3241 /* Ensure we have a clock device */ 3242 err = ice_ptp_create_clock(pf); 3243 if (err) 3244 goto err_clk; 3245 3246 return 0; 3247 err_clk: 3248 pf->ptp.clock = NULL; 3249 err_exit: 3250 return err; 3251 3252 err_unlock: 3253 ice_ptp_unlock(hw); 3254 return err; 3255 } 3256 3257 /** 3258 * ice_ptp_init_work - Initialize PTP work threads 3259 * @pf: Board private structure 3260 * @ptp: PF PTP structure 3261 */ 3262 static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp) 3263 { 3264 struct kthread_worker *kworker; 3265 3266 /* Initialize work functions */ 3267 kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work); 3268 3269 /* Allocate a kworker for handling work required for the ports 3270 * connected to the PTP hardware clock. 3271 */ 3272 kworker = kthread_run_worker(0, "ice-ptp-%s", 3273 dev_name(ice_pf_to_dev(pf))); 3274 if (IS_ERR(kworker)) 3275 return PTR_ERR(kworker); 3276 3277 ptp->kworker = kworker; 3278 3279 /* Start periodic work going */ 3280 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0); 3281 3282 return 0; 3283 } 3284 3285 /** 3286 * ice_ptp_init_port - Initialize PTP port structure 3287 * @pf: Board private structure 3288 * @ptp_port: PTP port structure 3289 * 3290 * Return: 0 on success, -ENODEV on invalid MAC type, -ENOMEM on failed alloc. 
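*
* E82X (ICE_MAC_GENERIC) ports additionally initialize the offset wait work
* used for Vernier calibration; other MAC types only set up the Tx timestamp
* tracker.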
3291 */ 3292 static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) 3293 { 3294 struct ice_hw *hw = &pf->hw; 3295 3296 mutex_init(&ptp_port->ps_lock); 3297 3298 switch (hw->mac_type) { 3299 case ICE_MAC_E810: 3300 case ICE_MAC_E830: 3301 case ICE_MAC_GENERIC_3K_E825: 3302 return ice_ptp_init_tx(pf, &ptp_port->tx, ptp_port->port_num); 3303 case ICE_MAC_GENERIC: 3304 kthread_init_delayed_work(&ptp_port->ov_work, 3305 ice_ptp_wait_for_offsets); 3306 return ice_ptp_init_tx_e82x(pf, &ptp_port->tx, 3307 ptp_port->port_num); 3308 default: 3309 return -ENODEV; 3310 } 3311 } 3312 3313 /** 3314 * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode 3315 * @pf: Board private structure 3316 * 3317 * Initialize the Tx timestamp interrupt mode for this device. For most device 3318 * types, each PF processes the interrupt and manages its own timestamps. For 3319 * E822-based devices, only the clock owner processes the timestamps. Other 3320 * PFs disable the interrupt and do not process their own timestamps. 3321 */ 3322 static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf) 3323 { 3324 switch (pf->hw.mac_type) { 3325 case ICE_MAC_GENERIC: 3326 /* E822 based PHY has the clock owner process the interrupt 3327 * for all ports. 3328 */ 3329 if (ice_pf_src_tmr_owned(pf)) 3330 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL; 3331 else 3332 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE; 3333 break; 3334 default: 3335 /* other PHY types handle their own Tx interrupt */ 3336 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF; 3337 } 3338 } 3339 3340 /** 3341 * ice_ptp_init - Initialize PTP hardware clock support 3342 * @pf: Board private structure 3343 * 3344 * Set up the device for interacting with the PTP hardware clock for all 3345 * functions, both the function that owns the clock hardware, and the 3346 * functions connected to the clock hardware. 3347 * 3348 * The clock owner will allocate and register a ptp_clock with the 3349 * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work 3350 * items used for asynchronous work such as Tx timestamps and periodic work. 3351 */ 3352 void ice_ptp_init(struct ice_pf *pf) 3353 { 3354 struct ice_ptp *ptp = &pf->ptp; 3355 struct ice_hw *hw = &pf->hw; 3356 int lane_num, err; 3357 3358 ptp->state = ICE_PTP_INITIALIZING; 3359 3360 lane_num = ice_get_phy_lane_number(hw); 3361 if (lane_num < 0) { 3362 err = lane_num; 3363 goto err_exit; 3364 } 3365 3366 ptp->port.port_num = (u8)lane_num; 3367 ice_ptp_init_hw(hw); 3368 3369 ice_ptp_init_tx_interrupt_mode(pf); 3370 3371 /* If this function owns the clock hardware, it must allocate and 3372 * configure the PTP clock device to represent it. 
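* On E825C devices with a dual NAC topology only the primary instance performs
* this step (see ice_is_primary()).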
3373 */ 3374 if (ice_pf_src_tmr_owned(pf) && ice_is_primary(hw)) { 3375 err = ice_ptp_setup_adapter(pf); 3376 if (err) 3377 goto err_exit; 3378 err = ice_ptp_init_owner(pf); 3379 if (err) 3380 goto err_exit; 3381 } 3382 3383 err = ice_ptp_setup_pf(pf); 3384 if (err) 3385 goto err_exit; 3386 3387 err = ice_ptp_init_port(pf, &ptp->port); 3388 if (err) 3389 goto err_exit; 3390 3391 /* Start the PHY timestamping block */ 3392 ice_ptp_reset_phy_timestamping(pf); 3393 3394 /* Configure initial Tx interrupt settings */ 3395 ice_ptp_cfg_tx_interrupt(pf); 3396 3397 ptp->state = ICE_PTP_READY; 3398 3399 err = ice_ptp_init_work(pf, ptp); 3400 if (err) 3401 goto err_exit; 3402 3403 dev_info(ice_pf_to_dev(pf), "PTP init successful\n"); 3404 return; 3405 3406 err_exit: 3407 /* If we registered a PTP clock, release it */ 3408 if (pf->ptp.clock) { 3409 ptp_clock_unregister(ptp->clock); 3410 pf->ptp.clock = NULL; 3411 } 3412 ptp->state = ICE_PTP_ERROR; 3413 dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err); 3414 } 3415 3416 /** 3417 * ice_ptp_release - Disable the driver/HW support and unregister the clock 3418 * @pf: Board private structure 3419 * 3420 * This function handles the cleanup work required from the initialization by 3421 * clearing out the important information and unregistering the clock 3422 */ 3423 void ice_ptp_release(struct ice_pf *pf) 3424 { 3425 if (pf->ptp.state != ICE_PTP_READY) 3426 return; 3427 3428 pf->ptp.state = ICE_PTP_UNINIT; 3429 3430 /* Disable timestamping for both Tx and Rx */ 3431 ice_ptp_disable_timestamp_mode(pf); 3432 3433 ice_ptp_cleanup_pf(pf); 3434 3435 ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); 3436 3437 ice_ptp_disable_all_extts(pf); 3438 3439 kthread_cancel_delayed_work_sync(&pf->ptp.work); 3440 3441 ice_ptp_port_phy_stop(&pf->ptp.port); 3442 mutex_destroy(&pf->ptp.port.ps_lock); 3443 if (pf->ptp.kworker) { 3444 kthread_destroy_worker(pf->ptp.kworker); 3445 pf->ptp.kworker = NULL; 3446 } 3447 3448 if (!pf->ptp.clock) 3449 return; 3450 3451 /* Disable periodic outputs */ 3452 ice_ptp_disable_all_perout(pf); 3453 3454 ptp_clock_unregister(pf->ptp.clock); 3455 pf->ptp.clock = NULL; 3456 3457 dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n"); 3458 } 3459