// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_trace.h"
#include "ice_cgu_regs.h"

/* User-visible names for the driver-defined PTP pins */
static const char ice_pin_names[][64] = {
	"SDP0",
	"SDP1",
	"SDP2",
	"SDP3",
	"TIME_SYNC",
	"1PPS"
};

/* Per-device pin tables: name index, { input, output } GPIO numbers
 * (-1 when that direction is not supported) and { input, output }
 * propagation delays.
 */
static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = {
	/* name,        gpio,       delay */
	{  TIME_SYNC, {  4, -1 }, { 0,  0 }},
	{  ONE_PPS,   { -1,  5 }, { 0, 11 }},
};

static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = {
	/* name,        gpio,       delay */
	{  SDP0,      {  0,  0 }, { 15, 14 }},
	{  SDP1,      {  1,  1 }, { 15, 14 }},
	{  SDP2,      {  2,  2 }, { 15, 14 }},
	{  SDP3,      {  3,  3 }, { 15, 14 }},
	{  TIME_SYNC, {  4, -1 }, { 11,  0 }},
	{  ONE_PPS,   { -1,  5 }, {  0,  9 }},
};

static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = {
	/* name,        gpio,       delay */
	{  SDP0,      {  0,  0 }, { 0, 1 }},
	{  SDP1,      {  1,  1 }, { 0, 1 }},
	{  SDP2,      {  2,  2 }, { 0, 1 }},
	{  SDP3,      {  3,  3 }, { 0, 1 }},
	{  ONE_PPS,   { -1,  5 }, { 0, 1 }},
};

/* Pin names used for boards whose SMA configuration comes from NVM */
static const char ice_pin_names_nvm[][64] = {
	"GNSS",
	"SMA1",
	"U.FL1",
	"SMA2",
	"U.FL2",
};

static const struct ice_ptp_pin_desc ice_pin_desc_e810_sma[] = {
	/* name,   gpio,       delay */
	{  GNSS, {  1, -1 }, { 0, 0 }},
	{  SMA1, {  1,  0 }, { 0, 1 }},
	{  UFL1, { -1,  0 }, { 0, 1 }},
	{  SMA2, {  3,  2 }, { 0, 1 }},
	{  UFL2, {  3, -1 }, { 0, 0 }},
};

/* Return the control PF of this adapter, or NULL if not yet assigned */
static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf)
{
	return !pf->adapter ? NULL : pf->adapter->ctrl_pf;
}

/* Return the PTP data of the control PF, or NULL when there is none */
static struct ice_ptp *ice_get_ctrl_ptp(struct ice_pf *pf)
{
	struct ice_pf *ctrl_pf = ice_get_ctrl_pf(pf);

	return !ctrl_pf ? NULL : &ctrl_pf->ptp;
}

/**
 * ice_ptp_find_pin_idx - Find pin index in ptp_pin_desc
 * @pf: Board private structure
 * @func: Pin function
 * @chan: GPIO channel
 *
 * Return: index (>= 0) of the first pin matching @func and @chan when such
 * a pin is present, -1 otherwise
 */
static int ice_ptp_find_pin_idx(struct ice_pf *pf, enum ptp_pin_function func,
				unsigned int chan)
{
	const struct ptp_clock_info *info = &pf->ptp.info;
	int i;

	for (i = 0; i < info->n_pins; i++) {
		if (info->pin_config[i].func == func &&
		    info->pin_config[i].chan == chan)
			return i;
	}

	return -1;
}

/**
 * ice_ptp_update_sma_data - update SMA pins data according to pins setup
 * @pf: Board private structure
 * @sma_pins: parsed SMA pins status
 * @data: SMA data to update
 */
static void ice_ptp_update_sma_data(struct ice_pf *pf, unsigned int sma_pins[],
				    u8 *data)
{
	const char *state1, *state2;

	/* Set the right state based on the desired configuration.
	 * When bit is set, functionality is disabled.
	 */
	*data &= ~ICE_ALL_SMA_MASK;
	if (!sma_pins[UFL1 - 1]) {
		/* U.FL1 unused: SMA1 alone decides the direction bits */
		if (sma_pins[SMA1 - 1] == PTP_PF_EXTTS) {
			state1 = "SMA1 Rx, U.FL1 disabled";
			*data |= ICE_SMA1_TX_EN;
		} else if (sma_pins[SMA1 - 1] == PTP_PF_PEROUT) {
			state1 = "SMA1 Tx U.FL1 disabled";
			*data |= ICE_SMA1_DIR_EN;
		} else {
			state1 = "SMA1 disabled, U.FL1 disabled";
			*data |= ICE_SMA1_MASK;
		}
	} else {
		/* U.FL1 Tx will always enable SMA1 Rx */
		state1 = "SMA1 Rx, U.FL1 Tx";
	}

	if (!sma_pins[UFL2 - 1]) {
		/* U.FL2 unused: SMA2 alone decides, with U.FL2 Rx disabled */
		if (sma_pins[SMA2 - 1] == PTP_PF_EXTTS) {
			state2 = "SMA2 Rx, U.FL2 disabled";
			*data |= ICE_SMA2_TX_EN | ICE_SMA2_UFL2_RX_DIS;
		} else if (sma_pins[SMA2 - 1] == PTP_PF_PEROUT) {
			state2 = "SMA2 Tx, U.FL2 disabled";
			*data |= ICE_SMA2_DIR_EN | ICE_SMA2_UFL2_RX_DIS;
		} else {
			state2 = "SMA2 disabled, U.FL2 disabled";
			*data |= ICE_SMA2_MASK;
		}
	} else {
		if (!sma_pins[SMA2 - 1]) {
			state2 = "SMA2 disabled, U.FL2 Rx";
			*data |= ICE_SMA2_DIR_EN | ICE_SMA2_TX_EN;
		} else {
			state2 = "SMA2 Tx, U.FL2 Rx";
			*data |= ICE_SMA2_DIR_EN;
		}
	}

	dev_dbg(ice_pf_to_dev(pf), "%s, %s\n", state1, state2);
}

/**
 * ice_ptp_set_sma_cfg - set the configuration of the SMA control logic
 * @pf: Board private structure
 *
 * Return: 0 on success, negative error code otherwise
 */
static int ice_ptp_set_sma_cfg(struct ice_pf *pf)
{
	const struct ice_ptp_pin_desc *ice_pins = pf->ptp.ice_pin_desc;
	struct ptp_pin_desc *pins = pf->ptp.pin_desc;
	unsigned int sma_pins[ICE_SMA_PINS_NUM] = {};
	int err;
	u8 data;

	/* Read initial pin state value */
	err = ice_read_sma_ctrl(&pf->hw, &data);
	if (err)
		return err;

	/* Get SMA/U.FL pins states */
	for (int i = 0; i < pf->ptp.info.n_pins; i++)
		if (pins[i].func) {
			int name_idx = ice_pins[i].name_idx;

			switch (name_idx) {
			case SMA1:
			case UFL1:
			case SMA2:
			case UFL2:
				/* name_idx - 1 maps SMA enum onto sma_pins[] */
				sma_pins[name_idx - 1] = pins[i].func;
				break;
			default:
				continue;
			}
		}

	ice_ptp_update_sma_data(pf, sma_pins, &data);
	return ice_write_sma_ctrl(&pf->hw, data);
}

/**
 * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
 * @pf: Board private structure
 *
 * Program the device to respond appropriately to the Tx timestamp interrupt
 * cause.
 */
static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	bool enable;
	u32 val;

	switch (pf->ptp.tx_interrupt_mode) {
	case ICE_PTP_TX_INTERRUPT_ALL:
		/* React to interrupts across all quads. */
		wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
		enable = true;
		break;
	case ICE_PTP_TX_INTERRUPT_NONE:
		/* Do not react to interrupts on any quad. */
		wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
		enable = false;
		break;
	case ICE_PTP_TX_INTERRUPT_SELF:
	default:
		/* Leave the quad mask alone; enable only if Tx timestamping
		 * is currently requested by the stack.
		 */
		enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON;
		break;
	}

	/* Configure the Tx timestamp interrupt */
	val = rd32(hw, PFINT_OICR_ENA);
	if (enable)
		val |= PFINT_OICR_TSYN_TX_M;
	else
		val &= ~PFINT_OICR_TSYN_TX_M;
	wr32(hw, PFINT_OICR_ENA, val);
}

/**
 * ice_set_rx_tstamp - Enable or disable Rx timestamping
 * @pf: The PF pointer to search in
 * @on: bool value for whether timestamps are enabled or disabled
 */
static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
{
	struct ice_vsi *vsi;
	u16 i;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->rx_rings)
		return;

	/* Set the timestamp flag for all the Rx rings */
	ice_for_each_rxq(vsi, i) {
		if (!vsi->rx_rings[i])
			continue;
		vsi->rx_rings[i]->ptp_rx = on;
	}
}

/**
 * ice_ptp_disable_timestamp_mode - Disable current timestamp mode
 * @pf: Board private structure
 *
 *
 * Called during preparation for reset to temporarily disable timestamping on
 * the device. Called during remove to disable timestamping while cleaning up
 * driver resources.
 */
static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 val;

	/* Mask the Tx timestamp interrupt cause */
	val = rd32(hw, PFINT_OICR_ENA);
	val &= ~PFINT_OICR_TSYN_TX_M;
	wr32(hw, PFINT_OICR_ENA, val);

	ice_set_rx_tstamp(pf, false);
}

/**
 * ice_ptp_restore_timestamp_mode - Restore timestamp configuration
 * @pf: Board private structure
 *
 * Called at the end of rebuild to restore timestamp configuration after
 * a device reset.
 */
void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	bool enable_rx;

	ice_ptp_cfg_tx_interrupt(pf);

	enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
	ice_set_rx_tstamp(pf, enable_rx);

	/* Trigger an immediate software interrupt to ensure that timestamps
	 * which occurred during reset are handled now.
	 */
	wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
	ice_flush(hw);
}

/**
 * ice_ptp_read_src_clk_reg - Read the source clock register
 * @pf: Board private structure
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
 *
 * Return: the 64 bit value of the GLTSYN_TIME register for the source timer.
 */
static u64
ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
{
	struct ice_hw *hw = &pf->hw;
	u32 hi, lo, lo2;
	u8 tmr_idx;

	tmr_idx = ice_get_ptp_src_clock_index(hw);
	/* Hold the cross-PF time lock for the whole multi-register read */
	guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
	/* Read the system timestamp pre PHC read */
	ptp_read_system_prets(sts);

	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	/* Read the system timestamp post PHC read */
	ptp_read_system_postts(sts);

	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	/* Re-read the low word to detect a rollover between the two reads */
	lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	if (lo2 < lo) {
		/* if TIME_L rolled over read TIME_L again and update
		 * system timestamps
		 */
		ptp_read_system_prets(sts);
		lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
		ptp_read_system_postts(sts);
		hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	}

	return ((u64)hi << 32) | lo;
}

/**
 * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
 * @cached_phc_time: recently cached copy of PHC time
 * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
 *
 * Hardware captures timestamps which contain only 32 bits of nominal
 * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
 * Note that the captured timestamp values may be 40 bits, but the lower
 * 8 bits are sub-nanoseconds and generally discarded.
 *
 * Extend the 32bit nanosecond timestamp using the following algorithm and
 * assumptions:
 *
 * 1) have a recently cached copy of the PHC time
 * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1
 *    seconds) before or after the PHC time was captured.
 * 3) calculate the delta between the cached time and the timestamp
 * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
 *    captured after the PHC time. In this case, the full timestamp is just
 *    the cached PHC time plus the delta.
 * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
 *    timestamp was captured *before* the PHC time, i.e. because the PHC
 *    cache was updated after the timestamp was captured by hardware. In this
 *    case, the full timestamp is the cached time minus the inverse delta.
 *
 * This algorithm works even if the PHC time was updated after a Tx timestamp
 * was requested, but before the Tx timestamp event was reported from
 * hardware.
 *
 * This calculation primarily relies on keeping the cached PHC time up to
 * date. If the timestamp was captured more than 2^31 nanoseconds after the
 * PHC time, it is possible that the lower 32bits of PHC time have
 * overflowed more than once, and we might generate an incorrect timestamp.
 *
 * This is prevented by (a) periodically updating the cached PHC time once
 * a second, and (b) discarding any Tx timestamp packet if it has waited for
 * a timestamp for more than one second.
 */
static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
{
	u32 delta, phc_time_lo;
	u64 ns;

	/* Extract the lower 32 bits of the PHC time */
	phc_time_lo = (u32)cached_phc_time;

	/* Calculate the delta between the lower 32bits of the cached PHC
	 * time and the in_tstamp value (modulo-2^32 arithmetic)
	 */
	delta = (in_tstamp - phc_time_lo);

	/* Do not assume that the in_tstamp is always more recent than the
	 * cached PHC time. If the delta is large, it indicates that the
	 * in_tstamp was taken in the past, and should be converted
	 * forward.
	 */
	if (delta > (U32_MAX / 2)) {
		/* reverse the delta calculation here */
		delta = (phc_time_lo - in_tstamp);
		ns = cached_phc_time - delta;
	} else {
		ns = cached_phc_time + delta;
	}

	return ns;
}

/**
 * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
 * @pf: Board private structure
 * @in_tstamp: Ingress/egress 40b timestamp value
 *
 * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
 * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
 *
 *  *--------------------------------------------------------------*
 *  | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
 *  *--------------------------------------------------------------*
 *
 * The low bit is an indicator of whether the timestamp is valid. The next
 * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
 * and the remaining 32 bits are the lower 32 bits of the PHC timer.
 *
 * It is assumed that the caller verifies the timestamp is valid prior to
 * calling this function.
 *
 * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
 * time stored in the device private PTP structure as the basis for timestamp
 * extension.
 *
 * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
 * algorithm.
 */
static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
{
	const u64 mask = GENMASK_ULL(31, 0);
	unsigned long discard_time;

	/* Discard the hardware timestamp if the cached PHC time is too old */
	discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
	if (time_is_before_jiffies(discard_time)) {
		pf->ptp.tx_hwtstamp_discarded++;
		return 0;
	}

	/* Drop the valid bit and sub-ns bits, keep the 32 ns bits */
	return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
				     (in_tstamp >> 8) & mask);
}

/**
 * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps
 * @tx: the PTP Tx timestamp tracker to check
 *
 * Check that a given PTP Tx timestamp tracker is up, i.e. that it is ready
 * to accept new timestamp requests.
 *
 * Assumes the tx->lock spinlock is already held.
 */
static bool
ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
{
	lockdep_assert_held(&tx->lock);

	return tx->init && !tx->calibrating;
}

/**
 * ice_ptp_req_tx_single_tstamp - Request Tx timestamp for a port from FW
 * @tx: the PTP Tx timestamp tracker
 * @idx: index of the timestamp to request
 */
void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
{
	struct ice_e810_params *params;
	struct ice_ptp_port *ptp_port;
	unsigned long flags;
	struct sk_buff *skb;
	struct ice_pf *pf;

	if (!tx->init)
		return;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	params = &pf->hw.ptp.phy.e810;

	/* Drop packets which have waited for more than 2 seconds */
	if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
		/* Count the number of Tx timestamps that timed out */
		pf->ptp.tx_hwtstamp_timeouts++;

		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		clear_bit(idx, tx->in_use);

		dev_kfree_skb_any(skb);
		return;
	}

	ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);

	spin_lock_irqsave(&params->atqbal_wq.lock, flags);

	params->atqbal_flags |= ATQBAL_FLAGS_INTR_IN_PROGRESS;

	/* Write TS index to read to the PF register so the FW can read it */
	wr32(&pf->hw, REG_LL_PROXY_H,
	     REG_LL_PROXY_H_TS_INTR_ENA | FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) |
	     REG_LL_PROXY_H_EXEC);
	tx->last_ll_ts_idx_read = idx;

	spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
}

/**
 * ice_ptp_complete_tx_single_tstamp - Complete Tx timestamp for a port
 * @tx: the PTP Tx timestamp tracker
 */
void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
{
	struct skb_shared_hwtstamps shhwtstamps = {};
	u8 idx = tx->last_ll_ts_idx_read;
	struct ice_e810_params *params;
	struct ice_ptp_port *ptp_port;
	u64 raw_tstamp, tstamp;
	bool drop_ts = false;
	struct sk_buff *skb;
	unsigned long flags;
	struct device *dev;
	struct ice_pf *pf;
	u32 reg_ll_high;

	/* Nothing to do unless a low latency read was started */
	if (!tx->init || tx->last_ll_ts_idx_read < 0)
		return;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	dev = ice_pf_to_dev(pf);
	params = &pf->hw.ptp.phy.e810;

	ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);

	spin_lock_irqsave(&params->atqbal_wq.lock, flags);

	if (!(params->atqbal_flags & ATQBAL_FLAGS_INTR_IN_PROGRESS))
		dev_dbg(dev, "%s: low latency interrupt request not in progress?\n",
			__func__);

	/* Read the low 32 bit value */
	raw_tstamp = rd32(&pf->hw, REG_LL_PROXY_L);
	/* Read the status together with high TS part */
	reg_ll_high = rd32(&pf->hw, REG_LL_PROXY_H);

	/* Wake up threads waiting on low latency interface */
	params->atqbal_flags &= ~ATQBAL_FLAGS_INTR_IN_PROGRESS;

	wake_up_locked(&params->atqbal_wq);

	spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);

	/* When the bit is cleared, the TS is ready in the register */
	if (reg_ll_high & REG_LL_PROXY_H_EXEC) {
		dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready");
		return;
	}

	/* High 8 bit value of the TS is on the bits 16:23 */
	raw_tstamp |= ((u64)FIELD_GET(REG_LL_PROXY_H_TS_HIGH, reg_ll_high)) << 32;

	/* Devices using this interface always verify the timestamp differs
	 * relative to the last cached timestamp value.
	 */
	if (raw_tstamp == tx->tstamps[idx].cached_tstamp)
		return;

	tx->tstamps[idx].cached_tstamp = raw_tstamp;
	clear_bit(idx, tx->in_use);
	skb = tx->tstamps[idx].skb;
	tx->tstamps[idx].skb = NULL;
	if (test_and_clear_bit(idx, tx->stale))
		drop_ts = true;

	if (!skb)
		return;

	if (drop_ts) {
		dev_kfree_skb_any(skb);
		return;
	}

	/* Extend the timestamp using cached PHC time */
	tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
	if (tstamp) {
		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
		ice_trace(tx_tstamp_complete, skb, idx);
	}

	skb_tstamp_tx(skb, &shhwtstamps);
	dev_kfree_skb_any(skb);
}

/**
 * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port
 * @tx: the PTP Tx timestamp tracker
 *
 * Process timestamps captured by the PHY associated with this port. To do
 * this, loop over each index with a waiting skb.
 *
 * If a given index has a valid timestamp, perform the following steps:
 *
 * 1) check that the timestamp request is not stale
 * 2) check that a timestamp is ready and available in the PHY memory bank
 * 3) read and copy the timestamp out of the PHY register
 * 4) unlock the index by clearing the associated in_use bit
 * 5) check if the timestamp is stale, and discard if so
 * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value
 * 7) send this 64 bit timestamp to the stack
 *
 * Note that we do not hold the tracking lock while reading the Tx timestamp.
 * This is because reading the timestamp requires taking a mutex that might
 * sleep.
 *
 * The only place where we set in_use is when a new timestamp is initiated
 * with a slot index. This is only called in the hard xmit routine where an
 * SKB has a request flag set. The only places where we clear this bit is this
 * function, or during teardown when the Tx timestamp tracker is being
 * removed. A timestamp index will never be re-used until the in_use bit for
 * that index is cleared.
 *
 * If a Tx thread starts a new timestamp, we might not begin processing it
 * right away but we will notice it at the end when we re-queue the task.
 *
 * If a Tx thread starts a new timestamp just after this function exits, the
 * interrupt for that timestamp should re-trigger this function once
 * a timestamp is ready.
 *
 * In cases where the PTP hardware clock was directly adjusted, some
 * timestamps may not be able to safely use the timestamp extension math. In
 * this case, software will set the stale bit for any outstanding Tx
 * timestamps when the clock is adjusted. Then this function will discard
 * those captured timestamps instead of sending them to the stack.
 *
 * If a Tx packet has been waiting for more than 2 seconds, it is not possible
 * to correctly extend the timestamp using the cached PHC time. It is
 * extremely unlikely that a packet will ever take this long to timestamp. If
 * we detect a Tx timestamp request that has waited for this long we assume
 * the packet will never be sent by hardware and discard it without reading
 * the timestamp register.
 */
static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
{
	struct ice_ptp_port *ptp_port;
	unsigned long flags;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u64 tstamp_ready;
	bool link_up;
	int err;
	u8 idx;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	hw = &pf->hw;

	/* Read the Tx ready status first */
	if (tx->has_ready_bitmap) {
		err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
		if (err)
			return;
	}

	/* Drop packets if the link went down */
	link_up = ptp_port->link_up;

	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct skb_shared_hwtstamps shhwtstamps = {};
		u8 phy_idx = idx + tx->offset;
		u64 raw_tstamp = 0, tstamp;
		bool drop_ts = !link_up;
		struct sk_buff *skb;

		/* Drop packets which have waited for more than 2 seconds */
		if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
			drop_ts = true;

			/* Count the number of Tx timestamps that timed out */
			pf->ptp.tx_hwtstamp_timeouts++;
		}

		/* Only read a timestamp from the PHY if its marked as ready
		 * by the tstamp_ready register. This avoids unnecessary
		 * reading of timestamps which are not yet valid. This is
		 * important as we must read all timestamps which are valid
		 * and only timestamps which are valid during each interrupt.
		 * If we do not, the hardware logic for generating a new
		 * interrupt can get stuck on some devices.
		 */
		if (tx->has_ready_bitmap &&
		    !(tstamp_ready & BIT_ULL(phy_idx))) {
			/* Not ready: still release the slot if dropping */
			if (drop_ts)
				goto skip_ts_read;

			continue;
		}

		ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);

		err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp);
		if (err && !drop_ts)
			continue;

		ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);

		/* For PHYs which don't implement a proper timestamp ready
		 * bitmap, verify that the timestamp value is different
		 * from the last cached timestamp. If it is not, skip this for
		 * now assuming it hasn't yet been captured by hardware.
		 */
		if (!drop_ts && !tx->has_ready_bitmap &&
		    raw_tstamp == tx->tstamps[idx].cached_tstamp)
			continue;

		/* Discard any timestamp value without the valid bit set */
		if (!(raw_tstamp & ICE_PTP_TS_VALID))
			drop_ts = true;

skip_ts_read:
		spin_lock_irqsave(&tx->lock, flags);
		if (!tx->has_ready_bitmap && raw_tstamp)
			tx->tstamps[idx].cached_tstamp = raw_tstamp;
		clear_bit(idx, tx->in_use);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		if (test_and_clear_bit(idx, tx->stale))
			drop_ts = true;
		spin_unlock_irqrestore(&tx->lock, flags);

		/* It is unlikely but possible that the SKB will have been
		 * flushed at this point due to link change or teardown.
		 */
		if (!skb)
			continue;

		if (drop_ts) {
			dev_kfree_skb_any(skb);
			continue;
		}

		/* Extend the timestamp using cached PHC time */
		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
		if (tstamp) {
			shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
			ice_trace(tx_tstamp_complete, skb, idx);
		}

		skb_tstamp_tx(skb, &shhwtstamps);
		dev_kfree_skb_any(skb);
	}
}

/**
 * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device
 * @pf: Board private structure
 *
 * Return: ICE_TX_TSTAMP_WORK_PENDING if any quad still reports ready
 * timestamps, ICE_TX_TSTAMP_WORK_DONE otherwise.
 */
static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
{
	struct ice_ptp_port *port;
	unsigned int i;

	mutex_lock(&pf->adapter->ports.lock);
	list_for_each_entry(port, &pf->adapter->ports.ports, list_node) {
		struct ice_ptp_tx *tx = &port->tx;

		if (!tx || !tx->init)
			continue;

		ice_ptp_process_tx_tstamp(tx);
	}
	mutex_unlock(&pf->adapter->ports.lock);

	for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) {
		u64 tstamp_ready;
		int err;

		/* Read the Tx ready status first */
		err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
		if (err)
			break;
		else if (tstamp_ready)
			return ICE_TX_TSTAMP_WORK_PENDING;
	}

	return ICE_TX_TSTAMP_WORK_DONE;
}

/**
 * ice_ptp_tx_tstamp - Process Tx timestamps for this function.
 * @tx: Tx tracking structure to initialize
 *
 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete
 * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise.
 */
static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
{
	bool more_timestamps;
	unsigned long flags;

	if (!tx->init)
		return ICE_TX_TSTAMP_WORK_DONE;

	/* Process the Tx timestamp tracker */
	ice_ptp_process_tx_tstamp(tx);

	/* Check if there are outstanding Tx timestamps */
	spin_lock_irqsave(&tx->lock, flags);
	more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (more_timestamps)
		return ICE_TX_TSTAMP_WORK_PENDING;

	return ICE_TX_TSTAMP_WORK_DONE;
}

/**
 * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
 * @tx: Tx tracking structure to initialize
 *
 * Assumes that the length has already been initialized. Do not call directly,
 * use the ice_ptp_init_tx_* instead.
 *
 * Return: 0 on success, -ENOMEM if any of the allocations fail.
 */
static int
ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
{
	unsigned long *in_use, *stale;
	struct ice_tx_tstamp *tstamps;

	tstamps = kcalloc(tx->len, sizeof(*tstamps), GFP_KERNEL);
	in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
	stale = bitmap_zalloc(tx->len, GFP_KERNEL);

	/* All-or-nothing: free everything if any allocation failed */
	if (!tstamps || !in_use || !stale) {
		kfree(tstamps);
		bitmap_free(in_use);
		bitmap_free(stale);

		return -ENOMEM;
	}

	tx->tstamps = tstamps;
	tx->in_use = in_use;
	tx->stale = stale;
	tx->init = 1;
	/* No low latency read in flight yet */
	tx->last_ll_ts_idx_read = -1;

	spin_lock_init(&tx->lock);

	return 0;
}

/**
 * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
 * @pf: Board private structure
 * @tx: the tracker to flush
 *
 *
 * Called during teardown when a Tx tracker is being removed.
 */
static void
ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	struct ice_hw *hw = &pf->hw;
	unsigned long flags;
	u64 tstamp_ready;
	int err;
	u8 idx;

	err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
	if (err) {
		dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n",
			tx->block, err);

		/* If we fail to read the Tx timestamp ready bitmap just
		 * skip clearing the PHY timestamps.
		 */
		tstamp_ready = 0;
	}

	for_each_set_bit(idx, tx->in_use, tx->len) {
		u8 phy_idx = idx + tx->offset;
		struct sk_buff *skb;

		/* In case this timestamp is ready, we need to clear it. */
		if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx)))
			ice_clear_phy_tstamp(hw, tx->block, phy_idx);

		spin_lock_irqsave(&tx->lock, flags);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		clear_bit(idx, tx->in_use);
		clear_bit(idx, tx->stale);
		spin_unlock_irqrestore(&tx->lock, flags);

		/* Count the number of Tx timestamps flushed */
		pf->ptp.tx_hwtstamp_flushed++;

		/* Free the SKB after we've cleared the bit */
		dev_kfree_skb_any(skb);
	}
}

/**
 * ice_ptp_mark_tx_tracker_stale - Mark unfinished timestamps as stale
 * @tx: the tracker to mark
 *
 * Mark currently outstanding Tx timestamps as stale. This prevents sending
 * their timestamp value to the stack. This is required to prevent extending
 * the 40bit hardware timestamp incorrectly.
 *
 * This should be called when the PTP clock is modified such as after a set
 * time request.
 */
static void
ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
{
	unsigned long flags;

	spin_lock_irqsave(&tx->lock, flags);
	/* Every index currently in use becomes stale */
	bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len);
	spin_unlock_irqrestore(&tx->lock, flags);
}

/**
 * ice_ptp_flush_all_tx_tracker - Flush all timestamp trackers on this clock
 * @pf: Board private structure
 *
 * Called by the clock owner to flush all the Tx timestamp trackers associated
 * with the clock.
 */
static void
ice_ptp_flush_all_tx_tracker(struct ice_pf *pf)
{
	struct ice_ptp_port *port;

	list_for_each_entry(port, &pf->adapter->ports.ports, list_node)
		ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx);
}

/**
 * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
 * @pf: Board private structure
 * @tx: Tx tracking structure to release
 *
 * Free memory associated with the Tx timestamp tracker.
 */
static void
ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	unsigned long flags;

	/* Mark the tracker down so no new timestamps are requested */
	spin_lock_irqsave(&tx->lock, flags);
	tx->init = 0;
	spin_unlock_irqrestore(&tx->lock, flags);

	/* wait for potentially outstanding interrupt to complete */
	synchronize_irq(pf->oicr_irq.virq);

	ice_ptp_flush_tx_tracker(pf, tx);

	kfree(tx->tstamps);
	tx->tstamps = NULL;

	bitmap_free(tx->in_use);
	tx->in_use = NULL;

	bitmap_free(tx->stale);
	tx->stale = NULL;

	tx->len = 0;
}

/**
 * ice_ptp_init_tx_eth56g - Initialize tracking for Tx timestamps
 * @pf: Board private structure
 * @tx: the Tx tracking structure to initialize
 * @port: the port this structure tracks
 *
 * Initialize the Tx timestamp tracker for this port. ETH56G PHYs
 * have independent memory blocks for all ports.
982 * 983 * Return: 0 for success, -ENOMEM when failed to allocate Tx tracker 984 */ 985 static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx, 986 u8 port) 987 { 988 tx->block = port; 989 tx->offset = 0; 990 tx->len = INDEX_PER_PORT_ETH56G; 991 tx->has_ready_bitmap = 1; 992 993 return ice_ptp_alloc_tx_tracker(tx); 994 } 995 996 /** 997 * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps 998 * @pf: Board private structure 999 * @tx: the Tx tracking structure to initialize 1000 * @port: the port this structure tracks 1001 * 1002 * Initialize the Tx timestamp tracker for this port. For generic MAC devices, 1003 * the timestamp block is shared for all ports in the same quad. To avoid 1004 * ports using the same timestamp index, logically break the block of 1005 * registers into chunks based on the port number. 1006 */ 1007 static int 1008 ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) 1009 { 1010 tx->block = ICE_GET_QUAD_NUM(port); 1011 tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X; 1012 tx->len = INDEX_PER_PORT_E82X; 1013 tx->has_ready_bitmap = 1; 1014 1015 return ice_ptp_alloc_tx_tracker(tx); 1016 } 1017 1018 /** 1019 * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps 1020 * @pf: Board private structure 1021 * @tx: the Tx tracking structure to initialize 1022 * 1023 * Initialize the Tx timestamp tracker for this PF. For E810 devices, each 1024 * port has its own block of timestamps, independent of the other ports. 1025 */ 1026 static int 1027 ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx) 1028 { 1029 tx->block = pf->hw.port_info->lport; 1030 tx->offset = 0; 1031 tx->len = INDEX_PER_PORT_E810; 1032 /* The E810 PHY does not provide a timestamp ready bitmap. Instead, 1033 * verify new timestamps against cached copy of the last read 1034 * timestamp. 
	 */
	tx->has_ready_bitmap = 0;

	return ice_ptp_alloc_tx_tracker(tx);
}

/**
 * ice_ptp_update_cached_phctime - Update the cached PHC time values
 * @pf: Board specific private structure
 *
 * This function updates the system time values which are cached in the PF
 * structure and the Rx rings.
 *
 * This function must be called periodically to ensure that the cached value
 * is never more than 2 seconds old.
 *
 * Note that the cached copy in the PF PTP structure is always updated, even
 * if we can't update the copy in the Rx rings.
 *
 * Return:
 * * 0 - OK, successfully updated
 * * -EAGAIN - PF was busy, need to reschedule the update
 */
static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	unsigned long update_before;
	u64 systime;
	int i;

	/* Warn (and count) when more than 2000 ms elapsed since the last
	 * cache update; stale cached time can corrupt extended timestamps.
	 */
	update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
	if (pf->ptp.cached_phc_time &&
	    time_is_before_jiffies(update_before)) {
		unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;

		dev_warn(dev, "%u msecs passed between update to cached PHC time\n",
			 jiffies_to_msecs(time_taken));
		pf->ptp.late_cached_phc_updates++;
	}

	/* Read the current PHC time */
	systime = ice_ptp_read_src_clk_reg(pf, NULL);

	/* Update the cached PHC time stored in the PF structure */
	WRITE_ONCE(pf->ptp.cached_phc_time, systime);
	WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);

	/* The PF copy above is always refreshed; the Rx ring copies are
	 * only refreshed if no other thread holds ICE_CFG_BUSY.
	 */
	if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
		return -EAGAIN;

	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];
		int j;

		if (!vsi)
			continue;

		if (vsi->type != ICE_VSI_PF)
			continue;

		ice_for_each_rxq(vsi, j) {
			if (!vsi->rx_rings[j])
				continue;
			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
		}
	}
	clear_bit(ICE_CFG_BUSY, pf->state);

	return 0;
}

/**
 * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
 * @pf: Board specific private structure
 *
 * This function must be called when the cached PHC time is no longer valid,
 * such as after a time adjustment. It marks any currently outstanding Tx
 * timestamps as stale and updates the cached PHC time for both the PF and Rx
 * rings.
 *
 * If updating the PHC time cannot be done immediately, a warning message is
 * logged and the work item is scheduled immediately to minimize the window
 * with a wrong cached timestamp.
 */
static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	/* Update the cached PHC time immediately if possible, otherwise
	 * schedule the work item to execute soon.
	 */
	err = ice_ptp_update_cached_phctime(pf);
	if (err) {
		/* If another thread is updating the Rx rings, we won't
		 * properly reset them here. This could lead to reporting of
		 * invalid timestamps, but there isn't much we can do.
		 */
		dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
			 __func__);

		/* Queue the work item to update the Rx rings when possible */
		kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
					   msecs_to_jiffies(10));
	}

	/* Mark any outstanding timestamps as stale, since they might have
	 * been captured in hardware before the time update. This could lead
	 * to us extending them with the wrong cached value resulting in
	 * incorrect timestamp values.
	 */
	ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx);
}

/**
 * ice_ptp_write_init - Set PHC time to provided value
 * @pf: Board private structure
 * @ts: timespec structure that holds the new time value
 *
 * Set the PHC time to the specified time provided in the timespec.
 *
 * Return: 0 on success, error code from ice_ptp_init_time() otherwise.
 */
static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
{
	u64 ns = timespec64_to_ns(ts);
	struct ice_hw *hw = &pf->hw;

	return ice_ptp_init_time(hw, ns);
}

/**
 * ice_ptp_write_adj - Adjust PHC clock time atomically
 * @pf: Board private structure
 * @adj: Adjustment in nanoseconds
 *
 * Perform an atomic adjustment of the PHC time by the specified number of
 * nanoseconds.
 *
 * Return: 0 on success, error code from ice_ptp_adj_clock() otherwise.
 */
static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
{
	struct ice_hw *hw = &pf->hw;

	return ice_ptp_adj_clock(hw, adj);
}

/**
 * ice_base_incval - Get base timer increment value
 * @pf: Board private structure
 *
 * Look up the base timer increment value for this device. The base increment
 * value is used to define the nominal clock tick rate. This increment value
 * is programmed during device initialization. It is also used as the basis
 * for calculating adjustments using scaled_ppm.
 */
static u64 ice_base_incval(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u64 incval;

	incval = ice_get_base_incval(hw);

	dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
		incval);

	return incval;
}

/**
 * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state
 * @port: PTP port for which Tx FIFO is checked
 */
static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
{
	int offs = port->port_num % ICE_PORTS_PER_QUAD;
	int quad = ICE_GET_QUAD_NUM(port->port_num);
	struct ice_pf *pf;
	struct ice_hw *hw;
	u32 val, phy_sts;
	int err;

	pf = ptp_port_to_pf(port);
	hw = &pf->hw;

	/* FIFO_OK is a terminal state; skip the register read entirely */
	if (port->tx_fifo_busy_cnt == FIFO_OK)
		return 0;

	/* need to read FIFO state; ports 0/1 and 2/3 of a quad share a
	 * status register
	 */
	if (offs == 0 || offs == 1)
		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS,
					     &val);
	else
		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS,
					     &val);

	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
			port->port_num, err);
		return err;
	}

	if (offs & 0x1)
		phy_sts = FIELD_GET(Q_REG_FIFO13_M, val);
	else
		phy_sts = FIELD_GET(Q_REG_FIFO02_M, val);

	if (phy_sts & FIFO_EMPTY) {
		port->tx_fifo_busy_cnt = FIFO_OK;
		return 0;
	}

	port->tx_fifo_busy_cnt++;

	dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
		port->tx_fifo_busy_cnt, port->port_num);

	/* After too many failed checks, reset the quad's timestamp memory
	 * and stop checking.
	 */
	if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
		dev_dbg(ice_pf_to_dev(pf),
			"Port %d Tx FIFO still not empty; resetting quad %d\n",
			port->port_num, quad);
		ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
		port->tx_fifo_busy_cnt = FIFO_OK;
		return 0;
	}

	return -EAGAIN;
}

/**
 * ice_ptp_wait_for_offsets - Check for valid Tx and Rx
offsets
 * @work: Pointer to the kthread_work structure for this task
 *
 * Check whether hardware has completed measuring the Tx and Rx offset values
 * used to configure and enable vernier timestamp calibration.
 *
 * Once the offset in either direction is measured, configure the associated
 * registers with the calibrated offset values and enable timestamping. The Tx
 * and Rx directions are configured independently as soon as their associated
 * offsets are known.
 *
 * This function reschedules itself until both Tx and Rx calibration have
 * completed.
 */
static void ice_ptp_wait_for_offsets(struct kthread_work *work)
{
	struct ice_ptp_port *port;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int tx_err;
	int rx_err;

	port = container_of(work, struct ice_ptp_port, ov_work.work);
	pf = ptp_port_to_pf(port);
	hw = &pf->hw;

	if (ice_is_reset_in_progress(pf->state)) {
		/* wait for device driver to complete reset */
		kthread_queue_delayed_work(pf->ptp.kworker,
					   &port->ov_work,
					   msecs_to_jiffies(100));
		return;
	}

	/* Only attempt the Tx offset configuration once the FIFO is OK */
	tx_err = ice_ptp_check_tx_fifo(port);
	if (!tx_err)
		tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num);
	rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num);
	if (tx_err || rx_err) {
		/* Tx and/or Rx offset not yet configured, try again later */
		kthread_queue_delayed_work(pf->ptp.kworker,
					   &port->ov_work,
					   msecs_to_jiffies(100));
		return;
	}
}

/**
 * ice_ptp_port_phy_stop - Stop timestamping for a PHY port
 * @ptp_port: PTP port to stop
 */
static int
ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
{
	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
	u8 port = ptp_port->port_num;
	struct ice_hw *hw = &pf->hw;
	int err;

	/* E810 PHY timestamping does not need to be stopped */
	if (ice_is_e810(hw))
		return 0;

	mutex_lock(&ptp_port->ps_lock);

	switch (ice_get_phy_model(hw)) {
	case ICE_PHY_ETH56G:
		err = ice_stop_phy_timer_eth56g(hw, port, true);
		break;
	case ICE_PHY_E82X:
		/* Cancel pending vernier calibration before stopping */
		kthread_cancel_delayed_work_sync(&ptp_port->ov_work);

		err = ice_stop_phy_timer_e82x(hw, port, true);
		break;
	default:
		err = -ENODEV;
	}
	if (err && err != -EBUSY)
		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
			port, err);

	mutex_unlock(&ptp_port->ps_lock);

	return err;
}

/**
 * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping
 * @ptp_port: PTP port for which the PHY start is set
 *
 * Start the PHY timestamping block, and initiate Vernier timestamping
 * calibration. If timestamping cannot be calibrated (such as if link is down)
 * then disable the timestamping block instead.
 */
static int
ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
{
	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
	u8 port = ptp_port->port_num;
	struct ice_hw *hw = &pf->hw;
	unsigned long flags;
	int err;

	if (ice_is_e810(hw))
		return 0;

	if (!ptp_port->link_up)
		return ice_ptp_port_phy_stop(ptp_port);

	mutex_lock(&ptp_port->ps_lock);

	switch (ice_get_phy_model(hw)) {
	case ICE_PHY_ETH56G:
		err = ice_start_phy_timer_eth56g(hw, port);
		break;
	case ICE_PHY_E82X:
		/* Start the PHY timer in Vernier mode */
		kthread_cancel_delayed_work_sync(&ptp_port->ov_work);

		/* temporarily disable Tx timestamps while calibrating
		 * PHY offset
		 */
		spin_lock_irqsave(&ptp_port->tx.lock, flags);
		ptp_port->tx.calibrating = true;
		spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
		ptp_port->tx_fifo_busy_cnt = 0;

		/* Start the PHY timer in Vernier mode */
		err = ice_start_phy_timer_e82x(hw, port);
		if (err)
			break;

		/* Enable Tx timestamps right away */
		spin_lock_irqsave(&ptp_port->tx.lock, flags);
		ptp_port->tx.calibrating = false;
		spin_unlock_irqrestore(&ptp_port->tx.lock, flags);

		kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
					   0);
		break;
	default:
		err = -ENODEV;
	}

	if (err)
		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
			port, err);

	mutex_unlock(&ptp_port->ps_lock);

	return err;
}

/**
 * ice_ptp_link_change - Reconfigure PTP after link status change
 * @pf: Board private structure
 * @linkup: Link is up or down
 */
void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
{
	struct ice_ptp_port *ptp_port;
	struct ice_hw *hw = &pf->hw;

	if (pf->ptp.state != ICE_PTP_READY)
		return;

	ptp_port = &pf->ptp.port;

	/* Update cached link status for this port immediately */
	ptp_port->link_up = linkup;

	/* Skip HW writes if reset is in progress */
	if (pf->hw.reset_ongoing)
		return;
	switch (ice_get_phy_model(hw)) {
	case ICE_PHY_E810:
		/* Do not reconfigure E810 PHY */
		return;
	case ICE_PHY_ETH56G:
	case ICE_PHY_E82X:
		ice_ptp_port_phy_restart(ptp_port);
		return;
	default:
		dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__);
	}
}

/**
 * ice_ptp_cfg_phy_interrupt - Configure PHY interrupt settings
 * @pf: PF private structure
 * @ena: bool value to enable or disable interrupt
 * @threshold: Minimum number of packets at which intr is triggered
 *
 * Utility function to configure all the PHY interrupt settings, including
 * whether the PHY interrupt is enabled, and what threshold to use. Also
 * configures the E82X timestamp owner to react to interrupts from all PHYs.
 *
 * Return: 0 on success, -EOPNOTSUPP when PHY model incorrect, other error codes
 * when failed to configure PHY interrupt for E82X
 */
static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	ice_ptp_reset_ts_memory(hw);

	switch (ice_get_phy_model(hw)) {
	case ICE_PHY_ETH56G: {
		int port;

		/* ETH56G interrupts are configured per port */
		for (port = 0; port < hw->ptp.num_lports; port++) {
			int err;

			err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
			if (err) {
				dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
					port, err);
				return err;
			}
		}

		return 0;
	}
	case ICE_PHY_E82X: {
		int quad;

		/* E82X interrupts are configured per quad */
		for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
		     quad++) {
			int err;

			err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
			if (err) {
				dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
					quad, err);
				return err;
			}
		}

		return 0;
	}
	case ICE_PHY_E810:
		/* No PHY interrupt configuration needed on E810 */
		return 0;
	case ICE_PHY_UNSUP:
	default:
		dev_warn(dev, "%s: Unexpected PHY model %d\n", __func__,
			 ice_get_phy_model(hw));
		return -EOPNOTSUPP;
	}
}

/**
 * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block
 * @pf: Board private structure
 */
static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
{
	ice_ptp_port_phy_restart(&pf->ptp.port);
}

/**
 * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping
 * @pf: Board private structure
 */
static void ice_ptp_restart_all_phy(struct ice_pf *pf)
{
	struct list_head *entry;

	list_for_each(entry, &pf->adapter->ports.ports) {
		struct ice_ptp_port *port = list_entry(entry,
						       struct ice_ptp_port,
						       list_node);

		/* Only restart ports whose link is currently up */
		if (port->link_up)
			ice_ptp_port_phy_restart(port);
	}
}

/**
 * ice_ptp_adjfine - Adjust clock increment rate
 * @info: the driver's PTP info structure
 * @scaled_ppm: Parts per million with 16-bit fractional field
 *
 * Adjust the frequency of the clock by the indicated scaled ppm from the
 * base frequency.
 */
static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;
	u64 incval;
	int err;

	incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm);
	err = ice_ptp_write_incval_locked(hw, incval);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
			err);
		return -EIO;
	}

	return 0;
}

/**
 * ice_ptp_extts_event - Process PTP external clock event
 * @pf: Board private structure
 */
void ice_ptp_extts_event(struct ice_pf *pf)
{
	struct ptp_clock_event event;
	struct ice_hw *hw = &pf->hw;
	u8 chan, tmr_idx;
	u32 hi, lo;

	/* Don't process timestamp events if PTP is not ready */
	if (pf->ptp.state != ICE_PTP_READY)
		return;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
	/* Event time is captured by one of the two matched registers
	 * GLTSYN_EVNT_L: 32 LSB of sampled time event
	 * GLTSYN_EVNT_H: 32 MSB of sampled time event
	 * Event is defined in GLTSYN_EVNT_0 register
	 */
	for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
		int pin_desc_idx;

		/* Check if channel is enabled */
		if (!(pf->ptp.ext_ts_irq & (1 << chan)))
			continue;

		lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
		hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
		event.timestamp = (u64)hi << 32 | lo;

		/* Add delay compensation */
		pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
		if (pin_desc_idx >= 0) {
			const struct ice_ptp_pin_desc *desc;

			desc = &pf->ptp.ice_pin_desc[pin_desc_idx];
			event.timestamp -= desc->delay[0];
		}

		event.type = PTP_CLOCK_EXTTS;
		event.index = chan;
		pf->ptp.ext_ts_irq &= ~(1 << chan);
		ptp_clock_event(pf->ptp.clock, &event);
	}
}

/**
 * ice_ptp_cfg_extts - Configure EXTTS pin and channel
 * @pf: Board private structure
 * @rq: External timestamp request
 * @on: Enable/disable flag
 *
 * Configure an external timestamp event on the requested channel.
 *
 * Return: 0 on success, negative error code otherwise
 */
static int ice_ptp_cfg_extts(struct ice_pf *pf, struct ptp_extts_request *rq,
			     int on)
{
	u32 aux_reg, gpio_reg, irq_reg;
	struct ice_hw *hw = &pf->hw;
	unsigned int chan, gpio_pin;
	int pin_desc_idx;
	u8 tmr_idx;

	/* Reject requests with unsupported flags */

	if (rq->flags & ~(PTP_ENABLE_FEATURE |
			  PTP_RISING_EDGE |
			  PTP_FALLING_EDGE |
			  PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
	chan = rq->index;

	pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
	if (pin_desc_idx < 0)
		return -EIO;

	gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[0];
	irq_reg = rd32(hw, PFINT_OICR_ENA);

	if (on) {
		/* Enable the interrupt */
		irq_reg |= PFINT_OICR_TSYN_EVNT_M;
		aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;

#define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE	BIT(0)
#define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE	BIT(1)

		/* set event level to requested edge */
		if (rq->flags & PTP_FALLING_EDGE)
			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
		if (rq->flags & PTP_RISING_EDGE)
			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;

		/* Write GPIO CTL reg.
		 * 0x1 is input sampled by EVENT register(channel)
		 * + num_in_channels * tmr_idx
		 */
		gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M,
				      1 + chan + (tmr_idx * 3));
	} else {
		bool last_enabled = true;

		/* clear the values we set to reset defaults */
		aux_reg = 0;
		gpio_reg = 0;

		/* Only clear the interrupt enable bit when no other EXTTS
		 * channel remains enabled.
		 */
		for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++)
			if ((pf->ptp.extts_rqs[i].flags &
			     PTP_ENABLE_FEATURE) &&
			    i != chan) {
				last_enabled = false;
			}

		if (last_enabled)
			irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
	}

	wr32(hw, PFINT_OICR_ENA, irq_reg);
	wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);

	return 0;
}

/**
 * ice_ptp_disable_all_extts - Disable all EXTTS channels
 * @pf: Board private structure
 */
static void ice_ptp_disable_all_extts(struct ice_pf *pf)
{
	for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts ; i++)
		if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE)
			ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i],
					  false);

	/* wait for potentially outstanding interrupt to complete */
	synchronize_irq(pf->oicr_irq.virq);
}

/**
 * ice_ptp_enable_all_extts - Enable all EXTTS channels
 * @pf: Board private structure
 *
 * Called during reset to restore user configuration.
 */
static void ice_ptp_enable_all_extts(struct ice_pf *pf)
{
	for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts ; i++)
		if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE)
			ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i],
					  true);
}

/**
 * ice_ptp_write_perout - Write periodic wave parameters to HW
 * @hw: pointer to the HW struct
 * @chan: target channel
 * @gpio_pin: target GPIO pin
 * @start: target time to start periodic output
 * @period: target period
 *
 * Return: 0 on success, negative error code otherwise
 */
static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
				unsigned int gpio_pin, u64 start, u64 period)
{

	u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
	u32 val = 0;

	/* 0. Reset mode & out_en in AUX_OUT */
	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);

	if (ice_is_e825c(hw)) {
		int err;

		/* Enable/disable CGU 1PPS output for E825C */
		err = ice_cgu_cfg_pps_out(hw, !!period);
		if (err)
			return err;
	}

	/* 1. Write perout with half of required period value.
	 * HW toggles output when source clock hits the TGT and then adds
	 * GLTSYN_CLKO value to the target, so it ends up with 50% duty cycle.
	 */
	period >>= 1;

	/* For proper operation, GLTSYN_CLKO must be larger than clock tick and
	 * period has to fit in 32 bit register.
	 */
#define MIN_PULSE 3
	if (!!period && (period <= MIN_PULSE || period > U32_MAX)) {
		dev_err(ice_hw_to_dev(hw), "CLK period ticks must be >= %d && <= 2^32",
			MIN_PULSE);
		return -EIO;
	}

	wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));

	/* 2. Write TARGET time */
	wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start));
	wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start));

	/* 3. Write AUX_OUT register */
	if (!!period)
		val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);

	/* 4. write GPIO CTL reg */
	val = GLGEN_GPIO_CTL_PIN_DIR_M;
	if (!!period)
		val |= FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M,
				  8 + chan + (tmr_idx * 4));

	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);

	return 0;
}

/**
 * ice_ptp_cfg_perout - Configure clock to generate periodic wave
 * @pf: Board private structure
 * @rq: Periodic output request
 * @on: Enable/disable flag
 *
 * Configure the internal clock generator modules to generate the clock wave of
 * specified period.
 *
 * Return: 0 on success, negative error code otherwise
 */
static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
			      int on)
{
	unsigned int gpio_pin, prop_delay_ns;
	u64 clk, period, start, phase;
	struct ice_hw *hw = &pf->hw;
	int pin_desc_idx;

	if (rq->flags & ~PTP_PEROUT_PHASE)
		return -EOPNOTSUPP;

	pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_PEROUT, rq->index);
	if (pin_desc_idx < 0)
		return -EIO;

	gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1];
	prop_delay_ns = pf->ptp.ice_pin_desc[pin_desc_idx].delay[1];
	period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec;

	/* If we're disabling the output or period is 0, clear out CLKO and TGT
	 * and keep output level low.
1822 */ 1823 if (!on || !period) 1824 return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0); 1825 1826 if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 && 1827 period != NSEC_PER_SEC && hw->ptp.phy_model == ICE_PHY_E82X) { 1828 dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n"); 1829 return -EOPNOTSUPP; 1830 } 1831 1832 if (period & 0x1) { 1833 dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n"); 1834 return -EIO; 1835 } 1836 1837 start = rq->start.sec * NSEC_PER_SEC + rq->start.nsec; 1838 1839 /* If PTP_PEROUT_PHASE is set, rq has phase instead of start time */ 1840 if (rq->flags & PTP_PEROUT_PHASE) 1841 phase = start; 1842 else 1843 div64_u64_rem(start, period, &phase); 1844 1845 /* If we have only phase or start time is in the past, start the timer 1846 * at the next multiple of period, maintaining phase. 1847 */ 1848 clk = ice_ptp_read_src_clk_reg(pf, NULL); 1849 if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns) 1850 start = div64_u64(clk + period - 1, period) * period + phase; 1851 1852 /* Compensate for propagation delay from the generator to the pin. */ 1853 start -= prop_delay_ns; 1854 1855 return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period); 1856 } 1857 1858 /** 1859 * ice_ptp_disable_all_perout - Disable all currently configured outputs 1860 * @pf: Board private structure 1861 * 1862 * Disable all currently configured clock outputs. This is necessary before 1863 * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_perout to 1864 * re-enable the clocks again. 
1865 */ 1866 static void ice_ptp_disable_all_perout(struct ice_pf *pf) 1867 { 1868 for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++) 1869 if (pf->ptp.perout_rqs[i].period.sec || 1870 pf->ptp.perout_rqs[i].period.nsec) 1871 ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i], 1872 false); 1873 } 1874 1875 /** 1876 * ice_ptp_enable_all_perout - Enable all configured periodic clock outputs 1877 * @pf: Board private structure 1878 * 1879 * Enable all currently configured clock outputs. Use this after 1880 * ice_ptp_disable_all_perout to reconfigure the output signals according to 1881 * their configuration. 1882 */ 1883 static void ice_ptp_enable_all_perout(struct ice_pf *pf) 1884 { 1885 for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++) 1886 if (pf->ptp.perout_rqs[i].period.sec || 1887 pf->ptp.perout_rqs[i].period.nsec) 1888 ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i], 1889 true); 1890 } 1891 1892 /** 1893 * ice_ptp_disable_shared_pin - Disable enabled pin that shares GPIO 1894 * @pf: Board private structure 1895 * @pin: Pin index 1896 * @func: Assigned function 1897 * 1898 * Return: 0 on success, negative error code otherwise 1899 */ 1900 static int ice_ptp_disable_shared_pin(struct ice_pf *pf, unsigned int pin, 1901 enum ptp_pin_function func) 1902 { 1903 unsigned int gpio_pin; 1904 1905 switch (func) { 1906 case PTP_PF_PEROUT: 1907 gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[1]; 1908 break; 1909 case PTP_PF_EXTTS: 1910 gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[0]; 1911 break; 1912 default: 1913 return -EOPNOTSUPP; 1914 } 1915 1916 for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) { 1917 struct ptp_pin_desc *pin_desc = &pf->ptp.pin_desc[i]; 1918 unsigned int chan = pin_desc->chan; 1919 1920 /* Skip pin idx from the request */ 1921 if (i == pin) 1922 continue; 1923 1924 if (pin_desc->func == PTP_PF_PEROUT && 1925 pf->ptp.ice_pin_desc[i].gpio[1] == gpio_pin) { 1926 pf->ptp.perout_rqs[chan].period.sec = 0; 1927 pf->ptp.perout_rqs[chan].period.nsec = 0; 
1928 pin_desc->func = PTP_PF_NONE; 1929 pin_desc->chan = 0; 1930 dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared output GPIO pin %u\n", 1931 i, gpio_pin); 1932 return ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[chan], 1933 false); 1934 } else if (pf->ptp.pin_desc->func == PTP_PF_EXTTS && 1935 pf->ptp.ice_pin_desc[i].gpio[0] == gpio_pin) { 1936 pf->ptp.extts_rqs[chan].flags &= ~PTP_ENABLE_FEATURE; 1937 pin_desc->func = PTP_PF_NONE; 1938 pin_desc->chan = 0; 1939 dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared input GPIO pin %u\n", 1940 i, gpio_pin); 1941 return ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[chan], 1942 false); 1943 } 1944 } 1945 1946 return 0; 1947 } 1948 1949 /** 1950 * ice_verify_pin - verify if pin supports requested pin function 1951 * @info: the driver's PTP info structure 1952 * @pin: Pin index 1953 * @func: Assigned function 1954 * @chan: Assigned channel 1955 * 1956 * Return: 0 on success, -EOPNOTSUPP when function is not supported. 1957 */ 1958 static int ice_verify_pin(struct ptp_clock_info *info, unsigned int pin, 1959 enum ptp_pin_function func, unsigned int chan) 1960 { 1961 struct ice_pf *pf = ptp_info_to_pf(info); 1962 const struct ice_ptp_pin_desc *pin_desc; 1963 1964 pin_desc = &pf->ptp.ice_pin_desc[pin]; 1965 1966 /* Is assigned function allowed? 
*/ 1967 switch (func) { 1968 case PTP_PF_EXTTS: 1969 if (pin_desc->gpio[0] < 0) 1970 return -EOPNOTSUPP; 1971 break; 1972 case PTP_PF_PEROUT: 1973 if (pin_desc->gpio[1] < 0) 1974 return -EOPNOTSUPP; 1975 break; 1976 case PTP_PF_NONE: 1977 break; 1978 case PTP_PF_PHYSYNC: 1979 default: 1980 return -EOPNOTSUPP; 1981 } 1982 1983 /* On adapters with SMA_CTRL disable other pins that share same GPIO */ 1984 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { 1985 ice_ptp_disable_shared_pin(pf, pin, func); 1986 pf->ptp.pin_desc[pin].func = func; 1987 pf->ptp.pin_desc[pin].chan = chan; 1988 return ice_ptp_set_sma_cfg(pf); 1989 } 1990 1991 return 0; 1992 } 1993 1994 /** 1995 * ice_ptp_gpio_enable - Enable/disable ancillary features of PHC 1996 * @info: The driver's PTP info structure 1997 * @rq: The requested feature to change 1998 * @on: Enable/disable flag 1999 * 2000 * Return: 0 on success, negative error code otherwise 2001 */ 2002 static int ice_ptp_gpio_enable(struct ptp_clock_info *info, 2003 struct ptp_clock_request *rq, int on) 2004 { 2005 struct ice_pf *pf = ptp_info_to_pf(info); 2006 int err; 2007 2008 switch (rq->type) { 2009 case PTP_CLK_REQ_PEROUT: 2010 { 2011 struct ptp_perout_request *cached = 2012 &pf->ptp.perout_rqs[rq->perout.index]; 2013 2014 err = ice_ptp_cfg_perout(pf, &rq->perout, on); 2015 if (!err) { 2016 *cached = rq->perout; 2017 } else { 2018 cached->period.sec = 0; 2019 cached->period.nsec = 0; 2020 } 2021 return err; 2022 } 2023 case PTP_CLK_REQ_EXTTS: 2024 { 2025 struct ptp_extts_request *cached = 2026 &pf->ptp.extts_rqs[rq->extts.index]; 2027 2028 err = ice_ptp_cfg_extts(pf, &rq->extts, on); 2029 if (!err) 2030 *cached = rq->extts; 2031 else 2032 cached->flags &= ~PTP_ENABLE_FEATURE; 2033 return err; 2034 } 2035 default: 2036 return -EOPNOTSUPP; 2037 } 2038 } 2039 2040 /** 2041 * ice_ptp_gettimex64 - Get the time of the clock 2042 * @info: the driver's PTP info structure 2043 * @ts: timespec64 structure to hold the current time value 2044 * 
 * @sts: Optional parameter for holding a pair of system timestamps from
 * the system clock. Will be ignored if NULL is given.
 *
 * Read the device clock and return the correct value on ns, after converting it
 * into a timespec struct.
 */
static int
ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
		   struct ptp_system_timestamp *sts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	u64 time_ns;

	time_ns = ice_ptp_read_src_clk_reg(pf, sts);
	*ts = ns_to_timespec64(time_ns);
	return 0;
}

/**
 * ice_ptp_settime64 - Set the time of the clock
 * @info: the driver's PTP info structure
 * @ts: timespec64 structure that holds the new time value
 *
 * Set the device clock to the user input value. The conversion from timespec
 * to ns happens in the write function.
 */
static int
ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct timespec64 ts64 = *ts;
	struct ice_hw *hw = &pf->hw;
	int err;

	/* For Vernier mode on E82X, we need to recalibrate after new settime.
	 * Start with marking timestamps as invalid.
	 */
	if (ice_get_phy_model(hw) == ICE_PHY_E82X) {
		err = ice_ptp_clear_phy_offset_ready_e82x(hw);
		if (err)
			dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
	}

	/* Serialize against other PHC writers via the HW semaphore */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto exit;
	}

	/* Disable periodic outputs */
	ice_ptp_disable_all_perout(pf);

	err = ice_ptp_write_init(pf, &ts64);
	ice_ptp_unlock(hw);

	if (!err)
		ice_ptp_reset_cached_phctime(pf);

	/* Reenable periodic outputs */
	ice_ptp_enable_all_perout(pf);

	/* Recalibrate and re-enable timestamp blocks for E822/E823 */
	if (ice_get_phy_model(hw) == ICE_PHY_E82X)
		ice_ptp_restart_all_phy(pf);
exit:
	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
		return err;
	}

	return 0;
}

/**
 * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
 * @info: the driver's PTP info structure
 * @delta: Offset in nanoseconds to adjust the time by
 *
 * Implements the adjustment as read-modify-write of the clock; the clock
 * keeps running between the read and the write, so this is only used when
 * the delta does not fit the atomic hardware adjustment (see ice_ptp_adjtime).
 */
static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
{
	struct timespec64 now, then;
	int ret;

	then = ns_to_timespec64(delta);
	ret = ice_ptp_gettimex64(info, &now, NULL);
	if (ret)
		return ret;
	now = timespec64_add(now, then);

	return ice_ptp_settime64(info, (const struct timespec64 *)&now);
}

/**
 * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta
 * @info: the driver's PTP info structure
 * @delta: Offset in nanoseconds to adjust the time by
 */
static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);

	/* Hardware only supports atomic adjustments using signed 32-bit
	 * integers. For any adjustment outside this range, perform
	 * a non-atomic get->adjust->set flow.
	 */
	if (delta > S32_MAX || delta < S32_MIN) {
		dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta);
		return ice_ptp_adjtime_nonatomic(info, delta);
	}

	if (!ice_ptp_lock(hw)) {
		dev_err(dev, "PTP failed to acquire semaphore in adjtime\n");
		return -EBUSY;
	}

	/* Disable periodic outputs */
	ice_ptp_disable_all_perout(pf);

	err = ice_ptp_write_adj(pf, delta);

	/* Reenable periodic outputs */
	ice_ptp_enable_all_perout(pf);

	ice_ptp_unlock(hw);

	if (err) {
		dev_err(dev, "PTP failed to adjust time, err %d\n", err);
		return err;
	}

	ice_ptp_reset_cached_phctime(pf);

	return 0;
}

#ifdef CONFIG_ICE_HWTS
/**
 * ice_ptp_get_syncdevicetime - Get the cross time stamp info
 * @device: Current device time
 * @system: System counter value read synchronously with device time
 * @ctx: Context provided by timekeeping code
 *
 * Read device and system (ART) clock simultaneously and return the corrected
 * clock values in ns.
 */
static int
ice_ptp_get_syncdevicetime(ktime_t *device,
			   struct system_counterval_t *system,
			   void *ctx)
{
	struct ice_pf *pf = (struct ice_pf *)ctx;
	struct ice_hw *hw = &pf->hw;
	u32 hh_lock, hh_art_ctl;
	int i;

#define MAX_HH_HW_LOCK_TRIES 5
#define MAX_HH_CTL_LOCK_TRIES 100

	for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) {
		/* Get the HW lock */
		hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
		if (hh_lock & PFHH_SEM_BUSY_M) {
			usleep_range(10000, 15000);
			continue;
		}
		break;
	}
	if (hh_lock & PFHH_SEM_BUSY_M) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
		return -EBUSY;
	}

	/* Program cmd to master timer */
	ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);

	/* Start the ART and device clock sync sequence */
	hh_art_ctl = rd32(hw, GLHH_ART_CTL);
	hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
	wr32(hw, GLHH_ART_CTL, hh_art_ctl);

	for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) {
		/* Wait for sync to complete; HW clears ACTIVE when done */
		hh_art_ctl = rd32(hw, GLHH_ART_CTL);
		if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
			udelay(1);
			continue;
		} else {
			u32 hh_ts_lo, hh_ts_hi, tmr_idx;
			u64 hh_ts;

			tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
			/* Read ART time */
			hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
			hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
			system->cycles = hh_ts;
			system->cs_id = CSID_X86_ART;
			/* Read Device source clock time */
			hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
			hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
			*device = ns_to_ktime(hh_ts);
			break;
		}
	}

	/* Clear the master timer */
	ice_ptp_src_cmd(hw, ICE_PTP_NOP);

	/* Release HW lock */
	hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
	hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
	wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);

	if (i == MAX_HH_CTL_LOCK_TRIES)
		return -ETIMEDOUT;

	return 0;
}

/**
 * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp
 * @info: the driver's PTP info structure
 * @cts: The memory to fill the cross timestamp info
 *
 * Capture a cross timestamp between the ART and the device PTP hardware
 * clock. Fill the cross timestamp information and report it back to the
 * caller.
 *
 * This is only valid for E822 and E823 devices which have support for
 * generating the cross timestamp via PCIe PTM.
 *
 * In order to correctly correlate the ART timestamp back to the TSC time, the
 * CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
 */
static int
ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info,
			    struct system_device_crosststamp *cts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);

	return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
					     pf, NULL, cts);
}
#endif /* CONFIG_ICE_HWTS */

/**
 * ice_ptp_get_ts_config - ioctl interface to read the timestamping config
 * @pf: Board private structure
 * @ifr: ioctl data
 *
 * Copy the timestamping config to user buffer
 */
int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
	struct hwtstamp_config *config;

	if (pf->ptp.state != ICE_PTP_READY)
		return -EIO;

	config = &pf->ptp.tstamp_config;

	return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
		-EFAULT : 0;
}

/**
 * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
 * @pf: Board private structure
 * @config: hwtstamp settings requested or saved
 */
static int
ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
{
	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
		pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
		break;
	case HWTSTAMP_TX_ON:
		pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
		break;
	default:
		return -ERANGE;
	}

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
	case HWTSTAMP_FILTER_ALL:
		/* Hardware timestamps all Rx packets; any PTP filter request
		 * is widened to HWTSTAMP_FILTER_ALL.
		 */
		pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	/* Immediately update the device timestamping mode */
	ice_ptp_restore_timestamp_mode(pf);

	return 0;
}

/**
 * ice_ptp_set_ts_config - ioctl interface to control the timestamping
 * @pf: Board private structure
 * @ifr: ioctl data
 *
 * Get the user config and store it
 */
int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (pf->ptp.state != ICE_PTP_READY)
		return -EAGAIN;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = ice_ptp_set_timestamp_mode(pf, &config);
	if (err)
		return err;

	/* Return the actual configuration set */
	config = pf->ptp.tstamp_config;

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

/**
 * ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns
 * @rx_desc: Receive descriptor
 * @pkt_ctx: Packet context to get the cached time
 *
 * The driver receives a notification in the receive descriptor with timestamp.
 */
u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
			const struct ice_pkt_ctx *pkt_ctx)
{
	u64 ts_ns, cached_time;
	u32 ts_high;

	if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
		return 0;

	cached_time = READ_ONCE(pkt_ctx->cached_phctime);

	/* Do not report a timestamp if we don't have a cached PHC time */
	if (!cached_time)
		return 0;

	/* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
	 * PHC value, rather than accessing the PF. This also allows us to
	 * simply pass the upper 32bits of nanoseconds directly. Calling
	 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these
	 * bits itself.
	 */
	ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
	ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);

	return ts_ns;
}

/**
 * ice_ptp_setup_pin_cfg - setup PTP pin_config structure
 * @pf: Board private structure
 *
 * Populate the ptp_pin_desc names/indices from the device pin table and
 * publish it via info.pin_config.
 */
static void ice_ptp_setup_pin_cfg(struct ice_pf *pf)
{
	for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) {
		const struct ice_ptp_pin_desc *desc = &pf->ptp.ice_pin_desc[i];
		struct ptp_pin_desc *pin = &pf->ptp.pin_desc[i];
		const char *name = NULL;

		/* Without SMA control, use generic SDP names; with it, use
		 * the NVM-derived names unless the index is unassigned.
		 */
		if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
			name = ice_pin_names[desc->name_idx];
		else if (desc->name_idx != GPIO_NA)
			name = ice_pin_names_nvm[desc->name_idx];
		if (name)
			strscpy(pin->name, name, sizeof(pin->name));

		pin->index = i;
	}

	pf->ptp.info.pin_config = pf->ptp.pin_desc;
}

/**
 * ice_ptp_disable_pins - Disable PTP pins
 * @pf: pointer to the PF structure
 *
 * Disable the OS access to the SMA pins. Called to clear out the OS
 * indications of pin support when we fail to setup the SMA control register.
 */
static void ice_ptp_disable_pins(struct ice_pf *pf)
{
	struct ptp_clock_info *info = &pf->ptp.info;

	dev_warn(ice_pf_to_dev(pf), "Failed to configure PTP pin control\n");

	info->enable = NULL;
	info->verify = NULL;
	info->n_pins = 0;
	info->n_ext_ts = 0;
	info->n_per_out = 0;
}

/**
 * ice_ptp_parse_sdp_entries - update ice_ptp_pin_desc structure from NVM
 * @pf: pointer to the PF structure
 * @entries: SDP connection section from NVM
 * @num_entries: number of valid entries in sdp_entries
 * @pins: PTP pins array to update
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries,
				     unsigned int num_entries,
				     struct ice_ptp_pin_desc *pins)
{
	unsigned int n_pins = 0;
	unsigned int i;

	/* Setup ice_pin_desc array: mark all slots unassigned */
	for (i = 0; i < ICE_N_PINS_MAX; i++) {
		pins[i].name_idx = -1;
		pins[i].gpio[0] = -1;
		pins[i].gpio[1] = -1;
	}

	for (i = 0; i < num_entries; i++) {
		u16 entry = le16_to_cpu(entries[i]);
		DECLARE_BITMAP(bitmap, GPIO_NA);
		unsigned int bitmap_idx;
		bool dir;
		u16 gpio;

		*bitmap = FIELD_GET(ICE_AQC_NVM_SDP_AC_PIN_M, entry);
		dir = !!FIELD_GET(ICE_AQC_NVM_SDP_AC_DIR_M, entry);
		gpio = FIELD_GET(ICE_AQC_NVM_SDP_AC_SDP_NUM_M, entry);
		/* NOTE(review): scan bound is GPIO_NA + 1 while the bitmap is
		 * declared with GPIO_NA bits; safe only because DECLARE_BITMAP
		 * rounds up to whole longs — confirm GPIO_NA bit is intended.
		 */
		for_each_set_bit(bitmap_idx, bitmap, GPIO_NA + 1) {
			unsigned int idx;

			/* Check if entry's pin bit is valid */
			if (bitmap_idx >= NUM_PTP_PINS_NVM &&
			    bitmap_idx != GPIO_NA)
				continue;

			/* Check if pin already exists */
			for (idx = 0; idx < ICE_N_PINS_MAX; idx++)
				if (pins[idx].name_idx == bitmap_idx)
					break;

			if (idx == ICE_N_PINS_MAX) {
				/* Pin not found, setup its entry and name */
				idx = n_pins++;
				pins[idx].name_idx = bitmap_idx;
				if (bitmap_idx == GPIO_NA)
					strscpy(pf->ptp.pin_desc[idx].name,
						ice_pin_names[gpio],
						sizeof(pf->ptp.pin_desc[idx]
						       .name));
			}

			/* Setup in/out GPIO number (dir selects the slot) */
			pins[idx].gpio[dir] = gpio;
		}
	}

	for (i = 0; i < n_pins; i++) {
		dev_dbg(ice_pf_to_dev(pf),
			"NVM pin entry[%d] : name_idx %d gpio_out %d gpio_in %d\n",
			i, pins[i].name_idx, pins[i].gpio[1], pins[i].gpio[0]);
	}

	pf->ptp.info.n_pins = n_pins;
	return 0;
}

/**
 * ice_ptp_set_funcs_e82x - Set specialized functions for E82X support
 * @pf: Board private structure
 *
 * Assign functions to the PTP capabilities structure for E82X devices.
 * Functions which operate across all device families should be set directly
 * in ice_ptp_set_caps. Only add functions here which are distinct for E82X
 * devices.
 */
static void ice_ptp_set_funcs_e82x(struct ice_pf *pf)
{
#ifdef CONFIG_ICE_HWTS
	/* PCIe PTM cross timestamps need ART and a known TSC frequency */
	if (boot_cpu_has(X86_FEATURE_ART) &&
	    boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
		pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp_e82x;

#endif /* CONFIG_ICE_HWTS */
	if (ice_is_e825c(&pf->hw)) {
		pf->ptp.ice_pin_desc = ice_pin_desc_e825c;
		pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e825c);
	} else {
		pf->ptp.ice_pin_desc = ice_pin_desc_e82x;
		pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e82x);
	}
	ice_ptp_setup_pin_cfg(pf);
}

/**
 * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
 * @pf: Board private structure
 *
 * Assign functions to the PTP capabilities structure for E810 devices.
 * Functions which operate across all device families should be set directly
 * in ice_ptp_set_caps. Only add functions here which are distinct for E810
 * devices.
2577 */ 2578 static void ice_ptp_set_funcs_e810(struct ice_pf *pf) 2579 { 2580 __le16 entries[ICE_AQC_NVM_SDP_AC_MAX_SIZE]; 2581 struct ice_ptp_pin_desc *desc = NULL; 2582 struct ice_ptp *ptp = &pf->ptp; 2583 unsigned int num_entries; 2584 int err; 2585 2586 err = ice_ptp_read_sdp_ac(&pf->hw, entries, &num_entries); 2587 if (err) { 2588 /* SDP section does not exist in NVM or is corrupted */ 2589 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { 2590 ptp->ice_pin_desc = ice_pin_desc_e810_sma; 2591 ptp->info.n_pins = 2592 ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810_sma); 2593 } else { 2594 pf->ptp.ice_pin_desc = ice_pin_desc_e810; 2595 pf->ptp.info.n_pins = 2596 ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810); 2597 err = 0; 2598 } 2599 } else { 2600 desc = devm_kcalloc(ice_pf_to_dev(pf), ICE_N_PINS_MAX, 2601 sizeof(struct ice_ptp_pin_desc), 2602 GFP_KERNEL); 2603 if (!desc) 2604 goto err; 2605 2606 err = ice_ptp_parse_sdp_entries(pf, entries, num_entries, desc); 2607 if (err) 2608 goto err; 2609 2610 ptp->ice_pin_desc = (const struct ice_ptp_pin_desc *)desc; 2611 } 2612 2613 ptp->info.pin_config = ptp->pin_desc; 2614 ice_ptp_setup_pin_cfg(pf); 2615 2616 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) 2617 err = ice_ptp_set_sma_cfg(pf); 2618 err: 2619 if (err) { 2620 devm_kfree(ice_pf_to_dev(pf), desc); 2621 ice_ptp_disable_pins(pf); 2622 } 2623 } 2624 2625 /** 2626 * ice_ptp_set_caps - Set PTP capabilities 2627 * @pf: Board private structure 2628 */ 2629 static void ice_ptp_set_caps(struct ice_pf *pf) 2630 { 2631 struct ptp_clock_info *info = &pf->ptp.info; 2632 struct device *dev = ice_pf_to_dev(pf); 2633 2634 snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk", 2635 dev_driver_string(dev), dev_name(dev)); 2636 info->owner = THIS_MODULE; 2637 info->max_adj = 100000000; 2638 info->adjtime = ice_ptp_adjtime; 2639 info->adjfine = ice_ptp_adjfine; 2640 info->gettimex64 = ice_ptp_gettimex64; 2641 info->settime64 = ice_ptp_settime64; 2642 info->n_per_out = 
GLTSYN_TGT_H_IDX_MAX; 2643 info->n_ext_ts = GLTSYN_EVNT_H_IDX_MAX; 2644 info->enable = ice_ptp_gpio_enable; 2645 info->verify = ice_verify_pin; 2646 2647 if (ice_is_e810(&pf->hw)) 2648 ice_ptp_set_funcs_e810(pf); 2649 else 2650 ice_ptp_set_funcs_e82x(pf); 2651 } 2652 2653 /** 2654 * ice_ptp_create_clock - Create PTP clock device for userspace 2655 * @pf: Board private structure 2656 * 2657 * This function creates a new PTP clock device. It only creates one if we 2658 * don't already have one. Will return error if it can't create one, but success 2659 * if we already have a device. Should be used by ice_ptp_init to create clock 2660 * initially, and prevent global resets from creating new clock devices. 2661 */ 2662 static long ice_ptp_create_clock(struct ice_pf *pf) 2663 { 2664 struct ptp_clock_info *info; 2665 struct device *dev; 2666 2667 /* No need to create a clock device if we already have one */ 2668 if (pf->ptp.clock) 2669 return 0; 2670 2671 ice_ptp_set_caps(pf); 2672 2673 info = &pf->ptp.info; 2674 dev = ice_pf_to_dev(pf); 2675 2676 /* Attempt to register the clock before enabling the hardware. 
	 */
	pf->ptp.clock = ptp_clock_register(info, dev);
	if (IS_ERR(pf->ptp.clock)) {
		dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device");
		return PTR_ERR(pf->ptp.clock);
	}

	return 0;
}

/**
 * ice_ptp_request_ts - Request an available Tx timestamp index
 * @tx: the PTP Tx timestamp tracker to request from
 * @skb: the SKB to associate with this timestamp request
 *
 * Return: the PHY timestamp register index for the request, or -1 if the
 * tracker is down or no index is free.
 */
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
{
	unsigned long flags;
	u8 idx;

	spin_lock_irqsave(&tx->lock, flags);

	/* Check that this tracker is accepting new timestamp requests */
	if (!ice_ptp_is_tx_tracker_up(tx)) {
		spin_unlock_irqrestore(&tx->lock, flags);
		return -1;
	}

	/* Find and set the first available index, starting after the last
	 * low-latency index read to spread use across the block.
	 */
	idx = find_next_zero_bit(tx->in_use, tx->len,
				 tx->last_ll_ts_idx_read + 1);
	if (idx == tx->len)
		idx = find_first_zero_bit(tx->in_use, tx->len);

	if (idx < tx->len) {
		/* We got a valid index that no other thread could have set. Store
		 * a reference to the skb and the start time to allow discarding old
		 * requests.
		 */
		set_bit(idx, tx->in_use);
		clear_bit(idx, tx->stale);
		tx->tstamps[idx].start = jiffies;
		tx->tstamps[idx].skb = skb_get(skb);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		ice_trace(tx_tstamp_request, skb, idx);
	}

	spin_unlock_irqrestore(&tx->lock, flags);

	/* return the appropriate PHY timestamp register index, -1 if no
	 * indexes were available.
	 */
	if (idx >= tx->len)
		return -1;
	else
		return idx + tx->offset;
}

/**
 * ice_ptp_process_ts - Process the PTP Tx timestamps
 * @pf: Board private structure
 *
 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx
 * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise.
 */
enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
{
	switch (pf->ptp.tx_interrupt_mode) {
	case ICE_PTP_TX_INTERRUPT_NONE:
		/* This device has the clock owner handle timestamps for it */
		return ICE_TX_TSTAMP_WORK_DONE;
	case ICE_PTP_TX_INTERRUPT_SELF:
		/* This device handles its own timestamps */
		return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
	case ICE_PTP_TX_INTERRUPT_ALL:
		/* This device handles timestamps for all ports */
		return ice_ptp_tx_tstamp_owner(pf);
	default:
		WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
			  pf->ptp.tx_interrupt_mode);
		return ICE_TX_TSTAMP_WORK_DONE;
	}
}

/**
 * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timstamp interrupt
 * @pf: Board private structure
 *
 * The device PHY issues Tx timestamp interrupts to the driver for processing
 * timestamp data from the PHY. It will not interrupt again until all
 * current timestamp data is read. In rare circumstances, it is possible that
 * the driver fails to read all outstanding data.
 *
 * To avoid getting permanently stuck, periodically check if the PHY has
 * outstanding timestamp data. If so, trigger an interrupt from software to
 * process this data.
 */
static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	bool trigger_oicr = false;
	unsigned int i;

	/* E810 does not use this interrupt scheme */
	if (ice_is_e810(hw))
		return;

	/* Only the clock owner processes timestamps for all ports */
	if (!ice_pf_src_tmr_owned(pf))
		return;

	for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) {
		u64 tstamp_ready;
		int err;

		err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
		if (!err && tstamp_ready) {
			trigger_oicr = true;
			break;
		}
	}

	if (trigger_oicr) {
		/* Trigger a software interrupt, to ensure this data
		 * gets processed.
		 */
		dev_dbg(dev, "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n");

		wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
		ice_flush(hw);
	}
}

/* Periodic kthread work: refresh the cached PHC time and kick any stuck
 * Tx timestamp processing.
 */
static void ice_ptp_periodic_work(struct kthread_work *work)
{
	struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
	int err;

	if (pf->ptp.state != ICE_PTP_READY)
		return;

	err = ice_ptp_update_cached_phctime(pf);

	ice_ptp_maybe_trigger_tx_interrupt(pf);

	/* Run twice a second or reschedule if phc update failed */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work,
				   msecs_to_jiffies(err ? 10 : 500));
}

/**
 * ice_ptp_prepare_for_reset - Prepare PTP for reset
 * @pf: Board private structure
 * @reset_type: the reset type being performed
 */
void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_ptp *ptp = &pf->ptp;
	u8 src_tmr;

	if (ptp->state != ICE_PTP_READY)
		return;

	ptp->state = ICE_PTP_RESETTING;

	/* Disable timestamping for both Tx and Rx */
	ice_ptp_disable_timestamp_mode(pf);

	kthread_cancel_delayed_work_sync(&ptp->work);

	/* A PF reset preserves the clock hardware; nothing more to do */
	if (reset_type == ICE_RESET_PFR)
		return;

	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);

	/* Disable periodic outputs */
	ice_ptp_disable_all_perout(pf);

	src_tmr = ice_get_ptp_src_clock_index(&pf->hw);

	/* Disable source clock */
	wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);

	/* Acquire PHC and system timer to restore after reset */
	ptp->reset_time = ktime_get_real_ns();
}

/**
 * ice_ptp_rebuild_owner - Initialize PTP clock owner after reset
 * @pf: Board private structure
 *
 * Companion function for ice_ptp_rebuild() which handles tasks that only the
 * PTP clock owner instance should perform.
 */
static int ice_ptp_rebuild_owner(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	struct ice_hw *hw = &pf->hw;
	struct timespec64 ts;
	u64 time_diff;
	int err;

	err = ice_ptp_init_phc(hw);
	if (err)
		return err;

	/* Acquire the global hardware lock */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		return err;
	}

	/* Write the increment time value to PHY and LAN */
	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
	if (err)
		goto err_unlock;

	/* Write the initial Time value to PHY and LAN using the cached PHC
	 * time before the reset and time difference between stopping and
	 * starting the clock.
	 */
	if (ptp->cached_phc_time) {
		time_diff = ktime_get_real_ns() - ptp->reset_time;
		ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);
	} else {
		/* No cached time: fall back to the system wall clock */
		ts = ktime_to_timespec64(ktime_get_real());
	}
	err = ice_ptp_write_init(pf, &ts);
	if (err)
		goto err_unlock;

	/* Release the global hardware lock */
	ice_ptp_unlock(hw);

	/* Flush software tracking of any outstanding timestamps since we're
	 * about to flush the PHY timestamp block.
	 */
	ice_ptp_flush_all_tx_tracker(pf);

	if (!ice_is_e810(hw)) {
		/* Enable quad interrupts */
		err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
		if (err)
			return err;

		ice_ptp_restart_all_phy(pf);
	}

	/* Re-enable all periodic outputs and external timestamp events */
	ice_ptp_enable_all_perout(pf);
	ice_ptp_enable_all_extts(pf);

	return 0;

err_unlock:
	ice_ptp_unlock(hw);
	return err;
}

/**
 * ice_ptp_rebuild - Initialize PTP hardware clock support after reset
 * @pf: Board private structure
 * @reset_type: the reset type being performed
 */
void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_ptp *ptp = &pf->ptp;
	int err;

	if (ptp->state == ICE_PTP_READY) {
		ice_ptp_prepare_for_reset(pf, reset_type);
	} else if (ptp->state != ICE_PTP_RESETTING) {
		err = -EINVAL;
		dev_err(ice_pf_to_dev(pf), "PTP was not initialized\n");
		goto err;
	}

	if (ice_pf_src_tmr_owned(pf) && reset_type != ICE_RESET_PFR) {
		err = ice_ptp_rebuild_owner(pf);
		if (err)
			goto err;
	}

	ptp->state = ICE_PTP_READY;

	/* Start periodic work going */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);

	dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
	return;

err:
	ptp->state = ICE_PTP_ERROR;
	dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
}

/* On dual-NAC E825C only the primary NAC owns the clock; every other
 * device is always "primary" for this purpose.
 */
static bool ice_is_primary(struct ice_hw *hw)
{
	return ice_is_e825c(hw) && ice_is_dual(hw) ?
		!!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) : true;
}

/* Record this PF as the adapter's control PF if it owns the source timer */
static int ice_ptp_setup_adapter(struct ice_pf *pf)
{
	if (!ice_pf_src_tmr_owned(pf) || !ice_is_primary(&pf->hw))
		return -EPERM;

	pf->adapter->ctrl_pf = pf;

	return 0;
}

/* Register this PF's PTP port on the adapter-wide port list */
static int ice_ptp_setup_pf(struct ice_pf *pf)
{
	struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
	struct ice_ptp *ptp = &pf->ptp;

	if (WARN_ON(!ctrl_ptp) || ice_get_phy_model(&pf->hw) == ICE_PHY_UNSUP)
		return -ENODEV;

	INIT_LIST_HEAD(&ptp->port.list_node);
	mutex_lock(&pf->adapter->ports.lock);

	list_add(&ptp->port.list_node,
		 &pf->adapter->ports.ports);
	mutex_unlock(&pf->adapter->ports.lock);

	return 0;
}

/* Remove this PF's PTP port from the adapter-wide port list */
static void ice_ptp_cleanup_pf(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;

	if (ice_get_phy_model(&pf->hw) != ICE_PHY_UNSUP) {
		mutex_lock(&pf->adapter->ports.lock);
		list_del(&ptp->port.list_node);
		mutex_unlock(&pf->adapter->ports.lock);
	}
}

/**
 * ice_ptp_clock_index - Get the PTP clock index for this device
 * @pf: Board private structure
 *
 * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock
 * is associated.
 */
int ice_ptp_clock_index(struct ice_pf *pf)
{
	struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
	struct ptp_clock *clock;

	if (!ctrl_ptp)
		return -1;
	clock = ctrl_ptp->clock;

	return clock ? ptp_clock_index(clock) : -1;
}

/**
 * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
 * @pf: Board private structure
 *
 * Setup and initialize a PTP clock device that represents the device hardware
 * clock. Save the clock index for other functions connected to the same
 * hardware resource.
 */
static int ice_ptp_init_owner(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	struct timespec64 ts;
	int err;

	err = ice_ptp_init_phc(hw);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n",
			err);
		return err;
	}

	/* Acquire the global hardware lock */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto err_exit;
	}

	/* Write the increment time value to PHY and LAN */
	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
	if (err)
		goto err_unlock;

	ts = ktime_to_timespec64(ktime_get_real());
	/* Write the initial Time value to PHY and LAN */
	err = ice_ptp_write_init(pf, &ts);
	if (err)
		goto err_unlock;

	/* Release the global hardware lock */
	ice_ptp_unlock(hw);

	/* Configure PHY interrupt settings */
	err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
	if (err)
		goto err_exit;

	/* Ensure we have a clock device */
	err = ice_ptp_create_clock(pf);
	if (err)
		goto err_clk;

	return 0;
err_clk:
	pf->ptp.clock = NULL;
err_exit:
	return err;

err_unlock:
	ice_ptp_unlock(hw);
	return err;
}

/**
 * ice_ptp_init_work - Initialize PTP work threads
 * @pf: Board private structure
 * @ptp: PF PTP structure
 */
static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
{
	struct kthread_worker *kworker;

	/* Initialize work functions */
	kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);

	/* Allocate a kworker for handling work required for the ports
	 * connected to the PTP hardware clock.
3112 */ 3113 kworker = kthread_create_worker(0, "ice-ptp-%s", 3114 dev_name(ice_pf_to_dev(pf))); 3115 if (IS_ERR(kworker)) 3116 return PTR_ERR(kworker); 3117 3118 ptp->kworker = kworker; 3119 3120 /* Start periodic work going */ 3121 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0); 3122 3123 return 0; 3124 } 3125 3126 /** 3127 * ice_ptp_init_port - Initialize PTP port structure 3128 * @pf: Board private structure 3129 * @ptp_port: PTP port structure 3130 */ 3131 static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) 3132 { 3133 struct ice_hw *hw = &pf->hw; 3134 3135 mutex_init(&ptp_port->ps_lock); 3136 3137 switch (ice_get_phy_model(hw)) { 3138 case ICE_PHY_ETH56G: 3139 return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx, 3140 ptp_port->port_num); 3141 case ICE_PHY_E810: 3142 return ice_ptp_init_tx_e810(pf, &ptp_port->tx); 3143 case ICE_PHY_E82X: 3144 kthread_init_delayed_work(&ptp_port->ov_work, 3145 ice_ptp_wait_for_offsets); 3146 3147 return ice_ptp_init_tx_e82x(pf, &ptp_port->tx, 3148 ptp_port->port_num); 3149 default: 3150 return -ENODEV; 3151 } 3152 } 3153 3154 /** 3155 * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode 3156 * @pf: Board private structure 3157 * 3158 * Initialize the Tx timestamp interrupt mode for this device. For most device 3159 * types, each PF processes the interrupt and manages its own timestamps. For 3160 * E822-based devices, only the clock owner processes the timestamps. Other 3161 * PFs disable the interrupt and do not process their own timestamps. 3162 */ 3163 static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf) 3164 { 3165 switch (ice_get_phy_model(&pf->hw)) { 3166 case ICE_PHY_E82X: 3167 /* E822 based PHY has the clock owner process the interrupt 3168 * for all ports. 
3169 */ 3170 if (ice_pf_src_tmr_owned(pf)) 3171 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL; 3172 else 3173 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE; 3174 break; 3175 default: 3176 /* other PHY types handle their own Tx interrupt */ 3177 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF; 3178 } 3179 } 3180 3181 /** 3182 * ice_ptp_init - Initialize PTP hardware clock support 3183 * @pf: Board private structure 3184 * 3185 * Set up the device for interacting with the PTP hardware clock for all 3186 * functions, both the function that owns the clock hardware, and the 3187 * functions connected to the clock hardware. 3188 * 3189 * The clock owner will allocate and register a ptp_clock with the 3190 * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work 3191 * items used for asynchronous work such as Tx timestamps and periodic work. 3192 */ 3193 void ice_ptp_init(struct ice_pf *pf) 3194 { 3195 struct ice_ptp *ptp = &pf->ptp; 3196 struct ice_hw *hw = &pf->hw; 3197 int lane_num, err; 3198 3199 ptp->state = ICE_PTP_INITIALIZING; 3200 3201 lane_num = ice_get_phy_lane_number(hw); 3202 if (lane_num < 0) { 3203 err = lane_num; 3204 goto err_exit; 3205 } 3206 3207 ptp->port.port_num = (u8)lane_num; 3208 ice_ptp_init_hw(hw); 3209 3210 ice_ptp_init_tx_interrupt_mode(pf); 3211 3212 /* If this function owns the clock hardware, it must allocate and 3213 * configure the PTP clock device to represent it. 
	 */
	if (ice_pf_src_tmr_owned(pf) && ice_is_primary(hw)) {
		err = ice_ptp_setup_adapter(pf);
		if (err)
			goto err_exit;
		err = ice_ptp_init_owner(pf);
		if (err)
			goto err_exit;
	}

	err = ice_ptp_setup_pf(pf);
	if (err)
		goto err_exit;

	err = ice_ptp_init_port(pf, &ptp->port);
	if (err)
		goto err_exit;

	/* Start the PHY timestamping block */
	ice_ptp_reset_phy_timestamping(pf);

	/* Configure initial Tx interrupt settings */
	ice_ptp_cfg_tx_interrupt(pf);

	ptp->state = ICE_PTP_READY;

	err = ice_ptp_init_work(pf, ptp);
	if (err)
		goto err_exit;

	dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
	return;

err_exit:
	/* If we registered a PTP clock, release it */
	if (pf->ptp.clock) {
		ptp_clock_unregister(ptp->clock);
		pf->ptp.clock = NULL;
	}
	/* NOTE(review): this path does not undo ice_ptp_setup_pf() (the port
	 * stays on the adapter list) nor release the Tx tracker set up by
	 * ice_ptp_init_port() — confirm whether later teardown covers these.
	 */
	ptp->state = ICE_PTP_ERROR;
	dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
}

/**
 * ice_ptp_release - Disable the driver/HW support and unregister the clock
 * @pf: Board private structure
 *
 * This function handles the cleanup work required from the initialization by
 * clearing out the important information and unregistering the clock
 */
void ice_ptp_release(struct ice_pf *pf)
{
	/* Only tear down what a successful ice_ptp_init() set up */
	if (pf->ptp.state != ICE_PTP_READY)
		return;

	pf->ptp.state = ICE_PTP_UNINIT;

	/* Disable timestamping for both Tx and Rx */
	ice_ptp_disable_timestamp_mode(pf);

	ice_ptp_cleanup_pf(pf);

	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);

	ice_ptp_disable_all_extts(pf);

	/* Stop the periodic work before destroying the kworker it runs on */
	kthread_cancel_delayed_work_sync(&pf->ptp.work);

	ice_ptp_port_phy_stop(&pf->ptp.port);
	mutex_destroy(&pf->ptp.port.ps_lock);
	if (pf->ptp.kworker) {
		kthread_destroy_worker(pf->ptp.kworker);
		pf->ptp.kworker = NULL;
	}

	/* Only the clock owner registered a clock device; nothing further to
	 * release on other PFs.
	 */
	if (!pf->ptp.clock)
		return;

	/* Disable periodic outputs */
	ice_ptp_disable_all_perout(pf);

	ptp_clock_unregister(pf->ptp.clock);
	pf->ptp.clock = NULL;

	dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
}