// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_trace.h"
#include "ice_cgu_regs.h"

static const char ice_pin_names[][64] = {
	"SDP0",
	"SDP1",
	"SDP2",
	"SDP3",
	"TIME_SYNC",
	"1PPS"
};

static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = {
	/* name, gpio, delay */
	{ TIME_SYNC, { 4, -1 }, { 0, 0 }},
	{ ONE_PPS, { -1, 5 }, { 0, 11 }},
};

static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = {
	/* name, gpio, delay */
	{ SDP0, { 0, 0 }, { 15, 14 }},
	{ SDP1, { 1, 1 }, { 15, 14 }},
	{ SDP2, { 2, 2 }, { 15, 14 }},
	{ SDP3, { 3, 3 }, { 15, 14 }},
	{ TIME_SYNC, { 4, -1 }, { 11, 0 }},
	{ ONE_PPS, { -1, 5 }, { 0, 9 }},
};

static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = {
	/* name, gpio, delay */
	{ SDP0, { 0, 0 }, { 0, 1 }},
	{ SDP1, { 1, 1 }, { 0, 1 }},
	{ SDP2, { 2, 2 }, { 0, 1 }},
	{ SDP3, { 3, 3 }, { 0, 1 }},
	{ ONE_PPS, { -1, 5 }, { 0, 1 }},
};

static const char ice_pin_names_nvm[][64] = {
	"GNSS",
	"SMA1",
	"U.FL1",
	"SMA2",
	"U.FL2",
};

static const struct ice_ptp_pin_desc ice_pin_desc_e810_sma[] = {
	/* name, gpio, delay */
	{ GNSS, { 1, -1 }, { 0, 0 }},
	{ SMA1, { 1, 0 }, { 0, 1 }},
	{ UFL1, { -1, 0 }, { 0, 1 }},
	{ SMA2, { 3, 2 }, { 0, 1 }},
	{ UFL2, { 3, -1 }, { 0, 0 }},
};

static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf)
{
	return !pf->adapter ? NULL : pf->adapter->ctrl_pf;
}

static struct ice_ptp *ice_get_ctrl_ptp(struct ice_pf *pf)
{
	struct ice_pf *ctrl_pf = ice_get_ctrl_pf(pf);

	return !ctrl_pf ? NULL : &ctrl_pf->ptp;
}

/**
 * ice_ptp_find_pin_idx - Find pin index in ptp_pin_desc
 * @pf: Board private structure
 * @func: Pin function
 * @chan: GPIO channel
 *
 * Return: the index of the pin when present, -1 otherwise
 */
static int ice_ptp_find_pin_idx(struct ice_pf *pf, enum ptp_pin_function func,
				unsigned int chan)
{
	const struct ptp_clock_info *info = &pf->ptp.info;
	int i;

	for (i = 0; i < info->n_pins; i++) {
		if (info->pin_config[i].func == func &&
		    info->pin_config[i].chan == chan)
			return i;
	}

	return -1;
}

/**
 * ice_ptp_update_sma_data - update SMA pins data according to pins setup
 * @pf: Board private structure
 * @sma_pins: parsed SMA pins status
 * @data: SMA data to update
 */
static void ice_ptp_update_sma_data(struct ice_pf *pf, unsigned int sma_pins[],
				    u8 *data)
{
	const char *state1, *state2;

	/* Set the right state based on the desired configuration.
	 * When bit is set, functionality is disabled.
	 */
	*data &= ~ICE_ALL_SMA_MASK;
	if (!sma_pins[UFL1 - 1]) {
		if (sma_pins[SMA1 - 1] == PTP_PF_EXTTS) {
			state1 = "SMA1 Rx, U.FL1 disabled";
			*data |= ICE_SMA1_TX_EN;
		} else if (sma_pins[SMA1 - 1] == PTP_PF_PEROUT) {
			state1 = "SMA1 Tx U.FL1 disabled";
			*data |= ICE_SMA1_DIR_EN;
		} else {
			state1 = "SMA1 disabled, U.FL1 disabled";
			*data |= ICE_SMA1_MASK;
		}
	} else {
		/* U.FL1 Tx will always enable SMA1 Rx */
		state1 = "SMA1 Rx, U.FL1 Tx";
	}

	if (!sma_pins[UFL2 - 1]) {
		if (sma_pins[SMA2 - 1] == PTP_PF_EXTTS) {
			state2 = "SMA2 Rx, U.FL2 disabled";
			*data |= ICE_SMA2_TX_EN | ICE_SMA2_UFL2_RX_DIS;
		} else if (sma_pins[SMA2 - 1] == PTP_PF_PEROUT) {
			state2 = "SMA2 Tx, U.FL2 disabled";
			*data |= ICE_SMA2_DIR_EN | ICE_SMA2_UFL2_RX_DIS;
		} else {
			state2 = "SMA2 disabled, U.FL2 disabled";
			*data |= ICE_SMA2_MASK;
		}
	} else {
		if (!sma_pins[SMA2 - 1]) {
			state2 = "SMA2 disabled, U.FL2 Rx";
			*data |= ICE_SMA2_DIR_EN | ICE_SMA2_TX_EN;
		} else {
			state2 = "SMA2 Tx, U.FL2 Rx";
			*data |= ICE_SMA2_DIR_EN;
		}
	}

	dev_dbg(ice_pf_to_dev(pf), "%s, %s\n", state1, state2);
}

/**
 * ice_ptp_set_sma_cfg - set the configuration of the SMA control logic
 * @pf: Board private structure
 *
 * Return: 0 on success, negative error code otherwise
 */
static int ice_ptp_set_sma_cfg(struct ice_pf *pf)
{
	const struct ice_ptp_pin_desc *ice_pins = pf->ptp.ice_pin_desc;
	struct ptp_pin_desc *pins = pf->ptp.pin_desc;
	unsigned int sma_pins[ICE_SMA_PINS_NUM] = {};
	int err;
	u8 data;

	/* Read initial pin state value */
	err = ice_read_sma_ctrl(&pf->hw, &data);
	if (err)
		return err;

	/* Get SMA/U.FL pins states */
	for (int i = 0; i < pf->ptp.info.n_pins; i++)
		if (pins[i].func) {
			int name_idx = ice_pins[i].name_idx;

			switch (name_idx) {
			case SMA1:
			case UFL1:
			case SMA2:
			case UFL2:
				sma_pins[name_idx - 1] = pins[i].func;
				break;
			default:
				continue;
			}
		}

	ice_ptp_update_sma_data(pf, sma_pins, &data);
	return ice_write_sma_ctrl(&pf->hw, data);
}

/**
 * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
 * @pf: Board private structure
 *
 * Program the device to respond appropriately to the Tx timestamp interrupt
 * cause.
 */
static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	bool enable;
	u32 val;

	switch (pf->ptp.tx_interrupt_mode) {
	case ICE_PTP_TX_INTERRUPT_ALL:
		/* React to interrupts across all quads. */
		wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
		enable = true;
		break;
	case ICE_PTP_TX_INTERRUPT_NONE:
		/* Do not react to interrupts on any quad. */
		wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
		enable = false;
		break;
	case ICE_PTP_TX_INTERRUPT_SELF:
	default:
		enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON;
		break;
	}

	/* Configure the Tx timestamp interrupt */
	val = rd32(hw, PFINT_OICR_ENA);
	if (enable)
		val |= PFINT_OICR_TSYN_TX_M;
	else
		val &= ~PFINT_OICR_TSYN_TX_M;
	wr32(hw, PFINT_OICR_ENA, val);
}

/**
 * ice_set_rx_tstamp - Enable or disable Rx timestamping
 * @pf: The PF pointer to search in
 * @on: bool value for whether timestamps are enabled or disabled
 */
static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
{
	struct ice_vsi *vsi;
	u16 i;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->rx_rings)
		return;

	/* Set the timestamp flag for all the Rx rings */
	ice_for_each_rxq(vsi, i) {
		if (!vsi->rx_rings[i])
			continue;
		vsi->rx_rings[i]->ptp_rx = on;
	}
}

/**
 * ice_ptp_disable_timestamp_mode - Disable current timestamp mode
 * @pf: Board private structure
 *
 * Called during preparation for reset to temporarily disable timestamping on
 * the device. Called during remove to disable timestamping while cleaning up
 * driver resources.
 */
static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 val;

	val = rd32(hw, PFINT_OICR_ENA);
	val &= ~PFINT_OICR_TSYN_TX_M;
	wr32(hw, PFINT_OICR_ENA, val);

	ice_set_rx_tstamp(pf, false);
}

/**
 * ice_ptp_restore_timestamp_mode - Restore timestamp configuration
 * @pf: Board private structure
 *
 * Called at the end of rebuild to restore timestamp configuration after
 * a device reset.
 */
void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	bool enable_rx;

	ice_ptp_cfg_tx_interrupt(pf);

	enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
	ice_set_rx_tstamp(pf, enable_rx);

	/* Trigger an immediate software interrupt to ensure that timestamps
	 * which occurred during reset are handled now.
	 */
	wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
	ice_flush(hw);
}
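
/* Illustrative sketch, not part of this driver: the tstamp_config values
 * consumed above are normally provided by user space through the
 * SIOCSHWTSTAMP ioctl, roughly like (interface name and socket assumed):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * ice_ptp_restore_timestamp_mode() then re-applies whatever mode was last
 * requested this way after a device reset.
 */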

/**
 * ice_ptp_read_src_clk_reg - Read the source clock register
 * @pf: Board private structure
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
 *
 * Return: the 64 bit value read from the source timer GLTSYN_TIME registers
 */
static u64
ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
{
	struct ice_hw *hw = &pf->hw;
	u32 hi, lo, lo2;
	u8 tmr_idx;

	tmr_idx = ice_get_ptp_src_clock_index(hw);
	guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
	/* Read the system timestamp pre PHC read */
	ptp_read_system_prets(sts);

	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	/* Read the system timestamp post PHC read */
	ptp_read_system_postts(sts);

	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	if (lo2 < lo) {
		/* if TIME_L rolled over read TIME_L again and update
		 * system timestamps
		 */
		ptp_read_system_prets(sts);
		lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
		ptp_read_system_postts(sts);
		hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	}

	return ((u64)hi << 32) | lo;
}

/**
 * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
 * @cached_phc_time: recently cached copy of PHC time
 * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
 *
 * Hardware captures timestamps which contain only 32 bits of nominal
 * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
 * Note that the captured timestamp values may be 40 bits, but the lower
 * 8 bits are sub-nanoseconds and generally discarded.
 *
 * Extend the 32bit nanosecond timestamp using the following algorithm and
 * assumptions:
 *
 * 1) have a recently cached copy of the PHC time
 * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1
 *    seconds) before or after the PHC time was captured.
 * 3) calculate the delta between the cached time and the timestamp
 * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
 *    captured after the PHC time. In this case, the full timestamp is just
 *    the cached PHC time plus the delta.
 * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
 *    timestamp was captured *before* the PHC time, i.e. because the PHC
 *    cache was updated after the timestamp was captured by hardware. In this
 *    case, the full timestamp is the cached time minus the inverse delta.
 *
 * This algorithm works even if the PHC time was updated after a Tx timestamp
 * was requested, but before the Tx timestamp event was reported from
 * hardware.
 *
 * This calculation primarily relies on keeping the cached PHC time up to
 * date. If the timestamp was captured more than 2^31 nanoseconds after the
 * PHC time, it is possible that the lower 32bits of PHC time have
 * overflowed more than once, and we might generate an incorrect timestamp.
 *
 * This is prevented by (a) periodically updating the cached PHC time once
 * a second, and (b) discarding any Tx timestamp packet if it has waited for
 * a timestamp for more than one second.
 */
static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
{
	u32 delta, phc_time_lo;
	u64 ns;

	/* Extract the lower 32 bits of the PHC time */
	phc_time_lo = (u32)cached_phc_time;

	/* Calculate the delta between the lower 32bits of the cached PHC
	 * time and the in_tstamp value
	 */
	delta = (in_tstamp - phc_time_lo);

	/* Do not assume that the in_tstamp is always more recent than the
	 * cached PHC time. If the delta is large, the in_tstamp was captured
	 * before the cached PHC time, so extend it by subtracting the
	 * reversed delta instead.
	 */
	if (delta > (U32_MAX / 2)) {
		/* reverse the delta calculation here */
		delta = (phc_time_lo - in_tstamp);
		ns = cached_phc_time - delta;
	} else {
		ns = cached_phc_time + delta;
	}

	return ns;
}

/**
 * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
 * @pf: Board private structure
 * @in_tstamp: Ingress/egress 40b timestamp value
 *
 * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
 * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
 *
 * *--------------------------------------------------------------*
 * | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
 * *--------------------------------------------------------------*
 *
 * The low bit is an indicator of whether the timestamp is valid. The next
 * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
 * and the remaining 32 bits are the lower 32 bits of the PHC timer.
 *
 * It is assumed that the caller verifies the timestamp is valid prior to
 * calling this function.
 *
 * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
 * time stored in the device private PTP structure as the basis for timestamp
 * extension.
 *
 * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
 * algorithm.
 */
static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
{
	const u64 mask = GENMASK_ULL(31, 0);
	unsigned long discard_time;

	/* Discard the hardware timestamp if the cached PHC time is too old */
	discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
	if (time_is_before_jiffies(discard_time)) {
		pf->ptp.tx_hwtstamp_discarded++;
		return 0;
	}

	return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
				     (in_tstamp >> 8) & mask);
}
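
/* Worked example of the extension math above, with illustrative numbers
 * only: assume cached_phc_time = 0x00000005F0000000 and a captured
 * in_tstamp of 0x10000000.
 *
 *	phc_time_lo = 0xF0000000
 *	delta       = 0x10000000 - 0xF0000000 = 0x20000000  (< 2^31)
 *	ns          = cached_phc_time + delta = 0x0000000610000000
 *
 * Had the delta come out above 2^31, the timestamp would instead be treated
 * as captured before the cached PHC time and the reversed delta subtracted.
 */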

/**
 * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps
 * @tx: the PTP Tx timestamp tracker to check
 *
 * Check that a given PTP Tx timestamp tracker is up, i.e. that it is ready
 * to accept new timestamp requests.
 *
 * Assumes the tx->lock spinlock is already held.
 */
static bool
ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
{
	lockdep_assert_held(&tx->lock);

	return tx->init && !tx->calibrating;
}

/**
 * ice_ptp_req_tx_single_tstamp - Request Tx timestamp for a port from FW
 * @tx: the PTP Tx timestamp tracker
 * @idx: index of the timestamp to request
 */
void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
{
	struct ice_e810_params *params;
	struct ice_ptp_port *ptp_port;
	unsigned long flags;
	struct sk_buff *skb;
	struct ice_pf *pf;

	if (!tx->init)
		return;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	params = &pf->hw.ptp.phy.e810;

	/* Drop packets which have waited for more than 2 seconds */
	if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
		/* Count the number of Tx timestamps that timed out */
		pf->ptp.tx_hwtstamp_timeouts++;

		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		clear_bit(idx, tx->in_use);

		dev_kfree_skb_any(skb);
		return;
	}

	ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);

	spin_lock_irqsave(&params->atqbal_wq.lock, flags);

	params->atqbal_flags |= ATQBAL_FLAGS_INTR_IN_PROGRESS;

	/* Write TS index to read to the PF register so the FW can read it */
	wr32(&pf->hw, REG_LL_PROXY_H,
	     REG_LL_PROXY_H_TS_INTR_ENA | FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) |
	     REG_LL_PROXY_H_EXEC);
	tx->last_ll_ts_idx_read = idx;

	spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
}

/**
 * ice_ptp_complete_tx_single_tstamp - Complete Tx timestamp for a port
 * @tx: the PTP Tx timestamp tracker
 */
void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
{
	struct skb_shared_hwtstamps shhwtstamps = {};
	u8 idx = tx->last_ll_ts_idx_read;
	struct ice_e810_params *params;
	struct ice_ptp_port *ptp_port;
	u64 raw_tstamp, tstamp;
	bool drop_ts = false;
	struct sk_buff *skb;
	unsigned long flags;
	struct device *dev;
	struct ice_pf *pf;
	u32 reg_ll_high;

	if (!tx->init || tx->last_ll_ts_idx_read < 0)
		return;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	dev = ice_pf_to_dev(pf);
	params = &pf->hw.ptp.phy.e810;

	ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);

	spin_lock_irqsave(&params->atqbal_wq.lock, flags);

	if (!(params->atqbal_flags & ATQBAL_FLAGS_INTR_IN_PROGRESS))
		dev_dbg(dev, "%s: low latency interrupt request not in progress?\n",
			__func__);

	/* Read the low 32 bit value */
	raw_tstamp = rd32(&pf->hw, REG_LL_PROXY_L);
	/* Read the status together with high TS part */
	reg_ll_high = rd32(&pf->hw, REG_LL_PROXY_H);

	/* Wake up threads waiting on low latency interface */
	params->atqbal_flags &= ~ATQBAL_FLAGS_INTR_IN_PROGRESS;

	wake_up_locked(&params->atqbal_wq);

	spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);

	/* When the bit is cleared, the TS is ready in the register */
	if (reg_ll_high & REG_LL_PROXY_H_EXEC) {
		dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready");
		return;
	}

	/* High 8 bit value of the TS is on the bits 16:23 */
	raw_tstamp |= ((u64)FIELD_GET(REG_LL_PROXY_H_TS_HIGH, reg_ll_high)) << 32;

	/* Devices using this interface always verify the timestamp differs
	 * relative to the last cached timestamp value.
	 */
	if (raw_tstamp == tx->tstamps[idx].cached_tstamp)
		return;

	tx->tstamps[idx].cached_tstamp = raw_tstamp;
	clear_bit(idx, tx->in_use);
	skb = tx->tstamps[idx].skb;
	tx->tstamps[idx].skb = NULL;
	if (test_and_clear_bit(idx, tx->stale))
		drop_ts = true;

	if (!skb)
		return;

	if (drop_ts) {
		dev_kfree_skb_any(skb);
		return;
	}

	/* Extend the timestamp using cached PHC time */
	tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
	if (tstamp) {
		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
		ice_trace(tx_tstamp_complete, skb, idx);
	}

	skb_tstamp_tx(skb, &shhwtstamps);
	dev_kfree_skb_any(skb);
}

/**
 * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port
 * @tx: the PTP Tx timestamp tracker
 *
 * Process timestamps captured by the PHY associated with this port. To do
 * this, loop over each index with a waiting skb.
 *
 * If a given index has a valid timestamp, perform the following steps:
 *
 * 1) check that the timestamp request is not stale
 * 2) check that a timestamp is ready and available in the PHY memory bank
 * 3) read and copy the timestamp out of the PHY register
 * 4) unlock the index by clearing the associated in_use bit
 * 5) check if the timestamp is stale, and discard if so
 * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value
 * 7) send this 64 bit timestamp to the stack
 *
 * Note that we do not hold the tracking lock while reading the Tx timestamp.
 * This is because reading the timestamp requires taking a mutex that might
 * sleep.
 *
 * The only place where we set in_use is when a new timestamp is initiated
 * with a slot index. This is only called in the hard xmit routine where an
 * SKB has a request flag set. The only places where we clear this bit are
 * this function, and the teardown path when the Tx timestamp tracker is
 * being removed. A timestamp index will never be re-used until the in_use
 * bit for that index is cleared.
 *
 * If a Tx thread starts a new timestamp, we might not begin processing it
 * right away but we will notice it at the end when we re-queue the task.
 *
 * If a Tx thread starts a new timestamp just after this function exits, the
 * interrupt for that timestamp should re-trigger this function once
 * a timestamp is ready.
 *
 * In cases where the PTP hardware clock was directly adjusted, some
 * timestamps may not be able to safely use the timestamp extension math. In
 * this case, software will set the stale bit for any outstanding Tx
 * timestamps when the clock is adjusted. Then this function will discard
 * those captured timestamps instead of sending them to the stack.
 *
 * If a Tx packet has been waiting for more than 2 seconds, it is not possible
 * to correctly extend the timestamp using the cached PHC time. It is
 * extremely unlikely that a packet will ever take this long to timestamp. If
 * we detect a Tx timestamp request that has waited for this long we assume
 * the packet will never be sent by hardware and discard it without reading
 * the timestamp register.
 */
static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
{
	struct ice_ptp_port *ptp_port;
	unsigned long flags;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u64 tstamp_ready;
	bool link_up;
	int err;
	u8 idx;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	hw = &pf->hw;

	/* Read the Tx ready status first */
	if (tx->has_ready_bitmap) {
		err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
		if (err)
			return;
	}

	/* Drop packets if the link went down */
	link_up = ptp_port->link_up;

	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct skb_shared_hwtstamps shhwtstamps = {};
		u8 phy_idx = idx + tx->offset;
		u64 raw_tstamp = 0, tstamp;
		bool drop_ts = !link_up;
		struct sk_buff *skb;

		/* Drop packets which have waited for more than 2 seconds */
		if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
			drop_ts = true;

			/* Count the number of Tx timestamps that timed out */
			pf->ptp.tx_hwtstamp_timeouts++;
		}

		/* Only read a timestamp from the PHY if it's marked as ready
		 * by the tstamp_ready register. This avoids unnecessary
		 * reading of timestamps which are not yet valid. This is
		 * important as we must read all timestamps which are valid
		 * and only timestamps which are valid during each interrupt.
		 * If we do not, the hardware logic for generating a new
		 * interrupt can get stuck on some devices.
		 */
		if (tx->has_ready_bitmap &&
		    !(tstamp_ready & BIT_ULL(phy_idx))) {
			if (drop_ts)
				goto skip_ts_read;

			continue;
		}

		ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);

		err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp);
		if (err && !drop_ts)
			continue;

		ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);

		/* For PHYs which don't implement a proper timestamp ready
		 * bitmap, verify that the timestamp value is different
		 * from the last cached timestamp. If it is not, skip this for
		 * now assuming it hasn't yet been captured by hardware.
		 */
		if (!drop_ts && !tx->has_ready_bitmap &&
		    raw_tstamp == tx->tstamps[idx].cached_tstamp)
			continue;

		/* Discard any timestamp value without the valid bit set */
		if (!(raw_tstamp & ICE_PTP_TS_VALID))
			drop_ts = true;

skip_ts_read:
		spin_lock_irqsave(&tx->lock, flags);
		if (!tx->has_ready_bitmap && raw_tstamp)
			tx->tstamps[idx].cached_tstamp = raw_tstamp;
		clear_bit(idx, tx->in_use);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		if (test_and_clear_bit(idx, tx->stale))
			drop_ts = true;
		spin_unlock_irqrestore(&tx->lock, flags);

		/* It is unlikely but possible that the SKB will have been
		 * flushed at this point due to link change or teardown.
		 */
		if (!skb)
			continue;

		if (drop_ts) {
			dev_kfree_skb_any(skb);
			continue;
		}

		/* Extend the timestamp using cached PHC time */
		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
		if (tstamp) {
			shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
			ice_trace(tx_tstamp_complete, skb, idx);
		}

		skb_tstamp_tx(skb, &shhwtstamps);
		dev_kfree_skb_any(skb);
	}
}

/**
 * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device
 * @pf: Board private structure
 */
static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
{
	struct ice_ptp_port *port;
	unsigned int i;

	mutex_lock(&pf->adapter->ports.lock);
	list_for_each_entry(port, &pf->adapter->ports.ports, list_node) {
		struct ice_ptp_tx *tx = &port->tx;

		if (!tx || !tx->init)
			continue;

		ice_ptp_process_tx_tstamp(tx);
	}
	mutex_unlock(&pf->adapter->ports.lock);

	for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) {
		u64 tstamp_ready;
		int err;

		/* Read the Tx ready status first */
		err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
		if (err)
			break;
		else if (tstamp_ready)
			return ICE_TX_TSTAMP_WORK_PENDING;
	}

	return ICE_TX_TSTAMP_WORK_DONE;
}

/**
 * ice_ptp_tx_tstamp - Process Tx timestamps for this function.
 * @tx: Tx tracking structure to process
 *
 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete
 * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise.
 */
static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
{
	bool more_timestamps;
	unsigned long flags;

	if (!tx->init)
		return ICE_TX_TSTAMP_WORK_DONE;

	/* Process the Tx timestamp tracker */
	ice_ptp_process_tx_tstamp(tx);

	/* Check if there are outstanding Tx timestamps */
	spin_lock_irqsave(&tx->lock, flags);
	more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (more_timestamps)
		return ICE_TX_TSTAMP_WORK_PENDING;

	return ICE_TX_TSTAMP_WORK_DONE;
}

/**
 * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
 * @tx: Tx tracking structure to initialize
 *
 * Assumes that the length has already been initialized. Do not call directly,
 * use one of the ice_ptp_init_tx_* helpers instead.
 */
static int
ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
{
	unsigned long *in_use, *stale;
	struct ice_tx_tstamp *tstamps;

	tstamps = kcalloc(tx->len, sizeof(*tstamps), GFP_KERNEL);
	in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
	stale = bitmap_zalloc(tx->len, GFP_KERNEL);

	if (!tstamps || !in_use || !stale) {
		kfree(tstamps);
		bitmap_free(in_use);
		bitmap_free(stale);

		return -ENOMEM;
	}

	tx->tstamps = tstamps;
	tx->in_use = in_use;
	tx->stale = stale;
	tx->init = 1;
	tx->last_ll_ts_idx_read = -1;

	spin_lock_init(&tx->lock);

	return 0;
}

/**
 * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
 * @pf: Board private structure
 * @tx: the tracker to flush
 *
 * Called during teardown when a Tx tracker is being removed.
 */
static void
ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	struct ice_hw *hw = &pf->hw;
	unsigned long flags;
	u64 tstamp_ready;
	int err;
	u8 idx;

	err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
	if (err) {
		dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n",
			tx->block, err);

		/* If we fail to read the Tx timestamp ready bitmap just
		 * skip clearing the PHY timestamps.
		 */
		tstamp_ready = 0;
	}

	for_each_set_bit(idx, tx->in_use, tx->len) {
		u8 phy_idx = idx + tx->offset;
		struct sk_buff *skb;

		/* In case this timestamp is ready, we need to clear it. */
		if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx)))
			ice_clear_phy_tstamp(hw, tx->block, phy_idx);

		spin_lock_irqsave(&tx->lock, flags);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		clear_bit(idx, tx->in_use);
		clear_bit(idx, tx->stale);
		spin_unlock_irqrestore(&tx->lock, flags);

		/* Count the number of Tx timestamps flushed */
		pf->ptp.tx_hwtstamp_flushed++;

		/* Free the SKB after we've cleared the bit */
		dev_kfree_skb_any(skb);
	}
}

/**
 * ice_ptp_mark_tx_tracker_stale - Mark unfinished timestamps as stale
 * @tx: the tracker to mark
 *
 * Mark currently outstanding Tx timestamps as stale. This prevents sending
 * their timestamp value to the stack. This is required to prevent extending
 * the 40bit hardware timestamp incorrectly.
 *
 * This should be called when the PTP clock is modified such as after a set
 * time request.
 */
static void
ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
{
	unsigned long flags;

	spin_lock_irqsave(&tx->lock, flags);
	bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len);
	spin_unlock_irqrestore(&tx->lock, flags);
}

/**
 * ice_ptp_flush_all_tx_tracker - Flush all timestamp trackers on this clock
 * @pf: Board private structure
 *
 * Called by the clock owner to flush all the Tx timestamp trackers associated
 * with the clock.
 */
static void
ice_ptp_flush_all_tx_tracker(struct ice_pf *pf)
{
	struct ice_ptp_port *port;

	list_for_each_entry(port, &pf->adapter->ports.ports, list_node)
		ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx);
}

/**
 * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
 * @pf: Board private structure
 * @tx: Tx tracking structure to release
 *
 * Free memory associated with the Tx timestamp tracker.
 */
static void
ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	unsigned long flags;

	spin_lock_irqsave(&tx->lock, flags);
	tx->init = 0;
	spin_unlock_irqrestore(&tx->lock, flags);

	/* wait for potentially outstanding interrupt to complete */
	synchronize_irq(pf->oicr_irq.virq);

	ice_ptp_flush_tx_tracker(pf, tx);

	kfree(tx->tstamps);
	tx->tstamps = NULL;

	bitmap_free(tx->in_use);
	tx->in_use = NULL;

	bitmap_free(tx->stale);
	tx->stale = NULL;

	tx->len = 0;
}

/**
 * ice_ptp_init_tx_eth56g - Initialize tracking for Tx timestamps
 * @pf: Board private structure
 * @tx: the Tx tracking structure to initialize
 * @port: the port this structure tracks
 *
 * Initialize the Tx timestamp tracker for this port. ETH56G PHYs
 * have independent memory blocks for all ports.
 *
 * Return: 0 for success, -ENOMEM when failed to allocate Tx tracker
 */
static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx,
				  u8 port)
{
	tx->block = port;
	tx->offset = 0;
	tx->len = INDEX_PER_PORT_ETH56G;
	tx->has_ready_bitmap = 1;

	return ice_ptp_alloc_tx_tracker(tx);
}

/**
 * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
 * @pf: Board private structure
 * @tx: the Tx tracking structure to initialize
 * @port: the port this structure tracks
 *
 * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
 * the timestamp block is shared for all ports in the same quad. To avoid
 * ports using the same timestamp index, logically break the block of
 * registers into chunks based on the port number.
 *
 * Return: 0 for success, -ENOMEM when failed to allocate Tx tracker
 */
static int
ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
	tx->block = ICE_GET_QUAD_NUM(port);
	tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
	tx->len = INDEX_PER_PORT_E82X;
	tx->has_ready_bitmap = 1;

	return ice_ptp_alloc_tx_tracker(tx);
}

/**
 * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
 * @pf: Board private structure
 * @tx: the Tx tracking structure to initialize
 *
 * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
 * port has its own block of timestamps, independent of the other ports.
 *
 * Return: 0 for success, -ENOMEM when failed to allocate Tx tracker
 */
static int
ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	tx->block = pf->hw.port_info->lport;
	tx->offset = 0;
	tx->len = INDEX_PER_PORT_E810;
	/* The E810 PHY does not provide a timestamp ready bitmap. Instead,
	 * verify new timestamps against cached copy of the last read
	 * timestamp.
	 */
	tx->has_ready_bitmap = 0;

	return ice_ptp_alloc_tx_tracker(tx);
}

/**
 * ice_ptp_update_cached_phctime - Update the cached PHC time values
 * @pf: Board specific private structure
 *
 * This function updates the system time values which are cached in the PF
 * structure and the Rx rings.
 *
 * This function must be called periodically to ensure that the cached value
 * is never more than 2 seconds old.
 *
 * Note that the cached copy in the PF PTP structure is always updated, even
 * if we can't update the copy in the Rx rings.
 *
 * Return:
 * * 0 - OK, successfully updated
 * * -EAGAIN - PF was busy, need to reschedule the update
 */
static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	unsigned long update_before;
	u64 systime;
	int i;

	update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
	if (pf->ptp.cached_phc_time &&
	    time_is_before_jiffies(update_before)) {
		unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;

		dev_warn(dev, "%u msecs passed between update to cached PHC time\n",
			 jiffies_to_msecs(time_taken));
		pf->ptp.late_cached_phc_updates++;
	}

	/* Read the current PHC time */
	systime = ice_ptp_read_src_clk_reg(pf, NULL);

	/* Update the cached PHC time stored in the PF structure */
	WRITE_ONCE(pf->ptp.cached_phc_time, systime);
	WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);

	if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
		return -EAGAIN;

	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];
		int j;

		if (!vsi)
			continue;

		if (vsi->type != ICE_VSI_PF)
			continue;

		ice_for_each_rxq(vsi, j) {
			if (!vsi->rx_rings[j])
				continue;
			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
		}
	}
	clear_bit(ICE_CFG_BUSY, pf->state);

	return 0;
}

/**
 * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
 * @pf: Board specific private structure
 *
 * This function must be called when the cached PHC time is no longer valid,
 * such as after a time adjustment. It marks any currently outstanding Tx
 * timestamps as stale and updates the cached PHC time for both the PF and Rx
 * rings.
 *
 * If updating the PHC time cannot be done immediately, a warning message is
 * logged and the work item is scheduled immediately to minimize the window
 * with a wrong cached timestamp.
 */
static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	/* Update the cached PHC time immediately if possible, otherwise
	 * schedule the work item to execute soon.
	 */
	err = ice_ptp_update_cached_phctime(pf);
	if (err) {
		/* If another thread is updating the Rx rings, we won't
		 * properly reset them here. This could lead to reporting of
		 * invalid timestamps, but there isn't much we can do.
		 */
		dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
			 __func__);

		/* Queue the work item to update the Rx rings when possible */
		kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
					   msecs_to_jiffies(10));
	}

	/* Mark any outstanding timestamps as stale, since they might have
	 * been captured in hardware before the time update. This could lead
	 * to us extending them with the wrong cached value resulting in
	 * incorrect timestamp values.
	 */
	ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx);
}

/**
 * ice_ptp_write_init - Set PHC time to provided value
 * @pf: Board private structure
 * @ts: timespec structure that holds the new time value
 *
 * Set the PHC time to the value specified in the timespec.
 */
static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
{
	u64 ns = timespec64_to_ns(ts);
	struct ice_hw *hw = &pf->hw;

	return ice_ptp_init_time(hw, ns);
}

/**
 * ice_ptp_write_adj - Adjust PHC clock time atomically
 * @pf: Board private structure
 * @adj: Adjustment in nanoseconds
 *
 * Perform an atomic adjustment of the PHC time by the specified number of
 * nanoseconds.
 */
static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
{
	struct ice_hw *hw = &pf->hw;

	return ice_ptp_adj_clock(hw, adj);
}

/**
 * ice_base_incval - Get base timer increment value
 * @pf: Board private structure
 *
 * Look up the base timer increment value for this device. The base increment
 * value is used to define the nominal clock tick rate. This increment value
 * is programmed during device initialization. It is also used as the basis
 * for calculating adjustments using scaled_ppm.
 */
static u64 ice_base_incval(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u64 incval;

	incval = ice_get_base_incval(hw);

	dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
		incval);

	return incval;
}

/**
 * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state
 * @port: PTP port for which Tx FIFO is checked
 */
static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
{
	int offs = port->port_num % ICE_PORTS_PER_QUAD;
	int quad = ICE_GET_QUAD_NUM(port->port_num);
	struct ice_pf *pf;
	struct ice_hw *hw;
	u32 val, phy_sts;
	int err;

	pf = ptp_port_to_pf(port);
	hw = &pf->hw;

	if (port->tx_fifo_busy_cnt == FIFO_OK)
		return 0;

	/* need to read FIFO state */
	if (offs == 0 || offs == 1)
		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS,
					     &val);
	else
		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS,
					     &val);

	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
			port->port_num, err);
		return err;
	}

	if (offs & 0x1)
		phy_sts = FIELD_GET(Q_REG_FIFO13_M, val);
	else
		phy_sts = FIELD_GET(Q_REG_FIFO02_M, val);

	if (phy_sts & FIFO_EMPTY) {
		port->tx_fifo_busy_cnt = FIFO_OK;
		return 0;
	}

	port->tx_fifo_busy_cnt++;

	dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
		port->tx_fifo_busy_cnt, port->port_num);

	if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
		dev_dbg(ice_pf_to_dev(pf),
			"Port %d Tx FIFO still not empty; resetting quad %d\n",
			port->port_num, quad);
		ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
		port->tx_fifo_busy_cnt = FIFO_OK;
		return 0;
	}

	return -EAGAIN;
}

/**
 * ice_ptp_wait_for_offsets - Check for valid Tx and Rx offsets
 * @work: Pointer to the kthread_work structure for this task
 *
 * Check whether hardware has completed measuring the Tx and Rx offset values
 * used to configure and enable vernier timestamp calibration.
 *
 * Once the offset in either direction is measured, configure the associated
 * registers with the calibrated offset values and enable timestamping. The Tx
 * and Rx directions are configured independently as soon as their associated
 * offsets are known.
 *
 * This function reschedules itself until both Tx and Rx calibration have
 * completed.
 */
static void ice_ptp_wait_for_offsets(struct kthread_work *work)
{
	struct ice_ptp_port *port;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int tx_err;
	int rx_err;

	port = container_of(work, struct ice_ptp_port, ov_work.work);
	pf = ptp_port_to_pf(port);
	hw = &pf->hw;

	if (ice_is_reset_in_progress(pf->state)) {
		/* wait for device driver to complete reset */
		kthread_queue_delayed_work(pf->ptp.kworker,
					   &port->ov_work,
					   msecs_to_jiffies(100));
		return;
	}

	tx_err = ice_ptp_check_tx_fifo(port);
	if (!tx_err)
		tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num);
	rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num);
	if (tx_err || rx_err) {
		/* Tx and/or Rx offset not yet configured, try again later */
		kthread_queue_delayed_work(pf->ptp.kworker,
					   &port->ov_work,
					   msecs_to_jiffies(100));
		return;
	}
}

/**
 * ice_ptp_port_phy_stop - Stop timestamping for a PHY port
 * @ptp_port: PTP port to stop
 */
static int
ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
{
	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
	u8 port = ptp_port->port_num;
	struct ice_hw *hw = &pf->hw;
	int err;

	if (ice_is_e810(hw))
		return 0;

	mutex_lock(&ptp_port->ps_lock);

	switch (ice_get_phy_model(hw)) {
	case ICE_PHY_ETH56G:
		err = ice_stop_phy_timer_eth56g(hw, port, true);
		break;
	case ICE_PHY_E82X:
		kthread_cancel_delayed_work_sync(&ptp_port->ov_work);

		err = ice_stop_phy_timer_e82x(hw, port, true);
		break;
	default:
		err = -ENODEV;
	}
	if (err && err != -EBUSY)
		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
			port, err);

	mutex_unlock(&ptp_port->ps_lock);

	return err;
}

/**
 * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping
 * @ptp_port: PTP port for which the PHY start is set
 *
 * Start the PHY timestamping block, and initiate Vernier timestamping
 * calibration. If timestamping cannot be calibrated (such as if link is down)
 * then disable the timestamping block instead.
 */
static int
ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
{
	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
	u8 port = ptp_port->port_num;
	struct ice_hw *hw = &pf->hw;
	unsigned long flags;
	int err;

	if (ice_is_e810(hw))
		return 0;

	if (!ptp_port->link_up)
		return ice_ptp_port_phy_stop(ptp_port);

	mutex_lock(&ptp_port->ps_lock);

	switch (ice_get_phy_model(hw)) {
	case ICE_PHY_ETH56G:
		err = ice_start_phy_timer_eth56g(hw, port);
		break;
	case ICE_PHY_E82X:
		/* Start the PHY timer in Vernier mode */
		kthread_cancel_delayed_work_sync(&ptp_port->ov_work);

		/* temporarily disable Tx timestamps while calibrating
		 * PHY offset
		 */
		spin_lock_irqsave(&ptp_port->tx.lock, flags);
		ptp_port->tx.calibrating = true;
		spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
		ptp_port->tx_fifo_busy_cnt = 0;

		/* Start the PHY timer in Vernier mode */
		err = ice_start_phy_timer_e82x(hw, port);
		if (err)
			break;

		/* Enable Tx timestamps right away */
		spin_lock_irqsave(&ptp_port->tx.lock, flags);
		ptp_port->tx.calibrating = false;
		spin_unlock_irqrestore(&ptp_port->tx.lock, flags);

		kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
					   0);
		break;
	default:
		err = -ENODEV;
	}

	if (err)
		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
			port, err);

	mutex_unlock(&ptp_port->ps_lock);

	return err;
}

/**
 * ice_ptp_link_change - Reconfigure PTP after link status change
 * @pf: Board private structure
 * @port: Port for which the PHY start is set
 * @linkup: Link is up or down
 */
void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
{
	struct ice_ptp_port *ptp_port;
	struct ice_hw *hw = &pf->hw;

	if (pf->ptp.state != ICE_PTP_READY)
		return;

	if (WARN_ON_ONCE(port >= hw->ptp.num_lports))
		return;

	ptp_port = &pf->ptp.port;
	if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
		port *= 2;
	if (WARN_ON_ONCE(ptp_port->port_num != port))
		return;

	/* Update cached link status for this port immediately */
	ptp_port->link_up = linkup;

	/* Skip HW writes if reset is in progress */
	if (pf->hw.reset_ongoing)
		return;
	switch (ice_get_phy_model(hw)) {
	case ICE_PHY_E810:
		/* Do not reconfigure E810 PHY */
		return;
	case ICE_PHY_ETH56G:
	case ICE_PHY_E82X:
		ice_ptp_port_phy_restart(ptp_port);
		return;
	default:
		dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__);
	}
}

/**
 * ice_ptp_cfg_phy_interrupt - Configure PHY interrupt settings
 * @pf: PF private structure
 * @ena: bool value to enable or disable interrupt
 * @threshold: Minimum number of packets at which intr is triggered
 *
 * Utility function to configure all the PHY interrupt settings, including
 * whether the PHY interrupt is enabled, and what threshold to use. Also
 * configures the E82X timestamp owner to react to interrupts from all PHYs.
 *
 * Return: 0 on success, -EOPNOTSUPP when the PHY model is unsupported, or
 * another error code if configuring the PHY interrupt fails.
 */
static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	ice_ptp_reset_ts_memory(hw);

	switch (ice_get_phy_model(hw)) {
	case ICE_PHY_ETH56G: {
		int port;

		for (port = 0; port < hw->ptp.num_lports; port++) {
			int err;

			err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
			if (err) {
				dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
					port, err);
				return err;
			}
		}

		return 0;
	}
	case ICE_PHY_E82X: {
		int quad;

		for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
		     quad++) {
			int err;

			err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
			if (err) {
				dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
					quad, err);
				return err;
			}
		}

		return 0;
	}
	case ICE_PHY_E810:
		return 0;
	case ICE_PHY_UNSUP:
	default:
		dev_warn(dev, "%s: Unexpected PHY model %d\n", __func__,
			 ice_get_phy_model(hw));
		return -EOPNOTSUPP;
	}
}

/**
 * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block
 * @pf: Board private structure
 */
static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
{
	ice_ptp_port_phy_restart(&pf->ptp.port);
}

/**
 * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping
 * @pf: Board private structure
 */
static void ice_ptp_restart_all_phy(struct ice_pf *pf)
{
	struct list_head *entry;

	list_for_each(entry, &pf->adapter->ports.ports) {
		struct ice_ptp_port *port = list_entry(entry,
						       struct ice_ptp_port,
						       list_node);

		if (port->link_up)
			ice_ptp_port_phy_restart(port);
	}
}
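
/* Illustrative math only, with an assumed base increment: scaled_ppm
 * carries parts per million with a 16 bit fractional part, so
 * scaled_ppm = 65536 corresponds to +1 ppm. adjust_by_scaled_ppm() in
 * ice_ptp_adjfine() below scales the base increment accordingly, e.g. for
 * a hypothetical base increment of 0x100000000 (4294967296):
 *
 *	incval = 4294967296 + 4294967296 * 1 / 1000000
 *	       ~ 4294967296 + 4295
 *
 * so the clock runs roughly 1 ppm fast; a negative scaled_ppm shrinks the
 * increment and slows the clock instead.
 */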

/**
 * ice_ptp_adjfine - Adjust clock increment rate
 * @info: the driver's PTP info structure
 * @scaled_ppm: Parts per million with 16-bit fractional field
 *
 * Adjust the frequency of the clock by the indicated scaled ppm from the
 * base frequency.
 */
static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;
	u64 incval;
	int err;

	incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm);
	err = ice_ptp_write_incval_locked(hw, incval);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
			err);
		return -EIO;
	}

	return 0;
}

/**
 * ice_ptp_extts_event - Process PTP external clock event
 * @pf: Board private structure
 */
void ice_ptp_extts_event(struct ice_pf *pf)
{
	struct ptp_clock_event event;
	struct ice_hw *hw = &pf->hw;
	u8 chan, tmr_idx;
	u32 hi, lo;

	/* Don't process timestamp events if PTP is not ready */
	if (pf->ptp.state != ICE_PTP_READY)
		return;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
	/* Event time is captured by one of the two matched registers
	 * GLTSYN_EVNT_L: 32 LSB of sampled time event
	 * GLTSYN_EVNT_H: 32 MSB of sampled time event
	 * Event is defined in GLTSYN_EVNT_0 register
	 */
	for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
		int pin_desc_idx;

		/* Check if channel is enabled */
		if (!(pf->ptp.ext_ts_irq & (1 << chan)))
			continue;

		lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
		hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
		event.timestamp = (u64)hi << 32 | lo;

		/* Add delay compensation */
		pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
		if (pin_desc_idx >= 0) {
			const struct ice_ptp_pin_desc *desc;

			desc = &pf->ptp.ice_pin_desc[pin_desc_idx];
			event.timestamp -= desc->delay[0];
		}

		event.type = PTP_CLOCK_EXTTS;
		event.index = chan;
		pf->ptp.ext_ts_irq &= ~(1 << chan);
		ptp_clock_event(pf->ptp.clock, &event);
	}
}
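
/* Illustrative sketch, not part of this driver: the requests handled by
 * ice_ptp_cfg_extts() below normally arrive from user space through the PTP
 * character device (device path and channel index assumed), roughly like:
 *
 *	struct ptp_extts_request req = {
 *		.index = 0,
 *		.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE,
 *	};
 *
 *	ioctl(ptp_fd, PTP_EXTTS_REQUEST2, &req);
 *
 * Each captured edge is then reported back as a PTP_CLOCK_EXTTS event via
 * ptp_clock_event(), as done in ice_ptp_extts_event() above.
 */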

/**
 * ice_ptp_cfg_extts - Configure EXTTS pin and channel
 * @pf: Board private structure
 * @rq: External timestamp request
 * @on: Enable/disable flag
 *
 * Configure an external timestamp event on the requested channel.
 *
 * Return: 0 on success, negative error code otherwise
 */
static int ice_ptp_cfg_extts(struct ice_pf *pf, struct ptp_extts_request *rq,
			     int on)
{
	u32 aux_reg, gpio_reg, irq_reg;
	struct ice_hw *hw = &pf->hw;
	unsigned int chan, gpio_pin;
	int pin_desc_idx;
	u8 tmr_idx;

	/* Reject requests with unsupported flags */
	if (rq->flags & ~(PTP_ENABLE_FEATURE |
			  PTP_RISING_EDGE |
			  PTP_FALLING_EDGE |
			  PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
	chan = rq->index;

	pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
	if (pin_desc_idx < 0)
		return -EIO;

	gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[0];
	irq_reg = rd32(hw, PFINT_OICR_ENA);

	if (on) {
		/* Enable the interrupt */
		irq_reg |= PFINT_OICR_TSYN_EVNT_M;
		aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;

#define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE	BIT(0)
#define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE	BIT(1)

		/* set event level to requested edge */
		if (rq->flags & PTP_FALLING_EDGE)
			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
		if (rq->flags & PTP_RISING_EDGE)
			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;

		/* Write GPIO CTL reg.
		 * 0x1 is input sampled by EVENT register (channel)
		 * + num_in_channels * tmr_idx
		 */
		gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M,
				      1 + chan + (tmr_idx * 3));
	} else {
		bool last_enabled = true;

		/* clear the values we set to reset defaults */
		aux_reg = 0;
		gpio_reg = 0;

		for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++)
			if ((pf->ptp.extts_rqs[i].flags &
			     PTP_ENABLE_FEATURE) &&
			    i != chan) {
				last_enabled = false;
			}

		if (last_enabled)
			irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
	}

	wr32(hw, PFINT_OICR_ENA, irq_reg);
	wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);

	return 0;
}

/**
 * ice_ptp_disable_all_extts - Disable all EXTTS channels
 * @pf: Board private structure
 */
static void ice_ptp_disable_all_extts(struct ice_pf *pf)
{
	for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++)
		if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE)
			ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i],
					  false);

	synchronize_irq(pf->oicr_irq.virq);
}

/**
 * ice_ptp_enable_all_extts - Enable all EXTTS channels
 * @pf: Board private structure
 *
 * Called during reset to restore user configuration.
 */
static void ice_ptp_enable_all_extts(struct ice_pf *pf)
{
	for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++)
		if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE)
			ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i],
					  true);
}

/**
 * ice_ptp_write_perout - Write periodic wave parameters to HW
 * @hw: pointer to the HW struct
 * @chan: target channel
 * @gpio_pin: target GPIO pin
 * @start: target time to start periodic output
 * @period: target period
 *
 * Return: 0 on success, negative error code otherwise
 */
static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
				unsigned int gpio_pin, u64 start, u64 period)
{
	u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
	u32 val = 0;

	/* 0. Reset mode & out_en in AUX_OUT */
	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);

	if (ice_is_e825c(hw)) {
		int err;

		/* Enable/disable CGU 1PPS output for E825C */
		err = ice_cgu_cfg_pps_out(hw, !!period);
		if (err)
			return err;
	}

	/* 1. Write perout with half of required period value.
	 * HW toggles output when source clock hits the TGT and then adds
	 * GLTSYN_CLKO value to the target, so it ends up with 50% duty cycle.
	 */
	period >>= 1;

	/* For proper operation, GLTSYN_CLKO must be larger than the clock
	 * tick, and the period has to fit in a 32 bit register.
	 */
#define MIN_PULSE 3
	if (!!period && (period <= MIN_PULSE || period > U32_MAX)) {
		dev_err(ice_hw_to_dev(hw), "CLK period ticks must be >= %d && <= 2^32",
			MIN_PULSE);
		return -EIO;
	}

	wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));

	/* 2. Write TARGET time */
	wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start));
	wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start));

	/* 3. Write AUX_OUT register */
	if (!!period)
		val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);

	/* 4. write GPIO CTL reg */
	val = GLGEN_GPIO_CTL_PIN_DIR_M;
	if (!!period)
		val |= FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M,
				  8 + chan + (tmr_idx * 4));

	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);

	return 0;
}

/**
 * ice_ptp_cfg_perout - Configure clock to generate periodic wave
 * @pf: Board private structure
 * @rq: Periodic output request
 * @on: Enable/disable flag
 *
 * Configure the internal clock generator modules to generate a clock wave of
 * the specified period.
 *
 * Return: 0 on success, negative error code otherwise
 */
static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
			      int on)
{
	unsigned int gpio_pin, prop_delay_ns;
	u64 clk, period, start, phase;
	struct ice_hw *hw = &pf->hw;
	int pin_desc_idx;

	if (rq->flags & ~PTP_PEROUT_PHASE)
		return -EOPNOTSUPP;

	pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_PEROUT, rq->index);
	if (pin_desc_idx < 0)
		return -EIO;

	gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1];
	prop_delay_ns = pf->ptp.ice_pin_desc[pin_desc_idx].delay[1];
	period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec;

	/* If we're disabling the output or period is 0, clear out CLKO and TGT
	 * and keep output level low.
	 */
	if (!on || !period)
		return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0);

	if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 &&
	    period != NSEC_PER_SEC && hw->ptp.phy_model == ICE_PHY_E82X) {
		dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n");
		return -EOPNOTSUPP;
	}

	if (period & 0x1) {
		dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
		return -EIO;
	}

	start = rq->start.sec * NSEC_PER_SEC + rq->start.nsec;

	/* If PTP_PEROUT_PHASE is set, rq has phase instead of start time */
	if (rq->flags & PTP_PEROUT_PHASE)
		phase = start;
	else
		div64_u64_rem(start, period, &phase);

	/* If we have only phase or start time is in the past, start the timer
	 * at the next multiple of period, maintaining phase.
	 */
	clk = ice_ptp_read_src_clk_reg(pf, NULL);
	if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns)
		start = div64_u64(clk + period - 1, period) * period + phase;

	/* Compensate for propagation delay from the generator to the pin. */
	start -= prop_delay_ns;

	return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period);
}

/**
 * ice_ptp_disable_all_perout - Disable all currently configured outputs
 * @pf: Board private structure
 *
 * Disable all currently configured clock outputs. This is necessary before
 * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_perout to
 * re-enable the clocks again.
 */
static void ice_ptp_disable_all_perout(struct ice_pf *pf)
{
	for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++)
		if (pf->ptp.perout_rqs[i].period.sec ||
		    pf->ptp.perout_rqs[i].period.nsec)
			ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i],
					   false);
}

/**
 * ice_ptp_enable_all_perout - Enable all configured periodic clock outputs
 * @pf: Board private structure
 *
 * Enable all currently configured clock outputs. Use this after
 * ice_ptp_disable_all_perout to reconfigure the output signals according to
 * their configuration.
 */
static void ice_ptp_enable_all_perout(struct ice_pf *pf)
{
	for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++)
		if (pf->ptp.perout_rqs[i].period.sec ||
		    pf->ptp.perout_rqs[i].period.nsec)
			ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i],
					   true);
}

/**
 * ice_ptp_disable_shared_pin - Disable enabled pin that shares GPIO
 * @pf: Board private structure
 * @pin: Pin index
 * @func: Assigned function
 *
 * Return: 0 on success, negative error code otherwise
 */
static int ice_ptp_disable_shared_pin(struct ice_pf *pf, unsigned int pin,
				      enum ptp_pin_function func)
{
	unsigned int gpio_pin;

	switch (func) {
	case PTP_PF_PEROUT:
		gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[1];
		break;
	case PTP_PF_EXTTS:
		gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[0];
		break;
	default:
		return -EOPNOTSUPP;
	}

	for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) {
		struct ptp_pin_desc *pin_desc = &pf->ptp.pin_desc[i];
		unsigned int chan = pin_desc->chan;

		/* Skip pin idx from the request */
		if (i == pin)
			continue;

		if (pin_desc->func == PTP_PF_PEROUT &&
		    pf->ptp.ice_pin_desc[i].gpio[1] == gpio_pin) {
			pf->ptp.perout_rqs[chan].period.sec = 0;
			pf->ptp.perout_rqs[chan].period.nsec = 0;
			pin_desc->func = PTP_PF_NONE;
			pin_desc->chan = 0;
			dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared output GPIO pin %u\n",
				i, gpio_pin);
			return ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[chan],
						  false);
		} else if (pin_desc->func == PTP_PF_EXTTS &&
			   pf->ptp.ice_pin_desc[i].gpio[0] == gpio_pin) {
			pf->ptp.extts_rqs[chan].flags &= ~PTP_ENABLE_FEATURE;
			pin_desc->func = PTP_PF_NONE;
			pin_desc->chan = 0;
			dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared input GPIO pin %u\n",
				i, gpio_pin);
			return ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[chan],
						 false);
		}
	}

	return 0;
}

/**
 * ice_verify_pin - verify if pin supports requested pin function
 * @info: the driver's PTP info structure
 * @pin: Pin index
1961 * @func: Assigned function 1962 * @chan: Assigned channel 1963 * 1964 * Return: 0 on success, -EOPNOTSUPP when function is not supported. 1965 */ 1966 static int ice_verify_pin(struct ptp_clock_info *info, unsigned int pin, 1967 enum ptp_pin_function func, unsigned int chan) 1968 { 1969 struct ice_pf *pf = ptp_info_to_pf(info); 1970 const struct ice_ptp_pin_desc *pin_desc; 1971 1972 pin_desc = &pf->ptp.ice_pin_desc[pin]; 1973 1974 /* Is assigned function allowed? */ 1975 switch (func) { 1976 case PTP_PF_EXTTS: 1977 if (pin_desc->gpio[0] < 0) 1978 return -EOPNOTSUPP; 1979 break; 1980 case PTP_PF_PEROUT: 1981 if (pin_desc->gpio[1] < 0) 1982 return -EOPNOTSUPP; 1983 break; 1984 case PTP_PF_NONE: 1985 break; 1986 case PTP_PF_PHYSYNC: 1987 default: 1988 return -EOPNOTSUPP; 1989 } 1990 1991 /* On adapters with SMA_CTRL disable other pins that share same GPIO */ 1992 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { 1993 ice_ptp_disable_shared_pin(pf, pin, func); 1994 pf->ptp.pin_desc[pin].func = func; 1995 pf->ptp.pin_desc[pin].chan = chan; 1996 return ice_ptp_set_sma_cfg(pf); 1997 } 1998 1999 return 0; 2000 } 2001 2002 /** 2003 * ice_ptp_gpio_enable - Enable/disable ancillary features of PHC 2004 * @info: The driver's PTP info structure 2005 * @rq: The requested feature to change 2006 * @on: Enable/disable flag 2007 * 2008 * Return: 0 on success, negative error code otherwise 2009 */ 2010 static int ice_ptp_gpio_enable(struct ptp_clock_info *info, 2011 struct ptp_clock_request *rq, int on) 2012 { 2013 struct ice_pf *pf = ptp_info_to_pf(info); 2014 int err; 2015 2016 switch (rq->type) { 2017 case PTP_CLK_REQ_PEROUT: 2018 { 2019 struct ptp_perout_request *cached = 2020 &pf->ptp.perout_rqs[rq->perout.index]; 2021 2022 err = ice_ptp_cfg_perout(pf, &rq->perout, on); 2023 if (!err) { 2024 *cached = rq->perout; 2025 } else { 2026 cached->period.sec = 0; 2027 cached->period.nsec = 0; 2028 } 2029 return err; 2030 } 2031 case PTP_CLK_REQ_EXTTS: 2032 { 2033 struct ptp_extts_request *cached = 2034 &pf->ptp.extts_rqs[rq->extts.index]; 2035 2036 err = ice_ptp_cfg_extts(pf, &rq->extts, on); 2037 if (!err) 2038 *cached = rq->extts; 2039 else 2040 cached->flags &= ~PTP_ENABLE_FEATURE; 2041 return err; 2042 } 2043 default: 2044 return -EOPNOTSUPP; 2045 } 2046 } 2047 2048 /** 2049 * ice_ptp_gettimex64 - Get the time of the clock 2050 * @info: the driver's PTP info structure 2051 * @ts: timespec64 structure to hold the current time value 2052 * @sts: Optional parameter for holding a pair of system timestamps from 2053 * the system clock. Will be ignored if NULL is given. 2054 * 2055 * Read the device clock and return the correct value on ns, after converting it 2056 * into a timespec struct. 2057 */ 2058 static int 2059 ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts, 2060 struct ptp_system_timestamp *sts) 2061 { 2062 struct ice_pf *pf = ptp_info_to_pf(info); 2063 u64 time_ns; 2064 2065 time_ns = ice_ptp_read_src_clk_reg(pf, sts); 2066 *ts = ns_to_timespec64(time_ns); 2067 return 0; 2068 } 2069 2070 /** 2071 * ice_ptp_settime64 - Set the time of the clock 2072 * @info: the driver's PTP info structure 2073 * @ts: timespec64 structure that holds the new time value 2074 * 2075 * Set the device clock to the user input value. The conversion from timespec 2076 * to ns happens in the write function. 
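 *
 * Illustrative userspace sketch (not part of the driver; the device node and
 * the FD_TO_CLOCKID macro from the kernel's testptp example are assumptions):
 *
 *	clockid_t clkid = FD_TO_CLOCKID(open("/dev/ptp0", O_RDWR));
 *	struct timespec ts = { .tv_sec = 1700000000, .tv_nsec = 0 };
 *
 *	clock_settime(clkid, &ts);
 *
 * which reaches this callback through the POSIX clock layer.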
2077 */ 2078 static int 2079 ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) 2080 { 2081 struct ice_pf *pf = ptp_info_to_pf(info); 2082 struct timespec64 ts64 = *ts; 2083 struct ice_hw *hw = &pf->hw; 2084 int err; 2085 2086 /* For Vernier mode on E82X, we need to recalibrate after new settime. 2087 * Start with marking timestamps as invalid. 2088 */ 2089 if (ice_get_phy_model(hw) == ICE_PHY_E82X) { 2090 err = ice_ptp_clear_phy_offset_ready_e82x(hw); 2091 if (err) 2092 dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n"); 2093 } 2094 2095 if (!ice_ptp_lock(hw)) { 2096 err = -EBUSY; 2097 goto exit; 2098 } 2099 2100 /* Disable periodic outputs */ 2101 ice_ptp_disable_all_perout(pf); 2102 2103 err = ice_ptp_write_init(pf, &ts64); 2104 ice_ptp_unlock(hw); 2105 2106 if (!err) 2107 ice_ptp_reset_cached_phctime(pf); 2108 2109 /* Reenable periodic outputs */ 2110 ice_ptp_enable_all_perout(pf); 2111 2112 /* Recalibrate and re-enable timestamp blocks for E822/E823 */ 2113 if (ice_get_phy_model(hw) == ICE_PHY_E82X) 2114 ice_ptp_restart_all_phy(pf); 2115 exit: 2116 if (err) { 2117 dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err); 2118 return err; 2119 } 2120 2121 return 0; 2122 } 2123 2124 /** 2125 * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment 2126 * @info: the driver's PTP info structure 2127 * @delta: Offset in nanoseconds to adjust the time by 2128 */ 2129 static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta) 2130 { 2131 struct timespec64 now, then; 2132 int ret; 2133 2134 then = ns_to_timespec64(delta); 2135 ret = ice_ptp_gettimex64(info, &now, NULL); 2136 if (ret) 2137 return ret; 2138 now = timespec64_add(now, then); 2139 2140 return ice_ptp_settime64(info, (const struct timespec64 *)&now); 2141 } 2142 2143 /** 2144 * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta 2145 * @info: the driver's PTP info structure 2146 * @delta: Offset in nanoseconds to adjust the time by 2147 */ 2148 static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta) 2149 { 2150 struct ice_pf *pf = ptp_info_to_pf(info); 2151 struct ice_hw *hw = &pf->hw; 2152 struct device *dev; 2153 int err; 2154 2155 dev = ice_pf_to_dev(pf); 2156 2157 /* Hardware only supports atomic adjustments using signed 32-bit 2158 * integers. For any adjustment outside this range, perform 2159 * a non-atomic get->adjust->set flow. 2160 */ 2161 if (delta > S32_MAX || delta < S32_MIN) { 2162 dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta); 2163 return ice_ptp_adjtime_nonatomic(info, delta); 2164 } 2165 2166 if (!ice_ptp_lock(hw)) { 2167 dev_err(dev, "PTP failed to acquire semaphore in adjtime\n"); 2168 return -EBUSY; 2169 } 2170 2171 /* Disable periodic outputs */ 2172 ice_ptp_disable_all_perout(pf); 2173 2174 err = ice_ptp_write_adj(pf, delta); 2175 2176 /* Reenable periodic outputs */ 2177 ice_ptp_enable_all_perout(pf); 2178 2179 ice_ptp_unlock(hw); 2180 2181 if (err) { 2182 dev_err(dev, "PTP failed to adjust time, err %d\n", err); 2183 return err; 2184 } 2185 2186 ice_ptp_reset_cached_phctime(pf); 2187 2188 return 0; 2189 } 2190 2191 #ifdef CONFIG_ICE_HWTS 2192 /** 2193 * ice_ptp_get_syncdevicetime - Get the cross time stamp info 2194 * @device: Current device time 2195 * @system: System counter value read synchronously with device time 2196 * @ctx: Context provided by timekeeping code 2197 * 2198 * Read device and system (ART) clock simultaneously and return the corrected 2199 * clock values in ns. 
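 *
 * This helper is not called directly; it is passed as the device-time
 * callback of get_device_system_crosststamp(), as done by
 * ice_ptp_getcrosststamp_e82x() below:
 *
 *	err = get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
 *					    pf, NULL, cts);
 *
 * The timekeeping core then correlates the returned ART cycle count
 * (system->cs_id == CSID_X86_ART) with system time.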
2200 */ 2201 static int 2202 ice_ptp_get_syncdevicetime(ktime_t *device, 2203 struct system_counterval_t *system, 2204 void *ctx) 2205 { 2206 struct ice_pf *pf = (struct ice_pf *)ctx; 2207 struct ice_hw *hw = &pf->hw; 2208 u32 hh_lock, hh_art_ctl; 2209 int i; 2210 2211 #define MAX_HH_HW_LOCK_TRIES 5 2212 #define MAX_HH_CTL_LOCK_TRIES 100 2213 2214 for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) { 2215 /* Get the HW lock */ 2216 hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); 2217 if (hh_lock & PFHH_SEM_BUSY_M) { 2218 usleep_range(10000, 15000); 2219 continue; 2220 } 2221 break; 2222 } 2223 if (hh_lock & PFHH_SEM_BUSY_M) { 2224 dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n"); 2225 return -EBUSY; 2226 } 2227 2228 /* Program cmd to master timer */ 2229 ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME); 2230 2231 /* Start the ART and device clock sync sequence */ 2232 hh_art_ctl = rd32(hw, GLHH_ART_CTL); 2233 hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M; 2234 wr32(hw, GLHH_ART_CTL, hh_art_ctl); 2235 2236 for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) { 2237 /* Wait for sync to complete */ 2238 hh_art_ctl = rd32(hw, GLHH_ART_CTL); 2239 if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) { 2240 udelay(1); 2241 continue; 2242 } else { 2243 u32 hh_ts_lo, hh_ts_hi, tmr_idx; 2244 u64 hh_ts; 2245 2246 tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; 2247 /* Read ART time */ 2248 hh_ts_lo = rd32(hw, GLHH_ART_TIME_L); 2249 hh_ts_hi = rd32(hw, GLHH_ART_TIME_H); 2250 hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo; 2251 system->cycles = hh_ts; 2252 system->cs_id = CSID_X86_ART; 2253 /* Read Device source clock time */ 2254 hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx)); 2255 hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx)); 2256 hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo; 2257 *device = ns_to_ktime(hh_ts); 2258 break; 2259 } 2260 } 2261 2262 /* Clear the master timer */ 2263 ice_ptp_src_cmd(hw, ICE_PTP_NOP); 2264 2265 /* Release HW lock */ 2266 hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); 2267 hh_lock = hh_lock & ~PFHH_SEM_BUSY_M; 2268 wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock); 2269 2270 if (i == MAX_HH_CTL_LOCK_TRIES) 2271 return -ETIMEDOUT; 2272 2273 return 0; 2274 } 2275 2276 /** 2277 * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp 2278 * @info: the driver's PTP info structure 2279 * @cts: The memory to fill the cross timestamp info 2280 * 2281 * Capture a cross timestamp between the ART and the device PTP hardware 2282 * clock. Fill the cross timestamp information and report it back to the 2283 * caller. 2284 * 2285 * This is only valid for E822 and E823 devices which have support for 2286 * generating the cross timestamp via PCIe PTM. 2287 * 2288 * In order to correctly correlate the ART timestamp back to the TSC time, the 2289 * CPU must have X86_FEATURE_TSC_KNOWN_FREQ. 
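 *
 * Illustrative userspace sketch (assumed /dev/ptp0 node, not part of the
 * driver): this path is exercised via the PTP_SYS_OFFSET_PRECISE ioctl,
 * which utilities such as phc2sys use when it is available:
 *
 *	struct ptp_sys_offset_precise off = { 0 };
 *	int fd = open("/dev/ptp0", O_RDWR);
 *
 *	if (!ioctl(fd, PTP_SYS_OFFSET_PRECISE, &off))
 *		printf("dev %lld.%09u sys %lld.%09u\n",
 *		       off.device.sec, off.device.nsec,
 *		       off.sys_realtime.sec, off.sys_realtime.nsec);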
2290 */ 2291 static int 2292 ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info, 2293 struct system_device_crosststamp *cts) 2294 { 2295 struct ice_pf *pf = ptp_info_to_pf(info); 2296 2297 return get_device_system_crosststamp(ice_ptp_get_syncdevicetime, 2298 pf, NULL, cts); 2299 } 2300 #endif /* CONFIG_ICE_HWTS */ 2301 2302 /** 2303 * ice_ptp_get_ts_config - ioctl interface to read the timestamping config 2304 * @pf: Board private structure 2305 * @ifr: ioctl data 2306 * 2307 * Copy the timestamping config to user buffer 2308 */ 2309 int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr) 2310 { 2311 struct hwtstamp_config *config; 2312 2313 if (pf->ptp.state != ICE_PTP_READY) 2314 return -EIO; 2315 2316 config = &pf->ptp.tstamp_config; 2317 2318 return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? 2319 -EFAULT : 0; 2320 } 2321 2322 /** 2323 * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode 2324 * @pf: Board private structure 2325 * @config: hwtstamp settings requested or saved 2326 */ 2327 static int 2328 ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config) 2329 { 2330 switch (config->tx_type) { 2331 case HWTSTAMP_TX_OFF: 2332 pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF; 2333 break; 2334 case HWTSTAMP_TX_ON: 2335 pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON; 2336 break; 2337 default: 2338 return -ERANGE; 2339 } 2340 2341 switch (config->rx_filter) { 2342 case HWTSTAMP_FILTER_NONE: 2343 pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; 2344 break; 2345 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2346 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2347 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2348 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2349 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2350 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2351 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2352 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2353 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2354 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2355 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2356 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2357 case HWTSTAMP_FILTER_NTP_ALL: 2358 case HWTSTAMP_FILTER_ALL: 2359 pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL; 2360 break; 2361 default: 2362 return -ERANGE; 2363 } 2364 2365 /* Immediately update the device timestamping mode */ 2366 ice_ptp_restore_timestamp_mode(pf); 2367 2368 return 0; 2369 } 2370 2371 /** 2372 * ice_ptp_set_ts_config - ioctl interface to control the timestamping 2373 * @pf: Board private structure 2374 * @ifr: ioctl data 2375 * 2376 * Get the user config and store it 2377 */ 2378 int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr) 2379 { 2380 struct hwtstamp_config config; 2381 int err; 2382 2383 if (pf->ptp.state != ICE_PTP_READY) 2384 return -EAGAIN; 2385 2386 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 2387 return -EFAULT; 2388 2389 err = ice_ptp_set_timestamp_mode(pf, &config); 2390 if (err) 2391 return err; 2392 2393 /* Return the actual configuration set */ 2394 config = pf->ptp.tstamp_config; 2395 2396 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 2397 -EFAULT : 0; 2398 } 2399 2400 /** 2401 * ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns 2402 * @rx_desc: Receive descriptor 2403 * @pkt_ctx: Packet context to get the cached time 2404 * 2405 * The driver receives a notification in the receive descriptor with timestamp. 
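 *
 * The descriptor carries only the low 32 bits of the nanosecond time, so the
 * value is extended with the cached PHC time (a 32-bit ns field wraps every
 * ~4.29 s, and the cache is refreshed twice a second by the periodic work,
 * so it is close enough in practice). Illustrative numbers: with
 * cached_time = 0x500000100 and ts_high = 0x180, the extended timestamp is
 * 0x500000180 ns.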
2406 */ 2407 u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc, 2408 const struct ice_pkt_ctx *pkt_ctx) 2409 { 2410 u64 ts_ns, cached_time; 2411 u32 ts_high; 2412 2413 if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID)) 2414 return 0; 2415 2416 cached_time = READ_ONCE(pkt_ctx->cached_phctime); 2417 2418 /* Do not report a timestamp if we don't have a cached PHC time */ 2419 if (!cached_time) 2420 return 0; 2421 2422 /* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached 2423 * PHC value, rather than accessing the PF. This also allows us to 2424 * simply pass the upper 32bits of nanoseconds directly. Calling 2425 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these 2426 * bits itself. 2427 */ 2428 ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high); 2429 ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high); 2430 2431 return ts_ns; 2432 } 2433 2434 /** 2435 * ice_ptp_setup_pin_cfg - setup PTP pin_config structure 2436 * @pf: Board private structure 2437 */ 2438 static void ice_ptp_setup_pin_cfg(struct ice_pf *pf) 2439 { 2440 for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) { 2441 const struct ice_ptp_pin_desc *desc = &pf->ptp.ice_pin_desc[i]; 2442 struct ptp_pin_desc *pin = &pf->ptp.pin_desc[i]; 2443 const char *name = NULL; 2444 2445 if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) 2446 name = ice_pin_names[desc->name_idx]; 2447 else if (desc->name_idx != GPIO_NA) 2448 name = ice_pin_names_nvm[desc->name_idx]; 2449 if (name) 2450 strscpy(pin->name, name, sizeof(pin->name)); 2451 2452 pin->index = i; 2453 } 2454 2455 pf->ptp.info.pin_config = pf->ptp.pin_desc; 2456 } 2457 2458 /** 2459 * ice_ptp_disable_pins - Disable PTP pins 2460 * @pf: pointer to the PF structure 2461 * 2462 * Disable the OS access to the SMA pins. Called to clear out the OS 2463 * indications of pin support when we fail to setup the SMA control register. 2464 */ 2465 static void ice_ptp_disable_pins(struct ice_pf *pf) 2466 { 2467 struct ptp_clock_info *info = &pf->ptp.info; 2468 2469 dev_warn(ice_pf_to_dev(pf), "Failed to configure PTP pin control\n"); 2470 2471 info->enable = NULL; 2472 info->verify = NULL; 2473 info->n_pins = 0; 2474 info->n_ext_ts = 0; 2475 info->n_per_out = 0; 2476 } 2477 2478 /** 2479 * ice_ptp_parse_sdp_entries - update ice_ptp_pin_desc structure from NVM 2480 * @pf: pointer to the PF structure 2481 * @entries: SDP connection section from NVM 2482 * @num_entries: number of valid entries in sdp_entries 2483 * @pins: PTP pins array to update 2484 * 2485 * Return: 0 on success, negative error code otherwise. 
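 *
 * Decoding sketch for one entry (mirrors the FIELD_GET() calls below; the
 * concrete bit positions live in the ICE_AQC_NVM_SDP_AC_* masks):
 *
 *	entry  = le16_to_cpu(entries[i]);
 *	bitmap = FIELD_GET(ICE_AQC_NVM_SDP_AC_PIN_M, entry);	  // board pins
 *	dir    = FIELD_GET(ICE_AQC_NVM_SDP_AC_DIR_M, entry);	  // 0 in, 1 out
 *	gpio   = FIELD_GET(ICE_AQC_NVM_SDP_AC_SDP_NUM_M, entry); // SDP number
 *
 * The GPIO number ends up in pins[idx].gpio[dir], so direction 0 feeds the
 * external timestamp (input) side and direction 1 the periodic output side.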
2486 */ 2487 static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries, 2488 unsigned int num_entries, 2489 struct ice_ptp_pin_desc *pins) 2490 { 2491 unsigned int n_pins = 0; 2492 unsigned int i; 2493 2494 /* Setup ice_pin_desc array */ 2495 for (i = 0; i < ICE_N_PINS_MAX; i++) { 2496 pins[i].name_idx = -1; 2497 pins[i].gpio[0] = -1; 2498 pins[i].gpio[1] = -1; 2499 } 2500 2501 for (i = 0; i < num_entries; i++) { 2502 u16 entry = le16_to_cpu(entries[i]); 2503 DECLARE_BITMAP(bitmap, GPIO_NA); 2504 unsigned int bitmap_idx; 2505 bool dir; 2506 u16 gpio; 2507 2508 *bitmap = FIELD_GET(ICE_AQC_NVM_SDP_AC_PIN_M, entry); 2509 dir = !!FIELD_GET(ICE_AQC_NVM_SDP_AC_DIR_M, entry); 2510 gpio = FIELD_GET(ICE_AQC_NVM_SDP_AC_SDP_NUM_M, entry); 2511 for_each_set_bit(bitmap_idx, bitmap, GPIO_NA + 1) { 2512 unsigned int idx; 2513 2514 /* Check if entry's pin bit is valid */ 2515 if (bitmap_idx >= NUM_PTP_PINS_NVM && 2516 bitmap_idx != GPIO_NA) 2517 continue; 2518 2519 /* Check if pin already exists */ 2520 for (idx = 0; idx < ICE_N_PINS_MAX; idx++) 2521 if (pins[idx].name_idx == bitmap_idx) 2522 break; 2523 2524 if (idx == ICE_N_PINS_MAX) { 2525 /* Pin not found, setup its entry and name */ 2526 idx = n_pins++; 2527 pins[idx].name_idx = bitmap_idx; 2528 if (bitmap_idx == GPIO_NA) 2529 strscpy(pf->ptp.pin_desc[idx].name, 2530 ice_pin_names[gpio], 2531 sizeof(pf->ptp.pin_desc[idx] 2532 .name)); 2533 } 2534 2535 /* Setup in/out GPIO number */ 2536 pins[idx].gpio[dir] = gpio; 2537 } 2538 } 2539 2540 for (i = 0; i < n_pins; i++) { 2541 dev_dbg(ice_pf_to_dev(pf), 2542 "NVM pin entry[%d] : name_idx %d gpio_out %d gpio_in %d\n", 2543 i, pins[i].name_idx, pins[i].gpio[1], pins[i].gpio[0]); 2544 } 2545 2546 pf->ptp.info.n_pins = n_pins; 2547 return 0; 2548 } 2549 2550 /** 2551 * ice_ptp_set_funcs_e82x - Set specialized functions for E82X support 2552 * @pf: Board private structure 2553 * 2554 * Assign functions to the PTP capabilities structure for E82X devices. 2555 * Functions which operate across all device families should be set directly 2556 * in ice_ptp_set_caps. Only add functions here which are distinct for E82X 2557 * devices. 2558 */ 2559 static void ice_ptp_set_funcs_e82x(struct ice_pf *pf) 2560 { 2561 #ifdef CONFIG_ICE_HWTS 2562 if (boot_cpu_has(X86_FEATURE_ART) && 2563 boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) 2564 pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp_e82x; 2565 2566 #endif /* CONFIG_ICE_HWTS */ 2567 if (ice_is_e825c(&pf->hw)) { 2568 pf->ptp.ice_pin_desc = ice_pin_desc_e825c; 2569 pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e825c); 2570 } else { 2571 pf->ptp.ice_pin_desc = ice_pin_desc_e82x; 2572 pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e82x); 2573 } 2574 ice_ptp_setup_pin_cfg(pf); 2575 } 2576 2577 /** 2578 * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support 2579 * @pf: Board private structure 2580 * 2581 * Assign functions to the PTP capabiltiies structure for E810 devices. 2582 * Functions which operate across all device families should be set directly 2583 * in ice_ptp_set_caps. Only add functions here which are distinct for E810 2584 * devices. 
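 *
 * Pin table selection, in brief (mirrors the code below): a valid SDP
 * section in the NVM is parsed into a dynamically allocated table; when it
 * is missing or corrupted, the static ice_pin_desc_e810_sma table is used if
 * ICE_F_SMA_CTRL is supported, and ice_pin_desc_e810 otherwise.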
2585 */ 2586 static void ice_ptp_set_funcs_e810(struct ice_pf *pf) 2587 { 2588 __le16 entries[ICE_AQC_NVM_SDP_AC_MAX_SIZE]; 2589 struct ice_ptp_pin_desc *desc = NULL; 2590 struct ice_ptp *ptp = &pf->ptp; 2591 unsigned int num_entries; 2592 int err; 2593 2594 err = ice_ptp_read_sdp_ac(&pf->hw, entries, &num_entries); 2595 if (err) { 2596 /* SDP section does not exist in NVM or is corrupted */ 2597 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { 2598 ptp->ice_pin_desc = ice_pin_desc_e810_sma; 2599 ptp->info.n_pins = 2600 ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810_sma); 2601 } else { 2602 pf->ptp.ice_pin_desc = ice_pin_desc_e810; 2603 pf->ptp.info.n_pins = 2604 ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810); 2605 err = 0; 2606 } 2607 } else { 2608 desc = devm_kcalloc(ice_pf_to_dev(pf), ICE_N_PINS_MAX, 2609 sizeof(struct ice_ptp_pin_desc), 2610 GFP_KERNEL); 2611 if (!desc) 2612 goto err; 2613 2614 err = ice_ptp_parse_sdp_entries(pf, entries, num_entries, desc); 2615 if (err) 2616 goto err; 2617 2618 ptp->ice_pin_desc = (const struct ice_ptp_pin_desc *)desc; 2619 } 2620 2621 ptp->info.pin_config = ptp->pin_desc; 2622 ice_ptp_setup_pin_cfg(pf); 2623 2624 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) 2625 err = ice_ptp_set_sma_cfg(pf); 2626 err: 2627 if (err) { 2628 devm_kfree(ice_pf_to_dev(pf), desc); 2629 ice_ptp_disable_pins(pf); 2630 } 2631 } 2632 2633 /** 2634 * ice_ptp_set_caps - Set PTP capabilities 2635 * @pf: Board private structure 2636 */ 2637 static void ice_ptp_set_caps(struct ice_pf *pf) 2638 { 2639 struct ptp_clock_info *info = &pf->ptp.info; 2640 struct device *dev = ice_pf_to_dev(pf); 2641 2642 snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk", 2643 dev_driver_string(dev), dev_name(dev)); 2644 info->owner = THIS_MODULE; 2645 info->max_adj = 100000000; 2646 info->adjtime = ice_ptp_adjtime; 2647 info->adjfine = ice_ptp_adjfine; 2648 info->gettimex64 = ice_ptp_gettimex64; 2649 info->settime64 = ice_ptp_settime64; 2650 info->n_per_out = GLTSYN_TGT_H_IDX_MAX; 2651 info->n_ext_ts = GLTSYN_EVNT_H_IDX_MAX; 2652 info->enable = ice_ptp_gpio_enable; 2653 info->verify = ice_verify_pin; 2654 2655 if (ice_is_e810(&pf->hw)) 2656 ice_ptp_set_funcs_e810(pf); 2657 else 2658 ice_ptp_set_funcs_e82x(pf); 2659 } 2660 2661 /** 2662 * ice_ptp_create_clock - Create PTP clock device for userspace 2663 * @pf: Board private structure 2664 * 2665 * This function creates a new PTP clock device. It only creates one if we 2666 * don't already have one. Will return error if it can't create one, but success 2667 * if we already have a device. Should be used by ice_ptp_init to create clock 2668 * initially, and prevent global resets from creating new clock devices. 2669 */ 2670 static long ice_ptp_create_clock(struct ice_pf *pf) 2671 { 2672 struct ptp_clock_info *info; 2673 struct device *dev; 2674 2675 /* No need to create a clock device if we already have one */ 2676 if (pf->ptp.clock) 2677 return 0; 2678 2679 ice_ptp_set_caps(pf); 2680 2681 info = &pf->ptp.info; 2682 dev = ice_pf_to_dev(pf); 2683 2684 /* Attempt to register the clock before enabling the hardware. 
*/ 2685 pf->ptp.clock = ptp_clock_register(info, dev); 2686 if (IS_ERR(pf->ptp.clock)) { 2687 dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device"); 2688 return PTR_ERR(pf->ptp.clock); 2689 } 2690 2691 return 0; 2692 } 2693 2694 /** 2695 * ice_ptp_request_ts - Request an available Tx timestamp index 2696 * @tx: the PTP Tx timestamp tracker to request from 2697 * @skb: the SKB to associate with this timestamp request 2698 */ 2699 s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) 2700 { 2701 unsigned long flags; 2702 u8 idx; 2703 2704 spin_lock_irqsave(&tx->lock, flags); 2705 2706 /* Check that this tracker is accepting new timestamp requests */ 2707 if (!ice_ptp_is_tx_tracker_up(tx)) { 2708 spin_unlock_irqrestore(&tx->lock, flags); 2709 return -1; 2710 } 2711 2712 /* Find and set the first available index */ 2713 idx = find_next_zero_bit(tx->in_use, tx->len, 2714 tx->last_ll_ts_idx_read + 1); 2715 if (idx == tx->len) 2716 idx = find_first_zero_bit(tx->in_use, tx->len); 2717 2718 if (idx < tx->len) { 2719 /* We got a valid index that no other thread could have set. Store 2720 * a reference to the skb and the start time to allow discarding old 2721 * requests. 2722 */ 2723 set_bit(idx, tx->in_use); 2724 clear_bit(idx, tx->stale); 2725 tx->tstamps[idx].start = jiffies; 2726 tx->tstamps[idx].skb = skb_get(skb); 2727 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2728 ice_trace(tx_tstamp_request, skb, idx); 2729 } 2730 2731 spin_unlock_irqrestore(&tx->lock, flags); 2732 2733 /* return the appropriate PHY timestamp register index, -1 if no 2734 * indexes were available. 2735 */ 2736 if (idx >= tx->len) 2737 return -1; 2738 else 2739 return idx + tx->offset; 2740 } 2741 2742 /** 2743 * ice_ptp_process_ts - Process the PTP Tx timestamps 2744 * @pf: Board private structure 2745 * 2746 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx 2747 * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise. 2748 */ 2749 enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf) 2750 { 2751 switch (pf->ptp.tx_interrupt_mode) { 2752 case ICE_PTP_TX_INTERRUPT_NONE: 2753 /* This device has the clock owner handle timestamps for it */ 2754 return ICE_TX_TSTAMP_WORK_DONE; 2755 case ICE_PTP_TX_INTERRUPT_SELF: 2756 /* This device handles its own timestamps */ 2757 return ice_ptp_tx_tstamp(&pf->ptp.port.tx); 2758 case ICE_PTP_TX_INTERRUPT_ALL: 2759 /* This device handles timestamps for all ports */ 2760 return ice_ptp_tx_tstamp_owner(pf); 2761 default: 2762 WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n", 2763 pf->ptp.tx_interrupt_mode); 2764 return ICE_TX_TSTAMP_WORK_DONE; 2765 } 2766 } 2767 2768 /** 2769 * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timstamp interrupt 2770 * @pf: Board private structure 2771 * 2772 * The device PHY issues Tx timestamp interrupts to the driver for processing 2773 * timestamp data from the PHY. It will not interrupt again until all 2774 * current timestamp data is read. In rare circumstances, it is possible that 2775 * the driver fails to read all outstanding data. 2776 * 2777 * To avoid getting permanently stuck, periodically check if the PHY has 2778 * outstanding timestamp data. If so, trigger an interrupt from software to 2779 * process this data. 
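 *
 * The "interrupt from software" is a plain register write: setting
 * PFINT_OICR_TSYN_TX_M in PFINT_OICR (see below) makes the other interrupt
 * cause handler run its normal Tx timestamp processing path, as if the PHY
 * had signalled new timestamps.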
2780 */ 2781 static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf) 2782 { 2783 struct device *dev = ice_pf_to_dev(pf); 2784 struct ice_hw *hw = &pf->hw; 2785 bool trigger_oicr = false; 2786 unsigned int i; 2787 2788 if (ice_is_e810(hw)) 2789 return; 2790 2791 if (!ice_pf_src_tmr_owned(pf)) 2792 return; 2793 2794 for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) { 2795 u64 tstamp_ready; 2796 int err; 2797 2798 err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready); 2799 if (!err && tstamp_ready) { 2800 trigger_oicr = true; 2801 break; 2802 } 2803 } 2804 2805 if (trigger_oicr) { 2806 /* Trigger a software interrupt, to ensure this data 2807 * gets processed. 2808 */ 2809 dev_dbg(dev, "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n"); 2810 2811 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); 2812 ice_flush(hw); 2813 } 2814 } 2815 2816 static void ice_ptp_periodic_work(struct kthread_work *work) 2817 { 2818 struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work); 2819 struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp); 2820 int err; 2821 2822 if (pf->ptp.state != ICE_PTP_READY) 2823 return; 2824 2825 err = ice_ptp_update_cached_phctime(pf); 2826 2827 ice_ptp_maybe_trigger_tx_interrupt(pf); 2828 2829 /* Run twice a second or reschedule if phc update failed */ 2830 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 2831 msecs_to_jiffies(err ? 10 : 500)); 2832 } 2833 2834 /** 2835 * ice_ptp_prepare_for_reset - Prepare PTP for reset 2836 * @pf: Board private structure 2837 * @reset_type: the reset type being performed 2838 */ 2839 void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) 2840 { 2841 struct ice_ptp *ptp = &pf->ptp; 2842 u8 src_tmr; 2843 2844 if (ptp->state != ICE_PTP_READY) 2845 return; 2846 2847 ptp->state = ICE_PTP_RESETTING; 2848 2849 /* Disable timestamping for both Tx and Rx */ 2850 ice_ptp_disable_timestamp_mode(pf); 2851 2852 kthread_cancel_delayed_work_sync(&ptp->work); 2853 2854 if (reset_type == ICE_RESET_PFR) 2855 return; 2856 2857 ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); 2858 2859 /* Disable periodic outputs */ 2860 ice_ptp_disable_all_perout(pf); 2861 2862 src_tmr = ice_get_ptp_src_clock_index(&pf->hw); 2863 2864 /* Disable source clock */ 2865 wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M); 2866 2867 /* Acquire PHC and system timer to restore after reset */ 2868 ptp->reset_time = ktime_get_real_ns(); 2869 } 2870 2871 /** 2872 * ice_ptp_rebuild_owner - Initialize PTP clock owner after reset 2873 * @pf: Board private structure 2874 * 2875 * Companion function for ice_ptp_rebuild() which handles tasks that only the 2876 * PTP clock owner instance should perform. 2877 */ 2878 static int ice_ptp_rebuild_owner(struct ice_pf *pf) 2879 { 2880 struct ice_ptp *ptp = &pf->ptp; 2881 struct ice_hw *hw = &pf->hw; 2882 struct timespec64 ts; 2883 u64 time_diff; 2884 int err; 2885 2886 err = ice_ptp_init_phc(hw); 2887 if (err) 2888 return err; 2889 2890 /* Acquire the global hardware lock */ 2891 if (!ice_ptp_lock(hw)) { 2892 err = -EBUSY; 2893 return err; 2894 } 2895 2896 /* Write the increment time value to PHY and LAN */ 2897 err = ice_ptp_write_incval(hw, ice_base_incval(pf)); 2898 if (err) 2899 goto err_unlock; 2900 2901 /* Write the initial Time value to PHY and LAN using the cached PHC 2902 * time before the reset and time difference between stopping and 2903 * starting the clock. 
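 *
 * Illustrative numbers: if the PHC was cached at 1000.0 s and 2.5 s of
 * wall-clock time passed since ice_ptp_prepare_for_reset() recorded
 * ptp->reset_time, the clock is re-armed at roughly 1002.5 s:
 *
 *	time_diff = ktime_get_real_ns() - ptp->reset_time;	// ~2.5e9 ns
 *	ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);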
2904 */ 2905 if (ptp->cached_phc_time) { 2906 time_diff = ktime_get_real_ns() - ptp->reset_time; 2907 ts = ns_to_timespec64(ptp->cached_phc_time + time_diff); 2908 } else { 2909 ts = ktime_to_timespec64(ktime_get_real()); 2910 } 2911 err = ice_ptp_write_init(pf, &ts); 2912 if (err) 2913 goto err_unlock; 2914 2915 /* Release the global hardware lock */ 2916 ice_ptp_unlock(hw); 2917 2918 /* Flush software tracking of any outstanding timestamps since we're 2919 * about to flush the PHY timestamp block. 2920 */ 2921 ice_ptp_flush_all_tx_tracker(pf); 2922 2923 if (!ice_is_e810(hw)) { 2924 /* Enable quad interrupts */ 2925 err = ice_ptp_cfg_phy_interrupt(pf, true, 1); 2926 if (err) 2927 return err; 2928 2929 ice_ptp_restart_all_phy(pf); 2930 } 2931 2932 /* Re-enable all periodic outputs and external timestamp events */ 2933 ice_ptp_enable_all_perout(pf); 2934 ice_ptp_enable_all_extts(pf); 2935 2936 return 0; 2937 2938 err_unlock: 2939 ice_ptp_unlock(hw); 2940 return err; 2941 } 2942 2943 /** 2944 * ice_ptp_rebuild - Initialize PTP hardware clock support after reset 2945 * @pf: Board private structure 2946 * @reset_type: the reset type being performed 2947 */ 2948 void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) 2949 { 2950 struct ice_ptp *ptp = &pf->ptp; 2951 int err; 2952 2953 if (ptp->state == ICE_PTP_READY) { 2954 ice_ptp_prepare_for_reset(pf, reset_type); 2955 } else if (ptp->state != ICE_PTP_RESETTING) { 2956 err = -EINVAL; 2957 dev_err(ice_pf_to_dev(pf), "PTP was not initialized\n"); 2958 goto err; 2959 } 2960 2961 if (ice_pf_src_tmr_owned(pf) && reset_type != ICE_RESET_PFR) { 2962 err = ice_ptp_rebuild_owner(pf); 2963 if (err) 2964 goto err; 2965 } 2966 2967 ptp->state = ICE_PTP_READY; 2968 2969 /* Start periodic work going */ 2970 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0); 2971 2972 dev_info(ice_pf_to_dev(pf), "PTP reset successful\n"); 2973 return; 2974 2975 err: 2976 ptp->state = ICE_PTP_ERROR; 2977 dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err); 2978 } 2979 2980 static bool ice_is_primary(struct ice_hw *hw) 2981 { 2982 return ice_is_e825c(hw) && ice_is_dual(hw) ? 2983 !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) : true; 2984 } 2985 2986 static int ice_ptp_setup_adapter(struct ice_pf *pf) 2987 { 2988 if (!ice_pf_src_tmr_owned(pf) || !ice_is_primary(&pf->hw)) 2989 return -EPERM; 2990 2991 pf->adapter->ctrl_pf = pf; 2992 2993 return 0; 2994 } 2995 2996 static int ice_ptp_setup_pf(struct ice_pf *pf) 2997 { 2998 struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf); 2999 struct ice_ptp *ptp = &pf->ptp; 3000 3001 if (WARN_ON(!ctrl_ptp) || ice_get_phy_model(&pf->hw) == ICE_PHY_UNSUP) 3002 return -ENODEV; 3003 3004 INIT_LIST_HEAD(&ptp->port.list_node); 3005 mutex_lock(&pf->adapter->ports.lock); 3006 3007 list_add(&ptp->port.list_node, 3008 &pf->adapter->ports.ports); 3009 mutex_unlock(&pf->adapter->ports.lock); 3010 3011 return 0; 3012 } 3013 3014 static void ice_ptp_cleanup_pf(struct ice_pf *pf) 3015 { 3016 struct ice_ptp *ptp = &pf->ptp; 3017 3018 if (ice_get_phy_model(&pf->hw) != ICE_PHY_UNSUP) { 3019 mutex_lock(&pf->adapter->ports.lock); 3020 list_del(&ptp->port.list_node); 3021 mutex_unlock(&pf->adapter->ports.lock); 3022 } 3023 } 3024 3025 /** 3026 * ice_ptp_clock_index - Get the PTP clock index for this device 3027 * @pf: Board private structure 3028 * 3029 * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock 3030 * is associated. 
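 *
 * The index is the one assigned by the PTP core at ptp_clock_register()
 * time and identifies the /dev/ptp<index> character device. Illustrative
 * use (a hedged sketch, not a quote of an actual caller):
 *
 *	int idx = ice_ptp_clock_index(pf);	// -1 when no clock is registered
 *
 *	if (idx >= 0)
 *		info->phc_index = idx;		// e.g. for ethtool get_ts_info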
3031 */ 3032 int ice_ptp_clock_index(struct ice_pf *pf) 3033 { 3034 struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf); 3035 struct ptp_clock *clock; 3036 3037 if (!ctrl_ptp) 3038 return -1; 3039 clock = ctrl_ptp->clock; 3040 3041 return clock ? ptp_clock_index(clock) : -1; 3042 } 3043 3044 /** 3045 * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device 3046 * @pf: Board private structure 3047 * 3048 * Setup and initialize a PTP clock device that represents the device hardware 3049 * clock. Save the clock index for other functions connected to the same 3050 * hardware resource. 3051 */ 3052 static int ice_ptp_init_owner(struct ice_pf *pf) 3053 { 3054 struct ice_hw *hw = &pf->hw; 3055 struct timespec64 ts; 3056 int err; 3057 3058 err = ice_ptp_init_phc(hw); 3059 if (err) { 3060 dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n", 3061 err); 3062 return err; 3063 } 3064 3065 /* Acquire the global hardware lock */ 3066 if (!ice_ptp_lock(hw)) { 3067 err = -EBUSY; 3068 goto err_exit; 3069 } 3070 3071 /* Write the increment time value to PHY and LAN */ 3072 err = ice_ptp_write_incval(hw, ice_base_incval(pf)); 3073 if (err) 3074 goto err_unlock; 3075 3076 ts = ktime_to_timespec64(ktime_get_real()); 3077 /* Write the initial Time value to PHY and LAN */ 3078 err = ice_ptp_write_init(pf, &ts); 3079 if (err) 3080 goto err_unlock; 3081 3082 /* Release the global hardware lock */ 3083 ice_ptp_unlock(hw); 3084 3085 /* Configure PHY interrupt settings */ 3086 err = ice_ptp_cfg_phy_interrupt(pf, true, 1); 3087 if (err) 3088 goto err_exit; 3089 3090 /* Ensure we have a clock device */ 3091 err = ice_ptp_create_clock(pf); 3092 if (err) 3093 goto err_clk; 3094 3095 return 0; 3096 err_clk: 3097 pf->ptp.clock = NULL; 3098 err_exit: 3099 return err; 3100 3101 err_unlock: 3102 ice_ptp_unlock(hw); 3103 return err; 3104 } 3105 3106 /** 3107 * ice_ptp_init_work - Initialize PTP work threads 3108 * @pf: Board private structure 3109 * @ptp: PF PTP structure 3110 */ 3111 static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp) 3112 { 3113 struct kthread_worker *kworker; 3114 3115 /* Initialize work functions */ 3116 kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work); 3117 3118 /* Allocate a kworker for handling work required for the ports 3119 * connected to the PTP hardware clock. 
3120 */ 3121 kworker = kthread_create_worker(0, "ice-ptp-%s", 3122 dev_name(ice_pf_to_dev(pf))); 3123 if (IS_ERR(kworker)) 3124 return PTR_ERR(kworker); 3125 3126 ptp->kworker = kworker; 3127 3128 /* Start periodic work going */ 3129 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0); 3130 3131 return 0; 3132 } 3133 3134 /** 3135 * ice_ptp_init_port - Initialize PTP port structure 3136 * @pf: Board private structure 3137 * @ptp_port: PTP port structure 3138 */ 3139 static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) 3140 { 3141 struct ice_hw *hw = &pf->hw; 3142 3143 mutex_init(&ptp_port->ps_lock); 3144 3145 switch (ice_get_phy_model(hw)) { 3146 case ICE_PHY_ETH56G: 3147 return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx, 3148 ptp_port->port_num); 3149 case ICE_PHY_E810: 3150 return ice_ptp_init_tx_e810(pf, &ptp_port->tx); 3151 case ICE_PHY_E82X: 3152 kthread_init_delayed_work(&ptp_port->ov_work, 3153 ice_ptp_wait_for_offsets); 3154 3155 return ice_ptp_init_tx_e82x(pf, &ptp_port->tx, 3156 ptp_port->port_num); 3157 default: 3158 return -ENODEV; 3159 } 3160 } 3161 3162 /** 3163 * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode 3164 * @pf: Board private structure 3165 * 3166 * Initialize the Tx timestamp interrupt mode for this device. For most device 3167 * types, each PF processes the interrupt and manages its own timestamps. For 3168 * E822-based devices, only the clock owner processes the timestamps. Other 3169 * PFs disable the interrupt and do not process their own timestamps. 3170 */ 3171 static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf) 3172 { 3173 switch (ice_get_phy_model(&pf->hw)) { 3174 case ICE_PHY_E82X: 3175 /* E822 based PHY has the clock owner process the interrupt 3176 * for all ports. 3177 */ 3178 if (ice_pf_src_tmr_owned(pf)) 3179 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL; 3180 else 3181 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE; 3182 break; 3183 default: 3184 /* other PHY types handle their own Tx interrupt */ 3185 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF; 3186 } 3187 } 3188 3189 /** 3190 * ice_ptp_init - Initialize PTP hardware clock support 3191 * @pf: Board private structure 3192 * 3193 * Set up the device for interacting with the PTP hardware clock for all 3194 * functions, both the function that owns the clock hardware, and the 3195 * functions connected to the clock hardware. 3196 * 3197 * The clock owner will allocate and register a ptp_clock with the 3198 * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work 3199 * items used for asynchronous work such as Tx timestamps and periodic work. 3200 */ 3201 void ice_ptp_init(struct ice_pf *pf) 3202 { 3203 struct ice_ptp *ptp = &pf->ptp; 3204 struct ice_hw *hw = &pf->hw; 3205 int err; 3206 3207 ptp->state = ICE_PTP_INITIALIZING; 3208 3209 ice_ptp_init_hw(hw); 3210 3211 ice_ptp_init_tx_interrupt_mode(pf); 3212 3213 /* If this function owns the clock hardware, it must allocate and 3214 * configure the PTP clock device to represent it. 
3215 */ 3216 if (ice_pf_src_tmr_owned(pf) && ice_is_primary(hw)) { 3217 err = ice_ptp_setup_adapter(pf); 3218 if (err) 3219 goto err_exit; 3220 err = ice_ptp_init_owner(pf); 3221 if (err) 3222 goto err_exit; 3223 } 3224 3225 err = ice_ptp_setup_pf(pf); 3226 if (err) 3227 goto err_exit; 3228 3229 ptp->port.port_num = hw->pf_id; 3230 if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo) 3231 ptp->port.port_num = hw->pf_id * 2; 3232 3233 err = ice_ptp_init_port(pf, &ptp->port); 3234 if (err) 3235 goto err_exit; 3236 3237 /* Start the PHY timestamping block */ 3238 ice_ptp_reset_phy_timestamping(pf); 3239 3240 /* Configure initial Tx interrupt settings */ 3241 ice_ptp_cfg_tx_interrupt(pf); 3242 3243 ptp->state = ICE_PTP_READY; 3244 3245 err = ice_ptp_init_work(pf, ptp); 3246 if (err) 3247 goto err_exit; 3248 3249 dev_info(ice_pf_to_dev(pf), "PTP init successful\n"); 3250 return; 3251 3252 err_exit: 3253 /* If we registered a PTP clock, release it */ 3254 if (pf->ptp.clock) { 3255 ptp_clock_unregister(ptp->clock); 3256 pf->ptp.clock = NULL; 3257 } 3258 ptp->state = ICE_PTP_ERROR; 3259 dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err); 3260 } 3261 3262 /** 3263 * ice_ptp_release - Disable the driver/HW support and unregister the clock 3264 * @pf: Board private structure 3265 * 3266 * This function handles the cleanup work required from the initialization by 3267 * clearing out the important information and unregistering the clock 3268 */ 3269 void ice_ptp_release(struct ice_pf *pf) 3270 { 3271 if (pf->ptp.state != ICE_PTP_READY) 3272 return; 3273 3274 pf->ptp.state = ICE_PTP_UNINIT; 3275 3276 /* Disable timestamping for both Tx and Rx */ 3277 ice_ptp_disable_timestamp_mode(pf); 3278 3279 ice_ptp_cleanup_pf(pf); 3280 3281 ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); 3282 3283 ice_ptp_disable_all_extts(pf); 3284 3285 kthread_cancel_delayed_work_sync(&pf->ptp.work); 3286 3287 ice_ptp_port_phy_stop(&pf->ptp.port); 3288 mutex_destroy(&pf->ptp.port.ps_lock); 3289 if (pf->ptp.kworker) { 3290 kthread_destroy_worker(pf->ptp.kworker); 3291 pf->ptp.kworker = NULL; 3292 } 3293 3294 if (!pf->ptp.clock) 3295 return; 3296 3297 /* Disable periodic outputs */ 3298 ice_ptp_disable_all_perout(pf); 3299 3300 ptp_clock_unregister(pf->ptp.clock); 3301 pf->ptp.clock = NULL; 3302 3303 dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n"); 3304 } 3305
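
/* Illustrative userspace usage (not part of the driver): the EXTTS and
 * periodic output paths implemented above are driven through the standard
 * PTP chardev ioctls. Minimal sketch, assuming the clock is /dev/ptp0 and
 * channel 0 has already been assigned the external timestamp function via
 * PTP_PIN_SETFUNC (which lands in ice_verify_pin()):
 *
 *	struct ptp_extts_event ev;
 *	struct ptp_extts_request req = {
 *		.index = 0,
 *		.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE,
 *	};
 *	int fd = open("/dev/ptp0", O_RDWR);
 *
 *	ioctl(fd, PTP_EXTTS_REQUEST, &req);	// ends up in ice_ptp_cfg_extts()
 *	read(fd, &ev, sizeof(ev));		// blocks until an edge is stamped
 */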