1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021, Intel Corporation. */
3
4 #include "ice.h"
5 #include "ice_lib.h"
6 #include "ice_trace.h"
7 #include "ice_cgu_regs.h"
8
/* Default pin names reported to userspace via the PTP pin interface.
 * Presumably indexed by the SDP/pin enum (SDP0..SDP3, TIME_SYNC, ONE_PPS)
 * used in the ice_ptp_pin_desc tables below -- TODO confirm against the
 * enum declaration in the header.
 */
static const char ice_pin_names[][64] = {
	"SDP0",
	"SDP1",
	"SDP2",
	"SDP3",
	"TIME_SYNC",
	"1PPS"
};
17
/* Pin mapping for E82X devices: only the TIME_SYNC input and 1PPS output
 * are routed. Each entry is { name, { input GPIO, output GPIO }, { input
 * delay, output delay } }; -1 marks an unsupported direction. Delay units
 * are defined by struct ice_ptp_pin_desc -- verify in the header.
 */
static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = {
	/* name,        gpio,       delay */
	{  TIME_SYNC, {  4, -1 }, { 0,  0 }},
	{  ONE_PPS,   { -1,  5 }, { 0, 11 }},
};
23
/* Pin mapping for E825C devices: four bidirectional SDP pins plus the
 * TIME_SYNC input and 1PPS output. Layout matches ice_pin_desc_e82x:
 * { name, { input GPIO, output GPIO }, { input delay, output delay } },
 * with -1 marking an unsupported direction.
 */
static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = {
	/* name,        gpio,       delay */
	{  SDP0,      {  0,  0 }, { 15, 14 }},
	{  SDP1,      {  1,  1 }, { 15, 14 }},
	{  SDP2,      {  2,  2 }, { 15, 14 }},
	{  SDP3,      {  3,  3 }, { 15, 14 }},
	{  TIME_SYNC, {  4, -1 }, { 11,  0 }},
	{  ONE_PPS,   { -1,  5 }, {  0,  9 }},
};
33
/* Pin mapping for plain E810 devices (no SMA control logic): four
 * bidirectional SDP pins and the 1PPS output; no TIME_SYNC input pin.
 * Same column layout as the other ice_ptp_pin_desc tables.
 */
static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = {
	/* name,      gpio,       delay */
	{  SDP0,    {  0,  0 }, { 0, 1 }},
	{  SDP1,    {  1,  1 }, { 0, 1 }},
	{  SDP2,    {  2,  2 }, { 0, 1 }},
	{  SDP3,    {  3,  3 }, { 0, 1 }},
	{  ONE_PPS, { -1,  5 }, { 0, 1 }},
};
42
/* Pin names for E810 adapters whose NVM describes SMA/U.FL connectors.
 * Presumably indexed by the GNSS/SMA1/UFL1/SMA2/UFL2 enum used by
 * ice_pin_desc_e810_sma below -- TODO confirm against the header.
 */
static const char ice_pin_names_nvm[][64] = {
	"GNSS",
	"SMA1",
	"U.FL1",
	"SMA2",
	"U.FL2",
};
50
/* Pin mapping for E810 devices with SMA control logic. Note that SMA and
 * U.FL pins share GPIO lines (e.g. SMA1 and U.FL1 both use GPIO 0/1), which
 * is why ice_ptp_update_sma_data() must arbitrate their functions.
 */
static const struct ice_ptp_pin_desc ice_pin_desc_e810_sma[] = {
	/* name, gpio,       delay */
	{ GNSS, {  1, -1 }, { 0, 0 }},
	{ SMA1, {  1,  0 }, { 0, 1 }},
	{ UFL1, { -1,  0 }, { 0, 1 }},
	{ SMA2, {  3,  2 }, { 0, 1 }},
	{ UFL2, {  3, -1 }, { 0, 0 }},
};
59
ice_get_ctrl_pf(struct ice_pf * pf)60 static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf)
61 {
62 return !pf->adapter ? NULL : pf->adapter->ctrl_pf;
63 }
64
ice_get_ctrl_ptp(struct ice_pf * pf)65 static struct ice_ptp *ice_get_ctrl_ptp(struct ice_pf *pf)
66 {
67 struct ice_pf *ctrl_pf = ice_get_ctrl_pf(pf);
68
69 return !ctrl_pf ? NULL : &ctrl_pf->ptp;
70 }
71
72 /**
73 * ice_ptp_find_pin_idx - Find pin index in ptp_pin_desc
74 * @pf: Board private structure
75 * @func: Pin function
76 * @chan: GPIO channel
77 *
78 * Return: positive pin number when pin is present, -1 otherwise
79 */
ice_ptp_find_pin_idx(struct ice_pf * pf,enum ptp_pin_function func,unsigned int chan)80 static int ice_ptp_find_pin_idx(struct ice_pf *pf, enum ptp_pin_function func,
81 unsigned int chan)
82 {
83 const struct ptp_clock_info *info = &pf->ptp.info;
84 int i;
85
86 for (i = 0; i < info->n_pins; i++) {
87 if (info->pin_config[i].func == func &&
88 info->pin_config[i].chan == chan)
89 return i;
90 }
91
92 return -1;
93 }
94
/**
 * ice_ptp_update_sma_data - update SMA pins data according to pins setup
 * @pf: Board private structure
 * @sma_pins: parsed SMA pins status
 * @data: SMA data to update
 *
 * Translate the requested SMA1/SMA2/U.FL1/U.FL2 pin functions into bits of
 * the SMA control byte. SMA and U.FL pins share hardware lines, so each pair
 * is arbitrated together; the resulting state is logged for debugging.
 */
static void ice_ptp_update_sma_data(struct ice_pf *pf, unsigned int sma_pins[],
				    u8 *data)
{
	const char *state1, *state2;

	/* Set the right state based on the desired configuration.
	 * When bit is set, functionality is disabled.
	 */
	*data &= ~ICE_ALL_SMA_MASK;
	if (!sma_pins[UFL1 - 1]) {
		if (sma_pins[SMA1 - 1] == PTP_PF_EXTTS) {
			state1 = "SMA1 Rx, U.FL1 disabled";
			*data |= ICE_SMA1_TX_EN;
		} else if (sma_pins[SMA1 - 1] == PTP_PF_PEROUT) {
			state1 = "SMA1 Tx U.FL1 disabled";
			*data |= ICE_SMA1_DIR_EN;
		} else {
			state1 = "SMA1 disabled, U.FL1 disabled";
			*data |= ICE_SMA1_MASK;
		}
	} else {
		/* U.FL1 Tx will always enable SMA1 Rx */
		state1 = "SMA1 Rx, U.FL1 Tx";
	}

	if (!sma_pins[UFL2 - 1]) {
		if (sma_pins[SMA2 - 1] == PTP_PF_EXTTS) {
			state2 = "SMA2 Rx, U.FL2 disabled";
			*data |= ICE_SMA2_TX_EN | ICE_SMA2_UFL2_RX_DIS;
		} else if (sma_pins[SMA2 - 1] == PTP_PF_PEROUT) {
			state2 = "SMA2 Tx, U.FL2 disabled";
			*data |= ICE_SMA2_DIR_EN | ICE_SMA2_UFL2_RX_DIS;
		} else {
			state2 = "SMA2 disabled, U.FL2 disabled";
			*data |= ICE_SMA2_MASK;
		}
	} else {
		/* U.FL2 Rx is possible with SMA2 either disabled or in Tx */
		if (!sma_pins[SMA2 - 1]) {
			state2 = "SMA2 disabled, U.FL2 Rx";
			*data |= ICE_SMA2_DIR_EN | ICE_SMA2_TX_EN;
		} else {
			state2 = "SMA2 Tx, U.FL2 Rx";
			*data |= ICE_SMA2_DIR_EN;
		}
	}

	dev_dbg(ice_pf_to_dev(pf), "%s, %s\n", state1, state2);
}
149
150 /**
151 * ice_ptp_set_sma_cfg - set the configuration of the SMA control logic
152 * @pf: Board private structure
153 *
154 * Return: 0 on success, negative error code otherwise
155 */
ice_ptp_set_sma_cfg(struct ice_pf * pf)156 static int ice_ptp_set_sma_cfg(struct ice_pf *pf)
157 {
158 const struct ice_ptp_pin_desc *ice_pins = pf->ptp.ice_pin_desc;
159 struct ptp_pin_desc *pins = pf->ptp.pin_desc;
160 unsigned int sma_pins[ICE_SMA_PINS_NUM] = {};
161 int err;
162 u8 data;
163
164 /* Read initial pin state value */
165 err = ice_read_sma_ctrl(&pf->hw, &data);
166 if (err)
167 return err;
168
169 /* Get SMA/U.FL pins states */
170 for (int i = 0; i < pf->ptp.info.n_pins; i++)
171 if (pins[i].func) {
172 int name_idx = ice_pins[i].name_idx;
173
174 switch (name_idx) {
175 case SMA1:
176 case UFL1:
177 case SMA2:
178 case UFL2:
179 sma_pins[name_idx - 1] = pins[i].func;
180 break;
181 default:
182 continue;
183 }
184 }
185
186 ice_ptp_update_sma_data(pf, sma_pins, &data);
187 return ice_write_sma_ctrl(&pf->hw, data);
188 }
189
190 /**
191 * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
192 * @pf: Board private structure
193 *
194 * Program the device to respond appropriately to the Tx timestamp interrupt
195 * cause.
196 */
ice_ptp_cfg_tx_interrupt(struct ice_pf * pf)197 static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf)
198 {
199 struct ice_hw *hw = &pf->hw;
200 bool enable;
201 u32 val;
202
203 switch (pf->ptp.tx_interrupt_mode) {
204 case ICE_PTP_TX_INTERRUPT_ALL:
205 /* React to interrupts across all quads. */
206 wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
207 enable = true;
208 break;
209 case ICE_PTP_TX_INTERRUPT_NONE:
210 /* Do not react to interrupts on any quad. */
211 wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
212 enable = false;
213 break;
214 case ICE_PTP_TX_INTERRUPT_SELF:
215 default:
216 enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON;
217 break;
218 }
219
220 /* Configure the Tx timestamp interrupt */
221 val = rd32(hw, PFINT_OICR_ENA);
222 if (enable)
223 val |= PFINT_OICR_TSYN_TX_M;
224 else
225 val &= ~PFINT_OICR_TSYN_TX_M;
226 wr32(hw, PFINT_OICR_ENA, val);
227 }
228
229 /**
230 * ice_set_rx_tstamp - Enable or disable Rx timestamping
231 * @pf: The PF pointer to search in
232 * @on: bool value for whether timestamps are enabled or disabled
233 */
ice_set_rx_tstamp(struct ice_pf * pf,bool on)234 static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
235 {
236 struct ice_vsi *vsi;
237 u16 i;
238
239 vsi = ice_get_main_vsi(pf);
240 if (!vsi || !vsi->rx_rings)
241 return;
242
243 /* Set the timestamp flag for all the Rx rings */
244 ice_for_each_rxq(vsi, i) {
245 if (!vsi->rx_rings[i])
246 continue;
247 vsi->rx_rings[i]->ptp_rx = on;
248 }
249 }
250
251 /**
252 * ice_ptp_disable_timestamp_mode - Disable current timestamp mode
253 * @pf: Board private structure
254 *
255 * Called during preparation for reset to temporarily disable timestamping on
256 * the device. Called during remove to disable timestamping while cleaning up
257 * driver resources.
258 */
ice_ptp_disable_timestamp_mode(struct ice_pf * pf)259 static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf)
260 {
261 struct ice_hw *hw = &pf->hw;
262 u32 val;
263
264 val = rd32(hw, PFINT_OICR_ENA);
265 val &= ~PFINT_OICR_TSYN_TX_M;
266 wr32(hw, PFINT_OICR_ENA, val);
267
268 ice_set_rx_tstamp(pf, false);
269 }
270
271 /**
272 * ice_ptp_restore_timestamp_mode - Restore timestamp configuration
273 * @pf: Board private structure
274 *
275 * Called at the end of rebuild to restore timestamp configuration after
276 * a device reset.
277 */
ice_ptp_restore_timestamp_mode(struct ice_pf * pf)278 void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
279 {
280 struct ice_hw *hw = &pf->hw;
281 bool enable_rx;
282
283 ice_ptp_cfg_tx_interrupt(pf);
284
285 enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
286 ice_set_rx_tstamp(pf, enable_rx);
287
288 /* Trigger an immediate software interrupt to ensure that timestamps
289 * which occurred during reset are handled now.
290 */
291 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
292 ice_flush(hw);
293 }
294
/**
 * ice_ptp_read_src_clk_reg - Read the source clock register
 * @pf: Board private structure
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
 *
 * Return: the current 64bit value of the source PHC timer.
 */
u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
			     struct ptp_system_timestamp *sts)
{
	struct ice_hw *hw = &pf->hw;
	u32 hi, lo, lo2;
	u8 tmr_idx;

	tmr_idx = ice_get_ptp_src_clock_index(hw);
	/* Serialize against other readers of the GLTSYN time registers;
	 * released automatically at function exit.
	 */
	guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
	/* Read the system timestamp pre PHC read */
	ptp_read_system_prets(sts);

	if (hw->mac_type == ICE_MAC_E830) {
		/* E830 exposes the full 64bit timer in a single register,
		 * so no rollover handling is needed.
		 */
		u64 clk_time = rd64(hw, E830_GLTSYN_TIME_L(tmr_idx));

		/* Read the system timestamp post PHC read */
		ptp_read_system_postts(sts);

		return clk_time;
	}

	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	/* Read the system timestamp post PHC read */
	ptp_read_system_postts(sts);

	/* Re-read TIME_L after TIME_H to detect a rollover between the two
	 * 32bit reads.
	 */
	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	if (lo2 < lo) {
		/* if TIME_L rolled over read TIME_L again and update
		 * system timestamps
		 */
		ptp_read_system_prets(sts);
		lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
		ptp_read_system_postts(sts);
		hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	}

	return ((u64)hi << 32) | lo;
}
342
/**
 * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
 * @cached_phc_time: recently cached copy of PHC time
 * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
 *
 * Hardware captures timestamps which contain only 32 bits of nominal
 * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
 * Note that the captured timestamp values may be 40 bits, but the lower
 * 8 bits are sub-nanoseconds and generally discarded.
 *
 * Extend the 32bit nanosecond timestamp using the following algorithm and
 * assumptions:
 *
 * 1) have a recently cached copy of the PHC time
 * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1
 *    seconds) before or after the PHC time was captured.
 * 3) calculate the delta between the cached time and the timestamp
 * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
 *    captured after the PHC time. In this case, the full timestamp is just
 *    the cached PHC time plus the delta.
 * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
 *    timestamp was captured *before* the PHC time, i.e. because the PHC
 *    cache was updated after the timestamp was captured by hardware. In this
 *    case, the full timestamp is the cached time minus the inverse delta.
 *
 * This algorithm works even if the PHC time was updated after a Tx timestamp
 * was requested, but before the Tx timestamp event was reported from
 * hardware.
 *
 * This calculation primarily relies on keeping the cached PHC time up to
 * date. If the timestamp was captured more than 2^31 nanoseconds after the
 * PHC time, it is possible that the lower 32bits of PHC time have
 * overflowed more than once, and we might generate an incorrect timestamp.
 *
 * This is prevented by (a) periodically updating the cached PHC time once
 * a second, and (b) discarding any Tx timestamp packet if it has waited for
 * a timestamp for more than one second.
 *
 * Return: the extended 64bit nanosecond timestamp.
 */
static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
{
	u32 phc_low = (u32)cached_phc_time;
	u32 fwd_delta = in_tstamp - phc_low;

	/* A forward delta larger than half the 32bit range means the
	 * timestamp was actually captured before the cached PHC time; walk
	 * backwards from the cached time instead.
	 */
	if (fwd_delta > U32_MAX / 2)
		return cached_phc_time - (phc_low - in_tstamp);

	return cached_phc_time + fwd_delta;
}
409
410 /**
411 * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
412 * @pf: Board private structure
413 * @in_tstamp: Ingress/egress 40b timestamp value
414 *
415 * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
416 * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
417 *
418 * *--------------------------------------------------------------*
419 * | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
420 * *--------------------------------------------------------------*
421 *
422 * The low bit is an indicator of whether the timestamp is valid. The next
423 * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
424 * and the remaining 32 bits are the lower 32 bits of the PHC timer.
425 *
426 * It is assumed that the caller verifies the timestamp is valid prior to
427 * calling this function.
428 *
429 * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
430 * time stored in the device private PTP structure as the basis for timestamp
431 * extension.
432 *
433 * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
434 * algorithm.
435 */
ice_ptp_extend_40b_ts(struct ice_pf * pf,u64 in_tstamp)436 static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
437 {
438 const u64 mask = GENMASK_ULL(31, 0);
439 unsigned long discard_time;
440
441 /* Discard the hardware timestamp if the cached PHC time is too old */
442 discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
443 if (time_is_before_jiffies(discard_time)) {
444 pf->ptp.tx_hwtstamp_discarded++;
445 return 0;
446 }
447
448 return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
449 (in_tstamp >> 8) & mask);
450 }
451
452 /**
453 * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps
454 * @tx: the PTP Tx timestamp tracker to check
455 *
456 * Check that a given PTP Tx timestamp tracker is up, i.e. that it is ready
457 * to accept new timestamp requests.
458 *
459 * Assumes the tx->lock spinlock is already held.
460 */
461 static bool
ice_ptp_is_tx_tracker_up(struct ice_ptp_tx * tx)462 ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
463 {
464 lockdep_assert_held(&tx->lock);
465
466 return tx->init && !tx->calibrating;
467 }
468
/**
 * ice_ptp_req_tx_single_tstamp - Request Tx timestamp for a port from FW
 * @tx: the PTP Tx timestamp tracker
 * @idx: index of the timestamp to request
 *
 * Ask firmware, via the low latency (ATQBAL) interface, to fetch the Tx
 * timestamp stored at @idx. The result is picked up later by
 * ice_ptp_complete_tx_single_tstamp(). Timestamp requests older than two
 * seconds are dropped instead of being requested.
 */
void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
{
	struct ice_e810_params *params;
	struct ice_ptp_port *ptp_port;
	unsigned long flags;
	struct sk_buff *skb;
	struct ice_pf *pf;

	if (!tx->init)
		return;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	params = &pf->hw.ptp.phy.e810;

	/* Drop packets which have waited for more than 2 seconds */
	if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
		/* Count the number of Tx timestamps that timed out */
		pf->ptp.tx_hwtstamp_timeouts++;

		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		clear_bit(idx, tx->in_use);

		dev_kfree_skb_any(skb);
		return;
	}

	ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);

	/* The wait-queue lock also serializes access to atqbal_flags and
	 * the proxy register.
	 */
	spin_lock_irqsave(&params->atqbal_wq.lock, flags);

	params->atqbal_flags |= ATQBAL_FLAGS_INTR_IN_PROGRESS;

	/* Write TS index to read to the PF register so the FW can read it */
	wr32(&pf->hw, REG_LL_PROXY_H,
	     REG_LL_PROXY_H_TS_INTR_ENA | FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) |
	     REG_LL_PROXY_H_EXEC);
	/* Remember which index was requested so the completion handler
	 * knows which slot to finish.
	 */
	tx->last_ll_ts_idx_read = idx;

	spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
}
516
/**
 * ice_ptp_complete_tx_single_tstamp - Complete Tx timestamp for a port
 * @tx: the PTP Tx timestamp tracker
 *
 * Read back the timestamp previously requested from firmware via
 * ice_ptp_req_tx_single_tstamp(), extend it to 64 bits, and deliver it to
 * the stack for the associated skb. Stale or duplicate timestamps are
 * discarded.
 */
void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
{
	struct skb_shared_hwtstamps shhwtstamps = {};
	u8 idx = tx->last_ll_ts_idx_read;
	struct ice_e810_params *params;
	struct ice_ptp_port *ptp_port;
	u64 raw_tstamp, tstamp;
	bool drop_ts = false;
	struct sk_buff *skb;
	unsigned long flags;
	struct device *dev;
	struct ice_pf *pf;
	u32 reg_ll_high;

	/* Nothing to do unless a low latency request is outstanding */
	if (!tx->init || tx->last_ll_ts_idx_read < 0)
		return;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	dev = ice_pf_to_dev(pf);
	params = &pf->hw.ptp.phy.e810;

	ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);

	spin_lock_irqsave(&params->atqbal_wq.lock, flags);

	if (!(params->atqbal_flags & ATQBAL_FLAGS_INTR_IN_PROGRESS))
		dev_dbg(dev, "%s: low latency interrupt request not in progress?\n",
			__func__);

	/* Read the low 32 bit value */
	raw_tstamp = rd32(&pf->hw, REG_LL_PROXY_L);
	/* Read the status together with high TS part */
	reg_ll_high = rd32(&pf->hw, REG_LL_PROXY_H);

	/* Wake up threads waiting on low latency interface */
	params->atqbal_flags &= ~ATQBAL_FLAGS_INTR_IN_PROGRESS;

	wake_up_locked(&params->atqbal_wq);

	spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);

	/* When the bit is cleared, the TS is ready in the register */
	if (reg_ll_high & REG_LL_PROXY_H_EXEC) {
		dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready");
		return;
	}

	/* High 8 bit value of the TS is on the bits 16:23 */
	raw_tstamp |= ((u64)FIELD_GET(REG_LL_PROXY_H_TS_HIGH, reg_ll_high)) << 32;

	/* Devices using this interface always verify the timestamp differs
	 * relative to the last cached timestamp value.
	 */
	if (raw_tstamp == tx->tstamps[idx].cached_tstamp)
		return;

	tx->tstamps[idx].cached_tstamp = raw_tstamp;
	clear_bit(idx, tx->in_use);
	skb = tx->tstamps[idx].skb;
	tx->tstamps[idx].skb = NULL;
	if (test_and_clear_bit(idx, tx->stale))
		drop_ts = true;

	/* The skb may already have been flushed by teardown or link change */
	if (!skb)
		return;

	if (drop_ts) {
		dev_kfree_skb_any(skb);
		return;
	}

	/* Extend the timestamp using cached PHC time */
	tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
	if (tstamp) {
		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
		ice_trace(tx_tstamp_complete, skb, idx);
	}

	skb_tstamp_tx(skb, &shhwtstamps);
	dev_kfree_skb_any(skb);
}
603
/**
 * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port
 * @tx: the PTP Tx timestamp tracker
 *
 * Process timestamps captured by the PHY associated with this port. To do
 * this, loop over each index with a waiting skb.
 *
 * If a given index has a valid timestamp, perform the following steps:
 *
 * 1) check that the timestamp request is not stale
 * 2) check that a timestamp is ready and available in the PHY memory bank
 * 3) read and copy the timestamp out of the PHY register
 * 4) unlock the index by clearing the associated in_use bit
 * 5) check if the timestamp is stale, and discard if so
 * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value
 * 7) send this 64 bit timestamp to the stack
 *
 * Note that we do not hold the tracking lock while reading the Tx timestamp.
 * This is because reading the timestamp requires taking a mutex that might
 * sleep.
 *
 * The only place where we set in_use is when a new timestamp is initiated
 * with a slot index. This is only called in the hard xmit routine where an
 * SKB has a request flag set. The only places where we clear this bit is this
 * function, or during teardown when the Tx timestamp tracker is being
 * removed. A timestamp index will never be re-used until the in_use bit for
 * that index is cleared.
 *
 * If a Tx thread starts a new timestamp, we might not begin processing it
 * right away but we will notice it at the end when we re-queue the task.
 *
 * If a Tx thread starts a new timestamp just after this function exits, the
 * interrupt for that timestamp should re-trigger this function once
 * a timestamp is ready.
 *
 * In cases where the PTP hardware clock was directly adjusted, some
 * timestamps may not be able to safely use the timestamp extension math. In
 * this case, software will set the stale bit for any outstanding Tx
 * timestamps when the clock is adjusted. Then this function will discard
 * those captured timestamps instead of sending them to the stack.
 *
 * If a Tx packet has been waiting for more than 2 seconds, it is not possible
 * to correctly extend the timestamp using the cached PHC time. It is
 * extremely unlikely that a packet will ever take this long to timestamp. If
 * we detect a Tx timestamp request that has waited for this long we assume
 * the packet will never be sent by hardware and discard it without reading
 * the timestamp register.
 */
static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
{
	struct ice_ptp_port *ptp_port;
	unsigned long flags;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u64 tstamp_ready;
	bool link_up;
	int err;
	u8 idx;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	hw = &pf->hw;

	/* Read the Tx ready status first */
	if (tx->has_ready_bitmap) {
		err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
		if (err)
			return;
	}

	/* Drop packets if the link went down */
	link_up = ptp_port->link_up;

	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct skb_shared_hwtstamps shhwtstamps = {};
		u8 phy_idx = idx + tx->offset;
		u64 raw_tstamp = 0, tstamp;
		bool drop_ts = !link_up;
		struct sk_buff *skb;

		/* Drop packets which have waited for more than 2 seconds */
		if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
			drop_ts = true;

			/* Count the number of Tx timestamps that timed out */
			pf->ptp.tx_hwtstamp_timeouts++;
		}

		/* Only read a timestamp from the PHY if its marked as ready
		 * by the tstamp_ready register. This avoids unnecessary
		 * reading of timestamps which are not yet valid. This is
		 * important as we must read all timestamps which are valid
		 * and only timestamps which are valid during each interrupt.
		 * If we do not, the hardware logic for generating a new
		 * interrupt can get stuck on some devices.
		 */
		if (tx->has_ready_bitmap &&
		    !(tstamp_ready & BIT_ULL(phy_idx))) {
			if (drop_ts)
				goto skip_ts_read;

			continue;
		}

		ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);

		err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp);
		if (err && !drop_ts)
			continue;

		ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);

		/* For PHYs which don't implement a proper timestamp ready
		 * bitmap, verify that the timestamp value is different
		 * from the last cached timestamp. If it is not, skip this for
		 * now assuming it hasn't yet been captured by hardware.
		 */
		if (!drop_ts && !tx->has_ready_bitmap &&
		    raw_tstamp == tx->tstamps[idx].cached_tstamp)
			continue;

		/* Discard any timestamp value without the valid bit set */
		if (!(raw_tstamp & ICE_PTP_TS_VALID))
			drop_ts = true;

		/* Jumped to directly when a dropped timestamp still needs its
		 * tracker slot released without reading the PHY register.
		 */
skip_ts_read:
		spin_lock_irqsave(&tx->lock, flags);
		if (!tx->has_ready_bitmap && raw_tstamp)
			tx->tstamps[idx].cached_tstamp = raw_tstamp;
		clear_bit(idx, tx->in_use);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		if (test_and_clear_bit(idx, tx->stale))
			drop_ts = true;
		spin_unlock_irqrestore(&tx->lock, flags);

		/* It is unlikely but possible that the SKB will have been
		 * flushed at this point due to link change or teardown.
		 */
		if (!skb)
			continue;

		if (drop_ts) {
			dev_kfree_skb_any(skb);
			continue;
		}

		/* Extend the timestamp using cached PHC time */
		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
		if (tstamp) {
			shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
			ice_trace(tx_tstamp_complete, skb, idx);
		}

		skb_tstamp_tx(skb, &shhwtstamps);
		dev_kfree_skb_any(skb);
	}
}
762
763 /**
764 * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device
765 * @pf: Board private structure
766 */
ice_ptp_tx_tstamp_owner(struct ice_pf * pf)767 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
768 {
769 struct ice_ptp_port *port;
770 unsigned int i;
771
772 mutex_lock(&pf->adapter->ports.lock);
773 list_for_each_entry(port, &pf->adapter->ports.ports, list_node) {
774 struct ice_ptp_tx *tx = &port->tx;
775
776 if (!tx || !tx->init)
777 continue;
778
779 ice_ptp_process_tx_tstamp(tx);
780 }
781 mutex_unlock(&pf->adapter->ports.lock);
782
783 for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) {
784 u64 tstamp_ready;
785 int err;
786
787 /* Read the Tx ready status first */
788 err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
789 if (err)
790 break;
791 else if (tstamp_ready)
792 return ICE_TX_TSTAMP_WORK_PENDING;
793 }
794
795 return ICE_TX_TSTAMP_WORK_DONE;
796 }
797
798 /**
799 * ice_ptp_tx_tstamp - Process Tx timestamps for this function.
800 * @tx: Tx tracking structure to initialize
801 *
802 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete
803 * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise.
804 */
ice_ptp_tx_tstamp(struct ice_ptp_tx * tx)805 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
806 {
807 bool more_timestamps;
808 unsigned long flags;
809
810 if (!tx->init)
811 return ICE_TX_TSTAMP_WORK_DONE;
812
813 /* Process the Tx timestamp tracker */
814 ice_ptp_process_tx_tstamp(tx);
815
816 /* Check if there are outstanding Tx timestamps */
817 spin_lock_irqsave(&tx->lock, flags);
818 more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);
819 spin_unlock_irqrestore(&tx->lock, flags);
820
821 if (more_timestamps)
822 return ICE_TX_TSTAMP_WORK_PENDING;
823
824 return ICE_TX_TSTAMP_WORK_DONE;
825 }
826
827 /**
828 * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
829 * @tx: Tx tracking structure to initialize
830 *
831 * Assumes that the length has already been initialized. Do not call directly,
832 * use the ice_ptp_init_tx_* instead.
833 */
834 static int
ice_ptp_alloc_tx_tracker(struct ice_ptp_tx * tx)835 ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
836 {
837 unsigned long *in_use, *stale;
838 struct ice_tx_tstamp *tstamps;
839
840 tstamps = kcalloc(tx->len, sizeof(*tstamps), GFP_KERNEL);
841 in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
842 stale = bitmap_zalloc(tx->len, GFP_KERNEL);
843
844 if (!tstamps || !in_use || !stale) {
845 kfree(tstamps);
846 bitmap_free(in_use);
847 bitmap_free(stale);
848
849 return -ENOMEM;
850 }
851
852 tx->tstamps = tstamps;
853 tx->in_use = in_use;
854 tx->stale = stale;
855 tx->init = 1;
856 tx->last_ll_ts_idx_read = -1;
857
858 spin_lock_init(&tx->lock);
859
860 return 0;
861 }
862
/**
 * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
 * @pf: Board private structure
 * @tx: the tracker to flush
 *
 * Called during teardown when a Tx tracker is being removed. Releases every
 * outstanding slot, clears any ready timestamps left in the PHY, and frees
 * the associated skbs.
 */
static void
ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	struct ice_hw *hw = &pf->hw;
	unsigned long flags;
	u64 tstamp_ready;
	int err;
	u8 idx;

	err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
	if (err) {
		dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n",
			tx->block, err);

		/* If we fail to read the Tx timestamp ready bitmap just
		 * skip clearing the PHY timestamps.
		 */
		tstamp_ready = 0;
	}

	for_each_set_bit(idx, tx->in_use, tx->len) {
		u8 phy_idx = idx + tx->offset;
		struct sk_buff *skb;

		/* In case this timestamp is ready, we need to clear it. */
		if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx)))
			ice_clear_phy_tstamp(hw, tx->block, phy_idx);

		/* Release the tracker slot under the lock before freeing
		 * the skb outside of it.
		 */
		spin_lock_irqsave(&tx->lock, flags);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		clear_bit(idx, tx->in_use);
		clear_bit(idx, tx->stale);
		spin_unlock_irqrestore(&tx->lock, flags);

		/* Count the number of Tx timestamps flushed */
		pf->ptp.tx_hwtstamp_flushed++;

		/* Free the SKB after we've cleared the bit */
		dev_kfree_skb_any(skb);
	}
}
912
/**
 * ice_ptp_mark_tx_tracker_stale - Mark unfinished timestamps as stale
 * @tx: the tracker to mark
 *
 * Mark currently outstanding Tx timestamps as stale. This prevents sending
 * their timestamp value to the stack. This is required to prevent extending
 * the 40bit hardware timestamp incorrectly.
 *
 * This should be called when the PTP clock is modified such as after a set
 * time request.
 */
static void
ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
{
	unsigned long flags;

	/* Every index currently in_use becomes stale */
	spin_lock_irqsave(&tx->lock, flags);
	bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len);
	spin_unlock_irqrestore(&tx->lock, flags);
}
933
/**
 * ice_ptp_flush_all_tx_tracker - Flush all timestamp trackers on this clock
 * @pf: Board private structure
 *
 * Called by the clock owner to flush all the Tx timestamp trackers associated
 * with the clock.
 *
 * NOTE(review): the port list is walked without taking ports.lock here,
 * unlike ice_ptp_tx_tstamp_owner() -- presumably callers run in a context
 * where the list cannot change (e.g. reset); confirm against callers.
 */
static void
ice_ptp_flush_all_tx_tracker(struct ice_pf *pf)
{
	struct ice_ptp_port *port;

	list_for_each_entry(port, &pf->adapter->ports.ports, list_node)
		ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx);
}
949
/**
 * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
 * @pf: Board private structure
 * @tx: Tx tracking structure to release
 *
 * Free memory associated with the Tx timestamp tracker. The tracker is
 * first marked uninitialized under the lock so that concurrent interrupt
 * handlers stop using it, then any outstanding timestamps are flushed
 * before the memory is freed.
 */
static void
ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	unsigned long flags;

	/* Stop new users of the tracker before tearing it down */
	spin_lock_irqsave(&tx->lock, flags);
	tx->init = 0;
	spin_unlock_irqrestore(&tx->lock, flags);

	/* wait for potentially outstanding interrupt to complete */
	synchronize_irq(pf->oicr_irq.virq);

	ice_ptp_flush_tx_tracker(pf, tx);

	kfree(tx->tstamps);
	tx->tstamps = NULL;

	bitmap_free(tx->in_use);
	tx->in_use = NULL;

	bitmap_free(tx->stale);
	tx->stale = NULL;

	tx->len = 0;
}
982
983 /**
984 * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
985 * @pf: Board private structure
986 * @tx: the Tx tracking structure to initialize
987 * @port: the port this structure tracks
988 *
989 * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
990 * the timestamp block is shared for all ports in the same quad. To avoid
991 * ports using the same timestamp index, logically break the block of
992 * registers into chunks based on the port number.
993 *
994 * Return: 0 on success, -ENOMEM when out of memory
995 */
ice_ptp_init_tx_e82x(struct ice_pf * pf,struct ice_ptp_tx * tx,u8 port)996 static int ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx,
997 u8 port)
998 {
999 tx->block = ICE_GET_QUAD_NUM(port);
1000 tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
1001 tx->len = INDEX_PER_PORT_E82X;
1002 tx->has_ready_bitmap = 1;
1003
1004 return ice_ptp_alloc_tx_tracker(tx);
1005 }
1006
/**
 * ice_ptp_init_tx - Initialize tracking for Tx timestamps
 * @pf: Board private structure
 * @tx: the Tx tracking structure to initialize
 * @port: the port this structure tracks
 *
 * Initialize the Tx timestamp tracker for this PF. For all PHYs except E82X,
 * each port has its own block of timestamps, independent of the other ports.
 *
 * Return: 0 on success, -ENOMEM when out of memory
 */
static int ice_ptp_init_tx(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
	/* One whole timestamp block per port; no offset into a shared quad */
	tx->block = port;
	tx->offset = 0;
	tx->len = INDEX_PER_PORT;

	/* The E810 PHY does not provide a timestamp ready bitmap. Instead,
	 * verify new timestamps against cached copy of the last read
	 * timestamp.
	 */
	tx->has_ready_bitmap = pf->hw.mac_type != ICE_MAC_E810;

	return ice_ptp_alloc_tx_tracker(tx);
}
1032
/**
 * ice_ptp_update_cached_phctime - Update the cached PHC time values
 * @pf: Board specific private structure
 *
 * This function updates the system time values which are cached in the PF
 * structure and the Rx rings.
 *
 * This function must be called periodically to ensure that the cached value
 * is never more than 2 seconds old.
 *
 * Note that the cached copy in the PF PTP structure is always updated, even
 * if we can't update the copy in the Rx rings.
 *
 * Return:
 * * 0 - OK, successfully updated
 * * -EAGAIN - PF was busy, need to reschedule the update
 */
static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	unsigned long update_before;
	u64 systime;
	int i;

	/* Warn (and count) when more than 2 seconds elapsed since the last
	 * cache refresh; timestamp extension depends on a fresh cache.
	 */
	update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
	if (pf->ptp.cached_phc_time &&
	    time_is_before_jiffies(update_before)) {
		unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;

		dev_warn(dev, "%u msecs passed between update to cached PHC time\n",
			 jiffies_to_msecs(time_taken));
		pf->ptp.late_cached_phc_updates++;
	}

	/* Read the current PHC time */
	systime = ice_ptp_read_src_clk_reg(pf, NULL);

	/* Update the cached PHC time stored in the PF structure */
	WRITE_ONCE(pf->ptp.cached_phc_time, systime);
	WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);

	/* If another thread holds ICE_CFG_BUSY we cannot safely walk the
	 * VSIs and rings now; the caller must reschedule.
	 */
	if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
		return -EAGAIN;

	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];
		int j;

		if (!vsi)
			continue;

		/* Only the PF VSI Rx rings carry a cached PHC time */
		if (vsi->type != ICE_VSI_PF)
			continue;

		ice_for_each_rxq(vsi, j) {
			if (!vsi->rx_rings[j])
				continue;
			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
		}
	}
	clear_bit(ICE_CFG_BUSY, pf->state);

	return 0;
}
1097
/**
 * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
 * @pf: Board specific private structure
 *
 * This function must be called when the cached PHC time is no longer valid,
 * such as after a time adjustment. It marks any currently outstanding Tx
 * timestamps as stale and updates the cached PHC time for both the PF and Rx
 * rings.
 *
 * If updating the PHC time cannot be done immediately, a warning message is
 * logged and the work item is scheduled immediately to minimize the window
 * with a wrong cached timestamp.
 */
static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	/* Update the cached PHC time immediately if possible, otherwise
	 * schedule the work item to execute soon.
	 */
	err = ice_ptp_update_cached_phctime(pf);
	if (err) {
		/* If another thread is updating the Rx rings, we won't
		 * properly reset them here. This could lead to reporting of
		 * invalid timestamps, but there isn't much we can do.
		 */
		dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
			 __func__);

		/* Queue the work item to update the Rx rings when possible */
		kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
					   msecs_to_jiffies(10));
	}

	/* Mark any outstanding timestamps as stale, since they might have
	 * been captured in hardware before the time update. This could lead
	 * to us extending them with the wrong cached value resulting in
	 * incorrect timestamp values.
	 */
	ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx);
}
1140
1141 /**
1142 * ice_ptp_write_init - Set PHC time to provided value
1143 * @pf: Board private structure
1144 * @ts: timespec structure that holds the new time value
1145 *
1146 * Set the PHC time to the specified time provided in the timespec.
1147 */
ice_ptp_write_init(struct ice_pf * pf,struct timespec64 * ts)1148 static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
1149 {
1150 u64 ns = timespec64_to_ns(ts);
1151 struct ice_hw *hw = &pf->hw;
1152
1153 return ice_ptp_init_time(hw, ns);
1154 }
1155
1156 /**
1157 * ice_ptp_write_adj - Adjust PHC clock time atomically
1158 * @pf: Board private structure
1159 * @adj: Adjustment in nanoseconds
1160 *
1161 * Perform an atomic adjustment of the PHC time by the specified number of
1162 * nanoseconds.
1163 */
ice_ptp_write_adj(struct ice_pf * pf,s32 adj)1164 static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
1165 {
1166 struct ice_hw *hw = &pf->hw;
1167
1168 return ice_ptp_adj_clock(hw, adj);
1169 }
1170
1171 /**
1172 * ice_base_incval - Get base timer increment value
1173 * @pf: Board private structure
1174 *
1175 * Look up the base timer increment value for this device. The base increment
1176 * value is used to define the nominal clock tick rate. This increment value
1177 * is programmed during device initialization. It is also used as the basis
1178 * for calculating adjustments using scaled_ppm.
1179 */
ice_base_incval(struct ice_pf * pf)1180 static u64 ice_base_incval(struct ice_pf *pf)
1181 {
1182 struct ice_hw *hw = &pf->hw;
1183 u64 incval;
1184
1185 incval = ice_get_base_incval(hw);
1186
1187 dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
1188 incval);
1189
1190 return incval;
1191 }
1192
/**
 * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state
 * @port: PTP port for which Tx FIFO is checked
 *
 * Return: 0 when the FIFO is empty (or already known OK), -EAGAIN while the
 * FIFO is still draining, or a negative error code from the register read.
 */
static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
{
	int offs = port->port_num % ICE_PORTS_PER_QUAD;
	int quad = ICE_GET_QUAD_NUM(port->port_num);
	struct ice_pf *pf;
	struct ice_hw *hw;
	u32 val, phy_sts;
	int err;

	pf = ptp_port_to_pf(port);
	hw = &pf->hw;

	/* A previous check already found the FIFO empty; nothing to do */
	if (port->tx_fifo_busy_cnt == FIFO_OK)
		return 0;

	/* need to read FIFO state; ports 0/1 and 2/3 of a quad share a
	 * status register
	 */
	if (offs == 0 || offs == 1)
		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS,
					     &val);
	else
		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS,
					     &val);

	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
			port->port_num, err);
		return err;
	}

	/* Select the field for the odd or even port of the pair */
	if (offs & 0x1)
		phy_sts = FIELD_GET(Q_REG_FIFO13_M, val);
	else
		phy_sts = FIELD_GET(Q_REG_FIFO02_M, val);

	if (phy_sts & FIFO_EMPTY) {
		port->tx_fifo_busy_cnt = FIFO_OK;
		return 0;
	}

	port->tx_fifo_busy_cnt++;

	dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
		port->tx_fifo_busy_cnt, port->port_num);

	/* Give up after ICE_PTP_FIFO_NUM_CHECKS attempts and reset the
	 * quad's timestamp memory to recover.
	 */
	if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
		dev_dbg(ice_pf_to_dev(pf),
			"Port %d Tx FIFO still not empty; resetting quad %d\n",
			port->port_num, quad);
		ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
		port->tx_fifo_busy_cnt = FIFO_OK;
		return 0;
	}

	return -EAGAIN;
}
1252
/**
 * ice_ptp_wait_for_offsets - Check for valid Tx and Rx offsets
 * @work: Pointer to the kthread_work structure for this task
 *
 * Check whether hardware has completed measuring the Tx and Rx offset values
 * used to configure and enable vernier timestamp calibration.
 *
 * Once the offset in either direction is measured, configure the associated
 * registers with the calibrated offset values and enable timestamping. The Tx
 * and Rx directions are configured independently as soon as their associated
 * offsets are known.
 *
 * This function reschedules itself until both Tx and Rx calibration have
 * completed.
 */
static void ice_ptp_wait_for_offsets(struct kthread_work *work)
{
	struct ice_ptp_port *port;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int tx_err;
	int rx_err;

	port = container_of(work, struct ice_ptp_port, ov_work.work);
	pf = ptp_port_to_pf(port);
	hw = &pf->hw;

	if (ice_is_reset_in_progress(pf->state)) {
		/* wait for device driver to complete reset */
		kthread_queue_delayed_work(pf->ptp.kworker,
					   &port->ov_work,
					   msecs_to_jiffies(100));
		return;
	}

	/* Only program the Tx offset once the Tx FIFO has drained; the Rx
	 * offset does not depend on the FIFO state.
	 */
	tx_err = ice_ptp_check_tx_fifo(port);
	if (!tx_err)
		tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num);
	rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num);
	if (tx_err || rx_err) {
		/* Tx and/or Rx offset not yet configured, try again later */
		kthread_queue_delayed_work(pf->ptp.kworker,
					   &port->ov_work,
					   msecs_to_jiffies(100));
		return;
	}
}
1300
/**
 * ice_ptp_port_phy_stop - Stop timestamping for a PHY port
 * @ptp_port: PTP port to stop
 *
 * Return: 0 on success (a no-op for E810/E830), -ENODEV for unknown MAC
 * types, or the error returned when stopping the PHY timer.
 */
static int
ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
{
	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
	u8 port = ptp_port->port_num;
	struct ice_hw *hw = &pf->hw;
	int err;

	/* Serialize port start/stop against concurrent reconfiguration */
	mutex_lock(&ptp_port->ps_lock);

	switch (hw->mac_type) {
	case ICE_MAC_E810:
	case ICE_MAC_E830:
		/* These PHYs need no explicit stop */
		err = 0;
		break;
	case ICE_MAC_GENERIC:
		/* Cancel pending Vernier offset work before stopping */
		kthread_cancel_delayed_work_sync(&ptp_port->ov_work);

		err = ice_stop_phy_timer_e82x(hw, port, true);
		break;
	case ICE_MAC_GENERIC_3K_E825:
		err = ice_stop_phy_timer_eth56g(hw, port, true);
		break;
	default:
		err = -ENODEV;
	}
	/* -EBUSY is expected while another entity owns the timer; only log
	 * genuine failures
	 */
	if (err && err != -EBUSY)
		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
			port, err);

	mutex_unlock(&ptp_port->ps_lock);

	return err;
}
1339
/**
 * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping
 * @ptp_port: PTP port for which the PHY start is set
 *
 * Start the PHY timestamping block, and initiate Vernier timestamping
 * calibration. If timestamping cannot be calibrated (such as if link is down)
 * then disable the timestamping block instead.
 *
 * Return: 0 on success, -ENODEV for unknown MAC types, or the error returned
 * when starting the PHY timer.
 */
static int
ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
{
	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
	u8 port = ptp_port->port_num;
	struct ice_hw *hw = &pf->hw;
	unsigned long flags;
	int err;

	/* Without link, Vernier calibration cannot run; stop the port */
	if (!ptp_port->link_up)
		return ice_ptp_port_phy_stop(ptp_port);

	mutex_lock(&ptp_port->ps_lock);

	switch (hw->mac_type) {
	case ICE_MAC_E810:
	case ICE_MAC_E830:
		/* These PHYs need no explicit restart */
		err = 0;
		break;
	case ICE_MAC_GENERIC:
		/* Start the PHY timer in Vernier mode */
		kthread_cancel_delayed_work_sync(&ptp_port->ov_work);

		/* temporarily disable Tx timestamps while calibrating
		 * PHY offset
		 */
		spin_lock_irqsave(&ptp_port->tx.lock, flags);
		ptp_port->tx.calibrating = true;
		spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
		ptp_port->tx_fifo_busy_cnt = 0;

		/* Start the PHY timer in Vernier mode */
		err = ice_start_phy_timer_e82x(hw, port);
		if (err)
			break;

		/* Enable Tx timestamps right away */
		spin_lock_irqsave(&ptp_port->tx.lock, flags);
		ptp_port->tx.calibrating = false;
		spin_unlock_irqrestore(&ptp_port->tx.lock, flags);

		/* Kick the offset-wait work to finish calibration */
		kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
					   0);
		break;
	case ICE_MAC_GENERIC_3K_E825:
		err = ice_start_phy_timer_eth56g(hw, port);
		break;
	default:
		err = -ENODEV;
	}

	if (err)
		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
			port, err);

	mutex_unlock(&ptp_port->ps_lock);

	return err;
}
1407
/**
 * ice_ptp_link_change - Reconfigure PTP after link status change
 * @pf: Board private structure
 * @linkup: Link is up or down
 */
void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
{
	struct ice_ptp_port *ptp_port;
	struct ice_hw *hw = &pf->hw;

	/* Nothing to do until PTP initialization has finished */
	if (pf->ptp.state != ICE_PTP_READY)
		return;

	ptp_port = &pf->ptp.port;

	/* Update cached link status for this port immediately */
	ptp_port->link_up = linkup;

	/* Skip HW writes if reset is in progress */
	if (pf->hw.reset_ongoing)
		return;

	switch (hw->mac_type) {
	case ICE_MAC_E810:
	case ICE_MAC_E830:
		/* Do not reconfigure E810 or E830 PHY */
		return;
	case ICE_MAC_GENERIC:
	case ICE_MAC_GENERIC_3K_E825:
		/* Restart (or stop, if link dropped) PHY timestamping */
		ice_ptp_port_phy_restart(ptp_port);
		return;
	default:
		dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__);
	}
}
1443
/**
 * ice_ptp_cfg_phy_interrupt - Configure PHY interrupt settings
 * @pf: PF private structure
 * @ena: bool value to enable or disable interrupt
 * @threshold: Minimum number of packets at which intr is triggered
 *
 * Utility function to configure all the PHY interrupt settings, including
 * whether the PHY interrupt is enabled, and what threshold to use. Also
 * configures The E82X timestamp owner to react to interrupts from all PHYs.
 *
 * Return: 0 on success, -EOPNOTSUPP when PHY model incorrect, other error codes
 * when failed to configure PHY interrupt for E82X
 */
static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	/* Start from a clean timestamp memory before (re)configuring */
	ice_ptp_reset_ts_memory(hw);

	switch (hw->mac_type) {
	case ICE_MAC_E810:
	case ICE_MAC_E830:
		/* No per-PHY interrupt configuration needed */
		return 0;
	case ICE_MAC_GENERIC: {
		int quad;

		/* E82X interrupts are configured per quad */
		for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
		     quad++) {
			int err;

			err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
			if (err) {
				dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
					quad, err);
				return err;
			}
		}

		return 0;
	}
	case ICE_MAC_GENERIC_3K_E825: {
		int port;

		/* E825C interrupts are configured per port */
		for (port = 0; port < hw->ptp.num_lports; port++) {
			int err;

			err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
			if (err) {
				dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
					port, err);
				return err;
			}
		}

		return 0;
	}
	case ICE_MAC_UNKNOWN:
	default:
		return -EOPNOTSUPP;
	}
}
1506
/**
 * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block
 * @pf: Board private structure
 *
 * Thin wrapper restarting PHY timestamping on this PF's own port.
 */
static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
{
	ice_ptp_port_phy_restart(&pf->ptp.port);
}
1515
1516 /**
1517 * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping
1518 * @pf: Board private structure
1519 */
ice_ptp_restart_all_phy(struct ice_pf * pf)1520 static void ice_ptp_restart_all_phy(struct ice_pf *pf)
1521 {
1522 struct list_head *entry;
1523
1524 list_for_each(entry, &pf->adapter->ports.ports) {
1525 struct ice_ptp_port *port = list_entry(entry,
1526 struct ice_ptp_port,
1527 list_node);
1528
1529 if (port->link_up)
1530 ice_ptp_port_phy_restart(port);
1531 }
1532 }
1533
1534 /**
1535 * ice_ptp_adjfine - Adjust clock increment rate
1536 * @info: the driver's PTP info structure
1537 * @scaled_ppm: Parts per million with 16-bit fractional field
1538 *
1539 * Adjust the frequency of the clock by the indicated scaled ppm from the
1540 * base frequency.
1541 */
ice_ptp_adjfine(struct ptp_clock_info * info,long scaled_ppm)1542 static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
1543 {
1544 struct ice_pf *pf = ptp_info_to_pf(info);
1545 struct ice_hw *hw = &pf->hw;
1546 u64 incval;
1547 int err;
1548
1549 incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm);
1550 err = ice_ptp_write_incval_locked(hw, incval);
1551 if (err) {
1552 dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
1553 err);
1554 return -EIO;
1555 }
1556
1557 return 0;
1558 }
1559
1560 /**
1561 * ice_ptp_extts_event - Process PTP external clock event
1562 * @pf: Board private structure
1563 */
ice_ptp_extts_event(struct ice_pf * pf)1564 void ice_ptp_extts_event(struct ice_pf *pf)
1565 {
1566 struct ptp_clock_event event;
1567 struct ice_hw *hw = &pf->hw;
1568 u8 chan, tmr_idx;
1569 u32 hi, lo;
1570
1571 /* Don't process timestamp events if PTP is not ready */
1572 if (pf->ptp.state != ICE_PTP_READY)
1573 return;
1574
1575 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1576 /* Event time is captured by one of the two matched registers
1577 * GLTSYN_EVNT_L: 32 LSB of sampled time event
1578 * GLTSYN_EVNT_H: 32 MSB of sampled time event
1579 * Event is defined in GLTSYN_EVNT_0 register
1580 */
1581 for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
1582 int pin_desc_idx;
1583
1584 /* Check if channel is enabled */
1585 if (!(pf->ptp.ext_ts_irq & (1 << chan)))
1586 continue;
1587
1588 lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
1589 hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
1590 event.timestamp = (u64)hi << 32 | lo;
1591
1592 /* Add delay compensation */
1593 pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
1594 if (pin_desc_idx >= 0) {
1595 const struct ice_ptp_pin_desc *desc;
1596
1597 desc = &pf->ptp.ice_pin_desc[pin_desc_idx];
1598 event.timestamp -= desc->delay[0];
1599 }
1600
1601 event.type = PTP_CLOCK_EXTTS;
1602 event.index = chan;
1603 pf->ptp.ext_ts_irq &= ~(1 << chan);
1604 ptp_clock_event(pf->ptp.clock, &event);
1605 }
1606 }
1607
/**
 * ice_ptp_cfg_extts - Configure EXTTS pin and channel
 * @pf: Board private structure
 * @rq: External timestamp request
 * @on: Enable/disable flag
 *
 * Configure an external timestamp event on the requested channel.
 *
 * Return: 0 on success, negative error code otherwise
 */
static int ice_ptp_cfg_extts(struct ice_pf *pf, struct ptp_extts_request *rq,
			     int on)
{
	u32 aux_reg, gpio_reg, irq_reg;
	struct ice_hw *hw = &pf->hw;
	unsigned int chan, gpio_pin;
	int pin_desc_idx;
	u8 tmr_idx;

	/* Reject requests with unsupported flags */

	if (rq->flags & ~(PTP_ENABLE_FEATURE |
			  PTP_RISING_EDGE |
			  PTP_FALLING_EDGE |
			  PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
	chan = rq->index;

	/* Look up the pin currently assigned to this EXTTS channel */
	pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
	if (pin_desc_idx < 0)
		return -EIO;

	gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[0];
	irq_reg = rd32(hw, PFINT_OICR_ENA);

	if (on) {
		/* Enable the interrupt */
		irq_reg |= PFINT_OICR_TSYN_EVNT_M;
		aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;

#define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE	BIT(0)
#define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE	BIT(1)

		/* set event level to requested edge */
		if (rq->flags & PTP_FALLING_EDGE)
			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
		if (rq->flags & PTP_RISING_EDGE)
			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;

		/* Write GPIO CTL reg.
		 * 0x1 is input sampled by EVENT register(channel)
		 * + num_in_channels * tmr_idx
		 */
		gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M,
				      1 + chan + (tmr_idx * 3));
	} else {
		bool last_enabled = true;

		/* clear the values we set to reset defaults */
		aux_reg = 0;
		gpio_reg = 0;

		/* Only mask the top-level TSYN event interrupt if no other
		 * EXTTS channel remains enabled.
		 */
		for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++)
			if ((pf->ptp.extts_rqs[i].flags &
			     PTP_ENABLE_FEATURE) &&
			    i != chan) {
				last_enabled = false;
			}

		if (last_enabled)
			irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
	}

	wr32(hw, PFINT_OICR_ENA, irq_reg);
	wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);

	return 0;
}
1689
1690 /**
1691 * ice_ptp_disable_all_extts - Disable all EXTTS channels
1692 * @pf: Board private structure
1693 */
ice_ptp_disable_all_extts(struct ice_pf * pf)1694 static void ice_ptp_disable_all_extts(struct ice_pf *pf)
1695 {
1696 for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts ; i++)
1697 if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE)
1698 ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i],
1699 false);
1700
1701 synchronize_irq(pf->oicr_irq.virq);
1702 }
1703
1704 /**
1705 * ice_ptp_enable_all_extts - Enable all EXTTS channels
1706 * @pf: Board private structure
1707 *
1708 * Called during reset to restore user configuration.
1709 */
ice_ptp_enable_all_extts(struct ice_pf * pf)1710 static void ice_ptp_enable_all_extts(struct ice_pf *pf)
1711 {
1712 for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts ; i++)
1713 if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE)
1714 ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i],
1715 true);
1716 }
1717
/**
 * ice_ptp_write_perout - Write periodic wave parameters to HW
 * @hw: pointer to the HW struct
 * @chan: target channel
 * @gpio_pin: target GPIO pin
 * @start: target time to start periodic output
 * @period: target period (0 disables the output)
 *
 * Program the hardware clock generator registers in the required order:
 * reset AUX_OUT, program CLKO (half period), program the target time, then
 * enable AUX_OUT and route the signal to the GPIO pin.
 *
 * Return: 0 on success, negative error code otherwise
 */
static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
				unsigned int gpio_pin, u64 start, u64 period)
{

	u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
	u32 val = 0;

	/* 0. Reset mode & out_en in AUX_OUT */
	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);

	if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) {
		int err;

		/* Enable/disable CGU 1PPS output for E825C */
		err = ice_cgu_cfg_pps_out(hw, !!period);
		if (err)
			return err;
	}

	/* 1. Write perout with half of required period value.
	 * HW toggles output when source clock hits the TGT and then adds
	 * GLTSYN_CLKO value to the target, so it ends up with 50% duty cycle.
	 */
	period >>= 1;

	/* For proper operation, GLTSYN_CLKO must be larger than clock tick and
	 * period has to fit in 32 bit register.
	 */
#define MIN_PULSE 3
	if (!!period && (period <= MIN_PULSE || period > U32_MAX)) {
		dev_err(ice_hw_to_dev(hw), "CLK period ticks must be >= %d && <= 2^32",
			MIN_PULSE);
		return -EIO;
	}

	wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));

	/* 2. Write TARGET time */
	wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start));
	wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start));

	/* 3. Write AUX_OUT register */
	if (!!period)
		val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);

	/* 4. write GPIO CTL reg */
	val = GLGEN_GPIO_CTL_PIN_DIR_M;
	if (!!period)
		val |= FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M,
				  8 + chan + (tmr_idx * 4));

	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
	/* Flush posted writes so the output is configured atomically */
	ice_flush(hw);

	return 0;
}
1785
/**
 * ice_ptp_cfg_perout - Configure clock to generate periodic wave
 * @pf: Board private structure
 * @rq: Periodic output request
 * @on: Enable/disable flag
 *
 * Configure the internal clock generator modules to generate the clock wave of
 * specified period.
 *
 * Return: 0 on success, negative error code otherwise
 */
static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
			      int on)
{
	unsigned int gpio_pin, prop_delay_ns;
	u64 clk, period, start, phase;
	struct ice_hw *hw = &pf->hw;
	int pin_desc_idx;

	/* Only the PHASE flag is supported for periodic output */
	if (rq->flags & ~PTP_PEROUT_PHASE)
		return -EOPNOTSUPP;

	pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_PEROUT, rq->index);
	if (pin_desc_idx < 0)
		return -EIO;

	gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1];
	prop_delay_ns = pf->ptp.ice_pin_desc[pin_desc_idx].delay[1];
	period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec;

	/* If we're disabling the output or period is 0, clear out CLKO and TGT
	 * and keep output level low.
	 */
	if (!on || !period)
		return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0);

	/* On E82X the dedicated 1PPS pin only supports a one-second period */
	if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 &&
	    period != NSEC_PER_SEC && hw->mac_type == ICE_MAC_GENERIC) {
		dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n");
		return -EOPNOTSUPP;
	}

	/* Half the period is programmed into CLKO, so it must divide evenly */
	if (period & 0x1) {
		dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
		return -EIO;
	}

	start = rq->start.sec * NSEC_PER_SEC + rq->start.nsec;

	/* If PTP_PEROUT_PHASE is set, rq has phase instead of start time */
	if (rq->flags & PTP_PEROUT_PHASE)
		phase = start;
	else
		div64_u64_rem(start, period, &phase);

	/* If we have only phase or start time is in the past, start the timer
	 * at the next multiple of period, maintaining phase at least 0.5 second
	 * from now, so we have time to write it to HW.
	 */
	clk = ice_ptp_read_src_clk_reg(pf, NULL) + NSEC_PER_MSEC * 500;
	if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns)
		start = div64_u64(clk + period - 1, period) * period + phase;

	/* Compensate for propagation delay from the generator to the pin. */
	start -= prop_delay_ns;

	return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period);
}
1854
1855 /**
1856 * ice_ptp_disable_all_perout - Disable all currently configured outputs
1857 * @pf: Board private structure
1858 *
1859 * Disable all currently configured clock outputs. This is necessary before
1860 * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_perout to
1861 * re-enable the clocks again.
1862 */
ice_ptp_disable_all_perout(struct ice_pf * pf)1863 static void ice_ptp_disable_all_perout(struct ice_pf *pf)
1864 {
1865 for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++)
1866 if (pf->ptp.perout_rqs[i].period.sec ||
1867 pf->ptp.perout_rqs[i].period.nsec)
1868 ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i],
1869 false);
1870 }
1871
1872 /**
1873 * ice_ptp_enable_all_perout - Enable all configured periodic clock outputs
1874 * @pf: Board private structure
1875 *
1876 * Enable all currently configured clock outputs. Use this after
1877 * ice_ptp_disable_all_perout to reconfigure the output signals according to
1878 * their configuration.
1879 */
ice_ptp_enable_all_perout(struct ice_pf * pf)1880 static void ice_ptp_enable_all_perout(struct ice_pf *pf)
1881 {
1882 for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++)
1883 if (pf->ptp.perout_rqs[i].period.sec ||
1884 pf->ptp.perout_rqs[i].period.nsec)
1885 ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i],
1886 true);
1887 }
1888
1889 /**
1890 * ice_ptp_disable_shared_pin - Disable enabled pin that shares GPIO
1891 * @pf: Board private structure
1892 * @pin: Pin index
1893 * @func: Assigned function
1894 *
1895 * Return: 0 on success, negative error code otherwise
1896 */
ice_ptp_disable_shared_pin(struct ice_pf * pf,unsigned int pin,enum ptp_pin_function func)1897 static int ice_ptp_disable_shared_pin(struct ice_pf *pf, unsigned int pin,
1898 enum ptp_pin_function func)
1899 {
1900 unsigned int gpio_pin;
1901
1902 switch (func) {
1903 case PTP_PF_PEROUT:
1904 gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[1];
1905 break;
1906 case PTP_PF_EXTTS:
1907 gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[0];
1908 break;
1909 default:
1910 return -EOPNOTSUPP;
1911 }
1912
1913 for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) {
1914 struct ptp_pin_desc *pin_desc = &pf->ptp.pin_desc[i];
1915 unsigned int chan = pin_desc->chan;
1916
1917 /* Skip pin idx from the request */
1918 if (i == pin)
1919 continue;
1920
1921 if (pin_desc->func == PTP_PF_PEROUT &&
1922 pf->ptp.ice_pin_desc[i].gpio[1] == gpio_pin) {
1923 pf->ptp.perout_rqs[chan].period.sec = 0;
1924 pf->ptp.perout_rqs[chan].period.nsec = 0;
1925 pin_desc->func = PTP_PF_NONE;
1926 pin_desc->chan = 0;
1927 dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared output GPIO pin %u\n",
1928 i, gpio_pin);
1929 return ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[chan],
1930 false);
1931 } else if (pf->ptp.pin_desc->func == PTP_PF_EXTTS &&
1932 pf->ptp.ice_pin_desc[i].gpio[0] == gpio_pin) {
1933 pf->ptp.extts_rqs[chan].flags &= ~PTP_ENABLE_FEATURE;
1934 pin_desc->func = PTP_PF_NONE;
1935 pin_desc->chan = 0;
1936 dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared input GPIO pin %u\n",
1937 i, gpio_pin);
1938 return ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[chan],
1939 false);
1940 }
1941 }
1942
1943 return 0;
1944 }
1945
/**
 * ice_verify_pin - verify if pin supports requested pin function
 * @info: the driver's PTP info structure
 * @pin: Pin index
 * @func: Assigned function
 * @chan: Assigned channel
 *
 * Return: 0 on success, -EOPNOTSUPP when function is not supported.
 */
static int ice_verify_pin(struct ptp_clock_info *info, unsigned int pin,
			  enum ptp_pin_function func, unsigned int chan)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	const struct ice_ptp_pin_desc *pin_desc;

	pin_desc = &pf->ptp.ice_pin_desc[pin];

	/* Is assigned function allowed? A negative GPIO number means the
	 * pin has no GPIO wired for that direction.
	 */
	switch (func) {
	case PTP_PF_EXTTS:
		if (pin_desc->gpio[0] < 0)
			return -EOPNOTSUPP;
		break;
	case PTP_PF_PEROUT:
		if (pin_desc->gpio[1] < 0)
			return -EOPNOTSUPP;
		break;
	case PTP_PF_NONE:
		break;
	case PTP_PF_PHYSYNC:
	default:
		return -EOPNOTSUPP;
	}

	/* On adapters with SMA_CTRL disable other pins that share same GPIO */
	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
		ice_ptp_disable_shared_pin(pf, pin, func);
		pf->ptp.pin_desc[pin].func = func;
		pf->ptp.pin_desc[pin].chan = chan;
		return ice_ptp_set_sma_cfg(pf);
	}

	return 0;
}
1990
1991 /**
1992 * ice_ptp_gpio_enable - Enable/disable ancillary features of PHC
1993 * @info: The driver's PTP info structure
1994 * @rq: The requested feature to change
1995 * @on: Enable/disable flag
1996 *
1997 * Return: 0 on success, negative error code otherwise
1998 */
ice_ptp_gpio_enable(struct ptp_clock_info * info,struct ptp_clock_request * rq,int on)1999 static int ice_ptp_gpio_enable(struct ptp_clock_info *info,
2000 struct ptp_clock_request *rq, int on)
2001 {
2002 struct ice_pf *pf = ptp_info_to_pf(info);
2003 int err;
2004
2005 switch (rq->type) {
2006 case PTP_CLK_REQ_PEROUT:
2007 {
2008 struct ptp_perout_request *cached =
2009 &pf->ptp.perout_rqs[rq->perout.index];
2010
2011 err = ice_ptp_cfg_perout(pf, &rq->perout, on);
2012 if (!err) {
2013 *cached = rq->perout;
2014 } else {
2015 cached->period.sec = 0;
2016 cached->period.nsec = 0;
2017 }
2018 return err;
2019 }
2020 case PTP_CLK_REQ_EXTTS:
2021 {
2022 struct ptp_extts_request *cached =
2023 &pf->ptp.extts_rqs[rq->extts.index];
2024
2025 err = ice_ptp_cfg_extts(pf, &rq->extts, on);
2026 if (!err)
2027 *cached = rq->extts;
2028 else
2029 cached->flags &= ~PTP_ENABLE_FEATURE;
2030 return err;
2031 }
2032 default:
2033 return -EOPNOTSUPP;
2034 }
2035 }
2036
2037 /**
2038 * ice_ptp_gettimex64 - Get the time of the clock
2039 * @info: the driver's PTP info structure
2040 * @ts: timespec64 structure to hold the current time value
2041 * @sts: Optional parameter for holding a pair of system timestamps from
2042 * the system clock. Will be ignored if NULL is given.
2043 *
2044 * Read the device clock and return the correct value on ns, after converting it
2045 * into a timespec struct.
2046 */
2047 static int
ice_ptp_gettimex64(struct ptp_clock_info * info,struct timespec64 * ts,struct ptp_system_timestamp * sts)2048 ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
2049 struct ptp_system_timestamp *sts)
2050 {
2051 struct ice_pf *pf = ptp_info_to_pf(info);
2052 u64 time_ns;
2053
2054 time_ns = ice_ptp_read_src_clk_reg(pf, sts);
2055 *ts = ns_to_timespec64(time_ns);
2056 return 0;
2057 }
2058
/**
 * ice_ptp_settime64 - Set the time of the clock
 * @info: the driver's PTP info structure
 * @ts: timespec64 structure that holds the new time value
 *
 * Set the device clock to the user input value. The conversion from timespec
 * to ns happens in the write function.
 *
 * Return: 0 on success, -EBUSY if the PTP hardware semaphore cannot be
 * taken, or another negative error code from the time write.
 */
static int
ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct timespec64 ts64 = *ts;
	struct ice_hw *hw = &pf->hw;
	int err;

	/* For Vernier mode on E82X, we need to recalibrate after new settime.
	 * Start with marking timestamps as invalid.
	 */
	if (hw->mac_type == ICE_MAC_GENERIC) {
		err = ice_ptp_clear_phy_offset_ready_e82x(hw);
		if (err)
			dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
	}

	/* The hardware semaphore serializes PHC writes across functions */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto exit;
	}

	/* Disable periodic outputs */
	ice_ptp_disable_all_perout(pf);

	err = ice_ptp_write_init(pf, &ts64);
	ice_ptp_unlock(hw);

	/* The cached PHC value is now stale; force readers to refresh it */
	if (!err)
		ice_ptp_reset_cached_phctime(pf);

	/* Reenable periodic outputs */
	ice_ptp_enable_all_perout(pf);

	/* Recalibrate and re-enable timestamp blocks for E822/E823 */
	if (hw->mac_type == ICE_MAC_GENERIC)
		ice_ptp_restart_all_phy(pf);
exit:
	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
		return err;
	}

	return 0;
}
2112
2113 /**
2114 * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
2115 * @info: the driver's PTP info structure
2116 * @delta: Offset in nanoseconds to adjust the time by
2117 */
ice_ptp_adjtime_nonatomic(struct ptp_clock_info * info,s64 delta)2118 static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
2119 {
2120 struct timespec64 now, then;
2121 int ret;
2122
2123 then = ns_to_timespec64(delta);
2124 ret = ice_ptp_gettimex64(info, &now, NULL);
2125 if (ret)
2126 return ret;
2127 now = timespec64_add(now, then);
2128
2129 return ice_ptp_settime64(info, (const struct timespec64 *)&now);
2130 }
2131
/**
 * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta
 * @info: the driver's PTP info structure
 * @delta: Offset in nanoseconds to adjust the time by
 *
 * Return: 0 on success, -EBUSY if the hardware semaphore cannot be taken,
 * or another negative error code from the adjustment write.
 */
static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);

	/* Hardware only supports atomic adjustments using signed 32-bit
	 * integers. For any adjustment outside this range, perform
	 * a non-atomic get->adjust->set flow.
	 */
	if (delta > S32_MAX || delta < S32_MIN) {
		dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta);
		return ice_ptp_adjtime_nonatomic(info, delta);
	}

	if (!ice_ptp_lock(hw)) {
		dev_err(dev, "PTP failed to acquire semaphore in adjtime\n");
		return -EBUSY;
	}

	/* Disable periodic outputs */
	ice_ptp_disable_all_perout(pf);

	err = ice_ptp_write_adj(pf, delta);

	/* Reenable periodic outputs */
	ice_ptp_enable_all_perout(pf);

	ice_ptp_unlock(hw);

	if (err) {
		dev_err(dev, "PTP failed to adjust time, err %d\n", err);
		return err;
	}

	/* The cached PHC value is stale after the adjustment */
	ice_ptp_reset_cached_phctime(pf);

	return 0;
}
2179
/**
 * struct ice_crosststamp_cfg - Device cross timestamp configuration
 * @lock_reg: The hardware semaphore lock to use
 * @lock_busy: Bit in the semaphore lock indicating the lock is busy
 * @ctl_reg: The hardware register to request cross timestamp
 * @ctl_active: Bit in the control register to request cross timestamp
 * @art_time_l: Lower 32-bits of ART system time
 * @art_time_h: Upper 32-bits of ART system time
 * @dev_time_l: Lower 32-bits of device time (per timer index)
 * @dev_time_h: Upper 32-bits of device time (per timer index)
 */
struct ice_crosststamp_cfg {
	/* HW semaphore lock register */
	u32 lock_reg;
	u32 lock_busy;

	/* Capture control register */
	u32 ctl_reg;
	u32 ctl_active;

	/* Time storage, dev_time_* indexed by timer index (0 or 1) */
	u32 art_time_l;
	u32 art_time_h;
	u32 dev_time_l[2];
	u32 dev_time_h[2];
};

/* Cross timestamp register layout for E82X-family devices */
static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e82x = {
	.lock_reg = PFHH_SEM,
	.lock_busy = PFHH_SEM_BUSY_M,
	.ctl_reg = GLHH_ART_CTL,
	.ctl_active = GLHH_ART_CTL_ACTIVE_M,
	.art_time_l = GLHH_ART_TIME_L,
	.art_time_h = GLHH_ART_TIME_H,
	.dev_time_l[0] = GLTSYN_HHTIME_L(0),
	.dev_time_h[0] = GLTSYN_HHTIME_H(0),
	.dev_time_l[1] = GLTSYN_HHTIME_L(1),
	.dev_time_h[1] = GLTSYN_HHTIME_H(1),
};

#ifdef CONFIG_ICE_HWTS
/* Cross timestamp register layout for E830 devices (PTM registers) */
static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e830 = {
	.lock_reg = E830_PFPTM_SEM,
	.lock_busy = E830_PFPTM_SEM_BUSY_M,
	.ctl_reg = E830_GLPTM_ART_CTL,
	.ctl_active = E830_GLPTM_ART_CTL_ACTIVE_M,
	.art_time_l = E830_GLPTM_ART_TIME_L,
	.art_time_h = E830_GLPTM_ART_TIME_H,
	.dev_time_l[0] = E830_GLTSYN_PTMTIME_L(0),
	.dev_time_h[0] = E830_GLTSYN_PTMTIME_H(0),
	.dev_time_l[1] = E830_GLTSYN_PTMTIME_L(1),
	.dev_time_h[1] = E830_GLTSYN_PTMTIME_H(1),
};

#endif /* CONFIG_ICE_HWTS */
/**
 * struct ice_crosststamp_ctx - Device cross timestamp context
 * @snapshot: snapshot of system clocks for historic interpolation
 * @pf: pointer to the PF private structure
 * @cfg: pointer to hardware configuration for cross timestamp
 */
struct ice_crosststamp_ctx {
	struct system_time_snapshot snapshot;
	struct ice_pf *pf;
	const struct ice_crosststamp_cfg *cfg;
};
2246
/**
 * ice_capture_crosststamp - Capture a device/system cross timestamp
 * @device: Current device time
 * @system: System counter value read synchronously with device time
 * @__ctx: Context passed from ice_ptp_getcrosststamp
 *
 * Read device and system (ART) clock simultaneously and return the corrected
 * clock values in ns.
 *
 * On a capture timeout, @device and @system are left unmodified and the
 * poll error is returned after releasing the hardware semaphore.
 *
 * Return: zero on success, or a negative error code on failure.
 */
static int ice_capture_crosststamp(ktime_t *device,
				   struct system_counterval_t *system,
				   void *__ctx)
{
	struct ice_crosststamp_ctx *ctx = __ctx;
	const struct ice_crosststamp_cfg *cfg;
	u32 lock, ctl, ts_lo, ts_hi, tmr_idx;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int err;
	u64 ts;

	cfg = ctx->cfg;
	pf = ctx->pf;
	hw = &pf->hw;

	/* Only timer indices 0 and 1 have capture registers (see cfg) */
	tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
	if (tmr_idx > 1)
		return -EINVAL;

	/* Poll until we obtain the cross-timestamp hardware semaphore */
	err = rd32_poll_timeout(hw, cfg->lock_reg, lock,
				!(lock & cfg->lock_busy),
				10 * USEC_PER_MSEC, 50 * USEC_PER_MSEC);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to get cross timestamp lock\n");
		return -EBUSY;
	}

	/* Snapshot system time for historic interpolation */
	ktime_get_snapshot(&ctx->snapshot);

	/* Program cmd to master timer */
	ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);

	/* Start the ART and device clock sync sequence */
	ctl = rd32(hw, cfg->ctl_reg);
	ctl |= cfg->ctl_active;
	wr32(hw, cfg->ctl_reg, ctl);

	/* Poll until hardware completes the capture */
	err = rd32_poll_timeout(hw, cfg->ctl_reg, ctl, !(ctl & cfg->ctl_active),
				5, 20 * USEC_PER_MSEC);
	if (err)
		goto err_timeout;

	/* Read ART system time */
	ts_lo = rd32(hw, cfg->art_time_l);
	ts_hi = rd32(hw, cfg->art_time_h);
	ts = ((u64)ts_hi << 32) | ts_lo;
	system->cycles = ts;
	system->cs_id = CSID_X86_ART;

	/* Read Device source clock time */
	ts_lo = rd32(hw, cfg->dev_time_l[tmr_idx]);
	ts_hi = rd32(hw, cfg->dev_time_h[tmr_idx]);
	ts = ((u64)ts_hi << 32) | ts_lo;
	*device = ns_to_ktime(ts);

err_timeout:
	/* Clear the master timer */
	ice_ptp_src_cmd(hw, ICE_PTP_NOP);

	/* Release HW lock */
	lock = rd32(hw, cfg->lock_reg);
	lock &= ~cfg->lock_busy;
	wr32(hw, cfg->lock_reg, lock);

	return err;
}
2328
2329 /**
2330 * ice_ptp_getcrosststamp - Capture a device cross timestamp
2331 * @info: the driver's PTP info structure
2332 * @cts: The memory to fill the cross timestamp info
2333 *
2334 * Capture a cross timestamp between the ART and the device PTP hardware
2335 * clock. Fill the cross timestamp information and report it back to the
2336 * caller.
2337 *
2338 * In order to correctly correlate the ART timestamp back to the TSC time, the
2339 * CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
2340 *
2341 * Return: zero on success, or a negative error code on failure.
2342 */
ice_ptp_getcrosststamp(struct ptp_clock_info * info,struct system_device_crosststamp * cts)2343 static int ice_ptp_getcrosststamp(struct ptp_clock_info *info,
2344 struct system_device_crosststamp *cts)
2345 {
2346 struct ice_pf *pf = ptp_info_to_pf(info);
2347 struct ice_crosststamp_ctx ctx = {
2348 .pf = pf,
2349 };
2350
2351 switch (pf->hw.mac_type) {
2352 case ICE_MAC_GENERIC:
2353 case ICE_MAC_GENERIC_3K_E825:
2354 ctx.cfg = &ice_crosststamp_cfg_e82x;
2355 break;
2356 #ifdef CONFIG_ICE_HWTS
2357 case ICE_MAC_E830:
2358 ctx.cfg = &ice_crosststamp_cfg_e830;
2359 break;
2360 #endif /* CONFIG_ICE_HWTS */
2361 default:
2362 return -EOPNOTSUPP;
2363 }
2364
2365 return get_device_system_crosststamp(ice_capture_crosststamp, &ctx,
2366 &ctx.snapshot, cts);
2367 }
2368
2369 /**
2370 * ice_ptp_get_ts_config - ioctl interface to read the timestamping config
2371 * @pf: Board private structure
2372 * @ifr: ioctl data
2373 *
2374 * Copy the timestamping config to user buffer
2375 */
ice_ptp_get_ts_config(struct ice_pf * pf,struct ifreq * ifr)2376 int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
2377 {
2378 struct hwtstamp_config *config;
2379
2380 if (pf->ptp.state != ICE_PTP_READY)
2381 return -EIO;
2382
2383 config = &pf->ptp.tstamp_config;
2384
2385 return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
2386 -EFAULT : 0;
2387 }
2388
2389 /**
2390 * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
2391 * @pf: Board private structure
2392 * @config: hwtstamp settings requested or saved
2393 */
2394 static int
ice_ptp_set_timestamp_mode(struct ice_pf * pf,struct hwtstamp_config * config)2395 ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
2396 {
2397 switch (config->tx_type) {
2398 case HWTSTAMP_TX_OFF:
2399 pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
2400 break;
2401 case HWTSTAMP_TX_ON:
2402 pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
2403 break;
2404 default:
2405 return -ERANGE;
2406 }
2407
2408 switch (config->rx_filter) {
2409 case HWTSTAMP_FILTER_NONE:
2410 pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
2411 break;
2412 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2413 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2414 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2415 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2416 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2417 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2418 case HWTSTAMP_FILTER_PTP_V2_SYNC:
2419 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2420 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2421 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2422 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2423 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2424 case HWTSTAMP_FILTER_NTP_ALL:
2425 case HWTSTAMP_FILTER_ALL:
2426 pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
2427 break;
2428 default:
2429 return -ERANGE;
2430 }
2431
2432 /* Immediately update the device timestamping mode */
2433 ice_ptp_restore_timestamp_mode(pf);
2434
2435 return 0;
2436 }
2437
2438 /**
2439 * ice_ptp_set_ts_config - ioctl interface to control the timestamping
2440 * @pf: Board private structure
2441 * @ifr: ioctl data
2442 *
2443 * Get the user config and store it
2444 */
ice_ptp_set_ts_config(struct ice_pf * pf,struct ifreq * ifr)2445 int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
2446 {
2447 struct hwtstamp_config config;
2448 int err;
2449
2450 if (pf->ptp.state != ICE_PTP_READY)
2451 return -EAGAIN;
2452
2453 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2454 return -EFAULT;
2455
2456 err = ice_ptp_set_timestamp_mode(pf, &config);
2457 if (err)
2458 return err;
2459
2460 /* Return the actual configuration set */
2461 config = pf->ptp.tstamp_config;
2462
2463 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2464 -EFAULT : 0;
2465 }
2466
2467 /**
2468 * ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns
2469 * @rx_desc: Receive descriptor
2470 * @pkt_ctx: Packet context to get the cached time
2471 *
2472 * The driver receives a notification in the receive descriptor with timestamp.
2473 */
ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc * rx_desc,const struct ice_pkt_ctx * pkt_ctx)2474 u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
2475 const struct ice_pkt_ctx *pkt_ctx)
2476 {
2477 u64 ts_ns, cached_time;
2478 u32 ts_high;
2479
2480 if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
2481 return 0;
2482
2483 cached_time = READ_ONCE(pkt_ctx->cached_phctime);
2484
2485 /* Do not report a timestamp if we don't have a cached PHC time */
2486 if (!cached_time)
2487 return 0;
2488
2489 /* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
2490 * PHC value, rather than accessing the PF. This also allows us to
2491 * simply pass the upper 32bits of nanoseconds directly. Calling
2492 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these
2493 * bits itself.
2494 */
2495 ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
2496 ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);
2497
2498 return ts_ns;
2499 }
2500
2501 /**
2502 * ice_ptp_setup_pin_cfg - setup PTP pin_config structure
2503 * @pf: Board private structure
2504 */
ice_ptp_setup_pin_cfg(struct ice_pf * pf)2505 static void ice_ptp_setup_pin_cfg(struct ice_pf *pf)
2506 {
2507 for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) {
2508 const struct ice_ptp_pin_desc *desc = &pf->ptp.ice_pin_desc[i];
2509 struct ptp_pin_desc *pin = &pf->ptp.pin_desc[i];
2510 const char *name = NULL;
2511
2512 if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
2513 name = ice_pin_names[desc->name_idx];
2514 else if (desc->name_idx != GPIO_NA)
2515 name = ice_pin_names_nvm[desc->name_idx];
2516 if (name)
2517 strscpy(pin->name, name, sizeof(pin->name));
2518
2519 pin->index = i;
2520 }
2521
2522 pf->ptp.info.pin_config = pf->ptp.pin_desc;
2523 }
2524
2525 /**
2526 * ice_ptp_disable_pins - Disable PTP pins
2527 * @pf: pointer to the PF structure
2528 *
2529 * Disable the OS access to the SMA pins. Called to clear out the OS
2530 * indications of pin support when we fail to setup the SMA control register.
2531 */
ice_ptp_disable_pins(struct ice_pf * pf)2532 static void ice_ptp_disable_pins(struct ice_pf *pf)
2533 {
2534 struct ptp_clock_info *info = &pf->ptp.info;
2535
2536 dev_warn(ice_pf_to_dev(pf), "Failed to configure PTP pin control\n");
2537
2538 info->enable = NULL;
2539 info->verify = NULL;
2540 info->n_pins = 0;
2541 info->n_ext_ts = 0;
2542 info->n_per_out = 0;
2543 }
2544
/**
 * ice_ptp_parse_sdp_entries - update ice_ptp_pin_desc structure from NVM
 * @pf: pointer to the PF structure
 * @entries: SDP connection section from NVM
 * @num_entries: number of valid entries in sdp_entries
 * @pins: PTP pins array to update
 *
 * Each NVM entry encodes a pin bitmap, a direction bit and an SDP/GPIO
 * number. Pins referenced by multiple entries accumulate both an input and
 * an output GPIO. Also updates pf->ptp.info.n_pins with the pin count.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries,
				     unsigned int num_entries,
				     struct ice_ptp_pin_desc *pins)
{
	unsigned int n_pins = 0;
	unsigned int i;

	/* Setup ice_pin_desc array: mark every slot unused (-1) first */
	for (i = 0; i < ICE_N_PINS_MAX; i++) {
		pins[i].name_idx = -1;
		pins[i].gpio[0] = -1;
		pins[i].gpio[1] = -1;
	}

	for (i = 0; i < num_entries; i++) {
		u16 entry = le16_to_cpu(entries[i]);
		DECLARE_BITMAP(bitmap, GPIO_NA);
		unsigned int bitmap_idx;
		bool dir;
		u16 gpio;

		/* Unpack the pin bitmap, direction and SDP number fields */
		*bitmap = FIELD_GET(ICE_AQC_NVM_SDP_AC_PIN_M, entry);
		dir = !!FIELD_GET(ICE_AQC_NVM_SDP_AC_DIR_M, entry);
		gpio = FIELD_GET(ICE_AQC_NVM_SDP_AC_SDP_NUM_M, entry);
		for_each_set_bit(bitmap_idx, bitmap, GPIO_NA + 1) {
			unsigned int idx;

			/* Check if entry's pin bit is valid */
			if (bitmap_idx >= NUM_PTP_PINS_NVM &&
			    bitmap_idx != GPIO_NA)
				continue;

			/* Check if pin already exists */
			for (idx = 0; idx < ICE_N_PINS_MAX; idx++)
				if (pins[idx].name_idx == bitmap_idx)
					break;

			if (idx == ICE_N_PINS_MAX) {
				/* Pin not found, setup its entry and name.
				 * GPIO_NA pins are named after their SDP
				 * number instead of an NVM name index.
				 */
				idx = n_pins++;
				pins[idx].name_idx = bitmap_idx;
				if (bitmap_idx == GPIO_NA)
					strscpy(pf->ptp.pin_desc[idx].name,
						ice_pin_names[gpio],
						sizeof(pf->ptp.pin_desc[idx]
							       .name));
			}

			/* Setup in/out GPIO number */
			pins[idx].gpio[dir] = gpio;
		}
	}

	for (i = 0; i < n_pins; i++) {
		dev_dbg(ice_pf_to_dev(pf),
			"NVM pin entry[%d] : name_idx %d gpio_out %d gpio_in %d\n",
			i, pins[i].name_idx, pins[i].gpio[1], pins[i].gpio[0]);
	}

	pf->ptp.info.n_pins = n_pins;
	return 0;
}
2616
2617 /**
2618 * ice_ptp_set_funcs_e82x - Set specialized functions for E82X support
2619 * @pf: Board private structure
2620 *
2621 * Assign functions to the PTP capabilities structure for E82X devices.
2622 * Functions which operate across all device families should be set directly
2623 * in ice_ptp_set_caps. Only add functions here which are distinct for E82X
2624 * devices.
2625 */
ice_ptp_set_funcs_e82x(struct ice_pf * pf)2626 static void ice_ptp_set_funcs_e82x(struct ice_pf *pf)
2627 {
2628 pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
2629
2630 if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825) {
2631 pf->ptp.ice_pin_desc = ice_pin_desc_e825c;
2632 pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e825c);
2633 } else {
2634 pf->ptp.ice_pin_desc = ice_pin_desc_e82x;
2635 pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e82x);
2636 }
2637 ice_ptp_setup_pin_cfg(pf);
2638 }
2639
2640 /**
2641 * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
2642 * @pf: Board private structure
2643 *
2644 * Assign functions to the PTP capabiltiies structure for E810 devices.
2645 * Functions which operate across all device families should be set directly
2646 * in ice_ptp_set_caps. Only add functions here which are distinct for E810
2647 * devices.
2648 */
ice_ptp_set_funcs_e810(struct ice_pf * pf)2649 static void ice_ptp_set_funcs_e810(struct ice_pf *pf)
2650 {
2651 __le16 entries[ICE_AQC_NVM_SDP_AC_MAX_SIZE];
2652 struct ice_ptp_pin_desc *desc = NULL;
2653 struct ice_ptp *ptp = &pf->ptp;
2654 unsigned int num_entries;
2655 int err;
2656
2657 err = ice_ptp_read_sdp_ac(&pf->hw, entries, &num_entries);
2658 if (err) {
2659 /* SDP section does not exist in NVM or is corrupted */
2660 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
2661 ptp->ice_pin_desc = ice_pin_desc_e810_sma;
2662 ptp->info.n_pins =
2663 ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810_sma);
2664 } else {
2665 pf->ptp.ice_pin_desc = ice_pin_desc_e810;
2666 pf->ptp.info.n_pins =
2667 ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810);
2668 err = 0;
2669 }
2670 } else {
2671 desc = devm_kcalloc(ice_pf_to_dev(pf), ICE_N_PINS_MAX,
2672 sizeof(struct ice_ptp_pin_desc),
2673 GFP_KERNEL);
2674 if (!desc)
2675 goto err;
2676
2677 err = ice_ptp_parse_sdp_entries(pf, entries, num_entries, desc);
2678 if (err)
2679 goto err;
2680
2681 ptp->ice_pin_desc = (const struct ice_ptp_pin_desc *)desc;
2682 }
2683
2684 ptp->info.pin_config = ptp->pin_desc;
2685 ice_ptp_setup_pin_cfg(pf);
2686
2687 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
2688 err = ice_ptp_set_sma_cfg(pf);
2689 err:
2690 if (err) {
2691 devm_kfree(ice_pf_to_dev(pf), desc);
2692 ice_ptp_disable_pins(pf);
2693 }
2694 }
2695
/**
 * ice_ptp_set_funcs_e830 - Set specialized functions for E830 support
 * @pf: Board private structure
 *
 * Assign functions to the PTP capabilities structure for E830 devices.
 * Functions which operate across all device families should be set directly
 * in ice_ptp_set_caps. Only add functions here which are distinct for E830
 * devices.
 */
static void ice_ptp_set_funcs_e830(struct ice_pf *pf)
{
#ifdef CONFIG_ICE_HWTS
	/* Cross timestamping requires PCIe PTM and the CPU's ART counter */
	if (pcie_ptm_enabled(pf->pdev) && boot_cpu_has(X86_FEATURE_ART))
		pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;

#endif /* CONFIG_ICE_HWTS */
	/* Rest of the config is the same as base E810 */
	pf->ptp.ice_pin_desc = ice_pin_desc_e810;
	pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810);
	ice_ptp_setup_pin_cfg(pf);
}
2717
2718 /**
2719 * ice_ptp_set_caps - Set PTP capabilities
2720 * @pf: Board private structure
2721 */
ice_ptp_set_caps(struct ice_pf * pf)2722 static void ice_ptp_set_caps(struct ice_pf *pf)
2723 {
2724 struct ptp_clock_info *info = &pf->ptp.info;
2725 struct device *dev = ice_pf_to_dev(pf);
2726
2727 snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
2728 dev_driver_string(dev), dev_name(dev));
2729 info->owner = THIS_MODULE;
2730 info->max_adj = 100000000;
2731 info->adjtime = ice_ptp_adjtime;
2732 info->adjfine = ice_ptp_adjfine;
2733 info->gettimex64 = ice_ptp_gettimex64;
2734 info->settime64 = ice_ptp_settime64;
2735 info->n_per_out = GLTSYN_TGT_H_IDX_MAX;
2736 info->n_ext_ts = GLTSYN_EVNT_H_IDX_MAX;
2737 info->enable = ice_ptp_gpio_enable;
2738 info->verify = ice_verify_pin;
2739
2740 switch (pf->hw.mac_type) {
2741 case ICE_MAC_E810:
2742 ice_ptp_set_funcs_e810(pf);
2743 return;
2744 case ICE_MAC_E830:
2745 ice_ptp_set_funcs_e830(pf);
2746 return;
2747 case ICE_MAC_GENERIC:
2748 case ICE_MAC_GENERIC_3K_E825:
2749 ice_ptp_set_funcs_e82x(pf);
2750 return;
2751 default:
2752 return;
2753 }
2754 }
2755
2756 /**
2757 * ice_ptp_create_clock - Create PTP clock device for userspace
2758 * @pf: Board private structure
2759 *
2760 * This function creates a new PTP clock device. It only creates one if we
2761 * don't already have one. Will return error if it can't create one, but success
2762 * if we already have a device. Should be used by ice_ptp_init to create clock
2763 * initially, and prevent global resets from creating new clock devices.
2764 */
ice_ptp_create_clock(struct ice_pf * pf)2765 static long ice_ptp_create_clock(struct ice_pf *pf)
2766 {
2767 struct ptp_clock_info *info;
2768 struct device *dev;
2769
2770 /* No need to create a clock device if we already have one */
2771 if (pf->ptp.clock)
2772 return 0;
2773
2774 ice_ptp_set_caps(pf);
2775
2776 info = &pf->ptp.info;
2777 dev = ice_pf_to_dev(pf);
2778
2779 /* Attempt to register the clock before enabling the hardware. */
2780 pf->ptp.clock = ptp_clock_register(info, dev);
2781 if (IS_ERR(pf->ptp.clock)) {
2782 dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device");
2783 return PTR_ERR(pf->ptp.clock);
2784 }
2785
2786 return 0;
2787 }
2788
2789 /**
2790 * ice_ptp_request_ts - Request an available Tx timestamp index
2791 * @tx: the PTP Tx timestamp tracker to request from
2792 * @skb: the SKB to associate with this timestamp request
2793 */
ice_ptp_request_ts(struct ice_ptp_tx * tx,struct sk_buff * skb)2794 s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
2795 {
2796 unsigned long flags;
2797 u8 idx;
2798
2799 spin_lock_irqsave(&tx->lock, flags);
2800
2801 /* Check that this tracker is accepting new timestamp requests */
2802 if (!ice_ptp_is_tx_tracker_up(tx)) {
2803 spin_unlock_irqrestore(&tx->lock, flags);
2804 return -1;
2805 }
2806
2807 /* Find and set the first available index */
2808 idx = find_next_zero_bit(tx->in_use, tx->len,
2809 tx->last_ll_ts_idx_read + 1);
2810 if (idx == tx->len)
2811 idx = find_first_zero_bit(tx->in_use, tx->len);
2812
2813 if (idx < tx->len) {
2814 /* We got a valid index that no other thread could have set. Store
2815 * a reference to the skb and the start time to allow discarding old
2816 * requests.
2817 */
2818 set_bit(idx, tx->in_use);
2819 clear_bit(idx, tx->stale);
2820 tx->tstamps[idx].start = jiffies;
2821 tx->tstamps[idx].skb = skb_get(skb);
2822 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2823 ice_trace(tx_tstamp_request, skb, idx);
2824 }
2825
2826 spin_unlock_irqrestore(&tx->lock, flags);
2827
2828 /* return the appropriate PHY timestamp register index, -1 if no
2829 * indexes were available.
2830 */
2831 if (idx >= tx->len)
2832 return -1;
2833 else
2834 return idx + tx->offset;
2835 }
2836
2837 /**
2838 * ice_ptp_process_ts - Process the PTP Tx timestamps
2839 * @pf: Board private structure
2840 *
2841 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx
2842 * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise.
2843 */
ice_ptp_process_ts(struct ice_pf * pf)2844 enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
2845 {
2846 switch (pf->ptp.tx_interrupt_mode) {
2847 case ICE_PTP_TX_INTERRUPT_NONE:
2848 /* This device has the clock owner handle timestamps for it */
2849 return ICE_TX_TSTAMP_WORK_DONE;
2850 case ICE_PTP_TX_INTERRUPT_SELF:
2851 /* This device handles its own timestamps */
2852 return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
2853 case ICE_PTP_TX_INTERRUPT_ALL:
2854 /* This device handles timestamps for all ports */
2855 return ice_ptp_tx_tstamp_owner(pf);
2856 default:
2857 WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
2858 pf->ptp.tx_interrupt_mode);
2859 return ICE_TX_TSTAMP_WORK_DONE;
2860 }
2861 }
2862
/**
 * ice_ptp_ts_irq - Process the PTP Tx timestamps in IRQ context
 * @pf: Board private structure
 *
 * Return: IRQ_WAKE_THREAD if Tx timestamp read has to be handled in the bottom
 * half of the interrupt and IRQ_HANDLED otherwise.
 */
irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	switch (hw->mac_type) {
	case ICE_MAC_E810:
		/* E810 capable of low latency timestamping with interrupt can
		 * request a single timestamp in the top half and wait for
		 * a second LL TS interrupt from the FW when it's ready.
		 */
		if (hw->dev_caps.ts_dev_info.ts_ll_int_read) {
			struct ice_ptp_tx *tx = &pf->ptp.port.tx;
			u8 idx;

			/* Skip the request if the PF is not in a nominal
			 * state (e.g. resetting)
			 */
			if (!ice_pf_state_is_nominal(pf))
				return IRQ_HANDLED;

			/* Request the next in-use index after the last one
			 * read, wrapping around the tracker length
			 */
			spin_lock(&tx->lock);
			idx = find_next_bit_wrap(tx->in_use, tx->len,
						 tx->last_ll_ts_idx_read + 1);
			if (idx != tx->len)
				ice_ptp_req_tx_single_tstamp(tx, idx);
			spin_unlock(&tx->lock);

			return IRQ_HANDLED;
		}
		fallthrough; /* non-LL_TS E810 */
	case ICE_MAC_GENERIC:
	case ICE_MAC_GENERIC_3K_E825:
		/* All other devices process timestamps in the bottom half due
		 * to sleeping or polling.
		 */
		if (!ice_ptp_pf_handles_tx_interrupt(pf))
			return IRQ_HANDLED;

		set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
		return IRQ_WAKE_THREAD;
	case ICE_MAC_E830:
		/* E830 can read timestamps in the top half using rd32() */
		if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
			/* Process outstanding Tx timestamps. If there
			 * is more work, re-arm the interrupt to trigger again.
			 */
			wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
			ice_flush(hw);
		}
		return IRQ_HANDLED;
	default:
		return IRQ_HANDLED;
	}
}
2921
2922 /**
2923 * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timstamp interrupt
2924 * @pf: Board private structure
2925 *
2926 * The device PHY issues Tx timestamp interrupts to the driver for processing
2927 * timestamp data from the PHY. It will not interrupt again until all
2928 * current timestamp data is read. In rare circumstances, it is possible that
2929 * the driver fails to read all outstanding data.
2930 *
2931 * To avoid getting permanently stuck, periodically check if the PHY has
2932 * outstanding timestamp data. If so, trigger an interrupt from software to
2933 * process this data.
2934 */
ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf * pf)2935 static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
2936 {
2937 struct device *dev = ice_pf_to_dev(pf);
2938 struct ice_hw *hw = &pf->hw;
2939 bool trigger_oicr = false;
2940 unsigned int i;
2941
2942 if (!pf->ptp.port.tx.has_ready_bitmap)
2943 return;
2944
2945 if (!ice_pf_src_tmr_owned(pf))
2946 return;
2947
2948 for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) {
2949 u64 tstamp_ready;
2950 int err;
2951
2952 err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
2953 if (!err && tstamp_ready) {
2954 trigger_oicr = true;
2955 break;
2956 }
2957 }
2958
2959 if (trigger_oicr) {
2960 /* Trigger a software interrupt, to ensure this data
2961 * gets processed.
2962 */
2963 dev_dbg(dev, "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n");
2964
2965 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
2966 ice_flush(hw);
2967 }
2968 }
2969
ice_ptp_periodic_work(struct kthread_work * work)2970 static void ice_ptp_periodic_work(struct kthread_work *work)
2971 {
2972 struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
2973 struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
2974 int err;
2975
2976 if (pf->ptp.state != ICE_PTP_READY)
2977 return;
2978
2979 err = ice_ptp_update_cached_phctime(pf);
2980
2981 ice_ptp_maybe_trigger_tx_interrupt(pf);
2982
2983 /* Run twice a second or reschedule if phc update failed */
2984 kthread_queue_delayed_work(ptp->kworker, &ptp->work,
2985 msecs_to_jiffies(err ? 10 : 500));
2986 }
2987
/**
 * ice_ptp_prepare_for_reset - Prepare PTP for reset
 * @pf: Board private structure
 * @reset_type: the reset type being performed
 *
 * Stop PTP activity ahead of a device reset. For a PF reset only the
 * periodic work is stopped and timestamping disabled; for deeper resets the
 * Tx tracker, periodic outputs and source clock are also torn down, and the
 * current system time is captured so it can be restored after the reset.
 */
void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_ptp *ptp = &pf->ptp;
	u8 src_tmr;

	if (ptp->state != ICE_PTP_READY)
		return;

	ptp->state = ICE_PTP_RESETTING;

	/* Disable timestamping for both Tx and Rx */
	ice_ptp_disable_timestamp_mode(pf);

	kthread_cancel_delayed_work_sync(&ptp->work);

	/* A PF-only reset preserves the clock hardware state; the steps
	 * below are needed only for deeper resets.
	 */
	if (reset_type == ICE_RESET_PFR)
		return;

	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);

	/* Disable periodic outputs */
	ice_ptp_disable_all_perout(pf);

	src_tmr = ice_get_ptp_src_clock_index(&pf->hw);

	/* Disable source clock */
	wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);

	/* Acquire PHC and system timer to restore after reset */
	ptp->reset_time = ktime_get_real_ns();
}
3024
/**
 * ice_ptp_rebuild_owner - Initialize PTP clock owner after reset
 * @pf: Board private structure
 *
 * Companion function for ice_ptp_rebuild() which handles tasks that only the
 * PTP clock owner instance should perform: re-initialize the PHC, reprogram
 * the increment value and clock time under the global hardware lock, flush
 * stale timestamps, and re-enable PHY interrupts and periodic signals.
 *
 * Return: 0 on success, -EBUSY if the global hardware lock cannot be taken,
 * or a negative error code from the hardware programming helpers.
 */
static int ice_ptp_rebuild_owner(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	struct ice_hw *hw = &pf->hw;
	struct timespec64 ts;
	u64 time_diff;
	int err;

	err = ice_ptp_init_phc(hw);
	if (err)
		return err;

	/* Acquire the global hardware lock */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		return err;
	}

	/* Write the increment time value to PHY and LAN */
	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
	if (err)
		goto err_unlock;

	/* Write the initial Time value to PHY and LAN using the cached PHC
	 * time before the reset and time difference between stopping and
	 * starting the clock.
	 */
	if (ptp->cached_phc_time) {
		/* reset_time was captured in ice_ptp_prepare_for_reset() */
		time_diff = ktime_get_real_ns() - ptp->reset_time;
		ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);
	} else {
		/* No cached time available; fall back to system wall time */
		ts = ktime_to_timespec64(ktime_get_real());
	}
	err = ice_ptp_write_init(pf, &ts);
	if (err)
		goto err_unlock;

	/* Release the global hardware lock */
	ice_ptp_unlock(hw);

	/* Flush software tracking of any outstanding timestamps since we're
	 * about to flush the PHY timestamp block.
	 */
	ice_ptp_flush_all_tx_tracker(pf);

	/* Enable quad interrupts */
	err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
	if (err)
		return err;

	ice_ptp_restart_all_phy(pf);

	/* Re-enable all periodic outputs and external timestamp events */
	ice_ptp_enable_all_perout(pf);
	ice_ptp_enable_all_extts(pf);

	return 0;

err_unlock:
	ice_ptp_unlock(hw);
	return err;
}
3094
3095 /**
3096 * ice_ptp_rebuild - Initialize PTP hardware clock support after reset
3097 * @pf: Board private structure
3098 * @reset_type: the reset type being performed
3099 */
ice_ptp_rebuild(struct ice_pf * pf,enum ice_reset_req reset_type)3100 void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
3101 {
3102 struct ice_ptp *ptp = &pf->ptp;
3103 int err;
3104
3105 if (ptp->state == ICE_PTP_READY) {
3106 ice_ptp_prepare_for_reset(pf, reset_type);
3107 } else if (ptp->state != ICE_PTP_RESETTING) {
3108 err = -EINVAL;
3109 dev_err(ice_pf_to_dev(pf), "PTP was not initialized\n");
3110 goto err;
3111 }
3112
3113 if (ice_pf_src_tmr_owned(pf) && reset_type != ICE_RESET_PFR) {
3114 err = ice_ptp_rebuild_owner(pf);
3115 if (err)
3116 goto err;
3117 }
3118
3119 ptp->state = ICE_PTP_READY;
3120
3121 /* Start periodic work going */
3122 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
3123
3124 dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
3125 return;
3126
3127 err:
3128 ptp->state = ICE_PTP_ERROR;
3129 dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
3130 }
3131
ice_is_primary(struct ice_hw * hw)3132 static bool ice_is_primary(struct ice_hw *hw)
3133 {
3134 return hw->mac_type == ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) ?
3135 !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) :
3136 true;
3137 }
3138
ice_ptp_setup_adapter(struct ice_pf * pf)3139 static int ice_ptp_setup_adapter(struct ice_pf *pf)
3140 {
3141 if (!ice_pf_src_tmr_owned(pf) || !ice_is_primary(&pf->hw))
3142 return -EPERM;
3143
3144 pf->adapter->ctrl_pf = pf;
3145
3146 return 0;
3147 }
3148
ice_ptp_setup_pf(struct ice_pf * pf)3149 static int ice_ptp_setup_pf(struct ice_pf *pf)
3150 {
3151 struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
3152 struct ice_ptp *ptp = &pf->ptp;
3153
3154 if (WARN_ON(!ctrl_ptp) || pf->hw.mac_type == ICE_MAC_UNKNOWN)
3155 return -ENODEV;
3156
3157 INIT_LIST_HEAD(&ptp->port.list_node);
3158 mutex_lock(&pf->adapter->ports.lock);
3159
3160 list_add(&ptp->port.list_node,
3161 &pf->adapter->ports.ports);
3162 mutex_unlock(&pf->adapter->ports.lock);
3163
3164 return 0;
3165 }
3166
ice_ptp_cleanup_pf(struct ice_pf * pf)3167 static void ice_ptp_cleanup_pf(struct ice_pf *pf)
3168 {
3169 struct ice_ptp *ptp = &pf->ptp;
3170
3171 if (pf->hw.mac_type != ICE_MAC_UNKNOWN) {
3172 mutex_lock(&pf->adapter->ports.lock);
3173 list_del(&ptp->port.list_node);
3174 mutex_unlock(&pf->adapter->ports.lock);
3175 }
3176 }
3177
3178 /**
3179 * ice_ptp_clock_index - Get the PTP clock index for this device
3180 * @pf: Board private structure
3181 *
3182 * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock
3183 * is associated.
3184 */
ice_ptp_clock_index(struct ice_pf * pf)3185 int ice_ptp_clock_index(struct ice_pf *pf)
3186 {
3187 struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
3188 struct ptp_clock *clock;
3189
3190 if (!ctrl_ptp)
3191 return -1;
3192 clock = ctrl_ptp->clock;
3193
3194 return clock ? ptp_clock_index(clock) : -1;
3195 }
3196
3197 /**
3198 * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
3199 * @pf: Board private structure
3200 *
3201 * Setup and initialize a PTP clock device that represents the device hardware
3202 * clock. Save the clock index for other functions connected to the same
3203 * hardware resource.
3204 */
ice_ptp_init_owner(struct ice_pf * pf)3205 static int ice_ptp_init_owner(struct ice_pf *pf)
3206 {
3207 struct ice_hw *hw = &pf->hw;
3208 struct timespec64 ts;
3209 int err;
3210
3211 err = ice_ptp_init_phc(hw);
3212 if (err) {
3213 dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n",
3214 err);
3215 return err;
3216 }
3217
3218 /* Acquire the global hardware lock */
3219 if (!ice_ptp_lock(hw)) {
3220 err = -EBUSY;
3221 goto err_exit;
3222 }
3223
3224 /* Write the increment time value to PHY and LAN */
3225 err = ice_ptp_write_incval(hw, ice_base_incval(pf));
3226 if (err)
3227 goto err_unlock;
3228
3229 ts = ktime_to_timespec64(ktime_get_real());
3230 /* Write the initial Time value to PHY and LAN */
3231 err = ice_ptp_write_init(pf, &ts);
3232 if (err)
3233 goto err_unlock;
3234
3235 /* Release the global hardware lock */
3236 ice_ptp_unlock(hw);
3237
3238 /* Configure PHY interrupt settings */
3239 err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
3240 if (err)
3241 goto err_exit;
3242
3243 /* Ensure we have a clock device */
3244 err = ice_ptp_create_clock(pf);
3245 if (err)
3246 goto err_clk;
3247
3248 return 0;
3249 err_clk:
3250 pf->ptp.clock = NULL;
3251 err_exit:
3252 return err;
3253
3254 err_unlock:
3255 ice_ptp_unlock(hw);
3256 return err;
3257 }
3258
3259 /**
3260 * ice_ptp_init_work - Initialize PTP work threads
3261 * @pf: Board private structure
3262 * @ptp: PF PTP structure
3263 */
ice_ptp_init_work(struct ice_pf * pf,struct ice_ptp * ptp)3264 static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
3265 {
3266 struct kthread_worker *kworker;
3267
3268 /* Initialize work functions */
3269 kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);
3270
3271 /* Allocate a kworker for handling work required for the ports
3272 * connected to the PTP hardware clock.
3273 */
3274 kworker = kthread_run_worker(0, "ice-ptp-%s",
3275 dev_name(ice_pf_to_dev(pf)));
3276 if (IS_ERR(kworker))
3277 return PTR_ERR(kworker);
3278
3279 ptp->kworker = kworker;
3280
3281 /* Start periodic work going */
3282 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
3283
3284 return 0;
3285 }
3286
3287 /**
3288 * ice_ptp_init_port - Initialize PTP port structure
3289 * @pf: Board private structure
3290 * @ptp_port: PTP port structure
3291 *
3292 * Return: 0 on success, -ENODEV on invalid MAC type, -ENOMEM on failed alloc.
3293 */
ice_ptp_init_port(struct ice_pf * pf,struct ice_ptp_port * ptp_port)3294 static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
3295 {
3296 struct ice_hw *hw = &pf->hw;
3297
3298 mutex_init(&ptp_port->ps_lock);
3299
3300 switch (hw->mac_type) {
3301 case ICE_MAC_E810:
3302 case ICE_MAC_E830:
3303 case ICE_MAC_GENERIC_3K_E825:
3304 return ice_ptp_init_tx(pf, &ptp_port->tx, ptp_port->port_num);
3305 case ICE_MAC_GENERIC:
3306 kthread_init_delayed_work(&ptp_port->ov_work,
3307 ice_ptp_wait_for_offsets);
3308 return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
3309 ptp_port->port_num);
3310 default:
3311 return -ENODEV;
3312 }
3313 }
3314
3315 /**
3316 * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode
3317 * @pf: Board private structure
3318 *
3319 * Initialize the Tx timestamp interrupt mode for this device. For most device
3320 * types, each PF processes the interrupt and manages its own timestamps. For
3321 * E822-based devices, only the clock owner processes the timestamps. Other
3322 * PFs disable the interrupt and do not process their own timestamps.
3323 */
ice_ptp_init_tx_interrupt_mode(struct ice_pf * pf)3324 static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
3325 {
3326 switch (pf->hw.mac_type) {
3327 case ICE_MAC_GENERIC:
3328 /* E822 based PHY has the clock owner process the interrupt
3329 * for all ports.
3330 */
3331 if (ice_pf_src_tmr_owned(pf))
3332 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL;
3333 else
3334 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE;
3335 break;
3336 default:
3337 /* other PHY types handle their own Tx interrupt */
3338 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF;
3339 }
3340 }
3341
/**
 * ice_ptp_init - Initialize PTP hardware clock support
 * @pf: Board private structure
 *
 * Set up the device for interacting with the PTP hardware clock for all
 * functions, both the function that owns the clock hardware, and the
 * functions connected to the clock hardware.
 *
 * The clock owner will allocate and register a ptp_clock with the
 * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work
 * items used for asynchronous work such as Tx timestamps and periodic work.
 *
 * On any failure the PTP state is set to ICE_PTP_ERROR and any registered
 * clock device is released.
 */
void ice_ptp_init(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	struct ice_hw *hw = &pf->hw;
	int lane_num, err;

	ptp->state = ICE_PTP_INITIALIZING;

	/* Determine which PHY lane this port maps to */
	lane_num = ice_get_phy_lane_number(hw);
	if (lane_num < 0) {
		err = lane_num;
		goto err_exit;
	}

	ptp->port.port_num = (u8)lane_num;
	ice_ptp_init_hw(hw);

	ice_ptp_init_tx_interrupt_mode(pf);

	/* If this function owns the clock hardware, it must allocate and
	 * configure the PTP clock device to represent it.
	 */
	if (ice_pf_src_tmr_owned(pf) && ice_is_primary(hw)) {
		err = ice_ptp_setup_adapter(pf);
		if (err)
			goto err_exit;
		err = ice_ptp_init_owner(pf);
		if (err)
			goto err_exit;
	}

	/* Join the adapter-wide port list (all PFs, owner or not) */
	err = ice_ptp_setup_pf(pf);
	if (err)
		goto err_exit;

	err = ice_ptp_init_port(pf, &ptp->port);
	if (err)
		goto err_exit;

	/* Start the PHY timestamping block */
	ice_ptp_reset_phy_timestamping(pf);

	/* Configure initial Tx interrupt settings */
	ice_ptp_cfg_tx_interrupt(pf);

	/* Mark READY before starting the periodic work, which bails out
	 * unless the state is ICE_PTP_READY.
	 */
	ptp->state = ICE_PTP_READY;

	err = ice_ptp_init_work(pf, ptp);
	if (err)
		goto err_exit;

	dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
	return;

err_exit:
	/* If we registered a PTP clock, release it */
	if (pf->ptp.clock) {
		ptp_clock_unregister(ptp->clock);
		pf->ptp.clock = NULL;
	}
	ptp->state = ICE_PTP_ERROR;
	dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
}
3417
/**
 * ice_ptp_release - Disable the driver/HW support and unregister the clock
 * @pf: Board private structure
 *
 * This function handles the cleanup work required from the initialization by
 * clearing out the important information and unregistering the clock.
 * Teardown is roughly the reverse of ice_ptp_init(): stop timestamping,
 * leave the adapter port list, cancel the periodic work, stop the PHY, and
 * finally unregister the clock device (owner only).
 */
void ice_ptp_release(struct ice_pf *pf)
{
	if (pf->ptp.state != ICE_PTP_READY)
		return;

	pf->ptp.state = ICE_PTP_UNINIT;

	/* Disable timestamping for both Tx and Rx */
	ice_ptp_disable_timestamp_mode(pf);

	ice_ptp_cleanup_pf(pf);

	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);

	ice_ptp_disable_all_extts(pf);

	/* Cancel the periodic work before destroying its worker */
	kthread_cancel_delayed_work_sync(&pf->ptp.work);

	ice_ptp_port_phy_stop(&pf->ptp.port);
	mutex_destroy(&pf->ptp.port.ps_lock);
	if (pf->ptp.kworker) {
		kthread_destroy_worker(pf->ptp.kworker);
		pf->ptp.kworker = NULL;
	}

	/* Only the clock owner has a registered clock device */
	if (!pf->ptp.clock)
		return;

	/* Disable periodic outputs */
	ice_ptp_disable_all_perout(pf);

	ptp_clock_unregister(pf->ptp.clock);
	pf->ptp.clock = NULL;

	dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
}
3461