1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021, Intel Corporation. */
3
4 #include "ice.h"
5 #include "ice_lib.h"
6 #include "ice_trace.h"
7 #include "ice_cgu_regs.h"
8
9 static const char ice_pin_names[][64] = {
10 "SDP0",
11 "SDP1",
12 "SDP2",
13 "SDP3",
14 "TIME_SYNC",
15 "1PPS"
16 };
17
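/* Layout note (inferred from how these tables are consumed later in this
 * file): each ice_ptp_pin_desc entry appears to pair an input GPIO with an
 * output GPIO ({ gpio[0], gpio[1] }, -1 when that direction is not supported)
 * and per-direction delay compensation in nanoseconds ({ delay[0] for
 * external timestamps, delay[1] for periodic output }).
 */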
18 static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = {
19 /* name, gpio, delay */
20 { TIME_SYNC, { 4, -1 }, { 0, 0 }},
21 { ONE_PPS, { -1, 5 }, { 0, 11 }},
22 };
23
24 static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = {
25 /* name, gpio, delay */
26 { SDP0, { 0, 0 }, { 15, 14 }},
27 { SDP1, { 1, 1 }, { 15, 14 }},
28 { SDP2, { 2, 2 }, { 15, 14 }},
29 { SDP3, { 3, 3 }, { 15, 14 }},
30 { TIME_SYNC, { 4, -1 }, { 11, 0 }},
31 { ONE_PPS, { -1, 5 }, { 0, 9 }},
32 };
33
34 static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = {
35 /* name, gpio, delay */
36 { SDP0, { 0, 0 }, { 0, 1 }},
37 { SDP1, { 1, 1 }, { 0, 1 }},
38 { SDP2, { 2, 2 }, { 0, 1 }},
39 { SDP3, { 3, 3 }, { 0, 1 }},
40 { ONE_PPS, { -1, 5 }, { 0, 1 }},
41 };
42
43 static const char ice_pin_names_nvm[][64] = {
44 "GNSS",
45 "SMA1",
46 "U.FL1",
47 "SMA2",
48 "U.FL2",
49 };
50
51 static const struct ice_ptp_pin_desc ice_pin_desc_e810_sma[] = {
52 /* name, gpio, delay */
53 { GNSS, { 1, -1 }, { 0, 0 }},
54 { SMA1, { 1, 0 }, { 0, 1 }},
55 { UFL1, { -1, 0 }, { 0, 1 }},
56 { SMA2, { 3, 2 }, { 0, 1 }},
57 { UFL2, { 3, -1 }, { 0, 0 }},
58 };
59
60 static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf)
61 {
62 return !pf->adapter ? NULL : pf->adapter->ctrl_pf;
63 }
64
65 static struct ice_ptp *ice_get_ctrl_ptp(struct ice_pf *pf)
66 {
67 struct ice_pf *ctrl_pf = ice_get_ctrl_pf(pf);
68
69 return !ctrl_pf ? NULL : &ctrl_pf->ptp;
70 }
71
72 /**
73 * ice_ptp_find_pin_idx - Find pin index in ptp_pin_desc
74 * @pf: Board private structure
75 * @func: Pin function
76 * @chan: GPIO channel
77 *
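 * Used by the external timestamp and periodic output paths below to map a
 * (function, channel) pair back to its index in the pin descriptor tables.
 *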
78 * Return: index of the matching pin when present, -1 otherwise
79 */
80 static int ice_ptp_find_pin_idx(struct ice_pf *pf, enum ptp_pin_function func,
81 unsigned int chan)
82 {
83 const struct ptp_clock_info *info = &pf->ptp.info;
84 int i;
85
86 for (i = 0; i < info->n_pins; i++) {
87 if (info->pin_config[i].func == func &&
88 info->pin_config[i].chan == chan)
89 return i;
90 }
91
92 return -1;
93 }
94
95 /**
96 * ice_ptp_update_sma_data - update SMA pins data according to pins setup
97 * @pf: Board private structure
98 * @sma_pins: parsed SMA pins status
99 * @data: SMA data to update
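 *
 * For example, requesting PTP_PF_PEROUT on SMA1 while U.FL1 is unused sets
 * ICE_SMA1_DIR_EN below, switching the SMA1 connector to Tx.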
100 */
101 static void ice_ptp_update_sma_data(struct ice_pf *pf, unsigned int sma_pins[],
102 u8 *data)
103 {
104 const char *state1, *state2;
105
106 /* Set the right state based on the desired configuration.
107 * When bit is set, functionality is disabled.
108 */
109 *data &= ~ICE_ALL_SMA_MASK;
110 if (!sma_pins[UFL1 - 1]) {
111 if (sma_pins[SMA1 - 1] == PTP_PF_EXTTS) {
112 state1 = "SMA1 Rx, U.FL1 disabled";
113 *data |= ICE_SMA1_TX_EN;
114 } else if (sma_pins[SMA1 - 1] == PTP_PF_PEROUT) {
115 state1 = "SMA1 Tx, U.FL1 disabled";
116 *data |= ICE_SMA1_DIR_EN;
117 } else {
118 state1 = "SMA1 disabled, U.FL1 disabled";
119 *data |= ICE_SMA1_MASK;
120 }
121 } else {
122 /* U.FL1 Tx will always enable SMA1 Rx */
123 state1 = "SMA1 Rx, U.FL1 Tx";
124 }
125
126 if (!sma_pins[UFL2 - 1]) {
127 if (sma_pins[SMA2 - 1] == PTP_PF_EXTTS) {
128 state2 = "SMA2 Rx, U.FL2 disabled";
129 *data |= ICE_SMA2_TX_EN | ICE_SMA2_UFL2_RX_DIS;
130 } else if (sma_pins[SMA2 - 1] == PTP_PF_PEROUT) {
131 state2 = "SMA2 Tx, U.FL2 disabled";
132 *data |= ICE_SMA2_DIR_EN | ICE_SMA2_UFL2_RX_DIS;
133 } else {
134 state2 = "SMA2 disabled, U.FL2 disabled";
135 *data |= ICE_SMA2_MASK;
136 }
137 } else {
138 if (!sma_pins[SMA2 - 1]) {
139 state2 = "SMA2 disabled, U.FL2 Rx";
140 *data |= ICE_SMA2_DIR_EN | ICE_SMA2_TX_EN;
141 } else {
142 state2 = "SMA2 Tx, U.FL2 Rx";
143 *data |= ICE_SMA2_DIR_EN;
144 }
145 }
146
147 dev_dbg(ice_pf_to_dev(pf), "%s, %s\n", state1, state2);
148 }
149
150 /**
151 * ice_ptp_set_sma_cfg - set the configuration of the SMA control logic
152 * @pf: Board private structure
153 *
154 * Return: 0 on success, negative error code otherwise
155 */
156 static int ice_ptp_set_sma_cfg(struct ice_pf *pf)
157 {
158 const struct ice_ptp_pin_desc *ice_pins = pf->ptp.ice_pin_desc;
159 struct ptp_pin_desc *pins = pf->ptp.pin_desc;
160 unsigned int sma_pins[ICE_SMA_PINS_NUM] = {};
161 int err;
162 u8 data;
163
164 /* Read initial pin state value */
165 err = ice_read_sma_ctrl(&pf->hw, &data);
166 if (err)
167 return err;
168
169 /* Get SMA/U.FL pins states */
170 for (int i = 0; i < pf->ptp.info.n_pins; i++)
171 if (pins[i].func) {
172 int name_idx = ice_pins[i].name_idx;
173
174 switch (name_idx) {
175 case SMA1:
176 case UFL1:
177 case SMA2:
178 case UFL2:
179 sma_pins[name_idx - 1] = pins[i].func;
180 break;
181 default:
182 continue;
183 }
184 }
185
186 ice_ptp_update_sma_data(pf, sma_pins, &data);
187 return ice_write_sma_ctrl(&pf->hw, data);
188 }
189
190 /**
191 * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
192 * @pf: Board private structure
193 *
194 * Program the device to respond appropriately to the Tx timestamp interrupt
195 * cause.
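 *
 * Depending on pf->ptp.tx_interrupt_mode this either unmasks the Tx
 * timestamp interrupt for every quad (typically the clock owner), masks it
 * entirely, or follows this PF's own Tx timestamping configuration.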
196 */
197 static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf)
198 {
199 struct ice_hw *hw = &pf->hw;
200 bool enable;
201 u32 val;
202
203 switch (pf->ptp.tx_interrupt_mode) {
204 case ICE_PTP_TX_INTERRUPT_ALL:
205 /* React to interrupts across all quads. */
206 wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
207 enable = true;
208 break;
209 case ICE_PTP_TX_INTERRUPT_NONE:
210 /* Do not react to interrupts on any quad. */
211 wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
212 enable = false;
213 break;
214 case ICE_PTP_TX_INTERRUPT_SELF:
215 default:
216 enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON;
217 break;
218 }
219
220 /* Configure the Tx timestamp interrupt */
221 val = rd32(hw, PFINT_OICR_ENA);
222 if (enable)
223 val |= PFINT_OICR_TSYN_TX_M;
224 else
225 val &= ~PFINT_OICR_TSYN_TX_M;
226 wr32(hw, PFINT_OICR_ENA, val);
227 }
228
229 /**
230 * ice_set_rx_tstamp - Enable or disable Rx timestamping
231 * @pf: The PF pointer to search in
232 * @on: bool value for whether timestamps are enabled or disabled
233 */
234 static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
235 {
236 struct ice_vsi *vsi;
237 u16 i;
238
239 vsi = ice_get_main_vsi(pf);
240 if (!vsi || !vsi->rx_rings)
241 return;
242
243 /* Set the timestamp flag for all the Rx rings */
244 ice_for_each_rxq(vsi, i) {
245 if (!vsi->rx_rings[i])
246 continue;
247 vsi->rx_rings[i]->ptp_rx = on;
248 }
249 }
250
251 /**
252 * ice_ptp_disable_timestamp_mode - Disable current timestamp mode
253 * @pf: Board private structure
254 *
255 * Called during preparation for reset to temporarily disable timestamping on
256 * the device. Called during remove to disable timestamping while cleaning up
257 * driver resources.
258 */
259 static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf)
260 {
261 struct ice_hw *hw = &pf->hw;
262 u32 val;
263
264 val = rd32(hw, PFINT_OICR_ENA);
265 val &= ~PFINT_OICR_TSYN_TX_M;
266 wr32(hw, PFINT_OICR_ENA, val);
267
268 ice_set_rx_tstamp(pf, false);
269 }
270
271 /**
272 * ice_ptp_restore_timestamp_mode - Restore timestamp configuration
273 * @pf: Board private structure
274 *
275 * Called at the end of rebuild to restore timestamp configuration after
276 * a device reset.
277 */
278 void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
279 {
280 struct ice_hw *hw = &pf->hw;
281 bool enable_rx;
282
283 ice_ptp_cfg_tx_interrupt(pf);
284
285 enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
286 ice_set_rx_tstamp(pf, enable_rx);
287
288 /* Trigger an immediate software interrupt to ensure that timestamps
289 * which occurred during reset are handled now.
290 */
291 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
292 ice_flush(hw);
293 }
294
295 /**
296 * ice_ptp_read_src_clk_reg - Read the source clock register
297 * @pf: Board private structure
298 * @sts: Optional parameter for holding a pair of system timestamps from
299 * the system clock. Will be ignored if NULL is given.
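 *
 * On E830 hardware the time is latched with a single 64-bit read; otherwise
 * TIME_L is sampled again after TIME_H and, if it rolled over in between,
 * the pair is re-read so that a consistent value is returned.
 *
 * Return: the 64-bit source timer (PHC) time, in nanoseconds.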
300 */
301 u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
302 struct ptp_system_timestamp *sts)
303 {
304 struct ice_hw *hw = &pf->hw;
305 u32 hi, lo, lo2;
306 u8 tmr_idx;
307
308 if (!ice_is_primary(hw))
309 hw = ice_get_primary_hw(pf);
310
311 tmr_idx = ice_get_ptp_src_clock_index(hw);
312 guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
313 /* Read the system timestamp pre PHC read */
314 ptp_read_system_prets(sts);
315
316 if (hw->mac_type == ICE_MAC_E830) {
317 u64 clk_time = rd64(hw, E830_GLTSYN_TIME_L(tmr_idx));
318
319 /* Read the system timestamp post PHC read */
320 ptp_read_system_postts(sts);
321
322 return clk_time;
323 }
324
325 lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
326
327 /* Read the system timestamp post PHC read */
328 ptp_read_system_postts(sts);
329
330 hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
331 lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));
332
333 if (lo2 < lo) {
334 /* if TIME_L rolled over read TIME_L again and update
335 * system timestamps
336 */
337 ptp_read_system_prets(sts);
338 lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
339 ptp_read_system_postts(sts);
340 hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
341 }
342
343 return ((u64)hi << 32) | lo;
344 }
345
346 /**
347 * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
348 * @cached_phc_time: recently cached copy of PHC time
349 * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
350 *
351 * Hardware captures timestamps which contain only 32 bits of nominal
352 * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
353 * Note that the captured timestamp values may be 40 bits, but the lower
354 * 8 bits are sub-nanoseconds and generally discarded.
355 *
356 * Extend the 32bit nanosecond timestamp using the following algorithm and
357 * assumptions:
358 *
359 * 1) have a recently cached copy of the PHC time
360 * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1
361 * seconds) before or after the PHC time was captured.
362 * 3) calculate the delta between the cached time and the timestamp
363 * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
364 * captured after the PHC time. In this case, the full timestamp is just
365 * the cached PHC time plus the delta.
366 * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
367 * timestamp was captured *before* the PHC time, i.e. because the PHC
368 * cache was updated after the timestamp was captured by hardware. In this
369 * case, the full timestamp is the cached time minus the inverse delta.
370 *
371 * This algorithm works even if the PHC time was updated after a Tx timestamp
372 * was requested, but before the Tx timestamp event was reported from
373 * hardware.
374 *
375 * This calculation primarily relies on keeping the cached PHC time up to
376 * date. If the timestamp was captured more than 2^31 nanoseconds after the
377 * PHC time, it is possible that the lower 32bits of PHC time have
378 * overflowed more than once, and we might generate an incorrect timestamp.
379 *
380 * This is prevented by (a) periodically updating the cached PHC time once
381 * a second, and (b) discarding any Tx timestamp packet if it has waited for
382 * a timestamp for more than one second.
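 *
 * Worked example (illustrative values): with cached_phc_time 0x200001000
 * and in_tstamp 0x00001F00, delta is 0xF00 (< 2^31), so the extended value
 * is 0x200001F00. With in_tstamp 0x00000F00 the delta wraps above 2^31, so
 * the reverse delta 0x100 is subtracted instead, giving 0x200000F00.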
383 */
384 static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
385 {
386 u32 delta, phc_time_lo;
387 u64 ns;
388
389 /* Extract the lower 32 bits of the PHC time */
390 phc_time_lo = (u32)cached_phc_time;
391
392 /* Calculate the delta between the lower 32bits of the cached PHC
393 * time and the in_tstamp value
394 */
395 delta = (in_tstamp - phc_time_lo);
396
397 /* Do not assume that the in_tstamp is always more recent than the
398 * cached PHC time. If the delta is large, it indicates that the
399 * in_tstamp was taken in the past, and should be converted
400 * forward.
401 */
402 if (delta > (U32_MAX / 2)) {
403 /* reverse the delta calculation here */
404 delta = (phc_time_lo - in_tstamp);
405 ns = cached_phc_time - delta;
406 } else {
407 ns = cached_phc_time + delta;
408 }
409
410 return ns;
411 }
412
413 /**
414 * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
415 * @pf: Board private structure
416 * @in_tstamp: Ingress/egress 40b timestamp value
417 *
418 * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
419 * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
420 *
421 * *--------------------------------------------------------------*
422 * | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
423 * *--------------------------------------------------------------*
424 *
425 * The low bit is an indicator of whether the timestamp is valid. The next
426 * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
427 * and the remaining 32 bits are the lower 32 bits of the PHC timer.
428 *
429 * It is assumed that the caller verifies the timestamp is valid prior to
430 * calling this function.
431 *
432 * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
433 * time stored in the device private PTP structure as the basis for timestamp
434 * extension.
435 *
436 * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
437 * algorithm.
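 *
 * Concretely (illustrative value): for a raw 40-bit timestamp of
 * 0x12345678AB, the low byte 0xAB (valid bit plus sub-nanosecond bits) is
 * shifted away and only 0x12345678 is passed to ice_ptp_extend_32b_ts().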
438 */
439 static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
440 {
441 const u64 mask = GENMASK_ULL(31, 0);
442 unsigned long discard_time;
443
444 /* Discard the hardware timestamp if the cached PHC time is too old */
445 discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
446 if (time_is_before_jiffies(discard_time)) {
447 pf->ptp.tx_hwtstamp_discarded++;
448 return 0;
449 }
450
451 return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
452 (in_tstamp >> 8) & mask);
453 }
454
455 /**
456 * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps
457 * @tx: the PTP Tx timestamp tracker to check
458 *
459 * Check that a given PTP Tx timestamp tracker is up, i.e. that it is ready
460 * to accept new timestamp requests.
461 *
462 * Assumes the tx->lock spinlock is already held.
463 */
464 static bool
465 ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
466 {
467 lockdep_assert_held(&tx->lock);
468
469 return tx->init && !tx->calibrating;
470 }
471
472 /**
473 * ice_ptp_req_tx_single_tstamp - Request Tx timestamp for a port from FW
474 * @tx: the PTP Tx timestamp tracker
475 * @idx: index of the timestamp to request
476 */
477 void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
478 {
479 struct ice_e810_params *params;
480 struct ice_ptp_port *ptp_port;
481 unsigned long flags;
482 struct sk_buff *skb;
483 struct ice_pf *pf;
484
485 if (!tx->init)
486 return;
487
488 ptp_port = container_of(tx, struct ice_ptp_port, tx);
489 pf = ptp_port_to_pf(ptp_port);
490 params = &pf->hw.ptp.phy.e810;
491
492 /* Drop packets which have waited for more than 2 seconds */
493 if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
494 /* Count the number of Tx timestamps that timed out */
495 pf->ptp.tx_hwtstamp_timeouts++;
496
497 skb = tx->tstamps[idx].skb;
498 tx->tstamps[idx].skb = NULL;
499 clear_bit(idx, tx->in_use);
500
501 dev_kfree_skb_any(skb);
502 return;
503 }
504
505 ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
506
507 spin_lock_irqsave(&params->atqbal_wq.lock, flags);
508
509 params->atqbal_flags |= ATQBAL_FLAGS_INTR_IN_PROGRESS;
510
511 /* Write TS index to read to the PF register so the FW can read it */
512 wr32(&pf->hw, REG_LL_PROXY_H,
513 REG_LL_PROXY_H_TS_INTR_ENA | FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) |
514 REG_LL_PROXY_H_EXEC);
515 tx->last_ll_ts_idx_read = idx;
516
517 spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
518 }
519
520 /**
521 * ice_ptp_complete_tx_single_tstamp - Complete Tx timestamp for a port
522 * @tx: the PTP Tx timestamp tracker
523 */
524 void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
525 {
526 struct skb_shared_hwtstamps shhwtstamps = {};
527 u8 idx = tx->last_ll_ts_idx_read;
528 struct ice_e810_params *params;
529 struct ice_ptp_port *ptp_port;
530 u64 raw_tstamp, tstamp;
531 bool drop_ts = false;
532 struct sk_buff *skb;
533 unsigned long flags;
534 struct device *dev;
535 struct ice_pf *pf;
536 u32 reg_ll_high;
537
538 if (!tx->init || tx->last_ll_ts_idx_read < 0)
539 return;
540
541 ptp_port = container_of(tx, struct ice_ptp_port, tx);
542 pf = ptp_port_to_pf(ptp_port);
543 dev = ice_pf_to_dev(pf);
544 params = &pf->hw.ptp.phy.e810;
545
546 ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
547
548 spin_lock_irqsave(&params->atqbal_wq.lock, flags);
549
550 if (!(params->atqbal_flags & ATQBAL_FLAGS_INTR_IN_PROGRESS))
551 dev_dbg(dev, "%s: low latency interrupt request not in progress?\n",
552 __func__);
553
554 /* Read the low 32 bit value */
555 raw_tstamp = rd32(&pf->hw, REG_LL_PROXY_L);
556 /* Read the status together with high TS part */
557 reg_ll_high = rd32(&pf->hw, REG_LL_PROXY_H);
558
559 /* Wake up threads waiting on low latency interface */
560 params->atqbal_flags &= ~ATQBAL_FLAGS_INTR_IN_PROGRESS;
561
562 wake_up_locked(&params->atqbal_wq);
563
564 spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
565
566 /* When the bit is cleared, the TS is ready in the register */
567 if (reg_ll_high & REG_LL_PROXY_H_EXEC) {
568 dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready");
569 return;
570 }
571
572 /* The high 8 bits of the TS are in bits 16:23 of the register */
573 raw_tstamp |= ((u64)FIELD_GET(REG_LL_PROXY_H_TS_HIGH, reg_ll_high)) << 32;
574
575 /* Devices using this interface always verify the timestamp differs
576 * relative to the last cached timestamp value.
577 */
578 if (raw_tstamp == tx->tstamps[idx].cached_tstamp)
579 return;
580
581 tx->tstamps[idx].cached_tstamp = raw_tstamp;
582 clear_bit(idx, tx->in_use);
583 skb = tx->tstamps[idx].skb;
584 tx->tstamps[idx].skb = NULL;
585 if (test_and_clear_bit(idx, tx->stale))
586 drop_ts = true;
587
588 if (!skb)
589 return;
590
591 if (drop_ts) {
592 dev_kfree_skb_any(skb);
593 return;
594 }
595
596 /* Extend the timestamp using cached PHC time */
597 tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
598 if (tstamp) {
599 shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
600 ice_trace(tx_tstamp_complete, skb, idx);
601 }
602
603 skb_tstamp_tx(skb, &shhwtstamps);
604 dev_kfree_skb_any(skb);
605 }
606
607 /**
608 * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port
609 * @tx: the PTP Tx timestamp tracker
610 *
611 * Process timestamps captured by the PHY associated with this port. To do
612 * this, loop over each index with a waiting skb.
613 *
614 * If a given index has a valid timestamp, perform the following steps:
615 *
616 * 1) check that the timestamp request is not stale
617 * 2) check that a timestamp is ready and available in the PHY memory bank
618 * 3) read and copy the timestamp out of the PHY register
619 * 4) unlock the index by clearing the associated in_use bit
620 * 5) check if the timestamp is stale, and discard if so
621 * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value
622 * 7) send this 64 bit timestamp to the stack
623 *
624 * Note that we do not hold the tracking lock while reading the Tx timestamp.
625 * This is because reading the timestamp requires taking a mutex that might
626 * sleep.
627 *
628 * The only place where we set in_use is when a new timestamp is initiated
629 * with a slot index. This is only called in the hard xmit routine where an
630 * SKB has a request flag set. The only places where we clear this bit is this
631 * function, or during teardown when the Tx timestamp tracker is being
632 * removed. A timestamp index will never be re-used until the in_use bit for
633 * that index is cleared.
634 *
635 * If a Tx thread starts a new timestamp, we might not begin processing it
636 * right away but we will notice it at the end when we re-queue the task.
637 *
638 * If a Tx thread starts a new timestamp just after this function exits, the
639 * interrupt for that timestamp should re-trigger this function once
640 * a timestamp is ready.
641 *
642 * In cases where the PTP hardware clock was directly adjusted, some
643 * timestamps may not be able to safely use the timestamp extension math. In
644 * this case, software will set the stale bit for any outstanding Tx
645 * timestamps when the clock is adjusted. Then this function will discard
646 * those captured timestamps instead of sending them to the stack.
647 *
648 * If a Tx packet has been waiting for more than 2 seconds, it is not possible
649 * to correctly extend the timestamp using the cached PHC time. It is
650 * extremely unlikely that a packet will ever take this long to timestamp. If
651 * we detect a Tx timestamp request that has waited for this long we assume
652 * the packet will never be sent by hardware and discard it without reading
653 * the timestamp register.
654 */
655 static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
656 {
657 struct ice_ptp_port *ptp_port;
658 unsigned long flags;
659 struct ice_pf *pf;
660 struct ice_hw *hw;
661 u64 tstamp_ready;
662 bool link_up;
663 int err;
664 u8 idx;
665
666 ptp_port = container_of(tx, struct ice_ptp_port, tx);
667 pf = ptp_port_to_pf(ptp_port);
668 hw = &pf->hw;
669
670 /* Read the Tx ready status first */
671 if (tx->has_ready_bitmap) {
672 err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
673 if (err)
674 return;
675 }
676
677 /* Drop packets if the link went down */
678 link_up = ptp_port->link_up;
679
680 for_each_set_bit(idx, tx->in_use, tx->len) {
681 struct skb_shared_hwtstamps shhwtstamps = {};
682 u8 phy_idx = idx + tx->offset;
683 u64 raw_tstamp = 0, tstamp;
684 bool drop_ts = !link_up;
685 struct sk_buff *skb;
686
687 /* Drop packets which have waited for more than 2 seconds */
688 if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
689 drop_ts = true;
690
691 /* Count the number of Tx timestamps that timed out */
692 pf->ptp.tx_hwtstamp_timeouts++;
693 }
694
695 /* Only read a timestamp from the PHY if it's marked as ready
696 * by the tstamp_ready register. This avoids unnecessary
697 * reading of timestamps which are not yet valid. This is
698 * important as we must read all timestamps which are valid
699 * and only timestamps which are valid during each interrupt.
700 * If we do not, the hardware logic for generating a new
701 * interrupt can get stuck on some devices.
702 */
703 if (tx->has_ready_bitmap &&
704 !(tstamp_ready & BIT_ULL(phy_idx))) {
705 if (drop_ts)
706 goto skip_ts_read;
707
708 continue;
709 }
710
711 ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
712
713 err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp);
714 if (err && !drop_ts)
715 continue;
716
717 ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
718
719 /* For PHYs which don't implement a proper timestamp ready
720 * bitmap, verify that the timestamp value is different
721 * from the last cached timestamp. If it is not, skip this for
722 * now assuming it hasn't yet been captured by hardware.
723 */
724 if (!drop_ts && !tx->has_ready_bitmap &&
725 raw_tstamp == tx->tstamps[idx].cached_tstamp)
726 continue;
727
728 /* Discard any timestamp value without the valid bit set */
729 if (!(raw_tstamp & ICE_PTP_TS_VALID))
730 drop_ts = true;
731
732 skip_ts_read:
733 spin_lock_irqsave(&tx->lock, flags);
734 if (!tx->has_ready_bitmap && raw_tstamp)
735 tx->tstamps[idx].cached_tstamp = raw_tstamp;
736 clear_bit(idx, tx->in_use);
737 skb = tx->tstamps[idx].skb;
738 tx->tstamps[idx].skb = NULL;
739 if (test_and_clear_bit(idx, tx->stale))
740 drop_ts = true;
741 spin_unlock_irqrestore(&tx->lock, flags);
742
743 /* It is unlikely but possible that the SKB will have been
744 * flushed at this point due to link change or teardown.
745 */
746 if (!skb)
747 continue;
748
749 if (drop_ts) {
750 dev_kfree_skb_any(skb);
751 continue;
752 }
753
754 /* Extend the timestamp using cached PHC time */
755 tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
756 if (tstamp) {
757 shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
758 ice_trace(tx_tstamp_complete, skb, idx);
759 }
760
761 skb_tstamp_tx(skb, &shhwtstamps);
762 dev_kfree_skb_any(skb);
763 }
764 }
765
766 /**
767 * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device
768 * @pf: Board private structure
769 */
770 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
771 {
772 struct ice_ptp_port *port;
773 unsigned int i;
774
775 mutex_lock(&pf->adapter->ports.lock);
776 list_for_each_entry(port, &pf->adapter->ports.ports, list_node) {
777 struct ice_ptp_tx *tx = &port->tx;
778
779 if (!tx || !tx->init)
780 continue;
781
782 ice_ptp_process_tx_tstamp(tx);
783 }
784 mutex_unlock(&pf->adapter->ports.lock);
785
786 for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) {
787 u64 tstamp_ready;
788 int err;
789
790 /* Read the Tx ready status first */
791 err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
792 if (err)
793 break;
794 else if (tstamp_ready)
795 return ICE_TX_TSTAMP_WORK_PENDING;
796 }
797
798 return ICE_TX_TSTAMP_WORK_DONE;
799 }
800
801 /**
802 * ice_ptp_tx_tstamp - Process Tx timestamps for this function.
803 * @tx: the Tx timestamp tracker to process
804 *
805 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete
806 * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise.
807 */
808 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
809 {
810 bool more_timestamps;
811 unsigned long flags;
812
813 if (!tx->init)
814 return ICE_TX_TSTAMP_WORK_DONE;
815
816 /* Process the Tx timestamp tracker */
817 ice_ptp_process_tx_tstamp(tx);
818
819 /* Check if there are outstanding Tx timestamps */
820 spin_lock_irqsave(&tx->lock, flags);
821 more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);
822 spin_unlock_irqrestore(&tx->lock, flags);
823
824 if (more_timestamps)
825 return ICE_TX_TSTAMP_WORK_PENDING;
826
827 return ICE_TX_TSTAMP_WORK_DONE;
828 }
829
830 /**
831 * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
832 * @tx: Tx tracking structure to initialize
833 *
834 * Assumes that the length has already been initialized. Do not call directly,
835 * use one of the ice_ptp_init_tx_* functions instead.
836 */
837 static int
838 ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
839 {
840 unsigned long *in_use, *stale;
841 struct ice_tx_tstamp *tstamps;
842
843 tstamps = kcalloc(tx->len, sizeof(*tstamps), GFP_KERNEL);
844 in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
845 stale = bitmap_zalloc(tx->len, GFP_KERNEL);
846
847 if (!tstamps || !in_use || !stale) {
848 kfree(tstamps);
849 bitmap_free(in_use);
850 bitmap_free(stale);
851
852 return -ENOMEM;
853 }
854
855 tx->tstamps = tstamps;
856 tx->in_use = in_use;
857 tx->stale = stale;
858 tx->init = 1;
859 tx->last_ll_ts_idx_read = -1;
860
861 spin_lock_init(&tx->lock);
862
863 return 0;
864 }
865
866 /**
867 * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
868 * @pf: Board private structure
869 * @tx: the tracker to flush
870 *
871 * Called during teardown when a Tx tracker is being removed.
872 */
873 static void
874 ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
875 {
876 struct ice_hw *hw = &pf->hw;
877 unsigned long flags;
878 u64 tstamp_ready;
879 int err;
880 u8 idx;
881
882 err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
883 if (err) {
884 dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n",
885 tx->block, err);
886
887 /* If we fail to read the Tx timestamp ready bitmap just
888 * skip clearing the PHY timestamps.
889 */
890 tstamp_ready = 0;
891 }
892
893 for_each_set_bit(idx, tx->in_use, tx->len) {
894 u8 phy_idx = idx + tx->offset;
895 struct sk_buff *skb;
896
897 /* In case this timestamp is ready, we need to clear it. */
898 if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx)))
899 ice_clear_phy_tstamp(hw, tx->block, phy_idx);
900
901 spin_lock_irqsave(&tx->lock, flags);
902 skb = tx->tstamps[idx].skb;
903 tx->tstamps[idx].skb = NULL;
904 clear_bit(idx, tx->in_use);
905 clear_bit(idx, tx->stale);
906 spin_unlock_irqrestore(&tx->lock, flags);
907
908 /* Count the number of Tx timestamps flushed */
909 pf->ptp.tx_hwtstamp_flushed++;
910
911 /* Free the SKB after we've cleared the bit */
912 dev_kfree_skb_any(skb);
913 }
914 }
915
916 /**
917 * ice_ptp_mark_tx_tracker_stale - Mark unfinished timestamps as stale
918 * @tx: the tracker to mark
919 *
920 * Mark currently outstanding Tx timestamps as stale. This prevents sending
921 * their timestamp value to the stack. This is required to prevent extending
922 * the 40bit hardware timestamp incorrectly.
923 *
924 * This should be called when the PTP clock is modified such as after a set
925 * time request.
926 */
927 static void
928 ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
929 {
930 unsigned long flags;
931
932 spin_lock_irqsave(&tx->lock, flags);
933 bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len);
934 spin_unlock_irqrestore(&tx->lock, flags);
935 }
936
937 /**
938 * ice_ptp_flush_all_tx_tracker - Flush all timestamp trackers on this clock
939 * @pf: Board private structure
940 *
941 * Called by the clock owner to flush all the Tx timestamp trackers associated
942 * with the clock.
943 */
944 static void
945 ice_ptp_flush_all_tx_tracker(struct ice_pf *pf)
946 {
947 struct ice_ptp_port *port;
948
949 list_for_each_entry(port, &pf->adapter->ports.ports, list_node)
950 ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx);
951 }
952
953 /**
954 * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
955 * @pf: Board private structure
956 * @tx: Tx tracking structure to release
957 *
958 * Free memory associated with the Tx timestamp tracker.
959 */
960 static void
961 ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
962 {
963 unsigned long flags;
964
965 spin_lock_irqsave(&tx->lock, flags);
966 tx->init = 0;
967 spin_unlock_irqrestore(&tx->lock, flags);
968
969 /* wait for potentially outstanding interrupt to complete */
970 synchronize_irq(pf->oicr_irq.virq);
971
972 ice_ptp_flush_tx_tracker(pf, tx);
973
974 kfree(tx->tstamps);
975 tx->tstamps = NULL;
976
977 bitmap_free(tx->in_use);
978 tx->in_use = NULL;
979
980 bitmap_free(tx->stale);
981 tx->stale = NULL;
982
983 tx->len = 0;
984 }
985
986 /**
987 * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
988 * @pf: Board private structure
989 * @tx: the Tx tracking structure to initialize
990 * @port: the port this structure tracks
991 *
992 * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
993 * the timestamp block is shared for all ports in the same quad. To avoid
994 * ports using the same timestamp index, logically break the block of
995 * registers into chunks based on the port number.
996 *
997 * Return: 0 on success, -ENOMEM when out of memory
998 */
999 static int ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx,
1000 u8 port)
1001 {
1002 tx->block = ICE_GET_QUAD_NUM(port);
1003 tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
1004 tx->len = INDEX_PER_PORT_E82X;
1005 tx->has_ready_bitmap = 1;
1006
1007 return ice_ptp_alloc_tx_tracker(tx);
1008 }
1009
1010 /**
1011 * ice_ptp_init_tx - Initialize tracking for Tx timestamps
1012 * @pf: Board private structure
1013 * @tx: the Tx tracking structure to initialize
1014 * @port: the port this structure tracks
1015 *
1016 * Initialize the Tx timestamp tracker for this PF. For all PHYs except E82X,
1017 * each port has its own block of timestamps, independent of the other ports.
1018 *
1019 * Return: 0 on success, -ENOMEM when out of memory
1020 */
1021 static int ice_ptp_init_tx(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
1022 {
1023 tx->block = port;
1024 tx->offset = 0;
1025 tx->len = INDEX_PER_PORT;
1026
1027 /* The E810 PHY does not provide a timestamp ready bitmap. Instead,
1028 * verify new timestamps against cached copy of the last read
1029 * timestamp.
1030 */
1031 tx->has_ready_bitmap = pf->hw.mac_type != ICE_MAC_E810;
1032
1033 return ice_ptp_alloc_tx_tracker(tx);
1034 }
1035
1036 /**
1037 * ice_ptp_update_cached_phctime - Update the cached PHC time values
1038 * @pf: Board specific private structure
1039 *
1040 * This function updates the system time values which are cached in the PF
1041 * structure and the Rx rings.
1042 *
1043 * This function must be called periodically to ensure that the cached value
1044 * is never more than 2 seconds old.
1045 *
1046 * Note that the cached copy in the PF PTP structure is always updated, even
1047 * if we can't update the copy in the Rx rings.
1048 *
1049 * Return:
1050 * * 0 - OK, successfully updated
1051 * * -EAGAIN - PF was busy, need to reschedule the update
1052 */
1053 static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
1054 {
1055 struct device *dev = ice_pf_to_dev(pf);
1056 unsigned long update_before;
1057 u64 systime;
1058 int i;
1059
1060 update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
1061 if (pf->ptp.cached_phc_time &&
1062 time_is_before_jiffies(update_before)) {
1063 unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;
1064
1065 dev_warn(dev, "%u msecs passed between updates to cached PHC time\n",
1066 jiffies_to_msecs(time_taken));
1067 pf->ptp.late_cached_phc_updates++;
1068 }
1069
1070 /* Read the current PHC time */
1071 systime = ice_ptp_read_src_clk_reg(pf, NULL);
1072
1073 /* Update the cached PHC time stored in the PF structure */
1074 WRITE_ONCE(pf->ptp.cached_phc_time, systime);
1075 WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);
1076
1077 if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
1078 return -EAGAIN;
1079
1080 ice_for_each_vsi(pf, i) {
1081 struct ice_vsi *vsi = pf->vsi[i];
1082 int j;
1083
1084 if (!vsi)
1085 continue;
1086
1087 if (vsi->type != ICE_VSI_PF)
1088 continue;
1089
1090 ice_for_each_rxq(vsi, j) {
1091 if (!vsi->rx_rings[j])
1092 continue;
1093 WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
1094 }
1095 }
1096 clear_bit(ICE_CFG_BUSY, pf->state);
1097
1098 return 0;
1099 }
1100
1101 /**
1102 * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
1103 * @pf: Board specific private structure
1104 *
1105 * This function must be called when the cached PHC time is no longer valid,
1106 * such as after a time adjustment. It marks any currently outstanding Tx
1107 * timestamps as stale and updates the cached PHC time for both the PF and Rx
1108 * rings.
1109 *
1110 * If updating the PHC time cannot be done immediately, a warning message is
1111 * logged and the work item is scheduled immediately to minimize the window
1112 * with a wrong cached timestamp.
1113 */
1114 static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
1115 {
1116 struct device *dev = ice_pf_to_dev(pf);
1117 int err;
1118
1119 /* Update the cached PHC time immediately if possible, otherwise
1120 * schedule the work item to execute soon.
1121 */
1122 err = ice_ptp_update_cached_phctime(pf);
1123 if (err) {
1124 /* If another thread is updating the Rx rings, we won't
1125 * properly reset them here. This could lead to reporting of
1126 * invalid timestamps, but there isn't much we can do.
1127 */
1128 dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
1129 __func__);
1130
1131 /* Queue the work item to update the Rx rings when possible */
1132 kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
1133 msecs_to_jiffies(10));
1134 }
1135
1136 /* Mark any outstanding timestamps as stale, since they might have
1137 * been captured in hardware before the time update. This could lead
1138 * to us extending them with the wrong cached value resulting in
1139 * incorrect timestamp values.
1140 */
1141 ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx);
1142 }
1143
1144 /**
1145 * ice_ptp_write_init - Set PHC time to provided value
1146 * @pf: Board private structure
1147 * @ts: timespec structure that holds the new time value
1148 *
1149 * Set the PHC time to the specified time provided in the timespec.
1150 */
1151 static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
1152 {
1153 u64 ns = timespec64_to_ns(ts);
1154 struct ice_hw *hw = &pf->hw;
1155
1156 return ice_ptp_init_time(hw, ns);
1157 }
1158
1159 /**
1160 * ice_ptp_write_adj - Adjust PHC clock time atomically
1161 * @pf: Board private structure
1162 * @adj: Adjustment in nanoseconds
1163 *
1164 * Perform an atomic adjustment of the PHC time by the specified number of
1165 * nanoseconds.
1166 */
1167 static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
1168 {
1169 struct ice_hw *hw = &pf->hw;
1170
1171 return ice_ptp_adj_clock(hw, adj);
1172 }
1173
1174 /**
1175 * ice_base_incval - Get base timer increment value
1176 * @pf: Board private structure
1177 *
1178 * Look up the base timer increment value for this device. The base increment
1179 * value is used to define the nominal clock tick rate. This increment value
1180 * is programmed during device initialization. It is also used as the basis
1181 * for calculating adjustments using scaled_ppm.
1182 */
1183 static u64 ice_base_incval(struct ice_pf *pf)
1184 {
1185 struct ice_hw *hw = &pf->hw;
1186 u64 incval;
1187
1188 incval = ice_get_base_incval(hw);
1189
1190 dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
1191 incval);
1192
1193 return incval;
1194 }
1195
1196 /**
1197 * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state
1198 * @port: PTP port for which Tx FIFO is checked
1199 */
1200 static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
1201 {
1202 int offs = port->port_num % ICE_PORTS_PER_QUAD;
1203 int quad = ICE_GET_QUAD_NUM(port->port_num);
1204 struct ice_pf *pf;
1205 struct ice_hw *hw;
1206 u32 val, phy_sts;
1207 int err;
1208
1209 pf = ptp_port_to_pf(port);
1210 hw = &pf->hw;
1211
1212 if (port->tx_fifo_busy_cnt == FIFO_OK)
1213 return 0;
1214
1215 /* need to read FIFO state */
1216 if (offs == 0 || offs == 1)
1217 err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS,
1218 &val);
1219 else
1220 err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS,
1221 &val);
1222
1223 if (err) {
1224 dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
1225 port->port_num, err);
1226 return err;
1227 }
1228
1229 if (offs & 0x1)
1230 phy_sts = FIELD_GET(Q_REG_FIFO13_M, val);
1231 else
1232 phy_sts = FIELD_GET(Q_REG_FIFO02_M, val);
1233
1234 if (phy_sts & FIFO_EMPTY) {
1235 port->tx_fifo_busy_cnt = FIFO_OK;
1236 return 0;
1237 }
1238
1239 port->tx_fifo_busy_cnt++;
1240
1241 dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
1242 port->tx_fifo_busy_cnt, port->port_num);
1243
1244 if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
1245 dev_dbg(ice_pf_to_dev(pf),
1246 "Port %d Tx FIFO still not empty; resetting quad %d\n",
1247 port->port_num, quad);
1248 ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
1249 port->tx_fifo_busy_cnt = FIFO_OK;
1250 return 0;
1251 }
1252
1253 return -EAGAIN;
1254 }
1255
1256 /**
1257 * ice_ptp_wait_for_offsets - Check for valid Tx and Rx offsets
1258 * @work: Pointer to the kthread_work structure for this task
1259 *
1260 * Check whether hardware has completed measuring the Tx and Rx offset values
1261 * used to configure and enable vernier timestamp calibration.
1262 *
1263 * Once the offset in either direction is measured, configure the associated
1264 * registers with the calibrated offset values and enable timestamping. The Tx
1265 * and Rx directions are configured independently as soon as their associated
1266 * offsets are known.
1267 *
1268 * This function reschedules itself until both Tx and Rx calibration have
1269 * completed.
1270 */
1271 static void ice_ptp_wait_for_offsets(struct kthread_work *work)
1272 {
1273 struct ice_ptp_port *port;
1274 struct ice_pf *pf;
1275 struct ice_hw *hw;
1276 int tx_err;
1277 int rx_err;
1278
1279 port = container_of(work, struct ice_ptp_port, ov_work.work);
1280 pf = ptp_port_to_pf(port);
1281 hw = &pf->hw;
1282
1283 if (ice_is_reset_in_progress(pf->state)) {
1284 /* wait for device driver to complete reset */
1285 kthread_queue_delayed_work(pf->ptp.kworker,
1286 &port->ov_work,
1287 msecs_to_jiffies(100));
1288 return;
1289 }
1290
1291 tx_err = ice_ptp_check_tx_fifo(port);
1292 if (!tx_err)
1293 tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num);
1294 rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num);
1295 if (tx_err || rx_err) {
1296 /* Tx and/or Rx offset not yet configured, try again later */
1297 kthread_queue_delayed_work(pf->ptp.kworker,
1298 &port->ov_work,
1299 msecs_to_jiffies(100));
1300 return;
1301 }
1302 }
1303
1304 /**
1305 * ice_ptp_port_phy_stop - Stop timestamping for a PHY port
1306 * @ptp_port: PTP port to stop
1307 */
1308 static int
1309 ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
1310 {
1311 struct ice_pf *pf = ptp_port_to_pf(ptp_port);
1312 u8 port = ptp_port->port_num;
1313 struct ice_hw *hw = &pf->hw;
1314 int err;
1315
1316 mutex_lock(&ptp_port->ps_lock);
1317
1318 switch (hw->mac_type) {
1319 case ICE_MAC_E810:
1320 case ICE_MAC_E830:
1321 err = 0;
1322 break;
1323 case ICE_MAC_GENERIC:
1324 kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
1325
1326 err = ice_stop_phy_timer_e82x(hw, port, true);
1327 break;
1328 case ICE_MAC_GENERIC_3K_E825:
1329 err = ice_stop_phy_timer_eth56g(hw, port, true);
1330 break;
1331 default:
1332 err = -ENODEV;
1333 }
1334 if (err && err != -EBUSY)
1335 dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
1336 port, err);
1337
1338 mutex_unlock(&ptp_port->ps_lock);
1339
1340 return err;
1341 }
1342
1343 /**
1344 * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping
1345 * @ptp_port: PTP port for which the PHY start is set
1346 *
1347 * Start the PHY timestamping block, and initiate Vernier timestamping
1348 * calibration. If timestamping cannot be calibrated (such as if link is down)
1349 * then disable the timestamping block instead.
1350 */
1351 static int
1352 ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
1353 {
1354 struct ice_pf *pf = ptp_port_to_pf(ptp_port);
1355 u8 port = ptp_port->port_num;
1356 struct ice_hw *hw = &pf->hw;
1357 unsigned long flags;
1358 int err;
1359
1360 if (!ptp_port->link_up)
1361 return ice_ptp_port_phy_stop(ptp_port);
1362
1363 mutex_lock(&ptp_port->ps_lock);
1364
1365 switch (hw->mac_type) {
1366 case ICE_MAC_E810:
1367 case ICE_MAC_E830:
1368 err = 0;
1369 break;
1370 case ICE_MAC_GENERIC:
1371 /* Start the PHY timer in Vernier mode */
1372 kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
1373
1374 /* temporarily disable Tx timestamps while calibrating
1375 * PHY offset
1376 */
1377 spin_lock_irqsave(&ptp_port->tx.lock, flags);
1378 ptp_port->tx.calibrating = true;
1379 spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
1380 ptp_port->tx_fifo_busy_cnt = 0;
1381
1382 /* Start the PHY timer in Vernier mode */
1383 err = ice_start_phy_timer_e82x(hw, port);
1384 if (err)
1385 break;
1386
1387 /* Enable Tx timestamps right away */
1388 spin_lock_irqsave(&ptp_port->tx.lock, flags);
1389 ptp_port->tx.calibrating = false;
1390 spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
1391
1392 kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
1393 0);
1394 break;
1395 case ICE_MAC_GENERIC_3K_E825:
1396 err = ice_start_phy_timer_eth56g(hw, port);
1397 break;
1398 default:
1399 err = -ENODEV;
1400 }
1401
1402 if (err)
1403 dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
1404 port, err);
1405
1406 mutex_unlock(&ptp_port->ps_lock);
1407
1408 return err;
1409 }
1410
1411 /**
1412 * ice_ptp_link_change - Reconfigure PTP after link status change
1413 * @pf: Board private structure
1414 * @linkup: Link is up or down
1415 */
1416 void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
1417 {
1418 struct ice_ptp_port *ptp_port;
1419 struct ice_hw *hw = &pf->hw;
1420
1421 if (pf->ptp.state != ICE_PTP_READY)
1422 return;
1423
1424 ptp_port = &pf->ptp.port;
1425
1426 /* Update cached link status for this port immediately */
1427 ptp_port->link_up = linkup;
1428
1429 /* Skip HW writes if reset is in progress */
1430 if (pf->hw.reset_ongoing)
1431 return;
1432
1433 switch (hw->mac_type) {
1434 case ICE_MAC_E810:
1435 case ICE_MAC_E830:
1436 /* Do not reconfigure E810 or E830 PHY */
1437 return;
1438 case ICE_MAC_GENERIC:
1439 case ICE_MAC_GENERIC_3K_E825:
1440 ice_ptp_port_phy_restart(ptp_port);
1441 return;
1442 default:
1443 dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__);
1444 }
1445 }
1446
1447 /**
1448 * ice_ptp_cfg_phy_interrupt - Configure PHY interrupt settings
1449 * @pf: PF private structure
1450 * @ena: bool value to enable or disable interrupt
1451 * @threshold: Minimum number of packets at which intr is triggered
1452 *
1453 * Utility function to configure all the PHY interrupt settings, including
1454 * whether the PHY interrupt is enabled, and what threshold to use. Also
1455 * configures the E82X timestamp owner to react to interrupts from all PHYs.
1456 *
1457 * Return: 0 on success, -EOPNOTSUPP when the MAC type is not supported, or
1458 * another error code if configuring the PHY interrupt fails
1459 */
1460 static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
1461 {
1462 struct device *dev = ice_pf_to_dev(pf);
1463 struct ice_hw *hw = &pf->hw;
1464
1465 ice_ptp_reset_ts_memory(hw);
1466
1467 switch (hw->mac_type) {
1468 case ICE_MAC_E810:
1469 case ICE_MAC_E830:
1470 return 0;
1471 case ICE_MAC_GENERIC: {
1472 int quad;
1473
1474 for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
1475 quad++) {
1476 int err;
1477
1478 err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
1479 if (err) {
1480 dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
1481 quad, err);
1482 return err;
1483 }
1484 }
1485
1486 return 0;
1487 }
1488 case ICE_MAC_GENERIC_3K_E825: {
1489 int port;
1490
1491 for (port = 0; port < hw->ptp.num_lports; port++) {
1492 int err;
1493
1494 err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
1495 if (err) {
1496 dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
1497 port, err);
1498 return err;
1499 }
1500 }
1501
1502 return 0;
1503 }
1504 case ICE_MAC_UNKNOWN:
1505 default:
1506 return -EOPNOTSUPP;
1507 }
1508 }
1509
1510 /**
1511 * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block
1512 * @pf: Board private structure
1513 */
1514 static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
1515 {
1516 ice_ptp_port_phy_restart(&pf->ptp.port);
1517 }
1518
1519 /**
1520 * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping
1521 * @pf: Board private structure
1522 */
1523 static void ice_ptp_restart_all_phy(struct ice_pf *pf)
1524 {
1525 struct list_head *entry;
1526
1527 list_for_each(entry, &pf->adapter->ports.ports) {
1528 struct ice_ptp_port *port = list_entry(entry,
1529 struct ice_ptp_port,
1530 list_node);
1531
1532 if (port->link_up)
1533 ice_ptp_port_phy_restart(port);
1534 }
1535 }
1536
1537 /**
1538 * ice_ptp_adjfine - Adjust clock increment rate
1539 * @info: the driver's PTP info structure
1540 * @scaled_ppm: Parts per million with 16-bit fractional field
1541 *
1542 * Adjust the frequency of the clock by the indicated scaled ppm from the
1543 * base frequency.
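 *
 * For example, since scaled_ppm carries a 16-bit fractional field, a value
 * of 65536 corresponds to a +1 ppm offset from the base increment value.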
1544 */
1545 static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
1546 {
1547 struct ice_pf *pf = ptp_info_to_pf(info);
1548 struct ice_hw *hw = &pf->hw;
1549 u64 incval;
1550 int err;
1551
1552 incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm);
1553 err = ice_ptp_write_incval_locked(hw, incval);
1554 if (err) {
1555 dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
1556 err);
1557 return -EIO;
1558 }
1559
1560 return 0;
1561 }
1562
1563 /**
1564 * ice_ptp_extts_event - Process PTP external clock event
1565 * @pf: Board private structure
1566 */
1567 void ice_ptp_extts_event(struct ice_pf *pf)
1568 {
1569 struct ptp_clock_event event;
1570 struct ice_hw *hw = &pf->hw;
1571 u8 chan, tmr_idx;
1572 u32 hi, lo;
1573
1574 /* Don't process timestamp events if PTP is not ready */
1575 if (pf->ptp.state != ICE_PTP_READY)
1576 return;
1577
1578 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1579 /* Event time is captured by one of the two matched registers
1580 * GLTSYN_EVNT_L: 32 LSB of sampled time event
1581 * GLTSYN_EVNT_H: 32 MSB of sampled time event
1582 * Event is defined in GLTSYN_EVNT_0 register
1583 */
1584 for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
1585 int pin_desc_idx;
1586
1587 /* Check if channel is enabled */
1588 if (!(pf->ptp.ext_ts_irq & (1 << chan)))
1589 continue;
1590
1591 lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
1592 hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
1593 event.timestamp = (u64)hi << 32 | lo;
1594
1595 /* Add delay compensation */
1596 pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
1597 if (pin_desc_idx >= 0) {
1598 const struct ice_ptp_pin_desc *desc;
1599
1600 desc = &pf->ptp.ice_pin_desc[pin_desc_idx];
1601 event.timestamp -= desc->delay[0];
1602 }
1603
1604 event.type = PTP_CLOCK_EXTTS;
1605 event.index = chan;
1606 pf->ptp.ext_ts_irq &= ~(1 << chan);
1607 ptp_clock_event(pf->ptp.clock, &event);
1608 }
1609 }
1610
1611 /**
1612 * ice_ptp_cfg_extts - Configure EXTTS pin and channel
1613 * @pf: Board private structure
1614 * @rq: External timestamp request
1615 * @on: Enable/disable flag
1616 *
1617 * Configure an external timestamp event on the requested channel.
1618 *
1619 * Return: 0 on success, negative error code otherwise
1620 */
1621 static int ice_ptp_cfg_extts(struct ice_pf *pf, struct ptp_extts_request *rq,
1622 int on)
1623 {
1624 u32 aux_reg, gpio_reg, irq_reg;
1625 struct ice_hw *hw = &pf->hw;
1626 unsigned int chan, gpio_pin;
1627 int pin_desc_idx;
1628 u8 tmr_idx;
1629
1630 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1631 chan = rq->index;
1632
1633 pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
1634 if (pin_desc_idx < 0)
1635 return -EIO;
1636
1637 gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[0];
1638 irq_reg = rd32(hw, PFINT_OICR_ENA);
1639
1640 if (on) {
1641 /* Enable the interrupt */
1642 irq_reg |= PFINT_OICR_TSYN_EVNT_M;
1643 aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;
1644
1645 #define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE BIT(0)
1646 #define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE BIT(1)
1647
1648 /* set event level to requested edge */
1649 if (rq->flags & PTP_FALLING_EDGE)
1650 aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
1651 if (rq->flags & PTP_RISING_EDGE)
1652 aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;
1653
1654 /* Write GPIO CTL reg.
1655 * 0x1 is input sampled by EVENT register(channel)
1656 * + num_in_channels * tmr_idx
1657 */
1658 gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M,
1659 1 + chan + (tmr_idx * 3));
1660 } else {
1661 bool last_enabled = true;
1662
1663 /* clear the values we set to reset defaults */
1664 aux_reg = 0;
1665 gpio_reg = 0;
1666
1667 for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++)
1668 if ((pf->ptp.extts_rqs[i].flags &
1669 PTP_ENABLE_FEATURE) &&
1670 i != chan) {
1671 last_enabled = false;
1672 }
1673
1674 if (last_enabled)
1675 irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
1676 }
1677
1678 wr32(hw, PFINT_OICR_ENA, irq_reg);
1679 wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
1680 wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);
1681
1682 return 0;
1683 }
1684
1685 /**
1686 * ice_ptp_disable_all_extts - Disable all EXTTS channels
1687 * @pf: Board private structure
1688 */
1689 static void ice_ptp_disable_all_extts(struct ice_pf *pf)
1690 {
1691 for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++)
1692 if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE)
1693 ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i],
1694 false);
1695
1696 synchronize_irq(pf->oicr_irq.virq);
1697 }
1698
1699 /**
1700 * ice_ptp_enable_all_extts - Enable all EXTTS channels
1701 * @pf: Board private structure
1702 *
1703 * Called during reset to restore user configuration.
1704 */
1705 static void ice_ptp_enable_all_extts(struct ice_pf *pf)
1706 {
1707 for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++)
1708 if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE)
1709 ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i],
1710 true);
1711 }
1712
1713 /**
1714 * ice_ptp_write_perout - Write periodic wave parameters to HW
1715 * @hw: pointer to the HW struct
1716 * @chan: target channel
1717 * @gpio_pin: target GPIO pin
1718 * @start: target time to start periodic output
1719 * @period: target period
1720 *
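 * As an illustration: for a 1 PPS request the caller passes a period of
 * 1000000000 ns; the code below halves this and writes 500000000 to
 * GLTSYN_CLKO, since hardware toggles the output at the target time and
 * again after each CLKO interval, giving a 50% duty cycle.
 *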
1721 * Return: 0 on success, negative error code otherwise
1722 */
1723 static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
1724 unsigned int gpio_pin, u64 start, u64 period)
1725 {
1726
1727 u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1728 u32 val = 0;
1729
1730 /* 0. Reset mode & out_en in AUX_OUT */
1731 wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
1732
1733 if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) {
1734 int err;
1735
1736 /* Enable/disable CGU 1PPS output for E825C */
1737 err = ice_cgu_cfg_pps_out(hw, !!period);
1738 if (err)
1739 return err;
1740 }
1741
1742 /* 1. Write perout with half of required period value.
1743 * HW toggles output when source clock hits the TGT and then adds
1744 * GLTSYN_CLKO value to the target, so it ends up with 50% duty cycle.
1745 */
1746 period >>= 1;
1747
1748 /* For proper operation, GLTSYN_CLKO must be larger than a clock tick, and
1749 * the period has to fit in a 32-bit register.
1750 */
1751 #define MIN_PULSE 3
1752 if (!!period && (period <= MIN_PULSE || period > U32_MAX)) {
1753 dev_err(ice_hw_to_dev(hw), "CLK period ticks must be >= %d && <= 2^32",
1754 MIN_PULSE);
1755 return -EIO;
1756 }
1757
1758 wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));
1759
1760 /* 2. Write TARGET time */
1761 wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start));
1762 wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start));
1763
1764 /* 3. Write AUX_OUT register */
1765 if (!!period)
1766 val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
1767 wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);
1768
1769 /* 4. write GPIO CTL reg */
1770 val = GLGEN_GPIO_CTL_PIN_DIR_M;
1771 if (!!period)
1772 val |= FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M,
1773 8 + chan + (tmr_idx * 4));
1774
1775 wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
1776 ice_flush(hw);
1777
1778 return 0;
1779 }
1780
1781 /**
1782 * ice_ptp_cfg_perout - Configure clock to generate periodic wave
1783 * @pf: Board private structure
1784 * @rq: Periodic output request
1785 * @on: Enable/disable flag
1786 *
1787 * Configure the internal clock generator modules to generate the clock wave of
1788 * specified period.
1789 *
1790 * Return: 0 on success, negative error code otherwise
1791 */
1792 static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
1793 int on)
1794 {
1795 unsigned int gpio_pin, prop_delay_ns;
1796 u64 clk, period, start, phase;
1797 struct ice_hw *hw = &pf->hw;
1798 int pin_desc_idx;
1799
1800 pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_PEROUT, rq->index);
1801 if (pin_desc_idx < 0)
1802 return -EIO;
1803
1804 gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1];
1805 prop_delay_ns = pf->ptp.ice_pin_desc[pin_desc_idx].delay[1];
1806 period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec;
1807
1808 /* If we're disabling the output or period is 0, clear out CLKO and TGT
1809 * and keep output level low.
1810 */
1811 if (!on || !period)
1812 return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0);
1813
1814 if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 &&
1815 period != NSEC_PER_SEC && hw->mac_type == ICE_MAC_GENERIC) {
1816 dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n");
1817 return -EOPNOTSUPP;
1818 }
1819
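	/* The period is halved before it is written to GLTSYN_CLKO to produce a
	 * 50% duty cycle, so only even values can be programmed exactly.
	 */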
1820 if (period & 0x1) {
1821 dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
1822 return -EIO;
1823 }
1824
1825 start = rq->start.sec * NSEC_PER_SEC + rq->start.nsec;
1826
1827 /* If PTP_PEROUT_PHASE is set, rq has phase instead of start time */
1828 if (rq->flags & PTP_PEROUT_PHASE)
1829 phase = start;
1830 else
1831 div64_u64_rem(start, period, &phase);
1832
1833 	/* If we only have a phase, or the start time is in the past, start the
1834 	 * timer at the next multiple of the period (maintaining the phase), at
1835 	 * least 0.5 seconds from now so we have time to write it to HW.
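	 * For example, with a 1 s period and clk at 12.3 s, start rounds up to
	 * 13 s plus the requested phase.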
1836 */
1837 clk = ice_ptp_read_src_clk_reg(pf, NULL) + NSEC_PER_MSEC * 500;
1838 if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns)
1839 start = div64_u64(clk + period - 1, period) * period + phase;
1840
1841 /* Compensate for propagation delay from the generator to the pin. */
1842 start -= prop_delay_ns;
1843
1844 return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period);
1845 }
1846
1847 /**
1848 * ice_ptp_disable_all_perout - Disable all currently configured outputs
1849 * @pf: Board private structure
1850 *
1851 * Disable all currently configured clock outputs. This is necessary before
1852 * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_perout to
1853 * re-enable the clocks again.
1854 */
1855 static void ice_ptp_disable_all_perout(struct ice_pf *pf)
1856 {
1857 for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++)
1858 if (pf->ptp.perout_rqs[i].period.sec ||
1859 pf->ptp.perout_rqs[i].period.nsec)
1860 ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i],
1861 false);
1862 }
1863
1864 /**
1865 * ice_ptp_enable_all_perout - Enable all configured periodic clock outputs
1866 * @pf: Board private structure
1867 *
1868 * Enable all currently configured clock outputs. Use this after
1869 * ice_ptp_disable_all_perout to reconfigure the output signals according to
1870 * their configuration.
1871 */
1872 static void ice_ptp_enable_all_perout(struct ice_pf *pf)
1873 {
1874 for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++)
1875 if (pf->ptp.perout_rqs[i].period.sec ||
1876 pf->ptp.perout_rqs[i].period.nsec)
1877 ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i],
1878 true);
1879 }
1880
1881 /**
1882 * ice_ptp_disable_shared_pin - Disable enabled pin that shares GPIO
1883 * @pf: Board private structure
1884 * @pin: Pin index
1885 * @func: Assigned function
1886 *
1887 * Return: 0 on success, negative error code otherwise
1888 */
1889 static int ice_ptp_disable_shared_pin(struct ice_pf *pf, unsigned int pin,
1890 enum ptp_pin_function func)
1891 {
1892 unsigned int gpio_pin;
1893
1894 switch (func) {
1895 case PTP_PF_PEROUT:
1896 gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[1];
1897 break;
1898 case PTP_PF_EXTTS:
1899 gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[0];
1900 break;
1901 default:
1902 return -EOPNOTSUPP;
1903 }
1904
1905 for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) {
1906 struct ptp_pin_desc *pin_desc = &pf->ptp.pin_desc[i];
1907 unsigned int chan = pin_desc->chan;
1908
1909 /* Skip pin idx from the request */
1910 if (i == pin)
1911 continue;
1912
1913 if (pin_desc->func == PTP_PF_PEROUT &&
1914 pf->ptp.ice_pin_desc[i].gpio[1] == gpio_pin) {
1915 pf->ptp.perout_rqs[chan].period.sec = 0;
1916 pf->ptp.perout_rqs[chan].period.nsec = 0;
1917 pin_desc->func = PTP_PF_NONE;
1918 pin_desc->chan = 0;
1919 dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared output GPIO pin %u\n",
1920 i, gpio_pin);
1921 return ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[chan],
1922 false);
1923 		} else if (pin_desc->func == PTP_PF_EXTTS &&
1924 pf->ptp.ice_pin_desc[i].gpio[0] == gpio_pin) {
1925 pf->ptp.extts_rqs[chan].flags &= ~PTP_ENABLE_FEATURE;
1926 pin_desc->func = PTP_PF_NONE;
1927 pin_desc->chan = 0;
1928 dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared input GPIO pin %u\n",
1929 i, gpio_pin);
1930 return ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[chan],
1931 false);
1932 }
1933 }
1934
1935 return 0;
1936 }
1937
1938 /**
1939 * ice_verify_pin - verify if pin supports requested pin function
1940 * @info: the driver's PTP info structure
1941 * @pin: Pin index
1942 * @func: Assigned function
1943 * @chan: Assigned channel
1944 *
1945 * Return: 0 on success, -EOPNOTSUPP when function is not supported.
1946 */
1947 static int ice_verify_pin(struct ptp_clock_info *info, unsigned int pin,
1948 enum ptp_pin_function func, unsigned int chan)
1949 {
1950 struct ice_pf *pf = ptp_info_to_pf(info);
1951 const struct ice_ptp_pin_desc *pin_desc;
1952
1953 pin_desc = &pf->ptp.ice_pin_desc[pin];
1954
1955 /* Is assigned function allowed? */
1956 switch (func) {
1957 case PTP_PF_EXTTS:
1958 if (pin_desc->gpio[0] < 0)
1959 return -EOPNOTSUPP;
1960 break;
1961 case PTP_PF_PEROUT:
1962 if (pin_desc->gpio[1] < 0)
1963 return -EOPNOTSUPP;
1964 break;
1965 case PTP_PF_NONE:
1966 break;
1967 case PTP_PF_PHYSYNC:
1968 default:
1969 return -EOPNOTSUPP;
1970 }
1971
1972 /* On adapters with SMA_CTRL disable other pins that share same GPIO */
1973 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
1974 ice_ptp_disable_shared_pin(pf, pin, func);
1975 pf->ptp.pin_desc[pin].func = func;
1976 pf->ptp.pin_desc[pin].chan = chan;
1977 return ice_ptp_set_sma_cfg(pf);
1978 }
1979
1980 return 0;
1981 }
1982
1983 /**
1984 * ice_ptp_gpio_enable - Enable/disable ancillary features of PHC
1985 * @info: The driver's PTP info structure
1986 * @rq: The requested feature to change
1987 * @on: Enable/disable flag
1988 *
1989 * Return: 0 on success, negative error code otherwise
1990 */
1991 static int ice_ptp_gpio_enable(struct ptp_clock_info *info,
1992 struct ptp_clock_request *rq, int on)
1993 {
1994 struct ice_pf *pf = ptp_info_to_pf(info);
1995 int err;
1996
1997 switch (rq->type) {
1998 case PTP_CLK_REQ_PEROUT:
1999 {
2000 struct ptp_perout_request *cached =
2001 &pf->ptp.perout_rqs[rq->perout.index];
2002
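		/* Cache the request so ice_ptp_enable_all_perout() can restore
		 * it after a reset; clear the cached period on failure.
		 */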
2003 err = ice_ptp_cfg_perout(pf, &rq->perout, on);
2004 if (!err) {
2005 *cached = rq->perout;
2006 } else {
2007 cached->period.sec = 0;
2008 cached->period.nsec = 0;
2009 }
2010 return err;
2011 }
2012 case PTP_CLK_REQ_EXTTS:
2013 {
2014 struct ptp_extts_request *cached =
2015 &pf->ptp.extts_rqs[rq->extts.index];
2016
2017 err = ice_ptp_cfg_extts(pf, &rq->extts, on);
2018 if (!err)
2019 *cached = rq->extts;
2020 else
2021 cached->flags &= ~PTP_ENABLE_FEATURE;
2022 return err;
2023 }
2024 default:
2025 return -EOPNOTSUPP;
2026 }
2027 }
2028
2029 /**
2030 * ice_ptp_gettimex64 - Get the time of the clock
2031 * @info: the driver's PTP info structure
2032 * @ts: timespec64 structure to hold the current time value
2033 * @sts: Optional parameter for holding a pair of system timestamps from
2034 * the system clock. Will be ignored if NULL is given.
2035 *
2036  * Read the device clock and return the correct value in ns, after converting it
2037 * into a timespec struct.
2038 */
2039 static int
2040 ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
2041 struct ptp_system_timestamp *sts)
2042 {
2043 struct ice_pf *pf = ptp_info_to_pf(info);
2044 u64 time_ns;
2045
2046 time_ns = ice_ptp_read_src_clk_reg(pf, sts);
2047 *ts = ns_to_timespec64(time_ns);
2048 return 0;
2049 }
2050
2051 /**
2052 * ice_ptp_settime64 - Set the time of the clock
2053 * @info: the driver's PTP info structure
2054 * @ts: timespec64 structure that holds the new time value
2055 *
2056 * Set the device clock to the user input value. The conversion from timespec
2057 * to ns happens in the write function.
2058 */
2059 static int
2060 ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
2061 {
2062 struct ice_pf *pf = ptp_info_to_pf(info);
2063 struct timespec64 ts64 = *ts;
2064 struct ice_hw *hw = &pf->hw;
2065 int err;
2066
2067 /* For Vernier mode on E82X, we need to recalibrate after new settime.
2068 * Start with marking timestamps as invalid.
2069 */
2070 if (hw->mac_type == ICE_MAC_GENERIC) {
2071 err = ice_ptp_clear_phy_offset_ready_e82x(hw);
2072 if (err)
2073 dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
2074 }
2075
2076 if (!ice_ptp_lock(hw)) {
2077 err = -EBUSY;
2078 goto exit;
2079 }
2080
2081 /* Disable periodic outputs */
2082 ice_ptp_disable_all_perout(pf);
2083
2084 err = ice_ptp_write_init(pf, &ts64);
2085 ice_ptp_unlock(hw);
2086
2087 if (!err)
2088 ice_ptp_reset_cached_phctime(pf);
2089
2090 /* Reenable periodic outputs */
2091 ice_ptp_enable_all_perout(pf);
2092
2093 /* Recalibrate and re-enable timestamp blocks for E822/E823 */
2094 if (hw->mac_type == ICE_MAC_GENERIC)
2095 ice_ptp_restart_all_phy(pf);
2096 exit:
2097 if (err) {
2098 dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
2099 return err;
2100 }
2101
2102 return 0;
2103 }
2104
2105 /**
2106 * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
2107 * @info: the driver's PTP info structure
2108 * @delta: Offset in nanoseconds to adjust the time by
2109 */
2110 static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
2111 {
2112 struct timespec64 now, then;
2113 int ret;
2114
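	/* The get/add/set sequence below is not atomic, so a small additional
	 * error may accumulate between reading and writing the clock.
	 */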
2115 then = ns_to_timespec64(delta);
2116 ret = ice_ptp_gettimex64(info, &now, NULL);
2117 if (ret)
2118 return ret;
2119 now = timespec64_add(now, then);
2120
2121 return ice_ptp_settime64(info, (const struct timespec64 *)&now);
2122 }
2123
2124 /**
2125 * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta
2126 * @info: the driver's PTP info structure
2127 * @delta: Offset in nanoseconds to adjust the time by
2128 */
2129 static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
2130 {
2131 struct ice_pf *pf = ptp_info_to_pf(info);
2132 struct ice_hw *hw = &pf->hw;
2133 struct device *dev;
2134 int err;
2135
2136 dev = ice_pf_to_dev(pf);
2137
2138 /* Hardware only supports atomic adjustments using signed 32-bit
2139 * integers. For any adjustment outside this range, perform
2140 * a non-atomic get->adjust->set flow.
2141 */
2142 if (delta > S32_MAX || delta < S32_MIN) {
2143 dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta);
2144 return ice_ptp_adjtime_nonatomic(info, delta);
2145 }
2146
2147 if (!ice_ptp_lock(hw)) {
2148 dev_err(dev, "PTP failed to acquire semaphore in adjtime\n");
2149 return -EBUSY;
2150 }
2151
2152 /* Disable periodic outputs */
2153 ice_ptp_disable_all_perout(pf);
2154
2155 err = ice_ptp_write_adj(pf, delta);
2156
2157 /* Reenable periodic outputs */
2158 ice_ptp_enable_all_perout(pf);
2159
2160 ice_ptp_unlock(hw);
2161
2162 if (err) {
2163 dev_err(dev, "PTP failed to adjust time, err %d\n", err);
2164 return err;
2165 }
2166
2167 ice_ptp_reset_cached_phctime(pf);
2168
2169 return 0;
2170 }
2171
2172 /**
2173 * struct ice_crosststamp_cfg - Device cross timestamp configuration
2174 * @lock_reg: The hardware semaphore lock to use
2175 * @lock_busy: Bit in the semaphore lock indicating the lock is busy
2176 * @ctl_reg: The hardware register to request cross timestamp
2177 * @ctl_active: Bit in the control register to request cross timestamp
2178 * @art_time_l: Lower 32-bits of ART system time
2179 * @art_time_h: Upper 32-bits of ART system time
2180 * @dev_time_l: Lower 32-bits of device time (per timer index)
2181 * @dev_time_h: Upper 32-bits of device time (per timer index)
2182 */
2183 struct ice_crosststamp_cfg {
2184 /* HW semaphore lock register */
2185 u32 lock_reg;
2186 u32 lock_busy;
2187
2188 /* Capture control register */
2189 u32 ctl_reg;
2190 u32 ctl_active;
2191
2192 /* Time storage */
2193 u32 art_time_l;
2194 u32 art_time_h;
2195 u32 dev_time_l[2];
2196 u32 dev_time_h[2];
2197 };
2198
2199 static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e82x = {
2200 .lock_reg = PFHH_SEM,
2201 .lock_busy = PFHH_SEM_BUSY_M,
2202 .ctl_reg = GLHH_ART_CTL,
2203 .ctl_active = GLHH_ART_CTL_ACTIVE_M,
2204 .art_time_l = GLHH_ART_TIME_L,
2205 .art_time_h = GLHH_ART_TIME_H,
2206 .dev_time_l[0] = GLTSYN_HHTIME_L(0),
2207 .dev_time_h[0] = GLTSYN_HHTIME_H(0),
2208 .dev_time_l[1] = GLTSYN_HHTIME_L(1),
2209 .dev_time_h[1] = GLTSYN_HHTIME_H(1),
2210 };
2211
2212 #ifdef CONFIG_ICE_HWTS
2213 static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e830 = {
2214 .lock_reg = E830_PFPTM_SEM,
2215 .lock_busy = E830_PFPTM_SEM_BUSY_M,
2216 .ctl_reg = E830_GLPTM_ART_CTL,
2217 .ctl_active = E830_GLPTM_ART_CTL_ACTIVE_M,
2218 .art_time_l = E830_GLPTM_ART_TIME_L,
2219 .art_time_h = E830_GLPTM_ART_TIME_H,
2220 .dev_time_l[0] = E830_GLTSYN_PTMTIME_L(0),
2221 .dev_time_h[0] = E830_GLTSYN_PTMTIME_H(0),
2222 .dev_time_l[1] = E830_GLTSYN_PTMTIME_L(1),
2223 .dev_time_h[1] = E830_GLTSYN_PTMTIME_H(1),
2224 };
2225
2226 #endif /* CONFIG_ICE_HWTS */
2227 /**
2228 * struct ice_crosststamp_ctx - Device cross timestamp context
2229 * @snapshot: snapshot of system clocks for historic interpolation
2230 * @pf: pointer to the PF private structure
2231 * @cfg: pointer to hardware configuration for cross timestamp
2232 */
2233 struct ice_crosststamp_ctx {
2234 struct system_time_snapshot snapshot;
2235 struct ice_pf *pf;
2236 const struct ice_crosststamp_cfg *cfg;
2237 };
2238
2239 /**
2240 * ice_capture_crosststamp - Capture a device/system cross timestamp
2241 * @device: Current device time
2242 * @system: System counter value read synchronously with device time
2243 * @__ctx: Context passed from ice_ptp_getcrosststamp
2244 *
2245 * Read device and system (ART) clock simultaneously and return the corrected
2246 * clock values in ns.
2247 *
2248 * Return: zero on success, or a negative error code on failure.
2249 */
2250 static int ice_capture_crosststamp(ktime_t *device,
2251 struct system_counterval_t *system,
2252 void *__ctx)
2253 {
2254 struct ice_crosststamp_ctx *ctx = __ctx;
2255 const struct ice_crosststamp_cfg *cfg;
2256 u32 lock, ctl, ts_lo, ts_hi, tmr_idx;
2257 struct ice_pf *pf;
2258 struct ice_hw *hw;
2259 int err;
2260 u64 ts;
2261
2262 cfg = ctx->cfg;
2263 pf = ctx->pf;
2264 hw = &pf->hw;
2265
2266 tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
2267 if (tmr_idx > 1)
2268 return -EINVAL;
2269
2270 /* Poll until we obtain the cross-timestamp hardware semaphore */
2271 err = rd32_poll_timeout(hw, cfg->lock_reg, lock,
2272 !(lock & cfg->lock_busy),
2273 10 * USEC_PER_MSEC, 50 * USEC_PER_MSEC);
2274 if (err) {
2275 dev_err(ice_pf_to_dev(pf), "PTP failed to get cross timestamp lock\n");
2276 return -EBUSY;
2277 }
2278
2279 /* Snapshot system time for historic interpolation */
2280 ktime_get_snapshot(&ctx->snapshot);
2281
2282 /* Program cmd to master timer */
2283 ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
2284
2285 /* Start the ART and device clock sync sequence */
2286 ctl = rd32(hw, cfg->ctl_reg);
2287 ctl |= cfg->ctl_active;
2288 wr32(hw, cfg->ctl_reg, ctl);
2289
2290 /* Poll until hardware completes the capture */
2291 err = rd32_poll_timeout(hw, cfg->ctl_reg, ctl, !(ctl & cfg->ctl_active),
2292 5, 20 * USEC_PER_MSEC);
2293 if (err)
2294 goto err_timeout;
2295
2296 /* Read ART system time */
2297 ts_lo = rd32(hw, cfg->art_time_l);
2298 ts_hi = rd32(hw, cfg->art_time_h);
2299 ts = ((u64)ts_hi << 32) | ts_lo;
2300 system->cycles = ts;
2301 system->cs_id = CSID_X86_ART;
2302 system->use_nsecs = true;
2303
2304 /* Read Device source clock time */
2305 ts_lo = rd32(hw, cfg->dev_time_l[tmr_idx]);
2306 ts_hi = rd32(hw, cfg->dev_time_h[tmr_idx]);
2307 ts = ((u64)ts_hi << 32) | ts_lo;
2308 *device = ns_to_ktime(ts);
2309
2310 err_timeout:
2311 /* Clear the master timer */
2312 ice_ptp_src_cmd(hw, ICE_PTP_NOP);
2313
2314 /* Release HW lock */
2315 lock = rd32(hw, cfg->lock_reg);
2316 lock &= ~cfg->lock_busy;
2317 wr32(hw, cfg->lock_reg, lock);
2318
2319 return err;
2320 }
2321
2322 /**
2323 * ice_ptp_getcrosststamp - Capture a device cross timestamp
2324 * @info: the driver's PTP info structure
2325 * @cts: The memory to fill the cross timestamp info
2326 *
2327 * Capture a cross timestamp between the ART and the device PTP hardware
2328 * clock. Fill the cross timestamp information and report it back to the
2329 * caller.
2330 *
2331 * In order to correctly correlate the ART timestamp back to the TSC time, the
2332 * CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
2333 *
2334 * Return: zero on success, or a negative error code on failure.
2335 */
2336 static int ice_ptp_getcrosststamp(struct ptp_clock_info *info,
2337 struct system_device_crosststamp *cts)
2338 {
2339 struct ice_pf *pf = ptp_info_to_pf(info);
2340 struct ice_crosststamp_ctx ctx = {
2341 .pf = pf,
2342 };
2343
2344 switch (pf->hw.mac_type) {
2345 case ICE_MAC_GENERIC:
2346 case ICE_MAC_GENERIC_3K_E825:
2347 ctx.cfg = &ice_crosststamp_cfg_e82x;
2348 break;
2349 #ifdef CONFIG_ICE_HWTS
2350 case ICE_MAC_E830:
2351 ctx.cfg = &ice_crosststamp_cfg_e830;
2352 break;
2353 #endif /* CONFIG_ICE_HWTS */
2354 default:
2355 return -EOPNOTSUPP;
2356 }
2357
2358 return get_device_system_crosststamp(ice_capture_crosststamp, &ctx,
2359 &ctx.snapshot, cts);
2360 }
2361
2362 /**
2363 * ice_ptp_get_ts_config - ioctl interface to read the timestamping config
2364 * @pf: Board private structure
2365 * @ifr: ioctl data
2366 *
2367 * Copy the timestamping config to user buffer
2368 */
2369 int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
2370 {
2371 struct hwtstamp_config *config;
2372
2373 if (pf->ptp.state != ICE_PTP_READY)
2374 return -EIO;
2375
2376 config = &pf->ptp.tstamp_config;
2377
2378 return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
2379 -EFAULT : 0;
2380 }
2381
2382 /**
2383 * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
2384 * @pf: Board private structure
2385 * @config: hwtstamp settings requested or saved
2386 */
2387 static int
2388 ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
2389 {
2390 switch (config->tx_type) {
2391 case HWTSTAMP_TX_OFF:
2392 pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
2393 break;
2394 case HWTSTAMP_TX_ON:
2395 pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
2396 break;
2397 default:
2398 return -ERANGE;
2399 }
2400
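	/* Any PTP Rx filter request below is mapped to HWTSTAMP_FILTER_ALL; the
	 * resulting configuration is reported back to user space by
	 * ice_ptp_set_ts_config().
	 */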
2401 switch (config->rx_filter) {
2402 case HWTSTAMP_FILTER_NONE:
2403 pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
2404 break;
2405 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2406 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2407 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2408 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2409 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2410 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2411 case HWTSTAMP_FILTER_PTP_V2_SYNC:
2412 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2413 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2414 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2415 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2416 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2417 case HWTSTAMP_FILTER_NTP_ALL:
2418 case HWTSTAMP_FILTER_ALL:
2419 pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
2420 break;
2421 default:
2422 return -ERANGE;
2423 }
2424
2425 /* Immediately update the device timestamping mode */
2426 ice_ptp_restore_timestamp_mode(pf);
2427
2428 return 0;
2429 }
2430
2431 /**
2432 * ice_ptp_set_ts_config - ioctl interface to control the timestamping
2433 * @pf: Board private structure
2434 * @ifr: ioctl data
2435 *
2436 * Get the user config and store it
2437 */
2438 int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
2439 {
2440 struct hwtstamp_config config;
2441 int err;
2442
2443 if (pf->ptp.state != ICE_PTP_READY)
2444 return -EAGAIN;
2445
2446 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2447 return -EFAULT;
2448
2449 err = ice_ptp_set_timestamp_mode(pf, &config);
2450 if (err)
2451 return err;
2452
2453 /* Return the actual configuration set */
2454 config = pf->ptp.tstamp_config;
2455
2456 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2457 -EFAULT : 0;
2458 }
2459
2460 /**
2461 * ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns
2462 * @rx_desc: Receive descriptor
2463 * @pkt_ctx: Packet context to get the cached time
2464 *
2465 * The driver receives a notification in the receive descriptor with timestamp.
2466 */
2467 u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
2468 const struct ice_pkt_ctx *pkt_ctx)
2469 {
2470 u64 ts_ns, cached_time;
2471 u32 ts_high;
2472
2473 if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
2474 return 0;
2475
2476 cached_time = READ_ONCE(pkt_ctx->cached_phctime);
2477
2478 /* Do not report a timestamp if we don't have a cached PHC time */
2479 if (!cached_time)
2480 return 0;
2481
2482 /* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
2483 * PHC value, rather than accessing the PF. This also allows us to
2484 	 * simply pass the upper 32 bits of nanoseconds directly. Calling
2485 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these
2486 * bits itself.
2487 */
2488 ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
2489 ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);
2490
2491 return ts_ns;
2492 }
2493
2494 /**
2495 * ice_ptp_setup_pin_cfg - setup PTP pin_config structure
2496 * @pf: Board private structure
2497 */
2498 static void ice_ptp_setup_pin_cfg(struct ice_pf *pf)
2499 {
2500 for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) {
2501 const struct ice_ptp_pin_desc *desc = &pf->ptp.ice_pin_desc[i];
2502 struct ptp_pin_desc *pin = &pf->ptp.pin_desc[i];
2503 const char *name = NULL;
2504
2505 if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
2506 name = ice_pin_names[desc->name_idx];
2507 else if (desc->name_idx != GPIO_NA)
2508 name = ice_pin_names_nvm[desc->name_idx];
2509 if (name)
2510 strscpy(pin->name, name, sizeof(pin->name));
2511
2512 pin->index = i;
2513 }
2514
2515 pf->ptp.info.pin_config = pf->ptp.pin_desc;
2516 }
2517
2518 /**
2519 * ice_ptp_disable_pins - Disable PTP pins
2520 * @pf: pointer to the PF structure
2521 *
2522 * Disable the OS access to the SMA pins. Called to clear out the OS
2523 * indications of pin support when we fail to setup the SMA control register.
2524 */
2525 static void ice_ptp_disable_pins(struct ice_pf *pf)
2526 {
2527 struct ptp_clock_info *info = &pf->ptp.info;
2528
2529 dev_warn(ice_pf_to_dev(pf), "Failed to configure PTP pin control\n");
2530
2531 info->enable = NULL;
2532 info->verify = NULL;
2533 info->n_pins = 0;
2534 info->n_ext_ts = 0;
2535 info->n_per_out = 0;
2536 }
2537
2538 /**
2539 * ice_ptp_parse_sdp_entries - update ice_ptp_pin_desc structure from NVM
2540 * @pf: pointer to the PF structure
2541 * @entries: SDP connection section from NVM
2542 * @num_entries: number of valid entries in sdp_entries
2543 * @pins: PTP pins array to update
2544 *
2545 * Return: 0 on success, negative error code otherwise.
2546 */
2547 static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries,
2548 unsigned int num_entries,
2549 struct ice_ptp_pin_desc *pins)
2550 {
2551 unsigned int n_pins = 0;
2552 unsigned int i;
2553
2554 /* Setup ice_pin_desc array */
2555 for (i = 0; i < ICE_N_PINS_MAX; i++) {
2556 pins[i].name_idx = -1;
2557 pins[i].gpio[0] = -1;
2558 pins[i].gpio[1] = -1;
2559 }
2560
2561 for (i = 0; i < num_entries; i++) {
2562 u16 entry = le16_to_cpu(entries[i]);
2563 DECLARE_BITMAP(bitmap, GPIO_NA);
2564 unsigned int bitmap_idx;
2565 bool dir;
2566 u16 gpio;
2567
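		/* Each entry carries a pin bitmap, a direction bit selecting
		 * the input (gpio[0]) or output (gpio[1]) slot, and a GPIO
		 * number.
		 */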
2568 *bitmap = FIELD_GET(ICE_AQC_NVM_SDP_AC_PIN_M, entry);
2569 dir = !!FIELD_GET(ICE_AQC_NVM_SDP_AC_DIR_M, entry);
2570 gpio = FIELD_GET(ICE_AQC_NVM_SDP_AC_SDP_NUM_M, entry);
2571 for_each_set_bit(bitmap_idx, bitmap, GPIO_NA + 1) {
2572 unsigned int idx;
2573
2574 /* Check if entry's pin bit is valid */
2575 if (bitmap_idx >= NUM_PTP_PINS_NVM &&
2576 bitmap_idx != GPIO_NA)
2577 continue;
2578
2579 /* Check if pin already exists */
2580 for (idx = 0; idx < ICE_N_PINS_MAX; idx++)
2581 if (pins[idx].name_idx == bitmap_idx)
2582 break;
2583
2584 if (idx == ICE_N_PINS_MAX) {
2585 /* Pin not found, setup its entry and name */
2586 idx = n_pins++;
2587 pins[idx].name_idx = bitmap_idx;
2588 if (bitmap_idx == GPIO_NA)
2589 strscpy(pf->ptp.pin_desc[idx].name,
2590 ice_pin_names[gpio],
2591 sizeof(pf->ptp.pin_desc[idx]
2592 .name));
2593 }
2594
2595 /* Setup in/out GPIO number */
2596 pins[idx].gpio[dir] = gpio;
2597 }
2598 }
2599
2600 for (i = 0; i < n_pins; i++) {
2601 dev_dbg(ice_pf_to_dev(pf),
2602 "NVM pin entry[%d] : name_idx %d gpio_out %d gpio_in %d\n",
2603 i, pins[i].name_idx, pins[i].gpio[1], pins[i].gpio[0]);
2604 }
2605
2606 pf->ptp.info.n_pins = n_pins;
2607 return 0;
2608 }
2609
2610 /**
2611 * ice_ptp_set_funcs_e82x - Set specialized functions for E82X support
2612 * @pf: Board private structure
2613 *
2614 * Assign functions to the PTP capabilities structure for E82X devices.
2615 * Functions which operate across all device families should be set directly
2616 * in ice_ptp_set_caps. Only add functions here which are distinct for E82X
2617 * devices.
2618 */
2619 static void ice_ptp_set_funcs_e82x(struct ice_pf *pf)
2620 {
2621 pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
2622
2623 if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825) {
2624 pf->ptp.ice_pin_desc = ice_pin_desc_e825c;
2625 pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e825c);
2626 } else {
2627 pf->ptp.ice_pin_desc = ice_pin_desc_e82x;
2628 pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e82x);
2629 }
2630 ice_ptp_setup_pin_cfg(pf);
2631 }
2632
2633 /**
2634 * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
2635 * @pf: Board private structure
2636 *
2637  * Assign functions to the PTP capabilities structure for E810 devices.
2638 * Functions which operate across all device families should be set directly
2639 * in ice_ptp_set_caps. Only add functions here which are distinct for E810
2640 * devices.
2641 */
2642 static void ice_ptp_set_funcs_e810(struct ice_pf *pf)
2643 {
2644 __le16 entries[ICE_AQC_NVM_SDP_AC_MAX_SIZE];
2645 struct ice_ptp_pin_desc *desc = NULL;
2646 struct ice_ptp *ptp = &pf->ptp;
2647 unsigned int num_entries;
2648 int err;
2649
2650 err = ice_ptp_read_sdp_ac(&pf->hw, entries, &num_entries);
2651 if (err) {
2652 /* SDP section does not exist in NVM or is corrupted */
2653 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
2654 ptp->ice_pin_desc = ice_pin_desc_e810_sma;
2655 ptp->info.n_pins =
2656 ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810_sma);
2657 } else {
2658 pf->ptp.ice_pin_desc = ice_pin_desc_e810;
2659 pf->ptp.info.n_pins =
2660 ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810);
2661 err = 0;
2662 }
2663 } else {
2664 desc = devm_kcalloc(ice_pf_to_dev(pf), ICE_N_PINS_MAX,
2665 sizeof(struct ice_ptp_pin_desc),
2666 GFP_KERNEL);
2667 if (!desc)
2668 goto err;
2669
2670 err = ice_ptp_parse_sdp_entries(pf, entries, num_entries, desc);
2671 if (err)
2672 goto err;
2673
2674 ptp->ice_pin_desc = (const struct ice_ptp_pin_desc *)desc;
2675 }
2676
2677 ptp->info.pin_config = ptp->pin_desc;
2678 ice_ptp_setup_pin_cfg(pf);
2679
2680 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
2681 err = ice_ptp_set_sma_cfg(pf);
2682 err:
2683 if (err) {
2684 devm_kfree(ice_pf_to_dev(pf), desc);
2685 ice_ptp_disable_pins(pf);
2686 }
2687 }
2688
2689 /**
2690 * ice_ptp_set_funcs_e830 - Set specialized functions for E830 support
2691 * @pf: Board private structure
2692 *
2693  * Assign functions to the PTP capabilities structure for E830 devices.
2694 * Functions which operate across all device families should be set directly
2695 * in ice_ptp_set_caps. Only add functions here which are distinct for E830
2696 * devices.
2697 */
2698 static void ice_ptp_set_funcs_e830(struct ice_pf *pf)
2699 {
2700 #ifdef CONFIG_ICE_HWTS
2701 if (pcie_ptm_enabled(pf->pdev) && boot_cpu_has(X86_FEATURE_ART))
2702 pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
2703
2704 #endif /* CONFIG_ICE_HWTS */
2705 /* Rest of the config is the same as base E810 */
2706 pf->ptp.ice_pin_desc = ice_pin_desc_e810;
2707 pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810);
2708 ice_ptp_setup_pin_cfg(pf);
2709 }
2710
2711 /**
2712 * ice_ptp_set_caps - Set PTP capabilities
2713 * @pf: Board private structure
2714 */
2715 static void ice_ptp_set_caps(struct ice_pf *pf)
2716 {
2717 struct ptp_clock_info *info = &pf->ptp.info;
2718 struct device *dev = ice_pf_to_dev(pf);
2719
2720 snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
2721 dev_driver_string(dev), dev_name(dev));
2722 info->owner = THIS_MODULE;
2723 info->max_adj = 100000000;
2724 info->adjtime = ice_ptp_adjtime;
2725 info->adjfine = ice_ptp_adjfine;
2726 info->gettimex64 = ice_ptp_gettimex64;
2727 info->settime64 = ice_ptp_settime64;
2728 info->n_per_out = GLTSYN_TGT_H_IDX_MAX;
2729 info->n_ext_ts = GLTSYN_EVNT_H_IDX_MAX;
2730 info->enable = ice_ptp_gpio_enable;
2731 info->verify = ice_verify_pin;
2732
2733 info->supported_extts_flags = PTP_RISING_EDGE |
2734 PTP_FALLING_EDGE |
2735 PTP_STRICT_FLAGS;
2736 info->supported_perout_flags = PTP_PEROUT_PHASE;
2737
2738 switch (pf->hw.mac_type) {
2739 case ICE_MAC_E810:
2740 ice_ptp_set_funcs_e810(pf);
2741 return;
2742 case ICE_MAC_E830:
2743 ice_ptp_set_funcs_e830(pf);
2744 return;
2745 case ICE_MAC_GENERIC:
2746 case ICE_MAC_GENERIC_3K_E825:
2747 ice_ptp_set_funcs_e82x(pf);
2748 return;
2749 default:
2750 return;
2751 }
2752 }
2753
2754 /**
2755 * ice_ptp_create_clock - Create PTP clock device for userspace
2756 * @pf: Board private structure
2757 *
2758  * This function creates a new PTP clock device if one does not already
2759  * exist. It returns an error if the device cannot be created, and success if
2760  * a device already exists. Used by ice_ptp_init to create the clock initially
2761  * and to prevent global resets from creating new clock devices.
2762 */
2763 static long ice_ptp_create_clock(struct ice_pf *pf)
2764 {
2765 struct ptp_clock_info *info;
2766 struct device *dev;
2767
2768 /* No need to create a clock device if we already have one */
2769 if (pf->ptp.clock)
2770 return 0;
2771
2772 ice_ptp_set_caps(pf);
2773
2774 info = &pf->ptp.info;
2775 dev = ice_pf_to_dev(pf);
2776
2777 /* Attempt to register the clock before enabling the hardware. */
2778 pf->ptp.clock = ptp_clock_register(info, dev);
2779 if (IS_ERR(pf->ptp.clock)) {
2780 dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device");
2781 return PTR_ERR(pf->ptp.clock);
2782 }
2783
2784 return 0;
2785 }
2786
2787 /**
2788 * ice_ptp_request_ts - Request an available Tx timestamp index
2789 * @tx: the PTP Tx timestamp tracker to request from
2790 * @skb: the SKB to associate with this timestamp request
2791 */
2792 s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
2793 {
2794 unsigned long flags;
2795 u8 idx;
2796
2797 spin_lock_irqsave(&tx->lock, flags);
2798
2799 /* Check that this tracker is accepting new timestamp requests */
2800 if (!ice_ptp_is_tx_tracker_up(tx)) {
2801 spin_unlock_irqrestore(&tx->lock, flags);
2802 return -1;
2803 }
2804
2805 /* Find and set the first available index */
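	/* Begin the search just after the last low latency timestamp index read,
	 * wrapping back to the start of the tracker if needed.
	 */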
2806 idx = find_next_zero_bit(tx->in_use, tx->len,
2807 tx->last_ll_ts_idx_read + 1);
2808 if (idx == tx->len)
2809 idx = find_first_zero_bit(tx->in_use, tx->len);
2810
2811 if (idx < tx->len) {
2812 /* We got a valid index that no other thread could have set. Store
2813 * a reference to the skb and the start time to allow discarding old
2814 * requests.
2815 */
2816 set_bit(idx, tx->in_use);
2817 clear_bit(idx, tx->stale);
2818 tx->tstamps[idx].start = jiffies;
2819 tx->tstamps[idx].skb = skb_get(skb);
2820 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2821 ice_trace(tx_tstamp_request, skb, idx);
2822 }
2823
2824 spin_unlock_irqrestore(&tx->lock, flags);
2825
2826 /* return the appropriate PHY timestamp register index, -1 if no
2827 * indexes were available.
2828 */
2829 if (idx >= tx->len)
2830 return -1;
2831 else
2832 return idx + tx->offset;
2833 }
2834
2835 /**
2836 * ice_ptp_process_ts - Process the PTP Tx timestamps
2837 * @pf: Board private structure
2838 *
2839 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx
2840 * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise.
2841 */
2842 enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
2843 {
2844 switch (pf->ptp.tx_interrupt_mode) {
2845 case ICE_PTP_TX_INTERRUPT_NONE:
2846 /* This device has the clock owner handle timestamps for it */
2847 return ICE_TX_TSTAMP_WORK_DONE;
2848 case ICE_PTP_TX_INTERRUPT_SELF:
2849 /* This device handles its own timestamps */
2850 return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
2851 case ICE_PTP_TX_INTERRUPT_ALL:
2852 /* This device handles timestamps for all ports */
2853 return ice_ptp_tx_tstamp_owner(pf);
2854 default:
2855 WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
2856 pf->ptp.tx_interrupt_mode);
2857 return ICE_TX_TSTAMP_WORK_DONE;
2858 }
2859 }
2860
2861 /**
2862 * ice_ptp_ts_irq - Process the PTP Tx timestamps in IRQ context
2863 * @pf: Board private structure
2864 *
2865 * Return: IRQ_WAKE_THREAD if Tx timestamp read has to be handled in the bottom
2866 * half of the interrupt and IRQ_HANDLED otherwise.
2867 */
2868 irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
2869 {
2870 struct ice_hw *hw = &pf->hw;
2871
2872 switch (hw->mac_type) {
2873 case ICE_MAC_E810:
2874 /* E810 capable of low latency timestamping with interrupt can
2875 * request a single timestamp in the top half and wait for
2876 * a second LL TS interrupt from the FW when it's ready.
2877 */
2878 if (hw->dev_caps.ts_dev_info.ts_ll_int_read) {
2879 struct ice_ptp_tx *tx = &pf->ptp.port.tx;
2880 u8 idx;
2881
2882 if (!ice_pf_state_is_nominal(pf))
2883 return IRQ_HANDLED;
2884
2885 spin_lock(&tx->lock);
2886 idx = find_next_bit_wrap(tx->in_use, tx->len,
2887 tx->last_ll_ts_idx_read + 1);
2888 if (idx != tx->len)
2889 ice_ptp_req_tx_single_tstamp(tx, idx);
2890 spin_unlock(&tx->lock);
2891
2892 return IRQ_HANDLED;
2893 }
2894 fallthrough; /* non-LL_TS E810 */
2895 case ICE_MAC_GENERIC:
2896 case ICE_MAC_GENERIC_3K_E825:
2897 /* All other devices process timestamps in the bottom half due
2898 * to sleeping or polling.
2899 */
2900 if (!ice_ptp_pf_handles_tx_interrupt(pf))
2901 return IRQ_HANDLED;
2902
2903 set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
2904 return IRQ_WAKE_THREAD;
2905 case ICE_MAC_E830:
2906 /* E830 can read timestamps in the top half using rd32() */
2907 if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
2908 /* Process outstanding Tx timestamps. If there
2909 * is more work, re-arm the interrupt to trigger again.
2910 */
2911 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
2912 ice_flush(hw);
2913 }
2914 return IRQ_HANDLED;
2915 default:
2916 return IRQ_HANDLED;
2917 }
2918 }
2919
2920 /**
2921  * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt
2922 * @pf: Board private structure
2923 *
2924 * The device PHY issues Tx timestamp interrupts to the driver for processing
2925 * timestamp data from the PHY. It will not interrupt again until all
2926 * current timestamp data is read. In rare circumstances, it is possible that
2927 * the driver fails to read all outstanding data.
2928 *
2929 * To avoid getting permanently stuck, periodically check if the PHY has
2930 * outstanding timestamp data. If so, trigger an interrupt from software to
2931 * process this data.
2932 */
2933 static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
2934 {
2935 struct device *dev = ice_pf_to_dev(pf);
2936 struct ice_hw *hw = &pf->hw;
2937 bool trigger_oicr = false;
2938 unsigned int i;
2939
2940 if (!pf->ptp.port.tx.has_ready_bitmap)
2941 return;
2942
2943 if (!ice_pf_src_tmr_owned(pf))
2944 return;
2945
2946 for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) {
2947 u64 tstamp_ready;
2948 int err;
2949
2950 err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
2951 if (!err && tstamp_ready) {
2952 trigger_oicr = true;
2953 break;
2954 }
2955 }
2956
2957 if (trigger_oicr) {
2958 /* Trigger a software interrupt, to ensure this data
2959 * gets processed.
2960 */
2961 dev_dbg(dev, "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n");
2962
2963 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
2964 ice_flush(hw);
2965 }
2966 }
2967
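/**
 * ice_ptp_periodic_work - Periodic PTP maintenance work item
 * @work: the kthread delayed work embedded in struct ice_ptp
 *
 * Update the cached PHC time, check for stalled Tx timestamp interrupts, and
 * reschedule the work.
 */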
2968 static void ice_ptp_periodic_work(struct kthread_work *work)
2969 {
2970 struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
2971 struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
2972 int err;
2973
2974 if (pf->ptp.state != ICE_PTP_READY)
2975 return;
2976
2977 err = ice_ptp_update_cached_phctime(pf);
2978
2979 ice_ptp_maybe_trigger_tx_interrupt(pf);
2980
2981 /* Run twice a second or reschedule if phc update failed */
2982 kthread_queue_delayed_work(ptp->kworker, &ptp->work,
2983 msecs_to_jiffies(err ? 10 : 500));
2984 }
2985
2986 /**
2987 * ice_ptp_prepare_rebuild_sec - Prepare second NAC for PTP reset or rebuild
2988 * @pf: Board private structure
2989 * @rebuild: rebuild if true, prepare if false
2990 * @reset_type: the reset type being performed
2991 */
2992 static void ice_ptp_prepare_rebuild_sec(struct ice_pf *pf, bool rebuild,
2993 enum ice_reset_req reset_type)
2994 {
2995 struct list_head *entry;
2996
2997 list_for_each(entry, &pf->adapter->ports.ports) {
2998 struct ice_ptp_port *port = list_entry(entry,
2999 struct ice_ptp_port,
3000 list_node);
3001 struct ice_pf *peer_pf = ptp_port_to_pf(port);
3002
3003 if (!ice_is_primary(&peer_pf->hw)) {
3004 if (rebuild)
3005 ice_ptp_rebuild(peer_pf, reset_type);
3006 else
3007 ice_ptp_prepare_for_reset(peer_pf, reset_type);
3008 }
3009 }
3010 }
3011
3012 /**
3013 * ice_ptp_prepare_for_reset - Prepare PTP for reset
3014 * @pf: Board private structure
3015 * @reset_type: the reset type being performed
3016 */
3017 void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
3018 {
3019 struct ice_ptp *ptp = &pf->ptp;
3020 struct ice_hw *hw = &pf->hw;
3021 u8 src_tmr;
3022
3023 if (ptp->state != ICE_PTP_READY)
3024 return;
3025
3026 ptp->state = ICE_PTP_RESETTING;
3027
3028 /* Disable timestamping for both Tx and Rx */
3029 ice_ptp_disable_timestamp_mode(pf);
3030
3031 kthread_cancel_delayed_work_sync(&ptp->work);
3032
3033 if (reset_type == ICE_RESET_PFR)
3034 return;
3035
3036 if (ice_pf_src_tmr_owned(pf) && hw->mac_type == ICE_MAC_GENERIC_3K_E825)
3037 ice_ptp_prepare_rebuild_sec(pf, false, reset_type);
3038
3039 ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
3040
3041 /* Disable periodic outputs */
3042 ice_ptp_disable_all_perout(pf);
3043
3044 src_tmr = ice_get_ptp_src_clock_index(&pf->hw);
3045
3046 /* Disable source clock */
3047 wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);
3048
3049 /* Acquire PHC and system timer to restore after reset */
3050 ptp->reset_time = ktime_get_real_ns();
3051 }
3052
3053 /**
3054 * ice_ptp_rebuild_owner - Initialize PTP clock owner after reset
3055 * @pf: Board private structure
3056 *
3057 * Companion function for ice_ptp_rebuild() which handles tasks that only the
3058 * PTP clock owner instance should perform.
3059 */
3060 static int ice_ptp_rebuild_owner(struct ice_pf *pf)
3061 {
3062 struct ice_ptp *ptp = &pf->ptp;
3063 struct ice_hw *hw = &pf->hw;
3064 struct timespec64 ts;
3065 u64 time_diff;
3066 int err;
3067
3068 err = ice_ptp_init_phc(hw);
3069 if (err)
3070 return err;
3071
3072 /* Acquire the global hardware lock */
3073 if (!ice_ptp_lock(hw)) {
3074 err = -EBUSY;
3075 return err;
3076 }
3077
3078 /* Write the increment time value to PHY and LAN */
3079 err = ice_ptp_write_incval(hw, ice_base_incval(pf));
3080 if (err)
3081 goto err_unlock;
3082
3083 /* Write the initial Time value to PHY and LAN using the cached PHC
3084 * time before the reset and time difference between stopping and
3085 * starting the clock.
3086 */
3087 if (ptp->cached_phc_time) {
3088 time_diff = ktime_get_real_ns() - ptp->reset_time;
3089 ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);
3090 } else {
3091 ts = ktime_to_timespec64(ktime_get_real());
3092 }
3093 err = ice_ptp_write_init(pf, &ts);
3094 if (err)
3095 goto err_unlock;
3096
3097 /* Release the global hardware lock */
3098 ice_ptp_unlock(hw);
3099
3100 /* Flush software tracking of any outstanding timestamps since we're
3101 * about to flush the PHY timestamp block.
3102 */
3103 ice_ptp_flush_all_tx_tracker(pf);
3104
3105 /* Enable quad interrupts */
3106 err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
3107 if (err)
3108 return err;
3109
3110 ice_ptp_restart_all_phy(pf);
3111
3112 /* Re-enable all periodic outputs and external timestamp events */
3113 ice_ptp_enable_all_perout(pf);
3114 ice_ptp_enable_all_extts(pf);
3115
3116 return 0;
3117
3118 err_unlock:
3119 ice_ptp_unlock(hw);
3120 return err;
3121 }
3122
3123 /**
3124 * ice_ptp_rebuild - Initialize PTP hardware clock support after reset
3125 * @pf: Board private structure
3126 * @reset_type: the reset type being performed
3127 */
3128 void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
3129 {
3130 struct ice_ptp *ptp = &pf->ptp;
3131 int err;
3132
3133 if (ptp->state == ICE_PTP_READY) {
3134 ice_ptp_prepare_for_reset(pf, reset_type);
3135 } else if (ptp->state != ICE_PTP_RESETTING) {
3136 err = -EINVAL;
3137 dev_err(ice_pf_to_dev(pf), "PTP was not initialized\n");
3138 goto err;
3139 }
3140
3141 if (ice_pf_src_tmr_owned(pf) && reset_type != ICE_RESET_PFR) {
3142 err = ice_ptp_rebuild_owner(pf);
3143 if (err)
3144 goto err;
3145 }
3146
3147 ptp->state = ICE_PTP_READY;
3148
3149 /* Start periodic work going */
3150 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
3151
3152 dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
3153 return;
3154
3155 err:
3156 ptp->state = ICE_PTP_ERROR;
3157 dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
3158 }
3159
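/**
 * ice_ptp_setup_adapter - Designate this PF as the adapter control PF
 * @pf: Board private structure
 *
 * Return: 0 on success, -EPERM if this PF does not own the source timer or is
 * not the primary function.
 */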
3160 static int ice_ptp_setup_adapter(struct ice_pf *pf)
3161 {
3162 if (!ice_pf_src_tmr_owned(pf) || !ice_is_primary(&pf->hw))
3163 return -EPERM;
3164
3165 pf->adapter->ctrl_pf = pf;
3166
3167 return 0;
3168 }
3169
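/**
 * ice_ptp_setup_pf - Add this PF's PTP port to the adapter port list
 * @pf: Board private structure
 *
 * Return: 0 on success, -ENODEV if there is no control PTP instance or the MAC
 * type is unknown.
 */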
3170 static int ice_ptp_setup_pf(struct ice_pf *pf)
3171 {
3172 struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
3173 struct ice_ptp *ptp = &pf->ptp;
3174
3175 if (WARN_ON(!ctrl_ptp) || pf->hw.mac_type == ICE_MAC_UNKNOWN)
3176 return -ENODEV;
3177
3178 INIT_LIST_HEAD(&ptp->port.list_node);
3179 mutex_lock(&pf->adapter->ports.lock);
3180
3181 list_add(&ptp->port.list_node,
3182 &pf->adapter->ports.ports);
3183 mutex_unlock(&pf->adapter->ports.lock);
3184
3185 return 0;
3186 }
3187
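/**
 * ice_ptp_cleanup_pf - Remove this PF's PTP port from the adapter port list
 * @pf: Board private structure
 */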
3188 static void ice_ptp_cleanup_pf(struct ice_pf *pf)
3189 {
3190 struct ice_ptp *ptp = &pf->ptp;
3191
3192 if (pf->hw.mac_type != ICE_MAC_UNKNOWN) {
3193 mutex_lock(&pf->adapter->ports.lock);
3194 list_del(&ptp->port.list_node);
3195 mutex_unlock(&pf->adapter->ports.lock);
3196 }
3197 }
3198
3199 /**
3200 * ice_ptp_clock_index - Get the PTP clock index for this device
3201 * @pf: Board private structure
3202 *
3203 * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock
3204 * is associated.
3205 */
3206 int ice_ptp_clock_index(struct ice_pf *pf)
3207 {
3208 struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
3209 struct ptp_clock *clock;
3210
3211 if (!ctrl_ptp)
3212 return -1;
3213 clock = ctrl_ptp->clock;
3214
3215 return clock ? ptp_clock_index(clock) : -1;
3216 }
3217
3218 /**
3219 * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
3220 * @pf: Board private structure
3221 *
3222 * Setup and initialize a PTP clock device that represents the device hardware
3223 * clock. Save the clock index for other functions connected to the same
3224 * hardware resource.
3225 */
3226 static int ice_ptp_init_owner(struct ice_pf *pf)
3227 {
3228 struct ice_hw *hw = &pf->hw;
3229 struct timespec64 ts;
3230 int err;
3231
3232 err = ice_ptp_init_phc(hw);
3233 if (err) {
3234 dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n",
3235 err);
3236 return err;
3237 }
3238
3239 /* Acquire the global hardware lock */
3240 if (!ice_ptp_lock(hw)) {
3241 err = -EBUSY;
3242 goto err_exit;
3243 }
3244
3245 /* Write the increment time value to PHY and LAN */
3246 err = ice_ptp_write_incval(hw, ice_base_incval(pf));
3247 if (err)
3248 goto err_unlock;
3249
3250 ts = ktime_to_timespec64(ktime_get_real());
3251 /* Write the initial Time value to PHY and LAN */
3252 err = ice_ptp_write_init(pf, &ts);
3253 if (err)
3254 goto err_unlock;
3255
3256 /* Release the global hardware lock */
3257 ice_ptp_unlock(hw);
3258
3259 /* Configure PHY interrupt settings */
3260 err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
3261 if (err)
3262 goto err_exit;
3263
3264 /* Ensure we have a clock device */
3265 err = ice_ptp_create_clock(pf);
3266 if (err)
3267 goto err_clk;
3268
3269 return 0;
3270 err_clk:
3271 pf->ptp.clock = NULL;
3272 err_exit:
3273 return err;
3274
3275 err_unlock:
3276 ice_ptp_unlock(hw);
3277 return err;
3278 }
3279
3280 /**
3281 * ice_ptp_init_work - Initialize PTP work threads
3282 * @pf: Board private structure
3283 * @ptp: PF PTP structure
3284 */
3285 static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
3286 {
3287 struct kthread_worker *kworker;
3288
3289 /* Initialize work functions */
3290 kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);
3291
3292 /* Allocate a kworker for handling work required for the ports
3293 * connected to the PTP hardware clock.
3294 */
3295 kworker = kthread_run_worker(0, "ice-ptp-%s",
3296 dev_name(ice_pf_to_dev(pf)));
3297 if (IS_ERR(kworker))
3298 return PTR_ERR(kworker);
3299
3300 ptp->kworker = kworker;
3301
3302 /* Start periodic work going */
3303 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
3304
3305 return 0;
3306 }
3307
3308 /**
3309 * ice_ptp_init_port - Initialize PTP port structure
3310 * @pf: Board private structure
3311 * @ptp_port: PTP port structure
3312 *
3313 * Return: 0 on success, -ENODEV on invalid MAC type, -ENOMEM on failed alloc.
3314 */
3315 static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
3316 {
3317 struct ice_hw *hw = &pf->hw;
3318
3319 mutex_init(&ptp_port->ps_lock);
3320
3321 switch (hw->mac_type) {
3322 case ICE_MAC_E810:
3323 case ICE_MAC_E830:
3324 case ICE_MAC_GENERIC_3K_E825:
3325 return ice_ptp_init_tx(pf, &ptp_port->tx, ptp_port->port_num);
3326 case ICE_MAC_GENERIC:
3327 kthread_init_delayed_work(&ptp_port->ov_work,
3328 ice_ptp_wait_for_offsets);
3329 return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
3330 ptp_port->port_num);
3331 default:
3332 return -ENODEV;
3333 }
3334 }
3335
3336 /**
3337 * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode
3338 * @pf: Board private structure
3339 *
3340 * Initialize the Tx timestamp interrupt mode for this device. For most device
3341 * types, each PF processes the interrupt and manages its own timestamps. For
3342 * E822-based devices, only the clock owner processes the timestamps. Other
3343 * PFs disable the interrupt and do not process their own timestamps.
3344 */
3345 static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
3346 {
3347 switch (pf->hw.mac_type) {
3348 case ICE_MAC_GENERIC:
3349 /* E822 based PHY has the clock owner process the interrupt
3350 * for all ports.
3351 */
3352 if (ice_pf_src_tmr_owned(pf))
3353 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL;
3354 else
3355 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE;
3356 break;
3357 default:
3358 /* other PHY types handle their own Tx interrupt */
3359 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF;
3360 }
3361 }
3362
3363 /**
3364 * ice_ptp_init - Initialize PTP hardware clock support
3365 * @pf: Board private structure
3366 *
3367 * Set up the device for interacting with the PTP hardware clock for all
3368 * functions, both the function that owns the clock hardware, and the
3369 * functions connected to the clock hardware.
3370 *
3371 * The clock owner will allocate and register a ptp_clock with the
3372 * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work
3373 * items used for asynchronous work such as Tx timestamps and periodic work.
3374 */
3375 void ice_ptp_init(struct ice_pf *pf)
3376 {
3377 struct ice_ptp *ptp = &pf->ptp;
3378 struct ice_hw *hw = &pf->hw;
3379 int err;
3380
3381 ptp->state = ICE_PTP_INITIALIZING;
3382
3383 if (hw->lane_num < 0) {
3384 err = hw->lane_num;
3385 goto err_exit;
3386 }
3387 ptp->port.port_num = hw->lane_num;
3388
3389 ice_ptp_init_hw(hw);
3390
3391 ice_ptp_init_tx_interrupt_mode(pf);
3392
3393 /* If this function owns the clock hardware, it must allocate and
3394 * configure the PTP clock device to represent it.
3395 */
3396 if (ice_pf_src_tmr_owned(pf) && ice_is_primary(hw)) {
3397 err = ice_ptp_setup_adapter(pf);
3398 if (err)
3399 goto err_exit;
3400 err = ice_ptp_init_owner(pf);
3401 if (err)
3402 goto err_exit;
3403 }
3404
3405 err = ice_ptp_setup_pf(pf);
3406 if (err)
3407 goto err_exit;
3408
3409 err = ice_ptp_init_port(pf, &ptp->port);
3410 if (err)
3411 goto err_exit;
3412
3413 /* Start the PHY timestamping block */
3414 ice_ptp_reset_phy_timestamping(pf);
3415
3416 /* Configure initial Tx interrupt settings */
3417 ice_ptp_cfg_tx_interrupt(pf);
3418
3419 ptp->state = ICE_PTP_READY;
3420
3421 err = ice_ptp_init_work(pf, ptp);
3422 if (err)
3423 goto err_exit;
3424
3425 dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
3426 return;
3427
3428 err_exit:
3429 /* If we registered a PTP clock, release it */
3430 if (pf->ptp.clock) {
3431 ptp_clock_unregister(ptp->clock);
3432 pf->ptp.clock = NULL;
3433 }
3434 ptp->state = ICE_PTP_ERROR;
3435 dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
3436 }
3437
3438 /**
3439 * ice_ptp_release - Disable the driver/HW support and unregister the clock
3440 * @pf: Board private structure
3441 *
3442 * This function handles the cleanup work required from the initialization by
3443 * clearing out the important information and unregistering the clock
3444 */
3445 void ice_ptp_release(struct ice_pf *pf)
3446 {
3447 if (pf->ptp.state != ICE_PTP_READY)
3448 return;
3449
3450 pf->ptp.state = ICE_PTP_UNINIT;
3451
3452 /* Disable timestamping for both Tx and Rx */
3453 ice_ptp_disable_timestamp_mode(pf);
3454
3455 ice_ptp_cleanup_pf(pf);
3456
3457 ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
3458
3459 ice_ptp_disable_all_extts(pf);
3460
3461 kthread_cancel_delayed_work_sync(&pf->ptp.work);
3462
3463 ice_ptp_port_phy_stop(&pf->ptp.port);
3464 mutex_destroy(&pf->ptp.port.ps_lock);
3465 if (pf->ptp.kworker) {
3466 kthread_destroy_worker(pf->ptp.kworker);
3467 pf->ptp.kworker = NULL;
3468 }
3469
3470 if (!pf->ptp.clock)
3471 return;
3472
3473 /* Disable periodic outputs */
3474 ice_ptp_disable_all_perout(pf);
3475
3476 ptp_clock_unregister(pf->ptp.clock);
3477 pf->ptp.clock = NULL;
3478
3479 dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
3480 }
3481