1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021, Intel Corporation. */
3
4 #include "ice.h"
5 #include "ice_lib.h"
6 #include "ice_trace.h"
7
8 static const char ice_pin_names[][64] = {
9 "SDP0",
10 "SDP1",
11 "SDP2",
12 "SDP3",
13 "TIME_SYNC",
14 "1PPS"
15 };
16
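/* In the ice_ptp_pin_desc tables below, each entry pairs a pin name with a
 * { input, output } GPIO pair and a { input, output } delay pair (in
 * nanoseconds): gpio[0]/delay[0] describe the external timestamp (EXTTS)
 * input side and gpio[1]/delay[1] the periodic output (PEROUT) side, with
 * -1 marking a direction the pin does not support. This summary is inferred
 * from how the fields are consumed by ice_verify_pin(), ice_ptp_cfg_extts()
 * and ice_ptp_cfg_perout() later in this file.
 */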
17 static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = {
18 /* name, gpio, delay */
19 { TIME_SYNC, { 4, -1 }, { 0, 0 }},
20 { ONE_PPS, { -1, 5 }, { 0, 11 }},
21 };
22
23 static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = {
24 /* name, gpio, delay */
25 { SDP0, { 0, 0 }, { 15, 14 }},
26 { SDP1, { 1, 1 }, { 15, 14 }},
27 { SDP2, { 2, 2 }, { 15, 14 }},
28 { SDP3, { 3, 3 }, { 15, 14 }},
29 { TIME_SYNC, { 4, -1 }, { 11, 0 }},
30 { ONE_PPS, { -1, 5 }, { 0, 9 }},
31 };
32
33 static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = {
34 /* name, gpio, delay */
35 { SDP0, { 0, 0 }, { 0, 1 }},
36 { SDP1, { 1, 1 }, { 0, 1 }},
37 { SDP2, { 2, 2 }, { 0, 1 }},
38 { SDP3, { 3, 3 }, { 0, 1 }},
39 { ONE_PPS, { -1, 5 }, { 0, 1 }},
40 };
41
42 static const char ice_pin_names_dpll[][64] = {
43 "SDP20",
44 "SDP21",
45 "SDP22",
46 "SDP23",
47 };
48
49 static const struct ice_ptp_pin_desc ice_pin_desc_dpll[] = {
50 /* name, gpio, delay */
51 { SDP0, { -1, 0 }, { 0, 1 }},
52 { SDP1, { 1, -1 }, { 0, 0 }},
53 { SDP2, { -1, 2 }, { 0, 1 }},
54 { SDP3, { 3, -1 }, { 0, 0 }},
55 };
56
57 static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf)
58 {
59 return !pf->adapter ? NULL : pf->adapter->ctrl_pf;
60 }
61
62 static struct ice_ptp *ice_get_ctrl_ptp(struct ice_pf *pf)
63 {
64 struct ice_pf *ctrl_pf = ice_get_ctrl_pf(pf);
65
66 return !ctrl_pf ? NULL : &ctrl_pf->ptp;
67 }
68
69 /**
70 * ice_ptp_find_pin_idx - Find pin index in ptp_pin_desc
71 * @pf: Board private structure
72 * @func: Pin function
73 * @chan: GPIO channel
74 *
75 * Return: index of the matching pin when present, -1 otherwise
76 */
77 static int ice_ptp_find_pin_idx(struct ice_pf *pf, enum ptp_pin_function func,
78 unsigned int chan)
79 {
80 const struct ptp_clock_info *info = &pf->ptp.info;
81 int i;
82
83 for (i = 0; i < info->n_pins; i++) {
84 if (info->pin_config[i].func == func &&
85 info->pin_config[i].chan == chan)
86 return i;
87 }
88
89 return -1;
90 }
91
92 /**
93 * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
94 * @pf: Board private structure
95 *
96 * Program the device to respond appropriately to the Tx timestamp interrupt
97 * cause.
98 */
99 static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf)
100 {
101 struct ice_hw *hw = &pf->hw;
102 bool enable;
103 u32 val;
104
105 switch (pf->ptp.tx_interrupt_mode) {
106 case ICE_PTP_TX_INTERRUPT_ALL:
107 /* React to interrupts across all quads. */
108 wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
109 enable = true;
110 break;
111 case ICE_PTP_TX_INTERRUPT_NONE:
112 /* Do not react to interrupts on any quad. */
113 wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
114 enable = false;
115 break;
116 case ICE_PTP_TX_INTERRUPT_SELF:
117 default:
118 enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON;
119 break;
120 }
121
122 /* Configure the Tx timestamp interrupt */
123 val = rd32(hw, PFINT_OICR_ENA);
124 if (enable)
125 val |= PFINT_OICR_TSYN_TX_M;
126 else
127 val &= ~PFINT_OICR_TSYN_TX_M;
128 wr32(hw, PFINT_OICR_ENA, val);
129 }
130
131 /**
132 * ice_set_rx_tstamp - Enable or disable Rx timestamping
133 * @pf: The PF pointer to search in
134 * @on: bool value for whether timestamps are enabled or disabled
135 */
136 static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
137 {
138 struct ice_vsi *vsi;
139 u16 i;
140
141 vsi = ice_get_main_vsi(pf);
142 if (!vsi || !vsi->rx_rings)
143 return;
144
145 /* Set the timestamp flag for all the Rx rings */
146 ice_for_each_rxq(vsi, i) {
147 if (!vsi->rx_rings[i])
148 continue;
149 vsi->rx_rings[i]->ptp_rx = on;
150 }
151 }
152
153 /**
154 * ice_ptp_disable_timestamp_mode - Disable current timestamp mode
155 * @pf: Board private structure
156 *
157 * Called during preparation for reset to temporarily disable timestamping on
158 * the device. Called during remove to disable timestamping while cleaning up
159 * driver resources.
160 */
161 static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf)
162 {
163 struct ice_hw *hw = &pf->hw;
164 u32 val;
165
166 val = rd32(hw, PFINT_OICR_ENA);
167 val &= ~PFINT_OICR_TSYN_TX_M;
168 wr32(hw, PFINT_OICR_ENA, val);
169
170 ice_set_rx_tstamp(pf, false);
171 }
172
173 /**
174 * ice_ptp_restore_timestamp_mode - Restore timestamp configuration
175 * @pf: Board private structure
176 *
177 * Called at the end of rebuild to restore timestamp configuration after
178 * a device reset.
179 */
180 void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
181 {
182 struct ice_hw *hw = &pf->hw;
183 bool enable_rx;
184
185 ice_ptp_cfg_tx_interrupt(pf);
186
187 enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
188 ice_set_rx_tstamp(pf, enable_rx);
189
190 /* Trigger an immediate software interrupt to ensure that timestamps
191 * which occurred during reset are handled now.
192 */
193 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
194 ice_flush(hw);
195 }
196
197 /**
198 * ice_ptp_read_src_clk_reg - Read the source clock register
199 * @pf: Board private structure
200 * @sts: Optional parameter for holding a pair of system timestamps from
201 * the system clock. Will be ignored if NULL is given.
202 */
203 u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
204 struct ptp_system_timestamp *sts)
205 {
206 struct ice_hw *hw = &pf->hw;
207 u32 hi, lo, lo2;
208 u8 tmr_idx;
209
210 if (!ice_is_primary(hw))
211 hw = ice_get_primary_hw(pf);
212
213 tmr_idx = ice_get_ptp_src_clock_index(hw);
214 guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
215 /* Read the system timestamp pre PHC read */
216 ptp_read_system_prets(sts);
217
218 if (hw->mac_type == ICE_MAC_E830) {
219 u64 clk_time = rd64(hw, E830_GLTSYN_TIME_L(tmr_idx));
220
221 /* Read the system timestamp post PHC read */
222 ptp_read_system_postts(sts);
223
224 return clk_time;
225 }
226
227 lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
228
229 /* Read the system timestamp post PHC read */
230 ptp_read_system_postts(sts);
231
232 hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
233 lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));
234
235 if (lo2 < lo) {
236 /* if TIME_L rolled over read TIME_L again and update
237 * system timestamps
238 */
239 ptp_read_system_prets(sts);
240 lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
241 ptp_read_system_postts(sts);
242 hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
243 }
244
245 return ((u64)hi << 32) | lo;
246 }
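/* Illustrative example of the rollover guard above (hypothetical values):
 * if the first TIME_L read returns 0xFFFFFFF0 and the second returns
 * 0x00000010, the low word wrapped between the TIME_L and TIME_H reads, so
 * the captured TIME_H may belong to the new cycle. Re-reading TIME_L and
 * TIME_H ensures both halves come from the same cycle before combining them.
 */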
247
248 /**
249 * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
250 * @cached_phc_time: recently cached copy of PHC time
251 * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
252 *
253 * Hardware captures timestamps which contain only 32 bits of nominal
254 * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
255 * Note that the captured timestamp values may be 40 bits, but the lower
256 * 8 bits are sub-nanoseconds and generally discarded.
257 *
258 * Extend the 32bit nanosecond timestamp using the following algorithm and
259 * assumptions:
260 *
261 * 1) have a recently cached copy of the PHC time
262 * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1
263 * seconds) before or after the PHC time was captured.
264 * 3) calculate the delta between the cached time and the timestamp
265 * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
266 * captured after the PHC time. In this case, the full timestamp is just
267 * the cached PHC time plus the delta.
268 * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
269 * timestamp was captured *before* the PHC time, i.e. because the PHC
270 * cache was updated after the timestamp was captured by hardware. In this
271 * case, the full timestamp is the cached time minus the inverse delta.
272 *
273 * This algorithm works even if the PHC time was updated after a Tx timestamp
274 * was requested, but before the Tx timestamp event was reported from
275 * hardware.
276 *
277 * This calculation primarily relies on keeping the cached PHC time up to
278 * date. If the timestamp was captured more than 2^31 nanoseconds after the
279 * PHC time, it is possible that the lower 32bits of PHC time have
280 * overflowed more than once, and we might generate an incorrect timestamp.
281 *
282 * This is prevented by (a) periodically updating the cached PHC time once
283 * a second, and (b) discarding any Tx timestamp packet if it has waited for
284 * a timestamp for more than one second.
285 */
286 static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
287 {
288 u32 delta, phc_time_lo;
289 u64 ns;
290
291 /* Extract the lower 32 bits of the PHC time */
292 phc_time_lo = (u32)cached_phc_time;
293
294 /* Calculate the delta between the lower 32bits of the cached PHC
295 * time and the in_tstamp value
296 */
297 delta = (in_tstamp - phc_time_lo);
298
299 /* Do not assume that the in_tstamp is always more recent than the
300 * cached PHC time. If the delta is large, it indicates that the
301 * in_tstamp was taken in the past, and should be converted
302 * forward.
303 */
304 if (delta > (U32_MAX / 2)) {
305 /* reverse the delta calculation here */
306 delta = (phc_time_lo - in_tstamp);
307 ns = cached_phc_time - delta;
308 } else {
309 ns = cached_phc_time + delta;
310 }
311
312 return ns;
313 }
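/* Worked example of the extension math above (hypothetical values), with
 * cached_phc_time = 0x0000000500000100:
 * - in_tstamp = 0x00000180 gives delta = 0x80 (< 2^31), so the timestamp is
 *   newer than the cache and ns = 0x0000000500000180;
 * - in_tstamp = 0x00000080 gives delta = 0xFFFFFF80 (> 2^31), so the
 *   timestamp is older and ns = cached_phc_time - 0x80 = 0x0000000500000080.
 */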
314
315 /**
316 * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
317 * @pf: Board private structure
318 * @in_tstamp: Ingress/egress 40b timestamp value
319 *
320 * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
321 * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
322 *
323 * *--------------------------------------------------------------*
324 * | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
325 * *--------------------------------------------------------------*
326 *
327 * The low bit is an indicator of whether the timestamp is valid. The next
328 * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
329 * and the remaining 32 bits are the lower 32 bits of the PHC timer.
330 *
331 * It is assumed that the caller verifies the timestamp is valid prior to
332 * calling this function.
333 *
334 * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
335 * time stored in the device private PTP structure as the basis for timestamp
336 * extension.
337 *
338 * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
339 * algorithm.
340 */
341 static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
342 {
343 const u64 mask = GENMASK_ULL(31, 0);
344 unsigned long discard_time;
345
346 /* Discard the hardware timestamp if the cached PHC time is too old */
347 discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
348 if (time_is_before_jiffies(discard_time)) {
349 pf->ptp.tx_hwtstamp_discarded++;
350 return 0;
351 }
352
353 return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
354 (in_tstamp >> 8) & mask);
355 }
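/* Illustrative example (hypothetical raw value): for in_tstamp =
 * 0x1122334455, bit 0 (1) is the valid flag, bits 7:1 (0x2a) hold the
 * sub-nanosecond capture, and (in_tstamp >> 8) & GENMASK_ULL(31, 0) =
 * 0x11223344 is the 32-bit nominal nanosecond value that gets passed to
 * ice_ptp_extend_32b_ts() above.
 */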
356
357 /**
358 * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps
359 * @tx: the PTP Tx timestamp tracker to check
360 *
361 * Check that a given PTP Tx timestamp tracker is up, i.e. that it is ready
362 * to accept new timestamp requests.
363 *
364 * Assumes the tx->lock spinlock is already held.
365 */
366 static bool
367 ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
368 {
369 lockdep_assert_held(&tx->lock);
370
371 return tx->init && !tx->calibrating;
372 }
373
374 /**
375 * ice_ptp_req_tx_single_tstamp - Request Tx timestamp for a port from FW
376 * @tx: the PTP Tx timestamp tracker
377 * @idx: index of the timestamp to request
378 */
379 void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
380 {
381 struct ice_e810_params *params;
382 struct ice_ptp_port *ptp_port;
383 unsigned long flags;
384 struct sk_buff *skb;
385 struct ice_pf *pf;
386
387 if (!tx->init)
388 return;
389
390 ptp_port = container_of(tx, struct ice_ptp_port, tx);
391 pf = ptp_port_to_pf(ptp_port);
392 params = &pf->hw.ptp.phy.e810;
393
394 /* Drop packets which have waited for more than 2 seconds */
395 if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
396 /* Count the number of Tx timestamps that timed out */
397 pf->ptp.tx_hwtstamp_timeouts++;
398
399 skb = tx->tstamps[idx].skb;
400 tx->tstamps[idx].skb = NULL;
401 clear_bit(idx, tx->in_use);
402
403 dev_kfree_skb_any(skb);
404 return;
405 }
406
407 ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
408
409 spin_lock_irqsave(&params->atqbal_wq.lock, flags);
410
411 params->atqbal_flags |= ATQBAL_FLAGS_INTR_IN_PROGRESS;
412
413 /* Write TS index to read to the PF register so the FW can read it */
414 wr32(&pf->hw, REG_LL_PROXY_H,
415 REG_LL_PROXY_H_TS_INTR_ENA | FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) |
416 REG_LL_PROXY_H_EXEC);
417 tx->last_ll_ts_idx_read = idx;
418
419 spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
420 }
421
422 /**
423 * ice_ptp_complete_tx_single_tstamp - Complete Tx timestamp for a port
424 * @tx: the PTP Tx timestamp tracker
425 */
426 void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
427 {
428 struct skb_shared_hwtstamps shhwtstamps = {};
429 u8 idx = tx->last_ll_ts_idx_read;
430 struct ice_e810_params *params;
431 struct ice_ptp_port *ptp_port;
432 u64 raw_tstamp, tstamp;
433 bool drop_ts = false;
434 struct sk_buff *skb;
435 unsigned long flags;
436 struct device *dev;
437 struct ice_pf *pf;
438 u32 reg_ll_high;
439
440 if (!tx->init || tx->last_ll_ts_idx_read < 0)
441 return;
442
443 ptp_port = container_of(tx, struct ice_ptp_port, tx);
444 pf = ptp_port_to_pf(ptp_port);
445 dev = ice_pf_to_dev(pf);
446 params = &pf->hw.ptp.phy.e810;
447
448 ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
449
450 spin_lock_irqsave(&params->atqbal_wq.lock, flags);
451
452 if (!(params->atqbal_flags & ATQBAL_FLAGS_INTR_IN_PROGRESS))
453 dev_dbg(dev, "%s: low latency interrupt request not in progress?\n",
454 __func__);
455
456 /* Read the low 32 bit value */
457 raw_tstamp = rd32(&pf->hw, REG_LL_PROXY_L);
458 /* Read the status together with high TS part */
459 reg_ll_high = rd32(&pf->hw, REG_LL_PROXY_H);
460
461 /* Wake up threads waiting on low latency interface */
462 params->atqbal_flags &= ~ATQBAL_FLAGS_INTR_IN_PROGRESS;
463
464 wake_up_locked(&params->atqbal_wq);
465
466 spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
467
468 /* When the bit is cleared, the TS is ready in the register */
469 if (reg_ll_high & REG_LL_PROXY_H_EXEC) {
470 dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready");
471 return;
472 }
473
474 /* The high 8 bits of the TS are in bits 16:23 of the register */
475 raw_tstamp |= ((u64)FIELD_GET(REG_LL_PROXY_H_TS_HIGH, reg_ll_high)) << 32;
476
477 /* Devices using this interface always verify that the timestamp
478 * differs from the last cached timestamp value.
479 */
480 if (raw_tstamp == tx->tstamps[idx].cached_tstamp)
481 return;
482
483 tx->tstamps[idx].cached_tstamp = raw_tstamp;
484 clear_bit(idx, tx->in_use);
485 skb = tx->tstamps[idx].skb;
486 tx->tstamps[idx].skb = NULL;
487 if (test_and_clear_bit(idx, tx->stale))
488 drop_ts = true;
489
490 if (!skb)
491 return;
492
493 if (drop_ts) {
494 dev_kfree_skb_any(skb);
495 return;
496 }
497
498 /* Extend the timestamp using cached PHC time */
499 tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
500 if (tstamp) {
501 shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
502 ice_trace(tx_tstamp_complete, skb, idx);
503 }
504
505 skb_tstamp_tx(skb, &shhwtstamps);
506 dev_kfree_skb_any(skb);
507 }
508
509 /**
510 * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port
511 * @tx: the PTP Tx timestamp tracker
512 *
513 * Process timestamps captured by the PHY associated with this port. To do
514 * this, loop over each index with a waiting skb.
515 *
516 * If a given index has a valid timestamp, perform the following steps:
517 *
518 * 1) check that the timestamp request is not stale
519 * 2) check that a timestamp is ready and available in the PHY memory bank
520 * 3) read and copy the timestamp out of the PHY register
521 * 4) unlock the index by clearing the associated in_use bit
522 * 5) check if the timestamp is stale, and discard if so
523 * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value
524 * 7) send this 64 bit timestamp to the stack
525 *
526 * Note that we do not hold the tracking lock while reading the Tx timestamp.
527 * This is because reading the timestamp requires taking a mutex that might
528 * sleep.
529 *
530 * The only place where we set in_use is when a new timestamp is initiated
531 * with a slot index. This is only called in the hard xmit routine where an
532 * SKB has a request flag set. The only places where we clear this bit is this
533 * function, or during teardown when the Tx timestamp tracker is being
534 * removed. A timestamp index will never be re-used until the in_use bit for
535 * that index is cleared.
536 *
537 * If a Tx thread starts a new timestamp, we might not begin processing it
538 * right away but we will notice it at the end when we re-queue the task.
539 *
540 * If a Tx thread starts a new timestamp just after this function exits, the
541 * interrupt for that timestamp should re-trigger this function once
542 * a timestamp is ready.
543 *
544 * In cases where the PTP hardware clock was directly adjusted, some
545 * timestamps may not be able to safely use the timestamp extension math. In
546 * this case, software will set the stale bit for any outstanding Tx
547 * timestamps when the clock is adjusted. Then this function will discard
548 * those captured timestamps instead of sending them to the stack.
549 *
550 * If a Tx packet has been waiting for more than 2 seconds, it is not possible
551 * to correctly extend the timestamp using the cached PHC time. It is
552 * extremely unlikely that a packet will ever take this long to timestamp. If
553 * we detect a Tx timestamp request that has waited for this long we assume
554 * the packet will never be sent by hardware and discard it without reading
555 * the timestamp register.
556 */
557 static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
558 {
559 struct ice_ptp_port *ptp_port;
560 unsigned long flags;
561 struct ice_pf *pf;
562 struct ice_hw *hw;
563 u64 tstamp_ready;
564 bool link_up;
565 int err;
566 u8 idx;
567
568 ptp_port = container_of(tx, struct ice_ptp_port, tx);
569 pf = ptp_port_to_pf(ptp_port);
570 hw = &pf->hw;
571
572 /* Read the Tx ready status first */
573 if (tx->has_ready_bitmap) {
574 err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
575 if (err)
576 return;
577 }
578
579 /* Drop packets if the link went down */
580 link_up = ptp_port->link_up;
581
582 for_each_set_bit(idx, tx->in_use, tx->len) {
583 struct skb_shared_hwtstamps shhwtstamps = {};
584 u8 phy_idx = idx + tx->offset;
585 u64 raw_tstamp = 0, tstamp;
586 bool drop_ts = !link_up;
587 struct sk_buff *skb;
588
589 /* Drop packets which have waited for more than 2 seconds */
590 if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
591 drop_ts = true;
592
593 /* Count the number of Tx timestamps that timed out */
594 pf->ptp.tx_hwtstamp_timeouts++;
595 }
596
597 /* Only read a timestamp from the PHY if it's marked as ready
598 * by the tstamp_ready register. This avoids unnecessary
599 * reading of timestamps which are not yet valid. This is
600 * important as we must read all timestamps which are valid
601 * and only timestamps which are valid during each interrupt.
602 * If we do not, the hardware logic for generating a new
603 * interrupt can get stuck on some devices.
604 */
605 if (tx->has_ready_bitmap &&
606 !(tstamp_ready & BIT_ULL(phy_idx))) {
607 if (drop_ts)
608 goto skip_ts_read;
609
610 continue;
611 }
612
613 ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
614
615 err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp);
616 if (err && !drop_ts)
617 continue;
618
619 ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
620
621 /* For PHYs which don't implement a proper timestamp ready
622 * bitmap, verify that the timestamp value is different
623 * from the last cached timestamp. If it is not, skip this for
624 * now assuming it hasn't yet been captured by hardware.
625 */
626 if (!drop_ts && !tx->has_ready_bitmap &&
627 raw_tstamp == tx->tstamps[idx].cached_tstamp)
628 continue;
629
630 /* Discard any timestamp value without the valid bit set */
631 if (!(raw_tstamp & ICE_PTP_TS_VALID))
632 drop_ts = true;
633
634 skip_ts_read:
635 spin_lock_irqsave(&tx->lock, flags);
636 if (!tx->has_ready_bitmap && raw_tstamp)
637 tx->tstamps[idx].cached_tstamp = raw_tstamp;
638 clear_bit(idx, tx->in_use);
639 skb = tx->tstamps[idx].skb;
640 tx->tstamps[idx].skb = NULL;
641 if (test_and_clear_bit(idx, tx->stale))
642 drop_ts = true;
643 spin_unlock_irqrestore(&tx->lock, flags);
644
645 /* It is unlikely but possible that the SKB will have been
646 * flushed at this point due to link change or teardown.
647 */
648 if (!skb)
649 continue;
650
651 if (drop_ts) {
652 dev_kfree_skb_any(skb);
653 continue;
654 }
655
656 /* Extend the timestamp using cached PHC time */
657 tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
658 if (tstamp) {
659 shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
660 ice_trace(tx_tstamp_complete, skb, idx);
661 }
662
663 skb_tstamp_tx(skb, &shhwtstamps);
664 dev_kfree_skb_any(skb);
665 }
666 }
667
668 /**
669 * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device
670 * @pf: Board private structure
671 */
672 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
673 {
674 struct ice_ptp_port *port;
675 unsigned int i;
676
677 mutex_lock(&pf->adapter->ports.lock);
678 list_for_each_entry(port, &pf->adapter->ports.ports, list_node) {
679 struct ice_ptp_tx *tx = &port->tx;
680
681 if (!tx || !tx->init)
682 continue;
683
684 ice_ptp_process_tx_tstamp(tx);
685 }
686 mutex_unlock(&pf->adapter->ports.lock);
687
688 for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) {
689 u64 tstamp_ready;
690 int err;
691
692 /* Read the Tx ready status first */
693 err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
694 if (err)
695 break;
696 else if (tstamp_ready)
697 return ICE_TX_TSTAMP_WORK_PENDING;
698 }
699
700 return ICE_TX_TSTAMP_WORK_DONE;
701 }
702
703 /**
704 * ice_ptp_tx_tstamp - Process Tx timestamps for this function.
705 * @tx: the PTP Tx timestamp tracker to process
706 *
707 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete
708 * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise.
709 */
710 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
711 {
712 bool more_timestamps;
713 unsigned long flags;
714
715 if (!tx->init)
716 return ICE_TX_TSTAMP_WORK_DONE;
717
718 /* Process the Tx timestamp tracker */
719 ice_ptp_process_tx_tstamp(tx);
720
721 /* Check if there are outstanding Tx timestamps */
722 spin_lock_irqsave(&tx->lock, flags);
723 more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);
724 spin_unlock_irqrestore(&tx->lock, flags);
725
726 if (more_timestamps)
727 return ICE_TX_TSTAMP_WORK_PENDING;
728
729 return ICE_TX_TSTAMP_WORK_DONE;
730 }
731
732 /**
733 * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
734 * @tx: Tx tracking structure to initialize
735 *
736 * Assumes that the length has already been initialized. Do not call directly,
737 * use one of the ice_ptp_init_tx_* functions instead.
738 */
739 static int
740 ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
741 {
742 unsigned long *in_use, *stale;
743 struct ice_tx_tstamp *tstamps;
744
745 tstamps = kcalloc(tx->len, sizeof(*tstamps), GFP_KERNEL);
746 in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
747 stale = bitmap_zalloc(tx->len, GFP_KERNEL);
748
749 if (!tstamps || !in_use || !stale) {
750 kfree(tstamps);
751 bitmap_free(in_use);
752 bitmap_free(stale);
753
754 return -ENOMEM;
755 }
756
757 tx->tstamps = tstamps;
758 tx->in_use = in_use;
759 tx->stale = stale;
760 tx->init = 1;
761 tx->last_ll_ts_idx_read = -1;
762
763 spin_lock_init(&tx->lock);
764
765 return 0;
766 }
767
768 /**
769 * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
770 * @pf: Board private structure
771 * @tx: the tracker to flush
772 *
773 * Called during teardown when a Tx tracker is being removed.
774 */
775 static void
776 ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
777 {
778 struct ice_hw *hw = &pf->hw;
779 unsigned long flags;
780 u64 tstamp_ready;
781 int err;
782 u8 idx;
783
784 err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
785 if (err) {
786 dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n",
787 tx->block, err);
788
789 /* If we fail to read the Tx timestamp ready bitmap just
790 * skip clearing the PHY timestamps.
791 */
792 tstamp_ready = 0;
793 }
794
795 for_each_set_bit(idx, tx->in_use, tx->len) {
796 u8 phy_idx = idx + tx->offset;
797 struct sk_buff *skb;
798
799 /* In case this timestamp is ready, we need to clear it. */
800 if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx)))
801 ice_clear_phy_tstamp(hw, tx->block, phy_idx);
802
803 spin_lock_irqsave(&tx->lock, flags);
804 skb = tx->tstamps[idx].skb;
805 tx->tstamps[idx].skb = NULL;
806 clear_bit(idx, tx->in_use);
807 clear_bit(idx, tx->stale);
808 spin_unlock_irqrestore(&tx->lock, flags);
809
810 /* Count the number of Tx timestamps flushed */
811 pf->ptp.tx_hwtstamp_flushed++;
812
813 /* Free the SKB after we've cleared the bit */
814 dev_kfree_skb_any(skb);
815 }
816 }
817
818 /**
819 * ice_ptp_mark_tx_tracker_stale - Mark unfinished timestamps as stale
820 * @tx: the tracker to mark
821 *
822 * Mark currently outstanding Tx timestamps as stale. This prevents sending
823 * their timestamp value to the stack. This is required to prevent extending
824 * the 40bit hardware timestamp incorrectly.
825 *
826 * This should be called when the PTP clock is modified such as after a set
827 * time request.
828 */
829 static void
830 ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
831 {
832 unsigned long flags;
833
834 spin_lock_irqsave(&tx->lock, flags);
835 bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len);
836 spin_unlock_irqrestore(&tx->lock, flags);
837 }
838
839 /**
840 * ice_ptp_flush_all_tx_tracker - Flush all timestamp trackers on this clock
841 * @pf: Board private structure
842 *
843 * Called by the clock owner to flush all the Tx timestamp trackers associated
844 * with the clock.
845 */
846 static void
847 ice_ptp_flush_all_tx_tracker(struct ice_pf *pf)
848 {
849 struct ice_ptp_port *port;
850
851 list_for_each_entry(port, &pf->adapter->ports.ports, list_node)
852 ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx);
853 }
854
855 /**
856 * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
857 * @pf: Board private structure
858 * @tx: Tx tracking structure to release
859 *
860 * Free memory associated with the Tx timestamp tracker.
861 */
862 static void
863 ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
864 {
865 unsigned long flags;
866
867 spin_lock_irqsave(&tx->lock, flags);
868 tx->init = 0;
869 spin_unlock_irqrestore(&tx->lock, flags);
870
871 /* wait for potentially outstanding interrupt to complete */
872 synchronize_irq(pf->oicr_irq.virq);
873
874 ice_ptp_flush_tx_tracker(pf, tx);
875
876 kfree(tx->tstamps);
877 tx->tstamps = NULL;
878
879 bitmap_free(tx->in_use);
880 tx->in_use = NULL;
881
882 bitmap_free(tx->stale);
883 tx->stale = NULL;
884
885 tx->len = 0;
886 }
887
888 /**
889 * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
890 * @pf: Board private structure
891 * @tx: the Tx tracking structure to initialize
892 * @port: the port this structure tracks
893 *
894 * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
895 * the timestamp block is shared for all ports in the same quad. To avoid
896 * ports using the same timestamp index, logically break the block of
897 * registers into chunks based on the port number.
898 *
899 * Return: 0 on success, -ENOMEM when out of memory
900 */
901 static int ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx,
902 u8 port)
903 {
904 tx->block = ICE_GET_QUAD_NUM(port);
905 tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
906 tx->len = INDEX_PER_PORT_E82X;
907 tx->has_ready_bitmap = 1;
908
909 return ice_ptp_alloc_tx_tracker(tx);
910 }
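/* Example of the resulting layout (assuming ICE_PORTS_PER_QUAD is 4 and
 * INDEX_PER_PORT_E82X is 16, i.e. a 64-entry quad split evenly): port 5
 * maps to block (quad) 1 with offset (5 % 4) * 16 = 16, so it owns
 * timestamp indices 16..31 of that quad's shared register block.
 */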
911
912 /**
913 * ice_ptp_init_tx - Initialize tracking for Tx timestamps
914 * @pf: Board private structure
915 * @tx: the Tx tracking structure to initialize
916 * @port: the port this structure tracks
917 *
918 * Initialize the Tx timestamp tracker for this PF. For all PHYs except E82X,
919 * each port has its own block of timestamps, independent of the other ports.
920 *
921 * Return: 0 on success, -ENOMEM when out of memory
922 */
923 static int ice_ptp_init_tx(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
924 {
925 tx->block = port;
926 tx->offset = 0;
927 tx->len = INDEX_PER_PORT;
928
929 /* The E810 PHY does not provide a timestamp ready bitmap. Instead,
930 * verify new timestamps against a cached copy of the last read
931 * timestamp.
932 */
933 tx->has_ready_bitmap = pf->hw.mac_type != ICE_MAC_E810;
934
935 return ice_ptp_alloc_tx_tracker(tx);
936 }
937
938 /**
939 * ice_ptp_update_cached_phctime - Update the cached PHC time values
940 * @pf: Board specific private structure
941 *
942 * This function updates the system time values which are cached in the PF
943 * structure and the Rx rings.
944 *
945 * This function must be called periodically to ensure that the cached value
946 * is never more than 2 seconds old.
947 *
948 * Note that the cached copy in the PF PTP structure is always updated, even
949 * if we can't update the copy in the Rx rings.
950 *
951 * Return:
952 * * 0 - OK, successfully updated
953 * * -EAGAIN - PF was busy, need to reschedule the update
954 */
955 static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
956 {
957 struct device *dev = ice_pf_to_dev(pf);
958 unsigned long update_before;
959 u64 systime;
960 int i;
961
962 update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
963 if (pf->ptp.cached_phc_time &&
964 time_is_before_jiffies(update_before)) {
965 unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;
966
967 dev_warn(dev, "%u msecs passed between updates to cached PHC time\n",
968 jiffies_to_msecs(time_taken));
969 pf->ptp.late_cached_phc_updates++;
970 }
971
972 /* Read the current PHC time */
973 systime = ice_ptp_read_src_clk_reg(pf, NULL);
974
975 /* Update the cached PHC time stored in the PF structure */
976 WRITE_ONCE(pf->ptp.cached_phc_time, systime);
977 WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);
978
979 if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
980 return -EAGAIN;
981
982 ice_for_each_vsi(pf, i) {
983 struct ice_vsi *vsi = pf->vsi[i];
984 int j;
985
986 if (!vsi)
987 continue;
988
989 if (vsi->type != ICE_VSI_PF)
990 continue;
991
992 ice_for_each_rxq(vsi, j) {
993 if (!vsi->rx_rings[j])
994 continue;
995 WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
996 }
997 }
998 clear_bit(ICE_CFG_BUSY, pf->state);
999
1000 return 0;
1001 }
1002
1003 /**
1004 * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
1005 * @pf: Board specific private structure
1006 *
1007 * This function must be called when the cached PHC time is no longer valid,
1008 * such as after a time adjustment. It marks any currently outstanding Tx
1009 * timestamps as stale and updates the cached PHC time for both the PF and Rx
1010 * rings.
1011 *
1012 * If updating the PHC time cannot be done immediately, a warning message is
1013 * logged and the work item is rescheduled to run shortly, to minimize the
1014 * window with an incorrect cached timestamp.
1015 */
1016 static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
1017 {
1018 struct device *dev = ice_pf_to_dev(pf);
1019 int err;
1020
1021 /* Update the cached PHC time immediately if possible, otherwise
1022 * schedule the work item to execute soon.
1023 */
1024 err = ice_ptp_update_cached_phctime(pf);
1025 if (err) {
1026 /* If another thread is updating the Rx rings, we won't
1027 * properly reset them here. This could lead to reporting of
1028 * invalid timestamps, but there isn't much we can do.
1029 */
1030 dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
1031 __func__);
1032
1033 /* Queue the work item to update the Rx rings when possible */
1034 kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
1035 msecs_to_jiffies(10));
1036 }
1037
1038 /* Mark any outstanding timestamps as stale, since they might have
1039 * been captured in hardware before the time update. This could lead
1040 * to us extending them with the wrong cached value resulting in
1041 * incorrect timestamp values.
1042 */
1043 ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx);
1044 }
1045
1046 /**
1047 * ice_ptp_write_init - Set PHC time to provided value
1048 * @pf: Board private structure
1049 * @ts: timespec structure that holds the new time value
1050 *
1051 * Set the PHC time to the specified time provided in the timespec.
1052 */
1053 static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
1054 {
1055 u64 ns = timespec64_to_ns(ts);
1056 struct ice_hw *hw = &pf->hw;
1057
1058 return ice_ptp_init_time(hw, ns);
1059 }
1060
1061 /**
1062 * ice_ptp_write_adj - Adjust PHC clock time atomically
1063 * @pf: Board private structure
1064 * @adj: Adjustment in nanoseconds
1065 *
1066 * Perform an atomic adjustment of the PHC time by the specified number of
1067 * nanoseconds.
1068 */
1069 static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
1070 {
1071 struct ice_hw *hw = &pf->hw;
1072
1073 return ice_ptp_adj_clock(hw, adj);
1074 }
1075
1076 /**
1077 * ice_base_incval - Get base timer increment value
1078 * @pf: Board private structure
1079 *
1080 * Look up the base timer increment value for this device. The base increment
1081 * value is used to define the nominal clock tick rate. This increment value
1082 * is programmed during device initialization. It is also used as the basis
1083 * for calculating adjustments using scaled_ppm.
1084 */
1085 static u64 ice_base_incval(struct ice_pf *pf)
1086 {
1087 struct ice_hw *hw = &pf->hw;
1088 u64 incval;
1089
1090 incval = ice_get_base_incval(hw);
1091
1092 dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
1093 incval);
1094
1095 return incval;
1096 }
1097
1098 /**
1099 * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state
1100 * @port: PTP port for which Tx FIFO is checked
1101 */
1102 static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
1103 {
1104 int offs = port->port_num % ICE_PORTS_PER_QUAD;
1105 int quad = ICE_GET_QUAD_NUM(port->port_num);
1106 struct ice_pf *pf;
1107 struct ice_hw *hw;
1108 u32 val, phy_sts;
1109 int err;
1110
1111 pf = ptp_port_to_pf(port);
1112 hw = &pf->hw;
1113
1114 if (port->tx_fifo_busy_cnt == FIFO_OK)
1115 return 0;
1116
1117 /* need to read FIFO state */
1118 if (offs == 0 || offs == 1)
1119 err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS,
1120 &val);
1121 else
1122 err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS,
1123 &val);
1124
1125 if (err) {
1126 dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
1127 port->port_num, err);
1128 return err;
1129 }
1130
1131 if (offs & 0x1)
1132 phy_sts = FIELD_GET(Q_REG_FIFO13_M, val);
1133 else
1134 phy_sts = FIELD_GET(Q_REG_FIFO02_M, val);
1135
1136 if (phy_sts & FIFO_EMPTY) {
1137 port->tx_fifo_busy_cnt = FIFO_OK;
1138 return 0;
1139 }
1140
1141 port->tx_fifo_busy_cnt++;
1142
1143 dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
1144 port->tx_fifo_busy_cnt, port->port_num);
1145
1146 if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
1147 dev_dbg(ice_pf_to_dev(pf),
1148 "Port %d Tx FIFO still not empty; resetting quad %d\n",
1149 port->port_num, quad);
1150 ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
1151 port->tx_fifo_busy_cnt = FIFO_OK;
1152 return 0;
1153 }
1154
1155 return -EAGAIN;
1156 }
1157
1158 /**
1159 * ice_ptp_wait_for_offsets - Check for valid Tx and Rx offsets
1160 * @work: Pointer to the kthread_work structure for this task
1161 *
1162 * Check whether hardware has completed measuring the Tx and Rx offset values
1163 * used to configure and enable vernier timestamp calibration.
1164 *
1165 * Once the offset in either direction is measured, configure the associated
1166 * registers with the calibrated offset values and enable timestamping. The Tx
1167 * and Rx directions are configured independently as soon as their associated
1168 * offsets are known.
1169 *
1170 * This function reschedules itself until both Tx and Rx calibration have
1171 * completed.
1172 */
1173 static void ice_ptp_wait_for_offsets(struct kthread_work *work)
1174 {
1175 struct ice_ptp_port *port;
1176 struct ice_pf *pf;
1177 struct ice_hw *hw;
1178 int tx_err;
1179 int rx_err;
1180
1181 port = container_of(work, struct ice_ptp_port, ov_work.work);
1182 pf = ptp_port_to_pf(port);
1183 hw = &pf->hw;
1184
1185 if (ice_is_reset_in_progress(pf->state)) {
1186 /* wait for device driver to complete reset */
1187 kthread_queue_delayed_work(pf->ptp.kworker,
1188 &port->ov_work,
1189 msecs_to_jiffies(100));
1190 return;
1191 }
1192
1193 tx_err = ice_ptp_check_tx_fifo(port);
1194 if (!tx_err)
1195 tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num);
1196 rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num);
1197 if (tx_err || rx_err) {
1198 /* Tx and/or Rx offset not yet configured, try again later */
1199 kthread_queue_delayed_work(pf->ptp.kworker,
1200 &port->ov_work,
1201 msecs_to_jiffies(100));
1202 return;
1203 }
1204 }
1205
1206 /**
1207 * ice_ptp_port_phy_stop - Stop timestamping for a PHY port
1208 * @ptp_port: PTP port to stop
1209 */
1210 static int
1211 ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
1212 {
1213 struct ice_pf *pf = ptp_port_to_pf(ptp_port);
1214 u8 port = ptp_port->port_num;
1215 struct ice_hw *hw = &pf->hw;
1216 int err;
1217
1218 mutex_lock(&ptp_port->ps_lock);
1219
1220 switch (hw->mac_type) {
1221 case ICE_MAC_E810:
1222 case ICE_MAC_E830:
1223 err = 0;
1224 break;
1225 case ICE_MAC_GENERIC:
1226 kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
1227
1228 err = ice_stop_phy_timer_e82x(hw, port, true);
1229 break;
1230 case ICE_MAC_GENERIC_3K_E825:
1231 err = ice_stop_phy_timer_eth56g(hw, port, true);
1232 break;
1233 default:
1234 err = -ENODEV;
1235 }
1236 if (err && err != -EBUSY)
1237 dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
1238 port, err);
1239
1240 mutex_unlock(&ptp_port->ps_lock);
1241
1242 return err;
1243 }
1244
1245 /**
1246 * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping
1247 * @ptp_port: PTP port for which the PHY start is set
1248 *
1249 * Start the PHY timestamping block, and initiate Vernier timestamping
1250 * calibration. If timestamping cannot be calibrated (such as if link is down)
1251 * then disable the timestamping block instead.
1252 */
1253 static int
1254 ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
1255 {
1256 struct ice_pf *pf = ptp_port_to_pf(ptp_port);
1257 u8 port = ptp_port->port_num;
1258 struct ice_hw *hw = &pf->hw;
1259 unsigned long flags;
1260 int err;
1261
1262 if (!ptp_port->link_up)
1263 return ice_ptp_port_phy_stop(ptp_port);
1264
1265 mutex_lock(&ptp_port->ps_lock);
1266
1267 switch (hw->mac_type) {
1268 case ICE_MAC_E810:
1269 case ICE_MAC_E830:
1270 err = 0;
1271 break;
1272 case ICE_MAC_GENERIC:
1273 /* Start the PHY timer in Vernier mode */
1274 kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
1275
1276 /* temporarily disable Tx timestamps while calibrating
1277 * PHY offset
1278 */
1279 spin_lock_irqsave(&ptp_port->tx.lock, flags);
1280 ptp_port->tx.calibrating = true;
1281 spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
1282 ptp_port->tx_fifo_busy_cnt = 0;
1283
1284 /* Start the PHY timer in Vernier mode */
1285 err = ice_start_phy_timer_e82x(hw, port);
1286 if (err)
1287 break;
1288
1289 /* Enable Tx timestamps right away */
1290 spin_lock_irqsave(&ptp_port->tx.lock, flags);
1291 ptp_port->tx.calibrating = false;
1292 spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
1293
1294 kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
1295 0);
1296 break;
1297 case ICE_MAC_GENERIC_3K_E825:
1298 err = ice_start_phy_timer_eth56g(hw, port);
1299 break;
1300 default:
1301 err = -ENODEV;
1302 }
1303
1304 if (err)
1305 dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
1306 port, err);
1307
1308 mutex_unlock(&ptp_port->ps_lock);
1309
1310 return err;
1311 }
1312
1313 /**
1314 * ice_ptp_link_change - Reconfigure PTP after link status change
1315 * @pf: Board private structure
1316 * @linkup: Link is up or down
1317 */
1318 void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
1319 {
1320 struct ice_ptp_port *ptp_port;
1321 struct ice_hw *hw = &pf->hw;
1322
1323 if (pf->ptp.state != ICE_PTP_READY)
1324 return;
1325
1326 ptp_port = &pf->ptp.port;
1327
1328 /* Update cached link status for this port immediately */
1329 ptp_port->link_up = linkup;
1330
1331 /* Skip HW writes if reset is in progress */
1332 if (pf->hw.reset_ongoing)
1333 return;
1334
1335 switch (hw->mac_type) {
1336 case ICE_MAC_E810:
1337 case ICE_MAC_E830:
1338 /* Do not reconfigure E810 or E830 PHY */
1339 return;
1340 case ICE_MAC_GENERIC:
1341 case ICE_MAC_GENERIC_3K_E825:
1342 ice_ptp_port_phy_restart(ptp_port);
1343 return;
1344 default:
1345 dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__);
1346 }
1347 }
1348
1349 /**
1350 * ice_ptp_cfg_phy_interrupt - Configure PHY interrupt settings
1351 * @pf: PF private structure
1352 * @ena: bool value to enable or disable interrupt
1353 * @threshold: Minimum number of packets at which intr is triggered
1354 *
1355 * Utility function to configure all the PHY interrupt settings, including
1356 * whether the PHY interrupt is enabled, and what threshold to use. Also
1357 * configures the E82X timestamp owner to react to interrupts from all PHYs.
1358 *
1359 * Return: 0 on success, -EOPNOTSUPP when the PHY model is not recognized,
1360 * or another error code when configuring the PHY interrupt fails
1361 */
1362 static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
1363 {
1364 struct device *dev = ice_pf_to_dev(pf);
1365 struct ice_hw *hw = &pf->hw;
1366
1367 ice_ptp_reset_ts_memory(hw);
1368
1369 switch (hw->mac_type) {
1370 case ICE_MAC_E810:
1371 case ICE_MAC_E830:
1372 return 0;
1373 case ICE_MAC_GENERIC: {
1374 int quad;
1375
1376 for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
1377 quad++) {
1378 int err;
1379
1380 err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
1381 if (err) {
1382 dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
1383 quad, err);
1384 return err;
1385 }
1386 }
1387
1388 return 0;
1389 }
1390 case ICE_MAC_GENERIC_3K_E825: {
1391 int port;
1392
1393 for (port = 0; port < hw->ptp.num_lports; port++) {
1394 int err;
1395
1396 err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
1397 if (err) {
1398 dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
1399 port, err);
1400 return err;
1401 }
1402 }
1403
1404 return 0;
1405 }
1406 case ICE_MAC_UNKNOWN:
1407 default:
1408 return -EOPNOTSUPP;
1409 }
1410 }
1411
1412 /**
1413 * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block
1414 * @pf: Board private structure
1415 */
1416 static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
1417 {
1418 ice_ptp_port_phy_restart(&pf->ptp.port);
1419 }
1420
1421 /**
1422 * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping
1423 * @pf: Board private structure
1424 */
1425 static void ice_ptp_restart_all_phy(struct ice_pf *pf)
1426 {
1427 struct list_head *entry;
1428
1429 list_for_each(entry, &pf->adapter->ports.ports) {
1430 struct ice_ptp_port *port = list_entry(entry,
1431 struct ice_ptp_port,
1432 list_node);
1433
1434 if (port->link_up)
1435 ice_ptp_port_phy_restart(port);
1436 }
1437 }
1438
1439 /**
1440 * ice_ptp_adjfine - Adjust clock increment rate
1441 * @info: the driver's PTP info structure
1442 * @scaled_ppm: Parts per million with 16-bit fractional field
1443 *
1444 * Adjust the frequency of the clock by the indicated scaled ppm from the
1445 * base frequency.
1446 */
1447 static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
1448 {
1449 struct ice_pf *pf = ptp_info_to_pf(info);
1450 struct ice_hw *hw = &pf->hw;
1451 u64 incval;
1452 int err;
1453
1454 incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm);
1455 err = ice_ptp_write_incval_locked(hw, incval);
1456 if (err) {
1457 dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
1458 err);
1459 return -EIO;
1460 }
1461
1462 return 0;
1463 }
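/* Illustrative example (hypothetical numbers): scaled_ppm carries a 16-bit
 * fractional field, so scaled_ppm = 65536 requests a +1 ppm speed-up. With
 * a nominal increment of 0x100000000, adjust_by_scaled_ppm() would return
 * roughly 0x100000000 + (0x100000000 / 1000000), i.e. about 4295 extra
 * increment units, making the clock run one part per million faster.
 */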
1464
1465 /**
1466 * ice_ptp_extts_event - Process PTP external clock event
1467 * @pf: Board private structure
1468 */
1469 void ice_ptp_extts_event(struct ice_pf *pf)
1470 {
1471 struct ptp_clock_event event;
1472 struct ice_hw *hw = &pf->hw;
1473 u8 chan, tmr_idx;
1474 u32 hi, lo;
1475
1476 /* Don't process timestamp events if PTP is not ready */
1477 if (pf->ptp.state != ICE_PTP_READY)
1478 return;
1479
1480 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1481 /* Event time is captured by one of the two matched registers
1482 * GLTSYN_EVNT_L: 32 LSB of sampled time event
1483 * GLTSYN_EVNT_H: 32 MSB of sampled time event
1484 * Event is defined in GLTSYN_EVNT_0 register
1485 */
1486 for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
1487 int pin_desc_idx;
1488
1489 /* Check if channel is enabled */
1490 if (!(pf->ptp.ext_ts_irq & (1 << chan)))
1491 continue;
1492
1493 lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
1494 hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
1495 event.timestamp = (u64)hi << 32 | lo;
1496
1497 /* Add delay compensation */
1498 pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
1499 if (pin_desc_idx >= 0) {
1500 const struct ice_ptp_pin_desc *desc;
1501
1502 desc = &pf->ptp.ice_pin_desc[pin_desc_idx];
1503 event.timestamp -= desc->delay[0];
1504 }
1505
1506 event.type = PTP_CLOCK_EXTTS;
1507 event.index = chan;
1508 pf->ptp.ext_ts_irq &= ~(1 << chan);
1509 ptp_clock_event(pf->ptp.clock, &event);
1510 }
1511 }
1512
1513 /**
1514 * ice_ptp_cfg_extts - Configure EXTTS pin and channel
1515 * @pf: Board private structure
1516 * @rq: External timestamp request
1517 * @on: Enable/disable flag
1518 *
1519 * Configure an external timestamp event on the requested channel.
1520 *
1521 * Return: 0 on success, negative error code otherwise
1522 */
1523 static int ice_ptp_cfg_extts(struct ice_pf *pf, struct ptp_extts_request *rq,
1524 int on)
1525 {
1526 u32 aux_reg, gpio_reg, irq_reg;
1527 struct ice_hw *hw = &pf->hw;
1528 unsigned int chan, gpio_pin;
1529 int pin_desc_idx;
1530 u8 tmr_idx;
1531
1532 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1533 chan = rq->index;
1534
1535 pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
1536 if (pin_desc_idx < 0)
1537 return -EIO;
1538
1539 gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[0];
1540 irq_reg = rd32(hw, PFINT_OICR_ENA);
1541
1542 if (on) {
1543 /* Enable the interrupt */
1544 irq_reg |= PFINT_OICR_TSYN_EVNT_M;
1545 aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;
1546
1547 #define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE BIT(0)
1548 #define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE BIT(1)
1549
1550 /* set event level to requested edge */
1551 if (rq->flags & PTP_FALLING_EDGE)
1552 aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
1553 if (rq->flags & PTP_RISING_EDGE)
1554 aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;
1555
1556 /* Write GPIO CTL reg.
1557 * 0x1 is input sampled by EVENT register(channel)
1558 * + num_in_channels * tmr_idx
1559 */
1560 gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M,
1561 1 + chan + (tmr_idx * 3));
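/* For example, EXTTS channel 2 on owned timer 0 programs
 * PIN_FUNC = 1 + 2 + 0 * 3 = 3, while channel 0 on timer 1
 * programs PIN_FUNC = 1 + 0 + 1 * 3 = 4.
 */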
1562 } else {
1563 bool last_enabled = true;
1564
1565 /* clear the values we set to reset defaults */
1566 aux_reg = 0;
1567 gpio_reg = 0;
1568
1569 for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++)
1570 if ((pf->ptp.extts_rqs[i].flags &
1571 PTP_ENABLE_FEATURE) &&
1572 i != chan) {
1573 last_enabled = false;
1574 }
1575
1576 if (last_enabled)
1577 irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
1578 }
1579
1580 wr32(hw, PFINT_OICR_ENA, irq_reg);
1581 wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
1582 wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);
1583
1584 return 0;
1585 }
1586
1587 /**
1588 * ice_ptp_disable_all_extts - Disable all EXTTS channels
1589 * @pf: Board private structure
1590 */
1591 static void ice_ptp_disable_all_extts(struct ice_pf *pf)
1592 {
1593 for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts ; i++)
1594 if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE)
1595 ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i],
1596 false);
1597
1598 synchronize_irq(pf->oicr_irq.virq);
1599 }
1600
1601 /**
1602 * ice_ptp_enable_all_extts - Enable all EXTTS channels
1603 * @pf: Board private structure
1604 *
1605 * Called during reset to restore user configuration.
1606 */
1607 static void ice_ptp_enable_all_extts(struct ice_pf *pf)
1608 {
1609 for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts ; i++)
1610 if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE)
1611 ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i],
1612 true);
1613 }
1614
1615 /**
1616 * ice_ptp_write_perout - Write periodic wave parameters to HW
1617 * @hw: pointer to the HW struct
1618 * @chan: target channel
1619 * @gpio_pin: target GPIO pin
1620 * @start: target time to start periodic output
1621 * @period: target period
1622 *
1623 * Return: 0 on success, negative error code otherwise
1624 */
1625 static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
1626 unsigned int gpio_pin, u64 start, u64 period)
1627 {
1628
1629 u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1630 u32 val = 0;
1631
1632 /* 0. Reset mode & out_en in AUX_OUT */
1633 wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
1634
1635 if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) {
1636 int err;
1637
1638 /* Enable/disable CGU 1PPS output for E825C */
1639 err = ice_tspll_cfg_pps_out_e825c(hw, !!period);
1640 if (err)
1641 return err;
1642 }
1643
1644 /* 1. Write perout with half of required period value.
1645 * HW toggles output when source clock hits the TGT and then adds
1646 * GLTSYN_CLKO value to the target, so it ends up with 50% duty cycle.
1647 */
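/* For example, a requested 1 Hz output (period = 1000000000 ns) programs
 * GLTSYN_CLKO with half that value (500000000), so the output toggles at
 * each half-period boundary and produces a square wave with a 50% duty
 * cycle.
 */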
1648 period >>= 1;
1649
1650 /* For proper operation, GLTSYN_CLKO must be larger than the clock tick, and
1651 * the period has to fit in a 32 bit register.
1652 */
1653 #define MIN_PULSE 3
1654 if (!!period && (period <= MIN_PULSE || period > U32_MAX)) {
1655 dev_err(ice_hw_to_dev(hw), "CLK period ticks must be >= %d && <= 2^32",
1656 MIN_PULSE);
1657 return -EIO;
1658 }
1659
1660 wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));
1661
1662 /* 2. Write TARGET time */
1663 wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start));
1664 wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start));
1665
1666 /* 3. Write AUX_OUT register */
1667 if (!!period)
1668 val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
1669 wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);
1670
1671 /* 4. write GPIO CTL reg */
1672 val = GLGEN_GPIO_CTL_PIN_DIR_M;
1673 if (!!period)
1674 val |= FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M,
1675 8 + chan + (tmr_idx * 4));
1676
1677 wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
1678 ice_flush(hw);
1679
1680 return 0;
1681 }
1682
1683 /**
1684 * ice_ptp_cfg_perout - Configure clock to generate periodic wave
1685 * @pf: Board private structure
1686 * @rq: Periodic output request
1687 * @on: Enable/disable flag
1688 *
1689 * Configure the internal clock generator modules to generate the clock wave of
1690 * specified period.
1691 *
1692 * Return: 0 on success, negative error code otherwise
1693 */
1694 static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
1695 int on)
1696 {
1697 unsigned int gpio_pin, prop_delay_ns;
1698 u64 clk, period, start, phase;
1699 struct ice_hw *hw = &pf->hw;
1700 int pin_desc_idx;
1701
1702 pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_PEROUT, rq->index);
1703 if (pin_desc_idx < 0)
1704 return -EIO;
1705
1706 gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1];
1707 prop_delay_ns = pf->ptp.ice_pin_desc[pin_desc_idx].delay[1];
1708 period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec;
1709
1710 /* If we're disabling the output or period is 0, clear out CLKO and TGT
1711 * and keep output level low.
1712 */
1713 if (!on || !period)
1714 return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0);
1715
1716 if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 &&
1717 period != NSEC_PER_SEC && hw->mac_type == ICE_MAC_GENERIC) {
1718 dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n");
1719 return -EOPNOTSUPP;
1720 }
1721
1722 if (period & 0x1) {
1723 dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
1724 return -EIO;
1725 }
1726
1727 start = rq->start.sec * NSEC_PER_SEC + rq->start.nsec;
1728
1729 /* If PTP_PEROUT_PHASE is set, rq has phase instead of start time */
1730 if (rq->flags & PTP_PEROUT_PHASE)
1731 phase = start;
1732 else
1733 div64_u64_rem(start, period, &phase);
1734
1735 /* If we were given only a phase, or the start time is in the past, start
1736 * the timer at the next period boundary that preserves the phase and is at
1737 * least 0.5 seconds from now, so we have time to write it to HW.
1738 */
1739 clk = ice_ptp_read_src_clk_reg(pf, NULL) + NSEC_PER_MSEC * 500;
1740 if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns)
1741 start = div64_u64(clk + period - 1, period) * period + phase;
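/* Worked example (hypothetical values): with period = 1 s, phase = 0.25 s
 * and clk = 12.6 s (current PHC time plus the 0.5 s margin), the division
 * rounds up to 13 s and start becomes 13.25 s, the next period boundary
 * that preserves the requested phase.
 */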
1742
1743 /* Compensate for propagation delay from the generator to the pin. */
1744 start -= prop_delay_ns;
1745
1746 return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period);
1747 }
1748
1749 /**
1750 * ice_ptp_disable_all_perout - Disable all currently configured outputs
1751 * @pf: Board private structure
1752 *
1753 * Disable all currently configured clock outputs. This is necessary before
1754 * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_perout to
1755 * re-enable the clocks again.
1756 */
1757 static void ice_ptp_disable_all_perout(struct ice_pf *pf)
1758 {
1759 for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++)
1760 if (pf->ptp.perout_rqs[i].period.sec ||
1761 pf->ptp.perout_rqs[i].period.nsec)
1762 ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i],
1763 false);
1764 }
1765
1766 /**
1767 * ice_ptp_enable_all_perout - Enable all configured periodic clock outputs
1768 * @pf: Board private structure
1769 *
1770 * Enable all currently configured clock outputs. Use this after
1771 * ice_ptp_disable_all_perout to reconfigure the output signals according to
1772 * their configuration.
1773 */
1774 static void ice_ptp_enable_all_perout(struct ice_pf *pf)
1775 {
1776 for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++)
1777 if (pf->ptp.perout_rqs[i].period.sec ||
1778 pf->ptp.perout_rqs[i].period.nsec)
1779 ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i],
1780 true);
1781 }
1782
1783 /**
1784 * ice_verify_pin - verify if pin supports requested pin function
1785 * @info: the driver's PTP info structure
1786 * @pin: Pin index
1787 * @func: Assigned function
1788 * @chan: Assigned channel
1789 *
1790 * Return: 0 on success, -EOPNOTSUPP when function is not supported.
1791 */
1792 static int ice_verify_pin(struct ptp_clock_info *info, unsigned int pin,
1793 enum ptp_pin_function func, unsigned int chan)
1794 {
1795 struct ice_pf *pf = ptp_info_to_pf(info);
1796 const struct ice_ptp_pin_desc *pin_desc;
1797
1798 pin_desc = &pf->ptp.ice_pin_desc[pin];
1799
1800 /* Is assigned function allowed? */
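	/* gpio[0] is the input (EXTTS) GPIO and gpio[1] the output (PEROUT)
	 * GPIO for this pin; -1 marks a direction as unsupported.
	 */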
1801 switch (func) {
1802 case PTP_PF_EXTTS:
1803 if (pin_desc->gpio[0] < 0)
1804 return -EOPNOTSUPP;
1805 break;
1806 case PTP_PF_PEROUT:
1807 if (pin_desc->gpio[1] < 0)
1808 return -EOPNOTSUPP;
1809 break;
1810 case PTP_PF_NONE:
1811 break;
1812 case PTP_PF_PHYSYNC:
1813 default:
1814 return -EOPNOTSUPP;
1815 }
1816
1817 return 0;
1818 }
1819
1820 /**
1821 * ice_ptp_gpio_enable - Enable/disable ancillary features of PHC
1822 * @info: The driver's PTP info structure
1823 * @rq: The requested feature to change
1824 * @on: Enable/disable flag
1825 *
1826 * Return: 0 on success, negative error code otherwise
1827 */
1828 static int ice_ptp_gpio_enable(struct ptp_clock_info *info,
1829 struct ptp_clock_request *rq, int on)
1830 {
1831 struct ice_pf *pf = ptp_info_to_pf(info);
1832 int err;
1833
1834 switch (rq->type) {
1835 case PTP_CLK_REQ_PEROUT:
1836 {
1837 struct ptp_perout_request *cached =
1838 &pf->ptp.perout_rqs[rq->perout.index];
1839
1840 err = ice_ptp_cfg_perout(pf, &rq->perout, on);
1841 if (!err) {
1842 *cached = rq->perout;
1843 } else {
1844 cached->period.sec = 0;
1845 cached->period.nsec = 0;
1846 }
1847 return err;
1848 }
1849 case PTP_CLK_REQ_EXTTS:
1850 {
1851 struct ptp_extts_request *cached =
1852 &pf->ptp.extts_rqs[rq->extts.index];
1853
1854 err = ice_ptp_cfg_extts(pf, &rq->extts, on);
1855 if (!err)
1856 *cached = rq->extts;
1857 else
1858 cached->flags &= ~PTP_ENABLE_FEATURE;
1859 return err;
1860 }
1861 default:
1862 return -EOPNOTSUPP;
1863 }
1864 }
1865
1866 /**
1867 * ice_ptp_gettimex64 - Get the time of the clock
1868 * @info: the driver's PTP info structure
1869 * @ts: timespec64 structure to hold the current time value
1870 * @sts: Optional parameter for holding a pair of system timestamps from
1871 * the system clock. Will be ignored if NULL is given.
1872 *
1873 * Read the device clock and return the correct value in ns, after converting it
1874 * into a timespec struct.
1875 */
1876 static int
1877 ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
1878 struct ptp_system_timestamp *sts)
1879 {
1880 struct ice_pf *pf = ptp_info_to_pf(info);
1881 u64 time_ns;
1882
1883 time_ns = ice_ptp_read_src_clk_reg(pf, sts);
1884 *ts = ns_to_timespec64(time_ns);
1885 return 0;
1886 }
1887
1888 /**
1889 * ice_ptp_settime64 - Set the time of the clock
1890 * @info: the driver's PTP info structure
1891 * @ts: timespec64 structure that holds the new time value
1892 *
1893 * Set the device clock to the user input value. The conversion from timespec
1894 * to ns happens in the write function.
1895 */
1896 static int
1897 ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
1898 {
1899 struct ice_pf *pf = ptp_info_to_pf(info);
1900 struct timespec64 ts64 = *ts;
1901 struct ice_hw *hw = &pf->hw;
1902 int err;
1903
1904 /* For Vernier mode on E82X, we need to recalibrate after new settime.
1905 * Start with marking timestamps as invalid.
1906 */
1907 if (hw->mac_type == ICE_MAC_GENERIC) {
1908 err = ice_ptp_clear_phy_offset_ready_e82x(hw);
1909 if (err)
1910 dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
1911 }
1912
1913 if (!ice_ptp_lock(hw)) {
1914 err = -EBUSY;
1915 goto exit;
1916 }
1917
1918 /* Disable periodic outputs */
1919 ice_ptp_disable_all_perout(pf);
1920
1921 err = ice_ptp_write_init(pf, &ts64);
1922 ice_ptp_unlock(hw);
1923
1924 if (!err)
1925 ice_ptp_reset_cached_phctime(pf);
1926
1927 /* Reenable periodic outputs */
1928 ice_ptp_enable_all_perout(pf);
1929
1930 /* Recalibrate and re-enable timestamp blocks for E822/E823 */
1931 if (hw->mac_type == ICE_MAC_GENERIC)
1932 ice_ptp_restart_all_phy(pf);
1933 exit:
1934 if (err) {
1935 dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
1936 return err;
1937 }
1938
1939 return 0;
1940 }
1941
1942 /**
1943 * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
1944 * @info: the driver's PTP info structure
1945 * @delta: Offset in nanoseconds to adjust the time by
1946 */
1947 static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
1948 {
1949 struct timespec64 now, then;
1950 int ret;
1951
1952 then = ns_to_timespec64(delta);
1953 ret = ice_ptp_gettimex64(info, &now, NULL);
1954 if (ret)
1955 return ret;
1956 now = timespec64_add(now, then);
1957
1958 return ice_ptp_settime64(info, (const struct timespec64 *)&now);
1959 }
1960
1961 /**
1962 * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta
1963 * @info: the driver's PTP info structure
1964 * @delta: Offset in nanoseconds to adjust the time by
1965 */
1966 static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
1967 {
1968 struct ice_pf *pf = ptp_info_to_pf(info);
1969 struct ice_hw *hw = &pf->hw;
1970 struct device *dev;
1971 int err;
1972
1973 dev = ice_pf_to_dev(pf);
1974
1975 /* Hardware only supports atomic adjustments using signed 32-bit
1976 * integers. For any adjustment outside this range, perform
1977 * a non-atomic get->adjust->set flow.
1978 */
1979 if (delta > S32_MAX || delta < S32_MIN) {
1980 dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta);
1981 return ice_ptp_adjtime_nonatomic(info, delta);
1982 }
1983
1984 if (!ice_ptp_lock(hw)) {
1985 dev_err(dev, "PTP failed to acquire semaphore in adjtime\n");
1986 return -EBUSY;
1987 }
1988
1989 /* Disable periodic outputs */
1990 ice_ptp_disable_all_perout(pf);
1991
1992 err = ice_ptp_write_adj(pf, delta);
1993
1994 /* Reenable periodic outputs */
1995 ice_ptp_enable_all_perout(pf);
1996
1997 ice_ptp_unlock(hw);
1998
1999 if (err) {
2000 dev_err(dev, "PTP failed to adjust time, err %d\n", err);
2001 return err;
2002 }
2003
2004 ice_ptp_reset_cached_phctime(pf);
2005
2006 return 0;
2007 }
2008
2009 /**
2010 * struct ice_crosststamp_cfg - Device cross timestamp configuration
2011 * @lock_reg: The hardware semaphore lock to use
2012 * @lock_busy: Bit in the semaphore lock indicating the lock is busy
2013 * @ctl_reg: The hardware register to request cross timestamp
2014 * @ctl_active: Bit in the control register to request cross timestamp
2015 * @art_time_l: Lower 32-bits of ART system time
2016 * @art_time_h: Upper 32-bits of ART system time
2017 * @dev_time_l: Lower 32-bits of device time (per timer index)
2018 * @dev_time_h: Upper 32-bits of device time (per timer index)
2019 */
2020 struct ice_crosststamp_cfg {
2021 /* HW semaphore lock register */
2022 u32 lock_reg;
2023 u32 lock_busy;
2024
2025 /* Capture control register */
2026 u32 ctl_reg;
2027 u32 ctl_active;
2028
2029 /* Time storage */
2030 u32 art_time_l;
2031 u32 art_time_h;
2032 u32 dev_time_l[2];
2033 u32 dev_time_h[2];
2034 };
2035
2036 static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e82x = {
2037 .lock_reg = PFHH_SEM,
2038 .lock_busy = PFHH_SEM_BUSY_M,
2039 .ctl_reg = GLHH_ART_CTL,
2040 .ctl_active = GLHH_ART_CTL_ACTIVE_M,
2041 .art_time_l = GLHH_ART_TIME_L,
2042 .art_time_h = GLHH_ART_TIME_H,
2043 .dev_time_l[0] = GLTSYN_HHTIME_L(0),
2044 .dev_time_h[0] = GLTSYN_HHTIME_H(0),
2045 .dev_time_l[1] = GLTSYN_HHTIME_L(1),
2046 .dev_time_h[1] = GLTSYN_HHTIME_H(1),
2047 };
2048
2049 #ifdef CONFIG_ICE_HWTS
2050 static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e830 = {
2051 .lock_reg = E830_PFPTM_SEM,
2052 .lock_busy = E830_PFPTM_SEM_BUSY_M,
2053 .ctl_reg = E830_GLPTM_ART_CTL,
2054 .ctl_active = E830_GLPTM_ART_CTL_ACTIVE_M,
2055 .art_time_l = E830_GLPTM_ART_TIME_L,
2056 .art_time_h = E830_GLPTM_ART_TIME_H,
2057 .dev_time_l[0] = E830_GLTSYN_PTMTIME_L(0),
2058 .dev_time_h[0] = E830_GLTSYN_PTMTIME_H(0),
2059 .dev_time_l[1] = E830_GLTSYN_PTMTIME_L(1),
2060 .dev_time_h[1] = E830_GLTSYN_PTMTIME_H(1),
2061 };
2062
2063 #endif /* CONFIG_ICE_HWTS */
2064 /**
2065 * struct ice_crosststamp_ctx - Device cross timestamp context
2066 * @snapshot: snapshot of system clocks for historic interpolation
2067 * @pf: pointer to the PF private structure
2068 * @cfg: pointer to hardware configuration for cross timestamp
2069 */
2070 struct ice_crosststamp_ctx {
2071 struct system_time_snapshot snapshot;
2072 struct ice_pf *pf;
2073 const struct ice_crosststamp_cfg *cfg;
2074 };
2075
2076 /**
2077 * ice_capture_crosststamp - Capture a device/system cross timestamp
2078 * @device: Current device time
2079 * @system: System counter value read synchronously with device time
2080 * @__ctx: Context passed from ice_ptp_getcrosststamp
2081 *
2082 * Read device and system (ART) clock simultaneously and return the corrected
2083 * clock values in ns.
2084 *
2085 * Return: zero on success, or a negative error code on failure.
2086 */
2087 static int ice_capture_crosststamp(ktime_t *device,
2088 struct system_counterval_t *system,
2089 void *__ctx)
2090 {
2091 struct ice_crosststamp_ctx *ctx = __ctx;
2092 const struct ice_crosststamp_cfg *cfg;
2093 u32 lock, ctl, ts_lo, ts_hi, tmr_idx;
2094 struct ice_pf *pf;
2095 struct ice_hw *hw;
2096 int err;
2097 u64 ts;
2098
2099 cfg = ctx->cfg;
2100 pf = ctx->pf;
2101 hw = &pf->hw;
2102
2103 tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
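	/* Only timer indices 0 and 1 have capture registers in the config
	 * above (dev_time_l/dev_time_h are two-entry arrays).
	 */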
2104 if (tmr_idx > 1)
2105 return -EINVAL;
2106
2107 /* Poll until we obtain the cross-timestamp hardware semaphore */
2108 err = rd32_poll_timeout(hw, cfg->lock_reg, lock,
2109 !(lock & cfg->lock_busy),
2110 10 * USEC_PER_MSEC, 50 * USEC_PER_MSEC);
2111 if (err) {
2112 dev_err(ice_pf_to_dev(pf), "PTP failed to get cross timestamp lock\n");
2113 return -EBUSY;
2114 }
2115
2116 /* Snapshot system time for historic interpolation */
2117 ktime_get_snapshot(&ctx->snapshot);
2118
2119 /* Program cmd to master timer */
2120 ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
2121
2122 /* Start the ART and device clock sync sequence */
2123 ctl = rd32(hw, cfg->ctl_reg);
2124 ctl |= cfg->ctl_active;
2125 wr32(hw, cfg->ctl_reg, ctl);
2126
2127 /* Poll until hardware completes the capture */
2128 err = rd32_poll_timeout(hw, cfg->ctl_reg, ctl, !(ctl & cfg->ctl_active),
2129 5, 20 * USEC_PER_MSEC);
2130 if (err)
2131 goto err_timeout;
2132
2133 /* Read ART system time */
2134 ts_lo = rd32(hw, cfg->art_time_l);
2135 ts_hi = rd32(hw, cfg->art_time_h);
2136 ts = ((u64)ts_hi << 32) | ts_lo;
2137 system->cycles = ts;
2138 system->cs_id = CSID_X86_ART;
2139 system->use_nsecs = true;
2140
2141 /* Read Device source clock time */
2142 ts_lo = rd32(hw, cfg->dev_time_l[tmr_idx]);
2143 ts_hi = rd32(hw, cfg->dev_time_h[tmr_idx]);
2144 ts = ((u64)ts_hi << 32) | ts_lo;
2145 *device = ns_to_ktime(ts);
2146
2147 err_timeout:
2148 /* Clear the master timer */
2149 ice_ptp_src_cmd(hw, ICE_PTP_NOP);
2150
2151 /* Release HW lock */
2152 lock = rd32(hw, cfg->lock_reg);
2153 lock &= ~cfg->lock_busy;
2154 wr32(hw, cfg->lock_reg, lock);
2155
2156 return err;
2157 }
2158
2159 /**
2160 * ice_ptp_getcrosststamp - Capture a device cross timestamp
2161 * @info: the driver's PTP info structure
2162 * @cts: The memory to fill the cross timestamp info
2163 *
2164 * Capture a cross timestamp between the ART and the device PTP hardware
2165 * clock. Fill the cross timestamp information and report it back to the
2166 * caller.
2167 *
2168 * In order to correctly correlate the ART timestamp back to the TSC time, the
2169 * CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
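 *
 * In the kernel's PTP framework this callback is what services the
 * PTP_SYS_OFFSET_PRECISE ioctl.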
2170 *
2171 * Return: zero on success, or a negative error code on failure.
2172 */
2173 static int ice_ptp_getcrosststamp(struct ptp_clock_info *info,
2174 struct system_device_crosststamp *cts)
2175 {
2176 struct ice_pf *pf = ptp_info_to_pf(info);
2177 struct ice_crosststamp_ctx ctx = {
2178 .pf = pf,
2179 };
2180
2181 switch (pf->hw.mac_type) {
2182 case ICE_MAC_GENERIC:
2183 case ICE_MAC_GENERIC_3K_E825:
2184 ctx.cfg = &ice_crosststamp_cfg_e82x;
2185 break;
2186 #ifdef CONFIG_ICE_HWTS
2187 case ICE_MAC_E830:
2188 ctx.cfg = &ice_crosststamp_cfg_e830;
2189 break;
2190 #endif /* CONFIG_ICE_HWTS */
2191 default:
2192 return -EOPNOTSUPP;
2193 }
2194
2195 return get_device_system_crosststamp(ice_capture_crosststamp, &ctx,
2196 &ctx.snapshot, cts);
2197 }
2198
2199 /**
2200 * ice_ptp_hwtstamp_get - interface to read the timestamping config
2201 * @netdev: Pointer to network interface device structure
2202 * @config: Timestamping configuration structure
2203 *
2204 * Copy the timestamping config to the user buffer
2205 */
2206 int ice_ptp_hwtstamp_get(struct net_device *netdev,
2207 struct kernel_hwtstamp_config *config)
2208 {
2209 struct ice_netdev_priv *np = netdev_priv(netdev);
2210 struct ice_pf *pf = np->vsi->back;
2211
2212 if (pf->ptp.state != ICE_PTP_READY)
2213 return -EIO;
2214
2215 *config = pf->ptp.tstamp_config;
2216
2217 return 0;
2218 }
2219
2220 /**
2221 * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
2222 * @pf: Board private structure
2223 * @config: hwtstamp settings requested or saved
2224 */
2225 static int ice_ptp_set_timestamp_mode(struct ice_pf *pf,
2226 struct kernel_hwtstamp_config *config)
2227 {
2228 switch (config->tx_type) {
2229 case HWTSTAMP_TX_OFF:
2230 pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
2231 break;
2232 case HWTSTAMP_TX_ON:
2233 pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
2234 break;
2235 default:
2236 return -ERANGE;
2237 }
2238
2239 switch (config->rx_filter) {
2240 case HWTSTAMP_FILTER_NONE:
2241 pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
2242 break;
2243 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2244 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2245 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2246 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2247 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2248 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2249 case HWTSTAMP_FILTER_PTP_V2_SYNC:
2250 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2251 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2252 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2253 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2254 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2255 case HWTSTAMP_FILTER_NTP_ALL:
2256 case HWTSTAMP_FILTER_ALL:
2257 pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
2258 break;
2259 default:
2260 return -ERANGE;
2261 }
2262
2263 /* Immediately update the device timestamping mode */
2264 ice_ptp_restore_timestamp_mode(pf);
2265
2266 return 0;
2267 }
2268
2269 /**
2270 * ice_ptp_hwtstamp_set - interface to control the timestamping
2271 * @netdev: Pointer to network interface device structure
2272 * @config: Timestamping configuration structure
2273 * @extack: Netlink extended ack structure for error reporting
2274 *
2275 * Apply the requested timestamping configuration and store it
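 *
 * A minimal userspace sketch of exercising this path through the standard
 * SIOCSHWTSTAMP ioctl (the socket fd and "eth0" name are placeholders;
 * assumes linux/net_tstamp.h, linux/sockios.h and net/if.h):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);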
2276 */
2277 int ice_ptp_hwtstamp_set(struct net_device *netdev,
2278 struct kernel_hwtstamp_config *config,
2279 struct netlink_ext_ack *extack)
2280 {
2281 struct ice_netdev_priv *np = netdev_priv(netdev);
2282 struct ice_pf *pf = np->vsi->back;
2283 int err;
2284
2285 if (pf->ptp.state != ICE_PTP_READY)
2286 return -EAGAIN;
2287
2288 err = ice_ptp_set_timestamp_mode(pf, config);
2289 if (err)
2290 return err;
2291
2292 /* Return the actual configuration set */
2293 *config = pf->ptp.tstamp_config;
2294
2295 return 0;
2296 }
2297
2298 /**
2299 * ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns
2300 * @rx_desc: Receive descriptor
2301 * @pkt_ctx: Packet context to get the cached time
2302 *
2303 * The driver receives a notification in the receive descriptor with the timestamp.
2304 */
2305 u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
2306 const struct ice_pkt_ctx *pkt_ctx)
2307 {
2308 u64 ts_ns, cached_time;
2309 u32 ts_high;
2310
2311 if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
2312 return 0;
2313
2314 cached_time = READ_ONCE(pkt_ctx->cached_phctime);
2315
2316 /* Do not report a timestamp if we don't have a cached PHC time */
2317 if (!cached_time)
2318 return 0;
2319
2320 /* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
2321 * PHC value, rather than accessing the PF. This also allows us to
2322 	 * simply pass the upper 32 bits of nanoseconds directly. Calling
2323 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these
2324 * bits itself.
2325 */
2326 ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
2327 ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);
2328
2329 return ts_ns;
2330 }
2331
2332 /**
2333 * ice_ptp_setup_pin_cfg - setup PTP pin_config structure
2334 * @pf: Board private structure
2335 */
2336 static void ice_ptp_setup_pin_cfg(struct ice_pf *pf)
2337 {
2338 for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) {
2339 const struct ice_ptp_pin_desc *desc = &pf->ptp.ice_pin_desc[i];
2340 struct ptp_pin_desc *pin = &pf->ptp.pin_desc[i];
2341 const char *name;
2342
2343 if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
2344 name = ice_pin_names[desc->name_idx];
2345 else
2346 name = ice_pin_names_dpll[desc->name_idx];
2347
2348 strscpy(pin->name, name, sizeof(pin->name));
2349
2350 pin->index = i;
2351 }
2352
2353 pf->ptp.info.pin_config = pf->ptp.pin_desc;
2354 }
2355
2356 /**
2357 * ice_ptp_disable_pins - Disable PTP pins
2358 * @pf: pointer to the PF structure
2359 *
2360 * Disable OS access to the pins. Called to clear out the OS indications of
2361 * pin support when we fail to set up the pin array.
2362 */
2363 static void ice_ptp_disable_pins(struct ice_pf *pf)
2364 {
2365 struct ptp_clock_info *info = &pf->ptp.info;
2366
2367 dev_warn(ice_pf_to_dev(pf), "Failed to configure PTP pin control\n");
2368
2369 info->enable = NULL;
2370 info->verify = NULL;
2371 info->n_pins = 0;
2372 info->n_ext_ts = 0;
2373 info->n_per_out = 0;
2374 }
2375
2376 /**
2377 * ice_ptp_parse_sdp_entries - update ice_ptp_pin_desc structure from NVM
2378 * @pf: pointer to the PF structure
2379 * @entries: SDP connection section from NVM
2380 * @num_entries: number of valid entries in sdp_entries
2381 * @pins: PTP pins array to update
2382 *
2383 * Return: 0 on success, negative error code otherwise.
2384 */
2385 static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries,
2386 unsigned int num_entries,
2387 struct ice_ptp_pin_desc *pins)
2388 {
2389 unsigned int n_pins = 0;
2390 unsigned int i;
2391
2392 /* Setup ice_pin_desc array */
2393 for (i = 0; i < ICE_N_PINS_MAX; i++) {
2394 pins[i].name_idx = -1;
2395 pins[i].gpio[0] = -1;
2396 pins[i].gpio[1] = -1;
2397 }
2398
2399 for (i = 0; i < num_entries; i++) {
2400 u16 entry = le16_to_cpu(entries[i]);
2401 DECLARE_BITMAP(bitmap, GPIO_NA);
2402 unsigned int idx;
2403 bool dir;
2404 u16 gpio;
2405
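		/* The AC pin bitmap from the NVM entry is only GPIO_NA bits
		 * wide, so it fits in the first bitmap word and can be
		 * assigned directly.
		 */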
2406 *bitmap = FIELD_GET(ICE_AQC_NVM_SDP_AC_PIN_M, entry);
2407
2408 /* Check if entry's pin bitmap is valid. */
2409 if (bitmap_empty(bitmap, GPIO_NA))
2410 continue;
2411
2412 dir = !!FIELD_GET(ICE_AQC_NVM_SDP_AC_DIR_M, entry);
2413 gpio = FIELD_GET(ICE_AQC_NVM_SDP_AC_SDP_NUM_M, entry);
2414
2415 for (idx = 0; idx < ICE_N_PINS_MAX; idx++) {
2416 if (pins[idx].name_idx == gpio)
2417 break;
2418 }
2419
2420 if (idx == ICE_N_PINS_MAX) {
2421 /* Pin not found, setup its entry and name */
2422 idx = n_pins++;
2423 pins[idx].name_idx = gpio;
2424 }
2425 pins[idx].gpio[dir] = gpio;
2426 }
2427
2428 for (i = 0; i < n_pins; i++) {
2429 dev_dbg(ice_pf_to_dev(pf),
2430 "NVM pin entry[%d] : name_idx %d gpio_out %d gpio_in %d\n",
2431 i, pins[i].name_idx, pins[i].gpio[1], pins[i].gpio[0]);
2432 }
2433
2434 pf->ptp.info.n_pins = n_pins;
2435 return 0;
2436 }
2437
2438 /**
2439 * ice_ptp_set_funcs_e82x - Set specialized functions for E82X support
2440 * @pf: Board private structure
2441 *
2442 * Assign functions to the PTP capabilities structure for E82X devices.
2443 * Functions which operate across all device families should be set directly
2444 * in ice_ptp_set_caps. Only add functions here which are distinct for E82X
2445 * devices.
2446 */
2447 static void ice_ptp_set_funcs_e82x(struct ice_pf *pf)
2448 {
2449 pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
2450
2451 if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825) {
2452 pf->ptp.ice_pin_desc = ice_pin_desc_e825c;
2453 pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e825c);
2454 } else {
2455 pf->ptp.ice_pin_desc = ice_pin_desc_e82x;
2456 pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e82x);
2457 }
2458 ice_ptp_setup_pin_cfg(pf);
2459 }
2460
2461 /**
2462 * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
2463 * @pf: Board private structure
2464 *
2465 * Assign functions to the PTP capabilities structure for E810 devices.
2466 * Functions which operate across all device families should be set directly
2467 * in ice_ptp_set_caps. Only add functions here which are distinct for E810
2468 * devices.
2469 */
2470 static void ice_ptp_set_funcs_e810(struct ice_pf *pf)
2471 {
2472 __le16 entries[ICE_AQC_NVM_SDP_AC_MAX_SIZE];
2473 struct ice_ptp_pin_desc *desc = NULL;
2474 struct ice_ptp *ptp = &pf->ptp;
2475 unsigned int num_entries;
2476 int err;
2477
2478 err = ice_ptp_read_sdp_ac(&pf->hw, entries, &num_entries);
2479 if (err) {
2480 /* SDP section does not exist in NVM or is corrupted */
2481 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
2482 ptp->ice_pin_desc = ice_pin_desc_dpll;
2483 ptp->info.n_pins = ARRAY_SIZE(ice_pin_desc_dpll);
2484 } else {
2485 pf->ptp.ice_pin_desc = ice_pin_desc_e810;
2486 pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e810);
2487 }
2488 err = 0;
2489 } else {
2490 desc = devm_kcalloc(ice_pf_to_dev(pf), ICE_N_PINS_MAX,
2491 sizeof(struct ice_ptp_pin_desc),
2492 GFP_KERNEL);
2493 if (!desc)
2494 goto err;
2495
2496 err = ice_ptp_parse_sdp_entries(pf, entries, num_entries, desc);
2497 if (err)
2498 goto err;
2499
2500 ptp->ice_pin_desc = (const struct ice_ptp_pin_desc *)desc;
2501 }
2502
2503 ptp->info.pin_config = ptp->pin_desc;
2504 ice_ptp_setup_pin_cfg(pf);
2505
2506 err:
2507 if (err) {
2508 devm_kfree(ice_pf_to_dev(pf), desc);
2509 ice_ptp_disable_pins(pf);
2510 }
2511 }
2512
2513 /**
2514 * ice_ptp_set_funcs_e830 - Set specialized functions for E830 support
2515 * @pf: Board private structure
2516 *
2517 * Assign functions to the PTP capabilities structure for E830 devices.
2518 * Functions which operate across all device families should be set directly
2519 * in ice_ptp_set_caps. Only add functions here which are distinct for E830
2520 * devices.
2521 */
2522 static void ice_ptp_set_funcs_e830(struct ice_pf *pf)
2523 {
2524 #ifdef CONFIG_ICE_HWTS
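	/* Cross timestamping on E830 relies on PCIe PTM plus the CPU's Always
	 * Running Timer (ART); only advertise it when both are available.
	 */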
2525 if (pcie_ptm_enabled(pf->pdev) && boot_cpu_has(X86_FEATURE_ART))
2526 pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
2527
2528 #endif /* CONFIG_ICE_HWTS */
2529 /* Rest of the config is the same as base E810 */
2530 pf->ptp.ice_pin_desc = ice_pin_desc_e810;
2531 pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e810);
2532 ice_ptp_setup_pin_cfg(pf);
2533 }
2534
2535 /**
2536 * ice_ptp_set_caps - Set PTP capabilities
2537 * @pf: Board private structure
2538 */
2539 static void ice_ptp_set_caps(struct ice_pf *pf)
2540 {
2541 struct ptp_clock_info *info = &pf->ptp.info;
2542 struct device *dev = ice_pf_to_dev(pf);
2543
2544 snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
2545 dev_driver_string(dev), dev_name(dev));
2546 info->owner = THIS_MODULE;
2547 info->max_adj = 100000000;
2548 info->adjtime = ice_ptp_adjtime;
2549 info->adjfine = ice_ptp_adjfine;
2550 info->gettimex64 = ice_ptp_gettimex64;
2551 info->settime64 = ice_ptp_settime64;
2552 info->n_per_out = GLTSYN_TGT_H_IDX_MAX;
2553 info->n_ext_ts = GLTSYN_EVNT_H_IDX_MAX;
2554 info->enable = ice_ptp_gpio_enable;
2555 info->verify = ice_verify_pin;
2556
2557 info->supported_extts_flags = PTP_RISING_EDGE |
2558 PTP_FALLING_EDGE |
2559 PTP_STRICT_FLAGS;
2560 info->supported_perout_flags = PTP_PEROUT_PHASE;
2561
2562 switch (pf->hw.mac_type) {
2563 case ICE_MAC_E810:
2564 ice_ptp_set_funcs_e810(pf);
2565 return;
2566 case ICE_MAC_E830:
2567 ice_ptp_set_funcs_e830(pf);
2568 return;
2569 case ICE_MAC_GENERIC:
2570 case ICE_MAC_GENERIC_3K_E825:
2571 ice_ptp_set_funcs_e82x(pf);
2572 return;
2573 default:
2574 return;
2575 }
2576 }
2577
2578 /**
2579 * ice_ptp_create_clock - Create PTP clock device for userspace
2580 * @pf: Board private structure
2581 *
2582 * This function creates a new PTP clock device if one does not already
2583 * exist. It returns an error if the clock cannot be created, and success if
2584 * a device already exists. Used by ice_ptp_init to create the clock
2585 * initially, and prevents global resets from creating new clock devices.
2586 */
2587 static long ice_ptp_create_clock(struct ice_pf *pf)
2588 {
2589 struct ptp_clock_info *info;
2590 struct device *dev;
2591
2592 /* No need to create a clock device if we already have one */
2593 if (pf->ptp.clock)
2594 return 0;
2595
2596 ice_ptp_set_caps(pf);
2597
2598 info = &pf->ptp.info;
2599 dev = ice_pf_to_dev(pf);
2600
2601 /* Attempt to register the clock before enabling the hardware. */
2602 pf->ptp.clock = ptp_clock_register(info, dev);
2603 if (IS_ERR(pf->ptp.clock)) {
2604 dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device");
2605 return PTR_ERR(pf->ptp.clock);
2606 }
2607
2608 return 0;
2609 }
2610
2611 /**
2612 * ice_ptp_request_ts - Request an available Tx timestamp index
2613 * @tx: the PTP Tx timestamp tracker to request from
2614 * @skb: the SKB to associate with this timestamp request
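 *
 * Return: the PHY timestamp register index to use for this packet (including
 * the tracker's offset), or -1 if no index is currently available.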
2615 */
2616 s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
2617 {
2618 unsigned long flags;
2619 u8 idx;
2620
2621 spin_lock_irqsave(&tx->lock, flags);
2622
2623 /* Check that this tracker is accepting new timestamp requests */
2624 if (!ice_ptp_is_tx_tracker_up(tx)) {
2625 spin_unlock_irqrestore(&tx->lock, flags);
2626 return -1;
2627 }
2628
2629 /* Find and set the first available index */
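	/* Start searching just after the last low latency timestamp index that
	 * was read and wrap around if needed, so indexes are handed out in
	 * ring order relative to the LL TS reads.
	 */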
2630 idx = find_next_zero_bit(tx->in_use, tx->len,
2631 tx->last_ll_ts_idx_read + 1);
2632 if (idx == tx->len)
2633 idx = find_first_zero_bit(tx->in_use, tx->len);
2634
2635 if (idx < tx->len) {
2636 /* We got a valid index that no other thread could have set. Store
2637 * a reference to the skb and the start time to allow discarding old
2638 * requests.
2639 */
2640 set_bit(idx, tx->in_use);
2641 clear_bit(idx, tx->stale);
2642 tx->tstamps[idx].start = jiffies;
2643 tx->tstamps[idx].skb = skb_get(skb);
2644 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2645 ice_trace(tx_tstamp_request, skb, idx);
2646 }
2647
2648 spin_unlock_irqrestore(&tx->lock, flags);
2649
2650 /* return the appropriate PHY timestamp register index, -1 if no
2651 * indexes were available.
2652 */
2653 if (idx >= tx->len)
2654 return -1;
2655 else
2656 return idx + tx->offset;
2657 }
2658
2659 /**
2660 * ice_ptp_process_ts - Process the PTP Tx timestamps
2661 * @pf: Board private structure
2662 *
2663 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx
2664 * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise.
2665 */
2666 enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
2667 {
2668 switch (pf->ptp.tx_interrupt_mode) {
2669 case ICE_PTP_TX_INTERRUPT_NONE:
2670 /* This device has the clock owner handle timestamps for it */
2671 return ICE_TX_TSTAMP_WORK_DONE;
2672 case ICE_PTP_TX_INTERRUPT_SELF:
2673 /* This device handles its own timestamps */
2674 return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
2675 case ICE_PTP_TX_INTERRUPT_ALL:
2676 /* This device handles timestamps for all ports */
2677 return ice_ptp_tx_tstamp_owner(pf);
2678 default:
2679 WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
2680 pf->ptp.tx_interrupt_mode);
2681 return ICE_TX_TSTAMP_WORK_DONE;
2682 }
2683 }
2684
2685 /**
2686 * ice_ptp_ts_irq - Process the PTP Tx timestamps in IRQ context
2687 * @pf: Board private structure
2688 *
2689 * Return: IRQ_WAKE_THREAD if Tx timestamp read has to be handled in the bottom
2690 * half of the interrupt and IRQ_HANDLED otherwise.
2691 */
2692 irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
2693 {
2694 struct ice_hw *hw = &pf->hw;
2695
2696 switch (hw->mac_type) {
2697 case ICE_MAC_E810:
2698 /* E810 capable of low latency timestamping with interrupt can
2699 * request a single timestamp in the top half and wait for
2700 * a second LL TS interrupt from the FW when it's ready.
2701 */
2702 if (hw->dev_caps.ts_dev_info.ts_ll_int_read) {
2703 struct ice_ptp_tx *tx = &pf->ptp.port.tx;
2704 u8 idx;
2705
2706 if (!ice_pf_state_is_nominal(pf))
2707 return IRQ_HANDLED;
2708
2709 spin_lock(&tx->lock);
2710 idx = find_next_bit_wrap(tx->in_use, tx->len,
2711 tx->last_ll_ts_idx_read + 1);
2712 if (idx != tx->len)
2713 ice_ptp_req_tx_single_tstamp(tx, idx);
2714 spin_unlock(&tx->lock);
2715
2716 return IRQ_HANDLED;
2717 }
2718 fallthrough; /* non-LL_TS E810 */
2719 case ICE_MAC_GENERIC:
2720 case ICE_MAC_GENERIC_3K_E825:
2721 /* All other devices process timestamps in the bottom half due
2722 * to sleeping or polling.
2723 */
2724 if (!ice_ptp_pf_handles_tx_interrupt(pf))
2725 return IRQ_HANDLED;
2726
2727 set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
2728 return IRQ_WAKE_THREAD;
2729 case ICE_MAC_E830:
2730 /* E830 can read timestamps in the top half using rd32() */
2731 if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
2732 /* Process outstanding Tx timestamps. If there
2733 * is more work, re-arm the interrupt to trigger again.
2734 */
2735 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
2736 ice_flush(hw);
2737 }
2738 return IRQ_HANDLED;
2739 default:
2740 return IRQ_HANDLED;
2741 }
2742 }
2743
2744 /**
2745 * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt
2746 * @pf: Board private structure
2747 *
2748 * The device PHY issues Tx timestamp interrupts to the driver for processing
2749 * timestamp data from the PHY. It will not interrupt again until all
2750 * current timestamp data is read. In rare circumstances, it is possible that
2751 * the driver fails to read all outstanding data.
2752 *
2753 * To avoid getting permanently stuck, periodically check if the PHY has
2754 * outstanding timestamp data. If so, trigger an interrupt from software to
2755 * process this data.
2756 */
2757 static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
2758 {
2759 struct device *dev = ice_pf_to_dev(pf);
2760 struct ice_hw *hw = &pf->hw;
2761 bool trigger_oicr = false;
2762 unsigned int i;
2763
2764 if (!pf->ptp.port.tx.has_ready_bitmap)
2765 return;
2766
2767 if (!ice_pf_src_tmr_owned(pf))
2768 return;
2769
2770 for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) {
2771 u64 tstamp_ready;
2772 int err;
2773
2774 err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
2775 if (!err && tstamp_ready) {
2776 trigger_oicr = true;
2777 break;
2778 }
2779 }
2780
2781 if (trigger_oicr) {
2782 /* Trigger a software interrupt, to ensure this data
2783 * gets processed.
2784 */
2785 dev_dbg(dev, "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n");
2786
2787 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
2788 ice_flush(hw);
2789 }
2790 }
2791
2792 static void ice_ptp_periodic_work(struct kthread_work *work)
2793 {
2794 struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
2795 struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
2796 int err;
2797
2798 if (pf->ptp.state != ICE_PTP_READY)
2799 return;
2800
2801 err = ice_ptp_update_cached_phctime(pf);
2802
2803 ice_ptp_maybe_trigger_tx_interrupt(pf);
2804
2805 /* Run twice a second or reschedule if phc update failed */
2806 kthread_queue_delayed_work(ptp->kworker, &ptp->work,
2807 msecs_to_jiffies(err ? 10 : 500));
2808 }
2809
2810 /**
2811 * ice_ptp_prepare_rebuild_sec - Prepare second NAC for PTP reset or rebuild
2812 * @pf: Board private structure
2813 * @rebuild: rebuild if true, prepare if false
2814 * @reset_type: the reset type being performed
2815 */
2816 static void ice_ptp_prepare_rebuild_sec(struct ice_pf *pf, bool rebuild,
2817 enum ice_reset_req reset_type)
2818 {
2819 struct list_head *entry;
2820
2821 list_for_each(entry, &pf->adapter->ports.ports) {
2822 struct ice_ptp_port *port = list_entry(entry,
2823 struct ice_ptp_port,
2824 list_node);
2825 struct ice_pf *peer_pf = ptp_port_to_pf(port);
2826
2827 if (!ice_is_primary(&peer_pf->hw)) {
2828 if (rebuild)
2829 ice_ptp_rebuild(peer_pf, reset_type);
2830 else
2831 ice_ptp_prepare_for_reset(peer_pf, reset_type);
2832 }
2833 }
2834 }
2835
2836 /**
2837 * ice_ptp_prepare_for_reset - Prepare PTP for reset
2838 * @pf: Board private structure
2839 * @reset_type: the reset type being performed
2840 */
2841 void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
2842 {
2843 struct ice_ptp *ptp = &pf->ptp;
2844 struct ice_hw *hw = &pf->hw;
2845 u8 src_tmr;
2846
2847 if (ptp->state != ICE_PTP_READY)
2848 return;
2849
2850 ptp->state = ICE_PTP_RESETTING;
2851
2852 /* Disable timestamping for both Tx and Rx */
2853 ice_ptp_disable_timestamp_mode(pf);
2854
2855 kthread_cancel_delayed_work_sync(&ptp->work);
2856
2857 if (reset_type == ICE_RESET_PFR)
2858 return;
2859
2860 if (ice_pf_src_tmr_owned(pf) && hw->mac_type == ICE_MAC_GENERIC_3K_E825)
2861 ice_ptp_prepare_rebuild_sec(pf, false, reset_type);
2862
2863 ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
2864
2865 /* Disable periodic outputs */
2866 ice_ptp_disable_all_perout(pf);
2867
2868 src_tmr = ice_get_ptp_src_clock_index(&pf->hw);
2869
2870 /* Disable source clock */
2871 wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);
2872
2873 /* Acquire PHC and system timer to restore after reset */
2874 ptp->reset_time = ktime_get_real_ns();
2875 }
2876
2877 /**
2878 * ice_ptp_rebuild_owner - Initialize PTP clock owner after reset
2879 * @pf: Board private structure
2880 *
2881 * Companion function for ice_ptp_rebuild() which handles tasks that only the
2882 * PTP clock owner instance should perform.
2883 */
2884 static int ice_ptp_rebuild_owner(struct ice_pf *pf)
2885 {
2886 struct ice_ptp *ptp = &pf->ptp;
2887 struct ice_hw *hw = &pf->hw;
2888 struct timespec64 ts;
2889 u64 time_diff;
2890 int err;
2891
2892 err = ice_ptp_init_phc(hw);
2893 if (err)
2894 return err;
2895
2896 err = ice_tspll_init(hw);
2897 if (err)
2898 return err;
2899
2900 /* Acquire the global hardware lock */
2901 if (!ice_ptp_lock(hw)) {
2902 err = -EBUSY;
2903 return err;
2904 }
2905
2906 /* Write the increment time value to PHY and LAN */
2907 err = ice_ptp_write_incval(hw, ice_base_incval(pf));
2908 if (err)
2909 goto err_unlock;
2910
2911 /* Write the initial Time value to PHY and LAN using the cached PHC
2912 * time before the reset and time difference between stopping and
2913 * starting the clock.
2914 */
2915 if (ptp->cached_phc_time) {
2916 time_diff = ktime_get_real_ns() - ptp->reset_time;
2917 ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);
2918 } else {
2919 ts = ktime_to_timespec64(ktime_get_real());
2920 }
2921 err = ice_ptp_write_init(pf, &ts);
2922 if (err)
2923 goto err_unlock;
2924
2925 /* Release the global hardware lock */
2926 ice_ptp_unlock(hw);
2927
2928 /* Flush software tracking of any outstanding timestamps since we're
2929 * about to flush the PHY timestamp block.
2930 */
2931 ice_ptp_flush_all_tx_tracker(pf);
2932
2933 /* Enable quad interrupts */
2934 err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
2935 if (err)
2936 return err;
2937
2938 ice_ptp_restart_all_phy(pf);
2939
2940 /* Re-enable all periodic outputs and external timestamp events */
2941 ice_ptp_enable_all_perout(pf);
2942 ice_ptp_enable_all_extts(pf);
2943
2944 return 0;
2945
2946 err_unlock:
2947 ice_ptp_unlock(hw);
2948 return err;
2949 }
2950
2951 /**
2952 * ice_ptp_rebuild - Initialize PTP hardware clock support after reset
2953 * @pf: Board private structure
2954 * @reset_type: the reset type being performed
2955 */
2956 void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
2957 {
2958 struct ice_ptp *ptp = &pf->ptp;
2959 int err;
2960
2961 if (ptp->state == ICE_PTP_READY) {
2962 ice_ptp_prepare_for_reset(pf, reset_type);
2963 } else if (ptp->state != ICE_PTP_RESETTING) {
2964 err = -EINVAL;
2965 dev_err(ice_pf_to_dev(pf), "PTP was not initialized\n");
2966 goto err;
2967 }
2968
2969 if (ice_pf_src_tmr_owned(pf) && reset_type != ICE_RESET_PFR) {
2970 err = ice_ptp_rebuild_owner(pf);
2971 if (err)
2972 goto err;
2973 }
2974
2975 ptp->state = ICE_PTP_READY;
2976
2977 /* Start periodic work going */
2978 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
2979
2980 dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
2981 return;
2982
2983 err:
2984 ptp->state = ICE_PTP_ERROR;
2985 dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
2986 }
2987
2988 static int ice_ptp_setup_adapter(struct ice_pf *pf)
2989 {
2990 if (!ice_pf_src_tmr_owned(pf) || !ice_is_primary(&pf->hw))
2991 return -EPERM;
2992
2993 pf->adapter->ctrl_pf = pf;
2994
2995 return 0;
2996 }
2997
2998 static int ice_ptp_setup_pf(struct ice_pf *pf)
2999 {
3000 struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
3001 struct ice_ptp *ptp = &pf->ptp;
3002
3003 if (WARN_ON(!ctrl_ptp) || pf->hw.mac_type == ICE_MAC_UNKNOWN)
3004 return -ENODEV;
3005
3006 INIT_LIST_HEAD(&ptp->port.list_node);
3007 mutex_lock(&pf->adapter->ports.lock);
3008
3009 list_add(&ptp->port.list_node,
3010 &pf->adapter->ports.ports);
3011 mutex_unlock(&pf->adapter->ports.lock);
3012
3013 return 0;
3014 }
3015
3016 static void ice_ptp_cleanup_pf(struct ice_pf *pf)
3017 {
3018 struct ice_ptp *ptp = &pf->ptp;
3019
3020 if (pf->hw.mac_type != ICE_MAC_UNKNOWN) {
3021 mutex_lock(&pf->adapter->ports.lock);
3022 list_del(&ptp->port.list_node);
3023 mutex_unlock(&pf->adapter->ports.lock);
3024 }
3025 }
3026
3027 /**
3028 * ice_ptp_clock_index - Get the PTP clock index for this device
3029 * @pf: Board private structure
3030 *
3031 * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock
3032 * is associated.
3033 */
3034 int ice_ptp_clock_index(struct ice_pf *pf)
3035 {
3036 struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
3037 struct ptp_clock *clock;
3038
3039 if (!ctrl_ptp)
3040 return -1;
3041 clock = ctrl_ptp->clock;
3042
3043 return clock ? ptp_clock_index(clock) : -1;
3044 }
3045
3046 /**
3047 * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
3048 * @pf: Board private structure
3049 *
3050 * Set up and initialize a PTP clock device that represents the device hardware
3051 * clock. Save the clock index for other functions connected to the same
3052 * hardware resource.
3053 */
3054 static int ice_ptp_init_owner(struct ice_pf *pf)
3055 {
3056 struct ice_hw *hw = &pf->hw;
3057 struct timespec64 ts;
3058 int err;
3059
3060 err = ice_ptp_init_phc(hw);
3061 if (err) {
3062 dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n",
3063 err);
3064 return err;
3065 }
3066
3067 err = ice_tspll_init(hw);
3068 if (err) {
3069 dev_err(ice_pf_to_dev(pf), "Failed to initialize CGU, status %d\n",
3070 err);
3071 return err;
3072 }
3073
3074 /* Acquire the global hardware lock */
3075 if (!ice_ptp_lock(hw)) {
3076 err = -EBUSY;
3077 goto err_exit;
3078 }
3079
3080 /* Write the increment time value to PHY and LAN */
3081 err = ice_ptp_write_incval(hw, ice_base_incval(pf));
3082 if (err)
3083 goto err_unlock;
3084
3085 ts = ktime_to_timespec64(ktime_get_real());
3086 /* Write the initial Time value to PHY and LAN */
3087 err = ice_ptp_write_init(pf, &ts);
3088 if (err)
3089 goto err_unlock;
3090
3091 /* Release the global hardware lock */
3092 ice_ptp_unlock(hw);
3093
3094 /* Configure PHY interrupt settings */
3095 err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
3096 if (err)
3097 goto err_exit;
3098
3099 /* Ensure we have a clock device */
3100 err = ice_ptp_create_clock(pf);
3101 if (err)
3102 goto err_clk;
3103
3104 return 0;
3105 err_clk:
3106 pf->ptp.clock = NULL;
3107 err_exit:
3108 return err;
3109
3110 err_unlock:
3111 ice_ptp_unlock(hw);
3112 return err;
3113 }
3114
3115 /**
3116 * ice_ptp_init_work - Initialize PTP work threads
3117 * @pf: Board private structure
3118 * @ptp: PF PTP structure
3119 */
3120 static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
3121 {
3122 struct kthread_worker *kworker;
3123
3124 /* Initialize work functions */
3125 kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);
3126
3127 /* Allocate a kworker for handling work required for the ports
3128 * connected to the PTP hardware clock.
3129 */
3130 kworker = kthread_run_worker(0, "ice-ptp-%s",
3131 dev_name(ice_pf_to_dev(pf)));
3132 if (IS_ERR(kworker))
3133 return PTR_ERR(kworker);
3134
3135 ptp->kworker = kworker;
3136
3137 /* Start periodic work going */
3138 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
3139
3140 return 0;
3141 }
3142
3143 /**
3144 * ice_ptp_init_port - Initialize PTP port structure
3145 * @pf: Board private structure
3146 * @ptp_port: PTP port structure
3147 *
3148 * Return: 0 on success, -ENODEV on invalid MAC type, -ENOMEM on failed alloc.
3149 */
3150 static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
3151 {
3152 struct ice_hw *hw = &pf->hw;
3153
3154 mutex_init(&ptp_port->ps_lock);
3155
3156 switch (hw->mac_type) {
3157 case ICE_MAC_E810:
3158 case ICE_MAC_E830:
3159 case ICE_MAC_GENERIC_3K_E825:
3160 return ice_ptp_init_tx(pf, &ptp_port->tx, ptp_port->port_num);
3161 case ICE_MAC_GENERIC:
3162 kthread_init_delayed_work(&ptp_port->ov_work,
3163 ice_ptp_wait_for_offsets);
3164 return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
3165 ptp_port->port_num);
3166 default:
3167 return -ENODEV;
3168 }
3169 }
3170
3171 /**
3172 * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode
3173 * @pf: Board private structure
3174 *
3175 * Initialize the Tx timestamp interrupt mode for this device. For most device
3176 * types, each PF processes the interrupt and manages its own timestamps. For
3177 * E822-based devices, only the clock owner processes the timestamps. Other
3178 * PFs disable the interrupt and do not process their own timestamps.
3179 */
3180 static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
3181 {
3182 switch (pf->hw.mac_type) {
3183 case ICE_MAC_GENERIC:
3184 /* E822 based PHY has the clock owner process the interrupt
3185 * for all ports.
3186 */
3187 if (ice_pf_src_tmr_owned(pf))
3188 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL;
3189 else
3190 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE;
3191 break;
3192 default:
3193 /* other PHY types handle their own Tx interrupt */
3194 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF;
3195 }
3196 }
3197
3198 /**
3199 * ice_ptp_init - Initialize PTP hardware clock support
3200 * @pf: Board private structure
3201 *
3202 * Set up the device for interacting with the PTP hardware clock for all
3203 * functions, both the function that owns the clock hardware, and the
3204 * functions connected to the clock hardware.
3205 *
3206 * The clock owner will allocate and register a ptp_clock with the
3207 * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work
3208 * items used for asynchronous work such as Tx timestamps and periodic work.
3209 */
3210 void ice_ptp_init(struct ice_pf *pf)
3211 {
3212 struct ice_ptp *ptp = &pf->ptp;
3213 struct ice_hw *hw = &pf->hw;
3214 int err;
3215
3216 ptp->state = ICE_PTP_INITIALIZING;
3217
3218 if (hw->lane_num < 0) {
3219 err = hw->lane_num;
3220 goto err_exit;
3221 }
3222 ptp->port.port_num = hw->lane_num;
3223
3224 ice_ptp_init_hw(hw);
3225
3226 ice_ptp_init_tx_interrupt_mode(pf);
3227
3228 /* If this function owns the clock hardware, it must allocate and
3229 * configure the PTP clock device to represent it.
3230 */
3231 if (ice_pf_src_tmr_owned(pf) && ice_is_primary(hw)) {
3232 err = ice_ptp_setup_adapter(pf);
3233 if (err)
3234 goto err_exit;
3235 err = ice_ptp_init_owner(pf);
3236 if (err)
3237 goto err_exit;
3238 }
3239
3240 err = ice_ptp_setup_pf(pf);
3241 if (err)
3242 goto err_exit;
3243
3244 err = ice_ptp_init_port(pf, &ptp->port);
3245 if (err)
3246 goto err_exit;
3247
3248 /* Start the PHY timestamping block */
3249 ice_ptp_reset_phy_timestamping(pf);
3250
3251 /* Configure initial Tx interrupt settings */
3252 ice_ptp_cfg_tx_interrupt(pf);
3253
3254 ptp->state = ICE_PTP_READY;
3255
3256 err = ice_ptp_init_work(pf, ptp);
3257 if (err)
3258 goto err_exit;
3259
3260 dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
3261 return;
3262
3263 err_exit:
3264 /* If we registered a PTP clock, release it */
3265 if (pf->ptp.clock) {
3266 ptp_clock_unregister(ptp->clock);
3267 pf->ptp.clock = NULL;
3268 }
3269 ptp->state = ICE_PTP_ERROR;
3270 dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
3271 }
3272
3273 /**
3274 * ice_ptp_release - Disable the driver/HW support and unregister the clock
3275 * @pf: Board private structure
3276 *
3277 * This function handles the cleanup work required from the initialization by
3278 * clearing out the important information and unregistering the clock
3279 */
3280 void ice_ptp_release(struct ice_pf *pf)
3281 {
3282 if (pf->ptp.state != ICE_PTP_READY)
3283 return;
3284
3285 pf->ptp.state = ICE_PTP_UNINIT;
3286
3287 /* Disable timestamping for both Tx and Rx */
3288 ice_ptp_disable_timestamp_mode(pf);
3289
3290 ice_ptp_cleanup_pf(pf);
3291
3292 ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
3293
3294 ice_ptp_disable_all_extts(pf);
3295
3296 kthread_cancel_delayed_work_sync(&pf->ptp.work);
3297
3298 ice_ptp_port_phy_stop(&pf->ptp.port);
3299 mutex_destroy(&pf->ptp.port.ps_lock);
3300 if (pf->ptp.kworker) {
3301 kthread_destroy_worker(pf->ptp.kworker);
3302 pf->ptp.kworker = NULL;
3303 }
3304
3305 if (!pf->ptp.clock)
3306 return;
3307
3308 /* Disable periodic outputs */
3309 ice_ptp_disable_all_perout(pf);
3310
3311 ptp_clock_unregister(pf->ptp.clock);
3312 pf->ptp.clock = NULL;
3313
3314 dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
3315 }
3316