1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021, Intel Corporation. */
3
4 #include "ice.h"
5 #include "ice_lib.h"
6 #include "ice_trace.h"
7
/* Human-readable names for the PTP pins, indexed by pin function.
 * Exposed via the ptp_pin_desc name field for the non-DPLL pin layouts.
 */
static const char ice_pin_names[][64] = {
	"SDP0",
	"SDP1",
	"SDP2",
	"SDP3",
	"TIME_SYNC",
	"1PPS"
};
16
/* Pin layout for E82X devices: only the dedicated TIME_SYNC input on
 * GPIO 4 and the 1PPS output on GPIO 5 are available. Each entry is
 * { name index, { input GPIO, output GPIO }, { input delay, output delay } };
 * -1 marks an unsupported direction. Delay units are not visible here —
 * presumably nanoseconds; confirm against the descriptor definition.
 */
static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = {
	/* name,     gpio,       delay */
	{ TIME_SYNC, { 4, -1 }, { 0, 0 }},
	{ ONE_PPS,   { -1, 5 }, { 0, 11 }},
};
22
/* Pin layout for E825C devices: four bidirectional SDP pins on GPIOs 0-3
 * plus the dedicated TIME_SYNC input and 1PPS output. Entry format as in
 * ice_pin_desc_e82x; -1 marks an unsupported direction.
 */
static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = {
	/* name,     gpio,       delay */
	{ SDP0,      { 0, 0 },  { 15, 14 }},
	{ SDP1,      { 1, 1 },  { 15, 14 }},
	{ SDP2,      { 2, 2 },  { 15, 14 }},
	{ SDP3,      { 3, 3 },  { 15, 14 }},
	{ TIME_SYNC, { 4, -1 }, { 11, 0 }},
	{ ONE_PPS,   { -1, 5 }, { 0, 9 }},
};
32
/* Pin layout for E810 devices: four bidirectional SDP pins and the 1PPS
 * output; no dedicated TIME_SYNC pin. Entry format as in
 * ice_pin_desc_e82x; -1 marks an unsupported direction.
 */
static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = {
	/* name,   gpio,       delay */
	{ SDP0,    { 0, 0 },  { 0, 1 }},
	{ SDP1,    { 1, 1 },  { 0, 1 }},
	{ SDP2,    { 2, 2 },  { 0, 1 }},
	{ SDP3,    { 3, 3 },  { 0, 1 }},
	{ ONE_PPS, { -1, 5 }, { 0, 1 }},
};
41
/* Pin names used instead of ice_pin_names on DPLL-enabled (SyncE/GNSS)
 * adapters, where the PTP pins are SDP20-SDP23.
 */
static const char ice_pin_names_dpll[][64] = {
	"SDP20",
	"SDP21",
	"SDP22",
	"SDP23",
};
48
/* Pin layout for DPLL-enabled adapters: each pin is fixed to a single
 * direction (alternating input/output). Entry format as in
 * ice_pin_desc_e82x; -1 marks an unsupported direction.
 */
static const struct ice_ptp_pin_desc ice_pin_desc_dpll[] = {
	/* name, gpio,       delay */
	{ SDP0,  { -1, 0 }, { 0, 1 }},
	{ SDP1,  { 1, -1 }, { 0, 0 }},
	{ SDP2,  { -1, 2 }, { 0, 1 }},
	{ SDP3,  { 3, -1 }, { 0, 0 }},
};
56
ice_get_ctrl_pf(struct ice_pf * pf)57 static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf)
58 {
59 return !pf->adapter ? NULL : pf->adapter->ctrl_pf;
60 }
61
ice_get_ctrl_ptp(struct ice_pf * pf)62 static struct ice_ptp *ice_get_ctrl_ptp(struct ice_pf *pf)
63 {
64 struct ice_pf *ctrl_pf = ice_get_ctrl_pf(pf);
65
66 return !ctrl_pf ? NULL : &ctrl_pf->ptp;
67 }
68
69 /**
70 * ice_ptp_find_pin_idx - Find pin index in ptp_pin_desc
71 * @pf: Board private structure
72 * @func: Pin function
73 * @chan: GPIO channel
74 *
75 * Return: positive pin number when pin is present, -1 otherwise
76 */
ice_ptp_find_pin_idx(struct ice_pf * pf,enum ptp_pin_function func,unsigned int chan)77 static int ice_ptp_find_pin_idx(struct ice_pf *pf, enum ptp_pin_function func,
78 unsigned int chan)
79 {
80 const struct ptp_clock_info *info = &pf->ptp.info;
81 int i;
82
83 for (i = 0; i < info->n_pins; i++) {
84 if (info->pin_config[i].func == func &&
85 info->pin_config[i].chan == chan)
86 return i;
87 }
88
89 return -1;
90 }
91
92 /**
93 * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
94 * @pf: Board private structure
95 *
96 * Program the device to respond appropriately to the Tx timestamp interrupt
97 * cause.
98 */
ice_ptp_cfg_tx_interrupt(struct ice_pf * pf)99 static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf)
100 {
101 struct ice_hw *hw = &pf->hw;
102 bool enable;
103 u32 val;
104
105 switch (pf->ptp.tx_interrupt_mode) {
106 case ICE_PTP_TX_INTERRUPT_ALL:
107 /* React to interrupts across all quads. */
108 wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
109 enable = true;
110 break;
111 case ICE_PTP_TX_INTERRUPT_NONE:
112 /* Do not react to interrupts on any quad. */
113 wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
114 enable = false;
115 break;
116 case ICE_PTP_TX_INTERRUPT_SELF:
117 default:
118 enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON;
119 break;
120 }
121
122 /* Configure the Tx timestamp interrupt */
123 val = rd32(hw, PFINT_OICR_ENA);
124 if (enable)
125 val |= PFINT_OICR_TSYN_TX_M;
126 else
127 val &= ~PFINT_OICR_TSYN_TX_M;
128 wr32(hw, PFINT_OICR_ENA, val);
129 }
130
131 /**
132 * ice_set_rx_tstamp - Enable or disable Rx timestamping
133 * @pf: The PF pointer to search in
134 * @on: bool value for whether timestamps are enabled or disabled
135 */
ice_set_rx_tstamp(struct ice_pf * pf,bool on)136 static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
137 {
138 struct ice_vsi *vsi;
139 u16 i;
140
141 vsi = ice_get_main_vsi(pf);
142 if (!vsi || !vsi->rx_rings)
143 return;
144
145 /* Set the timestamp flag for all the Rx rings */
146 ice_for_each_rxq(vsi, i) {
147 if (!vsi->rx_rings[i])
148 continue;
149 vsi->rx_rings[i]->ptp_rx = on;
150 }
151 }
152
153 /**
154 * ice_ptp_disable_timestamp_mode - Disable current timestamp mode
155 * @pf: Board private structure
156 *
157 * Called during preparation for reset to temporarily disable timestamping on
158 * the device. Called during remove to disable timestamping while cleaning up
159 * driver resources.
160 */
ice_ptp_disable_timestamp_mode(struct ice_pf * pf)161 static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf)
162 {
163 struct ice_hw *hw = &pf->hw;
164 u32 val;
165
166 val = rd32(hw, PFINT_OICR_ENA);
167 val &= ~PFINT_OICR_TSYN_TX_M;
168 wr32(hw, PFINT_OICR_ENA, val);
169
170 ice_set_rx_tstamp(pf, false);
171 }
172
173 /**
174 * ice_ptp_restore_timestamp_mode - Restore timestamp configuration
175 * @pf: Board private structure
176 *
177 * Called at the end of rebuild to restore timestamp configuration after
178 * a device reset.
179 */
ice_ptp_restore_timestamp_mode(struct ice_pf * pf)180 void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
181 {
182 struct ice_hw *hw = &pf->hw;
183 bool enable_rx;
184
185 ice_ptp_cfg_tx_interrupt(pf);
186
187 enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
188 ice_set_rx_tstamp(pf, enable_rx);
189
190 /* Trigger an immediate software interrupt to ensure that timestamps
191 * which occurred during reset are handled now.
192 */
193 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
194 ice_flush(hw);
195 }
196
/**
 * ice_ptp_read_src_clk_reg - Read the source clock register
 * @pf: Board private structure
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
 *
 * Return: the 64 bit value of the source PHC timer.
 */
u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
			     struct ptp_system_timestamp *sts)
{
	struct ice_hw *hw = &pf->hw;
	u32 hi, lo, lo2;
	u8 tmr_idx;

	/* NOTE(review): presumably only the primary device owns the source
	 * timer registers on multi-device adapters — confirm.
	 */
	if (!ice_is_primary(hw))
		hw = ice_get_primary_hw(pf);

	tmr_idx = ice_get_ptp_src_clock_index(hw);
	/* Serialize GLTSYN_TIME access across PFs sharing the adapter */
	guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
	/* Read the system timestamp pre PHC read */
	ptp_read_system_prets(sts);

	if (hw->mac_type == ICE_MAC_E830) {
		/* E830 provides a single 64 bit register read of the timer */
		u64 clk_time = rd64(hw, E830_GLTSYN_TIME_L(tmr_idx));

		/* Read the system timestamp post PHC read */
		ptp_read_system_postts(sts);

		return clk_time;
	}

	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	/* Read the system timestamp post PHC read */
	ptp_read_system_postts(sts);

	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	/* Re-read TIME_L to detect a low-word rollover between the two
	 * 32 bit reads above.
	 */
	lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	if (lo2 < lo) {
		/* if TIME_L rolled over read TIME_L again and update
		 * system timestamps
		 */
		ptp_read_system_prets(sts);
		lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
		ptp_read_system_postts(sts);
		hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	}

	return ((u64)hi << 32) | lo;
}
247
248 /**
249 * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
250 * @cached_phc_time: recently cached copy of PHC time
251 * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
252 *
253 * Hardware captures timestamps which contain only 32 bits of nominal
254 * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
255 * Note that the captured timestamp values may be 40 bits, but the lower
256 * 8 bits are sub-nanoseconds and generally discarded.
257 *
258 * Extend the 32bit nanosecond timestamp using the following algorithm and
259 * assumptions:
260 *
261 * 1) have a recently cached copy of the PHC time
262 * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1
263 * seconds) before or after the PHC time was captured.
264 * 3) calculate the delta between the cached time and the timestamp
265 * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
266 * captured after the PHC time. In this case, the full timestamp is just
267 * the cached PHC time plus the delta.
268 * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
269 * timestamp was captured *before* the PHC time, i.e. because the PHC
270 * cache was updated after the timestamp was captured by hardware. In this
271 * case, the full timestamp is the cached time minus the inverse delta.
272 *
273 * This algorithm works even if the PHC time was updated after a Tx timestamp
274 * was requested, but before the Tx timestamp event was reported from
275 * hardware.
276 *
277 * This calculation primarily relies on keeping the cached PHC time up to
278 * date. If the timestamp was captured more than 2^31 nanoseconds after the
279 * PHC time, it is possible that the lower 32bits of PHC time have
280 * overflowed more than once, and we might generate an incorrect timestamp.
281 *
282 * This is prevented by (a) periodically updating the cached PHC time once
283 * a second, and (b) discarding any Tx timestamp packet if it has waited for
284 * a timestamp for more than one second.
285 */
ice_ptp_extend_32b_ts(u64 cached_phc_time,u32 in_tstamp)286 static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
287 {
288 u32 delta, phc_time_lo;
289 u64 ns;
290
291 /* Extract the lower 32 bits of the PHC time */
292 phc_time_lo = (u32)cached_phc_time;
293
294 /* Calculate the delta between the lower 32bits of the cached PHC
295 * time and the in_tstamp value
296 */
297 delta = (in_tstamp - phc_time_lo);
298
299 /* Do not assume that the in_tstamp is always more recent than the
300 * cached PHC time. If the delta is large, it indicates that the
301 * in_tstamp was taken in the past, and should be converted
302 * forward.
303 */
304 if (delta > (U32_MAX / 2)) {
305 /* reverse the delta calculation here */
306 delta = (phc_time_lo - in_tstamp);
307 ns = cached_phc_time - delta;
308 } else {
309 ns = cached_phc_time + delta;
310 }
311
312 return ns;
313 }
314
315 /**
316 * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
317 * @pf: Board private structure
318 * @in_tstamp: Ingress/egress 40b timestamp value
319 *
320 * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
321 * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
322 *
323 * *--------------------------------------------------------------*
324 * | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
325 * *--------------------------------------------------------------*
326 *
327 * The low bit is an indicator of whether the timestamp is valid. The next
328 * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
329 * and the remaining 32 bits are the lower 32 bits of the PHC timer.
330 *
331 * It is assumed that the caller verifies the timestamp is valid prior to
332 * calling this function.
333 *
334 * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
335 * time stored in the device private PTP structure as the basis for timestamp
336 * extension.
337 *
338 * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
339 * algorithm.
340 */
ice_ptp_extend_40b_ts(struct ice_pf * pf,u64 in_tstamp)341 static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
342 {
343 const u64 mask = GENMASK_ULL(31, 0);
344 unsigned long discard_time;
345
346 /* Discard the hardware timestamp if the cached PHC time is too old */
347 discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
348 if (time_is_before_jiffies(discard_time)) {
349 pf->ptp.tx_hwtstamp_discarded++;
350 return 0;
351 }
352
353 return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
354 (in_tstamp >> 8) & mask);
355 }
356
/**
 * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps
 * @tx: the PTP Tx timestamp tracker to check
 *
 * Check that a given PTP Tx timestamp tracker is up, i.e. that it is ready
 * to accept new timestamp requests.
 *
 * Assumes the tx->lock spinlock is already held.
 *
 * Return: true when the tracker is initialized and not calibrating.
 */
static bool
ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
{
	lockdep_assert_held(&tx->lock);

	return tx->init && !tx->calibrating;
}
373
/**
 * ice_ptp_req_tx_single_tstamp - Request Tx timestamp for a port from FW
 * @tx: the PTP Tx timestamp tracker
 * @idx: index of the timestamp to request
 *
 * Kick off a firmware read of the timestamp latched for @idx over the low
 * latency interface; completion is handled later by
 * ice_ptp_complete_tx_single_tstamp(). If the request has already waited
 * more than 2 seconds, the skb is dropped instead.
 */
void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
{
	struct ice_e810_params *params;
	struct ice_ptp_port *ptp_port;
	unsigned long flags;
	struct sk_buff *skb;
	struct ice_pf *pf;

	if (!tx->init)
		return;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	params = &pf->hw.ptp.phy.e810;

	/* Drop packets which have waited for more than 2 seconds */
	if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
		/* Count the number of Tx timestamps that timed out */
		pf->ptp.tx_hwtstamp_timeouts++;

		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		clear_bit(idx, tx->in_use);

		dev_kfree_skb_any(skb);
		return;
	}

	ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);

	/* The wait queue lock also protects atqbal_flags and the proxy
	 * register access below.
	 */
	spin_lock_irqsave(&params->atqbal_wq.lock, flags);

	params->atqbal_flags |= ATQBAL_FLAGS_INTR_IN_PROGRESS;

	/* Write TS index to read to the PF register so the FW can read it */
	wr32(&pf->hw, REG_LL_PROXY_H,
	     REG_LL_PROXY_H_TS_INTR_ENA | FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) |
	     REG_LL_PROXY_H_EXEC);
	tx->last_ll_ts_idx_read = idx;

	spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
}
421
/**
 * ice_ptp_complete_tx_single_tstamp - Complete Tx timestamp for a port
 * @tx: the PTP Tx timestamp tracker
 *
 * Read back the timestamp made available by firmware after a request
 * issued via ice_ptp_req_tx_single_tstamp(), extend it to 64 bits, and
 * deliver it to the stack.
 */
void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
{
	struct skb_shared_hwtstamps shhwtstamps = {};
	u8 idx = tx->last_ll_ts_idx_read;
	struct ice_e810_params *params;
	struct ice_ptp_port *ptp_port;
	u64 raw_tstamp, tstamp;
	bool drop_ts = false;
	struct sk_buff *skb;
	unsigned long flags;
	struct device *dev;
	struct ice_pf *pf;
	u32 reg_ll_high;

	/* Nothing to do unless a low latency read is outstanding */
	if (!tx->init || tx->last_ll_ts_idx_read < 0)
		return;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	dev = ice_pf_to_dev(pf);
	params = &pf->hw.ptp.phy.e810;

	ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);

	spin_lock_irqsave(&params->atqbal_wq.lock, flags);

	if (!(params->atqbal_flags & ATQBAL_FLAGS_INTR_IN_PROGRESS))
		dev_dbg(dev, "%s: low latency interrupt request not in progress?\n",
			__func__);

	/* Read the low 32 bit value */
	raw_tstamp = rd32(&pf->hw, REG_LL_PROXY_L);
	/* Read the status together with high TS part */
	reg_ll_high = rd32(&pf->hw, REG_LL_PROXY_H);

	/* Wake up threads waiting on low latency interface */
	params->atqbal_flags &= ~ATQBAL_FLAGS_INTR_IN_PROGRESS;

	wake_up_locked(&params->atqbal_wq);

	spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);

	/* When the bit is cleared, the TS is ready in the register */
	if (reg_ll_high & REG_LL_PROXY_H_EXEC) {
		dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready");
		return;
	}

	/* High 8 bit value of the TS is on the bits 16:23 */
	raw_tstamp |= ((u64)FIELD_GET(REG_LL_PROXY_H_TS_HIGH, reg_ll_high)) << 32;

	/* Devices using this interface always verify the timestamp differs
	 * relative to the last cached timestamp value.
	 */
	if (raw_tstamp == tx->tstamps[idx].cached_tstamp)
		return;

	tx->tstamps[idx].cached_tstamp = raw_tstamp;
	clear_bit(idx, tx->in_use);
	skb = tx->tstamps[idx].skb;
	tx->tstamps[idx].skb = NULL;
	/* A stale bit means a clock adjustment raced with this timestamp;
	 * it cannot be extended correctly, so drop it.
	 */
	if (test_and_clear_bit(idx, tx->stale))
		drop_ts = true;

	if (!skb)
		return;

	if (drop_ts) {
		dev_kfree_skb_any(skb);
		return;
	}

	/* Extend the timestamp using cached PHC time */
	tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
	if (tstamp) {
		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
		ice_trace(tx_tstamp_complete, skb, idx);

		/* Count the number of Tx timestamps that succeeded */
		pf->ptp.tx_hwtstamp_good++;
	}

	skb_tstamp_tx(skb, &shhwtstamps);
	dev_kfree_skb_any(skb);
}
511
/**
 * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port
 * @tx: the PTP Tx timestamp tracker
 *
 * Process timestamps captured by the PHY associated with this port. To do
 * this, loop over each index with a waiting skb.
 *
 * If a given index has a valid timestamp, perform the following steps:
 *
 * 1) check that the timestamp request is not stale
 * 2) check that a timestamp is ready and available in the PHY memory bank
 * 3) read and copy the timestamp out of the PHY register
 * 4) unlock the index by clearing the associated in_use bit
 * 5) check if the timestamp is stale, and discard if so
 * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value
 * 7) send this 64 bit timestamp to the stack
 *
 * Note that we do not hold the tracking lock while reading the Tx timestamp.
 * This is because reading the timestamp requires taking a mutex that might
 * sleep.
 *
 * The only place where we set in_use is when a new timestamp is initiated
 * with a slot index. This is only called in the hard xmit routine where an
 * SKB has a request flag set. The only places where we clear this bit is this
 * function, or during teardown when the Tx timestamp tracker is being
 * removed. A timestamp index will never be re-used until the in_use bit for
 * that index is cleared.
 *
 * If a Tx thread starts a new timestamp, we might not begin processing it
 * right away but we will notice it at the end when we re-queue the task.
 *
 * If a Tx thread starts a new timestamp just after this function exits, the
 * interrupt for that timestamp should re-trigger this function once
 * a timestamp is ready.
 *
 * In cases where the PTP hardware clock was directly adjusted, some
 * timestamps may not be able to safely use the timestamp extension math. In
 * this case, software will set the stale bit for any outstanding Tx
 * timestamps when the clock is adjusted. Then this function will discard
 * those captured timestamps instead of sending them to the stack.
 *
 * If a Tx packet has been waiting for more than 2 seconds, it is not possible
 * to correctly extend the timestamp using the cached PHC time. It is
 * extremely unlikely that a packet will ever take this long to timestamp. If
 * we detect a Tx timestamp request that has waited for this long we assume
 * the packet will never be sent by hardware and discard it without reading
 * the timestamp register.
 */
static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
{
	struct ice_ptp_port *ptp_port;
	unsigned long flags;
	u32 tstamp_good = 0;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u64 tstamp_ready;
	bool link_up;
	int err;
	u8 idx;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	hw = &pf->hw;

	if (!tx->init)
		return;

	/* Read the Tx ready status first */
	if (tx->has_ready_bitmap) {
		err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
		if (err)
			return;
	}

	/* Drop packets if the link went down */
	link_up = ptp_port->link_up;

	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct skb_shared_hwtstamps shhwtstamps = {};
		u8 phy_idx = idx + tx->offset;
		u64 raw_tstamp = 0, tstamp;
		bool drop_ts = !link_up;
		struct sk_buff *skb;

		/* Drop packets which have waited for more than 2 seconds */
		if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
			drop_ts = true;

			/* Count the number of Tx timestamps that timed out */
			pf->ptp.tx_hwtstamp_timeouts++;
		}

		/* Only read a timestamp from the PHY if its marked as ready
		 * by the tstamp_ready register. This avoids unnecessary
		 * reading of timestamps which are not yet valid. This is
		 * important as we must read all timestamps which are valid
		 * and only timestamps which are valid during each interrupt.
		 * If we do not, the hardware logic for generating a new
		 * interrupt can get stuck on some devices.
		 */
		if (tx->has_ready_bitmap &&
		    !(tstamp_ready & BIT_ULL(phy_idx))) {
			/* Not ready, but already doomed: skip the PHY read
			 * and go straight to releasing the slot.
			 */
			if (drop_ts)
				goto skip_ts_read;

			continue;
		}

		ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);

		err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp);
		if (err && !drop_ts)
			continue;

		ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);

		/* For PHYs which don't implement a proper timestamp ready
		 * bitmap, verify that the timestamp value is different
		 * from the last cached timestamp. If it is not, skip this for
		 * now assuming it hasn't yet been captured by hardware.
		 */
		if (!drop_ts && !tx->has_ready_bitmap &&
		    raw_tstamp == tx->tstamps[idx].cached_tstamp)
			continue;

		/* Discard any timestamp value without the valid bit set */
		if (!(raw_tstamp & ICE_PTP_TS_VALID))
			drop_ts = true;

skip_ts_read:
		/* Release the slot under the lock so a concurrent xmit
		 * cannot race with the state update.
		 */
		spin_lock_irqsave(&tx->lock, flags);
		if (!tx->has_ready_bitmap && raw_tstamp)
			tx->tstamps[idx].cached_tstamp = raw_tstamp;
		clear_bit(idx, tx->in_use);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		if (test_and_clear_bit(idx, tx->stale))
			drop_ts = true;
		spin_unlock_irqrestore(&tx->lock, flags);

		/* It is unlikely but possible that the SKB will have been
		 * flushed at this point due to link change or teardown.
		 */
		if (!skb)
			continue;

		if (drop_ts) {
			dev_kfree_skb_any(skb);
			continue;
		}

		/* Extend the timestamp using cached PHC time */
		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
		if (tstamp) {
			shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
			ice_trace(tx_tstamp_complete, skb, idx);

			/* Count the number of Tx timestamps that succeeded */
			tstamp_good++;
		}

		skb_tstamp_tx(skb, &shhwtstamps);
		dev_kfree_skb_any(skb);
	}

	pf->ptp.tx_hwtstamp_good += tstamp_good;
}
679
ice_ptp_tx_tstamp_owner(struct ice_pf * pf)680 static void ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
681 {
682 struct ice_ptp_port *port;
683
684 mutex_lock(&pf->adapter->ports.lock);
685 list_for_each_entry(port, &pf->adapter->ports.ports, list_node) {
686 struct ice_ptp_tx *tx = &port->tx;
687
688 if (!tx || !tx->init)
689 continue;
690
691 ice_ptp_process_tx_tstamp(tx);
692 }
693 mutex_unlock(&pf->adapter->ports.lock);
694 }
695
696 /**
697 * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
698 * @tx: Tx tracking structure to initialize
699 *
700 * Assumes that the length has already been initialized. Do not call directly,
701 * use the ice_ptp_init_tx_* instead.
702 */
703 static int
ice_ptp_alloc_tx_tracker(struct ice_ptp_tx * tx)704 ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
705 {
706 unsigned long *in_use, *stale;
707 struct ice_tx_tstamp *tstamps;
708
709 tstamps = kzalloc_objs(*tstamps, tx->len);
710 in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
711 stale = bitmap_zalloc(tx->len, GFP_KERNEL);
712
713 if (!tstamps || !in_use || !stale) {
714 kfree(tstamps);
715 bitmap_free(in_use);
716 bitmap_free(stale);
717
718 return -ENOMEM;
719 }
720
721 tx->tstamps = tstamps;
722 tx->in_use = in_use;
723 tx->stale = stale;
724 tx->init = 1;
725 tx->last_ll_ts_idx_read = -1;
726
727 spin_lock_init(&tx->lock);
728
729 return 0;
730 }
731
/**
 * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
 * @pf: Board private structure
 * @tx: the tracker to flush
 *
 * Called during teardown when a Tx tracker is being removed. Frees every
 * skb still waiting for a timestamp, releases its tracker slot, and clears
 * any timestamp already latched in the PHY so the hardware bank is not
 * left occupied.
 */
static void
ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	struct ice_hw *hw = &pf->hw;
	unsigned long flags;
	u64 tstamp_ready;
	int err;
	u8 idx;

	err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
	if (err) {
		dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n",
			tx->block, err);

		/* If we fail to read the Tx timestamp ready bitmap just
		 * skip clearing the PHY timestamps.
		 */
		tstamp_ready = 0;
	}

	for_each_set_bit(idx, tx->in_use, tx->len) {
		u8 phy_idx = idx + tx->offset;
		struct sk_buff *skb;

		/* In case this timestamp is ready, we need to clear it. */
		if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx)))
			ice_clear_phy_tstamp(hw, tx->block, phy_idx);

		/* Detach the skb under the lock so the interrupt path
		 * cannot process the same slot concurrently.
		 */
		spin_lock_irqsave(&tx->lock, flags);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		clear_bit(idx, tx->in_use);
		clear_bit(idx, tx->stale);
		spin_unlock_irqrestore(&tx->lock, flags);

		/* Count the number of Tx timestamps flushed */
		pf->ptp.tx_hwtstamp_flushed++;

		/* Free the SKB after we've cleared the bit */
		dev_kfree_skb_any(skb);
	}
}
781
782 /**
783 * ice_ptp_mark_tx_tracker_stale - Mark unfinished timestamps as stale
784 * @tx: the tracker to mark
785 *
786 * Mark currently outstanding Tx timestamps as stale. This prevents sending
787 * their timestamp value to the stack. This is required to prevent extending
788 * the 40bit hardware timestamp incorrectly.
789 *
790 * This should be called when the PTP clock is modified such as after a set
791 * time request.
792 */
793 static void
ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx * tx)794 ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
795 {
796 unsigned long flags;
797
798 spin_lock_irqsave(&tx->lock, flags);
799 bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len);
800 spin_unlock_irqrestore(&tx->lock, flags);
801 }
802
803 /**
804 * ice_ptp_flush_all_tx_tracker - Flush all timestamp trackers on this clock
805 * @pf: Board private structure
806 *
807 * Called by the clock owner to flush all the Tx timestamp trackers associated
808 * with the clock.
809 */
810 static void
ice_ptp_flush_all_tx_tracker(struct ice_pf * pf)811 ice_ptp_flush_all_tx_tracker(struct ice_pf *pf)
812 {
813 struct ice_ptp_port *port;
814
815 list_for_each_entry(port, &pf->adapter->ports.ports, list_node)
816 ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx);
817 }
818
/**
 * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
 * @pf: Board private structure
 * @tx: Tx tracking structure to release
 *
 * Free memory associated with the Tx timestamp tracker. The tracker is
 * first marked uninitialized under the lock and any in-flight interrupt is
 * drained before the backing memory is flushed and freed.
 */
static void
ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	unsigned long flags;

	/* Stop accepting new timestamp requests */
	spin_lock_irqsave(&tx->lock, flags);
	tx->init = 0;
	spin_unlock_irqrestore(&tx->lock, flags);

	/* wait for potentially outstanding interrupt to complete */
	synchronize_irq(pf->oicr_irq.virq);

	/* Drop any skbs still waiting on a timestamp */
	ice_ptp_flush_tx_tracker(pf, tx);

	kfree(tx->tstamps);
	tx->tstamps = NULL;

	bitmap_free(tx->in_use);
	tx->in_use = NULL;

	bitmap_free(tx->stale);
	tx->stale = NULL;

	tx->len = 0;
}
851
852 /**
853 * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
854 * @pf: Board private structure
855 * @tx: the Tx tracking structure to initialize
856 * @port: the port this structure tracks
857 *
858 * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
859 * the timestamp block is shared for all ports in the same quad. To avoid
860 * ports using the same timestamp index, logically break the block of
861 * registers into chunks based on the port number.
862 *
863 * Return: 0 on success, -ENOMEM when out of memory
864 */
ice_ptp_init_tx_e82x(struct ice_pf * pf,struct ice_ptp_tx * tx,u8 port)865 static int ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx,
866 u8 port)
867 {
868 tx->block = ICE_GET_QUAD_NUM(port);
869 tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
870 tx->len = INDEX_PER_PORT_E82X;
871 tx->has_ready_bitmap = 1;
872
873 return ice_ptp_alloc_tx_tracker(tx);
874 }
875
876 /**
877 * ice_ptp_init_tx - Initialize tracking for Tx timestamps
878 * @pf: Board private structure
879 * @tx: the Tx tracking structure to initialize
880 * @port: the port this structure tracks
881 *
882 * Initialize the Tx timestamp tracker for this PF. For all PHYs except E82X,
883 * each port has its own block of timestamps, independent of the other ports.
884 *
885 * Return: 0 on success, -ENOMEM when out of memory
886 */
ice_ptp_init_tx(struct ice_pf * pf,struct ice_ptp_tx * tx,u8 port)887 static int ice_ptp_init_tx(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
888 {
889 tx->block = port;
890 tx->offset = 0;
891 tx->len = INDEX_PER_PORT;
892
893 /* The E810 PHY does not provide a timestamp ready bitmap. Instead,
894 * verify new timestamps against cached copy of the last read
895 * timestamp.
896 */
897 tx->has_ready_bitmap = pf->hw.mac_type != ICE_MAC_E810;
898
899 return ice_ptp_alloc_tx_tracker(tx);
900 }
901
/**
 * ice_ptp_update_cached_phctime - Update the cached PHC time values
 * @pf: Board specific private structure
 *
 * This function updates the system time values which are cached in the PF
 * structure and the Rx rings.
 *
 * This function must be called periodically to ensure that the cached value
 * is never more than 2 seconds old.
 *
 * Note that the cached copy in the PF PTP structure is always updated, even
 * if we can't update the copy in the Rx rings.
 *
 * Return:
 * * 0 - OK, successfully updated
 * * -EAGAIN - PF was busy, need to reschedule the update
 */
static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	unsigned long update_before;
	u64 systime;
	int i;

	/* Warn and count when the previous update is more than 2 s old */
	update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
	if (pf->ptp.cached_phc_time &&
	    time_is_before_jiffies(update_before)) {
		unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;

		dev_warn(dev, "%u msecs passed between update to cached PHC time\n",
			 jiffies_to_msecs(time_taken));
		pf->ptp.late_cached_phc_updates++;
	}

	/* Read the current PHC time */
	systime = ice_ptp_read_src_clk_reg(pf, NULL);

	/* Update the cached PHC time stored in the PF structure */
	WRITE_ONCE(pf->ptp.cached_phc_time, systime);
	WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);

	/* The ring copies are only refreshed when no other thread holds
	 * ICE_CFG_BUSY; the PF copy above has already been refreshed.
	 */
	if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
		return -EAGAIN;

	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];
		int j;

		if (!vsi)
			continue;

		/* Only PF VSI Rx rings are updated here */
		if (vsi->type != ICE_VSI_PF)
			continue;

		ice_for_each_rxq(vsi, j) {
			if (!vsi->rx_rings[j])
				continue;
			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
		}
	}
	clear_bit(ICE_CFG_BUSY, pf->state);

	return 0;
}
966
/**
 * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
 * @pf: Board specific private structure
 *
 * This function must be called when the cached PHC time is no longer valid,
 * such as after a time adjustment. It marks any currently outstanding Tx
 * timestamps as stale and updates the cached PHC time for both the PF and Rx
 * rings.
 *
 * If updating the PHC time cannot be done immediately, a warning message is
 * logged and the work item is scheduled immediately to minimize the window
 * with a wrong cached timestamp.
 */
static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	/* Update the cached PHC time immediately if possible, otherwise
	 * schedule the work item to execute soon.
	 */
	err = ice_ptp_update_cached_phctime(pf);
	if (err) {
		/* If another thread is updating the Rx rings, we won't
		 * properly reset them here. This could lead to reporting of
		 * invalid timestamps, but there isn't much we can do.
		 */
		dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
			 __func__);

		/* Queue the work item to update the Rx rings when possible */
		kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
					   msecs_to_jiffies(10));
	}

	/* Mark any outstanding timestamps as stale, since they might have
	 * been captured in hardware before the time update. This could lead
	 * to us extending them with the wrong cached value resulting in
	 * incorrect timestamp values.
	 */
	ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx);
}
1009
1010 /**
1011 * ice_ptp_write_init - Set PHC time to provided value
1012 * @pf: Board private structure
1013 * @ts: timespec structure that holds the new time value
1014 *
1015 * Set the PHC time to the specified time provided in the timespec.
1016 */
ice_ptp_write_init(struct ice_pf * pf,struct timespec64 * ts)1017 static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
1018 {
1019 u64 ns = timespec64_to_ns(ts);
1020 struct ice_hw *hw = &pf->hw;
1021
1022 return ice_ptp_init_time(hw, ns);
1023 }
1024
1025 /**
1026 * ice_ptp_write_adj - Adjust PHC clock time atomically
1027 * @pf: Board private structure
1028 * @adj: Adjustment in nanoseconds
1029 *
1030 * Perform an atomic adjustment of the PHC time by the specified number of
1031 * nanoseconds.
1032 */
ice_ptp_write_adj(struct ice_pf * pf,s32 adj)1033 static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
1034 {
1035 struct ice_hw *hw = &pf->hw;
1036
1037 return ice_ptp_adj_clock(hw, adj);
1038 }
1039
1040 /**
1041 * ice_base_incval - Get base timer increment value
1042 * @pf: Board private structure
1043 *
1044 * Look up the base timer increment value for this device. The base increment
1045 * value is used to define the nominal clock tick rate. This increment value
1046 * is programmed during device initialization. It is also used as the basis
1047 * for calculating adjustments using scaled_ppm.
1048 */
ice_base_incval(struct ice_pf * pf)1049 static u64 ice_base_incval(struct ice_pf *pf)
1050 {
1051 struct ice_hw *hw = &pf->hw;
1052 u64 incval;
1053
1054 incval = ice_get_base_incval(hw);
1055
1056 dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
1057 incval);
1058
1059 return incval;
1060 }
1061
/**
 * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state
 * @port: PTP port for which Tx FIFO is checked
 *
 * Return: 0 when the FIFO is empty (or was already known good, or has been
 * reset after repeated failures), a negative error code when reading the
 * FIFO status fails, or -EAGAIN when the FIFO is not yet empty and the
 * check should be retried later.
 */
static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
{
	/* Pairs of ports within a quad share one FIFO status register */
	int offs = port->port_num % ICE_PORTS_PER_QUAD;
	int quad = ICE_GET_QUAD_NUM(port->port_num);
	struct ice_pf *pf;
	struct ice_hw *hw;
	u32 val, phy_sts;
	int err;

	pf = ptp_port_to_pf(port);
	hw = &pf->hw;

	/* FIFO already observed empty for this port; nothing to do */
	if (port->tx_fifo_busy_cnt == FIFO_OK)
		return 0;

	/* need to read FIFO state: ports 0/1 via FIFO01, ports 2/3 via FIFO23 */
	if (offs == 0 || offs == 1)
		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS,
					     &val);
	else
		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS,
					     &val);

	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
			port->port_num, err);
		return err;
	}

	/* Odd port offsets report in the FIFO13 field, even ones in FIFO02 */
	if (offs & 0x1)
		phy_sts = FIELD_GET(Q_REG_FIFO13_M, val);
	else
		phy_sts = FIELD_GET(Q_REG_FIFO02_M, val);

	if (phy_sts & FIFO_EMPTY) {
		port->tx_fifo_busy_cnt = FIFO_OK;
		return 0;
	}

	port->tx_fifo_busy_cnt++;

	dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
		port->tx_fifo_busy_cnt, port->port_num);

	/* After enough failed attempts, reset the quad's timestamp memory
	 * instead of waiting forever for the FIFO to drain.
	 */
	if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
		dev_dbg(ice_pf_to_dev(pf),
			"Port %d Tx FIFO still not empty; resetting quad %d\n",
			port->port_num, quad);
		ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
		port->tx_fifo_busy_cnt = FIFO_OK;
		return 0;
	}

	return -EAGAIN;
}
1121
/**
 * ice_ptp_wait_for_offsets - Check for valid Tx and Rx offsets
 * @work: Pointer to the kthread_work structure for this task
 *
 * Check whether hardware has completed measuring the Tx and Rx offset values
 * used to configure and enable vernier timestamp calibration.
 *
 * Once the offset in either direction is measured, configure the associated
 * registers with the calibrated offset values and enable timestamping. The Tx
 * and Rx directions are configured independently as soon as their associated
 * offsets are known.
 *
 * This function reschedules itself until both Tx and Rx calibration have
 * completed.
 */
static void ice_ptp_wait_for_offsets(struct kthread_work *work)
{
	struct ice_ptp_port *port;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int tx_err;
	int rx_err;

	port = container_of(work, struct ice_ptp_port, ov_work.work);
	pf = ptp_port_to_pf(port);
	hw = &pf->hw;

	if (ice_is_reset_in_progress(pf->state)) {
		/* wait for device driver to complete reset */
		kthread_queue_delayed_work(pf->ptp.kworker,
					   &port->ov_work,
					   msecs_to_jiffies(100));
		return;
	}

	/* Tx offset is only programmed once the Tx FIFO is empty */
	tx_err = ice_ptp_check_tx_fifo(port);
	if (!tx_err)
		tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num);
	/* Rx offset is configured independently of the Tx FIFO state */
	rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num);
	if (tx_err || rx_err) {
		/* Tx and/or Rx offset not yet configured, try again later */
		kthread_queue_delayed_work(pf->ptp.kworker,
					   &port->ov_work,
					   msecs_to_jiffies(100));
		return;
	}
}
1169
/**
 * ice_ptp_port_phy_stop - Stop timestamping for a PHY port
 * @ptp_port: PTP port to stop
 *
 * Return: 0 on success (E810/E830 require no PHY writes), -ENODEV for an
 * unknown MAC type, or an error from stopping the PHY timer.
 */
static int
ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
{
	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
	u8 port = ptp_port->port_num;
	struct ice_hw *hw = &pf->hw;
	int err;

	mutex_lock(&ptp_port->ps_lock);

	switch (hw->mac_type) {
	case ICE_MAC_E810:
	case ICE_MAC_E830:
		/* Nothing to stop for these MACs */
		err = 0;
		break;
	case ICE_MAC_GENERIC:
		/* Cancel the offset-valid check before stopping the timer */
		kthread_cancel_delayed_work_sync(&ptp_port->ov_work);

		err = ice_stop_phy_timer_e82x(hw, port, true);
		break;
	case ICE_MAC_GENERIC_3K_E825:
		err = ice_stop_phy_timer_eth56g(hw, port, true);
		break;
	default:
		err = -ENODEV;
	}
	/* -EBUSY is deliberately not logged here */
	if (err && err != -EBUSY)
		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
			port, err);

	mutex_unlock(&ptp_port->ps_lock);

	return err;
}
1208
/**
 * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping
 * @ptp_port: PTP port for which the PHY start is set
 *
 * Start the PHY timestamping block, and initiate Vernier timestamping
 * calibration. If timestamping cannot be calibrated (such as if link is down)
 * then disable the timestamping block instead.
 *
 * Return: 0 on success, -ENODEV for an unknown MAC type, or an error
 * reported by the HW layer when starting/stopping the PHY timer.
 */
static int
ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
{
	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
	u8 port = ptp_port->port_num;
	struct ice_hw *hw = &pf->hw;
	unsigned long flags;
	int err;

	/* Without link there is nothing to calibrate against; stop the
	 * timestamping block instead.
	 */
	if (!ptp_port->link_up)
		return ice_ptp_port_phy_stop(ptp_port);

	mutex_lock(&ptp_port->ps_lock);

	switch (hw->mac_type) {
	case ICE_MAC_E810:
	case ICE_MAC_E830:
		/* No PHY reconfiguration needed for these MACs */
		err = 0;
		break;
	case ICE_MAC_GENERIC:
		/* Cancel any outstanding offset-valid check first */
		kthread_cancel_delayed_work_sync(&ptp_port->ov_work);

		/* temporarily disable Tx timestamps while calibrating
		 * PHY offset
		 */
		spin_lock_irqsave(&ptp_port->tx.lock, flags);
		ptp_port->tx.calibrating = true;
		spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
		ptp_port->tx_fifo_busy_cnt = 0;

		/* Start the PHY timer in Vernier mode */
		err = ice_start_phy_timer_e82x(hw, port);
		if (err)
			break;

		/* Enable Tx timestamps right away */
		spin_lock_irqsave(&ptp_port->tx.lock, flags);
		ptp_port->tx.calibrating = false;
		spin_unlock_irqrestore(&ptp_port->tx.lock, flags);

		/* Kick off the wait for Tx/Rx offset calibration */
		kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
					   0);
		break;
	case ICE_MAC_GENERIC_3K_E825:
		err = ice_start_phy_timer_eth56g(hw, port);
		break;
	default:
		err = -ENODEV;
	}

	if (err)
		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
			port, err);

	mutex_unlock(&ptp_port->ps_lock);

	return err;
}
1276
/**
 * ice_ptp_link_change - Reconfigure PTP after link status change
 * @pf: Board private structure
 * @linkup: Link is up or down
 */
void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
{
	struct ice_ptp_port *ptp_port;
	struct ice_hw *hw = &pf->hw;

	/* Nothing to do unless PTP was fully initialized */
	if (pf->ptp.state != ICE_PTP_READY)
		return;

	ptp_port = &pf->ptp.port;

	/* Update cached link status for this port immediately */
	ptp_port->link_up = linkup;

	/* Skip HW writes if reset is in progress */
	if (pf->hw.reset_ongoing)
		return;

	if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) {
		int pin, err;

		/* Recovered-clock handling only applies with DPLL enabled */
		if (!test_bit(ICE_FLAG_DPLL, pf->flags))
			return;

		mutex_lock(&pf->dplls.lock);
		for (pin = 0; pin < ICE_SYNCE_CLK_NUM; pin++) {
			enum ice_synce_clk clk_pin;
			bool active;
			u8 port_num;

			port_num = ptp_port->port_num;
			clk_pin = (enum ice_synce_clk)pin;
			/* Query whether the bypass mux for this clock pin is
			 * active for this port.
			 */
			err = ice_tspll_bypass_mux_active_e825c(hw,
								port_num,
								&active,
								clk_pin);
			if (WARN_ON_ONCE(err)) {
				mutex_unlock(&pf->dplls.lock);
				return;
			}

			/* Reconfigure the Ethernet divider; only treat a
			 * failure as fatal when the mux is active.
			 */
			err = ice_tspll_cfg_synce_ethdiv_e825c(hw, clk_pin);
			if (active && WARN_ON_ONCE(err)) {
				mutex_unlock(&pf->dplls.lock);
				return;
			}
		}
		mutex_unlock(&pf->dplls.lock);
	}

	switch (hw->mac_type) {
	case ICE_MAC_E810:
	case ICE_MAC_E830:
		/* Do not reconfigure E810 or E830 PHY */
		return;
	case ICE_MAC_GENERIC:
		ice_ptp_port_phy_restart(ptp_port);
		return;
	case ICE_MAC_GENERIC_3K_E825:
		/* Only restart (and recalibrate) on link up */
		if (linkup)
			ice_ptp_port_phy_restart(ptp_port);
		return;
	default:
		dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__);
	}
}
1347
/**
 * ice_ptp_cfg_phy_interrupt - Configure PHY interrupt settings
 * @pf: PF private structure
 * @ena: bool value to enable or disable interrupt
 * @threshold: Minimum number of packets at which intr is triggered
 *
 * Utility function to configure all the PHY interrupt settings, including
 * whether the PHY interrupt is enabled, and what threshold to use. Also
 * configures The E82X timestamp owner to react to interrupts from all PHYs.
 *
 * Return: 0 on success, -EOPNOTSUPP when PHY model incorrect, other error codes
 * when failed to configure PHY interrupt for E82X
 */
static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	/* Reset timestamp memory before reprogramming interrupts */
	ice_ptp_reset_ts_memory(hw);

	switch (hw->mac_type) {
	case ICE_MAC_E810:
	case ICE_MAC_E830:
		/* No per-PHY interrupt configuration for these MACs */
		return 0;
	case ICE_MAC_GENERIC: {
		int quad;

		/* E82X PHY interrupts are configured per quad */
		for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
		     quad++) {
			int err;

			err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
			if (err) {
				dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
					quad, err);
				return err;
			}
		}

		return 0;
	}
	case ICE_MAC_GENERIC_3K_E825: {
		int port;

		/* E825 PHY interrupts are configured per port */
		for (port = 0; port < hw->ptp.num_lports; port++) {
			int err;

			err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
			if (err) {
				dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
					port, err);
				return err;
			}
		}

		return 0;
	}
	case ICE_MAC_UNKNOWN:
	default:
		return -EOPNOTSUPP;
	}
}
1410
1411 /**
1412 * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block
1413 * @pf: Board private structure
1414 */
ice_ptp_reset_phy_timestamping(struct ice_pf * pf)1415 static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
1416 {
1417 ice_ptp_port_phy_restart(&pf->ptp.port);
1418 }
1419
1420 /**
1421 * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping
1422 * @pf: Board private structure
1423 */
ice_ptp_restart_all_phy(struct ice_pf * pf)1424 static void ice_ptp_restart_all_phy(struct ice_pf *pf)
1425 {
1426 struct list_head *entry;
1427
1428 list_for_each(entry, &pf->adapter->ports.ports) {
1429 struct ice_ptp_port *port = list_entry(entry,
1430 struct ice_ptp_port,
1431 list_node);
1432
1433 if (port->link_up)
1434 ice_ptp_port_phy_restart(port);
1435 }
1436 }
1437
1438 /**
1439 * ice_ptp_adjfine - Adjust clock increment rate
1440 * @info: the driver's PTP info structure
1441 * @scaled_ppm: Parts per million with 16-bit fractional field
1442 *
1443 * Adjust the frequency of the clock by the indicated scaled ppm from the
1444 * base frequency.
1445 */
ice_ptp_adjfine(struct ptp_clock_info * info,long scaled_ppm)1446 static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
1447 {
1448 struct ice_pf *pf = ptp_info_to_pf(info);
1449 struct ice_hw *hw = &pf->hw;
1450 u64 incval;
1451 int err;
1452
1453 incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm);
1454 err = ice_ptp_write_incval_locked(hw, incval);
1455 if (err) {
1456 dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
1457 err);
1458 return -EIO;
1459 }
1460
1461 return 0;
1462 }
1463
/**
 * ice_ptp_extts_event - Process PTP external clock event
 * @pf: Board private structure
 */
void ice_ptp_extts_event(struct ice_pf *pf)
{
	struct ptp_clock_event event;
	struct ice_hw *hw = &pf->hw;
	u8 chan, tmr_idx;
	u32 hi, lo;

	/* Don't process timestamp events if PTP is not ready */
	if (pf->ptp.state != ICE_PTP_READY)
		return;

	/* Events are read from the timer index owned by this function */
	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
	/* Event time is captured by one of the two matched registers
	 * GLTSYN_EVNT_L: 32 LSB of sampled time event
	 * GLTSYN_EVNT_H: 32 MSB of sampled time event
	 * Event is defined in GLTSYN_EVNT_0 register
	 */
	for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
		int pin_desc_idx;

		/* Check if channel is enabled */
		if (!(pf->ptp.ext_ts_irq & (1 << chan)))
			continue;

		lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
		hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
		event.timestamp = (u64)hi << 32 | lo;

		/* Add delay compensation (delay[0] is the input delay) */
		pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
		if (pin_desc_idx >= 0) {
			const struct ice_ptp_pin_desc *desc;

			desc = &pf->ptp.ice_pin_desc[pin_desc_idx];
			event.timestamp -= desc->delay[0];
		}

		event.type = PTP_CLOCK_EXTTS;
		event.index = chan;
		/* Clear the pending bit before notifying the PTP stack */
		pf->ptp.ext_ts_irq &= ~(1 << chan);
		ptp_clock_event(pf->ptp.clock, &event);
	}
}
1511
/**
 * ice_ptp_cfg_extts - Configure EXTTS pin and channel
 * @pf: Board private structure
 * @rq: External timestamp request
 * @on: Enable/disable flag
 *
 * Configure an external timestamp event on the requested channel.
 *
 * Return: 0 on success, negative error code otherwise
 */
static int ice_ptp_cfg_extts(struct ice_pf *pf, struct ptp_extts_request *rq,
			     int on)
{
	u32 aux_reg, gpio_reg, irq_reg;
	struct ice_hw *hw = &pf->hw;
	unsigned int chan, gpio_pin;
	int pin_desc_idx;
	u8 tmr_idx;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
	chan = rq->index;

	/* Map the channel to its pin descriptor; gpio[0] is the input GPIO */
	pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
	if (pin_desc_idx < 0)
		return -EIO;

	gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[0];
	irq_reg = rd32(hw, PFINT_OICR_ENA);

	if (on) {
		/* Enable the interrupt */
		irq_reg |= PFINT_OICR_TSYN_EVNT_M;
		aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;

#define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE	BIT(0)
#define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE	BIT(1)

		/* set event level to requested edge */
		if (rq->flags & PTP_FALLING_EDGE)
			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
		if (rq->flags & PTP_RISING_EDGE)
			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;

		/* Write GPIO CTL reg.
		 * 0x1 is input sampled by EVENT register(channel)
		 * + num_in_channels * tmr_idx
		 */
		gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M,
				      1 + chan + (tmr_idx * 3));
	} else {
		bool last_enabled = true;

		/* clear the values we set to reset defaults */
		aux_reg = 0;
		gpio_reg = 0;

		/* Only mask the top-level event interrupt when no other
		 * EXTTS channel remains enabled.
		 */
		for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++)
			if ((pf->ptp.extts_rqs[i].flags &
			     PTP_ENABLE_FEATURE) &&
			    i != chan) {
				last_enabled = false;
			}

		if (last_enabled)
			irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
	}

	wr32(hw, PFINT_OICR_ENA, irq_reg);
	wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);

	return 0;
}
1585
1586 /**
1587 * ice_ptp_disable_all_extts - Disable all EXTTS channels
1588 * @pf: Board private structure
1589 */
ice_ptp_disable_all_extts(struct ice_pf * pf)1590 static void ice_ptp_disable_all_extts(struct ice_pf *pf)
1591 {
1592 for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts ; i++)
1593 if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE)
1594 ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i],
1595 false);
1596
1597 synchronize_irq(pf->oicr_irq.virq);
1598 }
1599
1600 /**
1601 * ice_ptp_enable_all_extts - Enable all EXTTS channels
1602 * @pf: Board private structure
1603 *
1604 * Called during reset to restore user configuration.
1605 */
ice_ptp_enable_all_extts(struct ice_pf * pf)1606 static void ice_ptp_enable_all_extts(struct ice_pf *pf)
1607 {
1608 for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts ; i++)
1609 if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE)
1610 ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i],
1611 true);
1612 }
1613
/**
 * ice_ptp_write_perout - Write periodic wave parameters to HW
 * @hw: pointer to the HW struct
 * @chan: target channel
 * @gpio_pin: target GPIO pin
 * @start: target time to start periodic output
 * @period: target period; 0 disables the output and clears the registers
 *
 * Return: 0 on success, negative error code otherwise
 */
static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
				unsigned int gpio_pin, u64 start, u64 period)
{
	u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
	u32 val = 0;

	/* 0. Reset mode & out_en in AUX_OUT */
	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);

	if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) {
		int err;

		/* Enable/disable CGU 1PPS output for E825C */
		err = ice_tspll_cfg_pps_out_e825c(hw, !!period);
		if (err)
			return err;
	}

	/* 1. Write perout with half of required period value.
	 * HW toggles output when source clock hits the TGT and then adds
	 * GLTSYN_CLKO value to the target, so it ends up with 50% duty cycle.
	 */
	period >>= 1;

	/* For proper operation, GLTSYN_CLKO must be larger than clock tick and
	 * period has to fit in 32 bit register.
	 */
#define MIN_PULSE 3
	if (!!period && (period <= MIN_PULSE || period > U32_MAX)) {
		dev_err(ice_hw_to_dev(hw), "CLK period ticks must be >= %d && <= 2^32",
			MIN_PULSE);
		return -EIO;
	}

	wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));

	/* 2. Write TARGET time */
	wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start));
	wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start));

	/* 3. Write AUX_OUT register (enable output only for non-zero period) */
	if (!!period)
		val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);

	/* 4. write GPIO CTL reg */
	val = GLGEN_GPIO_CTL_PIN_DIR_M;
	if (!!period)
		val |= FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M,
				  8 + chan + (tmr_idx * 4));

	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
	ice_flush(hw);

	return 0;
}
1681
/**
 * ice_ptp_cfg_perout - Configure clock to generate periodic wave
 * @pf: Board private structure
 * @rq: Periodic output request
 * @on: Enable/disable flag
 *
 * Configure the internal clock generator modules to generate the clock wave of
 * specified period.
 *
 * Return: 0 on success, negative error code otherwise
 */
static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
			      int on)
{
	unsigned int gpio_pin, prop_delay_ns;
	u64 clk, period, start, phase;
	struct ice_hw *hw = &pf->hw;
	int pin_desc_idx;

	pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_PEROUT, rq->index);
	if (pin_desc_idx < 0)
		return -EIO;

	/* gpio[1] is the output GPIO, delay[1] its propagation delay */
	gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1];
	prop_delay_ns = pf->ptp.ice_pin_desc[pin_desc_idx].delay[1];
	period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec;

	/* If we're disabling the output or period is 0, clear out CLKO and TGT
	 * and keep output level low.
	 */
	if (!on || !period)
		return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0);

	if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 &&
	    period != NSEC_PER_SEC && hw->mac_type == ICE_MAC_GENERIC) {
		dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n");
		return -EOPNOTSUPP;
	}

	/* The period is halved in HW for a 50% duty cycle, so it must be even */
	if (period & 0x1) {
		dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
		return -EIO;
	}

	start = rq->start.sec * NSEC_PER_SEC + rq->start.nsec;

	/* If PTP_PEROUT_PHASE is set, rq has phase instead of start time */
	if (rq->flags & PTP_PEROUT_PHASE)
		phase = start;
	else
		div64_u64_rem(start, period, &phase);

	/* If we have only phase or start time is in the past, start the timer
	 * at the next multiple of period, maintaining phase at least 0.5 second
	 * from now, so we have time to write it to HW.
	 */
	clk = ice_ptp_read_src_clk_reg(pf, NULL) + NSEC_PER_MSEC * 500;
	if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns)
		start = div64_u64(clk + period - 1, period) * period + phase;

	/* Compensate for propagation delay from the generator to the pin. */
	start -= prop_delay_ns;

	return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period);
}
1747
1748 /**
1749 * ice_ptp_disable_all_perout - Disable all currently configured outputs
1750 * @pf: Board private structure
1751 *
1752 * Disable all currently configured clock outputs. This is necessary before
1753 * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_perout to
1754 * re-enable the clocks again.
1755 */
ice_ptp_disable_all_perout(struct ice_pf * pf)1756 static void ice_ptp_disable_all_perout(struct ice_pf *pf)
1757 {
1758 for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++)
1759 if (pf->ptp.perout_rqs[i].period.sec ||
1760 pf->ptp.perout_rqs[i].period.nsec)
1761 ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i],
1762 false);
1763 }
1764
1765 /**
1766 * ice_ptp_enable_all_perout - Enable all configured periodic clock outputs
1767 * @pf: Board private structure
1768 *
1769 * Enable all currently configured clock outputs. Use this after
1770 * ice_ptp_disable_all_perout to reconfigure the output signals according to
1771 * their configuration.
1772 */
ice_ptp_enable_all_perout(struct ice_pf * pf)1773 static void ice_ptp_enable_all_perout(struct ice_pf *pf)
1774 {
1775 for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++)
1776 if (pf->ptp.perout_rqs[i].period.sec ||
1777 pf->ptp.perout_rqs[i].period.nsec)
1778 ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i],
1779 true);
1780 }
1781
1782 /**
1783 * ice_verify_pin - verify if pin supports requested pin function
1784 * @info: the driver's PTP info structure
1785 * @pin: Pin index
1786 * @func: Assigned function
1787 * @chan: Assigned channel
1788 *
1789 * Return: 0 on success, -EOPNOTSUPP when function is not supported.
1790 */
ice_verify_pin(struct ptp_clock_info * info,unsigned int pin,enum ptp_pin_function func,unsigned int chan)1791 static int ice_verify_pin(struct ptp_clock_info *info, unsigned int pin,
1792 enum ptp_pin_function func, unsigned int chan)
1793 {
1794 struct ice_pf *pf = ptp_info_to_pf(info);
1795 const struct ice_ptp_pin_desc *pin_desc;
1796
1797 pin_desc = &pf->ptp.ice_pin_desc[pin];
1798
1799 /* Is assigned function allowed? */
1800 switch (func) {
1801 case PTP_PF_EXTTS:
1802 if (pin_desc->gpio[0] < 0)
1803 return -EOPNOTSUPP;
1804 break;
1805 case PTP_PF_PEROUT:
1806 if (pin_desc->gpio[1] < 0)
1807 return -EOPNOTSUPP;
1808 break;
1809 case PTP_PF_NONE:
1810 break;
1811 case PTP_PF_PHYSYNC:
1812 default:
1813 return -EOPNOTSUPP;
1814 }
1815
1816 return 0;
1817 }
1818
/**
 * ice_ptp_gpio_enable - Enable/disable ancillary features of PHC
 * @info: The driver's PTP info structure
 * @rq: The requested feature to change
 * @on: Enable/disable flag
 *
 * The accepted request is also cached in pf->ptp so it can be replayed
 * later (see ice_ptp_enable_all_extts/ice_ptp_enable_all_perout); on
 * failure the cached entry is neutralized so a failed request is never
 * replayed.
 *
 * Return: 0 on success, negative error code otherwise
 */
static int ice_ptp_gpio_enable(struct ptp_clock_info *info,
			       struct ptp_clock_request *rq, int on)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	int err;

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
	{
		struct ptp_perout_request *cached =
			&pf->ptp.perout_rqs[rq->perout.index];

		err = ice_ptp_cfg_perout(pf, &rq->perout, on);
		if (!err) {
			/* Cache the accepted request for later replay */
			*cached = rq->perout;
		} else {
			/* Zero the period so this entry is never replayed */
			cached->period.sec = 0;
			cached->period.nsec = 0;
		}
		return err;
	}
	case PTP_CLK_REQ_EXTTS:
	{
		struct ptp_extts_request *cached =
			&pf->ptp.extts_rqs[rq->extts.index];

		err = ice_ptp_cfg_extts(pf, &rq->extts, on);
		if (!err)
			*cached = rq->extts;
		else
			/* Drop the enable flag so this entry is not replayed */
			cached->flags &= ~PTP_ENABLE_FEATURE;
		return err;
	}
	default:
		return -EOPNOTSUPP;
	}
}
1864
1865 /**
1866 * ice_ptp_gettimex64 - Get the time of the clock
1867 * @info: the driver's PTP info structure
1868 * @ts: timespec64 structure to hold the current time value
1869 * @sts: Optional parameter for holding a pair of system timestamps from
1870 * the system clock. Will be ignored if NULL is given.
1871 *
1872 * Read the device clock and return the correct value on ns, after converting it
1873 * into a timespec struct.
1874 */
1875 static int
ice_ptp_gettimex64(struct ptp_clock_info * info,struct timespec64 * ts,struct ptp_system_timestamp * sts)1876 ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
1877 struct ptp_system_timestamp *sts)
1878 {
1879 struct ice_pf *pf = ptp_info_to_pf(info);
1880 u64 time_ns;
1881
1882 time_ns = ice_ptp_read_src_clk_reg(pf, sts);
1883 *ts = ns_to_timespec64(time_ns);
1884 return 0;
1885 }
1886
/**
 * ice_ptp_settime64 - Set the time of the clock
 * @info: the driver's PTP info structure
 * @ts: timespec64 structure that holds the new time value
 *
 * Set the device clock to the user input value. The conversion from timespec
 * to ns happens in the write function.
 *
 * Return: 0 on success, -EBUSY when the PTP HW semaphore cannot be taken,
 * or an error from writing the new time.
 */
static int
ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct timespec64 ts64 = *ts;
	struct ice_hw *hw = &pf->hw;
	int err;

	/* For Vernier mode on E82X, we need to recalibrate after new settime.
	 * Start with marking timestamps as invalid.
	 */
	if (hw->mac_type == ICE_MAC_GENERIC) {
		err = ice_ptp_clear_phy_offset_ready_e82x(hw);
		if (err)
			dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
	}

	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto exit;
	}

	/* Disable periodic outputs */
	ice_ptp_disable_all_perout(pf);

	err = ice_ptp_write_init(pf, &ts64);
	ice_ptp_unlock(hw);

	/* Cached PHC time is invalid after a successful time change */
	if (!err)
		ice_ptp_reset_cached_phctime(pf);

	/* Reenable periodic outputs */
	ice_ptp_enable_all_perout(pf);

	/* Recalibrate and re-enable timestamp blocks for E822/E823 */
	if (hw->mac_type == ICE_MAC_GENERIC)
		ice_ptp_restart_all_phy(pf);
exit:
	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
		return err;
	}

	return 0;
}
1940
1941 /**
1942 * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
1943 * @info: the driver's PTP info structure
1944 * @delta: Offset in nanoseconds to adjust the time by
1945 */
ice_ptp_adjtime_nonatomic(struct ptp_clock_info * info,s64 delta)1946 static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
1947 {
1948 struct timespec64 now, then;
1949 int ret;
1950
1951 then = ns_to_timespec64(delta);
1952 ret = ice_ptp_gettimex64(info, &now, NULL);
1953 if (ret)
1954 return ret;
1955 now = timespec64_add(now, then);
1956
1957 return ice_ptp_settime64(info, (const struct timespec64 *)&now);
1958 }
1959
/**
 * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta
 * @info: the driver's PTP info structure
 * @delta: Offset in nanoseconds to adjust the time by
 *
 * Return: 0 on success, -EBUSY when the PTP HW semaphore cannot be taken,
 * or an error from applying the adjustment.
 */
static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);

	/* Hardware only supports atomic adjustments using signed 32-bit
	 * integers. For any adjustment outside this range, perform
	 * a non-atomic get->adjust->set flow.
	 */
	if (delta > S32_MAX || delta < S32_MIN) {
		dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta);
		return ice_ptp_adjtime_nonatomic(info, delta);
	}

	if (!ice_ptp_lock(hw)) {
		dev_err(dev, "PTP failed to acquire semaphore in adjtime\n");
		return -EBUSY;
	}

	/* Disable periodic outputs */
	ice_ptp_disable_all_perout(pf);

	err = ice_ptp_write_adj(pf, delta);

	/* Reenable periodic outputs */
	ice_ptp_enable_all_perout(pf);

	ice_ptp_unlock(hw);

	if (err) {
		dev_err(dev, "PTP failed to adjust time, err %d\n", err);
		return err;
	}

	/* The cached PHC value is no longer valid after the adjustment */
	ice_ptp_reset_cached_phctime(pf);

	return 0;
}
2007
/**
 * struct ice_crosststamp_cfg - Device cross timestamp configuration
 * @lock_reg: The hardware semaphore lock to use
 * @lock_busy: Bit in the semaphore lock indicating the lock is busy
 * @ctl_reg: The hardware register to request cross timestamp
 * @ctl_active: Bit in the control register to request cross timestamp
 * @art_time_l: Lower 32-bits of ART system time
 * @art_time_h: Upper 32-bits of ART system time
 * @dev_time_l: Lower 32-bits of device time (per timer index)
 * @dev_time_h: Upper 32-bits of device time (per timer index)
 */
struct ice_crosststamp_cfg {
	/* HW semaphore lock register */
	u32 lock_reg;
	u32 lock_busy;

	/* Capture control register */
	u32 ctl_reg;
	u32 ctl_active;

	/* Time storage; dev_time_* arrays are indexed by source timer (0/1) */
	u32 art_time_l;
	u32 art_time_h;
	u32 dev_time_l[2];
	u32 dev_time_h[2];
};
2034
/* Cross timestamp register layout for E82X family hardware */
static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e82x = {
	.lock_reg = PFHH_SEM,
	.lock_busy = PFHH_SEM_BUSY_M,
	.ctl_reg = GLHH_ART_CTL,
	.ctl_active = GLHH_ART_CTL_ACTIVE_M,
	.art_time_l = GLHH_ART_TIME_L,
	.art_time_h = GLHH_ART_TIME_H,
	.dev_time_l[0] = GLTSYN_HHTIME_L(0),
	.dev_time_h[0] = GLTSYN_HHTIME_H(0),
	.dev_time_l[1] = GLTSYN_HHTIME_L(1),
	.dev_time_h[1] = GLTSYN_HHTIME_H(1),
};
2047
#ifdef CONFIG_ICE_HWTS
/* Cross timestamp register layout for E830 hardware (PTM-based registers) */
static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e830 = {
	.lock_reg = E830_PFPTM_SEM,
	.lock_busy = E830_PFPTM_SEM_BUSY_M,
	.ctl_reg = E830_GLPTM_ART_CTL,
	.ctl_active = E830_GLPTM_ART_CTL_ACTIVE_M,
	.art_time_l = E830_GLPTM_ART_TIME_L,
	.art_time_h = E830_GLPTM_ART_TIME_H,
	.dev_time_l[0] = E830_GLTSYN_PTMTIME_L(0),
	.dev_time_h[0] = E830_GLTSYN_PTMTIME_H(0),
	.dev_time_l[1] = E830_GLTSYN_PTMTIME_L(1),
	.dev_time_h[1] = E830_GLTSYN_PTMTIME_H(1),
};

#endif /* CONFIG_ICE_HWTS */
/**
 * struct ice_crosststamp_ctx - Device cross timestamp context
 * @snapshot: snapshot of system clocks for historic interpolation
 * @pf: pointer to the PF private structure
 * @cfg: pointer to hardware configuration for cross timestamp
 *
 * Passed to ice_capture_crosststamp() by get_device_system_crosststamp().
 */
struct ice_crosststamp_ctx {
	struct system_time_snapshot snapshot;
	struct ice_pf *pf;
	const struct ice_crosststamp_cfg *cfg;
};
2074
/**
 * ice_capture_crosststamp - Capture a device/system cross timestamp
 * @device: Current device time
 * @system: System counter value read synchronously with device time
 * @__ctx: Context passed from ice_ptp_getcrosststamp
 *
 * Read device and system (ART) clock simultaneously and return the corrected
 * clock values in ns.
 *
 * Return: zero on success, or a negative error code on failure.
 */
static int ice_capture_crosststamp(ktime_t *device,
				   struct system_counterval_t *system,
				   void *__ctx)
{
	struct ice_crosststamp_ctx *ctx = __ctx;
	const struct ice_crosststamp_cfg *cfg;
	u32 lock, ctl, ts_lo, ts_hi, tmr_idx;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int err;
	u64 ts;

	cfg = ctx->cfg;
	pf = ctx->pf;
	hw = &pf->hw;

	/* The config only stores register pairs for timer index 0 and 1 */
	tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
	if (tmr_idx > 1)
		return -EINVAL;

	/* Poll until we obtain the cross-timestamp hardware semaphore */
	err = rd32_poll_timeout(hw, cfg->lock_reg, lock,
				!(lock & cfg->lock_busy),
				10 * USEC_PER_MSEC, 50 * USEC_PER_MSEC);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to get cross timestamp lock\n");
		return -EBUSY;
	}

	/* Snapshot system time for historic interpolation */
	ktime_get_snapshot(&ctx->snapshot);

	/* Program cmd to master timer */
	ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);

	/* Start the ART and device clock sync sequence */
	ctl = rd32(hw, cfg->ctl_reg);
	ctl |= cfg->ctl_active;
	wr32(hw, cfg->ctl_reg, ctl);

	/* Poll until hardware completes the capture */
	err = rd32_poll_timeout(hw, cfg->ctl_reg, ctl, !(ctl & cfg->ctl_active),
				5, 20 * USEC_PER_MSEC);
	if (err)
		goto err_timeout;

	/* Read ART system time */
	ts_lo = rd32(hw, cfg->art_time_l);
	ts_hi = rd32(hw, cfg->art_time_h);
	ts = ((u64)ts_hi << 32) | ts_lo;
	system->cycles = ts;
	system->cs_id = CSID_X86_ART;
	system->use_nsecs = true;

	/* Read Device source clock time */
	ts_lo = rd32(hw, cfg->dev_time_l[tmr_idx]);
	ts_hi = rd32(hw, cfg->dev_time_h[tmr_idx]);
	ts = ((u64)ts_hi << 32) | ts_lo;
	*device = ns_to_ktime(ts);

	/* On timeout, fall through to restore the NOP command and release
	 * the semaphore before returning the error.
	 */
err_timeout:
	/* Clear the master timer */
	ice_ptp_src_cmd(hw, ICE_PTP_NOP);

	/* Release HW lock */
	lock = rd32(hw, cfg->lock_reg);
	lock &= ~cfg->lock_busy;
	wr32(hw, cfg->lock_reg, lock);

	return err;
}
2157
/**
 * ice_ptp_getcrosststamp - Capture a device cross timestamp
 * @info: the driver's PTP info structure
 * @cts: The memory to fill the cross timestamp info
 *
 * Capture a cross timestamp between the ART and the device PTP hardware
 * clock. Fill the cross timestamp information and report it back to the
 * caller.
 *
 * In order to correctly correlate the ART timestamp back to the TSC time, the
 * CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
 *
 * Return: zero on success, or a negative error code on failure.
 */
static int ice_ptp_getcrosststamp(struct ptp_clock_info *info,
				  struct system_device_crosststamp *cts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_crosststamp_ctx ctx = {
		.pf = pf,
	};

	switch (pf->hw.mac_type) {
	case ICE_MAC_GENERIC:
	case ICE_MAC_GENERIC_3K_E825:
		/* E825 shares the E82X cross timestamp register layout */
		ctx.cfg = &ice_crosststamp_cfg_e82x;
		break;
#ifdef CONFIG_ICE_HWTS
	case ICE_MAC_E830:
		ctx.cfg = &ice_crosststamp_cfg_e830;
		break;
#endif /* CONFIG_ICE_HWTS */
	default:
		return -EOPNOTSUPP;
	}

	return get_device_system_crosststamp(ice_capture_crosststamp, &ctx,
					     &ctx.snapshot, cts);
}
2197
2198 /**
2199 * ice_ptp_hwtstamp_get - interface to read the timestamping config
2200 * @netdev: Pointer to network interface device structure
2201 * @config: Timestamping configuration structure
2202 *
2203 * Copy the timestamping config to user buffer
2204 */
ice_ptp_hwtstamp_get(struct net_device * netdev,struct kernel_hwtstamp_config * config)2205 int ice_ptp_hwtstamp_get(struct net_device *netdev,
2206 struct kernel_hwtstamp_config *config)
2207 {
2208 struct ice_pf *pf = ice_netdev_to_pf(netdev);
2209
2210 if (pf->ptp.state != ICE_PTP_READY)
2211 return -EIO;
2212
2213 *config = pf->ptp.tstamp_config;
2214
2215 return 0;
2216 }
2217
/**
 * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
 * @pf: Board private structure
 * @config: hwtstamp settings requested or saved
 *
 * Return: 0 on success, -ERANGE if the requested tx_type or rx_filter is
 * not supported.
 *
 * NOTE(review): when tx_type is valid but rx_filter is not, the cached
 * tstamp_config.tx_type has already been updated before the -ERANGE
 * return -- confirm callers do not rely on the cache being untouched on
 * failure.
 */
static int ice_ptp_set_timestamp_mode(struct ice_pf *pf,
				      struct kernel_hwtstamp_config *config)
{
	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
		pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
		break;
	case HWTSTAMP_TX_ON:
		pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
		break;
	default:
		return -ERANGE;
	}

	/* Any specific PTP filter request is widened to timestamping all
	 * packets; the actual filtering happens in software.
	 */
	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
	case HWTSTAMP_FILTER_ALL:
		pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	/* Immediately update the device timestamping mode */
	ice_ptp_restore_timestamp_mode(pf);

	return 0;
}
2266
2267 /**
2268 * ice_ptp_hwtstamp_set - interface to control the timestamping
2269 * @netdev: Pointer to network interface device structure
2270 * @config: Timestamping configuration structure
2271 * @extack: Netlink extended ack structure for error reporting
2272 *
2273 * Get the user config and store it
2274 */
ice_ptp_hwtstamp_set(struct net_device * netdev,struct kernel_hwtstamp_config * config,struct netlink_ext_ack * extack)2275 int ice_ptp_hwtstamp_set(struct net_device *netdev,
2276 struct kernel_hwtstamp_config *config,
2277 struct netlink_ext_ack *extack)
2278 {
2279 struct ice_pf *pf = ice_netdev_to_pf(netdev);
2280 int err;
2281
2282 if (pf->ptp.state != ICE_PTP_READY)
2283 return -EAGAIN;
2284
2285 err = ice_ptp_set_timestamp_mode(pf, config);
2286 if (err)
2287 return err;
2288
2289 /* Return the actual configuration set */
2290 *config = pf->ptp.tstamp_config;
2291
2292 return 0;
2293 }
2294
2295 /**
2296 * ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns
2297 * @rx_desc: Receive descriptor
2298 * @pkt_ctx: Packet context to get the cached time
2299 *
2300 * The driver receives a notification in the receive descriptor with timestamp.
2301 */
ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc * rx_desc,const struct ice_pkt_ctx * pkt_ctx)2302 u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
2303 const struct ice_pkt_ctx *pkt_ctx)
2304 {
2305 u64 ts_ns, cached_time;
2306 u32 ts_high;
2307
2308 if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
2309 return 0;
2310
2311 cached_time = READ_ONCE(pkt_ctx->cached_phctime);
2312
2313 /* Do not report a timestamp if we don't have a cached PHC time */
2314 if (!cached_time)
2315 return 0;
2316
2317 /* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
2318 * PHC value, rather than accessing the PF. This also allows us to
2319 * simply pass the upper 32bits of nanoseconds directly. Calling
2320 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these
2321 * bits itself.
2322 */
2323 ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
2324 ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);
2325
2326 return ts_ns;
2327 }
2328
2329 /**
2330 * ice_ptp_setup_pin_cfg - setup PTP pin_config structure
2331 * @pf: Board private structure
2332 */
ice_ptp_setup_pin_cfg(struct ice_pf * pf)2333 static void ice_ptp_setup_pin_cfg(struct ice_pf *pf)
2334 {
2335 for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) {
2336 const struct ice_ptp_pin_desc *desc = &pf->ptp.ice_pin_desc[i];
2337 struct ptp_pin_desc *pin = &pf->ptp.pin_desc[i];
2338 const char *name;
2339
2340 if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
2341 name = ice_pin_names[desc->name_idx];
2342 else
2343 name = ice_pin_names_dpll[desc->name_idx];
2344
2345 strscpy(pin->name, name, sizeof(pin->name));
2346
2347 pin->index = i;
2348 }
2349
2350 pf->ptp.info.pin_config = pf->ptp.pin_desc;
2351 }
2352
2353 /**
2354 * ice_ptp_disable_pins - Disable PTP pins
2355 * @pf: pointer to the PF structure
2356 *
2357 * Disable the OS access to the pins. Called to clear out the OS
2358 * indications of pin support when we fail to setup pin array.
2359 */
ice_ptp_disable_pins(struct ice_pf * pf)2360 static void ice_ptp_disable_pins(struct ice_pf *pf)
2361 {
2362 struct ptp_clock_info *info = &pf->ptp.info;
2363
2364 dev_warn(ice_pf_to_dev(pf), "Failed to configure PTP pin control\n");
2365
2366 info->enable = NULL;
2367 info->verify = NULL;
2368 info->n_pins = 0;
2369 info->n_ext_ts = 0;
2370 info->n_per_out = 0;
2371 }
2372
/**
 * ice_ptp_parse_sdp_entries - update ice_ptp_pin_desc structure from NVM
 * @pf: pointer to the PF structure
 * @entries: SDP connection section from NVM
 * @num_entries: number of valid entries in sdp_entries
 * @pins: PTP pins array to update
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries,
				     unsigned int num_entries,
				     struct ice_ptp_pin_desc *pins)
{
	unsigned int n_pins = 0;
	unsigned int i;

	/* Setup ice_pin_desc array: mark every slot unused */
	for (i = 0; i < ICE_N_PINS_MAX; i++) {
		pins[i].name_idx = -1;
		pins[i].gpio[0] = -1;
		pins[i].gpio[1] = -1;
	}

	for (i = 0; i < num_entries; i++) {
		u16 entry = le16_to_cpu(entries[i]);
		DECLARE_BITMAP(bitmap, GPIO_NA);
		unsigned int idx;
		bool dir;
		u16 gpio;

		/* The AC pin mask fits within the first word of the bitmap */
		*bitmap = FIELD_GET(ICE_AQC_NVM_SDP_AC_PIN_M, entry);

		/* Check if entry's pin bitmap is valid. */
		if (bitmap_empty(bitmap, GPIO_NA))
			continue;

		/* dir selects gpio[1] (output) vs gpio[0] (input) below */
		dir = !!FIELD_GET(ICE_AQC_NVM_SDP_AC_DIR_M, entry);
		gpio = FIELD_GET(ICE_AQC_NVM_SDP_AC_SDP_NUM_M, entry);

		/* Look for an already-created slot for this SDP number;
		 * name_idx doubles as the SDP number here.
		 */
		for (idx = 0; idx < ICE_N_PINS_MAX; idx++) {
			if (pins[idx].name_idx == gpio)
				break;
		}

		if (idx == ICE_N_PINS_MAX) {
			/* Pin not found, setup its entry and name */
			idx = n_pins++;
			pins[idx].name_idx = gpio;
		}
		pins[idx].gpio[dir] = gpio;
	}

	for (i = 0; i < n_pins; i++) {
		dev_dbg(ice_pf_to_dev(pf),
			"NVM pin entry[%d] : name_idx %d gpio_out %d gpio_in %d\n",
			i, pins[i].name_idx, pins[i].gpio[1], pins[i].gpio[0]);
	}

	pf->ptp.info.n_pins = n_pins;
	return 0;
}
2434
2435 /**
2436 * ice_ptp_set_funcs_e82x - Set specialized functions for E82X support
2437 * @pf: Board private structure
2438 *
2439 * Assign functions to the PTP capabilities structure for E82X devices.
2440 * Functions which operate across all device families should be set directly
2441 * in ice_ptp_set_caps. Only add functions here which are distinct for E82X
2442 * devices.
2443 */
ice_ptp_set_funcs_e82x(struct ice_pf * pf)2444 static void ice_ptp_set_funcs_e82x(struct ice_pf *pf)
2445 {
2446 pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
2447
2448 if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825) {
2449 pf->ptp.ice_pin_desc = ice_pin_desc_e825c;
2450 pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e825c);
2451 } else {
2452 pf->ptp.ice_pin_desc = ice_pin_desc_e82x;
2453 pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e82x);
2454 }
2455 ice_ptp_setup_pin_cfg(pf);
2456 }
2457
2458 /**
2459 * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
2460 * @pf: Board private structure
2461 *
2462 * Assign functions to the PTP capabiltiies structure for E810 devices.
2463 * Functions which operate across all device families should be set directly
2464 * in ice_ptp_set_caps. Only add functions here which are distinct for E810
2465 * devices.
2466 */
ice_ptp_set_funcs_e810(struct ice_pf * pf)2467 static void ice_ptp_set_funcs_e810(struct ice_pf *pf)
2468 {
2469 __le16 entries[ICE_AQC_NVM_SDP_AC_MAX_SIZE];
2470 struct ice_ptp_pin_desc *desc = NULL;
2471 struct ice_ptp *ptp = &pf->ptp;
2472 unsigned int num_entries;
2473 int err;
2474
2475 err = ice_ptp_read_sdp_ac(&pf->hw, entries, &num_entries);
2476 if (err) {
2477 /* SDP section does not exist in NVM or is corrupted */
2478 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
2479 ptp->ice_pin_desc = ice_pin_desc_dpll;
2480 ptp->info.n_pins = ARRAY_SIZE(ice_pin_desc_dpll);
2481 } else {
2482 pf->ptp.ice_pin_desc = ice_pin_desc_e810;
2483 pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e810);
2484 }
2485 err = 0;
2486 } else {
2487 desc = devm_kcalloc(ice_pf_to_dev(pf), ICE_N_PINS_MAX,
2488 sizeof(struct ice_ptp_pin_desc),
2489 GFP_KERNEL);
2490 if (!desc)
2491 goto err;
2492
2493 err = ice_ptp_parse_sdp_entries(pf, entries, num_entries, desc);
2494 if (err)
2495 goto err;
2496
2497 ptp->ice_pin_desc = (const struct ice_ptp_pin_desc *)desc;
2498 }
2499
2500 ptp->info.pin_config = ptp->pin_desc;
2501 ice_ptp_setup_pin_cfg(pf);
2502
2503 err:
2504 if (err) {
2505 devm_kfree(ice_pf_to_dev(pf), desc);
2506 ice_ptp_disable_pins(pf);
2507 }
2508 }
2509
/**
 * ice_ptp_set_funcs_e830 - Set specialized functions for E830 support
 * @pf: Board private structure
 *
 * Assign functions to the PTP capabilities structure for E830 devices.
 * Functions which operate across all device families should be set directly
 * in ice_ptp_set_caps. Only add functions here which are distinct for E830
 * devices.
 */
static void ice_ptp_set_funcs_e830(struct ice_pf *pf)
{
#ifdef CONFIG_ICE_HWTS
	/* Cross timestamping requires both PCIe PTM and the CPU's ART clock */
	if (pcie_ptm_enabled(pf->pdev) && boot_cpu_has(X86_FEATURE_ART))
		pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;

#endif /* CONFIG_ICE_HWTS */
	/* Rest of the config is the same as base E810 */
	pf->ptp.ice_pin_desc = ice_pin_desc_e810;
	pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e810);
	ice_ptp_setup_pin_cfg(pf);
}
2531
2532 /**
2533 * ice_ptp_set_caps - Set PTP capabilities
2534 * @pf: Board private structure
2535 */
ice_ptp_set_caps(struct ice_pf * pf)2536 static void ice_ptp_set_caps(struct ice_pf *pf)
2537 {
2538 struct ptp_clock_info *info = &pf->ptp.info;
2539 struct device *dev = ice_pf_to_dev(pf);
2540
2541 snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
2542 dev_driver_string(dev), dev_name(dev));
2543 info->owner = THIS_MODULE;
2544 info->max_adj = 100000000;
2545 info->adjtime = ice_ptp_adjtime;
2546 info->adjfine = ice_ptp_adjfine;
2547 info->gettimex64 = ice_ptp_gettimex64;
2548 info->settime64 = ice_ptp_settime64;
2549 info->n_per_out = GLTSYN_TGT_H_IDX_MAX;
2550 info->n_ext_ts = GLTSYN_EVNT_H_IDX_MAX;
2551 info->enable = ice_ptp_gpio_enable;
2552 info->verify = ice_verify_pin;
2553
2554 info->supported_extts_flags = PTP_RISING_EDGE |
2555 PTP_FALLING_EDGE |
2556 PTP_STRICT_FLAGS;
2557 info->supported_perout_flags = PTP_PEROUT_PHASE;
2558
2559 switch (pf->hw.mac_type) {
2560 case ICE_MAC_E810:
2561 ice_ptp_set_funcs_e810(pf);
2562 return;
2563 case ICE_MAC_E830:
2564 ice_ptp_set_funcs_e830(pf);
2565 return;
2566 case ICE_MAC_GENERIC:
2567 case ICE_MAC_GENERIC_3K_E825:
2568 ice_ptp_set_funcs_e82x(pf);
2569 return;
2570 default:
2571 return;
2572 }
2573 }
2574
2575 /**
2576 * ice_ptp_create_clock - Create PTP clock device for userspace
2577 * @pf: Board private structure
2578 *
2579 * This function creates a new PTP clock device. It only creates one if we
2580 * don't already have one. Will return error if it can't create one, but success
2581 * if we already have a device. Should be used by ice_ptp_init to create clock
2582 * initially, and prevent global resets from creating new clock devices.
2583 */
ice_ptp_create_clock(struct ice_pf * pf)2584 static long ice_ptp_create_clock(struct ice_pf *pf)
2585 {
2586 struct ptp_clock_info *info;
2587 struct device *dev;
2588
2589 /* No need to create a clock device if we already have one */
2590 if (pf->ptp.clock)
2591 return 0;
2592
2593 ice_ptp_set_caps(pf);
2594
2595 info = &pf->ptp.info;
2596 dev = ice_pf_to_dev(pf);
2597
2598 /* Attempt to register the clock before enabling the hardware. */
2599 pf->ptp.clock = ptp_clock_register(info, dev);
2600 if (IS_ERR(pf->ptp.clock)) {
2601 dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device");
2602 return PTR_ERR(pf->ptp.clock);
2603 }
2604
2605 return 0;
2606 }
2607
/**
 * ice_ptp_request_ts - Request an available Tx timestamp index
 * @tx: the PTP Tx timestamp tracker to request from
 * @skb: the SKB to associate with this timestamp request
 *
 * Return: the PHY timestamp register index to program into the Tx
 * descriptor, or -1 if the tracker is down or no index is available.
 */
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
{
	unsigned long flags;
	u8 idx;

	spin_lock_irqsave(&tx->lock, flags);

	/* Check that this tracker is accepting new timestamp requests */
	if (!ice_ptp_is_tx_tracker_up(tx)) {
		spin_unlock_irqrestore(&tx->lock, flags);
		return -1;
	}

	/* Find and set the first available index, preferring slots after the
	 * last low-latency timestamp index read; wrap to the start if none
	 * are free above it.
	 */
	idx = find_next_zero_bit(tx->in_use, tx->len,
				 tx->last_ll_ts_idx_read + 1);
	if (idx == tx->len)
		idx = find_first_zero_bit(tx->in_use, tx->len);

	if (idx < tx->len) {
		/* We got a valid index that no other thread could have set. Store
		 * a reference to the skb and the start time to allow discarding old
		 * requests.
		 */
		set_bit(idx, tx->in_use);
		clear_bit(idx, tx->stale);
		tx->tstamps[idx].start = jiffies;
		tx->tstamps[idx].skb = skb_get(skb);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		ice_trace(tx_tstamp_request, skb, idx);
	}

	spin_unlock_irqrestore(&tx->lock, flags);

	/* return the appropriate PHY timestamp register index, -1 if no
	 * indexes were available.
	 */
	if (idx >= tx->len)
		return -1;
	else
		return idx + tx->offset;
}
2655
ice_ptp_process_ts(struct ice_pf * pf)2656 void ice_ptp_process_ts(struct ice_pf *pf)
2657 {
2658 switch (pf->ptp.tx_interrupt_mode) {
2659 case ICE_PTP_TX_INTERRUPT_NONE:
2660 /* This device has the clock owner handle timestamps for it */
2661 return;
2662 case ICE_PTP_TX_INTERRUPT_SELF:
2663 /* This device handles its own timestamps */
2664 ice_ptp_process_tx_tstamp(&pf->ptp.port.tx);
2665 return;
2666 case ICE_PTP_TX_INTERRUPT_ALL:
2667 /* This device handles timestamps for all ports */
2668 ice_ptp_tx_tstamp_owner(pf);
2669 return;
2670 default:
2671 WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
2672 pf->ptp.tx_interrupt_mode);
2673 return;
2674 }
2675 }
2676
ice_port_has_timestamps(struct ice_ptp_tx * tx)2677 static bool ice_port_has_timestamps(struct ice_ptp_tx *tx)
2678 {
2679 bool more_timestamps;
2680
2681 scoped_guard(spinlock_irqsave, &tx->lock) {
2682 if (!tx->init)
2683 return false;
2684
2685 more_timestamps = !bitmap_empty(tx->in_use, tx->len);
2686 }
2687
2688 return more_timestamps;
2689 }
2690
ice_any_port_has_timestamps(struct ice_pf * pf)2691 static bool ice_any_port_has_timestamps(struct ice_pf *pf)
2692 {
2693 struct ice_ptp_port *port;
2694
2695 scoped_guard(mutex, &pf->adapter->ports.lock) {
2696 list_for_each_entry(port, &pf->adapter->ports.ports,
2697 list_node) {
2698 struct ice_ptp_tx *tx = &port->tx;
2699
2700 if (ice_port_has_timestamps(tx))
2701 return true;
2702 }
2703 }
2704
2705 return false;
2706 }
2707
/**
 * ice_ptp_tx_tstamps_pending - Check for outstanding Tx timestamps
 * @pf: Board private structure
 *
 * Check both the software trackers and the PHY hardware ready bitmaps for
 * Tx timestamps that still need processing.
 *
 * Return: true when timestamp work remains (a failed PHY read also counts
 * as pending), false otherwise.
 */
bool ice_ptp_tx_tstamps_pending(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int i;

	/* Check software indicator */
	switch (pf->ptp.tx_interrupt_mode) {
	case ICE_PTP_TX_INTERRUPT_NONE:
		return false;
	case ICE_PTP_TX_INTERRUPT_SELF:
		if (ice_port_has_timestamps(&pf->ptp.port.tx))
			return true;
		break;
	case ICE_PTP_TX_INTERRUPT_ALL:
		if (ice_any_port_has_timestamps(pf))
			return true;
		break;
	default:
		WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
			  pf->ptp.tx_interrupt_mode);
		break;
	}

	/* Check hardware indicator, one ready bitmap per quad */
	for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) {
		u64 tstamp_ready = 0;
		int err;

		err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
		if (err || tstamp_ready)
			return true;
	}

	return false;
}
2743
/**
 * ice_ptp_ts_irq - Process the PTP Tx timestamps in IRQ context
 * @pf: Board private structure
 *
 * Return: IRQ_WAKE_THREAD if Tx timestamp read has to be handled in the bottom
 * half of the interrupt and IRQ_HANDLED otherwise.
 */
irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	switch (hw->mac_type) {
	case ICE_MAC_E810:
		/* E810 capable of low latency timestamping with interrupt can
		 * request a single timestamp in the top half and wait for
		 * a second LL TS interrupt from the FW when it's ready.
		 */
		if (hw->dev_caps.ts_dev_info.ts_ll_int_read) {
			struct ice_ptp_tx *tx = &pf->ptp.port.tx;
			u8 idx, last;

			if (!ice_pf_state_is_nominal(pf))
				return IRQ_HANDLED;

			/* Request the oldest outstanding index, searching
			 * from just past the last one read.
			 */
			spin_lock(&tx->lock);
			if (tx->init) {
				last = tx->last_ll_ts_idx_read + 1;
				idx = find_next_bit_wrap(tx->in_use, tx->len,
							 last);
				if (idx != tx->len)
					ice_ptp_req_tx_single_tstamp(tx, idx);
			}
			spin_unlock(&tx->lock);

			return IRQ_HANDLED;
		}
		fallthrough; /* non-LL_TS E810 */
	case ICE_MAC_GENERIC:
	case ICE_MAC_GENERIC_3K_E825:
		/* All other devices process timestamps in the bottom half due
		 * to sleeping or polling.
		 */
		if (!ice_ptp_pf_handles_tx_interrupt(pf))
			return IRQ_HANDLED;

		set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
		return IRQ_WAKE_THREAD;
	case ICE_MAC_E830:
		/* E830 can read timestamps in the top half using rd32() */
		ice_ptp_process_ts(pf);

		if (ice_ptp_tx_tstamps_pending(pf)) {
			/* Process outstanding Tx timestamps. If there
			 * is more work, re-arm the interrupt to trigger again.
			 */
			wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
			ice_flush(hw);
		}
		return IRQ_HANDLED;
	default:
		return IRQ_HANDLED;
	}
}
2807
2808 /**
2809 * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timstamp interrupt
2810 * @pf: Board private structure
2811 *
2812 * The device PHY issues Tx timestamp interrupts to the driver for processing
2813 * timestamp data from the PHY. It will not interrupt again until all
2814 * current timestamp data is read. In rare circumstances, it is possible that
2815 * the driver fails to read all outstanding data.
2816 *
2817 * To avoid getting permanently stuck, periodically check if the PHY has
2818 * outstanding timestamp data. If so, trigger an interrupt from software to
2819 * process this data.
2820 */
ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf * pf)2821 static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
2822 {
2823 struct device *dev = ice_pf_to_dev(pf);
2824 struct ice_hw *hw = &pf->hw;
2825 bool trigger_oicr = false;
2826 unsigned int i;
2827
2828 if (!pf->ptp.port.tx.has_ready_bitmap)
2829 return;
2830
2831 if (!ice_pf_src_tmr_owned(pf))
2832 return;
2833
2834 for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) {
2835 u64 tstamp_ready;
2836 int err;
2837
2838 err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
2839 if (!err && tstamp_ready) {
2840 trigger_oicr = true;
2841 break;
2842 }
2843 }
2844
2845 if (trigger_oicr) {
2846 /* Trigger a software interrupt, to ensure this data
2847 * gets processed.
2848 */
2849 dev_dbg(dev, "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n");
2850
2851 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
2852 ice_flush(hw);
2853 }
2854 }
2855
/**
 * ice_ptp_periodic_work - Periodic PTP housekeeping
 * @work: the kthread delayed work embedded in struct ice_ptp
 *
 * Refresh the cached PHC time and check for stuck Tx timestamps, then
 * reschedule. Runs twice a second, or retries after 10 ms when the PHC
 * update failed.
 */
static void ice_ptp_periodic_work(struct kthread_work *work)
{
	struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
	int err;

	/* Stop rescheduling while PTP is resetting or torn down */
	if (pf->ptp.state != ICE_PTP_READY)
		return;

	err = ice_ptp_update_cached_phctime(pf);

	ice_ptp_maybe_trigger_tx_interrupt(pf);

	/* Run twice a second or reschedule if phc update failed */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work,
				   msecs_to_jiffies(err ? 10 : 500));
}
2873
2874 /**
2875 * ice_ptp_queue_work - Queue PTP periodic work for a PF
2876 * @pf: Board private structure
2877 *
2878 * Helper function to queue PTP periodic work after VSI rebuild completes.
2879 * This ensures that PTP work only runs when VSI structures are ready.
2880 */
ice_ptp_queue_work(struct ice_pf * pf)2881 void ice_ptp_queue_work(struct ice_pf *pf)
2882 {
2883 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags) &&
2884 pf->ptp.state == ICE_PTP_READY)
2885 kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 0);
2886 }
2887
2888 /**
2889 * ice_ptp_prepare_rebuild_sec - Prepare second NAC for PTP reset or rebuild
2890 * @pf: Board private structure
2891 * @rebuild: rebuild if true, prepare if false
2892 * @reset_type: the reset type being performed
2893 */
ice_ptp_prepare_rebuild_sec(struct ice_pf * pf,bool rebuild,enum ice_reset_req reset_type)2894 static void ice_ptp_prepare_rebuild_sec(struct ice_pf *pf, bool rebuild,
2895 enum ice_reset_req reset_type)
2896 {
2897 struct list_head *entry;
2898
2899 list_for_each(entry, &pf->adapter->ports.ports) {
2900 struct ice_ptp_port *port = list_entry(entry,
2901 struct ice_ptp_port,
2902 list_node);
2903 struct ice_pf *peer_pf = ptp_port_to_pf(port);
2904
2905 if (!ice_is_primary(&peer_pf->hw)) {
2906 if (rebuild) {
2907 /* TODO: When implementing rebuild=true:
2908 * 1. Ensure secondary PFs' VSIs are rebuilt
2909 * 2. Call ice_ptp_queue_work(peer_pf) after VSI rebuild
2910 */
2911 ice_ptp_rebuild(peer_pf, reset_type);
2912 } else {
2913 ice_ptp_prepare_for_reset(peer_pf, reset_type);
2914 }
2915 }
2916 }
2917 }
2918
/**
 * ice_ptp_prepare_for_reset - Prepare PTP for reset
 * @pf: Board private structure
 * @reset_type: the reset type being performed
 */
void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_ptp *ptp = &pf->ptp;
	struct ice_hw *hw = &pf->hw;
	u8 src_tmr;

	if (ptp->state != ICE_PTP_READY)
		return;

	ptp->state = ICE_PTP_RESETTING;

	/* Disable timestamping for both Tx and Rx */
	ice_ptp_disable_timestamp_mode(pf);

	/* Stop the periodic work before tearing down shared state */
	kthread_cancel_delayed_work_sync(&ptp->work);

	/* NOTE(review): a PF-level reset stops here; the clock teardown
	 * below appears to be needed only for larger resets -- confirm
	 * against the reset flow.
	 */
	if (reset_type == ICE_RESET_PFR)
		return;

	if (ice_pf_src_tmr_owned(pf) && hw->mac_type == ICE_MAC_GENERIC_3K_E825)
		ice_ptp_prepare_rebuild_sec(pf, false, reset_type);

	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);

	/* Disable periodic outputs */
	ice_ptp_disable_all_perout(pf);

	src_tmr = ice_get_ptp_src_clock_index(&pf->hw);

	/* Disable source clock */
	wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);

	/* Acquire PHC and system timer to restore after reset */
	ptp->reset_time = ktime_get_real_ns();
}
2959
/**
 * ice_ptp_rebuild_owner - Initialize PTP clock owner after reset
 * @pf: Board private structure
 *
 * Companion function for ice_ptp_rebuild() which handles tasks that only the
 * PTP clock owner instance should perform.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ice_ptp_rebuild_owner(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	struct ice_hw *hw = &pf->hw;
	struct timespec64 ts;
	u64 time_diff;
	int err;

	err = ice_ptp_init_phc(hw);
	if (err)
		return err;

	err = ice_tspll_init(hw);
	if (err)
		return err;

	/* Acquire the global hardware lock */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		return err;
	}

	/* Write the increment time value to PHY and LAN */
	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
	if (err)
		goto err_unlock;

	/* Write the initial Time value to PHY and LAN using the cached PHC
	 * time before the reset and time difference between stopping and
	 * starting the clock.
	 */
	if (ptp->cached_phc_time) {
		time_diff = ktime_get_real_ns() - ptp->reset_time;
		ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);
	} else {
		/* No pre-reset PHC time; start from wall-clock time */
		ts = ktime_to_timespec64(ktime_get_real());
	}
	err = ice_ptp_write_init(pf, &ts);
	if (err)
		goto err_unlock;

	/* Release the global hardware lock */
	ice_ptp_unlock(hw);

	/* Flush software tracking of any outstanding timestamps since we're
	 * about to flush the PHY timestamp block.
	 */
	ice_ptp_flush_all_tx_tracker(pf);

	/* Enable quad interrupts */
	err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
	if (err)
		return err;

	ice_ptp_restart_all_phy(pf);

	/* Re-enable all periodic outputs and external timestamp events */
	ice_ptp_enable_all_perout(pf);
	ice_ptp_enable_all_extts(pf);

	return 0;

err_unlock:
	ice_ptp_unlock(hw);
	return err;
}
3033
3034 /**
3035 * ice_ptp_rebuild - Initialize PTP hardware clock support after reset
3036 * @pf: Board private structure
3037 * @reset_type: the reset type being performed
3038 */
ice_ptp_rebuild(struct ice_pf * pf,enum ice_reset_req reset_type)3039 void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
3040 {
3041 struct ice_ptp *ptp = &pf->ptp;
3042 int err;
3043
3044 if (ptp->state == ICE_PTP_READY) {
3045 ice_ptp_prepare_for_reset(pf, reset_type);
3046 } else if (ptp->state != ICE_PTP_RESETTING) {
3047 err = -EINVAL;
3048 dev_err(ice_pf_to_dev(pf), "PTP was not initialized\n");
3049 goto err;
3050 }
3051
3052 if (ice_pf_src_tmr_owned(pf) && reset_type != ICE_RESET_PFR) {
3053 err = ice_ptp_rebuild_owner(pf);
3054 if (err)
3055 goto err;
3056 }
3057
3058 ptp->state = ICE_PTP_READY;
3059
3060 dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
3061 return;
3062
3063 err:
3064 ptp->state = ICE_PTP_ERROR;
3065 dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
3066 }
3067
ice_ptp_setup_adapter(struct ice_pf * pf)3068 static int ice_ptp_setup_adapter(struct ice_pf *pf)
3069 {
3070 if (!ice_pf_src_tmr_owned(pf) || !ice_is_primary(&pf->hw))
3071 return -EPERM;
3072
3073 pf->adapter->ctrl_pf = pf;
3074
3075 return 0;
3076 }
3077
ice_ptp_setup_pf(struct ice_pf * pf)3078 static int ice_ptp_setup_pf(struct ice_pf *pf)
3079 {
3080 struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
3081 struct ice_ptp *ptp = &pf->ptp;
3082
3083 if (WARN_ON(!ctrl_ptp) || pf->hw.mac_type == ICE_MAC_UNKNOWN)
3084 return -ENODEV;
3085
3086 INIT_LIST_HEAD(&ptp->port.list_node);
3087 mutex_lock(&pf->adapter->ports.lock);
3088
3089 list_add(&ptp->port.list_node,
3090 &pf->adapter->ports.ports);
3091 mutex_unlock(&pf->adapter->ports.lock);
3092
3093 return 0;
3094 }
3095
ice_ptp_cleanup_pf(struct ice_pf * pf)3096 static void ice_ptp_cleanup_pf(struct ice_pf *pf)
3097 {
3098 struct ice_ptp *ptp = &pf->ptp;
3099
3100 if (pf->hw.mac_type != ICE_MAC_UNKNOWN) {
3101 mutex_lock(&pf->adapter->ports.lock);
3102 list_del(&ptp->port.list_node);
3103 mutex_unlock(&pf->adapter->ports.lock);
3104 }
3105 }
3106
3107 /**
3108 * ice_ptp_clock_index - Get the PTP clock index for this device
3109 * @pf: Board private structure
3110 *
3111 * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock
3112 * is associated.
3113 */
ice_ptp_clock_index(struct ice_pf * pf)3114 int ice_ptp_clock_index(struct ice_pf *pf)
3115 {
3116 struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
3117 struct ptp_clock *clock;
3118
3119 if (!ctrl_ptp)
3120 return -1;
3121 clock = ctrl_ptp->clock;
3122
3123 return clock ? ptp_clock_index(clock) : -1;
3124 }
3125
/**
 * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
 * @pf: Board private structure
 *
 * Setup and initialize a PTP clock device that represents the device hardware
 * clock. Save the clock index for other functions connected to the same
 * hardware resource.
 *
 * Return: 0 on success, -EBUSY if the global hardware semaphore cannot be
 * acquired, otherwise the error from PHC/TSPLL initialization, time/incval
 * programming, interrupt configuration, or clock device creation.
 */
static int ice_ptp_init_owner(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	struct timespec64 ts;
	int err;

	err = ice_ptp_init_phc(hw);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n",
			err);
		return err;
	}

	err = ice_tspll_init(hw);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "Failed to initialize CGU, status %d\n",
			err);
		return err;
	}

	/* Acquire the global hardware lock */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto err_exit;
	}

	/* Write the increment time value to PHY and LAN */
	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
	if (err)
		goto err_unlock;

	ts = ktime_to_timespec64(ktime_get_real());
	/* Write the initial Time value to PHY and LAN */
	err = ice_ptp_write_init(pf, &ts);
	if (err)
		goto err_unlock;

	/* Release the global hardware lock */
	ice_ptp_unlock(hw);

	/* Configure PHY interrupt settings */
	err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
	if (err)
		goto err_exit;

	/* Ensure we have a clock device */
	err = ice_ptp_create_clock(pf);
	if (err)
		goto err_clk;

	return 0;
	/* Note the label layout: err_clk falls through into err_exit, while
	 * err_unlock is placed after the returns and is reached only via goto.
	 */
err_clk:
	pf->ptp.clock = NULL;
err_exit:
	return err;

err_unlock:
	ice_ptp_unlock(hw);
	return err;
}
3194
3195 /**
3196 * ice_ptp_init_work - Initialize PTP work threads
3197 * @pf: Board private structure
3198 * @ptp: PF PTP structure
3199 */
ice_ptp_init_work(struct ice_pf * pf,struct ice_ptp * ptp)3200 static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
3201 {
3202 struct kthread_worker *kworker;
3203
3204 /* Initialize work functions */
3205 kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);
3206
3207 /* Allocate a kworker for handling work required for the ports
3208 * connected to the PTP hardware clock.
3209 */
3210 kworker = kthread_run_worker(0, "ice-ptp-%s",
3211 dev_name(ice_pf_to_dev(pf)));
3212 if (IS_ERR(kworker))
3213 return PTR_ERR(kworker);
3214
3215 ptp->kworker = kworker;
3216
3217 /* Start periodic work going */
3218 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
3219
3220 return 0;
3221 }
3222
3223 /**
3224 * ice_ptp_init_port - Initialize PTP port structure
3225 * @pf: Board private structure
3226 * @ptp_port: PTP port structure
3227 *
3228 * Return: 0 on success, -ENODEV on invalid MAC type, -ENOMEM on failed alloc.
3229 */
ice_ptp_init_port(struct ice_pf * pf,struct ice_ptp_port * ptp_port)3230 static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
3231 {
3232 struct ice_hw *hw = &pf->hw;
3233
3234 mutex_init(&ptp_port->ps_lock);
3235
3236 switch (hw->mac_type) {
3237 case ICE_MAC_E810:
3238 case ICE_MAC_E830:
3239 case ICE_MAC_GENERIC_3K_E825:
3240 return ice_ptp_init_tx(pf, &ptp_port->tx, ptp_port->port_num);
3241 case ICE_MAC_GENERIC:
3242 kthread_init_delayed_work(&ptp_port->ov_work,
3243 ice_ptp_wait_for_offsets);
3244 return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
3245 ptp_port->port_num);
3246 default:
3247 return -ENODEV;
3248 }
3249 }
3250
3251 /**
3252 * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode
3253 * @pf: Board private structure
3254 *
3255 * Initialize the Tx timestamp interrupt mode for this device. For most device
3256 * types, each PF processes the interrupt and manages its own timestamps. For
3257 * E822-based devices, only the clock owner processes the timestamps. Other
3258 * PFs disable the interrupt and do not process their own timestamps.
3259 */
ice_ptp_init_tx_interrupt_mode(struct ice_pf * pf)3260 static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
3261 {
3262 switch (pf->hw.mac_type) {
3263 case ICE_MAC_GENERIC:
3264 case ICE_MAC_GENERIC_3K_E825:
3265 /* E82x hardware has the clock owner process timestamps for
3266 * all ports.
3267 */
3268 if (ice_pf_src_tmr_owned(pf))
3269 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL;
3270 else
3271 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE;
3272 break;
3273 default:
3274 /* other PHY types handle their own Tx interrupt */
3275 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF;
3276 }
3277 }
3278
/**
 * ice_ptp_init - Initialize PTP hardware clock support
 * @pf: Board private structure
 *
 * Set up the device for interacting with the PTP hardware clock for all
 * functions, both the function that owns the clock hardware, and the
 * functions connected to the clock hardware.
 *
 * The clock owner will allocate and register a ptp_clock with the
 * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work
 * items used for asynchronous work such as Tx timestamps and periodic work.
 */
void ice_ptp_init(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	struct ice_hw *hw = &pf->hw;
	int err;

	ptp->state = ICE_PTP_INITIALIZING;

	/* A negative lane number indicates the lane was never discovered */
	if (hw->lane_num < 0) {
		err = hw->lane_num;
		goto err_exit;
	}
	ptp->port.port_num = hw->lane_num;

	ice_ptp_init_hw(hw);

	ice_ptp_init_tx_interrupt_mode(pf);

	/* If this function owns the clock hardware, it must allocate and
	 * configure the PTP clock device to represent it.
	 */
	if (ice_pf_src_tmr_owned(pf) && ice_is_primary(hw)) {
		err = ice_ptp_setup_adapter(pf);
		if (err)
			goto err_exit;
		err = ice_ptp_init_owner(pf);
		if (err)
			goto err_exit;
	}

	/* Register this PF's port on the adapter-wide port list */
	err = ice_ptp_setup_pf(pf);
	if (err)
		goto err_exit;

	err = ice_ptp_init_port(pf, &ptp->port);
	if (err)
		goto err_clean_pf;

	/* Start the PHY timestamping block */
	ice_ptp_reset_phy_timestamping(pf);

	/* Configure initial Tx interrupt settings */
	ice_ptp_cfg_tx_interrupt(pf);

	ptp->state = ICE_PTP_READY;

	/* NOTE(review): on ice_ptp_init_work() failure this jumps to
	 * err_exit, not err_clean_pf, leaving the port list entry, ps_lock
	 * and Tx tracker from ice_ptp_init_port() untouched — confirm this
	 * is intentional.
	 */
	err = ice_ptp_init_work(pf, ptp);
	if (err)
		goto err_exit;

	dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
	return;

err_clean_pf:
	mutex_destroy(&ptp->port.ps_lock);
	ice_ptp_cleanup_pf(pf);
err_exit:
	/* If we registered a PTP clock, release it */
	if (pf->ptp.clock) {
		ptp_clock_unregister(ptp->clock);
		pf->ptp.clock = NULL;
	}
	/* Keep ICE_PTP_UNINIT state to avoid ambiguity at driver unload
	 * and to avoid duplicated resources release.
	 */
	ptp->state = ICE_PTP_UNINIT;
	dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
}
3359
/**
 * ice_ptp_release - Disable the driver/HW support and unregister the clock
 * @pf: Board private structure
 *
 * This function handles the cleanup work required from the initialization by
 * clearing out the important information and unregistering the clock
 */
void ice_ptp_release(struct ice_pf *pf)
{
	/* Nothing was ever initialized; nothing to release */
	if (pf->ptp.state == ICE_PTP_UNINIT)
		return;

	/* Partial init (or earlier failure): only the minimal cleanup of the
	 * port list, lock, and clock device is needed.
	 */
	if (pf->ptp.state != ICE_PTP_READY) {
		mutex_destroy(&pf->ptp.port.ps_lock);
		ice_ptp_cleanup_pf(pf);
		if (pf->ptp.clock) {
			ptp_clock_unregister(pf->ptp.clock);
			pf->ptp.clock = NULL;
		}
		return;
	}

	pf->ptp.state = ICE_PTP_UNINIT;

	/* Disable timestamping for both Tx and Rx */
	ice_ptp_disable_timestamp_mode(pf);

	ice_ptp_cleanup_pf(pf);

	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);

	ice_ptp_disable_all_extts(pf);

	/* Cancel outstanding periodic work before tearing down the worker */
	kthread_cancel_delayed_work_sync(&pf->ptp.work);

	ice_ptp_port_phy_stop(&pf->ptp.port);
	mutex_destroy(&pf->ptp.port.ps_lock);
	if (pf->ptp.kworker) {
		kthread_destroy_worker(pf->ptp.kworker);
		pf->ptp.kworker = NULL;
	}

	/* Only the clock owner holds a registered clock device */
	if (!pf->ptp.clock)
		return;

	/* Disable periodic outputs */
	ice_ptp_disable_all_perout(pf);

	ptp_clock_unregister(pf->ptp.clock);
	pf->ptp.clock = NULL;

	dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
}
3413