1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021, Intel Corporation. */
3
4 #include "ice.h"
5 #include "ice_lib.h"
6 #include "ice_trace.h"
7
8 #define E810_OUT_PROP_DELAY_NS 1
9
10 static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
11 /* name idx func chan */
12 { "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } },
13 { "SMA1", SMA1, PTP_PF_NONE, 1, { 0, } },
14 { "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } },
15 { "SMA2", SMA2, PTP_PF_NONE, 2, { 0, } },
16 { "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } },
17 };
18
19 /**
20 * ice_get_sma_config_e810t
21 * @hw: pointer to the hw struct
22 * @ptp_pins: pointer to the ptp_pin_desc structure
23 *
24 * Read the configuration of the SMA control logic and put it into the
25 * ptp_pin_desc structure
26 */
27 static int
28 ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
29 {
30 u8 data, i;
31 int status;
32
33 /* Read initial pin state */
34 status = ice_read_sma_ctrl_e810t(hw, &data);
35 if (status)
36 return status;
37
38 /* initialize with defaults */
39 for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
40 strscpy(ptp_pins[i].name, ice_pin_desc_e810t[i].name,
41 sizeof(ptp_pins[i].name));
42 ptp_pins[i].index = ice_pin_desc_e810t[i].index;
43 ptp_pins[i].func = ice_pin_desc_e810t[i].func;
44 ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
45 }
46
47 /* Parse SMA1/UFL1 */
48 switch (data & ICE_SMA1_MASK_E810T) {
49 case ICE_SMA1_MASK_E810T:
50 default:
51 ptp_pins[SMA1].func = PTP_PF_NONE;
52 ptp_pins[UFL1].func = PTP_PF_NONE;
53 break;
54 case ICE_SMA1_DIR_EN_E810T:
55 ptp_pins[SMA1].func = PTP_PF_PEROUT;
56 ptp_pins[UFL1].func = PTP_PF_NONE;
57 break;
58 case ICE_SMA1_TX_EN_E810T:
59 ptp_pins[SMA1].func = PTP_PF_EXTTS;
60 ptp_pins[UFL1].func = PTP_PF_NONE;
61 break;
62 case 0:
63 ptp_pins[SMA1].func = PTP_PF_EXTTS;
64 ptp_pins[UFL1].func = PTP_PF_PEROUT;
65 break;
66 }
67
68 /* Parse SMA2/UFL2 */
69 switch (data & ICE_SMA2_MASK_E810T) {
70 case ICE_SMA2_MASK_E810T:
71 default:
72 ptp_pins[SMA2].func = PTP_PF_NONE;
73 ptp_pins[UFL2].func = PTP_PF_NONE;
74 break;
75 case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
76 ptp_pins[SMA2].func = PTP_PF_EXTTS;
77 ptp_pins[UFL2].func = PTP_PF_NONE;
78 break;
79 case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
80 ptp_pins[SMA2].func = PTP_PF_PEROUT;
81 ptp_pins[UFL2].func = PTP_PF_NONE;
82 break;
83 case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T):
84 ptp_pins[SMA2].func = PTP_PF_NONE;
85 ptp_pins[UFL2].func = PTP_PF_EXTTS;
86 break;
87 case ICE_SMA2_DIR_EN_E810T:
88 ptp_pins[SMA2].func = PTP_PF_PEROUT;
89 ptp_pins[UFL2].func = PTP_PF_EXTTS;
90 break;
91 }
92
93 return 0;
94 }
95
96 /**
97 * ice_ptp_set_sma_config_e810t
98 * @hw: pointer to the hw struct
99 * @ptp_pins: pointer to the ptp_pin_desc structure
100 *
101 * Set the configuration of the SMA control logic based on the configuration in
102 * the ptp_pins parameter
103 */
104 static int
105 ice_ptp_set_sma_config_e810t(struct ice_hw *hw,
106 const struct ptp_pin_desc *ptp_pins)
107 {
108 int status;
109 u8 data;
110
111 /* SMA1 and UFL1 cannot be set to TX at the same time */
112 if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
113 ptp_pins[UFL1].func == PTP_PF_PEROUT)
114 return -EINVAL;
115
116 /* SMA2 and UFL2 cannot be set to RX at the same time */
117 if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
118 ptp_pins[UFL2].func == PTP_PF_EXTTS)
119 return -EINVAL;
120
121 /* Read initial pin state value */
122 status = ice_read_sma_ctrl_e810t(hw, &data);
123 if (status)
124 return status;
125
126 /* Set the right state based on the desired configuration */
127 data &= ~ICE_SMA1_MASK_E810T;
128 if (ptp_pins[SMA1].func == PTP_PF_NONE &&
129 ptp_pins[UFL1].func == PTP_PF_NONE) {
130 dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled");
131 data |= ICE_SMA1_MASK_E810T;
132 } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
133 ptp_pins[UFL1].func == PTP_PF_NONE) {
134 dev_info(ice_hw_to_dev(hw), "SMA1 RX");
135 data |= ICE_SMA1_TX_EN_E810T;
136 } else if (ptp_pins[SMA1].func == PTP_PF_NONE &&
137 ptp_pins[UFL1].func == PTP_PF_PEROUT) {
138 /* U.FL 1 TX will always enable SMA 1 RX */
139 dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
140 } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
141 ptp_pins[UFL1].func == PTP_PF_PEROUT) {
142 dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
143 } else if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
144 ptp_pins[UFL1].func == PTP_PF_NONE) {
145 dev_info(ice_hw_to_dev(hw), "SMA1 TX");
146 data |= ICE_SMA1_DIR_EN_E810T;
147 }
148
149 data &= ~ICE_SMA2_MASK_E810T;
150 if (ptp_pins[SMA2].func == PTP_PF_NONE &&
151 ptp_pins[UFL2].func == PTP_PF_NONE) {
152 dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled");
153 data |= ICE_SMA2_MASK_E810T;
154 } else if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
155 ptp_pins[UFL2].func == PTP_PF_NONE) {
156 dev_info(ice_hw_to_dev(hw), "SMA2 RX");
157 data |= (ICE_SMA2_TX_EN_E810T |
158 ICE_SMA2_UFL2_RX_DIS_E810T);
159 } else if (ptp_pins[SMA2].func == PTP_PF_NONE &&
160 ptp_pins[UFL2].func == PTP_PF_EXTTS) {
161 dev_info(ice_hw_to_dev(hw), "UFL2 RX");
162 data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T);
163 } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
164 ptp_pins[UFL2].func == PTP_PF_NONE) {
165 dev_info(ice_hw_to_dev(hw), "SMA2 TX");
166 data |= (ICE_SMA2_DIR_EN_E810T |
167 ICE_SMA2_UFL2_RX_DIS_E810T);
168 } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
169 ptp_pins[UFL2].func == PTP_PF_EXTTS) {
170 dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX");
171 data |= ICE_SMA2_DIR_EN_E810T;
172 }
173
174 return ice_write_sma_ctrl_e810t(hw, data);
175 }
176
177 /**
178 * ice_ptp_set_sma_e810t
179 * @info: the driver's PTP info structure
180 * @pin: pin index in kernel structure
181 * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT)
182 *
183 * Set the configuration of a single SMA pin
184 */
185 static int
186 ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin,
187 enum ptp_pin_function func)
188 {
189 struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T];
190 struct ice_pf *pf = ptp_info_to_pf(info);
191 struct ice_hw *hw = &pf->hw;
192 int err;
193
194 if (pin < SMA1 || func > PTP_PF_PEROUT)
195 return -EOPNOTSUPP;
196
197 err = ice_get_sma_config_e810t(hw, ptp_pins);
198 if (err)
199 return err;
200
201 /* Disable the same function on the other pin sharing the channel */
202 if (pin == SMA1 && ptp_pins[UFL1].func == func)
203 ptp_pins[UFL1].func = PTP_PF_NONE;
204 if (pin == UFL1 && ptp_pins[SMA1].func == func)
205 ptp_pins[SMA1].func = PTP_PF_NONE;
206
207 if (pin == SMA2 && ptp_pins[UFL2].func == func)
208 ptp_pins[UFL2].func = PTP_PF_NONE;
209 if (pin == UFL2 && ptp_pins[SMA2].func == func)
210 ptp_pins[SMA2].func = PTP_PF_NONE;
211
212 /* Set up new pin function in the temp table */
213 ptp_pins[pin].func = func;
214
215 return ice_ptp_set_sma_config_e810t(hw, ptp_pins);
216 }
217
218 /**
219 * ice_verify_pin_e810t
220 * @info: the driver's PTP info structure
221 * @pin: Pin index
222 * @func: Assigned function
223 * @chan: Assigned channel
224 *
225 * Verify that the pin supports the requested function and check pin consistency.
226 * Reconfigure the SMA logic attached to the given pin to enable its
227 * desired functionality
228 */
229 static int
230 ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
231 enum ptp_pin_function func, unsigned int chan)
232 {
233 /* Don't allow channel reassignment */
234 if (chan != ice_pin_desc_e810t[pin].chan)
235 return -EOPNOTSUPP;
236
237 /* Check if functions are properly assigned */
238 switch (func) {
239 case PTP_PF_NONE:
240 break;
241 case PTP_PF_EXTTS:
242 if (pin == UFL1)
243 return -EOPNOTSUPP;
244 break;
245 case PTP_PF_PEROUT:
246 if (pin == UFL2 || pin == GNSS)
247 return -EOPNOTSUPP;
248 break;
249 case PTP_PF_PHYSYNC:
250 return -EOPNOTSUPP;
251 }
252
253 return ice_ptp_set_sma_e810t(info, pin, func);
254 }
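/* Illustrative sketch only (not part of the driver): a minimal userspace
 * program that requests PTP_PF_PEROUT on the SMA1 pin through the PTP
 * character device. The PTP_PIN_SETFUNC ioctl ends up in the .verify
 * callback (ice_verify_pin_e810t above) and then in ice_ptp_set_sma_e810t.
 * The clock device path and the pin/channel numbers are assumptions for the
 * example; real values depend on the system.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/ptp_clock.h>
 *
 *	int request_sma1_perout(void)
 *	{
 *		struct ptp_pin_desc desc = { 0 };
 *		int fd, err;
 *
 *		fd = open("/dev/ptp0", O_RDWR);	// assumed clock index
 *		if (fd < 0)
 *			return -1;
 *		desc.index = 1;			// SMA1 in ice_pin_desc_e810t
 *		desc.func = PTP_PF_PEROUT;	// periodic output
 *		desc.chan = 1;			// must match the pin table
 *		err = ioctl(fd, PTP_PIN_SETFUNC, &desc);
 *		close(fd);
 *		return err;
 *	}
 */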
255
256 /**
257 * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
258 * @pf: Board private structure
259 *
260 * Program the device to respond appropriately to the Tx timestamp interrupt
261 * cause.
262 */
263 static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf)
264 {
265 struct ice_hw *hw = &pf->hw;
266 bool enable;
267 u32 val;
268
269 switch (pf->ptp.tx_interrupt_mode) {
270 case ICE_PTP_TX_INTERRUPT_ALL:
271 /* React to interrupts across all quads. */
272 wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
273 enable = true;
274 break;
275 case ICE_PTP_TX_INTERRUPT_NONE:
276 /* Do not react to interrupts on any quad. */
277 wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
278 enable = false;
279 break;
280 case ICE_PTP_TX_INTERRUPT_SELF:
281 default:
282 enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON;
283 break;
284 }
285
286 /* Configure the Tx timestamp interrupt */
287 val = rd32(hw, PFINT_OICR_ENA);
288 if (enable)
289 val |= PFINT_OICR_TSYN_TX_M;
290 else
291 val &= ~PFINT_OICR_TSYN_TX_M;
292 wr32(hw, PFINT_OICR_ENA, val);
293 }
294
295 /**
296 * ice_set_rx_tstamp - Enable or disable Rx timestamping
297 * @pf: The PF pointer to search in
298 * @on: bool value for whether timestamps are enabled or disabled
299 */
300 static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
301 {
302 struct ice_vsi *vsi;
303 u16 i;
304
305 vsi = ice_get_main_vsi(pf);
306 if (!vsi || !vsi->rx_rings)
307 return;
308
309 /* Set the timestamp flag for all the Rx rings */
310 ice_for_each_rxq(vsi, i) {
311 if (!vsi->rx_rings[i])
312 continue;
313 vsi->rx_rings[i]->ptp_rx = on;
314 }
315 }
316
317 /**
318 * ice_ptp_disable_timestamp_mode - Disable current timestamp mode
319 * @pf: Board private structure
320 *
321 * Called during preparation for reset to temporarily disable timestamping on
322 * the device. Called during remove to disable timestamping while cleaning up
323 * driver resources.
324 */
325 static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf)
326 {
327 struct ice_hw *hw = &pf->hw;
328 u32 val;
329
330 val = rd32(hw, PFINT_OICR_ENA);
331 val &= ~PFINT_OICR_TSYN_TX_M;
332 wr32(hw, PFINT_OICR_ENA, val);
333
334 ice_set_rx_tstamp(pf, false);
335 }
336
337 /**
338 * ice_ptp_restore_timestamp_mode - Restore timestamp configuration
339 * @pf: Board private structure
340 *
341 * Called at the end of rebuild to restore timestamp configuration after
342 * a device reset.
343 */
344 void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
345 {
346 struct ice_hw *hw = &pf->hw;
347 bool enable_rx;
348
349 ice_ptp_cfg_tx_interrupt(pf);
350
351 enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
352 ice_set_rx_tstamp(pf, enable_rx);
353
354 /* Trigger an immediate software interrupt to ensure that timestamps
355 * which occurred during reset are handled now.
356 */
357 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
358 ice_flush(hw);
359 }
360
361 /**
362 * ice_ptp_read_src_clk_reg - Read the source clock register
363 * @pf: Board private structure
364 * @sts: Optional parameter for holding a pair of system timestamps from
365 * the system clock. Will be ignored if NULL is given.
366 */
367 static u64
368 ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
369 {
370 struct ice_hw *hw = &pf->hw;
371 u32 hi, lo, lo2;
372 u8 tmr_idx;
373
374 tmr_idx = ice_get_ptp_src_clock_index(hw);
375 guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
376 /* Read the system timestamp pre PHC read */
377 ptp_read_system_prets(sts);
378
379 lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
380
381 /* Read the system timestamp post PHC read */
382 ptp_read_system_postts(sts);
383
384 hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
385 lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));
386
387 if (lo2 < lo) {
388 /* if TIME_L rolled over read TIME_L again and update
389 * system timestamps
390 */
391 ptp_read_system_prets(sts);
392 lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
393 ptp_read_system_postts(sts);
394 hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
395 }
396
397 return ((u64)hi << 32) | lo;
398 }
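/* A minimal sketch of the split-register read pattern used above, shown in
 * isolation with the register accesses replaced by a hypothetical read_reg()
 * callback (illustration only, not driver code):
 *
 *	static u64 read_split_counter(u32 (*read_reg)(int reg), int lo_reg, int hi_reg)
 *	{
 *		u32 lo = read_reg(lo_reg);
 *		u32 hi = read_reg(hi_reg);
 *		u32 lo2 = read_reg(lo_reg);
 *
 *		if (lo2 < lo) {
 *			// The low word wrapped between the two reads, so hi may
 *			// belong to either the old or the new 2^32 ns window;
 *			// re-read both halves to get a consistent pair.
 *			lo = read_reg(lo_reg);
 *			hi = read_reg(hi_reg);
 *		}
 *		return ((u64)hi << 32) | lo;
 *	}
 */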
399
400 /**
401 * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
402 * @cached_phc_time: recently cached copy of PHC time
403 * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
404 *
405 * Hardware captures timestamps which contain only 32 bits of nominal
406 * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
407 * Note that the captured timestamp values may be 40 bits, but the lower
408 * 8 bits are sub-nanoseconds and generally discarded.
409 *
410 * Extend the 32bit nanosecond timestamp using the following algorithm and
411 * assumptions:
412 *
413 * 1) have a recently cached copy of the PHC time
414 * 2) assume that the in_tstamp was captured no more than 2^31 nanoseconds
415 * (~2.1 seconds) before or after the PHC time was captured.
416 * 3) calculate the delta between the cached time and the timestamp
417 * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
418 * captured after the PHC time. In this case, the full timestamp is just
419 * the cached PHC time plus the delta.
420 * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
421 * timestamp was captured *before* the PHC time, i.e. because the PHC
422 * cache was updated after the timestamp was captured by hardware. In this
423 * case, the full timestamp is the cached time minus the inverse delta.
424 *
425 * This algorithm works even if the PHC time was updated after a Tx timestamp
426 * was requested, but before the Tx timestamp event was reported from
427 * hardware.
428 *
429 * This calculation primarily relies on keeping the cached PHC time up to
430 * date. If the timestamp was captured more than 2^31 nanoseconds after the
431 * PHC time, it is possible that the lower 32bits of PHC time have
432 * overflowed more than once, and we might generate an incorrect timestamp.
433 *
434 * This is prevented by (a) periodically updating the cached PHC time once
435 * a second, and (b) discarding any Tx timestamp packet if it has waited for
436 * a timestamp for more than one second.
437 */
438 static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
439 {
440 u32 delta, phc_time_lo;
441 u64 ns;
442
443 /* Extract the lower 32 bits of the PHC time */
444 phc_time_lo = (u32)cached_phc_time;
445
446 /* Calculate the delta between the lower 32bits of the cached PHC
447 * time and the in_tstamp value
448 */
449 delta = (in_tstamp - phc_time_lo);
450
451 /* Do not assume that the in_tstamp is always more recent than the
452 * cached PHC time. If the delta is large, it indicates that the
453 * in_tstamp was taken in the past, and should be converted
454 * forward.
455 */
456 if (delta > (U32_MAX / 2)) {
457 /* reverse the delta calculation here */
458 delta = (phc_time_lo - in_tstamp);
459 ns = cached_phc_time - delta;
460 } else {
461 ns = cached_phc_time + delta;
462 }
463
464 return ns;
465 }
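/* Worked example of the extension math above (values invented for
 * illustration): with cached_phc_time = 0x00000005FFFFFFF0 and
 * in_tstamp = 0x00000020, delta = 0x20 - 0xFFFFFFF0 = 0x30 (mod 2^32),
 * which is below U32_MAX / 2, so the timestamp is treated as newer than the
 * cache and ns = 0x00000005FFFFFFF0 + 0x30 = 0x0000000600000020. Had
 * in_tstamp been 0xFFFFFF00 instead, delta would exceed U32_MAX / 2, the
 * reverse delta 0xF0 would be used, and ns = 0x00000005FFFFFF00.
 */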
466
467 /**
468 * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
469 * @pf: Board private structure
470 * @in_tstamp: Ingress/egress 40b timestamp value
471 *
472 * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
473 * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
474 *
475 * *--------------------------------------------------------------*
476 * | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
477 * *--------------------------------------------------------------*
478 *
479 * The low bit is an indicator of whether the timestamp is valid. The next
480 * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
481 * and the remaining 32 bits are the lower 32 bits of the PHC timer.
482 *
483 * It is assumed that the caller verifies the timestamp is valid prior to
484 * calling this function.
485 *
486 * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
487 * time stored in the device private PTP structure as the basis for timestamp
488 * extension.
489 *
490 * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
491 * algorithm.
492 */
493 static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
494 {
495 const u64 mask = GENMASK_ULL(31, 0);
496 unsigned long discard_time;
497
498 /* Discard the hardware timestamp if the cached PHC time is too old */
499 discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
500 if (time_is_before_jiffies(discard_time)) {
501 pf->ptp.tx_hwtstamp_discarded++;
502 return 0;
503 }
504
505 return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
506 (in_tstamp >> 8) & mask);
507 }
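/* Worked example of the 40-bit unpacking above (value invented for
 * illustration): for a raw PHY timestamp of 0x123456789B, the low byte 0x9B
 * holds the valid bit (bit 0 set) plus the sub-nanosecond bits, and
 * (in_tstamp >> 8) & GENMASK_ULL(31, 0) yields 0x12345678, the 32-bit
 * nominal nanoseconds handed to ice_ptp_extend_32b_ts().
 */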
508
509 /**
510 * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps
511 * @tx: the PTP Tx timestamp tracker to check
512 *
513 * Check that a given PTP Tx timestamp tracker is up, i.e. that it is ready
514 * to accept new timestamp requests.
515 *
516 * Assumes the tx->lock spinlock is already held.
517 */
518 static bool
519 ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
520 {
521 lockdep_assert_held(&tx->lock);
522
523 return tx->init && !tx->calibrating;
524 }
525
526 /**
527 * ice_ptp_req_tx_single_tstamp - Request Tx timestamp for a port from FW
528 * @tx: the PTP Tx timestamp tracker
529 * @idx: index of the timestamp to request
530 */
531 void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
532 {
533 struct ice_ptp_port *ptp_port;
534 struct sk_buff *skb;
535 struct ice_pf *pf;
536
537 if (!tx->init)
538 return;
539
540 ptp_port = container_of(tx, struct ice_ptp_port, tx);
541 pf = ptp_port_to_pf(ptp_port);
542
543 /* Drop packets which have waited for more than 2 seconds */
544 if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
545 /* Count the number of Tx timestamps that timed out */
546 pf->ptp.tx_hwtstamp_timeouts++;
547
548 skb = tx->tstamps[idx].skb;
549 tx->tstamps[idx].skb = NULL;
550 clear_bit(idx, tx->in_use);
551
552 dev_kfree_skb_any(skb);
553 return;
554 }
555
556 ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
557
558 /* Write the TS index to read into the PF register so the FW can read it */
559 wr32(&pf->hw, PF_SB_ATQBAL,
560 TS_LL_READ_TS_INTR | FIELD_PREP(TS_LL_READ_TS_IDX, idx) |
561 TS_LL_READ_TS);
562 tx->last_ll_ts_idx_read = idx;
563 }
564
565 /**
566 * ice_ptp_complete_tx_single_tstamp - Complete Tx timestamp for a port
567 * @tx: the PTP Tx timestamp tracker
568 */
569 void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
570 {
571 struct skb_shared_hwtstamps shhwtstamps = {};
572 u8 idx = tx->last_ll_ts_idx_read;
573 struct ice_ptp_port *ptp_port;
574 u64 raw_tstamp, tstamp;
575 bool drop_ts = false;
576 struct sk_buff *skb;
577 struct ice_pf *pf;
578 u32 val;
579
580 if (!tx->init || tx->last_ll_ts_idx_read < 0)
581 return;
582
583 ptp_port = container_of(tx, struct ice_ptp_port, tx);
584 pf = ptp_port_to_pf(ptp_port);
585
586 ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
587
588 val = rd32(&pf->hw, PF_SB_ATQBAL);
589
590 /* When the bit is cleared, the TS is ready in the register */
591 if (val & TS_LL_READ_TS) {
592 dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready");
593 return;
594 }
595
596 /* The high 8 bits of the TS are in bits 16:23 */
597 raw_tstamp = FIELD_GET(TS_LL_READ_TS_HIGH, val);
598 raw_tstamp <<= 32;
599
600 /* Read the low 32 bit value */
601 raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH);
602
603 /* Devices using this interface always verify that the timestamp differs
604 * from the last cached timestamp value.
605 */
606 if (raw_tstamp == tx->tstamps[idx].cached_tstamp)
607 return;
608
609 tx->tstamps[idx].cached_tstamp = raw_tstamp;
610 clear_bit(idx, tx->in_use);
611 skb = tx->tstamps[idx].skb;
612 tx->tstamps[idx].skb = NULL;
613 if (test_and_clear_bit(idx, tx->stale))
614 drop_ts = true;
615
616 if (!skb)
617 return;
618
619 if (drop_ts) {
620 dev_kfree_skb_any(skb);
621 return;
622 }
623
624 /* Extend the timestamp using cached PHC time */
625 tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
626 if (tstamp) {
627 shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
628 ice_trace(tx_tstamp_complete, skb, idx);
629 }
630
631 skb_tstamp_tx(skb, &shhwtstamps);
632 dev_kfree_skb_any(skb);
633 }
634
635 /**
636 * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port
637 * @tx: the PTP Tx timestamp tracker
638 *
639 * Process timestamps captured by the PHY associated with this port. To do
640 * this, loop over each index with a waiting skb.
641 *
642 * If a given index has a valid timestamp, perform the following steps:
643 *
644 * 1) check that the timestamp request is not stale
645 * 2) check that a timestamp is ready and available in the PHY memory bank
646 * 3) read and copy the timestamp out of the PHY register
647 * 4) unlock the index by clearing the associated in_use bit
648 * 5) check if the timestamp is stale, and discard if so
649 * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value
650 * 7) send this 64 bit timestamp to the stack
651 *
652 * Note that we do not hold the tracking lock while reading the Tx timestamp.
653 * This is because reading the timestamp requires taking a mutex that might
654 * sleep.
655 *
656 * The only place where we set in_use is when a new timestamp is initiated
657 * with a slot index. This is only called in the hard xmit routine where an
658 * SKB has a request flag set. The only places where we clear this bit is this
659 * function, or during teardown when the Tx timestamp tracker is being
660 * removed. A timestamp index will never be re-used until the in_use bit for
661 * that index is cleared.
662 *
663 * If a Tx thread starts a new timestamp, we might not begin processing it
664 * right away but we will notice it at the end when we re-queue the task.
665 *
666 * If a Tx thread starts a new timestamp just after this function exits, the
667 * interrupt for that timestamp should re-trigger this function once
668 * a timestamp is ready.
669 *
670 * In cases where the PTP hardware clock was directly adjusted, some
671 * timestamps may not be able to safely use the timestamp extension math. In
672 * this case, software will set the stale bit for any outstanding Tx
673 * timestamps when the clock is adjusted. Then this function will discard
674 * those captured timestamps instead of sending them to the stack.
675 *
676 * If a Tx packet has been waiting for more than 2 seconds, it is not possible
677 * to correctly extend the timestamp using the cached PHC time. It is
678 * extremely unlikely that a packet will ever take this long to timestamp. If
679 * we detect a Tx timestamp request that has waited for this long we assume
680 * the packet will never be sent by hardware and discard it without reading
681 * the timestamp register.
682 */
683 static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
684 {
685 struct ice_ptp_port *ptp_port;
686 unsigned long flags;
687 struct ice_pf *pf;
688 struct ice_hw *hw;
689 u64 tstamp_ready;
690 bool link_up;
691 int err;
692 u8 idx;
693
694 ptp_port = container_of(tx, struct ice_ptp_port, tx);
695 pf = ptp_port_to_pf(ptp_port);
696 hw = &pf->hw;
697
698 /* Read the Tx ready status first */
699 if (tx->has_ready_bitmap) {
700 err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
701 if (err)
702 return;
703 }
704
705 /* Drop packets if the link went down */
706 link_up = ptp_port->link_up;
707
708 for_each_set_bit(idx, tx->in_use, tx->len) {
709 struct skb_shared_hwtstamps shhwtstamps = {};
710 u8 phy_idx = idx + tx->offset;
711 u64 raw_tstamp = 0, tstamp;
712 bool drop_ts = !link_up;
713 struct sk_buff *skb;
714
715 /* Drop packets which have waited for more than 2 seconds */
716 if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
717 drop_ts = true;
718
719 /* Count the number of Tx timestamps that timed out */
720 pf->ptp.tx_hwtstamp_timeouts++;
721 }
722
723 /* Only read a timestamp from the PHY if it's marked as ready
724 * by the tstamp_ready register. This avoids unnecessary
725 * reading of timestamps which are not yet valid. This is
726 * important as we must read all timestamps which are valid
727 * and only timestamps which are valid during each interrupt.
728 * If we do not, the hardware logic for generating a new
729 * interrupt can get stuck on some devices.
730 */
731 if (tx->has_ready_bitmap &&
732 !(tstamp_ready & BIT_ULL(phy_idx))) {
733 if (drop_ts)
734 goto skip_ts_read;
735
736 continue;
737 }
738
739 ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
740
741 err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp);
742 if (err && !drop_ts)
743 continue;
744
745 ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
746
747 /* For PHYs which don't implement a proper timestamp ready
748 * bitmap, verify that the timestamp value is different
749 * from the last cached timestamp. If it is not, skip this for
750 * now assuming it hasn't yet been captured by hardware.
751 */
752 if (!drop_ts && !tx->has_ready_bitmap &&
753 raw_tstamp == tx->tstamps[idx].cached_tstamp)
754 continue;
755
756 /* Discard any timestamp value without the valid bit set */
757 if (!(raw_tstamp & ICE_PTP_TS_VALID))
758 drop_ts = true;
759
760 skip_ts_read:
761 spin_lock_irqsave(&tx->lock, flags);
762 if (!tx->has_ready_bitmap && raw_tstamp)
763 tx->tstamps[idx].cached_tstamp = raw_tstamp;
764 clear_bit(idx, tx->in_use);
765 skb = tx->tstamps[idx].skb;
766 tx->tstamps[idx].skb = NULL;
767 if (test_and_clear_bit(idx, tx->stale))
768 drop_ts = true;
769 spin_unlock_irqrestore(&tx->lock, flags);
770
771 /* It is unlikely but possible that the SKB will have been
772 * flushed at this point due to link change or teardown.
773 */
774 if (!skb)
775 continue;
776
777 if (drop_ts) {
778 dev_kfree_skb_any(skb);
779 continue;
780 }
781
782 /* Extend the timestamp using cached PHC time */
783 tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
784 if (tstamp) {
785 shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
786 ice_trace(tx_tstamp_complete, skb, idx);
787 }
788
789 skb_tstamp_tx(skb, &shhwtstamps);
790 dev_kfree_skb_any(skb);
791 }
792 }
793
794 /**
795 * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device
796 * @pf: Board private structure
797 */
798 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
799 {
800 struct ice_ptp_port *port;
801 unsigned int i;
802
803 mutex_lock(&pf->ptp.ports_owner.lock);
804 list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member) {
805 struct ice_ptp_tx *tx = &port->tx;
806
807 if (!tx || !tx->init)
808 continue;
809
810 ice_ptp_process_tx_tstamp(tx);
811 }
812 mutex_unlock(&pf->ptp.ports_owner.lock);
813
814 for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) {
815 u64 tstamp_ready;
816 int err;
817
818 /* Read the Tx ready status first */
819 err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
820 if (err)
821 break;
822 else if (tstamp_ready)
823 return ICE_TX_TSTAMP_WORK_PENDING;
824 }
825
826 return ICE_TX_TSTAMP_WORK_DONE;
827 }
828
829 /**
830 * ice_ptp_tx_tstamp - Process Tx timestamps for this function.
831 * @tx: the Tx tracking structure to process
832 *
833 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete
834 * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise.
835 */
836 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
837 {
838 bool more_timestamps;
839 unsigned long flags;
840
841 if (!tx->init)
842 return ICE_TX_TSTAMP_WORK_DONE;
843
844 /* Process the Tx timestamp tracker */
845 ice_ptp_process_tx_tstamp(tx);
846
847 /* Check if there are outstanding Tx timestamps */
848 spin_lock_irqsave(&tx->lock, flags);
849 more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);
850 spin_unlock_irqrestore(&tx->lock, flags);
851
852 if (more_timestamps)
853 return ICE_TX_TSTAMP_WORK_PENDING;
854
855 return ICE_TX_TSTAMP_WORK_DONE;
856 }
857
858 /**
859 * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
860 * @tx: Tx tracking structure to initialize
861 *
862 * Assumes that the length has already been initialized. Do not call directly,
863 * use one of the ice_ptp_init_tx_* helpers instead.
864 */
865 static int
866 ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
867 {
868 unsigned long *in_use, *stale;
869 struct ice_tx_tstamp *tstamps;
870
871 tstamps = kcalloc(tx->len, sizeof(*tstamps), GFP_KERNEL);
872 in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
873 stale = bitmap_zalloc(tx->len, GFP_KERNEL);
874
875 if (!tstamps || !in_use || !stale) {
876 kfree(tstamps);
877 bitmap_free(in_use);
878 bitmap_free(stale);
879
880 return -ENOMEM;
881 }
882
883 tx->tstamps = tstamps;
884 tx->in_use = in_use;
885 tx->stale = stale;
886 tx->init = 1;
887 tx->last_ll_ts_idx_read = -1;
888
889 spin_lock_init(&tx->lock);
890
891 return 0;
892 }
893
894 /**
895 * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
896 * @pf: Board private structure
897 * @tx: the tracker to flush
898 *
899 * Called during teardown when a Tx tracker is being removed.
900 */
901 static void
902 ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
903 {
904 struct ice_hw *hw = &pf->hw;
905 unsigned long flags;
906 u64 tstamp_ready;
907 int err;
908 u8 idx;
909
910 err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
911 if (err) {
912 dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n",
913 tx->block, err);
914
915 /* If we fail to read the Tx timestamp ready bitmap just
916 * skip clearing the PHY timestamps.
917 */
918 tstamp_ready = 0;
919 }
920
921 for_each_set_bit(idx, tx->in_use, tx->len) {
922 u8 phy_idx = idx + tx->offset;
923 struct sk_buff *skb;
924
925 /* In case this timestamp is ready, we need to clear it. */
926 if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx)))
927 ice_clear_phy_tstamp(hw, tx->block, phy_idx);
928
929 spin_lock_irqsave(&tx->lock, flags);
930 skb = tx->tstamps[idx].skb;
931 tx->tstamps[idx].skb = NULL;
932 clear_bit(idx, tx->in_use);
933 clear_bit(idx, tx->stale);
934 spin_unlock_irqrestore(&tx->lock, flags);
935
936 /* Count the number of Tx timestamps flushed */
937 pf->ptp.tx_hwtstamp_flushed++;
938
939 /* Free the SKB after we've cleared the bit */
940 dev_kfree_skb_any(skb);
941 }
942 }
943
944 /**
945 * ice_ptp_mark_tx_tracker_stale - Mark unfinished timestamps as stale
946 * @tx: the tracker to mark
947 *
948 * Mark currently outstanding Tx timestamps as stale. This prevents sending
949 * their timestamp value to the stack. This is required to prevent extending
950 * the 40bit hardware timestamp incorrectly.
951 *
952 * This should be called when the PTP clock is modified such as after a set
953 * time request.
954 */
955 static void
956 ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
957 {
958 unsigned long flags;
959
960 spin_lock_irqsave(&tx->lock, flags);
961 bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len);
962 spin_unlock_irqrestore(&tx->lock, flags);
963 }
964
965 /**
966 * ice_ptp_flush_all_tx_tracker - Flush all timestamp trackers on this clock
967 * @pf: Board private structure
968 *
969 * Called by the clock owner to flush all the Tx timestamp trackers associated
970 * with the clock.
971 */
972 static void
973 ice_ptp_flush_all_tx_tracker(struct ice_pf *pf)
974 {
975 struct ice_ptp_port *port;
976
977 list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member)
978 ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx);
979 }
980
981 /**
982 * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
983 * @pf: Board private structure
984 * @tx: Tx tracking structure to release
985 *
986 * Free memory associated with the Tx timestamp tracker.
987 */
988 static void
989 ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
990 {
991 unsigned long flags;
992
993 spin_lock_irqsave(&tx->lock, flags);
994 tx->init = 0;
995 spin_unlock_irqrestore(&tx->lock, flags);
996
997 /* wait for potentially outstanding interrupt to complete */
998 synchronize_irq(pf->oicr_irq.virq);
999
1000 ice_ptp_flush_tx_tracker(pf, tx);
1001
1002 kfree(tx->tstamps);
1003 tx->tstamps = NULL;
1004
1005 bitmap_free(tx->in_use);
1006 tx->in_use = NULL;
1007
1008 bitmap_free(tx->stale);
1009 tx->stale = NULL;
1010
1011 tx->len = 0;
1012 }
1013
1014 /**
1015 * ice_ptp_init_tx_eth56g - Initialize tracking for Tx timestamps
1016 * @pf: Board private structure
1017 * @tx: the Tx tracking structure to initialize
1018 * @port: the port this structure tracks
1019 *
1020 * Initialize the Tx timestamp tracker for this port. ETH56G PHYs
1021 * have independent memory blocks for all ports.
1022 *
1023 * Return: 0 for success, -ENOMEM when failed to allocate Tx tracker
1024 */
1025 static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx,
1026 u8 port)
1027 {
1028 tx->block = port;
1029 tx->offset = 0;
1030 tx->len = INDEX_PER_PORT_ETH56G;
1031 tx->has_ready_bitmap = 1;
1032
1033 return ice_ptp_alloc_tx_tracker(tx);
1034 }
1035
1036 /**
1037 * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
1038 * @pf: Board private structure
1039 * @tx: the Tx tracking structure to initialize
1040 * @port: the port this structure tracks
1041 *
1042 * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
1043 * the timestamp block is shared for all ports in the same quad. To avoid
1044 * ports using the same timestamp index, logically break the block of
1045 * registers into chunks based on the port number.
1046 */
1047 static int
1048 ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
1049 {
1050 tx->block = ICE_GET_QUAD_NUM(port);
1051 tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
1052 tx->len = INDEX_PER_PORT_E82X;
1053 tx->has_ready_bitmap = 1;
1054
1055 return ice_ptp_alloc_tx_tracker(tx);
1056 }
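/* Worked example of the index math above, assuming ICE_PORTS_PER_QUAD is 4
 * and INDEX_PER_PORT_E82X is 16 (both values are assumptions here; see the
 * driver headers for the real definitions): port 6 maps to quad
 * ICE_GET_QUAD_NUM(6) = 1 with offset (6 % 4) * 16 = 32, so this port owns
 * timestamp indices 32-47 of the quad's shared block.
 */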
1057
1058 /**
1059 * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
1060 * @pf: Board private structure
1061 * @tx: the Tx tracking structure to initialize
1062 *
1063 * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
1064 * port has its own block of timestamps, independent of the other ports.
1065 */
1066 static int
1067 ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
1068 {
1069 tx->block = pf->hw.port_info->lport;
1070 tx->offset = 0;
1071 tx->len = INDEX_PER_PORT_E810;
1072 /* The E810 PHY does not provide a timestamp ready bitmap. Instead,
1073 * verify new timestamps against cached copy of the last read
1074 * timestamp.
1075 */
1076 tx->has_ready_bitmap = 0;
1077
1078 return ice_ptp_alloc_tx_tracker(tx);
1079 }
1080
1081 /**
1082 * ice_ptp_update_cached_phctime - Update the cached PHC time values
1083 * @pf: Board specific private structure
1084 *
1085 * This function updates the system time values which are cached in the PF
1086 * structure and the Rx rings.
1087 *
1088 * This function must be called periodically to ensure that the cached value
1089 * is never more than 2 seconds old.
1090 *
1091 * Note that the cached copy in the PF PTP structure is always updated, even
1092 * if we can't update the copy in the Rx rings.
1093 *
1094 * Return:
1095 * * 0 - OK, successfully updated
1096 * * -EAGAIN - PF was busy, need to reschedule the update
1097 */
1098 static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
1099 {
1100 struct device *dev = ice_pf_to_dev(pf);
1101 unsigned long update_before;
1102 u64 systime;
1103 int i;
1104
1105 update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
1106 if (pf->ptp.cached_phc_time &&
1107 time_is_before_jiffies(update_before)) {
1108 unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;
1109
1110 dev_warn(dev, "%u msecs passed between updates to the cached PHC time\n",
1111 jiffies_to_msecs(time_taken));
1112 pf->ptp.late_cached_phc_updates++;
1113 }
1114
1115 /* Read the current PHC time */
1116 systime = ice_ptp_read_src_clk_reg(pf, NULL);
1117
1118 /* Update the cached PHC time stored in the PF structure */
1119 WRITE_ONCE(pf->ptp.cached_phc_time, systime);
1120 WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);
1121
1122 if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
1123 return -EAGAIN;
1124
1125 ice_for_each_vsi(pf, i) {
1126 struct ice_vsi *vsi = pf->vsi[i];
1127 int j;
1128
1129 if (!vsi)
1130 continue;
1131
1132 if (vsi->type != ICE_VSI_PF)
1133 continue;
1134
1135 ice_for_each_rxq(vsi, j) {
1136 if (!vsi->rx_rings[j])
1137 continue;
1138 WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
1139 }
1140 }
1141 clear_bit(ICE_CFG_BUSY, pf->state);
1142
1143 return 0;
1144 }
1145
1146 /**
1147 * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
1148 * @pf: Board specific private structure
1149 *
1150 * This function must be called when the cached PHC time is no longer valid,
1151 * such as after a time adjustment. It marks any currently outstanding Tx
1152 * timestamps as stale and updates the cached PHC time for both the PF and Rx
1153 * rings.
1154 *
1155 * If updating the PHC time cannot be done immediately, a warning message is
1156 * logged and the work item is scheduled immediately to minimize the window
1157 * with a wrong cached timestamp.
1158 */
1159 static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
1160 {
1161 struct device *dev = ice_pf_to_dev(pf);
1162 int err;
1163
1164 /* Update the cached PHC time immediately if possible, otherwise
1165 * schedule the work item to execute soon.
1166 */
1167 err = ice_ptp_update_cached_phctime(pf);
1168 if (err) {
1169 /* If another thread is updating the Rx rings, we won't
1170 * properly reset them here. This could lead to reporting of
1171 * invalid timestamps, but there isn't much we can do.
1172 */
1173 dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
1174 __func__);
1175
1176 /* Queue the work item to update the Rx rings when possible */
1177 kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
1178 msecs_to_jiffies(10));
1179 }
1180
1181 /* Mark any outstanding timestamps as stale, since they might have
1182 * been captured in hardware before the time update. This could lead
1183 * to us extending them with the wrong cached value resulting in
1184 * incorrect timestamp values.
1185 */
1186 ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx);
1187 }
1188
1189 /**
1190 * ice_ptp_write_init - Set PHC time to provided value
1191 * @pf: Board private structure
1192 * @ts: timespec structure that holds the new time value
1193 *
1194 * Set the PHC time to the specified time provided in the timespec.
1195 */
1196 static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
1197 {
1198 u64 ns = timespec64_to_ns(ts);
1199 struct ice_hw *hw = &pf->hw;
1200
1201 return ice_ptp_init_time(hw, ns);
1202 }
1203
1204 /**
1205 * ice_ptp_write_adj - Adjust PHC clock time atomically
1206 * @pf: Board private structure
1207 * @adj: Adjustment in nanoseconds
1208 *
1209 * Perform an atomic adjustment of the PHC time by the specified number of
1210 * nanoseconds.
1211 */
1212 static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
1213 {
1214 struct ice_hw *hw = &pf->hw;
1215
1216 return ice_ptp_adj_clock(hw, adj);
1217 }
1218
1219 /**
1220 * ice_base_incval - Get base timer increment value
1221 * @pf: Board private structure
1222 *
1223 * Look up the base timer increment value for this device. The base increment
1224 * value is used to define the nominal clock tick rate. This increment value
1225 * is programmed during device initialization. It is also used as the basis
1226 * for calculating adjustments using scaled_ppm.
1227 */
1228 static u64 ice_base_incval(struct ice_pf *pf)
1229 {
1230 struct ice_hw *hw = &pf->hw;
1231 u64 incval;
1232
1233 incval = ice_get_base_incval(hw);
1234
1235 dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
1236 incval);
1237
1238 return incval;
1239 }
1240
1241 /**
1242 * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state
1243 * @port: PTP port for which Tx FIFO is checked
1244 */
1245 static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
1246 {
1247 int offs = port->port_num % ICE_PORTS_PER_QUAD;
1248 int quad = ICE_GET_QUAD_NUM(port->port_num);
1249 struct ice_pf *pf;
1250 struct ice_hw *hw;
1251 u32 val, phy_sts;
1252 int err;
1253
1254 pf = ptp_port_to_pf(port);
1255 hw = &pf->hw;
1256
1257 if (port->tx_fifo_busy_cnt == FIFO_OK)
1258 return 0;
1259
1260 /* need to read FIFO state */
1261 if (offs == 0 || offs == 1)
1262 err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS,
1263 &val);
1264 else
1265 err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS,
1266 &val);
1267
1268 if (err) {
1269 dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
1270 port->port_num, err);
1271 return err;
1272 }
1273
1274 if (offs & 0x1)
1275 phy_sts = FIELD_GET(Q_REG_FIFO13_M, val);
1276 else
1277 phy_sts = FIELD_GET(Q_REG_FIFO02_M, val);
1278
1279 if (phy_sts & FIFO_EMPTY) {
1280 port->tx_fifo_busy_cnt = FIFO_OK;
1281 return 0;
1282 }
1283
1284 port->tx_fifo_busy_cnt++;
1285
1286 dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
1287 port->tx_fifo_busy_cnt, port->port_num);
1288
1289 if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
1290 dev_dbg(ice_pf_to_dev(pf),
1291 "Port %d Tx FIFO still not empty; resetting quad %d\n",
1292 port->port_num, quad);
1293 ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
1294 port->tx_fifo_busy_cnt = FIFO_OK;
1295 return 0;
1296 }
1297
1298 return -EAGAIN;
1299 }
1300
1301 /**
1302 * ice_ptp_wait_for_offsets - Check for valid Tx and Rx offsets
1303 * @work: Pointer to the kthread_work structure for this task
1304 *
1305 * Check whether hardware has completed measuring the Tx and Rx offset values
1306 * used to configure and enable vernier timestamp calibration.
1307 *
1308 * Once the offset in either direction is measured, configure the associated
1309 * registers with the calibrated offset values and enable timestamping. The Tx
1310 * and Rx directions are configured independently as soon as their associated
1311 * offsets are known.
1312 *
1313 * This function reschedules itself until both Tx and Rx calibration have
1314 * completed.
1315 */
1316 static void ice_ptp_wait_for_offsets(struct kthread_work *work)
1317 {
1318 struct ice_ptp_port *port;
1319 struct ice_pf *pf;
1320 struct ice_hw *hw;
1321 int tx_err;
1322 int rx_err;
1323
1324 port = container_of(work, struct ice_ptp_port, ov_work.work);
1325 pf = ptp_port_to_pf(port);
1326 hw = &pf->hw;
1327
1328 if (ice_is_reset_in_progress(pf->state)) {
1329 /* wait for device driver to complete reset */
1330 kthread_queue_delayed_work(pf->ptp.kworker,
1331 &port->ov_work,
1332 msecs_to_jiffies(100));
1333 return;
1334 }
1335
1336 tx_err = ice_ptp_check_tx_fifo(port);
1337 if (!tx_err)
1338 tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num);
1339 rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num);
1340 if (tx_err || rx_err) {
1341 /* Tx and/or Rx offset not yet configured, try again later */
1342 kthread_queue_delayed_work(pf->ptp.kworker,
1343 &port->ov_work,
1344 msecs_to_jiffies(100));
1345 return;
1346 }
1347 }
1348
1349 /**
1350 * ice_ptp_port_phy_stop - Stop timestamping for a PHY port
1351 * @ptp_port: PTP port to stop
1352 */
1353 static int
1354 ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
1355 {
1356 struct ice_pf *pf = ptp_port_to_pf(ptp_port);
1357 u8 port = ptp_port->port_num;
1358 struct ice_hw *hw = &pf->hw;
1359 int err;
1360
1361 if (ice_is_e810(hw))
1362 return 0;
1363
1364 mutex_lock(&ptp_port->ps_lock);
1365
1366 switch (hw->ptp.phy_model) {
1367 case ICE_PHY_ETH56G:
1368 err = ice_stop_phy_timer_eth56g(hw, port, true);
1369 break;
1370 case ICE_PHY_E82X:
1371 kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
1372
1373 err = ice_stop_phy_timer_e82x(hw, port, true);
1374 break;
1375 default:
1376 err = -ENODEV;
1377 }
1378 if (err && err != -EBUSY)
1379 dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
1380 port, err);
1381
1382 mutex_unlock(&ptp_port->ps_lock);
1383
1384 return err;
1385 }
1386
1387 /**
1388 * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping
1389 * @ptp_port: PTP port for which the PHY start is set
1390 *
1391 * Start the PHY timestamping block, and initiate Vernier timestamping
1392 * calibration. If timestamping cannot be calibrated (such as if link is down)
1393 * then disable the timestamping block instead.
1394 */
1395 static int
1396 ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
1397 {
1398 struct ice_pf *pf = ptp_port_to_pf(ptp_port);
1399 u8 port = ptp_port->port_num;
1400 struct ice_hw *hw = &pf->hw;
1401 unsigned long flags;
1402 int err;
1403
1404 if (ice_is_e810(hw))
1405 return 0;
1406
1407 if (!ptp_port->link_up)
1408 return ice_ptp_port_phy_stop(ptp_port);
1409
1410 mutex_lock(&ptp_port->ps_lock);
1411
1412 switch (hw->ptp.phy_model) {
1413 case ICE_PHY_ETH56G:
1414 err = ice_start_phy_timer_eth56g(hw, port);
1415 break;
1416 case ICE_PHY_E82X:
1417 /* Start the PHY timer in Vernier mode */
1418 kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
1419
1420 /* temporarily disable Tx timestamps while calibrating
1421 * PHY offset
1422 */
1423 spin_lock_irqsave(&ptp_port->tx.lock, flags);
1424 ptp_port->tx.calibrating = true;
1425 spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
1426 ptp_port->tx_fifo_busy_cnt = 0;
1427
1428 /* Start the PHY timer in Vernier mode */
1429 err = ice_start_phy_timer_e82x(hw, port);
1430 if (err)
1431 break;
1432
1433 /* Enable Tx timestamps right away */
1434 spin_lock_irqsave(&ptp_port->tx.lock, flags);
1435 ptp_port->tx.calibrating = false;
1436 spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
1437
1438 kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
1439 0);
1440 break;
1441 default:
1442 err = -ENODEV;
1443 }
1444
1445 if (err)
1446 dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
1447 port, err);
1448
1449 mutex_unlock(&ptp_port->ps_lock);
1450
1451 return err;
1452 }
1453
1454 /**
1455 * ice_ptp_link_change - Reconfigure PTP after link status change
1456 * @pf: Board private structure
1457 * @port: Port for which the PHY start is set
1458 * @linkup: Link is up or down
1459 */
1460 void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
1461 {
1462 struct ice_ptp_port *ptp_port;
1463 struct ice_hw *hw = &pf->hw;
1464
1465 if (pf->ptp.state != ICE_PTP_READY)
1466 return;
1467
1468 if (WARN_ON_ONCE(port >= hw->ptp.num_lports))
1469 return;
1470
1471 ptp_port = &pf->ptp.port;
1472 if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
1473 port *= 2;
1474 if (WARN_ON_ONCE(ptp_port->port_num != port))
1475 return;
1476
1477 /* Update cached link status for this port immediately */
1478 ptp_port->link_up = linkup;
1479
1480 /* Skip HW writes if reset is in progress */
1481 if (pf->hw.reset_ongoing)
1482 return;
1483
1484 switch (hw->ptp.phy_model) {
1485 case ICE_PHY_E810:
1486 /* Do not reconfigure E810 PHY */
1487 return;
1488 case ICE_PHY_ETH56G:
1489 case ICE_PHY_E82X:
1490 ice_ptp_port_phy_restart(ptp_port);
1491 return;
1492 default:
1493 dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__);
1494 }
1495 }
1496
1497 /**
1498 * ice_ptp_cfg_phy_interrupt - Configure PHY interrupt settings
1499 * @pf: PF private structure
1500 * @ena: bool value to enable or disable interrupt
1501 * @threshold: Minimum number of packets at which intr is triggered
1502 *
1503 * Utility function to configure all the PHY interrupt settings, including
1504 * whether the PHY interrupt is enabled, and what threshold to use. Also
1505 * configures the E82X timestamp owner to react to interrupts from all PHYs.
1506 *
1507 * Return: 0 on success, -EOPNOTSUPP when the PHY model is unsupported, other
1508 * error codes when failing to configure the PHY interrupt for E82X
1509 */
1510 static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
1511 {
1512 struct device *dev = ice_pf_to_dev(pf);
1513 struct ice_hw *hw = &pf->hw;
1514
1515 ice_ptp_reset_ts_memory(hw);
1516
1517 switch (hw->ptp.phy_model) {
1518 case ICE_PHY_ETH56G: {
1519 int port;
1520
1521 for (port = 0; port < hw->ptp.num_lports; port++) {
1522 int err;
1523
1524 err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
1525 if (err) {
1526 dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
1527 port, err);
1528 return err;
1529 }
1530 }
1531
1532 return 0;
1533 }
1534 case ICE_PHY_E82X: {
1535 int quad;
1536
1537 for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
1538 quad++) {
1539 int err;
1540
1541 err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
1542 if (err) {
1543 dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
1544 quad, err);
1545 return err;
1546 }
1547 }
1548
1549 return 0;
1550 }
1551 case ICE_PHY_E810:
1552 return 0;
1553 case ICE_PHY_UNSUP:
1554 default:
1555 dev_warn(dev, "%s: Unexpected PHY model %d\n", __func__,
1556 hw->ptp.phy_model);
1557 return -EOPNOTSUPP;
1558 }
1559 }
1560
1561 /**
1562 * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block
1563 * @pf: Board private structure
1564 */
1565 static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
1566 {
1567 ice_ptp_port_phy_restart(&pf->ptp.port);
1568 }
1569
1570 /**
1571 * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping
1572 * @pf: Board private structure
1573 */
1574 static void ice_ptp_restart_all_phy(struct ice_pf *pf)
1575 {
1576 struct list_head *entry;
1577
1578 list_for_each(entry, &pf->ptp.ports_owner.ports) {
1579 struct ice_ptp_port *port = list_entry(entry,
1580 struct ice_ptp_port,
1581 list_member);
1582
1583 if (port->link_up)
1584 ice_ptp_port_phy_restart(port);
1585 }
1586 }
1587
1588 /**
1589 * ice_ptp_adjfine - Adjust clock increment rate
1590 * @info: the driver's PTP info structure
1591 * @scaled_ppm: Parts per million with 16-bit fractional field
1592 *
1593 * Adjust the frequency of the clock by the indicated scaled ppm from the
1594 * base frequency.
1595 */
1596 static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
1597 {
1598 struct ice_pf *pf = ptp_info_to_pf(info);
1599 struct ice_hw *hw = &pf->hw;
1600 u64 incval;
1601 int err;
1602
1603 incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm);
1604 err = ice_ptp_write_incval_locked(hw, incval);
1605 if (err) {
1606 dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
1607 err);
1608 return -EIO;
1609 }
1610
1611 return 0;
1612 }
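/* Worked example of the adjfine math above: scaled_ppm has a 16-bit
 * fractional field, so scaled_ppm = 65536 requests a +1 ppm frequency
 * offset. With a hypothetical base increment value of 0x100000000,
 * adjust_by_scaled_ppm() would return approximately
 * 0x100000000 + 0x100000000 / 1000000 = 0x10000010C7, so the value added to
 * the timer each tick grows by about one part per million.
 */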
1613
1614 /**
1615 * ice_ptp_extts_event - Process PTP external clock event
1616 * @pf: Board private structure
1617 */
1618 void ice_ptp_extts_event(struct ice_pf *pf)
1619 {
1620 struct ptp_clock_event event;
1621 struct ice_hw *hw = &pf->hw;
1622 u8 chan, tmr_idx;
1623 u32 hi, lo;
1624
1625 /* Don't process timestamp events if PTP is not ready */
1626 if (pf->ptp.state != ICE_PTP_READY)
1627 return;
1628
1629 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1630 /* Event time is captured by one of the two matched registers
1631 * GLTSYN_EVNT_L: 32 LSB of sampled time event
1632 * GLTSYN_EVNT_H: 32 MSB of sampled time event
1633 * Event is defined in GLTSYN_EVNT_0 register
1634 */
1635 for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
1636 /* Check if channel is enabled */
1637 if (pf->ptp.ext_ts_irq & (1 << chan)) {
1638 lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
1639 hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
1640 event.timestamp = (((u64)hi) << 32) | lo;
1641 event.type = PTP_CLOCK_EXTTS;
1642 event.index = chan;
1643
1644 /* Fire event */
1645 ptp_clock_event(pf->ptp.clock, &event);
1646 pf->ptp.ext_ts_irq &= ~(1 << chan);
1647 }
1648 }
1649 }
1650
1651 /**
1652 * ice_ptp_cfg_extts - Configure EXTTS pin and channel
1653 * @pf: Board private structure
1654 * @chan: GPIO channel (0-3)
1655 * @config: desired EXTTS configuration.
1656 * @store: If set to true, the values will be stored
1657 *
1658 * Configure an external timestamp event on the requested channel.
1659 *
1660 * Return: 0 on success, -EOPNOTSUPP on unsupported flags
1661 */
1662 static int ice_ptp_cfg_extts(struct ice_pf *pf, unsigned int chan,
1663 struct ice_extts_channel *config, bool store)
1664 {
1665 u32 func, aux_reg, gpio_reg, irq_reg;
1666 struct ice_hw *hw = &pf->hw;
1667 u8 tmr_idx;
1668
1669 /* Reject requests with unsupported flags */
1670 if (config->flags & ~(PTP_ENABLE_FEATURE |
1671 PTP_RISING_EDGE |
1672 PTP_FALLING_EDGE |
1673 PTP_STRICT_FLAGS))
1674 return -EOPNOTSUPP;
1675
1676 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1677
1678 irq_reg = rd32(hw, PFINT_OICR_ENA);
1679
1680 if (config->ena) {
1681 /* Enable the interrupt */
1682 irq_reg |= PFINT_OICR_TSYN_EVNT_M;
1683 aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;
1684
1685 #define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE BIT(0)
1686 #define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE BIT(1)
1687
1688 /* set event level to requested edge */
1689 if (config->flags & PTP_FALLING_EDGE)
1690 aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
1691 if (config->flags & PTP_RISING_EDGE)
1692 aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;
1693
1694 /* Write GPIO CTL reg.
1695 * 0x1 is input sampled by EVENT register(channel)
1696 * + num_in_channels * tmr_idx
1697 */
1698 func = 1 + chan + (tmr_idx * 3);
1699 gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func);
1700 pf->ptp.ext_ts_chan |= (1 << chan);
1701 } else {
1702 /* clear the values we set to reset defaults */
1703 aux_reg = 0;
1704 gpio_reg = 0;
1705 pf->ptp.ext_ts_chan &= ~(1 << chan);
1706 if (!pf->ptp.ext_ts_chan)
1707 irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
1708 }
1709
1710 wr32(hw, PFINT_OICR_ENA, irq_reg);
1711 wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
1712 wr32(hw, GLGEN_GPIO_CTL(config->gpio_pin), gpio_reg);
1713
1714 if (store)
1715 memcpy(&pf->ptp.extts_channels[chan], config, sizeof(*config));
1716
1717 return 0;
1718 }
1719
1720 /**
1721 * ice_ptp_disable_all_extts - Disable all EXTTS channels
1722 * @pf: Board private structure
1723 */
1724 static void ice_ptp_disable_all_extts(struct ice_pf *pf)
1725 {
1726 struct ice_extts_channel extts_cfg = {};
1727 int i;
1728
1729 for (i = 0; i < pf->ptp.info.n_ext_ts; i++) {
1730 if (pf->ptp.extts_channels[i].ena) {
1731 extts_cfg.gpio_pin = pf->ptp.extts_channels[i].gpio_pin;
1732 extts_cfg.ena = false;
1733 ice_ptp_cfg_extts(pf, i, &extts_cfg, false);
1734 }
1735 }
1736
1737 synchronize_irq(pf->oicr_irq.virq);
1738 }
1739
1740 /**
1741 * ice_ptp_enable_all_extts - Enable all EXTTS channels
1742 * @pf: Board private structure
1743 *
1744 * Called during reset to restore user configuration.
1745 */
1746 static void ice_ptp_enable_all_extts(struct ice_pf *pf)
1747 {
1748 int i;
1749
1750 for (i = 0; i < pf->ptp.info.n_ext_ts; i++) {
1751 if (pf->ptp.extts_channels[i].ena)
1752 ice_ptp_cfg_extts(pf, i, &pf->ptp.extts_channels[i],
1753 false);
1754 }
1755 }
1756
1757 /**
1758 * ice_ptp_cfg_clkout - Configure clock to generate periodic wave
1759 * @pf: Board private structure
1760 * @chan: GPIO channel (0-3)
1761 * @config: desired periodic clk configuration. NULL will disable channel
1762 * @store: If set to true the values will be stored
1763 *
1764 * Configure the internal clock generator modules to generate the clock wave of
1765 * specified period.
1766 */
1767 static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
1768 struct ice_perout_channel *config, bool store)
1769 {
1770 u64 current_time, period, start_time, phase;
1771 struct ice_hw *hw = &pf->hw;
1772 u32 func, val, gpio_pin;
1773 u8 tmr_idx;
1774
1775 if (config && config->flags & ~PTP_PEROUT_PHASE)
1776 return -EOPNOTSUPP;
1777
1778 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1779
1780 /* 0. Reset mode & out_en in AUX_OUT */
1781 wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
1782
1783 /* If we're disabling the output, clear out CLKO and TGT and keep
1784 * output level low
1785 */
1786 if (!config || !config->ena) {
1787 wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0);
1788 wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0);
1789 wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0);
1790
1791 val = GLGEN_GPIO_CTL_PIN_DIR_M;
1792 gpio_pin = pf->ptp.perout_channels[chan].gpio_pin;
1793 wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
1794
1795 /* Store the value if requested */
1796 if (store)
1797 memset(&pf->ptp.perout_channels[chan], 0,
1798 sizeof(struct ice_perout_channel));
1799
1800 return 0;
1801 }
1802 period = config->period;
1803 start_time = config->start_time;
1804 div64_u64_rem(start_time, period, &phase);
1805 gpio_pin = config->gpio_pin;
1806
1807 /* 1. Write clkout with half of required period value */
1808 if (period & 0x1) {
1809 dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
1810 goto err;
1811 }
1812
1813 period >>= 1;
1814
1815 /* For proper operation, the GLTSYN_CLKO must be larger than clock tick
1816 */
1817 #define MIN_PULSE 3
1818 if (period <= MIN_PULSE || period > U32_MAX) {
1819 dev_err(ice_pf_to_dev(pf), "CLK Period must be > %d && < 2^33",
1820 MIN_PULSE * 2);
1821 goto err;
1822 }
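	/* Worked example: with MIN_PULSE = 3, the halved period must satisfy
	 * 3 < period <= U32_MAX, so the requested period has to be an even
	 * value greater than 6 ns and no larger than about 2^33 ns (~8.6 s).
	 */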
1823
1824 wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));
1825
1826 /* Allow time for programming before start_time is hit */
1827 current_time = ice_ptp_read_src_clk_reg(pf, NULL);
1828
1829 /* If the start time is in the past, start the timer at the nearest whole
1830 * second while maintaining the phase
1831 */
1832 if (start_time < current_time)
1833 start_time = roundup_u64(current_time, NSEC_PER_SEC) + phase;
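	/* Example: with a 1 s period, a stale start time of 10.25 s and a
	 * current time of 15.3 s, the output is re-armed at 16.25 s: the next
	 * whole second plus the preserved 0.25 s phase.
	 */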
1834
1835 if (ice_is_e810(hw))
1836 start_time -= E810_OUT_PROP_DELAY_NS;
1837 else
1838 start_time -= ice_e82x_pps_delay(ice_e82x_time_ref(hw));
1839
1840 /* 2. Write TARGET time */
1841 wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
1842 wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start_time));
1843
1844 /* 3. Write AUX_OUT register */
1845 val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
1846 wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);
1847
1848 /* 4. write GPIO CTL reg */
1849 func = 8 + chan + (tmr_idx * 4);
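	/* Illustrative mapping: tmr_idx 0 and chan 0 select GPIO output
	 * function 8 (the first periodic output of timer 0), while chan 3
	 * selects function 11.
	 */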
1850 val = GLGEN_GPIO_CTL_PIN_DIR_M |
1851 FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func);
1852 wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
1853
1854 /* Store the value if requested */
1855 if (store) {
1856 memcpy(&pf->ptp.perout_channels[chan], config,
1857 sizeof(struct ice_perout_channel));
1858 pf->ptp.perout_channels[chan].start_time = phase;
1859 }
1860
1861 return 0;
1862 err:
1863 dev_err(ice_pf_to_dev(pf), "PTP failed to cfg per_clk\n");
1864 return -EFAULT;
1865 }
1866
1867 /**
1868 * ice_ptp_disable_all_clkout - Disable all currently configured outputs
1869 * @pf: pointer to the PF structure
1870 *
1871 * Disable all currently configured clock outputs. This is necessary before
1872 * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_clkout to
1873 * re-enable the clocks again.
1874 */
1875 static void ice_ptp_disable_all_clkout(struct ice_pf *pf)
1876 {
1877 uint i;
1878
1879 for (i = 0; i < pf->ptp.info.n_per_out; i++)
1880 if (pf->ptp.perout_channels[i].ena)
1881 ice_ptp_cfg_clkout(pf, i, NULL, false);
1882 }
1883
1884 /**
1885 * ice_ptp_enable_all_clkout - Enable all configured periodic clock outputs
1886 * @pf: pointer to the PF structure
1887 *
1888 * Enable all currently configured clock outputs. Use this after
1889 * ice_ptp_disable_all_clkout to reconfigure the output signals according to
1890 * their configuration.
1891 */
1892 static void ice_ptp_enable_all_clkout(struct ice_pf *pf)
1893 {
1894 uint i;
1895
1896 for (i = 0; i < pf->ptp.info.n_per_out; i++)
1897 if (pf->ptp.perout_channels[i].ena)
1898 ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i],
1899 false);
1900 }
1901
1902 /**
1903 * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC
1904 * @info: the driver's PTP info structure
1905 * @rq: The requested feature to change
1906 * @on: Enable/disable flag
1907 */
1908 static int
1909 ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
1910 struct ptp_clock_request *rq, int on)
1911 {
1912 struct ice_pf *pf = ptp_info_to_pf(info);
1913 bool sma_pres = false;
1914 unsigned int chan;
1915 u32 gpio_pin;
1916
1917 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
1918 sma_pres = true;
1919
1920 switch (rq->type) {
1921 case PTP_CLK_REQ_PEROUT:
1922 {
1923 struct ice_perout_channel clk_cfg = {};
1924
1925 chan = rq->perout.index;
1926 if (sma_pres) {
1927 if (chan == ice_pin_desc_e810t[SMA1].chan)
1928 clk_cfg.gpio_pin = GPIO_20;
1929 else if (chan == ice_pin_desc_e810t[SMA2].chan)
1930 clk_cfg.gpio_pin = GPIO_22;
1931 else
1932 return -1;
1933 } else if (ice_is_e810t(&pf->hw)) {
1934 if (chan == 0)
1935 clk_cfg.gpio_pin = GPIO_20;
1936 else
1937 clk_cfg.gpio_pin = GPIO_22;
1938 } else if (chan == PPS_CLK_GEN_CHAN) {
1939 clk_cfg.gpio_pin = PPS_PIN_INDEX;
1940 } else {
1941 clk_cfg.gpio_pin = chan;
1942 }
1943
1944 clk_cfg.flags = rq->perout.flags;
1945 clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
1946 rq->perout.period.nsec);
1947 clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) +
1948 rq->perout.start.nsec);
1949 clk_cfg.ena = !!on;
1950
1951 return ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
1952 }
1953 case PTP_CLK_REQ_EXTTS:
1954 {
1955 struct ice_extts_channel extts_cfg = {};
1956
1957 chan = rq->extts.index;
1958 if (sma_pres) {
1959 if (chan < ice_pin_desc_e810t[SMA2].chan)
1960 gpio_pin = GPIO_21;
1961 else
1962 gpio_pin = GPIO_23;
1963 } else if (ice_is_e810t(&pf->hw)) {
1964 if (chan == 0)
1965 gpio_pin = GPIO_21;
1966 else
1967 gpio_pin = GPIO_23;
1968 } else {
1969 gpio_pin = chan;
1970 }
1971
1972 extts_cfg.flags = rq->extts.flags;
1973 extts_cfg.gpio_pin = gpio_pin;
1974 extts_cfg.ena = !!on;
1975
1976 return ice_ptp_cfg_extts(pf, chan, &extts_cfg, true);
1977 }
1978 default:
1979 return -EOPNOTSUPP;
1980 }
1981 }
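
/* Usage sketch (illustrative, not driver code): the PTP_CLK_REQ_PEROUT case
 * above services a PTP_PEROUT_REQUEST ioctl from userspace, e.g. a 1 Hz
 * output on channel 0:
 *
 *	struct ptp_perout_request req = {
 *		.index = 0,
 *		.period = { .sec = 1, .nsec = 0 },
 *		.start = { .sec = start_sec, .nsec = 0 },
 *	};
 *
 *	ioctl(fd, PTP_PEROUT_REQUEST, &req);
 *
 * The index, flags, period and start values map directly onto the
 * ice_perout_channel fields filled in above; start_sec is a placeholder.
 */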
1982
1983 /**
1984 * ice_ptp_gpio_enable_e823 - Enable/disable ancillary features of PHC
1985 * @info: the driver's PTP info structure
1986 * @rq: The requested feature to change
1987 * @on: Enable/disable flag
1988 */
1989 static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info,
1990 struct ptp_clock_request *rq, int on)
1991 {
1992 struct ice_pf *pf = ptp_info_to_pf(info);
1993
1994 switch (rq->type) {
1995 case PTP_CLK_REQ_PPS:
1996 {
1997 struct ice_perout_channel clk_cfg = {};
1998
1999 clk_cfg.flags = rq->perout.flags;
2000 clk_cfg.gpio_pin = PPS_PIN_INDEX;
2001 clk_cfg.period = NSEC_PER_SEC;
2002 clk_cfg.ena = !!on;
2003
2004 return ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true);
2005 }
2006 case PTP_CLK_REQ_EXTTS:
2007 {
2008 struct ice_extts_channel extts_cfg = {};
2009
2010 extts_cfg.flags = rq->extts.flags;
2011 extts_cfg.gpio_pin = TIME_SYNC_PIN_INDEX;
2012 extts_cfg.ena = !!on;
2013
2014 return ice_ptp_cfg_extts(pf, rq->extts.index, &extts_cfg, true);
2015 }
2016 default:
2017 return -EOPNOTSUPP;
2018 }
2019 }
2020
2021 /**
2022 * ice_ptp_gettimex64 - Get the time of the clock
2023 * @info: the driver's PTP info structure
2024 * @ts: timespec64 structure to hold the current time value
2025 * @sts: Optional parameter for holding a pair of system timestamps from
2026 * the system clock. Will be ignored if NULL is given.
2027 *
2028 * Read the device clock and return the correct value in ns, after converting it
2029 * into a timespec64 struct.
2030 */
2031 static int
2032 ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
2033 struct ptp_system_timestamp *sts)
2034 {
2035 struct ice_pf *pf = ptp_info_to_pf(info);
2036 u64 time_ns;
2037
2038 time_ns = ice_ptp_read_src_clk_reg(pf, sts);
2039 *ts = ns_to_timespec64(time_ns);
2040 return 0;
2041 }
2042
2043 /**
2044 * ice_ptp_settime64 - Set the time of the clock
2045 * @info: the driver's PTP info structure
2046 * @ts: timespec64 structure that holds the new time value
2047 *
2048 * Set the device clock to the user input value. The conversion from timespec
2049 * to ns happens in the write function.
2050 */
2051 static int
2052 ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
2053 {
2054 struct ice_pf *pf = ptp_info_to_pf(info);
2055 struct timespec64 ts64 = *ts;
2056 struct ice_hw *hw = &pf->hw;
2057 int err;
2058
2059 /* For Vernier mode on E82X, we need to recalibrate after new settime.
2060 * Start with marking timestamps as invalid.
2061 */
2062 if (hw->ptp.phy_model == ICE_PHY_E82X) {
2063 err = ice_ptp_clear_phy_offset_ready_e82x(hw);
2064 if (err)
2065 dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
2066 }
2067
2068 if (!ice_ptp_lock(hw)) {
2069 err = -EBUSY;
2070 goto exit;
2071 }
2072
2073 /* Disable periodic outputs */
2074 ice_ptp_disable_all_clkout(pf);
2075
2076 err = ice_ptp_write_init(pf, &ts64);
2077 ice_ptp_unlock(hw);
2078
2079 if (!err)
2080 ice_ptp_reset_cached_phctime(pf);
2081
2082 /* Reenable periodic outputs */
2083 ice_ptp_enable_all_clkout(pf);
2084
2085 /* Recalibrate and re-enable timestamp blocks for E822/E823 */
2086 if (hw->ptp.phy_model == ICE_PHY_E82X)
2087 ice_ptp_restart_all_phy(pf);
2088 exit:
2089 if (err) {
2090 dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
2091 return err;
2092 }
2093
2094 return 0;
2095 }
2096
2097 /**
2098 * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
2099 * @info: the driver's PTP info structure
2100 * @delta: Offset in nanoseconds to adjust the time by
2101 */
2102 static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
2103 {
2104 struct timespec64 now, then;
2105 int ret;
2106
2107 then = ns_to_timespec64(delta);
2108 ret = ice_ptp_gettimex64(info, &now, NULL);
2109 if (ret)
2110 return ret;
2111 now = timespec64_add(now, then);
2112
2113 return ice_ptp_settime64(info, (const struct timespec64 *)&now);
2114 }
2115
2116 /**
2117 * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta
2118 * @info: the driver's PTP info structure
2119 * @delta: Offset in nanoseconds to adjust the time by
2120 */
2121 static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
2122 {
2123 struct ice_pf *pf = ptp_info_to_pf(info);
2124 struct ice_hw *hw = &pf->hw;
2125 struct device *dev;
2126 int err;
2127
2128 dev = ice_pf_to_dev(pf);
2129
2130 /* Hardware only supports atomic adjustments using signed 32-bit
2131 * integers. For any adjustment outside this range, perform
2132 * a non-atomic get->adjust->set flow.
2133 */
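	/* Example: a 5 s adjustment is 5,000,000,000 ns, which exceeds
	 * S32_MAX (~2.147e9 ns), so it takes the non-atomic path below.
	 */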
2134 if (delta > S32_MAX || delta < S32_MIN) {
2135 dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta);
2136 return ice_ptp_adjtime_nonatomic(info, delta);
2137 }
2138
2139 if (!ice_ptp_lock(hw)) {
2140 dev_err(dev, "PTP failed to acquire semaphore in adjtime\n");
2141 return -EBUSY;
2142 }
2143
2144 /* Disable periodic outputs */
2145 ice_ptp_disable_all_clkout(pf);
2146
2147 err = ice_ptp_write_adj(pf, delta);
2148
2149 /* Reenable periodic outputs */
2150 ice_ptp_enable_all_clkout(pf);
2151
2152 ice_ptp_unlock(hw);
2153
2154 if (err) {
2155 dev_err(dev, "PTP failed to adjust time, err %d\n", err);
2156 return err;
2157 }
2158
2159 ice_ptp_reset_cached_phctime(pf);
2160
2161 return 0;
2162 }
2163
2164 #ifdef CONFIG_ICE_HWTS
2165 /**
2166 * ice_ptp_get_syncdevicetime - Get the cross time stamp info
2167 * @device: Current device time
2168 * @system: System counter value read synchronously with device time
2169 * @ctx: Context provided by timekeeping code
2170 *
2171 * Read device and system (ART) clock simultaneously and return the corrected
2172 * clock values in ns.
2173 */
2174 static int
2175 ice_ptp_get_syncdevicetime(ktime_t *device,
2176 struct system_counterval_t *system,
2177 void *ctx)
2178 {
2179 struct ice_pf *pf = (struct ice_pf *)ctx;
2180 struct ice_hw *hw = &pf->hw;
2181 u32 hh_lock, hh_art_ctl;
2182 int i;
2183
2184 #define MAX_HH_HW_LOCK_TRIES 5
2185 #define MAX_HH_CTL_LOCK_TRIES 100
2186
2187 for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) {
2188 /* Get the HW lock */
2189 hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
2190 if (hh_lock & PFHH_SEM_BUSY_M) {
2191 usleep_range(10000, 15000);
2192 continue;
2193 }
2194 break;
2195 }
2196 if (hh_lock & PFHH_SEM_BUSY_M) {
2197 dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
2198 return -EBUSY;
2199 }
2200
2201 /* Program cmd to master timer */
2202 ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
2203
2204 /* Start the ART and device clock sync sequence */
2205 hh_art_ctl = rd32(hw, GLHH_ART_CTL);
2206 hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
2207 wr32(hw, GLHH_ART_CTL, hh_art_ctl);
2208
2209 for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) {
2210 /* Wait for sync to complete */
2211 hh_art_ctl = rd32(hw, GLHH_ART_CTL);
2212 if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
2213 udelay(1);
2214 continue;
2215 } else {
2216 u32 hh_ts_lo, hh_ts_hi, tmr_idx;
2217 u64 hh_ts;
2218
2219 tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
2220 /* Read ART time */
2221 hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
2222 hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
2223 hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
2224 system->cycles = hh_ts;
2225 system->cs_id = CSID_X86_ART;
2226 /* Read Device source clock time */
2227 hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
2228 hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
2229 hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
2230 *device = ns_to_ktime(hh_ts);
2231 break;
2232 }
2233 }
2234
2235 /* Clear the master timer */
2236 ice_ptp_src_cmd(hw, ICE_PTP_NOP);
2237
2238 /* Release HW lock */
2239 hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
2240 hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
2241 wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);
2242
2243 if (i == MAX_HH_CTL_LOCK_TRIES)
2244 return -ETIMEDOUT;
2245
2246 return 0;
2247 }
2248
2249 /**
2250 * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp
2251 * @info: the driver's PTP info structure
2252 * @cts: The memory to fill the cross timestamp info
2253 *
2254 * Capture a cross timestamp between the ART and the device PTP hardware
2255 * clock. Fill the cross timestamp information and report it back to the
2256 * caller.
2257 *
2258 * This is only valid for E822 and E823 devices which have support for
2259 * generating the cross timestamp via PCIe PTM.
2260 *
2261 * In order to correctly correlate the ART timestamp back to the TSC time, the
2262 * CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
2263 */
2264 static int
2265 ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info,
2266 struct system_device_crosststamp *cts)
2267 {
2268 struct ice_pf *pf = ptp_info_to_pf(info);
2269
2270 return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
2271 pf, NULL, cts);
2272 }
2273 #endif /* CONFIG_ICE_HWTS */
2274
2275 /**
2276 * ice_ptp_get_ts_config - ioctl interface to read the timestamping config
2277 * @pf: Board private structure
2278 * @ifr: ioctl data
2279 *
2280 * Copy the timestamping config to user buffer
2281 */
2282 int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
2283 {
2284 struct hwtstamp_config *config;
2285
2286 if (pf->ptp.state != ICE_PTP_READY)
2287 return -EIO;
2288
2289 config = &pf->ptp.tstamp_config;
2290
2291 return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
2292 -EFAULT : 0;
2293 }
2294
2295 /**
2296 * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
2297 * @pf: Board private structure
2298 * @config: hwtstamp settings requested or saved
2299 */
2300 static int
2301 ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
2302 {
2303 switch (config->tx_type) {
2304 case HWTSTAMP_TX_OFF:
2305 pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
2306 break;
2307 case HWTSTAMP_TX_ON:
2308 pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
2309 break;
2310 default:
2311 return -ERANGE;
2312 }
2313
2314 switch (config->rx_filter) {
2315 case HWTSTAMP_FILTER_NONE:
2316 pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
2317 break;
2318 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2319 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2320 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2321 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2322 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2323 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2324 case HWTSTAMP_FILTER_PTP_V2_SYNC:
2325 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2326 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2327 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2328 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2329 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2330 case HWTSTAMP_FILTER_NTP_ALL:
2331 case HWTSTAMP_FILTER_ALL:
2332 pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
2333 break;
2334 default:
2335 return -ERANGE;
2336 }
2337
2338 /* Immediately update the device timestamping mode */
2339 ice_ptp_restore_timestamp_mode(pf);
2340
2341 return 0;
2342 }
2343
2344 /**
2345 * ice_ptp_set_ts_config - ioctl interface to control the timestamping
2346 * @pf: Board private structure
2347 * @ifr: ioctl data
2348 *
2349 * Get the user config and store it
2350 */
2351 int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
2352 {
2353 struct hwtstamp_config config;
2354 int err;
2355
2356 if (pf->ptp.state != ICE_PTP_READY)
2357 return -EAGAIN;
2358
2359 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2360 return -EFAULT;
2361
2362 err = ice_ptp_set_timestamp_mode(pf, &config);
2363 if (err)
2364 return err;
2365
2366 /* Return the actual configuration set */
2367 config = pf->ptp.tstamp_config;
2368
2369 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2370 -EFAULT : 0;
2371 }
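
/* Usage sketch (illustrative, not driver code): this handler is reached via
 * the SIOCSHWTSTAMP ioctl on the netdev, for example:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return, cfg reflects what was actually applied: any specific PTP filter
 * above is widened to HWTSTAMP_FILTER_ALL. "eth0" and sock_fd are
 * placeholders.
 */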
2372
2373 /**
2374 * ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns
2375 * @rx_desc: Receive descriptor
2376 * @pkt_ctx: Packet context to get the cached time
2377 *
2378 * The driver receives a notification in the receive descriptor with the timestamp.
2379 */
2380 u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
2381 const struct ice_pkt_ctx *pkt_ctx)
2382 {
2383 u64 ts_ns, cached_time;
2384 u32 ts_high;
2385
2386 if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
2387 return 0;
2388
2389 cached_time = READ_ONCE(pkt_ctx->cached_phctime);
2390
2391 /* Do not report a timestamp if we don't have a cached PHC time */
2392 if (!cached_time)
2393 return 0;
2394
2395 /* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
2396 * PHC value, rather than accessing the PF. This also allows us to
2397 * simply pass the upper 32bits of nanoseconds directly. Calling
2398 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these
2399 * bits itself.
2400 */
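	/* Conceptually, ts_high supplies the low 32 bits of the nanosecond
	 * timestamp and cached_time anchors which ~4.29 s (2^32 ns) window it
	 * falls in, yielding a full 64-bit value without touching the PF.
	 */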
2401 ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
2402 ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);
2403
2404 return ts_ns;
2405 }
2406
2407 /**
2408 * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins
2409 * @pf: pointer to the PF structure
2410 * @info: PTP clock info structure
2411 *
2412 * Disable the OS access to the SMA pins. Called to clear out the OS
2413 * indications of pin support when we fail to setup the E810-T SMA control
2414 * register.
2415 */
2416 static void
2417 ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
2418 {
2419 struct device *dev = ice_pf_to_dev(pf);
2420
2421 dev_warn(dev, "Failed to configure E810-T SMA pin control\n");
2422
2423 info->enable = NULL;
2424 info->verify = NULL;
2425 info->n_pins = 0;
2426 info->n_ext_ts = 0;
2427 info->n_per_out = 0;
2428 }
2429
2430 /**
2431 * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins
2432 * @pf: pointer to the PF structure
2433 * @info: PTP clock info structure
2434 *
2435 * Finish setting up the SMA pins by allocating pin_config, and setting it up
2436 * according to the current status of the SMA. On failure, disable all of the
2437 * extended SMA pin support.
2438 */
2439 static void
2440 ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
2441 {
2442 struct device *dev = ice_pf_to_dev(pf);
2443 int err;
2444
2445 /* Allocate memory for kernel pins interface */
2446 info->pin_config = devm_kcalloc(dev, info->n_pins,
2447 sizeof(*info->pin_config), GFP_KERNEL);
2448 if (!info->pin_config) {
2449 ice_ptp_disable_sma_pins_e810t(pf, info);
2450 return;
2451 }
2452
2453 /* Read current SMA status */
2454 err = ice_get_sma_config_e810t(&pf->hw, info->pin_config);
2455 if (err)
2456 ice_ptp_disable_sma_pins_e810t(pf, info);
2457 }
2458
2459 /**
2460 * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
2461 * @pf: pointer to the PF instance
2462 * @info: PTP clock capabilities
2463 */
2464 static void
2465 ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
2466 {
2467 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
2468 info->n_ext_ts = N_EXT_TS_E810;
2469 info->n_per_out = N_PER_OUT_E810T;
2470 info->n_pins = NUM_PTP_PINS_E810T;
2471 info->verify = ice_verify_pin_e810t;
2472
2473 /* Complete setup of the SMA pins */
2474 ice_ptp_setup_sma_pins_e810t(pf, info);
2475 } else if (ice_is_e810t(&pf->hw)) {
2476 info->n_ext_ts = N_EXT_TS_NO_SMA_E810T;
2477 info->n_per_out = N_PER_OUT_NO_SMA_E810T;
2478 } else {
2479 info->n_per_out = N_PER_OUT_E810;
2480 info->n_ext_ts = N_EXT_TS_E810;
2481 }
2482 }
2483
2484 /**
2485 * ice_ptp_setup_pins_e823 - Setup PTP pins in sysfs
2486 * @pf: pointer to the PF instance
2487 * @info: PTP clock capabilities
2488 */
2489 static void
2490 ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info)
2491 {
2492 info->pps = 1;
2493 info->n_per_out = 0;
2494 info->n_ext_ts = 1;
2495 }
2496
2497 /**
2498 * ice_ptp_set_funcs_e82x - Set specialized functions for E82x support
2499 * @pf: Board private structure
2500 * @info: PTP info to fill
2501 *
2502 * Assign functions to the PTP capabilities structure for E82x devices.
2503 * Functions which operate across all device families should be set directly
2504 * in ice_ptp_set_caps. Only add functions here which are distinct for E82x
2505 * devices.
2506 */
2507 static void
2508 ice_ptp_set_funcs_e82x(struct ice_pf *pf, struct ptp_clock_info *info)
2509 {
2510 #ifdef CONFIG_ICE_HWTS
2511 if (boot_cpu_has(X86_FEATURE_ART) &&
2512 boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
2513 info->getcrosststamp = ice_ptp_getcrosststamp_e82x;
2514 #endif /* CONFIG_ICE_HWTS */
2515 }
2516
2517 /**
2518 * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
2519 * @pf: Board private structure
2520 * @info: PTP info to fill
2521 *
2522 * Assign functions to the PTP capabilities structure for E810 devices.
2523 * Functions which operate across all device families should be set directly
2524 * in ice_ptp_set_caps. Only add functions here which are distinct for e810
2525 * devices.
2526 */
2527 static void
2528 ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
2529 {
2530 info->enable = ice_ptp_gpio_enable_e810;
2531 ice_ptp_setup_pins_e810(pf, info);
2532 }
2533
2534 /**
2535 * ice_ptp_set_funcs_e823 - Set specialized functions for E823 support
2536 * @pf: Board private structure
2537 * @info: PTP info to fill
2538 *
2539 * Assign functions to the PTP capabilities structure for E823 devices.
2540 * Functions which operate across all device families should be set directly
2541 * in ice_ptp_set_caps. Only add functions here which are distinct for e823
2542 * devices.
2543 */
2544 static void
2545 ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info)
2546 {
2547 ice_ptp_set_funcs_e82x(pf, info);
2548
2549 info->enable = ice_ptp_gpio_enable_e823;
2550 ice_ptp_setup_pins_e823(pf, info);
2551 }
2552
2553 /**
2554 * ice_ptp_set_caps - Set PTP capabilities
2555 * @pf: Board private structure
2556 */
2557 static void ice_ptp_set_caps(struct ice_pf *pf)
2558 {
2559 struct ptp_clock_info *info = &pf->ptp.info;
2560 struct device *dev = ice_pf_to_dev(pf);
2561
2562 snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
2563 dev_driver_string(dev), dev_name(dev));
2564 info->owner = THIS_MODULE;
2565 info->max_adj = 100000000;
2566 info->adjtime = ice_ptp_adjtime;
2567 info->adjfine = ice_ptp_adjfine;
2568 info->gettimex64 = ice_ptp_gettimex64;
2569 info->settime64 = ice_ptp_settime64;
2570
2571 if (ice_is_e810(&pf->hw))
2572 ice_ptp_set_funcs_e810(pf, info);
2573 else if (ice_is_e823(&pf->hw))
2574 ice_ptp_set_funcs_e823(pf, info);
2575 else
2576 ice_ptp_set_funcs_e82x(pf, info);
2577 }
2578
2579 /**
2580 * ice_ptp_create_clock - Create PTP clock device for userspace
2581 * @pf: Board private structure
2582 *
2583 * This function creates a new PTP clock device if one does not already exist.
2584 * It returns an error if the clock cannot be created, and success if a device
2585 * is already registered. It should be used by ice_ptp_init to create the clock
2586 * initially, and to prevent global resets from creating new clock devices.
2587 */
2588 static long ice_ptp_create_clock(struct ice_pf *pf)
2589 {
2590 struct ptp_clock_info *info;
2591 struct device *dev;
2592
2593 /* No need to create a clock device if we already have one */
2594 if (pf->ptp.clock)
2595 return 0;
2596
2597 ice_ptp_set_caps(pf);
2598
2599 info = &pf->ptp.info;
2600 dev = ice_pf_to_dev(pf);
2601
2602 /* Attempt to register the clock before enabling the hardware. */
2603 pf->ptp.clock = ptp_clock_register(info, dev);
2604 if (IS_ERR(pf->ptp.clock)) {
2605 dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device");
2606 return PTR_ERR(pf->ptp.clock);
2607 }
2608
2609 return 0;
2610 }
2611
2612 /**
2613 * ice_ptp_request_ts - Request an available Tx timestamp index
2614 * @tx: the PTP Tx timestamp tracker to request from
2615 * @skb: the SKB to associate with this timestamp request
2616 */
2617 s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
2618 {
2619 unsigned long flags;
2620 u8 idx;
2621
2622 spin_lock_irqsave(&tx->lock, flags);
2623
2624 /* Check that this tracker is accepting new timestamp requests */
2625 if (!ice_ptp_is_tx_tracker_up(tx)) {
2626 spin_unlock_irqrestore(&tx->lock, flags);
2627 return -1;
2628 }
2629
2630 /* Find and set the first available index */
2631 idx = find_next_zero_bit(tx->in_use, tx->len,
2632 tx->last_ll_ts_idx_read + 1);
2633 if (idx == tx->len)
2634 idx = find_first_zero_bit(tx->in_use, tx->len);
2635
2636 if (idx < tx->len) {
2637 /* We got a valid index that no other thread could have set. Store
2638 * a reference to the skb and the start time to allow discarding old
2639 * requests.
2640 */
2641 set_bit(idx, tx->in_use);
2642 clear_bit(idx, tx->stale);
2643 tx->tstamps[idx].start = jiffies;
2644 tx->tstamps[idx].skb = skb_get(skb);
2645 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2646 ice_trace(tx_tstamp_request, skb, idx);
2647 }
2648
2649 spin_unlock_irqrestore(&tx->lock, flags);
2650
2651 /* return the appropriate PHY timestamp register index, -1 if no
2652 * indexes were available.
2653 */
2654 if (idx >= tx->len)
2655 return -1;
2656 else
2657 return idx + tx->offset;
2658 }
2659
2660 /**
2661 * ice_ptp_process_ts - Process the PTP Tx timestamps
2662 * @pf: Board private structure
2663 *
2664 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx
2665 * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise.
2666 */
2667 enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
2668 {
2669 switch (pf->ptp.tx_interrupt_mode) {
2670 case ICE_PTP_TX_INTERRUPT_NONE:
2671 /* This device has the clock owner handle timestamps for it */
2672 return ICE_TX_TSTAMP_WORK_DONE;
2673 case ICE_PTP_TX_INTERRUPT_SELF:
2674 /* This device handles its own timestamps */
2675 return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
2676 case ICE_PTP_TX_INTERRUPT_ALL:
2677 /* This device handles timestamps for all ports */
2678 return ice_ptp_tx_tstamp_owner(pf);
2679 default:
2680 WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
2681 pf->ptp.tx_interrupt_mode);
2682 return ICE_TX_TSTAMP_WORK_DONE;
2683 }
2684 }
2685
2686 /**
2687 * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt
2688 * @pf: Board private structure
2689 *
2690 * The device PHY issues Tx timestamp interrupts to the driver for processing
2691 * timestamp data from the PHY. It will not interrupt again until all
2692 * current timestamp data is read. In rare circumstances, it is possible that
2693 * the driver fails to read all outstanding data.
2694 *
2695 * To avoid getting permanently stuck, periodically check if the PHY has
2696 * outstanding timestamp data. If so, trigger an interrupt from software to
2697 * process this data.
2698 */
2699 static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
2700 {
2701 struct device *dev = ice_pf_to_dev(pf);
2702 struct ice_hw *hw = &pf->hw;
2703 bool trigger_oicr = false;
2704 unsigned int i;
2705
2706 if (ice_is_e810(hw))
2707 return;
2708
2709 if (!ice_pf_src_tmr_owned(pf))
2710 return;
2711
2712 for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) {
2713 u64 tstamp_ready;
2714 int err;
2715
2716 err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
2717 if (!err && tstamp_ready) {
2718 trigger_oicr = true;
2719 break;
2720 }
2721 }
2722
2723 if (trigger_oicr) {
2724 /* Trigger a software interrupt, to ensure this data
2725 * gets processed.
2726 */
2727 dev_dbg(dev, "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n");
2728
2729 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
2730 ice_flush(hw);
2731 }
2732 }
2733
2734 static void ice_ptp_periodic_work(struct kthread_work *work)
2735 {
2736 struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
2737 struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
2738 int err;
2739
2740 if (pf->ptp.state != ICE_PTP_READY)
2741 return;
2742
2743 err = ice_ptp_update_cached_phctime(pf);
2744
2745 ice_ptp_maybe_trigger_tx_interrupt(pf);
2746
2747 /* Run twice a second or reschedule if phc update failed */
2748 kthread_queue_delayed_work(ptp->kworker, &ptp->work,
2749 msecs_to_jiffies(err ? 10 : 500));
2750 }
2751
2752 /**
2753 * ice_ptp_prepare_for_reset - Prepare PTP for reset
2754 * @pf: Board private structure
2755 * @reset_type: the reset type being performed
2756 */
2757 void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
2758 {
2759 struct ice_ptp *ptp = &pf->ptp;
2760 u8 src_tmr;
2761
2762 if (ptp->state != ICE_PTP_READY)
2763 return;
2764
2765 ptp->state = ICE_PTP_RESETTING;
2766
2767 /* Disable timestamping for both Tx and Rx */
2768 ice_ptp_disable_timestamp_mode(pf);
2769
2770 kthread_cancel_delayed_work_sync(&ptp->work);
2771
2772 if (reset_type == ICE_RESET_PFR)
2773 return;
2774
2775 ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
2776
2777 /* Disable periodic outputs */
2778 ice_ptp_disable_all_clkout(pf);
2779
2780 src_tmr = ice_get_ptp_src_clock_index(&pf->hw);
2781
2782 /* Disable source clock */
2783 wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);
2784
2785 /* Acquire PHC and system timer to restore after reset */
2786 ptp->reset_time = ktime_get_real_ns();
2787 }
2788
2789 /**
2790 * ice_ptp_rebuild_owner - Initialize PTP clock owner after reset
2791 * @pf: Board private structure
2792 *
2793 * Companion function for ice_ptp_rebuild() which handles tasks that only the
2794 * PTP clock owner instance should perform.
2795 */
2796 static int ice_ptp_rebuild_owner(struct ice_pf *pf)
2797 {
2798 struct ice_ptp *ptp = &pf->ptp;
2799 struct ice_hw *hw = &pf->hw;
2800 struct timespec64 ts;
2801 u64 time_diff;
2802 int err;
2803
2804 err = ice_ptp_init_phc(hw);
2805 if (err)
2806 return err;
2807
2808 /* Acquire the global hardware lock */
2809 if (!ice_ptp_lock(hw)) {
2810 err = -EBUSY;
2811 return err;
2812 }
2813
2814 /* Write the increment time value to PHY and LAN */
2815 err = ice_ptp_write_incval(hw, ice_base_incval(pf));
2816 if (err) {
2817 ice_ptp_unlock(hw);
2818 return err;
2819 }
2820
2821 /* Write the initial Time value to PHY and LAN using the cached PHC
2822 * time before the reset and time difference between stopping and
2823 * starting the clock.
2824 */
2825 if (ptp->cached_phc_time) {
2826 time_diff = ktime_get_real_ns() - ptp->reset_time;
2827 ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);
2828 } else {
2829 ts = ktime_to_timespec64(ktime_get_real());
2830 }
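	/* Example: if the cached PHC time was 1000.0 s when the reset started
	 * and 2.5 s of wall time elapsed before reaching this point, the clock
	 * is re-initialized near 1002.5 s instead of jumping to system time.
	 */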
2831 err = ice_ptp_write_init(pf, &ts);
2832 if (err) {
2833 ice_ptp_unlock(hw);
2834 return err;
2835 }
2836
2837 /* Release the global hardware lock */
2838 ice_ptp_unlock(hw);
2839
2840 /* Flush software tracking of any outstanding timestamps since we're
2841 * about to flush the PHY timestamp block.
2842 */
2843 ice_ptp_flush_all_tx_tracker(pf);
2844
2845 if (!ice_is_e810(hw)) {
2846 /* Enable quad interrupts */
2847 err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
2848 if (err)
2849 return err;
2850
2851 ice_ptp_restart_all_phy(pf);
2852 }
2853
2854 /* Re-enable all periodic outputs and external timestamp events */
2855 ice_ptp_enable_all_clkout(pf);
2856 ice_ptp_enable_all_extts(pf);
2857
2858 return 0;
2859 }
2860
2861 /**
2862 * ice_ptp_rebuild - Initialize PTP hardware clock support after reset
2863 * @pf: Board private structure
2864 * @reset_type: the reset type being performed
2865 */
2866 void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
2867 {
2868 struct ice_ptp *ptp = &pf->ptp;
2869 int err;
2870
2871 if (ptp->state == ICE_PTP_READY) {
2872 ice_ptp_prepare_for_reset(pf, reset_type);
2873 } else if (ptp->state != ICE_PTP_RESETTING) {
2874 err = -EINVAL;
2875 dev_err(ice_pf_to_dev(pf), "PTP was not initialized\n");
2876 goto err;
2877 }
2878
2879 if (ice_pf_src_tmr_owned(pf) && reset_type != ICE_RESET_PFR) {
2880 err = ice_ptp_rebuild_owner(pf);
2881 if (err)
2882 goto err;
2883 }
2884
2885 ptp->state = ICE_PTP_READY;
2886
2887 /* Start periodic work going */
2888 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
2889
2890 dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
2891 return;
2892
2893 err:
2894 ptp->state = ICE_PTP_ERROR;
2895 dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
2896 }
2897
2898 /**
2899 * ice_ptp_aux_dev_to_aux_pf - Get auxiliary PF handle for the auxiliary device
2900 * @aux_dev: auxiliary device to get the auxiliary PF for
2901 */
2902 static struct ice_pf *
2903 ice_ptp_aux_dev_to_aux_pf(struct auxiliary_device *aux_dev)
2904 {
2905 struct ice_ptp_port *aux_port;
2906 struct ice_ptp *aux_ptp;
2907
2908 aux_port = container_of(aux_dev, struct ice_ptp_port, aux_dev);
2909 aux_ptp = container_of(aux_port, struct ice_ptp, port);
2910
2911 return container_of(aux_ptp, struct ice_pf, ptp);
2912 }
2913
2914 /**
2915 * ice_ptp_aux_dev_to_owner_pf - Get PF handle for the auxiliary device
2916 * @aux_dev: auxiliary device to get the PF for
2917 */
2918 static struct ice_pf *
2919 ice_ptp_aux_dev_to_owner_pf(struct auxiliary_device *aux_dev)
2920 {
2921 struct ice_ptp_port_owner *ports_owner;
2922 const struct auxiliary_driver *aux_drv;
2923 struct ice_ptp *owner_ptp;
2924
2925 if (!aux_dev->dev.driver)
2926 return NULL;
2927
2928 aux_drv = to_auxiliary_drv(aux_dev->dev.driver);
2929 ports_owner = container_of(aux_drv, struct ice_ptp_port_owner,
2930 aux_driver);
2931 owner_ptp = container_of(ports_owner, struct ice_ptp, ports_owner);
2932 return container_of(owner_ptp, struct ice_pf, ptp);
2933 }
2934
2935 /**
2936 * ice_ptp_auxbus_probe - Probe auxiliary devices
2937 * @aux_dev: PF's auxiliary device
2938 * @id: Auxiliary device ID
2939 */
2940 static int ice_ptp_auxbus_probe(struct auxiliary_device *aux_dev,
2941 const struct auxiliary_device_id *id)
2942 {
2943 struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
2944 struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev);
2945
2946 if (WARN_ON(!owner_pf))
2947 return -ENODEV;
2948
2949 INIT_LIST_HEAD(&aux_pf->ptp.port.list_member);
2950 mutex_lock(&owner_pf->ptp.ports_owner.lock);
2951 list_add(&aux_pf->ptp.port.list_member,
2952 &owner_pf->ptp.ports_owner.ports);
2953 mutex_unlock(&owner_pf->ptp.ports_owner.lock);
2954
2955 return 0;
2956 }
2957
2958 /**
2959 * ice_ptp_auxbus_remove - Remove auxiliary devices from the bus
2960 * @aux_dev: PF's auxiliary device
2961 */
2962 static void ice_ptp_auxbus_remove(struct auxiliary_device *aux_dev)
2963 {
2964 struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
2965 struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev);
2966
2967 mutex_lock(&owner_pf->ptp.ports_owner.lock);
2968 list_del(&aux_pf->ptp.port.list_member);
2969 mutex_unlock(&owner_pf->ptp.ports_owner.lock);
2970 }
2971
2972 /**
2973 * ice_ptp_auxbus_shutdown - Shut down the PF's auxiliary device
2974 * @aux_dev: PF's auxiliary device
2975 */
2976 static void ice_ptp_auxbus_shutdown(struct auxiliary_device *aux_dev)
2977 {
2978 /* Doing nothing here, but handle to auxbus driver must be satisfied */
2979 }
2980
2981 /**
2982 * ice_ptp_auxbus_suspend - Suspend the PF's auxiliary device
2983 * @aux_dev: PF's auxiliary device
2984 * @state: power management state indicator
2985 */
2986 static int
2987 ice_ptp_auxbus_suspend(struct auxiliary_device *aux_dev, pm_message_t state)
2988 {
2989 /* Doing nothing here, but handle to auxbus driver must be satisfied */
2990 return 0;
2991 }
2992
2993 /**
2994 * ice_ptp_auxbus_resume - Resume the PF's auxiliary device
2995 * @aux_dev: PF's auxiliary device
2996 */
2997 static int ice_ptp_auxbus_resume(struct auxiliary_device *aux_dev)
2998 {
2999 /* Doing nothing here, but handle to auxbus driver must be satisfied */
3000 return 0;
3001 }
3002
3003 /**
3004 * ice_ptp_auxbus_create_id_table - Create auxiliary device ID table
3005 * @pf: Board private structure
3006 * @name: auxiliary bus driver name
3007 */
3008 static struct auxiliary_device_id *
3009 ice_ptp_auxbus_create_id_table(struct ice_pf *pf, const char *name)
3010 {
3011 struct auxiliary_device_id *ids;
3012
3013 /* Second id left empty to terminate the array */
3014 ids = devm_kcalloc(ice_pf_to_dev(pf), 2,
3015 sizeof(struct auxiliary_device_id), GFP_KERNEL);
3016 if (!ids)
3017 return NULL;
3018
3019 snprintf(ids[0].name, sizeof(ids[0].name), "ice.%s", name);
3020
3021 return ids;
3022 }
3023
3024 /**
3025 * ice_ptp_register_auxbus_driver - Register PTP auxiliary bus driver
3026 * @pf: Board private structure
3027 */
3028 static int ice_ptp_register_auxbus_driver(struct ice_pf *pf)
3029 {
3030 struct auxiliary_driver *aux_driver;
3031 struct ice_ptp *ptp;
3032 struct device *dev;
3033 char *name;
3034 int err;
3035
3036 ptp = &pf->ptp;
3037 dev = ice_pf_to_dev(pf);
3038 aux_driver = &ptp->ports_owner.aux_driver;
3039 INIT_LIST_HEAD(&ptp->ports_owner.ports);
3040 mutex_init(&ptp->ports_owner.lock);
3041 name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
3042 pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
3043 ice_get_ptp_src_clock_index(&pf->hw));
3044 if (!name)
3045 return -ENOMEM;
3046
3047 aux_driver->name = name;
3048 aux_driver->shutdown = ice_ptp_auxbus_shutdown;
3049 aux_driver->suspend = ice_ptp_auxbus_suspend;
3050 aux_driver->remove = ice_ptp_auxbus_remove;
3051 aux_driver->resume = ice_ptp_auxbus_resume;
3052 aux_driver->probe = ice_ptp_auxbus_probe;
3053 aux_driver->id_table = ice_ptp_auxbus_create_id_table(pf, name);
3054 if (!aux_driver->id_table)
3055 return -ENOMEM;
3056
3057 err = auxiliary_driver_register(aux_driver);
3058 if (err) {
3059 devm_kfree(dev, aux_driver->id_table);
3060 dev_err(dev, "Failed registering aux_driver, name <%s>\n",
3061 name);
3062 }
3063
3064 return err;
3065 }
3066
3067 /**
3068 * ice_ptp_unregister_auxbus_driver - Unregister PTP auxiliary bus driver
3069 * @pf: Board private structure
3070 */
3071 static void ice_ptp_unregister_auxbus_driver(struct ice_pf *pf)
3072 {
3073 struct auxiliary_driver *aux_driver = &pf->ptp.ports_owner.aux_driver;
3074
3075 auxiliary_driver_unregister(aux_driver);
3076 devm_kfree(ice_pf_to_dev(pf), aux_driver->id_table);
3077
3078 mutex_destroy(&pf->ptp.ports_owner.lock);
3079 }
3080
3081 /**
3082 * ice_ptp_clock_index - Get the PTP clock index for this device
3083 * @pf: Board private structure
3084 *
3085 * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock
3086 * is associated.
3087 */
3088 int ice_ptp_clock_index(struct ice_pf *pf)
3089 {
3090 struct auxiliary_device *aux_dev;
3091 struct ice_pf *owner_pf;
3092 struct ptp_clock *clock;
3093
3094 aux_dev = &pf->ptp.port.aux_dev;
3095 owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
3096 if (!owner_pf)
3097 return -1;
3098 clock = owner_pf->ptp.clock;
3099
3100 return clock ? ptp_clock_index(clock) : -1;
3101 }
3102
3103 /**
3104 * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
3105 * @pf: Board private structure
3106 *
3107 * Setup and initialize a PTP clock device that represents the device hardware
3108 * clock. Save the clock index for other functions connected to the same
3109 * hardware resource.
3110 */
3111 static int ice_ptp_init_owner(struct ice_pf *pf)
3112 {
3113 struct ice_hw *hw = &pf->hw;
3114 struct timespec64 ts;
3115 int err;
3116
3117 err = ice_ptp_init_phc(hw);
3118 if (err) {
3119 dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n",
3120 err);
3121 return err;
3122 }
3123
3124 /* Acquire the global hardware lock */
3125 if (!ice_ptp_lock(hw)) {
3126 err = -EBUSY;
3127 goto err_exit;
3128 }
3129
3130 /* Write the increment time value to PHY and LAN */
3131 err = ice_ptp_write_incval(hw, ice_base_incval(pf));
3132 if (err) {
3133 ice_ptp_unlock(hw);
3134 goto err_exit;
3135 }
3136
3137 ts = ktime_to_timespec64(ktime_get_real());
3138 /* Write the initial Time value to PHY and LAN */
3139 err = ice_ptp_write_init(pf, &ts);
3140 if (err) {
3141 ice_ptp_unlock(hw);
3142 goto err_exit;
3143 }
3144
3145 /* Release the global hardware lock */
3146 ice_ptp_unlock(hw);
3147
3148 /* Configure PHY interrupt settings */
3149 err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
3150 if (err)
3151 goto err_exit;
3152
3153 /* Ensure we have a clock device */
3154 err = ice_ptp_create_clock(pf);
3155 if (err)
3156 goto err_clk;
3157
3158 err = ice_ptp_register_auxbus_driver(pf);
3159 if (err) {
3160 dev_err(ice_pf_to_dev(pf), "Failed to register PTP auxbus driver");
3161 goto err_aux;
3162 }
3163
3164 return 0;
3165 err_aux:
3166 ptp_clock_unregister(pf->ptp.clock);
3167 err_clk:
3168 pf->ptp.clock = NULL;
3169 err_exit:
3170 return err;
3171 }
3172
3173 /**
3174 * ice_ptp_init_work - Initialize PTP work threads
3175 * @pf: Board private structure
3176 * @ptp: PF PTP structure
3177 */
3178 static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
3179 {
3180 struct kthread_worker *kworker;
3181
3182 /* Initialize work functions */
3183 kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);
3184
3185 /* Allocate a kworker for handling work required for the ports
3186 * connected to the PTP hardware clock.
3187 */
3188 kworker = kthread_create_worker(0, "ice-ptp-%s",
3189 dev_name(ice_pf_to_dev(pf)));
3190 if (IS_ERR(kworker))
3191 return PTR_ERR(kworker);
3192
3193 ptp->kworker = kworker;
3194
3195 /* Start periodic work going */
3196 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
3197
3198 return 0;
3199 }
3200
3201 /**
3202 * ice_ptp_init_port - Initialize PTP port structure
3203 * @pf: Board private structure
3204 * @ptp_port: PTP port structure
3205 */
3206 static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
3207 {
3208 struct ice_hw *hw = &pf->hw;
3209
3210 mutex_init(&ptp_port->ps_lock);
3211
3212 switch (hw->ptp.phy_model) {
3213 case ICE_PHY_ETH56G:
3214 return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx,
3215 ptp_port->port_num);
3216 case ICE_PHY_E810:
3217 return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
3218 case ICE_PHY_E82X:
3219 kthread_init_delayed_work(&ptp_port->ov_work,
3220 ice_ptp_wait_for_offsets);
3221
3222 return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
3223 ptp_port->port_num);
3224 default:
3225 return -ENODEV;
3226 }
3227 }
3228
3229 /**
3230 * ice_ptp_release_auxbus_device - Release the auxiliary bus device
3231 * @dev: device that utilizes the auxbus
3232 */
3233 static void ice_ptp_release_auxbus_device(struct device *dev)
3234 {
3235 /* Doing nothing here, but handle to auxbus device must be satisfied */
3236 }
3237
3238 /**
3239 * ice_ptp_create_auxbus_device - Create PTP auxiliary bus device
3240 * @pf: Board private structure
3241 */
3242 static int ice_ptp_create_auxbus_device(struct ice_pf *pf)
3243 {
3244 struct auxiliary_device *aux_dev;
3245 struct ice_ptp *ptp;
3246 struct device *dev;
3247 char *name;
3248 int err;
3249 u32 id;
3250
3251 ptp = &pf->ptp;
3252 id = ptp->port.port_num;
3253 dev = ice_pf_to_dev(pf);
3254
3255 aux_dev = &ptp->port.aux_dev;
3256
3257 name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
3258 pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
3259 ice_get_ptp_src_clock_index(&pf->hw));
3260 if (!name)
3261 return -ENOMEM;
3262
3263 aux_dev->name = name;
3264 aux_dev->id = id;
3265 aux_dev->dev.release = ice_ptp_release_auxbus_device;
3266 aux_dev->dev.parent = dev;
3267
3268 err = auxiliary_device_init(aux_dev);
3269 if (err)
3270 goto aux_err;
3271
3272 err = auxiliary_device_add(aux_dev);
3273 if (err) {
3274 auxiliary_device_uninit(aux_dev);
3275 goto aux_err;
3276 }
3277
3278 return 0;
3279 aux_err:
3280 dev_err(dev, "Failed to create PTP auxiliary bus device <%s>\n", name);
3281 devm_kfree(dev, name);
3282 return err;
3283 }
3284
3285 /**
3286 * ice_ptp_remove_auxbus_device - Remove PTP auxiliary bus device
3287 * @pf: Board private structure
3288 */
3289 static void ice_ptp_remove_auxbus_device(struct ice_pf *pf)
3290 {
3291 struct auxiliary_device *aux_dev = &pf->ptp.port.aux_dev;
3292
3293 auxiliary_device_delete(aux_dev);
3294 auxiliary_device_uninit(aux_dev);
3295
3296 memset(aux_dev, 0, sizeof(*aux_dev));
3297 }
3298
3299 /**
3300 * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode
3301 * @pf: Board private structure
3302 *
3303 * Initialize the Tx timestamp interrupt mode for this device. For most device
3304 * types, each PF processes the interrupt and manages its own timestamps. For
3305 * E822-based devices, only the clock owner processes the timestamps. Other
3306 * PFs disable the interrupt and do not process their own timestamps.
3307 */
3308 static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
3309 {
3310 switch (pf->hw.ptp.phy_model) {
3311 case ICE_PHY_E82X:
3312 /* E822 based PHY has the clock owner process the interrupt
3313 * for all ports.
3314 */
3315 if (ice_pf_src_tmr_owned(pf))
3316 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL;
3317 else
3318 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE;
3319 break;
3320 default:
3321 /* other PHY types handle their own Tx interrupt */
3322 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF;
3323 }
3324 }
3325
3326 /**
3327 * ice_ptp_init - Initialize PTP hardware clock support
3328 * @pf: Board private structure
3329 *
3330 * Set up the device for interacting with the PTP hardware clock for all
3331 * functions, both the function that owns the clock hardware, and the
3332 * functions connected to the clock hardware.
3333 *
3334 * The clock owner will allocate and register a ptp_clock with the
3335 * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work
3336 * items used for asynchronous work such as Tx timestamps and periodic work.
3337 */
3338 void ice_ptp_init(struct ice_pf *pf)
3339 {
3340 struct ice_ptp *ptp = &pf->ptp;
3341 struct ice_hw *hw = &pf->hw;
3342 int err;
3343
3344 ptp->state = ICE_PTP_INITIALIZING;
3345
3346 ice_ptp_init_hw(hw);
3347
3348 ice_ptp_init_tx_interrupt_mode(pf);
3349
3350 /* If this function owns the clock hardware, it must allocate and
3351 * configure the PTP clock device to represent it.
3352 */
3353 if (ice_pf_src_tmr_owned(pf)) {
3354 err = ice_ptp_init_owner(pf);
3355 if (err)
3356 goto err;
3357 }
3358
3359 ptp->port.port_num = hw->pf_id;
3360 if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
3361 ptp->port.port_num = hw->pf_id * 2;
3362
3363 err = ice_ptp_init_port(pf, &ptp->port);
3364 if (err)
3365 goto err;
3366
3367 /* Start the PHY timestamping block */
3368 ice_ptp_reset_phy_timestamping(pf);
3369
3370 /* Configure initial Tx interrupt settings */
3371 ice_ptp_cfg_tx_interrupt(pf);
3372
3373 err = ice_ptp_create_auxbus_device(pf);
3374 if (err)
3375 goto err;
3376
3377 ptp->state = ICE_PTP_READY;
3378
3379 err = ice_ptp_init_work(pf, ptp);
3380 if (err)
3381 goto err;
3382
3383 dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
3384 return;
3385
3386 err:
3387 /* If we registered a PTP clock, release it */
3388 if (pf->ptp.clock) {
3389 ptp_clock_unregister(ptp->clock);
3390 pf->ptp.clock = NULL;
3391 }
3392 ptp->state = ICE_PTP_ERROR;
3393 dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
3394 }
3395
3396 /**
3397 * ice_ptp_release - Disable the driver/HW support and unregister the clock
3398 * @pf: Board private structure
3399 *
3400 * This function handles the cleanup work required from the initialization by
3401 * clearing out the important information and unregistering the clock
3402 */
3403 void ice_ptp_release(struct ice_pf *pf)
3404 {
3405 if (pf->ptp.state != ICE_PTP_READY)
3406 return;
3407
3408 pf->ptp.state = ICE_PTP_UNINIT;
3409
3410 /* Disable timestamping for both Tx and Rx */
3411 ice_ptp_disable_timestamp_mode(pf);
3412
3413 ice_ptp_remove_auxbus_device(pf);
3414
3415 ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
3416
3417 ice_ptp_disable_all_extts(pf);
3418
3419 kthread_cancel_delayed_work_sync(&pf->ptp.work);
3420
3421 ice_ptp_port_phy_stop(&pf->ptp.port);
3422 mutex_destroy(&pf->ptp.port.ps_lock);
3423 if (pf->ptp.kworker) {
3424 kthread_destroy_worker(pf->ptp.kworker);
3425 pf->ptp.kworker = NULL;
3426 }
3427
3428 if (ice_pf_src_tmr_owned(pf))
3429 ice_ptp_unregister_auxbus_driver(pf);
3430
3431 if (!pf->ptp.clock)
3432 return;
3433
3434 /* Disable periodic outputs */
3435 ice_ptp_disable_all_clkout(pf);
3436
3437 ptp_clock_unregister(pf->ptp.clock);
3438 pf->ptp.clock = NULL;
3439
3440 dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
3441 }
3442