xref: /linux/drivers/net/ethernet/intel/ice/ice_ptp.c (revision 7354eb7f1558466e92e926802d36e69e42938ea9)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021, Intel Corporation. */
3 
4 #include "ice.h"
5 #include "ice_lib.h"
6 #include "ice_trace.h"
7 
8 #define E810_OUT_PROP_DELAY_NS 1
9 
10 static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
11 	/* name    idx   func         chan */
12 	{ "GNSS",  GNSS, PTP_PF_EXTTS, 0, { 0, } },
13 	{ "SMA1",  SMA1, PTP_PF_NONE, 1, { 0, } },
14 	{ "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } },
15 	{ "SMA2",  SMA2, PTP_PF_NONE, 2, { 0, } },
16 	{ "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } },
17 };
18 
19 /**
20  * ice_get_sma_config_e810t
21  * @hw: pointer to the hw struct
22  * @ptp_pins: pointer to the ptp_pin_desc structure
23  *
24  * Read the configuration of the SMA control logic and put it into the
25  * ptp_pin_desc structure
26  */
27 static int
28 ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
29 {
30 	u8 data, i;
31 	int status;
32 
33 	/* Read initial pin state */
34 	status = ice_read_sma_ctrl_e810t(hw, &data);
35 	if (status)
36 		return status;
37 
38 	/* initialize with defaults */
39 	for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
40 		strscpy(ptp_pins[i].name, ice_pin_desc_e810t[i].name,
41 			sizeof(ptp_pins[i].name));
42 		ptp_pins[i].index = ice_pin_desc_e810t[i].index;
43 		ptp_pins[i].func = ice_pin_desc_e810t[i].func;
44 		ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
45 	}
46 
47 	/* Parse SMA1/UFL1 */
48 	switch (data & ICE_SMA1_MASK_E810T) {
49 	case ICE_SMA1_MASK_E810T:
50 	default:
51 		ptp_pins[SMA1].func = PTP_PF_NONE;
52 		ptp_pins[UFL1].func = PTP_PF_NONE;
53 		break;
54 	case ICE_SMA1_DIR_EN_E810T:
55 		ptp_pins[SMA1].func = PTP_PF_PEROUT;
56 		ptp_pins[UFL1].func = PTP_PF_NONE;
57 		break;
58 	case ICE_SMA1_TX_EN_E810T:
59 		ptp_pins[SMA1].func = PTP_PF_EXTTS;
60 		ptp_pins[UFL1].func = PTP_PF_NONE;
61 		break;
62 	case 0:
63 		ptp_pins[SMA1].func = PTP_PF_EXTTS;
64 		ptp_pins[UFL1].func = PTP_PF_PEROUT;
65 		break;
66 	}
67 
68 	/* Parse SMA2/UFL2 */
69 	switch (data & ICE_SMA2_MASK_E810T) {
70 	case ICE_SMA2_MASK_E810T:
71 	default:
72 		ptp_pins[SMA2].func = PTP_PF_NONE;
73 		ptp_pins[UFL2].func = PTP_PF_NONE;
74 		break;
75 	case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
76 		ptp_pins[SMA2].func = PTP_PF_EXTTS;
77 		ptp_pins[UFL2].func = PTP_PF_NONE;
78 		break;
79 	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
80 		ptp_pins[SMA2].func = PTP_PF_PEROUT;
81 		ptp_pins[UFL2].func = PTP_PF_NONE;
82 		break;
83 	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T):
84 		ptp_pins[SMA2].func = PTP_PF_NONE;
85 		ptp_pins[UFL2].func = PTP_PF_EXTTS;
86 		break;
87 	case ICE_SMA2_DIR_EN_E810T:
88 		ptp_pins[SMA2].func = PTP_PF_PEROUT;
89 		ptp_pins[UFL2].func = PTP_PF_EXTTS;
90 		break;
91 	}
92 
93 	return 0;
94 }
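
/* Summary of the SMA1/U.FL1 decoding above, derived from the switch cases
 * (the SMA2/U.FL2 decoding follows the same pattern with its own bits):
 *
 *   DIR_EN  TX_EN  ->  SMA1         U.FL1
 *   ------  -----      -----------  -----------
 *     0       0        EXTTS (RX)   PEROUT (TX)
 *     0       1        EXTTS (RX)   disabled
 *     1       0        PEROUT (TX)  disabled
 *     1       1        disabled     disabled
 */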
95 
96 /**
97  * ice_ptp_set_sma_config_e810t
98  * @hw: pointer to the hw struct
99  * @ptp_pins: pointer to the ptp_pin_desc structure
100  *
101  * Set the configuration of the SMA control logic based on the configuration
102  * requested in the ptp_pins parameter
103  */
104 static int
105 ice_ptp_set_sma_config_e810t(struct ice_hw *hw,
106 			     const struct ptp_pin_desc *ptp_pins)
107 {
108 	int status;
109 	u8 data;
110 
111 	/* SMA1 and UFL1 cannot be set to TX at the same time */
112 	if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
113 	    ptp_pins[UFL1].func == PTP_PF_PEROUT)
114 		return -EINVAL;
115 
116 	/* SMA2 and UFL2 cannot be set to RX at the same time */
117 	if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
118 	    ptp_pins[UFL2].func == PTP_PF_EXTTS)
119 		return -EINVAL;
120 
121 	/* Read initial pin state value */
122 	status = ice_read_sma_ctrl_e810t(hw, &data);
123 	if (status)
124 		return status;
125 
126 	/* Set the right state based on the desired configuration */
127 	data &= ~ICE_SMA1_MASK_E810T;
128 	if (ptp_pins[SMA1].func == PTP_PF_NONE &&
129 	    ptp_pins[UFL1].func == PTP_PF_NONE) {
130 		dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled");
131 		data |= ICE_SMA1_MASK_E810T;
132 	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
133 		   ptp_pins[UFL1].func == PTP_PF_NONE) {
134 		dev_info(ice_hw_to_dev(hw), "SMA1 RX");
135 		data |= ICE_SMA1_TX_EN_E810T;
136 	} else if (ptp_pins[SMA1].func == PTP_PF_NONE &&
137 		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
138 		/* U.FL 1 TX will always enable SMA 1 RX */
139 		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
140 	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
141 		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
142 		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
143 	} else if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
144 		   ptp_pins[UFL1].func == PTP_PF_NONE) {
145 		dev_info(ice_hw_to_dev(hw), "SMA1 TX");
146 		data |= ICE_SMA1_DIR_EN_E810T;
147 	}
148 
149 	data &= ~ICE_SMA2_MASK_E810T;
150 	if (ptp_pins[SMA2].func == PTP_PF_NONE &&
151 	    ptp_pins[UFL2].func == PTP_PF_NONE) {
152 		dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled");
153 		data |= ICE_SMA2_MASK_E810T;
154 	} else if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
155 			ptp_pins[UFL2].func == PTP_PF_NONE) {
156 		dev_info(ice_hw_to_dev(hw), "SMA2 RX");
157 		data |= (ICE_SMA2_TX_EN_E810T |
158 			 ICE_SMA2_UFL2_RX_DIS_E810T);
159 	} else if (ptp_pins[SMA2].func == PTP_PF_NONE &&
160 		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
161 		dev_info(ice_hw_to_dev(hw), "UFL2 RX");
162 		data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T);
163 	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
164 		   ptp_pins[UFL2].func == PTP_PF_NONE) {
165 		dev_info(ice_hw_to_dev(hw), "SMA2 TX");
166 		data |= (ICE_SMA2_DIR_EN_E810T |
167 			 ICE_SMA2_UFL2_RX_DIS_E810T);
168 	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
169 		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
170 		dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX");
171 		data |= ICE_SMA2_DIR_EN_E810T;
172 	}
173 
174 	return ice_write_sma_ctrl_e810t(hw, data);
175 }
176 
177 /**
178  * ice_ptp_set_sma_e810t
179  * @info: the driver's PTP info structure
180  * @pin: pin index in kernel structure
181  * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT)
182  *
183  * Set the configuration of a single SMA pin
184  */
185 static int
186 ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin,
187 		      enum ptp_pin_function func)
188 {
189 	struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T];
190 	struct ice_pf *pf = ptp_info_to_pf(info);
191 	struct ice_hw *hw = &pf->hw;
192 	int err;
193 
194 	if (pin < SMA1 || func > PTP_PF_PEROUT)
195 		return -EOPNOTSUPP;
196 
197 	err = ice_get_sma_config_e810t(hw, ptp_pins);
198 	if (err)
199 		return err;
200 
201 	/* Disable the same function on the other pin sharing the channel */
202 	if (pin == SMA1 && ptp_pins[UFL1].func == func)
203 		ptp_pins[UFL1].func = PTP_PF_NONE;
204 	if (pin == UFL1 && ptp_pins[SMA1].func == func)
205 		ptp_pins[SMA1].func = PTP_PF_NONE;
206 
207 	if (pin == SMA2 && ptp_pins[UFL2].func == func)
208 		ptp_pins[UFL2].func = PTP_PF_NONE;
209 	if (pin == UFL2 && ptp_pins[SMA2].func == func)
210 		ptp_pins[SMA2].func = PTP_PF_NONE;
211 
212 	/* Set up new pin function in the temp table */
213 	ptp_pins[pin].func = func;
214 
215 	return ice_ptp_set_sma_config_e810t(hw, ptp_pins);
216 }
217 
218 /**
219  * ice_verify_pin_e810t
220  * @info: the driver's PTP info structure
221  * @pin: Pin index
222  * @func: Assigned function
223  * @chan: Assigned channel
224  *
225  * Verify that the pin supports the requested function and that the pin
226  * assignments are consistent. Reconfigure the SMA logic attached to the
227  * given pin to enable its desired functionality.
228  */
229 static int
230 ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
231 		     enum ptp_pin_function func, unsigned int chan)
232 {
233 	/* Don't allow channel reassignment */
234 	if (chan != ice_pin_desc_e810t[pin].chan)
235 		return -EOPNOTSUPP;
236 
237 	/* Check if functions are properly assigned */
238 	switch (func) {
239 	case PTP_PF_NONE:
240 		break;
241 	case PTP_PF_EXTTS:
242 		if (pin == UFL1)
243 			return -EOPNOTSUPP;
244 		break;
245 	case PTP_PF_PEROUT:
246 		if (pin == UFL2 || pin == GNSS)
247 			return -EOPNOTSUPP;
248 		break;
249 	case PTP_PF_PHYSYNC:
250 		return -EOPNOTSUPP;
251 	}
252 
253 	return ice_ptp_set_sma_e810t(info, pin, func);
254 }
255 
256 /**
257  * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
258  * @pf: Board private structure
259  *
260  * Program the device to respond appropriately to the Tx timestamp interrupt
261  * cause.
262  */
263 static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf)
264 {
265 	struct ice_hw *hw = &pf->hw;
266 	bool enable;
267 	u32 val;
268 
269 	switch (pf->ptp.tx_interrupt_mode) {
270 	case ICE_PTP_TX_INTERRUPT_ALL:
271 		/* React to interrupts across all quads. */
272 		wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
273 		enable = true;
274 		break;
275 	case ICE_PTP_TX_INTERRUPT_NONE:
276 		/* Do not react to interrupts on any quad. */
277 		wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
278 		enable = false;
279 		break;
280 	case ICE_PTP_TX_INTERRUPT_SELF:
281 	default:
282 		enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON;
283 		break;
284 	}
285 
286 	/* Configure the Tx timestamp interrupt */
287 	val = rd32(hw, PFINT_OICR_ENA);
288 	if (enable)
289 		val |= PFINT_OICR_TSYN_TX_M;
290 	else
291 		val &= ~PFINT_OICR_TSYN_TX_M;
292 	wr32(hw, PFINT_OICR_ENA, val);
293 }
294 
295 /**
296  * ice_set_rx_tstamp - Enable or disable Rx timestamping
297  * @pf: The PF pointer to search in
298  * @on: bool value for whether timestamps are enabled or disabled
299  */
300 static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
301 {
302 	struct ice_vsi *vsi;
303 	u16 i;
304 
305 	vsi = ice_get_main_vsi(pf);
306 	if (!vsi || !vsi->rx_rings)
307 		return;
308 
309 	/* Set the timestamp flag for all the Rx rings */
310 	ice_for_each_rxq(vsi, i) {
311 		if (!vsi->rx_rings[i])
312 			continue;
313 		vsi->rx_rings[i]->ptp_rx = on;
314 	}
315 }
316 
317 /**
318  * ice_ptp_disable_timestamp_mode - Disable current timestamp mode
319  * @pf: Board private structure
320  *
321  * Called during preparation for reset to temporarily disable timestamping on
322  * the device. Called during remove to disable timestamping while cleaning up
323  * driver resources.
324  */
325 static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf)
326 {
327 	struct ice_hw *hw = &pf->hw;
328 	u32 val;
329 
330 	val = rd32(hw, PFINT_OICR_ENA);
331 	val &= ~PFINT_OICR_TSYN_TX_M;
332 	wr32(hw, PFINT_OICR_ENA, val);
333 
334 	ice_set_rx_tstamp(pf, false);
335 }
336 
337 /**
338  * ice_ptp_restore_timestamp_mode - Restore timestamp configuration
339  * @pf: Board private structure
340  *
341  * Called at the end of rebuild to restore timestamp configuration after
342  * a device reset.
343  */
344 void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
345 {
346 	struct ice_hw *hw = &pf->hw;
347 	bool enable_rx;
348 
349 	ice_ptp_cfg_tx_interrupt(pf);
350 
351 	enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
352 	ice_set_rx_tstamp(pf, enable_rx);
353 
354 	/* Trigger an immediate software interrupt to ensure that timestamps
355 	 * which occurred during reset are handled now.
356 	 */
357 	wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
358 	ice_flush(hw);
359 }
360 
361 /**
362  * ice_ptp_read_src_clk_reg - Read the source clock register
363  * @pf: Board private structure
364  * @sts: Optional parameter for holding a pair of system timestamps from
365  *       the system clock. Will be ignored if NULL is given.
366  */
367 static u64
368 ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
369 {
370 	struct ice_hw *hw = &pf->hw;
371 	u32 hi, lo, lo2;
372 	u8 tmr_idx;
373 
374 	tmr_idx = ice_get_ptp_src_clock_index(hw);
375 	guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
376 	/* Read the system timestamp pre PHC read */
377 	ptp_read_system_prets(sts);
378 
379 	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
380 
381 	/* Read the system timestamp post PHC read */
382 	ptp_read_system_postts(sts);
383 
384 	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
385 	lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));
386 
387 	if (lo2 < lo) {
388 		/* if TIME_L rolled over read TIME_L again and update
389 		 * system timestamps
390 		 */
391 		ptp_read_system_prets(sts);
392 		lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
393 		ptp_read_system_postts(sts);
394 		hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
395 	}
396 
397 	return ((u64)hi << 32) | lo;
398 }
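
/* The double read of GLTSYN_TIME_L above guards against a rollover between
 * the L and H reads. For example (hypothetical values): if the first read
 * returns lo = 0xFFFFFFF0 and the second returns lo2 = 0x00000010, then
 * lo2 < lo and TIME_L wrapped while TIME_H was being read, so hi may already
 * reflect the incremented upper word. Re-reading both registers yields an hi
 * and lo pair taken from the same epoch of the 64-bit timer.
 */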
399 
400 /**
401  * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
402  * @cached_phc_time: recently cached copy of PHC time
403  * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
404  *
405  * Hardware captures timestamps which contain only 32 bits of nominal
406  * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
407  * Note that the captured timestamp values may be 40 bits, but the lower
408  * 8 bits are sub-nanoseconds and generally discarded.
409  *
410  * Extend the 32bit nanosecond timestamp using the following algorithm and
411  * assumptions:
412  *
413  * 1) have a recently cached copy of the PHC time
414  * 2) assume that the in_tstamp was captured no more than 2^31 nanoseconds
415  *    (~2.1 seconds) before or after the PHC time was captured.
416  * 3) calculate the delta between the cached time and the timestamp
417  * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
418  *    captured after the PHC time. In this case, the full timestamp is just
419  *    the cached PHC time plus the delta.
420  * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
421  *    timestamp was captured *before* the PHC time, i.e. because the PHC
422  *    cache was updated after the timestamp was captured by hardware. In this
423  *    case, the full timestamp is the cached time minus the inverse delta.
424  *
425  * This algorithm works even if the PHC time was updated after a Tx timestamp
426  * was requested, but before the Tx timestamp event was reported from
427  * hardware.
428  *
429  * This calculation primarily relies on keeping the cached PHC time up to
430  * date. If the timestamp was captured more than 2^31 nanoseconds after the
431  * PHC time, it is possible that the lower 32bits of PHC time have
432  * overflowed more than once, and we might generate an incorrect timestamp.
433  *
434  * This is prevented by (a) periodically updating the cached PHC time once
435  * a second, and (b) discarding any Tx timestamp packet if it has waited for
436  * a timestamp for more than one second.
437  */
438 static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
439 {
440 	u32 delta, phc_time_lo;
441 	u64 ns;
442 
443 	/* Extract the lower 32 bits of the PHC time */
444 	phc_time_lo = (u32)cached_phc_time;
445 
446 	/* Calculate the delta between the lower 32bits of the cached PHC
447 	 * time and the in_tstamp value
448 	 */
449 	delta = (in_tstamp - phc_time_lo);
450 
451 	/* Do not assume that the in_tstamp is always more recent than the
452 	 * cached PHC time. If the delta is large, the in_tstamp was taken
453 	 * before the cached PHC time, so the delta must be subtracted from
454 	 * the cached time rather than added to it.
455 	 */
456 	if (delta > (U32_MAX / 2)) {
457 		/* reverse the delta calculation here */
458 		delta = (phc_time_lo - in_tstamp);
459 		ns = cached_phc_time - delta;
460 	} else {
461 		ns = cached_phc_time + delta;
462 	}
463 
464 	return ns;
465 }
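
/* Worked example of the extension math above, using hypothetical values:
 *
 *   cached_phc_time = 0x5F0000000;   phc_time_lo = 0xF0000000;
 *   in_tstamp       = 0x10000000;
 *   delta = 0x10000000 - 0xF0000000 = 0x20000000;    (< U32_MAX / 2)
 *   ns    = 0x5F0000000 + 0x20000000 = 0x610000000;
 *
 * Had in_tstamp been 0xE0000000 instead, delta would be 0xF0000000
 * (> U32_MAX / 2), so the reverse delta 0x10000000 is subtracted and
 * ns = 0x5F0000000 - 0x10000000 = 0x5E0000000.
 */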
466 
467 /**
468  * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
469  * @pf: Board private structure
470  * @in_tstamp: Ingress/egress 40b timestamp value
471  *
472  * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
473  * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
474  *
475  *  *--------------------------------------------------------------*
476  *  | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
477  *  *--------------------------------------------------------------*
478  *
479  * The low bit is an indicator of whether the timestamp is valid. The next
480  * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
481  * and the remaining 32 bits are the lower 32 bits of the PHC timer.
482  *
483  * It is assumed that the caller verifies the timestamp is valid prior to
484  * calling this function.
485  *
486  * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
487  * time stored in the device private PTP structure as the basis for timestamp
488  * extension.
489  *
490  * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
491  * algorithm.
492  */
493 static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
494 {
495 	const u64 mask = GENMASK_ULL(31, 0);
496 	unsigned long discard_time;
497 
498 	/* Discard the hardware timestamp if the cached PHC time is too old */
499 	discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
500 	if (time_is_before_jiffies(discard_time)) {
501 		pf->ptp.tx_hwtstamp_discarded++;
502 		return 0;
503 	}
504 
505 	return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
506 				     (in_tstamp >> 8) & mask);
507 }
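
/* Worked example with a hypothetical 40-bit value: for in_tstamp =
 * 0x1234567801, the low bit (0x1) is the valid flag, bits 1-7 hold the
 * sub-nanosecond underflow capture, and (in_tstamp >> 8) & GENMASK_ULL(31, 0)
 * yields 0x12345678, the 32 bits of nominal nanoseconds that are handed to
 * ice_ptp_extend_32b_ts() above.
 */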
508 
509 /**
510  * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps
511  * @tx: the PTP Tx timestamp tracker to check
512  *
513  * Check that a given PTP Tx timestamp tracker is up, i.e. that it is ready
514  * to accept new timestamp requests.
515  *
516  * Assumes the tx->lock spinlock is already held.
517  */
518 static bool
519 ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
520 {
521 	lockdep_assert_held(&tx->lock);
522 
523 	return tx->init && !tx->calibrating;
524 }
525 
526 /**
527  * ice_ptp_req_tx_single_tstamp - Request Tx timestamp for a port from FW
528  * @tx: the PTP Tx timestamp tracker
529  * @idx: index of the timestamp to request
530  */
531 void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
532 {
533 	struct ice_ptp_port *ptp_port;
534 	struct sk_buff *skb;
535 	struct ice_pf *pf;
536 
537 	if (!tx->init)
538 		return;
539 
540 	ptp_port = container_of(tx, struct ice_ptp_port, tx);
541 	pf = ptp_port_to_pf(ptp_port);
542 
543 	/* Drop packets which have waited for more than 2 seconds */
544 	if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
545 		/* Count the number of Tx timestamps that timed out */
546 		pf->ptp.tx_hwtstamp_timeouts++;
547 
548 		skb = tx->tstamps[idx].skb;
549 		tx->tstamps[idx].skb = NULL;
550 		clear_bit(idx, tx->in_use);
551 
552 		dev_kfree_skb_any(skb);
553 		return;
554 	}
555 
556 	ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
557 
558 	/* Write the index of the TS to read to the PF register so FW can read it */
559 	wr32(&pf->hw, PF_SB_ATQBAL,
560 	     TS_LL_READ_TS_INTR | FIELD_PREP(TS_LL_READ_TS_IDX, idx) |
561 	     TS_LL_READ_TS);
562 	tx->last_ll_ts_idx_read = idx;
563 }
564 
565 /**
566  * ice_ptp_complete_tx_single_tstamp - Complete Tx timestamp for a port
567  * @tx: the PTP Tx timestamp tracker
568  */
569 void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
570 {
571 	struct skb_shared_hwtstamps shhwtstamps = {};
572 	u8 idx = tx->last_ll_ts_idx_read;
573 	struct ice_ptp_port *ptp_port;
574 	u64 raw_tstamp, tstamp;
575 	bool drop_ts = false;
576 	struct sk_buff *skb;
577 	struct ice_pf *pf;
578 	u32 val;
579 
580 	if (!tx->init || tx->last_ll_ts_idx_read < 0)
581 		return;
582 
583 	ptp_port = container_of(tx, struct ice_ptp_port, tx);
584 	pf = ptp_port_to_pf(ptp_port);
585 
586 	ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
587 
588 	val = rd32(&pf->hw, PF_SB_ATQBAL);
589 
590 	/* When the bit is cleared, the TS is ready in the register */
591 	if (val & TS_LL_READ_TS) {
592 		dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready");
593 		return;
594 	}
595 
596 	/* The high 8 bits of the TS are in bits 16:23 */
597 	raw_tstamp = FIELD_GET(TS_LL_READ_TS_HIGH, val);
598 	raw_tstamp <<= 32;
599 
600 	/* Read the low 32 bit value */
601 	raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH);
602 
603 	/* Devices using this interface always verify that the timestamp
604 	 * differs from the last cached timestamp value.
605 	 */
606 	if (raw_tstamp == tx->tstamps[idx].cached_tstamp)
607 		return;
608 
609 	tx->tstamps[idx].cached_tstamp = raw_tstamp;
610 	clear_bit(idx, tx->in_use);
611 	skb = tx->tstamps[idx].skb;
612 	tx->tstamps[idx].skb = NULL;
613 	if (test_and_clear_bit(idx, tx->stale))
614 		drop_ts = true;
615 
616 	if (!skb)
617 		return;
618 
619 	if (drop_ts) {
620 		dev_kfree_skb_any(skb);
621 		return;
622 	}
623 
624 	/* Extend the timestamp using cached PHC time */
625 	tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
626 	if (tstamp) {
627 		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
628 		ice_trace(tx_tstamp_complete, skb, idx);
629 	}
630 
631 	skb_tstamp_tx(skb, &shhwtstamps);
632 	dev_kfree_skb_any(skb);
633 }
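
/* Illustration of the low-latency timestamp read above, with hypothetical
 * register contents: if the TS_LL_READ_TS_HIGH field of PF_SB_ATQBAL holds
 * 0xAB and PF_SB_ATQBAH reads 0x12345678, the reassembled 40-bit value is
 * raw_tstamp = ((u64)0xAB << 32) | 0x12345678 = 0xAB12345678, which is then
 * extended to 64 bits via ice_ptp_extend_40b_ts().
 */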
634 
635 /**
636  * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port
637  * @tx: the PTP Tx timestamp tracker
638  *
639  * Process timestamps captured by the PHY associated with this port. To do
640  * this, loop over each index with a waiting skb.
641  *
642  * If a given index has a valid timestamp, perform the following steps:
643  *
644  * 1) check that the timestamp request is not stale
645  * 2) check that a timestamp is ready and available in the PHY memory bank
646  * 3) read and copy the timestamp out of the PHY register
647  * 4) unlock the index by clearing the associated in_use bit
648  * 5) check if the timestamp is stale, and discard if so
649  * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value
650  * 7) send this 64 bit timestamp to the stack
651  *
652  * Note that we do not hold the tracking lock while reading the Tx timestamp.
653  * This is because reading the timestamp requires taking a mutex that might
654  * sleep.
655  *
656  * The only place where we set in_use is when a new timestamp is initiated
657  * with a slot index. This is only called in the hard xmit routine where an
658  * SKB has a request flag set. The only places where we clear this bit is this
659  * function, or during teardown when the Tx timestamp tracker is being
660  * removed. A timestamp index will never be re-used until the in_use bit for
661  * that index is cleared.
662  *
663  * If a Tx thread starts a new timestamp, we might not begin processing it
664  * right away but we will notice it at the end when we re-queue the task.
665  *
666  * If a Tx thread starts a new timestamp just after this function exits, the
667  * interrupt for that timestamp should re-trigger this function once
668  * a timestamp is ready.
669  *
670  * In cases where the PTP hardware clock was directly adjusted, some
671  * timestamps may not be able to safely use the timestamp extension math. In
672  * this case, software will set the stale bit for any outstanding Tx
673  * timestamps when the clock is adjusted. Then this function will discard
674  * those captured timestamps instead of sending them to the stack.
675  *
676  * If a Tx packet has been waiting for more than 2 seconds, it is not possible
677  * to correctly extend the timestamp using the cached PHC time. It is
678  * extremely unlikely that a packet will ever take this long to timestamp. If
679  * we detect a Tx timestamp request that has waited for this long, we assume
680  * the packet will never be sent by hardware and discard it without reading
681  * the timestamp register.
682  */
683 static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
684 {
685 	struct ice_ptp_port *ptp_port;
686 	unsigned long flags;
687 	struct ice_pf *pf;
688 	struct ice_hw *hw;
689 	u64 tstamp_ready;
690 	bool link_up;
691 	int err;
692 	u8 idx;
693 
694 	ptp_port = container_of(tx, struct ice_ptp_port, tx);
695 	pf = ptp_port_to_pf(ptp_port);
696 	hw = &pf->hw;
697 
698 	/* Read the Tx ready status first */
699 	if (tx->has_ready_bitmap) {
700 		err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
701 		if (err)
702 			return;
703 	}
704 
705 	/* Drop packets if the link went down */
706 	link_up = ptp_port->link_up;
707 
708 	for_each_set_bit(idx, tx->in_use, tx->len) {
709 		struct skb_shared_hwtstamps shhwtstamps = {};
710 		u8 phy_idx = idx + tx->offset;
711 		u64 raw_tstamp = 0, tstamp;
712 		bool drop_ts = !link_up;
713 		struct sk_buff *skb;
714 
715 		/* Drop packets which have waited for more than 2 seconds */
716 		if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
717 			drop_ts = true;
718 
719 			/* Count the number of Tx timestamps that timed out */
720 			pf->ptp.tx_hwtstamp_timeouts++;
721 		}
722 
723 		/* Only read a timestamp from the PHY if it's marked as ready
724 		 * by the tstamp_ready register. This avoids unnecessary
725 		 * reading of timestamps which are not yet valid. This is
726 		 * important because during each interrupt we must read all
727 		 * timestamps which are valid, and only those timestamps.
728 		 * If we do not, the hardware logic for generating a new
729 		 * interrupt can get stuck on some devices.
730 		 */
731 		if (tx->has_ready_bitmap &&
732 		    !(tstamp_ready & BIT_ULL(phy_idx))) {
733 			if (drop_ts)
734 				goto skip_ts_read;
735 
736 			continue;
737 		}
738 
739 		ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
740 
741 		err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp);
742 		if (err && !drop_ts)
743 			continue;
744 
745 		ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
746 
747 		/* For PHYs which don't implement a proper timestamp ready
748 		 * bitmap, verify that the timestamp value is different
749 		 * from the last cached timestamp. If it is not, skip this for
750 		 * now assuming it hasn't yet been captured by hardware.
751 		 */
752 		if (!drop_ts && !tx->has_ready_bitmap &&
753 		    raw_tstamp == tx->tstamps[idx].cached_tstamp)
754 			continue;
755 
756 		/* Discard any timestamp value without the valid bit set */
757 		if (!(raw_tstamp & ICE_PTP_TS_VALID))
758 			drop_ts = true;
759 
760 skip_ts_read:
761 		spin_lock_irqsave(&tx->lock, flags);
762 		if (!tx->has_ready_bitmap && raw_tstamp)
763 			tx->tstamps[idx].cached_tstamp = raw_tstamp;
764 		clear_bit(idx, tx->in_use);
765 		skb = tx->tstamps[idx].skb;
766 		tx->tstamps[idx].skb = NULL;
767 		if (test_and_clear_bit(idx, tx->stale))
768 			drop_ts = true;
769 		spin_unlock_irqrestore(&tx->lock, flags);
770 
771 		/* It is unlikely but possible that the SKB will have been
772 		 * flushed at this point due to link change or teardown.
773 		 */
774 		if (!skb)
775 			continue;
776 
777 		if (drop_ts) {
778 			dev_kfree_skb_any(skb);
779 			continue;
780 		}
781 
782 		/* Extend the timestamp using cached PHC time */
783 		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
784 		if (tstamp) {
785 			shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
786 			ice_trace(tx_tstamp_complete, skb, idx);
787 		}
788 
789 		skb_tstamp_tx(skb, &shhwtstamps);
790 		dev_kfree_skb_any(skb);
791 	}
792 }
793 
794 /**
795  * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device
796  * @pf: Board private structure
797  */
798 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
799 {
800 	struct ice_ptp_port *port;
801 	unsigned int i;
802 
803 	mutex_lock(&pf->ptp.ports_owner.lock);
804 	list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member) {
805 		struct ice_ptp_tx *tx = &port->tx;
806 
807 		if (!tx || !tx->init)
808 			continue;
809 
810 		ice_ptp_process_tx_tstamp(tx);
811 	}
812 	mutex_unlock(&pf->ptp.ports_owner.lock);
813 
814 	for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) {
815 		u64 tstamp_ready;
816 		int err;
817 
818 		/* Read the Tx ready status first */
819 		err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
820 		if (err)
821 			break;
822 		else if (tstamp_ready)
823 			return ICE_TX_TSTAMP_WORK_PENDING;
824 	}
825 
826 	return ICE_TX_TSTAMP_WORK_DONE;
827 }
828 
829 /**
830  * ice_ptp_tx_tstamp - Process Tx timestamps for this function
831  * @tx: Tx tracking structure to process
832  *
833  * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete
834  * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise.
835  */
836 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
837 {
838 	bool more_timestamps;
839 	unsigned long flags;
840 
841 	if (!tx->init)
842 		return ICE_TX_TSTAMP_WORK_DONE;
843 
844 	/* Process the Tx timestamp tracker */
845 	ice_ptp_process_tx_tstamp(tx);
846 
847 	/* Check if there are outstanding Tx timestamps */
848 	spin_lock_irqsave(&tx->lock, flags);
849 	more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);
850 	spin_unlock_irqrestore(&tx->lock, flags);
851 
852 	if (more_timestamps)
853 		return ICE_TX_TSTAMP_WORK_PENDING;
854 
855 	return ICE_TX_TSTAMP_WORK_DONE;
856 }
857 
858 /**
859  * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
860  * @tx: Tx tracking structure to initialize
861  *
862  * Assumes that the length has already been initialized. Do not call directly;
863  * use one of the ice_ptp_init_tx_* functions instead.
864  */
865 static int
866 ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
867 {
868 	unsigned long *in_use, *stale;
869 	struct ice_tx_tstamp *tstamps;
870 
871 	tstamps = kcalloc(tx->len, sizeof(*tstamps), GFP_KERNEL);
872 	in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
873 	stale = bitmap_zalloc(tx->len, GFP_KERNEL);
874 
875 	if (!tstamps || !in_use || !stale) {
876 		kfree(tstamps);
877 		bitmap_free(in_use);
878 		bitmap_free(stale);
879 
880 		return -ENOMEM;
881 	}
882 
883 	tx->tstamps = tstamps;
884 	tx->in_use = in_use;
885 	tx->stale = stale;
886 	tx->init = 1;
887 	tx->last_ll_ts_idx_read = -1;
888 
889 	spin_lock_init(&tx->lock);
890 
891 	return 0;
892 }
893 
894 /**
895  * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
896  * @pf: Board private structure
897  * @tx: the tracker to flush
898  *
899  * Called during teardown when a Tx tracker is being removed.
900  */
901 static void
902 ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
903 {
904 	struct ice_hw *hw = &pf->hw;
905 	unsigned long flags;
906 	u64 tstamp_ready;
907 	int err;
908 	u8 idx;
909 
910 	err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
911 	if (err) {
912 		dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n",
913 			tx->block, err);
914 
915 		/* If we fail to read the Tx timestamp ready bitmap just
916 		 * skip clearing the PHY timestamps.
917 		 */
918 		tstamp_ready = 0;
919 	}
920 
921 	for_each_set_bit(idx, tx->in_use, tx->len) {
922 		u8 phy_idx = idx + tx->offset;
923 		struct sk_buff *skb;
924 
925 		/* In case this timestamp is ready, we need to clear it. */
926 		if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx)))
927 			ice_clear_phy_tstamp(hw, tx->block, phy_idx);
928 
929 		spin_lock_irqsave(&tx->lock, flags);
930 		skb = tx->tstamps[idx].skb;
931 		tx->tstamps[idx].skb = NULL;
932 		clear_bit(idx, tx->in_use);
933 		clear_bit(idx, tx->stale);
934 		spin_unlock_irqrestore(&tx->lock, flags);
935 
936 		/* Count the number of Tx timestamps flushed */
937 		pf->ptp.tx_hwtstamp_flushed++;
938 
939 		/* Free the SKB after we've cleared the bit */
940 		dev_kfree_skb_any(skb);
941 	}
942 }
943 
944 /**
945  * ice_ptp_mark_tx_tracker_stale - Mark unfinished timestamps as stale
946  * @tx: the tracker to mark
947  *
948  * Mark currently outstanding Tx timestamps as stale. This prevents sending
949  * their timestamp value to the stack. This is required to prevent extending
950  * the 40bit hardware timestamp incorrectly.
951  *
952  * This should be called when the PTP clock is modified such as after a set
953  * time request.
954  */
955 static void
956 ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
957 {
958 	unsigned long flags;
959 
960 	spin_lock_irqsave(&tx->lock, flags);
961 	bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len);
962 	spin_unlock_irqrestore(&tx->lock, flags);
963 }
964 
965 /**
966  * ice_ptp_flush_all_tx_tracker - Flush all timestamp trackers on this clock
967  * @pf: Board private structure
968  *
969  * Called by the clock owner to flush all the Tx timestamp trackers associated
970  * with the clock.
971  */
972 static void
973 ice_ptp_flush_all_tx_tracker(struct ice_pf *pf)
974 {
975 	struct ice_ptp_port *port;
976 
977 	list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member)
978 		ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx);
979 }
980 
981 /**
982  * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
983  * @pf: Board private structure
984  * @tx: Tx tracking structure to release
985  *
986  * Free memory associated with the Tx timestamp tracker.
987  */
988 static void
989 ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
990 {
991 	unsigned long flags;
992 
993 	spin_lock_irqsave(&tx->lock, flags);
994 	tx->init = 0;
995 	spin_unlock_irqrestore(&tx->lock, flags);
996 
997 	/* wait for potentially outstanding interrupt to complete */
998 	synchronize_irq(pf->oicr_irq.virq);
999 
1000 	ice_ptp_flush_tx_tracker(pf, tx);
1001 
1002 	kfree(tx->tstamps);
1003 	tx->tstamps = NULL;
1004 
1005 	bitmap_free(tx->in_use);
1006 	tx->in_use = NULL;
1007 
1008 	bitmap_free(tx->stale);
1009 	tx->stale = NULL;
1010 
1011 	tx->len = 0;
1012 }
1013 
1014 /**
1015  * ice_ptp_init_tx_eth56g - Initialize tracking for Tx timestamps
1016  * @pf: Board private structure
1017  * @tx: the Tx tracking structure to initialize
1018  * @port: the port this structure tracks
1019  *
1020  * Initialize the Tx timestamp tracker for this port. ETH56G PHYs
1021  * have independent memory blocks for all ports.
1022  *
1023  * Return: 0 for success, -ENOMEM when failed to allocate Tx tracker
1024  */
1025 static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx,
1026 				  u8 port)
1027 {
1028 	tx->block = port;
1029 	tx->offset = 0;
1030 	tx->len = INDEX_PER_PORT_ETH56G;
1031 	tx->has_ready_bitmap = 1;
1032 
1033 	return ice_ptp_alloc_tx_tracker(tx);
1034 }
1035 
1036 /**
1037  * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
1038  * @pf: Board private structure
1039  * @tx: the Tx tracking structure to initialize
1040  * @port: the port this structure tracks
1041  *
1042  * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
1043  * the timestamp block is shared for all ports in the same quad. To avoid
1044  * ports using the same timestamp index, logically break the block of
1045  * registers into chunks based on the port number.
1046  */
1047 static int
1048 ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
1049 {
1050 	tx->block = ICE_GET_QUAD_NUM(port);
1051 	tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
1052 	tx->len = INDEX_PER_PORT_E82X;
1053 	tx->has_ready_bitmap = 1;
1054 
1055 	return ice_ptp_alloc_tx_tracker(tx);
1056 }
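
/* Worked example, assuming the driver's ICE_PORTS_PER_QUAD of 4 and an
 * INDEX_PER_PORT_E82X of 16 timestamps per port: port 5 maps to
 * tx->block = ICE_GET_QUAD_NUM(5) = 1 and tx->offset = (5 % 4) * 16 = 16,
 * so it owns indices 16-31 of quad 1's shared timestamp memory.
 */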
1057 
1058 /**
1059  * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
1060  * @pf: Board private structure
1061  * @tx: the Tx tracking structure to initialize
1062  *
1063  * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
1064  * port has its own block of timestamps, independent of the other ports.
1065  */
1066 static int
1067 ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
1068 {
1069 	tx->block = pf->hw.port_info->lport;
1070 	tx->offset = 0;
1071 	tx->len = INDEX_PER_PORT_E810;
1072 	/* The E810 PHY does not provide a timestamp ready bitmap. Instead,
1073 	 * verify new timestamps against cached copy of the last read
1074 	 * timestamp.
1075 	 */
1076 	tx->has_ready_bitmap = 0;
1077 
1078 	return ice_ptp_alloc_tx_tracker(tx);
1079 }
1080 
1081 /**
1082  * ice_ptp_update_cached_phctime - Update the cached PHC time values
1083  * @pf: Board specific private structure
1084  *
1085  * This function updates the system time values which are cached in the PF
1086  * structure and the Rx rings.
1087  *
1088  * This function must be called periodically to ensure that the cached value
1089  * is never more than 2 seconds old.
1090  *
1091  * Note that the cached copy in the PF PTP structure is always updated, even
1092  * if we can't update the copy in the Rx rings.
1093  *
1094  * Return:
1095  * * 0 - OK, successfully updated
1096  * * -EAGAIN - PF was busy, need to reschedule the update
1097  */
1098 static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
1099 {
1100 	struct device *dev = ice_pf_to_dev(pf);
1101 	unsigned long update_before;
1102 	u64 systime;
1103 	int i;
1104 
1105 	update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
1106 	if (pf->ptp.cached_phc_time &&
1107 	    time_is_before_jiffies(update_before)) {
1108 		unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;
1109 
1110 		dev_warn(dev, "%u msecs passed between updates to cached PHC time\n",
1111 			 jiffies_to_msecs(time_taken));
1112 		pf->ptp.late_cached_phc_updates++;
1113 	}
1114 
1115 	/* Read the current PHC time */
1116 	systime = ice_ptp_read_src_clk_reg(pf, NULL);
1117 
1118 	/* Update the cached PHC time stored in the PF structure */
1119 	WRITE_ONCE(pf->ptp.cached_phc_time, systime);
1120 	WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);
1121 
1122 	if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
1123 		return -EAGAIN;
1124 
1125 	ice_for_each_vsi(pf, i) {
1126 		struct ice_vsi *vsi = pf->vsi[i];
1127 		int j;
1128 
1129 		if (!vsi)
1130 			continue;
1131 
1132 		if (vsi->type != ICE_VSI_PF)
1133 			continue;
1134 
1135 		ice_for_each_rxq(vsi, j) {
1136 			if (!vsi->rx_rings[j])
1137 				continue;
1138 			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
1139 		}
1140 	}
1141 	clear_bit(ICE_CFG_BUSY, pf->state);
1142 
1143 	return 0;
1144 }
1145 
1146 /**
1147  * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
1148  * @pf: Board specific private structure
1149  *
1150  * This function must be called when the cached PHC time is no longer valid,
1151  * such as after a time adjustment. It marks any currently outstanding Tx
1152  * timestamps as stale and updates the cached PHC time for both the PF and Rx
1153  * rings.
1154  *
1155  * If updating the PHC time cannot be done immediately, a warning message is
1156  * logged and the work item is rescheduled shortly to minimize the window
1157  * with a wrong cached timestamp.
1158  */
1159 static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
1160 {
1161 	struct device *dev = ice_pf_to_dev(pf);
1162 	int err;
1163 
1164 	/* Update the cached PHC time immediately if possible, otherwise
1165 	 * schedule the work item to execute soon.
1166 	 */
1167 	err = ice_ptp_update_cached_phctime(pf);
1168 	if (err) {
1169 		/* If another thread is updating the Rx rings, we won't
1170 		 * properly reset them here. This could lead to reporting of
1171 		 * invalid timestamps, but there isn't much we can do.
1172 		 */
1173 		dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
1174 			 __func__);
1175 
1176 		/* Queue the work item to update the Rx rings when possible */
1177 		kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
1178 					   msecs_to_jiffies(10));
1179 	}
1180 
1181 	/* Mark any outstanding timestamps as stale, since they might have
1182 	 * been captured in hardware before the time update. This could lead
1183 	 * to us extending them with the wrong cached value resulting in
1184 	 * incorrect timestamp values.
1185 	 */
1186 	ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx);
1187 }
1188 
1189 /**
1190  * ice_ptp_write_init - Set PHC time to provided value
1191  * @pf: Board private structure
1192  * @ts: timespec structure that holds the new time value
1193  *
1194  * Set the PHC time to the specified time provided in the timespec.
1195  */
1196 static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
1197 {
1198 	u64 ns = timespec64_to_ns(ts);
1199 	struct ice_hw *hw = &pf->hw;
1200 
1201 	return ice_ptp_init_time(hw, ns);
1202 }
1203 
1204 /**
1205  * ice_ptp_write_adj - Adjust PHC clock time atomically
1206  * @pf: Board private structure
1207  * @adj: Adjustment in nanoseconds
1208  *
1209  * Perform an atomic adjustment of the PHC time by the specified number of
1210  * nanoseconds.
1211  */
1212 static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
1213 {
1214 	struct ice_hw *hw = &pf->hw;
1215 
1216 	return ice_ptp_adj_clock(hw, adj);
1217 }
1218 
1219 /**
1220  * ice_base_incval - Get base timer increment value
1221  * @pf: Board private structure
1222  *
1223  * Look up the base timer increment value for this device. The base increment
1224  * value is used to define the nominal clock tick rate. This increment value
1225  * is programmed during device initialization. It is also used as the basis
1226  * for calculating adjustments using scaled_ppm.
1227  */
1228 static u64 ice_base_incval(struct ice_pf *pf)
1229 {
1230 	struct ice_hw *hw = &pf->hw;
1231 	u64 incval;
1232 
1233 	incval = ice_get_base_incval(hw);
1234 
1235 	dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
1236 		incval);
1237 
1238 	return incval;
1239 }
1240 
1241 /**
1242  * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state
1243  * @port: PTP port for which Tx FIFO is checked
1244  */
1245 static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
1246 {
1247 	int offs = port->port_num % ICE_PORTS_PER_QUAD;
1248 	int quad = ICE_GET_QUAD_NUM(port->port_num);
1249 	struct ice_pf *pf;
1250 	struct ice_hw *hw;
1251 	u32 val, phy_sts;
1252 	int err;
1253 
1254 	pf = ptp_port_to_pf(port);
1255 	hw = &pf->hw;
1256 
1257 	if (port->tx_fifo_busy_cnt == FIFO_OK)
1258 		return 0;
1259 
1260 	/* need to read FIFO state */
1261 	if (offs == 0 || offs == 1)
1262 		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS,
1263 					     &val);
1264 	else
1265 		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS,
1266 					     &val);
1267 
1268 	if (err) {
1269 		dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
1270 			port->port_num, err);
1271 		return err;
1272 	}
1273 
1274 	if (offs & 0x1)
1275 		phy_sts = FIELD_GET(Q_REG_FIFO13_M, val);
1276 	else
1277 		phy_sts = FIELD_GET(Q_REG_FIFO02_M, val);
1278 
1279 	if (phy_sts & FIFO_EMPTY) {
1280 		port->tx_fifo_busy_cnt = FIFO_OK;
1281 		return 0;
1282 	}
1283 
1284 	port->tx_fifo_busy_cnt++;
1285 
1286 	dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
1287 		port->tx_fifo_busy_cnt, port->port_num);
1288 
1289 	if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
1290 		dev_dbg(ice_pf_to_dev(pf),
1291 			"Port %d Tx FIFO still not empty; resetting quad %d\n",
1292 			port->port_num, quad);
1293 		ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
1294 		port->tx_fifo_busy_cnt = FIFO_OK;
1295 		return 0;
1296 	}
1297 
1298 	return -EAGAIN;
1299 }
1300 
1301 /**
1302  * ice_ptp_wait_for_offsets - Check for valid Tx and Rx offsets
1303  * @work: Pointer to the kthread_work structure for this task
1304  *
1305  * Check whether hardware has completed measuring the Tx and Rx offset values
1306  * used to configure and enable vernier timestamp calibration.
1307  *
1308  * Once the offset in either direction is measured, configure the associated
1309  * registers with the calibrated offset values and enable timestamping. The Tx
1310  * and Rx directions are configured independently as soon as their associated
1311  * offsets are known.
1312  *
1313  * This function reschedules itself until both Tx and Rx calibration have
1314  * completed.
1315  */
1316 static void ice_ptp_wait_for_offsets(struct kthread_work *work)
1317 {
1318 	struct ice_ptp_port *port;
1319 	struct ice_pf *pf;
1320 	struct ice_hw *hw;
1321 	int tx_err;
1322 	int rx_err;
1323 
1324 	port = container_of(work, struct ice_ptp_port, ov_work.work);
1325 	pf = ptp_port_to_pf(port);
1326 	hw = &pf->hw;
1327 
1328 	if (ice_is_reset_in_progress(pf->state)) {
1329 		/* wait for device driver to complete reset */
1330 		kthread_queue_delayed_work(pf->ptp.kworker,
1331 					   &port->ov_work,
1332 					   msecs_to_jiffies(100));
1333 		return;
1334 	}
1335 
1336 	tx_err = ice_ptp_check_tx_fifo(port);
1337 	if (!tx_err)
1338 		tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num);
1339 	rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num);
1340 	if (tx_err || rx_err) {
1341 		/* Tx and/or Rx offset not yet configured, try again later */
1342 		kthread_queue_delayed_work(pf->ptp.kworker,
1343 					   &port->ov_work,
1344 					   msecs_to_jiffies(100));
1345 		return;
1346 	}
1347 }
1348 
1349 /**
1350  * ice_ptp_port_phy_stop - Stop timestamping for a PHY port
1351  * @ptp_port: PTP port to stop
1352  */
1353 static int
1354 ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
1355 {
1356 	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
1357 	u8 port = ptp_port->port_num;
1358 	struct ice_hw *hw = &pf->hw;
1359 	int err;
1360 
1361 	if (ice_is_e810(hw))
1362 		return 0;
1363 
1364 	mutex_lock(&ptp_port->ps_lock);
1365 
1366 	switch (hw->ptp.phy_model) {
1367 	case ICE_PHY_ETH56G:
1368 		err = ice_stop_phy_timer_eth56g(hw, port, true);
1369 		break;
1370 	case ICE_PHY_E82X:
1371 		kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
1372 
1373 		err = ice_stop_phy_timer_e82x(hw, port, true);
1374 		break;
1375 	default:
1376 		err = -ENODEV;
1377 	}
1378 	if (err && err != -EBUSY)
1379 		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
1380 			port, err);
1381 
1382 	mutex_unlock(&ptp_port->ps_lock);
1383 
1384 	return err;
1385 }
1386 
1387 /**
1388  * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping
1389  * @ptp_port: PTP port for which the PHY start is set
1390  *
1391  * Start the PHY timestamping block, and initiate Vernier timestamping
1392  * calibration. If timestamping cannot be calibrated (such as if link is down)
1393  * then disable the timestamping block instead.
1394  */
1395 static int
1396 ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
1397 {
1398 	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
1399 	u8 port = ptp_port->port_num;
1400 	struct ice_hw *hw = &pf->hw;
1401 	unsigned long flags;
1402 	int err;
1403 
1404 	if (ice_is_e810(hw))
1405 		return 0;
1406 
1407 	if (!ptp_port->link_up)
1408 		return ice_ptp_port_phy_stop(ptp_port);
1409 
1410 	mutex_lock(&ptp_port->ps_lock);
1411 
1412 	switch (hw->ptp.phy_model) {
1413 	case ICE_PHY_ETH56G:
1414 		err = ice_start_phy_timer_eth56g(hw, port);
1415 		break;
1416 	case ICE_PHY_E82X:
1417 		/* Start the PHY timer in Vernier mode */
1418 		kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
1419 
1420 		/* temporarily disable Tx timestamps while calibrating
1421 		 * PHY offset
1422 		 */
1423 		spin_lock_irqsave(&ptp_port->tx.lock, flags);
1424 		ptp_port->tx.calibrating = true;
1425 		spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
1426 		ptp_port->tx_fifo_busy_cnt = 0;
1427 
1428 		/* Start the PHY timer in Vernier mode */
1429 		err = ice_start_phy_timer_e82x(hw, port);
1430 		if (err)
1431 			break;
1432 
1433 		/* Enable Tx timestamps right away */
1434 		spin_lock_irqsave(&ptp_port->tx.lock, flags);
1435 		ptp_port->tx.calibrating = false;
1436 		spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
1437 
1438 		kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
1439 					   0);
1440 		break;
1441 	default:
1442 		err = -ENODEV;
1443 	}
1444 
1445 	if (err)
1446 		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
1447 			port, err);
1448 
1449 	mutex_unlock(&ptp_port->ps_lock);
1450 
1451 	return err;
1452 }
1453 
1454 /**
1455  * ice_ptp_link_change - Reconfigure PTP after link status change
1456  * @pf: Board private structure
1457  * @port: Port for which the PHY start is set
1458  * @linkup: Link is up or down
1459  */
1460 void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
1461 {
1462 	struct ice_ptp_port *ptp_port;
1463 	struct ice_hw *hw = &pf->hw;
1464 
1465 	if (pf->ptp.state != ICE_PTP_READY)
1466 		return;
1467 
1468 	if (WARN_ON_ONCE(port >= hw->ptp.num_lports))
1469 		return;
1470 
1471 	ptp_port = &pf->ptp.port;
1472 	if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
1473 		port *= 2;
1474 	if (WARN_ON_ONCE(ptp_port->port_num != port))
1475 		return;
1476 
1477 	/* Update cached link status for this port immediately */
1478 	ptp_port->link_up = linkup;
1479 
1480 	switch (hw->ptp.phy_model) {
1481 	case ICE_PHY_E810:
1482 		/* Do not reconfigure E810 PHY */
1483 		return;
1484 	case ICE_PHY_ETH56G:
1485 	case ICE_PHY_E82X:
1486 		ice_ptp_port_phy_restart(ptp_port);
1487 		return;
1488 	default:
1489 		dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__);
1490 	}
1491 }
1492 
1493 /**
1494  * ice_ptp_cfg_phy_interrupt - Configure PHY interrupt settings
1495  * @pf: PF private structure
1496  * @ena: bool value to enable or disable interrupt
1497  * @threshold: Minimum number of packets at which intr is triggered
1498  *
1499  * Utility function to configure all the PHY interrupt settings, including
1500  * whether the PHY interrupt is enabled, and what threshold to use. Also
1501  * configures the E82X timestamp owner to react to interrupts from all PHYs.
1502  *
1503  * Return: 0 on success, -EOPNOTSUPP when the PHY model is unsupported, or
1504  * another error code when configuring a PHY interrupt fails
1505  */
1506 static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
1507 {
1508 	struct device *dev = ice_pf_to_dev(pf);
1509 	struct ice_hw *hw = &pf->hw;
1510 
1511 	ice_ptp_reset_ts_memory(hw);
1512 
1513 	switch (hw->ptp.phy_model) {
1514 	case ICE_PHY_ETH56G: {
1515 		int port;
1516 
1517 		for (port = 0; port < hw->ptp.num_lports; port++) {
1518 			int err;
1519 
1520 			err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
1521 			if (err) {
1522 				dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
1523 					port, err);
1524 				return err;
1525 			}
1526 		}
1527 
1528 		return 0;
1529 	}
1530 	case ICE_PHY_E82X: {
1531 		int quad;
1532 
1533 		for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
1534 		     quad++) {
1535 			int err;
1536 
1537 			err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
1538 			if (err) {
1539 				dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
1540 					quad, err);
1541 				return err;
1542 			}
1543 		}
1544 
1545 		return 0;
1546 	}
1547 	case ICE_PHY_E810:
1548 		return 0;
1549 	case ICE_PHY_UNSUP:
1550 	default:
1551 		dev_warn(dev, "%s: Unexpected PHY model %d\n", __func__,
1552 			 hw->ptp.phy_model);
1553 		return -EOPNOTSUPP;
1554 	}
1555 }
1556 
1557 /**
1558  * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block
1559  * @pf: Board private structure
1560  */
1561 static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
1562 {
1563 	ice_ptp_port_phy_restart(&pf->ptp.port);
1564 }
1565 
1566 /**
1567  * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping
1568  * @pf: Board private structure
1569  */
1570 static void ice_ptp_restart_all_phy(struct ice_pf *pf)
1571 {
1572 	struct list_head *entry;
1573 
1574 	list_for_each(entry, &pf->ptp.ports_owner.ports) {
1575 		struct ice_ptp_port *port = list_entry(entry,
1576 						       struct ice_ptp_port,
1577 						       list_member);
1578 
1579 		if (port->link_up)
1580 			ice_ptp_port_phy_restart(port);
1581 	}
1582 }
1583 
1584 /**
1585  * ice_ptp_adjfine - Adjust clock increment rate
1586  * @info: the driver's PTP info structure
1587  * @scaled_ppm: Parts per million with 16-bit fractional field
1588  *
1589  * Adjust the frequency of the clock by the indicated scaled ppm from the
1590  * base frequency.
1591  */
1592 static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
1593 {
1594 	struct ice_pf *pf = ptp_info_to_pf(info);
1595 	struct ice_hw *hw = &pf->hw;
1596 	u64 incval;
1597 	int err;
1598 
1599 	incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm);
1600 	err = ice_ptp_write_incval_locked(hw, incval);
1601 	if (err) {
1602 		dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
1603 			err);
1604 		return -EIO;
1605 	}
1606 
1607 	return 0;
1608 }
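
/* scaled_ppm is parts per million with a 16-bit binary fractional field, so
 * 1 ppm == 65536 scaled ppm. As a hypothetical example, a request of
 * scaled_ppm = 32768 (+0.5 ppm) makes adjust_by_scaled_ppm() return roughly
 * ice_base_incval(pf) * (1 + 0.5e-6), and that incval is then written to the
 * source timer.
 */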
1609 
1610 /**
1611  * ice_ptp_extts_event - Process PTP external clock event
1612  * @pf: Board private structure
1613  */
1614 void ice_ptp_extts_event(struct ice_pf *pf)
1615 {
1616 	struct ptp_clock_event event;
1617 	struct ice_hw *hw = &pf->hw;
1618 	u8 chan, tmr_idx;
1619 	u32 hi, lo;
1620 
1621 	/* Don't process timestamp events if PTP is not ready */
1622 	if (pf->ptp.state != ICE_PTP_READY)
1623 		return;
1624 
1625 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1626 	/* Event time is captured by one of the two matched registers
1627 	 *      GLTSYN_EVNT_L: 32 LSB of sampled time event
1628 	 *      GLTSYN_EVNT_H: 32 MSB of sampled time event
1629 	 * Event is defined in GLTSYN_EVNT_0 register
1630 	 */
1631 	for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
1632 		/* Check if channel is enabled */
1633 		if (pf->ptp.ext_ts_irq & (1 << chan)) {
1634 			lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
1635 			hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
1636 			event.timestamp = (((u64)hi) << 32) | lo;
1637 			event.type = PTP_CLOCK_EXTTS;
1638 			event.index = chan;
1639 
1640 			/* Fire event */
1641 			ptp_clock_event(pf->ptp.clock, &event);
1642 			pf->ptp.ext_ts_irq &= ~(1 << chan);
1643 		}
1644 	}
1645 }
1646 
1647 /**
1648  * ice_ptp_cfg_extts - Configure EXTTS pin and channel
1649  * @pf: Board private structure
1650  * @chan: GPIO channel (0-3)
1651  * @config: desired EXTTS configuration.
1652  * @store: If set to true, the values will be stored
1653  *
1654  * Configure an external timestamp event on the requested channel.
1655  *
1656  * Return: 0 on success, -EOPNOTUSPP on unsupported flags
1657  */
1658 static int ice_ptp_cfg_extts(struct ice_pf *pf, unsigned int chan,
1659 			     struct ice_extts_channel *config, bool store)
1660 {
1661 	u32 func, aux_reg, gpio_reg, irq_reg;
1662 	struct ice_hw *hw = &pf->hw;
1663 	u8 tmr_idx;
1664 
1665 	/* Reject requests with unsupported flags */
1666 	if (config->flags & ~(PTP_ENABLE_FEATURE |
1667 			      PTP_RISING_EDGE |
1668 			      PTP_FALLING_EDGE |
1669 			      PTP_STRICT_FLAGS))
1670 		return -EOPNOTSUPP;
1671 
1672 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1673 
1674 	irq_reg = rd32(hw, PFINT_OICR_ENA);
1675 
1676 	if (config->ena) {
1677 		/* Enable the interrupt */
1678 		irq_reg |= PFINT_OICR_TSYN_EVNT_M;
1679 		aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;
1680 
1681 #define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE	BIT(0)
1682 #define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE	BIT(1)
1683 
1684 		/* set event level to requested edge */
1685 		if (config->flags & PTP_FALLING_EDGE)
1686 			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
1687 		if (config->flags & PTP_RISING_EDGE)
1688 			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;
1689 
1690 		/* Write GPIO CTL reg.
1691 		 * 0x1 is input sampled by EVENT register (channel)
1692 		 * + num_in_channels * tmr_idx
1693 		 */
1694 		func = 1 + chan + (tmr_idx * 3);
1695 		gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func);
1696 		pf->ptp.ext_ts_chan |= (1 << chan);
1697 	} else {
1698 		/* clear the values we set to reset defaults */
1699 		aux_reg = 0;
1700 		gpio_reg = 0;
1701 		pf->ptp.ext_ts_chan &= ~(1 << chan);
1702 		if (!pf->ptp.ext_ts_chan)
1703 			irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
1704 	}
1705 
1706 	wr32(hw, PFINT_OICR_ENA, irq_reg);
1707 	wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
1708 	wr32(hw, GLGEN_GPIO_CTL(config->gpio_pin), gpio_reg);
1709 
1710 	if (store)
1711 		memcpy(&pf->ptp.extts_channels[chan], config, sizeof(*config));
1712 
1713 	return 0;
1714 }
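
/* Example of the GPIO function encoding above (values follow directly from
 * the formula): with three input channels per timer, channel 2 on timer
 * index 0 selects func = 1 + 2 + (0 * 3) = 3, while channel 0 on timer
 * index 1 selects func = 1 + 0 + (1 * 3) = 4.
 */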
1715 
1716 /**
1717  * ice_ptp_disable_all_extts - Disable all EXTTS channels
1718  * @pf: Board private structure
1719  */
1720 static void ice_ptp_disable_all_extts(struct ice_pf *pf)
1721 {
1722 	struct ice_extts_channel extts_cfg = {};
1723 	int i;
1724 
1725 	for (i = 0; i < pf->ptp.info.n_ext_ts; i++) {
1726 		if (pf->ptp.extts_channels[i].ena) {
1727 			extts_cfg.gpio_pin = pf->ptp.extts_channels[i].gpio_pin;
1728 			extts_cfg.ena = false;
1729 			ice_ptp_cfg_extts(pf, i, &extts_cfg, false);
1730 		}
1731 	}
1732 
1733 	synchronize_irq(pf->oicr_irq.virq);
1734 }
1735 
1736 /**
1737  * ice_ptp_enable_all_extts - Enable all EXTTS channels
1738  * @pf: Board private structure
1739  *
1740  * Called during reset to restore user configuration.
1741  */
1742 static void ice_ptp_enable_all_extts(struct ice_pf *pf)
1743 {
1744 	int i;
1745 
1746 	for (i = 0; i < pf->ptp.info.n_ext_ts; i++) {
1747 		if (pf->ptp.extts_channels[i].ena)
1748 			ice_ptp_cfg_extts(pf, i, &pf->ptp.extts_channels[i],
1749 					  false);
1750 	}
1751 }
1752 
1753 /**
1754  * ice_ptp_cfg_clkout - Configure clock to generate periodic wave
1755  * @pf: Board private structure
1756  * @chan: GPIO channel (0-3)
1757  * @config: desired periodic clock configuration. NULL will disable the channel
1758  * @store: If set to true, the values will be stored
1759  *
1760  * Configure the internal clock generator modules to generate a clock wave of
1761  * the specified period.
1762  */
1763 static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
1764 			      struct ice_perout_channel *config, bool store)
1765 {
1766 	u64 current_time, period, start_time, phase;
1767 	struct ice_hw *hw = &pf->hw;
1768 	u32 func, val, gpio_pin;
1769 	u8 tmr_idx;
1770 
1771 	if (config && config->flags & ~PTP_PEROUT_PHASE)
1772 		return -EOPNOTSUPP;
1773 
1774 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1775 
1776 	/* 0. Reset mode & out_en in AUX_OUT */
1777 	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
1778 
1779 	/* If we're disabling the output, clear out CLKO and TGT and keep
1780 	 * output level low
1781 	 */
1782 	if (!config || !config->ena) {
1783 		wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0);
1784 		wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0);
1785 		wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0);
1786 
1787 		val = GLGEN_GPIO_CTL_PIN_DIR_M;
1788 		gpio_pin = pf->ptp.perout_channels[chan].gpio_pin;
1789 		wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
1790 
1791 		/* Store the value if requested */
1792 		if (store)
1793 			memset(&pf->ptp.perout_channels[chan], 0,
1794 			       sizeof(struct ice_perout_channel));
1795 
1796 		return 0;
1797 	}
1798 	period = config->period;
1799 	start_time = config->start_time;
1800 	div64_u64_rem(start_time, period, &phase);
1801 	gpio_pin = config->gpio_pin;
1802 
1803 	/* 1. Write clkout with half of required period value */
1804 	if (period & 0x1) {
1805 		dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
1806 		goto err;
1807 	}
1808 
1809 	period >>= 1;
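	/* GLTSYN_CLKO appears to hold the half-cycle time (the output toggles
	 * once per programmed interval), which is why the requested period
	 * must be even and is halved before being written below.
	 */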
1810 
1811 	/* For proper operation, the GLTSYN_CLKO value must be larger than a clock tick
1812 	 */
1813 #define MIN_PULSE 3
1814 	if (period <= MIN_PULSE || period > U32_MAX) {
1815 		dev_err(ice_pf_to_dev(pf), "CLK Period must be > %d && < 2^33\n",
1816 			MIN_PULSE * 2);
1817 		goto err;
1818 	}
1819 
1820 	wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));
1821 
1822 	/* Allow time for programming before start_time is hit */
1823 	current_time = ice_ptp_read_src_clk_reg(pf, NULL);
1824 
1825 	/* If the start time is in the past, start the timer at the nearest
1826 	 * upcoming second while maintaining the requested phase
1827 	 */
1828 	if (start_time < current_time)
1829 		start_time = roundup_u64(current_time, NSEC_PER_SEC) + phase;
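	/* Rounding up to the next whole second and then re-adding the
	 * sub-second remainder keeps the output edges aligned with the phase
	 * implied by the originally requested start time.
	 */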
1830 
1831 	if (ice_is_e810(hw))
1832 		start_time -= E810_OUT_PROP_DELAY_NS;
1833 	else
1834 		start_time -= ice_e82x_pps_delay(ice_e82x_time_ref(hw));
1835 
1836 	/* 2. Write TARGET time */
1837 	wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
1838 	wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start_time));
1839 
1840 	/* 3. Write AUX_OUT register */
1841 	val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
1842 	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);
1843 
1844 	/* 4. write GPIO CTL reg */
1845 	func = 8 + chan + (tmr_idx * 4);
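	/* As used here, periodic output function values start at 8, and each
	 * timer index appears to own a block of four output channels (hence
	 * tmr_idx * 4); this mirrors the input-side encoding used for EXTTS.
	 */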
1846 	val = GLGEN_GPIO_CTL_PIN_DIR_M |
1847 	      FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func);
1848 	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
1849 
1850 	/* Store the value if requested */
1851 	if (store) {
1852 		memcpy(&pf->ptp.perout_channels[chan], config,
1853 		       sizeof(struct ice_perout_channel));
1854 		pf->ptp.perout_channels[chan].start_time = phase;
1855 	}
1856 
1857 	return 0;
1858 err:
1859 	dev_err(ice_pf_to_dev(pf), "PTP failed to cfg per_clk\n");
1860 	return -EFAULT;
1861 }
1862 
1863 /**
1864  * ice_ptp_disable_all_clkout - Disable all currently configured outputs
1865  * @pf: pointer to the PF structure
1866  *
1867  * Disable all currently configured clock outputs. This is necessary before
1868  * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_clkout to
1869  * re-enable the clocks again.
1870  */
1871 static void ice_ptp_disable_all_clkout(struct ice_pf *pf)
1872 {
1873 	uint i;
1874 
1875 	for (i = 0; i < pf->ptp.info.n_per_out; i++)
1876 		if (pf->ptp.perout_channels[i].ena)
1877 			ice_ptp_cfg_clkout(pf, i, NULL, false);
1878 }
1879 
1880 /**
1881  * ice_ptp_enable_all_clkout - Enable all configured periodic clock outputs
1882  * @pf: pointer to the PF structure
1883  *
1884  * Enable all currently configured clock outputs. Use this after
1885  * ice_ptp_disable_all_clkout to reconfigure the output signals according to
1886  * their configuration.
1887  */
1888 static void ice_ptp_enable_all_clkout(struct ice_pf *pf)
1889 {
1890 	uint i;
1891 
1892 	for (i = 0; i < pf->ptp.info.n_per_out; i++)
1893 		if (pf->ptp.perout_channels[i].ena)
1894 			ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i],
1895 					   false);
1896 }
1897 
1898 /**
1899  * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC
1900  * @info: the driver's PTP info structure
1901  * @rq: The requested feature to change
1902  * @on: Enable/disable flag
1903  */
1904 static int
1905 ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
1906 			 struct ptp_clock_request *rq, int on)
1907 {
1908 	struct ice_pf *pf = ptp_info_to_pf(info);
1909 	bool sma_pres = false;
1910 	unsigned int chan;
1911 	u32 gpio_pin;
1912 
1913 	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
1914 		sma_pres = true;
1915 
1916 	switch (rq->type) {
1917 	case PTP_CLK_REQ_PEROUT:
1918 	{
1919 		struct ice_perout_channel clk_cfg = {};
1920 
1921 		chan = rq->perout.index;
1922 		if (sma_pres) {
1923 			if (chan == ice_pin_desc_e810t[SMA1].chan)
1924 				clk_cfg.gpio_pin = GPIO_20;
1925 			else if (chan == ice_pin_desc_e810t[SMA2].chan)
1926 				clk_cfg.gpio_pin = GPIO_22;
1927 			else
1928 				return -1;
1929 		} else if (ice_is_e810t(&pf->hw)) {
1930 			if (chan == 0)
1931 				clk_cfg.gpio_pin = GPIO_20;
1932 			else
1933 				clk_cfg.gpio_pin = GPIO_22;
1934 		} else if (chan == PPS_CLK_GEN_CHAN) {
1935 			clk_cfg.gpio_pin = PPS_PIN_INDEX;
1936 		} else {
1937 			clk_cfg.gpio_pin = chan;
1938 		}
1939 
1940 		clk_cfg.flags = rq->perout.flags;
1941 		clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
1942 				   rq->perout.period.nsec);
1943 		clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) +
1944 				       rq->perout.start.nsec);
1945 		clk_cfg.ena = !!on;
1946 
1947 		return ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
1948 	}
1949 	case PTP_CLK_REQ_EXTTS:
1950 	{
1951 		struct ice_extts_channel extts_cfg = {};
1952 
1953 		chan = rq->extts.index;
1954 		if (sma_pres) {
1955 			if (chan < ice_pin_desc_e810t[SMA2].chan)
1956 				gpio_pin = GPIO_21;
1957 			else
1958 				gpio_pin = GPIO_23;
1959 		} else if (ice_is_e810t(&pf->hw)) {
1960 			if (chan == 0)
1961 				gpio_pin = GPIO_21;
1962 			else
1963 				gpio_pin = GPIO_23;
1964 		} else {
1965 			gpio_pin = chan;
1966 		}
1967 
1968 		extts_cfg.flags = rq->extts.flags;
1969 		extts_cfg.gpio_pin = gpio_pin;
1970 		extts_cfg.ena = !!on;
1971 
1972 		return ice_ptp_cfg_extts(pf, chan, &extts_cfg, true);
1973 	}
1974 	default:
1975 		return -EOPNOTSUPP;
1976 	}
1977 }
1978 
1979 /**
1980  * ice_ptp_gpio_enable_e823 - Enable/disable ancillary features of PHC
1981  * @info: the driver's PTP info structure
1982  * @rq: The requested feature to change
1983  * @on: Enable/disable flag
1984  */
1985 static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info,
1986 				    struct ptp_clock_request *rq, int on)
1987 {
1988 	struct ice_pf *pf = ptp_info_to_pf(info);
1989 
1990 	switch (rq->type) {
1991 	case PTP_CLK_REQ_PPS:
1992 	{
1993 		struct ice_perout_channel clk_cfg = {};
1994 
1995 		clk_cfg.flags = rq->perout.flags;
1996 		clk_cfg.gpio_pin = PPS_PIN_INDEX;
1997 		clk_cfg.period = NSEC_PER_SEC;
1998 		clk_cfg.ena = !!on;
1999 
2000 		return ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true);
2001 	}
2002 	case PTP_CLK_REQ_EXTTS:
2003 	{
2004 		struct ice_extts_channel extts_cfg = {};
2005 
2006 		extts_cfg.flags = rq->extts.flags;
2007 		extts_cfg.gpio_pin = TIME_SYNC_PIN_INDEX;
2008 		extts_cfg.ena = !!on;
2009 
2010 		return ice_ptp_cfg_extts(pf, rq->extts.index, &extts_cfg, true);
2011 	}
2012 	default:
2013 		return -EOPNOTSUPP;
2014 	}
2015 }
2016 
2017 /**
2018  * ice_ptp_gettimex64 - Get the time of the clock
2019  * @info: the driver's PTP info structure
2020  * @ts: timespec64 structure to hold the current time value
2021  * @sts: Optional parameter for holding a pair of system timestamps from
2022  *       the system clock. Will be ignored if NULL is given.
2023  *
2024  * Read the device clock and return the correct value in ns, after converting it
2025  * into a timespec64 struct.
2026  */
2027 static int
2028 ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
2029 		   struct ptp_system_timestamp *sts)
2030 {
2031 	struct ice_pf *pf = ptp_info_to_pf(info);
2032 	u64 time_ns;
2033 
2034 	time_ns = ice_ptp_read_src_clk_reg(pf, sts);
2035 	*ts = ns_to_timespec64(time_ns);
2036 	return 0;
2037 }
2038 
2039 /**
2040  * ice_ptp_settime64 - Set the time of the clock
2041  * @info: the driver's PTP info structure
2042  * @ts: timespec64 structure that holds the new time value
2043  *
2044  * Set the device clock to the user input value. The conversion from timespec
2045  * to ns happens in the write function.
2046  */
2047 static int
2048 ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
2049 {
2050 	struct ice_pf *pf = ptp_info_to_pf(info);
2051 	struct timespec64 ts64 = *ts;
2052 	struct ice_hw *hw = &pf->hw;
2053 	int err;
2054 
2055 	/* For Vernier mode on E82X, we need to recalibrate after new settime.
2056 	 * Start with marking timestamps as invalid.
2057 	 */
2058 	if (hw->ptp.phy_model == ICE_PHY_E82X) {
2059 		err = ice_ptp_clear_phy_offset_ready_e82x(hw);
2060 		if (err)
2061 			dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
2062 	}
2063 
2064 	if (!ice_ptp_lock(hw)) {
2065 		err = -EBUSY;
2066 		goto exit;
2067 	}
2068 
2069 	/* Disable periodic outputs */
2070 	ice_ptp_disable_all_clkout(pf);
2071 
2072 	err = ice_ptp_write_init(pf, &ts64);
2073 	ice_ptp_unlock(hw);
2074 
2075 	if (!err)
2076 		ice_ptp_reset_cached_phctime(pf);
2077 
2078 	/* Reenable periodic outputs */
2079 	ice_ptp_enable_all_clkout(pf);
2080 
2081 	/* Recalibrate and re-enable timestamp blocks for E822/E823 */
2082 	if (hw->ptp.phy_model == ICE_PHY_E82X)
2083 		ice_ptp_restart_all_phy(pf);
2084 exit:
2085 	if (err) {
2086 		dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
2087 		return err;
2088 	}
2089 
2090 	return 0;
2091 }
2092 
2093 /**
2094  * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
2095  * @info: the driver's PTP info structure
2096  * @delta: Offset in nanoseconds to adjust the time by
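 *
 * Note that this get->adjust->set sequence is inherently non-atomic: the
 * clock keeps running between the read and the write, so the result may be
 * off by the length of that window.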
2097  */
2098 static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
2099 {
2100 	struct timespec64 now, then;
2101 	int ret;
2102 
2103 	then = ns_to_timespec64(delta);
2104 	ret = ice_ptp_gettimex64(info, &now, NULL);
2105 	if (ret)
2106 		return ret;
2107 	now = timespec64_add(now, then);
2108 
2109 	return ice_ptp_settime64(info, (const struct timespec64 *)&now);
2110 }
2111 
2112 /**
2113  * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta
2114  * @info: the driver's PTP info structure
2115  * @delta: Offset in nanoseconds to adjust the time by
2116  */
2117 static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
2118 {
2119 	struct ice_pf *pf = ptp_info_to_pf(info);
2120 	struct ice_hw *hw = &pf->hw;
2121 	struct device *dev;
2122 	int err;
2123 
2124 	dev = ice_pf_to_dev(pf);
2125 
2126 	/* Hardware only supports atomic adjustments using signed 32-bit
2127 	 * integers. For any adjustment outside this range, perform
2128 	 * a non-atomic get->adjust->set flow.
2129 	 */
2130 	if (delta > S32_MAX || delta < S32_MIN) {
2131 		dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta);
2132 		return ice_ptp_adjtime_nonatomic(info, delta);
2133 	}
2134 
2135 	if (!ice_ptp_lock(hw)) {
2136 		dev_err(dev, "PTP failed to acquire semaphore in adjtime\n");
2137 		return -EBUSY;
2138 	}
2139 
2140 	/* Disable periodic outputs */
2141 	ice_ptp_disable_all_clkout(pf);
2142 
2143 	err = ice_ptp_write_adj(pf, delta);
2144 
2145 	/* Reenable periodic outputs */
2146 	ice_ptp_enable_all_clkout(pf);
2147 
2148 	ice_ptp_unlock(hw);
2149 
2150 	if (err) {
2151 		dev_err(dev, "PTP failed to adjust time, err %d\n", err);
2152 		return err;
2153 	}
2154 
2155 	ice_ptp_reset_cached_phctime(pf);
2156 
2157 	return 0;
2158 }
2159 
2160 #ifdef CONFIG_ICE_HWTS
2161 /**
2162  * ice_ptp_get_syncdevicetime - Get the cross time stamp info
2163  * @device: Current device time
2164  * @system: System counter value read synchronously with device time
2165  * @ctx: Context provided by timekeeping code
2166  *
2167  * Read device and system (ART) clock simultaneously and return the corrected
2168  * clock values in ns.
2169  */
2170 static int
2171 ice_ptp_get_syncdevicetime(ktime_t *device,
2172 			   struct system_counterval_t *system,
2173 			   void *ctx)
2174 {
2175 	struct ice_pf *pf = (struct ice_pf *)ctx;
2176 	struct ice_hw *hw = &pf->hw;
2177 	u32 hh_lock, hh_art_ctl;
2178 	int i;
2179 
2180 #define MAX_HH_HW_LOCK_TRIES	5
2181 #define MAX_HH_CTL_LOCK_TRIES	100
2182 
2183 	for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) {
2184 		/* Get the HW lock */
2185 		hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
2186 		if (hh_lock & PFHH_SEM_BUSY_M) {
2187 			usleep_range(10000, 15000);
2188 			continue;
2189 		}
2190 		break;
2191 	}
2192 	if (hh_lock & PFHH_SEM_BUSY_M) {
2193 		dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
2194 		return -EBUSY;
2195 	}
2196 
2197 	/* Program cmd to master timer */
2198 	ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
2199 
2200 	/* Start the ART and device clock sync sequence */
2201 	hh_art_ctl = rd32(hw, GLHH_ART_CTL);
2202 	hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
2203 	wr32(hw, GLHH_ART_CTL, hh_art_ctl);
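	/* GLHH_ART_CTL_ACTIVE_M appears to be self-clearing: the polling loop
	 * below treats the bit dropping as the signal that the ART and device
	 * timestamps have been latched and can be read.
	 */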
2204 
2205 	for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) {
2206 		/* Wait for sync to complete */
2207 		hh_art_ctl = rd32(hw, GLHH_ART_CTL);
2208 		if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
2209 			udelay(1);
2210 			continue;
2211 		} else {
2212 			u32 hh_ts_lo, hh_ts_hi, tmr_idx;
2213 			u64 hh_ts;
2214 
2215 			tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
2216 			/* Read ART time */
2217 			hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
2218 			hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
2219 			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
2220 			system->cycles = hh_ts;
2221 			system->cs_id = CSID_X86_ART;
2222 			/* Read Device source clock time */
2223 			hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
2224 			hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
2225 			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
2226 			*device = ns_to_ktime(hh_ts);
2227 			break;
2228 		}
2229 	}
2230 
2231 	/* Clear the master timer */
2232 	ice_ptp_src_cmd(hw, ICE_PTP_NOP);
2233 
2234 	/* Release HW lock */
2235 	hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
2236 	hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
2237 	wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);
2238 
2239 	if (i == MAX_HH_CTL_LOCK_TRIES)
2240 		return -ETIMEDOUT;
2241 
2242 	return 0;
2243 }
2244 
2245 /**
2246  * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp
2247  * @info: the driver's PTP info structure
2248  * @cts: The memory to fill the cross timestamp info
2249  *
2250  * Capture a cross timestamp between the ART and the device PTP hardware
2251  * clock. Fill the cross timestamp information and report it back to the
2252  * caller.
2253  *
2254  * This is only valid for E822 and E823 devices which have support for
2255  * generating the cross timestamp via PCIe PTM.
2256  *
2257  * In order to correctly correlate the ART timestamp back to the TSC time, the
2258  * CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
2259  */
2260 static int
2261 ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info,
2262 			    struct system_device_crosststamp *cts)
2263 {
2264 	struct ice_pf *pf = ptp_info_to_pf(info);
2265 
2266 	return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
2267 					     pf, NULL, cts);
2268 }
2269 #endif /* CONFIG_ICE_HWTS */
2270 
2271 /**
2272  * ice_ptp_get_ts_config - ioctl interface to read the timestamping config
2273  * @pf: Board private structure
2274  * @ifr: ioctl data
2275  *
2276  * Copy the timestamping config to the user buffer
2277  */
2278 int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
2279 {
2280 	struct hwtstamp_config *config;
2281 
2282 	if (pf->ptp.state != ICE_PTP_READY)
2283 		return -EIO;
2284 
2285 	config = &pf->ptp.tstamp_config;
2286 
2287 	return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
2288 		-EFAULT : 0;
2289 }
2290 
2291 /**
2292  * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
2293  * @pf: Board private structure
2294  * @config: hwtstamp settings requested or saved
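 *
 * The driver timestamps all received packets whenever Rx timestamping is
 * enabled, so any of the PTP-specific Rx filters requested here is stored
 * and reported back as HWTSTAMP_FILTER_ALL.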
2295  */
2296 static int
2297 ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
2298 {
2299 	switch (config->tx_type) {
2300 	case HWTSTAMP_TX_OFF:
2301 		pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
2302 		break;
2303 	case HWTSTAMP_TX_ON:
2304 		pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
2305 		break;
2306 	default:
2307 		return -ERANGE;
2308 	}
2309 
2310 	switch (config->rx_filter) {
2311 	case HWTSTAMP_FILTER_NONE:
2312 		pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
2313 		break;
2314 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2315 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2316 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2317 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2318 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2319 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2320 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2321 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2322 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2323 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2324 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2325 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2326 	case HWTSTAMP_FILTER_NTP_ALL:
2327 	case HWTSTAMP_FILTER_ALL:
2328 		pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
2329 		break;
2330 	default:
2331 		return -ERANGE;
2332 	}
2333 
2334 	/* Immediately update the device timestamping mode */
2335 	ice_ptp_restore_timestamp_mode(pf);
2336 
2337 	return 0;
2338 }
2339 
2340 /**
2341  * ice_ptp_set_ts_config - ioctl interface to control the timestamping
2342  * @pf: Board private structure
2343  * @ifr: ioctl data
2344  *
2345  * Get the user config and store it
2346  */
2347 int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
2348 {
2349 	struct hwtstamp_config config;
2350 	int err;
2351 
2352 	if (pf->ptp.state != ICE_PTP_READY)
2353 		return -EAGAIN;
2354 
2355 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2356 		return -EFAULT;
2357 
2358 	err = ice_ptp_set_timestamp_mode(pf, &config);
2359 	if (err)
2360 		return err;
2361 
2362 	/* Return the actual configuration set */
2363 	config = pf->ptp.tstamp_config;
2364 
2365 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2366 		-EFAULT : 0;
2367 }
2368 
2369 /**
2370  * ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns
2371  * @rx_desc: Receive descriptor
2372  * @pkt_ctx: Packet context to get the cached time
2373  *
2374  * The driver receives a timestamp notification in the receive descriptor.
2375  */
2376 u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
2377 			const struct ice_pkt_ctx *pkt_ctx)
2378 {
2379 	u64 ts_ns, cached_time;
2380 	u32 ts_high;
2381 
2382 	if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
2383 		return 0;
2384 
2385 	cached_time = READ_ONCE(pkt_ctx->cached_phctime);
2386 
2387 	/* Do not report a timestamp if we don't have a cached PHC time */
2388 	if (!cached_time)
2389 		return 0;
2390 
2391 	/* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
2392 	 * PHC value, rather than accessing the PF. This also allows us to
2393  * simply pass the upper 32 bits of nanoseconds directly. Calling
2394 	 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these
2395 	 * bits itself.
2396 	 */
2397 	ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
2398 	ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);
2399 
2400 	return ts_ns;
2401 }
2402 
2403 /**
2404  * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins
2405  * @pf: pointer to the PF structure
2406  * @info: PTP clock info structure
2407  *
2408  * Disable the OS access to the SMA pins. Called to clear out the OS
2409  * indications of pin support when we fail to setup the E810-T SMA control
2410  * register.
2411  */
2412 static void
2413 ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
2414 {
2415 	struct device *dev = ice_pf_to_dev(pf);
2416 
2417 	dev_warn(dev, "Failed to configure E810-T SMA pin control\n");
2418 
2419 	info->enable = NULL;
2420 	info->verify = NULL;
2421 	info->n_pins = 0;
2422 	info->n_ext_ts = 0;
2423 	info->n_per_out = 0;
2424 }
2425 
2426 /**
2427  * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins
2428  * @pf: pointer to the PF structure
2429  * @info: PTP clock info structure
2430  *
2431  * Finish setting up the SMA pins by allocating pin_config, and setting it up
2432  * according to the current status of the SMA. On failure, disable all of the
2433  * extended SMA pin support.
2434  */
2435 static void
2436 ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
2437 {
2438 	struct device *dev = ice_pf_to_dev(pf);
2439 	int err;
2440 
2441 	/* Allocate memory for kernel pins interface */
2442 	info->pin_config = devm_kcalloc(dev, info->n_pins,
2443 					sizeof(*info->pin_config), GFP_KERNEL);
2444 	if (!info->pin_config) {
2445 		ice_ptp_disable_sma_pins_e810t(pf, info);
2446 		return;
2447 	}
2448 
2449 	/* Read current SMA status */
2450 	err = ice_get_sma_config_e810t(&pf->hw, info->pin_config);
2451 	if (err)
2452 		ice_ptp_disable_sma_pins_e810t(pf, info);
2453 }
2454 
2455 /**
2456  * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
2457  * @pf: pointer to the PF instance
2458  * @info: PTP clock capabilities
2459  */
2460 static void
2461 ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
2462 {
2463 	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
2464 		info->n_ext_ts = N_EXT_TS_E810;
2465 		info->n_per_out = N_PER_OUT_E810T;
2466 		info->n_pins = NUM_PTP_PINS_E810T;
2467 		info->verify = ice_verify_pin_e810t;
2468 
2469 		/* Complete setup of the SMA pins */
2470 		ice_ptp_setup_sma_pins_e810t(pf, info);
2471 	} else if (ice_is_e810t(&pf->hw)) {
2472 		info->n_ext_ts = N_EXT_TS_NO_SMA_E810T;
2473 		info->n_per_out = N_PER_OUT_NO_SMA_E810T;
2474 	} else {
2475 		info->n_per_out = N_PER_OUT_E810;
2476 		info->n_ext_ts = N_EXT_TS_E810;
2477 	}
2478 }
2479 
2480 /**
2481  * ice_ptp_setup_pins_e823 - Setup PTP pins in sysfs
2482  * @pf: pointer to the PF instance
2483  * @info: PTP clock capabilities
2484  */
2485 static void
2486 ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info)
2487 {
2488 	info->pps = 1;
2489 	info->n_per_out = 0;
2490 	info->n_ext_ts = 1;
2491 }
2492 
2493 /**
2494  * ice_ptp_set_funcs_e82x - Set specialized functions for E82x support
2495  * @pf: Board private structure
2496  * @info: PTP info to fill
2497  *
2498  * Assign functions to the PTP capabilities structure for E82x devices.
2499  * Functions which operate across all device families should be set directly
2500  * in ice_ptp_set_caps. Only add functions here which are distinct for E82x
2501  * devices.
2502  */
2503 static void
2504 ice_ptp_set_funcs_e82x(struct ice_pf *pf, struct ptp_clock_info *info)
2505 {
2506 #ifdef CONFIG_ICE_HWTS
2507 	if (boot_cpu_has(X86_FEATURE_ART) &&
2508 	    boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
2509 		info->getcrosststamp = ice_ptp_getcrosststamp_e82x;
2510 #endif /* CONFIG_ICE_HWTS */
2511 }
2512 
2513 /**
2514  * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
2515  * @pf: Board private structure
2516  * @info: PTP info to fill
2517  *
2518  * Assign functions to the PTP capabilities structure for E810 devices.
2519  * Functions which operate across all device families should be set directly
2520  * in ice_ptp_set_caps. Only add functions here which are distinct for e810
2521  * devices.
2522  */
2523 static void
2524 ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
2525 {
2526 	info->enable = ice_ptp_gpio_enable_e810;
2527 	ice_ptp_setup_pins_e810(pf, info);
2528 }
2529 
2530 /**
2531  * ice_ptp_set_funcs_e823 - Set specialized functions for E823 support
2532  * @pf: Board private structure
2533  * @info: PTP info to fill
2534  *
2535  * Assign functions to the PTP capabilities structure for E823 devices.
2536  * Functions which operate across all device families should be set directly
2537  * in ice_ptp_set_caps. Only add functions here which are distinct for e823
2538  * devices.
2539  */
2540 static void
2541 ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info)
2542 {
2543 	ice_ptp_set_funcs_e82x(pf, info);
2544 
2545 	info->enable = ice_ptp_gpio_enable_e823;
2546 	ice_ptp_setup_pins_e823(pf, info);
2547 }
2548 
2549 /**
2550  * ice_ptp_set_caps - Set PTP capabilities
2551  * @pf: Board private structure
2552  */
2553 static void ice_ptp_set_caps(struct ice_pf *pf)
2554 {
2555 	struct ptp_clock_info *info = &pf->ptp.info;
2556 	struct device *dev = ice_pf_to_dev(pf);
2557 
2558 	snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
2559 		 dev_driver_string(dev), dev_name(dev));
2560 	info->owner = THIS_MODULE;
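	/* Maximum frequency adjustment advertised to user space, in parts per
	 * billion (100000000 ppb is roughly +/-10%).
	 */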
2561 	info->max_adj = 100000000;
2562 	info->adjtime = ice_ptp_adjtime;
2563 	info->adjfine = ice_ptp_adjfine;
2564 	info->gettimex64 = ice_ptp_gettimex64;
2565 	info->settime64 = ice_ptp_settime64;
2566 
2567 	if (ice_is_e810(&pf->hw))
2568 		ice_ptp_set_funcs_e810(pf, info);
2569 	else if (ice_is_e823(&pf->hw))
2570 		ice_ptp_set_funcs_e823(pf, info);
2571 	else
2572 		ice_ptp_set_funcs_e82x(pf, info);
2573 }
2574 
2575 /**
2576  * ice_ptp_create_clock - Create PTP clock device for userspace
2577  * @pf: Board private structure
2578  *
2579  * This function creates a new PTP clock device. It only creates one if we
2580  * don't already have one. Will return error if it can't create one, but success
2581  * if we already have a device. Should be used by ice_ptp_init to create clock
2582  * initially, and prevent global resets from creating new clock devices.
2583  */
2584 static long ice_ptp_create_clock(struct ice_pf *pf)
2585 {
2586 	struct ptp_clock_info *info;
2587 	struct device *dev;
2588 
2589 	/* No need to create a clock device if we already have one */
2590 	if (pf->ptp.clock)
2591 		return 0;
2592 
2593 	ice_ptp_set_caps(pf);
2594 
2595 	info = &pf->ptp.info;
2596 	dev = ice_pf_to_dev(pf);
2597 
2598 	/* Attempt to register the clock before enabling the hardware. */
2599 	pf->ptp.clock = ptp_clock_register(info, dev);
2600 	if (IS_ERR(pf->ptp.clock)) {
2601 		dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device");
2602 		return PTR_ERR(pf->ptp.clock);
2603 	}
2604 
2605 	return 0;
2606 }
2607 
2608 /**
2609  * ice_ptp_request_ts - Request an available Tx timestamp index
2610  * @tx: the PTP Tx timestamp tracker to request from
2611  * @skb: the SKB to associate with this timestamp request
2612  */
2613 s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
2614 {
2615 	unsigned long flags;
2616 	u8 idx;
2617 
2618 	spin_lock_irqsave(&tx->lock, flags);
2619 
2620 	/* Check that this tracker is accepting new timestamp requests */
2621 	if (!ice_ptp_is_tx_tracker_up(tx)) {
2622 		spin_unlock_irqrestore(&tx->lock, flags);
2623 		return -1;
2624 	}
2625 
2626 	/* Find and set the first available index */
2627 	idx = find_next_zero_bit(tx->in_use, tx->len,
2628 				 tx->last_ll_ts_idx_read + 1);
2629 	if (idx == tx->len)
2630 		idx = find_first_zero_bit(tx->in_use, tx->len);
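	/* The search starts just past the most recently read low latency
	 * timestamp index and wraps around, presumably to avoid immediately
	 * reusing the slot that was last read back.
	 */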
2631 
2632 	if (idx < tx->len) {
2633 		/* We got a valid index that no other thread could have set. Store
2634 		 * a reference to the skb and the start time to allow discarding old
2635 		 * requests.
2636 		 */
2637 		set_bit(idx, tx->in_use);
2638 		clear_bit(idx, tx->stale);
2639 		tx->tstamps[idx].start = jiffies;
2640 		tx->tstamps[idx].skb = skb_get(skb);
2641 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2642 		ice_trace(tx_tstamp_request, skb, idx);
2643 	}
2644 
2645 	spin_unlock_irqrestore(&tx->lock, flags);
2646 
2647 	/* return the appropriate PHY timestamp register index, -1 if no
2648 	 * indexes were available.
2649 	 */
2650 	if (idx >= tx->len)
2651 		return -1;
2652 	else
2653 		return idx + tx->offset;
2654 }
2655 
2656 /**
2657  * ice_ptp_process_ts - Process the PTP Tx timestamps
2658  * @pf: Board private structure
2659  *
2660  * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx
2661  * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise.
2662  */
2663 enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
2664 {
2665 	switch (pf->ptp.tx_interrupt_mode) {
2666 	case ICE_PTP_TX_INTERRUPT_NONE:
2667 		/* This device has the clock owner handle timestamps for it */
2668 		return ICE_TX_TSTAMP_WORK_DONE;
2669 	case ICE_PTP_TX_INTERRUPT_SELF:
2670 		/* This device handles its own timestamps */
2671 		return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
2672 	case ICE_PTP_TX_INTERRUPT_ALL:
2673 		/* This device handles timestamps for all ports */
2674 		return ice_ptp_tx_tstamp_owner(pf);
2675 	default:
2676 		WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
2677 			  pf->ptp.tx_interrupt_mode);
2678 		return ICE_TX_TSTAMP_WORK_DONE;
2679 	}
2680 }
2681 
2682 /**
2683  * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt
2684  * @pf: Board private structure
2685  *
2686  * The device PHY issues Tx timestamp interrupts to the driver for processing
2687  * timestamp data from the PHY. It will not interrupt again until all
2688  * current timestamp data is read. In rare circumstances, it is possible that
2689  * the driver fails to read all outstanding data.
2690  *
2691  * To avoid getting permanently stuck, periodically check if the PHY has
2692  * outstanding timestamp data. If so, trigger an interrupt from software to
2693  * process this data.
2694  */
2695 static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
2696 {
2697 	struct device *dev = ice_pf_to_dev(pf);
2698 	struct ice_hw *hw = &pf->hw;
2699 	bool trigger_oicr = false;
2700 	unsigned int i;
2701 
2702 	if (ice_is_e810(hw))
2703 		return;
2704 
2705 	if (!ice_pf_src_tmr_owned(pf))
2706 		return;
2707 
2708 	for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) {
2709 		u64 tstamp_ready;
2710 		int err;
2711 
2712 		err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
2713 		if (!err && tstamp_ready) {
2714 			trigger_oicr = true;
2715 			break;
2716 		}
2717 	}
2718 
2719 	if (trigger_oicr) {
2720 		/* Trigger a software interrupt, to ensure this data
2721 		 * gets processed.
2722 		 */
2723 		dev_dbg(dev, "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n");
2724 
2725 		wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
2726 		ice_flush(hw);
2727 	}
2728 }
2729 
2730 static void ice_ptp_periodic_work(struct kthread_work *work)
2731 {
2732 	struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
2733 	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
2734 	int err;
2735 
2736 	if (pf->ptp.state != ICE_PTP_READY)
2737 		return;
2738 
2739 	err = ice_ptp_update_cached_phctime(pf);
2740 
2741 	ice_ptp_maybe_trigger_tx_interrupt(pf);
2742 
2743 	/* Run twice a second, or retry in 10 ms if the PHC update failed */
2744 	kthread_queue_delayed_work(ptp->kworker, &ptp->work,
2745 				   msecs_to_jiffies(err ? 10 : 500));
2746 }
2747 
2748 /**
2749  * ice_ptp_prepare_for_reset - Prepare PTP for reset
2750  * @pf: Board private structure
2751  * @reset_type: the reset type being performed
2752  */
2753 void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
2754 {
2755 	struct ice_ptp *ptp = &pf->ptp;
2756 	u8 src_tmr;
2757 
2758 	if (ptp->state != ICE_PTP_READY)
2759 		return;
2760 
2761 	ptp->state = ICE_PTP_RESETTING;
2762 
2763 	/* Disable timestamping for both Tx and Rx */
2764 	ice_ptp_disable_timestamp_mode(pf);
2765 
2766 	kthread_cancel_delayed_work_sync(&ptp->work);
2767 
2768 	if (reset_type == ICE_RESET_PFR)
2769 		return;
2770 
2771 	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
2772 
2773 	/* Disable periodic outputs */
2774 	ice_ptp_disable_all_clkout(pf);
2775 
2776 	src_tmr = ice_get_ptp_src_clock_index(&pf->hw);
2777 
2778 	/* Disable source clock */
2779 	wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);
2780 
2781 	/* Save the current system time to restore the PHC time after reset */
2782 	ptp->reset_time = ktime_get_real_ns();
2783 }
2784 
2785 /**
2786  * ice_ptp_rebuild_owner - Initialize PTP clock owner after reset
2787  * @pf: Board private structure
2788  *
2789  * Companion function for ice_ptp_rebuild() which handles tasks that only the
2790  * PTP clock owner instance should perform.
2791  */
2792 static int ice_ptp_rebuild_owner(struct ice_pf *pf)
2793 {
2794 	struct ice_ptp *ptp = &pf->ptp;
2795 	struct ice_hw *hw = &pf->hw;
2796 	struct timespec64 ts;
2797 	u64 time_diff;
2798 	int err;
2799 
2800 	err = ice_ptp_init_phc(hw);
2801 	if (err)
2802 		return err;
2803 
2804 	/* Acquire the global hardware lock */
2805 	if (!ice_ptp_lock(hw)) {
2806 		err = -EBUSY;
2807 		return err;
2808 	}
2809 
2810 	/* Write the increment time value to PHY and LAN */
2811 	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
2812 	if (err) {
2813 		ice_ptp_unlock(hw);
2814 		return err;
2815 	}
2816 
2817 	/* Write the initial Time value to PHY and LAN using the cached PHC
2818 	 * time before the reset and time difference between stopping and
2819 	 * starting the clock.
2820 	 */
2821 	if (ptp->cached_phc_time) {
2822 		time_diff = ktime_get_real_ns() - ptp->reset_time;
2823 		ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);
2824 	} else {
2825 		ts = ktime_to_timespec64(ktime_get_real());
2826 	}
2827 	err = ice_ptp_write_init(pf, &ts);
2828 	if (err) {
2829 		ice_ptp_unlock(hw);
2830 		return err;
2831 	}
2832 
2833 	/* Release the global hardware lock */
2834 	ice_ptp_unlock(hw);
2835 
2836 	/* Flush software tracking of any outstanding timestamps since we're
2837 	 * about to flush the PHY timestamp block.
2838 	 */
2839 	ice_ptp_flush_all_tx_tracker(pf);
2840 
2841 	if (!ice_is_e810(hw)) {
2842 		/* Enable quad interrupts */
2843 		err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
2844 		if (err)
2845 			return err;
2846 
2847 		ice_ptp_restart_all_phy(pf);
2848 	}
2849 
2850 	/* Re-enable all periodic outputs and external timestamp events */
2851 	ice_ptp_enable_all_clkout(pf);
2852 	ice_ptp_enable_all_extts(pf);
2853 
2854 	return 0;
2855 }
2856 
2857 /**
2858  * ice_ptp_rebuild - Initialize PTP hardware clock support after reset
2859  * @pf: Board private structure
2860  * @reset_type: the reset type being performed
2861  */
2862 void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
2863 {
2864 	struct ice_ptp *ptp = &pf->ptp;
2865 	int err;
2866 
2867 	if (ptp->state == ICE_PTP_READY) {
2868 		ice_ptp_prepare_for_reset(pf, reset_type);
2869 	} else if (ptp->state != ICE_PTP_RESETTING) {
2870 		err = -EINVAL;
2871 		dev_err(ice_pf_to_dev(pf), "PTP was not initialized\n");
2872 		goto err;
2873 	}
2874 
2875 	if (ice_pf_src_tmr_owned(pf) && reset_type != ICE_RESET_PFR) {
2876 		err = ice_ptp_rebuild_owner(pf);
2877 		if (err)
2878 			goto err;
2879 	}
2880 
2881 	ptp->state = ICE_PTP_READY;
2882 
2883 	/* Start periodic work going */
2884 	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
2885 
2886 	dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
2887 	return;
2888 
2889 err:
2890 	ptp->state = ICE_PTP_ERROR;
2891 	dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
2892 }
2893 
2894 /**
2895  * ice_ptp_aux_dev_to_aux_pf - Get auxiliary PF handle for the auxiliary device
2896  * @aux_dev: auxiliary device to get the auxiliary PF for
2897  */
2898 static struct ice_pf *
2899 ice_ptp_aux_dev_to_aux_pf(struct auxiliary_device *aux_dev)
2900 {
2901 	struct ice_ptp_port *aux_port;
2902 	struct ice_ptp *aux_ptp;
2903 
2904 	aux_port = container_of(aux_dev, struct ice_ptp_port, aux_dev);
2905 	aux_ptp = container_of(aux_port, struct ice_ptp, port);
2906 
2907 	return container_of(aux_ptp, struct ice_pf, ptp);
2908 }
2909 
2910 /**
2911  * ice_ptp_aux_dev_to_owner_pf - Get PF handle for the auxiliary device
2912  * @aux_dev: auxiliary device to get the PF for
2913  */
2914 static struct ice_pf *
2915 ice_ptp_aux_dev_to_owner_pf(struct auxiliary_device *aux_dev)
2916 {
2917 	struct ice_ptp_port_owner *ports_owner;
2918 	const struct auxiliary_driver *aux_drv;
2919 	struct ice_ptp *owner_ptp;
2920 
2921 	if (!aux_dev->dev.driver)
2922 		return NULL;
2923 
2924 	aux_drv = to_auxiliary_drv(aux_dev->dev.driver);
2925 	ports_owner = container_of(aux_drv, struct ice_ptp_port_owner,
2926 				   aux_driver);
2927 	owner_ptp = container_of(ports_owner, struct ice_ptp, ports_owner);
2928 	return container_of(owner_ptp, struct ice_pf, ptp);
2929 }
2930 
2931 /**
2932  * ice_ptp_auxbus_probe - Probe auxiliary devices
2933  * @aux_dev: PF's auxiliary device
2934  * @id: Auxiliary device ID
2935  */
2936 static int ice_ptp_auxbus_probe(struct auxiliary_device *aux_dev,
2937 				const struct auxiliary_device_id *id)
2938 {
2939 	struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
2940 	struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev);
2941 
2942 	if (WARN_ON(!owner_pf))
2943 		return -ENODEV;
2944 
2945 	INIT_LIST_HEAD(&aux_pf->ptp.port.list_member);
2946 	mutex_lock(&owner_pf->ptp.ports_owner.lock);
2947 	list_add(&aux_pf->ptp.port.list_member,
2948 		 &owner_pf->ptp.ports_owner.ports);
2949 	mutex_unlock(&owner_pf->ptp.ports_owner.lock);
2950 
2951 	return 0;
2952 }
2953 
2954 /**
2955  * ice_ptp_auxbus_remove - Remove auxiliary devices from the bus
2956  * @aux_dev: PF's auxiliary device
2957  */
2958 static void ice_ptp_auxbus_remove(struct auxiliary_device *aux_dev)
2959 {
2960 	struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
2961 	struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev);
2962 
2963 	mutex_lock(&owner_pf->ptp.ports_owner.lock);
2964 	list_del(&aux_pf->ptp.port.list_member);
2965 	mutex_unlock(&owner_pf->ptp.ports_owner.lock);
2966 }
2967 
2968 /**
2969  * ice_ptp_auxbus_shutdown - Shutdown callback for the PTP auxiliary bus driver
2970  * @aux_dev: PF's auxiliary device
2971  */
2972 static void ice_ptp_auxbus_shutdown(struct auxiliary_device *aux_dev)
2973 {
2974 	/* Nothing to do here, but the auxiliary bus driver callback must be provided */
2975 }
2976 
2977 /**
2978  * ice_ptp_auxbus_suspend - Suspend callback for the PTP auxiliary bus driver
2979  * @aux_dev: PF's auxiliary device
2980  * @state: power management state indicator
2981  */
2982 static int
2983 ice_ptp_auxbus_suspend(struct auxiliary_device *aux_dev, pm_message_t state)
2984 {
2985 	/* Nothing to do here, but the auxiliary bus driver callback must be provided */
2986 	return 0;
2987 }
2988 
2989 /**
2990  * ice_ptp_auxbus_resume - Resume callback for the PTP auxiliary bus driver
2991  * @aux_dev: PF's auxiliary device
2992  */
2993 static int ice_ptp_auxbus_resume(struct auxiliary_device *aux_dev)
2994 {
2995 	/* Nothing to do here, but the auxiliary bus driver callback must be provided */
2996 	return 0;
2997 }
2998 
2999 /**
3000  * ice_ptp_auxbus_create_id_table - Create auxiliary device ID table
3001  * @pf: Board private structure
3002  * @name: auxiliary bus driver name
3003  */
3004 static struct auxiliary_device_id *
3005 ice_ptp_auxbus_create_id_table(struct ice_pf *pf, const char *name)
3006 {
3007 	struct auxiliary_device_id *ids;
3008 
3009 	/* Second id left empty to terminate the array */
3010 	ids = devm_kcalloc(ice_pf_to_dev(pf), 2,
3011 			   sizeof(struct auxiliary_device_id), GFP_KERNEL);
3012 	if (!ids)
3013 		return NULL;
3014 
3015 	snprintf(ids[0].name, sizeof(ids[0].name), "ice.%s", name);
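	/* The auxiliary bus matches devices to drivers on "<modname>.<name>",
	 * so this string must line up with the name given to the per-port
	 * auxiliary devices created in ice_ptp_create_auxbus_device().
	 */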
3016 
3017 	return ids;
3018 }
3019 
3020 /**
3021  * ice_ptp_register_auxbus_driver - Register PTP auxiliary bus driver
3022  * @pf: Board private structure
3023  */
3024 static int ice_ptp_register_auxbus_driver(struct ice_pf *pf)
3025 {
3026 	struct auxiliary_driver *aux_driver;
3027 	struct ice_ptp *ptp;
3028 	struct device *dev;
3029 	char *name;
3030 	int err;
3031 
3032 	ptp = &pf->ptp;
3033 	dev = ice_pf_to_dev(pf);
3034 	aux_driver = &ptp->ports_owner.aux_driver;
3035 	INIT_LIST_HEAD(&ptp->ports_owner.ports);
3036 	mutex_init(&ptp->ports_owner.lock);
3037 	name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
3038 			      pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
3039 			      ice_get_ptp_src_clock_index(&pf->hw));
3040 	if (!name)
3041 		return -ENOMEM;
3042 
3043 	aux_driver->name = name;
3044 	aux_driver->shutdown = ice_ptp_auxbus_shutdown;
3045 	aux_driver->suspend = ice_ptp_auxbus_suspend;
3046 	aux_driver->remove = ice_ptp_auxbus_remove;
3047 	aux_driver->resume = ice_ptp_auxbus_resume;
3048 	aux_driver->probe = ice_ptp_auxbus_probe;
3049 	aux_driver->id_table = ice_ptp_auxbus_create_id_table(pf, name);
3050 	if (!aux_driver->id_table)
3051 		return -ENOMEM;
3052 
3053 	err = auxiliary_driver_register(aux_driver);
3054 	if (err) {
3055 		devm_kfree(dev, aux_driver->id_table);
3056 		dev_err(dev, "Failed registering aux_driver, name <%s>\n",
3057 			name);
3058 	}
3059 
3060 	return err;
3061 }
3062 
3063 /**
3064  * ice_ptp_unregister_auxbus_driver - Unregister PTP auxiliary bus driver
3065  * @pf: Board private structure
3066  */
3067 static void ice_ptp_unregister_auxbus_driver(struct ice_pf *pf)
3068 {
3069 	struct auxiliary_driver *aux_driver = &pf->ptp.ports_owner.aux_driver;
3070 
3071 	auxiliary_driver_unregister(aux_driver);
3072 	devm_kfree(ice_pf_to_dev(pf), aux_driver->id_table);
3073 
3074 	mutex_destroy(&pf->ptp.ports_owner.lock);
3075 }
3076 
3077 /**
3078  * ice_ptp_clock_index - Get the PTP clock index for this device
3079  * @pf: Board private structure
3080  *
3081  * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock
3082  * is associated.
3083  */
3084 int ice_ptp_clock_index(struct ice_pf *pf)
3085 {
3086 	struct auxiliary_device *aux_dev;
3087 	struct ice_pf *owner_pf;
3088 	struct ptp_clock *clock;
3089 
3090 	aux_dev = &pf->ptp.port.aux_dev;
3091 	owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
3092 	if (!owner_pf)
3093 		return -1;
3094 	clock = owner_pf->ptp.clock;
3095 
3096 	return clock ? ptp_clock_index(clock) : -1;
3097 }
3098 
3099 /**
3100  * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
3101  * @pf: Board private structure
3102  *
3103  * Setup and initialize a PTP clock device that represents the device hardware
3104  * clock. Save the clock index for other functions connected to the same
3105  * hardware resource.
3106  */
3107 static int ice_ptp_init_owner(struct ice_pf *pf)
3108 {
3109 	struct ice_hw *hw = &pf->hw;
3110 	struct timespec64 ts;
3111 	int err;
3112 
3113 	err = ice_ptp_init_phc(hw);
3114 	if (err) {
3115 		dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n",
3116 			err);
3117 		return err;
3118 	}
3119 
3120 	/* Acquire the global hardware lock */
3121 	if (!ice_ptp_lock(hw)) {
3122 		err = -EBUSY;
3123 		goto err_exit;
3124 	}
3125 
3126 	/* Write the increment time value to PHY and LAN */
3127 	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
3128 	if (err) {
3129 		ice_ptp_unlock(hw);
3130 		goto err_exit;
3131 	}
3132 
3133 	ts = ktime_to_timespec64(ktime_get_real());
3134 	/* Write the initial Time value to PHY and LAN */
3135 	err = ice_ptp_write_init(pf, &ts);
3136 	if (err) {
3137 		ice_ptp_unlock(hw);
3138 		goto err_exit;
3139 	}
3140 
3141 	/* Release the global hardware lock */
3142 	ice_ptp_unlock(hw);
3143 
3144 	/* Configure PHY interrupt settings */
3145 	err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
3146 	if (err)
3147 		goto err_exit;
3148 
3149 	/* Ensure we have a clock device */
3150 	err = ice_ptp_create_clock(pf);
3151 	if (err)
3152 		goto err_clk;
3153 
3154 	err = ice_ptp_register_auxbus_driver(pf);
3155 	if (err) {
3156 		dev_err(ice_pf_to_dev(pf), "Failed to register PTP auxbus driver");
3157 		goto err_aux;
3158 	}
3159 
3160 	return 0;
3161 err_aux:
3162 	ptp_clock_unregister(pf->ptp.clock);
3163 err_clk:
3164 	pf->ptp.clock = NULL;
3165 err_exit:
3166 	return err;
3167 }
3168 
3169 /**
3170  * ice_ptp_init_work - Initialize PTP work threads
3171  * @pf: Board private structure
3172  * @ptp: PF PTP structure
3173  */
3174 static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
3175 {
3176 	struct kthread_worker *kworker;
3177 
3178 	/* Initialize work functions */
3179 	kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);
3180 
3181 	/* Allocate a kworker for handling work required for the ports
3182 	 * connected to the PTP hardware clock.
3183 	 */
3184 	kworker = kthread_create_worker(0, "ice-ptp-%s",
3185 					dev_name(ice_pf_to_dev(pf)));
3186 	if (IS_ERR(kworker))
3187 		return PTR_ERR(kworker);
3188 
3189 	ptp->kworker = kworker;
3190 
3191 	/* Start periodic work going */
3192 	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
3193 
3194 	return 0;
3195 }
3196 
3197 /**
3198  * ice_ptp_init_port - Initialize PTP port structure
3199  * @pf: Board private structure
3200  * @ptp_port: PTP port structure
3201  */
3202 static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
3203 {
3204 	struct ice_hw *hw = &pf->hw;
3205 
3206 	mutex_init(&ptp_port->ps_lock);
3207 
3208 	switch (hw->ptp.phy_model) {
3209 	case ICE_PHY_ETH56G:
3210 		return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx,
3211 					      ptp_port->port_num);
3212 	case ICE_PHY_E810:
3213 		return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
3214 	case ICE_PHY_E82X:
3215 		kthread_init_delayed_work(&ptp_port->ov_work,
3216 					  ice_ptp_wait_for_offsets);
3217 
3218 		return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
3219 					    ptp_port->port_num);
3220 	default:
3221 		return -ENODEV;
3222 	}
3223 }
3224 
3225 /**
3226  * ice_ptp_release_auxbus_device - Release callback for the PTP auxiliary bus device
3227  * @dev: device that utilizes the auxbus
3228  */
3229 static void ice_ptp_release_auxbus_device(struct device *dev)
3230 {
3231 	/* Nothing to do here, but the auxiliary bus device release callback must be provided */
3232 }
3233 
3234 /**
3235  * ice_ptp_create_auxbus_device - Create PTP auxiliary bus device
3236  * @pf: Board private structure
3237  */
3238 static int ice_ptp_create_auxbus_device(struct ice_pf *pf)
3239 {
3240 	struct auxiliary_device *aux_dev;
3241 	struct ice_ptp *ptp;
3242 	struct device *dev;
3243 	char *name;
3244 	int err;
3245 	u32 id;
3246 
3247 	ptp = &pf->ptp;
3248 	id = ptp->port.port_num;
3249 	dev = ice_pf_to_dev(pf);
3250 
3251 	aux_dev = &ptp->port.aux_dev;
3252 
3253 	name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
3254 			      pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
3255 			      ice_get_ptp_src_clock_index(&pf->hw));
3256 	if (!name)
3257 		return -ENOMEM;
3258 
3259 	aux_dev->name = name;
3260 	aux_dev->id = id;
3261 	aux_dev->dev.release = ice_ptp_release_auxbus_device;
3262 	aux_dev->dev.parent = dev;
3263 
3264 	err = auxiliary_device_init(aux_dev);
3265 	if (err)
3266 		goto aux_err;
3267 
3268 	err = auxiliary_device_add(aux_dev);
3269 	if (err) {
3270 		auxiliary_device_uninit(aux_dev);
3271 		goto aux_err;
3272 	}
3273 
3274 	return 0;
3275 aux_err:
3276 	dev_err(dev, "Failed to create PTP auxiliary bus device <%s>\n", name);
3277 	devm_kfree(dev, name);
3278 	return err;
3279 }
3280 
3281 /**
3282  * ice_ptp_remove_auxbus_device - Remove PTP auxiliary bus device
3283  * @pf: Board private structure
3284  */
3285 static void ice_ptp_remove_auxbus_device(struct ice_pf *pf)
3286 {
3287 	struct auxiliary_device *aux_dev = &pf->ptp.port.aux_dev;
3288 
3289 	auxiliary_device_delete(aux_dev);
3290 	auxiliary_device_uninit(aux_dev);
3291 
3292 	memset(aux_dev, 0, sizeof(*aux_dev));
3293 }
3294 
3295 /**
3296  * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode
3297  * @pf: Board private structure
3298  *
3299  * Initialize the Tx timestamp interrupt mode for this device. For most device
3300  * types, each PF processes the interrupt and manages its own timestamps. For
3301  * E822-based devices, only the clock owner processes the timestamps. Other
3302  * PFs disable the interrupt and do not process their own timestamps.
3303  */
3304 static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
3305 {
3306 	switch (pf->hw.ptp.phy_model) {
3307 	case ICE_PHY_E82X:
3308 		/* E822 based PHY has the clock owner process the interrupt
3309 		 * for all ports.
3310 		 */
3311 		if (ice_pf_src_tmr_owned(pf))
3312 			pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL;
3313 		else
3314 			pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE;
3315 		break;
3316 	default:
3317 		/* other PHY types handle their own Tx interrupt */
3318 		pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF;
3319 	}
3320 }
3321 
3322 /**
3323  * ice_ptp_init - Initialize PTP hardware clock support
3324  * @pf: Board private structure
3325  *
3326  * Set up the device for interacting with the PTP hardware clock for all
3327  * functions, both the function that owns the clock hardware, and the
3328  * functions connected to the clock hardware.
3329  *
3330  * The clock owner will allocate and register a ptp_clock with the
3331  * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work
3332  * items used for asynchronous work such as Tx timestamps and periodic work.
3333  */
3334 void ice_ptp_init(struct ice_pf *pf)
3335 {
3336 	struct ice_ptp *ptp = &pf->ptp;
3337 	struct ice_hw *hw = &pf->hw;
3338 	int err;
3339 
3340 	ptp->state = ICE_PTP_INITIALIZING;
3341 
3342 	ice_ptp_init_hw(hw);
3343 
3344 	ice_ptp_init_tx_interrupt_mode(pf);
3345 
3346 	/* If this function owns the clock hardware, it must allocate and
3347 	 * configure the PTP clock device to represent it.
3348 	 */
3349 	if (ice_pf_src_tmr_owned(pf)) {
3350 		err = ice_ptp_init_owner(pf);
3351 		if (err)
3352 			goto err;
3353 	}
3354 
3355 	ptp->port.port_num = hw->pf_id;
3356 	if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
3357 		ptp->port.port_num = hw->pf_id * 2;
3358 
3359 	err = ice_ptp_init_port(pf, &ptp->port);
3360 	if (err)
3361 		goto err;
3362 
3363 	/* Start the PHY timestamping block */
3364 	ice_ptp_reset_phy_timestamping(pf);
3365 
3366 	/* Configure initial Tx interrupt settings */
3367 	ice_ptp_cfg_tx_interrupt(pf);
3368 
3369 	err = ice_ptp_create_auxbus_device(pf);
3370 	if (err)
3371 		goto err;
3372 
3373 	ptp->state = ICE_PTP_READY;
3374 
3375 	err = ice_ptp_init_work(pf, ptp);
3376 	if (err)
3377 		goto err;
3378 
3379 	dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
3380 	return;
3381 
3382 err:
3383 	/* If we registered a PTP clock, release it */
3384 	if (pf->ptp.clock) {
3385 		ptp_clock_unregister(ptp->clock);
3386 		pf->ptp.clock = NULL;
3387 	}
3388 	ptp->state = ICE_PTP_ERROR;
3389 	dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
3390 }
3391 
3392 /**
3393  * ice_ptp_release - Disable the driver/HW support and unregister the clock
3394  * @pf: Board private structure
3395  *
3396  * This function handles the cleanup work required after initialization by
3397  * clearing out the relevant driver state and unregistering the clock.
3398  */
3399 void ice_ptp_release(struct ice_pf *pf)
3400 {
3401 	if (pf->ptp.state != ICE_PTP_READY)
3402 		return;
3403 
3404 	pf->ptp.state = ICE_PTP_UNINIT;
3405 
3406 	/* Disable timestamping for both Tx and Rx */
3407 	ice_ptp_disable_timestamp_mode(pf);
3408 
3409 	ice_ptp_remove_auxbus_device(pf);
3410 
3411 	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
3412 
3413 	ice_ptp_disable_all_extts(pf);
3414 
3415 	kthread_cancel_delayed_work_sync(&pf->ptp.work);
3416 
3417 	ice_ptp_port_phy_stop(&pf->ptp.port);
3418 	mutex_destroy(&pf->ptp.port.ps_lock);
3419 	if (pf->ptp.kworker) {
3420 		kthread_destroy_worker(pf->ptp.kworker);
3421 		pf->ptp.kworker = NULL;
3422 	}
3423 
3424 	if (ice_pf_src_tmr_owned(pf))
3425 		ice_ptp_unregister_auxbus_driver(pf);
3426 
3427 	if (!pf->ptp.clock)
3428 		return;
3429 
3430 	/* Disable periodic outputs */
3431 	ice_ptp_disable_all_clkout(pf);
3432 
3433 	ptp_clock_unregister(pf->ptp.clock);
3434 	pf->ptp.clock = NULL;
3435 
3436 	dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
3437 }
3438