xref: /linux/drivers/net/ethernet/intel/ice/ice_ptp.c (revision 8f9eb8bb5c5af846a8b1729bd7778d08ca852379)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021, Intel Corporation. */
3 
4 #include "ice.h"
5 #include "ice_lib.h"
6 #include "ice_trace.h"
7 
8 #define E810_OUT_PROP_DELAY_NS 1
9 
10 static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
11 	/* name    idx   func         chan */
12 	{ "GNSS",  GNSS, PTP_PF_EXTTS, 0, { 0, } },
13 	{ "SMA1",  SMA1, PTP_PF_NONE, 1, { 0, } },
14 	{ "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } },
15 	{ "SMA2",  SMA2, PTP_PF_NONE, 2, { 0, } },
16 	{ "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } },
17 };
18 
19 /**
20  * ice_get_sma_config_e810t - Read the SMA control logic configuration
21  * @hw: pointer to the hw struct
22  * @ptp_pins: pointer to the ptp_pin_desc structure
23  *
24  * Read the configuration of the SMA control logic and put it into the
25  * ptp_pin_desc structure
26  */
27 static int
28 ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
29 {
30 	u8 data, i;
31 	int status;
32 
33 	/* Read initial pin state */
34 	status = ice_read_sma_ctrl_e810t(hw, &data);
35 	if (status)
36 		return status;
37 
38 	/* initialize with defaults */
39 	for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
40 		strscpy(ptp_pins[i].name, ice_pin_desc_e810t[i].name,
41 			sizeof(ptp_pins[i].name));
42 		ptp_pins[i].index = ice_pin_desc_e810t[i].index;
43 		ptp_pins[i].func = ice_pin_desc_e810t[i].func;
44 		ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
45 	}
46 
47 	/* Parse SMA1/UFL1 */
48 	switch (data & ICE_SMA1_MASK_E810T) {
49 	case ICE_SMA1_MASK_E810T:
50 	default:
51 		ptp_pins[SMA1].func = PTP_PF_NONE;
52 		ptp_pins[UFL1].func = PTP_PF_NONE;
53 		break;
54 	case ICE_SMA1_DIR_EN_E810T:
55 		ptp_pins[SMA1].func = PTP_PF_PEROUT;
56 		ptp_pins[UFL1].func = PTP_PF_NONE;
57 		break;
58 	case ICE_SMA1_TX_EN_E810T:
59 		ptp_pins[SMA1].func = PTP_PF_EXTTS;
60 		ptp_pins[UFL1].func = PTP_PF_NONE;
61 		break;
62 	case 0:
63 		ptp_pins[SMA1].func = PTP_PF_EXTTS;
64 		ptp_pins[UFL1].func = PTP_PF_PEROUT;
65 		break;
66 	}
67 
68 	/* Parse SMA2/UFL2 */
69 	switch (data & ICE_SMA2_MASK_E810T) {
70 	case ICE_SMA2_MASK_E810T:
71 	default:
72 		ptp_pins[SMA2].func = PTP_PF_NONE;
73 		ptp_pins[UFL2].func = PTP_PF_NONE;
74 		break;
75 	case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
76 		ptp_pins[SMA2].func = PTP_PF_EXTTS;
77 		ptp_pins[UFL2].func = PTP_PF_NONE;
78 		break;
79 	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
80 		ptp_pins[SMA2].func = PTP_PF_PEROUT;
81 		ptp_pins[UFL2].func = PTP_PF_NONE;
82 		break;
83 	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T):
84 		ptp_pins[SMA2].func = PTP_PF_NONE;
85 		ptp_pins[UFL2].func = PTP_PF_EXTTS;
86 		break;
87 	case ICE_SMA2_DIR_EN_E810T:
88 		ptp_pins[SMA2].func = PTP_PF_PEROUT;
89 		ptp_pins[UFL2].func = PTP_PF_EXTTS;
90 		break;
91 	}
92 
93 	return 0;
94 }
95 
96 /**
97  * ice_ptp_set_sma_config_e810t - Set the SMA control logic configuration
98  * @hw: pointer to the hw struct
99  * @ptp_pins: pointer to the ptp_pin_desc structure
100  *
101  * Set the configuration of the SMA control logic based on the configuration
102  * in the ptp_pins parameter
103  */
104 static int
105 ice_ptp_set_sma_config_e810t(struct ice_hw *hw,
106 			     const struct ptp_pin_desc *ptp_pins)
107 {
108 	int status;
109 	u8 data;
110 
111 	/* SMA1 and UFL1 cannot be set to TX at the same time */
112 	if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
113 	    ptp_pins[UFL1].func == PTP_PF_PEROUT)
114 		return -EINVAL;
115 
116 	/* SMA2 and UFL2 cannot be set to RX at the same time */
117 	if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
118 	    ptp_pins[UFL2].func == PTP_PF_EXTTS)
119 		return -EINVAL;
120 
121 	/* Read initial pin state value */
122 	status = ice_read_sma_ctrl_e810t(hw, &data);
123 	if (status)
124 		return status;
125 
126 	/* Set the right state based on the desired configuration */
127 	data &= ~ICE_SMA1_MASK_E810T;
128 	if (ptp_pins[SMA1].func == PTP_PF_NONE &&
129 	    ptp_pins[UFL1].func == PTP_PF_NONE) {
130 		dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled");
131 		data |= ICE_SMA1_MASK_E810T;
132 	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
133 		   ptp_pins[UFL1].func == PTP_PF_NONE) {
134 		dev_info(ice_hw_to_dev(hw), "SMA1 RX");
135 		data |= ICE_SMA1_TX_EN_E810T;
136 	} else if (ptp_pins[SMA1].func == PTP_PF_NONE &&
137 		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
138 		/* U.FL 1 TX will always enable SMA 1 RX */
139 		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
140 	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
141 		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
142 		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
143 	} else if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
144 		   ptp_pins[UFL1].func == PTP_PF_NONE) {
145 		dev_info(ice_hw_to_dev(hw), "SMA1 TX");
146 		data |= ICE_SMA1_DIR_EN_E810T;
147 	}
148 
149 	data &= ~ICE_SMA2_MASK_E810T;
150 	if (ptp_pins[SMA2].func == PTP_PF_NONE &&
151 	    ptp_pins[UFL2].func == PTP_PF_NONE) {
152 		dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled");
153 		data |= ICE_SMA2_MASK_E810T;
154 	} else if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
155 		   ptp_pins[UFL2].func == PTP_PF_NONE) {
156 		dev_info(ice_hw_to_dev(hw), "SMA2 RX");
157 		data |= (ICE_SMA2_TX_EN_E810T |
158 			 ICE_SMA2_UFL2_RX_DIS_E810T);
159 	} else if (ptp_pins[SMA2].func == PTP_PF_NONE &&
160 		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
161 		dev_info(ice_hw_to_dev(hw), "U.FL2 RX");
162 		data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T);
163 	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
164 		   ptp_pins[UFL2].func == PTP_PF_NONE) {
165 		dev_info(ice_hw_to_dev(hw), "SMA2 TX");
166 		data |= (ICE_SMA2_DIR_EN_E810T |
167 			 ICE_SMA2_UFL2_RX_DIS_E810T);
168 	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
169 		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
170 		dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX");
171 		data |= ICE_SMA2_DIR_EN_E810T;
172 	}
173 
174 	return ice_write_sma_ctrl_e810t(hw, data);
175 }
176 
177 /**
178  * ice_ptp_set_sma_e810t - Set the configuration of a single SMA pin
179  * @info: the driver's PTP info structure
180  * @pin: pin index in kernel structure
181  * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT)
182  *
183  * Set the configuration of a single SMA pin
184  */
185 static int
186 ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin,
187 		      enum ptp_pin_function func)
188 {
189 	struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T];
190 	struct ice_pf *pf = ptp_info_to_pf(info);
191 	struct ice_hw *hw = &pf->hw;
192 	int err;
193 
194 	if (pin < SMA1 || func > PTP_PF_PEROUT)
195 		return -EOPNOTSUPP;
196 
197 	err = ice_get_sma_config_e810t(hw, ptp_pins);
198 	if (err)
199 		return err;
200 
201 	/* Disable the same function on the other pin sharing the channel */
202 	if (pin == SMA1 && ptp_pins[UFL1].func == func)
203 		ptp_pins[UFL1].func = PTP_PF_NONE;
204 	if (pin == UFL1 && ptp_pins[SMA1].func == func)
205 		ptp_pins[SMA1].func = PTP_PF_NONE;
206 
207 	if (pin == SMA2 && ptp_pins[UFL2].func == func)
208 		ptp_pins[UFL2].func = PTP_PF_NONE;
209 	if (pin == UFL2 && ptp_pins[SMA2].func == func)
210 		ptp_pins[SMA2].func = PTP_PF_NONE;
211 
212 	/* Set up new pin function in the temp table */
213 	ptp_pins[pin].func = func;
214 
215 	return ice_ptp_set_sma_config_e810t(hw, ptp_pins);
216 }
217 
218 /**
219  * ice_verify_pin_e810t - Verify if a pin supports the requested function
220  * @info: the driver's PTP info structure
221  * @pin: Pin index
222  * @func: Assigned function
223  * @chan: Assigned channel
224  *
225  * Verify that the pin supports the requested function and check pin
226  * consistency, then reconfigure the SMA logic attached to the given pin to
227  * enable its desired functionality
228  */
229 static int
230 ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
231 		     enum ptp_pin_function func, unsigned int chan)
232 {
233 	/* Don't allow channel reassignment */
234 	if (chan != ice_pin_desc_e810t[pin].chan)
235 		return -EOPNOTSUPP;
236 
237 	/* Check if functions are properly assigned */
238 	switch (func) {
239 	case PTP_PF_NONE:
240 		break;
241 	case PTP_PF_EXTTS:
242 		if (pin == UFL1)
243 			return -EOPNOTSUPP;
244 		break;
245 	case PTP_PF_PEROUT:
246 		if (pin == UFL2 || pin == GNSS)
247 			return -EOPNOTSUPP;
248 		break;
249 	case PTP_PF_PHYSYNC:
250 		return -EOPNOTSUPP;
251 	}
252 
253 	return ice_ptp_set_sma_e810t(info, pin, func);
254 }
255 
256 /**
257  * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
258  * @pf: Board private structure
259  *
260  * Program the device to respond appropriately to the Tx timestamp interrupt
261  * cause.
262  */
263 static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf)
264 {
265 	struct ice_hw *hw = &pf->hw;
266 	bool enable;
267 	u32 val;
268 
269 	switch (pf->ptp.tx_interrupt_mode) {
270 	case ICE_PTP_TX_INTERRUPT_ALL:
271 		/* React to interrupts across all quads. */
272 		wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
273 		enable = true;
274 		break;
275 	case ICE_PTP_TX_INTERRUPT_NONE:
276 		/* Do not react to interrupts on any quad. */
277 		wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
278 		enable = false;
279 		break;
280 	case ICE_PTP_TX_INTERRUPT_SELF:
281 	default:
282 		enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON;
283 		break;
284 	}
285 
286 	/* Configure the Tx timestamp interrupt */
287 	val = rd32(hw, PFINT_OICR_ENA);
288 	if (enable)
289 		val |= PFINT_OICR_TSYN_TX_M;
290 	else
291 		val &= ~PFINT_OICR_TSYN_TX_M;
292 	wr32(hw, PFINT_OICR_ENA, val);
293 }
294 
295 /**
296  * ice_set_rx_tstamp - Enable or disable Rx timestamping
297  * @pf: The PF pointer to search in
298  * @on: bool value for whether timestamps are enabled or disabled
299  */
300 static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
301 {
302 	struct ice_vsi *vsi;
303 	u16 i;
304 
305 	vsi = ice_get_main_vsi(pf);
306 	if (!vsi || !vsi->rx_rings)
307 		return;
308 
309 	/* Set the timestamp flag for all the Rx rings */
310 	ice_for_each_rxq(vsi, i) {
311 		if (!vsi->rx_rings[i])
312 			continue;
313 		vsi->rx_rings[i]->ptp_rx = on;
314 	}
315 }
316 
317 /**
318  * ice_ptp_disable_timestamp_mode - Disable current timestamp mode
319  * @pf: Board private structure
320  *
321  * Called during preparation for reset to temporarily disable timestamping on
322  * the device. Called during remove to disable timestamping while cleaning up
323  * driver resources.
324  */
325 static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf)
326 {
327 	struct ice_hw *hw = &pf->hw;
328 	u32 val;
329 
330 	val = rd32(hw, PFINT_OICR_ENA);
331 	val &= ~PFINT_OICR_TSYN_TX_M;
332 	wr32(hw, PFINT_OICR_ENA, val);
333 
334 	ice_set_rx_tstamp(pf, false);
335 }
336 
337 /**
338  * ice_ptp_restore_timestamp_mode - Restore timestamp configuration
339  * @pf: Board private structure
340  *
341  * Called at the end of rebuild to restore timestamp configuration after
342  * a device reset.
343  */
344 void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
345 {
346 	struct ice_hw *hw = &pf->hw;
347 	bool enable_rx;
348 
349 	ice_ptp_cfg_tx_interrupt(pf);
350 
351 	enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
352 	ice_set_rx_tstamp(pf, enable_rx);
353 
354 	/* Trigger an immediate software interrupt to ensure that timestamps
355 	 * which occurred during reset are handled now.
356 	 */
357 	wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
358 	ice_flush(hw);
359 }
360 
361 /**
362  * ice_ptp_read_src_clk_reg - Read the source clock register
363  * @pf: Board private structure
364  * @sts: Optional parameter for holding a pair of system timestamps from
365  *       the system clock. Will be ignored if NULL is given.
366  */
367 static u64
368 ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
369 {
370 	struct ice_hw *hw = &pf->hw;
371 	u32 hi, lo, lo2;
372 	u8 tmr_idx;
373 
374 	tmr_idx = ice_get_ptp_src_clock_index(hw);
375 	guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
376 	/* Read the system timestamp pre PHC read */
377 	ptp_read_system_prets(sts);
378 
379 	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
380 
381 	/* Read the system timestamp post PHC read */
382 	ptp_read_system_postts(sts);
383 
384 	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
385 	lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));
386 
387 	if (lo2 < lo) {
388 		/* if TIME_L rolled over read TIME_L again and update
389 		 * system timestamps
390 		 */
391 		ptp_read_system_prets(sts);
392 		lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
393 		ptp_read_system_postts(sts);
394 		hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
395 	}
396 
397 	return ((u64)hi << 32) | lo;
398 }
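
/* Worked example of the rollover check above, with illustrative values only:
 * if the first TIME_L read returns 0xFFFFFFF0 and the re-read after TIME_H
 * returns 0x00000010, then lo2 < lo, meaning TIME_H incremented between the
 * two reads. Re-reading TIME_L and TIME_H inside the branch yields a coherent
 * { hi, lo } pair instead of pairing the pre-rollover lo with the
 * post-rollover hi.
 */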
399 
400 /**
401  * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
402  * @cached_phc_time: recently cached copy of PHC time
403  * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
404  *
405  * Hardware captures timestamps which contain only 32 bits of nominal
406  * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
407  * Note that the captured timestamp values may be 40 bits, but the lower
408  * 8 bits are sub-nanoseconds and generally discarded.
409  *
410  * Extend the 32bit nanosecond timestamp using the following algorithm and
411  * assumptions:
412  *
413  * 1) have a recently cached copy of the PHC time
414  * 2) assume that the in_tstamp was captured within 2^31 nanoseconds (~2.1
415  *    seconds) of when the PHC time was captured.
416  * 3) calculate the delta between the cached time and the timestamp
417  * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
418  *    captured after the PHC time. In this case, the full timestamp is just
419  *    the cached PHC time plus the delta.
420  * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
421  *    timestamp was captured *before* the PHC time, i.e. because the PHC
422  *    cache was updated after the timestamp was captured by hardware. In this
423  *    case, the full timestamp is the cached time minus the inverse delta.
424  *
425  * This algorithm works even if the PHC time was updated after a Tx timestamp
426  * was requested, but before the Tx timestamp event was reported from
427  * hardware.
428  *
429  * This calculation primarily relies on keeping the cached PHC time up to
430  * date. If the timestamp was captured more than 2^31 nanoseconds after the
431  * PHC time, it is possible that the lower 32bits of PHC time have
432  * overflowed more than once, and we might generate an incorrect timestamp.
433  *
434  * This is prevented by (a) periodically updating the cached PHC time once
435  * a second, and (b) discarding any Tx timestamp packet if it has waited for
436  * a timestamp for more than one second.
437  */
438 static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
439 {
440 	u32 delta, phc_time_lo;
441 	u64 ns;
442 
443 	/* Extract the lower 32 bits of the PHC time */
444 	phc_time_lo = (u32)cached_phc_time;
445 
446 	/* Calculate the delta between the lower 32bits of the cached PHC
447 	 * time and the in_tstamp value
448 	 */
449 	delta = (in_tstamp - phc_time_lo);
450 
451 	/* Do not assume that the in_tstamp is always more recent than the
452 	 * cached PHC time. If the delta is large, the in_tstamp was likely
453 	 * captured before the cached PHC time, so the reversed delta must
454 	 * be subtracted rather than added.
455 	 */
456 	if (delta > (U32_MAX / 2)) {
457 		/* reverse the delta calculation here */
458 		delta = (phc_time_lo - in_tstamp);
459 		ns = cached_phc_time - delta;
460 	} else {
461 		ns = cached_phc_time + delta;
462 	}
463 
464 	return ns;
465 }
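
/* Worked numeric example of the extension above (illustrative values only):
 * with cached_phc_time = 0x0000000580000000 and in_tstamp = 0x7FFFFFFF,
 * phc_time_lo is 0x80000000 and delta = 0x7FFFFFFF - 0x80000000 = 0xFFFFFFFF,
 * which is larger than U32_MAX / 2. The timestamp is therefore treated as
 * older than the cached time: delta is recomputed as 0x80000000 - 0x7FFFFFFF,
 * i.e. 1, and the result is cached_phc_time - 1.
 */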
466 
467 /**
468  * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
469  * @pf: Board private structure
470  * @in_tstamp: Ingress/egress 40b timestamp value
471  *
472  * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
473  * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
474  *
475  *  *--------------------------------------------------------------*
476  *  | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
477  *  *--------------------------------------------------------------*
478  *
479  * The low bit is an indicator of whether the timestamp is valid. The next
480  * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
481  * and the remaining 32 bits are the lower 32 bits of the PHC timer.
482  *
483  * It is assumed that the caller verifies the timestamp is valid prior to
484  * calling this function.
485  *
486  * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
487  * time stored in the device private PTP structure as the basis for timestamp
488  * extension.
489  *
490  * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
491  * algorithm.
492  */
493 static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
494 {
495 	const u64 mask = GENMASK_ULL(31, 0);
496 	unsigned long discard_time;
497 
498 	/* Discard the hardware timestamp if the cached PHC time is too old */
499 	discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
500 	if (time_is_before_jiffies(discard_time)) {
501 		pf->ptp.tx_hwtstamp_discarded++;
502 		return 0;
503 	}
504 
505 	return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
506 				     (in_tstamp >> 8) & mask);
507 }
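
/* Illustrative sketch of the extraction above, using a hypothetical raw PHY
 * value of 0x12345678AB: the low byte 0xAB holds the valid bit (bit 0) and
 * the 7 sub-nanosecond bits, so (in_tstamp >> 8) & GENMASK_ULL(31, 0) yields
 * 0x12345678 as the 32-bit nominal nanoseconds that are then extended against
 * the cached PHC time by ice_ptp_extend_32b_ts().
 */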
508 
509 /**
510  * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps
511  * @tx: the PTP Tx timestamp tracker to check
512  *
513  * Check that a given PTP Tx timestamp tracker is up, i.e. that it is ready
514  * to accept new timestamp requests.
515  *
516  * Assumes the tx->lock spinlock is already held.
517  */
518 static bool
519 ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
520 {
521 	lockdep_assert_held(&tx->lock);
522 
523 	return tx->init && !tx->calibrating;
524 }
525 
526 /**
527  * ice_ptp_req_tx_single_tstamp - Request Tx timestamp for a port from FW
528  * @tx: the PTP Tx timestamp tracker
529  * @idx: index of the timestamp to request
530  */
531 void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
532 {
533 	struct ice_ptp_port *ptp_port;
534 	struct sk_buff *skb;
535 	struct ice_pf *pf;
536 
537 	if (!tx->init)
538 		return;
539 
540 	ptp_port = container_of(tx, struct ice_ptp_port, tx);
541 	pf = ptp_port_to_pf(ptp_port);
542 
543 	/* Drop packets which have waited for more than 2 seconds */
544 	if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
545 		/* Count the number of Tx timestamps that timed out */
546 		pf->ptp.tx_hwtstamp_timeouts++;
547 
548 		skb = tx->tstamps[idx].skb;
549 		tx->tstamps[idx].skb = NULL;
550 		clear_bit(idx, tx->in_use);
551 
552 		dev_kfree_skb_any(skb);
553 		return;
554 	}
555 
556 	ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
557 
558 	/* Write TS index to read to the PF register so the FW can read it */
559 	wr32(&pf->hw, PF_SB_ATQBAL,
560 	     TS_LL_READ_TS_INTR | FIELD_PREP(TS_LL_READ_TS_IDX, idx) |
561 	     TS_LL_READ_TS);
562 	tx->last_ll_ts_idx_read = idx;
563 }
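
/* Note on the request/completion pairing above (an interpretation of the
 * code, not an additional hardware guarantee): the write combines
 * TS_LL_READ_TS_INTR, the index in the TS_LL_READ_TS_IDX field and the
 * TS_LL_READ_TS trigger bit, and ice_ptp_complete_tx_single_tstamp() later
 * treats a cleared TS_LL_READ_TS bit as the signal that firmware has latched
 * the timestamp into PF_SB_ATQBAL/PF_SB_ATQBAH.
 */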
564 
565 /**
566  * ice_ptp_complete_tx_single_tstamp - Complete Tx timestamp for a port
567  * @tx: the PTP Tx timestamp tracker
568  */
569 void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
570 {
571 	struct skb_shared_hwtstamps shhwtstamps = {};
572 	u8 idx = tx->last_ll_ts_idx_read;
573 	struct ice_ptp_port *ptp_port;
574 	u64 raw_tstamp, tstamp;
575 	bool drop_ts = false;
576 	struct sk_buff *skb;
577 	struct ice_pf *pf;
578 	u32 val;
579 
580 	if (!tx->init || tx->last_ll_ts_idx_read < 0)
581 		return;
582 
583 	ptp_port = container_of(tx, struct ice_ptp_port, tx);
584 	pf = ptp_port_to_pf(ptp_port);
585 
586 	ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
587 
588 	val = rd32(&pf->hw, PF_SB_ATQBAL);
589 
590 	/* When the bit is cleared, the TS is ready in the register */
591 	if (val & TS_LL_READ_TS) {
592 		dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready");
593 		return;
594 	}
595 
596 	/* The high 8 bits of the timestamp are in bits 16:23 */
597 	raw_tstamp = FIELD_GET(TS_LL_READ_TS_HIGH, val);
598 	raw_tstamp <<= 32;
599 
600 	/* Read the low 32 bit value */
601 	raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH);
602 
603 	/* For devices using this interface, always verify that the timestamp
604 	 * differs from the last cached timestamp value.
605 	 */
606 	if (raw_tstamp == tx->tstamps[idx].cached_tstamp)
607 		return;
608 
609 	tx->tstamps[idx].cached_tstamp = raw_tstamp;
610 	clear_bit(idx, tx->in_use);
611 	skb = tx->tstamps[idx].skb;
612 	tx->tstamps[idx].skb = NULL;
613 	if (test_and_clear_bit(idx, tx->stale))
614 		drop_ts = true;
615 
616 	if (!skb)
617 		return;
618 
619 	if (drop_ts) {
620 		dev_kfree_skb_any(skb);
621 		return;
622 	}
623 
624 	/* Extend the timestamp using cached PHC time */
625 	tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
626 	if (tstamp) {
627 		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
628 		ice_trace(tx_tstamp_complete, skb, idx);
629 	}
630 
631 	skb_tstamp_tx(skb, &shhwtstamps);
632 	dev_kfree_skb_any(skb);
633 }
634 
635 /**
636  * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port
637  * @tx: the PTP Tx timestamp tracker
638  *
639  * Process timestamps captured by the PHY associated with this port. To do
640  * this, loop over each index with a waiting skb.
641  *
642  * If a given index has a valid timestamp, perform the following steps:
643  *
644  * 1) check that the timestamp request is not stale
645  * 2) check that a timestamp is ready and available in the PHY memory bank
646  * 3) read and copy the timestamp out of the PHY register
647  * 4) unlock the index by clearing the associated in_use bit
648  * 5) check if the timestamp is stale, and discard if so
649  * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value
650  * 7) send this 64 bit timestamp to the stack
651  *
652  * Note that we do not hold the tracking lock while reading the Tx timestamp.
653  * This is because reading the timestamp requires taking a mutex that might
654  * sleep.
655  *
656  * The only place where we set in_use is when a new timestamp is initiated
657  * with a slot index. This is only called in the hard xmit routine where an
658  * SKB has a request flag set. The only places where we clear this bit are
659  * this function, or during teardown when the Tx timestamp tracker is being
660  * removed. A timestamp index will never be re-used until the in_use bit for
661  * that index is cleared.
662  *
663  * If a Tx thread starts a new timestamp, we might not begin processing it
664  * right away but we will notice it at the end when we re-queue the task.
665  *
666  * If a Tx thread starts a new timestamp just after this function exits, the
667  * interrupt for that timestamp should re-trigger this function once
668  * a timestamp is ready.
669  *
670  * In cases where the PTP hardware clock was directly adjusted, some
671  * timestamps may not be able to safely use the timestamp extension math. In
672  * this case, software will set the stale bit for any outstanding Tx
673  * timestamps when the clock is adjusted. Then this function will discard
674  * those captured timestamps instead of sending them to the stack.
675  *
676  * If a Tx packet has been waiting for more than 2 seconds, it is not possible
677  * to correctly extend the timestamp using the cached PHC time. It is
678  * extremely unlikely that a packet will ever take this long to timestamp. If
679  * we detect a Tx timestamp request that has waited for this long we assume
680  * the packet will never be sent by hardware and discard it without reading
681  * the timestamp register.
682  */
683 static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
684 {
685 	struct ice_ptp_port *ptp_port;
686 	unsigned long flags;
687 	struct ice_pf *pf;
688 	struct ice_hw *hw;
689 	u64 tstamp_ready;
690 	bool link_up;
691 	int err;
692 	u8 idx;
693 
694 	ptp_port = container_of(tx, struct ice_ptp_port, tx);
695 	pf = ptp_port_to_pf(ptp_port);
696 	hw = &pf->hw;
697 
698 	/* Read the Tx ready status first */
699 	if (tx->has_ready_bitmap) {
700 		err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
701 		if (err)
702 			return;
703 	}
704 
705 	/* Drop packets if the link went down */
706 	link_up = ptp_port->link_up;
707 
708 	for_each_set_bit(idx, tx->in_use, tx->len) {
709 		struct skb_shared_hwtstamps shhwtstamps = {};
710 		u8 phy_idx = idx + tx->offset;
711 		u64 raw_tstamp = 0, tstamp;
712 		bool drop_ts = !link_up;
713 		struct sk_buff *skb;
714 
715 		/* Drop packets which have waited for more than 2 seconds */
716 		if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
717 			drop_ts = true;
718 
719 			/* Count the number of Tx timestamps that timed out */
720 			pf->ptp.tx_hwtstamp_timeouts++;
721 		}
722 
723 		/* Only read a timestamp from the PHY if it's marked as ready
724 		 * by the tstamp_ready register. This avoids unnecessary
725 		 * reading of timestamps which are not yet valid. This is
726 		 * important as we must read all valid timestamps, and only
727 		 * valid timestamps, during each interrupt.
728 		 * If we do not, the hardware logic for generating a new
729 		 * interrupt can get stuck on some devices.
730 		 */
731 		if (tx->has_ready_bitmap &&
732 		    !(tstamp_ready & BIT_ULL(phy_idx))) {
733 			if (drop_ts)
734 				goto skip_ts_read;
735 
736 			continue;
737 		}
738 
739 		ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
740 
741 		err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp);
742 		if (err && !drop_ts)
743 			continue;
744 
745 		ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
746 
747 		/* For PHYs which don't implement a proper timestamp ready
748 		 * bitmap, verify that the timestamp value is different
749 		 * from the last cached timestamp. If it is not, skip this for
750 		 * now assuming it hasn't yet been captured by hardware.
751 		 */
752 		if (!drop_ts && !tx->has_ready_bitmap &&
753 		    raw_tstamp == tx->tstamps[idx].cached_tstamp)
754 			continue;
755 
756 		/* Discard any timestamp value without the valid bit set */
757 		if (!(raw_tstamp & ICE_PTP_TS_VALID))
758 			drop_ts = true;
759 
760 skip_ts_read:
761 		spin_lock_irqsave(&tx->lock, flags);
762 		if (!tx->has_ready_bitmap && raw_tstamp)
763 			tx->tstamps[idx].cached_tstamp = raw_tstamp;
764 		clear_bit(idx, tx->in_use);
765 		skb = tx->tstamps[idx].skb;
766 		tx->tstamps[idx].skb = NULL;
767 		if (test_and_clear_bit(idx, tx->stale))
768 			drop_ts = true;
769 		spin_unlock_irqrestore(&tx->lock, flags);
770 
771 		/* It is unlikely but possible that the SKB will have been
772 		 * flushed at this point due to link change or teardown.
773 		 */
774 		if (!skb)
775 			continue;
776 
777 		if (drop_ts) {
778 			dev_kfree_skb_any(skb);
779 			continue;
780 		}
781 
782 		/* Extend the timestamp using cached PHC time */
783 		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
784 		if (tstamp) {
785 			shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
786 			ice_trace(tx_tstamp_complete, skb, idx);
787 		}
788 
789 		skb_tstamp_tx(skb, &shhwtstamps);
790 		dev_kfree_skb_any(skb);
791 	}
792 }
793 
794 /**
795  * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device
796  * @pf: Board private structure
797  */
798 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
799 {
800 	struct ice_ptp_port *port;
801 	unsigned int i;
802 
803 	mutex_lock(&pf->ptp.ports_owner.lock);
804 	list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member) {
805 		struct ice_ptp_tx *tx = &port->tx;
806 
807 		if (!tx || !tx->init)
808 			continue;
809 
810 		ice_ptp_process_tx_tstamp(tx);
811 	}
812 	mutex_unlock(&pf->ptp.ports_owner.lock);
813 
814 	for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) {
815 		u64 tstamp_ready;
816 		int err;
817 
818 		/* Read the Tx ready status first */
819 		err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
820 		if (err)
821 			break;
822 		else if (tstamp_ready)
823 			return ICE_TX_TSTAMP_WORK_PENDING;
824 	}
825 
826 	return ICE_TX_TSTAMP_WORK_DONE;
827 }
828 
829 /**
830  * ice_ptp_tx_tstamp - Process Tx timestamps for this function
831  * @tx: the PTP Tx timestamp tracker to process
832  *
833  * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete
834  * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise.
835  */
836 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
837 {
838 	bool more_timestamps;
839 	unsigned long flags;
840 
841 	if (!tx->init)
842 		return ICE_TX_TSTAMP_WORK_DONE;
843 
844 	/* Process the Tx timestamp tracker */
845 	ice_ptp_process_tx_tstamp(tx);
846 
847 	/* Check if there are outstanding Tx timestamps */
848 	spin_lock_irqsave(&tx->lock, flags);
849 	more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);
850 	spin_unlock_irqrestore(&tx->lock, flags);
851 
852 	if (more_timestamps)
853 		return ICE_TX_TSTAMP_WORK_PENDING;
854 
855 	return ICE_TX_TSTAMP_WORK_DONE;
856 }
857 
858 /**
859  * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
860  * @tx: Tx tracking structure to initialize
861  *
862  * Assumes that the length has already been initialized. Do not call directly;
863  * use one of the ice_ptp_init_tx_* functions instead.
864  */
865 static int
866 ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
867 {
868 	unsigned long *in_use, *stale;
869 	struct ice_tx_tstamp *tstamps;
870 
871 	tstamps = kcalloc(tx->len, sizeof(*tstamps), GFP_KERNEL);
872 	in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
873 	stale = bitmap_zalloc(tx->len, GFP_KERNEL);
874 
875 	if (!tstamps || !in_use || !stale) {
876 		kfree(tstamps);
877 		bitmap_free(in_use);
878 		bitmap_free(stale);
879 
880 		return -ENOMEM;
881 	}
882 
883 	tx->tstamps = tstamps;
884 	tx->in_use = in_use;
885 	tx->stale = stale;
886 	tx->init = 1;
887 	tx->last_ll_ts_idx_read = -1;
888 
889 	spin_lock_init(&tx->lock);
890 
891 	return 0;
892 }
893 
894 /**
895  * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
896  * @pf: Board private structure
897  * @tx: the tracker to flush
898  *
899  * Called during teardown when a Tx tracker is being removed.
900  */
901 static void
902 ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
903 {
904 	struct ice_hw *hw = &pf->hw;
905 	unsigned long flags;
906 	u64 tstamp_ready;
907 	int err;
908 	u8 idx;
909 
910 	err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
911 	if (err) {
912 		dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n",
913 			tx->block, err);
914 
915 		/* If we fail to read the Tx timestamp ready bitmap just
916 		 * skip clearing the PHY timestamps.
917 		 */
918 		tstamp_ready = 0;
919 	}
920 
921 	for_each_set_bit(idx, tx->in_use, tx->len) {
922 		u8 phy_idx = idx + tx->offset;
923 		struct sk_buff *skb;
924 
925 		/* In case this timestamp is ready, we need to clear it. */
926 		if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx)))
927 			ice_clear_phy_tstamp(hw, tx->block, phy_idx);
928 
929 		spin_lock_irqsave(&tx->lock, flags);
930 		skb = tx->tstamps[idx].skb;
931 		tx->tstamps[idx].skb = NULL;
932 		clear_bit(idx, tx->in_use);
933 		clear_bit(idx, tx->stale);
934 		spin_unlock_irqrestore(&tx->lock, flags);
935 
936 		/* Count the number of Tx timestamps flushed */
937 		pf->ptp.tx_hwtstamp_flushed++;
938 
939 		/* Free the SKB after we've cleared the bit */
940 		dev_kfree_skb_any(skb);
941 	}
942 }
943 
944 /**
945  * ice_ptp_mark_tx_tracker_stale - Mark unfinished timestamps as stale
946  * @tx: the tracker to mark
947  *
948  * Mark currently outstanding Tx timestamps as stale. This prevents sending
949  * their timestamp value to the stack. This is required to prevent extending
950  * the 40bit hardware timestamp incorrectly.
951  *
952  * This should be called when the PTP clock is modified such as after a set
953  * time request.
954  */
955 static void
956 ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
957 {
958 	unsigned long flags;
959 
960 	spin_lock_irqsave(&tx->lock, flags);
961 	bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len);
962 	spin_unlock_irqrestore(&tx->lock, flags);
963 }
964 
965 /**
966  * ice_ptp_flush_all_tx_tracker - Flush all timestamp trackers on this clock
967  * @pf: Board private structure
968  *
969  * Called by the clock owner to flush all the Tx timestamp trackers associated
970  * with the clock.
971  */
972 static void
973 ice_ptp_flush_all_tx_tracker(struct ice_pf *pf)
974 {
975 	struct ice_ptp_port *port;
976 
977 	list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member)
978 		ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx);
979 }
980 
981 /**
982  * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
983  * @pf: Board private structure
984  * @tx: Tx tracking structure to release
985  *
986  * Free memory associated with the Tx timestamp tracker.
987  */
988 static void
989 ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
990 {
991 	unsigned long flags;
992 
993 	spin_lock_irqsave(&tx->lock, flags);
994 	tx->init = 0;
995 	spin_unlock_irqrestore(&tx->lock, flags);
996 
997 	/* wait for potentially outstanding interrupt to complete */
998 	synchronize_irq(pf->oicr_irq.virq);
999 
1000 	ice_ptp_flush_tx_tracker(pf, tx);
1001 
1002 	kfree(tx->tstamps);
1003 	tx->tstamps = NULL;
1004 
1005 	bitmap_free(tx->in_use);
1006 	tx->in_use = NULL;
1007 
1008 	bitmap_free(tx->stale);
1009 	tx->stale = NULL;
1010 
1011 	tx->len = 0;
1012 }
1013 
1014 /**
1015  * ice_ptp_init_tx_eth56g - Initialize tracking for Tx timestamps
1016  * @pf: Board private structure
1017  * @tx: the Tx tracking structure to initialize
1018  * @port: the port this structure tracks
1019  *
1020  * Initialize the Tx timestamp tracker for this port. ETH56G PHYs
1021  * have independent memory blocks for all ports.
1022  *
1023  * Return: 0 for success, -ENOMEM when failed to allocate Tx tracker
1024  */
1025 static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx,
1026 				  u8 port)
1027 {
1028 	tx->block = port;
1029 	tx->offset = 0;
1030 	tx->len = INDEX_PER_PORT_ETH56G;
1031 	tx->has_ready_bitmap = 1;
1032 
1033 	return ice_ptp_alloc_tx_tracker(tx);
1034 }
1035 
1036 /**
1037  * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
1038  * @pf: Board private structure
1039  * @tx: the Tx tracking structure to initialize
1040  * @port: the port this structure tracks
1041  *
1042  * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
1043  * the timestamp block is shared for all ports in the same quad. To avoid
1044  * ports using the same timestamp index, logically break the block of
1045  * registers into chunks based on the port number.
1046  */
1047 static int
1048 ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
1049 {
1050 	tx->block = ICE_GET_QUAD_NUM(port);
1051 	tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
1052 	tx->len = INDEX_PER_PORT_E82X;
1053 	tx->has_ready_bitmap = 1;
1054 
1055 	return ice_ptp_alloc_tx_tracker(tx);
1056 }
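
/* Worked example of the quad split above, assuming four ports per quad and a
 * hypothetical INDEX_PER_PORT_E82X of 16: port 5 would use quad block
 * ICE_GET_QUAD_NUM(5) = 1 with offset (5 % ICE_PORTS_PER_QUAD) * 16 = 16, so
 * its timestamp indices occupy slots 16-31 of the shared block while port 4
 * of the same quad uses slots 0-15.
 */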
1057 
1058 /**
1059  * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
1060  * @pf: Board private structure
1061  * @tx: the Tx tracking structure to initialize
1062  *
1063  * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
1064  * port has its own block of timestamps, independent of the other ports.
1065  */
1066 static int
1067 ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
1068 {
1069 	tx->block = pf->hw.port_info->lport;
1070 	tx->offset = 0;
1071 	tx->len = INDEX_PER_PORT_E810;
1072 	/* The E810 PHY does not provide a timestamp ready bitmap. Instead,
1073 	 * verify new timestamps against cached copy of the last read
1074 	 * timestamp.
1075 	 */
1076 	tx->has_ready_bitmap = 0;
1077 
1078 	return ice_ptp_alloc_tx_tracker(tx);
1079 }
1080 
1081 /**
1082  * ice_ptp_update_cached_phctime - Update the cached PHC time values
1083  * @pf: Board specific private structure
1084  *
1085  * This function updates the system time values which are cached in the PF
1086  * structure and the Rx rings.
1087  *
1088  * This function must be called periodically to ensure that the cached value
1089  * is never more than 2 seconds old.
1090  *
1091  * Note that the cached copy in the PF PTP structure is always updated, even
1092  * if we can't update the copy in the Rx rings.
1093  *
1094  * Return:
1095  * * 0 - OK, successfully updated
1096  * * -EAGAIN - PF was busy, need to reschedule the update
1097  */
1098 static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
1099 {
1100 	struct device *dev = ice_pf_to_dev(pf);
1101 	unsigned long update_before;
1102 	u64 systime;
1103 	int i;
1104 
1105 	update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
1106 	if (pf->ptp.cached_phc_time &&
1107 	    time_is_before_jiffies(update_before)) {
1108 		unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;
1109 
1110 		dev_warn(dev, "%u msecs passed between updates to cached PHC time\n",
1111 			 jiffies_to_msecs(time_taken));
1112 		pf->ptp.late_cached_phc_updates++;
1113 	}
1114 
1115 	/* Read the current PHC time */
1116 	systime = ice_ptp_read_src_clk_reg(pf, NULL);
1117 
1118 	/* Update the cached PHC time stored in the PF structure */
1119 	WRITE_ONCE(pf->ptp.cached_phc_time, systime);
1120 	WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);
1121 
1122 	if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
1123 		return -EAGAIN;
1124 
1125 	ice_for_each_vsi(pf, i) {
1126 		struct ice_vsi *vsi = pf->vsi[i];
1127 		int j;
1128 
1129 		if (!vsi)
1130 			continue;
1131 
1132 		if (vsi->type != ICE_VSI_PF)
1133 			continue;
1134 
1135 		ice_for_each_rxq(vsi, j) {
1136 			if (!vsi->rx_rings[j])
1137 				continue;
1138 			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
1139 		}
1140 	}
1141 	clear_bit(ICE_CFG_BUSY, pf->state);
1142 
1143 	return 0;
1144 }
1145 
1146 /**
1147  * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
1148  * @pf: Board specific private structure
1149  *
1150  * This function must be called when the cached PHC time is no longer valid,
1151  * such as after a time adjustment. It marks any currently outstanding Tx
1152  * timestamps as stale and updates the cached PHC time for both the PF and Rx
1153  * rings.
1154  *
1155  * If updating the PHC time cannot be done immediately, a warning message is
1156  * logged and the work item is scheduled immediately to minimize the window
1157  * with a wrong cached timestamp.
1158  */
1159 static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
1160 {
1161 	struct device *dev = ice_pf_to_dev(pf);
1162 	int err;
1163 
1164 	/* Update the cached PHC time immediately if possible, otherwise
1165 	 * schedule the work item to execute soon.
1166 	 */
1167 	err = ice_ptp_update_cached_phctime(pf);
1168 	if (err) {
1169 		/* If another thread is updating the Rx rings, we won't
1170 		 * properly reset them here. This could lead to reporting of
1171 		 * invalid timestamps, but there isn't much we can do.
1172 		 */
1173 		dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
1174 			 __func__);
1175 
1176 		/* Queue the work item to update the Rx rings when possible */
1177 		kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
1178 					   msecs_to_jiffies(10));
1179 	}
1180 
1181 	/* Mark any outstanding timestamps as stale, since they might have
1182 	 * been captured in hardware before the time update. This could lead
1183 	 * to us extending them with the wrong cached value resulting in
1184 	 * incorrect timestamp values.
1185 	 */
1186 	ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx);
1187 }
1188 
1189 /**
1190  * ice_ptp_write_init - Set PHC time to provided value
1191  * @pf: Board private structure
1192  * @ts: timespec structure that holds the new time value
1193  *
1194  * Set the PHC time to the specified time provided in the timespec.
1195  */
1196 static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
1197 {
1198 	u64 ns = timespec64_to_ns(ts);
1199 	struct ice_hw *hw = &pf->hw;
1200 
1201 	return ice_ptp_init_time(hw, ns);
1202 }
1203 
1204 /**
1205  * ice_ptp_write_adj - Adjust PHC clock time atomically
1206  * @pf: Board private structure
1207  * @adj: Adjustment in nanoseconds
1208  *
1209  * Perform an atomic adjustment of the PHC time by the specified number of
1210  * nanoseconds.
1211  */
1212 static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
1213 {
1214 	struct ice_hw *hw = &pf->hw;
1215 
1216 	return ice_ptp_adj_clock(hw, adj);
1217 }
1218 
1219 /**
1220  * ice_base_incval - Get base timer increment value
1221  * @pf: Board private structure
1222  *
1223  * Look up the base timer increment value for this device. The base increment
1224  * value is used to define the nominal clock tick rate. This increment value
1225  * is programmed during device initialization. It is also used as the basis
1226  * for calculating adjustments using scaled_ppm.
1227  */
1228 static u64 ice_base_incval(struct ice_pf *pf)
1229 {
1230 	struct ice_hw *hw = &pf->hw;
1231 	u64 incval;
1232 
1233 	incval = ice_get_base_incval(hw);
1234 
1235 	dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
1236 		incval);
1237 
1238 	return incval;
1239 }
1240 
1241 /**
1242  * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state
1243  * @port: PTP port for which Tx FIFO is checked
1244  */
1245 static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
1246 {
1247 	int offs = port->port_num % ICE_PORTS_PER_QUAD;
1248 	int quad = ICE_GET_QUAD_NUM(port->port_num);
1249 	struct ice_pf *pf;
1250 	struct ice_hw *hw;
1251 	u32 val, phy_sts;
1252 	int err;
1253 
1254 	pf = ptp_port_to_pf(port);
1255 	hw = &pf->hw;
1256 
1257 	if (port->tx_fifo_busy_cnt == FIFO_OK)
1258 		return 0;
1259 
1260 	/* need to read FIFO state */
1261 	if (offs == 0 || offs == 1)
1262 		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS,
1263 					     &val);
1264 	else
1265 		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS,
1266 					     &val);
1267 
1268 	if (err) {
1269 		dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
1270 			port->port_num, err);
1271 		return err;
1272 	}
1273 
1274 	if (offs & 0x1)
1275 		phy_sts = FIELD_GET(Q_REG_FIFO13_M, val);
1276 	else
1277 		phy_sts = FIELD_GET(Q_REG_FIFO02_M, val);
1278 
1279 	if (phy_sts & FIFO_EMPTY) {
1280 		port->tx_fifo_busy_cnt = FIFO_OK;
1281 		return 0;
1282 	}
1283 
1284 	port->tx_fifo_busy_cnt++;
1285 
1286 	dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
1287 		port->tx_fifo_busy_cnt, port->port_num);
1288 
1289 	if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
1290 		dev_dbg(ice_pf_to_dev(pf),
1291 			"Port %d Tx FIFO still not empty; resetting quad %d\n",
1292 			port->port_num, quad);
1293 		ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
1294 		port->tx_fifo_busy_cnt = FIFO_OK;
1295 		return 0;
1296 	}
1297 
1298 	return -EAGAIN;
1299 }
1300 
1301 /**
1302  * ice_ptp_wait_for_offsets - Check for valid Tx and Rx offsets
1303  * @work: Pointer to the kthread_work structure for this task
1304  *
1305  * Check whether hardware has completed measuring the Tx and Rx offset values
1306  * used to configure and enable vernier timestamp calibration.
1307  *
1308  * Once the offset in either direction is measured, configure the associated
1309  * registers with the calibrated offset values and enable timestamping. The Tx
1310  * and Rx directions are configured independently as soon as their associated
1311  * offsets are known.
1312  *
1313  * This function reschedules itself until both Tx and Rx calibration have
1314  * completed.
1315  */
1316 static void ice_ptp_wait_for_offsets(struct kthread_work *work)
1317 {
1318 	struct ice_ptp_port *port;
1319 	struct ice_pf *pf;
1320 	struct ice_hw *hw;
1321 	int tx_err;
1322 	int rx_err;
1323 
1324 	port = container_of(work, struct ice_ptp_port, ov_work.work);
1325 	pf = ptp_port_to_pf(port);
1326 	hw = &pf->hw;
1327 
1328 	if (ice_is_reset_in_progress(pf->state)) {
1329 		/* wait for device driver to complete reset */
1330 		kthread_queue_delayed_work(pf->ptp.kworker,
1331 					   &port->ov_work,
1332 					   msecs_to_jiffies(100));
1333 		return;
1334 	}
1335 
1336 	tx_err = ice_ptp_check_tx_fifo(port);
1337 	if (!tx_err)
1338 		tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num);
1339 	rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num);
1340 	if (tx_err || rx_err) {
1341 		/* Tx and/or Rx offset not yet configured, try again later */
1342 		kthread_queue_delayed_work(pf->ptp.kworker,
1343 					   &port->ov_work,
1344 					   msecs_to_jiffies(100));
1345 		return;
1346 	}
1347 }
1348 
1349 /**
1350  * ice_ptp_port_phy_stop - Stop timestamping for a PHY port
1351  * @ptp_port: PTP port to stop
1352  */
1353 static int
1354 ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
1355 {
1356 	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
1357 	u8 port = ptp_port->port_num;
1358 	struct ice_hw *hw = &pf->hw;
1359 	int err;
1360 
1361 	if (ice_is_e810(hw))
1362 		return 0;
1363 
1364 	mutex_lock(&ptp_port->ps_lock);
1365 
1366 	switch (hw->ptp.phy_model) {
1367 	case ICE_PHY_ETH56G:
1368 		err = ice_stop_phy_timer_eth56g(hw, port, true);
1369 		break;
1370 	case ICE_PHY_E82X:
1371 		kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
1372 
1373 		err = ice_stop_phy_timer_e82x(hw, port, true);
1374 		break;
1375 	default:
1376 		err = -ENODEV;
1377 	}
1378 	if (err && err != -EBUSY)
1379 		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
1380 			port, err);
1381 
1382 	mutex_unlock(&ptp_port->ps_lock);
1383 
1384 	return err;
1385 }
1386 
1387 /**
1388  * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping
1389  * @ptp_port: PTP port for which the PHY start is set
1390  *
1391  * Start the PHY timestamping block, and initiate Vernier timestamping
1392  * calibration. If timestamping cannot be calibrated (such as if link is down)
1393  * then disable the timestamping block instead.
1394  */
1395 static int
1396 ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
1397 {
1398 	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
1399 	u8 port = ptp_port->port_num;
1400 	struct ice_hw *hw = &pf->hw;
1401 	unsigned long flags;
1402 	int err;
1403 
1404 	if (ice_is_e810(hw))
1405 		return 0;
1406 
1407 	if (!ptp_port->link_up)
1408 		return ice_ptp_port_phy_stop(ptp_port);
1409 
1410 	mutex_lock(&ptp_port->ps_lock);
1411 
1412 	switch (hw->ptp.phy_model) {
1413 	case ICE_PHY_ETH56G:
1414 		err = ice_start_phy_timer_eth56g(hw, port);
1415 		break;
1416 	case ICE_PHY_E82X:
1417 		/* Start the PHY timer in Vernier mode */
1418 		kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
1419 
1420 		/* temporarily disable Tx timestamps while calibrating
1421 		 * PHY offset
1422 		 */
1423 		spin_lock_irqsave(&ptp_port->tx.lock, flags);
1424 		ptp_port->tx.calibrating = true;
1425 		spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
1426 		ptp_port->tx_fifo_busy_cnt = 0;
1427 
1428 		/* Start the PHY timer in Vernier mode */
1429 		err = ice_start_phy_timer_e82x(hw, port);
1430 		if (err)
1431 			break;
1432 
1433 		/* Enable Tx timestamps right away */
1434 		spin_lock_irqsave(&ptp_port->tx.lock, flags);
1435 		ptp_port->tx.calibrating = false;
1436 		spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
1437 
1438 		kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
1439 					   0);
1440 		break;
1441 	default:
1442 		err = -ENODEV;
1443 	}
1444 
1445 	if (err)
1446 		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
1447 			port, err);
1448 
1449 	mutex_unlock(&ptp_port->ps_lock);
1450 
1451 	return err;
1452 }
1453 
1454 /**
1455  * ice_ptp_link_change - Reconfigure PTP after link status change
1456  * @pf: Board private structure
1457  * @port: Port for which the PHY start is set
1458  * @linkup: Link is up or down
1459  */
1460 void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
1461 {
1462 	struct ice_ptp_port *ptp_port;
1463 	struct ice_hw *hw = &pf->hw;
1464 
1465 	if (pf->ptp.state != ICE_PTP_READY)
1466 		return;
1467 
1468 	if (WARN_ON_ONCE(port >= hw->ptp.num_lports))
1469 		return;
1470 
1471 	ptp_port = &pf->ptp.port;
1472 	if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
1473 		port *= 2;
1474 	if (WARN_ON_ONCE(ptp_port->port_num != port))
1475 		return;
1476 
1477 	/* Update cached link status for this port immediately */
1478 	ptp_port->link_up = linkup;
1479 
1480 	switch (hw->ptp.phy_model) {
1481 	case ICE_PHY_E810:
1482 		/* Do not reconfigure E810 PHY */
1483 		return;
1484 	case ICE_PHY_ETH56G:
1485 	case ICE_PHY_E82X:
1486 		ice_ptp_port_phy_restart(ptp_port);
1487 		return;
1488 	default:
1489 		dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__);
1490 	}
1491 }
1492 
1493 /**
1494  * ice_ptp_cfg_phy_interrupt - Configure PHY interrupt settings
1495  * @pf: PF private structure
1496  * @ena: bool value to enable or disable interrupt
1497  * @threshold: Minimum number of packets at which intr is triggered
1498  *
1499  * Utility function to configure all the PHY interrupt settings, including
1500  * whether the PHY interrupt is enabled, and what threshold to use. Also
1501  * configures the E82X timestamp owner to react to interrupts from all PHYs.
1502  *
1503  * Return: 0 on success, -EOPNOTSUPP when the PHY model is not supported, or
1504  * another error code when configuring the PHY interrupt for E82X fails
1505  */
1506 static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
1507 {
1508 	struct device *dev = ice_pf_to_dev(pf);
1509 	struct ice_hw *hw = &pf->hw;
1510 
1511 	ice_ptp_reset_ts_memory(hw);
1512 
1513 	switch (hw->ptp.phy_model) {
1514 	case ICE_PHY_ETH56G: {
1515 		int port;
1516 
1517 		for (port = 0; port < hw->ptp.num_lports; port++) {
1518 			int err;
1519 
1520 			err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
1521 			if (err) {
1522 				dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
1523 					port, err);
1524 				return err;
1525 			}
1526 		}
1527 
1528 		return 0;
1529 	}
1530 	case ICE_PHY_E82X: {
1531 		int quad;
1532 
1533 		for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
1534 		     quad++) {
1535 			int err;
1536 
1537 			err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
1538 			if (err) {
1539 				dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
1540 					quad, err);
1541 				return err;
1542 			}
1543 		}
1544 
1545 		return 0;
1546 	}
1547 	case ICE_PHY_E810:
1548 		return 0;
1549 	case ICE_PHY_UNSUP:
1550 	default:
1551 		dev_warn(dev, "%s: Unexpected PHY model %d\n", __func__,
1552 			 hw->ptp.phy_model);
1553 		return -EOPNOTSUPP;
1554 	}
1555 }
1556 
1557 /**
1558  * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block
1559  * @pf: Board private structure
1560  */
1561 static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
1562 {
1563 	ice_ptp_port_phy_restart(&pf->ptp.port);
1564 }
1565 
1566 /**
1567  * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping
1568  * @pf: Board private structure
1569  */
1570 static void ice_ptp_restart_all_phy(struct ice_pf *pf)
1571 {
1572 	struct list_head *entry;
1573 
1574 	list_for_each(entry, &pf->ptp.ports_owner.ports) {
1575 		struct ice_ptp_port *port = list_entry(entry,
1576 						       struct ice_ptp_port,
1577 						       list_member);
1578 
1579 		if (port->link_up)
1580 			ice_ptp_port_phy_restart(port);
1581 	}
1582 }
1583 
1584 /**
1585  * ice_ptp_adjfine - Adjust clock increment rate
1586  * @info: the driver's PTP info structure
1587  * @scaled_ppm: Parts per million with 16-bit fractional field
1588  *
1589  * Adjust the frequency of the clock by the indicated scaled ppm from the
1590  * base frequency.
1591  */
1592 static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
1593 {
1594 	struct ice_pf *pf = ptp_info_to_pf(info);
1595 	struct ice_hw *hw = &pf->hw;
1596 	u64 incval;
1597 	int err;
1598 
1599 	incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm);
1600 	err = ice_ptp_write_incval_locked(hw, incval);
1601 	if (err) {
1602 		dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
1603 			err);
1604 		return -EIO;
1605 	}
1606 
1607 	return 0;
1608 }
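
/* Worked example of the adjustment above, with illustrative numbers only:
 * scaled_ppm carries a 16-bit fractional field, so a request of 32768
 * scaled_ppm corresponds to 0.5 ppm. For a hypothetical base increment of
 * 0x100000000, adjust_by_scaled_ppm() then returns roughly
 * 0x100000000 + (0x100000000 * 0.5) / 1000000, i.e. the nominal increment
 * scaled up by half a part per million before it is written to hardware.
 */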
1609 
1610 /**
1611  * ice_ptp_extts_event - Process PTP external clock event
1612  * @pf: Board private structure
1613  */
1614 void ice_ptp_extts_event(struct ice_pf *pf)
1615 {
1616 	struct ptp_clock_event event;
1617 	struct ice_hw *hw = &pf->hw;
1618 	u8 chan, tmr_idx;
1619 	u32 hi, lo;
1620 
1621 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1622 	/* Event time is captured by one of the two matched registers
1623 	 *      GLTSYN_EVNT_L: 32 LSB of sampled time event
1624 	 *      GLTSYN_EVNT_H: 32 MSB of sampled time event
1625 	 * Event is defined in GLTSYN_EVNT_0 register
1626 	 */
1627 	for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
1628 		/* Check if channel is enabled */
1629 		if (pf->ptp.ext_ts_irq & (1 << chan)) {
1630 			lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
1631 			hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
1632 			event.timestamp = (((u64)hi) << 32) | lo;
1633 			event.type = PTP_CLOCK_EXTTS;
1634 			event.index = chan;
1635 
1636 			/* Fire event */
1637 			ptp_clock_event(pf->ptp.clock, &event);
1638 			pf->ptp.ext_ts_irq &= ~(1 << chan);
1639 		}
1640 	}
1641 }
1642 
1643 /**
1644  * ice_ptp_cfg_extts - Configure EXTTS pin and channel
1645  * @pf: Board private structure
1646  * @ena: true to enable; false to disable
1647  * @chan: GPIO channel (0-3)
1648  * @gpio_pin: GPIO pin
1649  * @extts_flags: request flags from the ptp_extts_request.flags
1650  */
1651 static int
1652 ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
1653 		  unsigned int extts_flags)
1654 {
1655 	u32 func, aux_reg, gpio_reg, irq_reg;
1656 	struct ice_hw *hw = &pf->hw;
1657 	u8 tmr_idx;
1658 
1659 	if (chan > (unsigned int)pf->ptp.info.n_ext_ts)
1660 		return -EINVAL;
1661 
1662 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1663 
1664 	irq_reg = rd32(hw, PFINT_OICR_ENA);
1665 
1666 	if (ena) {
1667 		/* Enable the interrupt */
1668 		irq_reg |= PFINT_OICR_TSYN_EVNT_M;
1669 		aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;
1670 
1671 #define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE	BIT(0)
1672 #define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE	BIT(1)
1673 
1674 		/* set event level to requested edge */
1675 		if (extts_flags & PTP_FALLING_EDGE)
1676 			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
1677 		if (extts_flags & PTP_RISING_EDGE)
1678 			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;
1679 
1680 		/* Write GPIO CTL reg.
1681 		 * 0x1 is input sampled by EVENT register(channel)
1682 		 * + num_in_channels * tmr_idx
1683 		 */
1684 		func = 1 + chan + (tmr_idx * 3);
1685 		gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func);
1686 		pf->ptp.ext_ts_chan |= (1 << chan);
1687 	} else {
1688 		/* clear the values we set to reset defaults */
1689 		aux_reg = 0;
1690 		gpio_reg = 0;
1691 		pf->ptp.ext_ts_chan &= ~(1 << chan);
1692 		if (!pf->ptp.ext_ts_chan)
1693 			irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
1694 	}
1695 
1696 	wr32(hw, PFINT_OICR_ENA, irq_reg);
1697 	wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
1698 	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);
1699 
1700 	return 0;
1701 }
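
/* Illustrative example of the PIN_FUNC encoding used above: with tmr_idx 0
 * and channel 1, func = 1 + 1 + (0 * 3) = 2 is programmed into the
 * GLGEN_GPIO_CTL PIN_FUNC field, i.e. the input sampled by the GLTSYN_EVNT
 * register of that channel, per the comment in the enable path.
 */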
1702 
1703 /**
1704  * ice_ptp_cfg_clkout - Configure clock to generate periodic wave
1705  * @pf: Board private structure
1706  * @chan: GPIO channel (0-3)
1707  * @config: desired periodic clk configuration. NULL will disable channel
1708  * @store: If set to true the values will be stored
1709  *
1710  * Configure the internal clock generator modules to generate the clock wave of
1711  * specified period.
1712  */
1713 static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
1714 			      struct ice_perout_channel *config, bool store)
1715 {
1716 	u64 current_time, period, start_time, phase;
1717 	struct ice_hw *hw = &pf->hw;
1718 	u32 func, val, gpio_pin;
1719 	u8 tmr_idx;
1720 
1721 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1722 
1723 	/* 0. Reset mode & out_en in AUX_OUT */
1724 	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
1725 
1726 	/* If we're disabling the output, clear out CLKO and TGT and keep
1727 	 * output level low
1728 	 */
1729 	if (!config || !config->ena) {
1730 		wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0);
1731 		wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0);
1732 		wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0);
1733 
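		/* Leave the pin configured as an output with no function
		 * selected so the line is driven low
		 */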
1734 		val = GLGEN_GPIO_CTL_PIN_DIR_M;
1735 		gpio_pin = pf->ptp.perout_channels[chan].gpio_pin;
1736 		wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
1737 
1738 		/* Store the value if requested */
1739 		if (store)
1740 			memset(&pf->ptp.perout_channels[chan], 0,
1741 			       sizeof(struct ice_perout_channel));
1742 
1743 		return 0;
1744 	}
1745 	period = config->period;
1746 	start_time = config->start_time;
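	/* Record the requested phase (offset of start_time within one period)
	 * so it can be preserved if start_time must be moved forward below
	 */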
1747 	div64_u64_rem(start_time, period, &phase);
1748 	gpio_pin = config->gpio_pin;
1749 
1750 	/* 1. Write clkout with half of required period value */
1751 	if (period & 0x1) {
1752 		dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
1753 		goto err;
1754 	}
1755 
1756 	period >>= 1;
1757 
1758 	/* For proper operation, GLTSYN_CLKO must be larger than one clock tick
1759 	 */
1760 #define MIN_PULSE 3
1761 	if (period <= MIN_PULSE || period > U32_MAX) {
1762 		dev_err(ice_pf_to_dev(pf), "CLK Period must be > %d && < 2^33",
1763 			MIN_PULSE * 2);
1764 		goto err;
1765 	}
1766 
1767 	wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));
1768 
1769 	/* Allow time for programming before start_time is hit */
1770 	current_time = ice_ptp_read_src_clk_reg(pf, NULL);
1771 
1772 	/* If the start time is in the past, start the timer at the next full
1773 	 * second while maintaining the requested phase
1774 	 */
1775 	if (start_time < current_time)
1776 		start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
1777 				       NSEC_PER_SEC) * NSEC_PER_SEC + phase;
1778 
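	/* Back the start time up by the pin's output propagation delay so the
	 * first edge appears on the wire at the requested time
	 */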
1779 	if (ice_is_e810(hw))
1780 		start_time -= E810_OUT_PROP_DELAY_NS;
1781 	else
1782 		start_time -= ice_e82x_pps_delay(ice_e82x_time_ref(hw));
1783 
1784 	/* 2. Write TARGET time */
1785 	wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
1786 	wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start_time));
1787 
1788 	/* 3. Write AUX_OUT register */
1789 	val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
1790 	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);
1791 
1792 	/* 4. write GPIO CTL reg */
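	/* Output functions start at index 8; each timer index owns a block of
	 * four output channels
	 */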
1793 	func = 8 + chan + (tmr_idx * 4);
1794 	val = GLGEN_GPIO_CTL_PIN_DIR_M |
1795 	      FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func);
1796 	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
1797 
1798 	/* Store the value if requested */
1799 	if (store) {
1800 		memcpy(&pf->ptp.perout_channels[chan], config,
1801 		       sizeof(struct ice_perout_channel));
1802 		pf->ptp.perout_channels[chan].start_time = phase;
1803 	}
1804 
1805 	return 0;
1806 err:
1807 	dev_err(ice_pf_to_dev(pf), "PTP failed to cfg per_clk\n");
1808 	return -EFAULT;
1809 }
1810 
1811 /**
1812  * ice_ptp_disable_all_clkout - Disable all currently configured outputs
1813  * @pf: pointer to the PF structure
1814  *
1815  * Disable all currently configured clock outputs. This is necessary before
1816  * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_clkout to
1817  * re-enable the clocks again.
1818  */
1819 static void ice_ptp_disable_all_clkout(struct ice_pf *pf)
1820 {
1821 	uint i;
1822 
1823 	for (i = 0; i < pf->ptp.info.n_per_out; i++)
1824 		if (pf->ptp.perout_channels[i].ena)
1825 			ice_ptp_cfg_clkout(pf, i, NULL, false);
1826 }
1827 
1828 /**
1829  * ice_ptp_enable_all_clkout - Enable all configured periodic clock outputs
1830  * @pf: pointer to the PF structure
1831  *
1832  * Enable all currently configured clock outputs. Use this after
1833  * ice_ptp_disable_all_clkout to reconfigure the output signals according to
1834  * their configuration.
1835  */
1836 static void ice_ptp_enable_all_clkout(struct ice_pf *pf)
1837 {
1838 	uint i;
1839 
1840 	for (i = 0; i < pf->ptp.info.n_per_out; i++)
1841 		if (pf->ptp.perout_channels[i].ena)
1842 			ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i],
1843 					   false);
1844 }
1845 
1846 /**
1847  * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC
1848  * @info: the driver's PTP info structure
1849  * @rq: The requested feature to change
1850  * @on: Enable/disable flag
1851  */
1852 static int
1853 ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
1854 			 struct ptp_clock_request *rq, int on)
1855 {
1856 	struct ice_pf *pf = ptp_info_to_pf(info);
1857 	struct ice_perout_channel clk_cfg = {0};
1858 	bool sma_pres = false;
1859 	unsigned int chan;
1860 	u32 gpio_pin;
1861 	int err;
1862 
1863 	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
1864 		sma_pres = true;
1865 
1866 	switch (rq->type) {
1867 	case PTP_CLK_REQ_PEROUT:
1868 		chan = rq->perout.index;
1869 		if (sma_pres) {
1870 			if (chan == ice_pin_desc_e810t[SMA1].chan)
1871 				clk_cfg.gpio_pin = GPIO_20;
1872 			else if (chan == ice_pin_desc_e810t[SMA2].chan)
1873 				clk_cfg.gpio_pin = GPIO_22;
1874 			else
1875 				return -EINVAL;
1876 		} else if (ice_is_e810t(&pf->hw)) {
1877 			if (chan == 0)
1878 				clk_cfg.gpio_pin = GPIO_20;
1879 			else
1880 				clk_cfg.gpio_pin = GPIO_22;
1881 		} else if (chan == PPS_CLK_GEN_CHAN) {
1882 			clk_cfg.gpio_pin = PPS_PIN_INDEX;
1883 		} else {
1884 			clk_cfg.gpio_pin = chan;
1885 		}
1886 
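		/* Convert the requested period and start time to nanoseconds */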
1887 		clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
1888 				   rq->perout.period.nsec);
1889 		clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) +
1890 				       rq->perout.start.nsec);
1891 		clk_cfg.ena = !!on;
1892 
1893 		err = ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
1894 		break;
1895 	case PTP_CLK_REQ_EXTTS:
1896 		chan = rq->extts.index;
1897 		if (sma_pres) {
1898 			if (chan < ice_pin_desc_e810t[SMA2].chan)
1899 				gpio_pin = GPIO_21;
1900 			else
1901 				gpio_pin = GPIO_23;
1902 		} else if (ice_is_e810t(&pf->hw)) {
1903 			if (chan == 0)
1904 				gpio_pin = GPIO_21;
1905 			else
1906 				gpio_pin = GPIO_23;
1907 		} else {
1908 			gpio_pin = chan;
1909 		}
1910 
1911 		err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin,
1912 					rq->extts.flags);
1913 		break;
1914 	default:
1915 		return -EOPNOTSUPP;
1916 	}
1917 
1918 	return err;
1919 }
1920 
1921 /**
1922  * ice_ptp_gpio_enable_e823 - Enable/disable ancillary features of PHC
1923  * @info: the driver's PTP info structure
1924  * @rq: The requested feature to change
1925  * @on: Enable/disable flag
1926  */
1927 static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info,
1928 				    struct ptp_clock_request *rq, int on)
1929 {
1930 	struct ice_pf *pf = ptp_info_to_pf(info);
1931 	struct ice_perout_channel clk_cfg = {0};
1932 	int err;
1933 
1934 	switch (rq->type) {
1935 	case PTP_CLK_REQ_PPS:
1936 		clk_cfg.gpio_pin = PPS_PIN_INDEX;
1937 		clk_cfg.period = NSEC_PER_SEC;
1938 		clk_cfg.ena = !!on;
1939 
1940 		err = ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true);
1941 		break;
1942 	case PTP_CLK_REQ_EXTTS:
1943 		err = ice_ptp_cfg_extts(pf, !!on, rq->extts.index,
1944 					TIME_SYNC_PIN_INDEX, rq->extts.flags);
1945 		break;
1946 	default:
1947 		return -EOPNOTSUPP;
1948 	}
1949 
1950 	return err;
1951 }
1952 
1953 /**
1954  * ice_ptp_gettimex64 - Get the time of the clock
1955  * @info: the driver's PTP info structure
1956  * @ts: timespec64 structure to hold the current time value
1957  * @sts: Optional parameter for holding a pair of system timestamps from
1958  *       the system clock. Will be ignored if NULL is given.
1959  *
1960  * Read the device clock and return the correct value in ns, after converting
1961  * it into a timespec64 structure.
1962  */
1963 static int
1964 ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
1965 		   struct ptp_system_timestamp *sts)
1966 {
1967 	struct ice_pf *pf = ptp_info_to_pf(info);
1968 	u64 time_ns;
1969 
1970 	time_ns = ice_ptp_read_src_clk_reg(pf, sts);
1971 	*ts = ns_to_timespec64(time_ns);
1972 	return 0;
1973 }
1974 
1975 /**
1976  * ice_ptp_settime64 - Set the time of the clock
1977  * @info: the driver's PTP info structure
1978  * @ts: timespec64 structure that holds the new time value
1979  *
1980  * Set the device clock to the user input value. The conversion from timespec
1981  * to ns happens in the write function.
1982  */
1983 static int
1984 ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
1985 {
1986 	struct ice_pf *pf = ptp_info_to_pf(info);
1987 	struct timespec64 ts64 = *ts;
1988 	struct ice_hw *hw = &pf->hw;
1989 	int err;
1990 
1991 	/* For Vernier mode on E82X, we need to recalibrate after new settime.
1992 	 * Start with marking timestamps as invalid.
1993 	 */
1994 	if (hw->ptp.phy_model == ICE_PHY_E82X) {
1995 		err = ice_ptp_clear_phy_offset_ready_e82x(hw);
1996 		if (err)
1997 			dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
1998 	}
1999 
2000 	if (!ice_ptp_lock(hw)) {
2001 		err = -EBUSY;
2002 		goto exit;
2003 	}
2004 
2005 	/* Disable periodic outputs */
2006 	ice_ptp_disable_all_clkout(pf);
2007 
2008 	err = ice_ptp_write_init(pf, &ts64);
2009 	ice_ptp_unlock(hw);
2010 
2011 	if (!err)
2012 		ice_ptp_reset_cached_phctime(pf);
2013 
2014 	/* Reenable periodic outputs */
2015 	ice_ptp_enable_all_clkout(pf);
2016 
2017 	/* Recalibrate and re-enable timestamp blocks for E822/E823 */
2018 	if (hw->ptp.phy_model == ICE_PHY_E82X)
2019 		ice_ptp_restart_all_phy(pf);
2020 exit:
2021 	if (err) {
2022 		dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
2023 		return err;
2024 	}
2025 
2026 	return 0;
2027 }
2028 
2029 /**
2030  * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
2031  * @info: the driver's PTP info structure
2032  * @delta: Offset in nanoseconds to adjust the time by
2033  */
2034 static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
2035 {
2036 	struct timespec64 now, then;
2037 	int ret;
2038 
2039 	then = ns_to_timespec64(delta);
2040 	ret = ice_ptp_gettimex64(info, &now, NULL);
2041 	if (ret)
2042 		return ret;
2043 	now = timespec64_add(now, then);
2044 
2045 	return ice_ptp_settime64(info, (const struct timespec64 *)&now);
2046 }
2047 
2048 /**
2049  * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta
2050  * @info: the driver's PTP info structure
2051  * @delta: Offset in nanoseconds to adjust the time by
2052  */
2053 static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
2054 {
2055 	struct ice_pf *pf = ptp_info_to_pf(info);
2056 	struct ice_hw *hw = &pf->hw;
2057 	struct device *dev;
2058 	int err;
2059 
2060 	dev = ice_pf_to_dev(pf);
2061 
2062 	/* Hardware only supports atomic adjustments using signed 32-bit
2063 	 * integers. For any adjustment outside this range, perform
2064 	 * a non-atomic get->adjust->set flow.
2065 	 */
2066 	if (delta > S32_MAX || delta < S32_MIN) {
2067 		dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta);
2068 		return ice_ptp_adjtime_nonatomic(info, delta);
2069 	}
2070 
2071 	if (!ice_ptp_lock(hw)) {
2072 		dev_err(dev, "PTP failed to acquire semaphore in adjtime\n");
2073 		return -EBUSY;
2074 	}
2075 
2076 	/* Disable periodic outputs */
2077 	ice_ptp_disable_all_clkout(pf);
2078 
2079 	err = ice_ptp_write_adj(pf, delta);
2080 
2081 	/* Reenable periodic outputs */
2082 	ice_ptp_enable_all_clkout(pf);
2083 
2084 	ice_ptp_unlock(hw);
2085 
2086 	if (err) {
2087 		dev_err(dev, "PTP failed to adjust time, err %d\n", err);
2088 		return err;
2089 	}
2090 
2091 	ice_ptp_reset_cached_phctime(pf);
2092 
2093 	return 0;
2094 }
2095 
2096 #ifdef CONFIG_ICE_HWTS
2097 /**
2098  * ice_ptp_get_syncdevicetime - Get the cross time stamp info
2099  * @device: Current device time
2100  * @system: System counter value read synchronously with device time
2101  * @ctx: Context provided by timekeeping code
2102  *
2103  * Read device and system (ART) clock simultaneously and return the corrected
2104  * clock values in ns.
2105  */
2106 static int
2107 ice_ptp_get_syncdevicetime(ktime_t *device,
2108 			   struct system_counterval_t *system,
2109 			   void *ctx)
2110 {
2111 	struct ice_pf *pf = (struct ice_pf *)ctx;
2112 	struct ice_hw *hw = &pf->hw;
2113 	u32 hh_lock, hh_art_ctl;
2114 	int i;
2115 
2116 #define MAX_HH_HW_LOCK_TRIES	5
2117 #define MAX_HH_CTL_LOCK_TRIES	100
2118 
2119 	for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) {
2120 		/* Get the HW lock */
2121 		hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
2122 		if (hh_lock & PFHH_SEM_BUSY_M) {
2123 			usleep_range(10000, 15000);
2124 			continue;
2125 		}
2126 		break;
2127 	}
2128 	if (hh_lock & PFHH_SEM_BUSY_M) {
2129 		dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
2130 		return -EBUSY;
2131 	}
2132 
2133 	/* Program cmd to master timer */
2134 	ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
2135 
2136 	/* Start the ART and device clock sync sequence */
2137 	hh_art_ctl = rd32(hw, GLHH_ART_CTL);
2138 	hh_art_ctl |= GLHH_ART_CTL_ACTIVE_M;
2139 	wr32(hw, GLHH_ART_CTL, hh_art_ctl);
2140 
2141 	for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) {
2142 		/* Wait for sync to complete */
2143 		hh_art_ctl = rd32(hw, GLHH_ART_CTL);
2144 		if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
2145 			udelay(1);
2146 			continue;
2147 		} else {
2148 			u32 hh_ts_lo, hh_ts_hi, tmr_idx;
2149 			u64 hh_ts;
2150 
2151 			tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
2152 			/* Read ART time */
2153 			hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
2154 			hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
2155 			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
2156 			*system = convert_art_ns_to_tsc(hh_ts);
2157 			/* Read Device source clock time */
2158 			hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
2159 			hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
2160 			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
2161 			*device = ns_to_ktime(hh_ts);
2162 			break;
2163 		}
2164 	}
2165 
2166 	/* Clear the master timer */
2167 	ice_ptp_src_cmd(hw, ICE_PTP_NOP);
2168 
2169 	/* Release HW lock */
2170 	hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
2171 	hh_lock &= ~PFHH_SEM_BUSY_M;
2172 	wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);
2173 
2174 	if (i == MAX_HH_CTL_LOCK_TRIES)
2175 		return -ETIMEDOUT;
2176 
2177 	return 0;
2178 }
2179 
2180 /**
2181  * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp
2182  * @info: the driver's PTP info structure
2183  * @cts: The memory to fill the cross timestamp info
2184  *
2185  * Capture a cross timestamp between the ART and the device PTP hardware
2186  * clock. Fill the cross timestamp information and report it back to the
2187  * caller.
2188  *
2189  * This is only valid for E822 and E823 devices which have support for
2190  * generating the cross timestamp via PCIe PTM.
2191  *
2192  * In order to correctly correlate the ART timestamp back to the TSC time, the
2193  * CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
2194  */
2195 static int
2196 ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info,
2197 			    struct system_device_crosststamp *cts)
2198 {
2199 	struct ice_pf *pf = ptp_info_to_pf(info);
2200 
2201 	return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
2202 					     pf, NULL, cts);
2203 }
2204 #endif /* CONFIG_ICE_HWTS */
2205 
2206 /**
2207  * ice_ptp_get_ts_config - ioctl interface to read the timestamping config
2208  * @pf: Board private structure
2209  * @ifr: ioctl data
2210  *
2211  * Copy the timestamping config to user buffer
2212  */
2213 int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
2214 {
2215 	struct hwtstamp_config *config;
2216 
2217 	if (pf->ptp.state != ICE_PTP_READY)
2218 		return -EIO;
2219 
2220 	config = &pf->ptp.tstamp_config;
2221 
2222 	return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
2223 		-EFAULT : 0;
2224 }
2225 
2226 /**
2227  * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
2228  * @pf: Board private structure
2229  * @config: hwtstamp settings requested or saved
2230  */
2231 static int
2232 ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
2233 {
2234 	switch (config->tx_type) {
2235 	case HWTSTAMP_TX_OFF:
2236 		pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
2237 		break;
2238 	case HWTSTAMP_TX_ON:
2239 		pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
2240 		break;
2241 	default:
2242 		return -ERANGE;
2243 	}
2244 
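	/* Rx timestamping is either enabled for all packets or disabled; any
	 * PTP filter request is reported back as HWTSTAMP_FILTER_ALL
	 */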
2245 	switch (config->rx_filter) {
2246 	case HWTSTAMP_FILTER_NONE:
2247 		pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
2248 		break;
2249 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2250 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2251 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2252 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2253 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2254 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2255 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2256 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2257 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2258 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2259 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2260 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2261 	case HWTSTAMP_FILTER_NTP_ALL:
2262 	case HWTSTAMP_FILTER_ALL:
2263 		pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
2264 		break;
2265 	default:
2266 		return -ERANGE;
2267 	}
2268 
2269 	/* Immediately update the device timestamping mode */
2270 	ice_ptp_restore_timestamp_mode(pf);
2271 
2272 	return 0;
2273 }
2274 
2275 /**
2276  * ice_ptp_set_ts_config - ioctl interface to control the timestamping
2277  * @pf: Board private structure
2278  * @ifr: ioctl data
2279  *
2280  * Get the user config and store it
2281  */
2282 int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
2283 {
2284 	struct hwtstamp_config config;
2285 	int err;
2286 
2287 	if (pf->ptp.state != ICE_PTP_READY)
2288 		return -EAGAIN;
2289 
2290 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2291 		return -EFAULT;
2292 
2293 	err = ice_ptp_set_timestamp_mode(pf, &config);
2294 	if (err)
2295 		return err;
2296 
2297 	/* Return the actual configuration set */
2298 	config = pf->ptp.tstamp_config;
2299 
2300 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2301 		-EFAULT : 0;
2302 }
2303 
2304 /**
2305  * ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns
2306  * @rx_desc: Receive descriptor
2307  * @pkt_ctx: Packet context to get the cached time
2308  *
2309  * The driver receives a notification of the timestamp in the receive descriptor.
2310  */
2311 u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
2312 			const struct ice_pkt_ctx *pkt_ctx)
2313 {
2314 	u64 ts_ns, cached_time;
2315 	u32 ts_high;
2316 
2317 	if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
2318 		return 0;
2319 
2320 	cached_time = READ_ONCE(pkt_ctx->cached_phctime);
2321 
2322 	/* Do not report a timestamp if we don't have a cached PHC time */
2323 	if (!cached_time)
2324 		return 0;
2325 
2326 	/* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
2327 	 * PHC value, rather than accessing the PF. This also allows us to
2328 	 * simply pass the upper 32bits of nanoseconds directly. Calling
2329 	 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these
2330 	 * bits itself.
2331 	 */
2332 	ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
2333 	ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);
2334 
2335 	return ts_ns;
2336 }
2337 
2338 /**
2339  * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins
2340  * @pf: pointer to the PF structure
2341  * @info: PTP clock info structure
2342  *
2343  * Disable the OS access to the SMA pins. Called to clear out the OS
2344  * indications of pin support when we fail to set up the E810-T SMA control
2345  * register.
2346  */
2347 static void
2348 ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
2349 {
2350 	struct device *dev = ice_pf_to_dev(pf);
2351 
2352 	dev_warn(dev, "Failed to configure E810-T SMA pin control\n");
2353 
2354 	info->enable = NULL;
2355 	info->verify = NULL;
2356 	info->n_pins = 0;
2357 	info->n_ext_ts = 0;
2358 	info->n_per_out = 0;
2359 }
2360 
2361 /**
2362  * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins
2363  * @pf: pointer to the PF structure
2364  * @info: PTP clock info structure
2365  *
2366  * Finish setting up the SMA pins by allocating pin_config, and setting it up
2367  * according to the current status of the SMA. On failure, disable all of the
2368  * extended SMA pin support.
2369  */
2370 static void
2371 ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
2372 {
2373 	struct device *dev = ice_pf_to_dev(pf);
2374 	int err;
2375 
2376 	/* Allocate memory for kernel pins interface */
2377 	info->pin_config = devm_kcalloc(dev, info->n_pins,
2378 					sizeof(*info->pin_config), GFP_KERNEL);
2379 	if (!info->pin_config) {
2380 		ice_ptp_disable_sma_pins_e810t(pf, info);
2381 		return;
2382 	}
2383 
2384 	/* Read current SMA status */
2385 	err = ice_get_sma_config_e810t(&pf->hw, info->pin_config);
2386 	if (err)
2387 		ice_ptp_disable_sma_pins_e810t(pf, info);
2388 }
2389 
2390 /**
2391  * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
2392  * @pf: pointer to the PF instance
2393  * @info: PTP clock capabilities
2394  */
2395 static void
2396 ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
2397 {
2398 	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
2399 		info->n_ext_ts = N_EXT_TS_E810;
2400 		info->n_per_out = N_PER_OUT_E810T;
2401 		info->n_pins = NUM_PTP_PINS_E810T;
2402 		info->verify = ice_verify_pin_e810t;
2403 
2404 		/* Complete setup of the SMA pins */
2405 		ice_ptp_setup_sma_pins_e810t(pf, info);
2406 	} else if (ice_is_e810t(&pf->hw)) {
2407 		info->n_ext_ts = N_EXT_TS_NO_SMA_E810T;
2408 		info->n_per_out = N_PER_OUT_NO_SMA_E810T;
2409 	} else {
2410 		info->n_per_out = N_PER_OUT_E810;
2411 		info->n_ext_ts = N_EXT_TS_E810;
2412 	}
2413 }
2414 
2415 /**
2416  * ice_ptp_setup_pins_e823 - Setup PTP pins in sysfs
2417  * @pf: pointer to the PF instance
2418  * @info: PTP clock capabilities
2419  */
2420 static void
2421 ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info)
2422 {
2423 	info->pps = 1;
2424 	info->n_per_out = 0;
2425 	info->n_ext_ts = 1;
2426 }
2427 
2428 /**
2429  * ice_ptp_set_funcs_e82x - Set specialized functions for E82x support
2430  * @pf: Board private structure
2431  * @info: PTP info to fill
2432  *
2433  * Assign functions to the PTP capabilities structure for E82x devices.
2434  * Functions which operate across all device families should be set directly
2435  * in ice_ptp_set_caps. Only add functions here which are distinct for E82x
2436  * devices.
2437  */
2438 static void
2439 ice_ptp_set_funcs_e82x(struct ice_pf *pf, struct ptp_clock_info *info)
2440 {
2441 #ifdef CONFIG_ICE_HWTS
2442 	if (boot_cpu_has(X86_FEATURE_ART) &&
2443 	    boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
2444 		info->getcrosststamp = ice_ptp_getcrosststamp_e82x;
2445 #endif /* CONFIG_ICE_HWTS */
2446 }
2447 
2448 /**
2449  * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
2450  * @pf: Board private structure
2451  * @info: PTP info to fill
2452  *
2453  * Assign functions to the PTP capabilities structure for E810 devices.
2454  * Functions which operate across all device families should be set directly
2455  * in ice_ptp_set_caps. Only add functions here which are distinct for E810
2456  * devices.
2457  */
2458 static void
2459 ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
2460 {
2461 	info->enable = ice_ptp_gpio_enable_e810;
2462 	ice_ptp_setup_pins_e810(pf, info);
2463 }
2464 
2465 /**
2466  * ice_ptp_set_funcs_e823 - Set specialized functions for E823 support
2467  * @pf: Board private structure
2468  * @info: PTP info to fill
2469  *
2470  * Assign functions to the PTP capabilities structure for E823 devices.
2471  * Functions which operate across all device families should be set directly
2472  * in ice_ptp_set_caps. Only add functions here which are distinct for E823
2473  * devices.
2474  */
2475 static void
2476 ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info)
2477 {
2478 	ice_ptp_set_funcs_e82x(pf, info);
2479 
2480 	info->enable = ice_ptp_gpio_enable_e823;
2481 	ice_ptp_setup_pins_e823(pf, info);
2482 }
2483 
2484 /**
2485  * ice_ptp_set_caps - Set PTP capabilities
2486  * @pf: Board private structure
2487  */
2488 static void ice_ptp_set_caps(struct ice_pf *pf)
2489 {
2490 	struct ptp_clock_info *info = &pf->ptp.info;
2491 	struct device *dev = ice_pf_to_dev(pf);
2492 
2493 	snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
2494 		 dev_driver_string(dev), dev_name(dev));
2495 	info->owner = THIS_MODULE;
2496 	info->max_adj = 100000000;
2497 	info->adjtime = ice_ptp_adjtime;
2498 	info->adjfine = ice_ptp_adjfine;
2499 	info->gettimex64 = ice_ptp_gettimex64;
2500 	info->settime64 = ice_ptp_settime64;
2501 
2502 	if (ice_is_e810(&pf->hw))
2503 		ice_ptp_set_funcs_e810(pf, info);
2504 	else if (ice_is_e823(&pf->hw))
2505 		ice_ptp_set_funcs_e823(pf, info);
2506 	else
2507 		ice_ptp_set_funcs_e82x(pf, info);
2508 }
2509 
2510 /**
2511  * ice_ptp_create_clock - Create PTP clock device for userspace
2512  * @pf: Board private structure
2513  *
2514  * This function creates a new PTP clock device if one does not already
2515  * exist. It returns an error if the device cannot be created and success if
2516  * a device already exists. It should be used by ice_ptp_init to create the
2517  * clock initially, and prevents global resets from creating new clock devices.
2518  */
2519 static long ice_ptp_create_clock(struct ice_pf *pf)
2520 {
2521 	struct ptp_clock_info *info;
2522 	struct device *dev;
2523 
2524 	/* No need to create a clock device if we already have one */
2525 	if (pf->ptp.clock)
2526 		return 0;
2527 
2528 	ice_ptp_set_caps(pf);
2529 
2530 	info = &pf->ptp.info;
2531 	dev = ice_pf_to_dev(pf);
2532 
2533 	/* Attempt to register the clock before enabling the hardware. */
2534 	pf->ptp.clock = ptp_clock_register(info, dev);
2535 	if (IS_ERR(pf->ptp.clock)) {
2536 		dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device");
2537 		return PTR_ERR(pf->ptp.clock);
2538 	}
2539 
2540 	return 0;
2541 }
2542 
2543 /**
2544  * ice_ptp_request_ts - Request an available Tx timestamp index
2545  * @tx: the PTP Tx timestamp tracker to request from
2546  * @skb: the SKB to associate with this timestamp request
2547  */
2548 s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
2549 {
2550 	unsigned long flags;
2551 	u8 idx;
2552 
2553 	spin_lock_irqsave(&tx->lock, flags);
2554 
2555 	/* Check that this tracker is accepting new timestamp requests */
2556 	if (!ice_ptp_is_tx_tracker_up(tx)) {
2557 		spin_unlock_irqrestore(&tx->lock, flags);
2558 		return -1;
2559 	}
2560 
2561 	/* Find and set the first available index */
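	/* Start the search just past the last index read through the low
	 * latency interface (last_ll_ts_idx_read), wrapping back to the
	 * beginning if nothing is free above it
	 */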
2562 	idx = find_next_zero_bit(tx->in_use, tx->len,
2563 				 tx->last_ll_ts_idx_read + 1);
2564 	if (idx == tx->len)
2565 		idx = find_first_zero_bit(tx->in_use, tx->len);
2566 
2567 	if (idx < tx->len) {
2568 		/* We got a valid index that no other thread could have set. Store
2569 		 * a reference to the skb and the start time to allow discarding old
2570 		 * requests.
2571 		 */
2572 		set_bit(idx, tx->in_use);
2573 		clear_bit(idx, tx->stale);
2574 		tx->tstamps[idx].start = jiffies;
2575 		tx->tstamps[idx].skb = skb_get(skb);
2576 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2577 		ice_trace(tx_tstamp_request, skb, idx);
2578 	}
2579 
2580 	spin_unlock_irqrestore(&tx->lock, flags);
2581 
2582 	/* return the appropriate PHY timestamp register index, -1 if no
2583 	 * indexes were available.
2584 	 */
2585 	if (idx >= tx->len)
2586 		return -1;
2587 	else
2588 		return idx + tx->offset;
2589 }
2590 
2591 /**
2592  * ice_ptp_process_ts - Process the PTP Tx timestamps
2593  * @pf: Board private structure
2594  *
2595  * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx
2596  * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise.
2597  */
2598 enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
2599 {
2600 	switch (pf->ptp.tx_interrupt_mode) {
2601 	case ICE_PTP_TX_INTERRUPT_NONE:
2602 		/* This device has the clock owner handle timestamps for it */
2603 		return ICE_TX_TSTAMP_WORK_DONE;
2604 	case ICE_PTP_TX_INTERRUPT_SELF:
2605 		/* This device handles its own timestamps */
2606 		return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
2607 	case ICE_PTP_TX_INTERRUPT_ALL:
2608 		/* This device handles timestamps for all ports */
2609 		return ice_ptp_tx_tstamp_owner(pf);
2610 	default:
2611 		WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
2612 			  pf->ptp.tx_interrupt_mode);
2613 		return ICE_TX_TSTAMP_WORK_DONE;
2614 	}
2615 }
2616 
2617 /**
2618  * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt
2619  * @pf: Board private structure
2620  *
2621  * The device PHY issues Tx timestamp interrupts to the driver for processing
2622  * timestamp data from the PHY. It will not interrupt again until all
2623  * current timestamp data is read. In rare circumstances, it is possible that
2624  * the driver fails to read all outstanding data.
2625  *
2626  * To avoid getting permanently stuck, periodically check if the PHY has
2627  * outstanding timestamp data. If so, trigger an interrupt from software to
2628  * process this data.
2629  */
2630 static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
2631 {
2632 	struct device *dev = ice_pf_to_dev(pf);
2633 	struct ice_hw *hw = &pf->hw;
2634 	bool trigger_oicr = false;
2635 	unsigned int i;
2636 
2637 	if (ice_is_e810(hw))
2638 		return;
2639 
2640 	if (!ice_pf_src_tmr_owned(pf))
2641 		return;
2642 
2643 	for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) {
2644 		u64 tstamp_ready;
2645 		int err;
2646 
2647 		err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
2648 		if (!err && tstamp_ready) {
2649 			trigger_oicr = true;
2650 			break;
2651 		}
2652 	}
2653 
2654 	if (trigger_oicr) {
2655 		/* Trigger a software interrupt, to ensure this data
2656 		 * gets processed.
2657 		 */
2658 		dev_dbg(dev, "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n");
2659 
2660 		wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
2661 		ice_flush(hw);
2662 	}
2663 }
2664 
2665 static void ice_ptp_periodic_work(struct kthread_work *work)
2666 {
2667 	struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
2668 	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
2669 	int err;
2670 
2671 	if (pf->ptp.state != ICE_PTP_READY)
2672 		return;
2673 
2674 	err = ice_ptp_update_cached_phctime(pf);
2675 
2676 	ice_ptp_maybe_trigger_tx_interrupt(pf);
2677 
2678 	/* Run twice a second or reschedule if phc update failed */
2679 	kthread_queue_delayed_work(ptp->kworker, &ptp->work,
2680 				   msecs_to_jiffies(err ? 10 : 500));
2681 }
2682 
2683 /**
2684  * ice_ptp_prepare_for_reset - Prepare PTP for reset
2685  * @pf: Board private structure
2686  * @reset_type: the reset type being performed
2687  */
2688 void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
2689 {
2690 	struct ice_ptp *ptp = &pf->ptp;
2691 	u8 src_tmr;
2692 
2693 	if (ptp->state != ICE_PTP_READY)
2694 		return;
2695 
2696 	ptp->state = ICE_PTP_RESETTING;
2697 
2698 	/* Disable timestamping for both Tx and Rx */
2699 	ice_ptp_disable_timestamp_mode(pf);
2700 
2701 	kthread_cancel_delayed_work_sync(&ptp->work);
2702 
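	/* A PF reset does not disturb the PHC itself, so there is nothing
	 * more to tear down in that case
	 */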
2703 	if (reset_type == ICE_RESET_PFR)
2704 		return;
2705 
2706 	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
2707 
2708 	/* Disable periodic outputs */
2709 	ice_ptp_disable_all_clkout(pf);
2710 
2711 	src_tmr = ice_get_ptp_src_clock_index(&pf->hw);
2712 
2713 	/* Disable source clock */
2714 	wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);
2715 
2716 	/* Save the current system time so the clock can be restored after reset */
2717 	ptp->reset_time = ktime_get_real_ns();
2718 }
2719 
2720 /**
2721  * ice_ptp_rebuild_owner - Initialize PTP clock owner after reset
2722  * @pf: Board private structure
2723  *
2724  * Companion function for ice_ptp_rebuild() which handles tasks that only the
2725  * PTP clock owner instance should perform.
2726  */
2727 static int ice_ptp_rebuild_owner(struct ice_pf *pf)
2728 {
2729 	struct ice_ptp *ptp = &pf->ptp;
2730 	struct ice_hw *hw = &pf->hw;
2731 	struct timespec64 ts;
2732 	u64 time_diff;
2733 	int err;
2734 
2735 	err = ice_ptp_init_phc(hw);
2736 	if (err)
2737 		return err;
2738 
2739 	/* Acquire the global hardware lock */
2740 	if (!ice_ptp_lock(hw)) {
2741 		err = -EBUSY;
2742 		return err;
2743 	}
2744 
2745 	/* Write the increment time value to PHY and LAN */
2746 	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
2747 	if (err) {
2748 		ice_ptp_unlock(hw);
2749 		return err;
2750 	}
2751 
2752 	/* Write the initial Time value to PHY and LAN using the cached PHC
2753 	 * time from before the reset plus the time elapsed between stopping
2754 	 * and restarting the clock.
2755 	 */
2756 	if (ptp->cached_phc_time) {
2757 		time_diff = ktime_get_real_ns() - ptp->reset_time;
2758 		ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);
2759 	} else {
2760 		ts = ktime_to_timespec64(ktime_get_real());
2761 	}
2762 	err = ice_ptp_write_init(pf, &ts);
2763 	if (err) {
2764 		ice_ptp_unlock(hw);
2765 		return err;
2766 	}
2767 
2768 	/* Release the global hardware lock */
2769 	ice_ptp_unlock(hw);
2770 
2771 	/* Flush software tracking of any outstanding timestamps since we're
2772 	 * about to flush the PHY timestamp block.
2773 	 */
2774 	ice_ptp_flush_all_tx_tracker(pf);
2775 
2776 	if (!ice_is_e810(hw)) {
2777 		/* Enable quad interrupts */
2778 		err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
2779 		if (err)
2780 			return err;
2781 
2782 		ice_ptp_restart_all_phy(pf);
2783 	}
2784 
2785 	return 0;
2786 }
2787 
2788 /**
2789  * ice_ptp_rebuild - Initialize PTP hardware clock support after reset
2790  * @pf: Board private structure
2791  * @reset_type: the reset type being performed
2792  */
2793 void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
2794 {
2795 	struct ice_ptp *ptp = &pf->ptp;
2796 	int err;
2797 
2798 	if (ptp->state == ICE_PTP_READY) {
2799 		ice_ptp_prepare_for_reset(pf, reset_type);
2800 	} else if (ptp->state != ICE_PTP_RESETTING) {
2801 		err = -EINVAL;
2802 		dev_err(ice_pf_to_dev(pf), "PTP was not initialized\n");
2803 		goto err;
2804 	}
2805 
2806 	if (ice_pf_src_tmr_owned(pf) && reset_type != ICE_RESET_PFR) {
2807 		err = ice_ptp_rebuild_owner(pf);
2808 		if (err)
2809 			goto err;
2810 	}
2811 
2812 	ptp->state = ICE_PTP_READY;
2813 
2814 	/* Start periodic work going */
2815 	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
2816 
2817 	dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
2818 	return;
2819 
2820 err:
2821 	ptp->state = ICE_PTP_ERROR;
2822 	dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
2823 }
2824 
2825 /**
2826  * ice_ptp_aux_dev_to_aux_pf - Get auxiliary PF handle for the auxiliary device
2827  * @aux_dev: auxiliary device to get the auxiliary PF for
2828  */
2829 static struct ice_pf *
2830 ice_ptp_aux_dev_to_aux_pf(struct auxiliary_device *aux_dev)
2831 {
2832 	struct ice_ptp_port *aux_port;
2833 	struct ice_ptp *aux_ptp;
2834 
2835 	aux_port = container_of(aux_dev, struct ice_ptp_port, aux_dev);
2836 	aux_ptp = container_of(aux_port, struct ice_ptp, port);
2837 
2838 	return container_of(aux_ptp, struct ice_pf, ptp);
2839 }
2840 
2841 /**
2842  * ice_ptp_aux_dev_to_owner_pf - Get PF handle for the auxiliary device
2843  * @aux_dev: auxiliary device to get the PF for
2844  */
2845 static struct ice_pf *
2846 ice_ptp_aux_dev_to_owner_pf(struct auxiliary_device *aux_dev)
2847 {
2848 	struct ice_ptp_port_owner *ports_owner;
2849 	struct auxiliary_driver *aux_drv;
2850 	struct ice_ptp *owner_ptp;
2851 
2852 	if (!aux_dev->dev.driver)
2853 		return NULL;
2854 
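	/* Walk from the bound auxiliary driver back through its embedding
	 * structures to the PF that owns the PTP clock
	 */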
2855 	aux_drv = to_auxiliary_drv(aux_dev->dev.driver);
2856 	ports_owner = container_of(aux_drv, struct ice_ptp_port_owner,
2857 				   aux_driver);
2858 	owner_ptp = container_of(ports_owner, struct ice_ptp, ports_owner);
2859 	return container_of(owner_ptp, struct ice_pf, ptp);
2860 }
2861 
2862 /**
2863  * ice_ptp_auxbus_probe - Probe auxiliary devices
2864  * @aux_dev: PF's auxiliary device
2865  * @id: Auxiliary device ID
2866  */
2867 static int ice_ptp_auxbus_probe(struct auxiliary_device *aux_dev,
2868 				const struct auxiliary_device_id *id)
2869 {
2870 	struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
2871 	struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev);
2872 
2873 	if (WARN_ON(!owner_pf))
2874 		return -ENODEV;
2875 
2876 	INIT_LIST_HEAD(&aux_pf->ptp.port.list_member);
2877 	mutex_lock(&owner_pf->ptp.ports_owner.lock);
2878 	list_add(&aux_pf->ptp.port.list_member,
2879 		 &owner_pf->ptp.ports_owner.ports);
2880 	mutex_unlock(&owner_pf->ptp.ports_owner.lock);
2881 
2882 	return 0;
2883 }
2884 
2885 /**
2886  * ice_ptp_auxbus_remove - Remove auxiliary devices from the bus
2887  * @aux_dev: PF's auxiliary device
2888  */
2889 static void ice_ptp_auxbus_remove(struct auxiliary_device *aux_dev)
2890 {
2891 	struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
2892 	struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev);
2893 
2894 	mutex_lock(&owner_pf->ptp.ports_owner.lock);
2895 	list_del(&aux_pf->ptp.port.list_member);
2896 	mutex_unlock(&owner_pf->ptp.ports_owner.lock);
2897 }
2898 
2899 /**
2900  * ice_ptp_auxbus_shutdown - Shutdown callback for the PTP auxiliary bus driver
2901  * @aux_dev: PF's auxiliary device
2902  */
2903 static void ice_ptp_auxbus_shutdown(struct auxiliary_device *aux_dev)
2904 {
2905 	/* Nothing to do here, but the auxiliary bus driver interface must be satisfied */
2906 }
2907 
2908 /**
2909  * ice_ptp_auxbus_suspend - Suspend callback for the PTP auxiliary bus driver
2910  * @aux_dev: PF's auxiliary device
2911  * @state: power management state indicator
2912  */
2913 static int
2914 ice_ptp_auxbus_suspend(struct auxiliary_device *aux_dev, pm_message_t state)
2915 {
2916 	/* Nothing to do here, but the auxiliary bus driver interface must be satisfied */
2917 	return 0;
2918 }
2919 
2920 /**
2921  * ice_ptp_auxbus_resume - Resume callback for the PTP auxiliary bus driver
2922  * @aux_dev: PF's auxiliary device
2923  */
2924 static int ice_ptp_auxbus_resume(struct auxiliary_device *aux_dev)
2925 {
2926 	/* Nothing to do here, but the auxiliary bus driver interface must be satisfied */
2927 	return 0;
2928 }
2929 
2930 /**
2931  * ice_ptp_auxbus_create_id_table - Create auxiliary device ID table
2932  * @pf: Board private structure
2933  * @name: auxiliary bus driver name
2934  */
2935 static struct auxiliary_device_id *
2936 ice_ptp_auxbus_create_id_table(struct ice_pf *pf, const char *name)
2937 {
2938 	struct auxiliary_device_id *ids;
2939 
2940 	/* Second id left empty to terminate the array */
2941 	ids = devm_kcalloc(ice_pf_to_dev(pf), 2,
2942 			   sizeof(struct auxiliary_device_id), GFP_KERNEL);
2943 	if (!ids)
2944 		return NULL;
2945 
2946 	snprintf(ids[0].name, sizeof(ids[0].name), "ice.%s", name);
2947 
2948 	return ids;
2949 }
2950 
2951 /**
2952  * ice_ptp_register_auxbus_driver - Register PTP auxiliary bus driver
2953  * @pf: Board private structure
2954  */
2955 static int ice_ptp_register_auxbus_driver(struct ice_pf *pf)
2956 {
2957 	struct auxiliary_driver *aux_driver;
2958 	struct ice_ptp *ptp;
2959 	struct device *dev;
2960 	char *name;
2961 	int err;
2962 
2963 	ptp = &pf->ptp;
2964 	dev = ice_pf_to_dev(pf);
2965 	aux_driver = &ptp->ports_owner.aux_driver;
2966 	INIT_LIST_HEAD(&ptp->ports_owner.ports);
2967 	mutex_init(&ptp->ports_owner.lock);
2968 	name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
2969 			      pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
2970 			      ice_get_ptp_src_clock_index(&pf->hw));
2971 	if (!name)
2972 		return -ENOMEM;
2973 
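	/* The auxiliary devices created for ports of this clock use the same
	 * generated name, which is how the auxiliary bus matches them to the
	 * id_table built below
	 */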
2974 	aux_driver->name = name;
2975 	aux_driver->shutdown = ice_ptp_auxbus_shutdown;
2976 	aux_driver->suspend = ice_ptp_auxbus_suspend;
2977 	aux_driver->remove = ice_ptp_auxbus_remove;
2978 	aux_driver->resume = ice_ptp_auxbus_resume;
2979 	aux_driver->probe = ice_ptp_auxbus_probe;
2980 	aux_driver->id_table = ice_ptp_auxbus_create_id_table(pf, name);
2981 	if (!aux_driver->id_table)
2982 		return -ENOMEM;
2983 
2984 	err = auxiliary_driver_register(aux_driver);
2985 	if (err) {
2986 		devm_kfree(dev, aux_driver->id_table);
2987 		dev_err(dev, "Failed registering aux_driver, name <%s>\n",
2988 			name);
2989 	}
2990 
2991 	return err;
2992 }
2993 
2994 /**
2995  * ice_ptp_unregister_auxbus_driver - Unregister PTP auxiliary bus driver
2996  * @pf: Board private structure
2997  */
2998 static void ice_ptp_unregister_auxbus_driver(struct ice_pf *pf)
2999 {
3000 	struct auxiliary_driver *aux_driver = &pf->ptp.ports_owner.aux_driver;
3001 
3002 	auxiliary_driver_unregister(aux_driver);
3003 	devm_kfree(ice_pf_to_dev(pf), aux_driver->id_table);
3004 
3005 	mutex_destroy(&pf->ptp.ports_owner.lock);
3006 }
3007 
3008 /**
3009  * ice_ptp_clock_index - Get the PTP clock index for this device
3010  * @pf: Board private structure
3011  *
3012  * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock
3013  * is associated.
3014  */
3015 int ice_ptp_clock_index(struct ice_pf *pf)
3016 {
3017 	struct auxiliary_device *aux_dev;
3018 	struct ice_pf *owner_pf;
3019 	struct ptp_clock *clock;
3020 
3021 	aux_dev = &pf->ptp.port.aux_dev;
3022 	owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
3023 	if (!owner_pf)
3024 		return -1;
3025 	clock = owner_pf->ptp.clock;
3026 
3027 	return clock ? ptp_clock_index(clock) : -1;
3028 }
3029 
3030 /**
3031  * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
3032  * @pf: Board private structure
3033  *
3034  * Setup and initialize a PTP clock device that represents the device hardware
3035  * clock. Save the clock index for other functions connected to the same
3036  * hardware resource.
3037  */
3038 static int ice_ptp_init_owner(struct ice_pf *pf)
3039 {
3040 	struct ice_hw *hw = &pf->hw;
3041 	struct timespec64 ts;
3042 	int err;
3043 
3044 	err = ice_ptp_init_phc(hw);
3045 	if (err) {
3046 		dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n",
3047 			err);
3048 		return err;
3049 	}
3050 
3051 	/* Acquire the global hardware lock */
3052 	if (!ice_ptp_lock(hw)) {
3053 		err = -EBUSY;
3054 		goto err_exit;
3055 	}
3056 
3057 	/* Write the increment time value to PHY and LAN */
3058 	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
3059 	if (err) {
3060 		ice_ptp_unlock(hw);
3061 		goto err_exit;
3062 	}
3063 
3064 	ts = ktime_to_timespec64(ktime_get_real());
3065 	/* Write the initial Time value to PHY and LAN */
3066 	err = ice_ptp_write_init(pf, &ts);
3067 	if (err) {
3068 		ice_ptp_unlock(hw);
3069 		goto err_exit;
3070 	}
3071 
3072 	/* Release the global hardware lock */
3073 	ice_ptp_unlock(hw);
3074 
3075 	/* Configure PHY interrupt settings */
3076 	err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
3077 	if (err)
3078 		goto err_exit;
3079 
3080 	/* Ensure we have a clock device */
3081 	err = ice_ptp_create_clock(pf);
3082 	if (err)
3083 		goto err_clk;
3084 
3085 	err = ice_ptp_register_auxbus_driver(pf);
3086 	if (err) {
3087 		dev_err(ice_pf_to_dev(pf), "Failed to register PTP auxbus driver");
3088 		goto err_aux;
3089 	}
3090 
3091 	return 0;
3092 err_aux:
3093 	ptp_clock_unregister(pf->ptp.clock);
3094 err_clk:
3095 	pf->ptp.clock = NULL;
3096 err_exit:
3097 	return err;
3098 }
3099 
3100 /**
3101  * ice_ptp_init_work - Initialize PTP work threads
3102  * @pf: Board private structure
3103  * @ptp: PF PTP structure
3104  */
3105 static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
3106 {
3107 	struct kthread_worker *kworker;
3108 
3109 	/* Initialize work functions */
3110 	kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);
3111 
3112 	/* Allocate a kworker for handling work required for the ports
3113 	 * connected to the PTP hardware clock.
3114 	 */
3115 	kworker = kthread_create_worker(0, "ice-ptp-%s",
3116 					dev_name(ice_pf_to_dev(pf)));
3117 	if (IS_ERR(kworker))
3118 		return PTR_ERR(kworker);
3119 
3120 	ptp->kworker = kworker;
3121 
3122 	/* Start periodic work going */
3123 	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
3124 
3125 	return 0;
3126 }
3127 
3128 /**
3129  * ice_ptp_init_port - Initialize PTP port structure
3130  * @pf: Board private structure
3131  * @ptp_port: PTP port structure
3132  */
3133 static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
3134 {
3135 	struct ice_hw *hw = &pf->hw;
3136 
3137 	mutex_init(&ptp_port->ps_lock);
3138 
3139 	switch (hw->ptp.phy_model) {
3140 	case ICE_PHY_ETH56G:
3141 		return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx,
3142 					      ptp_port->port_num);
3143 	case ICE_PHY_E810:
3144 		return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
3145 	case ICE_PHY_E82X:
3146 		kthread_init_delayed_work(&ptp_port->ov_work,
3147 					  ice_ptp_wait_for_offsets);
3148 
3149 		return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
3150 					    ptp_port->port_num);
3151 	default:
3152 		return -ENODEV;
3153 	}
3154 }
3155 
3156 /**
3157  * ice_ptp_release_auxbus_device - Release callback for the PTP auxiliary device
3158  * @dev: device that utilizes the auxbus
3159  */
3160 static void ice_ptp_release_auxbus_device(struct device *dev)
3161 {
3162 	/* Nothing to do here, but the auxiliary device requires a release callback */
3163 }
3164 
3165 /**
3166  * ice_ptp_create_auxbus_device - Create PTP auxiliary bus device
3167  * @pf: Board private structure
3168  */
3169 static int ice_ptp_create_auxbus_device(struct ice_pf *pf)
3170 {
3171 	struct auxiliary_device *aux_dev;
3172 	struct ice_ptp *ptp;
3173 	struct device *dev;
3174 	char *name;
3175 	int err;
3176 	u32 id;
3177 
3178 	ptp = &pf->ptp;
3179 	id = ptp->port.port_num;
3180 	dev = ice_pf_to_dev(pf);
3181 
3182 	aux_dev = &ptp->port.aux_dev;
3183 
3184 	name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
3185 			      pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
3186 			      ice_get_ptp_src_clock_index(&pf->hw));
3187 	if (!name)
3188 		return -ENOMEM;
3189 
3190 	aux_dev->name = name;
3191 	aux_dev->id = id;
3192 	aux_dev->dev.release = ice_ptp_release_auxbus_device;
3193 	aux_dev->dev.parent = dev;
3194 
3195 	err = auxiliary_device_init(aux_dev);
3196 	if (err)
3197 		goto aux_err;
3198 
3199 	err = auxiliary_device_add(aux_dev);
3200 	if (err) {
3201 		auxiliary_device_uninit(aux_dev);
3202 		goto aux_err;
3203 	}
3204 
3205 	return 0;
3206 aux_err:
3207 	dev_err(dev, "Failed to create PTP auxiliary bus device <%s>\n", name);
3208 	devm_kfree(dev, name);
3209 	return err;
3210 }
3211 
3212 /**
3213  * ice_ptp_remove_auxbus_device - Remove PTP auxiliary bus device
3214  * @pf: Board private structure
3215  */
3216 static void ice_ptp_remove_auxbus_device(struct ice_pf *pf)
3217 {
3218 	struct auxiliary_device *aux_dev = &pf->ptp.port.aux_dev;
3219 
3220 	auxiliary_device_delete(aux_dev);
3221 	auxiliary_device_uninit(aux_dev);
3222 
3223 	memset(aux_dev, 0, sizeof(*aux_dev));
3224 }
3225 
3226 /**
3227  * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode
3228  * @pf: Board private structure
3229  *
3230  * Initialize the Tx timestamp interrupt mode for this device. For most device
3231  * types, each PF processes the interrupt and manages its own timestamps. For
3232  * E822-based devices, only the clock owner processes the timestamps. Other
3233  * PFs disable the interrupt and do not process their own timestamps.
3234  */
3235 static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
3236 {
3237 	switch (pf->hw.ptp.phy_model) {
3238 	case ICE_PHY_E82X:
3239 		/* E822 based PHY has the clock owner process the interrupt
3240 		 * for all ports.
3241 		 */
3242 		if (ice_pf_src_tmr_owned(pf))
3243 			pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL;
3244 		else
3245 			pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE;
3246 		break;
3247 	default:
3248 		/* other PHY types handle their own Tx interrupt */
3249 		pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF;
3250 	}
3251 }
3252 
3253 /**
3254  * ice_ptp_init - Initialize PTP hardware clock support
3255  * @pf: Board private structure
3256  *
3257  * Set up the device for interacting with the PTP hardware clock for all
3258  * functions, both the function that owns the clock hardware, and the
3259  * functions connected to the clock hardware.
3260  *
3261  * The clock owner will allocate and register a ptp_clock with the
3262  * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work
3263  * items used for asynchronous work such as Tx timestamps and periodic work.
3264  */
3265 void ice_ptp_init(struct ice_pf *pf)
3266 {
3267 	struct ice_ptp *ptp = &pf->ptp;
3268 	struct ice_hw *hw = &pf->hw;
3269 	int err;
3270 
3271 	ptp->state = ICE_PTP_INITIALIZING;
3272 
3273 	ice_ptp_init_hw(hw);
3274 
3275 	ice_ptp_init_tx_interrupt_mode(pf);
3276 
3277 	/* If this function owns the clock hardware, it must allocate and
3278 	 * configure the PTP clock device to represent it.
3279 	 */
3280 	if (ice_pf_src_tmr_owned(pf)) {
3281 		err = ice_ptp_init_owner(pf);
3282 		if (err)
3283 			goto err;
3284 	}
3285 
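	/* The PTP port number normally matches the PF ID; on E825C with the
	 * 2x50G muxed topology each PF instead uses port number pf_id * 2
	 */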
3286 	ptp->port.port_num = hw->pf_id;
3287 	if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
3288 		ptp->port.port_num = hw->pf_id * 2;
3289 
3290 	err = ice_ptp_init_port(pf, &ptp->port);
3291 	if (err)
3292 		goto err;
3293 
3294 	/* Start the PHY timestamping block */
3295 	ice_ptp_reset_phy_timestamping(pf);
3296 
3297 	/* Configure initial Tx interrupt settings */
3298 	ice_ptp_cfg_tx_interrupt(pf);
3299 
3300 	err = ice_ptp_create_auxbus_device(pf);
3301 	if (err)
3302 		goto err;
3303 
3304 	ptp->state = ICE_PTP_READY;
3305 
3306 	err = ice_ptp_init_work(pf, ptp);
3307 	if (err)
3308 		goto err;
3309 
3310 	dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
3311 	return;
3312 
3313 err:
3314 	/* If we registered a PTP clock, release it */
3315 	if (pf->ptp.clock) {
3316 		ptp_clock_unregister(ptp->clock);
3317 		pf->ptp.clock = NULL;
3318 	}
3319 	ptp->state = ICE_PTP_ERROR;
3320 	dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
3321 }
3322 
3323 /**
3324  * ice_ptp_release - Disable the driver/HW support and unregister the clock
3325  * @pf: Board private structure
3326  *
3327  * This function handles the cleanup work required from the initialization by
3328  * clearing out the important information and unregistering the clock
3329  */
3330 void ice_ptp_release(struct ice_pf *pf)
3331 {
3332 	if (pf->ptp.state != ICE_PTP_READY)
3333 		return;
3334 
3335 	pf->ptp.state = ICE_PTP_UNINIT;
3336 
3337 	/* Disable timestamping for both Tx and Rx */
3338 	ice_ptp_disable_timestamp_mode(pf);
3339 
3340 	ice_ptp_remove_auxbus_device(pf);
3341 
3342 	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
3343 
3344 	kthread_cancel_delayed_work_sync(&pf->ptp.work);
3345 
3346 	ice_ptp_port_phy_stop(&pf->ptp.port);
3347 	mutex_destroy(&pf->ptp.port.ps_lock);
3348 	if (pf->ptp.kworker) {
3349 		kthread_destroy_worker(pf->ptp.kworker);
3350 		pf->ptp.kworker = NULL;
3351 	}
3352 
3353 	if (ice_pf_src_tmr_owned(pf))
3354 		ice_ptp_unregister_auxbus_driver(pf);
3355 
3356 	if (!pf->ptp.clock)
3357 		return;
3358 
3359 	/* Disable periodic outputs */
3360 	ice_ptp_disable_all_clkout(pf);
3361 
3362 	ptp_clock_unregister(pf->ptp.clock);
3363 	pf->ptp.clock = NULL;
3364 
3365 	dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
3366 }
3367