xref: /linux/drivers/net/ethernet/intel/ice/ice_ptp.c (revision 67f9c312b0a7f4bc869376d2a68308e673235954)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021, Intel Corporation. */
3 
4 #include "ice.h"
5 #include "ice_lib.h"
6 #include "ice_trace.h"
7 
8 #define E810_OUT_PROP_DELAY_NS 1
9 
10 #define UNKNOWN_INCVAL_E82X 0x100000000ULL
11 
12 static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
13 	/* name    idx   func         chan */
14 	{ "GNSS",  GNSS, PTP_PF_EXTTS, 0, { 0, } },
15 	{ "SMA1",  SMA1, PTP_PF_NONE, 1, { 0, } },
16 	{ "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } },
17 	{ "SMA2",  SMA2, PTP_PF_NONE, 2, { 0, } },
18 	{ "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } },
19 };
20 
21 /**
22  * ice_get_sma_config_e810t - Get the configuration of the SMA control logic
23  * @hw: pointer to the hw struct
24  * @ptp_pins: pointer to the ptp_pin_desc structure
25  *
26  * Read the configuration of the SMA control logic and put it into the
27  * ptp_pin_desc structure
28  */
29 static int
30 ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
31 {
32 	u8 data, i;
33 	int status;
34 
35 	/* Read initial pin state */
36 	status = ice_read_sma_ctrl_e810t(hw, &data);
37 	if (status)
38 		return status;
39 
40 	/* initialize with defaults */
41 	for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
42 		strscpy(ptp_pins[i].name, ice_pin_desc_e810t[i].name,
43 			sizeof(ptp_pins[i].name));
44 		ptp_pins[i].index = ice_pin_desc_e810t[i].index;
45 		ptp_pins[i].func = ice_pin_desc_e810t[i].func;
46 		ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
47 	}
48 
49 	/* Parse SMA1/UFL1 */
50 	switch (data & ICE_SMA1_MASK_E810T) {
51 	case ICE_SMA1_MASK_E810T:
52 	default:
53 		ptp_pins[SMA1].func = PTP_PF_NONE;
54 		ptp_pins[UFL1].func = PTP_PF_NONE;
55 		break;
56 	case ICE_SMA1_DIR_EN_E810T:
57 		ptp_pins[SMA1].func = PTP_PF_PEROUT;
58 		ptp_pins[UFL1].func = PTP_PF_NONE;
59 		break;
60 	case ICE_SMA1_TX_EN_E810T:
61 		ptp_pins[SMA1].func = PTP_PF_EXTTS;
62 		ptp_pins[UFL1].func = PTP_PF_NONE;
63 		break;
64 	case 0:
65 		ptp_pins[SMA1].func = PTP_PF_EXTTS;
66 		ptp_pins[UFL1].func = PTP_PF_PEROUT;
67 		break;
68 	}
69 
70 	/* Parse SMA2/UFL2 */
71 	switch (data & ICE_SMA2_MASK_E810T) {
72 	case ICE_SMA2_MASK_E810T:
73 	default:
74 		ptp_pins[SMA2].func = PTP_PF_NONE;
75 		ptp_pins[UFL2].func = PTP_PF_NONE;
76 		break;
77 	case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
78 		ptp_pins[SMA2].func = PTP_PF_EXTTS;
79 		ptp_pins[UFL2].func = PTP_PF_NONE;
80 		break;
81 	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
82 		ptp_pins[SMA2].func = PTP_PF_PEROUT;
83 		ptp_pins[UFL2].func = PTP_PF_NONE;
84 		break;
85 	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T):
86 		ptp_pins[SMA2].func = PTP_PF_NONE;
87 		ptp_pins[UFL2].func = PTP_PF_EXTTS;
88 		break;
89 	case ICE_SMA2_DIR_EN_E810T:
90 		ptp_pins[SMA2].func = PTP_PF_PEROUT;
91 		ptp_pins[UFL2].func = PTP_PF_EXTTS;
92 		break;
93 	}
94 
95 	return 0;
96 }
97 
98 /**
99  * ice_ptp_set_sma_config_e810t - Set the configuration of the SMA control logic
100  * @hw: pointer to the hw struct
101  * @ptp_pins: pointer to the ptp_pin_desc structure
102  *
103  * Set the configuration of the SMA control logic based on the configuration in
104  * the ptp_pins parameter
105  */
106 static int
107 ice_ptp_set_sma_config_e810t(struct ice_hw *hw,
108 			     const struct ptp_pin_desc *ptp_pins)
109 {
110 	int status;
111 	u8 data;
112 
113 	/* SMA1 and UFL1 cannot be set to TX at the same time */
114 	if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
115 	    ptp_pins[UFL1].func == PTP_PF_PEROUT)
116 		return -EINVAL;
117 
118 	/* SMA2 and UFL2 cannot be set to RX at the same time */
119 	if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
120 	    ptp_pins[UFL2].func == PTP_PF_EXTTS)
121 		return -EINVAL;
122 
123 	/* Read initial pin state value */
124 	status = ice_read_sma_ctrl_e810t(hw, &data);
125 	if (status)
126 		return status;
127 
128 	/* Set the right state based on the desired configuration */
129 	data &= ~ICE_SMA1_MASK_E810T;
130 	if (ptp_pins[SMA1].func == PTP_PF_NONE &&
131 	    ptp_pins[UFL1].func == PTP_PF_NONE) {
132 		dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled");
133 		data |= ICE_SMA1_MASK_E810T;
134 	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
135 		   ptp_pins[UFL1].func == PTP_PF_NONE) {
136 		dev_info(ice_hw_to_dev(hw), "SMA1 RX");
137 		data |= ICE_SMA1_TX_EN_E810T;
138 	} else if (ptp_pins[SMA1].func == PTP_PF_NONE &&
139 		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
140 		/* U.FL 1 TX will always enable SMA 1 RX */
141 		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
142 	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
143 		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
144 		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
145 	} else if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
146 		   ptp_pins[UFL1].func == PTP_PF_NONE) {
147 		dev_info(ice_hw_to_dev(hw), "SMA1 TX");
148 		data |= ICE_SMA1_DIR_EN_E810T;
149 	}
150 
151 	data &= ~ICE_SMA2_MASK_E810T;
152 	if (ptp_pins[SMA2].func == PTP_PF_NONE &&
153 	    ptp_pins[UFL2].func == PTP_PF_NONE) {
154 		dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled");
155 		data |= ICE_SMA2_MASK_E810T;
156 	} else if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
157 			ptp_pins[UFL2].func == PTP_PF_NONE) {
158 		dev_info(ice_hw_to_dev(hw), "SMA2 RX");
159 		data |= (ICE_SMA2_TX_EN_E810T |
160 			 ICE_SMA2_UFL2_RX_DIS_E810T);
161 	} else if (ptp_pins[SMA2].func == PTP_PF_NONE &&
162 		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
163 		dev_info(ice_hw_to_dev(hw), "UFL2 RX");
164 		data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T);
165 	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
166 		   ptp_pins[UFL2].func == PTP_PF_NONE) {
167 		dev_info(ice_hw_to_dev(hw), "SMA2 TX");
168 		data |= (ICE_SMA2_DIR_EN_E810T |
169 			 ICE_SMA2_UFL2_RX_DIS_E810T);
170 	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
171 		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
172 		dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX");
173 		data |= ICE_SMA2_DIR_EN_E810T;
174 	}
175 
176 	return ice_write_sma_ctrl_e810t(hw, data);
177 }
178 
179 /**
180  * ice_ptp_set_sma_e810t - Set the configuration of a single SMA pin
181  * @info: the driver's PTP info structure
182  * @pin: pin index in kernel structure
183  * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT)
184  *
185  * Set the configuration of a single SMA pin
186  */
187 static int
188 ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin,
189 		      enum ptp_pin_function func)
190 {
191 	struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T];
192 	struct ice_pf *pf = ptp_info_to_pf(info);
193 	struct ice_hw *hw = &pf->hw;
194 	int err;
195 
196 	if (pin < SMA1 || func > PTP_PF_PEROUT)
197 		return -EOPNOTSUPP;
198 
199 	err = ice_get_sma_config_e810t(hw, ptp_pins);
200 	if (err)
201 		return err;
202 
203 	/* Disable the same function on the other pin sharing the channel */
204 	if (pin == SMA1 && ptp_pins[UFL1].func == func)
205 		ptp_pins[UFL1].func = PTP_PF_NONE;
206 	if (pin == UFL1 && ptp_pins[SMA1].func == func)
207 		ptp_pins[SMA1].func = PTP_PF_NONE;
208 
209 	if (pin == SMA2 && ptp_pins[UFL2].func == func)
210 		ptp_pins[UFL2].func = PTP_PF_NONE;
211 	if (pin == UFL2 && ptp_pins[SMA2].func == func)
212 		ptp_pins[SMA2].func = PTP_PF_NONE;
213 
214 	/* Set up new pin function in the temp table */
215 	ptp_pins[pin].func = func;
216 
217 	return ice_ptp_set_sma_config_e810t(hw, ptp_pins);
218 }
219 
220 /**
221  * ice_verify_pin_e810t - Verify if pin supports requested pin function
222  * @info: the driver's PTP info structure
223  * @pin: Pin index
224  * @func: Assigned function
225  * @chan: Assigned channel
226  *
227  * Verify that the pin supports the requested function and check pin consistency.
228  * Reconfigure the SMA logic attached to the given pin to enable its
229  * desired functionality
230  */
231 static int
232 ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
233 		     enum ptp_pin_function func, unsigned int chan)
234 {
235 	/* Don't allow channel reassignment */
236 	if (chan != ice_pin_desc_e810t[pin].chan)
237 		return -EOPNOTSUPP;
238 
239 	/* Check if functions are properly assigned */
240 	switch (func) {
241 	case PTP_PF_NONE:
242 		break;
243 	case PTP_PF_EXTTS:
244 		if (pin == UFL1)
245 			return -EOPNOTSUPP;
246 		break;
247 	case PTP_PF_PEROUT:
248 		if (pin == UFL2 || pin == GNSS)
249 			return -EOPNOTSUPP;
250 		break;
251 	case PTP_PF_PHYSYNC:
252 		return -EOPNOTSUPP;
253 	}
254 
255 	return ice_ptp_set_sma_e810t(info, pin, func);
256 }
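
/*
 * Illustrative example (not part of the driver): a minimal user-space sketch
 * of how the SMA pin reconfiguration above is typically reached. The PTP core
 * forwards PTP_PIN_SETFUNC requests to the driver's .verify() callback, i.e.
 * ice_verify_pin_e810t() above. The clock node (/dev/ptp0) and the pin index
 * (1 == SMA1 in ice_pin_desc_e810t[]) are assumptions chosen for the example.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_pin_desc desc;
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/ptp0");
		return 1;
	}

	memset(&desc, 0, sizeof(desc));
	desc.index = 1;			/* SMA1 */
	desc.func = PTP_PF_EXTTS;	/* use SMA1 as a timestamp input */
	desc.chan = 1;			/* channel from the pin table above */

	if (ioctl(fd, PTP_PIN_SETFUNC, &desc))
		perror("PTP_PIN_SETFUNC");

	return 0;
}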
257 
258 /**
259  * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
260  * @pf: Board private structure
261  *
262  * Program the device to respond appropriately to the Tx timestamp interrupt
263  * cause.
264  */
265 static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf)
266 {
267 	struct ice_hw *hw = &pf->hw;
268 	bool enable;
269 	u32 val;
270 
271 	switch (pf->ptp.tx_interrupt_mode) {
272 	case ICE_PTP_TX_INTERRUPT_ALL:
273 		/* React to interrupts across all quads. */
274 		wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
275 		enable = true;
276 		break;
277 	case ICE_PTP_TX_INTERRUPT_NONE:
278 		/* Do not react to interrupts on any quad. */
279 		wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
280 		enable = false;
281 		break;
282 	case ICE_PTP_TX_INTERRUPT_SELF:
283 	default:
284 		enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON;
285 		break;
286 	}
287 
288 	/* Configure the Tx timestamp interrupt */
289 	val = rd32(hw, PFINT_OICR_ENA);
290 	if (enable)
291 		val |= PFINT_OICR_TSYN_TX_M;
292 	else
293 		val &= ~PFINT_OICR_TSYN_TX_M;
294 	wr32(hw, PFINT_OICR_ENA, val);
295 }
296 
297 /**
298  * ice_set_rx_tstamp - Enable or disable Rx timestamping
299  * @pf: The PF pointer to search in
300  * @on: bool value for whether timestamps are enabled or disabled
301  */
302 static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
303 {
304 	struct ice_vsi *vsi;
305 	u16 i;
306 
307 	vsi = ice_get_main_vsi(pf);
308 	if (!vsi || !vsi->rx_rings)
309 		return;
310 
311 	/* Set the timestamp flag for all the Rx rings */
312 	ice_for_each_rxq(vsi, i) {
313 		if (!vsi->rx_rings[i])
314 			continue;
315 		vsi->rx_rings[i]->ptp_rx = on;
316 	}
317 }
318 
319 /**
320  * ice_ptp_disable_timestamp_mode - Disable current timestamp mode
321  * @pf: Board private structure
322  *
323  * Called during preparation for reset to temporarily disable timestamping on
324  * the device. Called during remove to disable timestamping while cleaning up
325  * driver resources.
326  */
327 static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf)
328 {
329 	struct ice_hw *hw = &pf->hw;
330 	u32 val;
331 
332 	val = rd32(hw, PFINT_OICR_ENA);
333 	val &= ~PFINT_OICR_TSYN_TX_M;
334 	wr32(hw, PFINT_OICR_ENA, val);
335 
336 	ice_set_rx_tstamp(pf, false);
337 }
338 
339 /**
340  * ice_ptp_restore_timestamp_mode - Restore timestamp configuration
341  * @pf: Board private structure
342  *
343  * Called at the end of rebuild to restore timestamp configuration after
344  * a device reset.
345  */
346 void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
347 {
348 	struct ice_hw *hw = &pf->hw;
349 	bool enable_rx;
350 
351 	ice_ptp_cfg_tx_interrupt(pf);
352 
353 	enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
354 	ice_set_rx_tstamp(pf, enable_rx);
355 
356 	/* Trigger an immediate software interrupt to ensure that timestamps
357 	 * which occurred during reset are handled now.
358 	 */
359 	wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
360 	ice_flush(hw);
361 }
362 
363 /**
364  * ice_ptp_read_src_clk_reg - Read the source clock register
365  * @pf: Board private structure
366  * @sts: Optional parameter for holding a pair of system timestamps from
367  *       the system clock. Will be ignored if NULL is given.
368  */
369 static u64
370 ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
371 {
372 	struct ice_hw *hw = &pf->hw;
373 	u32 hi, lo, lo2;
374 	u8 tmr_idx;
375 
376 	tmr_idx = ice_get_ptp_src_clock_index(hw);
377 	guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
378 	/* Read the system timestamp pre PHC read */
379 	ptp_read_system_prets(sts);
380 
381 	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
382 
383 	/* Read the system timestamp post PHC read */
384 	ptp_read_system_postts(sts);
385 
386 	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
387 	lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));
388 
389 	if (lo2 < lo) {
390 		/* If TIME_L rolled over, read TIME_L again and update the
391 		 * system timestamps
392 		 */
393 		ptp_read_system_prets(sts);
394 		lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
395 		ptp_read_system_postts(sts);
396 		hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
397 	}
398 
399 	return ((u64)hi << 32) | lo;
400 }
401 
402 /**
403  * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
404  * @cached_phc_time: recently cached copy of PHC time
405  * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
406  *
407  * Hardware captures timestamps which contain only 32 bits of nominal
408  * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
409  * Note that the captured timestamp values may be 40 bits, but the lower
410  * 8 bits are sub-nanoseconds and generally discarded.
411  *
412  * Extend the 32bit nanosecond timestamp using the following algorithm and
413  * assumptions:
414  *
415  * 1) have a recently cached copy of the PHC time
416  * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1
417  *    seconds) before or after the PHC time was captured.
418  * 3) calculate the delta between the cached time and the timestamp
419  * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
420  *    captured after the PHC time. In this case, the full timestamp is just
421  *    the cached PHC time plus the delta.
422  * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
423  *    timestamp was captured *before* the PHC time, i.e. because the PHC
424  *    cache was updated after the timestamp was captured by hardware. In this
425  *    case, the full timestamp is the cached time minus the inverse delta.
426  *
427  * This algorithm works even if the PHC time was updated after a Tx timestamp
428  * was requested, but before the Tx timestamp event was reported from
429  * hardware.
430  *
431  * This calculation primarily relies on keeping the cached PHC time up to
432  * date. If the timestamp was captured more than 2^31 nanoseconds after the
433  * PHC time, it is possible that the lower 32bits of PHC time have
434  * overflowed more than once, and we might generate an incorrect timestamp.
435  *
436  * This is prevented by (a) periodically updating the cached PHC time once
437  * a second, and (b) discarding any Tx timestamp packet if it has waited for
438  * a timestamp for more than one second.
439  */
440 static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
441 {
442 	u32 delta, phc_time_lo;
443 	u64 ns;
444 
445 	/* Extract the lower 32 bits of the PHC time */
446 	phc_time_lo = (u32)cached_phc_time;
447 
448 	/* Calculate the delta between the lower 32bits of the cached PHC
449 	 * time and the in_tstamp value
450 	 */
451 	delta = (in_tstamp - phc_time_lo);
452 
453 	/* Do not assume that the in_tstamp is always more recent than the
454 	 * cached PHC time. If the delta is large, it indicates that the
455 	 * in_tstamp was taken in the past, and should be converted
456 	 * forward.
457 	 */
458 	if (delta > (U32_MAX / 2)) {
459 		/* reverse the delta calculation here */
460 		delta = (phc_time_lo - in_tstamp);
461 		ns = cached_phc_time - delta;
462 	} else {
463 		ns = cached_phc_time + delta;
464 	}
465 
466 	return ns;
467 }
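
/*
 * Illustrative example (not part of the driver): a stand-alone sketch of the
 * extension math above with assumed sample values, covering both the
 * "captured after the cached PHC time" and "captured before it" cases.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t extend_32b_example(uint64_t cached_phc_time, uint32_t in_tstamp)
{
	uint32_t phc_lo = (uint32_t)cached_phc_time;
	uint32_t delta = in_tstamp - phc_lo;

	/* A large delta means the timestamp predates the cached PHC time */
	if (delta > (UINT32_MAX / 2))
		return cached_phc_time - (uint32_t)(phc_lo - in_tstamp);

	return cached_phc_time + delta;
}

int main(void)
{
	/* Cached PHC time just past a rollover of the low 32 bits */
	uint64_t phc = 0x500000010ULL;

	/* Captured shortly after the cached time: extend forward */
	assert(extend_32b_example(phc, 0x00000020) == 0x500000020ULL);

	/* Captured just before the rollover: extend backward */
	assert(extend_32b_example(phc, 0xfffffff0) == 0x4fffffff0ULL);

	return 0;
}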
468 
469 /**
470  * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
471  * @pf: Board private structure
472  * @in_tstamp: Ingress/egress 40b timestamp value
473  *
474  * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
475  * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
476  *
477  *  *--------------------------------------------------------------*
478  *  | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
479  *  *--------------------------------------------------------------*
480  *
481  * The low bit is an indicator of whether the timestamp is valid. The next
482  * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
483  * and the remaining 32 bits are the lower 32 bits of the PHC timer.
484  *
485  * It is assumed that the caller verifies the timestamp is valid prior to
486  * calling this function.
487  *
488  * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
489  * time stored in the device private PTP structure as the basis for timestamp
490  * extension.
491  *
492  * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
493  * algorithm.
494  */
495 static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
496 {
497 	const u64 mask = GENMASK_ULL(31, 0);
498 	unsigned long discard_time;
499 
500 	/* Discard the hardware timestamp if the cached PHC time is too old */
501 	discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
502 	if (time_is_before_jiffies(discard_time)) {
503 		pf->ptp.tx_hwtstamp_discarded++;
504 		return 0;
505 	}
506 
507 	return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
508 				     (in_tstamp >> 8) & mask);
509 }
510 
511 /**
512  * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps
513  * @tx: the PTP Tx timestamp tracker to check
514  *
515  * Check that a given PTP Tx timestamp tracker is up, i.e. that it is ready
516  * to accept new timestamp requests.
517  *
518  * Assumes the tx->lock spinlock is already held.
519  */
520 static bool
521 ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
522 {
523 	lockdep_assert_held(&tx->lock);
524 
525 	return tx->init && !tx->calibrating;
526 }
527 
528 /**
529  * ice_ptp_req_tx_single_tstamp - Request Tx timestamp for a port from FW
530  * @tx: the PTP Tx timestamp tracker
531  * @idx: index of the timestamp to request
532  */
533 void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
534 {
535 	struct ice_ptp_port *ptp_port;
536 	struct sk_buff *skb;
537 	struct ice_pf *pf;
538 
539 	if (!tx->init)
540 		return;
541 
542 	ptp_port = container_of(tx, struct ice_ptp_port, tx);
543 	pf = ptp_port_to_pf(ptp_port);
544 
545 	/* Drop packets which have waited for more than 2 seconds */
546 	if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
547 		/* Count the number of Tx timestamps that timed out */
548 		pf->ptp.tx_hwtstamp_timeouts++;
549 
550 		skb = tx->tstamps[idx].skb;
551 		tx->tstamps[idx].skb = NULL;
552 		clear_bit(idx, tx->in_use);
553 
554 		dev_kfree_skb_any(skb);
555 		return;
556 	}
557 
558 	ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
559 
560 	/* Write TS index to read to the PF register so the FW can read it */
561 	wr32(&pf->hw, PF_SB_ATQBAL,
562 	     TS_LL_READ_TS_INTR | FIELD_PREP(TS_LL_READ_TS_IDX, idx) |
563 	     TS_LL_READ_TS);
564 	tx->last_ll_ts_idx_read = idx;
565 }
566 
567 /**
568  * ice_ptp_complete_tx_single_tstamp - Complete Tx timestamp for a port
569  * @tx: the PTP Tx timestamp tracker
570  */
571 void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
572 {
573 	struct skb_shared_hwtstamps shhwtstamps = {};
574 	u8 idx = tx->last_ll_ts_idx_read;
575 	struct ice_ptp_port *ptp_port;
576 	u64 raw_tstamp, tstamp;
577 	bool drop_ts = false;
578 	struct sk_buff *skb;
579 	struct ice_pf *pf;
580 	u32 val;
581 
582 	if (!tx->init || tx->last_ll_ts_idx_read < 0)
583 		return;
584 
585 	ptp_port = container_of(tx, struct ice_ptp_port, tx);
586 	pf = ptp_port_to_pf(ptp_port);
587 
588 	ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
589 
590 	val = rd32(&pf->hw, PF_SB_ATQBAL);
591 
592 	/* When the bit is cleared, the TS is ready in the register */
593 	if (val & TS_LL_READ_TS) {
594 		dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready");
595 		return;
596 	}
597 
598 	/* The high 8 bits of the TS are in bits 16:23 */
599 	raw_tstamp = FIELD_GET(TS_LL_READ_TS_HIGH, val);
600 	raw_tstamp <<= 32;
601 
602 	/* Read the low 32 bit value */
603 	raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH);
604 
605 	/* Devices using this interface always verify that the timestamp differs
606 	 * from the last cached timestamp value.
607 	 */
608 	if (raw_tstamp == tx->tstamps[idx].cached_tstamp)
609 		return;
610 
611 	tx->tstamps[idx].cached_tstamp = raw_tstamp;
612 	clear_bit(idx, tx->in_use);
613 	skb = tx->tstamps[idx].skb;
614 	tx->tstamps[idx].skb = NULL;
615 	if (test_and_clear_bit(idx, tx->stale))
616 		drop_ts = true;
617 
618 	if (!skb)
619 		return;
620 
621 	if (drop_ts) {
622 		dev_kfree_skb_any(skb);
623 		return;
624 	}
625 
626 	/* Extend the timestamp using cached PHC time */
627 	tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
628 	if (tstamp) {
629 		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
630 		ice_trace(tx_tstamp_complete, skb, idx);
631 	}
632 
633 	skb_tstamp_tx(skb, &shhwtstamps);
634 	dev_kfree_skb_any(skb);
635 }
636 
637 /**
638  * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port
639  * @tx: the PTP Tx timestamp tracker
640  *
641  * Process timestamps captured by the PHY associated with this port. To do
642  * this, loop over each index with a waiting skb.
643  *
644  * If a given index has a valid timestamp, perform the following steps:
645  *
646  * 1) check that the timestamp request is not stale
647  * 2) check that a timestamp is ready and available in the PHY memory bank
648  * 3) read and copy the timestamp out of the PHY register
649  * 4) unlock the index by clearing the associated in_use bit
650  * 5) check if the timestamp is stale, and discard if so
651  * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value
652  * 7) send this 64 bit timestamp to the stack
653  *
654  * Note that we do not hold the tracking lock while reading the Tx timestamp.
655  * This is because reading the timestamp requires taking a mutex that might
656  * sleep.
657  *
658  * The only place where we set in_use is when a new timestamp is initiated
659  * with a slot index. This is only called in the hard xmit routine where an
660  * SKB has a request flag set. The only places where we clear this bit is this
661  * function, or during teardown when the Tx timestamp tracker is being
662  * removed. A timestamp index will never be re-used until the in_use bit for
663  * that index is cleared.
664  *
665  * If a Tx thread starts a new timestamp, we might not begin processing it
666  * right away but we will notice it at the end when we re-queue the task.
667  *
668  * If a Tx thread starts a new timestamp just after this function exits, the
669  * interrupt for that timestamp should re-trigger this function once
670  * a timestamp is ready.
671  *
672  * In cases where the PTP hardware clock was directly adjusted, some
673  * timestamps may not be able to safely use the timestamp extension math. In
674  * this case, software will set the stale bit for any outstanding Tx
675  * timestamps when the clock is adjusted. Then this function will discard
676  * those captured timestamps instead of sending them to the stack.
677  *
678  * If a Tx packet has been waiting for more than 2 seconds, it is not possible
679  * to correctly extend the timestamp using the cached PHC time. It is
680  * extremely unlikely that a packet will ever take this long to timestamp. If
681  * we detect a Tx timestamp request that has waited for this long we assume
682  * the packet will never be sent by hardware and discard it without reading
683  * the timestamp register.
684  */
685 static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
686 {
687 	struct ice_ptp_port *ptp_port;
688 	unsigned long flags;
689 	struct ice_pf *pf;
690 	struct ice_hw *hw;
691 	u64 tstamp_ready;
692 	bool link_up;
693 	int err;
694 	u8 idx;
695 
696 	ptp_port = container_of(tx, struct ice_ptp_port, tx);
697 	pf = ptp_port_to_pf(ptp_port);
698 	hw = &pf->hw;
699 
700 	/* Read the Tx ready status first */
701 	if (tx->has_ready_bitmap) {
702 		err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
703 		if (err)
704 			return;
705 	}
706 
707 	/* Drop packets if the link went down */
708 	link_up = ptp_port->link_up;
709 
710 	for_each_set_bit(idx, tx->in_use, tx->len) {
711 		struct skb_shared_hwtstamps shhwtstamps = {};
712 		u8 phy_idx = idx + tx->offset;
713 		u64 raw_tstamp = 0, tstamp;
714 		bool drop_ts = !link_up;
715 		struct sk_buff *skb;
716 
717 		/* Drop packets which have waited for more than 2 seconds */
718 		if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
719 			drop_ts = true;
720 
721 			/* Count the number of Tx timestamps that timed out */
722 			pf->ptp.tx_hwtstamp_timeouts++;
723 		}
724 
725 		/* Only read a timestamp from the PHY if it's marked as ready
726 		 * by the tstamp_ready register. This avoids unnecessary
727 		 * reading of timestamps which are not yet valid. This is
728 		 * important as we must read all timestamps which are valid
729 		 * and only timestamps which are valid during each interrupt.
730 		 * If we do not, the hardware logic for generating a new
731 		 * interrupt can get stuck on some devices.
732 		 */
733 		if (tx->has_ready_bitmap &&
734 		    !(tstamp_ready & BIT_ULL(phy_idx))) {
735 			if (drop_ts)
736 				goto skip_ts_read;
737 
738 			continue;
739 		}
740 
741 		ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
742 
743 		err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp);
744 		if (err && !drop_ts)
745 			continue;
746 
747 		ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
748 
749 		/* For PHYs which don't implement a proper timestamp ready
750 		 * bitmap, verify that the timestamp value is different
751 		 * from the last cached timestamp. If it is not, skip this for
752 		 * now assuming it hasn't yet been captured by hardware.
753 		 */
754 		if (!drop_ts && !tx->has_ready_bitmap &&
755 		    raw_tstamp == tx->tstamps[idx].cached_tstamp)
756 			continue;
757 
758 		/* Discard any timestamp value without the valid bit set */
759 		if (!(raw_tstamp & ICE_PTP_TS_VALID))
760 			drop_ts = true;
761 
762 skip_ts_read:
763 		spin_lock_irqsave(&tx->lock, flags);
764 		if (!tx->has_ready_bitmap && raw_tstamp)
765 			tx->tstamps[idx].cached_tstamp = raw_tstamp;
766 		clear_bit(idx, tx->in_use);
767 		skb = tx->tstamps[idx].skb;
768 		tx->tstamps[idx].skb = NULL;
769 		if (test_and_clear_bit(idx, tx->stale))
770 			drop_ts = true;
771 		spin_unlock_irqrestore(&tx->lock, flags);
772 
773 		/* It is unlikely but possible that the SKB will have been
774 		 * flushed at this point due to link change or teardown.
775 		 */
776 		if (!skb)
777 			continue;
778 
779 		if (drop_ts) {
780 			dev_kfree_skb_any(skb);
781 			continue;
782 		}
783 
784 		/* Extend the timestamp using cached PHC time */
785 		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
786 		if (tstamp) {
787 			shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
788 			ice_trace(tx_tstamp_complete, skb, idx);
789 		}
790 
791 		skb_tstamp_tx(skb, &shhwtstamps);
792 		dev_kfree_skb_any(skb);
793 	}
794 }
795 
796 /**
797  * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device
798  * @pf: Board private structure
799  */
800 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
801 {
802 	struct ice_ptp_port *port;
803 	unsigned int i;
804 
805 	mutex_lock(&pf->ptp.ports_owner.lock);
806 	list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member) {
807 		struct ice_ptp_tx *tx = &port->tx;
808 
809 		if (!tx || !tx->init)
810 			continue;
811 
812 		ice_ptp_process_tx_tstamp(tx);
813 	}
814 	mutex_unlock(&pf->ptp.ports_owner.lock);
815 
816 	for (i = 0; i < ICE_MAX_QUAD; i++) {
817 		u64 tstamp_ready;
818 		int err;
819 
820 		/* Read the Tx ready status first */
821 		err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
822 		if (err)
823 			break;
824 		else if (tstamp_ready)
825 			return ICE_TX_TSTAMP_WORK_PENDING;
826 	}
827 
828 	return ICE_TX_TSTAMP_WORK_DONE;
829 }
830 
831 /**
832  * ice_ptp_tx_tstamp - Process Tx timestamps for this function
833  * @tx: the Tx timestamp tracker to process
834  *
835  * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete
836  * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise.
837  */
838 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
839 {
840 	bool more_timestamps;
841 	unsigned long flags;
842 
843 	if (!tx->init)
844 		return ICE_TX_TSTAMP_WORK_DONE;
845 
846 	/* Process the Tx timestamp tracker */
847 	ice_ptp_process_tx_tstamp(tx);
848 
849 	/* Check if there are outstanding Tx timestamps */
850 	spin_lock_irqsave(&tx->lock, flags);
851 	more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);
852 	spin_unlock_irqrestore(&tx->lock, flags);
853 
854 	if (more_timestamps)
855 		return ICE_TX_TSTAMP_WORK_PENDING;
856 
857 	return ICE_TX_TSTAMP_WORK_DONE;
858 }
859 
860 /**
861  * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
862  * @tx: Tx tracking structure to initialize
863  *
864  * Assumes that the length has already been initialized. Do not call directly,
865  * use one of the ice_ptp_init_tx_* functions instead.
866  */
867 static int
868 ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
869 {
870 	unsigned long *in_use, *stale;
871 	struct ice_tx_tstamp *tstamps;
872 
873 	tstamps = kcalloc(tx->len, sizeof(*tstamps), GFP_KERNEL);
874 	in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
875 	stale = bitmap_zalloc(tx->len, GFP_KERNEL);
876 
877 	if (!tstamps || !in_use || !stale) {
878 		kfree(tstamps);
879 		bitmap_free(in_use);
880 		bitmap_free(stale);
881 
882 		return -ENOMEM;
883 	}
884 
885 	tx->tstamps = tstamps;
886 	tx->in_use = in_use;
887 	tx->stale = stale;
888 	tx->init = 1;
889 	tx->last_ll_ts_idx_read = -1;
890 
891 	spin_lock_init(&tx->lock);
892 
893 	return 0;
894 }
895 
896 /**
897  * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
898  * @pf: Board private structure
899  * @tx: the tracker to flush
900  *
901  * Called during teardown when a Tx tracker is being removed.
902  */
903 static void
904 ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
905 {
906 	struct ice_hw *hw = &pf->hw;
907 	unsigned long flags;
908 	u64 tstamp_ready;
909 	int err;
910 	u8 idx;
911 
912 	err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
913 	if (err) {
914 		dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n",
915 			tx->block, err);
916 
917 		/* If we fail to read the Tx timestamp ready bitmap just
918 		 * skip clearing the PHY timestamps.
919 		 */
920 		tstamp_ready = 0;
921 	}
922 
923 	for_each_set_bit(idx, tx->in_use, tx->len) {
924 		u8 phy_idx = idx + tx->offset;
925 		struct sk_buff *skb;
926 
927 		/* In case this timestamp is ready, we need to clear it. */
928 		if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx)))
929 			ice_clear_phy_tstamp(hw, tx->block, phy_idx);
930 
931 		spin_lock_irqsave(&tx->lock, flags);
932 		skb = tx->tstamps[idx].skb;
933 		tx->tstamps[idx].skb = NULL;
934 		clear_bit(idx, tx->in_use);
935 		clear_bit(idx, tx->stale);
936 		spin_unlock_irqrestore(&tx->lock, flags);
937 
938 		/* Count the number of Tx timestamps flushed */
939 		pf->ptp.tx_hwtstamp_flushed++;
940 
941 		/* Free the SKB after we've cleared the bit */
942 		dev_kfree_skb_any(skb);
943 	}
944 }
945 
946 /**
947  * ice_ptp_mark_tx_tracker_stale - Mark unfinished timestamps as stale
948  * @tx: the tracker to mark
949  *
950  * Mark currently outstanding Tx timestamps as stale. This prevents sending
951  * their timestamp value to the stack. This is required to prevent extending
952  * the 40bit hardware timestamp incorrectly.
953  *
954  * This should be called when the PTP clock is modified such as after a set
955  * time request.
956  */
957 static void
958 ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
959 {
960 	unsigned long flags;
961 
962 	spin_lock_irqsave(&tx->lock, flags);
963 	bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len);
964 	spin_unlock_irqrestore(&tx->lock, flags);
965 }
966 
967 /**
968  * ice_ptp_flush_all_tx_tracker - Flush all timestamp trackers on this clock
969  * @pf: Board private structure
970  *
971  * Called by the clock owner to flush all the Tx timestamp trackers associated
972  * with the clock.
973  */
974 static void
975 ice_ptp_flush_all_tx_tracker(struct ice_pf *pf)
976 {
977 	struct ice_ptp_port *port;
978 
979 	list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member)
980 		ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx);
981 }
982 
983 /**
984  * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
985  * @pf: Board private structure
986  * @tx: Tx tracking structure to release
987  *
988  * Free memory associated with the Tx timestamp tracker.
989  */
990 static void
991 ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
992 {
993 	unsigned long flags;
994 
995 	spin_lock_irqsave(&tx->lock, flags);
996 	tx->init = 0;
997 	spin_unlock_irqrestore(&tx->lock, flags);
998 
999 	/* wait for potentially outstanding interrupt to complete */
1000 	synchronize_irq(pf->oicr_irq.virq);
1001 
1002 	ice_ptp_flush_tx_tracker(pf, tx);
1003 
1004 	kfree(tx->tstamps);
1005 	tx->tstamps = NULL;
1006 
1007 	bitmap_free(tx->in_use);
1008 	tx->in_use = NULL;
1009 
1010 	bitmap_free(tx->stale);
1011 	tx->stale = NULL;
1012 
1013 	tx->len = 0;
1014 }
1015 
1016 /**
1017  * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
1018  * @pf: Board private structure
1019  * @tx: the Tx tracking structure to initialize
1020  * @port: the port this structure tracks
1021  *
1022  * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
1023  * the timestamp block is shared for all ports in the same quad. To avoid
1024  * ports using the same timestamp index, logically break the block of
1025  * registers into chunks based on the port number.
1026  */
1027 static int
1028 ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
1029 {
1030 	tx->block = port / ICE_PORTS_PER_QUAD;
1031 	tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
1032 	tx->len = INDEX_PER_PORT_E82X;
1033 	tx->has_ready_bitmap = 1;
1034 
1035 	return ice_ptp_alloc_tx_tracker(tx);
1036 }
1037 
1038 /**
1039  * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
1040  * @pf: Board private structure
1041  * @tx: the Tx tracking structure to initialize
1042  *
1043  * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
1044  * port has its own block of timestamps, independent of the other ports.
1045  */
1046 static int
1047 ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
1048 {
1049 	tx->block = pf->hw.port_info->lport;
1050 	tx->offset = 0;
1051 	tx->len = INDEX_PER_PORT_E810;
1052 	/* The E810 PHY does not provide a timestamp ready bitmap. Instead,
1053 	 * verify new timestamps against cached copy of the last read
1054 	 * timestamp.
1055 	 */
1056 	tx->has_ready_bitmap = 0;
1057 
1058 	return ice_ptp_alloc_tx_tracker(tx);
1059 }
1060 
1061 /**
1062  * ice_ptp_update_cached_phctime - Update the cached PHC time values
1063  * @pf: Board specific private structure
1064  *
1065  * This function updates the system time values which are cached in the PF
1066  * structure and the Rx rings.
1067  *
1068  * This function must be called periodically to ensure that the cached value
1069  * is never more than 2 seconds old.
1070  *
1071  * Note that the cached copy in the PF PTP structure is always updated, even
1072  * if we can't update the copy in the Rx rings.
1073  *
1074  * Return:
1075  * * 0 - OK, successfully updated
1076  * * -EAGAIN - PF was busy, need to reschedule the update
1077  */
1078 static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
1079 {
1080 	struct device *dev = ice_pf_to_dev(pf);
1081 	unsigned long update_before;
1082 	u64 systime;
1083 	int i;
1084 
1085 	update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
1086 	if (pf->ptp.cached_phc_time &&
1087 	    time_is_before_jiffies(update_before)) {
1088 		unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;
1089 
1090 		dev_warn(dev, "%u msecs passed between updates to cached PHC time\n",
1091 			 jiffies_to_msecs(time_taken));
1092 		pf->ptp.late_cached_phc_updates++;
1093 	}
1094 
1095 	/* Read the current PHC time */
1096 	systime = ice_ptp_read_src_clk_reg(pf, NULL);
1097 
1098 	/* Update the cached PHC time stored in the PF structure */
1099 	WRITE_ONCE(pf->ptp.cached_phc_time, systime);
1100 	WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);
1101 
1102 	if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
1103 		return -EAGAIN;
1104 
1105 	ice_for_each_vsi(pf, i) {
1106 		struct ice_vsi *vsi = pf->vsi[i];
1107 		int j;
1108 
1109 		if (!vsi)
1110 			continue;
1111 
1112 		if (vsi->type != ICE_VSI_PF)
1113 			continue;
1114 
1115 		ice_for_each_rxq(vsi, j) {
1116 			if (!vsi->rx_rings[j])
1117 				continue;
1118 			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
1119 		}
1120 	}
1121 	clear_bit(ICE_CFG_BUSY, pf->state);
1122 
1123 	return 0;
1124 }
1125 
1126 /**
1127  * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
1128  * @pf: Board specific private structure
1129  *
1130  * This function must be called when the cached PHC time is no longer valid,
1131  * such as after a time adjustment. It marks any currently outstanding Tx
1132  * timestamps as stale and updates the cached PHC time for both the PF and Rx
1133  * rings.
1134  *
1135  * If updating the PHC time cannot be done immediately, a warning message is
1136  * logged and the work item is scheduled immediately to minimize the window
1137  * with a wrong cached timestamp.
1138  */
1139 static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
1140 {
1141 	struct device *dev = ice_pf_to_dev(pf);
1142 	int err;
1143 
1144 	/* Update the cached PHC time immediately if possible, otherwise
1145 	 * schedule the work item to execute soon.
1146 	 */
1147 	err = ice_ptp_update_cached_phctime(pf);
1148 	if (err) {
1149 		/* If another thread is updating the Rx rings, we won't
1150 		 * properly reset them here. This could lead to reporting of
1151 		 * invalid timestamps, but there isn't much we can do.
1152 		 */
1153 		dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
1154 			 __func__);
1155 
1156 		/* Queue the work item to update the Rx rings when possible */
1157 		kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
1158 					   msecs_to_jiffies(10));
1159 	}
1160 
1161 	/* Mark any outstanding timestamps as stale, since they might have
1162 	 * been captured in hardware before the time update. This could lead
1163 	 * to us extending them with the wrong cached value resulting in
1164 	 * incorrect timestamp values.
1165 	 */
1166 	ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx);
1167 }
1168 
1169 /**
1170  * ice_ptp_write_init - Set PHC time to provided value
1171  * @pf: Board private structure
1172  * @ts: timespec structure that holds the new time value
1173  *
1174  * Set the PHC time to the specified time provided in the timespec.
1175  */
1176 static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
1177 {
1178 	u64 ns = timespec64_to_ns(ts);
1179 	struct ice_hw *hw = &pf->hw;
1180 
1181 	return ice_ptp_init_time(hw, ns);
1182 }
1183 
1184 /**
1185  * ice_ptp_write_adj - Adjust PHC clock time atomically
1186  * @pf: Board private structure
1187  * @adj: Adjustment in nanoseconds
1188  *
1189  * Perform an atomic adjustment of the PHC time by the specified number of
1190  * nanoseconds.
1191  */
1192 static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
1193 {
1194 	struct ice_hw *hw = &pf->hw;
1195 
1196 	return ice_ptp_adj_clock(hw, adj);
1197 }
1198 
1199 /**
1200  * ice_base_incval - Get base timer increment value
1201  * @pf: Board private structure
1202  *
1203  * Look up the base timer increment value for this device. The base increment
1204  * value is used to define the nominal clock tick rate. This increment value
1205  * is programmed during device initialization. It is also used as the basis
1206  * for calculating adjustments using scaled_ppm.
1207  */
1208 static u64 ice_base_incval(struct ice_pf *pf)
1209 {
1210 	struct ice_hw *hw = &pf->hw;
1211 	u64 incval;
1212 
1213 	if (ice_is_e810(hw))
1214 		incval = ICE_PTP_NOMINAL_INCVAL_E810;
1215 	else if (ice_e82x_time_ref(hw) < NUM_ICE_TIME_REF_FREQ)
1216 		incval = ice_e82x_nominal_incval(ice_e82x_time_ref(hw));
1217 	else
1218 		incval = UNKNOWN_INCVAL_E82X;
1219 
1220 	dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
1221 		incval);
1222 
1223 	return incval;
1224 }
1225 
1226 /**
1227  * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state
1228  * @port: PTP port for which Tx FIFO is checked
1229  */
1230 static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
1231 {
1232 	int quad = port->port_num / ICE_PORTS_PER_QUAD;
1233 	int offs = port->port_num % ICE_PORTS_PER_QUAD;
1234 	struct ice_pf *pf;
1235 	struct ice_hw *hw;
1236 	u32 val, phy_sts;
1237 	int err;
1238 
1239 	pf = ptp_port_to_pf(port);
1240 	hw = &pf->hw;
1241 
1242 	if (port->tx_fifo_busy_cnt == FIFO_OK)
1243 		return 0;
1244 
1245 	/* need to read FIFO state */
1246 	if (offs == 0 || offs == 1)
1247 		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS,
1248 					     &val);
1249 	else
1250 		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS,
1251 					     &val);
1252 
1253 	if (err) {
1254 		dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
1255 			port->port_num, err);
1256 		return err;
1257 	}
1258 
1259 	if (offs & 0x1)
1260 		phy_sts = FIELD_GET(Q_REG_FIFO13_M, val);
1261 	else
1262 		phy_sts = FIELD_GET(Q_REG_FIFO02_M, val);
1263 
1264 	if (phy_sts & FIFO_EMPTY) {
1265 		port->tx_fifo_busy_cnt = FIFO_OK;
1266 		return 0;
1267 	}
1268 
1269 	port->tx_fifo_busy_cnt++;
1270 
1271 	dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
1272 		port->tx_fifo_busy_cnt, port->port_num);
1273 
1274 	if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
1275 		dev_dbg(ice_pf_to_dev(pf),
1276 			"Port %d Tx FIFO still not empty; resetting quad %d\n",
1277 			port->port_num, quad);
1278 		ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
1279 		port->tx_fifo_busy_cnt = FIFO_OK;
1280 		return 0;
1281 	}
1282 
1283 	return -EAGAIN;
1284 }
1285 
1286 /**
1287  * ice_ptp_wait_for_offsets - Check for valid Tx and Rx offsets
1288  * @work: Pointer to the kthread_work structure for this task
1289  *
1290  * Check whether hardware has completed measuring the Tx and Rx offset values
1291  * used to configure and enable vernier timestamp calibration.
1292  *
1293  * Once the offset in either direction is measured, configure the associated
1294  * registers with the calibrated offset values and enable timestamping. The Tx
1295  * and Rx directions are configured independently as soon as their associated
1296  * offsets are known.
1297  *
1298  * This function reschedules itself until both Tx and Rx calibration have
1299  * completed.
1300  */
1301 static void ice_ptp_wait_for_offsets(struct kthread_work *work)
1302 {
1303 	struct ice_ptp_port *port;
1304 	struct ice_pf *pf;
1305 	struct ice_hw *hw;
1306 	int tx_err;
1307 	int rx_err;
1308 
1309 	port = container_of(work, struct ice_ptp_port, ov_work.work);
1310 	pf = ptp_port_to_pf(port);
1311 	hw = &pf->hw;
1312 
1313 	if (ice_is_reset_in_progress(pf->state)) {
1314 		/* wait for device driver to complete reset */
1315 		kthread_queue_delayed_work(pf->ptp.kworker,
1316 					   &port->ov_work,
1317 					   msecs_to_jiffies(100));
1318 		return;
1319 	}
1320 
1321 	tx_err = ice_ptp_check_tx_fifo(port);
1322 	if (!tx_err)
1323 		tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num);
1324 	rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num);
1325 	if (tx_err || rx_err) {
1326 		/* Tx and/or Rx offset not yet configured, try again later */
1327 		kthread_queue_delayed_work(pf->ptp.kworker,
1328 					   &port->ov_work,
1329 					   msecs_to_jiffies(100));
1330 		return;
1331 	}
1332 }
1333 
1334 /**
1335  * ice_ptp_port_phy_stop - Stop timestamping for a PHY port
1336  * @ptp_port: PTP port to stop
1337  */
1338 static int
1339 ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
1340 {
1341 	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
1342 	u8 port = ptp_port->port_num;
1343 	struct ice_hw *hw = &pf->hw;
1344 	int err;
1345 
1346 	if (ice_is_e810(hw))
1347 		return 0;
1348 
1349 	mutex_lock(&ptp_port->ps_lock);
1350 
1351 	kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
1352 
1353 	err = ice_stop_phy_timer_e82x(hw, port, true);
1354 	if (err)
1355 		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
1356 			port, err);
1357 
1358 	mutex_unlock(&ptp_port->ps_lock);
1359 
1360 	return err;
1361 }
1362 
1363 /**
1364  * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping
1365  * @ptp_port: PTP port for which the PHY start is set
1366  *
1367  * Start the PHY timestamping block, and initiate Vernier timestamping
1368  * calibration. If timestamping cannot be calibrated (such as if link is down)
1369  * then disable the timestamping block instead.
1370  */
1371 static int
1372 ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
1373 {
1374 	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
1375 	u8 port = ptp_port->port_num;
1376 	struct ice_hw *hw = &pf->hw;
1377 	unsigned long flags;
1378 	int err;
1379 
1380 	if (ice_is_e810(hw))
1381 		return 0;
1382 
1383 	if (!ptp_port->link_up)
1384 		return ice_ptp_port_phy_stop(ptp_port);
1385 
1386 	mutex_lock(&ptp_port->ps_lock);
1387 
1388 	kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
1389 
1390 	/* temporarily disable Tx timestamps while calibrating PHY offset */
1391 	spin_lock_irqsave(&ptp_port->tx.lock, flags);
1392 	ptp_port->tx.calibrating = true;
1393 	spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
1394 	ptp_port->tx_fifo_busy_cnt = 0;
1395 
1396 	/* Start the PHY timer in Vernier mode */
1397 	err = ice_start_phy_timer_e82x(hw, port);
1398 	if (err)
1399 		goto out_unlock;
1400 
1401 	/* Enable Tx timestamps right away */
1402 	spin_lock_irqsave(&ptp_port->tx.lock, flags);
1403 	ptp_port->tx.calibrating = false;
1404 	spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
1405 
1406 	kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0);
1407 
1408 out_unlock:
1409 	if (err)
1410 		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
1411 			port, err);
1412 
1413 	mutex_unlock(&ptp_port->ps_lock);
1414 
1415 	return err;
1416 }
1417 
1418 /**
1419  * ice_ptp_link_change - Reconfigure PTP after link status change
1420  * @pf: Board private structure
1421  * @port: Port for which the PHY start is set
1422  * @linkup: Link is up or down
1423  */
1424 void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
1425 {
1426 	struct ice_ptp_port *ptp_port;
1427 	struct ice_hw *hw = &pf->hw;
1428 
1429 	if (pf->ptp.state != ICE_PTP_READY)
1430 		return;
1431 
1432 	if (WARN_ON_ONCE(port >= ICE_NUM_EXTERNAL_PORTS))
1433 		return;
1434 
1435 	ptp_port = &pf->ptp.port;
1436 	if (WARN_ON_ONCE(ptp_port->port_num != port))
1437 		return;
1438 
1439 	/* Update cached link status for this port immediately */
1440 	ptp_port->link_up = linkup;
1441 
1442 	switch (hw->phy_model) {
1443 	case ICE_PHY_E810:
1444 		/* Do not reconfigure E810 PHY */
1445 		return;
1446 	case ICE_PHY_E82X:
1447 		ice_ptp_port_phy_restart(ptp_port);
1448 		return;
1449 	default:
1450 		dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__);
1451 	}
1452 }
1453 
1454 /**
1455  * ice_ptp_cfg_phy_interrupt - Configure PHY interrupt settings
1456  * @pf: PF private structure
1457  * @ena: bool value to enable or disable interrupt
1458  * @threshold: Minimum number of packets at which the interrupt is triggered
1459  *
1460  * Utility function to enable or disable Tx timestamp interrupt and threshold
1461  */
1462 static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
1463 {
1464 	struct ice_hw *hw = &pf->hw;
1465 	int err = 0;
1466 	int quad;
1467 	u32 val;
1468 
1469 	ice_ptp_reset_ts_memory(hw);
1470 
1471 	for (quad = 0; quad < ICE_MAX_QUAD; quad++) {
1472 		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG,
1473 					     &val);
1474 		if (err)
1475 			break;
1476 
1477 		if (ena) {
1478 			val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
1479 			val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M;
1480 			val |= FIELD_PREP(Q_REG_TX_MEM_GBL_CFG_INTR_THR_M,
1481 					  threshold);
1482 		} else {
1483 			val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
1484 		}
1485 
1486 		err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG,
1487 					      val);
1488 		if (err)
1489 			break;
1490 	}
1491 
1492 	if (err)
1493 		dev_err(ice_pf_to_dev(pf), "PTP failed in intr ena, err %d\n",
1494 			err);
1495 	return err;
1496 }
1497 
1498 /**
1499  * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block
1500  * @pf: Board private structure
1501  */
1502 static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
1503 {
1504 	ice_ptp_port_phy_restart(&pf->ptp.port);
1505 }
1506 
1507 /**
1508  * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping
1509  * @pf: Board private structure
1510  */
1511 static void ice_ptp_restart_all_phy(struct ice_pf *pf)
1512 {
1513 	struct list_head *entry;
1514 
1515 	list_for_each(entry, &pf->ptp.ports_owner.ports) {
1516 		struct ice_ptp_port *port = list_entry(entry,
1517 						       struct ice_ptp_port,
1518 						       list_member);
1519 
1520 		if (port->link_up)
1521 			ice_ptp_port_phy_restart(port);
1522 	}
1523 }
1524 
1525 /**
1526  * ice_ptp_adjfine - Adjust clock increment rate
1527  * @info: the driver's PTP info structure
1528  * @scaled_ppm: Parts per million with 16-bit fractional field
1529  *
1530  * Adjust the frequency of the clock by the indicated scaled ppm from the
1531  * base frequency.
1532  */
1533 static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
1534 {
1535 	struct ice_pf *pf = ptp_info_to_pf(info);
1536 	struct ice_hw *hw = &pf->hw;
1537 	u64 incval;
1538 	int err;
1539 
1540 	incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm);
1541 	err = ice_ptp_write_incval_locked(hw, incval);
1542 	if (err) {
1543 		dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
1544 			err);
1545 		return -EIO;
1546 	}
1547 
1548 	return 0;
1549 }
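
/*
 * Illustrative example (not part of the driver): the scaled_ppm math applied
 * above, written out stand-alone. scaled_ppm carries parts per million with a
 * 16-bit fractional field, so the adjusted increment is approximately
 * base * (1 + scaled_ppm / (1000000 * 2^16)). The sample values are
 * assumptions chosen only to demonstrate the calculation.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t scaled_ppm_adjust_example(uint64_t base, long scaled_ppm)
{
	long double ratio = (long double)scaled_ppm / (1000000.0L * 65536.0L);

	return (uint64_t)((long double)base * (1.0L + ratio));
}

int main(void)
{
	uint64_t base = 0x100000000ULL;	/* a nominal increment value */

	/* Speed the clock up by 10 ppm (scaled_ppm = 10 << 16) */
	printf("+10 ppm: 0x%llx\n",
	       (unsigned long long)scaled_ppm_adjust_example(base, 10L << 16));

	/* Slow the clock down by 10 ppm */
	printf("-10 ppm: 0x%llx\n",
	       (unsigned long long)scaled_ppm_adjust_example(base, -(10L << 16)));

	return 0;
}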
1550 
1551 /**
1552  * ice_ptp_extts_event - Process PTP external clock event
1553  * @pf: Board private structure
1554  */
1555 void ice_ptp_extts_event(struct ice_pf *pf)
1556 {
1557 	struct ptp_clock_event event;
1558 	struct ice_hw *hw = &pf->hw;
1559 	u8 chan, tmr_idx;
1560 	u32 hi, lo;
1561 
1562 	/* Don't process timestamp events if PTP is not ready */
1563 	if (pf->ptp.state != ICE_PTP_READY)
1564 		return;
1565 
1566 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1567 	/* Event time is captured by one of the two matched registers
1568 	 *      GLTSYN_EVNT_L: 32 LSB of sampled time event
1569 	 *      GLTSYN_EVNT_H: 32 MSB of sampled time event
1570 	 * Event is defined in GLTSYN_EVNT_0 register
1571 	 */
1572 	for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
1573 		/* Check if channel is enabled */
1574 		if (pf->ptp.ext_ts_irq & (1 << chan)) {
1575 			lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
1576 			hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
1577 			event.timestamp = (((u64)hi) << 32) | lo;
1578 			event.type = PTP_CLOCK_EXTTS;
1579 			event.index = chan;
1580 
1581 			/* Fire event */
1582 			ptp_clock_event(pf->ptp.clock, &event);
1583 			pf->ptp.ext_ts_irq &= ~(1 << chan);
1584 		}
1585 	}
1586 }
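
/*
 * Illustrative example (not part of the driver): a user-space sketch that
 * consumes the PTP_CLOCK_EXTTS events fired above. It enables external
 * timestamps on channel 0 via PTP_EXTTS_REQUEST and then reads events from
 * the clock device. The device path and channel number are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_extts_request req;
	struct ptp_extts_event ev;
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/ptp0");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.index = 0;				/* external timestamp channel 0 */
	req.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE;

	if (ioctl(fd, PTP_EXTTS_REQUEST, &req)) {
		perror("PTP_EXTTS_REQUEST");
		return 1;
	}

	/* Each read() returns one captured event timestamp */
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("extts chan %u: %lld.%09u\n",
		       ev.index, (long long)ev.t.sec, ev.t.nsec);

	return 0;
}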
1587 
1588 /**
1589  * ice_ptp_cfg_extts - Configure EXTTS pin and channel
1590  * @pf: Board private structure
1591  * @chan: GPIO channel (0-3)
1592  * @config: desired EXTTS configuration.
1593  * @store: If set to true, the values will be stored
1594  *
1595  * Configure an external timestamp event on the requested channel.
1596  *
1597  * Return: 0 on success, -EOPNOTSUPP on unsupported flags
1598  */
1599 static int ice_ptp_cfg_extts(struct ice_pf *pf, unsigned int chan,
1600 			     struct ice_extts_channel *config, bool store)
1601 {
1602 	u32 func, aux_reg, gpio_reg, irq_reg;
1603 	struct ice_hw *hw = &pf->hw;
1604 	u8 tmr_idx;
1605 
1606 	/* Reject requests with unsupported flags */
1607 	if (config->flags & ~(PTP_ENABLE_FEATURE |
1608 			      PTP_RISING_EDGE |
1609 			      PTP_FALLING_EDGE |
1610 			      PTP_STRICT_FLAGS))
1611 		return -EOPNOTSUPP;
1612 
1613 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1614 
1615 	irq_reg = rd32(hw, PFINT_OICR_ENA);
1616 
1617 	if (config->ena) {
1618 		/* Enable the interrupt */
1619 		irq_reg |= PFINT_OICR_TSYN_EVNT_M;
1620 		aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;
1621 
1622 #define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE	BIT(0)
1623 #define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE	BIT(1)
1624 
1625 		/* set event level to requested edge */
1626 		if (config->flags & PTP_FALLING_EDGE)
1627 			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
1628 		if (config->flags & PTP_RISING_EDGE)
1629 			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;
1630 
1631 		/* Write GPIO CTL reg.
1632 		 * 0x1 is input sampled by EVENT register(channel)
1633 		 * + num_in_channels * tmr_idx
1634 		 */
1635 		func = 1 + chan + (tmr_idx * 3);
1636 		gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func);
1637 		pf->ptp.ext_ts_chan |= (1 << chan);
1638 	} else {
1639 		/* clear the values we set to reset defaults */
1640 		aux_reg = 0;
1641 		gpio_reg = 0;
1642 		pf->ptp.ext_ts_chan &= ~(1 << chan);
1643 		if (!pf->ptp.ext_ts_chan)
1644 			irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
1645 	}
1646 
1647 	wr32(hw, PFINT_OICR_ENA, irq_reg);
1648 	wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
1649 	wr32(hw, GLGEN_GPIO_CTL(config->gpio_pin), gpio_reg);
1650 
1651 	if (store)
1652 		memcpy(&pf->ptp.extts_channels[chan], config, sizeof(*config));
1653 
1654 	return 0;
1655 }
1656 
1657 /**
1658  * ice_ptp_disable_all_extts - Disable all EXTTS channels
1659  * @pf: Board private structure
1660  */
1661 static void ice_ptp_disable_all_extts(struct ice_pf *pf)
1662 {
1663 	struct ice_extts_channel extts_cfg = {};
1664 	int i;
1665 
1666 	for (i = 0; i < pf->ptp.info.n_ext_ts; i++) {
1667 		if (pf->ptp.extts_channels[i].ena) {
1668 			extts_cfg.gpio_pin = pf->ptp.extts_channels[i].gpio_pin;
1669 			extts_cfg.ena = false;
1670 			ice_ptp_cfg_extts(pf, i, &extts_cfg, false);
1671 		}
1672 	}
1673 
1674 	synchronize_irq(pf->oicr_irq.virq);
1675 }
1676 
1677 /**
1678  * ice_ptp_enable_all_extts - Enable all EXTTS channels
1679  * @pf: Board private structure
1680  *
1681  * Called during reset to restore user configuration.
1682  */
1683 static void ice_ptp_enable_all_extts(struct ice_pf *pf)
1684 {
1685 	int i;
1686 
1687 	for (i = 0; i < pf->ptp.info.n_ext_ts; i++) {
1688 		if (pf->ptp.extts_channels[i].ena)
1689 			ice_ptp_cfg_extts(pf, i, &pf->ptp.extts_channels[i],
1690 					  false);
1691 	}
1692 }
1693 
1694 /**
1695  * ice_ptp_cfg_clkout - Configure clock to generate periodic wave
1696  * @pf: Board private structure
1697  * @chan: GPIO channel (0-3)
1698  * @config: desired periodic clk configuration. NULL will disable channel
1699  * @store: If set to true the values will be stored
1700  *
1701  * Configure the internal clock generator modules to generate the clock wave of
1702  * specified period.
1703  */
1704 static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
1705 			      struct ice_perout_channel *config, bool store)
1706 {
1707 	u64 current_time, period, start_time, phase;
1708 	struct ice_hw *hw = &pf->hw;
1709 	u32 func, val, gpio_pin;
1710 	u8 tmr_idx;
1711 
1712 	if (config && config->flags & ~PTP_PEROUT_PHASE)
1713 		return -EOPNOTSUPP;
1714 
1715 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1716 
1717 	/* 0. Reset mode & out_en in AUX_OUT */
1718 	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
1719 
1720 	/* If we're disabling the output, clear out CLKO and TGT and keep
1721 	 * output level low
1722 	 */
1723 	if (!config || !config->ena) {
1724 		wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0);
1725 		wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0);
1726 		wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0);
1727 
1728 		val = GLGEN_GPIO_CTL_PIN_DIR_M;
1729 		gpio_pin = pf->ptp.perout_channels[chan].gpio_pin;
1730 		wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
1731 
1732 		/* Store the value if requested */
1733 		if (store)
1734 			memset(&pf->ptp.perout_channels[chan], 0,
1735 			       sizeof(struct ice_perout_channel));
1736 
1737 		return 0;
1738 	}
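	/* Capture the requested period, start time and phase (start time
	 * modulo period); if the start time has already passed, the phase is
	 * re-applied below when the start is moved to the next full second.
	 */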
1739 	period = config->period;
1740 	start_time = config->start_time;
1741 	div64_u64_rem(start_time, period, &phase);
1742 	gpio_pin = config->gpio_pin;
1743 
1744 	/* 1. Write clkout with half of required period value */
1745 	if (period & 0x1) {
1746 		dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
1747 		goto err;
1748 	}
1749 
1750 	period >>= 1;
1751 
1752 	/* For proper operation, GLTSYN_CLKO must be larger than one clock tick
1753 	 */
1754 #define MIN_PULSE 3
1755 	if (period <= MIN_PULSE || period > U32_MAX) {
1756 		dev_err(ice_pf_to_dev(pf), "CLK Period must be > %d && < 2^33",
1757 			MIN_PULSE * 2);
1758 		goto err;
1759 	}
1760 
1761 	wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));
1762 
1763 	/* Allow time for programming before start_time is hit */
1764 	current_time = ice_ptp_read_src_clk_reg(pf, NULL);
1765 
1766 	/* If the start time is in the past, start the timer at the next full
1767 	 * second boundary while maintaining the requested phase
1768 	 */
1769 	if (start_time < current_time)
1770 		start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
1771 				       NSEC_PER_SEC) * NSEC_PER_SEC + phase;
1772 
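	/* Pull the target time in by the output propagation delay so the
	 * first edge appears on the pin as close as possible to the requested
	 * start time.
	 */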
1773 	if (ice_is_e810(hw))
1774 		start_time -= E810_OUT_PROP_DELAY_NS;
1775 	else
1776 		start_time -= ice_e82x_pps_delay(ice_e82x_time_ref(hw));
1777 
1778 	/* 2. Write TARGET time */
1779 	wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
1780 	wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start_time));
1781 
1782 	/* 3. Write AUX_OUT register */
1783 	val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
1784 	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);
1785 
1786 	/* 4. Write GPIO CTL reg to select this channel's periodic output function */
1787 	func = 8 + chan + (tmr_idx * 4);
1788 	val = GLGEN_GPIO_CTL_PIN_DIR_M |
1789 	      FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func);
1790 	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
1791 
1792 	/* Store the value if requested */
1793 	if (store) {
1794 		memcpy(&pf->ptp.perout_channels[chan], config,
1795 		       sizeof(struct ice_perout_channel));
1796 		pf->ptp.perout_channels[chan].start_time = phase;
1797 	}
1798 
1799 	return 0;
1800 err:
1801 	dev_err(ice_pf_to_dev(pf), "PTP failed to cfg per_clk\n");
1802 	return -EFAULT;
1803 }
1804 
1805 /**
1806  * ice_ptp_disable_all_clkout - Disable all currently configured outputs
1807  * @pf: pointer to the PF structure
1808  *
1809  * Disable all currently configured clock outputs. This is necessary before
1810  * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_clkout to
1811  * re-enable the clocks again.
1812  */
1813 static void ice_ptp_disable_all_clkout(struct ice_pf *pf)
1814 {
1815 	uint i;
1816 
1817 	for (i = 0; i < pf->ptp.info.n_per_out; i++)
1818 		if (pf->ptp.perout_channels[i].ena)
1819 			ice_ptp_cfg_clkout(pf, i, NULL, false);
1820 }
1821 
1822 /**
1823  * ice_ptp_enable_all_clkout - Enable all configured periodic clock outputs
1824  * @pf: pointer to the PF structure
1825  *
1826  * Enable all currently configured clock outputs. Use this after
1827  * ice_ptp_disable_all_clkout to reconfigure the output signals according to
1828  * their configuration.
1829  */
1830 static void ice_ptp_enable_all_clkout(struct ice_pf *pf)
1831 {
1832 	uint i;
1833 
1834 	for (i = 0; i < pf->ptp.info.n_per_out; i++)
1835 		if (pf->ptp.perout_channels[i].ena)
1836 			ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i],
1837 					   false);
1838 }
1839 
1840 /**
1841  * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC
1842  * @info: the driver's PTP info structure
1843  * @rq: The requested feature to change
1844  * @on: Enable/disable flag
1845  */
1846 static int
1847 ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
1848 			 struct ptp_clock_request *rq, int on)
1849 {
1850 	struct ice_pf *pf = ptp_info_to_pf(info);
1851 	bool sma_pres = false;
1852 	unsigned int chan;
1853 	u32 gpio_pin;
1854 
1855 	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
1856 		sma_pres = true;
1857 
1858 	switch (rq->type) {
1859 	case PTP_CLK_REQ_PEROUT:
1860 	{
1861 		struct ice_perout_channel clk_cfg = {};
1862 
1863 		chan = rq->perout.index;
1864 		if (sma_pres) {
1865 			if (chan == ice_pin_desc_e810t[SMA1].chan)
1866 				clk_cfg.gpio_pin = GPIO_20;
1867 			else if (chan == ice_pin_desc_e810t[SMA2].chan)
1868 				clk_cfg.gpio_pin = GPIO_22;
1869 			else
1870 				return -1;
1871 		} else if (ice_is_e810t(&pf->hw)) {
1872 			if (chan == 0)
1873 				clk_cfg.gpio_pin = GPIO_20;
1874 			else
1875 				clk_cfg.gpio_pin = GPIO_22;
1876 		} else if (chan == PPS_CLK_GEN_CHAN) {
1877 			clk_cfg.gpio_pin = PPS_PIN_INDEX;
1878 		} else {
1879 			clk_cfg.gpio_pin = chan;
1880 		}
1881 
1882 		clk_cfg.flags = rq->perout.flags;
1883 		clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
1884 				   rq->perout.period.nsec);
1885 		clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) +
1886 				       rq->perout.start.nsec);
1887 		clk_cfg.ena = !!on;
1888 
1889 		return ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
1890 	}
1891 	case PTP_CLK_REQ_EXTTS:
1892 	{
1893 		struct ice_extts_channel extts_cfg = {};
1894 
1895 		chan = rq->extts.index;
1896 		if (sma_pres) {
1897 			if (chan < ice_pin_desc_e810t[SMA2].chan)
1898 				gpio_pin = GPIO_21;
1899 			else
1900 				gpio_pin = GPIO_23;
1901 		} else if (ice_is_e810t(&pf->hw)) {
1902 			if (chan == 0)
1903 				gpio_pin = GPIO_21;
1904 			else
1905 				gpio_pin = GPIO_23;
1906 		} else {
1907 			gpio_pin = chan;
1908 		}
1909 
1910 		extts_cfg.flags = rq->extts.flags;
1911 		extts_cfg.gpio_pin = gpio_pin;
1912 		extts_cfg.ena = !!on;
1913 
1914 		return ice_ptp_cfg_extts(pf, chan, &extts_cfg, true);
1915 	}
1916 	default:
1917 		return -EOPNOTSUPP;
1918 	}
1919 }
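
/* The requests handled above arrive via the PTP_EXTTS_REQUEST2 and
 * PTP_PEROUT_REQUEST2 ioctls on the clock's character device. A minimal
 * userspace sketch (not driver code; assumes the clock is already open as
 * ptp_fd, error handling omitted) requesting a 1 Hz output on channel 0:
 *
 *	struct ptp_perout_request req = { };
 *
 *	req.index = 0;
 *	req.period.sec = 1;
 *	ioctl(ptp_fd, PTP_PEROUT_REQUEST2, &req);
 */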
1920 
1921 /**
1922  * ice_ptp_gpio_enable_e823 - Enable/disable ancillary features of PHC
1923  * @info: the driver's PTP info structure
1924  * @rq: The requested feature to change
1925  * @on: Enable/disable flag
1926  */
1927 static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info,
1928 				    struct ptp_clock_request *rq, int on)
1929 {
1930 	struct ice_pf *pf = ptp_info_to_pf(info);
1931 
1932 	switch (rq->type) {
1933 	case PTP_CLK_REQ_PPS:
1934 	{
1935 		struct ice_perout_channel clk_cfg = {};
1936 
1937 		clk_cfg.flags = rq->perout.flags;
1938 		clk_cfg.gpio_pin = PPS_PIN_INDEX;
1939 		clk_cfg.period = NSEC_PER_SEC;
1940 		clk_cfg.ena = !!on;
1941 
1942 		return ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true);
1943 	}
1944 	case PTP_CLK_REQ_EXTTS:
1945 	{
1946 		struct ice_extts_channel extts_cfg = {};
1947 
1948 		extts_cfg.flags = rq->extts.flags;
1949 		extts_cfg.gpio_pin = TIME_SYNC_PIN_INDEX;
1950 		extts_cfg.ena = !!on;
1951 
1952 		return ice_ptp_cfg_extts(pf, rq->extts.index, &extts_cfg, true);
1953 	}
1954 	default:
1955 		return -EOPNOTSUPP;
1956 	}
1957 }
1958 
1959 /**
1960  * ice_ptp_gettimex64 - Get the time of the clock
1961  * @info: the driver's PTP info structure
1962  * @ts: timespec64 structure to hold the current time value
1963  * @sts: Optional parameter for holding a pair of system timestamps from
1964  *       the system clock. Will be ignored if NULL is given.
1965  *
1966  * Read the device clock and return the correct value in ns, after converting
1967  * it into a timespec64 struct.
1968  */
1969 static int
1970 ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
1971 		   struct ptp_system_timestamp *sts)
1972 {
1973 	struct ice_pf *pf = ptp_info_to_pf(info);
1974 	u64 time_ns;
1975 
1976 	time_ns = ice_ptp_read_src_clk_reg(pf, sts);
1977 	*ts = ns_to_timespec64(time_ns);
1978 	return 0;
1979 }
1980 
1981 /**
1982  * ice_ptp_settime64 - Set the time of the clock
1983  * @info: the driver's PTP info structure
1984  * @ts: timespec64 structure that holds the new time value
1985  *
1986  * Set the device clock to the user input value. The conversion from timespec
1987  * to ns happens in the write function.
1988  */
1989 static int
1990 ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
1991 {
1992 	struct ice_pf *pf = ptp_info_to_pf(info);
1993 	struct timespec64 ts64 = *ts;
1994 	struct ice_hw *hw = &pf->hw;
1995 	int err;
1996 
1997 	/* For Vernier mode, we need to recalibrate after a new settime.
1998 	 * Start by disabling the timestamp block.
1999 	 */
2000 	if (pf->ptp.port.link_up)
2001 		ice_ptp_port_phy_stop(&pf->ptp.port);
2002 
2003 	if (!ice_ptp_lock(hw)) {
2004 		err = -EBUSY;
2005 		goto exit;
2006 	}
2007 
2008 	/* Disable periodic outputs */
2009 	ice_ptp_disable_all_clkout(pf);
2010 
2011 	err = ice_ptp_write_init(pf, &ts64);
2012 	ice_ptp_unlock(hw);
2013 
2014 	if (!err)
2015 		ice_ptp_reset_cached_phctime(pf);
2016 
2017 	/* Reenable periodic outputs */
2018 	ice_ptp_enable_all_clkout(pf);
2019 
2020 	/* Recalibrate and re-enable timestamp blocks for E822/E823 */
2021 	if (hw->phy_model == ICE_PHY_E82X)
2022 		ice_ptp_restart_all_phy(pf);
2023 exit:
2024 	if (err) {
2025 		dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
2026 		return err;
2027 	}
2028 
2029 	return 0;
2030 }
2031 
2032 /**
2033  * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
2034  * @info: the driver's PTP info structure
2035  * @delta: Offset in nanoseconds to adjust the time by
2036  */
2037 static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
2038 {
2039 	struct timespec64 now, then;
2040 	int ret;
2041 
2042 	then = ns_to_timespec64(delta);
2043 	ret = ice_ptp_gettimex64(info, &now, NULL);
2044 	if (ret)
2045 		return ret;
2046 	now = timespec64_add(now, then);
2047 
2048 	return ice_ptp_settime64(info, (const struct timespec64 *)&now);
2049 }
2050 
2051 /**
2052  * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta
2053  * @info: the driver's PTP info structure
2054  * @delta: Offset in nanoseconds to adjust the time by
2055  */
2056 static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
2057 {
2058 	struct ice_pf *pf = ptp_info_to_pf(info);
2059 	struct ice_hw *hw = &pf->hw;
2060 	struct device *dev;
2061 	int err;
2062 
2063 	dev = ice_pf_to_dev(pf);
2064 
2065 	/* Hardware only supports atomic adjustments using signed 32-bit
2066 	 * integers. For any adjustment outside this range, perform
2067 	 * a non-atomic get->adjust->set flow.
2068 	 */
2069 	if (delta > S32_MAX || delta < S32_MIN) {
2070 		dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta);
2071 		return ice_ptp_adjtime_nonatomic(info, delta);
2072 	}
2073 
2074 	if (!ice_ptp_lock(hw)) {
2075 		dev_err(dev, "PTP failed to acquire semaphore in adjtime\n");
2076 		return -EBUSY;
2077 	}
2078 
2079 	/* Disable periodic outputs */
2080 	ice_ptp_disable_all_clkout(pf);
2081 
2082 	err = ice_ptp_write_adj(pf, delta);
2083 
2084 	/* Reenable periodic outputs */
2085 	ice_ptp_enable_all_clkout(pf);
2086 
2087 	ice_ptp_unlock(hw);
2088 
2089 	if (err) {
2090 		dev_err(dev, "PTP failed to adjust time, err %d\n", err);
2091 		return err;
2092 	}
2093 
2094 	ice_ptp_reset_cached_phctime(pf);
2095 
2096 	return 0;
2097 }
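
/* This adjtime callback is reached through clock_adjtime() with ADJ_SETOFFSET
 * on the PHC clockid. A minimal userspace sketch (not driver code; clkid
 * obtained elsewhere, error handling omitted) stepping the clock forward by
 * one second:
 *
 *	struct timex tx = { };
 *
 *	tx.modes = ADJ_SETOFFSET;
 *	tx.time.tv_sec = 1;
 *	tx.time.tv_usec = 0;
 *	clock_adjtime(clkid, &tx);
 */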
2098 
2099 #ifdef CONFIG_ICE_HWTS
2100 /**
2101  * ice_ptp_get_syncdevicetime - Get the cross timestamp info
2102  * @device: Current device time
2103  * @system: System counter value read synchronously with device time
2104  * @ctx: Context provided by timekeeping code
2105  *
2106  * Read the device and system (ART) clocks simultaneously and return the
2107  * corrected clock values in ns.
2108  */
2109 static int
2110 ice_ptp_get_syncdevicetime(ktime_t *device,
2111 			   struct system_counterval_t *system,
2112 			   void *ctx)
2113 {
2114 	struct ice_pf *pf = (struct ice_pf *)ctx;
2115 	struct ice_hw *hw = &pf->hw;
2116 	u32 hh_lock, hh_art_ctl;
2117 	int i;
2118 
2119 #define MAX_HH_HW_LOCK_TRIES	5
2120 #define MAX_HH_CTL_LOCK_TRIES	100
2121 
2122 	for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) {
2123 		/* Get the HW lock */
2124 		hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
2125 		if (hh_lock & PFHH_SEM_BUSY_M) {
2126 			usleep_range(10000, 15000);
2127 			continue;
2128 		}
2129 		break;
2130 	}
2131 	if (hh_lock & PFHH_SEM_BUSY_M) {
2132 		dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
2133 		return -EBUSY;
2134 	}
2135 
2136 	/* Program cmd to master timer */
2137 	ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
2138 
2139 	/* Start the ART and device clock sync sequence */
2140 	hh_art_ctl = rd32(hw, GLHH_ART_CTL);
2141 	hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
2142 	wr32(hw, GLHH_ART_CTL, hh_art_ctl);
2143 
2144 	for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) {
2145 		/* Wait for sync to complete */
2146 		hh_art_ctl = rd32(hw, GLHH_ART_CTL);
2147 		if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
2148 			udelay(1);
2149 			continue;
2150 		} else {
2151 			u32 hh_ts_lo, hh_ts_hi, tmr_idx;
2152 			u64 hh_ts;
2153 
2154 			tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
2155 			/* Read ART time */
2156 			hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
2157 			hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
2158 			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
2159 			*system = convert_art_ns_to_tsc(hh_ts);
2160 			/* Read Device source clock time */
2161 			hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
2162 			hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
2163 			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
2164 			*device = ns_to_ktime(hh_ts);
2165 			break;
2166 		}
2167 	}
2168 
2169 	/* Clear the master timer */
2170 	ice_ptp_src_cmd(hw, ICE_PTP_NOP);
2171 
2172 	/* Release HW lock */
2173 	hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
2174 	hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
2175 	wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);
2176 
2177 	if (i == MAX_HH_CTL_LOCK_TRIES)
2178 		return -ETIMEDOUT;
2179 
2180 	return 0;
2181 }
2182 
2183 /**
2184  * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp
2185  * @info: the driver's PTP info structure
2186  * @cts: The memory to fill the cross timestamp info
2187  *
2188  * Capture a cross timestamp between the ART and the device PTP hardware
2189  * clock. Fill the cross timestamp information and report it back to the
2190  * caller.
2191  *
2192  * This is only valid for E822 and E823 devices which have support for
2193  * generating the cross timestamp via PCIe PTM.
2194  *
2195  * In order to correctly correlate the ART timestamp back to the TSC time, the
2196  * CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
2197  */
2198 static int
2199 ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info,
2200 			    struct system_device_crosststamp *cts)
2201 {
2202 	struct ice_pf *pf = ptp_info_to_pf(info);
2203 
2204 	return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
2205 					     pf, NULL, cts);
2206 }
2207 #endif /* CONFIG_ICE_HWTS */
2208 
2209 /**
2210  * ice_ptp_get_ts_config - ioctl interface to read the timestamping config
2211  * @pf: Board private structure
2212  * @ifr: ioctl data
2213  *
2214  * Copy the timestamping config to user buffer
2215  */
2216 int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
2217 {
2218 	struct hwtstamp_config *config;
2219 
2220 	if (pf->ptp.state != ICE_PTP_READY)
2221 		return -EIO;
2222 
2223 	config = &pf->ptp.tstamp_config;
2224 
2225 	return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
2226 		-EFAULT : 0;
2227 }
2228 
2229 /**
2230  * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
2231  * @pf: Board private structure
2232  * @config: hwtstamp settings requested or saved
2233  */
2234 static int
2235 ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
2236 {
2237 	switch (config->tx_type) {
2238 	case HWTSTAMP_TX_OFF:
2239 		pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
2240 		break;
2241 	case HWTSTAMP_TX_ON:
2242 		pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
2243 		break;
2244 	default:
2245 		return -ERANGE;
2246 	}
2247 
2248 	switch (config->rx_filter) {
2249 	case HWTSTAMP_FILTER_NONE:
2250 		pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
2251 		break;
2252 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2253 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2254 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2255 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2256 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2257 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2258 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2259 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2260 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2261 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2262 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2263 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2264 	case HWTSTAMP_FILTER_NTP_ALL:
2265 	case HWTSTAMP_FILTER_ALL:
2266 		pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
2267 		break;
2268 	default:
2269 		return -ERANGE;
2270 	}
2271 
2272 	/* Immediately update the device timestamping mode */
2273 	ice_ptp_restore_timestamp_mode(pf);
2274 
2275 	return 0;
2276 }
2277 
2278 /**
2279  * ice_ptp_set_ts_config - ioctl interface to control the timestamping
2280  * @pf: Board private structure
2281  * @ifr: ioctl data
2282  *
2283  * Get the user config and store it
2284  */
2285 int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
2286 {
2287 	struct hwtstamp_config config;
2288 	int err;
2289 
2290 	if (pf->ptp.state != ICE_PTP_READY)
2291 		return -EAGAIN;
2292 
2293 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2294 		return -EFAULT;
2295 
2296 	err = ice_ptp_set_timestamp_mode(pf, &config);
2297 	if (err)
2298 		return err;
2299 
2300 	/* Return the actual configuration set */
2301 	config = pf->ptp.tstamp_config;
2302 
2303 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2304 		-EFAULT : 0;
2305 }
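
/* A minimal sketch of the userspace side of this ioctl (not driver code;
 * assumes a socket sock_fd and the interface name "eth0", error handling
 * omitted), enabling Tx timestamps and timestamping of all Rx packets:
 *
 *	struct hwtstamp_config cfg = { };
 *	struct ifreq ifr = { };
 *
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_ALL;
 *	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */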
2306 
2307 /**
2308  * ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns
2309  * @rx_desc: Receive descriptor
2310  * @pkt_ctx: Packet context to get the cached time
2311  *
2312  * The Rx descriptor delivers the timestamp; extend it to 64 bits using the cached PHC time.
2313  */
2314 u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
2315 			const struct ice_pkt_ctx *pkt_ctx)
2316 {
2317 	u64 ts_ns, cached_time;
2318 	u32 ts_high;
2319 
2320 	if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
2321 		return 0;
2322 
2323 	cached_time = READ_ONCE(pkt_ctx->cached_phctime);
2324 
2325 	/* Do not report a timestamp if we don't have a cached PHC time */
2326 	if (!cached_time)
2327 		return 0;
2328 
2329 	/* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
2330 	 * PHC value, rather than accessing the PF. This also allows us to
2331 	 * simply pass the upper 32bits of nanoseconds directly. Calling
2332 	 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these
2333 	 * bits itself.
2334 	 */
2335 	ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
2336 	ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);
2337 
2338 	return ts_ns;
2339 }
2340 
2341 /**
2342  * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins
2343  * @pf: pointer to the PF structure
2344  * @info: PTP clock info structure
2345  *
2346  * Disable the OS access to the SMA pins. Called to clear out the OS
2347  * indications of pin support when we fail to set up the E810-T SMA control
2348  * register.
2349  */
2350 static void
2351 ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
2352 {
2353 	struct device *dev = ice_pf_to_dev(pf);
2354 
2355 	dev_warn(dev, "Failed to configure E810-T SMA pin control\n");
2356 
2357 	info->enable = NULL;
2358 	info->verify = NULL;
2359 	info->n_pins = 0;
2360 	info->n_ext_ts = 0;
2361 	info->n_per_out = 0;
2362 }
2363 
2364 /**
2365  * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins
2366  * @pf: pointer to the PF structure
2367  * @info: PTP clock info structure
2368  *
2369  * Finish setting up the SMA pins by allocating pin_config, and setting it up
2370  * according to the current status of the SMA. On failure, disable all of the
2371  * extended SMA pin support.
2372  */
2373 static void
2374 ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
2375 {
2376 	struct device *dev = ice_pf_to_dev(pf);
2377 	int err;
2378 
2379 	/* Allocate memory for kernel pins interface */
2380 	info->pin_config = devm_kcalloc(dev, info->n_pins,
2381 					sizeof(*info->pin_config), GFP_KERNEL);
2382 	if (!info->pin_config) {
2383 		ice_ptp_disable_sma_pins_e810t(pf, info);
2384 		return;
2385 	}
2386 
2387 	/* Read current SMA status */
2388 	err = ice_get_sma_config_e810t(&pf->hw, info->pin_config);
2389 	if (err)
2390 		ice_ptp_disable_sma_pins_e810t(pf, info);
2391 }
2392 
2393 /**
2394  * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
2395  * @pf: pointer to the PF instance
2396  * @info: PTP clock capabilities
2397  */
2398 static void
2399 ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
2400 {
2401 	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
2402 		info->n_ext_ts = N_EXT_TS_E810;
2403 		info->n_per_out = N_PER_OUT_E810T;
2404 		info->n_pins = NUM_PTP_PINS_E810T;
2405 		info->verify = ice_verify_pin_e810t;
2406 
2407 		/* Complete setup of the SMA pins */
2408 		ice_ptp_setup_sma_pins_e810t(pf, info);
2409 	} else if (ice_is_e810t(&pf->hw)) {
2410 		info->n_ext_ts = N_EXT_TS_NO_SMA_E810T;
2411 		info->n_per_out = N_PER_OUT_NO_SMA_E810T;
2412 	} else {
2413 		info->n_per_out = N_PER_OUT_E810;
2414 		info->n_ext_ts = N_EXT_TS_E810;
2415 	}
2416 }
2417 
2418 /**
2419  * ice_ptp_setup_pins_e823 - Setup PTP pins in sysfs
2420  * @pf: pointer to the PF instance
2421  * @info: PTP clock capabilities
2422  */
2423 static void
2424 ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info)
2425 {
2426 	info->pps = 1;
2427 	info->n_per_out = 0;
2428 	info->n_ext_ts = 1;
2429 }
2430 
2431 /**
2432  * ice_ptp_set_funcs_e82x - Set specialized functions for E82x support
2433  * @pf: Board private structure
2434  * @info: PTP info to fill
2435  *
2436  * Assign functions to the PTP capabilities structure for E82x devices.
2437  * Functions which operate across all device families should be set directly
2438  * in ice_ptp_set_caps. Only add functions here which are distinct for E82x
2439  * devices.
2440  */
2441 static void
2442 ice_ptp_set_funcs_e82x(struct ice_pf *pf, struct ptp_clock_info *info)
2443 {
2444 #ifdef CONFIG_ICE_HWTS
2445 	if (boot_cpu_has(X86_FEATURE_ART) &&
2446 	    boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
2447 		info->getcrosststamp = ice_ptp_getcrosststamp_e82x;
2448 #endif /* CONFIG_ICE_HWTS */
2449 }
2450 
2451 /**
2452  * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
2453  * @pf: Board private structure
2454  * @info: PTP info to fill
2455  *
2456  * Assign functions to the PTP capabilities structure for E810 devices.
2457  * Functions which operate across all device families should be set directly
2458  * in ice_ptp_set_caps. Only add functions here which are distinct for E810
2459  * devices.
2460  */
2461 static void
2462 ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
2463 {
2464 	info->enable = ice_ptp_gpio_enable_e810;
2465 	ice_ptp_setup_pins_e810(pf, info);
2466 }
2467 
2468 /**
2469  * ice_ptp_set_funcs_e823 - Set specialized functions for E823 support
2470  * @pf: Board private structure
2471  * @info: PTP info to fill
2472  *
2473  * Assign functions to the PTP capabilities structure for E823 devices.
2474  * Functions which operate across all device families should be set directly
2475  * in ice_ptp_set_caps. Only add functions here which are distinct for E823
2476  * devices.
2477  */
2478 static void
2479 ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info)
2480 {
2481 	ice_ptp_set_funcs_e82x(pf, info);
2482 
2483 	info->enable = ice_ptp_gpio_enable_e823;
2484 	ice_ptp_setup_pins_e823(pf, info);
2485 }
2486 
2487 /**
2488  * ice_ptp_set_caps - Set PTP capabilities
2489  * @pf: Board private structure
2490  */
2491 static void ice_ptp_set_caps(struct ice_pf *pf)
2492 {
2493 	struct ptp_clock_info *info = &pf->ptp.info;
2494 	struct device *dev = ice_pf_to_dev(pf);
2495 
2496 	snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
2497 		 dev_driver_string(dev), dev_name(dev));
2498 	info->owner = THIS_MODULE;
2499 	info->max_adj = 100000000;
2500 	info->adjtime = ice_ptp_adjtime;
2501 	info->adjfine = ice_ptp_adjfine;
2502 	info->gettimex64 = ice_ptp_gettimex64;
2503 	info->settime64 = ice_ptp_settime64;
2504 
2505 	if (ice_is_e810(&pf->hw))
2506 		ice_ptp_set_funcs_e810(pf, info);
2507 	else if (ice_is_e823(&pf->hw))
2508 		ice_ptp_set_funcs_e823(pf, info);
2509 	else
2510 		ice_ptp_set_funcs_e82x(pf, info);
2511 }
2512 
2513 /**
2514  * ice_ptp_create_clock - Create PTP clock device for userspace
2515  * @pf: Board private structure
2516  *
2517  * This function creates a new PTP clock device if one does not already exist.
2518  * It returns an error if the device cannot be created, and success if a clock
2519  * device is already present. Used by ice_ptp_init to create the clock initially
2520  * and to prevent global resets from creating duplicate clock devices.
2521  */
2522 static long ice_ptp_create_clock(struct ice_pf *pf)
2523 {
2524 	struct ptp_clock_info *info;
2525 	struct device *dev;
2526 
2527 	/* No need to create a clock device if we already have one */
2528 	if (pf->ptp.clock)
2529 		return 0;
2530 
2531 	ice_ptp_set_caps(pf);
2532 
2533 	info = &pf->ptp.info;
2534 	dev = ice_pf_to_dev(pf);
2535 
2536 	/* Attempt to register the clock before enabling the hardware. */
2537 	pf->ptp.clock = ptp_clock_register(info, dev);
2538 	if (IS_ERR(pf->ptp.clock)) {
2539 		dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device");
2540 		return PTR_ERR(pf->ptp.clock);
2541 	}
2542 
2543 	return 0;
2544 }
2545 
2546 /**
2547  * ice_ptp_request_ts - Request an available Tx timestamp index
2548  * @tx: the PTP Tx timestamp tracker to request from
2549  * @skb: the SKB to associate with this timestamp request
2550  */
2551 s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
2552 {
2553 	unsigned long flags;
2554 	u8 idx;
2555 
2556 	spin_lock_irqsave(&tx->lock, flags);
2557 
2558 	/* Check that this tracker is accepting new timestamp requests */
2559 	if (!ice_ptp_is_tx_tracker_up(tx)) {
2560 		spin_unlock_irqrestore(&tx->lock, flags);
2561 		return -1;
2562 	}
2563 
2564 	/* Find and set the first available index, starting the search just
	 * after the last index read (last_ll_ts_idx_read) and wrapping around
	 * if needed
	 */
2565 	idx = find_next_zero_bit(tx->in_use, tx->len,
2566 				 tx->last_ll_ts_idx_read + 1);
2567 	if (idx == tx->len)
2568 		idx = find_first_zero_bit(tx->in_use, tx->len);
2569 
2570 	if (idx < tx->len) {
2571 		/* We got a valid index that no other thread could have set. Store
2572 		 * a reference to the skb and the start time to allow discarding old
2573 		 * requests.
2574 		 */
2575 		set_bit(idx, tx->in_use);
2576 		clear_bit(idx, tx->stale);
2577 		tx->tstamps[idx].start = jiffies;
2578 		tx->tstamps[idx].skb = skb_get(skb);
2579 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2580 		ice_trace(tx_tstamp_request, skb, idx);
2581 	}
2582 
2583 	spin_unlock_irqrestore(&tx->lock, flags);
2584 
2585 	/* return the appropriate PHY timestamp register index, -1 if no
2586 	 * indexes were available.
2587 	 */
2588 	if (idx >= tx->len)
2589 		return -1;
2590 	else
2591 		return idx + tx->offset;
2592 }
2593 
2594 /**
2595  * ice_ptp_process_ts - Process the PTP Tx timestamps
2596  * @pf: Board private structure
2597  *
2598  * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx
2599  * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise.
2600  */
2601 enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
2602 {
2603 	switch (pf->ptp.tx_interrupt_mode) {
2604 	case ICE_PTP_TX_INTERRUPT_NONE:
2605 		/* The clock owner processes timestamps on behalf of this device */
2606 		return ICE_TX_TSTAMP_WORK_DONE;
2607 	case ICE_PTP_TX_INTERRUPT_SELF:
2608 		/* This device handles its own timestamps */
2609 		return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
2610 	case ICE_PTP_TX_INTERRUPT_ALL:
2611 		/* This device handles timestamps for all ports */
2612 		return ice_ptp_tx_tstamp_owner(pf);
2613 	default:
2614 		WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
2615 			  pf->ptp.tx_interrupt_mode);
2616 		return ICE_TX_TSTAMP_WORK_DONE;
2617 	}
2618 }
2619 
2620 /**
2621  * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt
2622  * @pf: Board private structure
2623  *
2624  * The device PHY issues Tx timestamp interrupts to the driver for processing
2625  * timestamp data from the PHY. It will not interrupt again until all
2626  * current timestamp data is read. In rare circumstances, it is possible that
2627  * the driver fails to read all outstanding data.
2628  *
2629  * To avoid getting permanently stuck, periodically check if the PHY has
2630  * outstanding timestamp data. If so, trigger an interrupt from software to
2631  * process this data.
2632  */
2633 static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
2634 {
2635 	struct device *dev = ice_pf_to_dev(pf);
2636 	struct ice_hw *hw = &pf->hw;
2637 	bool trigger_oicr = false;
2638 	unsigned int i;
2639 
2640 	if (ice_is_e810(hw))
2641 		return;
2642 
2643 	if (!ice_pf_src_tmr_owned(pf))
2644 		return;
2645 
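	/* Poll each quad's Tx timestamp ready bitmap; any set bit means the
	 * PHY still holds timestamp data that has not been read out.
	 */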
2646 	for (i = 0; i < ICE_MAX_QUAD; i++) {
2647 		u64 tstamp_ready;
2648 		int err;
2649 
2650 		err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
2651 		if (!err && tstamp_ready) {
2652 			trigger_oicr = true;
2653 			break;
2654 		}
2655 	}
2656 
2657 	if (trigger_oicr) {
2658 		/* Trigger a software interrupt, to ensure this data
2659 		 * gets processed.
2660 		 */
2661 		dev_dbg(dev, "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n");
2662 
2663 		wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
2664 		ice_flush(hw);
2665 	}
2666 }
2667 
2668 static void ice_ptp_periodic_work(struct kthread_work *work)
2669 {
2670 	struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
2671 	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
2672 	int err;
2673 
2674 	if (pf->ptp.state != ICE_PTP_READY)
2675 		return;
2676 
2677 	err = ice_ptp_update_cached_phctime(pf);
2678 
2679 	ice_ptp_maybe_trigger_tx_interrupt(pf);
2680 
2681 	/* Run twice a second or reschedule if phc update failed */
2682 	kthread_queue_delayed_work(ptp->kworker, &ptp->work,
2683 				   msecs_to_jiffies(err ? 10 : 500));
2684 }
2685 
2686 /**
2687  * ice_ptp_prepare_for_reset - Prepare PTP for reset
2688  * @pf: Board private structure
2689  * @reset_type: the reset type being performed
2690  */
2691 void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
2692 {
2693 	struct ice_ptp *ptp = &pf->ptp;
2694 	u8 src_tmr;
2695 
2696 	if (ptp->state != ICE_PTP_READY)
2697 		return;
2698 
2699 	ptp->state = ICE_PTP_RESETTING;
2700 
2701 	/* Disable timestamping for both Tx and Rx */
2702 	ice_ptp_disable_timestamp_mode(pf);
2703 
2704 	kthread_cancel_delayed_work_sync(&ptp->work);
2705 
2706 	if (reset_type == ICE_RESET_PFR)
2707 		return;
2708 
2709 	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
2710 
2711 	/* Disable periodic outputs */
2712 	ice_ptp_disable_all_clkout(pf);
2713 
2714 	src_tmr = ice_get_ptp_src_clock_index(&pf->hw);
2715 
2716 	/* Disable source clock */
2717 	wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);
2718 
2719 	/* Capture the system time so the PHC can be restored after reset */
2720 	ptp->reset_time = ktime_get_real_ns();
2721 }
2722 
2723 /**
2724  * ice_ptp_rebuild_owner - Initialize PTP clock owner after reset
2725  * @pf: Board private structure
2726  *
2727  * Companion function for ice_ptp_rebuild() which handles tasks that only the
2728  * PTP clock owner instance should perform.
2729  */
2730 static int ice_ptp_rebuild_owner(struct ice_pf *pf)
2731 {
2732 	struct ice_ptp *ptp = &pf->ptp;
2733 	struct ice_hw *hw = &pf->hw;
2734 	struct timespec64 ts;
2735 	u64 time_diff;
2736 	int err;
2737 
2738 	err = ice_ptp_init_phc(hw);
2739 	if (err)
2740 		return err;
2741 
2742 	/* Acquire the global hardware lock */
2743 	if (!ice_ptp_lock(hw)) {
2744 		err = -EBUSY;
2745 		return err;
2746 	}
2747 
2748 	/* Write the increment time value to PHY and LAN */
2749 	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
2750 	if (err) {
2751 		ice_ptp_unlock(hw);
2752 		return err;
2753 	}
2754 
2755 	/* Write the initial Time value to PHY and LAN using the cached PHC
2756 	 * time before the reset and time difference between stopping and
2757 	 * starting the clock.
2758 	 */
2759 	if (ptp->cached_phc_time) {
2760 		time_diff = ktime_get_real_ns() - ptp->reset_time;
2761 		ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);
2762 	} else {
2763 		ts = ktime_to_timespec64(ktime_get_real());
2764 	}
2765 	err = ice_ptp_write_init(pf, &ts);
2766 	if (err) {
2767 		ice_ptp_unlock(hw);
2768 		return err;
2769 	}
2770 
2771 	/* Release the global hardware lock */
2772 	ice_ptp_unlock(hw);
2773 
2774 	/* Flush software tracking of any outstanding timestamps since we're
2775 	 * about to flush the PHY timestamp block.
2776 	 */
2777 	ice_ptp_flush_all_tx_tracker(pf);
2778 
2779 	if (!ice_is_e810(hw)) {
2780 		/* Enable quad interrupts */
2781 		err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
2782 		if (err)
2783 			return err;
2784 
2785 		ice_ptp_restart_all_phy(pf);
2786 	}
2787 
2788 	/* Re-enable all periodic outputs and external timestamp events */
2789 	ice_ptp_enable_all_clkout(pf);
2790 	ice_ptp_enable_all_extts(pf);
2791 
2792 	return 0;
2793 }
2794 
2795 /**
2796  * ice_ptp_rebuild - Initialize PTP hardware clock support after reset
2797  * @pf: Board private structure
2798  * @reset_type: the reset type being performed
2799  */
2800 void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
2801 {
2802 	struct ice_ptp *ptp = &pf->ptp;
2803 	int err;
2804 
2805 	if (ptp->state == ICE_PTP_READY) {
2806 		ice_ptp_prepare_for_reset(pf, reset_type);
2807 	} else if (ptp->state != ICE_PTP_RESETTING) {
2808 		err = -EINVAL;
2809 		dev_err(ice_pf_to_dev(pf), "PTP was not initialized\n");
2810 		goto err;
2811 	}
2812 
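	/* A PF reset leaves the source timer running, so the owner only needs
	 * to re-initialize its clock state for deeper resets.
	 */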
2813 	if (ice_pf_src_tmr_owned(pf) && reset_type != ICE_RESET_PFR) {
2814 		err = ice_ptp_rebuild_owner(pf);
2815 		if (err)
2816 			goto err;
2817 	}
2818 
2819 	ptp->state = ICE_PTP_READY;
2820 
2821 	/* Start periodic work going */
2822 	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
2823 
2824 	dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
2825 	return;
2826 
2827 err:
2828 	ptp->state = ICE_PTP_ERROR;
2829 	dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
2830 }
2831 
2832 /**
2833  * ice_ptp_aux_dev_to_aux_pf - Get auxiliary PF handle for the auxiliary device
2834  * @aux_dev: auxiliary device to get the auxiliary PF for
2835  */
2836 static struct ice_pf *
2837 ice_ptp_aux_dev_to_aux_pf(struct auxiliary_device *aux_dev)
2838 {
2839 	struct ice_ptp_port *aux_port;
2840 	struct ice_ptp *aux_ptp;
2841 
2842 	aux_port = container_of(aux_dev, struct ice_ptp_port, aux_dev);
2843 	aux_ptp = container_of(aux_port, struct ice_ptp, port);
2844 
2845 	return container_of(aux_ptp, struct ice_pf, ptp);
2846 }
2847 
2848 /**
2849  * ice_ptp_aux_dev_to_owner_pf - Get PF handle for the auxiliary device
2850  * @aux_dev: auxiliary device to get the PF for
2851  */
2852 static struct ice_pf *
2853 ice_ptp_aux_dev_to_owner_pf(struct auxiliary_device *aux_dev)
2854 {
2855 	struct ice_ptp_port_owner *ports_owner;
2856 	struct auxiliary_driver *aux_drv;
2857 	struct ice_ptp *owner_ptp;
2858 
2859 	if (!aux_dev->dev.driver)
2860 		return NULL;
2861 
2862 	aux_drv = to_auxiliary_drv(aux_dev->dev.driver);
2863 	ports_owner = container_of(aux_drv, struct ice_ptp_port_owner,
2864 				   aux_driver);
2865 	owner_ptp = container_of(ports_owner, struct ice_ptp, ports_owner);
2866 	return container_of(owner_ptp, struct ice_pf, ptp);
2867 }
2868 
2869 /**
2870  * ice_ptp_auxbus_probe - Probe auxiliary devices
2871  * @aux_dev: PF's auxiliary device
2872  * @id: Auxiliary device ID
2873  */
2874 static int ice_ptp_auxbus_probe(struct auxiliary_device *aux_dev,
2875 				const struct auxiliary_device_id *id)
2876 {
2877 	struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
2878 	struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev);
2879 
2880 	if (WARN_ON(!owner_pf))
2881 		return -ENODEV;
2882 
2883 	INIT_LIST_HEAD(&aux_pf->ptp.port.list_member);
2884 	mutex_lock(&owner_pf->ptp.ports_owner.lock);
2885 	list_add(&aux_pf->ptp.port.list_member,
2886 		 &owner_pf->ptp.ports_owner.ports);
2887 	mutex_unlock(&owner_pf->ptp.ports_owner.lock);
2888 
2889 	return 0;
2890 }
2891 
2892 /**
2893  * ice_ptp_auxbus_remove - Remove auxiliary devices from the bus
2894  * @aux_dev: PF's auxiliary device
2895  */
2896 static void ice_ptp_auxbus_remove(struct auxiliary_device *aux_dev)
2897 {
2898 	struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
2899 	struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev);
2900 
2901 	mutex_lock(&owner_pf->ptp.ports_owner.lock);
2902 	list_del(&aux_pf->ptp.port.list_member);
2903 	mutex_unlock(&owner_pf->ptp.ports_owner.lock);
2904 }
2905 
2906 /**
2907  * ice_ptp_auxbus_shutdown - Auxiliary bus shutdown callback
2908  * @aux_dev: PF's auxiliary device
2909  */
2910 static void ice_ptp_auxbus_shutdown(struct auxiliary_device *aux_dev)
2911 {
2912 	/* Nothing to do here; the hook only satisfies the auxiliary bus driver interface */
2913 }
2914 
2915 /**
2916  * ice_ptp_auxbus_suspend - Auxiliary bus suspend callback
2917  * @aux_dev: PF's auxiliary device
2918  * @state: power management state indicator
2919  */
2920 static int
2921 ice_ptp_auxbus_suspend(struct auxiliary_device *aux_dev, pm_message_t state)
2922 {
2923 	/* Nothing to do here; the hook only satisfies the auxiliary bus driver interface */
2924 	return 0;
2925 }
2926 
2927 /**
2928  * ice_ptp_auxbus_resume - Auxiliary bus resume callback
2929  * @aux_dev: PF's auxiliary device
2930  */
2931 static int ice_ptp_auxbus_resume(struct auxiliary_device *aux_dev)
2932 {
2933 	/* Nothing to do here; the hook only satisfies the auxiliary bus driver interface */
2934 	return 0;
2935 }
2936 
2937 /**
2938  * ice_ptp_auxbus_create_id_table - Create auxiliary device ID table
2939  * @pf: Board private structure
2940  * @name: auxiliary bus driver name
2941  */
2942 static struct auxiliary_device_id *
2943 ice_ptp_auxbus_create_id_table(struct ice_pf *pf, const char *name)
2944 {
2945 	struct auxiliary_device_id *ids;
2946 
2947 	/* Second id left empty to terminate the array */
2948 	ids = devm_kcalloc(ice_pf_to_dev(pf), 2,
2949 			   sizeof(struct auxiliary_device_id), GFP_KERNEL);
2950 	if (!ids)
2951 		return NULL;
2952 
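	/* The auxiliary bus matches devices to drivers by "<module>.<name>",
	 * hence the "ice." prefix on the ID entry.
	 */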
2953 	snprintf(ids[0].name, sizeof(ids[0].name), "ice.%s", name);
2954 
2955 	return ids;
2956 }
2957 
2958 /**
2959  * ice_ptp_register_auxbus_driver - Register PTP auxiliary bus driver
2960  * @pf: Board private structure
2961  */
2962 static int ice_ptp_register_auxbus_driver(struct ice_pf *pf)
2963 {
2964 	struct auxiliary_driver *aux_driver;
2965 	struct ice_ptp *ptp;
2966 	struct device *dev;
2967 	char *name;
2968 	int err;
2969 
2970 	ptp = &pf->ptp;
2971 	dev = ice_pf_to_dev(pf);
2972 	aux_driver = &ptp->ports_owner.aux_driver;
2973 	INIT_LIST_HEAD(&ptp->ports_owner.ports);
2974 	mutex_init(&ptp->ports_owner.lock);
2975 	name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
2976 			      pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
2977 			      ice_get_ptp_src_clock_index(&pf->hw));
2978 	if (!name)
2979 		return -ENOMEM;
2980 
2981 	aux_driver->name = name;
2982 	aux_driver->shutdown = ice_ptp_auxbus_shutdown;
2983 	aux_driver->suspend = ice_ptp_auxbus_suspend;
2984 	aux_driver->remove = ice_ptp_auxbus_remove;
2985 	aux_driver->resume = ice_ptp_auxbus_resume;
2986 	aux_driver->probe = ice_ptp_auxbus_probe;
2987 	aux_driver->id_table = ice_ptp_auxbus_create_id_table(pf, name);
2988 	if (!aux_driver->id_table)
2989 		return -ENOMEM;
2990 
2991 	err = auxiliary_driver_register(aux_driver);
2992 	if (err) {
2993 		devm_kfree(dev, aux_driver->id_table);
2994 		dev_err(dev, "Failed registering aux_driver, name <%s>\n",
2995 			name);
2996 	}
2997 
2998 	return err;
2999 }
3000 
3001 /**
3002  * ice_ptp_unregister_auxbus_driver - Unregister PTP auxiliary bus driver
3003  * @pf: Board private structure
3004  */
3005 static void ice_ptp_unregister_auxbus_driver(struct ice_pf *pf)
3006 {
3007 	struct auxiliary_driver *aux_driver = &pf->ptp.ports_owner.aux_driver;
3008 
3009 	auxiliary_driver_unregister(aux_driver);
3010 	devm_kfree(ice_pf_to_dev(pf), aux_driver->id_table);
3011 
3012 	mutex_destroy(&pf->ptp.ports_owner.lock);
3013 }
3014 
3015 /**
3016  * ice_ptp_clock_index - Get the PTP clock index for this device
3017  * @pf: Board private structure
3018  *
3019  * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock
3020  * is associated.
3021  */
3022 int ice_ptp_clock_index(struct ice_pf *pf)
3023 {
3024 	struct auxiliary_device *aux_dev;
3025 	struct ice_pf *owner_pf;
3026 	struct ptp_clock *clock;
3027 
3028 	aux_dev = &pf->ptp.port.aux_dev;
3029 	owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
3030 	if (!owner_pf)
3031 		return -1;
3032 	clock = owner_pf->ptp.clock;
3033 
3034 	return clock ? ptp_clock_index(clock) : -1;
3035 }
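
/* The index returned above maps to the /dev/ptp<index> character device. A
 * userspace sketch (not driver code; FD_TO_CLOCKID() as used by the kernel's
 * testptp example, error handling omitted) reading that clock:
 *
 *	int fd = open("/dev/ptp0", O_RDWR);
 *	clockid_t clkid = FD_TO_CLOCKID(fd);
 *	struct timespec ts;
 *
 *	clock_gettime(clkid, &ts);
 */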
3036 
3037 /**
3038  * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
3039  * @pf: Board private structure
3040  *
3041  * Set up and initialize a PTP clock device that represents the device hardware
3042  * clock. Save the clock index for other functions connected to the same
3043  * hardware resource.
3044  */
3045 static int ice_ptp_init_owner(struct ice_pf *pf)
3046 {
3047 	struct ice_hw *hw = &pf->hw;
3048 	struct timespec64 ts;
3049 	int err;
3050 
3051 	err = ice_ptp_init_phc(hw);
3052 	if (err) {
3053 		dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n",
3054 			err);
3055 		return err;
3056 	}
3057 
3058 	/* Acquire the global hardware lock */
3059 	if (!ice_ptp_lock(hw)) {
3060 		err = -EBUSY;
3061 		goto err_exit;
3062 	}
3063 
3064 	/* Write the increment time value to PHY and LAN */
3065 	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
3066 	if (err) {
3067 		ice_ptp_unlock(hw);
3068 		goto err_exit;
3069 	}
3070 
3071 	ts = ktime_to_timespec64(ktime_get_real());
3072 	/* Write the initial Time value to PHY and LAN */
3073 	err = ice_ptp_write_init(pf, &ts);
3074 	if (err) {
3075 		ice_ptp_unlock(hw);
3076 		goto err_exit;
3077 	}
3078 
3079 	/* Release the global hardware lock */
3080 	ice_ptp_unlock(hw);
3081 
3082 	if (!ice_is_e810(hw)) {
3083 		/* Enable quad interrupts */
3084 		err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
3085 		if (err)
3086 			goto err_exit;
3087 	}
3088 
3089 	/* Ensure we have a clock device */
3090 	err = ice_ptp_create_clock(pf);
3091 	if (err)
3092 		goto err_clk;
3093 
3094 	err = ice_ptp_register_auxbus_driver(pf);
3095 	if (err) {
3096 		dev_err(ice_pf_to_dev(pf), "Failed to register PTP auxbus driver");
3097 		goto err_aux;
3098 	}
3099 
3100 	return 0;
3101 err_aux:
3102 	ptp_clock_unregister(pf->ptp.clock);
3103 err_clk:
3104 	pf->ptp.clock = NULL;
3105 err_exit:
3106 	return err;
3107 }
3108 
3109 /**
3110  * ice_ptp_init_work - Initialize PTP work threads
3111  * @pf: Board private structure
3112  * @ptp: PF PTP structure
3113  */
3114 static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
3115 {
3116 	struct kthread_worker *kworker;
3117 
3118 	/* Initialize work functions */
3119 	kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);
3120 
3121 	/* Allocate a kworker for handling work required for the ports
3122 	 * connected to the PTP hardware clock.
3123 	 */
3124 	kworker = kthread_create_worker(0, "ice-ptp-%s",
3125 					dev_name(ice_pf_to_dev(pf)));
3126 	if (IS_ERR(kworker))
3127 		return PTR_ERR(kworker);
3128 
3129 	ptp->kworker = kworker;
3130 
3131 	/* Start periodic work going */
3132 	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
3133 
3134 	return 0;
3135 }
3136 
3137 /**
3138  * ice_ptp_init_port - Initialize PTP port structure
3139  * @pf: Board private structure
3140  * @ptp_port: PTP port structure
3141  */
3142 static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
3143 {
3144 	struct ice_hw *hw = &pf->hw;
3145 
3146 	mutex_init(&ptp_port->ps_lock);
3147 
3148 	switch (hw->phy_model) {
3149 	case ICE_PHY_E810:
3150 		return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
3151 	case ICE_PHY_E82X:
3152 		kthread_init_delayed_work(&ptp_port->ov_work,
3153 					  ice_ptp_wait_for_offsets);
3154 
3155 		return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
3156 					    ptp_port->port_num);
3157 	default:
3158 		return -ENODEV;
3159 	}
3160 }
3161 
3162 /**
3163  * ice_ptp_release_auxbus_device - Release callback for the PTP auxiliary bus device
3164  * @dev: device that utilizes the auxbus
3165  */
3166 static void ice_ptp_release_auxbus_device(struct device *dev)
3167 {
3168 	/* Nothing to do here, but the auxiliary device requires a release callback */
3169 }
3170 
3171 /**
3172  * ice_ptp_create_auxbus_device - Create PTP auxiliary bus device
3173  * @pf: Board private structure
3174  */
3175 static int ice_ptp_create_auxbus_device(struct ice_pf *pf)
3176 {
3177 	struct auxiliary_device *aux_dev;
3178 	struct ice_ptp *ptp;
3179 	struct device *dev;
3180 	char *name;
3181 	int err;
3182 	u32 id;
3183 
3184 	ptp = &pf->ptp;
3185 	id = ptp->port.port_num;
3186 	dev = ice_pf_to_dev(pf);
3187 
3188 	aux_dev = &ptp->port.aux_dev;
3189 
3190 	name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
3191 			      pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
3192 			      ice_get_ptp_src_clock_index(&pf->hw));
3193 	if (!name)
3194 		return -ENOMEM;
3195 
3196 	aux_dev->name = name;
3197 	aux_dev->id = id;
3198 	aux_dev->dev.release = ice_ptp_release_auxbus_device;
3199 	aux_dev->dev.parent = dev;
3200 
3201 	err = auxiliary_device_init(aux_dev);
3202 	if (err)
3203 		goto aux_err;
3204 
3205 	err = auxiliary_device_add(aux_dev);
3206 	if (err) {
3207 		auxiliary_device_uninit(aux_dev);
3208 		goto aux_err;
3209 	}
3210 
3211 	return 0;
3212 aux_err:
3213 	dev_err(dev, "Failed to create PTP auxiliary bus device <%s>\n", name);
3214 	devm_kfree(dev, name);
3215 	return err;
3216 }
3217 
3218 /**
3219  * ice_ptp_remove_auxbus_device - Remove PTP auxiliary bus device
3220  * @pf: Board private structure
3221  */
3222 static void ice_ptp_remove_auxbus_device(struct ice_pf *pf)
3223 {
3224 	struct auxiliary_device *aux_dev = &pf->ptp.port.aux_dev;
3225 
3226 	auxiliary_device_delete(aux_dev);
3227 	auxiliary_device_uninit(aux_dev);
3228 
3229 	memset(aux_dev, 0, sizeof(*aux_dev));
3230 }
3231 
3232 /**
3233  * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode
3234  * @pf: Board private structure
3235  *
3236  * Initialize the Tx timestamp interrupt mode for this device. For most device
3237  * types, each PF processes the interrupt and manages its own timestamps. For
3238  * E822-based devices, only the clock owner processes the timestamps. Other
3239  * PFs disable the interrupt and do not process their own timestamps.
3240  */
3241 static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
3242 {
3243 	switch (pf->hw.phy_model) {
3244 	case ICE_PHY_E82X:
3245 		/* E822 based PHY has the clock owner process the interrupt
3246 		 * for all ports.
3247 		 */
3248 		if (ice_pf_src_tmr_owned(pf))
3249 			pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL;
3250 		else
3251 			pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE;
3252 		break;
3253 	default:
3254 		/* other PHY types handle their own Tx interrupt */
3255 		pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF;
3256 	}
3257 }
3258 
3259 /**
3260  * ice_ptp_init - Initialize PTP hardware clock support
3261  * @pf: Board private structure
3262  *
3263  * Set up the device for interacting with the PTP hardware clock for all
3264  * functions, both the function that owns the clock hardware, and the
3265  * functions connected to the clock hardware.
3266  *
3267  * The clock owner will allocate and register a ptp_clock with the
3268  * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work
3269  * items used for asynchronous work such as Tx timestamps and periodic work.
3270  */
3271 void ice_ptp_init(struct ice_pf *pf)
3272 {
3273 	struct ice_ptp *ptp = &pf->ptp;
3274 	struct ice_hw *hw = &pf->hw;
3275 	int err;
3276 
3277 	ptp->state = ICE_PTP_INITIALIZING;
3278 
3279 	ice_ptp_init_phy_model(hw);
3280 
3281 	ice_ptp_init_tx_interrupt_mode(pf);
3282 
3283 	/* If this function owns the clock hardware, it must allocate and
3284 	 * configure the PTP clock device to represent it.
3285 	 */
3286 	if (ice_pf_src_tmr_owned(pf)) {
3287 		err = ice_ptp_init_owner(pf);
3288 		if (err)
3289 			goto err;
3290 	}
3291 
3292 	ptp->port.port_num = hw->pf_id;
3293 	err = ice_ptp_init_port(pf, &ptp->port);
3294 	if (err)
3295 		goto err;
3296 
3297 	/* Start the PHY timestamping block */
3298 	ice_ptp_reset_phy_timestamping(pf);
3299 
3300 	/* Configure initial Tx interrupt settings */
3301 	ice_ptp_cfg_tx_interrupt(pf);
3302 
3303 	err = ice_ptp_create_auxbus_device(pf);
3304 	if (err)
3305 		goto err;
3306 
3307 	ptp->state = ICE_PTP_READY;
3308 
3309 	err = ice_ptp_init_work(pf, ptp);
3310 	if (err)
3311 		goto err;
3312 
3313 	dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
3314 	return;
3315 
3316 err:
3317 	/* If we registered a PTP clock, release it */
3318 	if (pf->ptp.clock) {
3319 		ptp_clock_unregister(ptp->clock);
3320 		pf->ptp.clock = NULL;
3321 	}
3322 	ptp->state = ICE_PTP_ERROR;
3323 	dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
3324 }
3325 
3326 /**
3327  * ice_ptp_release - Disable the driver/HW support and unregister the clock
3328  * @pf: Board private structure
3329  *
3330  * This function handles the cleanup work required after initialization by
3331  * releasing the driver's PTP resources and unregistering the clock device
3332  */
3333 void ice_ptp_release(struct ice_pf *pf)
3334 {
3335 	if (pf->ptp.state != ICE_PTP_READY)
3336 		return;
3337 
3338 	pf->ptp.state = ICE_PTP_UNINIT;
3339 
3340 	/* Disable timestamping for both Tx and Rx */
3341 	ice_ptp_disable_timestamp_mode(pf);
3342 
3343 	ice_ptp_remove_auxbus_device(pf);
3344 
3345 	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
3346 
3347 	ice_ptp_disable_all_extts(pf);
3348 
3349 	kthread_cancel_delayed_work_sync(&pf->ptp.work);
3350 
3351 	ice_ptp_port_phy_stop(&pf->ptp.port);
3352 	mutex_destroy(&pf->ptp.port.ps_lock);
3353 	if (pf->ptp.kworker) {
3354 		kthread_destroy_worker(pf->ptp.kworker);
3355 		pf->ptp.kworker = NULL;
3356 	}
3357 
3358 	if (ice_pf_src_tmr_owned(pf))
3359 		ice_ptp_unregister_auxbus_driver(pf);
3360 
3361 	if (!pf->ptp.clock)
3362 		return;
3363 
3364 	/* Disable periodic outputs */
3365 	ice_ptp_disable_all_clkout(pf);
3366 
3367 	ptp_clock_unregister(pf->ptp.clock);
3368 	pf->ptp.clock = NULL;
3369 
3370 	dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
3371 }
3372