xref: /linux/drivers/net/ethernet/intel/ice/ice_ptp.c (revision a634dda26186cf9a51567020fcce52bcba5e1e59)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021, Intel Corporation. */
3 
4 #include "ice.h"
5 #include "ice_lib.h"
6 #include "ice_trace.h"
7 #include "ice_cgu_regs.h"
8 
9 static const char ice_pin_names[][64] = {
10 	"SDP0",
11 	"SDP1",
12 	"SDP2",
13 	"SDP3",
14 	"TIME_SYNC",
15 	"1PPS"
16 };
17 
18 static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = {
19 	/* name,        gpio */
20 	{  TIME_SYNC, {  4, -1 }},
21 	{  ONE_PPS,   { -1,  5 }},
22 };
23 
24 static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = {
25 	/* name,        gpio */
26 	{  SDP0,      {  0,  0 }},
27 	{  SDP1,      {  1,  1 }},
28 	{  SDP2,      {  2,  2 }},
29 	{  SDP3,      {  3,  3 }},
30 	{  TIME_SYNC, {  4, -1 }},
31 	{  ONE_PPS,   { -1,  5 }},
32 };
33 
34 static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = {
35 	/* name,      gpio */
36 	{  SDP0,    {  0, 0 }},
37 	{  SDP1,    {  1, 1 }},
38 	{  SDP2,    {  2, 2 }},
39 	{  SDP3,    {  3, 3 }},
40 	{  ONE_PPS, { -1, 5 }},
41 };
42 
43 static const char ice_pin_names_nvm[][64] = {
44 	"GNSS",
45 	"SMA1",
46 	"U.FL1",
47 	"SMA2",
48 	"U.FL2",
49 };
50 
51 static const struct ice_ptp_pin_desc ice_pin_desc_e810_sma[] = {
52 	/* name,   gpio */
53 	{  GNSS, {  1, -1 }},
54 	{  SMA1, {  1,  0 }},
55 	{  UFL1, { -1,  0 }},
56 	{  SMA2, {  3,  2 }},
57 	{  UFL2, {  3, -1 }},
58 };
59 
60 static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf)
61 {
62 	return !pf->adapter ? NULL : pf->adapter->ctrl_pf;
63 }
64 
65 static struct ice_ptp *ice_get_ctrl_ptp(struct ice_pf *pf)
66 {
67 	struct ice_pf *ctrl_pf = ice_get_ctrl_pf(pf);
68 
69 	return !ctrl_pf ? NULL : &ctrl_pf->ptp;
70 }
71 
72 /**
73  * ice_ptp_find_pin_idx - Find pin index in ptp_pin_desc
74  * @pf: Board private structure
75  * @func: Pin function
76  * @chan: GPIO channel
77  *
78  * Return: index of the pin when the pin is present, -1 otherwise
79  */
80 static int ice_ptp_find_pin_idx(struct ice_pf *pf, enum ptp_pin_function func,
81 				unsigned int chan)
82 {
83 	const struct ptp_clock_info *info = &pf->ptp.info;
84 	int i;
85 
86 	for (i = 0; i < info->n_pins; i++) {
87 		if (info->pin_config[i].func == func &&
88 		    info->pin_config[i].chan == chan)
89 			return i;
90 	}
91 
92 	return -1;
93 }
94 
95 /**
96  * ice_ptp_update_sma_data - update SMA pins data according to pins setup
97  * @pf: Board private structure
98  * @sma_pins: parsed SMA pins status
99  * @data: SMA data to update
100  */
101 static void ice_ptp_update_sma_data(struct ice_pf *pf, unsigned int sma_pins[],
102 				    u8 *data)
103 {
104 	const char *state1, *state2;
105 
106 	/* Set the right state based on the desired configuration.
107 	 * When bit is set, functionality is disabled.
108 	 */
109 	*data &= ~ICE_ALL_SMA_MASK;
110 	if (!sma_pins[UFL1 - 1]) {
111 		if (sma_pins[SMA1 - 1] == PTP_PF_EXTTS) {
112 			state1 = "SMA1 Rx, U.FL1 disabled";
113 			*data |= ICE_SMA1_TX_EN;
114 		} else if (sma_pins[SMA1 - 1] == PTP_PF_PEROUT) {
115 			state1 = "SMA1 Tx, U.FL1 disabled";
116 			*data |= ICE_SMA1_DIR_EN;
117 		} else {
118 			state1 = "SMA1 disabled, U.FL1 disabled";
119 			*data |= ICE_SMA1_MASK;
120 		}
121 	} else {
122 		/* U.FL1 Tx will always enable SMA1 Rx */
123 		state1 = "SMA1 Rx, U.FL1 Tx";
124 	}
125 
126 	if (!sma_pins[UFL2 - 1]) {
127 		if (sma_pins[SMA2 - 1] == PTP_PF_EXTTS) {
128 			state2 = "SMA2 Rx, U.FL2 disabled";
129 			*data |= ICE_SMA2_TX_EN | ICE_SMA2_UFL2_RX_DIS;
130 		} else if (sma_pins[SMA2 - 1] == PTP_PF_PEROUT) {
131 			state2 = "SMA2 Tx, U.FL2 disabled";
132 			*data |= ICE_SMA2_DIR_EN | ICE_SMA2_UFL2_RX_DIS;
133 		} else {
134 			state2 = "SMA2 disabled, U.FL2 disabled";
135 			*data |= ICE_SMA2_MASK;
136 		}
137 	} else {
138 		if (!sma_pins[SMA2 - 1]) {
139 			state2 = "SMA2 disabled, U.FL2 Rx";
140 			*data |= ICE_SMA2_DIR_EN | ICE_SMA2_TX_EN;
141 		} else {
142 			state2 = "SMA2 Tx, U.FL2 Rx";
143 			*data |= ICE_SMA2_DIR_EN;
144 		}
145 	}
146 
147 	dev_dbg(ice_pf_to_dev(pf), "%s, %s\n", state1, state2);
148 }
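
/*
 * Illustrative summary of the mapping implemented above; when a bit is set
 * in the SMA control byte, the corresponding functionality is disabled:
 *
 *   U.FL1 off, SMA1 = EXTTS  -> ICE_SMA1_TX_EN                  (SMA1 Rx)
 *   U.FL1 off, SMA1 = PEROUT -> ICE_SMA1_DIR_EN                 (SMA1 Tx)
 *   U.FL1 off, SMA1 off      -> ICE_SMA1_MASK             (both disabled)
 *   U.FL1 on                 -> no SMA1 bits set    (SMA1 Rx, U.FL1 Tx)
 *
 *   U.FL2 off, SMA2 = EXTTS  -> ICE_SMA2_TX_EN | ICE_SMA2_UFL2_RX_DIS
 *   U.FL2 off, SMA2 = PEROUT -> ICE_SMA2_DIR_EN | ICE_SMA2_UFL2_RX_DIS
 *   U.FL2 off, SMA2 off      -> ICE_SMA2_MASK
 *   U.FL2 on,  SMA2 off      -> ICE_SMA2_DIR_EN | ICE_SMA2_TX_EN (U.FL2 Rx)
 *   U.FL2 on,  SMA2 on       -> ICE_SMA2_DIR_EN       (SMA2 Tx, U.FL2 Rx)
 */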
149 
150 /**
151  * ice_ptp_set_sma_cfg - set the configuration of the SMA control logic
152  * @pf: Board private structure
153  *
154  * Return: 0 on success, negative error code otherwise
155  */
156 static int ice_ptp_set_sma_cfg(struct ice_pf *pf)
157 {
158 	const struct ice_ptp_pin_desc *ice_pins = pf->ptp.ice_pin_desc;
159 	struct ptp_pin_desc *pins = pf->ptp.pin_desc;
160 	unsigned int sma_pins[ICE_SMA_PINS_NUM] = {};
161 	int err;
162 	u8 data;
163 
164 	/* Read initial pin state value */
165 	err = ice_read_sma_ctrl(&pf->hw, &data);
166 	if (err)
167 		return err;
168 
169 	/* Get SMA/U.FL pins states */
170 	for (int i = 0; i < pf->ptp.info.n_pins; i++)
171 		if (pins[i].func) {
172 			int name_idx = ice_pins[i].name_idx;
173 
174 			switch (name_idx) {
175 			case SMA1:
176 			case UFL1:
177 			case SMA2:
178 			case UFL2:
179 				sma_pins[name_idx - 1] = pins[i].func;
180 				break;
181 			default:
182 				continue;
183 			}
184 		}
185 
186 	ice_ptp_update_sma_data(pf, sma_pins, &data);
187 	return ice_write_sma_ctrl(&pf->hw, data);
188 }
189 
190 /**
191  * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
192  * @pf: Board private structure
193  *
194  * Program the device to respond appropriately to the Tx timestamp interrupt
195  * cause.
196  */
197 static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf)
198 {
199 	struct ice_hw *hw = &pf->hw;
200 	bool enable;
201 	u32 val;
202 
203 	switch (pf->ptp.tx_interrupt_mode) {
204 	case ICE_PTP_TX_INTERRUPT_ALL:
205 		/* React to interrupts across all quads. */
206 		wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
207 		enable = true;
208 		break;
209 	case ICE_PTP_TX_INTERRUPT_NONE:
210 		/* Do not react to interrupts on any quad. */
211 		wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
212 		enable = false;
213 		break;
214 	case ICE_PTP_TX_INTERRUPT_SELF:
215 	default:
216 		enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON;
217 		break;
218 	}
219 
220 	/* Configure the Tx timestamp interrupt */
221 	val = rd32(hw, PFINT_OICR_ENA);
222 	if (enable)
223 		val |= PFINT_OICR_TSYN_TX_M;
224 	else
225 		val &= ~PFINT_OICR_TSYN_TX_M;
226 	wr32(hw, PFINT_OICR_ENA, val);
227 }
228 
229 /**
230  * ice_set_rx_tstamp - Enable or disable Rx timestamping
231  * @pf: Board private structure
232  * @on: bool value for whether timestamps are enabled or disabled
233  */
234 static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
235 {
236 	struct ice_vsi *vsi;
237 	u16 i;
238 
239 	vsi = ice_get_main_vsi(pf);
240 	if (!vsi || !vsi->rx_rings)
241 		return;
242 
243 	/* Set the timestamp flag for all the Rx rings */
244 	ice_for_each_rxq(vsi, i) {
245 		if (!vsi->rx_rings[i])
246 			continue;
247 		vsi->rx_rings[i]->ptp_rx = on;
248 	}
249 }
250 
251 /**
252  * ice_ptp_disable_timestamp_mode - Disable current timestamp mode
253  * @pf: Board private structure
254  *
255  * Called during preparation for reset to temporarily disable timestamping on
256  * the device. Called during remove to disable timestamping while cleaning up
257  * driver resources.
258  */
259 static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf)
260 {
261 	struct ice_hw *hw = &pf->hw;
262 	u32 val;
263 
264 	val = rd32(hw, PFINT_OICR_ENA);
265 	val &= ~PFINT_OICR_TSYN_TX_M;
266 	wr32(hw, PFINT_OICR_ENA, val);
267 
268 	ice_set_rx_tstamp(pf, false);
269 }
270 
271 /**
272  * ice_ptp_restore_timestamp_mode - Restore timestamp configuration
273  * @pf: Board private structure
274  *
275  * Called at the end of rebuild to restore timestamp configuration after
276  * a device reset.
277  */
278 void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
279 {
280 	struct ice_hw *hw = &pf->hw;
281 	bool enable_rx;
282 
283 	ice_ptp_cfg_tx_interrupt(pf);
284 
285 	enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
286 	ice_set_rx_tstamp(pf, enable_rx);
287 
288 	/* Trigger an immediate software interrupt to ensure that timestamps
289 	 * which occurred during reset are handled now.
290 	 */
291 	wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
292 	ice_flush(hw);
293 }
294 
295 /**
296  * ice_ptp_read_src_clk_reg - Read the source clock register
297  * @pf: Board private structure
298  * @sts: Optional parameter for holding a pair of system timestamps from
299  *       the system clock. Will be ignored if NULL is given.
300  */
301 static u64
302 ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
303 {
304 	struct ice_hw *hw = &pf->hw;
305 	u32 hi, lo, lo2;
306 	u8 tmr_idx;
307 
308 	tmr_idx = ice_get_ptp_src_clock_index(hw);
309 	guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
310 	/* Read the system timestamp pre PHC read */
311 	ptp_read_system_prets(sts);
312 
313 	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
314 
315 	/* Read the system timestamp post PHC read */
316 	ptp_read_system_postts(sts);
317 
318 	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
319 	lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));
320 
321 	if (lo2 < lo) {
322 		/* if TIME_L rolled over read TIME_L again and update
323 		 * system timestamps
324 		 */
325 		ptp_read_system_prets(sts);
326 		lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
327 		ptp_read_system_postts(sts);
328 		hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
329 	}
330 
331 	return ((u64)hi << 32) | lo;
332 }
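
/*
 * Illustrative rollover scenario for the read sequence above: if the first
 * TIME_L read returns a value near the top of the 32-bit range and the timer
 * wraps before TIME_H is read, the second TIME_L read (lo2) comes back
 * smaller than the first. In that case TIME_L and TIME_H (and the optional
 * system timestamps) are re-read so that hi and lo describe the same instant.
 */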
333 
334 /**
335  * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
336  * @cached_phc_time: recently cached copy of PHC time
337  * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
338  *
339  * Hardware captures timestamps which contain only 32 bits of nominal
340  * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
341  * Note that the captured timestamp values may be 40 bits, but the lower
342  * 8 bits hold the valid bit and sub-nanosecond bits, and are generally discarded.
343  *
344  * Extend the 32bit nanosecond timestamp using the following algorithm and
345  * assumptions:
346  *
347  * 1) have a recently cached copy of the PHC time
348  * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1
349  *    seconds) before or after the PHC time was captured.
350  * 3) calculate the delta between the cached time and the timestamp
351  * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
352  *    captured after the PHC time. In this case, the full timestamp is just
353  *    the cached PHC time plus the delta.
354  * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
355  *    timestamp was captured *before* the PHC time, i.e. because the PHC
356  *    cache was updated after the timestamp was captured by hardware. In this
357  *    case, the full timestamp is the cached time minus the inverse delta.
358  *
359  * This algorithm works even if the PHC time was updated after a Tx timestamp
360  * was requested, but before the Tx timestamp event was reported from
361  * hardware.
362  *
363  * This calculation primarily relies on keeping the cached PHC time up to
364  * date. If the timestamp was captured more than 2^31 nanoseconds after the
365  * PHC time, it is possible that the lower 32bits of PHC time have
366  * overflowed more than once, and we might generate an incorrect timestamp.
367  *
368  * This is prevented by (a) periodically updating the cached PHC time once
369  * a second, and (b) discarding any Tx timestamp packet if it has waited for
370  * a timestamp for more than one second.
371  */
372 static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
373 {
374 	u32 delta, phc_time_lo;
375 	u64 ns;
376 
377 	/* Extract the lower 32 bits of the PHC time */
378 	phc_time_lo = (u32)cached_phc_time;
379 
380 	/* Calculate the delta between the lower 32bits of the cached PHC
381 	 * time and the in_tstamp value
382 	 */
383 	delta = (in_tstamp - phc_time_lo);
384 
385 	/* Do not assume that the in_tstamp is always more recent than the
386 	 * cached PHC time. If the delta is large, it indicates that the
387 	 * in_tstamp was taken in the past, and should be converted
388 	 * forward.
389 	 */
390 	if (delta > (U32_MAX / 2)) {
391 		/* reverse the delta calculation here */
392 		delta = (phc_time_lo - in_tstamp);
393 		ns = cached_phc_time - delta;
394 	} else {
395 		ns = cached_phc_time + delta;
396 	}
397 
398 	return ns;
399 }
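
/*
 * Worked example of the extension above, with illustrative values: for
 * cached_phc_time = 0x00000005F0000000 the lower 32 bits are 0xF0000000.
 * An in_tstamp of 0xF0001000 gives delta = 0x1000 < U32_MAX / 2, so the
 * result is cached_phc_time + 0x1000. An in_tstamp of 0xEFFFF000 gives
 * delta = 0xFFFFF000 > U32_MAX / 2, so the reverse delta 0x1000 is used
 * and the result is cached_phc_time - 0x1000.
 */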
400 
401 /**
402  * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
403  * @pf: Board private structure
404  * @in_tstamp: Ingress/egress 40b timestamp value
405  *
406  * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
407  * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
408  *
409  *  *--------------------------------------------------------------*
410  *  | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
411  *  *--------------------------------------------------------------*
412  *
413  * The low bit is an indicator of whether the timestamp is valid. The next
414  * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
415  * and the remaining 32 bits are the lower 32 bits of the PHC timer.
416  *
417  * It is assumed that the caller verifies the timestamp is valid prior to
418  * calling this function.
419  *
420  * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
421  * time stored in the device private PTP structure as the basis for timestamp
422  * extension.
423  *
424  * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
425  * algorithm.
426  */
427 static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
428 {
429 	const u64 mask = GENMASK_ULL(31, 0);
430 	unsigned long discard_time;
431 
432 	/* Discard the hardware timestamp if the cached PHC time is too old */
433 	discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
434 	if (time_is_before_jiffies(discard_time)) {
435 		pf->ptp.tx_hwtstamp_discarded++;
436 		return 0;
437 	}
438 
439 	return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
440 				     (in_tstamp >> 8) & mask);
441 }
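
/*
 * Illustrative example of the 40-bit layout handled above: for a raw
 * timestamp of 0x12345678AB, the low byte 0xAB carries the valid bit and
 * the sub-nanosecond bits, and (in_tstamp >> 8) & GENMASK_ULL(31, 0)
 * extracts 0x12345678, the 32 bits of nominal nanoseconds passed on to
 * ice_ptp_extend_32b_ts().
 */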
442 
443 /**
444  * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps
445  * @tx: the PTP Tx timestamp tracker to check
446  *
447  * Check that a given PTP Tx timestamp tracker is up, i.e. that it is ready
448  * to accept new timestamp requests.
449  *
450  * Assumes the tx->lock spinlock is already held.
451  */
452 static bool
453 ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
454 {
455 	lockdep_assert_held(&tx->lock);
456 
457 	return tx->init && !tx->calibrating;
458 }
459 
460 /**
461  * ice_ptp_req_tx_single_tstamp - Request Tx timestamp for a port from FW
462  * @tx: the PTP Tx timestamp tracker
463  * @idx: index of the timestamp to request
464  */
465 void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
466 {
467 	struct ice_ptp_port *ptp_port;
468 	struct sk_buff *skb;
469 	struct ice_pf *pf;
470 
471 	if (!tx->init)
472 		return;
473 
474 	ptp_port = container_of(tx, struct ice_ptp_port, tx);
475 	pf = ptp_port_to_pf(ptp_port);
476 
477 	/* Drop packets which have waited for more than 2 seconds */
478 	if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
479 		/* Count the number of Tx timestamps that timed out */
480 		pf->ptp.tx_hwtstamp_timeouts++;
481 
482 		skb = tx->tstamps[idx].skb;
483 		tx->tstamps[idx].skb = NULL;
484 		clear_bit(idx, tx->in_use);
485 
486 		dev_kfree_skb_any(skb);
487 		return;
488 	}
489 
490 	ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
491 
492 	/* Write TS index to read to the PF register so the FW can read it */
493 	wr32(&pf->hw, PF_SB_ATQBAL,
494 	     TS_LL_READ_TS_INTR | FIELD_PREP(TS_LL_READ_TS_IDX, idx) |
495 	     TS_LL_READ_TS);
496 	tx->last_ll_ts_idx_read = idx;
497 }
498 
499 /**
500  * ice_ptp_complete_tx_single_tstamp - Complete Tx timestamp for a port
501  * @tx: the PTP Tx timestamp tracker
502  */
503 void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
504 {
505 	struct skb_shared_hwtstamps shhwtstamps = {};
506 	u8 idx = tx->last_ll_ts_idx_read;
507 	struct ice_ptp_port *ptp_port;
508 	u64 raw_tstamp, tstamp;
509 	bool drop_ts = false;
510 	struct sk_buff *skb;
511 	struct ice_pf *pf;
512 	u32 val;
513 
514 	if (!tx->init || tx->last_ll_ts_idx_read < 0)
515 		return;
516 
517 	ptp_port = container_of(tx, struct ice_ptp_port, tx);
518 	pf = ptp_port_to_pf(ptp_port);
519 
520 	ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
521 
522 	val = rd32(&pf->hw, PF_SB_ATQBAL);
523 
524 	/* When the bit is cleared, the TS is ready in the register */
525 	if (val & TS_LL_READ_TS) {
526 		dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready");
527 		return;
528 	}
529 
530 	/* The upper 8 bits of the timestamp are in bits 16:23 */
531 	raw_tstamp = FIELD_GET(TS_LL_READ_TS_HIGH, val);
532 	raw_tstamp <<= 32;
533 
534 	/* Read the low 32 bit value */
535 	raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH);
536 
537 	/* Devices using this interface always verify that the timestamp
538 	 * differs from the last cached timestamp value.
539 	 */
540 	if (raw_tstamp == tx->tstamps[idx].cached_tstamp)
541 		return;
542 
543 	tx->tstamps[idx].cached_tstamp = raw_tstamp;
544 	clear_bit(idx, tx->in_use);
545 	skb = tx->tstamps[idx].skb;
546 	tx->tstamps[idx].skb = NULL;
547 	if (test_and_clear_bit(idx, tx->stale))
548 		drop_ts = true;
549 
550 	if (!skb)
551 		return;
552 
553 	if (drop_ts) {
554 		dev_kfree_skb_any(skb);
555 		return;
556 	}
557 
558 	/* Extend the timestamp using cached PHC time */
559 	tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
560 	if (tstamp) {
561 		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
562 		ice_trace(tx_tstamp_complete, skb, idx);
563 	}
564 
565 	skb_tstamp_tx(skb, &shhwtstamps);
566 	dev_kfree_skb_any(skb);
567 }
568 
569 /**
570  * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port
571  * @tx: the PTP Tx timestamp tracker
572  *
573  * Process timestamps captured by the PHY associated with this port. To do
574  * this, loop over each index with a waiting skb.
575  *
576  * If a given index has a valid timestamp, perform the following steps:
577  *
578  * 1) check that the timestamp request is not stale
579  * 2) check that a timestamp is ready and available in the PHY memory bank
580  * 3) read and copy the timestamp out of the PHY register
581  * 4) unlock the index by clearing the associated in_use bit
582  * 5) check if the timestamp is stale, and discard if so
583  * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value
584  * 7) send this 64 bit timestamp to the stack
585  *
586  * Note that we do not hold the tracking lock while reading the Tx timestamp.
587  * This is because reading the timestamp requires taking a mutex that might
588  * sleep.
589  *
590  * The only place where we set in_use is when a new timestamp is initiated
591  * with a slot index. This is only called in the hard xmit routine where an
592  * SKB has a request flag set. The only places where we clear this bit is this
593  * function, or during teardown when the Tx timestamp tracker is being
594  * removed. A timestamp index will never be re-used until the in_use bit for
595  * that index is cleared.
596  *
597  * If a Tx thread starts a new timestamp, we might not begin processing it
598  * right away but we will notice it at the end when we re-queue the task.
599  *
600  * If a Tx thread starts a new timestamp just after this function exits, the
601  * interrupt for that timestamp should re-trigger this function once
602  * a timestamp is ready.
603  *
604  * In cases where the PTP hardware clock was directly adjusted, some
605  * timestamps may not be able to safely use the timestamp extension math. In
606  * this case, software will set the stale bit for any outstanding Tx
607  * timestamps when the clock is adjusted. Then this function will discard
608  * those captured timestamps instead of sending them to the stack.
609  *
610  * If a Tx packet has been waiting for more than 2 seconds, it is not possible
611  * to correctly extend the timestamp using the cached PHC time. It is
612  * extremely unlikely that a packet will ever take this long to timestamp. If
613  * we detect a Tx timestamp request that has waited for this long we assume
614  * the packet will never be sent by hardware and discard it without reading
615  * the timestamp register.
616  */
617 static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
618 {
619 	struct ice_ptp_port *ptp_port;
620 	unsigned long flags;
621 	struct ice_pf *pf;
622 	struct ice_hw *hw;
623 	u64 tstamp_ready;
624 	bool link_up;
625 	int err;
626 	u8 idx;
627 
628 	ptp_port = container_of(tx, struct ice_ptp_port, tx);
629 	pf = ptp_port_to_pf(ptp_port);
630 	hw = &pf->hw;
631 
632 	/* Read the Tx ready status first */
633 	if (tx->has_ready_bitmap) {
634 		err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
635 		if (err)
636 			return;
637 	}
638 
639 	/* Drop packets if the link went down */
640 	link_up = ptp_port->link_up;
641 
642 	for_each_set_bit(idx, tx->in_use, tx->len) {
643 		struct skb_shared_hwtstamps shhwtstamps = {};
644 		u8 phy_idx = idx + tx->offset;
645 		u64 raw_tstamp = 0, tstamp;
646 		bool drop_ts = !link_up;
647 		struct sk_buff *skb;
648 
649 		/* Drop packets which have waited for more than 2 seconds */
650 		if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
651 			drop_ts = true;
652 
653 			/* Count the number of Tx timestamps that timed out */
654 			pf->ptp.tx_hwtstamp_timeouts++;
655 		}
656 
657 		/* Only read a timestamp from the PHY if it's marked as ready
658 		 * by the tstamp_ready register. This avoids unnecessary
659 		 * reading of timestamps which are not yet valid. This is
660 		 * important as we must read all timestamps which are valid
661 		 * and only timestamps which are valid during each interrupt.
662 		 * If we do not, the hardware logic for generating a new
663 		 * interrupt can get stuck on some devices.
664 		 */
665 		if (tx->has_ready_bitmap &&
666 		    !(tstamp_ready & BIT_ULL(phy_idx))) {
667 			if (drop_ts)
668 				goto skip_ts_read;
669 
670 			continue;
671 		}
672 
673 		ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
674 
675 		err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp);
676 		if (err && !drop_ts)
677 			continue;
678 
679 		ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
680 
681 		/* For PHYs which don't implement a proper timestamp ready
682 		 * bitmap, verify that the timestamp value is different
683 		 * from the last cached timestamp. If it is not, skip this for
684 		 * now assuming it hasn't yet been captured by hardware.
685 		 */
686 		if (!drop_ts && !tx->has_ready_bitmap &&
687 		    raw_tstamp == tx->tstamps[idx].cached_tstamp)
688 			continue;
689 
690 		/* Discard any timestamp value without the valid bit set */
691 		if (!(raw_tstamp & ICE_PTP_TS_VALID))
692 			drop_ts = true;
693 
694 skip_ts_read:
695 		spin_lock_irqsave(&tx->lock, flags);
696 		if (!tx->has_ready_bitmap && raw_tstamp)
697 			tx->tstamps[idx].cached_tstamp = raw_tstamp;
698 		clear_bit(idx, tx->in_use);
699 		skb = tx->tstamps[idx].skb;
700 		tx->tstamps[idx].skb = NULL;
701 		if (test_and_clear_bit(idx, tx->stale))
702 			drop_ts = true;
703 		spin_unlock_irqrestore(&tx->lock, flags);
704 
705 		/* It is unlikely but possible that the SKB will have been
706 		 * flushed at this point due to link change or teardown.
707 		 */
708 		if (!skb)
709 			continue;
710 
711 		if (drop_ts) {
712 			dev_kfree_skb_any(skb);
713 			continue;
714 		}
715 
716 		/* Extend the timestamp using cached PHC time */
717 		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
718 		if (tstamp) {
719 			shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
720 			ice_trace(tx_tstamp_complete, skb, idx);
721 		}
722 
723 		skb_tstamp_tx(skb, &shhwtstamps);
724 		dev_kfree_skb_any(skb);
725 	}
726 }
727 
728 /**
729  * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device
730  * @pf: Board private structure
731  */
732 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
733 {
734 	struct ice_ptp_port *port;
735 	unsigned int i;
736 
737 	mutex_lock(&pf->adapter->ports.lock);
738 	list_for_each_entry(port, &pf->adapter->ports.ports, list_node) {
739 		struct ice_ptp_tx *tx = &port->tx;
740 
741 		if (!tx || !tx->init)
742 			continue;
743 
744 		ice_ptp_process_tx_tstamp(tx);
745 	}
746 	mutex_unlock(&pf->adapter->ports.lock);
747 
748 	for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) {
749 		u64 tstamp_ready;
750 		int err;
751 
752 		/* Read the Tx ready status first */
753 		err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
754 		if (err)
755 			break;
756 		else if (tstamp_ready)
757 			return ICE_TX_TSTAMP_WORK_PENDING;
758 	}
759 
760 	return ICE_TX_TSTAMP_WORK_DONE;
761 }
762 
763 /**
764  * ice_ptp_tx_tstamp - Process Tx timestamps for this function.
765  * @tx: Tx tracking structure to process
766  *
767  * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete
768  * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise.
769  */
770 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
771 {
772 	bool more_timestamps;
773 	unsigned long flags;
774 
775 	if (!tx->init)
776 		return ICE_TX_TSTAMP_WORK_DONE;
777 
778 	/* Process the Tx timestamp tracker */
779 	ice_ptp_process_tx_tstamp(tx);
780 
781 	/* Check if there are outstanding Tx timestamps */
782 	spin_lock_irqsave(&tx->lock, flags);
783 	more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);
784 	spin_unlock_irqrestore(&tx->lock, flags);
785 
786 	if (more_timestamps)
787 		return ICE_TX_TSTAMP_WORK_PENDING;
788 
789 	return ICE_TX_TSTAMP_WORK_DONE;
790 }
791 
792 /**
793  * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
794  * @tx: Tx tracking structure to initialize
795  *
796  * Assumes that the length has already been initialized. Do not call directly,
797  * use one of the ice_ptp_init_tx_* functions instead.
798  */
799 static int
800 ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
801 {
802 	unsigned long *in_use, *stale;
803 	struct ice_tx_tstamp *tstamps;
804 
805 	tstamps = kcalloc(tx->len, sizeof(*tstamps), GFP_KERNEL);
806 	in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
807 	stale = bitmap_zalloc(tx->len, GFP_KERNEL);
808 
809 	if (!tstamps || !in_use || !stale) {
810 		kfree(tstamps);
811 		bitmap_free(in_use);
812 		bitmap_free(stale);
813 
814 		return -ENOMEM;
815 	}
816 
817 	tx->tstamps = tstamps;
818 	tx->in_use = in_use;
819 	tx->stale = stale;
820 	tx->init = 1;
821 	tx->last_ll_ts_idx_read = -1;
822 
823 	spin_lock_init(&tx->lock);
824 
825 	return 0;
826 }
827 
828 /**
829  * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
830  * @pf: Board private structure
831  * @tx: the tracker to flush
832  *
833  * Called during teardown when a Tx tracker is being removed.
834  */
835 static void
836 ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
837 {
838 	struct ice_hw *hw = &pf->hw;
839 	unsigned long flags;
840 	u64 tstamp_ready;
841 	int err;
842 	u8 idx;
843 
844 	err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
845 	if (err) {
846 		dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n",
847 			tx->block, err);
848 
849 		/* If we fail to read the Tx timestamp ready bitmap just
850 		 * skip clearing the PHY timestamps.
851 		 */
852 		tstamp_ready = 0;
853 	}
854 
855 	for_each_set_bit(idx, tx->in_use, tx->len) {
856 		u8 phy_idx = idx + tx->offset;
857 		struct sk_buff *skb;
858 
859 		/* If this timestamp is ready, clear it from the PHY. */
860 		if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx)))
861 			ice_clear_phy_tstamp(hw, tx->block, phy_idx);
862 
863 		spin_lock_irqsave(&tx->lock, flags);
864 		skb = tx->tstamps[idx].skb;
865 		tx->tstamps[idx].skb = NULL;
866 		clear_bit(idx, tx->in_use);
867 		clear_bit(idx, tx->stale);
868 		spin_unlock_irqrestore(&tx->lock, flags);
869 
870 		/* Count the number of Tx timestamps flushed */
871 		pf->ptp.tx_hwtstamp_flushed++;
872 
873 		/* Free the SKB after we've cleared the bit */
874 		dev_kfree_skb_any(skb);
875 	}
876 }
877 
878 /**
879  * ice_ptp_mark_tx_tracker_stale - Mark unfinished timestamps as stale
880  * @tx: the tracker to mark
881  *
882  * Mark currently outstanding Tx timestamps as stale. This prevents sending
883  * their timestamp value to the stack. This is required to prevent extending
884  * the 40bit hardware timestamp incorrectly.
885  *
886  * This should be called when the PTP clock is modified such as after a set
887  * time request.
888  */
889 static void
890 ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
891 {
892 	unsigned long flags;
893 
894 	spin_lock_irqsave(&tx->lock, flags);
895 	bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len);
896 	spin_unlock_irqrestore(&tx->lock, flags);
897 }
898 
899 /**
900  * ice_ptp_flush_all_tx_tracker - Flush all timestamp trackers on this clock
901  * @pf: Board private structure
902  *
903  * Called by the clock owner to flush all the Tx timestamp trackers associated
904  * with the clock.
905  */
906 static void
907 ice_ptp_flush_all_tx_tracker(struct ice_pf *pf)
908 {
909 	struct ice_ptp_port *port;
910 
911 	list_for_each_entry(port, &pf->adapter->ports.ports, list_node)
912 		ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx);
913 }
914 
915 /**
916  * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
917  * @pf: Board private structure
918  * @tx: Tx tracking structure to release
919  *
920  * Free memory associated with the Tx timestamp tracker.
921  */
922 static void
923 ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
924 {
925 	unsigned long flags;
926 
927 	spin_lock_irqsave(&tx->lock, flags);
928 	tx->init = 0;
929 	spin_unlock_irqrestore(&tx->lock, flags);
930 
931 	/* wait for potentially outstanding interrupt to complete */
932 	synchronize_irq(pf->oicr_irq.virq);
933 
934 	ice_ptp_flush_tx_tracker(pf, tx);
935 
936 	kfree(tx->tstamps);
937 	tx->tstamps = NULL;
938 
939 	bitmap_free(tx->in_use);
940 	tx->in_use = NULL;
941 
942 	bitmap_free(tx->stale);
943 	tx->stale = NULL;
944 
945 	tx->len = 0;
946 }
947 
948 /**
949  * ice_ptp_init_tx_eth56g - Initialize tracking for Tx timestamps
950  * @pf: Board private structure
951  * @tx: the Tx tracking structure to initialize
952  * @port: the port this structure tracks
953  *
954  * Initialize the Tx timestamp tracker for this port. ETH56G PHYs
955  * have independent memory blocks for all ports.
956  *
957  * Return: 0 for success, -ENOMEM when failed to allocate Tx tracker
958  */
959 static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx,
960 				  u8 port)
961 {
962 	tx->block = port;
963 	tx->offset = 0;
964 	tx->len = INDEX_PER_PORT_ETH56G;
965 	tx->has_ready_bitmap = 1;
966 
967 	return ice_ptp_alloc_tx_tracker(tx);
968 }
969 
970 /**
971  * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
972  * @pf: Board private structure
973  * @tx: the Tx tracking structure to initialize
974  * @port: the port this structure tracks
975  *
976  * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
977  * the timestamp block is shared for all ports in the same quad. To avoid
978  * ports using the same timestamp index, logically break the block of
979  * registers into chunks based on the port number.
980  */
981 static int
982 ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
983 {
984 	tx->block = ICE_GET_QUAD_NUM(port);
985 	tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
986 	tx->len = INDEX_PER_PORT_E82X;
987 	tx->has_ready_bitmap = 1;
988 
989 	return ice_ptp_alloc_tx_tracker(tx);
990 }
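
/*
 * For example, assuming ICE_PORTS_PER_QUAD is 4, port 5 gets
 * tx->block = ICE_GET_QUAD_NUM(5) = 1 and
 * tx->offset = (5 % 4) * INDEX_PER_PORT_E82X, so it owns its own chunk of
 * quad 1's shared timestamp block and cannot collide with ports 4, 6 or 7.
 */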
991 
992 /**
993  * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
994  * @pf: Board private structure
995  * @tx: the Tx tracking structure to initialize
996  *
997  * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
998  * port has its own block of timestamps, independent of the other ports.
999  */
1000 static int
1001 ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
1002 {
1003 	tx->block = pf->hw.port_info->lport;
1004 	tx->offset = 0;
1005 	tx->len = INDEX_PER_PORT_E810;
1006 	/* The E810 PHY does not provide a timestamp ready bitmap. Instead,
1007 	 * verify new timestamps against cached copy of the last read
1008 	 * timestamp.
1009 	 */
1010 	tx->has_ready_bitmap = 0;
1011 
1012 	return ice_ptp_alloc_tx_tracker(tx);
1013 }
1014 
1015 /**
1016  * ice_ptp_update_cached_phctime - Update the cached PHC time values
1017  * @pf: Board specific private structure
1018  *
1019  * This function updates the system time values which are cached in the PF
1020  * structure and the Rx rings.
1021  *
1022  * This function must be called periodically to ensure that the cached value
1023  * is never more than 2 seconds old.
1024  *
1025  * Note that the cached copy in the PF PTP structure is always updated, even
1026  * if we can't update the copy in the Rx rings.
1027  *
1028  * Return:
1029  * * 0 - OK, successfully updated
1030  * * -EAGAIN - PF was busy, need to reschedule the update
1031  */
1032 static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
1033 {
1034 	struct device *dev = ice_pf_to_dev(pf);
1035 	unsigned long update_before;
1036 	u64 systime;
1037 	int i;
1038 
1039 	update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
1040 	if (pf->ptp.cached_phc_time &&
1041 	    time_is_before_jiffies(update_before)) {
1042 		unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;
1043 
1044 		dev_warn(dev, "%u msecs passed between updates to cached PHC time\n",
1045 			 jiffies_to_msecs(time_taken));
1046 		pf->ptp.late_cached_phc_updates++;
1047 	}
1048 
1049 	/* Read the current PHC time */
1050 	systime = ice_ptp_read_src_clk_reg(pf, NULL);
1051 
1052 	/* Update the cached PHC time stored in the PF structure */
1053 	WRITE_ONCE(pf->ptp.cached_phc_time, systime);
1054 	WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);
1055 
1056 	if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
1057 		return -EAGAIN;
1058 
1059 	ice_for_each_vsi(pf, i) {
1060 		struct ice_vsi *vsi = pf->vsi[i];
1061 		int j;
1062 
1063 		if (!vsi)
1064 			continue;
1065 
1066 		if (vsi->type != ICE_VSI_PF)
1067 			continue;
1068 
1069 		ice_for_each_rxq(vsi, j) {
1070 			if (!vsi->rx_rings[j])
1071 				continue;
1072 			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
1073 		}
1074 	}
1075 	clear_bit(ICE_CFG_BUSY, pf->state);
1076 
1077 	return 0;
1078 }
1079 
1080 /**
1081  * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
1082  * @pf: Board specific private structure
1083  *
1084  * This function must be called when the cached PHC time is no longer valid,
1085  * such as after a time adjustment. It marks any currently outstanding Tx
1086  * timestamps as stale and updates the cached PHC time for both the PF and Rx
1087  * rings.
1088  *
1089  * If updating the PHC time cannot be done immediately, a warning message is
1090  * logged and the work item is scheduled immediately to minimize the window
1091  * with a wrong cached timestamp.
1092  */
1093 static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
1094 {
1095 	struct device *dev = ice_pf_to_dev(pf);
1096 	int err;
1097 
1098 	/* Update the cached PHC time immediately if possible, otherwise
1099 	 * schedule the work item to execute soon.
1100 	 */
1101 	err = ice_ptp_update_cached_phctime(pf);
1102 	if (err) {
1103 		/* If another thread is updating the Rx rings, we won't
1104 		 * properly reset them here. This could lead to reporting of
1105 		 * invalid timestamps, but there isn't much we can do.
1106 		 */
1107 		dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
1108 			 __func__);
1109 
1110 		/* Queue the work item to update the Rx rings when possible */
1111 		kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
1112 					   msecs_to_jiffies(10));
1113 	}
1114 
1115 	/* Mark any outstanding timestamps as stale, since they might have
1116 	 * been captured in hardware before the time update. This could lead
1117 	 * to us extending them with the wrong cached value resulting in
1118 	 * incorrect timestamp values.
1119 	 */
1120 	ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx);
1121 }
1122 
1123 /**
1124  * ice_ptp_write_init - Set PHC time to provided value
1125  * @pf: Board private structure
1126  * @ts: timespec structure that holds the new time value
1127  *
1128  * Set the PHC time to the value provided in the timespec.
1129  */
1130 static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
1131 {
1132 	u64 ns = timespec64_to_ns(ts);
1133 	struct ice_hw *hw = &pf->hw;
1134 
1135 	return ice_ptp_init_time(hw, ns);
1136 }
1137 
1138 /**
1139  * ice_ptp_write_adj - Adjust PHC clock time atomically
1140  * @pf: Board private structure
1141  * @adj: Adjustment in nanoseconds
1142  *
1143  * Perform an atomic adjustment of the PHC time by the specified number of
1144  * nanoseconds.
1145  */
1146 static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
1147 {
1148 	struct ice_hw *hw = &pf->hw;
1149 
1150 	return ice_ptp_adj_clock(hw, adj);
1151 }
1152 
1153 /**
1154  * ice_base_incval - Get base timer increment value
1155  * @pf: Board private structure
1156  *
1157  * Look up the base timer increment value for this device. The base increment
1158  * value is used to define the nominal clock tick rate. This increment value
1159  * is programmed during device initialization. It is also used as the basis
1160  * for calculating adjustments using scaled_ppm.
1161  */
1162 static u64 ice_base_incval(struct ice_pf *pf)
1163 {
1164 	struct ice_hw *hw = &pf->hw;
1165 	u64 incval;
1166 
1167 	incval = ice_get_base_incval(hw);
1168 
1169 	dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
1170 		incval);
1171 
1172 	return incval;
1173 }
1174 
1175 /**
1176  * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state
1177  * @port: PTP port for which Tx FIFO is checked
1178  */
1179 static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
1180 {
1181 	int offs = port->port_num % ICE_PORTS_PER_QUAD;
1182 	int quad = ICE_GET_QUAD_NUM(port->port_num);
1183 	struct ice_pf *pf;
1184 	struct ice_hw *hw;
1185 	u32 val, phy_sts;
1186 	int err;
1187 
1188 	pf = ptp_port_to_pf(port);
1189 	hw = &pf->hw;
1190 
1191 	if (port->tx_fifo_busy_cnt == FIFO_OK)
1192 		return 0;
1193 
1194 	/* need to read FIFO state */
1195 	if (offs == 0 || offs == 1)
1196 		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS,
1197 					     &val);
1198 	else
1199 		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS,
1200 					     &val);
1201 
1202 	if (err) {
1203 		dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
1204 			port->port_num, err);
1205 		return err;
1206 	}
1207 
1208 	if (offs & 0x1)
1209 		phy_sts = FIELD_GET(Q_REG_FIFO13_M, val);
1210 	else
1211 		phy_sts = FIELD_GET(Q_REG_FIFO02_M, val);
1212 
1213 	if (phy_sts & FIFO_EMPTY) {
1214 		port->tx_fifo_busy_cnt = FIFO_OK;
1215 		return 0;
1216 	}
1217 
1218 	port->tx_fifo_busy_cnt++;
1219 
1220 	dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
1221 		port->tx_fifo_busy_cnt, port->port_num);
1222 
1223 	if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
1224 		dev_dbg(ice_pf_to_dev(pf),
1225 			"Port %d Tx FIFO still not empty; resetting quad %d\n",
1226 			port->port_num, quad);
1227 		ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
1228 		port->tx_fifo_busy_cnt = FIFO_OK;
1229 		return 0;
1230 	}
1231 
1232 	return -EAGAIN;
1233 }
1234 
1235 /**
1236  * ice_ptp_wait_for_offsets - Check for valid Tx and Rx offsets
1237  * @work: Pointer to the kthread_work structure for this task
1238  *
1239  * Check whether hardware has completed measuring the Tx and Rx offset values
1240  * used to configure and enable vernier timestamp calibration.
1241  *
1242  * Once the offset in either direction is measured, configure the associated
1243  * registers with the calibrated offset values and enable timestamping. The Tx
1244  * and Rx directions are configured independently as soon as their associated
1245  * offsets are known.
1246  *
1247  * This function reschedules itself until both Tx and Rx calibration have
1248  * completed.
1249  */
1250 static void ice_ptp_wait_for_offsets(struct kthread_work *work)
1251 {
1252 	struct ice_ptp_port *port;
1253 	struct ice_pf *pf;
1254 	struct ice_hw *hw;
1255 	int tx_err;
1256 	int rx_err;
1257 
1258 	port = container_of(work, struct ice_ptp_port, ov_work.work);
1259 	pf = ptp_port_to_pf(port);
1260 	hw = &pf->hw;
1261 
1262 	if (ice_is_reset_in_progress(pf->state)) {
1263 		/* wait for device driver to complete reset */
1264 		kthread_queue_delayed_work(pf->ptp.kworker,
1265 					   &port->ov_work,
1266 					   msecs_to_jiffies(100));
1267 		return;
1268 	}
1269 
1270 	tx_err = ice_ptp_check_tx_fifo(port);
1271 	if (!tx_err)
1272 		tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num);
1273 	rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num);
1274 	if (tx_err || rx_err) {
1275 		/* Tx and/or Rx offset not yet configured, try again later */
1276 		kthread_queue_delayed_work(pf->ptp.kworker,
1277 					   &port->ov_work,
1278 					   msecs_to_jiffies(100));
1279 		return;
1280 	}
1281 }
1282 
1283 /**
1284  * ice_ptp_port_phy_stop - Stop timestamping for a PHY port
1285  * @ptp_port: PTP port to stop
1286  */
1287 static int
1288 ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
1289 {
1290 	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
1291 	u8 port = ptp_port->port_num;
1292 	struct ice_hw *hw = &pf->hw;
1293 	int err;
1294 
1295 	if (ice_is_e810(hw))
1296 		return 0;
1297 
1298 	mutex_lock(&ptp_port->ps_lock);
1299 
1300 	switch (ice_get_phy_model(hw)) {
1301 	case ICE_PHY_ETH56G:
1302 		err = ice_stop_phy_timer_eth56g(hw, port, true);
1303 		break;
1304 	case ICE_PHY_E82X:
1305 		kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
1306 
1307 		err = ice_stop_phy_timer_e82x(hw, port, true);
1308 		break;
1309 	default:
1310 		err = -ENODEV;
1311 	}
1312 	if (err && err != -EBUSY)
1313 		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
1314 			port, err);
1315 
1316 	mutex_unlock(&ptp_port->ps_lock);
1317 
1318 	return err;
1319 }
1320 
1321 /**
1322  * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping
1323  * @ptp_port: PTP port for which the PHY start is set
1324  *
1325  * Start the PHY timestamping block, and initiate Vernier timestamping
1326  * calibration. If timestamping cannot be calibrated (such as if link is down)
1327  * then disable the timestamping block instead.
1328  */
1329 static int
1330 ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
1331 {
1332 	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
1333 	u8 port = ptp_port->port_num;
1334 	struct ice_hw *hw = &pf->hw;
1335 	unsigned long flags;
1336 	int err;
1337 
1338 	if (ice_is_e810(hw))
1339 		return 0;
1340 
1341 	if (!ptp_port->link_up)
1342 		return ice_ptp_port_phy_stop(ptp_port);
1343 
1344 	mutex_lock(&ptp_port->ps_lock);
1345 
1346 	switch (ice_get_phy_model(hw)) {
1347 	case ICE_PHY_ETH56G:
1348 		err = ice_start_phy_timer_eth56g(hw, port);
1349 		break;
1350 	case ICE_PHY_E82X:
1351 		/* Start the PHY timer in Vernier mode */
1352 		kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
1353 
1354 		/* temporarily disable Tx timestamps while calibrating
1355 		 * PHY offset
1356 		 */
1357 		spin_lock_irqsave(&ptp_port->tx.lock, flags);
1358 		ptp_port->tx.calibrating = true;
1359 		spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
1360 		ptp_port->tx_fifo_busy_cnt = 0;
1361 
1362 		/* Start the PHY timer in Vernier mode */
1363 		err = ice_start_phy_timer_e82x(hw, port);
1364 		if (err)
1365 			break;
1366 
1367 		/* Enable Tx timestamps right away */
1368 		spin_lock_irqsave(&ptp_port->tx.lock, flags);
1369 		ptp_port->tx.calibrating = false;
1370 		spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
1371 
1372 		kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
1373 					   0);
1374 		break;
1375 	default:
1376 		err = -ENODEV;
1377 	}
1378 
1379 	if (err)
1380 		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
1381 			port, err);
1382 
1383 	mutex_unlock(&ptp_port->ps_lock);
1384 
1385 	return err;
1386 }
1387 
1388 /**
1389  * ice_ptp_link_change - Reconfigure PTP after link status change
1390  * @pf: Board private structure
1391  * @linkup: Link is up or down
1392  */
1393 void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
1394 {
1395 	struct ice_ptp_port *ptp_port;
1396 	struct ice_hw *hw = &pf->hw;
1397 
1398 	if (pf->ptp.state != ICE_PTP_READY)
1399 		return;
1400 
1401 	ptp_port = &pf->ptp.port;
1402 
1403 	/* Update cached link status for this port immediately */
1404 	ptp_port->link_up = linkup;
1405 
1406 	/* Skip HW writes if reset is in progress */
1407 	if (pf->hw.reset_ongoing)
1408 		return;
1409 	switch (ice_get_phy_model(hw)) {
1410 	case ICE_PHY_E810:
1411 		/* Do not reconfigure E810 PHY */
1412 		return;
1413 	case ICE_PHY_ETH56G:
1414 	case ICE_PHY_E82X:
1415 		ice_ptp_port_phy_restart(ptp_port);
1416 		return;
1417 	default:
1418 		dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__);
1419 	}
1420 }
1421 
1422 /**
1423  * ice_ptp_cfg_phy_interrupt - Configure PHY interrupt settings
1424  * @pf: PF private structure
1425  * @ena: bool value to enable or disable interrupt
1426  * @threshold: Minimum number of packets at which intr is triggered
1427  *
1428  * Utility function to configure all the PHY interrupt settings, including
1429  * whether the PHY interrupt is enabled, and what threshold to use. Also
1430  * configures the E82X timestamp owner to react to interrupts from all PHYs.
1431  *
1432  * Return: 0 on success, -EOPNOTSUPP when the PHY model is unsupported, or
1433  * another error code if configuring the PHY interrupt fails
1434  */
1435 static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
1436 {
1437 	struct device *dev = ice_pf_to_dev(pf);
1438 	struct ice_hw *hw = &pf->hw;
1439 
1440 	ice_ptp_reset_ts_memory(hw);
1441 
1442 	switch (ice_get_phy_model(hw)) {
1443 	case ICE_PHY_ETH56G: {
1444 		int port;
1445 
1446 		for (port = 0; port < hw->ptp.num_lports; port++) {
1447 			int err;
1448 
1449 			err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
1450 			if (err) {
1451 				dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
1452 					port, err);
1453 				return err;
1454 			}
1455 		}
1456 
1457 		return 0;
1458 	}
1459 	case ICE_PHY_E82X: {
1460 		int quad;
1461 
1462 		for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
1463 		     quad++) {
1464 			int err;
1465 
1466 			err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
1467 			if (err) {
1468 				dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
1469 					quad, err);
1470 				return err;
1471 			}
1472 		}
1473 
1474 		return 0;
1475 	}
1476 	case ICE_PHY_E810:
1477 		return 0;
1478 	case ICE_PHY_UNSUP:
1479 	default:
1480 		dev_warn(dev, "%s: Unexpected PHY model %d\n", __func__,
1481 			 ice_get_phy_model(hw));
1482 		return -EOPNOTSUPP;
1483 	}
1484 }
1485 
1486 /**
1487  * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block
1488  * @pf: Board private structure
1489  */
1490 static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
1491 {
1492 	ice_ptp_port_phy_restart(&pf->ptp.port);
1493 }
1494 
1495 /**
1496  * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping
1497  * @pf: Board private structure
1498  */
1499 static void ice_ptp_restart_all_phy(struct ice_pf *pf)
1500 {
1501 	struct list_head *entry;
1502 
1503 	list_for_each(entry, &pf->adapter->ports.ports) {
1504 		struct ice_ptp_port *port = list_entry(entry,
1505 						       struct ice_ptp_port,
1506 						       list_node);
1507 
1508 		if (port->link_up)
1509 			ice_ptp_port_phy_restart(port);
1510 	}
1511 }
1512 
1513 /**
1514  * ice_ptp_adjfine - Adjust clock increment rate
1515  * @info: the driver's PTP info structure
1516  * @scaled_ppm: Parts per million with 16-bit fractional field
1517  *
1518  * Adjust the frequency of the clock by the indicated scaled ppm from the
1519  * base frequency.
1520  */
1521 static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
1522 {
1523 	struct ice_pf *pf = ptp_info_to_pf(info);
1524 	struct ice_hw *hw = &pf->hw;
1525 	u64 incval;
1526 	int err;
1527 
1528 	incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm);
1529 	err = ice_ptp_write_incval_locked(hw, incval);
1530 	if (err) {
1531 		dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
1532 			err);
1533 		return -EIO;
1534 	}
1535 
1536 	return 0;
1537 }
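
/*
 * Illustrative example of the rate adjustment above: scaled_ppm carries a
 * 16-bit fractional field, so scaled_ppm = 65536 requests a +1 ppm change.
 * adjust_by_scaled_ppm() then returns roughly the base increment value plus
 * base / 1000000, and writing that incval makes the PHC run one part per
 * million faster than nominal.
 */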
1538 
1539 /**
1540  * ice_ptp_extts_event - Process PTP external clock event
1541  * @pf: Board private structure
1542  */
1543 void ice_ptp_extts_event(struct ice_pf *pf)
1544 {
1545 	struct ptp_clock_event event;
1546 	struct ice_hw *hw = &pf->hw;
1547 	u8 chan, tmr_idx;
1548 	u32 hi, lo;
1549 
1550 	/* Don't process timestamp events if PTP is not ready */
1551 	if (pf->ptp.state != ICE_PTP_READY)
1552 		return;
1553 
1554 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1555 	/* Event time is captured by one of the two matched registers
1556 	 *      GLTSYN_EVNT_L: 32 LSB of sampled time event
1557 	 *      GLTSYN_EVNT_H: 32 MSB of sampled time event
1558 	 * Event is defined in GLTSYN_EVNT_0 register
1559 	 */
1560 	for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
1561 		/* Check if channel is enabled */
1562 		if (pf->ptp.ext_ts_irq & (1 << chan)) {
1563 			lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
1564 			hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
1565 			event.timestamp = (((u64)hi) << 32) | lo;
1566 			event.type = PTP_CLOCK_EXTTS;
1567 			event.index = chan;
1568 
1569 			/* Fire event */
1570 			ptp_clock_event(pf->ptp.clock, &event);
1571 			pf->ptp.ext_ts_irq &= ~(1 << chan);
1572 		}
1573 	}
1574 }
1575 
1576 /**
1577  * ice_ptp_cfg_extts - Configure EXTTS pin and channel
1578  * @pf: Board private structure
1579  * @rq: External timestamp request
1580  * @on: Enable/disable flag
1581  *
1582  * Configure an external timestamp event on the requested channel.
1583  *
1584  * Return: 0 on success, negative error code otherwise
1585  */
1586 static int ice_ptp_cfg_extts(struct ice_pf *pf, struct ptp_extts_request *rq,
1587 			     int on)
1588 {
1589 	u32 aux_reg, gpio_reg, irq_reg;
1590 	struct ice_hw *hw = &pf->hw;
1591 	unsigned int chan, gpio_pin;
1592 	int pin_desc_idx;
1593 	u8 tmr_idx;
1594 
1595 	/* Reject requests with unsupported flags */
1596 
1597 	if (rq->flags & ~(PTP_ENABLE_FEATURE |
1598 			  PTP_RISING_EDGE |
1599 			  PTP_FALLING_EDGE |
1600 			  PTP_STRICT_FLAGS))
1601 		return -EOPNOTSUPP;
1602 
1603 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1604 	chan = rq->index;
1605 
1606 	pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
1607 	if (pin_desc_idx < 0)
1608 		return -EIO;
1609 
1610 	gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[0];
1611 	irq_reg = rd32(hw, PFINT_OICR_ENA);
1612 
1613 	if (on) {
1614 		/* Enable the interrupt */
1615 		irq_reg |= PFINT_OICR_TSYN_EVNT_M;
1616 		aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;
1617 
1618 #define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE	BIT(0)
1619 #define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE	BIT(1)
1620 
1621 		/* set event level to requested edge */
1622 		if (rq->flags & PTP_FALLING_EDGE)
1623 			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
1624 		if (rq->flags & PTP_RISING_EDGE)
1625 			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;
1626 
1627 		/* Write GPIO CTL reg.
1628 		 * 0x1 is input sampled by EVENT register(channel)
1629 		 * + num_in_channels * tmr_idx
1630 		 */
1631 		gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M,
1632 				      1 + chan + (tmr_idx * 3));
1633 	} else {
1634 		bool last_enabled = true;
1635 
1636 		/* clear the values we set to reset defaults */
1637 		aux_reg = 0;
1638 		gpio_reg = 0;
1639 
1640 		for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++)
1641 			if ((pf->ptp.extts_rqs[i].flags &
1642 			     PTP_ENABLE_FEATURE) &&
1643 			    i != chan) {
1644 				last_enabled = false;
1645 			}
1646 
1647 		if (last_enabled)
1648 			irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
1649 	}
1650 
1651 	wr32(hw, PFINT_OICR_ENA, irq_reg);
1652 	wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
1653 	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);
1654 
1655 	return 0;
1656 }
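
/*
 * For example, enabling EXTTS on channel 2 with tmr_idx = 0 sets the
 * requested edge bits in GLTSYN_AUX_IN(2, 0) and programs GLGEN_GPIO_CTL
 * with PIN_FUNC = 1 + 2 + (0 * 3) = 3, selecting the input sampled by
 * event register 2 of timer 0.
 */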
1657 
1658 /**
1659  * ice_ptp_disable_all_extts - Disable all EXTTS channels
1660  * @pf: Board private structure
1661  */
1662 static void ice_ptp_disable_all_extts(struct ice_pf *pf)
1663 {
1664 	for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts ; i++)
1665 		if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE)
1666 			ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i],
1667 					  false);
1668 
1669 	synchronize_irq(pf->oicr_irq.virq);
1670 }
1671 
1672 /**
1673  * ice_ptp_enable_all_extts - Enable all EXTTS channels
1674  * @pf: Board private structure
1675  *
1676  * Called during reset to restore user configuration.
1677  */
1678 static void ice_ptp_enable_all_extts(struct ice_pf *pf)
1679 {
1680 	for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts ; i++)
1681 		if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE)
1682 			ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i],
1683 					  true);
1684 }
1685 
1686 /**
1687  * ice_ptp_write_perout - Write periodic wave parameters to HW
1688  * @hw: pointer to the HW struct
1689  * @chan: target channel
1690  * @gpio_pin: target GPIO pin
1691  * @start: target time to start periodic output
1692  * @period: target period
1693  *
1694  * Return: 0 on success, negative error code otherwise
1695  */
1696 static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
1697 				unsigned int gpio_pin, u64 start, u64 period)
1698 {
1699 
1700 	u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1701 	u32 val = 0;
1702 
1703 	/* 0. Reset mode & out_en in AUX_OUT */
1704 	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
1705 
1706 	if (ice_is_e825c(hw)) {
1707 		int err;
1708 
1709 		/* Enable/disable CGU 1PPS output for E825C */
1710 		err = ice_cgu_cfg_pps_out(hw, !!period);
1711 		if (err)
1712 			return err;
1713 	}
1714 
1715 	/* 1. Write perout with half of required period value.
1716 	 * HW toggles output when source clock hits the TGT and then adds
1717 	 * GLTSYN_CLKO value to the target, so it ends up with 50% duty cycle.
1718 	 */
1719 	period >>= 1;
1720 
1721 	/* For proper operation, GLTSYN_CLKO must be larger than clock tick and
1722 	 * period has to fit in 32 bit register.
1723 	 */
1724 #define MIN_PULSE 3
1725 	if (!!period && (period <= MIN_PULSE || period > U32_MAX)) {
1726 		dev_err(ice_hw_to_dev(hw), "CLK period ticks must be >= %d && <= 2^32",
1727 			MIN_PULSE);
1728 		return -EIO;
1729 	}
1730 
1731 	wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));
1732 
1733 	/* 2. Write TARGET time */
1734 	wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start));
1735 	wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start));
1736 
1737 	/* 3. Write AUX_OUT register */
1738 	if (!!period)
1739 		val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
1740 	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);
1741 
1742 	/* 4. write GPIO CTL reg */
1743 	val = GLGEN_GPIO_CTL_PIN_DIR_M;
1744 	if (!!period)
1745 		val |= FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M,
1746 				  8 + chan + (tmr_idx * 4));
1747 
1748 	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
1749 
1750 	return 0;
1751 }
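
/*
 * Worked example for the programming sequence above: a 1 PPS request has
 * period = 1000000000 ns, which is halved to 500000000 before being written
 * to GLTSYN_CLKO, so hardware toggles the output every half period and
 * produces a 50% duty cycle 1 Hz signal. The halved value must be larger
 * than MIN_PULSE ticks and still fit in the 32-bit CLKO register.
 */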
1752 
1753 /**
1754  * ice_ptp_cfg_perout - Configure clock to generate periodic wave
1755  * @pf: Board private structure
1756  * @rq: Periodic output request
1757  * @on: Enable/disable flag
1758  *
1759  * Configure the internal clock generator modules to generate the clock wave of
1760  * specified period.
1761  *
1762  * Return: 0 on success, negative error code otherwise
1763  */
1764 static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
1765 			      int on)
1766 {
1767 	u64 clk, period, start, phase;
1768 	struct ice_hw *hw = &pf->hw;
1769 	unsigned int gpio_pin;
1770 	int pin_desc_idx;
1771 
1772 	if (rq->flags & ~PTP_PEROUT_PHASE)
1773 		return -EOPNOTSUPP;
1774 
1775 	pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_PEROUT, rq->index);
1776 	if (pin_desc_idx < 0)
1777 		return -EIO;
1778 
1779 	gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1];
1780 	period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec;
1781 
1782 	/* If we're disabling the output or period is 0, clear out CLKO and TGT
1783 	 * and keep output level low.
1784 	 */
1785 	if (!on || !period)
1786 		return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0);
1787 
1788 	if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 &&
1789 	    period != NSEC_PER_SEC && hw->ptp.phy_model == ICE_PHY_E82X) {
1790 		dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n");
1791 		return -EOPNOTSUPP;
1792 	}
1793 
1794 	if (period & 0x1) {
1795 		dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
1796 		return -EIO;
1797 	}
1798 
1799 	start = rq->start.sec * NSEC_PER_SEC + rq->start.nsec;
1800 
1801 	/* If PTP_PEROUT_PHASE is set, rq has phase instead of start time */
1802 	if (rq->flags & PTP_PEROUT_PHASE)
1803 		phase = start;
1804 	else
1805 		div64_u64_rem(start, period, &phase);
1806 
1807 	/* If we have only phase or start time is in the past, start the timer
1808 	 * at the next multiple of period, maintaining phase.
1809 	 */
1810 	clk = ice_ptp_read_src_clk_reg(pf, NULL);
1811 	if (rq->flags & PTP_PEROUT_PHASE || start <= clk - ice_prop_delay(hw))
1812 		start = div64_u64(clk + period - 1, period) * period + phase;
1813 
1814 	/* Compensate for propagation delay from the generator to the pin. */
1815 	start -= ice_prop_delay(hw);
1816 
1817 	return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period);
1818 }
1819 
1820 /**
1821  * ice_ptp_disable_all_perout - Disable all currently configured outputs
1822  * @pf: Board private structure
1823  *
1824  * Disable all currently configured clock outputs. This is necessary before
1825  * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_perout to
1826  * re-enable the clocks again.
1827  */
1828 static void ice_ptp_disable_all_perout(struct ice_pf *pf)
1829 {
1830 	for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++)
1831 		if (pf->ptp.perout_rqs[i].period.sec ||
1832 		    pf->ptp.perout_rqs[i].period.nsec)
1833 			ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i],
1834 					   false);
1835 }
1836 
1837 /**
1838  * ice_ptp_enable_all_perout - Enable all configured periodic clock outputs
1839  * @pf: Board private structure
1840  *
1841  * Enable all currently configured clock outputs. Use this after
1842  * ice_ptp_disable_all_perout to reconfigure the output signals according to
1843  * their configuration.
1844  */
1845 static void ice_ptp_enable_all_perout(struct ice_pf *pf)
1846 {
1847 	for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++)
1848 		if (pf->ptp.perout_rqs[i].period.sec ||
1849 		    pf->ptp.perout_rqs[i].period.nsec)
1850 			ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i],
1851 					   true);
1852 }
1853 
1854 /**
1855  * ice_ptp_disable_shared_pin - Disable enabled pin that shares GPIO
1856  * @pf: Board private structure
1857  * @pin: Pin index
1858  * @func: Assigned function
1859  *
1860  * Return: 0 on success, negative error code otherwise
1861  */
1862 static int ice_ptp_disable_shared_pin(struct ice_pf *pf, unsigned int pin,
1863 				      enum ptp_pin_function func)
1864 {
1865 	unsigned int gpio_pin;
1866 
1867 	switch (func) {
1868 	case PTP_PF_PEROUT:
1869 		gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[1];
1870 		break;
1871 	case PTP_PF_EXTTS:
1872 		gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[0];
1873 		break;
1874 	default:
1875 		return -EOPNOTSUPP;
1876 	}
1877 
1878 	for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) {
1879 		struct ptp_pin_desc *pin_desc = &pf->ptp.pin_desc[i];
1880 		unsigned int chan = pin_desc->chan;
1881 
1882 		/* Skip pin idx from the request */
1883 		if (i == pin)
1884 			continue;
1885 
1886 		if (pin_desc->func == PTP_PF_PEROUT &&
1887 		    pf->ptp.ice_pin_desc[i].gpio[1] == gpio_pin) {
1888 			pf->ptp.perout_rqs[chan].period.sec = 0;
1889 			pf->ptp.perout_rqs[chan].period.nsec = 0;
1890 			pin_desc->func = PTP_PF_NONE;
1891 			pin_desc->chan = 0;
1892 			dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared output GPIO pin %u\n",
1893 				i, gpio_pin);
1894 			return ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[chan],
1895 						  false);
1896 		} else if (pin_desc->func == PTP_PF_EXTTS &&
1897 			   pf->ptp.ice_pin_desc[i].gpio[0] == gpio_pin) {
1898 			pf->ptp.extts_rqs[chan].flags &= ~PTP_ENABLE_FEATURE;
1899 			pin_desc->func = PTP_PF_NONE;
1900 			pin_desc->chan = 0;
1901 			dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared input GPIO pin %u\n",
1902 				i, gpio_pin);
1903 			return ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[chan],
1904 						 false);
1905 		}
1906 	}
1907 
1908 	return 0;
1909 }
1910 
1911 /**
1912  * ice_verify_pin - verify if pin supports requested pin function
1913  * @info: the driver's PTP info structure
1914  * @pin: Pin index
1915  * @func: Assigned function
1916  * @chan: Assigned channel
1917  *
1918  * Return: 0 on success, -EOPNOTSUPP when function is not supported.
1919  */
1920 static int ice_verify_pin(struct ptp_clock_info *info, unsigned int pin,
1921 			  enum ptp_pin_function func, unsigned int chan)
1922 {
1923 	struct ice_pf *pf = ptp_info_to_pf(info);
1924 	const struct ice_ptp_pin_desc *pin_desc;
1925 
1926 	pin_desc = &pf->ptp.ice_pin_desc[pin];
1927 
1928 	/* Is assigned function allowed? */
1929 	switch (func) {
1930 	case PTP_PF_EXTTS:
1931 		if (pin_desc->gpio[0] < 0)
1932 			return -EOPNOTSUPP;
1933 		break;
1934 	case PTP_PF_PEROUT:
1935 		if (pin_desc->gpio[1] < 0)
1936 			return -EOPNOTSUPP;
1937 		break;
1938 	case PTP_PF_NONE:
1939 		break;
1940 	case PTP_PF_PHYSYNC:
1941 	default:
1942 		return -EOPNOTSUPP;
1943 	}
1944 
1945 	/* On adapters with SMA_CTRL, disable other pins that share the same GPIO */
1946 	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
1947 		ice_ptp_disable_shared_pin(pf, pin, func);
1948 		pf->ptp.pin_desc[pin].func = func;
1949 		pf->ptp.pin_desc[pin].chan = chan;
1950 		return ice_ptp_set_sma_cfg(pf);
1951 	}
1952 
1953 	return 0;
1954 }
1955 
1956 /**
1957  * ice_ptp_gpio_enable - Enable/disable ancillary features of PHC
1958  * @info: The driver's PTP info structure
1959  * @rq: The requested feature to change
1960  * @on: Enable/disable flag
1961  *
1962  * Return: 0 on success, negative error code otherwise
1963  */
1964 static int ice_ptp_gpio_enable(struct ptp_clock_info *info,
1965 			       struct ptp_clock_request *rq, int on)
1966 {
1967 	struct ice_pf *pf = ptp_info_to_pf(info);
1968 	int err;
1969 
1970 	switch (rq->type) {
1971 	case PTP_CLK_REQ_PEROUT:
1972 	{
1973 		struct ptp_perout_request *cached =
1974 			&pf->ptp.perout_rqs[rq->perout.index];
1975 
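		/* Cache the request on success so it can be replayed when
		 * outputs are re-enabled after a reset; clear the cached
		 * period on failure so the output stays disabled.
		 */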
1976 		err = ice_ptp_cfg_perout(pf, &rq->perout, on);
1977 		if (!err) {
1978 			*cached = rq->perout;
1979 		} else {
1980 			cached->period.sec = 0;
1981 			cached->period.nsec = 0;
1982 		}
1983 		return err;
1984 	}
1985 	case PTP_CLK_REQ_EXTTS:
1986 	{
1987 		struct ptp_extts_request *cached =
1988 			&pf->ptp.extts_rqs[rq->extts.index];
1989 
1990 		err = ice_ptp_cfg_extts(pf, &rq->extts, on);
1991 		if (!err)
1992 			*cached = rq->extts;
1993 		else
1994 			cached->flags &= ~PTP_ENABLE_FEATURE;
1995 		return err;
1996 	}
1997 	default:
1998 		return -EOPNOTSUPP;
1999 	}
2000 }
2001 
2002 /**
2003  * ice_ptp_gettimex64 - Get the time of the clock
2004  * @info: the driver's PTP info structure
2005  * @ts: timespec64 structure to hold the current time value
2006  * @sts: Optional parameter for holding a pair of system timestamps from
2007  *       the system clock. Will be ignored if NULL is given.
2008  *
2009  * Read the device clock and return its value in ns, after converting it
2010  * into a timespec64 struct.
2011  */
2012 static int
2013 ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
2014 		   struct ptp_system_timestamp *sts)
2015 {
2016 	struct ice_pf *pf = ptp_info_to_pf(info);
2017 	u64 time_ns;
2018 
2019 	time_ns = ice_ptp_read_src_clk_reg(pf, sts);
2020 	*ts = ns_to_timespec64(time_ns);
2021 	return 0;
2022 }
2023 
2024 /**
2025  * ice_ptp_settime64 - Set the time of the clock
2026  * @info: the driver's PTP info structure
2027  * @ts: timespec64 structure that holds the new time value
2028  *
2029  * Set the device clock to the user input value. The conversion from timespec
2030  * to ns happens in the write function.
2031  */
2032 static int
2033 ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
2034 {
2035 	struct ice_pf *pf = ptp_info_to_pf(info);
2036 	struct timespec64 ts64 = *ts;
2037 	struct ice_hw *hw = &pf->hw;
2038 	int err;
2039 
2040 	/* For Vernier mode on E82X, we need to recalibrate after new settime.
2041 	 * Start with marking timestamps as invalid.
2042 	 */
2043 	if (ice_get_phy_model(hw) == ICE_PHY_E82X) {
2044 		err = ice_ptp_clear_phy_offset_ready_e82x(hw);
2045 		if (err)
2046 			dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
2047 	}
2048 
2049 	if (!ice_ptp_lock(hw)) {
2050 		err = -EBUSY;
2051 		goto exit;
2052 	}
2053 
2054 	/* Disable periodic outputs */
2055 	ice_ptp_disable_all_perout(pf);
2056 
2057 	err = ice_ptp_write_init(pf, &ts64);
2058 	ice_ptp_unlock(hw);
2059 
2060 	if (!err)
2061 		ice_ptp_reset_cached_phctime(pf);
2062 
2063 	/* Reenable periodic outputs */
2064 	ice_ptp_enable_all_perout(pf);
2065 
2066 	/* Recalibrate and re-enable timestamp blocks for E822/E823 */
2067 	if (ice_get_phy_model(hw) == ICE_PHY_E82X)
2068 		ice_ptp_restart_all_phy(pf);
2069 exit:
2070 	if (err) {
2071 		dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
2072 		return err;
2073 	}
2074 
2075 	return 0;
2076 }
2077 
2078 /**
2079  * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
2080  * @info: the driver's PTP info structure
2081  * @delta: Offset in nanoseconds to adjust the time by
2082  */
2083 static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
2084 {
2085 	struct timespec64 now, then;
2086 	int ret;
2087 
2088 	then = ns_to_timespec64(delta);
2089 	ret = ice_ptp_gettimex64(info, &now, NULL);
2090 	if (ret)
2091 		return ret;
2092 	now = timespec64_add(now, then);
2093 
2094 	return ice_ptp_settime64(info, (const struct timespec64 *)&now);
2095 }
2096 
2097 /**
2098  * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta
2099  * @info: the driver's PTP info structure
2100  * @delta: Offset in nanoseconds to adjust the time by
2101  */
2102 static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
2103 {
2104 	struct ice_pf *pf = ptp_info_to_pf(info);
2105 	struct ice_hw *hw = &pf->hw;
2106 	struct device *dev;
2107 	int err;
2108 
2109 	dev = ice_pf_to_dev(pf);
2110 
2111 	/* Hardware only supports atomic adjustments using signed 32-bit
2112 	 * integers. For any adjustment outside this range, perform
2113 	 * a non-atomic get->adjust->set flow.
2114 	 */
2115 	if (delta > S32_MAX || delta < S32_MIN) {
2116 		dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta);
2117 		return ice_ptp_adjtime_nonatomic(info, delta);
2118 	}
2119 
2120 	if (!ice_ptp_lock(hw)) {
2121 		dev_err(dev, "PTP failed to acquire semaphore in adjtime\n");
2122 		return -EBUSY;
2123 	}
2124 
2125 	/* Disable periodic outputs */
2126 	ice_ptp_disable_all_perout(pf);
2127 
2128 	err = ice_ptp_write_adj(pf, delta);
2129 
2130 	/* Reenable periodic outputs */
2131 	ice_ptp_enable_all_perout(pf);
2132 
2133 	ice_ptp_unlock(hw);
2134 
2135 	if (err) {
2136 		dev_err(dev, "PTP failed to adjust time, err %d\n", err);
2137 		return err;
2138 	}
2139 
2140 	ice_ptp_reset_cached_phctime(pf);
2141 
2142 	return 0;
2143 }
2144 
2145 #ifdef CONFIG_ICE_HWTS
2146 /**
2147  * ice_ptp_get_syncdevicetime - Get the cross time stamp info
2148  * @device: Current device time
2149  * @system: System counter value read synchronously with device time
2150  * @ctx: Context provided by timekeeping code
2151  *
2152  * Read the device clock and the system (ART) counter synchronously and
2153  * return the captured device time in ns along with the raw ART cycle count.
2154  */
2155 static int
2156 ice_ptp_get_syncdevicetime(ktime_t *device,
2157 			   struct system_counterval_t *system,
2158 			   void *ctx)
2159 {
2160 	struct ice_pf *pf = (struct ice_pf *)ctx;
2161 	struct ice_hw *hw = &pf->hw;
2162 	u32 hh_lock, hh_art_ctl;
2163 	int i;
2164 
2165 #define MAX_HH_HW_LOCK_TRIES	5
2166 #define MAX_HH_CTL_LOCK_TRIES	100
2167 
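	/* Take the cross-timestamp HW semaphore, retrying a few times if
	 * another function currently holds it. It is released further down by
	 * writing the register back with the busy bit cleared.
	 */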
2168 	for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) {
2169 		/* Get the HW lock */
2170 		hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
2171 		if (hh_lock & PFHH_SEM_BUSY_M) {
2172 			usleep_range(10000, 15000);
2173 			continue;
2174 		}
2175 		break;
2176 	}
2177 	if (hh_lock & PFHH_SEM_BUSY_M) {
2178 		dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
2179 		return -EBUSY;
2180 	}
2181 
2182 	/* Program cmd to master timer */
2183 	ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
2184 
2185 	/* Start the ART and device clock sync sequence */
2186 	hh_art_ctl = rd32(hw, GLHH_ART_CTL);
2187 	hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
2188 	wr32(hw, GLHH_ART_CTL, hh_art_ctl);
2189 
2190 	for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) {
2191 		/* Wait for sync to complete */
2192 		hh_art_ctl = rd32(hw, GLHH_ART_CTL);
2193 		if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
2194 			udelay(1);
2195 			continue;
2196 		} else {
2197 			u32 hh_ts_lo, hh_ts_hi, tmr_idx;
2198 			u64 hh_ts;
2199 
2200 			tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
2201 			/* Read ART time */
2202 			hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
2203 			hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
2204 			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
2205 			system->cycles = hh_ts;
2206 			system->cs_id = CSID_X86_ART;
2207 			/* Read Device source clock time */
2208 			hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
2209 			hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
2210 			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
2211 			*device = ns_to_ktime(hh_ts);
2212 			break;
2213 		}
2214 	}
2215 
2216 	/* Clear the master timer */
2217 	ice_ptp_src_cmd(hw, ICE_PTP_NOP);
2218 
2219 	/* Release HW lock */
2220 	hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
2221 	hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
2222 	wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);
2223 
2224 	if (i == MAX_HH_CTL_LOCK_TRIES)
2225 		return -ETIMEDOUT;
2226 
2227 	return 0;
2228 }
2229 
2230 /**
2231  * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp
2232  * @info: the driver's PTP info structure
2233  * @cts: The memory to fill the cross timestamp info
2234  *
2235  * Capture a cross timestamp between the ART and the device PTP hardware
2236  * clock. Fill the cross timestamp information and report it back to the
2237  * caller.
2238  *
2239  * This is only valid for E822 and E823 devices which have support for
2240  * generating the cross timestamp via PCIe PTM.
2241  *
2242  * In order to correctly correlate the ART timestamp back to the TSC time, the
2243  * CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
2244  */
2245 static int
2246 ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info,
2247 			    struct system_device_crosststamp *cts)
2248 {
2249 	struct ice_pf *pf = ptp_info_to_pf(info);
2250 
2251 	return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
2252 					     pf, NULL, cts);
2253 }
2254 #endif /* CONFIG_ICE_HWTS */
2255 
2256 /**
2257  * ice_ptp_get_ts_config - ioctl interface to read the timestamping config
2258  * @pf: Board private structure
2259  * @ifr: ioctl data
2260  *
2261  * Copy the timestamping config to user buffer
2262  */
2263 int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
2264 {
2265 	struct hwtstamp_config *config;
2266 
2267 	if (pf->ptp.state != ICE_PTP_READY)
2268 		return -EIO;
2269 
2270 	config = &pf->ptp.tstamp_config;
2271 
2272 	return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
2273 		-EFAULT : 0;
2274 }
2275 
2276 /**
2277  * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
2278  * @pf: Board private structure
2279  * @config: hwtstamp settings requested or saved
2280  */
2281 static int
2282 ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
2283 {
2284 	switch (config->tx_type) {
2285 	case HWTSTAMP_TX_OFF:
2286 		pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
2287 		break;
2288 	case HWTSTAMP_TX_ON:
2289 		pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
2290 		break;
2291 	default:
2292 		return -ERANGE;
2293 	}
2294 
2295 	switch (config->rx_filter) {
2296 	case HWTSTAMP_FILTER_NONE:
2297 		pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
2298 		break;
2299 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2300 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2301 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2302 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2303 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2304 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2305 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2306 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2307 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2308 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2309 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2310 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2311 	case HWTSTAMP_FILTER_NTP_ALL:
2312 	case HWTSTAMP_FILTER_ALL:
2313 		pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
2314 		break;
2315 	default:
2316 		return -ERANGE;
2317 	}
2318 
2319 	/* Immediately update the device timestamping mode */
2320 	ice_ptp_restore_timestamp_mode(pf);
2321 
2322 	return 0;
2323 }
2324 
2325 /**
2326  * ice_ptp_set_ts_config - ioctl interface to control the timestamping
2327  * @pf: Board private structure
2328  * @ifr: ioctl data
2329  *
2330  * Get the user config and store it
2331  */
2332 int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
2333 {
2334 	struct hwtstamp_config config;
2335 	int err;
2336 
2337 	if (pf->ptp.state != ICE_PTP_READY)
2338 		return -EAGAIN;
2339 
2340 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2341 		return -EFAULT;
2342 
2343 	err = ice_ptp_set_timestamp_mode(pf, &config);
2344 	if (err)
2345 		return err;
2346 
2347 	/* Return the actual configuration set */
2348 	config = pf->ptp.tstamp_config;
2349 
2350 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2351 		-EFAULT : 0;
2352 }
2353 
2354 /**
2355  * ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns
2356  * @rx_desc: Receive descriptor
2357  * @pkt_ctx: Packet context to get the cached time
2358  *
2359  * The Rx descriptor carries a 32-bit timestamp; extend it to a 64-bit ns
2359  * value using the cached PHC time.
2360  */
2361 u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
2362 			const struct ice_pkt_ctx *pkt_ctx)
2363 {
2364 	u64 ts_ns, cached_time;
2365 	u32 ts_high;
2366 
2367 	if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
2368 		return 0;
2369 
2370 	cached_time = READ_ONCE(pkt_ctx->cached_phctime);
2371 
2372 	/* Do not report a timestamp if we don't have a cached PHC time */
2373 	if (!cached_time)
2374 		return 0;
2375 
2376 	/* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
2377 	 * PHC value, rather than accessing the PF. This also allows us to
2378 	 * simply pass the upper 32bits of nanoseconds directly. Calling
2379 	 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these
2380 	 * bits itself.
2381 	 */
2382 	ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
2383 	ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);
2384 
2385 	return ts_ns;
2386 }
2387 
2388 /**
2389  * ice_ptp_setup_pin_cfg - setup PTP pin_config structure
2390  * @pf: Board private structure
2391  */
2392 static void ice_ptp_setup_pin_cfg(struct ice_pf *pf)
2393 {
2394 	for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) {
2395 		const struct ice_ptp_pin_desc *desc = &pf->ptp.ice_pin_desc[i];
2396 		struct ptp_pin_desc *pin = &pf->ptp.pin_desc[i];
2397 		const char *name = NULL;
2398 
2399 		if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
2400 			name = ice_pin_names[desc->name_idx];
2401 		else if (desc->name_idx != GPIO_NA)
2402 			name = ice_pin_names_nvm[desc->name_idx];
2403 		if (name)
2404 			strscpy(pin->name, name, sizeof(pin->name));
2405 
2406 		pin->index = i;
2407 	}
2408 
2409 	pf->ptp.info.pin_config = pf->ptp.pin_desc;
2410 }
2411 
2412 /**
2413  * ice_ptp_disable_pins - Disable PTP pins
2414  * @pf: pointer to the PF structure
2415  *
2416  * Disable OS access to the SMA pins. Called to clear out the OS indications
2417  * of pin support when we fail to set up the SMA control register.
2418  */
2419 static void ice_ptp_disable_pins(struct ice_pf *pf)
2420 {
2421 	struct ptp_clock_info *info = &pf->ptp.info;
2422 
2423 	dev_warn(ice_pf_to_dev(pf), "Failed to configure PTP pin control\n");
2424 
2425 	info->enable = NULL;
2426 	info->verify = NULL;
2427 	info->n_pins = 0;
2428 	info->n_ext_ts = 0;
2429 	info->n_per_out = 0;
2430 }
2431 
2432 /**
2433  * ice_ptp_parse_sdp_entries - update ice_ptp_pin_desc structure from NVM
2434  * @pf: pointer to the PF structure
2435  * @entries: SDP connection section from NVM
2436  * @num_entries: number of valid entries in sdp_entries
2437  * @pins: PTP pins array to update
2438  *
2439  * Return: 0 on success, negative error code otherwise.
2440  */
2441 static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries,
2442 				     unsigned int num_entries,
2443 				     struct ice_ptp_pin_desc *pins)
2444 {
2445 	unsigned int n_pins = 0;
2446 	unsigned int i;
2447 
2448 	/* Setup ice_pin_desc array */
2449 	for (i = 0; i < ICE_N_PINS_MAX; i++) {
2450 		pins[i].name_idx = -1;
2451 		pins[i].gpio[0] = -1;
2452 		pins[i].gpio[1] = -1;
2453 	}
2454 
2455 	for (i = 0; i < num_entries; i++) {
2456 		u16 entry = le16_to_cpu(entries[i]);
2457 		DECLARE_BITMAP(bitmap, GPIO_NA);
2458 		unsigned int bitmap_idx;
2459 		bool dir;
2460 		u16 gpio;
2461 
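		/* The SDP pin field fits within a single unsigned long, so the
		 * bitmap can be assigned directly from the extracted value.
		 */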
2462 		*bitmap = FIELD_GET(ICE_AQC_NVM_SDP_AC_PIN_M, entry);
2463 		dir = !!FIELD_GET(ICE_AQC_NVM_SDP_AC_DIR_M, entry);
2464 		gpio = FIELD_GET(ICE_AQC_NVM_SDP_AC_SDP_NUM_M, entry);
2465 		for_each_set_bit(bitmap_idx, bitmap, GPIO_NA + 1) {
2466 			unsigned int idx;
2467 
2468 			/* Check if entry's pin bit is valid */
2469 			if (bitmap_idx >= NUM_PTP_PINS_NVM &&
2470 			    bitmap_idx != GPIO_NA)
2471 				continue;
2472 
2473 			/* Check if pin already exists */
2474 			for (idx = 0; idx < ICE_N_PINS_MAX; idx++)
2475 				if (pins[idx].name_idx == bitmap_idx)
2476 					break;
2477 
2478 			if (idx == ICE_N_PINS_MAX) {
2479 				/* Pin not found, setup its entry and name */
2480 				idx = n_pins++;
2481 				pins[idx].name_idx = bitmap_idx;
2482 				if (bitmap_idx == GPIO_NA)
2483 					strscpy(pf->ptp.pin_desc[idx].name,
2484 						ice_pin_names[gpio],
2485 						sizeof(pf->ptp.pin_desc[idx]
2486 							       .name));
2487 			}
2488 
2489 			/* Setup in/out GPIO number */
2490 			pins[idx].gpio[dir] = gpio;
2491 		}
2492 	}
2493 
2494 	for (i = 0; i < n_pins; i++) {
2495 		dev_dbg(ice_pf_to_dev(pf),
2496 			"NVM pin entry[%d] : name_idx %d gpio_out %d gpio_in %d\n",
2497 			i, pins[i].name_idx, pins[i].gpio[1], pins[i].gpio[0]);
2498 	}
2499 
2500 	pf->ptp.info.n_pins = n_pins;
2501 	return 0;
2502 }
2503 
2504 /**
2505  * ice_ptp_set_funcs_e82x - Set specialized functions for E82X support
2506  * @pf: Board private structure
2507  *
2508  * Assign functions to the PTP capabilities structure for E82X devices.
2509  * Functions which operate across all device families should be set directly
2510  * in ice_ptp_set_caps. Only add functions here which are distinct for E82X
2511  * devices.
2512  */
2513 static void ice_ptp_set_funcs_e82x(struct ice_pf *pf)
2514 {
2515 #ifdef CONFIG_ICE_HWTS
2516 	if (boot_cpu_has(X86_FEATURE_ART) &&
2517 	    boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
2518 		pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp_e82x;
2519 
2520 #endif /* CONFIG_ICE_HWTS */
2521 	if (ice_is_e825c(&pf->hw)) {
2522 		pf->ptp.ice_pin_desc = ice_pin_desc_e825c;
2523 		pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e825c);
2524 	} else {
2525 		pf->ptp.ice_pin_desc = ice_pin_desc_e82x;
2526 		pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e82x);
2527 	}
2528 	ice_ptp_setup_pin_cfg(pf);
2529 }
2530 
2531 /**
2532  * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
2533  * @pf: Board private structure
2534  *
2535  * Assign functions to the PTP capabilities structure for E810 devices.
2536  * Functions which operate across all device families should be set directly
2537  * in ice_ptp_set_caps. Only add functions here which are distinct for E810
2538  * devices.
2539  */
2540 static void ice_ptp_set_funcs_e810(struct ice_pf *pf)
2541 {
2542 	__le16 entries[ICE_AQC_NVM_SDP_AC_MAX_SIZE];
2543 	struct ice_ptp_pin_desc *desc = NULL;
2544 	struct ice_ptp *ptp = &pf->ptp;
2545 	unsigned int num_entries;
2546 	int err;
2547 
2548 	err = ice_ptp_read_sdp_ac(&pf->hw, entries, &num_entries);
2549 	if (err) {
2550 		/* SDP section does not exist in NVM or is corrupted */
2551 		if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
2552 			ptp->ice_pin_desc = ice_pin_desc_e810_sma;
2553 			ptp->info.n_pins =
2554 				ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810_sma);
2555 		} else {
2556 			pf->ptp.ice_pin_desc = ice_pin_desc_e810;
2557 			pf->ptp.info.n_pins =
2558 				ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810);
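			/* Clear the error so a missing SDP section is not
			 * treated as fatal; the default E810 pin table is used
			 * instead.
			 */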
2559 			err = 0;
2560 		}
2561 	} else {
2562 		desc = devm_kcalloc(ice_pf_to_dev(pf), ICE_N_PINS_MAX,
2563 				    sizeof(struct ice_ptp_pin_desc),
2564 				    GFP_KERNEL);
2565 		if (!desc)
2566 			goto err;
2567 
2568 		err = ice_ptp_parse_sdp_entries(pf, entries, num_entries, desc);
2569 		if (err)
2570 			goto err;
2571 
2572 		ptp->ice_pin_desc = (const struct ice_ptp_pin_desc *)desc;
2573 	}
2574 
2575 	ptp->info.pin_config = ptp->pin_desc;
2576 	ice_ptp_setup_pin_cfg(pf);
2577 
2578 	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
2579 		err = ice_ptp_set_sma_cfg(pf);
2580 err:
2581 	if (err) {
2582 		devm_kfree(ice_pf_to_dev(pf), desc);
2583 		ice_ptp_disable_pins(pf);
2584 	}
2585 }
2586 
2587 /**
2588  * ice_ptp_set_caps - Set PTP capabilities
2589  * @pf: Board private structure
2590  */
2591 static void ice_ptp_set_caps(struct ice_pf *pf)
2592 {
2593 	struct ptp_clock_info *info = &pf->ptp.info;
2594 	struct device *dev = ice_pf_to_dev(pf);
2595 
2596 	snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
2597 		 dev_driver_string(dev), dev_name(dev));
2598 	info->owner = THIS_MODULE;
2599 	info->max_adj = 100000000;
2600 	info->adjtime = ice_ptp_adjtime;
2601 	info->adjfine = ice_ptp_adjfine;
2602 	info->gettimex64 = ice_ptp_gettimex64;
2603 	info->settime64 = ice_ptp_settime64;
2604 	info->n_per_out = GLTSYN_TGT_H_IDX_MAX;
2605 	info->n_ext_ts = GLTSYN_EVNT_H_IDX_MAX;
2606 	info->enable = ice_ptp_gpio_enable;
2607 	info->verify = ice_verify_pin;
2608 
2609 	if (ice_is_e810(&pf->hw))
2610 		ice_ptp_set_funcs_e810(pf);
2611 	else
2612 		ice_ptp_set_funcs_e82x(pf);
2613 }
2614 
2615 /**
2616  * ice_ptp_create_clock - Create PTP clock device for userspace
2617  * @pf: Board private structure
2618  *
2619  * This function creates a new PTP clock device. It only creates one if we
2620  * don't already have one. It returns an error if it cannot create one, but
2621  * reports success if a device already exists. Should be used by ice_ptp_init
2622  * to create the clock initially and to prevent global resets from creating new ones.
2623  */
2624 static long ice_ptp_create_clock(struct ice_pf *pf)
2625 {
2626 	struct ptp_clock_info *info;
2627 	struct device *dev;
2628 
2629 	/* No need to create a clock device if we already have one */
2630 	if (pf->ptp.clock)
2631 		return 0;
2632 
2633 	ice_ptp_set_caps(pf);
2634 
2635 	info = &pf->ptp.info;
2636 	dev = ice_pf_to_dev(pf);
2637 
2638 	/* Attempt to register the clock before enabling the hardware. */
2639 	pf->ptp.clock = ptp_clock_register(info, dev);
2640 	if (IS_ERR(pf->ptp.clock)) {
2641 		dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device");
2642 		return PTR_ERR(pf->ptp.clock);
2643 	}
2644 
2645 	return 0;
2646 }
2647 
2648 /**
2649  * ice_ptp_request_ts - Request an available Tx timestamp index
2650  * @tx: the PTP Tx timestamp tracker to request from
2651  * @skb: the SKB to associate with this timestamp request
2652  */
2653 s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
2654 {
2655 	unsigned long flags;
2656 	u8 idx;
2657 
2658 	spin_lock_irqsave(&tx->lock, flags);
2659 
2660 	/* Check that this tracker is accepting new timestamp requests */
2661 	if (!ice_ptp_is_tx_tracker_up(tx)) {
2662 		spin_unlock_irqrestore(&tx->lock, flags);
2663 		return -1;
2664 	}
2665 
2666 	/* Find and set the first available index */
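	/* Start the search just past the index most recently read through the
	 * low latency timestamp interface and wrap around if needed, so
	 * indexes are handed out in order.
	 */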
2667 	idx = find_next_zero_bit(tx->in_use, tx->len,
2668 				 tx->last_ll_ts_idx_read + 1);
2669 	if (idx == tx->len)
2670 		idx = find_first_zero_bit(tx->in_use, tx->len);
2671 
2672 	if (idx < tx->len) {
2673 		/* We got a valid index that no other thread could have set. Store
2674 		 * a reference to the skb and the start time to allow discarding old
2675 		 * requests.
2676 		 */
2677 		set_bit(idx, tx->in_use);
2678 		clear_bit(idx, tx->stale);
2679 		tx->tstamps[idx].start = jiffies;
2680 		tx->tstamps[idx].skb = skb_get(skb);
2681 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2682 		ice_trace(tx_tstamp_request, skb, idx);
2683 	}
2684 
2685 	spin_unlock_irqrestore(&tx->lock, flags);
2686 
2687 	/* return the appropriate PHY timestamp register index, -1 if no
2688 	 * indexes were available.
2689 	 */
2690 	if (idx >= tx->len)
2691 		return -1;
2692 	else
2693 		return idx + tx->offset;
2694 }
2695 
2696 /**
2697  * ice_ptp_process_ts - Process the PTP Tx timestamps
2698  * @pf: Board private structure
2699  *
2700  * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx
2701  * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise.
2702  */
2703 enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
2704 {
2705 	switch (pf->ptp.tx_interrupt_mode) {
2706 	case ICE_PTP_TX_INTERRUPT_NONE:
2707 		/* The clock owner handles timestamps on behalf of this device */
2708 		return ICE_TX_TSTAMP_WORK_DONE;
2709 	case ICE_PTP_TX_INTERRUPT_SELF:
2710 		/* This device handles its own timestamps */
2711 		return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
2712 	case ICE_PTP_TX_INTERRUPT_ALL:
2713 		/* This device handles timestamps for all ports */
2714 		return ice_ptp_tx_tstamp_owner(pf);
2715 	default:
2716 		WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
2717 			  pf->ptp.tx_interrupt_mode);
2718 		return ICE_TX_TSTAMP_WORK_DONE;
2719 	}
2720 }
2721 
2722 /**
2723  * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt
2724  * @pf: Board private structure
2725  *
2726  * The device PHY issues Tx timestamp interrupts to the driver for processing
2727  * timestamp data from the PHY. It will not interrupt again until all
2728  * current timestamp data is read. In rare circumstances, it is possible that
2729  * the driver fails to read all outstanding data.
2730  *
2731  * To avoid getting permanently stuck, periodically check if the PHY has
2732  * outstanding timestamp data. If so, trigger an interrupt from software to
2733  * process this data.
2734  */
2735 static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
2736 {
2737 	struct device *dev = ice_pf_to_dev(pf);
2738 	struct ice_hw *hw = &pf->hw;
2739 	bool trigger_oicr = false;
2740 	unsigned int i;
2741 
2742 	if (ice_is_e810(hw))
2743 		return;
2744 
2745 	if (!ice_pf_src_tmr_owned(pf))
2746 		return;
2747 
2748 	for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) {
2749 		u64 tstamp_ready;
2750 		int err;
2751 
2752 		err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
2753 		if (!err && tstamp_ready) {
2754 			trigger_oicr = true;
2755 			break;
2756 		}
2757 	}
2758 
2759 	if (trigger_oicr) {
2760 		/* Trigger a software interrupt, to ensure this data
2761 		 * gets processed.
2762 		 */
2763 		dev_dbg(dev, "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n");
2764 
2765 		wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
2766 		ice_flush(hw);
2767 	}
2768 }
2769 
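/**
 * ice_ptp_periodic_work - Periodic PTP housekeeping work item
 * @work: the kthread delayed work structure
 *
 * Refresh the cached PHC time, nudge the Tx timestamp interrupt if the PHY
 * still has data waiting, and reschedule at either the normal or the retry
 * interval.
 */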
2770 static void ice_ptp_periodic_work(struct kthread_work *work)
2771 {
2772 	struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
2773 	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
2774 	int err;
2775 
2776 	if (pf->ptp.state != ICE_PTP_READY)
2777 		return;
2778 
2779 	err = ice_ptp_update_cached_phctime(pf);
2780 
2781 	ice_ptp_maybe_trigger_tx_interrupt(pf);
2782 
2783 	/* Run twice a second, or retry after 10 ms if the PHC update failed */
2784 	kthread_queue_delayed_work(ptp->kworker, &ptp->work,
2785 				   msecs_to_jiffies(err ? 10 : 500));
2786 }
2787 
2788 /**
2789  * ice_ptp_prepare_for_reset - Prepare PTP for reset
2790  * @pf: Board private structure
2791  * @reset_type: the reset type being performed
2792  */
2793 void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
2794 {
2795 	struct ice_ptp *ptp = &pf->ptp;
2796 	u8 src_tmr;
2797 
2798 	if (ptp->state != ICE_PTP_READY)
2799 		return;
2800 
2801 	ptp->state = ICE_PTP_RESETTING;
2802 
2803 	/* Disable timestamping for both Tx and Rx */
2804 	ice_ptp_disable_timestamp_mode(pf);
2805 
2806 	kthread_cancel_delayed_work_sync(&ptp->work);
2807 
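	/* A PF reset preserves the timer and PHY configuration, so only the
	 * delayed work and timestamping mode need to be stopped for it.
	 */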
2808 	if (reset_type == ICE_RESET_PFR)
2809 		return;
2810 
2811 	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
2812 
2813 	/* Disable periodic outputs */
2814 	ice_ptp_disable_all_perout(pf);
2815 
2816 	src_tmr = ice_get_ptp_src_clock_index(&pf->hw);
2817 
2818 	/* Disable source clock */
2819 	wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);
2820 
2821 	/* Acquire PHC and system timer to restore after reset */
2822 	ptp->reset_time = ktime_get_real_ns();
2823 }
2824 
2825 /**
2826  * ice_ptp_rebuild_owner - Initialize PTP clock owner after reset
2827  * @pf: Board private structure
2828  *
2829  * Companion function for ice_ptp_rebuild() which handles tasks that only the
2830  * PTP clock owner instance should perform.
2831  */
2832 static int ice_ptp_rebuild_owner(struct ice_pf *pf)
2833 {
2834 	struct ice_ptp *ptp = &pf->ptp;
2835 	struct ice_hw *hw = &pf->hw;
2836 	struct timespec64 ts;
2837 	u64 time_diff;
2838 	int err;
2839 
2840 	err = ice_ptp_init_phc(hw);
2841 	if (err)
2842 		return err;
2843 
2844 	/* Acquire the global hardware lock */
2845 	if (!ice_ptp_lock(hw)) {
2846 		err = -EBUSY;
2847 		return err;
2848 	}
2849 
2850 	/* Write the increment time value to PHY and LAN */
2851 	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
2852 	if (err)
2853 		goto err_unlock;
2854 
2855 	/* Write the initial Time value to PHY and LAN using the cached PHC
2856 	 * time before the reset and time difference between stopping and
2857 	 * starting the clock.
2858 	 */
2859 	if (ptp->cached_phc_time) {
2860 		time_diff = ktime_get_real_ns() - ptp->reset_time;
2861 		ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);
2862 	} else {
2863 		ts = ktime_to_timespec64(ktime_get_real());
2864 	}
2865 	err = ice_ptp_write_init(pf, &ts);
2866 	if (err)
2867 		goto err_unlock;
2868 
2869 	/* Release the global hardware lock */
2870 	ice_ptp_unlock(hw);
2871 
2872 	/* Flush software tracking of any outstanding timestamps since we're
2873 	 * about to flush the PHY timestamp block.
2874 	 */
2875 	ice_ptp_flush_all_tx_tracker(pf);
2876 
2877 	if (!ice_is_e810(hw)) {
2878 		/* Enable quad interrupts */
2879 		err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
2880 		if (err)
2881 			return err;
2882 
2883 		ice_ptp_restart_all_phy(pf);
2884 	}
2885 
2886 	/* Re-enable all periodic outputs and external timestamp events */
2887 	ice_ptp_enable_all_perout(pf);
2888 	ice_ptp_enable_all_extts(pf);
2889 
2890 	return 0;
2891 
2892 err_unlock:
2893 	ice_ptp_unlock(hw);
2894 	return err;
2895 }
2896 
2897 /**
2898  * ice_ptp_rebuild - Initialize PTP hardware clock support after reset
2899  * @pf: Board private structure
2900  * @reset_type: the reset type being performed
2901  */
2902 void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
2903 {
2904 	struct ice_ptp *ptp = &pf->ptp;
2905 	int err;
2906 
2907 	if (ptp->state == ICE_PTP_READY) {
2908 		ice_ptp_prepare_for_reset(pf, reset_type);
2909 	} else if (ptp->state != ICE_PTP_RESETTING) {
2910 		err = -EINVAL;
2911 		dev_err(ice_pf_to_dev(pf), "PTP was not initialized\n");
2912 		goto err;
2913 	}
2914 
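	/* Only the clock owner re-initializes the PHC, and only for reset
	 * types that cleared the timer hardware; a PF reset preserves it.
	 */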
2915 	if (ice_pf_src_tmr_owned(pf) && reset_type != ICE_RESET_PFR) {
2916 		err = ice_ptp_rebuild_owner(pf);
2917 		if (err)
2918 			goto err;
2919 	}
2920 
2921 	ptp->state = ICE_PTP_READY;
2922 
2923 	/* Start periodic work going */
2924 	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
2925 
2926 	dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
2927 	return;
2928 
2929 err:
2930 	ptp->state = ICE_PTP_ERROR;
2931 	dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
2932 }
2933 
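/**
 * ice_is_primary - Check if this device is the primary NAC
 * @hw: pointer to the HW struct
 *
 * Return: true unless this is the secondary complex of a dual-NAC E825C
 * device.
 */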
2934 static bool ice_is_primary(struct ice_hw *hw)
2935 {
2936 	return ice_is_e825c(hw) && ice_is_dual(hw) ?
2937 		!!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) : true;
2938 }
2939 
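/**
 * ice_ptp_setup_adapter - Assign the adapter control PF
 * @pf: Board private structure
 *
 * Return: 0 on success, -EPERM if this PF does not own the source timer or
 * is not on the primary device.
 */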
2940 static int ice_ptp_setup_adapter(struct ice_pf *pf)
2941 {
2942 	if (!ice_pf_src_tmr_owned(pf) || !ice_is_primary(&pf->hw))
2943 		return -EPERM;
2944 
2945 	pf->adapter->ctrl_pf = pf;
2946 
2947 	return 0;
2948 }
2949 
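/**
 * ice_ptp_setup_pf - Add this PF to the adapter's PTP port list
 * @pf: Board private structure
 *
 * Return: 0 on success, -ENODEV if there is no control PF or the PHY model
 * is unsupported.
 */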
2950 static int ice_ptp_setup_pf(struct ice_pf *pf)
2951 {
2952 	struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
2953 	struct ice_ptp *ptp = &pf->ptp;
2954 
2955 	if (WARN_ON(!ctrl_ptp) || ice_get_phy_model(&pf->hw) == ICE_PHY_UNSUP)
2956 		return -ENODEV;
2957 
2958 	INIT_LIST_HEAD(&ptp->port.list_node);
2959 	mutex_lock(&pf->adapter->ports.lock);
2960 
2961 	list_add(&ptp->port.list_node,
2962 		 &pf->adapter->ports.ports);
2963 	mutex_unlock(&pf->adapter->ports.lock);
2964 
2965 	return 0;
2966 }
2967 
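/**
 * ice_ptp_cleanup_pf - Remove this PF from the adapter's PTP port list
 * @pf: Board private structure
 */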
2968 static void ice_ptp_cleanup_pf(struct ice_pf *pf)
2969 {
2970 	struct ice_ptp *ptp = &pf->ptp;
2971 
2972 	if (ice_get_phy_model(&pf->hw) != ICE_PHY_UNSUP) {
2973 		mutex_lock(&pf->adapter->ports.lock);
2974 		list_del(&ptp->port.list_node);
2975 		mutex_unlock(&pf->adapter->ports.lock);
2976 	}
2977 }
2978 
2979 /**
2980  * ice_ptp_clock_index - Get the PTP clock index for this device
2981  * @pf: Board private structure
2982  *
2983  * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock
2984  * is associated.
2985  */
2986 int ice_ptp_clock_index(struct ice_pf *pf)
2987 {
2988 	struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
2989 	struct ptp_clock *clock;
2990 
2991 	if (!ctrl_ptp)
2992 		return -1;
2993 	clock = ctrl_ptp->clock;
2994 
2995 	return clock ? ptp_clock_index(clock) : -1;
2996 }
2997 
2998 /**
2999  * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
3000  * @pf: Board private structure
3001  *
3002  * Setup and initialize a PTP clock device that represents the device hardware
3003  * clock. Save the clock index for other functions connected to the same
3004  * hardware resource.
3005  */
3006 static int ice_ptp_init_owner(struct ice_pf *pf)
3007 {
3008 	struct ice_hw *hw = &pf->hw;
3009 	struct timespec64 ts;
3010 	int err;
3011 
3012 	err = ice_ptp_init_phc(hw);
3013 	if (err) {
3014 		dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n",
3015 			err);
3016 		return err;
3017 	}
3018 
3019 	/* Acquire the global hardware lock */
3020 	if (!ice_ptp_lock(hw)) {
3021 		err = -EBUSY;
3022 		goto err_exit;
3023 	}
3024 
3025 	/* Write the increment time value to PHY and LAN */
3026 	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
3027 	if (err)
3028 		goto err_unlock;
3029 
3030 	ts = ktime_to_timespec64(ktime_get_real());
3031 	/* Write the initial Time value to PHY and LAN */
3032 	err = ice_ptp_write_init(pf, &ts);
3033 	if (err)
3034 		goto err_unlock;
3035 
3036 	/* Release the global hardware lock */
3037 	ice_ptp_unlock(hw);
3038 
3039 	/* Configure PHY interrupt settings */
3040 	err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
3041 	if (err)
3042 		goto err_exit;
3043 
3044 	/* Ensure we have a clock device */
3045 	err = ice_ptp_create_clock(pf);
3046 	if (err)
3047 		goto err_clk;
3048 
3049 	return 0;
3050 err_clk:
3051 	pf->ptp.clock = NULL;
3052 err_exit:
3053 	return err;
3054 
3055 err_unlock:
3056 	ice_ptp_unlock(hw);
3057 	return err;
3058 }
3059 
3060 /**
3061  * ice_ptp_init_work - Initialize PTP work threads
3062  * @pf: Board private structure
3063  * @ptp: PF PTP structure
3064  */
3065 static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
3066 {
3067 	struct kthread_worker *kworker;
3068 
3069 	/* Initialize work functions */
3070 	kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);
3071 
3072 	/* Allocate a kworker for handling work required for the ports
3073 	 * connected to the PTP hardware clock.
3074 	 */
3075 	kworker = kthread_create_worker(0, "ice-ptp-%s",
3076 					dev_name(ice_pf_to_dev(pf)));
3077 	if (IS_ERR(kworker))
3078 		return PTR_ERR(kworker);
3079 
3080 	ptp->kworker = kworker;
3081 
3082 	/* Start periodic work going */
3083 	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
3084 
3085 	return 0;
3086 }
3087 
3088 /**
3089  * ice_ptp_init_port - Initialize PTP port structure
3090  * @pf: Board private structure
3091  * @ptp_port: PTP port structure
3092  */
3093 static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
3094 {
3095 	struct ice_hw *hw = &pf->hw;
3096 
3097 	mutex_init(&ptp_port->ps_lock);
3098 
3099 	switch (ice_get_phy_model(hw)) {
3100 	case ICE_PHY_ETH56G:
3101 		return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx,
3102 					      ptp_port->port_num);
3103 	case ICE_PHY_E810:
3104 		return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
3105 	case ICE_PHY_E82X:
3106 		kthread_init_delayed_work(&ptp_port->ov_work,
3107 					  ice_ptp_wait_for_offsets);
3108 
3109 		return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
3110 					    ptp_port->port_num);
3111 	default:
3112 		return -ENODEV;
3113 	}
3114 }
3115 
3116 /**
3117  * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode
3118  * @pf: Board private structure
3119  *
3120  * Initialize the Tx timestamp interrupt mode for this device. For most device
3121  * types, each PF processes the interrupt and manages its own timestamps. For
3122  * E822-based devices, only the clock owner processes the timestamps. Other
3123  * PFs disable the interrupt and do not process their own timestamps.
3124  */
3125 static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
3126 {
3127 	switch (ice_get_phy_model(&pf->hw)) {
3128 	case ICE_PHY_E82X:
3129 		/* E822 based PHY has the clock owner process the interrupt
3130 		 * for all ports.
3131 		 */
3132 		if (ice_pf_src_tmr_owned(pf))
3133 			pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL;
3134 		else
3135 			pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE;
3136 		break;
3137 	default:
3138 		/* other PHY types handle their own Tx interrupt */
3139 		pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF;
3140 	}
3141 }
3142 
3143 /**
3144  * ice_ptp_init - Initialize PTP hardware clock support
3145  * @pf: Board private structure
3146  *
3147  * Set up the device for interacting with the PTP hardware clock for all
3148  * functions, both the function that owns the clock hardware, and the
3149  * functions connected to the clock hardware.
3150  *
3151  * The clock owner will allocate and register a ptp_clock with the
3152  * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work
3153  * items used for asynchronous work such as Tx timestamps and periodic work.
3154  */
3155 void ice_ptp_init(struct ice_pf *pf)
3156 {
3157 	struct ice_ptp *ptp = &pf->ptp;
3158 	struct ice_hw *hw = &pf->hw;
3159 	int lane_num, err;
3160 
3161 	ptp->state = ICE_PTP_INITIALIZING;
3162 
3163 	lane_num = ice_get_phy_lane_number(hw);
3164 	if (lane_num < 0) {
3165 		err = lane_num;
3166 		goto err_exit;
3167 	}
3168 
3169 	ptp->port.port_num = (u8)lane_num;
3170 	ice_ptp_init_hw(hw);
3171 
3172 	ice_ptp_init_tx_interrupt_mode(pf);
3173 
3174 	/* If this function owns the clock hardware, it must allocate and
3175 	 * configure the PTP clock device to represent it.
3176 	 */
3177 	if (ice_pf_src_tmr_owned(pf) && ice_is_primary(hw)) {
3178 		err = ice_ptp_setup_adapter(pf);
3179 		if (err)
3180 			goto err_exit;
3181 		err = ice_ptp_init_owner(pf);
3182 		if (err)
3183 			goto err_exit;
3184 	}
3185 
3186 	err = ice_ptp_setup_pf(pf);
3187 	if (err)
3188 		goto err_exit;
3189 
3190 	err = ice_ptp_init_port(pf, &ptp->port);
3191 	if (err)
3192 		goto err_exit;
3193 
3194 	/* Start the PHY timestamping block */
3195 	ice_ptp_reset_phy_timestamping(pf);
3196 
3197 	/* Configure initial Tx interrupt settings */
3198 	ice_ptp_cfg_tx_interrupt(pf);
3199 
3200 	ptp->state = ICE_PTP_READY;
3201 
3202 	err = ice_ptp_init_work(pf, ptp);
3203 	if (err)
3204 		goto err_exit;
3205 
3206 	dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
3207 	return;
3208 
3209 err_exit:
3210 	/* If we registered a PTP clock, release it */
3211 	if (pf->ptp.clock) {
3212 		ptp_clock_unregister(ptp->clock);
3213 		pf->ptp.clock = NULL;
3214 	}
3215 	ptp->state = ICE_PTP_ERROR;
3216 	dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
3217 }
3218 
3219 /**
3220  * ice_ptp_release - Disable the driver/HW support and unregister the clock
3221  * @pf: Board private structure
3222  *
3223  * This function handles the cleanup work required after initialization by
3224  * clearing out the important information and unregistering the clock.
3225  */
3226 void ice_ptp_release(struct ice_pf *pf)
3227 {
3228 	if (pf->ptp.state != ICE_PTP_READY)
3229 		return;
3230 
3231 	pf->ptp.state = ICE_PTP_UNINIT;
3232 
3233 	/* Disable timestamping for both Tx and Rx */
3234 	ice_ptp_disable_timestamp_mode(pf);
3235 
3236 	ice_ptp_cleanup_pf(pf);
3237 
3238 	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
3239 
3240 	ice_ptp_disable_all_extts(pf);
3241 
3242 	kthread_cancel_delayed_work_sync(&pf->ptp.work);
3243 
3244 	ice_ptp_port_phy_stop(&pf->ptp.port);
3245 	mutex_destroy(&pf->ptp.port.ps_lock);
3246 	if (pf->ptp.kworker) {
3247 		kthread_destroy_worker(pf->ptp.kworker);
3248 		pf->ptp.kworker = NULL;
3249 	}
3250 
3251 	if (!pf->ptp.clock)
3252 		return;
3253 
3254 	/* Disable periodic outputs */
3255 	ice_ptp_disable_all_perout(pf);
3256 
3257 	ptp_clock_unregister(pf->ptp.clock);
3258 	pf->ptp.clock = NULL;
3259 
3260 	dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
3261 }
3262