xref: /linux/drivers/net/ethernet/intel/ice/ice_ptp.c (revision a55f7f5f29b32c2c53cc291899cf9b0c25a07f7c)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021, Intel Corporation. */
3 
4 #include "ice.h"
5 #include "ice_lib.h"
6 #include "ice_trace.h"
7 
/* Default PTP pin names exposed to userspace; array index matches the pin
 * index used in the ptp_pin_desc tables below.
 */
static const char ice_pin_names[][64] = {
	"SDP0",
	"SDP1",
	"SDP2",
	"SDP3",
	"TIME_SYNC",
	"1PPS"
};
16 
/* Pin table for E82X devices: only the TIME_SYNC input and 1PPS output are
 * available. A gpio value of -1 means the direction is not supported.
 */
static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = {
	/* name,        gpio,       delay */
	{  TIME_SYNC, {  4, -1 }, { 0,  0 }},
	{  ONE_PPS,   { -1,  5 }, { 0, 11 }},
};
22 
/* Pin table for E825C devices: four bidirectional SDP pins plus the
 * TIME_SYNC input and 1PPS output. Delay pairs are {input, output}
 * propagation compensation values.
 */
static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = {
	/* name,        gpio,       delay */
	{  SDP0,      {  0,  0 }, { 15, 14 }},
	{  SDP1,      {  1,  1 }, { 15, 14 }},
	{  SDP2,      {  2,  2 }, { 15, 14 }},
	{  SDP3,      {  3,  3 }, { 15, 14 }},
	{  TIME_SYNC, {  4, -1 }, { 11,  0 }},
	{  ONE_PPS,   { -1,  5 }, {  0,  9 }},
};
32 
/* Pin table for E810 devices: four bidirectional SDP pins and a 1PPS
 * output. Note there is no TIME_SYNC entry on E810.
 */
static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = {
	/* name,        gpio,       delay */
	{  SDP0,      {  0,  0 }, { 0, 1 }},
	{  SDP1,      {  1,  1 }, { 0, 1 }},
	{  SDP2,      {  2,  2 }, { 0, 1 }},
	{  SDP3,      {  3,  3 }, { 0, 1 }},
	{  ONE_PPS,   { -1,  5 }, { 0, 1 }},
};
41 
/* Pin names used instead of ice_pin_names when the board has a DPLL; these
 * map to SDP20..SDP23 in the DPLL pin table below.
 */
static const char ice_pin_names_dpll[][64] = {
	"SDP20",
	"SDP21",
	"SDP22",
	"SDP23",
};
48 
/* Pin table for DPLL-equipped boards: each SDP is fixed to a single
 * direction (gpio of -1 marks the unsupported direction).
 */
static const struct ice_ptp_pin_desc ice_pin_desc_dpll[] = {
	/* name,   gpio,       delay */
	{  SDP0, { -1,  0 }, { 0, 1 }},
	{  SDP1, {  1, -1 }, { 0, 0 }},
	{  SDP2, { -1,  2 }, { 0, 1 }},
	{  SDP3, {  3, -1 }, { 0, 0 }},
};
56 
ice_get_ctrl_pf(struct ice_pf * pf)57 static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf)
58 {
59 	return !pf->adapter ? NULL : pf->adapter->ctrl_pf;
60 }
61 
ice_get_ctrl_ptp(struct ice_pf * pf)62 static struct ice_ptp *ice_get_ctrl_ptp(struct ice_pf *pf)
63 {
64 	struct ice_pf *ctrl_pf = ice_get_ctrl_pf(pf);
65 
66 	return !ctrl_pf ? NULL : &ctrl_pf->ptp;
67 }
68 
69 /**
70  * ice_ptp_find_pin_idx - Find pin index in ptp_pin_desc
71  * @pf: Board private structure
72  * @func: Pin function
73  * @chan: GPIO channel
74  *
75  * Return: positive pin number when pin is present, -1 otherwise
76  */
ice_ptp_find_pin_idx(struct ice_pf * pf,enum ptp_pin_function func,unsigned int chan)77 static int ice_ptp_find_pin_idx(struct ice_pf *pf, enum ptp_pin_function func,
78 				unsigned int chan)
79 {
80 	const struct ptp_clock_info *info = &pf->ptp.info;
81 	int i;
82 
83 	for (i = 0; i < info->n_pins; i++) {
84 		if (info->pin_config[i].func == func &&
85 		    info->pin_config[i].chan == chan)
86 			return i;
87 	}
88 
89 	return -1;
90 }
91 
/**
 * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
 * @pf: Board private structure
 *
 * Program the device to respond appropriately to the Tx timestamp interrupt
 * cause. Depending on the configured interrupt mode, this programs the
 * per-PF quad mask register and then enables or disables the Tx timestamp
 * cause in PFINT_OICR_ENA.
 */
static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	bool enable;
	u32 val;

	switch (pf->ptp.tx_interrupt_mode) {
	case ICE_PTP_TX_INTERRUPT_ALL:
		/* React to interrupts across all quads. 0x1f sets one mask
		 * bit per quad -- presumably five quads; confirm against the
		 * PFINT_TSYN_MSK register definition.
		 */
		wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
		enable = true;
		break;
	case ICE_PTP_TX_INTERRUPT_NONE:
		/* Do not react to interrupts on any quad. */
		wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
		enable = false;
		break;
	case ICE_PTP_TX_INTERRUPT_SELF:
	default:
		/* Leave the quad mask untouched; only enable the cause when
		 * the stack has requested Tx timestamping.
		 */
		enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON;
		break;
	}

	/* Configure the Tx timestamp interrupt */
	val = rd32(hw, PFINT_OICR_ENA);
	if (enable)
		val |= PFINT_OICR_TSYN_TX_M;
	else
		val &= ~PFINT_OICR_TSYN_TX_M;
	wr32(hw, PFINT_OICR_ENA, val);
}
130 
131 /**
132  * ice_set_rx_tstamp - Enable or disable Rx timestamping
133  * @pf: The PF pointer to search in
134  * @on: bool value for whether timestamps are enabled or disabled
135  */
ice_set_rx_tstamp(struct ice_pf * pf,bool on)136 static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
137 {
138 	struct ice_vsi *vsi;
139 	u16 i;
140 
141 	vsi = ice_get_main_vsi(pf);
142 	if (!vsi || !vsi->rx_rings)
143 		return;
144 
145 	/* Set the timestamp flag for all the Rx rings */
146 	ice_for_each_rxq(vsi, i) {
147 		if (!vsi->rx_rings[i])
148 			continue;
149 		vsi->rx_rings[i]->ptp_rx = on;
150 	}
151 }
152 
/**
 * ice_ptp_disable_timestamp_mode - Disable current timestamp mode
 * @pf: Board private structure
 *
 * Called during preparation for reset to temporarily disable timestamping on
 * the device. Called during remove to disable timestamping while cleaning up
 * driver resources.
 */
static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 val;

	/* Mask off the Tx timestamp interrupt cause */
	val = rd32(hw, PFINT_OICR_ENA);
	val &= ~PFINT_OICR_TSYN_TX_M;
	wr32(hw, PFINT_OICR_ENA, val);

	/* Stop marking Rx packets for timestamping */
	ice_set_rx_tstamp(pf, false);
}
172 
/**
 * ice_ptp_restore_timestamp_mode - Restore timestamp configuration
 * @pf: Board private structure
 *
 * Called at the end of rebuild to restore timestamp configuration after
 * a device reset. Re-programs the Tx interrupt cause, restores the Rx
 * timestamp flag on the rings, and kicks a software interrupt to process
 * any timestamps captured while the reset was in progress.
 */
void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	bool enable_rx;

	ice_ptp_cfg_tx_interrupt(pf);

	/* Rx timestamping is on only when all packets were being filtered */
	enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
	ice_set_rx_tstamp(pf, enable_rx);

	/* Trigger an immediate software interrupt to ensure that timestamps
	 * which occurred during reset are handled now.
	 */
	wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
	ice_flush(hw);
}
196 
/**
 * ice_ptp_read_src_clk_reg - Read the source clock register
 * @pf: Board private structure
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
 *
 * Return: the 64 bit value of the source PHC timer.
 */
u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
			     struct ptp_system_timestamp *sts)
{
	struct ice_hw *hw = &pf->hw;
	u32 hi, lo, lo2;
	u8 tmr_idx;

	/* Always read the clock through the primary device */
	if (!ice_is_primary(hw))
		hw = ice_get_primary_hw(pf);

	tmr_idx = ice_get_ptp_src_clock_index(hw);
	/* Serialize GLTSYN_TIME register access across the whole adapter */
	guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
	/* Read the system timestamp pre PHC read */
	ptp_read_system_prets(sts);

	if (hw->mac_type == ICE_MAC_E830) {
		/* E830 supports a single atomic 64 bit timer read */
		u64 clk_time = rd64(hw, E830_GLTSYN_TIME_L(tmr_idx));

		/* Read the system timestamp post PHC read */
		ptp_read_system_postts(sts);

		return clk_time;
	}

	/* Other devices require two 32 bit reads; reading TIME_L latches
	 * the timer, then TIME_L is re-read to detect a rollover between
	 * the low and high halves.
	 */
	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	/* Read the system timestamp post PHC read */
	ptp_read_system_postts(sts);

	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	if (lo2 < lo) {
		/* if TIME_L rolled over read TIME_L again and update
		 * system timestamps
		 */
		ptp_read_system_prets(sts);
		lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
		ptp_read_system_postts(sts);
		hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	}

	return ((u64)hi << 32) | lo;
}
247 
248 /**
249  * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
250  * @cached_phc_time: recently cached copy of PHC time
251  * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
252  *
253  * Hardware captures timestamps which contain only 32 bits of nominal
254  * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
255  * Note that the captured timestamp values may be 40 bits, but the lower
256  * 8 bits are sub-nanoseconds and generally discarded.
257  *
258  * Extend the 32bit nanosecond timestamp using the following algorithm and
259  * assumptions:
260  *
261  * 1) have a recently cached copy of the PHC time
262  * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1
263  *    seconds) before or after the PHC time was captured.
264  * 3) calculate the delta between the cached time and the timestamp
265  * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
266  *    captured after the PHC time. In this case, the full timestamp is just
267  *    the cached PHC time plus the delta.
268  * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
269  *    timestamp was captured *before* the PHC time, i.e. because the PHC
270  *    cache was updated after the timestamp was captured by hardware. In this
271  *    case, the full timestamp is the cached time minus the inverse delta.
272  *
273  * This algorithm works even if the PHC time was updated after a Tx timestamp
274  * was requested, but before the Tx timestamp event was reported from
275  * hardware.
276  *
277  * This calculation primarily relies on keeping the cached PHC time up to
278  * date. If the timestamp was captured more than 2^31 nanoseconds after the
279  * PHC time, it is possible that the lower 32bits of PHC time have
280  * overflowed more than once, and we might generate an incorrect timestamp.
281  *
282  * This is prevented by (a) periodically updating the cached PHC time once
283  * a second, and (b) discarding any Tx timestamp packet if it has waited for
284  * a timestamp for more than one second.
285  */
ice_ptp_extend_32b_ts(u64 cached_phc_time,u32 in_tstamp)286 static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
287 {
288 	u32 delta, phc_time_lo;
289 	u64 ns;
290 
291 	/* Extract the lower 32 bits of the PHC time */
292 	phc_time_lo = (u32)cached_phc_time;
293 
294 	/* Calculate the delta between the lower 32bits of the cached PHC
295 	 * time and the in_tstamp value
296 	 */
297 	delta = (in_tstamp - phc_time_lo);
298 
299 	/* Do not assume that the in_tstamp is always more recent than the
300 	 * cached PHC time. If the delta is large, it indicates that the
301 	 * in_tstamp was taken in the past, and should be converted
302 	 * forward.
303 	 */
304 	if (delta > (U32_MAX / 2)) {
305 		/* reverse the delta calculation here */
306 		delta = (phc_time_lo - in_tstamp);
307 		ns = cached_phc_time - delta;
308 	} else {
309 		ns = cached_phc_time + delta;
310 	}
311 
312 	return ns;
313 }
314 
315 /**
316  * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
317  * @pf: Board private structure
318  * @in_tstamp: Ingress/egress 40b timestamp value
319  *
320  * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
321  * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
322  *
323  *  *--------------------------------------------------------------*
324  *  | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
325  *  *--------------------------------------------------------------*
326  *
327  * The low bit is an indicator of whether the timestamp is valid. The next
328  * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
329  * and the remaining 32 bits are the lower 32 bits of the PHC timer.
330  *
331  * It is assumed that the caller verifies the timestamp is valid prior to
332  * calling this function.
333  *
334  * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
335  * time stored in the device private PTP structure as the basis for timestamp
336  * extension.
337  *
338  * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
339  * algorithm.
340  */
ice_ptp_extend_40b_ts(struct ice_pf * pf,u64 in_tstamp)341 static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
342 {
343 	const u64 mask = GENMASK_ULL(31, 0);
344 	unsigned long discard_time;
345 
346 	/* Discard the hardware timestamp if the cached PHC time is too old */
347 	discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
348 	if (time_is_before_jiffies(discard_time)) {
349 		pf->ptp.tx_hwtstamp_discarded++;
350 		return 0;
351 	}
352 
353 	return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
354 				     (in_tstamp >> 8) & mask);
355 }
356 
357 /**
358  * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps
359  * @tx: the PTP Tx timestamp tracker to check
360  *
361  * Check that a given PTP Tx timestamp tracker is up, i.e. that it is ready
362  * to accept new timestamp requests.
363  *
364  * Assumes the tx->lock spinlock is already held.
365  */
366 static bool
ice_ptp_is_tx_tracker_up(struct ice_ptp_tx * tx)367 ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
368 {
369 	lockdep_assert_held(&tx->lock);
370 
371 	return tx->init && !tx->calibrating;
372 }
373 
/**
 * ice_ptp_req_tx_single_tstamp - Request Tx timestamp for a port from FW
 * @tx: the PTP Tx timestamp tracker
 * @idx: index of the timestamp to request
 *
 * Ask firmware (via the low latency proxy register) to fetch the Tx
 * timestamp at @idx. The result is picked up later by
 * ice_ptp_complete_tx_single_tstamp(). Timestamps which have waited more
 * than 2 seconds are dropped instead of requested.
 */
void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
{
	struct ice_e810_params *params;
	struct ice_ptp_port *ptp_port;
	unsigned long flags;
	struct sk_buff *skb;
	struct ice_pf *pf;

	if (!tx->init)
		return;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	params = &pf->hw.ptp.phy.e810;

	/* Drop packets which have waited for more than 2 seconds */
	if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
		/* Count the number of Tx timestamps that timed out */
		pf->ptp.tx_hwtstamp_timeouts++;

		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		clear_bit(idx, tx->in_use);

		dev_kfree_skb_any(skb);
		return;
	}

	ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);

	/* The atqbal waitqueue lock serializes access to the low latency
	 * proxy registers with other users of the interface.
	 */
	spin_lock_irqsave(&params->atqbal_wq.lock, flags);

	params->atqbal_flags |= ATQBAL_FLAGS_INTR_IN_PROGRESS;

	/* Write TS index to read to the PF register so the FW can read it */
	wr32(&pf->hw, REG_LL_PROXY_H,
	     REG_LL_PROXY_H_TS_INTR_ENA | FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) |
	     REG_LL_PROXY_H_EXEC);
	tx->last_ll_ts_idx_read = idx;

	spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
}
421 
/**
 * ice_ptp_complete_tx_single_tstamp - Complete Tx timestamp for a port
 * @tx: the PTP Tx timestamp tracker
 *
 * Read back the timestamp previously requested from firmware by
 * ice_ptp_req_tx_single_tstamp(), extend it to 64 bits using the cached
 * PHC time, and deliver it to the stack via skb_tstamp_tx(). Wakes any
 * threads waiting on the low latency interface.
 */
void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
{
	struct skb_shared_hwtstamps shhwtstamps = {};
	u8 idx = tx->last_ll_ts_idx_read;
	struct ice_e810_params *params;
	struct ice_ptp_port *ptp_port;
	u64 raw_tstamp, tstamp;
	bool drop_ts = false;
	struct sk_buff *skb;
	unsigned long flags;
	struct device *dev;
	struct ice_pf *pf;
	u32 reg_ll_high;

	/* Nothing to do if no request is outstanding */
	if (!tx->init || tx->last_ll_ts_idx_read < 0)
		return;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	dev = ice_pf_to_dev(pf);
	params = &pf->hw.ptp.phy.e810;

	ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);

	spin_lock_irqsave(&params->atqbal_wq.lock, flags);

	if (!(params->atqbal_flags & ATQBAL_FLAGS_INTR_IN_PROGRESS))
		dev_dbg(dev, "%s: low latency interrupt request not in progress?\n",
			__func__);

	/* Read the low 32 bit value */
	raw_tstamp = rd32(&pf->hw, REG_LL_PROXY_L);
	/* Read the status together with high TS part */
	reg_ll_high = rd32(&pf->hw, REG_LL_PROXY_H);

	/* Wake up threads waiting on low latency interface */
	params->atqbal_flags &= ~ATQBAL_FLAGS_INTR_IN_PROGRESS;

	wake_up_locked(&params->atqbal_wq);

	spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);

	/* When the bit is cleared, the TS is ready in the register */
	if (reg_ll_high & REG_LL_PROXY_H_EXEC) {
		dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready");
		return;
	}

	/* High 8 bit value of the TS is on the bits 16:23 */
	raw_tstamp |= ((u64)FIELD_GET(REG_LL_PROXY_H_TS_HIGH, reg_ll_high)) << 32;

	/* Devices using this interface always verify the timestamp differs
	 * relative to the last cached timestamp value.
	 */
	if (raw_tstamp == tx->tstamps[idx].cached_tstamp)
		return;

	tx->tstamps[idx].cached_tstamp = raw_tstamp;
	clear_bit(idx, tx->in_use);
	skb = tx->tstamps[idx].skb;
	tx->tstamps[idx].skb = NULL;
	/* Stale timestamps (e.g. after a clock adjustment) must not be
	 * extended and delivered to the stack.
	 */
	if (test_and_clear_bit(idx, tx->stale))
		drop_ts = true;

	if (!skb)
		return;

	if (drop_ts) {
		dev_kfree_skb_any(skb);
		return;
	}

	/* Extend the timestamp using cached PHC time */
	tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
	if (tstamp) {
		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
		ice_trace(tx_tstamp_complete, skb, idx);

		/* Count the number of Tx timestamps that succeeded */
		pf->ptp.tx_hwtstamp_good++;
	}

	skb_tstamp_tx(skb, &shhwtstamps);
	dev_kfree_skb_any(skb);
}
511 
/**
 * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port
 * @tx: the PTP Tx timestamp tracker
 *
 * Process timestamps captured by the PHY associated with this port. To do
 * this, loop over each index with a waiting skb.
 *
 * If a given index has a valid timestamp, perform the following steps:
 *
 * 1) check that the timestamp request is not stale
 * 2) check that a timestamp is ready and available in the PHY memory bank
 * 3) read and copy the timestamp out of the PHY register
 * 4) unlock the index by clearing the associated in_use bit
 * 5) check if the timestamp is stale, and discard if so
 * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value
 * 7) send this 64 bit timestamp to the stack
 *
 * Note that we do not hold the tracking lock while reading the Tx timestamp.
 * This is because reading the timestamp requires taking a mutex that might
 * sleep.
 *
 * The only place where we set in_use is when a new timestamp is initiated
 * with a slot index. This is only called in the hard xmit routine where an
 * SKB has a request flag set. The only places where we clear this bit is this
 * function, or during teardown when the Tx timestamp tracker is being
 * removed. A timestamp index will never be re-used until the in_use bit for
 * that index is cleared.
 *
 * If a Tx thread starts a new timestamp, we might not begin processing it
 * right away but we will notice it at the end when we re-queue the task.
 *
 * If a Tx thread starts a new timestamp just after this function exits, the
 * interrupt for that timestamp should re-trigger this function once
 * a timestamp is ready.
 *
 * In cases where the PTP hardware clock was directly adjusted, some
 * timestamps may not be able to safely use the timestamp extension math. In
 * this case, software will set the stale bit for any outstanding Tx
 * timestamps when the clock is adjusted. Then this function will discard
 * those captured timestamps instead of sending them to the stack.
 *
 * If a Tx packet has been waiting for more than 2 seconds, it is not possible
 * to correctly extend the timestamp using the cached PHC time. It is
 * extremely unlikely that a packet will ever take this long to timestamp. If
 * we detect a Tx timestamp request that has waited for this long we assume
 * the packet will never be sent by hardware and discard it without reading
 * the timestamp register.
 */
static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
{
	struct ice_ptp_port *ptp_port;
	unsigned long flags;
	u32 tstamp_good = 0;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u64 tstamp_ready;
	bool link_up;
	int err;
	u8 idx;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	hw = &pf->hw;

	if (!tx->init)
		return;

	/* Read the Tx ready status first */
	if (tx->has_ready_bitmap) {
		err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
		if (err)
			return;
	}

	/* Drop packets if the link went down */
	link_up = ptp_port->link_up;

	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct skb_shared_hwtstamps shhwtstamps = {};
		u8 phy_idx = idx + tx->offset;
		u64 raw_tstamp = 0, tstamp;
		bool drop_ts = !link_up;
		struct sk_buff *skb;

		/* Drop packets which have waited for more than 2 seconds */
		if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
			drop_ts = true;

			/* Count the number of Tx timestamps that timed out */
			pf->ptp.tx_hwtstamp_timeouts++;
		}

		/* Only read a timestamp from the PHY if its marked as ready
		 * by the tstamp_ready register. This avoids unnecessary
		 * reading of timestamps which are not yet valid. This is
		 * important as we must read all timestamps which are valid
		 * and only timestamps which are valid during each interrupt.
		 * If we do not, the hardware logic for generating a new
		 * interrupt can get stuck on some devices.
		 */
		if (tx->has_ready_bitmap &&
		    !(tstamp_ready & BIT_ULL(phy_idx))) {
			/* Not ready yet; still drop the slot if the request
			 * is already doomed (timeout or link down).
			 */
			if (drop_ts)
				goto skip_ts_read;

			continue;
		}

		ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);

		err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp);
		if (err && !drop_ts)
			continue;

		ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);

		/* For PHYs which don't implement a proper timestamp ready
		 * bitmap, verify that the timestamp value is different
		 * from the last cached timestamp. If it is not, skip this for
		 * now assuming it hasn't yet been captured by hardware.
		 */
		if (!drop_ts && !tx->has_ready_bitmap &&
		    raw_tstamp == tx->tstamps[idx].cached_tstamp)
			continue;

		/* Discard any timestamp value without the valid bit set */
		if (!(raw_tstamp & ICE_PTP_TS_VALID))
			drop_ts = true;

skip_ts_read:
		/* Release the slot under tx->lock so a concurrent xmit can
		 * safely re-use it once in_use is cleared.
		 */
		spin_lock_irqsave(&tx->lock, flags);
		if (!tx->has_ready_bitmap && raw_tstamp)
			tx->tstamps[idx].cached_tstamp = raw_tstamp;
		clear_bit(idx, tx->in_use);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		if (test_and_clear_bit(idx, tx->stale))
			drop_ts = true;
		spin_unlock_irqrestore(&tx->lock, flags);

		/* It is unlikely but possible that the SKB will have been
		 * flushed at this point due to link change or teardown.
		 */
		if (!skb)
			continue;

		if (drop_ts) {
			dev_kfree_skb_any(skb);
			continue;
		}

		/* Extend the timestamp using cached PHC time */
		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
		if (tstamp) {
			shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
			ice_trace(tx_tstamp_complete, skb, idx);

			/* Count the number of Tx timestamps that succeeded */
			tstamp_good++;
		}

		skb_tstamp_tx(skb, &shhwtstamps);
		dev_kfree_skb_any(skb);
	}

	pf->ptp.tx_hwtstamp_good += tstamp_good;
}
679 
ice_ptp_tx_tstamp_owner(struct ice_pf * pf)680 static void ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
681 {
682 	struct ice_ptp_port *port;
683 
684 	mutex_lock(&pf->adapter->ports.lock);
685 	list_for_each_entry(port, &pf->adapter->ports.ports, list_node) {
686 		struct ice_ptp_tx *tx = &port->tx;
687 
688 		if (!tx || !tx->init)
689 			continue;
690 
691 		ice_ptp_process_tx_tstamp(tx);
692 	}
693 	mutex_unlock(&pf->adapter->ports.lock);
694 }
695 
/**
 * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
 * @tx: Tx tracking structure to initialize
 *
 * Assumes that the length has already been initialized. Do not call directly,
 * use the ice_ptp_init_tx_* instead.
 *
 * Return: 0 on success, -ENOMEM if any of the allocations failed.
 */
static int
ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
{
	unsigned long *in_use, *stale;
	struct ice_tx_tstamp *tstamps;

	tstamps = kzalloc_objs(*tstamps, tx->len);
	in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
	stale = bitmap_zalloc(tx->len, GFP_KERNEL);

	/* All three allocations must succeed; unwind everything otherwise
	 * (kfree/bitmap_free tolerate NULL).
	 */
	if (!tstamps || !in_use || !stale) {
		kfree(tstamps);
		bitmap_free(in_use);
		bitmap_free(stale);

		return -ENOMEM;
	}

	tx->tstamps = tstamps;
	tx->in_use = in_use;
	tx->stale = stale;
	tx->init = 1;
	/* No low latency timestamp request is outstanding yet */
	tx->last_ll_ts_idx_read = -1;

	spin_lock_init(&tx->lock);

	return 0;
}
731 
/**
 * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
 * @pf: Board private structure
 * @tx: the tracker to flush
 *
 * Called during teardown when a Tx tracker is being removed. Clears any
 * timestamps still latched in the PHY, releases all in-use slots, and frees
 * the associated SKBs without delivering timestamps to the stack.
 */
static void
ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	struct ice_hw *hw = &pf->hw;
	unsigned long flags;
	u64 tstamp_ready;
	int err;
	u8 idx;

	err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
	if (err) {
		dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n",
			tx->block, err);

		/* If we fail to read the Tx timestamp ready bitmap just
		 * skip clearing the PHY timestamps.
		 */
		tstamp_ready = 0;
	}

	for_each_set_bit(idx, tx->in_use, tx->len) {
		u8 phy_idx = idx + tx->offset;
		struct sk_buff *skb;

		/* In case this timestamp is ready, we need to clear it. */
		if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx)))
			ice_clear_phy_tstamp(hw, tx->block, phy_idx);

		/* Detach the skb and release the slot under the lock */
		spin_lock_irqsave(&tx->lock, flags);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		clear_bit(idx, tx->in_use);
		clear_bit(idx, tx->stale);
		spin_unlock_irqrestore(&tx->lock, flags);

		/* Count the number of Tx timestamps flushed */
		pf->ptp.tx_hwtstamp_flushed++;

		/* Free the SKB after we've cleared the bit */
		dev_kfree_skb_any(skb);
	}
}
781 
/**
 * ice_ptp_mark_tx_tracker_stale - Mark unfinished timestamps as stale
 * @tx: the tracker to mark
 *
 * Mark currently outstanding Tx timestamps as stale. This prevents sending
 * their timestamp value to the stack. This is required to prevent extending
 * the 40bit hardware timestamp incorrectly.
 *
 * This should be called when the PTP clock is modified such as after a set
 * time request.
 */
static void
ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
{
	unsigned long flags;

	/* Every slot currently in use becomes stale in one atomic step */
	spin_lock_irqsave(&tx->lock, flags);
	bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len);
	spin_unlock_irqrestore(&tx->lock, flags);
}
802 
803 /**
804  * ice_ptp_flush_all_tx_tracker - Flush all timestamp trackers on this clock
805  * @pf: Board private structure
806  *
807  * Called by the clock owner to flush all the Tx timestamp trackers associated
808  * with the clock.
809  */
810 static void
ice_ptp_flush_all_tx_tracker(struct ice_pf * pf)811 ice_ptp_flush_all_tx_tracker(struct ice_pf *pf)
812 {
813 	struct ice_ptp_port *port;
814 
815 	list_for_each_entry(port, &pf->adapter->ports.ports, list_node)
816 		ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx);
817 }
818 
/**
 * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
 * @pf: Board private structure
 * @tx: Tx tracking structure to release
 *
 * Free memory associated with the Tx timestamp tracker. The ordering here
 * matters: mark the tracker down first, wait for any in-flight interrupt
 * handler to finish, flush remaining timestamps, and only then free the
 * backing memory.
 */
static void
ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	unsigned long flags;

	/* Mark the tracker down so no new work is started on it */
	spin_lock_irqsave(&tx->lock, flags);
	tx->init = 0;
	spin_unlock_irqrestore(&tx->lock, flags);

	/* wait for potentially outstanding interrupt to complete */
	synchronize_irq(pf->oicr_irq.virq);

	ice_ptp_flush_tx_tracker(pf, tx);

	kfree(tx->tstamps);
	tx->tstamps = NULL;

	bitmap_free(tx->in_use);
	tx->in_use = NULL;

	bitmap_free(tx->stale);
	tx->stale = NULL;

	tx->len = 0;
}
851 
852 /**
853  * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
854  * @pf: Board private structure
855  * @tx: the Tx tracking structure to initialize
856  * @port: the port this structure tracks
857  *
858  * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
859  * the timestamp block is shared for all ports in the same quad. To avoid
860  * ports using the same timestamp index, logically break the block of
861  * registers into chunks based on the port number.
862  *
863  * Return: 0 on success, -ENOMEM when out of memory
864  */
ice_ptp_init_tx_e82x(struct ice_pf * pf,struct ice_ptp_tx * tx,u8 port)865 static int ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx,
866 				u8 port)
867 {
868 	tx->block = ICE_GET_QUAD_NUM(port);
869 	tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
870 	tx->len = INDEX_PER_PORT_E82X;
871 	tx->has_ready_bitmap = 1;
872 
873 	return ice_ptp_alloc_tx_tracker(tx);
874 }
875 
876 /**
877  * ice_ptp_init_tx - Initialize tracking for Tx timestamps
878  * @pf: Board private structure
879  * @tx: the Tx tracking structure to initialize
880  * @port: the port this structure tracks
881  *
882  * Initialize the Tx timestamp tracker for this PF. For all PHYs except E82X,
883  * each port has its own block of timestamps, independent of the other ports.
884  *
885  * Return: 0 on success, -ENOMEM when out of memory
886  */
ice_ptp_init_tx(struct ice_pf * pf,struct ice_ptp_tx * tx,u8 port)887 static int ice_ptp_init_tx(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
888 {
889 	tx->block = port;
890 	tx->offset = 0;
891 	tx->len = INDEX_PER_PORT;
892 
893 	/* The E810 PHY does not provide a timestamp ready bitmap. Instead,
894 	 * verify new timestamps against cached copy of the last read
895 	 * timestamp.
896 	 */
897 	tx->has_ready_bitmap = pf->hw.mac_type != ICE_MAC_E810;
898 
899 	return ice_ptp_alloc_tx_tracker(tx);
900 }
901 
/**
 * ice_ptp_update_cached_phctime - Update the cached PHC time values
 * @pf: Board specific private structure
 *
 * This function updates the system time values which are cached in the PF
 * structure and the Rx rings.
 *
 * This function must be called periodically to ensure that the cached value
 * is never more than 2 seconds old.
 *
 * Note that the cached copy in the PF PTP structure is always updated, even
 * if we can't update the copy in the Rx rings.
 *
 * Return:
 * * 0 - OK, successfully updated
 * * -EAGAIN - PF was busy, need to reschedule the update
 */
static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	unsigned long update_before;
	u64 systime;
	int i;

	/* Warn (and count) when a previous update is more than 2s stale */
	update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
	if (pf->ptp.cached_phc_time &&
	    time_is_before_jiffies(update_before)) {
		unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;

		dev_warn(dev, "%u msecs passed between update to cached PHC time\n",
			 jiffies_to_msecs(time_taken));
		pf->ptp.late_cached_phc_updates++;
	}

	/* Read the current PHC time */
	systime = ice_ptp_read_src_clk_reg(pf, NULL);

	/* Update the cached PHC time stored in the PF structure */
	WRITE_ONCE(pf->ptp.cached_phc_time, systime);
	WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);

	/* The Rx ring copies are only updated if we can claim ICE_CFG_BUSY;
	 * otherwise the caller must retry later.
	 */
	if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
		return -EAGAIN;

	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];
		int j;

		if (!vsi)
			continue;

		if (vsi->type != ICE_VSI_PF)
			continue;

		ice_for_each_rxq(vsi, j) {
			if (!vsi->rx_rings[j])
				continue;
			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
		}
	}
	clear_bit(ICE_CFG_BUSY, pf->state);

	return 0;
}
966 
967 /**
968  * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
969  * @pf: Board specific private structure
970  *
971  * This function must be called when the cached PHC time is no longer valid,
972  * such as after a time adjustment. It marks any currently outstanding Tx
973  * timestamps as stale and updates the cached PHC time for both the PF and Rx
974  * rings.
975  *
976  * If updating the PHC time cannot be done immediately, a warning message is
977  * logged and the work item is scheduled immediately to minimize the window
978  * with a wrong cached timestamp.
979  */
ice_ptp_reset_cached_phctime(struct ice_pf * pf)980 static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
981 {
982 	struct device *dev = ice_pf_to_dev(pf);
983 	int err;
984 
985 	/* Update the cached PHC time immediately if possible, otherwise
986 	 * schedule the work item to execute soon.
987 	 */
988 	err = ice_ptp_update_cached_phctime(pf);
989 	if (err) {
990 		/* If another thread is updating the Rx rings, we won't
991 		 * properly reset them here. This could lead to reporting of
992 		 * invalid timestamps, but there isn't much we can do.
993 		 */
994 		dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
995 			 __func__);
996 
997 		/* Queue the work item to update the Rx rings when possible */
998 		kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
999 					   msecs_to_jiffies(10));
1000 	}
1001 
1002 	/* Mark any outstanding timestamps as stale, since they might have
1003 	 * been captured in hardware before the time update. This could lead
1004 	 * to us extending them with the wrong cached value resulting in
1005 	 * incorrect timestamp values.
1006 	 */
1007 	ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx);
1008 }
1009 
1010 /**
1011  * ice_ptp_write_init - Set PHC time to provided value
1012  * @pf: Board private structure
1013  * @ts: timespec structure that holds the new time value
1014  *
1015  * Set the PHC time to the specified time provided in the timespec.
1016  */
ice_ptp_write_init(struct ice_pf * pf,struct timespec64 * ts)1017 static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
1018 {
1019 	u64 ns = timespec64_to_ns(ts);
1020 	struct ice_hw *hw = &pf->hw;
1021 
1022 	return ice_ptp_init_time(hw, ns);
1023 }
1024 
1025 /**
1026  * ice_ptp_write_adj - Adjust PHC clock time atomically
1027  * @pf: Board private structure
1028  * @adj: Adjustment in nanoseconds
1029  *
1030  * Perform an atomic adjustment of the PHC time by the specified number of
1031  * nanoseconds.
1032  */
ice_ptp_write_adj(struct ice_pf * pf,s32 adj)1033 static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
1034 {
1035 	struct ice_hw *hw = &pf->hw;
1036 
1037 	return ice_ptp_adj_clock(hw, adj);
1038 }
1039 
1040 /**
1041  * ice_base_incval - Get base timer increment value
1042  * @pf: Board private structure
1043  *
1044  * Look up the base timer increment value for this device. The base increment
1045  * value is used to define the nominal clock tick rate. This increment value
1046  * is programmed during device initialization. It is also used as the basis
1047  * for calculating adjustments using scaled_ppm.
1048  */
ice_base_incval(struct ice_pf * pf)1049 static u64 ice_base_incval(struct ice_pf *pf)
1050 {
1051 	struct ice_hw *hw = &pf->hw;
1052 	u64 incval;
1053 
1054 	incval = ice_get_base_incval(hw);
1055 
1056 	dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
1057 		incval);
1058 
1059 	return incval;
1060 }
1061 
/**
 * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state
 * @port: PTP port for which Tx FIFO is checked
 *
 * Read the Tx FIFO status of the quad containing this port and track how
 * many consecutive checks found it non-empty. After ICE_PTP_FIFO_NUM_CHECKS
 * attempts the quad's timestamp memory is reset to recover.
 *
 * Return: 0 when the FIFO is empty (or was just reset), -EAGAIN while still
 * waiting for it to drain, or the error from the register read.
 */
static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
{
	int offs = port->port_num % ICE_PORTS_PER_QUAD;
	int quad = ICE_GET_QUAD_NUM(port->port_num);
	struct ice_pf *pf;
	struct ice_hw *hw;
	u32 val, phy_sts;
	int err;

	pf = ptp_port_to_pf(port);
	hw = &pf->hw;

	/* FIFO was already observed empty; nothing to check */
	if (port->tx_fifo_busy_cnt == FIFO_OK)
		return 0;

	/* need to read FIFO state */
	if (offs == 0 || offs == 1)
		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS,
					     &val);
	else
		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS,
					     &val);

	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
			port->port_num, err);
		return err;
	}

	/* Each status register packs two ports: odd offsets use the FIFO13
	 * field, even offsets the FIFO02 field.
	 */
	if (offs & 0x1)
		phy_sts = FIELD_GET(Q_REG_FIFO13_M, val);
	else
		phy_sts = FIELD_GET(Q_REG_FIFO02_M, val);

	if (phy_sts & FIFO_EMPTY) {
		port->tx_fifo_busy_cnt = FIFO_OK;
		return 0;
	}

	port->tx_fifo_busy_cnt++;

	dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
		port->tx_fifo_busy_cnt, port->port_num);

	if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
		dev_dbg(ice_pf_to_dev(pf),
			"Port %d Tx FIFO still not empty; resetting quad %d\n",
			port->port_num, quad);
		/* Give up waiting and reset the quad's timestamp memory */
		ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
		port->tx_fifo_busy_cnt = FIFO_OK;
		return 0;
	}

	return -EAGAIN;
}
1121 
/**
 * ice_ptp_wait_for_offsets - Check for valid Tx and Rx offsets
 * @work: Pointer to the kthread_work structure for this task
 *
 * Check whether hardware has completed measuring the Tx and Rx offset values
 * used to configure and enable vernier timestamp calibration.
 *
 * Once the offset in either direction is measured, configure the associated
 * registers with the calibrated offset values and enable timestamping. The Tx
 * and Rx directions are configured independently as soon as their associated
 * offsets are known.
 *
 * This function reschedules itself until both Tx and Rx calibration have
 * completed.
 */
static void ice_ptp_wait_for_offsets(struct kthread_work *work)
{
	struct ice_ptp_port *port;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int tx_err;
	int rx_err;

	port = container_of(work, struct ice_ptp_port, ov_work.work);
	pf = ptp_port_to_pf(port);
	hw = &pf->hw;

	if (ice_is_reset_in_progress(pf->state)) {
		/* wait for device driver to complete reset */
		kthread_queue_delayed_work(pf->ptp.kworker,
					   &port->ov_work,
					   msecs_to_jiffies(100));
		return;
	}

	/* Tx offset can only be programmed once the Tx FIFO has drained */
	tx_err = ice_ptp_check_tx_fifo(port);
	if (!tx_err)
		tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num);
	/* Rx offset is configured independently of the Tx side */
	rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num);
	if (tx_err || rx_err) {
		/* Tx and/or Rx offset not yet configured, try again later */
		kthread_queue_delayed_work(pf->ptp.kworker,
					   &port->ov_work,
					   msecs_to_jiffies(100));
		return;
	}
}
1169 
/**
 * ice_ptp_port_phy_stop - Stop timestamping for a PHY port
 * @ptp_port: PTP port to stop
 *
 * Disable the PHY timestamping block for this port under the port state
 * lock. E810 and E830 PHYs need no per-port stop sequence.
 *
 * Return: 0 on success, -ENODEV for an unknown MAC type, or the error from
 * the PHY stop routine.
 */
static int
ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
{
	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
	u8 port = ptp_port->port_num;
	struct ice_hw *hw = &pf->hw;
	int err;

	mutex_lock(&ptp_port->ps_lock);

	switch (hw->mac_type) {
	case ICE_MAC_E810:
	case ICE_MAC_E830:
		/* No per-port timestamping block to stop */
		err = 0;
		break;
	case ICE_MAC_GENERIC:
		/* Stop the vernier offset polling work before disabling */
		kthread_cancel_delayed_work_sync(&ptp_port->ov_work);

		err = ice_stop_phy_timer_e82x(hw, port, true);
		break;
	case ICE_MAC_GENERIC_3K_E825:
		err = ice_stop_phy_timer_eth56g(hw, port, true);
		break;
	default:
		err = -ENODEV;
	}
	/* -EBUSY is not logged — presumably a transient condition the
	 * caller retries; confirm against the stop routines.
	 */
	if (err && err != -EBUSY)
		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
			port, err);

	mutex_unlock(&ptp_port->ps_lock);

	return err;
}
1208 
/**
 * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping
 * @ptp_port: PTP port for which the PHY start is set
 *
 * Start the PHY timestamping block, and initiate Vernier timestamping
 * calibration. If timestamping cannot be calibrated (such as if link is down)
 * then disable the timestamping block instead.
 *
 * Return: 0 on success, -ENODEV for an unknown MAC type, or the error from
 * the PHY start/stop routine.
 */
static int
ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
{
	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
	u8 port = ptp_port->port_num;
	struct ice_hw *hw = &pf->hw;
	unsigned long flags;
	int err;

	/* Calibration needs link; without it, stop the block instead */
	if (!ptp_port->link_up)
		return ice_ptp_port_phy_stop(ptp_port);

	mutex_lock(&ptp_port->ps_lock);

	switch (hw->mac_type) {
	case ICE_MAC_E810:
	case ICE_MAC_E830:
		/* No per-port restart or calibration required */
		err = 0;
		break;
	case ICE_MAC_GENERIC:
		/* Start the PHY timer in Vernier mode */
		kthread_cancel_delayed_work_sync(&ptp_port->ov_work);

		/* temporarily disable Tx timestamps while calibrating
		 * PHY offset
		 */
		spin_lock_irqsave(&ptp_port->tx.lock, flags);
		ptp_port->tx.calibrating = true;
		spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
		ptp_port->tx_fifo_busy_cnt = 0;

		/* Start the PHY timer in Vernier mode */
		err = ice_start_phy_timer_e82x(hw, port);
		if (err)
			break;

		/* Enable Tx timestamps right away */
		spin_lock_irqsave(&ptp_port->tx.lock, flags);
		ptp_port->tx.calibrating = false;
		spin_unlock_irqrestore(&ptp_port->tx.lock, flags);

		/* Kick off the offset-validity polling work immediately */
		kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
					   0);
		break;
	case ICE_MAC_GENERIC_3K_E825:
		err = ice_start_phy_timer_eth56g(hw, port);
		break;
	default:
		err = -ENODEV;
	}

	if (err)
		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
			port, err);

	mutex_unlock(&ptp_port->ps_lock);

	return err;
}
1276 
/**
 * ice_ptp_link_change - Reconfigure PTP after link status change
 * @pf: Board private structure
 * @linkup: Link is up or down
 *
 * Cache the new link state and, depending on the MAC type, restart the PHY
 * timestamping block. On E825C with DPLL enabled, also reconfigure the
 * SyncE ETH dividers for the recovered-clock pins.
 */
void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
{
	struct ice_ptp_port *ptp_port;
	struct ice_hw *hw = &pf->hw;

	if (pf->ptp.state != ICE_PTP_READY)
		return;

	ptp_port = &pf->ptp.port;

	/* Update cached link status for this port immediately */
	ptp_port->link_up = linkup;

	/* Skip HW writes if reset is in progress */
	if (pf->hw.reset_ongoing)
		return;

	if (hw->mac_type == ICE_MAC_GENERIC_3K_E825 &&
	    test_bit(ICE_FLAG_DPLL, pf->flags)) {
		int pin, err;

		mutex_lock(&pf->dplls.lock);
		for (pin = 0; pin < ICE_SYNCE_CLK_NUM; pin++) {
			enum ice_synce_clk clk_pin;
			bool active;
			u8 port_num;

			port_num = ptp_port->port_num;
			clk_pin = (enum ice_synce_clk)pin;
			err = ice_tspll_bypass_mux_active_e825c(hw,
								port_num,
								&active,
								clk_pin);
			if (err) {
				dev_err_once(ice_pf_to_dev(pf),
					     "Failed to read SyncE bypass mux for pin %d, err %d\n",
					     pin, err);
				break;
			}

			/* NOTE(review): the divider is configured for every
			 * pin, but a failure is only reported when the
			 * bypass mux is active — confirm this is intended.
			 */
			err = ice_tspll_cfg_synce_ethdiv_e825c(hw, clk_pin);
			if (active && err) {
				dev_err_once(ice_pf_to_dev(pf),
					     "Failed to configure SyncE ETH divider for pin %d, err %d\n",
					     pin, err);
				break;
			}
		}
		mutex_unlock(&pf->dplls.lock);
	}

	switch (hw->mac_type) {
	case ICE_MAC_E810:
	case ICE_MAC_E830:
		/* Do not reconfigure E810 or E830 PHY */
		return;
	case ICE_MAC_GENERIC:
		ice_ptp_port_phy_restart(ptp_port);
		return;
	case ICE_MAC_GENERIC_3K_E825:
		/* Restart (and recalibrate) only when link comes up */
		if (linkup)
			ice_ptp_port_phy_restart(ptp_port);
		return;
	default:
		dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__);
	}
}
1349 
/**
 * ice_ptp_cfg_phy_interrupt - Configure PHY interrupt settings
 * @pf: PF private structure
 * @ena: bool value to enable or disable interrupt
 * @threshold: Minimum number of packets at which intr is triggered
 *
 * Utility function to configure all the PHY interrupt settings, including
 * whether the PHY interrupt is enabled, and what threshold to use. Also
 * configures The E82X timestamp owner to react to interrupts from all PHYs.
 *
 * The timestamp memory is always reset first, regardless of MAC type.
 *
 * Return: 0 on success, -EOPNOTSUPP when PHY model incorrect, other error codes
 * when failed to configure PHY interrupt for E82X
 */
static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	ice_ptp_reset_ts_memory(hw);

	switch (hw->mac_type) {
	case ICE_MAC_E810:
	case ICE_MAC_E830:
		/* These PHYs have no per-quad/per-port interrupt config */
		return 0;
	case ICE_MAC_GENERIC: {
		int quad;

		/* E82X configures the interrupt per quad of ports */
		for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
		     quad++) {
			int err;

			err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
			if (err) {
				dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
					quad, err);
				return err;
			}
		}

		return 0;
	}
	case ICE_MAC_GENERIC_3K_E825: {
		int port;

		/* E825C configures the interrupt per port */
		for (port = 0; port < hw->ptp.num_lports; port++) {
			int err;

			err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
			if (err) {
				dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
					port, err);
				return err;
			}
		}

		return 0;
	}
	case ICE_MAC_UNKNOWN:
	default:
		return -EOPNOTSUPP;
	}
}
1412 
1413 /**
1414  * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block
1415  * @pf: Board private structure
1416  */
ice_ptp_reset_phy_timestamping(struct ice_pf * pf)1417 static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
1418 {
1419 	ice_ptp_port_phy_restart(&pf->ptp.port);
1420 }
1421 
1422 /**
1423  * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping
1424  * @pf: Board private structure
1425  */
ice_ptp_restart_all_phy(struct ice_pf * pf)1426 static void ice_ptp_restart_all_phy(struct ice_pf *pf)
1427 {
1428 	struct list_head *entry;
1429 
1430 	list_for_each(entry, &pf->adapter->ports.ports) {
1431 		struct ice_ptp_port *port = list_entry(entry,
1432 						       struct ice_ptp_port,
1433 						       list_node);
1434 
1435 		if (port->link_up)
1436 			ice_ptp_port_phy_restart(port);
1437 	}
1438 }
1439 
1440 /**
1441  * ice_ptp_adjfine - Adjust clock increment rate
1442  * @info: the driver's PTP info structure
1443  * @scaled_ppm: Parts per million with 16-bit fractional field
1444  *
1445  * Adjust the frequency of the clock by the indicated scaled ppm from the
1446  * base frequency.
1447  */
ice_ptp_adjfine(struct ptp_clock_info * info,long scaled_ppm)1448 static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
1449 {
1450 	struct ice_pf *pf = ptp_info_to_pf(info);
1451 	struct ice_hw *hw = &pf->hw;
1452 	u64 incval;
1453 	int err;
1454 
1455 	incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm);
1456 	err = ice_ptp_write_incval_locked(hw, incval);
1457 	if (err) {
1458 		dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
1459 			err);
1460 		return -EIO;
1461 	}
1462 
1463 	return 0;
1464 }
1465 
/**
 * ice_ptp_extts_event - Process PTP external clock event
 * @pf: Board private structure
 *
 * For each pending EXTTS channel, read the captured event time, compensate
 * for the pin's input propagation delay, and deliver a PTP_CLOCK_EXTTS
 * event to the PTP clock subsystem.
 */
void ice_ptp_extts_event(struct ice_pf *pf)
{
	struct ptp_clock_event event;
	struct ice_hw *hw = &pf->hw;
	u8 chan, tmr_idx;
	u32 hi, lo;

	/* Don't process timestamp events if PTP is not ready */
	if (pf->ptp.state != ICE_PTP_READY)
		return;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
	/* Event time is captured by one of the two matched registers
	 *      GLTSYN_EVNT_L: 32 LSB of sampled time event
	 *      GLTSYN_EVNT_H: 32 MSB of sampled time event
	 * Event is defined in GLTSYN_EVNT_0 register
	 */
	for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
		int pin_desc_idx;

		/* Check if channel is enabled */
		if (!(pf->ptp.ext_ts_irq & (1 << chan)))
			continue;

		lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
		hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
		event.timestamp = (u64)hi << 32 | lo;

		/* Add delay compensation */
		pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
		if (pin_desc_idx >= 0) {
			const struct ice_ptp_pin_desc *desc;

			/* delay[0] is the input-path delay for this pin */
			desc = &pf->ptp.ice_pin_desc[pin_desc_idx];
			event.timestamp -= desc->delay[0];
		}

		event.type = PTP_CLOCK_EXTTS;
		event.index = chan;
		/* Acknowledge the channel before notifying the PTP core */
		pf->ptp.ext_ts_irq &= ~(1 << chan);
		ptp_clock_event(pf->ptp.clock, &event);
	}
}
1513 
/**
 * ice_ptp_cfg_extts - Configure EXTTS pin and channel
 * @pf: Board private structure
 * @rq: External timestamp request
 * @on: Enable/disable flag
 *
 * Configure an external timestamp event on the requested channel.
 *
 * Programs the OICR interrupt enable, the GLTSYN_AUX_IN edge selection, and
 * the GPIO pin function for the channel. On disable, the OICR timestamp
 * interrupt is only masked off once no other EXTTS channel remains enabled.
 *
 * Return: 0 on success, negative error code otherwise
 */
static int ice_ptp_cfg_extts(struct ice_pf *pf, struct ptp_extts_request *rq,
			     int on)
{
	u32 aux_reg, gpio_reg, irq_reg;
	struct ice_hw *hw = &pf->hw;
	unsigned int chan, gpio_pin;
	int pin_desc_idx;
	u8 tmr_idx;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
	chan = rq->index;

	/* Map the channel to the GPIO pin carrying the EXTTS function */
	pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
	if (pin_desc_idx < 0)
		return -EIO;

	gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[0];
	irq_reg = rd32(hw, PFINT_OICR_ENA);

	if (on) {
		/* Enable the interrupt */
		irq_reg |= PFINT_OICR_TSYN_EVNT_M;
		aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;

#define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE	BIT(0)
#define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE	BIT(1)

		/* set event level to requested edge */
		if (rq->flags & PTP_FALLING_EDGE)
			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
		if (rq->flags & PTP_RISING_EDGE)
			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;

		/* Write GPIO CTL reg.
		 * 0x1 is input sampled by EVENT register(channel)
		 * + num_in_channels * tmr_idx
		 */
		gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M,
				      1 + chan + (tmr_idx * 3));
	} else {
		bool last_enabled = true;

		/* clear the values we set to reset defaults */
		aux_reg = 0;
		gpio_reg = 0;

		/* Keep the shared OICR interrupt on while any other EXTTS
		 * channel is still enabled.
		 */
		for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++)
			if ((pf->ptp.extts_rqs[i].flags &
			     PTP_ENABLE_FEATURE) &&
			    i != chan) {
				last_enabled = false;
			}

		if (last_enabled)
			irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
	}

	wr32(hw, PFINT_OICR_ENA, irq_reg);
	wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);

	return 0;
}
1587 
1588 /**
1589  * ice_ptp_disable_all_extts - Disable all EXTTS channels
1590  * @pf: Board private structure
1591  */
ice_ptp_disable_all_extts(struct ice_pf * pf)1592 static void ice_ptp_disable_all_extts(struct ice_pf *pf)
1593 {
1594 	for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts ; i++)
1595 		if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE)
1596 			ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i],
1597 					  false);
1598 
1599 	synchronize_irq(pf->oicr_irq.virq);
1600 }
1601 
1602 /**
1603  * ice_ptp_enable_all_extts - Enable all EXTTS channels
1604  * @pf: Board private structure
1605  *
1606  * Called during reset to restore user configuration.
1607  */
ice_ptp_enable_all_extts(struct ice_pf * pf)1608 static void ice_ptp_enable_all_extts(struct ice_pf *pf)
1609 {
1610 	for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts ; i++)
1611 		if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE)
1612 			ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i],
1613 					  true);
1614 }
1615 
1616 /**
1617  * ice_ptp_write_perout - Write periodic wave parameters to HW
1618  * @hw: pointer to the HW struct
1619  * @chan: target channel
1620  * @gpio_pin: target GPIO pin
1621  * @start: target time to start periodic output
1622  * @period: target period
1623  *
1624  * Return: 0 on success, negative error code otherwise
1625  */
ice_ptp_write_perout(struct ice_hw * hw,unsigned int chan,unsigned int gpio_pin,u64 start,u64 period)1626 static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
1627 				unsigned int gpio_pin, u64 start, u64 period)
1628 {
1629 
1630 	u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1631 	u32 val = 0;
1632 
1633 	/* 0. Reset mode & out_en in AUX_OUT */
1634 	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
1635 
1636 	if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) {
1637 		int err;
1638 
1639 		/* Enable/disable CGU 1PPS output for E825C */
1640 		err = ice_tspll_cfg_pps_out_e825c(hw, !!period);
1641 		if (err)
1642 			return err;
1643 	}
1644 
1645 	/* 1. Write perout with half of required period value.
1646 	 * HW toggles output when source clock hits the TGT and then adds
1647 	 * GLTSYN_CLKO value to the target, so it ends up with 50% duty cycle.
1648 	 */
1649 	period >>= 1;
1650 
1651 	/* For proper operation, GLTSYN_CLKO must be larger than clock tick and
1652 	 * period has to fit in 32 bit register.
1653 	 */
1654 #define MIN_PULSE 3
1655 	if (!!period && (period <= MIN_PULSE || period > U32_MAX)) {
1656 		dev_err(ice_hw_to_dev(hw), "CLK period ticks must be >= %d && <= 2^32",
1657 			MIN_PULSE);
1658 		return -EIO;
1659 	}
1660 
1661 	wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));
1662 
1663 	/* 2. Write TARGET time */
1664 	wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start));
1665 	wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start));
1666 
1667 	/* 3. Write AUX_OUT register */
1668 	if (!!period)
1669 		val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
1670 	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);
1671 
1672 	/* 4. write GPIO CTL reg */
1673 	val = GLGEN_GPIO_CTL_PIN_DIR_M;
1674 	if (!!period)
1675 		val |= FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M,
1676 				  8 + chan + (tmr_idx * 4));
1677 
1678 	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
1679 	ice_flush(hw);
1680 
1681 	return 0;
1682 }
1683 
/**
 * ice_ptp_cfg_perout - Configure clock to generate periodic wave
 * @pf: Board private structure
 * @rq: Periodic output request
 * @on: Enable/disable flag
 *
 * Configure the internal clock generator modules to generate the clock wave of
 * specified period.
 *
 * Return: 0 on success, negative error code otherwise
 */
static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
			      int on)
{
	unsigned int gpio_pin, prop_delay_ns;
	u64 clk, period, start, phase;
	struct ice_hw *hw = &pf->hw;
	int pin_desc_idx;

	/* Map the channel to the GPIO pin carrying the PEROUT function */
	pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_PEROUT, rq->index);
	if (pin_desc_idx < 0)
		return -EIO;

	/* gpio[1]/delay[1] are the output pin and its propagation delay */
	gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1];
	prop_delay_ns = pf->ptp.ice_pin_desc[pin_desc_idx].delay[1];
	period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec;

	/* If we're disabling the output or period is 0, clear out CLKO and TGT
	 * and keep output level low.
	 */
	if (!on || !period)
		return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0);

	if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 &&
	    period != NSEC_PER_SEC && hw->mac_type == ICE_MAC_GENERIC) {
		dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n");
		return -EOPNOTSUPP;
	}

	/* HW is programmed with period/2, so the period must be even */
	if (period & 0x1) {
		dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
		return -EIO;
	}

	start = rq->start.sec * NSEC_PER_SEC + rq->start.nsec;

	/* If PTP_PEROUT_PHASE is set, rq has phase instead of start time */
	if (rq->flags & PTP_PEROUT_PHASE)
		phase = start;
	else
		div64_u64_rem(start, period, &phase);

	/* If we have only phase or start time is in the past, start the timer
	 * at the next multiple of period, maintaining phase at least 0.5 second
	 * from now, so we have time to write it to HW.
	 */
	clk = ice_ptp_read_src_clk_reg(pf, NULL) + NSEC_PER_MSEC * 500;
	if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns)
		start = div64_u64(clk + period - 1, period) * period + phase;

	/* Compensate for propagation delay from the generator to the pin. */
	start -= prop_delay_ns;

	return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period);
}
1749 
1750 /**
1751  * ice_ptp_disable_all_perout - Disable all currently configured outputs
1752  * @pf: Board private structure
1753  *
1754  * Disable all currently configured clock outputs. This is necessary before
1755  * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_perout to
1756  * re-enable the clocks again.
1757  */
ice_ptp_disable_all_perout(struct ice_pf * pf)1758 static void ice_ptp_disable_all_perout(struct ice_pf *pf)
1759 {
1760 	for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++)
1761 		if (pf->ptp.perout_rqs[i].period.sec ||
1762 		    pf->ptp.perout_rqs[i].period.nsec)
1763 			ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i],
1764 					   false);
1765 }
1766 
1767 /**
1768  * ice_ptp_enable_all_perout - Enable all configured periodic clock outputs
1769  * @pf: Board private structure
1770  *
1771  * Enable all currently configured clock outputs. Use this after
1772  * ice_ptp_disable_all_perout to reconfigure the output signals according to
1773  * their configuration.
1774  */
ice_ptp_enable_all_perout(struct ice_pf * pf)1775 static void ice_ptp_enable_all_perout(struct ice_pf *pf)
1776 {
1777 	for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++)
1778 		if (pf->ptp.perout_rqs[i].period.sec ||
1779 		    pf->ptp.perout_rqs[i].period.nsec)
1780 			ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i],
1781 					   true);
1782 }
1783 
1784 /**
1785  * ice_verify_pin - verify if pin supports requested pin function
1786  * @info: the driver's PTP info structure
1787  * @pin: Pin index
1788  * @func: Assigned function
1789  * @chan: Assigned channel
1790  *
1791  * Return: 0 on success, -EOPNOTSUPP when function is not supported.
1792  */
ice_verify_pin(struct ptp_clock_info * info,unsigned int pin,enum ptp_pin_function func,unsigned int chan)1793 static int ice_verify_pin(struct ptp_clock_info *info, unsigned int pin,
1794 			  enum ptp_pin_function func, unsigned int chan)
1795 {
1796 	struct ice_pf *pf = ptp_info_to_pf(info);
1797 	const struct ice_ptp_pin_desc *pin_desc;
1798 
1799 	pin_desc = &pf->ptp.ice_pin_desc[pin];
1800 
1801 	/* Is assigned function allowed? */
1802 	switch (func) {
1803 	case PTP_PF_EXTTS:
1804 		if (pin_desc->gpio[0] < 0)
1805 			return -EOPNOTSUPP;
1806 		break;
1807 	case PTP_PF_PEROUT:
1808 		if (pin_desc->gpio[1] < 0)
1809 			return -EOPNOTSUPP;
1810 		break;
1811 	case PTP_PF_NONE:
1812 		break;
1813 	case PTP_PF_PHYSYNC:
1814 	default:
1815 		return -EOPNOTSUPP;
1816 	}
1817 
1818 	return 0;
1819 }
1820 
/**
 * ice_ptp_gpio_enable - Enable/disable ancillary features of PHC
 * @info: The driver's PTP info structure
 * @rq: The requested feature to change
 * @on: Enable/disable flag
 *
 * Apply a periodic-output or external-timestamp request and cache it so it
 * can be restored after a reset. On failure the cached entry is cleared so
 * a failed request is not replayed.
 *
 * Return: 0 on success, negative error code otherwise
 */
static int ice_ptp_gpio_enable(struct ptp_clock_info *info,
			       struct ptp_clock_request *rq, int on)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	int err;

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
	{
		struct ptp_perout_request *cached =
			&pf->ptp.perout_rqs[rq->perout.index];

		err = ice_ptp_cfg_perout(pf, &rq->perout, on);
		if (!err) {
			/* Remember the request for replay after reset */
			*cached = rq->perout;
		} else {
			/* Zero period marks the cached slot as unused */
			cached->period.sec = 0;
			cached->period.nsec = 0;
		}
		return err;
	}
	case PTP_CLK_REQ_EXTTS:
	{
		struct ptp_extts_request *cached =
			&pf->ptp.extts_rqs[rq->extts.index];

		err = ice_ptp_cfg_extts(pf, &rq->extts, on);
		if (!err)
			*cached = rq->extts;
		else
			/* Drop the enable flag so the request isn't replayed */
			cached->flags &= ~PTP_ENABLE_FEATURE;
		return err;
	}
	default:
		return -EOPNOTSUPP;
	}
}
1866 
1867 /**
1868  * ice_ptp_gettimex64 - Get the time of the clock
1869  * @info: the driver's PTP info structure
1870  * @ts: timespec64 structure to hold the current time value
1871  * @sts: Optional parameter for holding a pair of system timestamps from
1872  *       the system clock. Will be ignored if NULL is given.
1873  *
1874  * Read the device clock and return the correct value on ns, after converting it
1875  * into a timespec struct.
1876  */
1877 static int
ice_ptp_gettimex64(struct ptp_clock_info * info,struct timespec64 * ts,struct ptp_system_timestamp * sts)1878 ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
1879 		   struct ptp_system_timestamp *sts)
1880 {
1881 	struct ice_pf *pf = ptp_info_to_pf(info);
1882 	u64 time_ns;
1883 
1884 	time_ns = ice_ptp_read_src_clk_reg(pf, sts);
1885 	*ts = ns_to_timespec64(time_ns);
1886 	return 0;
1887 }
1888 
/**
 * ice_ptp_settime64 - Set the time of the clock
 * @info: the driver's PTP info structure
 * @ts: timespec64 structure that holds the new time value
 *
 * Set the device clock to the user input value. The conversion from timespec
 * to ns happens in the write function.
 *
 * Periodic outputs are disabled around the write, and E82X vernier
 * calibration is restarted afterwards since the old offsets no longer apply.
 *
 * Return: 0 on success, -EBUSY when the PTP semaphore cannot be taken, or
 * the error from the time write.
 */
static int
ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct timespec64 ts64 = *ts;
	struct ice_hw *hw = &pf->hw;
	int err;

	/* For Vernier mode on E82X, we need to recalibrate after new settime.
	 * Start with marking timestamps as invalid.
	 */
	if (hw->mac_type == ICE_MAC_GENERIC) {
		err = ice_ptp_clear_phy_offset_ready_e82x(hw);
		if (err)
			dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
	}

	/* Take the cross-function PTP hardware semaphore */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto exit;
	}

	/* Disable periodic outputs */
	ice_ptp_disable_all_perout(pf);

	err = ice_ptp_write_init(pf, &ts64);
	ice_ptp_unlock(hw);

	/* Cached PHC copies refer to the old epoch; reset them */
	if (!err)
		ice_ptp_reset_cached_phctime(pf);

	/* Reenable periodic outputs */
	ice_ptp_enable_all_perout(pf);

	/* Recalibrate and re-enable timestamp blocks for E822/E823 */
	if (hw->mac_type == ICE_MAC_GENERIC)
		ice_ptp_restart_all_phy(pf);
exit:
	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
		return err;
	}

	return 0;
}
1942 
1943 /**
1944  * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
1945  * @info: the driver's PTP info structure
1946  * @delta: Offset in nanoseconds to adjust the time by
1947  */
ice_ptp_adjtime_nonatomic(struct ptp_clock_info * info,s64 delta)1948 static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
1949 {
1950 	struct timespec64 now, then;
1951 	int ret;
1952 
1953 	then = ns_to_timespec64(delta);
1954 	ret = ice_ptp_gettimex64(info, &now, NULL);
1955 	if (ret)
1956 		return ret;
1957 	now = timespec64_add(now, then);
1958 
1959 	return ice_ptp_settime64(info, (const struct timespec64 *)&now);
1960 }
1961 
/**
 * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta
 * @info: the driver's PTP info structure
 * @delta: Offset in nanoseconds to adjust the time by
 *
 * Return: 0 on success, -EBUSY if the PTP hardware semaphore could not be
 * acquired, or another negative error code from writing the adjustment.
 */
static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);

	/* Hardware only supports atomic adjustments using signed 32-bit
	 * integers. For any adjustment outside this range, perform
	 * a non-atomic get->adjust->set flow.
	 */
	if (delta > S32_MAX || delta < S32_MIN) {
		dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta);
		return ice_ptp_adjtime_nonatomic(info, delta);
	}

	if (!ice_ptp_lock(hw)) {
		dev_err(dev, "PTP failed to acquire semaphore in adjtime\n");
		return -EBUSY;
	}

	/* Disable periodic outputs */
	ice_ptp_disable_all_perout(pf);

	err = ice_ptp_write_adj(pf, delta);

	/* Reenable periodic outputs */
	ice_ptp_enable_all_perout(pf);

	ice_ptp_unlock(hw);

	if (err) {
		dev_err(dev, "PTP failed to adjust time, err %d\n", err);
		return err;
	}

	/* Cached PHC snapshots are stale after the adjustment */
	ice_ptp_reset_cached_phctime(pf);

	return 0;
}
2009 
/**
 * struct ice_crosststamp_cfg - Device cross timestamp configuration
 * @lock_reg: The hardware semaphore lock to use
 * @lock_busy: Bit in the semaphore lock indicating the lock is busy
 * @ctl_reg: The hardware register to request cross timestamp
 * @ctl_active: Bit in the control register to request cross timestamp
 * @art_time_l: Lower 32-bits of ART system time
 * @art_time_h: Upper 32-bits of ART system time
 * @dev_time_l: Lower 32-bits of device time (per timer index)
 * @dev_time_h: Upper 32-bits of device time (per timer index)
 *
 * Register layout used by ice_capture_crosststamp() so that the same capture
 * sequence can be shared across hardware families with different register
 * offsets.
 */
struct ice_crosststamp_cfg {
	/* HW semaphore lock register */
	u32 lock_reg;
	u32 lock_busy;

	/* Capture control register */
	u32 ctl_reg;
	u32 ctl_active;

	/* Time storage; dev_time_* arrays are indexed by timer index (0/1) */
	u32 art_time_l;
	u32 art_time_h;
	u32 dev_time_l[2];
	u32 dev_time_h[2];
};
2036 
/* Cross timestamp register mapping for E82X-family devices */
static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e82x = {
	.lock_reg = PFHH_SEM,
	.lock_busy = PFHH_SEM_BUSY_M,
	.ctl_reg = GLHH_ART_CTL,
	.ctl_active = GLHH_ART_CTL_ACTIVE_M,
	.art_time_l = GLHH_ART_TIME_L,
	.art_time_h = GLHH_ART_TIME_H,
	.dev_time_l[0] = GLTSYN_HHTIME_L(0),
	.dev_time_h[0] = GLTSYN_HHTIME_H(0),
	.dev_time_l[1] = GLTSYN_HHTIME_L(1),
	.dev_time_h[1] = GLTSYN_HHTIME_H(1),
};
2049 
#ifdef CONFIG_ICE_HWTS
/* Cross timestamp (PTM) register mapping for E830 devices */
static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e830 = {
	.lock_reg = E830_PFPTM_SEM,
	.lock_busy = E830_PFPTM_SEM_BUSY_M,
	.ctl_reg = E830_GLPTM_ART_CTL,
	.ctl_active = E830_GLPTM_ART_CTL_ACTIVE_M,
	.art_time_l = E830_GLPTM_ART_TIME_L,
	.art_time_h = E830_GLPTM_ART_TIME_H,
	.dev_time_l[0] = E830_GLTSYN_PTMTIME_L(0),
	.dev_time_h[0] = E830_GLTSYN_PTMTIME_H(0),
	.dev_time_l[1] = E830_GLTSYN_PTMTIME_L(1),
	.dev_time_h[1] = E830_GLTSYN_PTMTIME_H(1),
};

#endif /* CONFIG_ICE_HWTS */
/**
 * struct ice_crosststamp_ctx - Device cross timestamp context
 * @snapshot: snapshot of system clocks for historic interpolation
 * @pf: pointer to the PF private structure
 * @cfg: pointer to hardware configuration for cross timestamp
 *
 * Bundles everything ice_capture_crosststamp() needs into the single
 * context pointer passed through get_device_system_crosststamp().
 */
struct ice_crosststamp_ctx {
	struct system_time_snapshot snapshot;
	struct ice_pf *pf;
	const struct ice_crosststamp_cfg *cfg;
};
2076 
/**
 * ice_capture_crosststamp - Capture a device/system cross timestamp
 * @device: Current device time
 * @system: System counter value read synchronously with device time
 * @__ctx: Context passed from ice_ptp_getcrosststamp
 *
 * Read device and system (ART) clock simultaneously and return the corrected
 * clock values in ns.
 *
 * Return: zero on success, or a negative error code on failure.
 */
static int ice_capture_crosststamp(ktime_t *device,
				   struct system_counterval_t *system,
				   void *__ctx)
{
	struct ice_crosststamp_ctx *ctx = __ctx;
	const struct ice_crosststamp_cfg *cfg;
	u32 lock, ctl, ts_lo, ts_hi, tmr_idx;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int err;
	u64 ts;

	cfg = ctx->cfg;
	pf = ctx->pf;
	hw = &pf->hw;

	/* Only timer indices 0 and 1 have capture registers (dev_time_*[2]) */
	tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
	if (tmr_idx > 1)
		return -EINVAL;

	/* Poll until we obtain the cross-timestamp hardware semaphore */
	err = rd32_poll_timeout(hw, cfg->lock_reg, lock,
				!(lock & cfg->lock_busy),
				10 * USEC_PER_MSEC, 50 * USEC_PER_MSEC);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to get cross timestamp lock\n");
		return -EBUSY;
	}

	/* Snapshot system time for historic interpolation */
	ktime_get_snapshot(&ctx->snapshot);

	/* Program cmd to master timer */
	ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);

	/* Start the ART and device clock sync sequence */
	ctl = rd32(hw, cfg->ctl_reg);
	ctl |= cfg->ctl_active;
	wr32(hw, cfg->ctl_reg, ctl);

	/* Poll until hardware completes the capture */
	err = rd32_poll_timeout(hw, cfg->ctl_reg, ctl, !(ctl & cfg->ctl_active),
				5, 20 * USEC_PER_MSEC);
	if (err)
		goto err_timeout;

	/* Read ART system time */
	ts_lo = rd32(hw, cfg->art_time_l);
	ts_hi = rd32(hw, cfg->art_time_h);
	ts = ((u64)ts_hi << 32) | ts_lo;
	system->cycles = ts;
	system->cs_id = CSID_X86_ART;
	system->use_nsecs = true;

	/* Read Device source clock time */
	ts_lo = rd32(hw, cfg->dev_time_l[tmr_idx]);
	ts_hi = rd32(hw, cfg->dev_time_h[tmr_idx]);
	ts = ((u64)ts_hi << 32) | ts_lo;
	*device = ns_to_ktime(ts);

err_timeout:
	/* On both success and timeout: clear the master timer command and
	 * release the hardware semaphore before returning.
	 */
	ice_ptp_src_cmd(hw, ICE_PTP_NOP);

	/* Release HW lock */
	lock = rd32(hw, cfg->lock_reg);
	lock &= ~cfg->lock_busy;
	wr32(hw, cfg->lock_reg, lock);

	return err;
}
2159 
/**
 * ice_ptp_getcrosststamp - Capture a device cross timestamp
 * @info: the driver's PTP info structure
 * @cts: The memory to fill the cross timestamp info
 *
 * Capture a cross timestamp between the ART and the device PTP hardware
 * clock. Fill the cross timestamp information and report it back to the
 * caller.
 *
 * In order to correctly correlate the ART timestamp back to the TSC time, the
 * CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
 *
 * Return: zero on success, or a negative error code on failure.
 */
static int ice_ptp_getcrosststamp(struct ptp_clock_info *info,
				  struct system_device_crosststamp *cts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_crosststamp_ctx ctx = {
		.pf = pf,
	};

	/* Select the register layout for this hardware family */
	switch (pf->hw.mac_type) {
	case ICE_MAC_GENERIC:
	case ICE_MAC_GENERIC_3K_E825:
		ctx.cfg = &ice_crosststamp_cfg_e82x;
		break;
#ifdef CONFIG_ICE_HWTS
	case ICE_MAC_E830:
		ctx.cfg = &ice_crosststamp_cfg_e830;
		break;
#endif /* CONFIG_ICE_HWTS */
	default:
		return -EOPNOTSUPP;
	}

	return get_device_system_crosststamp(ice_capture_crosststamp, &ctx,
					     &ctx.snapshot, cts);
}
2199 
2200 /**
2201  * ice_ptp_hwtstamp_get - interface to read the timestamping config
2202  * @netdev: Pointer to network interface device structure
2203  * @config: Timestamping configuration structure
2204  *
2205  * Copy the timestamping config to user buffer
2206  */
ice_ptp_hwtstamp_get(struct net_device * netdev,struct kernel_hwtstamp_config * config)2207 int ice_ptp_hwtstamp_get(struct net_device *netdev,
2208 			 struct kernel_hwtstamp_config *config)
2209 {
2210 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
2211 
2212 	if (pf->ptp.state != ICE_PTP_READY)
2213 		return -EIO;
2214 
2215 	*config = pf->ptp.tstamp_config;
2216 
2217 	return 0;
2218 }
2219 
2220 /**
2221  * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
2222  * @pf: Board private structure
2223  * @config: hwtstamp settings requested or saved
2224  */
ice_ptp_set_timestamp_mode(struct ice_pf * pf,struct kernel_hwtstamp_config * config)2225 static int ice_ptp_set_timestamp_mode(struct ice_pf *pf,
2226 				      struct kernel_hwtstamp_config *config)
2227 {
2228 	switch (config->tx_type) {
2229 	case HWTSTAMP_TX_OFF:
2230 		pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
2231 		break;
2232 	case HWTSTAMP_TX_ON:
2233 		pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
2234 		break;
2235 	default:
2236 		return -ERANGE;
2237 	}
2238 
2239 	switch (config->rx_filter) {
2240 	case HWTSTAMP_FILTER_NONE:
2241 		pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
2242 		break;
2243 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2244 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2245 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2246 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2247 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2248 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2249 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2250 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2251 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2252 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2253 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2254 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2255 	case HWTSTAMP_FILTER_NTP_ALL:
2256 	case HWTSTAMP_FILTER_ALL:
2257 		pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
2258 		break;
2259 	default:
2260 		return -ERANGE;
2261 	}
2262 
2263 	/* Immediately update the device timestamping mode */
2264 	ice_ptp_restore_timestamp_mode(pf);
2265 
2266 	return 0;
2267 }
2268 
2269 /**
2270  * ice_ptp_hwtstamp_set - interface to control the timestamping
2271  * @netdev: Pointer to network interface device structure
2272  * @config: Timestamping configuration structure
2273  * @extack: Netlink extended ack structure for error reporting
2274  *
2275  * Get the user config and store it
2276  */
ice_ptp_hwtstamp_set(struct net_device * netdev,struct kernel_hwtstamp_config * config,struct netlink_ext_ack * extack)2277 int ice_ptp_hwtstamp_set(struct net_device *netdev,
2278 			 struct kernel_hwtstamp_config *config,
2279 			 struct netlink_ext_ack *extack)
2280 {
2281 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
2282 	int err;
2283 
2284 	if (pf->ptp.state != ICE_PTP_READY)
2285 		return -EAGAIN;
2286 
2287 	err = ice_ptp_set_timestamp_mode(pf, config);
2288 	if (err)
2289 		return err;
2290 
2291 	/* Return the actual configuration set */
2292 	*config = pf->ptp.tstamp_config;
2293 
2294 	return 0;
2295 }
2296 
2297 /**
2298  * ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns
2299  * @rx_desc: Receive descriptor
2300  * @pkt_ctx: Packet context to get the cached time
2301  *
2302  * The driver receives a notification in the receive descriptor with timestamp.
2303  */
ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc * rx_desc,const struct ice_pkt_ctx * pkt_ctx)2304 u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
2305 			const struct ice_pkt_ctx *pkt_ctx)
2306 {
2307 	u64 ts_ns, cached_time;
2308 	u32 ts_high;
2309 
2310 	if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
2311 		return 0;
2312 
2313 	cached_time = READ_ONCE(pkt_ctx->cached_phctime);
2314 
2315 	/* Do not report a timestamp if we don't have a cached PHC time */
2316 	if (!cached_time)
2317 		return 0;
2318 
2319 	/* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
2320 	 * PHC value, rather than accessing the PF. This also allows us to
2321 	 * simply pass the upper 32bits of nanoseconds directly. Calling
2322 	 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these
2323 	 * bits itself.
2324 	 */
2325 	ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
2326 	ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);
2327 
2328 	return ts_ns;
2329 }
2330 
2331 /**
2332  * ice_ptp_setup_pin_cfg - setup PTP pin_config structure
2333  * @pf: Board private structure
2334  */
ice_ptp_setup_pin_cfg(struct ice_pf * pf)2335 static void ice_ptp_setup_pin_cfg(struct ice_pf *pf)
2336 {
2337 	for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) {
2338 		const struct ice_ptp_pin_desc *desc = &pf->ptp.ice_pin_desc[i];
2339 		struct ptp_pin_desc *pin = &pf->ptp.pin_desc[i];
2340 		const char *name;
2341 
2342 		if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
2343 			name = ice_pin_names[desc->name_idx];
2344 		else
2345 			name = ice_pin_names_dpll[desc->name_idx];
2346 
2347 		strscpy(pin->name, name, sizeof(pin->name));
2348 
2349 		pin->index = i;
2350 	}
2351 
2352 	pf->ptp.info.pin_config = pf->ptp.pin_desc;
2353 }
2354 
2355 /**
2356  * ice_ptp_disable_pins - Disable PTP pins
2357  * @pf: pointer to the PF structure
2358  *
2359  * Disable the OS access to the pins. Called to clear out the OS
2360  * indications of pin support when we fail to setup pin array.
2361  */
ice_ptp_disable_pins(struct ice_pf * pf)2362 static void ice_ptp_disable_pins(struct ice_pf *pf)
2363 {
2364 	struct ptp_clock_info *info = &pf->ptp.info;
2365 
2366 	dev_warn(ice_pf_to_dev(pf), "Failed to configure PTP pin control\n");
2367 
2368 	info->enable = NULL;
2369 	info->verify = NULL;
2370 	info->n_pins = 0;
2371 	info->n_ext_ts = 0;
2372 	info->n_per_out = 0;
2373 }
2374 
/**
 * ice_ptp_parse_sdp_entries - update ice_ptp_pin_desc structure from NVM
 * @pf: pointer to the PF structure
 * @entries: SDP connection section from NVM
 * @num_entries: number of valid entries in sdp_entries
 * @pins: PTP pins array to update
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries,
				     unsigned int num_entries,
				     struct ice_ptp_pin_desc *pins)
{
	unsigned int n_pins = 0;
	unsigned int i;

	/* Setup ice_pin_desc array: mark every slot unused (-1) first */
	for (i = 0; i < ICE_N_PINS_MAX; i++) {
		pins[i].name_idx = -1;
		pins[i].gpio[0] = -1;
		pins[i].gpio[1] = -1;
	}

	for (i = 0; i < num_entries; i++) {
		u16 entry = le16_to_cpu(entries[i]);
		DECLARE_BITMAP(bitmap, GPIO_NA);
		unsigned int idx;
		bool dir;
		u16 gpio;

		*bitmap = FIELD_GET(ICE_AQC_NVM_SDP_AC_PIN_M, entry);

		/* Check if entry's pin bitmap is valid. */
		if (bitmap_empty(bitmap, GPIO_NA))
			continue;

		/* Extract direction (input/output) and GPIO number */
		dir = !!FIELD_GET(ICE_AQC_NVM_SDP_AC_DIR_M, entry);
		gpio = FIELD_GET(ICE_AQC_NVM_SDP_AC_SDP_NUM_M, entry);

		/* Find an existing pin entry for this GPIO; the GPIO number
		 * doubles as the pin's name index.
		 */
		for (idx = 0; idx < ICE_N_PINS_MAX; idx++) {
			if (pins[idx].name_idx == gpio)
				break;
		}

		if (idx == ICE_N_PINS_MAX) {
			/* Pin not found, setup its entry and name */
			idx = n_pins++;
			pins[idx].name_idx = gpio;
		}
		/* gpio[0] is the input mapping, gpio[1] the output mapping */
		pins[idx].gpio[dir] = gpio;
	}

	for (i = 0; i < n_pins; i++) {
		dev_dbg(ice_pf_to_dev(pf),
			"NVM pin entry[%d] : name_idx %d gpio_out %d gpio_in %d\n",
			i, pins[i].name_idx, pins[i].gpio[1], pins[i].gpio[0]);
	}

	pf->ptp.info.n_pins = n_pins;
	return 0;
}
2436 
2437 /**
2438  * ice_ptp_set_funcs_e82x - Set specialized functions for E82X support
2439  * @pf: Board private structure
2440  *
2441  * Assign functions to the PTP capabilities structure for E82X devices.
2442  * Functions which operate across all device families should be set directly
2443  * in ice_ptp_set_caps. Only add functions here which are distinct for E82X
2444  * devices.
2445  */
ice_ptp_set_funcs_e82x(struct ice_pf * pf)2446 static void ice_ptp_set_funcs_e82x(struct ice_pf *pf)
2447 {
2448 	pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
2449 
2450 	if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825) {
2451 		pf->ptp.ice_pin_desc = ice_pin_desc_e825c;
2452 		pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e825c);
2453 	} else {
2454 		pf->ptp.ice_pin_desc = ice_pin_desc_e82x;
2455 		pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e82x);
2456 	}
2457 	ice_ptp_setup_pin_cfg(pf);
2458 }
2459 
2460 /**
2461  * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
2462  * @pf: Board private structure
2463  *
2464  * Assign functions to the PTP capabiltiies structure for E810 devices.
2465  * Functions which operate across all device families should be set directly
2466  * in ice_ptp_set_caps. Only add functions here which are distinct for E810
2467  * devices.
2468  */
ice_ptp_set_funcs_e810(struct ice_pf * pf)2469 static void ice_ptp_set_funcs_e810(struct ice_pf *pf)
2470 {
2471 	__le16 entries[ICE_AQC_NVM_SDP_AC_MAX_SIZE];
2472 	struct ice_ptp_pin_desc *desc = NULL;
2473 	struct ice_ptp *ptp = &pf->ptp;
2474 	unsigned int num_entries;
2475 	int err;
2476 
2477 	err = ice_ptp_read_sdp_ac(&pf->hw, entries, &num_entries);
2478 	if (err) {
2479 		/* SDP section does not exist in NVM or is corrupted */
2480 		if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
2481 			ptp->ice_pin_desc = ice_pin_desc_dpll;
2482 			ptp->info.n_pins = ARRAY_SIZE(ice_pin_desc_dpll);
2483 		} else {
2484 			pf->ptp.ice_pin_desc = ice_pin_desc_e810;
2485 			pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e810);
2486 		}
2487 		err = 0;
2488 	} else {
2489 		desc = devm_kcalloc(ice_pf_to_dev(pf), ICE_N_PINS_MAX,
2490 				    sizeof(struct ice_ptp_pin_desc),
2491 				    GFP_KERNEL);
2492 		if (!desc)
2493 			goto err;
2494 
2495 		err = ice_ptp_parse_sdp_entries(pf, entries, num_entries, desc);
2496 		if (err)
2497 			goto err;
2498 
2499 		ptp->ice_pin_desc = (const struct ice_ptp_pin_desc *)desc;
2500 	}
2501 
2502 	ptp->info.pin_config = ptp->pin_desc;
2503 	ice_ptp_setup_pin_cfg(pf);
2504 
2505 err:
2506 	if (err) {
2507 		devm_kfree(ice_pf_to_dev(pf), desc);
2508 		ice_ptp_disable_pins(pf);
2509 	}
2510 }
2511 
2512 /**
2513  * ice_ptp_set_funcs_e830 - Set specialized functions for E830 support
2514  * @pf: Board private structure
2515  *
2516  * Assign functions to the PTP capabiltiies structure for E830 devices.
2517  * Functions which operate across all device families should be set directly
2518  * in ice_ptp_set_caps. Only add functions here which are distinct for E830
2519  * devices.
2520  */
ice_ptp_set_funcs_e830(struct ice_pf * pf)2521 static void ice_ptp_set_funcs_e830(struct ice_pf *pf)
2522 {
2523 #ifdef CONFIG_ICE_HWTS
2524 	if (pcie_ptm_enabled(pf->pdev) && boot_cpu_has(X86_FEATURE_ART))
2525 		pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
2526 
2527 #endif /* CONFIG_ICE_HWTS */
2528 	/* Rest of the config is the same as base E810 */
2529 	pf->ptp.ice_pin_desc = ice_pin_desc_e810;
2530 	pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e810);
2531 	ice_ptp_setup_pin_cfg(pf);
2532 }
2533 
2534 /**
2535  * ice_ptp_set_caps - Set PTP capabilities
2536  * @pf: Board private structure
2537  */
ice_ptp_set_caps(struct ice_pf * pf)2538 static void ice_ptp_set_caps(struct ice_pf *pf)
2539 {
2540 	struct ptp_clock_info *info = &pf->ptp.info;
2541 	struct device *dev = ice_pf_to_dev(pf);
2542 
2543 	snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
2544 		 dev_driver_string(dev), dev_name(dev));
2545 	info->owner = THIS_MODULE;
2546 	info->max_adj = 100000000;
2547 	info->adjtime = ice_ptp_adjtime;
2548 	info->adjfine = ice_ptp_adjfine;
2549 	info->gettimex64 = ice_ptp_gettimex64;
2550 	info->settime64 = ice_ptp_settime64;
2551 	info->n_per_out = GLTSYN_TGT_H_IDX_MAX;
2552 	info->n_ext_ts = GLTSYN_EVNT_H_IDX_MAX;
2553 	info->enable = ice_ptp_gpio_enable;
2554 	info->verify = ice_verify_pin;
2555 
2556 	info->supported_extts_flags = PTP_RISING_EDGE |
2557 				      PTP_FALLING_EDGE |
2558 				      PTP_STRICT_FLAGS;
2559 	info->supported_perout_flags = PTP_PEROUT_PHASE;
2560 
2561 	switch (pf->hw.mac_type) {
2562 	case ICE_MAC_E810:
2563 		ice_ptp_set_funcs_e810(pf);
2564 		return;
2565 	case ICE_MAC_E830:
2566 		ice_ptp_set_funcs_e830(pf);
2567 		return;
2568 	case ICE_MAC_GENERIC:
2569 	case ICE_MAC_GENERIC_3K_E825:
2570 		ice_ptp_set_funcs_e82x(pf);
2571 		return;
2572 	default:
2573 		return;
2574 	}
2575 }
2576 
2577 /**
2578  * ice_ptp_create_clock - Create PTP clock device for userspace
2579  * @pf: Board private structure
2580  *
2581  * This function creates a new PTP clock device. It only creates one if we
2582  * don't already have one. Will return error if it can't create one, but success
2583  * if we already have a device. Should be used by ice_ptp_init to create clock
2584  * initially, and prevent global resets from creating new clock devices.
2585  */
ice_ptp_create_clock(struct ice_pf * pf)2586 static long ice_ptp_create_clock(struct ice_pf *pf)
2587 {
2588 	struct ptp_clock_info *info;
2589 	struct device *dev;
2590 
2591 	/* No need to create a clock device if we already have one */
2592 	if (pf->ptp.clock)
2593 		return 0;
2594 
2595 	ice_ptp_set_caps(pf);
2596 
2597 	info = &pf->ptp.info;
2598 	dev = ice_pf_to_dev(pf);
2599 
2600 	/* Attempt to register the clock before enabling the hardware. */
2601 	pf->ptp.clock = ptp_clock_register(info, dev);
2602 	if (IS_ERR(pf->ptp.clock)) {
2603 		dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device");
2604 		return PTR_ERR(pf->ptp.clock);
2605 	}
2606 
2607 	return 0;
2608 }
2609 
/**
 * ice_ptp_request_ts - Request an available Tx timestamp index
 * @tx: the PTP Tx timestamp tracker to request from
 * @skb: the SKB to associate with this timestamp request
 *
 * Return: the PHY timestamp register index (idx + tx->offset), or -1 if the
 * tracker is down or no index is available.
 */
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
{
	unsigned long flags;
	u8 idx;

	spin_lock_irqsave(&tx->lock, flags);

	/* Check that this tracker is accepting new timestamp requests */
	if (!ice_ptp_is_tx_tracker_up(tx)) {
		spin_unlock_irqrestore(&tx->lock, flags);
		return -1;
	}

	/* Find and set the first available index, starting after the last
	 * low-latency index read and wrapping to the start if needed.
	 */
	idx = find_next_zero_bit(tx->in_use, tx->len,
				 tx->last_ll_ts_idx_read + 1);
	if (idx == tx->len)
		idx = find_first_zero_bit(tx->in_use, tx->len);

	if (idx < tx->len) {
		/* We got a valid index that no other thread could have set. Store
		 * a reference to the skb and the start time to allow discarding old
		 * requests.
		 */
		set_bit(idx, tx->in_use);
		clear_bit(idx, tx->stale);
		tx->tstamps[idx].start = jiffies;
		tx->tstamps[idx].skb = skb_get(skb);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		ice_trace(tx_tstamp_request, skb, idx);
	}

	spin_unlock_irqrestore(&tx->lock, flags);

	/* return the appropriate PHY timestamp register index, -1 if no
	 * indexes were available.
	 */
	if (idx >= tx->len)
		return -1;
	else
		return idx + tx->offset;
}
2657 
ice_ptp_process_ts(struct ice_pf * pf)2658 void ice_ptp_process_ts(struct ice_pf *pf)
2659 {
2660 	switch (pf->ptp.tx_interrupt_mode) {
2661 	case ICE_PTP_TX_INTERRUPT_NONE:
2662 		/* This device has the clock owner handle timestamps for it */
2663 		return;
2664 	case ICE_PTP_TX_INTERRUPT_SELF:
2665 		/* This device handles its own timestamps */
2666 		ice_ptp_process_tx_tstamp(&pf->ptp.port.tx);
2667 		return;
2668 	case ICE_PTP_TX_INTERRUPT_ALL:
2669 		/* This device handles timestamps for all ports */
2670 		ice_ptp_tx_tstamp_owner(pf);
2671 		return;
2672 	default:
2673 		WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
2674 			  pf->ptp.tx_interrupt_mode);
2675 		return;
2676 	}
2677 }
2678 
ice_port_has_timestamps(struct ice_ptp_tx * tx)2679 static bool ice_port_has_timestamps(struct ice_ptp_tx *tx)
2680 {
2681 	bool more_timestamps;
2682 
2683 	scoped_guard(spinlock_irqsave, &tx->lock) {
2684 		if (!tx->init)
2685 			return false;
2686 
2687 		more_timestamps = !bitmap_empty(tx->in_use, tx->len);
2688 	}
2689 
2690 	return more_timestamps;
2691 }
2692 
ice_any_port_has_timestamps(struct ice_pf * pf)2693 static bool ice_any_port_has_timestamps(struct ice_pf *pf)
2694 {
2695 	struct ice_ptp_port *port;
2696 
2697 	scoped_guard(mutex, &pf->adapter->ports.lock) {
2698 		list_for_each_entry(port, &pf->adapter->ports.ports,
2699 				    list_node) {
2700 			struct ice_ptp_tx *tx = &port->tx;
2701 
2702 			if (ice_port_has_timestamps(tx))
2703 				return true;
2704 		}
2705 	}
2706 
2707 	return false;
2708 }
2709 
ice_ptp_tx_tstamps_pending(struct ice_pf * pf)2710 bool ice_ptp_tx_tstamps_pending(struct ice_pf *pf)
2711 {
2712 	struct ice_hw *hw = &pf->hw;
2713 	unsigned int i;
2714 
2715 	/* Check software indicator */
2716 	switch (pf->ptp.tx_interrupt_mode) {
2717 	case ICE_PTP_TX_INTERRUPT_NONE:
2718 		return false;
2719 	case ICE_PTP_TX_INTERRUPT_SELF:
2720 		if (ice_port_has_timestamps(&pf->ptp.port.tx))
2721 			return true;
2722 		break;
2723 	case ICE_PTP_TX_INTERRUPT_ALL:
2724 		if (ice_any_port_has_timestamps(pf))
2725 			return true;
2726 		break;
2727 	default:
2728 		WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
2729 			  pf->ptp.tx_interrupt_mode);
2730 		break;
2731 	}
2732 
2733 	/* Check hardware indicator */
2734 	for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) {
2735 		u64 tstamp_ready = 0;
2736 		int err;
2737 
2738 		err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
2739 		if (err || tstamp_ready)
2740 			return true;
2741 	}
2742 
2743 	return false;
2744 }
2745 
/**
 * ice_ptp_ts_irq - Process the PTP Tx timestamps in IRQ context
 * @pf: Board private structure
 *
 * Return: IRQ_WAKE_THREAD if Tx timestamp read has to be handled in the bottom
 *         half of the interrupt and IRQ_HANDLED otherwise.
 */
irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	switch (hw->mac_type) {
	case ICE_MAC_E810:
		/* E810 capable of low latency timestamping with interrupt can
		 * request a single timestamp in the top half and wait for
		 * a second LL TS interrupt from the FW when it's ready.
		 */
		if (hw->dev_caps.ts_dev_info.ts_ll_int_read) {
			struct ice_ptp_tx *tx = &pf->ptp.port.tx;
			u8 idx, last;

			if (!ice_pf_state_is_nominal(pf))
				return IRQ_HANDLED;

			spin_lock(&tx->lock);
			if (tx->init) {
				/* Request the next outstanding index after
				 * the last one read, wrapping around the
				 * tracker if necessary.
				 */
				last = tx->last_ll_ts_idx_read + 1;
				idx = find_next_bit_wrap(tx->in_use, tx->len,
							 last);
				if (idx != tx->len)
					ice_ptp_req_tx_single_tstamp(tx, idx);
			}
			spin_unlock(&tx->lock);

			return IRQ_HANDLED;
		}
		fallthrough; /* non-LL_TS E810 */
	case ICE_MAC_GENERIC:
	case ICE_MAC_GENERIC_3K_E825:
		/* All other devices process timestamps in the bottom half due
		 * to sleeping or polling.
		 */
		if (!ice_ptp_pf_handles_tx_interrupt(pf))
			return IRQ_HANDLED;

		set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
		return IRQ_WAKE_THREAD;
	case ICE_MAC_E830:
		/* E830 can read timestamps in the top half using rd32() */
		ice_ptp_process_ts(pf);

		if (ice_ptp_tx_tstamps_pending(pf)) {
			/* Process outstanding Tx timestamps. If there
			 * is more work, re-arm the interrupt to trigger again.
			 */
			wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
			ice_flush(hw);
		}
		return IRQ_HANDLED;
	default:
		return IRQ_HANDLED;
	}
}
2809 
/**
 * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timstamp interrupt
 * @pf: Board private structure
 *
 * The device PHY issues Tx timestamp interrupts to the driver for processing
 * timestamp data from the PHY. It will not interrupt again until all
 * current timestamp data is read. In rare circumstances, it is possible that
 * the driver fails to read all outstanding data.
 *
 * To avoid getting permanently stuck, periodically check if the PHY has
 * outstanding timestamp data. If so, trigger an interrupt from software to
 * process this data.
 */
static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	bool trigger_oicr = false;
	unsigned int i;

	/* NOTE(review): only trackers with a ready bitmap and PFs owning the
	 * source timer appear to need this recovery — confirm against callers.
	 */
	if (!pf->ptp.port.tx.has_ready_bitmap)
		return;

	if (!ice_pf_src_tmr_owned(pf))
		return;

	/* Scan every quad's tstamp-ready indicator for unread timestamps */
	for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) {
		u64 tstamp_ready;
		int err;

		err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
		if (!err && tstamp_ready) {
			trigger_oicr = true;
			break;
		}
	}

	if (trigger_oicr) {
		/* Trigger a software interrupt, to ensure this data
		 * gets processed.
		 */
		dev_dbg(dev, "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n");

		wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
		ice_flush(hw);
	}
}
2857 
ice_ptp_periodic_work(struct kthread_work * work)2858 static void ice_ptp_periodic_work(struct kthread_work *work)
2859 {
2860 	struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
2861 	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
2862 	int err;
2863 
2864 	if (pf->ptp.state != ICE_PTP_READY)
2865 		return;
2866 
2867 	err = ice_ptp_update_cached_phctime(pf);
2868 
2869 	ice_ptp_maybe_trigger_tx_interrupt(pf);
2870 
2871 	/* Run twice a second or reschedule if phc update failed */
2872 	kthread_queue_delayed_work(ptp->kworker, &ptp->work,
2873 				   msecs_to_jiffies(err ? 10 : 500));
2874 }
2875 
2876 /**
2877  * ice_ptp_queue_work - Queue PTP periodic work for a PF
2878  * @pf: Board private structure
2879  *
2880  * Helper function to queue PTP periodic work after VSI rebuild completes.
2881  * This ensures that PTP work only runs when VSI structures are ready.
2882  */
ice_ptp_queue_work(struct ice_pf * pf)2883 void ice_ptp_queue_work(struct ice_pf *pf)
2884 {
2885 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags) &&
2886 	    pf->ptp.state == ICE_PTP_READY)
2887 		kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 0);
2888 }
2889 
2890 /**
2891  * ice_ptp_prepare_rebuild_sec - Prepare second NAC for PTP reset or rebuild
2892  * @pf: Board private structure
2893  * @rebuild: rebuild if true, prepare if false
2894  * @reset_type: the reset type being performed
2895  */
ice_ptp_prepare_rebuild_sec(struct ice_pf * pf,bool rebuild,enum ice_reset_req reset_type)2896 static void ice_ptp_prepare_rebuild_sec(struct ice_pf *pf, bool rebuild,
2897 					enum ice_reset_req reset_type)
2898 {
2899 	struct list_head *entry;
2900 
2901 	list_for_each(entry, &pf->adapter->ports.ports) {
2902 		struct ice_ptp_port *port = list_entry(entry,
2903 						       struct ice_ptp_port,
2904 						       list_node);
2905 		struct ice_pf *peer_pf = ptp_port_to_pf(port);
2906 
2907 		if (!ice_is_primary(&peer_pf->hw)) {
2908 			if (rebuild) {
2909 				/* TODO: When implementing rebuild=true:
2910 				 * 1. Ensure secondary PFs' VSIs are rebuilt
2911 				 * 2. Call ice_ptp_queue_work(peer_pf) after VSI rebuild
2912 				 */
2913 				ice_ptp_rebuild(peer_pf, reset_type);
2914 			} else {
2915 				ice_ptp_prepare_for_reset(peer_pf, reset_type);
2916 			}
2917 		}
2918 	}
2919 }
2920 
/**
 * ice_ptp_prepare_for_reset - Prepare PTP for reset
 * @pf: Board private structure
 * @reset_type: the reset type being performed
 *
 * Quiesce PTP before a reset: stop timestamping and the periodic worker.
 * For resets larger than a PFR, also tear down the Tx tracker, periodic
 * outputs and the source clock, and record the wall-clock time so the PHC
 * can be restored after the reset.
 */
void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_ptp *ptp = &pf->ptp;
	struct ice_hw *hw = &pf->hw;
	u8 src_tmr;

	/* Nothing to do unless PTP is fully up; also makes this idempotent */
	if (ptp->state != ICE_PTP_READY)
		return;

	ptp->state = ICE_PTP_RESETTING;

	/* Disable timestamping for both Tx and Rx */
	ice_ptp_disable_timestamp_mode(pf);

	kthread_cancel_delayed_work_sync(&ptp->work);

	/* A PF reset preserves the clock hardware; stop here */
	if (reset_type == ICE_RESET_PFR)
		return;

	/* On E825 multi-NAC parts the owner must also prepare secondary PFs */
	if (ice_pf_src_tmr_owned(pf) && hw->mac_type == ICE_MAC_GENERIC_3K_E825)
		ice_ptp_prepare_rebuild_sec(pf, false, reset_type);

	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);

	/* Disable periodic outputs */
	ice_ptp_disable_all_perout(pf);

	src_tmr = ice_get_ptp_src_clock_index(&pf->hw);

	/* Disable source clock */
	wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);

	/* Acquire PHC and system timer to restore after reset */
	ptp->reset_time = ktime_get_real_ns();
}
2961 
/**
 * ice_ptp_rebuild_owner - Initialize PTP clock owner after reset
 * @pf: Board private structure
 *
 * Companion function for ice_ptp_rebuild() which handles tasks that only the
 * PTP clock owner instance should perform.
 *
 * Return: 0 on success, -EBUSY if the global hardware semaphore cannot be
 * acquired, or a negative error code from the underlying PHC/PHY operations.
 */
static int ice_ptp_rebuild_owner(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	struct ice_hw *hw = &pf->hw;
	struct timespec64 ts;
	u64 time_diff;
	int err;

	/* Re-initialize the PHC and its PLL before touching timer values */
	err = ice_ptp_init_phc(hw);
	if (err)
		return err;

	err = ice_tspll_init(hw);
	if (err)
		return err;

	/* Acquire the global hardware lock */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		return err;
	}

	/* Write the increment time value to PHY and LAN */
	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
	if (err)
		goto err_unlock;

	/* Write the initial Time value to PHY and LAN using the cached PHC
	 * time before the reset and time difference between stopping and
	 * starting the clock.
	 */
	if (ptp->cached_phc_time) {
		time_diff = ktime_get_real_ns() - ptp->reset_time;
		ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);
	} else {
		/* No cached PHC time; fall back to current wall-clock time */
		ts = ktime_to_timespec64(ktime_get_real());
	}
	err = ice_ptp_write_init(pf, &ts);
	if (err)
		goto err_unlock;

	/* Release the global hardware lock */
	ice_ptp_unlock(hw);

	/* Flush software tracking of any outstanding timestamps since we're
	 * about to flush the PHY timestamp block.
	 */
	ice_ptp_flush_all_tx_tracker(pf);

	/* Enable quad interrupts */
	err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
	if (err)
		return err;

	ice_ptp_restart_all_phy(pf);

	/* Re-enable all periodic outputs and external timestamp events */
	ice_ptp_enable_all_perout(pf);
	ice_ptp_enable_all_extts(pf);

	return 0;

err_unlock:
	ice_ptp_unlock(hw);
	return err;
}
3035 
3036 /**
3037  * ice_ptp_rebuild - Initialize PTP hardware clock support after reset
3038  * @pf: Board private structure
3039  * @reset_type: the reset type being performed
3040  */
ice_ptp_rebuild(struct ice_pf * pf,enum ice_reset_req reset_type)3041 void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
3042 {
3043 	struct ice_ptp *ptp = &pf->ptp;
3044 	int err;
3045 
3046 	if (ptp->state == ICE_PTP_READY) {
3047 		ice_ptp_prepare_for_reset(pf, reset_type);
3048 	} else if (ptp->state != ICE_PTP_RESETTING) {
3049 		err = -EINVAL;
3050 		dev_err(ice_pf_to_dev(pf), "PTP was not initialized\n");
3051 		goto err;
3052 	}
3053 
3054 	if (ice_pf_src_tmr_owned(pf) && reset_type != ICE_RESET_PFR) {
3055 		err = ice_ptp_rebuild_owner(pf);
3056 		if (err)
3057 			goto err;
3058 	}
3059 
3060 	ptp->state = ICE_PTP_READY;
3061 
3062 	dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
3063 	return;
3064 
3065 err:
3066 	ptp->state = ICE_PTP_ERROR;
3067 	dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
3068 }
3069 
ice_ptp_setup_adapter(struct ice_pf * pf)3070 static int ice_ptp_setup_adapter(struct ice_pf *pf)
3071 {
3072 	if (!ice_pf_src_tmr_owned(pf) || !ice_is_primary(&pf->hw))
3073 		return -EPERM;
3074 
3075 	pf->adapter->ctrl_pf = pf;
3076 
3077 	return 0;
3078 }
3079 
ice_ptp_setup_pf(struct ice_pf * pf)3080 static int ice_ptp_setup_pf(struct ice_pf *pf)
3081 {
3082 	struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
3083 	struct ice_ptp *ptp = &pf->ptp;
3084 
3085 	if (!ctrl_ptp) {
3086 		dev_info(ice_pf_to_dev(pf),
3087 			 "PTP unavailable: no controlling PF\n");
3088 		return -EOPNOTSUPP;
3089 	}
3090 
3091 	if (pf->hw.mac_type == ICE_MAC_UNKNOWN)
3092 		return -ENODEV;
3093 
3094 	INIT_LIST_HEAD(&ptp->port.list_node);
3095 	mutex_lock(&pf->adapter->ports.lock);
3096 
3097 	list_add(&ptp->port.list_node,
3098 		 &pf->adapter->ports.ports);
3099 	mutex_unlock(&pf->adapter->ports.lock);
3100 
3101 	return 0;
3102 }
3103 
ice_ptp_cleanup_pf(struct ice_pf * pf)3104 static void ice_ptp_cleanup_pf(struct ice_pf *pf)
3105 {
3106 	struct ice_ptp *ptp = &pf->ptp;
3107 
3108 	if (pf->hw.mac_type != ICE_MAC_UNKNOWN) {
3109 		mutex_lock(&pf->adapter->ports.lock);
3110 		list_del(&ptp->port.list_node);
3111 		mutex_unlock(&pf->adapter->ports.lock);
3112 	}
3113 }
3114 
3115 /**
3116  * ice_ptp_clock_index - Get the PTP clock index for this device
3117  * @pf: Board private structure
3118  *
3119  * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock
3120  * is associated.
3121  */
ice_ptp_clock_index(struct ice_pf * pf)3122 int ice_ptp_clock_index(struct ice_pf *pf)
3123 {
3124 	struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
3125 	struct ptp_clock *clock;
3126 
3127 	if (!ctrl_ptp)
3128 		return -1;
3129 	clock = ctrl_ptp->clock;
3130 
3131 	return clock ? ptp_clock_index(clock) : -1;
3132 }
3133 
/**
 * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
 * @pf: Board private structure
 *
 * Setup and initialize a PTP clock device that represents the device hardware
 * clock. Save the clock index for other functions connected to the same
 * hardware resource.
 *
 * Return: 0 on success, -EBUSY if the global hardware semaphore cannot be
 * acquired, or a negative error code from PHC/PLL/clock initialization.
 */
static int ice_ptp_init_owner(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	struct timespec64 ts;
	int err;

	err = ice_ptp_init_phc(hw);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n",
			err);
		return err;
	}

	err = ice_tspll_init(hw);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "Failed to initialize CGU, status %d\n",
			err);
		return err;
	}

	/* Acquire the global hardware lock */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto err_exit;
	}

	/* Write the increment time value to PHY and LAN */
	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
	if (err)
		goto err_unlock;

	/* Seed the PHC from the current wall-clock time */
	ts = ktime_to_timespec64(ktime_get_real());
	/* Write the initial Time value to PHY and LAN */
	err = ice_ptp_write_init(pf, &ts);
	if (err)
		goto err_unlock;

	/* Release the global hardware lock */
	ice_ptp_unlock(hw);

	/* Configure PHY interrupt settings */
	err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
	if (err)
		goto err_exit;

	/* Ensure we have a clock device */
	err = ice_ptp_create_clock(pf);
	if (err)
		goto err_clk;

	return 0;
	/* NOTE: err_clk falls through into err_exit; err_unlock is a separate
	 * exit path that must release the hardware semaphore first.
	 */
err_clk:
	pf->ptp.clock = NULL;
err_exit:
	return err;

err_unlock:
	ice_ptp_unlock(hw);
	return err;
}
3202 
3203 /**
3204  * ice_ptp_init_work - Initialize PTP work threads
3205  * @pf: Board private structure
3206  * @ptp: PF PTP structure
3207  */
ice_ptp_init_work(struct ice_pf * pf,struct ice_ptp * ptp)3208 static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
3209 {
3210 	struct kthread_worker *kworker;
3211 
3212 	/* Initialize work functions */
3213 	kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);
3214 
3215 	/* Allocate a kworker for handling work required for the ports
3216 	 * connected to the PTP hardware clock.
3217 	 */
3218 	kworker = kthread_run_worker(0, "ice-ptp-%s",
3219 					dev_name(ice_pf_to_dev(pf)));
3220 	if (IS_ERR(kworker))
3221 		return PTR_ERR(kworker);
3222 
3223 	ptp->kworker = kworker;
3224 
3225 	/* Start periodic work going */
3226 	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
3227 
3228 	return 0;
3229 }
3230 
3231 /**
3232  * ice_ptp_init_port - Initialize PTP port structure
3233  * @pf: Board private structure
3234  * @ptp_port: PTP port structure
3235  *
3236  * Return: 0 on success, -ENODEV on invalid MAC type, -ENOMEM on failed alloc.
3237  */
ice_ptp_init_port(struct ice_pf * pf,struct ice_ptp_port * ptp_port)3238 static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
3239 {
3240 	struct ice_hw *hw = &pf->hw;
3241 
3242 	mutex_init(&ptp_port->ps_lock);
3243 
3244 	switch (hw->mac_type) {
3245 	case ICE_MAC_E810:
3246 	case ICE_MAC_E830:
3247 	case ICE_MAC_GENERIC_3K_E825:
3248 		return ice_ptp_init_tx(pf, &ptp_port->tx, ptp_port->port_num);
3249 	case ICE_MAC_GENERIC:
3250 		kthread_init_delayed_work(&ptp_port->ov_work,
3251 					  ice_ptp_wait_for_offsets);
3252 		return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
3253 					    ptp_port->port_num);
3254 	default:
3255 		return -ENODEV;
3256 	}
3257 }
3258 
3259 /**
3260  * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode
3261  * @pf: Board private structure
3262  *
3263  * Initialize the Tx timestamp interrupt mode for this device. For most device
3264  * types, each PF processes the interrupt and manages its own timestamps. For
3265  * E822-based devices, only the clock owner processes the timestamps. Other
3266  * PFs disable the interrupt and do not process their own timestamps.
3267  */
ice_ptp_init_tx_interrupt_mode(struct ice_pf * pf)3268 static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
3269 {
3270 	switch (pf->hw.mac_type) {
3271 	case ICE_MAC_GENERIC:
3272 	case ICE_MAC_GENERIC_3K_E825:
3273 		/* E82x hardware has the clock owner process timestamps for
3274 		 * all ports.
3275 		 */
3276 		if (ice_pf_src_tmr_owned(pf))
3277 			pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL;
3278 		else
3279 			pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE;
3280 		break;
3281 	default:
3282 		/* other PHY types handle their own Tx interrupt */
3283 		pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF;
3284 	}
3285 }
3286 
/**
 * ice_ptp_init - Initialize PTP hardware clock support
 * @pf: Board private structure
 *
 * Set up the device for interacting with the PTP hardware clock for all
 * functions, both the function that owns the clock hardware, and the
 * functions connected to the clock hardware.
 *
 * The clock owner will allocate and register a ptp_clock with the
 * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work
 * items used for asynchronous work such as Tx timestamps and periodic work.
 */
void ice_ptp_init(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	struct ice_hw *hw = &pf->hw;
	int err;

	ptp->state = ICE_PTP_INITIALIZING;

	/* A negative lane number indicates it was never determined */
	if (hw->lane_num < 0) {
		err = hw->lane_num;
		goto err_exit;
	}
	ptp->port.port_num = hw->lane_num;

	ice_ptp_init_hw(hw);

	ice_ptp_init_tx_interrupt_mode(pf);

	/* If this function owns the clock hardware, it must allocate and
	 * configure the PTP clock device to represent it.
	 */
	if (ice_pf_src_tmr_owned(pf) && ice_is_primary(hw)) {
		err = ice_ptp_setup_adapter(pf);
		if (err)
			goto err_exit;
		err = ice_ptp_init_owner(pf);
		if (err)
			goto err_exit;
	}

	/* Register this port on the adapter list (all PFs, owner or not) */
	err = ice_ptp_setup_pf(pf);
	if (err)
		goto err_exit;

	err = ice_ptp_init_port(pf, &ptp->port);
	if (err)
		goto err_clean_pf;

	/* Start the PHY timestamping block */
	ice_ptp_reset_phy_timestamping(pf);

	/* Configure initial Tx interrupt settings */
	ice_ptp_cfg_tx_interrupt(pf);

	/* Must be READY before the periodic work runs, since the work
	 * bails out in any other state.
	 */
	ptp->state = ICE_PTP_READY;

	err = ice_ptp_init_work(pf, ptp);
	if (err)
		goto err_exit;

	dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
	return;

err_clean_pf:
	mutex_destroy(&ptp->port.ps_lock);
	ice_ptp_cleanup_pf(pf);
err_exit:
	/* If we registered a PTP clock, release it */
	if (pf->ptp.clock) {
		ptp_clock_unregister(ptp->clock);
		pf->ptp.clock = NULL;
	}
	/* Keep ICE_PTP_UNINIT state to avoid ambiguity at driver unload
	 * and to avoid duplicated resources release.
	 */
	ptp->state = ICE_PTP_UNINIT;
	dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
}
3367 
/**
 * ice_ptp_release - Disable the driver/HW support and unregister the clock
 * @pf: Board private structure
 *
 * This function handles the cleanup work required from the initialization by
 * clearing out the important information and unregistering the clock
 */
void ice_ptp_release(struct ice_pf *pf)
{
	/* Nothing was initialized (or init already failed and cleaned up) */
	if (pf->ptp.state == ICE_PTP_UNINIT)
		return;

	/* Partially-initialized (e.g. ICE_PTP_ERROR) state: release only the
	 * resources that are guaranteed to exist at that point.
	 */
	if (pf->ptp.state != ICE_PTP_READY) {
		mutex_destroy(&pf->ptp.port.ps_lock);
		ice_ptp_cleanup_pf(pf);
		if (pf->ptp.clock) {
			ptp_clock_unregister(pf->ptp.clock);
			pf->ptp.clock = NULL;
		}
		return;
	}

	/* Mark UNINIT first so the periodic work and IRQ paths stand down */
	pf->ptp.state = ICE_PTP_UNINIT;

	/* Disable timestamping for both Tx and Rx */
	ice_ptp_disable_timestamp_mode(pf);

	ice_ptp_cleanup_pf(pf);

	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);

	ice_ptp_disable_all_extts(pf);

	/* Cancel the periodic work before tearing down its worker */
	kthread_cancel_delayed_work_sync(&pf->ptp.work);

	ice_ptp_port_phy_stop(&pf->ptp.port);
	mutex_destroy(&pf->ptp.port.ps_lock);
	if (pf->ptp.kworker) {
		kthread_destroy_worker(pf->ptp.kworker);
		pf->ptp.kworker = NULL;
	}

	/* Only the clock owner holds a registered clock device */
	if (!pf->ptp.clock)
		return;

	/* Disable periodic outputs */
	ice_ptp_disable_all_perout(pf);

	ptp_clock_unregister(pf->ptp.clock);
	pf->ptp.clock = NULL;

	dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
}
3421