xref: /linux/drivers/net/ethernet/intel/ice/ice_ptp.c (revision 8fdb05de0e2db89d8f56144c60ab784812e8c3b7)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021, Intel Corporation. */
3 
4 #include "ice.h"
5 #include "ice_lib.h"
6 #include "ice_trace.h"
7 
8 static const char ice_pin_names[][64] = {
9 	"SDP0",
10 	"SDP1",
11 	"SDP2",
12 	"SDP3",
13 	"TIME_SYNC",
14 	"1PPS"
15 };
16 
17 static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = {
18 	/* name,        gpio,       delay */
19 	{  TIME_SYNC, {  4, -1 }, { 0,  0 }},
20 	{  ONE_PPS,   { -1,  5 }, { 0, 11 }},
21 };
22 
23 static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = {
24 	/* name,        gpio,       delay */
25 	{  SDP0,      {  0,  0 }, { 15, 14 }},
26 	{  SDP1,      {  1,  1 }, { 15, 14 }},
27 	{  SDP2,      {  2,  2 }, { 15, 14 }},
28 	{  SDP3,      {  3,  3 }, { 15, 14 }},
29 	{  TIME_SYNC, {  4, -1 }, { 11,  0 }},
30 	{  ONE_PPS,   { -1,  5 }, {  0,  9 }},
31 };
32 
33 static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = {
34 	/* name,        gpio,       delay */
35 	{  SDP0,      {  0,  0 }, { 0, 1 }},
36 	{  SDP1,      {  1,  1 }, { 0, 1 }},
37 	{  SDP2,      {  2,  2 }, { 0, 1 }},
38 	{  SDP3,      {  3,  3 }, { 0, 1 }},
39 	{  ONE_PPS,   { -1,  5 }, { 0, 1 }},
40 };
41 
42 static const char ice_pin_names_dpll[][64] = {
43 	"SDP20",
44 	"SDP21",
45 	"SDP22",
46 	"SDP23",
47 };
48 
49 static const struct ice_ptp_pin_desc ice_pin_desc_dpll[] = {
50 	/* name,   gpio,       delay */
51 	{  SDP0, { -1,  0 }, { 0, 1 }},
52 	{  SDP1, {  1, -1 }, { 0, 0 }},
53 	{  SDP2, { -1,  2 }, { 0, 1 }},
54 	{  SDP3, {  3, -1 }, { 0, 0 }},
55 };
56 
57 static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf)
58 {
59 	return !pf->adapter ? NULL : pf->adapter->ctrl_pf;
60 }
61 
62 static struct ice_ptp *ice_get_ctrl_ptp(struct ice_pf *pf)
63 {
64 	struct ice_pf *ctrl_pf = ice_get_ctrl_pf(pf);
65 
66 	return !ctrl_pf ? NULL : &ctrl_pf->ptp;
67 }
68 
69 /**
70  * ice_ptp_find_pin_idx - Find pin index in ptp_pin_desc
71  * @pf: Board private structure
72  * @func: Pin function
73  * @chan: GPIO channel
74  *
75  * Return: index of the matching pin when present, -1 otherwise
76  */
77 static int ice_ptp_find_pin_idx(struct ice_pf *pf, enum ptp_pin_function func,
78 				unsigned int chan)
79 {
80 	const struct ptp_clock_info *info = &pf->ptp.info;
81 	int i;
82 
83 	for (i = 0; i < info->n_pins; i++) {
84 		if (info->pin_config[i].func == func &&
85 		    info->pin_config[i].chan == chan)
86 			return i;
87 	}
88 
89 	return -1;
90 }
91 
92 /**
93  * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
94  * @pf: Board private structure
95  *
96  * Program the device to respond appropriately to the Tx timestamp interrupt
97  * cause.
98  */
99 static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf)
100 {
101 	struct ice_hw *hw = &pf->hw;
102 	bool enable;
103 	u32 val;
104 
105 	switch (pf->ptp.tx_interrupt_mode) {
106 	case ICE_PTP_TX_INTERRUPT_ALL:
107 		/* React to interrupts across all quads. */
108 		wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
109 		enable = true;
110 		break;
111 	case ICE_PTP_TX_INTERRUPT_NONE:
112 		/* Do not react to interrupts on any quad. */
113 		wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
114 		enable = false;
115 		break;
116 	case ICE_PTP_TX_INTERRUPT_SELF:
117 	default:
118 		enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON;
119 		break;
120 	}
121 
122 	/* Configure the Tx timestamp interrupt */
123 	val = rd32(hw, PFINT_OICR_ENA);
124 	if (enable)
125 		val |= PFINT_OICR_TSYN_TX_M;
126 	else
127 		val &= ~PFINT_OICR_TSYN_TX_M;
128 	wr32(hw, PFINT_OICR_ENA, val);
129 }
130 
131 /**
132  * ice_set_rx_tstamp - Enable or disable Rx timestamping
133  * @pf: The PF pointer to search in
134  * @on: bool value for whether timestamps are enabled or disabled
135  */
136 static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
137 {
138 	struct ice_vsi *vsi;
139 	u16 i;
140 
141 	vsi = ice_get_main_vsi(pf);
142 	if (!vsi || !vsi->rx_rings)
143 		return;
144 
145 	/* Set the timestamp flag for all the Rx rings */
146 	ice_for_each_rxq(vsi, i) {
147 		if (!vsi->rx_rings[i])
148 			continue;
149 		vsi->rx_rings[i]->ptp_rx = on;
150 	}
151 }
152 
153 /**
154  * ice_ptp_disable_timestamp_mode - Disable current timestamp mode
155  * @pf: Board private structure
156  *
157  * Called during preparation for reset to temporarily disable timestamping on
158  * the device. Called during remove to disable timestamping while cleaning up
159  * driver resources.
160  */
161 static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf)
162 {
163 	struct ice_hw *hw = &pf->hw;
164 	u32 val;
165 
166 	val = rd32(hw, PFINT_OICR_ENA);
167 	val &= ~PFINT_OICR_TSYN_TX_M;
168 	wr32(hw, PFINT_OICR_ENA, val);
169 
170 	ice_set_rx_tstamp(pf, false);
171 }
172 
173 /**
174  * ice_ptp_restore_timestamp_mode - Restore timestamp configuration
175  * @pf: Board private structure
176  *
177  * Called at the end of rebuild to restore timestamp configuration after
178  * a device reset.
179  */
180 void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
181 {
182 	struct ice_hw *hw = &pf->hw;
183 	bool enable_rx;
184 
185 	ice_ptp_cfg_tx_interrupt(pf);
186 
187 	enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
188 	ice_set_rx_tstamp(pf, enable_rx);
189 
190 	/* Trigger an immediate software interrupt to ensure that timestamps
191 	 * which occurred during reset are handled now.
192 	 */
193 	wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
194 	ice_flush(hw);
195 }
196 
197 /**
198  * ice_ptp_read_src_clk_reg - Read the source clock register
199  * @pf: Board private structure
200  * @sts: Optional parameter for holding a pair of system timestamps from
201  *       the system clock. Will be ignored if NULL is given.
202  */
203 u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
204 			     struct ptp_system_timestamp *sts)
205 {
206 	struct ice_hw *hw = &pf->hw;
207 	u32 hi, lo, lo2;
208 	u8 tmr_idx;
209 
210 	if (!ice_is_primary(hw))
211 		hw = ice_get_primary_hw(pf);
212 
213 	tmr_idx = ice_get_ptp_src_clock_index(hw);
214 	guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
215 	/* Read the system timestamp pre PHC read */
216 	ptp_read_system_prets(sts);
217 
218 	if (hw->mac_type == ICE_MAC_E830) {
219 		u64 clk_time = rd64(hw, E830_GLTSYN_TIME_L(tmr_idx));
220 
221 		/* Read the system timestamp post PHC read */
222 		ptp_read_system_postts(sts);
223 
224 		return clk_time;
225 	}
226 
227 	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
228 
229 	/* Read the system timestamp post PHC read */
230 	ptp_read_system_postts(sts);
231 
232 	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
233 	lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));
234 
235 	if (lo2 < lo) {
236 		/* if TIME_L rolled over read TIME_L again and update
237 		 * system timestamps
238 		 */
239 		ptp_read_system_prets(sts);
240 		lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
241 		ptp_read_system_postts(sts);
242 		hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
243 	}
244 
245 	return ((u64)hi << 32) | lo;
246 }
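
/* Worked example of the rollover handling above: if the first TIME_L read
 * returns lo = 0xFFFFFFF0 and the 32-bit low word wraps before TIME_H is
 * read, hi already reflects the incremented upper word while lo is stale,
 * and lo2 (e.g. 0x00000005) comes back smaller than lo. Re-reading TIME_L
 * and TIME_H under the same lock then yields a consistent {hi, lo} pair.
 */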
247 
248 /**
249  * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
250  * @cached_phc_time: recently cached copy of PHC time
251  * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
252  *
253  * Hardware captures timestamps which contain only 32 bits of nominal
254  * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
255  * Note that the captured timestamp values may be 40 bits, but the lower
256  * 8 bits are sub-nanoseconds and generally discarded.
257  *
258  * Extend the 32bit nanosecond timestamp using the following algorithm and
259  * assumptions:
260  *
261  * 1) have a recently cached copy of the PHC time
262  * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1
263  *    seconds) before or after the PHC time was captured.
264  * 3) calculate the delta between the cached time and the timestamp
265  * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
266  *    captured after the PHC time. In this case, the full timestamp is just
267  *    the cached PHC time plus the delta.
268  * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
269  *    timestamp was captured *before* the PHC time, i.e. because the PHC
270  *    cache was updated after the timestamp was captured by hardware. In this
271  *    case, the full timestamp is the cached time minus the inverse delta.
272  *
273  * This algorithm works even if the PHC time was updated after a Tx timestamp
274  * was requested, but before the Tx timestamp event was reported from
275  * hardware.
276  *
277  * This calculation primarily relies on keeping the cached PHC time up to
278  * date. If the timestamp was captured more than 2^31 nanoseconds after the
279  * PHC time, it is possible that the lower 32bits of PHC time have
280  * overflowed more than once, and we might generate an incorrect timestamp.
281  *
282  * This is prevented by (a) periodically updating the cached PHC time once
283  * a second, and (b) discarding any Tx timestamp packet if it has waited for
284  * a timestamp for more than one second.
285  */
286 static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
287 {
288 	u32 delta, phc_time_lo;
289 	u64 ns;
290 
291 	/* Extract the lower 32 bits of the PHC time */
292 	phc_time_lo = (u32)cached_phc_time;
293 
294 	/* Calculate the delta between the lower 32bits of the cached PHC
295 	 * time and the in_tstamp value
296 	 */
297 	delta = (in_tstamp - phc_time_lo);
298 
299 	/* Do not assume that the in_tstamp is always more recent than the
300 	 * cached PHC time. If the delta is large, it indicates that the
301 	 * in_tstamp was taken in the past, and should be converted
302 	 * forward.
303 	 */
304 	if (delta > (U32_MAX / 2)) {
305 		/* reverse the delta calculation here */
306 		delta = (phc_time_lo - in_tstamp);
307 		ns = cached_phc_time - delta;
308 	} else {
309 		ns = cached_phc_time + delta;
310 	}
311 
312 	return ns;
313 }
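
/* Worked example of the extension math above, with a cached PHC time of
 * 0x0000001200000000 (phc_time_lo == 0x00000000):
 *
 *  - in_tstamp = 0x00000005: delta = 5 < 2^31, so the timestamp is newer
 *    than the cache and ns = 0x0000001200000005.
 *  - in_tstamp = 0xFFFFFFF0: delta = 0xFFFFFFF0 > 2^31, so the timestamp is
 *    treated as 16 ns older than the cache and ns = 0x00000011FFFFFFF0.
 */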
314 
315 /**
316  * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
317  * @pf: Board private structure
318  * @in_tstamp: Ingress/egress 40b timestamp value
319  *
320  * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
321  * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
322  *
323  *  *--------------------------------------------------------------*
324  *  | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
325  *  *--------------------------------------------------------------*
326  *
327  * The low bit is an indicator of whether the timestamp is valid. The next
328  * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
329  * and the remaining 32 bits are the lower 32 bits of the PHC timer.
330  *
331  * It is assumed that the caller verifies the timestamp is valid prior to
332  * calling this function.
333  *
334  * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
335  * time stored in the device private PTP structure as the basis for timestamp
336  * extension.
337  *
338  * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
339  * algorithm.
340  */
341 static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
342 {
343 	const u64 mask = GENMASK_ULL(31, 0);
344 	unsigned long discard_time;
345 
346 	/* Discard the hardware timestamp if the cached PHC time is too old */
347 	discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
348 	if (time_is_before_jiffies(discard_time)) {
349 		pf->ptp.tx_hwtstamp_discarded++;
350 		return 0;
351 	}
352 
353 	return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
354 				     (in_tstamp >> 8) & mask);
355 }
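
/* For example, a raw 40-bit value of 0x1234567801 carries a set valid bit
 * (bit 0), zero sub-nanosecond bits (bits 1-7), and 0x12345678 in bits 8-39;
 * (in_tstamp >> 8) & GENMASK_ULL(31, 0) recovers that 32-bit nominal
 * nanosecond value before it is extended against the cached PHC time.
 */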
356 
357 /**
358  * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps
359  * @tx: the PTP Tx timestamp tracker to check
360  *
361  * Check that a given PTP Tx timestamp tracker is up, i.e. that it is ready
362  * to accept new timestamp requests.
363  *
364  * Assumes the tx->lock spinlock is already held.
365  */
366 static bool
367 ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
368 {
369 	lockdep_assert_held(&tx->lock);
370 
371 	return tx->init && !tx->calibrating;
372 }
373 
374 /**
375  * ice_ptp_req_tx_single_tstamp - Request Tx timestamp for a port from FW
376  * @tx: the PTP Tx timestamp tracker
377  * @idx: index of the timestamp to request
378  */
379 void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
380 {
381 	struct ice_e810_params *params;
382 	struct ice_ptp_port *ptp_port;
383 	unsigned long flags;
384 	struct sk_buff *skb;
385 	struct ice_pf *pf;
386 
387 	if (!tx->init)
388 		return;
389 
390 	ptp_port = container_of(tx, struct ice_ptp_port, tx);
391 	pf = ptp_port_to_pf(ptp_port);
392 	params = &pf->hw.ptp.phy.e810;
393 
394 	/* Drop packets which have waited for more than 2 seconds */
395 	if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
396 		/* Count the number of Tx timestamps that timed out */
397 		pf->ptp.tx_hwtstamp_timeouts++;
398 
399 		skb = tx->tstamps[idx].skb;
400 		tx->tstamps[idx].skb = NULL;
401 		clear_bit(idx, tx->in_use);
402 
403 		dev_kfree_skb_any(skb);
404 		return;
405 	}
406 
407 	ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
408 
409 	spin_lock_irqsave(&params->atqbal_wq.lock, flags);
410 
411 	params->atqbal_flags |= ATQBAL_FLAGS_INTR_IN_PROGRESS;
412 
413 	/* Write TS index to read to the PF register so the FW can read it */
414 	wr32(&pf->hw, REG_LL_PROXY_H,
415 	     REG_LL_PROXY_H_TS_INTR_ENA | FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) |
416 	     REG_LL_PROXY_H_EXEC);
417 	tx->last_ll_ts_idx_read = idx;
418 
419 	spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
420 }
421 
422 /**
423  * ice_ptp_complete_tx_single_tstamp - Complete Tx timestamp for a port
424  * @tx: the PTP Tx timestamp tracker
425  */
426 void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
427 {
428 	struct skb_shared_hwtstamps shhwtstamps = {};
429 	u8 idx = tx->last_ll_ts_idx_read;
430 	struct ice_e810_params *params;
431 	struct ice_ptp_port *ptp_port;
432 	u64 raw_tstamp, tstamp;
433 	bool drop_ts = false;
434 	struct sk_buff *skb;
435 	unsigned long flags;
436 	struct device *dev;
437 	struct ice_pf *pf;
438 	u32 reg_ll_high;
439 
440 	if (!tx->init || tx->last_ll_ts_idx_read < 0)
441 		return;
442 
443 	ptp_port = container_of(tx, struct ice_ptp_port, tx);
444 	pf = ptp_port_to_pf(ptp_port);
445 	dev = ice_pf_to_dev(pf);
446 	params = &pf->hw.ptp.phy.e810;
447 
448 	ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
449 
450 	spin_lock_irqsave(&params->atqbal_wq.lock, flags);
451 
452 	if (!(params->atqbal_flags & ATQBAL_FLAGS_INTR_IN_PROGRESS))
453 		dev_dbg(dev, "%s: low latency interrupt request not in progress?\n",
454 			__func__);
455 
456 	/* Read the low 32 bit value */
457 	raw_tstamp = rd32(&pf->hw, REG_LL_PROXY_L);
458 	/* Read the status together with high TS part */
459 	reg_ll_high = rd32(&pf->hw, REG_LL_PROXY_H);
460 
461 	/* Wake up threads waiting on low latency interface */
462 	params->atqbal_flags &= ~ATQBAL_FLAGS_INTR_IN_PROGRESS;
463 
464 	wake_up_locked(&params->atqbal_wq);
465 
466 	spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
467 
468 	/* When the bit is cleared, the TS is ready in the register */
469 	if (reg_ll_high & REG_LL_PROXY_H_EXEC) {
470 		dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready");
471 		return;
472 	}
473 
474 	/* The high 8 bits of the TS are in bits 16:23 of the register */
475 	raw_tstamp |= ((u64)FIELD_GET(REG_LL_PROXY_H_TS_HIGH, reg_ll_high)) << 32;
476 
477 	/* Devices using this interface always verify that the timestamp differs
478 	 * from the last cached timestamp value.
479 	 */
480 	if (raw_tstamp == tx->tstamps[idx].cached_tstamp)
481 		return;
482 
483 	tx->tstamps[idx].cached_tstamp = raw_tstamp;
484 	clear_bit(idx, tx->in_use);
485 	skb = tx->tstamps[idx].skb;
486 	tx->tstamps[idx].skb = NULL;
487 	if (test_and_clear_bit(idx, tx->stale))
488 		drop_ts = true;
489 
490 	if (!skb)
491 		return;
492 
493 	if (drop_ts) {
494 		dev_kfree_skb_any(skb);
495 		return;
496 	}
497 
498 	/* Extend the timestamp using cached PHC time */
499 	tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
500 	if (tstamp) {
501 		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
502 		ice_trace(tx_tstamp_complete, skb, idx);
503 
504 		/* Count the number of Tx timestamps that succeeded */
505 		pf->ptp.tx_hwtstamp_good++;
506 	}
507 
508 	skb_tstamp_tx(skb, &shhwtstamps);
509 	dev_kfree_skb_any(skb);
510 }
511 
512 /**
513  * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port
514  * @tx: the PTP Tx timestamp tracker
515  *
516  * Process timestamps captured by the PHY associated with this port. To do
517  * this, loop over each index with a waiting skb.
518  *
519  * If a given index has a valid timestamp, perform the following steps:
520  *
521  * 1) check that the timestamp request is not stale
522  * 2) check that a timestamp is ready and available in the PHY memory bank
523  * 3) read and copy the timestamp out of the PHY register
524  * 4) unlock the index by clearing the associated in_use bit
525  * 5) check if the timestamp is stale, and discard if so
526  * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value
527  * 7) send this 64 bit timestamp to the stack
528  *
529  * Note that we do not hold the tracking lock while reading the Tx timestamp.
530  * This is because reading the timestamp requires taking a mutex that might
531  * sleep.
532  *
533  * The only place where we set in_use is when a new timestamp is initiated
534  * with a slot index. This is only called in the hard xmit routine where an
535  * SKB has a request flag set. The only places where we clear this bit is this
536  * function, or during teardown when the Tx timestamp tracker is being
537  * removed. A timestamp index will never be re-used until the in_use bit for
538  * that index is cleared.
539  *
540  * If a Tx thread starts a new timestamp, we might not begin processing it
541  * right away but we will notice it at the end when we re-queue the task.
542  *
543  * If a Tx thread starts a new timestamp just after this function exits, the
544  * interrupt for that timestamp should re-trigger this function once
545  * a timestamp is ready.
546  *
547  * In cases where the PTP hardware clock was directly adjusted, some
548  * timestamps may not be able to safely use the timestamp extension math. In
549  * this case, software will set the stale bit for any outstanding Tx
550  * timestamps when the clock is adjusted. Then this function will discard
551  * those captured timestamps instead of sending them to the stack.
552  *
553  * If a Tx packet has been waiting for more than 2 seconds, it is not possible
554  * to correctly extend the timestamp using the cached PHC time. It is
555  * extremely unlikely that a packet will ever take this long to timestamp. If
556  * we detect a Tx timestamp request that has waited for this long we assume
557  * the packet will never be sent by hardware and discard it without reading
558  * the timestamp register.
559  */
560 static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
561 {
562 	struct ice_ptp_port *ptp_port;
563 	unsigned long flags;
564 	u32 tstamp_good = 0;
565 	struct ice_pf *pf;
566 	struct ice_hw *hw;
567 	u64 tstamp_ready;
568 	bool link_up;
569 	int err;
570 	u8 idx;
571 
572 	ptp_port = container_of(tx, struct ice_ptp_port, tx);
573 	pf = ptp_port_to_pf(ptp_port);
574 	hw = &pf->hw;
575 
576 	if (!tx->init)
577 		return;
578 
579 	/* Read the Tx ready status first */
580 	if (tx->has_ready_bitmap) {
581 		err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
582 		if (err)
583 			return;
584 	}
585 
586 	/* Drop packets if the link went down */
587 	link_up = ptp_port->link_up;
588 
589 	for_each_set_bit(idx, tx->in_use, tx->len) {
590 		struct skb_shared_hwtstamps shhwtstamps = {};
591 		u8 phy_idx = idx + tx->offset;
592 		u64 raw_tstamp = 0, tstamp;
593 		bool drop_ts = !link_up;
594 		struct sk_buff *skb;
595 
596 		/* Drop packets which have waited for more than 2 seconds */
597 		if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
598 			drop_ts = true;
599 
600 			/* Count the number of Tx timestamps that timed out */
601 			pf->ptp.tx_hwtstamp_timeouts++;
602 		}
603 
604 		/* Only read a timestamp from the PHY if it's marked as ready
605 		 * by the tstamp_ready register. This avoids unnecessary
606 		 * reading of timestamps which are not yet valid. This is
607 		 * important as we must read all timestamps which are valid
608 		 * and only timestamps which are valid during each interrupt.
609 		 * If we do not, the hardware logic for generating a new
610 		 * interrupt can get stuck on some devices.
611 		 */
612 		if (tx->has_ready_bitmap &&
613 		    !(tstamp_ready & BIT_ULL(phy_idx))) {
614 			if (drop_ts)
615 				goto skip_ts_read;
616 
617 			continue;
618 		}
619 
620 		ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
621 
622 		err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp);
623 		if (err && !drop_ts)
624 			continue;
625 
626 		ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
627 
628 		/* For PHYs which don't implement a proper timestamp ready
629 		 * bitmap, verify that the timestamp value is different
630 		 * from the last cached timestamp. If it is not, skip this for
631 		 * now assuming it hasn't yet been captured by hardware.
632 		 */
633 		if (!drop_ts && !tx->has_ready_bitmap &&
634 		    raw_tstamp == tx->tstamps[idx].cached_tstamp)
635 			continue;
636 
637 		/* Discard any timestamp value without the valid bit set */
638 		if (!(raw_tstamp & ICE_PTP_TS_VALID))
639 			drop_ts = true;
640 
641 skip_ts_read:
642 		spin_lock_irqsave(&tx->lock, flags);
643 		if (!tx->has_ready_bitmap && raw_tstamp)
644 			tx->tstamps[idx].cached_tstamp = raw_tstamp;
645 		clear_bit(idx, tx->in_use);
646 		skb = tx->tstamps[idx].skb;
647 		tx->tstamps[idx].skb = NULL;
648 		if (test_and_clear_bit(idx, tx->stale))
649 			drop_ts = true;
650 		spin_unlock_irqrestore(&tx->lock, flags);
651 
652 		/* It is unlikely but possible that the SKB will have been
653 		 * flushed at this point due to link change or teardown.
654 		 */
655 		if (!skb)
656 			continue;
657 
658 		if (drop_ts) {
659 			dev_kfree_skb_any(skb);
660 			continue;
661 		}
662 
663 		/* Extend the timestamp using cached PHC time */
664 		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
665 		if (tstamp) {
666 			shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
667 			ice_trace(tx_tstamp_complete, skb, idx);
668 
669 			/* Count the number of Tx timestamps that succeeded */
670 			tstamp_good++;
671 		}
672 
673 		skb_tstamp_tx(skb, &shhwtstamps);
674 		dev_kfree_skb_any(skb);
675 	}
676 
677 	pf->ptp.tx_hwtstamp_good += tstamp_good;
678 }
679 
680 static void ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
681 {
682 	struct ice_ptp_port *port;
683 
684 	mutex_lock(&pf->adapter->ports.lock);
685 	list_for_each_entry(port, &pf->adapter->ports.ports, list_node) {
686 		struct ice_ptp_tx *tx = &port->tx;
687 
688 		if (!tx || !tx->init)
689 			continue;
690 
691 		ice_ptp_process_tx_tstamp(tx);
692 	}
693 	mutex_unlock(&pf->adapter->ports.lock);
694 }
695 
696 /**
697  * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
698  * @tx: Tx tracking structure to initialize
699  *
700  * Assumes that the length has already been initialized. Do not call directly,
701  * use the ice_ptp_init_tx_* instead.
702  */
703 static int
704 ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
705 {
706 	unsigned long *in_use, *stale;
707 	struct ice_tx_tstamp *tstamps;
708 
709 	tstamps = kcalloc(tx->len, sizeof(*tstamps), GFP_KERNEL);
710 	in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
711 	stale = bitmap_zalloc(tx->len, GFP_KERNEL);
712 
713 	if (!tstamps || !in_use || !stale) {
714 		kfree(tstamps);
715 		bitmap_free(in_use);
716 		bitmap_free(stale);
717 
718 		return -ENOMEM;
719 	}
720 
721 	tx->tstamps = tstamps;
722 	tx->in_use = in_use;
723 	tx->stale = stale;
724 	tx->init = 1;
725 	tx->last_ll_ts_idx_read = -1;
726 
727 	spin_lock_init(&tx->lock);
728 
729 	return 0;
730 }
731 
732 /**
733  * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
734  * @pf: Board private structure
735  * @tx: the tracker to flush
736  *
737  * Called during teardown when a Tx tracker is being removed.
738  */
739 static void
740 ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
741 {
742 	struct ice_hw *hw = &pf->hw;
743 	unsigned long flags;
744 	u64 tstamp_ready;
745 	int err;
746 	u8 idx;
747 
748 	err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
749 	if (err) {
750 		dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n",
751 			tx->block, err);
752 
753 		/* If we fail to read the Tx timestamp ready bitmap just
754 		 * skip clearing the PHY timestamps.
755 		 */
756 		tstamp_ready = 0;
757 	}
758 
759 	for_each_set_bit(idx, tx->in_use, tx->len) {
760 		u8 phy_idx = idx + tx->offset;
761 		struct sk_buff *skb;
762 
763 		/* In case this timestamp is ready, we need to clear it. */
764 		if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx)))
765 			ice_clear_phy_tstamp(hw, tx->block, phy_idx);
766 
767 		spin_lock_irqsave(&tx->lock, flags);
768 		skb = tx->tstamps[idx].skb;
769 		tx->tstamps[idx].skb = NULL;
770 		clear_bit(idx, tx->in_use);
771 		clear_bit(idx, tx->stale);
772 		spin_unlock_irqrestore(&tx->lock, flags);
773 
774 		/* Count the number of Tx timestamps flushed */
775 		pf->ptp.tx_hwtstamp_flushed++;
776 
777 		/* Free the SKB after we've cleared the bit */
778 		dev_kfree_skb_any(skb);
779 	}
780 }
781 
782 /**
783  * ice_ptp_mark_tx_tracker_stale - Mark unfinished timestamps as stale
784  * @tx: the tracker to mark
785  *
786  * Mark currently outstanding Tx timestamps as stale. This prevents sending
787  * their timestamp value to the stack. This is required to prevent extending
788  * the 40bit hardware timestamp incorrectly.
789  *
790  * This should be called when the PTP clock is modified such as after a set
791  * time request.
792  */
793 static void
794 ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
795 {
796 	unsigned long flags;
797 
798 	spin_lock_irqsave(&tx->lock, flags);
799 	bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len);
800 	spin_unlock_irqrestore(&tx->lock, flags);
801 }
802 
803 /**
804  * ice_ptp_flush_all_tx_tracker - Flush all timestamp trackers on this clock
805  * @pf: Board private structure
806  *
807  * Called by the clock owner to flush all the Tx timestamp trackers associated
808  * with the clock.
809  */
810 static void
811 ice_ptp_flush_all_tx_tracker(struct ice_pf *pf)
812 {
813 	struct ice_ptp_port *port;
814 
815 	list_for_each_entry(port, &pf->adapter->ports.ports, list_node)
816 		ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx);
817 }
818 
819 /**
820  * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
821  * @pf: Board private structure
822  * @tx: Tx tracking structure to release
823  *
824  * Free memory associated with the Tx timestamp tracker.
825  */
826 static void
827 ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
828 {
829 	unsigned long flags;
830 
831 	spin_lock_irqsave(&tx->lock, flags);
832 	tx->init = 0;
833 	spin_unlock_irqrestore(&tx->lock, flags);
834 
835 	/* wait for potentially outstanding interrupt to complete */
836 	synchronize_irq(pf->oicr_irq.virq);
837 
838 	ice_ptp_flush_tx_tracker(pf, tx);
839 
840 	kfree(tx->tstamps);
841 	tx->tstamps = NULL;
842 
843 	bitmap_free(tx->in_use);
844 	tx->in_use = NULL;
845 
846 	bitmap_free(tx->stale);
847 	tx->stale = NULL;
848 
849 	tx->len = 0;
850 }
851 
852 /**
853  * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
854  * @pf: Board private structure
855  * @tx: the Tx tracking structure to initialize
856  * @port: the port this structure tracks
857  *
858  * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
859  * the timestamp block is shared for all ports in the same quad. To avoid
860  * ports using the same timestamp index, logically break the block of
861  * registers into chunks based on the port number.
862  *
863  * Return: 0 on success, -ENOMEM when out of memory
864  */
865 static int ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx,
866 				u8 port)
867 {
868 	tx->block = ICE_GET_QUAD_NUM(port);
869 	tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
870 	tx->len = INDEX_PER_PORT_E82X;
871 	tx->has_ready_bitmap = 1;
872 
873 	return ice_ptp_alloc_tx_tracker(tx);
874 }
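
/* For example, assuming four ports per quad, port 5 maps to quad 1
 * (ICE_GET_QUAD_NUM(5)) with offset (5 % ICE_PORTS_PER_QUAD) *
 * INDEX_PER_PORT_E82X, so every port in the quad owns a disjoint slice of
 * the shared timestamp index space.
 */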
875 
876 /**
877  * ice_ptp_init_tx - Initialize tracking for Tx timestamps
878  * @pf: Board private structure
879  * @tx: the Tx tracking structure to initialize
880  * @port: the port this structure tracks
881  *
882  * Initialize the Tx timestamp tracker for this PF. For all PHYs except E82X,
883  * each port has its own block of timestamps, independent of the other ports.
884  *
885  * Return: 0 on success, -ENOMEM when out of memory
886  */
887 static int ice_ptp_init_tx(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
888 {
889 	tx->block = port;
890 	tx->offset = 0;
891 	tx->len = INDEX_PER_PORT;
892 
893 	/* The E810 PHY does not provide a timestamp ready bitmap. Instead,
894 	 * verify new timestamps against cached copy of the last read
895 	 * timestamp.
896 	 */
897 	tx->has_ready_bitmap = pf->hw.mac_type != ICE_MAC_E810;
898 
899 	return ice_ptp_alloc_tx_tracker(tx);
900 }
901 
902 /**
903  * ice_ptp_update_cached_phctime - Update the cached PHC time values
904  * @pf: Board specific private structure
905  *
906  * This function updates the system time values which are cached in the PF
907  * structure and the Rx rings.
908  *
909  * This function must be called periodically to ensure that the cached value
910  * is never more than 2 seconds old.
911  *
912  * Note that the cached copy in the PF PTP structure is always updated, even
913  * if we can't update the copy in the Rx rings.
914  *
915  * Return:
916  * * 0 - OK, successfully updated
917  * * -EAGAIN - PF was busy, need to reschedule the update
918  */
919 static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
920 {
921 	struct device *dev = ice_pf_to_dev(pf);
922 	unsigned long update_before;
923 	u64 systime;
924 	int i;
925 
926 	update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
927 	if (pf->ptp.cached_phc_time &&
928 	    time_is_before_jiffies(update_before)) {
929 		unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;
930 
931 		dev_warn(dev, "%u msecs passed between update to cached PHC time\n",
932 			 jiffies_to_msecs(time_taken));
933 		pf->ptp.late_cached_phc_updates++;
934 	}
935 
936 	/* Read the current PHC time */
937 	systime = ice_ptp_read_src_clk_reg(pf, NULL);
938 
939 	/* Update the cached PHC time stored in the PF structure */
940 	WRITE_ONCE(pf->ptp.cached_phc_time, systime);
941 	WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);
942 
943 	if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
944 		return -EAGAIN;
945 
946 	ice_for_each_vsi(pf, i) {
947 		struct ice_vsi *vsi = pf->vsi[i];
948 		int j;
949 
950 		if (!vsi)
951 			continue;
952 
953 		if (vsi->type != ICE_VSI_PF)
954 			continue;
955 
956 		ice_for_each_rxq(vsi, j) {
957 			if (!vsi->rx_rings[j])
958 				continue;
959 			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
960 		}
961 	}
962 	clear_bit(ICE_CFG_BUSY, pf->state);
963 
964 	return 0;
965 }
966 
967 /**
968  * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
969  * @pf: Board specific private structure
970  *
971  * This function must be called when the cached PHC time is no longer valid,
972  * such as after a time adjustment. It marks any currently outstanding Tx
973  * timestamps as stale and updates the cached PHC time for both the PF and Rx
974  * rings.
975  *
976  * If updating the PHC time cannot be done immediately, a warning message is
977  * logged and the work item is scheduled immediately to minimize the window
978  * with a wrong cached timestamp.
979  */
980 static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
981 {
982 	struct device *dev = ice_pf_to_dev(pf);
983 	int err;
984 
985 	/* Update the cached PHC time immediately if possible, otherwise
986 	 * schedule the work item to execute soon.
987 	 */
988 	err = ice_ptp_update_cached_phctime(pf);
989 	if (err) {
990 		/* If another thread is updating the Rx rings, we won't
991 		 * properly reset them here. This could lead to reporting of
992 		 * invalid timestamps, but there isn't much we can do.
993 		 */
994 		dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
995 			 __func__);
996 
997 		/* Queue the work item to update the Rx rings when possible */
998 		kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
999 					   msecs_to_jiffies(10));
1000 	}
1001 
1002 	/* Mark any outstanding timestamps as stale, since they might have
1003 	 * been captured in hardware before the time update. This could lead
1004 	 * to us extending them with the wrong cached value resulting in
1005 	 * incorrect timestamp values.
1006 	 */
1007 	ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx);
1008 }
1009 
1010 /**
1011  * ice_ptp_write_init - Set PHC time to provided value
1012  * @pf: Board private structure
1013  * @ts: timespec structure that holds the new time value
1014  *
1015  * Set the PHC time to the specified time provided in the timespec.
1016  */
1017 static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
1018 {
1019 	u64 ns = timespec64_to_ns(ts);
1020 	struct ice_hw *hw = &pf->hw;
1021 
1022 	return ice_ptp_init_time(hw, ns);
1023 }
1024 
1025 /**
1026  * ice_ptp_write_adj - Adjust PHC clock time atomically
1027  * @pf: Board private structure
1028  * @adj: Adjustment in nanoseconds
1029  *
1030  * Perform an atomic adjustment of the PHC time by the specified number of
1031  * nanoseconds.
1032  */
1033 static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
1034 {
1035 	struct ice_hw *hw = &pf->hw;
1036 
1037 	return ice_ptp_adj_clock(hw, adj);
1038 }
1039 
1040 /**
1041  * ice_base_incval - Get base timer increment value
1042  * @pf: Board private structure
1043  *
1044  * Look up the base timer increment value for this device. The base increment
1045  * value is used to define the nominal clock tick rate. This increment value
1046  * is programmed during device initialization. It is also used as the basis
1047  * for calculating adjustments using scaled_ppm.
1048  */
1049 static u64 ice_base_incval(struct ice_pf *pf)
1050 {
1051 	struct ice_hw *hw = &pf->hw;
1052 	u64 incval;
1053 
1054 	incval = ice_get_base_incval(hw);
1055 
1056 	dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
1057 		incval);
1058 
1059 	return incval;
1060 }
1061 
1062 /**
1063  * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state
1064  * @port: PTP port for which Tx FIFO is checked
1065  */
1066 static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
1067 {
1068 	int offs = port->port_num % ICE_PORTS_PER_QUAD;
1069 	int quad = ICE_GET_QUAD_NUM(port->port_num);
1070 	struct ice_pf *pf;
1071 	struct ice_hw *hw;
1072 	u32 val, phy_sts;
1073 	int err;
1074 
1075 	pf = ptp_port_to_pf(port);
1076 	hw = &pf->hw;
1077 
1078 	if (port->tx_fifo_busy_cnt == FIFO_OK)
1079 		return 0;
1080 
1081 	/* need to read FIFO state */
1082 	if (offs == 0 || offs == 1)
1083 		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS,
1084 					     &val);
1085 	else
1086 		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS,
1087 					     &val);
1088 
1089 	if (err) {
1090 		dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
1091 			port->port_num, err);
1092 		return err;
1093 	}
1094 
1095 	if (offs & 0x1)
1096 		phy_sts = FIELD_GET(Q_REG_FIFO13_M, val);
1097 	else
1098 		phy_sts = FIELD_GET(Q_REG_FIFO02_M, val);
1099 
1100 	if (phy_sts & FIFO_EMPTY) {
1101 		port->tx_fifo_busy_cnt = FIFO_OK;
1102 		return 0;
1103 	}
1104 
1105 	port->tx_fifo_busy_cnt++;
1106 
1107 	dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
1108 		port->tx_fifo_busy_cnt, port->port_num);
1109 
1110 	if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
1111 		dev_dbg(ice_pf_to_dev(pf),
1112 			"Port %d Tx FIFO still not empty; resetting quad %d\n",
1113 			port->port_num, quad);
1114 		ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
1115 		port->tx_fifo_busy_cnt = FIFO_OK;
1116 		return 0;
1117 	}
1118 
1119 	return -EAGAIN;
1120 }
1121 
1122 /**
1123  * ice_ptp_wait_for_offsets - Check for valid Tx and Rx offsets
1124  * @work: Pointer to the kthread_work structure for this task
1125  *
1126  * Check whether hardware has completed measuring the Tx and Rx offset values
1127  * used to configure and enable vernier timestamp calibration.
1128  *
1129  * Once the offset in either direction is measured, configure the associated
1130  * registers with the calibrated offset values and enable timestamping. The Tx
1131  * and Rx directions are configured independently as soon as their associated
1132  * offsets are known.
1133  *
1134  * This function reschedules itself until both Tx and Rx calibration have
1135  * completed.
1136  */
1137 static void ice_ptp_wait_for_offsets(struct kthread_work *work)
1138 {
1139 	struct ice_ptp_port *port;
1140 	struct ice_pf *pf;
1141 	struct ice_hw *hw;
1142 	int tx_err;
1143 	int rx_err;
1144 
1145 	port = container_of(work, struct ice_ptp_port, ov_work.work);
1146 	pf = ptp_port_to_pf(port);
1147 	hw = &pf->hw;
1148 
1149 	if (ice_is_reset_in_progress(pf->state)) {
1150 		/* wait for device driver to complete reset */
1151 		kthread_queue_delayed_work(pf->ptp.kworker,
1152 					   &port->ov_work,
1153 					   msecs_to_jiffies(100));
1154 		return;
1155 	}
1156 
1157 	tx_err = ice_ptp_check_tx_fifo(port);
1158 	if (!tx_err)
1159 		tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num);
1160 	rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num);
1161 	if (tx_err || rx_err) {
1162 		/* Tx and/or Rx offset not yet configured, try again later */
1163 		kthread_queue_delayed_work(pf->ptp.kworker,
1164 					   &port->ov_work,
1165 					   msecs_to_jiffies(100));
1166 		return;
1167 	}
1168 }
1169 
1170 /**
1171  * ice_ptp_port_phy_stop - Stop timestamping for a PHY port
1172  * @ptp_port: PTP port to stop
1173  */
1174 static int
1175 ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
1176 {
1177 	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
1178 	u8 port = ptp_port->port_num;
1179 	struct ice_hw *hw = &pf->hw;
1180 	int err;
1181 
1182 	mutex_lock(&ptp_port->ps_lock);
1183 
1184 	switch (hw->mac_type) {
1185 	case ICE_MAC_E810:
1186 	case ICE_MAC_E830:
1187 		err = 0;
1188 		break;
1189 	case ICE_MAC_GENERIC:
1190 		kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
1191 
1192 		err = ice_stop_phy_timer_e82x(hw, port, true);
1193 		break;
1194 	case ICE_MAC_GENERIC_3K_E825:
1195 		err = ice_stop_phy_timer_eth56g(hw, port, true);
1196 		break;
1197 	default:
1198 		err = -ENODEV;
1199 	}
1200 	if (err && err != -EBUSY)
1201 		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
1202 			port, err);
1203 
1204 	mutex_unlock(&ptp_port->ps_lock);
1205 
1206 	return err;
1207 }
1208 
1209 /**
1210  * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping
1211  * @ptp_port: PTP port for which the PHY start is set
1212  *
1213  * Start the PHY timestamping block, and initiate Vernier timestamping
1214  * calibration. If timestamping cannot be calibrated (such as if link is down)
1215  * then disable the timestamping block instead.
1216  */
1217 static int
1218 ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
1219 {
1220 	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
1221 	u8 port = ptp_port->port_num;
1222 	struct ice_hw *hw = &pf->hw;
1223 	unsigned long flags;
1224 	int err;
1225 
1226 	if (!ptp_port->link_up)
1227 		return ice_ptp_port_phy_stop(ptp_port);
1228 
1229 	mutex_lock(&ptp_port->ps_lock);
1230 
1231 	switch (hw->mac_type) {
1232 	case ICE_MAC_E810:
1233 	case ICE_MAC_E830:
1234 		err = 0;
1235 		break;
1236 	case ICE_MAC_GENERIC:
1237 		/* Start the PHY timer in Vernier mode */
1238 		kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
1239 
1240 		/* temporarily disable Tx timestamps while calibrating
1241 		 * PHY offset
1242 		 */
1243 		spin_lock_irqsave(&ptp_port->tx.lock, flags);
1244 		ptp_port->tx.calibrating = true;
1245 		spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
1246 		ptp_port->tx_fifo_busy_cnt = 0;
1247 
1248 		/* Start the PHY timer in Vernier mode */
1249 		err = ice_start_phy_timer_e82x(hw, port);
1250 		if (err)
1251 			break;
1252 
1253 		/* Enable Tx timestamps right away */
1254 		spin_lock_irqsave(&ptp_port->tx.lock, flags);
1255 		ptp_port->tx.calibrating = false;
1256 		spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
1257 
1258 		kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
1259 					   0);
1260 		break;
1261 	case ICE_MAC_GENERIC_3K_E825:
1262 		err = ice_start_phy_timer_eth56g(hw, port);
1263 		break;
1264 	default:
1265 		err = -ENODEV;
1266 	}
1267 
1268 	if (err)
1269 		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
1270 			port, err);
1271 
1272 	mutex_unlock(&ptp_port->ps_lock);
1273 
1274 	return err;
1275 }
1276 
1277 /**
1278  * ice_ptp_link_change - Reconfigure PTP after link status change
1279  * @pf: Board private structure
1280  * @linkup: Link is up or down
1281  */
1282 void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
1283 {
1284 	struct ice_ptp_port *ptp_port;
1285 	struct ice_hw *hw = &pf->hw;
1286 
1287 	if (pf->ptp.state != ICE_PTP_READY)
1288 		return;
1289 
1290 	ptp_port = &pf->ptp.port;
1291 
1292 	/* Update cached link status for this port immediately */
1293 	ptp_port->link_up = linkup;
1294 
1295 	/* Skip HW writes if reset is in progress */
1296 	if (pf->hw.reset_ongoing)
1297 		return;
1298 
1299 	switch (hw->mac_type) {
1300 	case ICE_MAC_E810:
1301 	case ICE_MAC_E830:
1302 		/* Do not reconfigure E810 or E830 PHY */
1303 		return;
1304 	case ICE_MAC_GENERIC:
1305 		ice_ptp_port_phy_restart(ptp_port);
1306 		return;
1307 	case ICE_MAC_GENERIC_3K_E825:
1308 		if (linkup)
1309 			ice_ptp_port_phy_restart(ptp_port);
1310 		return;
1311 	default:
1312 		dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__);
1313 	}
1314 }
1315 
1316 /**
1317  * ice_ptp_cfg_phy_interrupt - Configure PHY interrupt settings
1318  * @pf: PF private structure
1319  * @ena: bool value to enable or disable interrupt
1320  * @threshold: Minimum number of packets at which intr is triggered
1321  *
1322  * Utility function to configure all the PHY interrupt settings, including
1323  * whether the PHY interrupt is enabled, and what threshold to use. Also
1324  * configures the E82X timestamp owner to react to interrupts from all PHYs.
1325  *
1326  * Return: 0 on success, -EOPNOTSUPP when the PHY model is not supported, or
1327  * another error code when configuring the PHY interrupt for E82X fails
1328  */
1329 static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
1330 {
1331 	struct device *dev = ice_pf_to_dev(pf);
1332 	struct ice_hw *hw = &pf->hw;
1333 
1334 	ice_ptp_reset_ts_memory(hw);
1335 
1336 	switch (hw->mac_type) {
1337 	case ICE_MAC_E810:
1338 	case ICE_MAC_E830:
1339 		return 0;
1340 	case ICE_MAC_GENERIC: {
1341 		int quad;
1342 
1343 		for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
1344 		     quad++) {
1345 			int err;
1346 
1347 			err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
1348 			if (err) {
1349 				dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
1350 					quad, err);
1351 				return err;
1352 			}
1353 		}
1354 
1355 		return 0;
1356 	}
1357 	case ICE_MAC_GENERIC_3K_E825: {
1358 		int port;
1359 
1360 		for (port = 0; port < hw->ptp.num_lports; port++) {
1361 			int err;
1362 
1363 			err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
1364 			if (err) {
1365 				dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
1366 					port, err);
1367 				return err;
1368 			}
1369 		}
1370 
1371 		return 0;
1372 	}
1373 	case ICE_MAC_UNKNOWN:
1374 	default:
1375 		return -EOPNOTSUPP;
1376 	}
1377 }
1378 
1379 /**
1380  * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block
1381  * @pf: Board private structure
1382  */
1383 static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
1384 {
1385 	ice_ptp_port_phy_restart(&pf->ptp.port);
1386 }
1387 
1388 /**
1389  * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping
1390  * @pf: Board private structure
1391  */
1392 static void ice_ptp_restart_all_phy(struct ice_pf *pf)
1393 {
1394 	struct list_head *entry;
1395 
1396 	list_for_each(entry, &pf->adapter->ports.ports) {
1397 		struct ice_ptp_port *port = list_entry(entry,
1398 						       struct ice_ptp_port,
1399 						       list_node);
1400 
1401 		if (port->link_up)
1402 			ice_ptp_port_phy_restart(port);
1403 	}
1404 }
1405 
1406 /**
1407  * ice_ptp_adjfine - Adjust clock increment rate
1408  * @info: the driver's PTP info structure
1409  * @scaled_ppm: Parts per million with 16-bit fractional field
1410  *
1411  * Adjust the frequency of the clock by the indicated scaled ppm from the
1412  * base frequency.
1413  */
1414 static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
1415 {
1416 	struct ice_pf *pf = ptp_info_to_pf(info);
1417 	struct ice_hw *hw = &pf->hw;
1418 	u64 incval;
1419 	int err;
1420 
1421 	incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm);
1422 	err = ice_ptp_write_incval_locked(hw, incval);
1423 	if (err) {
1424 		dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
1425 			err);
1426 		return -EIO;
1427 	}
1428 
1429 	return 0;
1430 }
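
/* scaled_ppm uses a 16-bit fractional field, so a request of 65536
 * corresponds to +1 ppm; adjust_by_scaled_ppm() returns the base increment
 * value scaled by that ratio, which is then committed with
 * ice_ptp_write_incval_locked().
 */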
1431 
1432 /**
1433  * ice_ptp_extts_event - Process PTP external clock event
1434  * @pf: Board private structure
1435  */
1436 void ice_ptp_extts_event(struct ice_pf *pf)
1437 {
1438 	struct ptp_clock_event event;
1439 	struct ice_hw *hw = &pf->hw;
1440 	u8 chan, tmr_idx;
1441 	u32 hi, lo;
1442 
1443 	/* Don't process timestamp events if PTP is not ready */
1444 	if (pf->ptp.state != ICE_PTP_READY)
1445 		return;
1446 
1447 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1448 	/* Event time is captured by one of the two matched registers
1449 	 *      GLTSYN_EVNT_L: 32 LSB of sampled time event
1450 	 *      GLTSYN_EVNT_H: 32 MSB of sampled time event
1451 	 * Event is defined in GLTSYN_EVNT_0 register
1452 	 */
1453 	for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
1454 		int pin_desc_idx;
1455 
1456 		/* Check if channel is enabled */
1457 		if (!(pf->ptp.ext_ts_irq & (1 << chan)))
1458 			continue;
1459 
1460 		lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
1461 		hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
1462 		event.timestamp = (u64)hi << 32 | lo;
1463 
1464 		/* Add delay compensation */
1465 		pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
1466 		if (pin_desc_idx >= 0) {
1467 			const struct ice_ptp_pin_desc *desc;
1468 
1469 			desc = &pf->ptp.ice_pin_desc[pin_desc_idx];
1470 			event.timestamp -= desc->delay[0];
1471 		}
1472 
1473 		event.type = PTP_CLOCK_EXTTS;
1474 		event.index = chan;
1475 		pf->ptp.ext_ts_irq &= ~(1 << chan);
1476 		ptp_clock_event(pf->ptp.clock, &event);
1477 	}
1478 }
1479 
1480 /**
1481  * ice_ptp_cfg_extts - Configure EXTTS pin and channel
1482  * @pf: Board private structure
1483  * @rq: External timestamp request
1484  * @on: Enable/disable flag
1485  *
1486  * Configure an external timestamp event on the requested channel.
1487  *
1488  * Return: 0 on success, negative error code otherwise
1489  */
1490 static int ice_ptp_cfg_extts(struct ice_pf *pf, struct ptp_extts_request *rq,
1491 			     int on)
1492 {
1493 	u32 aux_reg, gpio_reg, irq_reg;
1494 	struct ice_hw *hw = &pf->hw;
1495 	unsigned int chan, gpio_pin;
1496 	int pin_desc_idx;
1497 	u8 tmr_idx;
1498 
1499 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1500 	chan = rq->index;
1501 
1502 	pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
1503 	if (pin_desc_idx < 0)
1504 		return -EIO;
1505 
1506 	gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[0];
1507 	irq_reg = rd32(hw, PFINT_OICR_ENA);
1508 
1509 	if (on) {
1510 		/* Enable the interrupt */
1511 		irq_reg |= PFINT_OICR_TSYN_EVNT_M;
1512 		aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;
1513 
1514 #define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE	BIT(0)
1515 #define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE	BIT(1)
1516 
1517 		/* set event level to requested edge */
1518 		if (rq->flags & PTP_FALLING_EDGE)
1519 			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
1520 		if (rq->flags & PTP_RISING_EDGE)
1521 			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;
1522 
1523 		/* Write GPIO CTL reg.
1524 		 * 0x1 is input sampled by EVENT register(channel)
1525 		 * + num_in_channels * tmr_idx
1526 		 */
1527 		gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M,
1528 				      1 + chan + (tmr_idx * 3));
1529 	} else {
1530 		bool last_enabled = true;
1531 
1532 		/* clear the values we set to reset defaults */
1533 		aux_reg = 0;
1534 		gpio_reg = 0;
1535 
1536 		for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++)
1537 			if ((pf->ptp.extts_rqs[i].flags &
1538 			     PTP_ENABLE_FEATURE) &&
1539 			    i != chan) {
1540 				last_enabled = false;
1541 			}
1542 
1543 		if (last_enabled)
1544 			irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
1545 	}
1546 
1547 	wr32(hw, PFINT_OICR_ENA, irq_reg);
1548 	wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
1549 	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);
1550 
1551 	return 0;
1552 }
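
/* For example, enabling EXTTS on channel 1 of timer 0 programs a GPIO pin
 * function value of 1 + 1 + (0 * 3) = 2, selecting EVENT channel 1 of that
 * timer as the input sampled on the pin.
 */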
1553 
1554 /**
1555  * ice_ptp_disable_all_extts - Disable all EXTTS channels
1556  * @pf: Board private structure
1557  */
1558 static void ice_ptp_disable_all_extts(struct ice_pf *pf)
1559 {
1560 	for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++)
1561 		if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE)
1562 			ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i],
1563 					  false);
1564 
1565 	synchronize_irq(pf->oicr_irq.virq);
1566 }
1567 
1568 /**
1569  * ice_ptp_enable_all_extts - Enable all EXTTS channels
1570  * @pf: Board private structure
1571  *
1572  * Called during reset to restore user configuration.
1573  */
1574 static void ice_ptp_enable_all_extts(struct ice_pf *pf)
1575 {
1576 	for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++)
1577 		if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE)
1578 			ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i],
1579 					  true);
1580 }
1581 
1582 /**
1583  * ice_ptp_write_perout - Write periodic wave parameters to HW
1584  * @hw: pointer to the HW struct
1585  * @chan: target channel
1586  * @gpio_pin: target GPIO pin
1587  * @start: target time to start periodic output
1588  * @period: target period
1589  *
1590  * Return: 0 on success, negative error code otherwise
1591  */
1592 static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
1593 				unsigned int gpio_pin, u64 start, u64 period)
1594 {
1595 
1596 	u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1597 	u32 val = 0;
1598 
1599 	/* 0. Reset mode & out_en in AUX_OUT */
1600 	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
1601 
1602 	if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) {
1603 		int err;
1604 
1605 		/* Enable/disable CGU 1PPS output for E825C */
1606 		err = ice_tspll_cfg_pps_out_e825c(hw, !!period);
1607 		if (err)
1608 			return err;
1609 	}
1610 
1611 	/* 1. Write perout with half of required period value.
1612 	 * HW toggles output when source clock hits the TGT and then adds
1613 	 * GLTSYN_CLKO value to the target, so it ends up with 50% duty cycle.
1614 	 */
1615 	period >>= 1;
1616 
1617 	/* For proper operation, GLTSYN_CLKO must be larger than clock tick and
1618 	 * period has to fit in 32 bit register.
1619 	 */
1620 #define MIN_PULSE 3
1621 	if (!!period && (period <= MIN_PULSE || period > U32_MAX)) {
1622 		dev_err(ice_hw_to_dev(hw), "CLK period ticks must be >= %d && <= 2^32",
1623 			MIN_PULSE);
1624 		return -EIO;
1625 	}
1626 
1627 	wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));
1628 
1629 	/* 2. Write TARGET time */
1630 	wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start));
1631 	wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start));
1632 
1633 	/* 3. Write AUX_OUT register */
1634 	if (!!period)
1635 		val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
1636 	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);
1637 
1638 	/* 4. write GPIO CTL reg */
1639 	val = GLGEN_GPIO_CTL_PIN_DIR_M;
1640 	if (!!period)
1641 		val |= FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M,
1642 				  8 + chan + (tmr_idx * 4));
1643 
1644 	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
1645 	ice_flush(hw);
1646 
1647 	return 0;
1648 }
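
/* For example, a 1 PPS request reaches this function with a period of
 * 1000000000 ns; after the shift, 500000000 is written to GLTSYN_CLKO, so
 * hardware toggles the pin every half second and produces a 1 Hz output
 * with a 50% duty cycle.
 */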
1649 
1650 /**
1651  * ice_ptp_cfg_perout - Configure clock to generate periodic wave
1652  * @pf: Board private structure
1653  * @rq: Periodic output request
1654  * @on: Enable/disable flag
1655  *
1656  * Configure the internal clock generator modules to generate the clock wave of
1657  * specified period.
1658  *
1659  * Return: 0 on success, negative error code otherwise
1660  */
1661 static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
1662 			      int on)
1663 {
1664 	unsigned int gpio_pin, prop_delay_ns;
1665 	u64 clk, period, start, phase;
1666 	struct ice_hw *hw = &pf->hw;
1667 	int pin_desc_idx;
1668 
1669 	pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_PEROUT, rq->index);
1670 	if (pin_desc_idx < 0)
1671 		return -EIO;
1672 
1673 	gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1];
1674 	prop_delay_ns = pf->ptp.ice_pin_desc[pin_desc_idx].delay[1];
1675 	period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec;
1676 
1677 	/* If we're disabling the output or period is 0, clear out CLKO and TGT
1678 	 * and keep output level low.
1679 	 */
1680 	if (!on || !period)
1681 		return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0);
1682 
1683 	if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 &&
1684 	    period != NSEC_PER_SEC && hw->mac_type == ICE_MAC_GENERIC) {
1685 		dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n");
1686 		return -EOPNOTSUPP;
1687 	}
1688 
1689 	if (period & 0x1) {
1690 		dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
1691 		return -EIO;
1692 	}
1693 
1694 	start = rq->start.sec * NSEC_PER_SEC + rq->start.nsec;
1695 
1696 	/* If PTP_PEROUT_PHASE is set, rq has phase instead of start time */
1697 	if (rq->flags & PTP_PEROUT_PHASE)
1698 		phase = start;
1699 	else
1700 		div64_u64_rem(start, period, &phase);
1701 
1702 	/* If we only have a phase, or the start time is in the past, start the
1703 	 * timer at the next multiple of the period, keeping the requested phase
1704 	 * and at least 0.5 seconds of margin so we have time to write it to HW.
1705 	 */
1706 	clk = ice_ptp_read_src_clk_reg(pf, NULL) + NSEC_PER_MSEC * 500;
1707 	if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns)
1708 		start = div64_u64(clk + period - 1, period) * period + phase;
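	/* Example: with period = 1 s, phase = 250 ms and a PHC reading of
	 * 100.6 s, clk is 101.1 s and the output is scheduled to start at
	 * 102.25 s (before the propagation delay adjustment below).
	 */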
1709 
1710 	/* Compensate for propagation delay from the generator to the pin. */
1711 	start -= prop_delay_ns;
1712 
1713 	return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period);
1714 }
1715 
1716 /**
1717  * ice_ptp_disable_all_perout - Disable all currently configured outputs
1718  * @pf: Board private structure
1719  *
1720  * Disable all currently configured clock outputs. This is necessary before
1721  * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_perout to
1722  * re-enable them.
1723  */
1724 static void ice_ptp_disable_all_perout(struct ice_pf *pf)
1725 {
1726 	for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++)
1727 		if (pf->ptp.perout_rqs[i].period.sec ||
1728 		    pf->ptp.perout_rqs[i].period.nsec)
1729 			ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i],
1730 					   false);
1731 }
1732 
1733 /**
1734  * ice_ptp_enable_all_perout - Enable all configured periodic clock outputs
1735  * @pf: Board private structure
1736  *
1737  * Enable all currently configured clock outputs. Use this after
1738  * ice_ptp_disable_all_perout to reconfigure the output signals according to
1739  * their configuration.
1740  */
1741 static void ice_ptp_enable_all_perout(struct ice_pf *pf)
1742 {
1743 	for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++)
1744 		if (pf->ptp.perout_rqs[i].period.sec ||
1745 		    pf->ptp.perout_rqs[i].period.nsec)
1746 			ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i],
1747 					   true);
1748 }
1749 
1750 /**
1751  * ice_verify_pin - verify if pin supports requested pin function
1752  * @info: the driver's PTP info structure
1753  * @pin: Pin index
1754  * @func: Assigned function
1755  * @chan: Assigned channel
1756  *
1757  * Return: 0 on success, -EOPNOTSUPP when function is not supported.
1758  */
1759 static int ice_verify_pin(struct ptp_clock_info *info, unsigned int pin,
1760 			  enum ptp_pin_function func, unsigned int chan)
1761 {
1762 	struct ice_pf *pf = ptp_info_to_pf(info);
1763 	const struct ice_ptp_pin_desc *pin_desc;
1764 
1765 	pin_desc = &pf->ptp.ice_pin_desc[pin];
1766 
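	/* gpio[0] is the input (EXTTS) GPIO and gpio[1] the output (PEROUT)
	 * GPIO; -1 marks a direction the pin does not support.
	 */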
1767 	/* Is assigned function allowed? */
1768 	switch (func) {
1769 	case PTP_PF_EXTTS:
1770 		if (pin_desc->gpio[0] < 0)
1771 			return -EOPNOTSUPP;
1772 		break;
1773 	case PTP_PF_PEROUT:
1774 		if (pin_desc->gpio[1] < 0)
1775 			return -EOPNOTSUPP;
1776 		break;
1777 	case PTP_PF_NONE:
1778 		break;
1779 	case PTP_PF_PHYSYNC:
1780 	default:
1781 		return -EOPNOTSUPP;
1782 	}
1783 
1784 	return 0;
1785 }
1786 
1787 /**
1788  * ice_ptp_gpio_enable - Enable/disable ancillary features of PHC
1789  * @info: The driver's PTP info structure
1790  * @rq: The requested feature to change
1791  * @on: Enable/disable flag
1792  *
1793  * Return: 0 on success, negative error code otherwise
1794  */
1795 static int ice_ptp_gpio_enable(struct ptp_clock_info *info,
1796 			       struct ptp_clock_request *rq, int on)
1797 {
1798 	struct ice_pf *pf = ptp_info_to_pf(info);
1799 	int err;
1800 
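	/* Requests arrive here from the PTP character device. For example, a
	 * periodic output programmed with the testptp selftest tool (roughly
	 * "testptp -d /dev/ptp0 -L1,2 -p 1000000000") is delivered as a
	 * PTP_CLK_REQ_PEROUT request with a 1 s period.
	 */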
1801 	switch (rq->type) {
1802 	case PTP_CLK_REQ_PEROUT:
1803 	{
1804 		struct ptp_perout_request *cached =
1805 			&pf->ptp.perout_rqs[rq->perout.index];
1806 
1807 		err = ice_ptp_cfg_perout(pf, &rq->perout, on);
1808 		if (!err) {
1809 			*cached = rq->perout;
1810 		} else {
1811 			cached->period.sec = 0;
1812 			cached->period.nsec = 0;
1813 		}
1814 		return err;
1815 	}
1816 	case PTP_CLK_REQ_EXTTS:
1817 	{
1818 		struct ptp_extts_request *cached =
1819 			&pf->ptp.extts_rqs[rq->extts.index];
1820 
1821 		err = ice_ptp_cfg_extts(pf, &rq->extts, on);
1822 		if (!err)
1823 			*cached = rq->extts;
1824 		else
1825 			cached->flags &= ~PTP_ENABLE_FEATURE;
1826 		return err;
1827 	}
1828 	default:
1829 		return -EOPNOTSUPP;
1830 	}
1831 }
1832 
1833 /**
1834  * ice_ptp_gettimex64 - Get the time of the clock
1835  * @info: the driver's PTP info structure
1836  * @ts: timespec64 structure to hold the current time value
1837  * @sts: Optional parameter for holding a pair of system timestamps from
1838  *       the system clock. Will be ignored if NULL is given.
1839  *
1840  * Read the device clock time in ns and convert it into a timespec64 struct,
1841  * which is returned through @ts.
1842  */
1843 static int
1844 ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
1845 		   struct ptp_system_timestamp *sts)
1846 {
1847 	struct ice_pf *pf = ptp_info_to_pf(info);
1848 	u64 time_ns;
1849 
1850 	time_ns = ice_ptp_read_src_clk_reg(pf, sts);
1851 	*ts = ns_to_timespec64(time_ns);
1852 	return 0;
1853 }
1854 
1855 /**
1856  * ice_ptp_settime64 - Set the time of the clock
1857  * @info: the driver's PTP info structure
1858  * @ts: timespec64 structure that holds the new time value
1859  *
1860  * Set the device clock to the user input value. The conversion from timespec
1861  * to ns happens in the write function.
1862  */
1863 static int
1864 ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
1865 {
1866 	struct ice_pf *pf = ptp_info_to_pf(info);
1867 	struct timespec64 ts64 = *ts;
1868 	struct ice_hw *hw = &pf->hw;
1869 	int err;
1870 
1871 	/* For Vernier mode on E82X, we need to recalibrate after new settime.
1872 	 * Start with marking timestamps as invalid.
1873 	 */
1874 	if (hw->mac_type == ICE_MAC_GENERIC) {
1875 		err = ice_ptp_clear_phy_offset_ready_e82x(hw);
1876 		if (err)
1877 			dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
1878 	}
1879 
1880 	if (!ice_ptp_lock(hw)) {
1881 		err = -EBUSY;
1882 		goto exit;
1883 	}
1884 
1885 	/* Disable periodic outputs */
1886 	ice_ptp_disable_all_perout(pf);
1887 
1888 	err = ice_ptp_write_init(pf, &ts64);
1889 	ice_ptp_unlock(hw);
1890 
1891 	if (!err)
1892 		ice_ptp_reset_cached_phctime(pf);
1893 
1894 	/* Reenable periodic outputs */
1895 	ice_ptp_enable_all_perout(pf);
1896 
1897 	/* Recalibrate and re-enable timestamp blocks for E822/E823 */
1898 	if (hw->mac_type == ICE_MAC_GENERIC)
1899 		ice_ptp_restart_all_phy(pf);
1900 exit:
1901 	if (err) {
1902 		dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
1903 		return err;
1904 	}
1905 
1906 	return 0;
1907 }
1908 
1909 /**
1910  * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
1911  * @info: the driver's PTP info structure
1912  * @delta: Offset in nanoseconds to adjust the time by
1913  */
1914 static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
1915 {
1916 	struct timespec64 now, then;
1917 	int ret;
1918 
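	/* This read-modify-write sequence is not atomic: the clock keeps
	 * running between the read below and the settime call, so very large
	 * adjustments carry a small additional error.
	 */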
1919 	then = ns_to_timespec64(delta);
1920 	ret = ice_ptp_gettimex64(info, &now, NULL);
1921 	if (ret)
1922 		return ret;
1923 	now = timespec64_add(now, then);
1924 
1925 	return ice_ptp_settime64(info, (const struct timespec64 *)&now);
1926 }
1927 
1928 /**
1929  * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta
1930  * @info: the driver's PTP info structure
1931  * @delta: Offset in nanoseconds to adjust the time by
1932  */
1933 static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
1934 {
1935 	struct ice_pf *pf = ptp_info_to_pf(info);
1936 	struct ice_hw *hw = &pf->hw;
1937 	struct device *dev;
1938 	int err;
1939 
1940 	dev = ice_pf_to_dev(pf);
1941 
1942 	/* Hardware only supports atomic adjustments using signed 32-bit
1943 	 * integers. For any adjustment outside this range, perform
1944 	 * a non-atomic get->adjust->set flow.
1945 	 */
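	/* e.g. an offset of +5 s (5 000 000 000 ns) exceeds S32_MAX
	 * (~2.147 s) and therefore takes the non-atomic path.
	 */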
1946 	if (delta > S32_MAX || delta < S32_MIN) {
1947 		dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta);
1948 		return ice_ptp_adjtime_nonatomic(info, delta);
1949 	}
1950 
1951 	if (!ice_ptp_lock(hw)) {
1952 		dev_err(dev, "PTP failed to acquire semaphore in adjtime\n");
1953 		return -EBUSY;
1954 	}
1955 
1956 	/* Disable periodic outputs */
1957 	ice_ptp_disable_all_perout(pf);
1958 
1959 	err = ice_ptp_write_adj(pf, delta);
1960 
1961 	/* Reenable periodic outputs */
1962 	ice_ptp_enable_all_perout(pf);
1963 
1964 	ice_ptp_unlock(hw);
1965 
1966 	if (err) {
1967 		dev_err(dev, "PTP failed to adjust time, err %d\n", err);
1968 		return err;
1969 	}
1970 
1971 	ice_ptp_reset_cached_phctime(pf);
1972 
1973 	return 0;
1974 }
1975 
1976 /**
1977  * struct ice_crosststamp_cfg - Device cross timestamp configuration
1978  * @lock_reg: The hardware semaphore lock to use
1979  * @lock_busy: Bit in the semaphore lock indicating the lock is busy
1980  * @ctl_reg: The hardware register to request cross timestamp
1981  * @ctl_active: Bit in the control register to request cross timestamp
1982  * @art_time_l: Lower 32-bits of ART system time
1983  * @art_time_h: Upper 32-bits of ART system time
1984  * @dev_time_l: Lower 32-bits of device time (per timer index)
1985  * @dev_time_h: Upper 32-bits of device time (per timer index)
1986  */
1987 struct ice_crosststamp_cfg {
1988 	/* HW semaphore lock register */
1989 	u32 lock_reg;
1990 	u32 lock_busy;
1991 
1992 	/* Capture control register */
1993 	u32 ctl_reg;
1994 	u32 ctl_active;
1995 
1996 	/* Time storage */
1997 	u32 art_time_l;
1998 	u32 art_time_h;
1999 	u32 dev_time_l[2];
2000 	u32 dev_time_h[2];
2001 };
2002 
2003 static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e82x = {
2004 	.lock_reg = PFHH_SEM,
2005 	.lock_busy = PFHH_SEM_BUSY_M,
2006 	.ctl_reg = GLHH_ART_CTL,
2007 	.ctl_active = GLHH_ART_CTL_ACTIVE_M,
2008 	.art_time_l = GLHH_ART_TIME_L,
2009 	.art_time_h = GLHH_ART_TIME_H,
2010 	.dev_time_l[0] = GLTSYN_HHTIME_L(0),
2011 	.dev_time_h[0] = GLTSYN_HHTIME_H(0),
2012 	.dev_time_l[1] = GLTSYN_HHTIME_L(1),
2013 	.dev_time_h[1] = GLTSYN_HHTIME_H(1),
2014 };
2015 
2016 #ifdef CONFIG_ICE_HWTS
2017 static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e830 = {
2018 	.lock_reg = E830_PFPTM_SEM,
2019 	.lock_busy = E830_PFPTM_SEM_BUSY_M,
2020 	.ctl_reg = E830_GLPTM_ART_CTL,
2021 	.ctl_active = E830_GLPTM_ART_CTL_ACTIVE_M,
2022 	.art_time_l = E830_GLPTM_ART_TIME_L,
2023 	.art_time_h = E830_GLPTM_ART_TIME_H,
2024 	.dev_time_l[0] = E830_GLTSYN_PTMTIME_L(0),
2025 	.dev_time_h[0] = E830_GLTSYN_PTMTIME_H(0),
2026 	.dev_time_l[1] = E830_GLTSYN_PTMTIME_L(1),
2027 	.dev_time_h[1] = E830_GLTSYN_PTMTIME_H(1),
2028 };
2029 
2030 #endif /* CONFIG_ICE_HWTS */
2031 /**
2032  * struct ice_crosststamp_ctx - Device cross timestamp context
2033  * @snapshot: snapshot of system clocks for historic interpolation
2034  * @pf: pointer to the PF private structure
2035  * @cfg: pointer to hardware configuration for cross timestamp
2036  */
2037 struct ice_crosststamp_ctx {
2038 	struct system_time_snapshot snapshot;
2039 	struct ice_pf *pf;
2040 	const struct ice_crosststamp_cfg *cfg;
2041 };
2042 
2043 /**
2044  * ice_capture_crosststamp - Capture a device/system cross timestamp
2045  * @device: Current device time
2046  * @system: System counter value read synchronously with device time
2047  * @__ctx: Context passed from ice_ptp_getcrosststamp
2048  *
2049  * Read device and system (ART) clock simultaneously and return the corrected
2050  * clock values in ns.
2051  *
2052  * Return: zero on success, or a negative error code on failure.
2053  */
2054 static int ice_capture_crosststamp(ktime_t *device,
2055 				   struct system_counterval_t *system,
2056 				   void *__ctx)
2057 {
2058 	struct ice_crosststamp_ctx *ctx = __ctx;
2059 	const struct ice_crosststamp_cfg *cfg;
2060 	u32 lock, ctl, ts_lo, ts_hi, tmr_idx;
2061 	struct ice_pf *pf;
2062 	struct ice_hw *hw;
2063 	int err;
2064 	u64 ts;
2065 
2066 	cfg = ctx->cfg;
2067 	pf = ctx->pf;
2068 	hw = &pf->hw;
2069 
2070 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
2071 	if (tmr_idx > 1)
2072 		return -EINVAL;
2073 
2074 	/* Poll until we obtain the cross-timestamp hardware semaphore */
2075 	err = rd32_poll_timeout(hw, cfg->lock_reg, lock,
2076 				!(lock & cfg->lock_busy),
2077 				10 * USEC_PER_MSEC, 50 * USEC_PER_MSEC);
2078 	if (err) {
2079 		dev_err(ice_pf_to_dev(pf), "PTP failed to get cross timestamp lock\n");
2080 		return -EBUSY;
2081 	}
2082 
2083 	/* Snapshot system time for historic interpolation */
2084 	ktime_get_snapshot(&ctx->snapshot);
2085 
2086 	/* Program cmd to master timer */
2087 	ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
2088 
2089 	/* Start the ART and device clock sync sequence */
2090 	ctl = rd32(hw, cfg->ctl_reg);
2091 	ctl |= cfg->ctl_active;
2092 	wr32(hw, cfg->ctl_reg, ctl);
2093 
2094 	/* Poll until hardware completes the capture */
2095 	err = rd32_poll_timeout(hw, cfg->ctl_reg, ctl, !(ctl & cfg->ctl_active),
2096 				5, 20 * USEC_PER_MSEC);
2097 	if (err)
2098 		goto err_timeout;
2099 
2100 	/* Read ART system time */
2101 	ts_lo = rd32(hw, cfg->art_time_l);
2102 	ts_hi = rd32(hw, cfg->art_time_h);
2103 	ts = ((u64)ts_hi << 32) | ts_lo;
2104 	system->cycles = ts;
2105 	system->cs_id = CSID_X86_ART;
2106 	system->use_nsecs = true;
2107 
2108 	/* Read Device source clock time */
2109 	ts_lo = rd32(hw, cfg->dev_time_l[tmr_idx]);
2110 	ts_hi = rd32(hw, cfg->dev_time_h[tmr_idx]);
2111 	ts = ((u64)ts_hi << 32) | ts_lo;
2112 	*device = ns_to_ktime(ts);
2113 
2114 err_timeout:
2115 	/* Clear the master timer */
2116 	ice_ptp_src_cmd(hw, ICE_PTP_NOP);
2117 
2118 	/* Release HW lock */
2119 	lock = rd32(hw, cfg->lock_reg);
2120 	lock &= ~cfg->lock_busy;
2121 	wr32(hw, cfg->lock_reg, lock);
2122 
2123 	return err;
2124 }
2125 
2126 /**
2127  * ice_ptp_getcrosststamp - Capture a device cross timestamp
2128  * @info: the driver's PTP info structure
2129  * @cts: The memory to fill the cross timestamp info
2130  *
2131  * Capture a cross timestamp between the ART and the device PTP hardware
2132  * clock. Fill the cross timestamp information and report it back to the
2133  * caller.
2134  *
2135  * In order to correctly correlate the ART timestamp back to the TSC time, the
2136  * CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
2137  *
2138  * Return: zero on success, or a negative error code on failure.
2139  */
2140 static int ice_ptp_getcrosststamp(struct ptp_clock_info *info,
2141 				  struct system_device_crosststamp *cts)
2142 {
2143 	struct ice_pf *pf = ptp_info_to_pf(info);
2144 	struct ice_crosststamp_ctx ctx = {
2145 		.pf = pf,
2146 	};
2147 
2148 	switch (pf->hw.mac_type) {
2149 	case ICE_MAC_GENERIC:
2150 	case ICE_MAC_GENERIC_3K_E825:
2151 		ctx.cfg = &ice_crosststamp_cfg_e82x;
2152 		break;
2153 #ifdef CONFIG_ICE_HWTS
2154 	case ICE_MAC_E830:
2155 		ctx.cfg = &ice_crosststamp_cfg_e830;
2156 		break;
2157 #endif /* CONFIG_ICE_HWTS */
2158 	default:
2159 		return -EOPNOTSUPP;
2160 	}
2161 
2162 	return get_device_system_crosststamp(ice_capture_crosststamp, &ctx,
2163 					     &ctx.snapshot, cts);
2164 }
2165 
2166 /**
2167  * ice_ptp_hwtstamp_get - interface to read the timestamping config
2168  * @netdev: Pointer to network interface device structure
2169  * @config: Timestamping configuration structure
2170  *
2171  * Copy the timestamping config to the user buffer
2172  */
2173 int ice_ptp_hwtstamp_get(struct net_device *netdev,
2174 			 struct kernel_hwtstamp_config *config)
2175 {
2176 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
2177 
2178 	if (pf->ptp.state != ICE_PTP_READY)
2179 		return -EIO;
2180 
2181 	*config = pf->ptp.tstamp_config;
2182 
2183 	return 0;
2184 }
2185 
2186 /**
2187  * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
2188  * @pf: Board private structure
2189  * @config: hwtstamp settings requested or saved
2190  */
2191 static int ice_ptp_set_timestamp_mode(struct ice_pf *pf,
2192 				      struct kernel_hwtstamp_config *config)
2193 {
2194 	switch (config->tx_type) {
2195 	case HWTSTAMP_TX_OFF:
2196 		pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
2197 		break;
2198 	case HWTSTAMP_TX_ON:
2199 		pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
2200 		break;
2201 	default:
2202 		return -ERANGE;
2203 	}
2204 
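	/* Rx hardware timestamping on this device applies to all received
	 * packets, so any PTP-specific filter below is promoted to
	 * HWTSTAMP_FILTER_ALL and reported back as such.
	 */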
2205 	switch (config->rx_filter) {
2206 	case HWTSTAMP_FILTER_NONE:
2207 		pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
2208 		break;
2209 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2210 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2211 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2212 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2213 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2214 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2215 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2216 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2217 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2218 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2219 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2220 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2221 	case HWTSTAMP_FILTER_NTP_ALL:
2222 	case HWTSTAMP_FILTER_ALL:
2223 		pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
2224 		break;
2225 	default:
2226 		return -ERANGE;
2227 	}
2228 
2229 	/* Immediately update the device timestamping mode */
2230 	ice_ptp_restore_timestamp_mode(pf);
2231 
2232 	return 0;
2233 }
2234 
2235 /**
2236  * ice_ptp_hwtstamp_set - interface to control the timestamping
2237  * @netdev: Pointer to network interface device structure
2238  * @config: Timestamping configuration structure
2239  * @extack: Netlink extended ack structure for error reporting
2240  *
2241  * Get the user config and store it
2242  */
2243 int ice_ptp_hwtstamp_set(struct net_device *netdev,
2244 			 struct kernel_hwtstamp_config *config,
2245 			 struct netlink_ext_ack *extack)
2246 {
2247 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
2248 	int err;
2249 
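	/* Reached through the kernel's hwtstamp configuration path, e.g. the
	 * SIOCSHWTSTAMP ioctl or the ethtool/netlink timestamping interface.
	 */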
2250 	if (pf->ptp.state != ICE_PTP_READY)
2251 		return -EAGAIN;
2252 
2253 	err = ice_ptp_set_timestamp_mode(pf, config);
2254 	if (err)
2255 		return err;
2256 
2257 	/* Return the actual configuration set */
2258 	*config = pf->ptp.tstamp_config;
2259 
2260 	return 0;
2261 }
2262 
2263 /**
2264  * ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns
2265  * @rx_desc: Receive descriptor
2266  * @pkt_ctx: Packet context to get the cached time
2267  *
2268  * The driver receives the packet timestamp in the receive descriptor.
2269  */
2270 u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
2271 			const struct ice_pkt_ctx *pkt_ctx)
2272 {
2273 	u64 ts_ns, cached_time;
2274 	u32 ts_high;
2275 
2276 	if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
2277 		return 0;
2278 
2279 	cached_time = READ_ONCE(pkt_ctx->cached_phctime);
2280 
2281 	/* Do not report a timestamp if we don't have a cached PHC time */
2282 	if (!cached_time)
2283 		return 0;
2284 
2285 	/* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
2286 	 * PHC value, rather than accessing the PF. This also allows us to
2287 	 * simply pass the upper 32bits of nanoseconds directly. Calling
2288 	 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these
2289 	 * bits itself.
2290 	 */
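	/* The descriptor carries only the low 32 bits of the nanosecond
	 * counter (about 4.29 s of range); the cached PHC time supplies the
	 * upper bits during extension.
	 */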
2291 	ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
2292 	ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);
2293 
2294 	return ts_ns;
2295 }
2296 
2297 /**
2298  * ice_ptp_setup_pin_cfg - setup PTP pin_config structure
2299  * @pf: Board private structure
2300  */
2301 static void ice_ptp_setup_pin_cfg(struct ice_pf *pf)
2302 {
2303 	for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) {
2304 		const struct ice_ptp_pin_desc *desc = &pf->ptp.ice_pin_desc[i];
2305 		struct ptp_pin_desc *pin = &pf->ptp.pin_desc[i];
2306 		const char *name;
2307 
2308 		if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
2309 			name = ice_pin_names[desc->name_idx];
2310 		else
2311 			name = ice_pin_names_dpll[desc->name_idx];
2312 
2313 		strscpy(pin->name, name, sizeof(pin->name));
2314 
2315 		pin->index = i;
2316 	}
2317 
2318 	pf->ptp.info.pin_config = pf->ptp.pin_desc;
2319 }
2320 
2321 /**
2322  * ice_ptp_disable_pins - Disable PTP pins
2323  * @pf: pointer to the PF structure
2324  *
2325  * Disable the OS access to the pins. Called to clear out the OS
2326  * indications of pin support when we fail to set up the pin array.
2327  */
2328 static void ice_ptp_disable_pins(struct ice_pf *pf)
2329 {
2330 	struct ptp_clock_info *info = &pf->ptp.info;
2331 
2332 	dev_warn(ice_pf_to_dev(pf), "Failed to configure PTP pin control\n");
2333 
2334 	info->enable = NULL;
2335 	info->verify = NULL;
2336 	info->n_pins = 0;
2337 	info->n_ext_ts = 0;
2338 	info->n_per_out = 0;
2339 }
2340 
2341 /**
2342  * ice_ptp_parse_sdp_entries - update ice_ptp_pin_desc structure from NVM
2343  * @pf: pointer to the PF structure
2344  * @entries: SDP connection section from NVM
2345  * @num_entries: number of valid entries in sdp_entries
2346  * @pins: PTP pins array to update
2347  *
2348  * Return: 0 on success, negative error code otherwise.
2349  */
2350 static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries,
2351 				     unsigned int num_entries,
2352 				     struct ice_ptp_pin_desc *pins)
2353 {
2354 	unsigned int n_pins = 0;
2355 	unsigned int i;
2356 
2357 	/* Setup ice_pin_desc array */
2358 	for (i = 0; i < ICE_N_PINS_MAX; i++) {
2359 		pins[i].name_idx = -1;
2360 		pins[i].gpio[0] = -1;
2361 		pins[i].gpio[1] = -1;
2362 	}
2363 
2364 	for (i = 0; i < num_entries; i++) {
2365 		u16 entry = le16_to_cpu(entries[i]);
2366 		DECLARE_BITMAP(bitmap, GPIO_NA);
2367 		unsigned int idx;
2368 		bool dir;
2369 		u16 gpio;
2370 
2371 		*bitmap = FIELD_GET(ICE_AQC_NVM_SDP_AC_PIN_M, entry);
2372 
2373 		/* Check if entry's pin bitmap is valid. */
2374 		if (bitmap_empty(bitmap, GPIO_NA))
2375 			continue;
2376 
2377 		dir = !!FIELD_GET(ICE_AQC_NVM_SDP_AC_DIR_M, entry);
2378 		gpio = FIELD_GET(ICE_AQC_NVM_SDP_AC_SDP_NUM_M, entry);
2379 
2380 		for (idx = 0; idx < ICE_N_PINS_MAX; idx++) {
2381 			if (pins[idx].name_idx == gpio)
2382 				break;
2383 		}
2384 
2385 		if (idx == ICE_N_PINS_MAX) {
2386 			/* Pin not found, setup its entry and name */
2387 			idx = n_pins++;
2388 			pins[idx].name_idx = gpio;
2389 		}
2390 		pins[idx].gpio[dir] = gpio;
2391 	}
2392 
2393 	for (i = 0; i < n_pins; i++) {
2394 		dev_dbg(ice_pf_to_dev(pf),
2395 			"NVM pin entry[%d] : name_idx %d gpio_out %d gpio_in %d\n",
2396 			i, pins[i].name_idx, pins[i].gpio[1], pins[i].gpio[0]);
2397 	}
2398 
2399 	pf->ptp.info.n_pins = n_pins;
2400 	return 0;
2401 }
2402 
2403 /**
2404  * ice_ptp_set_funcs_e82x - Set specialized functions for E82X support
2405  * @pf: Board private structure
2406  *
2407  * Assign functions to the PTP capabilities structure for E82X devices.
2408  * Functions which operate across all device families should be set directly
2409  * in ice_ptp_set_caps. Only add functions here which are distinct for E82X
2410  * devices.
2411  */
2412 static void ice_ptp_set_funcs_e82x(struct ice_pf *pf)
2413 {
2414 	pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
2415 
2416 	if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825) {
2417 		pf->ptp.ice_pin_desc = ice_pin_desc_e825c;
2418 		pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e825c);
2419 	} else {
2420 		pf->ptp.ice_pin_desc = ice_pin_desc_e82x;
2421 		pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e82x);
2422 	}
2423 	ice_ptp_setup_pin_cfg(pf);
2424 }
2425 
2426 /**
2427  * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
2428  * @pf: Board private structure
2429  *
2430  * Assign functions to the PTP capabilities structure for E810 devices.
2431  * Functions which operate across all device families should be set directly
2432  * in ice_ptp_set_caps. Only add functions here which are distinct for E810
2433  * devices.
2434  */
2435 static void ice_ptp_set_funcs_e810(struct ice_pf *pf)
2436 {
2437 	__le16 entries[ICE_AQC_NVM_SDP_AC_MAX_SIZE];
2438 	struct ice_ptp_pin_desc *desc = NULL;
2439 	struct ice_ptp *ptp = &pf->ptp;
2440 	unsigned int num_entries;
2441 	int err;
2442 
2443 	err = ice_ptp_read_sdp_ac(&pf->hw, entries, &num_entries);
2444 	if (err) {
2445 		/* SDP section does not exist in NVM or is corrupted */
2446 		if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
2447 			ptp->ice_pin_desc = ice_pin_desc_dpll;
2448 			ptp->info.n_pins = ARRAY_SIZE(ice_pin_desc_dpll);
2449 		} else {
2450 			pf->ptp.ice_pin_desc = ice_pin_desc_e810;
2451 			pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e810);
2452 		}
2453 		err = 0;
2454 	} else {
2455 		desc = devm_kcalloc(ice_pf_to_dev(pf), ICE_N_PINS_MAX,
2456 				    sizeof(struct ice_ptp_pin_desc),
2457 				    GFP_KERNEL);
2458 		if (!desc) {
			err = -ENOMEM;
2459 			goto err;
		}
2460 
2461 		err = ice_ptp_parse_sdp_entries(pf, entries, num_entries, desc);
2462 		if (err)
2463 			goto err;
2464 
2465 		ptp->ice_pin_desc = (const struct ice_ptp_pin_desc *)desc;
2466 	}
2467 
2468 	ptp->info.pin_config = ptp->pin_desc;
2469 	ice_ptp_setup_pin_cfg(pf);
2470 
2471 err:
2472 	if (err) {
2473 		devm_kfree(ice_pf_to_dev(pf), desc);
2474 		ice_ptp_disable_pins(pf);
2475 	}
2476 }
2477 
2478 /**
2479  * ice_ptp_set_funcs_e830 - Set specialized functions for E830 support
2480  * @pf: Board private structure
2481  *
2482  * Assign functions to the PTP capabilities structure for E830 devices.
2483  * Functions which operate across all device families should be set directly
2484  * in ice_ptp_set_caps. Only add functions here which are distinct for E830
2485  * devices.
2486  */
2487 static void ice_ptp_set_funcs_e830(struct ice_pf *pf)
2488 {
2489 #ifdef CONFIG_ICE_HWTS
2490 	if (pcie_ptm_enabled(pf->pdev) && boot_cpu_has(X86_FEATURE_ART))
2491 		pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
2492 
2493 #endif /* CONFIG_ICE_HWTS */
2494 	/* Rest of the config is the same as base E810 */
2495 	pf->ptp.ice_pin_desc = ice_pin_desc_e810;
2496 	pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e810);
2497 	ice_ptp_setup_pin_cfg(pf);
2498 }
2499 
2500 /**
2501  * ice_ptp_set_caps - Set PTP capabilities
2502  * @pf: Board private structure
2503  */
2504 static void ice_ptp_set_caps(struct ice_pf *pf)
2505 {
2506 	struct ptp_clock_info *info = &pf->ptp.info;
2507 	struct device *dev = ice_pf_to_dev(pf);
2508 
2509 	snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
2510 		 dev_driver_string(dev), dev_name(dev));
2511 	info->owner = THIS_MODULE;
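	/* Maximum frequency adjustment, in parts per billion (here 10%) */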
2512 	info->max_adj = 100000000;
2513 	info->adjtime = ice_ptp_adjtime;
2514 	info->adjfine = ice_ptp_adjfine;
2515 	info->gettimex64 = ice_ptp_gettimex64;
2516 	info->settime64 = ice_ptp_settime64;
2517 	info->n_per_out = GLTSYN_TGT_H_IDX_MAX;
2518 	info->n_ext_ts = GLTSYN_EVNT_H_IDX_MAX;
2519 	info->enable = ice_ptp_gpio_enable;
2520 	info->verify = ice_verify_pin;
2521 
2522 	info->supported_extts_flags = PTP_RISING_EDGE |
2523 				      PTP_FALLING_EDGE |
2524 				      PTP_STRICT_FLAGS;
2525 	info->supported_perout_flags = PTP_PEROUT_PHASE;
2526 
2527 	switch (pf->hw.mac_type) {
2528 	case ICE_MAC_E810:
2529 		ice_ptp_set_funcs_e810(pf);
2530 		return;
2531 	case ICE_MAC_E830:
2532 		ice_ptp_set_funcs_e830(pf);
2533 		return;
2534 	case ICE_MAC_GENERIC:
2535 	case ICE_MAC_GENERIC_3K_E825:
2536 		ice_ptp_set_funcs_e82x(pf);
2537 		return;
2538 	default:
2539 		return;
2540 	}
2541 }
2542 
2543 /**
2544  * ice_ptp_create_clock - Create PTP clock device for userspace
2545  * @pf: Board private structure
2546  *
2547  * This function creates a new PTP clock device. It only creates one if we
2548  * don't already have one. Will return error if it can't create one, but success
2549  * don't already have one. Returns an error if it can't create one, and success
2550  * if we already have a device. Should be used by ice_ptp_init to create the
2551  * clock initially, and to prevent global resets from creating new clock devices.
2552 static long ice_ptp_create_clock(struct ice_pf *pf)
2553 {
2554 	struct ptp_clock_info *info;
2555 	struct device *dev;
2556 
2557 	/* No need to create a clock device if we already have one */
2558 	if (pf->ptp.clock)
2559 		return 0;
2560 
2561 	ice_ptp_set_caps(pf);
2562 
2563 	info = &pf->ptp.info;
2564 	dev = ice_pf_to_dev(pf);
2565 
2566 	/* Attempt to register the clock before enabling the hardware. */
2567 	pf->ptp.clock = ptp_clock_register(info, dev);
2568 	if (IS_ERR(pf->ptp.clock)) {
2569 		dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device");
2570 		return PTR_ERR(pf->ptp.clock);
2571 	}
2572 
2573 	return 0;
2574 }
2575 
2576 /**
2577  * ice_ptp_request_ts - Request an available Tx timestamp index
2578  * @tx: the PTP Tx timestamp tracker to request from
2579  * @skb: the SKB to associate with this timestamp request
2580  */
2581 s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
2582 {
2583 	unsigned long flags;
2584 	u8 idx;
2585 
2586 	spin_lock_irqsave(&tx->lock, flags);
2587 
2588 	/* Check that this tracker is accepting new timestamp requests */
2589 	if (!ice_ptp_is_tx_tracker_up(tx)) {
2590 		spin_unlock_irqrestore(&tx->lock, flags);
2591 		return -1;
2592 	}
2593 
2594 	/* Find and set the first available index */
2595 	idx = find_next_zero_bit(tx->in_use, tx->len,
2596 				 tx->last_ll_ts_idx_read + 1);
2597 	if (idx == tx->len)
2598 		idx = find_first_zero_bit(tx->in_use, tx->len);
2599 
2600 	if (idx < tx->len) {
2601 		/* We got a valid index that no other thread could have set. Store
2602 		 * a reference to the skb and the start time to allow discarding old
2603 		 * requests.
2604 		 */
2605 		set_bit(idx, tx->in_use);
2606 		clear_bit(idx, tx->stale);
2607 		tx->tstamps[idx].start = jiffies;
2608 		tx->tstamps[idx].skb = skb_get(skb);
2609 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2610 		ice_trace(tx_tstamp_request, skb, idx);
2611 	}
2612 
2613 	spin_unlock_irqrestore(&tx->lock, flags);
2614 
2615 	/* Return the appropriate PHY timestamp register index, or -1 if no
2616 	 * index was available.
2617 	 */
2618 	if (idx >= tx->len)
2619 		return -1;
2620 	else
2621 		return idx + tx->offset;
2622 }
2623 
2624 void ice_ptp_process_ts(struct ice_pf *pf)
2625 {
2626 	switch (pf->ptp.tx_interrupt_mode) {
2627 	case ICE_PTP_TX_INTERRUPT_NONE:
2628 		/* This device has the clock owner handle timestamps for it */
2629 		return;
2630 	case ICE_PTP_TX_INTERRUPT_SELF:
2631 		/* This device handles its own timestamps */
2632 		ice_ptp_process_tx_tstamp(&pf->ptp.port.tx);
2633 		return;
2634 	case ICE_PTP_TX_INTERRUPT_ALL:
2635 		/* This device handles timestamps for all ports */
2636 		ice_ptp_tx_tstamp_owner(pf);
2637 		return;
2638 	default:
2639 		WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
2640 			  pf->ptp.tx_interrupt_mode);
2641 		return;
2642 	}
2643 }
2644 
2645 static bool ice_port_has_timestamps(struct ice_ptp_tx *tx)
2646 {
2647 	bool more_timestamps;
2648 
2649 	scoped_guard(spinlock_irqsave, &tx->lock) {
2650 		if (!tx->init)
2651 			return false;
2652 
2653 		more_timestamps = !bitmap_empty(tx->in_use, tx->len);
2654 	}
2655 
2656 	return more_timestamps;
2657 }
2658 
2659 static bool ice_any_port_has_timestamps(struct ice_pf *pf)
2660 {
2661 	struct ice_ptp_port *port;
2662 
2663 	scoped_guard(mutex, &pf->adapter->ports.lock) {
2664 		list_for_each_entry(port, &pf->adapter->ports.ports,
2665 				    list_node) {
2666 			struct ice_ptp_tx *tx = &port->tx;
2667 
2668 			if (ice_port_has_timestamps(tx))
2669 				return true;
2670 		}
2671 	}
2672 
2673 	return false;
2674 }
2675 
2676 bool ice_ptp_tx_tstamps_pending(struct ice_pf *pf)
2677 {
2678 	struct ice_hw *hw = &pf->hw;
2679 	unsigned int i;
2680 
2681 	/* Check software indicator */
2682 	switch (pf->ptp.tx_interrupt_mode) {
2683 	case ICE_PTP_TX_INTERRUPT_NONE:
2684 		return false;
2685 	case ICE_PTP_TX_INTERRUPT_SELF:
2686 		if (ice_port_has_timestamps(&pf->ptp.port.tx))
2687 			return true;
2688 		break;
2689 	case ICE_PTP_TX_INTERRUPT_ALL:
2690 		if (ice_any_port_has_timestamps(pf))
2691 			return true;
2692 		break;
2693 	default:
2694 		WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
2695 			  pf->ptp.tx_interrupt_mode);
2696 		break;
2697 	}
2698 
2699 	/* Check hardware indicator */
2700 	for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) {
2701 		u64 tstamp_ready = 0;
2702 		int err;
2703 
2704 		err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
2705 		if (err || tstamp_ready)
2706 			return true;
2707 	}
2708 
2709 	return false;
2710 }
2711 
2712 /**
2713  * ice_ptp_ts_irq - Process the PTP Tx timestamps in IRQ context
2714  * @pf: Board private structure
2715  *
2716  * Return: IRQ_WAKE_THREAD if Tx timestamp read has to be handled in the bottom
2717  *         half of the interrupt and IRQ_HANDLED otherwise.
2718  */
2719 irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
2720 {
2721 	struct ice_hw *hw = &pf->hw;
2722 
2723 	switch (hw->mac_type) {
2724 	case ICE_MAC_E810:
2725 		/* E810 capable of low latency timestamping with interrupt can
2726 		 * request a single timestamp in the top half and wait for
2727 		 * a second LL TS interrupt from the FW when it's ready.
2728 		 */
2729 		if (hw->dev_caps.ts_dev_info.ts_ll_int_read) {
2730 			struct ice_ptp_tx *tx = &pf->ptp.port.tx;
2731 			u8 idx, last;
2732 
2733 			if (!ice_pf_state_is_nominal(pf))
2734 				return IRQ_HANDLED;
2735 
2736 			spin_lock(&tx->lock);
2737 			if (tx->init) {
2738 				last = tx->last_ll_ts_idx_read + 1;
2739 				idx = find_next_bit_wrap(tx->in_use, tx->len,
2740 							 last);
2741 				if (idx != tx->len)
2742 					ice_ptp_req_tx_single_tstamp(tx, idx);
2743 			}
2744 			spin_unlock(&tx->lock);
2745 
2746 			return IRQ_HANDLED;
2747 		}
2748 		fallthrough; /* non-LL_TS E810 */
2749 	case ICE_MAC_GENERIC:
2750 	case ICE_MAC_GENERIC_3K_E825:
2751 		/* All other devices process timestamps in the bottom half due
2752 		 * to sleeping or polling.
2753 		 */
2754 		if (!ice_ptp_pf_handles_tx_interrupt(pf))
2755 			return IRQ_HANDLED;
2756 
2757 		set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
2758 		return IRQ_WAKE_THREAD;
2759 	case ICE_MAC_E830:
2760 		/* E830 can read timestamps in the top half using rd32() */
2761 		ice_ptp_process_ts(pf);
2762 
2763 		if (ice_ptp_tx_tstamps_pending(pf)) {
2764 			/* Process outstanding Tx timestamps. If there
2765 			 * is more work, re-arm the interrupt to trigger again.
2766 			 */
2767 			wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
2768 			ice_flush(hw);
2769 		}
2770 		return IRQ_HANDLED;
2771 	default:
2772 		return IRQ_HANDLED;
2773 	}
2774 }
2775 
2776 /**
2777  * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt
2778  * @pf: Board private structure
2779  *
2780  * The device PHY issues Tx timestamp interrupts to the driver for processing
2781  * timestamp data from the PHY. It will not interrupt again until all
2782  * current timestamp data is read. In rare circumstances, it is possible that
2783  * the driver fails to read all outstanding data.
2784  *
2785  * To avoid getting permanently stuck, periodically check if the PHY has
2786  * outstanding timestamp data. If so, trigger an interrupt from software to
2787  * process this data.
2788  */
2789 static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
2790 {
2791 	struct device *dev = ice_pf_to_dev(pf);
2792 	struct ice_hw *hw = &pf->hw;
2793 	bool trigger_oicr = false;
2794 	unsigned int i;
2795 
2796 	if (!pf->ptp.port.tx.has_ready_bitmap)
2797 		return;
2798 
2799 	if (!ice_pf_src_tmr_owned(pf))
2800 		return;
2801 
2802 	for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) {
2803 		u64 tstamp_ready;
2804 		int err;
2805 
2806 		err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
2807 		if (!err && tstamp_ready) {
2808 			trigger_oicr = true;
2809 			break;
2810 		}
2811 	}
2812 
2813 	if (trigger_oicr) {
2814 		/* Trigger a software interrupt, to ensure this data
2815 		 * gets processed.
2816 		 */
2817 		dev_dbg(dev, "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n");
2818 
2819 		wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
2820 		ice_flush(hw);
2821 	}
2822 }
2823 
2824 static void ice_ptp_periodic_work(struct kthread_work *work)
2825 {
2826 	struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
2827 	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
2828 	int err;
2829 
2830 	if (pf->ptp.state != ICE_PTP_READY)
2831 		return;
2832 
2833 	err = ice_ptp_update_cached_phctime(pf);
2834 
2835 	ice_ptp_maybe_trigger_tx_interrupt(pf);
2836 
2837 	/* Run twice a second or reschedule if phc update failed */
2838 	kthread_queue_delayed_work(ptp->kworker, &ptp->work,
2839 				   msecs_to_jiffies(err ? 10 : 500));
2840 }
2841 
2842 /**
2843  * ice_ptp_queue_work - Queue PTP periodic work for a PF
2844  * @pf: Board private structure
2845  *
2846  * Helper function to queue PTP periodic work after VSI rebuild completes.
2847  * This ensures that PTP work only runs when VSI structures are ready.
2848  */
2849 void ice_ptp_queue_work(struct ice_pf *pf)
2850 {
2851 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags) &&
2852 	    pf->ptp.state == ICE_PTP_READY)
2853 		kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 0);
2854 }
2855 
2856 /**
2857  * ice_ptp_prepare_rebuild_sec - Prepare second NAC for PTP reset or rebuild
2858  * @pf: Board private structure
2859  * @rebuild: rebuild if true, prepare if false
2860  * @reset_type: the reset type being performed
2861  */
2862 static void ice_ptp_prepare_rebuild_sec(struct ice_pf *pf, bool rebuild,
2863 					enum ice_reset_req reset_type)
2864 {
2865 	struct list_head *entry;
2866 
2867 	list_for_each(entry, &pf->adapter->ports.ports) {
2868 		struct ice_ptp_port *port = list_entry(entry,
2869 						       struct ice_ptp_port,
2870 						       list_node);
2871 		struct ice_pf *peer_pf = ptp_port_to_pf(port);
2872 
2873 		if (!ice_is_primary(&peer_pf->hw)) {
2874 			if (rebuild) {
2875 				/* TODO: When implementing rebuild=true:
2876 				 * 1. Ensure secondary PFs' VSIs are rebuilt
2877 				 * 2. Call ice_ptp_queue_work(peer_pf) after VSI rebuild
2878 				 */
2879 				ice_ptp_rebuild(peer_pf, reset_type);
2880 			} else {
2881 				ice_ptp_prepare_for_reset(peer_pf, reset_type);
2882 			}
2883 		}
2884 	}
2885 }
2886 
2887 /**
2888  * ice_ptp_prepare_for_reset - Prepare PTP for reset
2889  * @pf: Board private structure
2890  * @reset_type: the reset type being performed
2891  */
2892 void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
2893 {
2894 	struct ice_ptp *ptp = &pf->ptp;
2895 	struct ice_hw *hw = &pf->hw;
2896 	u8 src_tmr;
2897 
2898 	if (ptp->state != ICE_PTP_READY)
2899 		return;
2900 
2901 	ptp->state = ICE_PTP_RESETTING;
2902 
2903 	/* Disable timestamping for both Tx and Rx */
2904 	ice_ptp_disable_timestamp_mode(pf);
2905 
2906 	kthread_cancel_delayed_work_sync(&ptp->work);
2907 
2908 	if (reset_type == ICE_RESET_PFR)
2909 		return;
2910 
2911 	if (ice_pf_src_tmr_owned(pf) && hw->mac_type == ICE_MAC_GENERIC_3K_E825)
2912 		ice_ptp_prepare_rebuild_sec(pf, false, reset_type);
2913 
2914 	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
2915 
2916 	/* Disable periodic outputs */
2917 	ice_ptp_disable_all_perout(pf);
2918 
2919 	src_tmr = ice_get_ptp_src_clock_index(&pf->hw);
2920 
2921 	/* Disable source clock */
2922 	wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);
2923 
2924 	/* Capture the system time so the clock can be restored after reset */
2925 	ptp->reset_time = ktime_get_real_ns();
2926 }
2927 
2928 /**
2929  * ice_ptp_rebuild_owner - Initialize PTP clock owner after reset
2930  * @pf: Board private structure
2931  *
2932  * Companion function for ice_ptp_rebuild() which handles tasks that only the
2933  * PTP clock owner instance should perform.
2934  */
2935 static int ice_ptp_rebuild_owner(struct ice_pf *pf)
2936 {
2937 	struct ice_ptp *ptp = &pf->ptp;
2938 	struct ice_hw *hw = &pf->hw;
2939 	struct timespec64 ts;
2940 	u64 time_diff;
2941 	int err;
2942 
2943 	err = ice_ptp_init_phc(hw);
2944 	if (err)
2945 		return err;
2946 
2947 	err = ice_tspll_init(hw);
2948 	if (err)
2949 		return err;
2950 
2951 	/* Acquire the global hardware lock */
2952 	if (!ice_ptp_lock(hw)) {
2953 		err = -EBUSY;
2954 		return err;
2955 	}
2956 
2957 	/* Write the increment time value to PHY and LAN */
2958 	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
2959 	if (err)
2960 		goto err_unlock;
2961 
2962 	/* Write the initial Time value to PHY and LAN using the cached PHC
2963 	 * time before the reset and time difference between stopping and
2964 	 * starting the clock.
2965 	 */
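	/* For example, if the PHC read 1000.0 s when the reset started and
	 * 0.3 s of wall-clock time elapsed since, the clock is restored to
	 * roughly 1000.3 s.
	 */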
2966 	if (ptp->cached_phc_time) {
2967 		time_diff = ktime_get_real_ns() - ptp->reset_time;
2968 		ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);
2969 	} else {
2970 		ts = ktime_to_timespec64(ktime_get_real());
2971 	}
2972 	err = ice_ptp_write_init(pf, &ts);
2973 	if (err)
2974 		goto err_unlock;
2975 
2976 	/* Release the global hardware lock */
2977 	ice_ptp_unlock(hw);
2978 
2979 	/* Flush software tracking of any outstanding timestamps since we're
2980 	 * about to flush the PHY timestamp block.
2981 	 */
2982 	ice_ptp_flush_all_tx_tracker(pf);
2983 
2984 	/* Enable quad interrupts */
2985 	err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
2986 	if (err)
2987 		return err;
2988 
2989 	ice_ptp_restart_all_phy(pf);
2990 
2991 	/* Re-enable all periodic outputs and external timestamp events */
2992 	ice_ptp_enable_all_perout(pf);
2993 	ice_ptp_enable_all_extts(pf);
2994 
2995 	return 0;
2996 
2997 err_unlock:
2998 	ice_ptp_unlock(hw);
2999 	return err;
3000 }
3001 
3002 /**
3003  * ice_ptp_rebuild - Initialize PTP hardware clock support after reset
3004  * @pf: Board private structure
3005  * @reset_type: the reset type being performed
3006  */
3007 void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
3008 {
3009 	struct ice_ptp *ptp = &pf->ptp;
3010 	int err;
3011 
3012 	if (ptp->state == ICE_PTP_READY) {
3013 		ice_ptp_prepare_for_reset(pf, reset_type);
3014 	} else if (ptp->state != ICE_PTP_RESETTING) {
3015 		err = -EINVAL;
3016 		dev_err(ice_pf_to_dev(pf), "PTP was not initialized\n");
3017 		goto err;
3018 	}
3019 
3020 	if (ice_pf_src_tmr_owned(pf) && reset_type != ICE_RESET_PFR) {
3021 		err = ice_ptp_rebuild_owner(pf);
3022 		if (err)
3023 			goto err;
3024 	}
3025 
3026 	ptp->state = ICE_PTP_READY;
3027 
3028 	dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
3029 	return;
3030 
3031 err:
3032 	ptp->state = ICE_PTP_ERROR;
3033 	dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
3034 }
3035 
3036 static int ice_ptp_setup_adapter(struct ice_pf *pf)
3037 {
3038 	if (!ice_pf_src_tmr_owned(pf) || !ice_is_primary(&pf->hw))
3039 		return -EPERM;
3040 
3041 	pf->adapter->ctrl_pf = pf;
3042 
3043 	return 0;
3044 }
3045 
3046 static int ice_ptp_setup_pf(struct ice_pf *pf)
3047 {
3048 	struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
3049 	struct ice_ptp *ptp = &pf->ptp;
3050 
3051 	if (WARN_ON(!ctrl_ptp) || pf->hw.mac_type == ICE_MAC_UNKNOWN)
3052 		return -ENODEV;
3053 
3054 	INIT_LIST_HEAD(&ptp->port.list_node);
3055 	mutex_lock(&pf->adapter->ports.lock);
3056 
3057 	list_add(&ptp->port.list_node,
3058 		 &pf->adapter->ports.ports);
3059 	mutex_unlock(&pf->adapter->ports.lock);
3060 
3061 	return 0;
3062 }
3063 
3064 static void ice_ptp_cleanup_pf(struct ice_pf *pf)
3065 {
3066 	struct ice_ptp *ptp = &pf->ptp;
3067 
3068 	if (pf->hw.mac_type != ICE_MAC_UNKNOWN) {
3069 		mutex_lock(&pf->adapter->ports.lock);
3070 		list_del(&ptp->port.list_node);
3071 		mutex_unlock(&pf->adapter->ports.lock);
3072 	}
3073 }
3074 
3075 /**
3076  * ice_ptp_clock_index - Get the PTP clock index for this device
3077  * @pf: Board private structure
3078  *
3079  * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock
3080  * is associated.
3081  */
3082 int ice_ptp_clock_index(struct ice_pf *pf)
3083 {
3084 	struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
3085 	struct ptp_clock *clock;
3086 
3087 	if (!ctrl_ptp)
3088 		return -1;
3089 	clock = ctrl_ptp->clock;
3090 
3091 	return clock ? ptp_clock_index(clock) : -1;
3092 }
3093 
3094 /**
3095  * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
3096  * @pf: Board private structure
3097  *
3098  * Setup and initialize a PTP clock device that represents the device hardware
3099  * clock. Save the clock index for other functions connected to the same
3100  * hardware resource.
3101  */
3102 static int ice_ptp_init_owner(struct ice_pf *pf)
3103 {
3104 	struct ice_hw *hw = &pf->hw;
3105 	struct timespec64 ts;
3106 	int err;
3107 
3108 	err = ice_ptp_init_phc(hw);
3109 	if (err) {
3110 		dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n",
3111 			err);
3112 		return err;
3113 	}
3114 
3115 	err = ice_tspll_init(hw);
3116 	if (err) {
3117 		dev_err(ice_pf_to_dev(pf), "Failed to initialize CGU, status %d\n",
3118 			err);
3119 		return err;
3120 	}
3121 
3122 	/* Acquire the global hardware lock */
3123 	if (!ice_ptp_lock(hw)) {
3124 		err = -EBUSY;
3125 		goto err_exit;
3126 	}
3127 
3128 	/* Write the increment time value to PHY and LAN */
3129 	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
3130 	if (err)
3131 		goto err_unlock;
3132 
3133 	ts = ktime_to_timespec64(ktime_get_real());
3134 	/* Write the initial Time value to PHY and LAN */
3135 	err = ice_ptp_write_init(pf, &ts);
3136 	if (err)
3137 		goto err_unlock;
3138 
3139 	/* Release the global hardware lock */
3140 	ice_ptp_unlock(hw);
3141 
3142 	/* Configure PHY interrupt settings */
3143 	err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
3144 	if (err)
3145 		goto err_exit;
3146 
3147 	/* Ensure we have a clock device */
3148 	err = ice_ptp_create_clock(pf);
3149 	if (err)
3150 		goto err_clk;
3151 
3152 	return 0;
3153 err_clk:
3154 	pf->ptp.clock = NULL;
3155 err_exit:
3156 	return err;
3157 
3158 err_unlock:
3159 	ice_ptp_unlock(hw);
3160 	return err;
3161 }
3162 
3163 /**
3164  * ice_ptp_init_work - Initialize PTP work threads
3165  * @pf: Board private structure
3166  * @ptp: PF PTP structure
3167  */
3168 static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
3169 {
3170 	struct kthread_worker *kworker;
3171 
3172 	/* Initialize work functions */
3173 	kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);
3174 
3175 	/* Allocate a kworker for handling work required for the ports
3176 	 * connected to the PTP hardware clock.
3177 	 */
3178 	kworker = kthread_run_worker(0, "ice-ptp-%s",
3179 					dev_name(ice_pf_to_dev(pf)));
3180 	if (IS_ERR(kworker))
3181 		return PTR_ERR(kworker);
3182 
3183 	ptp->kworker = kworker;
3184 
3185 	/* Start periodic work going */
3186 	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
3187 
3188 	return 0;
3189 }
3190 
3191 /**
3192  * ice_ptp_init_port - Initialize PTP port structure
3193  * @pf: Board private structure
3194  * @ptp_port: PTP port structure
3195  *
3196  * Return: 0 on success, -ENODEV on invalid MAC type, -ENOMEM on failed alloc.
3197  */
3198 static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
3199 {
3200 	struct ice_hw *hw = &pf->hw;
3201 
3202 	mutex_init(&ptp_port->ps_lock);
3203 
3204 	switch (hw->mac_type) {
3205 	case ICE_MAC_E810:
3206 	case ICE_MAC_E830:
3207 	case ICE_MAC_GENERIC_3K_E825:
3208 		return ice_ptp_init_tx(pf, &ptp_port->tx, ptp_port->port_num);
3209 	case ICE_MAC_GENERIC:
3210 		kthread_init_delayed_work(&ptp_port->ov_work,
3211 					  ice_ptp_wait_for_offsets);
3212 		return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
3213 					    ptp_port->port_num);
3214 	default:
3215 		return -ENODEV;
3216 	}
3217 }
3218 
3219 /**
3220  * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode
3221  * @pf: Board private structure
3222  *
3223  * Initialize the Tx timestamp interrupt mode for this device. For most device
3224  * types, each PF processes the interrupt and manages its own timestamps. For
3225  * E822-based devices, only the clock owner processes the timestamps. Other
3226  * PFs disable the interrupt and do not process their own timestamps.
3227  */
3228 static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
3229 {
3230 	switch (pf->hw.mac_type) {
3231 	case ICE_MAC_GENERIC:
3232 	case ICE_MAC_GENERIC_3K_E825:
3233 		/* E82x hardware has the clock owner process timestamps for
3234 		 * all ports.
3235 		 */
3236 		if (ice_pf_src_tmr_owned(pf))
3237 			pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL;
3238 		else
3239 			pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE;
3240 		break;
3241 	default:
3242 		/* other PHY types handle their own Tx interrupt */
3243 		pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF;
3244 	}
3245 }
3246 
3247 /**
3248  * ice_ptp_init - Initialize PTP hardware clock support
3249  * @pf: Board private structure
3250  *
3251  * Set up the device for interacting with the PTP hardware clock for all
3252  * functions, both the function that owns the clock hardware, and the
3253  * functions connected to the clock hardware.
3254  *
3255  * The clock owner will allocate and register a ptp_clock with the
3256  * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work
3257  * items used for asynchronous work such as Tx timestamps and periodic work.
3258  */
3259 void ice_ptp_init(struct ice_pf *pf)
3260 {
3261 	struct ice_ptp *ptp = &pf->ptp;
3262 	struct ice_hw *hw = &pf->hw;
3263 	int err;
3264 
3265 	ptp->state = ICE_PTP_INITIALIZING;
3266 
3267 	if (hw->lane_num < 0) {
3268 		err = hw->lane_num;
3269 		goto err_exit;
3270 	}
3271 	ptp->port.port_num = hw->lane_num;
3272 
3273 	ice_ptp_init_hw(hw);
3274 
3275 	ice_ptp_init_tx_interrupt_mode(pf);
3276 
3277 	/* If this function owns the clock hardware, it must allocate and
3278 	 * configure the PTP clock device to represent it.
3279 	 */
3280 	if (ice_pf_src_tmr_owned(pf) && ice_is_primary(hw)) {
3281 		err = ice_ptp_setup_adapter(pf);
3282 		if (err)
3283 			goto err_exit;
3284 		err = ice_ptp_init_owner(pf);
3285 		if (err)
3286 			goto err_exit;
3287 	}
3288 
3289 	err = ice_ptp_setup_pf(pf);
3290 	if (err)
3291 		goto err_exit;
3292 
3293 	err = ice_ptp_init_port(pf, &ptp->port);
3294 	if (err)
3295 		goto err_clean_pf;
3296 
3297 	/* Start the PHY timestamping block */
3298 	ice_ptp_reset_phy_timestamping(pf);
3299 
3300 	/* Configure initial Tx interrupt settings */
3301 	ice_ptp_cfg_tx_interrupt(pf);
3302 
3303 	ptp->state = ICE_PTP_READY;
3304 
3305 	err = ice_ptp_init_work(pf, ptp);
3306 	if (err)
3307 		goto err_exit;
3308 
3309 	dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
3310 	return;
3311 
3312 err_clean_pf:
3313 	mutex_destroy(&ptp->port.ps_lock);
3314 	ice_ptp_cleanup_pf(pf);
3315 err_exit:
3316 	/* If we registered a PTP clock, release it */
3317 	if (pf->ptp.clock) {
3318 		ptp_clock_unregister(ptp->clock);
3319 		pf->ptp.clock = NULL;
3320 	}
3321 	/* Keep ICE_PTP_UNINIT state to avoid ambiguity at driver unload
3322 	 * and to avoid duplicate resource release.
3323 	 */
3324 	ptp->state = ICE_PTP_UNINIT;
3325 	dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
3326 }
3327 
3328 /**
3329  * ice_ptp_release - Disable the driver/HW support and unregister the clock
3330  * @pf: Board private structure
3331  *
3332  * This function handles the cleanup work required after initialization,
3333  * clearing out the important information and unregistering the clock.
3334  */
3335 void ice_ptp_release(struct ice_pf *pf)
3336 {
3337 	if (pf->ptp.state == ICE_PTP_UNINIT)
3338 		return;
3339 
3340 	if (pf->ptp.state != ICE_PTP_READY) {
3341 		mutex_destroy(&pf->ptp.port.ps_lock);
3342 		ice_ptp_cleanup_pf(pf);
3343 		if (pf->ptp.clock) {
3344 			ptp_clock_unregister(pf->ptp.clock);
3345 			pf->ptp.clock = NULL;
3346 		}
3347 		return;
3348 	}
3349 
3350 	pf->ptp.state = ICE_PTP_UNINIT;
3351 
3352 	/* Disable timestamping for both Tx and Rx */
3353 	ice_ptp_disable_timestamp_mode(pf);
3354 
3355 	ice_ptp_cleanup_pf(pf);
3356 
3357 	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
3358 
3359 	ice_ptp_disable_all_extts(pf);
3360 
3361 	kthread_cancel_delayed_work_sync(&pf->ptp.work);
3362 
3363 	ice_ptp_port_phy_stop(&pf->ptp.port);
3364 	mutex_destroy(&pf->ptp.port.ps_lock);
3365 	if (pf->ptp.kworker) {
3366 		kthread_destroy_worker(pf->ptp.kworker);
3367 		pf->ptp.kworker = NULL;
3368 	}
3369 
3370 	if (!pf->ptp.clock)
3371 		return;
3372 
3373 	/* Disable periodic outputs */
3374 	ice_ptp_disable_all_perout(pf);
3375 
3376 	ptp_clock_unregister(pf->ptp.clock);
3377 	pf->ptp.clock = NULL;
3378 
3379 	dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
3380 }
3381