xref: /linux/drivers/net/ethernet/intel/ice/ice_ptp_hw.c (revision 24bce201d79807b668bf9d9e0aca801c5c0d5f78)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021, Intel Corporation. */
3 
4 #include "ice_common.h"
5 #include "ice_ptp_hw.h"
6 #include "ice_ptp_consts.h"
7 #include "ice_cgu_regs.h"
8 
9 /* Low level functions for interacting with and managing the device clock used
10  * for the Precision Time Protocol.
11  *
12  * The ice hardware represents the current time using three registers:
13  *
14  *    GLTSYN_TIME_H     GLTSYN_TIME_L     GLTSYN_TIME_R
15  *  +---------------+ +---------------+ +---------------+
16  *  |    32 bits    | |    32 bits    | |    32 bits    |
17  *  +---------------+ +---------------+ +---------------+
18  *
19  * The registers are incremented every clock tick using a 40bit increment
20  * value defined over two registers:
21  *
22  *                     GLTSYN_INCVAL_H   GLTSYN_INCVAL_L
23  *                    +---------------+ +---------------+
24  *                    |    8 bits     | |    32 bits    |
25  *                    +---------------+ +---------------+
26  *
27  * The increment value is added to the GLTSYN_TIME_R and GLTSYN_TIME_L
28  * registers every clock source tick. Depending on the specific device
29  * configuration, the clock source frequency could be one of a number of
30  * values.
31  *
32  * For E810 devices, the increment frequency is 812.5 MHz
33  *
34  * For E822 devices the clock can be derived from different sources, and the
35  * increment has an effective frequency of one of the following:
36  * - 823.4375 MHz
37  * - 783.36 MHz
38  * - 796.875 MHz
39  * - 816 MHz
40  * - 830.078125 MHz
41  * - 783.36 MHz
42  *
43  * The hardware captures timestamps in the PHY for incoming packets, and for
44  * outgoing packets on request. To support this, the PHY maintains a timer
45  * that matches the lower 64 bits of the global source timer.
46  *
47  * In order to ensure that the PHY timers and the source timer are equivalent,
48  * shadow registers are used to prepare the desired initial values. A special
49  * sync command is issued to trigger copying from the shadow registers into
50  * the appropriate source and PHY registers simultaneously.
51  *
52  * The driver supports devices which have different PHYs with subtly different
53  * mechanisms to program and control the timers. We divide the devices into
54  * families named after the first major device, E810 and similar devices, and
55  * E822 and similar devices.
56  *
57  * - E822 based devices have additional support for fine grained Vernier
58  *   calibration which requires significant setup
59  * - The layout of timestamp data in the PHY register blocks is different
60  * - The way timer synchronization commands are issued is different.
61  *
62  * To support this, very low level functions have an e810 or e822 suffix
63  * indicating what type of device they work on. Higher level abstractions for
64  * tasks that can be done on both devices do not have the suffix and will
65  * correctly look up the appropriate low level function when running.
66  *
67  * Functions which only make sense on a single device family may not have
68  * a suitable generic implementation.
69  */
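
/* Illustrative sketch (not part of the driver): because each time unit is
 * 2^-32 ns, a nominal increment value can be derived directly from the clock
 * frequency. The helper below is hypothetical and assumes the frequency fits
 * in 32 bits.
 */
static inline u64 ice_example_nominal_incval(u32 freq_hz)
{
	/* incval = (1e9 ns/s * 2^32 TU/ns) / (cycles/s) = TU per cycle */
	return div_u64(1000000000ULL << 32, freq_hz);
}

/* For the 812.5 MHz E810 clock mentioned above this works out to roughly
 * 0x13B13B13B, i.e. about 1.23 ns of time added per clock tick.
 */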
70 
71 /**
72  * ice_get_ptp_src_clock_index - determine source clock index
73  * @hw: pointer to HW struct
74  *
75  * Determine the source clock index currently in use, based on device
76  * capabilities reported during initialization.
77  */
78 u8 ice_get_ptp_src_clock_index(struct ice_hw *hw)
79 {
80 	return hw->func_caps.ts_func_info.tmr_index_assoc;
81 }
82 
83 /**
84  * ice_ptp_read_src_incval - Read source timer increment value
85  * @hw: pointer to HW struct
86  *
87  * Read the increment value of the source timer and return it.
88  */
89 static u64 ice_ptp_read_src_incval(struct ice_hw *hw)
90 {
91 	u32 lo, hi;
92 	u8 tmr_idx;
93 
94 	tmr_idx = ice_get_ptp_src_clock_index(hw);
95 
96 	lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
97 	hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
98 
99 	return ((u64)(hi & INCVAL_HIGH_M) << 32) | lo;
100 }
101 
102 /**
103  * ice_ptp_src_cmd - Prepare source timer for a timer command
104  * @hw: pointer to HW structure
105  * @cmd: Timer command
106  *
107  * Prepare the source timer for an upcoming timer sync command.
108  */
109 static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
110 {
111 	u32 cmd_val;
112 	u8 tmr_idx;
113 
114 	tmr_idx = ice_get_ptp_src_clock_index(hw);
115 	cmd_val = tmr_idx << SEL_CPK_SRC;
116 
117 	switch (cmd) {
118 	case INIT_TIME:
119 		cmd_val |= GLTSYN_CMD_INIT_TIME;
120 		break;
121 	case INIT_INCVAL:
122 		cmd_val |= GLTSYN_CMD_INIT_INCVAL;
123 		break;
124 	case ADJ_TIME:
125 		cmd_val |= GLTSYN_CMD_ADJ_TIME;
126 		break;
127 	case ADJ_TIME_AT_TIME:
128 		cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
129 		break;
130 	case READ_TIME:
131 		cmd_val |= GLTSYN_CMD_READ_TIME;
132 		break;
133 	}
134 
135 	wr32(hw, GLTSYN_CMD, cmd_val);
136 }
137 
138 /**
139  * ice_ptp_exec_tmr_cmd - Execute all prepared timer commands
140  * @hw: pointer to HW struct
141  *
142  * Write the SYNC_EXEC_CMD bit to the GLTSYN_CMD_SYNC register, and flush the
143  * write immediately. This triggers the hardware to begin executing all of the
144  * source and PHY timer commands synchronously.
145  */
146 static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
147 {
148 	wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
149 	ice_flush(hw);
150 }
151 
152 /* E822 family functions
153  *
154  * The following functions operate on the E822 family of devices.
155  */
156 
157 /**
158  * ice_fill_phy_msg_e822 - Fill message data for a PHY register access
159  * @msg: the PHY message buffer to fill in
160  * @port: the port to access
161  * @offset: the register offset
162  */
163 static void
164 ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
165 {
166 	int phy_port, phy, quadtype;
167 
168 	phy_port = port % ICE_PORTS_PER_PHY;
169 	phy = port / ICE_PORTS_PER_PHY;
170 	quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_NUM_QUAD_TYPE;
171 
172 	if (quadtype == 0) {
173 		msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
174 		msg->msg_addr_high = P_Q0_H(P_0_BASE + offset, phy_port);
175 	} else {
176 		msg->msg_addr_low = P_Q1_L(P_4_BASE + offset, phy_port);
177 		msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
178 	}
179 
180 	if (phy == 0)
181 		msg->dest_dev = rmn_0;
182 	else if (phy == 1)
183 		msg->dest_dev = rmn_1;
184 	else
185 		msg->dest_dev = rmn_2;
186 }
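
/* Worked example (assuming ICE_PORTS_PER_PHY == 8, ICE_PORTS_PER_QUAD == 4
 * and ICE_NUM_QUAD_TYPE == 2): port 5 decomposes into phy 0 (5 / 8),
 * phy_port 5 (5 % 8) and quadtype 1 ((5 / 4) % 2), so its registers are
 * addressed from the P_4_BASE block and the message targets rmn_0.
 */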
187 
188 /**
189  * ice_is_64b_phy_reg_e822 - Check if this is a 64bit PHY register
190  * @low_addr: the low address to check
191  * @high_addr: on return, contains the high address of the 64bit register
192  *
193  * Checks if the provided low address is one of the known 64bit PHY values
194  * represented as two 32bit registers. If it is, return the appropriate high
195  * register offset to use.
196  */
197 static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr)
198 {
199 	switch (low_addr) {
200 	case P_REG_PAR_PCS_TX_OFFSET_L:
201 		*high_addr = P_REG_PAR_PCS_TX_OFFSET_U;
202 		return true;
203 	case P_REG_PAR_PCS_RX_OFFSET_L:
204 		*high_addr = P_REG_PAR_PCS_RX_OFFSET_U;
205 		return true;
206 	case P_REG_PAR_TX_TIME_L:
207 		*high_addr = P_REG_PAR_TX_TIME_U;
208 		return true;
209 	case P_REG_PAR_RX_TIME_L:
210 		*high_addr = P_REG_PAR_RX_TIME_U;
211 		return true;
212 	case P_REG_TOTAL_TX_OFFSET_L:
213 		*high_addr = P_REG_TOTAL_TX_OFFSET_U;
214 		return true;
215 	case P_REG_TOTAL_RX_OFFSET_L:
216 		*high_addr = P_REG_TOTAL_RX_OFFSET_U;
217 		return true;
218 	case P_REG_UIX66_10G_40G_L:
219 		*high_addr = P_REG_UIX66_10G_40G_U;
220 		return true;
221 	case P_REG_UIX66_25G_100G_L:
222 		*high_addr = P_REG_UIX66_25G_100G_U;
223 		return true;
224 	case P_REG_TX_CAPTURE_L:
225 		*high_addr = P_REG_TX_CAPTURE_U;
226 		return true;
227 	case P_REG_RX_CAPTURE_L:
228 		*high_addr = P_REG_RX_CAPTURE_U;
229 		return true;
230 	case P_REG_TX_TIMER_INC_PRE_L:
231 		*high_addr = P_REG_TX_TIMER_INC_PRE_U;
232 		return true;
233 	case P_REG_RX_TIMER_INC_PRE_L:
234 		*high_addr = P_REG_RX_TIMER_INC_PRE_U;
235 		return true;
236 	default:
237 		return false;
238 	}
239 }
240 
241 /**
242  * ice_is_40b_phy_reg_e822 - Check if this is a 40bit PHY register
243  * @low_addr: the low address to check
244  * @high_addr: on return, contains the high address of the 40bit value
245  *
246  * Checks if the provided low address is one of the known 40bit PHY values
247  * split into two registers with the lower 8 bits in the low register and the
248  * upper 32 bits in the high register. If it is, return the appropriate high
249  * register offset to use.
250  */
251 static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr)
252 {
253 	switch (low_addr) {
254 	case P_REG_TIMETUS_L:
255 		*high_addr = P_REG_TIMETUS_U;
256 		return true;
257 	case P_REG_PAR_RX_TUS_L:
258 		*high_addr = P_REG_PAR_RX_TUS_U;
259 		return true;
260 	case P_REG_PAR_TX_TUS_L:
261 		*high_addr = P_REG_PAR_TX_TUS_U;
262 		return true;
263 	case P_REG_PCS_RX_TUS_L:
264 		*high_addr = P_REG_PCS_RX_TUS_U;
265 		return true;
266 	case P_REG_PCS_TX_TUS_L:
267 		*high_addr = P_REG_PCS_TX_TUS_U;
268 		return true;
269 	case P_REG_DESK_PAR_RX_TUS_L:
270 		*high_addr = P_REG_DESK_PAR_RX_TUS_U;
271 		return true;
272 	case P_REG_DESK_PAR_TX_TUS_L:
273 		*high_addr = P_REG_DESK_PAR_TX_TUS_U;
274 		return true;
275 	case P_REG_DESK_PCS_RX_TUS_L:
276 		*high_addr = P_REG_DESK_PCS_RX_TUS_U;
277 		return true;
278 	case P_REG_DESK_PCS_TX_TUS_L:
279 		*high_addr = P_REG_DESK_PCS_TX_TUS_U;
280 		return true;
281 	default:
282 		return false;
283 	}
284 }
285 
286 /**
287  * ice_read_phy_reg_e822 - Read a PHY register
288  * @hw: pointer to the HW struct
289  * @port: PHY port to read from
290  * @offset: PHY register offset to read
291  * @val: on return, the contents read from the PHY
292  *
293  * Read a PHY register for the given port over the device sideband queue.
294  */
295 int
296 ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
297 {
298 	struct ice_sbq_msg_input msg = {0};
299 	int err;
300 
301 	ice_fill_phy_msg_e822(&msg, port, offset);
302 	msg.opcode = ice_sbq_msg_rd;
303 
304 	err = ice_sbq_rw_reg(hw, &msg);
305 	if (err) {
306 		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
307 			  err);
308 		return err;
309 	}
310 
311 	*val = msg.data;
312 
313 	return 0;
314 }
315 
316 /**
317  * ice_read_64b_phy_reg_e822 - Read a 64bit value from PHY registers
318  * @hw: pointer to the HW struct
319  * @port: PHY port to read from
320  * @low_addr: offset of the lower register to read from
321  * @val: on return, the contents of the 64bit value from the PHY registers
322  *
323  * Reads the two registers associated with a 64bit value and returns it in the
324  * val pointer. The offset always specifies the lower register offset to use.
325  * The high offset is looked up. This function only operates on registers
326  * known to be two parts of a 64bit value.
327  */
328 static int
329 ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
330 {
331 	u32 low, high;
332 	u16 high_addr;
333 	int err;
334 
335 	/* Only operate on registers known to be split into two 32bit
336 	 * registers.
337 	 */
338 	if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
339 		ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
340 			  low_addr);
341 		return -EINVAL;
342 	}
343 
344 	err = ice_read_phy_reg_e822(hw, port, low_addr, &low);
345 	if (err) {
346 		ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x, err %d\n",
347 			  low_addr, err);
348 		return err;
349 	}
350 
351 	err = ice_read_phy_reg_e822(hw, port, high_addr, &high);
352 	if (err) {
353 		ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x, err %d\n",
354 			  high_addr, err);
355 		return err;
356 	}
357 
358 	*val = (u64)high << 32 | low;
359 
360 	return 0;
361 }
362 
363 /**
364  * ice_write_phy_reg_e822 - Write a PHY register
365  * @hw: pointer to the HW struct
366  * @port: PHY port to write to
367  * @offset: PHY register offset to write
368  * @val: The value to write to the register
369  *
370  * Write a PHY register for the given port over the device sideband queue.
371  */
372 int
373 ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
374 {
375 	struct ice_sbq_msg_input msg = {0};
376 	int err;
377 
378 	ice_fill_phy_msg_e822(&msg, port, offset);
379 	msg.opcode = ice_sbq_msg_wr;
380 	msg.data = val;
381 
382 	err = ice_sbq_rw_reg(hw, &msg);
383 	if (err) {
384 		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
385 			  err);
386 		return err;
387 	}
388 
389 	return 0;
390 }
391 
392 /**
393  * ice_write_40b_phy_reg_e822 - Write a 40b value to the PHY
394  * @hw: pointer to the HW struct
395  * @port: port to write to
396  * @low_addr: offset of the low register
397  * @val: 40b value to write
398  *
399  * Write the provided 40b value to the two associated registers by splitting
400  * it up into two chunks, the lower 8 bits and the upper 32 bits.
401  */
402 static int
403 ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
404 {
405 	u32 low, high;
406 	u16 high_addr;
407 	int err;
408 
409 	/* Only operate on registers known to be split into a lower 8 bit
410 	 * register and an upper 32 bit register.
411 	 */
412 	if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) {
413 		ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n",
414 			  low_addr);
415 		return -EINVAL;
416 	}
417 
418 	low = (u32)(val & P_REG_40B_LOW_M);
419 	high = (u32)(val >> P_REG_40B_HIGH_S);
420 
421 	err = ice_write_phy_reg_e822(hw, port, low_addr, low);
422 	if (err) {
423 		ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x, err %d\n",
424 			  low_addr, err);
425 		return err;
426 	}
427 
428 	err = ice_write_phy_reg_e822(hw, port, high_addr, high);
429 	if (err) {
430 		ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x, err %d\n",
431 			  high_addr, err);
432 		return err;
433 	}
434 
435 	return 0;
436 }
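
/* Worked example (assuming P_REG_40B_LOW_M is 0xFF and P_REG_40B_HIGH_S is
 * 8): the 40-bit value 0x123456789A is split into low = 0x9A and
 * high = 0x12345678 before being written to the two registers.
 */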
437 
438 /**
439  * ice_write_64b_phy_reg_e822 - Write a 64bit value to PHY registers
440  * @hw: pointer to the HW struct
441  * @port: PHY port to read from
442  * @low_addr: offset of the lower register to read from
443  * @val: the contents of the 64bit value to write to PHY
444  *
445  * Write the 64bit value to the two associated 32bit PHY registers. The offset
446  * is always specified as the lower register, and the high address is looked
447  * up. This function only operates on registers known to be two parts of
448  * a 64bit value.
449  */
450 static int
451 ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
452 {
453 	u32 low, high;
454 	u16 high_addr;
455 	int err;
456 
457 	/* Only operate on registers known to be split into two 32bit
458 	 * registers.
459 	 */
460 	if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
461 		ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
462 			  low_addr);
463 		return -EINVAL;
464 	}
465 
466 	low = lower_32_bits(val);
467 	high = upper_32_bits(val);
468 
469 	err = ice_write_phy_reg_e822(hw, port, low_addr, low);
470 	if (err) {
471 		ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x, err %d\n",
472 			  low_addr, err);
473 		return err;
474 	}
475 
476 	err = ice_write_phy_reg_e822(hw, port, high_addr, high);
477 	if (err) {
478 		ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x, err %d\n",
479 			  high_addr, err);
480 		return err;
481 	}
482 
483 	return 0;
484 }
485 
486 /**
487  * ice_fill_quad_msg_e822 - Fill message data for quad register access
488  * @msg: the PHY message buffer to fill in
489  * @quad: the quad to access
490  * @offset: the register offset
491  *
492  * Fill a message buffer for accessing a register in a quad shared between
493  * multiple PHYs.
494  */
495 static void
496 ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
497 {
498 	u32 addr;
499 
500 	msg->dest_dev = rmn_0;
501 
502 	if ((quad % ICE_NUM_QUAD_TYPE) == 0)
503 		addr = Q_0_BASE + offset;
504 	else
505 		addr = Q_1_BASE + offset;
506 
507 	msg->msg_addr_low = lower_16_bits(addr);
508 	msg->msg_addr_high = upper_16_bits(addr);
509 }
510 
511 /**
512  * ice_read_quad_reg_e822 - Read a PHY quad register
513  * @hw: pointer to the HW struct
514  * @quad: quad to read from
515  * @offset: quad register offset to read
516  * @val: on return, the contents read from the quad
517  *
518  * Read a quad register over the device sideband queue. Quad registers are
519  * shared between multiple PHYs.
520  */
521 int
522 ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
523 {
524 	struct ice_sbq_msg_input msg = {0};
525 	int err;
526 
527 	if (quad >= ICE_MAX_QUAD)
528 		return -EINVAL;
529 
530 	ice_fill_quad_msg_e822(&msg, quad, offset);
531 	msg.opcode = ice_sbq_msg_rd;
532 
533 	err = ice_sbq_rw_reg(hw, &msg);
534 	if (err) {
535 		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
536 			  err);
537 		return err;
538 	}
539 
540 	*val = msg.data;
541 
542 	return 0;
543 }
544 
545 /**
546  * ice_write_quad_reg_e822 - Write a PHY quad register
547  * @hw: pointer to the HW struct
548  * @quad: quad to write to
549  * @offset: quad register offset to write
550  * @val: The value to write to the register
551  *
552  * Write a quad register over the device sideband queue. Quad registers are
553  * shared between multiple PHYs.
554  */
555 int
556 ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
557 {
558 	struct ice_sbq_msg_input msg = {0};
559 	int err;
560 
561 	if (quad >= ICE_MAX_QUAD)
562 		return -EINVAL;
563 
564 	ice_fill_quad_msg_e822(&msg, quad, offset);
565 	msg.opcode = ice_sbq_msg_wr;
566 	msg.data = val;
567 
568 	err = ice_sbq_rw_reg(hw, &msg);
569 	if (err) {
570 		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
571 			  err);
572 		return err;
573 	}
574 
575 	return 0;
576 }
577 
578 /**
579  * ice_read_phy_tstamp_e822 - Read a PHY timestamp out of the quad block
580  * @hw: pointer to the HW struct
581  * @quad: the quad to read from
582  * @idx: the timestamp index to read
583  * @tstamp: on return, the 40bit timestamp value
584  *
585  * Read a 40bit timestamp value out of the two associated registers in the
586  * quad memory block that is shared between the internal PHYs of the E822
587  * family of devices.
588  */
589 static int
590 ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
591 {
592 	u16 lo_addr, hi_addr;
593 	u32 lo, hi;
594 	int err;
595 
596 	lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
597 	hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
598 
599 	err = ice_read_quad_reg_e822(hw, quad, lo_addr, &lo);
600 	if (err) {
601 		ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
602 			  err);
603 		return err;
604 	}
605 
606 	err = ice_read_quad_reg_e822(hw, quad, hi_addr, &hi);
607 	if (err) {
608 		ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
609 			  err);
610 		return err;
611 	}
612 
613 	/* For E822 based internal PHYs, the timestamp is reported with the
614 	 * lower 8 bits in the low register, and the upper 32 bits in the high
615 	 * register.
616 	 */
617 	*tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
618 
619 	return 0;
620 }
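
/* Illustrative sketch (hypothetical helper; the driver's upper layer performs
 * the equivalent extension in ice_ptp.c): the 40-bit capture carries the
 * valid flag and sub-nanosecond bits in its low byte, with 32 bits of
 * nanoseconds above it, so turning it into a full 64-bit time requires
 * a recently cached read of the PHC.
 */
static inline u64 ice_example_extend_40b_ts(u64 cached_phc_time, u64 ts_40b)
{
	u32 ts_low = (ts_40b >> 8) & 0xFFFFFFFF; /* drop valid/sub-ns byte */
	u32 phc_low = lower_32_bits(cached_phc_time);
	u32 delta = ts_low - phc_low;

	/* A very large apparent delta means the timestamp predates the
	 * cached time rather than following it.
	 */
	if (delta > (U32_MAX / 2))
		return cached_phc_time - (phc_low - ts_low);

	return cached_phc_time + delta;
}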
621 
622 /**
623  * ice_clear_phy_tstamp_e822 - Clear a timestamp from the quad block
624  * @hw: pointer to the HW struct
625  * @quad: the quad to read from
626  * @idx: the timestamp index to reset
627  *
628  * Clear a timestamp, resetting its valid bit, from the PHY quad block that is
629  * shared between the internal PHYs on the E822 devices.
630  */
631 static int
632 ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
633 {
634 	u16 lo_addr, hi_addr;
635 	int err;
636 
637 	lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
638 	hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
639 
640 	err = ice_write_quad_reg_e822(hw, quad, lo_addr, 0);
641 	if (err) {
642 		ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
643 			  err);
644 		return err;
645 	}
646 
647 	err = ice_write_quad_reg_e822(hw, quad, hi_addr, 0);
648 	if (err) {
649 		ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
650 			  err);
651 		return err;
652 	}
653 
654 	return 0;
655 }
656 
657 /**
658  * ice_read_cgu_reg_e822 - Read a CGU register
659  * @hw: pointer to the HW struct
660  * @addr: Register address to read
661  * @val: storage for register value read
662  *
663  * Read the contents of a register of the Clock Generation Unit. Only
664  * applicable to E822 devices.
665  */
666 static int
667 ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val)
668 {
669 	struct ice_sbq_msg_input cgu_msg;
670 	int err;
671 
672 	cgu_msg.opcode = ice_sbq_msg_rd;
673 	cgu_msg.dest_dev = cgu;
674 	cgu_msg.msg_addr_low = addr;
675 	cgu_msg.msg_addr_high = 0x0;
676 
677 	err = ice_sbq_rw_reg(hw, &cgu_msg);
678 	if (err) {
679 		ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
680 			  addr, err);
681 		return err;
682 	}
683 
684 	*val = cgu_msg.data;
685 
686 	return err;
687 }
688 
689 /**
690  * ice_write_cgu_reg_e822 - Write a CGU register
691  * @hw: pointer to the HW struct
692  * @addr: Register address to write
693  * @val: value to write into the register
694  *
695  * Write the specified value to a register of the Clock Generation Unit. Only
696  * applicable to E822 devices.
697  */
698 static int
699 ice_write_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 val)
700 {
701 	struct ice_sbq_msg_input cgu_msg;
702 	int err;
703 
704 	cgu_msg.opcode = ice_sbq_msg_wr;
705 	cgu_msg.dest_dev = cgu;
706 	cgu_msg.msg_addr_low = addr;
707 	cgu_msg.msg_addr_high = 0x0;
708 	cgu_msg.data = val;
709 
710 	err = ice_sbq_rw_reg(hw, &cgu_msg);
711 	if (err) {
712 		ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
713 			  addr, err);
714 		return err;
715 	}
716 
717 	return err;
718 }
719 
720 /**
721  * ice_clk_freq_str - Convert time_ref_freq to string
722  * @clk_freq: Clock frequency
723  *
724  * Convert the specified TIME_REF clock frequency to a string.
725  */
726 static const char *ice_clk_freq_str(u8 clk_freq)
727 {
728 	switch ((enum ice_time_ref_freq)clk_freq) {
729 	case ICE_TIME_REF_FREQ_25_000:
730 		return "25 MHz";
731 	case ICE_TIME_REF_FREQ_122_880:
732 		return "122.88 MHz";
733 	case ICE_TIME_REF_FREQ_125_000:
734 		return "125 MHz";
735 	case ICE_TIME_REF_FREQ_153_600:
736 		return "153.6 MHz";
737 	case ICE_TIME_REF_FREQ_156_250:
738 		return "156.25 MHz";
739 	case ICE_TIME_REF_FREQ_245_760:
740 		return "245.76 MHz";
741 	default:
742 		return "Unknown";
743 	}
744 }
745 
746 /**
747  * ice_clk_src_str - Convert time_ref_src to string
748  * @clk_src: Clock source
749  *
750  * Convert the specified clock source to its string name.
751  */
752 static const char *ice_clk_src_str(u8 clk_src)
753 {
754 	switch ((enum ice_clk_src)clk_src) {
755 	case ICE_CLK_SRC_TCX0:
756 		return "TCX0";
757 	case ICE_CLK_SRC_TIME_REF:
758 		return "TIME_REF";
759 	default:
760 		return "Unknown";
761 	}
762 }
763 
764 /**
765  * ice_cfg_cgu_pll_e822 - Configure the Clock Generation Unit
766  * @hw: pointer to the HW struct
767  * @clk_freq: Clock frequency to program
768  * @clk_src: Clock source to select (TIME_REF, or TCX0)
769  *
770  * Configure the Clock Generation Unit with the desired clock frequency and
771  * time reference, enabling the PLL which drives the PTP hardware clock.
772  */
773 static int
774 ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
775 		     enum ice_clk_src clk_src)
776 {
777 	union tspll_ro_bwm_lf bwm_lf;
778 	union nac_cgu_dword19 dw19;
779 	union nac_cgu_dword22 dw22;
780 	union nac_cgu_dword24 dw24;
781 	union nac_cgu_dword9 dw9;
782 	int err;
783 
784 	if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
785 		dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
786 			 clk_freq);
787 		return -EINVAL;
788 	}
789 
790 	if (clk_src >= NUM_ICE_CLK_SRC) {
791 		dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
792 			 clk_src);
793 		return -EINVAL;
794 	}
795 
796 	if (clk_src == ICE_CLK_SRC_TCX0 &&
797 	    clk_freq != ICE_TIME_REF_FREQ_25_000) {
798 		dev_warn(ice_hw_to_dev(hw),
799 			 "TCX0 only supports 25 MHz frequency\n");
800 		return -EINVAL;
801 	}
802 
803 	err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD9, &dw9.val);
804 	if (err)
805 		return err;
806 
807 	err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
808 	if (err)
809 		return err;
810 
811 	err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
812 	if (err)
813 		return err;
814 
815 	/* Log the current clock configuration */
816 	ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
817 		  dw24.field.ts_pll_enable ? "enabled" : "disabled",
818 		  ice_clk_src_str(dw24.field.time_ref_sel),
819 		  ice_clk_freq_str(dw9.field.time_ref_freq_sel),
820 		  bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
821 
822 	/* Disable the PLL before changing the clock source or frequency */
823 	if (dw24.field.ts_pll_enable) {
824 		dw24.field.ts_pll_enable = 0;
825 
826 		err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
827 		if (err)
828 			return err;
829 	}
830 
831 	/* Set the frequency */
832 	dw9.field.time_ref_freq_sel = clk_freq;
833 	err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD9, dw9.val);
834 	if (err)
835 		return err;
836 
837 	/* Configure the TS PLL feedback divisor */
838 	err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD19, &dw19.val);
839 	if (err)
840 		return err;
841 
842 	dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
843 	dw19.field.tspll_ndivratio = 1;
844 
845 	err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD19, dw19.val);
846 	if (err)
847 		return err;
848 
849 	/* Configure the TS PLL post divisor */
850 	err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD22, &dw22.val);
851 	if (err)
852 		return err;
853 
854 	dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
855 	dw22.field.time1588clk_sel_div2 = 0;
856 
857 	err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD22, dw22.val);
858 	if (err)
859 		return err;
860 
861 	/* Configure the TS PLL pre divisor and clock source */
862 	err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
863 	if (err)
864 		return err;
865 
866 	dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
867 	dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
868 	dw24.field.time_ref_sel = clk_src;
869 
870 	err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
871 	if (err)
872 		return err;
873 
874 	/* Finally, enable the PLL */
875 	dw24.field.ts_pll_enable = 1;
876 
877 	err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
878 	if (err)
879 		return err;
880 
881 	/* Wait to verify if the PLL locks */
882 	usleep_range(1000, 5000);
883 
884 	err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
885 	if (err)
886 		return err;
887 
888 	if (!bwm_lf.field.plllock_true_lock_cri) {
889 		dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
890 		return -EBUSY;
891 	}
892 
893 	/* Log the current clock configuration */
894 	ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
895 		  dw24.field.ts_pll_enable ? "enabled" : "disabled",
896 		  ice_clk_src_str(dw24.field.time_ref_sel),
897 		  ice_clk_freq_str(dw9.field.time_ref_freq_sel),
898 		  bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
899 
900 	return 0;
901 }
902 
903 /**
904  * ice_init_cgu_e822 - Initialize CGU with settings from firmware
905  * @hw: pointer to the HW structure
906  *
907  * Initialize the Clock Generation Unit of the E822 device.
908  */
909 static int ice_init_cgu_e822(struct ice_hw *hw)
910 {
911 	struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
912 	union tspll_cntr_bist_settings cntr_bist;
913 	int err;
914 
915 	err = ice_read_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
916 				    &cntr_bist.val);
917 	if (err)
918 		return err;
919 
920 	/* Disable sticky lock detection so lock err reported is accurate */
921 	cntr_bist.field.i_plllock_sel_0 = 0;
922 	cntr_bist.field.i_plllock_sel_1 = 0;
923 
924 	err = ice_write_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
925 				     cntr_bist.val);
926 	if (err)
927 		return err;
928 
929 	/* Configure the CGU PLL using the parameters from the function
930 	 * capabilities.
931 	 */
932 	err = ice_cfg_cgu_pll_e822(hw, ts_info->time_ref,
933 				   (enum ice_clk_src)ts_info->clk_src);
934 	if (err)
935 		return err;
936 
937 	return 0;
938 }
939 
940 /**
941  * ice_ptp_set_vernier_wl - Set the window length for vernier calibration
942  * @hw: pointer to the HW struct
943  *
944  * Set the window length used for the vernier port calibration process.
945  */
946 static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
947 {
948 	u8 port;
949 
950 	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
951 		int err;
952 
953 		err = ice_write_phy_reg_e822(hw, port, P_REG_WL,
954 					     PTP_VERNIER_WL);
955 		if (err) {
956 			ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, err %d\n",
957 				  port, err);
958 			return err;
959 		}
960 	}
961 
962 	return 0;
963 }
964 
965 /**
966  * ice_ptp_init_phc_e822 - Perform E822 specific PHC initialization
967  * @hw: pointer to HW struct
968  *
969  * Perform PHC initialization steps specific to E822 devices.
970  */
971 static int ice_ptp_init_phc_e822(struct ice_hw *hw)
972 {
973 	int err;
974 	u32 regval;
975 
976 	/* Enable reading switch and PHY registers over the sideband queue */
977 #define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1)
978 #define PF_SB_REM_DEV_CTL_PHY0 BIT(2)
979 	regval = rd32(hw, PF_SB_REM_DEV_CTL);
980 	regval |= (PF_SB_REM_DEV_CTL_SWITCH_READ |
981 		   PF_SB_REM_DEV_CTL_PHY0);
982 	wr32(hw, PF_SB_REM_DEV_CTL, regval);
983 
984 	/* Initialize the Clock Generation Unit */
985 	err = ice_init_cgu_e822(hw);
986 	if (err)
987 		return err;
988 
989 	/* Set window length for all the ports */
990 	return ice_ptp_set_vernier_wl(hw);
991 }
992 
993 /**
994  * ice_ptp_prep_phy_time_e822 - Prepare PHY port with initial time
995  * @hw: pointer to the HW struct
996  * @time: Time to initialize the PHY port clocks to
997  *
998  * Program the PHY port registers with a new initial time value. The port
999  * clock will be initialized once the driver issues an INIT_TIME sync
1000  * command. The time value is the upper 32 bits of the PHY timer, usually in
1001  * units of nominal nanoseconds.
1002  */
1003 static int
1004 ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time)
1005 {
1006 	u64 phy_time;
1007 	u8 port;
1008 	int err;
1009 
1010 	/* The time represents the upper 32 bits of the PHY timer, so we need
1011 	 * to shift to account for this when programming.
1012 	 */
1013 	phy_time = (u64)time << 32;
1014 
1015 	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1016 		/* Tx case */
1017 		err = ice_write_64b_phy_reg_e822(hw, port,
1018 						 P_REG_TX_TIMER_INC_PRE_L,
1019 						 phy_time);
1020 		if (err)
1021 			goto exit_err;
1022 
1023 		/* Rx case */
1024 		err = ice_write_64b_phy_reg_e822(hw, port,
1025 						 P_REG_RX_TIMER_INC_PRE_L,
1026 						 phy_time);
1027 		if (err)
1028 			goto exit_err;
1029 	}
1030 
1031 	return 0;
1032 
1033 exit_err:
1034 	ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, err %d\n",
1035 		  port, err);
1036 
1037 	return err;
1038 }
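
/* Worked example: an initial time value of 100 (100 ns, occupying the upper
 * half of the PHY timer) is staged as phy_time = 0x0000006400000000, i.e.
 * 100 ns with a zeroed sub-nanosecond field.
 */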
1039 
1040 /**
1041  * ice_ptp_prep_port_adj_e822 - Prepare a single port for time adjust
1042  * @hw: pointer to HW struct
1043  * @port: Port number to be programmed
1044  * @time: time in cycles to adjust the port Tx and Rx clocks
1045  *
1046  * Program the port for an atomic adjustment by writing the Tx and Rx timer
1047  * registers. The atomic adjustment won't be completed until the driver issues
1048  * an ADJ_TIME command.
1049  *
1050  * Note that time is not in units of nanoseconds. It is in clock time
1051  * including the lower sub-nanosecond portion of the port timer.
1052  *
1053  * Negative adjustments are supported using 2s complement arithmetic.
1054  */
1055 int
1056 ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time)
1057 {
1058 	u32 l_time, u_time;
1059 	int err;
1060 
1061 	l_time = lower_32_bits(time);
1062 	u_time = upper_32_bits(time);
1063 
1064 	/* Tx case */
1065 	err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_L,
1066 				     l_time);
1067 	if (err)
1068 		goto exit_err;
1069 
1070 	err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_U,
1071 				     u_time);
1072 	if (err)
1073 		goto exit_err;
1074 
1075 	/* Rx case */
1076 	err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_L,
1077 				     l_time);
1078 	if (err)
1079 		goto exit_err;
1080 
1081 	err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_U,
1082 				     u_time);
1083 	if (err)
1084 		goto exit_err;
1085 
1086 	return 0;
1087 
1088 exit_err:
1089 	ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, err %d\n",
1090 		  port, err);
1091 	return err;
1092 }
1093 
1094 /**
1095  * ice_ptp_prep_phy_adj_e822 - Prep PHY ports for a time adjustment
1096  * @hw: pointer to HW struct
1097  * @adj: adjustment in nanoseconds
1098  *
1099  * Prepare the PHY ports for an atomic time adjustment by programming the PHY
1100  * Tx and Rx port registers. The actual adjustment is completed by issuing an
1101  * ADJ_TIME or ADJ_TIME_AT_TIME sync command.
1102  */
1103 static int
1104 ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
1105 {
1106 	s64 cycles;
1107 	u8 port;
1108 
1109 	/* The port clock supports adjustment of the sub-nanosecond portion of
1110 	 * the clock. We shift the provided adjustment in nanoseconds to
1111 	 * calculate the appropriate adjustment to program into the PHY ports.
1112 	 */
1113 	if (adj > 0)
1114 		cycles = (s64)adj << 32;
1115 	else
1116 		cycles = -(((s64)-adj) << 32);
1117 
1118 	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1119 		int err;
1120 
1121 		err = ice_ptp_prep_port_adj_e822(hw, port, cycles);
1122 		if (err)
1123 			return err;
1124 	}
1125 
1126 	return 0;
1127 }
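
/* Worked example: a requested adjustment of +5 ns becomes
 * cycles = 5 << 32 = 0x500000000, while -5 ns becomes the two's complement
 * of that value, both carrying a zeroed sub-nanosecond portion.
 */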
1128 
1129 /**
1130  * ice_ptp_prep_phy_incval_e822 - Prepare PHY ports for time adjustment
1131  * @hw: pointer to HW struct
1132  * @incval: new increment value to prepare
1133  *
1134  * Prepare each of the PHY ports for a new increment value by programming the
1135  * port's TIMETUS registers. The new increment value will be updated after
1136  * issuing an INIT_INCVAL command.
1137  */
1138 static int
1139 ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval)
1140 {
1141 	int err;
1142 	u8 port;
1143 
1144 	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1145 		err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L,
1146 						 incval);
1147 		if (err)
1148 			goto exit_err;
1149 	}
1150 
1151 	return 0;
1152 
1153 exit_err:
1154 	ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, err %d\n",
1155 		  port, err);
1156 
1157 	return err;
1158 }
1159 
1160 /**
1161  * ice_ptp_read_port_capture - Read a port's local time capture
1162  * @hw: pointer to HW struct
1163  * @port: Port number to read
1164  * @tx_ts: on return, the Tx port time capture
1165  * @rx_ts: on return, the Rx port time capture
1166  *
1167  * Read the port's Tx and Rx local time capture values.
1168  *
1169  * Note this has no equivalent for the E810 devices.
1170  */
1171 static int
1172 ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
1173 {
1174 	int err;
1175 
1176 	/* Tx case */
1177 	err = ice_read_64b_phy_reg_e822(hw, port, P_REG_TX_CAPTURE_L, tx_ts);
1178 	if (err) {
1179 		ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n",
1180 			  err);
1181 		return err;
1182 	}
1183 
1184 	ice_debug(hw, ICE_DBG_PTP, "tx_init = 0x%016llx\n",
1185 		  (unsigned long long)*tx_ts);
1186 
1187 	/* Rx case */
1188 	err = ice_read_64b_phy_reg_e822(hw, port, P_REG_RX_CAPTURE_L, rx_ts);
1189 	if (err) {
1190 		ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n",
1191 			  err);
1192 		return err;
1193 	}
1194 
1195 	ice_debug(hw, ICE_DBG_PTP, "rx_init = 0x%016llx\n",
1196 		  (unsigned long long)*rx_ts);
1197 
1198 	return 0;
1199 }
1200 
1201 /**
1202  * ice_ptp_one_port_cmd - Prepare a single PHY port for a timer command
1203  * @hw: pointer to HW struct
1204  * @port: Port to which cmd has to be sent
1205  * @cmd: Command to be sent to the port
1206  *
1207  * Prepare the requested port for an upcoming timer sync command.
1208  *
1209  * Note there is no equivalent of this operation on E810, as that device
1210  * always handles all external PHYs internally.
1211  */
1212 static int
1213 ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
1214 {
1215 	u32 cmd_val, val;
1216 	u8 tmr_idx;
1217 	int err;
1218 
1219 	tmr_idx = ice_get_ptp_src_clock_index(hw);
1220 	cmd_val = tmr_idx << SEL_PHY_SRC;
1221 	switch (cmd) {
1222 	case INIT_TIME:
1223 		cmd_val |= PHY_CMD_INIT_TIME;
1224 		break;
1225 	case INIT_INCVAL:
1226 		cmd_val |= PHY_CMD_INIT_INCVAL;
1227 		break;
1228 	case ADJ_TIME:
1229 		cmd_val |= PHY_CMD_ADJ_TIME;
1230 		break;
1231 	case READ_TIME:
1232 		cmd_val |= PHY_CMD_READ_TIME;
1233 		break;
1234 	case ADJ_TIME_AT_TIME:
1235 		cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
1236 		break;
1237 	}
1238 
1239 	/* Tx case */
1240 	/* Read, modify, write */
1241 	err = ice_read_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, &val);
1242 	if (err) {
1243 		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n",
1244 			  err);
1245 		return err;
1246 	}
1247 
1248 	/* Modify necessary bits only and perform write */
1249 	val &= ~TS_CMD_MASK;
1250 	val |= cmd_val;
1251 
1252 	err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, val);
1253 	if (err) {
1254 		ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
1255 			  err);
1256 		return err;
1257 	}
1258 
1259 	/* Rx case */
1260 	/* Read, modify, write */
1261 	err = ice_read_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, &val);
1262 	if (err) {
1263 		ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n",
1264 			  err);
1265 		return err;
1266 	}
1267 
1268 	/* Modify necessary bits only and perform write */
1269 	val &= ~TS_CMD_MASK;
1270 	val |= cmd_val;
1271 
1272 	err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, val);
1273 	if (err) {
1274 		ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
1275 			  err);
1276 		return err;
1277 	}
1278 
1279 	return 0;
1280 }
1281 
1282 /**
1283  * ice_ptp_port_cmd_e822 - Prepare all ports for a timer command
1284  * @hw: pointer to the HW struct
1285  * @cmd: timer command to prepare
1286  *
1287  * Prepare all ports connected to this device for an upcoming timer sync
1288  * command.
1289  */
1290 static int
1291 ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
1292 {
1293 	u8 port;
1294 
1295 	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1296 		int err;
1297 
1298 		err = ice_ptp_one_port_cmd(hw, port, cmd);
1299 		if (err)
1300 			return err;
1301 	}
1302 
1303 	return 0;
1304 }
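
/* Illustrative sketch (hypothetical helper, not part of the driver): a time
 * adjustment on an E822 device is carried out by staging the value in the
 * PHY ports, preparing the source and port timers for the command, and then
 * firing the synchronous execute so every timer applies it on the same tick.
 * The source timer's own shadow adjustment registers are assumed to be
 * written by the caller, as the driver does elsewhere.
 */
static inline int ice_example_adj_time_e822(struct ice_hw *hw, s32 adj_ns)
{
	int err;

	/* Stage the adjustment in every PHY port */
	err = ice_ptp_prep_phy_adj_e822(hw, adj_ns);
	if (err)
		return err;

	/* Prepare the source timer and all ports for ADJ_TIME */
	ice_ptp_src_cmd(hw, ADJ_TIME);

	err = ice_ptp_port_cmd_e822(hw, ADJ_TIME);
	if (err)
		return err;

	/* Trigger all prepared commands at once */
	ice_ptp_exec_tmr_cmd(hw);
	return 0;
}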
1305 
1306 /* E822 Vernier calibration functions
1307  *
1308  * The following functions are used as part of the vernier calibration of
1309  * a port. This calibration increases the precision of the timestamps on the
1310  * port.
1311  */
1312 
1313 /**
1314  * ice_phy_get_speed_and_fec_e822 - Get link speed and FEC based on serdes mode
1315  * @hw: pointer to HW struct
1316  * @port: the port to read from
1317  * @link_out: if non-NULL, holds link speed on success
1318  * @fec_out: if non-NULL, holds FEC algorithm on success
1319  *
1320  * Read the serdes data for the PHY port and extract the link speed and FEC
1321  * algorithm.
1322  */
1323 static int
1324 ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port,
1325 			       enum ice_ptp_link_spd *link_out,
1326 			       enum ice_ptp_fec_mode *fec_out)
1327 {
1328 	enum ice_ptp_link_spd link;
1329 	enum ice_ptp_fec_mode fec;
1330 	u32 serdes;
1331 	int err;
1332 
1333 	err = ice_read_phy_reg_e822(hw, port, P_REG_LINK_SPEED, &serdes);
1334 	if (err) {
1335 		ice_debug(hw, ICE_DBG_PTP, "Failed to read serdes info\n");
1336 		return err;
1337 	}
1338 
1339 	/* Determine the FEC algorithm */
1340 	fec = (enum ice_ptp_fec_mode)P_REG_LINK_SPEED_FEC_MODE(serdes);
1341 
1342 	serdes &= P_REG_LINK_SPEED_SERDES_M;
1343 
1344 	/* Determine the link speed */
1345 	if (fec == ICE_PTP_FEC_MODE_RS_FEC) {
1346 		switch (serdes) {
1347 		case ICE_PTP_SERDES_25G:
1348 			link = ICE_PTP_LNK_SPD_25G_RS;
1349 			break;
1350 		case ICE_PTP_SERDES_50G:
1351 			link = ICE_PTP_LNK_SPD_50G_RS;
1352 			break;
1353 		case ICE_PTP_SERDES_100G:
1354 			link = ICE_PTP_LNK_SPD_100G_RS;
1355 			break;
1356 		default:
1357 			return -EIO;
1358 		}
1359 	} else {
1360 		switch (serdes) {
1361 		case ICE_PTP_SERDES_1G:
1362 			link = ICE_PTP_LNK_SPD_1G;
1363 			break;
1364 		case ICE_PTP_SERDES_10G:
1365 			link = ICE_PTP_LNK_SPD_10G;
1366 			break;
1367 		case ICE_PTP_SERDES_25G:
1368 			link = ICE_PTP_LNK_SPD_25G;
1369 			break;
1370 		case ICE_PTP_SERDES_40G:
1371 			link = ICE_PTP_LNK_SPD_40G;
1372 			break;
1373 		case ICE_PTP_SERDES_50G:
1374 			link = ICE_PTP_LNK_SPD_50G;
1375 			break;
1376 		default:
1377 			return -EIO;
1378 		}
1379 	}
1380 
1381 	if (link_out)
1382 		*link_out = link;
1383 	if (fec_out)
1384 		*fec_out = fec;
1385 
1386 	return 0;
1387 }
1388 
1389 /**
1390  * ice_phy_cfg_lane_e822 - Configure PHY quad for single/multi-lane timestamp
1391  * @hw: pointer to HW struct
1392  * @port: the port to configure the quad for
1393  */
1394 static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
1395 {
1396 	enum ice_ptp_link_spd link_spd;
1397 	int err;
1398 	u32 val;
1399 	u8 quad;
1400 
1401 	err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, NULL);
1402 	if (err) {
1403 		ice_debug(hw, ICE_DBG_PTP, "Failed to get PHY link speed, err %d\n",
1404 			  err);
1405 		return;
1406 	}
1407 
1408 	quad = port / ICE_PORTS_PER_QUAD;
1409 
1410 	err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
1411 	if (err) {
1412 		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEM_GBL_CFG, err %d\n",
1413 			  err);
1414 		return;
1415 	}
1416 
1417 	if (link_spd >= ICE_PTP_LNK_SPD_40G)
1418 		val &= ~Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
1419 	else
1420 		val |= Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
1421 
1422 	err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
1423 	if (err) {
1424 		ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_MEM_GBL_CFG, err %d\n",
1425 			  err);
1426 		return;
1427 	}
1428 }
1429 
1430 /**
1431  * ice_phy_cfg_uix_e822 - Configure Serdes UI to TU conversion for E822
1432  * @hw: pointer to the HW structure
1433  * @port: the port to configure
1434  *
1435  * Program the conversion ratio of Serdes clock "unit intervals" (UIs) to PHC
1436  * hardware clock time units (TUs). That is, determine the number of TUs per
1437  * serdes unit interval, and program the UIX registers with this conversion.
1438  *
1439  * This conversion is used as part of the calibration process when determining
1440  * the additional error of a timestamp vs the real time of transmission or
1441  * receipt of the packet.
1442  *
1443  * Hardware uses the number of TUs per 66 UIs, written to the UIX registers
1444  * for the two main serdes clock rates, 10G/40G and 25G/100G serdes clocks.
1445  *
1446  * To calculate the conversion ratio, we use the following facts:
1447  *
1448  * a) the clock frequency in Hz (cycles per second)
1449  * b) the number of TUs per cycle (the increment value of the clock)
1450  * c) 1 second per 1 billion nanoseconds
1451  * d) the duration of 66 UIs in nanoseconds
1452  *
1453  * Given these facts, we can use the following table to work out what ratios
1454  * to multiply in order to get the number of TUs per 66 UIs:
1455  *
1456  * cycles |   1 second   | incval (TUs) | nanoseconds
1457  * -------+--------------+--------------+-------------
1458  * second | 1 billion ns |    cycle     |   66 UIs
1459  *
1460  * To perform the multiplication using integers without too much loss of
1461  * precision, we can use the following equation:
1462  *
1463  * (freq * incval * 6600 LINE_UI) / (100 * 1 billion)
1464  *
1465  * We scale up to using 6600 UI instead of 66 in order to avoid fractional
1466  * nanosecond UIs (66 UI at 10G/40G is 6.4 ns)
1467  *
1468  * The increment value has a maximum expected range of about 34 bits, while
1469  * the frequency value is about 29 bits. Multiplying these values shouldn't
1470  * overflow the 64 bits. However, we must then further multiply them again by
1471  * the Serdes unit interval duration. To avoid overflow here, we split the
1472  * overall divide by 1e11 into a divide by 256 (shift down by 8 bits) and
1473  * a divide by 390,625,000. This does lose some precision, but avoids
1474  * miscalculation due to arithmetic overflow.
1475  */
1476 static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
1477 {
1478 	u64 cur_freq, clk_incval, tu_per_sec, uix;
1479 	int err;
1480 
1481 	cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
1482 	clk_incval = ice_ptp_read_src_incval(hw);
1483 
1484 	/* Calculate TUs per second divided by 256 */
1485 	tu_per_sec = (cur_freq * clk_incval) >> 8;
1486 
1487 #define LINE_UI_10G_40G 640 /* 6600 UIs is 640 nanoseconds at 10Gb/40Gb */
1488 #define LINE_UI_25G_100G 256 /* 6600 UIs is 256 nanoseconds at 25Gb/100Gb */
1489 
1490 	/* Program the 10Gb/40Gb conversion ratio */
1491 	uix = div_u64(tu_per_sec * LINE_UI_10G_40G, 390625000);
1492 
1493 	err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_10G_40G_L,
1494 					 uix);
1495 	if (err) {
1496 		ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_10G_40G, err %d\n",
1497 			  err);
1498 		return err;
1499 	}
1500 
1501 	/* Program the 25Gb/100Gb conversion ratio */
1502 	uix = div_u64(tu_per_sec * LINE_UI_25G_100G, 390625000);
1503 
1504 	err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_25G_100G_L,
1505 					 uix);
1506 	if (err) {
1507 		ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_25G_100G, err %d\n",
1508 			  err);
1509 		return err;
1510 	}
1511 
1512 	return 0;
1513 }
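
/* Worked check of the scaling above: 256 * 390,625,000 = 100,000,000,000,
 * so the shift down by 8 bits plus the final divide together implement the
 * divide by (100 * 1 billion) from the equation in the comment. With the
 * nominal tu_per_sec of ~1e9 * 2^32, the 10G/40G value works out to about
 * 6.4 ns * 2^32 TU, i.e. roughly 0x666666666.
 */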
1514 
1515 /**
1516  * ice_phy_cfg_parpcs_e822 - Configure TUs per PAR/PCS clock cycle
1517  * @hw: pointer to the HW struct
1518  * @port: port to configure
1519  *
1520  * Configure the number of TUs for the PAR and PCS clocks used as part of the
1521  * timestamp calibration process. This depends on the link speed, as the PHY
1522  * uses different markers depending on the speed.
1523  *
1524  * 1Gb/10Gb/25Gb:
1525  * - Tx/Rx PAR/PCS markers
1526  *
1527  * 25Gb RS:
1528  * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
1529  *
1530  * 40Gb/50Gb:
1531  * - Tx/Rx PAR/PCS markers
1532  * - Rx Deskew PAR/PCS markers
1533  *
1534  * 50G RS and 100G RS:
1535  * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
1536  * - Rx Deskew PAR/PCS markers
1537  * - Tx PAR/PCS markers
1538  *
1539  * To calculate the conversion, we use the PHC clock frequency (cycles per
1540  * second), the increment value (TUs per cycle), and the related PHY clock
1541  * frequency to calculate the TUs per unit of the PHY link clock. The
1542  * following table shows how the units convert:
1543  *
1544  * cycles |  TUs  | second
1545  * -------+-------+--------
1546  * second | cycle | cycles
1547  *
1548  * For each conversion register, look up the appropriate frequency from the
1549  * e822 PAR/PCS table and calculate the TUs per unit of that clock. Program
1550  * this to the appropriate register, preparing hardware to perform timestamp
1551  * calibration to calculate the total Tx or Rx offset to adjust the timestamp
1552  * in order to calibrate for the internal PHY delays.
1553  *
1554  * Note that the increment value ranges up to ~34 bits, and the clock
1555  * frequency is ~29 bits, so multiplying them together should fit within the
1556  * 64 bit arithmetic.
1557  */
1558 static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
1559 {
1560 	u64 cur_freq, clk_incval, tu_per_sec, phy_tus;
1561 	enum ice_ptp_link_spd link_spd;
1562 	enum ice_ptp_fec_mode fec_mode;
1563 	int err;
1564 
1565 	err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
1566 	if (err)
1567 		return err;
1568 
1569 	cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
1570 	clk_incval = ice_ptp_read_src_incval(hw);
1571 
1572 	/* Calculate TUs per cycle of the PHC clock */
1573 	tu_per_sec = cur_freq * clk_incval;
1574 
1575 	/* For each PHY conversion register, look up the appropriate link
1576 	 * speed frequency and determine the TUs per that clock's cycle time.
1577 	 * Split this into a high and low value and then program the
1578 	 * appropriate register. If that link speed does not use the
1579 	 * associated register, write zeros to clear it instead.
1580 	 */
1581 
1582 	/* P_REG_PAR_TX_TUS */
1583 	if (e822_vernier[link_spd].tx_par_clk)
1584 		phy_tus = div_u64(tu_per_sec,
1585 				  e822_vernier[link_spd].tx_par_clk);
1586 	else
1587 		phy_tus = 0;
1588 
1589 	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_TX_TUS_L,
1590 					 phy_tus);
1591 	if (err)
1592 		return err;
1593 
1594 	/* P_REG_PAR_RX_TUS */
1595 	if (e822_vernier[link_spd].rx_par_clk)
1596 		phy_tus = div_u64(tu_per_sec,
1597 				  e822_vernier[link_spd].rx_par_clk);
1598 	else
1599 		phy_tus = 0;
1600 
1601 	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_RX_TUS_L,
1602 					 phy_tus);
1603 	if (err)
1604 		return err;
1605 
1606 	/* P_REG_PCS_TX_TUS */
1607 	if (e822_vernier[link_spd].tx_pcs_clk)
1608 		phy_tus = div_u64(tu_per_sec,
1609 				  e822_vernier[link_spd].tx_pcs_clk);
1610 	else
1611 		phy_tus = 0;
1612 
1613 	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_TX_TUS_L,
1614 					 phy_tus);
1615 	if (err)
1616 		return err;
1617 
1618 	/* P_REG_PCS_RX_TUS */
1619 	if (e822_vernier[link_spd].rx_pcs_clk)
1620 		phy_tus = div_u64(tu_per_sec,
1621 				  e822_vernier[link_spd].rx_pcs_clk);
1622 	else
1623 		phy_tus = 0;
1624 
1625 	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_RX_TUS_L,
1626 					 phy_tus);
1627 	if (err)
1628 		return err;
1629 
1630 	/* P_REG_DESK_PAR_TX_TUS */
1631 	if (e822_vernier[link_spd].tx_desk_rsgb_par)
1632 		phy_tus = div_u64(tu_per_sec,
1633 				  e822_vernier[link_spd].tx_desk_rsgb_par);
1634 	else
1635 		phy_tus = 0;
1636 
1637 	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_TX_TUS_L,
1638 					 phy_tus);
1639 	if (err)
1640 		return err;
1641 
1642 	/* P_REG_DESK_PAR_RX_TUS */
1643 	if (e822_vernier[link_spd].rx_desk_rsgb_par)
1644 		phy_tus = div_u64(tu_per_sec,
1645 				  e822_vernier[link_spd].rx_desk_rsgb_par);
1646 	else
1647 		phy_tus = 0;
1648 
1649 	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_RX_TUS_L,
1650 					 phy_tus);
1651 	if (err)
1652 		return err;
1653 
1654 	/* P_REG_DESK_PCS_TX_TUS */
1655 	if (e822_vernier[link_spd].tx_desk_rsgb_pcs)
1656 		phy_tus = div_u64(tu_per_sec,
1657 				  e822_vernier[link_spd].tx_desk_rsgb_pcs);
1658 	else
1659 		phy_tus = 0;
1660 
1661 	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_TX_TUS_L,
1662 					 phy_tus);
1663 	if (err)
1664 		return err;
1665 
1666 	/* P_REG_DESK_PCS_RX_TUS */
1667 	if (e822_vernier[link_spd].rx_desk_rsgb_pcs)
1668 		phy_tus = div_u64(tu_per_sec,
1669 				  e822_vernier[link_spd].rx_desk_rsgb_pcs);
1670 	else
1671 		phy_tus = 0;
1672 
1673 	return ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_RX_TUS_L,
1674 					  phy_tus);
1675 }
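
/* Worked example (with a hypothetical PHY clock, not a value from the e822
 * vernier table): dividing the nominal tu_per_sec of ~1e9 * 2^32 by an
 * 800 MHz PAR clock gives 1.25 ns * 2^32, i.e. about 0x140000000 TU per
 * PAR clock cycle written to the corresponding TUS register.
 */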
1676 
1677 /**
1678  * ice_calc_fixed_tx_offset_e822 - Calculate the fixed Tx offset for a port
1679  * @hw: pointer to the HW struct
1680  * @link_spd: the Link speed to calculate for
1681  *
1682  * Calculate the fixed offset due to known static latency data.
1683  */
1684 static u64
1685 ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
1686 {
1687 	u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
1688 
1689 	cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
1690 	clk_incval = ice_ptp_read_src_incval(hw);
1691 
1692 	/* Calculate TUs per second */
1693 	tu_per_sec = cur_freq * clk_incval;
1694 
1695 	/* Calculate number of TUs to add for the fixed Tx latency. Since the
1696 	 * latency measurement is in 1/100th of a nanosecond, we need to
1697 	 * multiply by tu_per_sec and then divide by 1e11. This calculation
1698 	 * overflows 64 bit integer arithmetic, so break it up into two
1699 	 * divisions: first by 1e4, then by 1e7.
1700 	 */
1701 	fixed_offset = div_u64(tu_per_sec, 10000);
1702 	fixed_offset *= e822_vernier[link_spd].tx_fixed_delay;
1703 	fixed_offset = div_u64(fixed_offset, 10000000);
1704 
1705 	return fixed_offset;
1706 }
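
/* Worked example: with the nominal tu_per_sec of ~1e9 * 2^32 and
 * a tx_fixed_delay of 100 (one nanosecond, since the table stores the delay
 * in 1/100 ns units per the comment above), the two divisions by 1e4 and 1e7
 * reduce to a divide by 1e11 and the result is 2^32 TU, exactly one
 * nanosecond of fixed offset.
 */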
1707 
1708 /**
1709  * ice_phy_cfg_tx_offset_e822 - Configure total Tx timestamp offset
1710  * @hw: pointer to the HW struct
1711  * @port: the PHY port to configure
1712  *
1713  * Program the P_REG_TOTAL_TX_OFFSET register with the total number of TUs to
1714  * adjust Tx timestamps by. This is calculated by combining some known static
1715  * latency along with the Vernier offset computations done by hardware.
1716  *
1717  * This function must be called only after the offset registers are valid,
1718  * i.e. after the Vernier calibration wait has passed, to ensure that the PHY
1719  * has measured the offset.
1720  *
1721  * To avoid overflow, when calculating the offset based on the known static
1722  * latency values, we use measurements in 1/100th of a nanosecond, and divide
1723  * the TUs per second up front. This avoids overflow while allowing
1724  * calculation of the adjustment using integer arithmetic.
1725  */
1726 static int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
1727 {
1728 	enum ice_ptp_link_spd link_spd;
1729 	enum ice_ptp_fec_mode fec_mode;
1730 	u64 total_offset, val;
1731 	int err;
1732 
1733 	err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
1734 	if (err)
1735 		return err;
1736 
1737 	total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd);
1738 
1739 	/* Read the first Vernier offset from the PHY register and add it to
1740 	 * the total offset.
1741 	 */
1742 	if (link_spd == ICE_PTP_LNK_SPD_1G ||
1743 	    link_spd == ICE_PTP_LNK_SPD_10G ||
1744 	    link_spd == ICE_PTP_LNK_SPD_25G ||
1745 	    link_spd == ICE_PTP_LNK_SPD_25G_RS ||
1746 	    link_spd == ICE_PTP_LNK_SPD_40G ||
1747 	    link_spd == ICE_PTP_LNK_SPD_50G) {
1748 		err = ice_read_64b_phy_reg_e822(hw, port,
1749 						P_REG_PAR_PCS_TX_OFFSET_L,
1750 						&val);
1751 		if (err)
1752 			return err;
1753 
1754 		total_offset += val;
1755 	}
1756 
1757 	/* For Tx, we only need to use the second Vernier offset for
1758 	 * multi-lane link speeds with RS-FEC. The lanes will always be
1759 	 * aligned.
1760 	 */
1761 	if (link_spd == ICE_PTP_LNK_SPD_50G_RS ||
1762 	    link_spd == ICE_PTP_LNK_SPD_100G_RS) {
1763 		err = ice_read_64b_phy_reg_e822(hw, port,
1764 						P_REG_PAR_TX_TIME_L,
1765 						&val);
1766 		if (err)
1767 			return err;
1768 
1769 		total_offset += val;
1770 	}
1771 
1772 	/* Now that the total offset has been calculated, program it to the
1773 	 * PHY and indicate that the Tx offset is ready. After this,
1774 	 * timestamps will be enabled.
1775 	 */
1776 	err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L,
1777 					 total_offset);
1778 	if (err)
1779 		return err;
1780 
1781 	err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1);
1782 	if (err)
1783 		return err;
1784 
1785 	return 0;
1786 }
1787 
1788 /**
1789  * ice_phy_cfg_fixed_tx_offset_e822 - Configure Tx offset for bypass mode
1790  * @hw: pointer to the HW struct
1791  * @port: the PHY port to configure
1792  *
1793  * Calculate and program the fixed Tx offset, and indicate that the offset is
1794  * ready. This can be used when operating in bypass mode.
1795  */
1796 static int
1797 ice_phy_cfg_fixed_tx_offset_e822(struct ice_hw *hw, u8 port)
1798 {
1799 	enum ice_ptp_link_spd link_spd;
1800 	enum ice_ptp_fec_mode fec_mode;
1801 	u64 total_offset;
1802 	int err;
1803 
1804 	err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
1805 	if (err)
1806 		return err;
1807 
1808 	total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd);
1809 
1810 	/* Program the fixed Tx offset into the P_REG_TOTAL_TX_OFFSET_L
1811 	 * register, then indicate that the Tx offset is ready. After this,
1812 	 * timestamps will be enabled.
1813 	 *
1814 	 * Note that this skips including the more precise offsets generated
1815 	 * by the Vernier calibration.
1816 	 */
1817 	err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L,
1818 					 total_offset);
1819 	if (err)
1820 		return err;
1821 
1822 	err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1);
1823 	if (err)
1824 		return err;
1825 
1826 	return 0;
1827 }
1828 
1829 /**
1830  * ice_phy_calc_pmd_adj_e822 - Calculate PMD adjustment for Rx
1831  * @hw: pointer to the HW struct
1832  * @port: the PHY port to adjust for
1833  * @link_spd: the current link speed of the PHY
1834  * @fec_mode: the current FEC mode of the PHY
1835  * @pmd_adj: on return, the amount to adjust the Rx total offset by
1836  *
1837  * Calculates the adjustment to Rx timestamps due to PMD alignment in the PHY.
1838  * This varies by link speed and FEC mode. The value calculated accounts for
1839  * various delays caused when receiving a packet.
1840  */
1841 static int
1842 ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
1843 			  enum ice_ptp_link_spd link_spd,
1844 			  enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj)
1845 {
1846 	u64 cur_freq, clk_incval, tu_per_sec, mult, adj;
1847 	u8 pmd_align;
1848 	u32 val;
1849 	int err;
1850 
1851 	err = ice_read_phy_reg_e822(hw, port, P_REG_PMD_ALIGNMENT, &val);
1852 	if (err) {
1853 		ice_debug(hw, ICE_DBG_PTP, "Failed to read PMD alignment, err %d\n",
1854 			  err);
1855 		return err;
1856 	}
1857 
1858 	pmd_align = (u8)val;
1859 
1860 	cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
1861 	clk_incval = ice_ptp_read_src_incval(hw);
1862 
1863 	/* Calculate TUs per second */
1864 	tu_per_sec = cur_freq * clk_incval;
1865 
1866 	/* The PMD alignment adjustment measurement depends on the link speed,
1867 	 * and whether FEC is enabled. For each link speed, the alignment
1868 	 * adjustment is calculated by dividing a value by the length of
1869 	 * a Time Unit in nanoseconds.
1870 	 *
1871 	 * 1G: align == 4 ? 10 * 0.8 : ((align + 6) % 10) * 0.8
1872 	 * 10G: align == 65 ? 0 : (align * 0.1 * 32/33)
1873 	 * 10G w/FEC: align * 0.1 * 32/33
1874 	 * 25G: align == 65 ? 0 : (align * 0.4 * 32/33)
1875 	 * 25G w/FEC: align * 0.4 * 32/33
1876 	 * 40G: align == 65 ? 0 : (align * 0.1 * 32/33)
1877 	 * 40G w/FEC: align * 0.1 * 32/33
1878 	 * 50G: align == 65 ? 0 : (align * 0.4 * 32/33)
1879 	 * 50G w/FEC: align * 0.8 * 32/33
1880 	 *
1881 	 * For RS-FEC, if align is < 17 then we must also add 1.6 * 32/33.
1882 	 *
1883 	 * To allow for calculating this value using integer arithmetic, we
1884 	 * instead start with the number of TUs per second, (inverse of the
1885 	 * length of a Time Unit in nanoseconds), multiply by a value based
1886 	 * on the PMD alignment register, and then divide by the right value
1887 	 * calculated based on the table above. To avoid integer overflow this
1888 	 * division is broken up into a step of dividing by 125 first.
1889 	 */
1890 	if (link_spd == ICE_PTP_LNK_SPD_1G) {
1891 		if (pmd_align == 4)
1892 			mult = 10;
1893 		else
1894 			mult = (pmd_align + 6) % 10;
1895 	} else if (link_spd == ICE_PTP_LNK_SPD_10G ||
1896 		   link_spd == ICE_PTP_LNK_SPD_25G ||
1897 		   link_spd == ICE_PTP_LNK_SPD_40G ||
1898 		   link_spd == ICE_PTP_LNK_SPD_50G) {
1899 		/* If Clause 74 FEC, always calculate PMD adjust */
1900 		if (pmd_align != 65 || fec_mode == ICE_PTP_FEC_MODE_CLAUSE74)
1901 			mult = pmd_align;
1902 		else
1903 			mult = 0;
1904 	} else if (link_spd == ICE_PTP_LNK_SPD_25G_RS ||
1905 		   link_spd == ICE_PTP_LNK_SPD_50G_RS ||
1906 		   link_spd == ICE_PTP_LNK_SPD_100G_RS) {
1907 		if (pmd_align < 17)
1908 			mult = pmd_align + 40;
1909 		else
1910 			mult = pmd_align;
1911 	} else {
1912 		ice_debug(hw, ICE_DBG_PTP, "Unknown link speed %d, skipping PMD adjustment\n",
1913 			  link_spd);
1914 		mult = 0;
1915 	}
1916 
1917 	/* In some cases, there's no need to adjust for the PMD alignment */
1918 	if (!mult) {
1919 		*pmd_adj = 0;
1920 		return 0;
1921 	}
1922 
1923 	/* Calculate the adjustment by multiplying TUs per second by the
1924 	 * appropriate multiplier and divisor. To avoid overflow, we first
1925 	 * divide by 125, and then handle remaining divisor based on the link
1926 	 * speed pmd_adj_divisor value.
1927 	 */
1928 	adj = div_u64(tu_per_sec, 125);
1929 	adj *= mult;
1930 	adj = div_u64(adj, e822_vernier[link_spd].pmd_adj_divisor);
1931 
1932 	/* Finally, for 25G-RS and 50G-RS, a further adjustment for the Rx
1933 	 * cycle count is necessary.
1934 	 */
1935 	if (link_spd == ICE_PTP_LNK_SPD_25G_RS) {
1936 		u64 cycle_adj;
1937 		u8 rx_cycle;
1938 
1939 		err = ice_read_phy_reg_e822(hw, port, P_REG_RX_40_TO_160_CNT,
1940 					    &val);
1941 		if (err) {
1942 			ice_debug(hw, ICE_DBG_PTP, "Failed to read 25G-RS Rx cycle count, err %d\n",
1943 				  err);
1944 			return err;
1945 		}
1946 
1947 		rx_cycle = val & P_REG_RX_40_TO_160_CNT_RXCYC_M;
1948 		if (rx_cycle) {
1949 			mult = (4 - rx_cycle) * 40;
1950 
1951 			cycle_adj = div_u64(tu_per_sec, 125);
1952 			cycle_adj *= mult;
1953 			cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
1954 
1955 			adj += cycle_adj;
1956 		}
1957 	} else if (link_spd == ICE_PTP_LNK_SPD_50G_RS) {
1958 		u64 cycle_adj;
1959 		u8 rx_cycle;
1960 
1961 		err = ice_read_phy_reg_e822(hw, port, P_REG_RX_80_TO_160_CNT,
1962 					    &val);
1963 		if (err) {
1964 			ice_debug(hw, ICE_DBG_PTP, "Failed to read 50G-RS Rx cycle count, err %d\n",
1965 				  err);
1966 			return err;
1967 		}
1968 
1969 		rx_cycle = val & P_REG_RX_80_TO_160_CNT_RXCYC_M;
1970 		if (rx_cycle) {
1971 			mult = rx_cycle * 40;
1972 
1973 			cycle_adj = div_u64(tu_per_sec, 125);
1974 			cycle_adj *= mult;
1975 			cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
1976 
1977 			adj += cycle_adj;
1978 		}
1979 	}
1980 
1981 	/* Return the calculated adjustment */
1982 	*pmd_adj = adj;
1983 
1984 	return 0;
1985 }
1986 
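/* Worked example for the PMD adjustment, using a hypothetical alignment
 * value rather than one read from hardware: at 10G without FEC and
 * pmd_align == 33, the table above gives 33 * 0.1 * 32/33 = 3.2 ns. The
 * integer form is intended to produce the same quantity in TUs:
 *
 *	adj = div_u64(tu_per_sec, 125);
 *	adj *= 33;
 *	adj = div_u64(adj, e822_vernier[ICE_PTP_LNK_SPD_10G].pmd_adj_divisor);
 *
 * with the early divide by 125 keeping the intermediate product inside
 * 64 bits.
 */
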
1987 /**
1988  * ice_calc_fixed_rx_offset_e822 - Calculate the fixed Rx offset for a port
1989  * @hw: pointer to HW struct
1990  * @link_spd: The Link speed to calculate for
1991  *
1992  * Determine the fixed Rx latency for a given link speed.
1993  */
1994 static u64
1995 ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
1996 {
1997 	u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
1998 
1999 	cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
2000 	clk_incval = ice_ptp_read_src_incval(hw);
2001 
2002 	/* Calculate TUs per second */
2003 	tu_per_sec = cur_freq * clk_incval;
2004 
2005 	/* Calculate number of TUs to add for the fixed Rx latency. Since the
2006 	 * latency measurement is in 1/100th of a nanosecond, we need to
2007 	 * multiply by tu_per_sec and then divide by 1e11. This calculation
2008 	 * overflows 64 bit integer arithmetic, so break it up into two
2009 	 * divisions by 1e4 first then by 1e7.
2010 	 */
2011 	fixed_offset = div_u64(tu_per_sec, 10000);
2012 	fixed_offset *= e822_vernier[link_spd].rx_fixed_delay;
2013 	fixed_offset = div_u64(fixed_offset, 10000000);
2014 
2015 	return fixed_offset;
2016 }
2017 
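/* Worked example with a hypothetical latency value: for
 * rx_fixed_delay == 2500, i.e. a static latency of 25.00 ns, the code
 * above computes
 *
 *	fixed_offset = div_u64(tu_per_sec, 10000) * 2500 / 10000000
 *		     ~= tu_per_sec * 25 / 1e9
 *
 * which is 25 ns expressed in TUs. Multiplying tu_per_sec by 2500 up
 * front would overflow 64 bit arithmetic, as noted above, which is why
 * the division by 1e11 is split into the 1e4 and 1e7 steps.
 */
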
2018 /**
2019  * ice_phy_cfg_rx_offset_e822 - Configure total Rx timestamp offset
2020  * @hw: pointer to the HW struct
2021  * @port: the PHY port to configure
2022  *
2023  * Program the P_REG_TOTAL_RX_OFFSET register with the number of Time Units to
2024  * adjust Rx timestamps by. This combines calculations from the Vernier offset
2025  * measurements taken in hardware with some data about known fixed delay as
2026  * well as adjusting for multi-lane alignment delay.
2027  *
2028  * This function must be called only after the offset registers are valid,
2029  * i.e. after the Vernier calibration wait has passed, to ensure that the PHY
2030  * has measured the offset.
2031  *
2032  * To avoid overflow, when calculating the offset based on the known static
2033  * latency values, we use measurements in 1/100th of a nanosecond, and divide
2034  * the TUs per second up front. This avoids overflow while allowing
2035  * calculation of the adjustment using integer arithmetic.
2036  */
2037 static int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
2038 {
2039 	enum ice_ptp_link_spd link_spd;
2040 	enum ice_ptp_fec_mode fec_mode;
2041 	u64 total_offset, pmd, val;
2042 	int err;
2043 
2044 	err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
2045 	if (err)
2046 		return err;
2047 
2048 	total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd);
2049 
2050 	/* Read the first Vernier offset from the PHY register and add it to
2051 	 * the total offset.
2052 	 */
2053 	err = ice_read_64b_phy_reg_e822(hw, port,
2054 					P_REG_PAR_PCS_RX_OFFSET_L,
2055 					&val);
2056 	if (err)
2057 		return err;
2058 
2059 	total_offset += val;
2060 
2061 	/* For Rx, all multi-lane link speeds include a second Vernier
2062 	 * calibration, because the lanes might not be aligned.
2063 	 */
2064 	if (link_spd == ICE_PTP_LNK_SPD_40G ||
2065 	    link_spd == ICE_PTP_LNK_SPD_50G ||
2066 	    link_spd == ICE_PTP_LNK_SPD_50G_RS ||
2067 	    link_spd == ICE_PTP_LNK_SPD_100G_RS) {
2068 		err = ice_read_64b_phy_reg_e822(hw, port,
2069 						P_REG_PAR_RX_TIME_L,
2070 						&val);
2071 		if (err)
2072 			return err;
2073 
2074 		total_offset += val;
2075 	}
2076 
2077 	/* In addition, Rx must account for the PMD alignment */
2078 	err = ice_phy_calc_pmd_adj_e822(hw, port, link_spd, fec_mode, &pmd);
2079 	if (err)
2080 		return err;
2081 
2082 	/* For RS-FEC, this adjustment adds delay, but for other modes, it
2083 	 * subtracts delay.
2084 	 */
2085 	if (fec_mode == ICE_PTP_FEC_MODE_RS_FEC)
2086 		total_offset += pmd;
2087 	else
2088 		total_offset -= pmd;
2089 
2090 	/* Now that the total offset has been calculated, program it to the
2091 	 * PHY and indicate that the Rx offset is ready. After this,
2092 	 * timestamps will be enabled.
2093 	 */
2094 	err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L,
2095 					 total_offset);
2096 	if (err)
2097 		return err;
2098 
2099 	err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1);
2100 	if (err)
2101 		return err;
2102 
2103 	return 0;
2104 }
2105 
2106 /**
2107  * ice_phy_cfg_fixed_rx_offset_e822 - Configure fixed Rx offset for bypass mode
2108  * @hw: pointer to the HW struct
2109  * @port: the PHY port to configure
2110  *
2111  * Calculate and program the fixed Rx offset, and indicate that the offset is
2112  * ready. This can be used when operating in bypass mode.
2113  */
2114 static int
2115 ice_phy_cfg_fixed_rx_offset_e822(struct ice_hw *hw, u8 port)
2116 {
2117 	enum ice_ptp_link_spd link_spd;
2118 	enum ice_ptp_fec_mode fec_mode;
2119 	u64 total_offset;
2120 	int err;
2121 
2122 	err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
2123 	if (err)
2124 		return err;
2125 
2126 	total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd);
2127 
2128 	/* Program the fixed Rx offset into the P_REG_TOTAL_RX_OFFSET_L
2129 	 * register, then indicate that the Rx offset is ready. After this,
2130 	 * timestamps will be enabled.
2131 	 *
2132 	 * Note that this skips including the more precise offsets generated
2133 	 * by Vernier calibration.
2134 	 */
2135 	err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L,
2136 					 total_offset);
2137 	if (err)
2138 		return err;
2139 
2140 	err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1);
2141 	if (err)
2142 		return err;
2143 
2144 	return 0;
2145 }
2146 
2147 /**
2148  * ice_read_phy_and_phc_time_e822 - Simultaneously capture PHC and PHY time
2149  * @hw: pointer to the HW struct
2150  * @port: the PHY port to read
2151  * @phy_time: on return, the 64bit PHY timer value
2152  * @phc_time: on return, the lower 64bits of PHC time
2153  *
2154  * Issue a READ_TIME timer command to simultaneously capture the PHY and PHC
2155  * timer values.
2156  */
2157 static int
2158 ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
2159 			       u64 *phc_time)
2160 {
2161 	u64 tx_time, rx_time;
2162 	u32 zo, lo;
2163 	u8 tmr_idx;
2164 	int err;
2165 
2166 	tmr_idx = ice_get_ptp_src_clock_index(hw);
2167 
2168 	/* Prepare the PHC timer for a READ_TIME capture command */
2169 	ice_ptp_src_cmd(hw, READ_TIME);
2170 
2171 	/* Prepare the PHY timer for a READ_TIME capture command */
2172 	err = ice_ptp_one_port_cmd(hw, port, READ_TIME);
2173 	if (err)
2174 		return err;
2175 
2176 	/* Issue the sync to start the READ_TIME capture */
2177 	ice_ptp_exec_tmr_cmd(hw);
2178 
2179 	/* Read the captured PHC time from the shadow time registers */
2180 	zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
2181 	lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
2182 	*phc_time = (u64)lo << 32 | zo;
2183 
2184 	/* Read the captured PHY time from the PHY shadow registers */
2185 	err = ice_ptp_read_port_capture(hw, port, &tx_time, &rx_time);
2186 	if (err)
2187 		return err;
2188 
2189 	/* If the PHY Tx and Rx timers don't match, log a warning message.
2190 	 * Note that this should not happen in normal circumstances since the
2191 	 * driver always programs them together.
2192 	 */
2193 	if (tx_time != rx_time)
2194 		dev_warn(ice_hw_to_dev(hw),
2195 			 "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n",
2196 			 port, (unsigned long long)tx_time,
2197 			 (unsigned long long)rx_time);
2198 
2199 	*phy_time = tx_time;
2200 
2201 	return 0;
2202 }
2203 
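/* The PHC value returned above is assembled from the two shadow time
 * registers. As a purely illustrative example, if the READ_TIME capture
 * latched GLTSYN_SHTIME_L == 0x00000012 and GLTSYN_SHTIME_0 == 0x34567890,
 * then
 *
 *	*phc_time = (u64)0x00000012 << 32 | 0x34567890
 *		  = 0x0000001234567890
 *
 * i.e. the lower 64 bits of the source timer at the moment the sync
 * command executed.
 */
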
2204 /**
2205  * ice_sync_phy_timer_e822 - Synchronize the PHY timer with PHC timer
2206  * @hw: pointer to the HW struct
2207  * @port: the PHY port to synchronize
2208  *
2209  * Perform an adjustment to ensure that the PHY and PHC timers are in sync.
2210  * This is done by issuing a READ_TIME command which triggers a simultaneous
2211  * read of the PHY timer and PHC timer. Then we use the difference to
2212  * calculate an appropriate 2s complement addition to add to the PHY timer in
2213  * order to ensure it reads the same value as the primary PHC timer.
2214  */
2215 static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
2216 {
2217 	u64 phc_time, phy_time, difference;
2218 	int err;
2219 
2220 	if (!ice_ptp_lock(hw)) {
2221 		ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n");
2222 		return -EBUSY;
2223 	}
2224 
2225 	err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
2226 	if (err)
2227 		goto err_unlock;
2228 
2229 	/* Calculate the amount required to add to the port time in order for
2230 	 * it to match the PHC time.
2231 	 *
2232 	 * Note that the port adjustment is done using 2s complement
2233 	 * arithmetic. This is convenient since it means that we can simply
2234 	 * calculate the difference between the PHC time and the port time,
2235 	 * and it will be interpreted correctly.
2236 	 */
2237 	difference = phc_time - phy_time;
2238 
2239 	err = ice_ptp_prep_port_adj_e822(hw, port, (s64)difference);
2240 	if (err)
2241 		goto err_unlock;
2242 
2243 	err = ice_ptp_one_port_cmd(hw, port, ADJ_TIME);
2244 	if (err)
2245 		goto err_unlock;
2246 
2247 	/* Issue the sync to activate the time adjustment */
2248 	ice_ptp_exec_tmr_cmd(hw);
2249 
2250 	/* Re-capture the timer values to flush the command registers and
2251 	 * verify that the time was properly adjusted.
2252 	 */
2253 	err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
2254 	if (err)
2255 		goto err_unlock;
2256 
2257 	dev_info(ice_hw_to_dev(hw),
2258 		 "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n",
2259 		 port, (unsigned long long)phy_time,
2260 		 (unsigned long long)phc_time);
2261 
2262 	ice_ptp_unlock(hw);
2263 
2264 	return 0;
2265 
2266 err_unlock:
2267 	ice_ptp_unlock(hw);
2268 	return err;
2269 }
2270 
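/* The adjustment above relies on unsigned wrap-around behaving like 2's
 * complement subtraction. As an illustrative example, if the capture
 * returned phc_time == 1000 and phy_time == 1500 (in TUs), then
 *
 *	difference = 1000 - 1500 = 0xFFFFFFFFFFFFFE0C
 *
 * which, reinterpreted as an s64, is -500. Preparing that value with
 * ice_ptp_prep_port_adj_e822() and issuing ADJ_TIME pulls the port timer
 * back by 500 TUs so that it lines up with the PHC.
 */
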
2271 /**
2272  * ice_stop_phy_timer_e822 - Stop the PHY clock timer
2273  * @hw: pointer to the HW struct
2274  * @port: the PHY port to stop
2275  * @soft_reset: if true, hold the SOFT_RESET bit of P_REG_PS
2276  *
2277  * Stop the clock of a PHY port. This must be done as part of the flow to
2278  * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
2279  * initialized or when link speed changes.
2280  */
2281 int
2282 ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset)
2283 {
2284 	int err;
2285 	u32 val;
2286 
2287 	err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 0);
2288 	if (err)
2289 		return err;
2290 
2291 	err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 0);
2292 	if (err)
2293 		return err;
2294 
2295 	err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
2296 	if (err)
2297 		return err;
2298 
2299 	val &= ~P_REG_PS_START_M;
2300 	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2301 	if (err)
2302 		return err;
2303 
2304 	val &= ~P_REG_PS_ENA_CLK_M;
2305 	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2306 	if (err)
2307 		return err;
2308 
2309 	if (soft_reset) {
2310 		val |= P_REG_PS_SFT_RESET_M;
2311 		err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2312 		if (err)
2313 			return err;
2314 	}
2315 
2316 	ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port);
2317 
2318 	return 0;
2319 }
2320 
2321 /**
2322  * ice_start_phy_timer_e822 - Start the PHY clock timer
2323  * @hw: pointer to the HW struct
2324  * @port: the PHY port to start
2325  * @bypass: if true, start the PHY in bypass mode
2326  *
2327  * Start the clock of a PHY port. This must be done as part of the flow to
2328  * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
2329  * initialized or when link speed changes.
2330  *
2331  * Bypass mode enables timestamps immediately without waiting for Vernier
2332  * calibration to complete. Hardware will still continue taking Vernier
2333  * measurements on Tx or Rx of packets, but they will not be applied to
2334  * timestamps. Use ice_phy_exit_bypass_e822 to exit bypass mode once hardware
2335  * has completed offset calculation.
2336  */
2337 int
2338 ice_start_phy_timer_e822(struct ice_hw *hw, u8 port, bool bypass)
2339 {
2340 	u32 lo, hi, val;
2341 	u64 incval;
2342 	u8 tmr_idx;
2343 	int err;
2344 
2345 	tmr_idx = ice_get_ptp_src_clock_index(hw);
2346 
2347 	err = ice_stop_phy_timer_e822(hw, port, false);
2348 	if (err)
2349 		return err;
2350 
2351 	ice_phy_cfg_lane_e822(hw, port);
2352 
2353 	err = ice_phy_cfg_uix_e822(hw, port);
2354 	if (err)
2355 		return err;
2356 
2357 	err = ice_phy_cfg_parpcs_e822(hw, port);
2358 	if (err)
2359 		return err;
2360 
2361 	lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
2362 	hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
2363 	incval = (u64)hi << 32 | lo;
2364 
2365 	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, incval);
2366 	if (err)
2367 		return err;
2368 
2369 	err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
2370 	if (err)
2371 		return err;
2372 
2373 	ice_ptp_exec_tmr_cmd(hw);
2374 
2375 	err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
2376 	if (err)
2377 		return err;
2378 
2379 	val |= P_REG_PS_SFT_RESET_M;
2380 	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2381 	if (err)
2382 		return err;
2383 
2384 	val |= P_REG_PS_START_M;
2385 	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2386 	if (err)
2387 		return err;
2388 
2389 	val &= ~P_REG_PS_SFT_RESET_M;
2390 	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2391 	if (err)
2392 		return err;
2393 
2394 	err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
2395 	if (err)
2396 		return err;
2397 
2398 	ice_ptp_exec_tmr_cmd(hw);
2399 
2400 	val |= P_REG_PS_ENA_CLK_M;
2401 	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2402 	if (err)
2403 		return err;
2404 
2405 	val |= P_REG_PS_LOAD_OFFSET_M;
2406 	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2407 	if (err)
2408 		return err;
2409 
2410 	ice_ptp_exec_tmr_cmd(hw);
2411 
2412 	err = ice_sync_phy_timer_e822(hw, port);
2413 	if (err)
2414 		return err;
2415 
2416 	if (bypass) {
2417 		val |= P_REG_PS_BYPASS_MODE_M;
2418 		/* Enter BYPASS mode, enabling timestamps immediately. */
2419 		err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2420 		if (err)
2421 			return err;
2422 
2423 		/* Program the fixed Tx offset */
2424 		err = ice_phy_cfg_fixed_tx_offset_e822(hw, port);
2425 		if (err)
2426 			return err;
2427 
2428 		/* Program the fixed Rx offset */
2429 		err = ice_phy_cfg_fixed_rx_offset_e822(hw, port);
2430 		if (err)
2431 			return err;
2432 	}
2433 
2434 	ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port);
2435 
2436 	return 0;
2437 }
2438 
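/* Rough usage sketch for the bypass flow described above (illustrative
 * only, with error handling trimmed):
 *
 *	err = ice_start_phy_timer_e822(hw, port, true);
 *	if (err)
 *		return err;
 *
 * Then, once at least one packet has been sent and received so that the
 * Vernier measurements are valid:
 *
 *	err = ice_phy_exit_bypass_e822(hw, port);
 *	if (err == -EBUSY)
 *		retry later, the offsets are not valid yet
 */
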
2439 /**
2440  * ice_phy_exit_bypass_e822 - Exit bypass mode after Vernier calculations
2441  * @hw: pointer to the HW struct
2442  * @port: the PHY port to configure
2443  *
2444  * After hardware finishes vernier calculations for the Tx and Rx offset, this
2445  * function can be used to exit bypass mode by updating the total Tx and Rx
2446  * offsets, and then disabling bypass. This will enable hardware to include
2447  * the more precise offset calibrations, increasing precision of the generated
2448  * timestamps.
2449  *
2450  * This cannot be done until hardware has measured the offsets, which requires
2451  * waiting until at least one packet has been sent and received by the device.
2452  */
2453 int ice_phy_exit_bypass_e822(struct ice_hw *hw, u8 port)
2454 {
2455 	int err;
2456 	u32 val;
2457 
2458 	err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OV_STATUS, &val);
2459 	if (err) {
2460 		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, err %d\n",
2461 			  port, err);
2462 		return err;
2463 	}
2464 
2465 	if (!(val & P_REG_TX_OV_STATUS_OV_M)) {
2466 		ice_debug(hw, ICE_DBG_PTP, "Tx offset is not yet valid for port %u\n",
2467 			  port);
2468 		return -EBUSY;
2469 	}
2470 
2471 	err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OV_STATUS, &val);
2472 	if (err) {
2473 		ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, err %d\n",
2474 			  port, err);
2475 		return err;
2476 	}
2477 
2478 	if (!(val & P_REG_RX_OV_STATUS_OV_M)) {
2479 		ice_debug(hw, ICE_DBG_PTP, "Rx offset is not yet valid for port %u\n",
2480 			  port);
2481 		return -EBUSY;
2482 	}
2483 
2484 	err = ice_phy_cfg_tx_offset_e822(hw, port);
2485 	if (err) {
2486 		ice_debug(hw, ICE_DBG_PTP, "Failed to program total Tx offset for port %u, err %d\n",
2487 			  port, err);
2488 		return err;
2489 	}
2490 
2491 	err = ice_phy_cfg_rx_offset_e822(hw, port);
2492 	if (err) {
2493 		ice_debug(hw, ICE_DBG_PTP, "Failed to program total Rx offset for port %u, err %d\n",
2494 			  port, err);
2495 		return err;
2496 	}
2497 
2498 	/* Exit bypass mode now that the offset has been updated */
2499 	err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
2500 	if (err) {
2501 		ice_debug(hw, ICE_DBG_PTP, "Failed to read P_REG_PS for port %u, err %d\n",
2502 			  port, err);
2503 		return err;
2504 	}
2505 
2506 	if (!(val & P_REG_PS_BYPASS_MODE_M))
2507 		ice_debug(hw, ICE_DBG_PTP, "Port %u not in bypass mode\n",
2508 			  port);
2509 
2510 	val &= ~P_REG_PS_BYPASS_MODE_M;
2511 	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2512 	if (err) {
2513 		ice_debug(hw, ICE_DBG_PTP, "Failed to disable bypass for port %u, err %d\n",
2514 			  port, err);
2515 		return err;
2516 	}
2517 
2518 	dev_info(ice_hw_to_dev(hw), "Exiting bypass mode on PHY port %u\n",
2519 		 port);
2520 
2521 	return 0;
2522 }
2523 
2524 /* E810 functions
2525  *
2526  * The following functions operate on the E810 series devices which use
2527  * a separate external PHY.
2528  */
2529 
2530 /**
2531  * ice_read_phy_reg_e810 - Read register from external PHY on E810
2532  * @hw: pointer to the HW struct
2533  * @addr: the address to read from
2534  * @val: On return, the value read from the PHY
2535  *
2536  * Read a register from the external PHY on the E810 device.
2537  */
2538 static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
2539 {
2540 	struct ice_sbq_msg_input msg = {0};
2541 	int err;
2542 
2543 	msg.msg_addr_low = lower_16_bits(addr);
2544 	msg.msg_addr_high = upper_16_bits(addr);
2545 	msg.opcode = ice_sbq_msg_rd;
2546 	msg.dest_dev = rmn_0;
2547 
2548 	err = ice_sbq_rw_reg(hw, &msg);
2549 	if (err) {
2550 		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
2551 			  err);
2552 		return err;
2553 	}
2554 
2555 	*val = msg.data;
2556 
2557 	return 0;
2558 }
2559 
2560 /**
2561  * ice_write_phy_reg_e810 - Write register on external PHY on E810
2562  * @hw: pointer to the HW struct
2563  * @addr: the address to write to
2564  * @val: the value to write to the PHY
2565  *
2566  * Write a value to a register of the external PHY on the E810 device.
2567  */
2568 static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
2569 {
2570 	struct ice_sbq_msg_input msg = {0};
2571 	int err;
2572 
2573 	msg.msg_addr_low = lower_16_bits(addr);
2574 	msg.msg_addr_high = upper_16_bits(addr);
2575 	msg.opcode = ice_sbq_msg_wr;
2576 	msg.dest_dev = rmn_0;
2577 	msg.data = val;
2578 
2579 	err = ice_sbq_rw_reg(hw, &msg);
2580 	if (err) {
2581 		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
2582 			  err);
2583 		return err;
2584 	}
2585 
2586 	return 0;
2587 }
2588 
2589 /**
2590  * ice_read_phy_tstamp_e810 - Read a PHY timestamp out of the external PHY
2591  * @hw: pointer to the HW struct
2592  * @lport: the lport to read from
2593  * @idx: the timestamp index to read
2594  * @tstamp: on return, the 40bit timestamp value
2595  *
2596  * Read a 40bit timestamp value out of the timestamp block of the external PHY
2597  * on the E810 device.
2598  */
2599 static int
2600 ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
2601 {
2602 	u32 lo_addr, hi_addr, lo, hi;
2603 	int err;
2604 
2605 	lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
2606 	hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
2607 
2608 	err = ice_read_phy_reg_e810(hw, lo_addr, &lo);
2609 	if (err) {
2610 		ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
2611 			  err);
2612 		return err;
2613 	}
2614 
2615 	err = ice_read_phy_reg_e810(hw, hi_addr, &hi);
2616 	if (err) {
2617 		ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
2618 			  err);
2619 		return err;
2620 	}
2621 
2622 	/* For E810 devices, the timestamp is reported with the lower 32 bits
2623 	 * in the low register, and the upper 8 bits in the high register.
2624 	 */
2625 	*tstamp = ((u64)hi) << TS_HIGH_S | ((u64)lo & TS_LOW_M);
2626 
2627 	return 0;
2628 }
2629 
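/* Illustration of the 40 bit assembly above, assuming TS_HIGH_S is 32 and
 * TS_LOW_M masks the low 32 bits, as the register layout described in the
 * comment implies: with hi == 0xAB and lo == 0x12345678,
 *
 *	*tstamp = (u64)0xAB << 32 | 0x12345678 = 0x000000AB12345678
 *
 * i.e. an 8 bit extension on top of the 32 bit low word.
 */
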
2630 /**
2631  * ice_clear_phy_tstamp_e810 - Clear a timestamp from the external PHY
2632  * @hw: pointer to the HW struct
2633  * @lport: the lport to read from
2634  * @idx: the timestamp index to reset
2635  *
2636  * Clear a timestamp, resetting its valid bit, from the timestamp block of the
2637  * external PHY on the E810 device.
2638  */
2639 static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
2640 {
2641 	u32 lo_addr, hi_addr;
2642 	int err;
2643 
2644 	lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
2645 	hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
2646 
2647 	err = ice_write_phy_reg_e810(hw, lo_addr, 0);
2648 	if (err) {
2649 		ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
2650 			  err);
2651 		return err;
2652 	}
2653 
2654 	err = ice_write_phy_reg_e810(hw, hi_addr, 0);
2655 	if (err) {
2656 		ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
2657 			  err);
2658 		return err;
2659 	}
2660 
2661 	return 0;
2662 }
2663 
2664 /**
2665  * ice_ptp_init_phy_e810 - Enable PTP function on the external PHY
2666  * @hw: pointer to HW struct
2667  *
2668  * Enable the timesync PTP functionality for the external PHY connected to
2669  * this function.
2670  */
2671 int ice_ptp_init_phy_e810(struct ice_hw *hw)
2672 {
2673 	u8 tmr_idx;
2674 	int err;
2675 
2676 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2677 	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
2678 				     GLTSYN_ENA_TSYN_ENA_M);
2679 	if (err)
2680 		ice_debug(hw, ICE_DBG_PTP, "PTP failed in ena_phy_time_syn %d\n",
2681 			  err);
2682 
2683 	return err;
2684 }
2685 
2686 /**
2687  * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization
2688  * @hw: pointer to HW struct
2689  *
2690  * Perform E810-specific PTP hardware clock initialization steps.
2691  */
2692 static int ice_ptp_init_phc_e810(struct ice_hw *hw)
2693 {
2694 	/* Ensure synchronization delay is zero */
2695 	wr32(hw, GLTSYN_SYNC_DLAY, 0);
2696 
2697 	/* Initialize the PHY */
2698 	return ice_ptp_init_phy_e810(hw);
2699 }
2700 
2701 /**
2702  * ice_ptp_prep_phy_time_e810 - Prepare PHY port with initial time
2703  * @hw: Board private structure
2704  * @time: Time to initialize the PHY port clock to
2705  *
2706  * Program the PHY port ETH_GLTSYN_SHTIME registers in preparation for setting
2707  * the initial clock time. The time will not actually be programmed until the
2708  * driver issues an INIT_TIME command.
2709  *
2710  * The time value is the upper 32 bits of the PHY timer, usually in units of
2711  * nominal nanoseconds.
2712  */
2713 static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
2714 {
2715 	u8 tmr_idx;
2716 	int err;
2717 
2718 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2719 	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0);
2720 	if (err) {
2721 		ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_0, err %d\n",
2722 			  err);
2723 		return err;
2724 	}
2725 
2726 	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx), time);
2727 	if (err) {
2728 		ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_L, err %d\n",
2729 			  err);
2730 		return err;
2731 	}
2732 
2733 	return 0;
2734 }
2735 
2736 /**
2737  * ice_ptp_prep_phy_adj_e810 - Prep PHY port for a time adjustment
2738  * @hw: pointer to HW struct
2739  * @adj: adjustment value to program
2740  *
2741  * Prepare the PHY port for an atomic adjustment by programming the PHY
2742  * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual adjustment
2743  * is completed by issuing an ADJ_TIME sync command.
2744  *
2745  * The adjustment value only contains the portion used for the upper 32bits of
2746  * the PHY timer, usually in units of nominal nanoseconds. Negative
2747  * adjustments are supported using 2s complement arithmetic.
2748  */
2749 static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
2750 {
2751 	u8 tmr_idx;
2752 	int err;
2753 
2754 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2755 
2756 	/* Adjustments are represented as signed 2's complement values in
2757 	 * nanoseconds. Sub-nanosecond adjustment is not supported.
2758 	 */
2759 	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), 0);
2760 	if (err) {
2761 		ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_L, err %d\n",
2762 			  err);
2763 		return err;
2764 	}
2765 
2766 	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), adj);
2767 	if (err) {
2768 		ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_H, err %d\n",
2769 			  err);
2770 		return err;
2771 	}
2772 
2773 	return 0;
2774 }
2775 
2776 /**
2777  * ice_ptp_prep_phy_incval_e810 - Prep PHY port increment value change
2778  * @hw: pointer to HW struct
2779  * @incval: The new 40bit increment value to prepare
2780  *
2781  * Prepare the PHY port for a new increment value by programming the PHY
2782  * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual change is
2783  * completed by issuing an INIT_INCVAL command.
2784  */
2785 static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
2786 {
2787 	u32 high, low;
2788 	u8 tmr_idx;
2789 	int err;
2790 
2791 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2792 	low = lower_32_bits(incval);
2793 	high = upper_32_bits(incval);
2794 
2795 	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), low);
2796 	if (err) {
2797 		ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_L, err %d\n",
2798 			  err);
2799 		return err;
2800 	}
2801 
2802 	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), high);
2803 	if (err) {
2804 		ice_debug(hw, ICE_DBG_PTP, "Failed to write incval PHY SHADJ_H, err %d\n",
2805 			  err);
2806 		return err;
2807 	}
2808 
2809 	return 0;
2810 }
2811 
2812 /**
2813  * ice_ptp_port_cmd_e810 - Prepare all external PHYs for a timer command
2814  * @hw: pointer to HW struct
2815  * @cmd: Command to be sent to the port
2816  *
2817  * Prepare the external PHYs connected to this device for a timer sync
2818  * command.
2819  */
2820 static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
2821 {
2822 	u32 cmd_val, val;
2823 	int err;
2824 
2825 	switch (cmd) {
2826 	case INIT_TIME:
2827 		cmd_val = GLTSYN_CMD_INIT_TIME;
2828 		break;
2829 	case INIT_INCVAL:
2830 		cmd_val = GLTSYN_CMD_INIT_INCVAL;
2831 		break;
2832 	case ADJ_TIME:
2833 		cmd_val = GLTSYN_CMD_ADJ_TIME;
2834 		break;
2835 	case READ_TIME:
2836 		cmd_val = GLTSYN_CMD_READ_TIME;
2837 		break;
2838 	case ADJ_TIME_AT_TIME:
2839 		cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
2840 		break;
2841 	}
2842 
2843 	/* Read, modify, write */
2844 	err = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val);
2845 	if (err) {
2846 		ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, err %d\n", err);
2847 		return err;
2848 	}
2849 
2850 	/* Modify necessary bits only and perform write */
2851 	val &= ~TS_CMD_MASK_E810;
2852 	val |= cmd_val;
2853 
2854 	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val);
2855 	if (err) {
2856 		ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, err %d\n", err);
2857 		return err;
2858 	}
2859 
2860 	return 0;
2861 }
2862 
2863 /* Device agnostic functions
2864  *
2865  * The following functions implement shared behavior common to both E822 and
2866  * E810 devices, possibly calling a device specific implementation where
2867  * necessary.
2868  */
2869 
2870 /**
2871  * ice_ptp_lock - Acquire PTP global semaphore register lock
2872  * @hw: pointer to the HW struct
2873  *
2874  * Acquire the global PTP hardware semaphore lock. Returns true if the lock
2875  * was acquired, false otherwise.
2876  *
2877  * The PFTSYN_SEM register sets the busy bit on read, returning the previous
2878  * value. If software sees the busy bit cleared, this means that this function
2879  * acquired the lock (and the busy bit is now set). If software sees the busy
2880  * bit set, it means that another function acquired the lock.
2881  *
2882  * Software must clear the busy bit with a write to release the lock for other
2883  * functions when done.
2884  */
2885 bool ice_ptp_lock(struct ice_hw *hw)
2886 {
2887 	u32 hw_lock;
2888 	int i;
2889 
2890 #define MAX_TRIES 5
2891 
2892 	for (i = 0; i < MAX_TRIES; i++) {
2893 		hw_lock = rd32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
2894 		hw_lock = hw_lock & PFTSYN_SEM_BUSY_M;
2895 		if (!hw_lock)
2896 			break;
2897 
2898 		/* Somebody is holding the lock */
2899 		usleep_range(10000, 20000);
2900 	}
2901 
2902 	return !hw_lock;
2903 }
2904 
2905 /**
2906  * ice_ptp_unlock - Release PTP global semaphore register lock
2907  * @hw: pointer to the HW struct
2908  *
2909  * Release the global PTP hardware semaphore lock. This is done by writing to
2910  * the PFTSYN_SEM register.
2911  */
2912 void ice_ptp_unlock(struct ice_hw *hw)
2913 {
2914 	wr32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), 0);
2915 }
2916 
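/* Minimal sketch of the expected locking pattern (error handling
 * abbreviated):
 *
 *	if (!ice_ptp_lock(hw))
 *		return -EBUSY;
 *
 *	err = ice_ptp_write_incval(hw, incval);
 *
 *	ice_ptp_unlock(hw);
 *	return err;
 *
 * ice_ptp_write_incval_locked() later in this file is exactly this
 * wrapper.
 */
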
2917 /**
2918  * ice_ptp_tmr_cmd - Prepare and trigger a timer sync command
2919  * @hw: pointer to HW struct
2920  * @cmd: the command to issue
2921  *
2922  * Prepare the source timer and PHY timers and then trigger the requested
2923  * command. This causes the shadow registers previously written in preparation
2924  * for the command to be synchronously applied to both the source and PHY
2925  * timers.
2926  */
2927 static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
2928 {
2929 	int err;
2930 
2931 	/* First, prepare the source timer */
2932 	ice_ptp_src_cmd(hw, cmd);
2933 
2934 	/* Next, prepare the ports */
2935 	if (ice_is_e810(hw))
2936 		err = ice_ptp_port_cmd_e810(hw, cmd);
2937 	else
2938 		err = ice_ptp_port_cmd_e822(hw, cmd);
2939 	if (err) {
2940 		ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n",
2941 			  cmd, err);
2942 		return err;
2943 	}
2944 
2945 	/* Write the sync command register to drive both source and PHY timer
2946 	 * commands synchronously
2947 	 */
2948 	ice_ptp_exec_tmr_cmd(hw);
2949 
2950 	return 0;
2951 }
2952 
2953 /**
2954  * ice_ptp_init_time - Initialize device time to provided value
2955  * @hw: pointer to HW struct
2956  * @time: 64bits of time (GLTSYN_TIME_L and GLTSYN_TIME_H)
2957  *
2958  * Initialize the device to the provided time. This requires a three-step
2959  * process:
2960  *
2961  * 1) write the new init time to the source timer shadow registers
2962  * 2) write the new init time to the PHY timer shadow registers
2963  * 3) issue an init_time timer command to synchronously switch both the source
2964  *    and port timers to the new init time value at the next clock cycle.
2965  */
2966 int ice_ptp_init_time(struct ice_hw *hw, u64 time)
2967 {
2968 	u8 tmr_idx;
2969 	int err;
2970 
2971 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2972 
2973 	/* Source timers */
2974 	wr32(hw, GLTSYN_SHTIME_L(tmr_idx), lower_32_bits(time));
2975 	wr32(hw, GLTSYN_SHTIME_H(tmr_idx), upper_32_bits(time));
2976 	wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0);
2977 
2978 	/* PHY timers */
2979 	/* Fill Rx and Tx ports and send msg to PHY */
2980 	if (ice_is_e810(hw))
2981 		err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
2982 	else
2983 		err = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF);
2984 	if (err)
2985 		return err;
2986 
2987 	return ice_ptp_tmr_cmd(hw, INIT_TIME);
2988 }
2989 
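/* Note that only the lower 32 bits of @time are handed to the PHY
 * preparation helpers. As an illustrative example,
 * time == 0x0000001234567890 results in
 *
 *	GLTSYN_SHTIME_L = 0x34567890, GLTSYN_SHTIME_H = 0x00000012
 *
 * for the source timer, while the PHY shadow registers receive
 * 0x34567890, which ice_ptp_prep_phy_time_e810() describes as the upper
 * 32 bits of the PHY timer.
 */
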
2990 /**
2991  * ice_ptp_write_incval - Program PHC with new increment value
2992  * @hw: pointer to HW struct
2993  * @incval: Source timer increment value per clock cycle
2994  *
2995  * Program the PHC with a new increment value. This requires a three-step
2996  * process:
2997  *
2998  * 1) Write the increment value to the source timer shadow registers
2999  * 2) Write the increment value to the PHY timer shadow registers
3000  * 3) Issue an INIT_INCVAL timer command to synchronously switch both the
3001  *    source and port timers to the new increment value at the next clock
3002  *    cycle.
3003  */
3004 int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
3005 {
3006 	u8 tmr_idx;
3007 	int err;
3008 
3009 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3010 
3011 	/* Shadow Adjust */
3012 	wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
3013 	wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
3014 
3015 	if (ice_is_e810(hw))
3016 		err = ice_ptp_prep_phy_incval_e810(hw, incval);
3017 	else
3018 		err = ice_ptp_prep_phy_incval_e822(hw, incval);
3019 	if (err)
3020 		return err;
3021 
3022 	return ice_ptp_tmr_cmd(hw, INIT_INCVAL);
3023 }
3024 
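/* Hypothetical illustration of how a caller might derive a new increment
 * value (this is not the driver's actual frequency adjustment math): to
 * run the clock roughly 100 ppb fast relative to a nominal increment
 * incval_nom,
 *
 *	u64 delta = div_u64(incval_nom * 100, 1000000000);
 *	ice_ptp_write_incval_locked(hw, incval_nom + delta);
 *
 * The three-step shadow register sequence above ensures the source and
 * PHY timers switch to the new rate on the same clock cycle.
 */
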
3025 /**
3026  * ice_ptp_write_incval_locked - Program new incval while holding semaphore
3027  * @hw: pointer to HW struct
3028  * @incval: Source timer increment value per clock cycle
3029  *
3030  * Program a new PHC incval while holding the PTP semaphore.
3031  */
3032 int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
3033 {
3034 	int err;
3035 
3036 	if (!ice_ptp_lock(hw))
3037 		return -EBUSY;
3038 
3039 	err = ice_ptp_write_incval(hw, incval);
3040 
3041 	ice_ptp_unlock(hw);
3042 
3043 	return err;
3044 }
3045 
3046 /**
3047  * ice_ptp_adj_clock - Adjust PHC clock time atomically
3048  * @hw: pointer to HW struct
3049  * @adj: Adjustment in nanoseconds
3050  *
3051  * Perform an atomic adjustment of the PHC time by the specified number of
3052  * nanoseconds. This requires a three-step process:
3053  *
3054  * 1) Write the adjustment to the source timer shadow registers
3055  * 2) Write the adjustment to the PHY timer shadow registers
3056  * 3) Issue an ADJ_TIME timer command to synchronously apply the adjustment to
3057  *    both the source and port timers at the next clock cycle.
3058  */
3059 int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
3060 {
3061 	u8 tmr_idx;
3062 	int err;
3063 
3064 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3065 
3066 	/* Write the desired clock adjustment into the GLTSYN_SHADJ register.
3067 	 * For an ADJ_TIME command, this set of registers represents the value
3068 	 * to add to the clock time. It supports subtraction by interpreting
3069 	 * the value as a 2's complement integer.
3070 	 */
3071 	wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
3072 	wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
3073 
3074 	if (ice_is_e810(hw))
3075 		err = ice_ptp_prep_phy_adj_e810(hw, adj);
3076 	else
3077 		err = ice_ptp_prep_phy_adj_e822(hw, adj);
3078 	if (err)
3079 		return err;
3080 
3081 	return ice_ptp_tmr_cmd(hw, ADJ_TIME);
3082 }
3083 
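/* Worked example for a negative adjustment: adj == -1000 (nanoseconds)
 * writes 0 to GLTSYN_SHADJ_L and the 2's complement pattern 0xFFFFFC18 to
 * GLTSYN_SHADJ_H, and the PHY preparation helpers stage the same value.
 * When ADJ_TIME executes, both the source and port timers move back by
 * 1000 ns together.
 */
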
3084 /**
3085  * ice_read_phy_tstamp - Read a PHY timestamp from the timestamp block
3086  * @hw: pointer to the HW struct
3087  * @block: the block to read from
3088  * @idx: the timestamp index to read
3089  * @tstamp: on return, the 40bit timestamp value
3090  *
3091  * Read a 40bit timestamp value out of the timestamp block. For E822 devices,
3092  * the block is the quad to read from. For E810 devices, the block is the
3093  * logical port to read from.
3094  */
3095 int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
3096 {
3097 	if (ice_is_e810(hw))
3098 		return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
3099 	else
3100 		return ice_read_phy_tstamp_e822(hw, block, idx, tstamp);
3101 }
3102 
3103 /**
3104  * ice_clear_phy_tstamp - Clear a timestamp from the timestamp block
3105  * @hw: pointer to the HW struct
3106  * @block: the block to read from
3107  * @idx: the timestamp index to reset
3108  *
3109  * Clear a timestamp, resetting its valid bit, from the timestamp block. For
3110  * E822 devices, the block is the quad to clear from. For E810 devices, the
3111  * block is the logical port to clear from.
3112  */
3113 int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
3114 {
3115 	if (ice_is_e810(hw))
3116 		return ice_clear_phy_tstamp_e810(hw, block, idx);
3117 	else
3118 		return ice_clear_phy_tstamp_e822(hw, block, idx);
3119 }
3120 
3121 /* E810T SMA functions
3122  *
3123  * The following functions operate specifically on E810T hardware and are used
3124  * to access the extended GPIOs available.
3125  */
3126 
3127 /**
3128  * ice_get_pca9575_handle
3129  * @hw: pointer to the hw struct
3130  * @pca9575_handle: GPIO controller's handle
3131  *
3132  * Find and return the GPIO controller's handle in the netlist.
3133  * Once found, the value is cached in the HW structure and subsequent calls
3134  * return the cached value.
3135  */
3136 static int
3137 ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
3138 {
3139 	struct ice_aqc_get_link_topo *cmd;
3140 	struct ice_aq_desc desc;
3141 	int status;
3142 	u8 idx;
3143 
3144 	/* If handle was read previously return cached value */
3145 	if (hw->io_expander_handle) {
3146 		*pca9575_handle = hw->io_expander_handle;
3147 		return 0;
3148 	}
3149 
3150 	/* If handle was not detected read it from the netlist */
3151 	cmd = &desc.params.get_link_topo;
3152 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
3153 
3154 	/* Set node type to GPIO controller */
3155 	cmd->addr.topo_params.node_type_ctx =
3156 		(ICE_AQC_LINK_TOPO_NODE_TYPE_M &
3157 		 ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
3158 
3159 #define SW_PCA9575_SFP_TOPO_IDX		2
3160 #define SW_PCA9575_QSFP_TOPO_IDX	1
3161 
3162 	/* Check if the SW IO expander controlling SMA exists in the netlist. */
3163 	if (hw->device_id == ICE_DEV_ID_E810C_SFP)
3164 		idx = SW_PCA9575_SFP_TOPO_IDX;
3165 	else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
3166 		idx = SW_PCA9575_QSFP_TOPO_IDX;
3167 	else
3168 		return -EOPNOTSUPP;
3169 
3170 	cmd->addr.topo_params.index = idx;
3171 
3172 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3173 	if (status)
3174 		return -EOPNOTSUPP;
3175 
3176 	/* Verify if we found the right IO expander type */
3177 	if (desc.params.get_link_topo.node_part_num !=
3178 		ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
3179 		return -EOPNOTSUPP;
3180 
3181 	/* If present save the handle and return it */
3182 	hw->io_expander_handle =
3183 		le16_to_cpu(desc.params.get_link_topo.addr.handle);
3184 	*pca9575_handle = hw->io_expander_handle;
3185 
3186 	return 0;
3187 }
3188 
3189 /**
3190  * ice_read_sma_ctrl_e810t
3191  * @hw: pointer to the hw struct
3192  * @data: pointer to data to be read from the GPIO controller
3193  *
3194  * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the
3195  * PCA9575 expander, so only bits 3-7 in data are valid.
3196  */
3197 int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data)
3198 {
3199 	int status;
3200 	u16 handle;
3201 	u8 i;
3202 
3203 	status = ice_get_pca9575_handle(hw, &handle);
3204 	if (status)
3205 		return status;
3206 
3207 	*data = 0;
3208 
3209 	for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
3210 		bool pin;
3211 
3212 		status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
3213 					 &pin, NULL);
3214 		if (status)
3215 			break;
3216 		*data |= (u8)(!pin) << i;
3217 	}
3218 
3219 	return status;
3220 }
3221 
3222 /**
3223  * ice_write_sma_ctrl_e810t
3224  * @hw: pointer to the hw struct
3225  * @data: data to be written to the GPIO controller
3226  *
3227  * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1
3228  * of the PCA9575 expander, so only bits 3-7 in data are valid.
3229  */
3230 int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
3231 {
3232 	int status;
3233 	u16 handle;
3234 	u8 i;
3235 
3236 	status = ice_get_pca9575_handle(hw, &handle);
3237 	if (status)
3238 		return status;
3239 
3240 	for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
3241 		bool pin;
3242 
3243 		pin = !(data & (1 << i));
3244 		status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
3245 					 pin, NULL);
3246 		if (status)
3247 			break;
3248 	}
3249 
3250 	return status;
3251 }
3252 
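/* The SMA helpers above invert the pin state in both directions, so a set
 * bit in @data corresponds to a pin driven to 0 on the expander. As an
 * illustrative example, writing data == BIT(3) results in
 *
 *	ice_aq_set_gpio(hw, handle, 3 + ICE_PCA9575_P1_OFFSET, false, NULL);
 *
 * while bits 4-7, being clear, drive their pins to true.
 */
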
3253 /**
3254  * ice_read_pca9575_reg_e810t
3255  * @hw: pointer to the hw struct
3256  * @offset: GPIO controller register offset
3257  * @data: pointer to data to be read from the GPIO controller
3258  *
3259  * Read the register from the GPIO controller
3260  */
3261 int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
3262 {
3263 	struct ice_aqc_link_topo_addr link_topo;
3264 	__le16 addr;
3265 	u16 handle;
3266 	int err;
3267 
3268 	memset(&link_topo, 0, sizeof(link_topo));
3269 
3270 	err = ice_get_pca9575_handle(hw, &handle);
3271 	if (err)
3272 		return err;
3273 
3274 	link_topo.handle = cpu_to_le16(handle);
3275 	link_topo.topo_params.node_type_ctx =
3276 		FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
3277 			   ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
3278 
3279 	addr = cpu_to_le16((u16)offset);
3280 
3281 	return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
3282 }
3283 
3284 /**
3285  * ice_is_pca9575_present
3286  * @hw: pointer to the hw struct
3287  *
3288  * Check if the SW IO expander is present in the netlist
3289  */
3290 bool ice_is_pca9575_present(struct ice_hw *hw)
3291 {
3292 	u16 handle = 0;
3293 	int status;
3294 
3295 	if (!ice_is_e810t(hw))
3296 		return false;
3297 
3298 	status = ice_get_pca9575_handle(hw, &handle);
3299 
3300 	return !status && handle;
3301 }
3302 
3303 /**
3304  * ice_ptp_init_phc - Initialize PTP hardware clock
3305  * @hw: pointer to the HW struct
3306  *
3307  * Perform the steps required to initialize the PTP hardware clock.
3308  */
3309 int ice_ptp_init_phc(struct ice_hw *hw)
3310 {
3311 	u8 src_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3312 
3313 	/* Enable source clocks */
3314 	wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);
3315 
3316 	/* Clear event err indications for auxiliary pins */
3317 	(void)rd32(hw, GLTSYN_STAT(src_idx));
3318 
3319 	if (ice_is_e810(hw))
3320 		return ice_ptp_init_phc_e810(hw);
3321 	else
3322 		return ice_ptp_init_phc_e822(hw);
3323 }
3324