// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 *
 * The Sparx5 Chip Register Model can be browsed at this location:
 * https://github.com/microchip-ung/sparx-5_reginfo
 */
#include <linux/ptp_classify.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

#define SPARX5_MAX_PTP_ID	512

#define TOD_ACC_PIN		0x4

enum {
	PTP_PIN_ACTION_IDLE = 0,
	PTP_PIN_ACTION_LOAD,
	PTP_PIN_ACTION_SAVE,
	PTP_PIN_ACTION_CLOCK,
	PTP_PIN_ACTION_DELTA,
	PTP_PIN_ACTION_TOD
};

static u64 sparx5_ptp_get_1ppm(struct sparx5 *sparx5)
{
	/* Represents a 1 ppm adjustment in 2^59 fixed-point format, using the
	 * nominal TOD increment per core clock cycle as reference:
	 * 1.59687500000 ns (625 MHz), 1.99609375000 ns (500 MHz) and
	 * 3.99218750000 ns (250 MHz).
	 * The value is calculated as follows:
	 * (1/1000000)/((2^-59)/X)
	 */
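	/* Worked example (illustrative, derived from the constants below):
	 * for the 500 MHz core clock X = 1.99609375 ns, so
	 * 1.99609375e-6 * 2^59 ~= 1150669704793.16, which is rounded down to
	 * the 1150669704793 returned for SPX5_CORE_CLOCK_500MHZ.
	 */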

	u64 res = 0;

	switch (sparx5->coreclock) {
	case SPX5_CORE_CLOCK_250MHZ:
		res = 2301339409586;
		break;
	case SPX5_CORE_CLOCK_500MHZ:
		res = 1150669704793;
		break;
	case SPX5_CORE_CLOCK_625MHZ:
		res =  920535763834;
		break;
	default:
		WARN(1, "Invalid core clock");
		break;
	}

	return res;
}

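/* Nominal TOD increment per core clock cycle, expressed in the same 2^59
 * fixed-point nanosecond format: 0x1FF0000000000000 / 2^59 = 3.9921875 ns
 * (250 MHz), 0x0FF8000000000000 / 2^59 = 1.99609375 ns (500 MHz) and
 * 0x0CC6666666666666 / 2^59 ~= 1.596875 ns (625 MHz). The 1 ppm values
 * above are these constants divided by 10^6, rounded down.
 */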
static u64 sparx5_ptp_get_nominal_value(struct sparx5 *sparx5)
{
	u64 res = 0;

	switch (sparx5->coreclock) {
	case SPX5_CORE_CLOCK_250MHZ:
		res = 0x1FF0000000000000;
		break;
	case SPX5_CORE_CLOCK_500MHZ:
		res = 0x0FF8000000000000;
		break;
	case SPX5_CORE_CLOCK_625MHZ:
		res = 0x0CC6666666666666;
		break;
	default:
		WARN(1, "Invalid core clock");
		break;
	}

	return res;
}

int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, struct ifreq *ifr)
{
	struct sparx5 *sparx5 = port->sparx5;
	struct hwtstamp_config cfg;
	struct sparx5_phc *phc;

	/* For now, don't allow PTP to run on ports that are part of a bridge,
	 * because in the transparent clock case the HW will still forward the
	 * frames, so there would be duplicate frames.
	 */

	if (test_bit(port->portno, sparx5->bridge_mask))
		return -EINVAL;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	switch (cfg.tx_type) {
	case HWTSTAMP_TX_ON:
		port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
		break;
	case HWTSTAMP_TX_ONESTEP_SYNC:
		port->ptp_cmd = IFH_REW_OP_ONE_STEP_PTP;
		break;
	case HWTSTAMP_TX_OFF:
		port->ptp_cmd = IFH_REW_OP_NOOP;
		break;
	default:
		return -ERANGE;
	}

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		cfg.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	/* Commit back the result & save it */
	mutex_lock(&sparx5->ptp_lock);
	phc = &sparx5->phc[SPARX5_PHC_PORT];
	memcpy(&phc->hwtstamp_config, &cfg, sizeof(cfg));
	mutex_unlock(&sparx5->ptp_lock);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

int sparx5_ptp_hwtstamp_get(struct sparx5_port *port, struct ifreq *ifr)
{
	struct sparx5 *sparx5 = port->sparx5;
	struct sparx5_phc *phc;

	phc = &sparx5->phc[SPARX5_PHC_PORT];
	return copy_to_user(ifr->ifr_data, &phc->hwtstamp_config,
			    sizeof(phc->hwtstamp_config)) ? -EFAULT : 0;
}

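/* Decide how the rewriter should treat an outgoing frame: select the rewrite
 * operation (none, one-step or two-step PTP) together with the PDU type and
 * the PDU offset (in 16-bit words) that are passed to the hardware in the
 * frame header.
 */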
static void sparx5_ptp_classify(struct sparx5_port *port, struct sk_buff *skb,
				u8 *rew_op, u8 *pdu_type, u8 *pdu_w16_offset)
{
	struct ptp_header *header;
	u8 msgtype;
	int type;

	if (port->ptp_cmd == IFH_REW_OP_NOOP) {
		*rew_op = IFH_REW_OP_NOOP;
		*pdu_type = IFH_PDU_TYPE_NONE;
		*pdu_w16_offset = 0;
		return;
	}

	type = ptp_classify_raw(skb);
	if (type == PTP_CLASS_NONE) {
		*rew_op = IFH_REW_OP_NOOP;
		*pdu_type = IFH_PDU_TYPE_NONE;
		*pdu_w16_offset = 0;
		return;
	}

	header = ptp_parse_header(skb, type);
	if (!header) {
		*rew_op = IFH_REW_OP_NOOP;
		*pdu_type = IFH_PDU_TYPE_NONE;
		*pdu_w16_offset = 0;
		return;
	}

	*pdu_w16_offset = 7;
	if (type & PTP_CLASS_L2)
		*pdu_type = IFH_PDU_TYPE_PTP;
	if (type & PTP_CLASS_IPV4)
		*pdu_type = IFH_PDU_TYPE_IPV4_UDP_PTP;
	if (type & PTP_CLASS_IPV6)
		*pdu_type = IFH_PDU_TYPE_IPV6_UDP_PTP;

	if (port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
		*rew_op = IFH_REW_OP_TWO_STEP_PTP;
		return;
	}

	/* If this is a sync message and one-step is requested, use the
	 * one-step operation; otherwise fall back to two-step.
	 */
	msgtype = ptp_get_msgtype(header, type);
	if ((msgtype & 0xf) == 0) {
		*rew_op = IFH_REW_OP_ONE_STEP_PTP;
		return;
	}

	*rew_op = IFH_REW_OP_TWO_STEP_PTP;
}

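/* Drop queued two-step frames whose hardware timestamp has not arrived within
 * SPARX5_PTP_TIMEOUT, so the tx_skbs queue cannot grow without bound.
 */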
static void sparx5_ptp_txtstamp_old_release(struct sparx5_port *port)
{
	struct sk_buff *skb, *skb_tmp;
	unsigned long flags;

	spin_lock_irqsave(&port->tx_skbs.lock, flags);
	skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
		if (time_after(SPARX5_SKB_CB(skb)->jiffies + SPARX5_PTP_TIMEOUT,
			       jiffies))
			break;

		__skb_unlink(skb, &port->tx_skbs);
		dev_kfree_skb_any(skb);
	}
	spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
}

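/* Prepare a frame for transmission: store the chosen rewrite operation in the
 * skb control block and, for two-step timestamping, queue the skb and assign
 * it a timestamp ID so the IRQ handler can match the timestamp later. At most
 * SPARX5_MAX_PTP_ID two-step frames may be outstanding per switch.
 */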
int sparx5_ptp_txtstamp_request(struct sparx5_port *port,
				struct sk_buff *skb)
{
	struct sparx5 *sparx5 = port->sparx5;
	u8 rew_op, pdu_type, pdu_w16_offset;
	unsigned long flags;

	sparx5_ptp_classify(port, skb, &rew_op, &pdu_type, &pdu_w16_offset);
	SPARX5_SKB_CB(skb)->rew_op = rew_op;
	SPARX5_SKB_CB(skb)->pdu_type = pdu_type;
	SPARX5_SKB_CB(skb)->pdu_w16_offset = pdu_w16_offset;

	if (rew_op != IFH_REW_OP_TWO_STEP_PTP)
		return 0;

	sparx5_ptp_txtstamp_old_release(port);

	spin_lock_irqsave(&sparx5->ptp_ts_id_lock, flags);
	if (sparx5->ptp_skbs == SPARX5_MAX_PTP_ID) {
		spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);
		return -EBUSY;
	}

	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	skb_queue_tail(&port->tx_skbs, skb);
	SPARX5_SKB_CB(skb)->ts_id = port->ts_id;
	SPARX5_SKB_CB(skb)->jiffies = jiffies;

	sparx5->ptp_skbs++;
	port->ts_id++;
	if (port->ts_id == SPARX5_MAX_PTP_ID)
		port->ts_id = 0;

	spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);

	return 0;
}

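/* Undo the bookkeeping done in sparx5_ptp_txtstamp_request() for a frame that
 * will not get a hardware timestamp after all: release its timestamp ID and
 * remove it from the port's tx_skbs queue.
 */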
void sparx5_ptp_txtstamp_release(struct sparx5_port *port,
				 struct sk_buff *skb)
{
	struct sparx5 *sparx5 = port->sparx5;
	unsigned long flags;

	spin_lock_irqsave(&sparx5->ptp_ts_id_lock, flags);
	port->ts_id--;
	sparx5->ptp_skbs--;
	skb_unlink(skb, &port->tx_skbs);
	spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);
}

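/* Build a full timestamp from the nanosecond value captured by the hardware:
 * read the current PTP seconds counter and compensate if the second has
 * incremented since the frame was stamped.
 */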
static void sparx5_get_hwtimestamp(struct sparx5 *sparx5,
				   struct timespec64 *ts,
				   u32 nsec)
{
	/* Read current PTP time to get seconds */
	unsigned long flags;
	u32 curr_nsec;

	spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);

	spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(SPARX5_PHC_PORT) |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
		 PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
		 sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));

	ts->tv_sec = spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
	curr_nsec = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));

	ts->tv_nsec = nsec;

	/* Sec has incremented since the ts was captured */
	if (curr_nsec < nsec)
		ts->tv_sec--;

	spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
}

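/* Two-step TX timestamps are delivered through a FIFO in the rewriter. Each
 * timestamp occupies two consecutive entries: the first carries the nanosecond
 * value and the TX port, the second carries the timestamp ID that is matched
 * against the frames queued in sparx5_ptp_txtstamp_request().
 */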
irqreturn_t sparx5_ptp_irq_handler(int irq, void *args)
{
	int budget = SPARX5_MAX_PTP_ID;
	struct sparx5 *sparx5 = args;

	while (budget--) {
		struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
		struct skb_shared_hwtstamps shhwtstamps;
		struct sparx5_port *port;
		struct timespec64 ts;
		unsigned long flags;
		u32 val, id, txport;
		u32 delay;

		val = spx5_rd(sparx5, REW_PTP_TWOSTEP_CTRL);

		/* Check if a timestamp can be retrieved */
		if (!(val & REW_PTP_TWOSTEP_CTRL_PTP_VLD))
			break;

		WARN_ON(val & REW_PTP_TWOSTEP_CTRL_PTP_OVFL);

		if (!(val & REW_PTP_TWOSTEP_CTRL_STAMP_TX))
			continue;

		/* Retrieve the TX port of the timestamp */
		txport = REW_PTP_TWOSTEP_CTRL_STAMP_PORT_GET(val);

		/* Retrieve the port that holds the associated skb */
		port = sparx5->ports[txport];

		/* Retrieve the delay */
		delay = spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP);
		delay = REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(delay);

		/* Get the next entry from the FIFO, which must be the RX
		 * timestamp entry that carries the ID of the frame
		 */
		spx5_rmw(REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(1),
			 REW_PTP_TWOSTEP_CTRL_PTP_NXT,
			 sparx5, REW_PTP_TWOSTEP_CTRL);

		val = spx5_rd(sparx5, REW_PTP_TWOSTEP_CTRL);

		/* Check if a timestamp can be retrieved */
		if (!(val & REW_PTP_TWOSTEP_CTRL_PTP_VLD))
			break;

		/* Read RX timestamping to get the ID */
		id = spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP);
		id <<= 8;
		id |= spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP_SUBNS);

		spin_lock_irqsave(&port->tx_skbs.lock, flags);
		skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
			if (SPARX5_SKB_CB(skb)->ts_id != id)
				continue;

			__skb_unlink(skb, &port->tx_skbs);
			skb_match = skb;
			break;
		}
		spin_unlock_irqrestore(&port->tx_skbs.lock, flags);

		/* Next ts */
		spx5_rmw(REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(1),
			 REW_PTP_TWOSTEP_CTRL_PTP_NXT,
			 sparx5, REW_PTP_TWOSTEP_CTRL);

		if (WARN_ON(!skb_match))
			continue;

		spin_lock(&sparx5->ptp_ts_id_lock);
		sparx5->ptp_skbs--;
		spin_unlock(&sparx5->ptp_ts_id_lock);

		/* Get the h/w timestamp */
		sparx5_get_hwtimestamp(sparx5, &ts, delay);

		/* Set the timestamp into the skb */
		shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
		skb_tstamp_tx(skb_match, &shhwtstamps);

		dev_kfree_skb_any(skb_match);
	}

	return IRQ_HANDLED;
}

static int sparx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
	struct sparx5 *sparx5 = phc->sparx5;
	unsigned long flags;
	bool neg_adj = false;
	u64 tod_inc;
	u64 ref;

	if (!scaled_ppm)
		return 0;

	if (scaled_ppm < 0) {
		neg_adj = true;
		scaled_ppm = -scaled_ppm;
	}

	tod_inc = sparx5_ptp_get_nominal_value(sparx5);

	/* The multiplication is split into two separate additions to avoid
	 * overflow: multiplying the full scaled_ppm (which carries a 16-bit
	 * fractional part) directly would overflow for adjustments larger
	 * than roughly 20 ppm.
	 */
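	/* Illustration of the split: for scaled_ppm = 10 * 65536 + 32768
	 * (i.e. 10.5 ppm), the first term contributes 10 * 1ppm and the
	 * second (1ppm * 32768) >> 16 = 0.5 * 1ppm, giving 10.5 * 1ppm in
	 * total without ever forming the full 64-bit product.
	 */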
	ref = sparx5_ptp_get_1ppm(sparx5) * (scaled_ppm >> 16);
	ref += (sparx5_ptp_get_1ppm(sparx5) * (0xffff & scaled_ppm)) >> 16;
	tod_inc = neg_adj ? tod_inc - ref : tod_inc + ref;

	spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);

	spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(BIT(phc->index)),
		 PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS,
		 sparx5, PTP_PTP_DOM_CFG);

	spx5_wr(lower_32_bits(tod_inc), sparx5,
		PTP_CLK_PER_CFG(phc->index, 0));
	spx5_wr(upper_32_bits(tod_inc), sparx5,
		PTP_CLK_PER_CFG(phc->index, 1));

	spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0),
		 PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, sparx5,
		 PTP_PTP_DOM_CFG);


	spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);

	return 0;
}

static int sparx5_ptp_settime64(struct ptp_clock_info *ptp,
				const struct timespec64 *ts)
{
	struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
	struct sparx5 *sparx5 = phc->sparx5;
	unsigned long flags;

	spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);

	/* Must be in IDLE mode before the time can be loaded */
	spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
		 PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
		 sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));

	/* Set new value */
	spx5_wr(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_SET(upper_32_bits(ts->tv_sec)),
		sparx5, PTP_PTP_TOD_SEC_MSB(TOD_ACC_PIN));
	spx5_wr(lower_32_bits(ts->tv_sec),
		sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
	spx5_wr(ts->tv_nsec, sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));

	/* Apply new values */
	spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_LOAD) |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
		 PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
		 sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));

	spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);

	return 0;
}

int sparx5_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
	struct sparx5 *sparx5 = phc->sparx5;
	unsigned long flags;
	time64_t s;
	s64 ns;

	spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);

	spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
		 PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
		 sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));

	s = spx5_rd(sparx5, PTP_PTP_TOD_SEC_MSB(TOD_ACC_PIN));
	s <<= 32;
	s |= spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
	ns = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
	ns &= PTP_PTP_TOD_NSEC_PTP_TOD_NSEC;

	spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);

	/* Deal with negative values */
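	/* Illustration: the nanosecond field wraps at 0x40000000, so raw
	 * readings of 0x3FFFFFF0..0x3FFFFFFF encode the small negative values
	 * -16..-1. E.g. 0x3FFFFFFE (-2) is folded back into the previous
	 * second as tv_sec - 1 and 999999984 + 14 = 999999998 ns.
	 */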
	if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) {
		s--;
		ns &= 0xf;
		ns += 999999984;
	}

	set_normalized_timespec64(ts, s, ns);
	return 0;
}

static int sparx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
	struct sparx5 *sparx5 = phc->sparx5;

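	/* Offsets smaller than half a second are applied directly in hardware
	 * via the DELTA pin action; larger offsets fall back to a
	 * read-modify-write of the full time of day, which is not exact.
	 */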
	if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
		unsigned long flags;

		spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);

		/* Must be in IDLE mode before the time can be loaded */
		spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
			 PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
			 PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
			 PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
			 PTP_PTP_PIN_CFG_PTP_PIN_DOM |
			 PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
			 sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));

		spx5_wr(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_SET(delta),
			sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));

		/* Adjust time with the value of PTP_TOD_NSEC */
		spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_DELTA) |
			 PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
			 PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
			 PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
			 PTP_PTP_PIN_CFG_PTP_PIN_DOM |
			 PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
			 sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));

		spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
	} else {
		/* Fall back to sparx5_ptp_settime64(), which is not exact */
		struct timespec64 ts;
		u64 now;

		sparx5_ptp_gettime64(ptp, &ts);

		now = ktime_to_ns(timespec64_to_ktime(ts));
		ts = ns_to_timespec64(now + delta);

		sparx5_ptp_settime64(ptp, &ts);
	}

	return 0;
}

static struct ptp_clock_info sparx5_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.name		= "sparx5 ptp",
	.max_adj	= 200000,
	.gettime64	= sparx5_ptp_gettime64,
	.settime64	= sparx5_ptp_settime64,
	.adjtime	= sparx5_ptp_adjtime,
	.adjfine	= sparx5_ptp_adjfine,
};

static int sparx5_ptp_phc_init(struct sparx5 *sparx5,
			       int index,
			       struct ptp_clock_info *clock_info)
{
	struct sparx5_phc *phc = &sparx5->phc[index];

	phc->info = *clock_info;
	phc->clock = ptp_clock_register(&phc->info, sparx5->dev);
	if (IS_ERR(phc->clock))
		return PTR_ERR(phc->clock);

	phc->index = index;
	phc->sparx5 = sparx5;

	/* PTP Rx stamping is always enabled. */
	phc->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	return 0;
}

int sparx5_ptp_init(struct sparx5 *sparx5)
{
	u64 tod_adj = sparx5_ptp_get_nominal_value(sparx5);
	struct sparx5_port *port;
	int err, i;

	if (!sparx5->ptp)
		return 0;

	for (i = 0; i < SPARX5_PHC_COUNT; ++i) {
		err = sparx5_ptp_phc_init(sparx5, i, &sparx5_ptp_clock_info);
		if (err)
			return err;
	}

	spin_lock_init(&sparx5->ptp_clock_lock);
	spin_lock_init(&sparx5->ptp_ts_id_lock);
	mutex_init(&sparx5->ptp_lock);

	/* Disable master counters */
	spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0), sparx5, PTP_PTP_DOM_CFG);

	/* Configure the nominal TOD increment per clock cycle */
	spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0x7),
		 PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS,
		 sparx5, PTP_PTP_DOM_CFG);

	for (i = 0; i < SPARX5_PHC_COUNT; ++i) {
		spx5_wr(lower_32_bits(tod_adj), sparx5,
			PTP_CLK_PER_CFG(i, 0));
		spx5_wr(upper_32_bits(tod_adj), sparx5,
			PTP_CLK_PER_CFG(i, 1));
	}

	spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0),
		 PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS,
		 sparx5, PTP_PTP_DOM_CFG);

	/* Enable master counters */
	spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0x7), sparx5, PTP_PTP_DOM_CFG);

	for (i = 0; i < SPX5_PORTS; i++) {
		port = sparx5->ports[i];
		if (!port)
			continue;

		skb_queue_head_init(&port->tx_skbs);
	}

	return 0;
}

void sparx5_ptp_deinit(struct sparx5 *sparx5)
{
	struct sparx5_port *port;
	int i;

	for (i = 0; i < SPX5_PORTS; i++) {
		port = sparx5->ports[i];
		if (!port)
			continue;

		skb_queue_purge(&port->tx_skbs);
	}

	for (i = 0; i < SPARX5_PHC_COUNT; ++i)
		ptp_clock_unregister(sparx5->phc[i].clock);
}

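/* Reconstruct a full RX timestamp from the nanosecond value delivered with the
 * frame: combine it with the current PHC seconds and step back one second if
 * the nanosecond counter has wrapped since the frame was stamped.
 */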
void sparx5_ptp_rxtstamp(struct sparx5 *sparx5, struct sk_buff *skb,
			 u64 timestamp)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct sparx5_phc *phc;
	struct timespec64 ts;
	u64 full_ts_in_ns;

	if (!sparx5->ptp)
		return;

	phc = &sparx5->phc[SPARX5_PHC_PORT];
	sparx5_ptp_gettime64(&phc->info, &ts);

	if (ts.tv_nsec < timestamp)
		ts.tv_sec--;
	ts.tv_nsec = timestamp;
	full_ts_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);

	shhwtstamps = skb_hwtstamps(skb);
	shhwtstamps->hwtstamp = full_ts_in_ns;
}
685