xref: /linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c (revision da1d9caf95def6f0320819cf941c9fd1069ba9e1)
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/ptp_clock_kernel.h>
5 #include <linux/clocksource.h>
6 #include <linux/timecounter.h>
7 #include <linux/spinlock.h>
8 #include <linux/device.h>
9 #include <linux/rhashtable.h>
10 #include <linux/ptp_classify.h>
11 #include <linux/if_ether.h>
12 #include <linux/if_vlan.h>
13 #include <linux/net_tstamp.h>
14 
15 #include "spectrum.h"
16 #include "spectrum_ptp.h"
17 #include "core.h"
18 
19 #define MLXSW_SP1_PTP_CLOCK_CYCLES_SHIFT	29
20 #define MLXSW_SP1_PTP_CLOCK_FREQ_KHZ		156257 /* 6.4nSec */
21 #define MLXSW_SP1_PTP_CLOCK_MASK		64
22 
23 #define MLXSW_SP1_PTP_HT_GC_INTERVAL		500 /* ms */
24 
25 /* How long, approximately, should the unmatched entries stay in the hash table
26  * before they are collected. Should be evenly divisible by the GC interval.
27  */
28 #define MLXSW_SP1_PTP_HT_GC_TIMEOUT		1000 /* ms */
29 
/* Per-ASIC PTP state for Spectrum-1: holds the table of unmatched
 * packets/timestamps waiting for their counterpart and the garbage
 * collector that ages stale entries out of it.
 */
struct mlxsw_sp_ptp_state {
	struct mlxsw_sp *mlxsw_sp;
	struct rhltable unmatched_ht;	/* unmatched entries, keyed by mlxsw_sp1_ptp_key */
	spinlock_t unmatched_lock; /* protects the HT */
	struct delayed_work ht_gc_dw;	/* periodic garbage-collection work */
	u32 gc_cycle;			/* monotonically increasing GC pass counter */
};
37 
/* Key used to pair a trapped PTP event packet with its hardware timestamp:
 * both pieces carry the same port, direction and PTP header identifiers.
 */
struct mlxsw_sp1_ptp_key {
	u16 local_port;
	u8 message_type;	/* PTP messageType */
	u16 sequence_id;	/* PTP sequenceId (host byte order) */
	u8 domain_number;	/* PTP domainNumber */
	bool ingress;		/* true for RX, false for TX */
};
45 
/* One half of a packet/timestamp pair, parked in the hash table until the
 * other half arrives or the entry is garbage-collected.
 */
struct mlxsw_sp1_ptp_unmatched {
	struct mlxsw_sp1_ptp_key key;
	struct rhlist_head ht_node;
	struct rcu_head rcu;
	struct sk_buff *skb;	/* NULL when only the timestamp arrived */
	u64 timestamp;		/* 0 when only the packet arrived */
	u32 gc_cycle;		/* GC pass at which this entry expires */
};
54 
/* Hash unmatched entries by the full key. An rhltable (list-valued hash
 * table) is used, so multiple entries with the same key can coexist; the
 * lookup side counts the chain length to bound it.
 */
static const struct rhashtable_params mlxsw_sp1_ptp_unmatched_ht_params = {
	.key_len = sizeof_field(struct mlxsw_sp1_ptp_unmatched, key),
	.key_offset = offsetof(struct mlxsw_sp1_ptp_unmatched, key),
	.head_offset = offsetof(struct mlxsw_sp1_ptp_unmatched, ht_node),
};
60 
/* Spectrum-1 PHC: a software timecounter layered on top of the hardware
 * free-running counter (FRC), plus the clock registered with the PTP core.
 */
struct mlxsw_sp_ptp_clock {
	struct mlxsw_core *core;
	spinlock_t lock; /* protect this structure */
	struct cyclecounter cycles;	/* reads the hardware FRC */
	struct timecounter tc;		/* cycles-to-nanoseconds state */
	u32 nominal_c_mult;		/* mult at zero frequency adjustment */
	struct ptp_clock *ptp;
	struct ptp_clock_info ptp_info;
	unsigned long overflow_period;	/* jiffies between overflow checks */
	struct delayed_work overflow_work;
};
72 
/* Read the 64-bit free-running counter as two 32-bit halves.
 * @sts: optional; when non-NULL, system timestamps are taken immediately
 *       before and after the low-half read.
 *
 * The high half is sampled before and after the low half. If the two
 * samples differ, the low half wrapped in between, so it is re-read to
 * pair consistently with the second high-half sample.
 */
static u64 __mlxsw_sp1_ptp_read_frc(struct mlxsw_sp_ptp_clock *clock,
				    struct ptp_system_timestamp *sts)
{
	struct mlxsw_core *mlxsw_core = clock->core;
	u32 frc_h1, frc_h2, frc_l;

	frc_h1 = mlxsw_core_read_frc_h(mlxsw_core);
	ptp_read_system_prets(sts);
	frc_l = mlxsw_core_read_frc_l(mlxsw_core);
	ptp_read_system_postts(sts);
	frc_h2 = mlxsw_core_read_frc_h(mlxsw_core);

	if (frc_h1 != frc_h2) {
		/* wrap around */
		ptp_read_system_prets(sts);
		frc_l = mlxsw_core_read_frc_l(mlxsw_core);
		ptp_read_system_postts(sts);
	}

	return (u64) frc_l | (u64) frc_h2 << 32;
}
94 
95 static u64 mlxsw_sp1_ptp_read_frc(const struct cyclecounter *cc)
96 {
97 	struct mlxsw_sp_ptp_clock *clock =
98 		container_of(cc, struct mlxsw_sp_ptp_clock, cycles);
99 
100 	return __mlxsw_sp1_ptp_read_frc(clock, NULL) & cc->mask;
101 }
102 
103 static int
104 mlxsw_sp1_ptp_phc_adjfreq(struct mlxsw_sp_ptp_clock *clock, int freq_adj)
105 {
106 	struct mlxsw_core *mlxsw_core = clock->core;
107 	char mtutc_pl[MLXSW_REG_MTUTC_LEN];
108 
109 	mlxsw_reg_mtutc_pack(mtutc_pl, MLXSW_REG_MTUTC_OPERATION_ADJUST_FREQ,
110 			     freq_adj, 0);
111 	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
112 }
113 
114 static u64 mlxsw_sp1_ptp_ns2cycles(const struct timecounter *tc, u64 nsec)
115 {
116 	u64 cycles = (u64) nsec;
117 
118 	cycles <<= tc->cc->shift;
119 	cycles = div_u64(cycles, tc->cc->mult);
120 
121 	return cycles;
122 }
123 
/* Set the hardware clock to @nsec.
 *
 * The new time is applied at the next whole-second boundary: the cycle
 * count corresponding to that boundary is programmed through the MTPPS
 * virtual pin, and MTUTC's SET_TIME_AT_NEXT_SEC operation is issued with
 * the boundary's seconds value. (NOTE(review): the exact trigger coupling
 * between MTPPS and MTUTC is per the register spec — confirm there.)
 */
static int
mlxsw_sp1_ptp_phc_settime(struct mlxsw_sp_ptp_clock *clock, u64 nsec)
{
	struct mlxsw_core *mlxsw_core = clock->core;
	u64 next_sec, next_sec_in_nsec, cycles;
	char mtutc_pl[MLXSW_REG_MTUTC_LEN];
	char mtpps_pl[MLXSW_REG_MTPPS_LEN];
	int err;

	next_sec = div_u64(nsec, NSEC_PER_SEC) + 1;
	next_sec_in_nsec = next_sec * NSEC_PER_SEC;

	/* The ns-to-cycles conversion reads the cyclecounter state, which is
	 * protected by the clock lock.
	 */
	spin_lock_bh(&clock->lock);
	cycles = mlxsw_sp1_ptp_ns2cycles(&clock->tc, next_sec_in_nsec);
	spin_unlock_bh(&clock->lock);

	mlxsw_reg_mtpps_vpin_pack(mtpps_pl, cycles);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtpps), mtpps_pl);
	if (err)
		return err;

	mlxsw_reg_mtutc_pack(mtutc_pl,
			     MLXSW_REG_MTUTC_OPERATION_SET_TIME_AT_NEXT_SEC,
			     0, next_sec);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
}
150 
/* ptp_clock_info::adjfine - adjust the clock frequency.
 *
 * Converts the scaled-ppm request to ppb, derives the matching delta for
 * the software cyclecounter multiplier, applies it relative to the nominal
 * multiplier, and mirrors the same ppb adjustment into the hardware clock.
 */
static int mlxsw_sp1_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct mlxsw_sp_ptp_clock *clock =
		container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
	int neg_adj = 0;
	u32 diff;
	u64 adj;
	s32 ppb;

	ppb = scaled_ppm_to_ppb(scaled_ppm);

	/* Work with the magnitude; remember the sign separately. */
	if (ppb < 0) {
		neg_adj = 1;
		ppb = -ppb;
	}

	/* diff = nominal_mult * |ppb| / 1e9, i.e. the multiplier change that
	 * corresponds to a |ppb| parts-per-billion frequency change.
	 */
	adj = clock->nominal_c_mult;
	adj *= ppb;
	diff = div_u64(adj, NSEC_PER_SEC);

	/* Read the timecounter first so time accumulated so far is accounted
	 * with the old multiplier; only then switch to the new one.
	 */
	spin_lock_bh(&clock->lock);
	timecounter_read(&clock->tc);
	clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
				       clock->nominal_c_mult + diff;
	spin_unlock_bh(&clock->lock);

	return mlxsw_sp1_ptp_phc_adjfreq(clock, neg_adj ? -ppb : ppb);
}
179 
/* ptp_clock_info::adjtime - shift the clock by @delta nanoseconds.
 *
 * Adjusts the software timecounter, then pushes the resulting absolute
 * time down to the hardware clock.
 */
static int mlxsw_sp1_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlxsw_sp_ptp_clock *clock =
		container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
	u64 nsec;

	spin_lock_bh(&clock->lock);
	timecounter_adjtime(&clock->tc, delta);
	nsec = timecounter_read(&clock->tc);
	spin_unlock_bh(&clock->lock);

	return mlxsw_sp1_ptp_phc_settime(clock, nsec);
}
193 
/* ptp_clock_info::gettimex64 - read the current clock time.
 *
 * Samples the hardware counter (bracketing the read with system timestamps
 * in @sts when requested) and converts it to nanoseconds through the
 * software timecounter.
 */
static int mlxsw_sp1_ptp_gettimex(struct ptp_clock_info *ptp,
				  struct timespec64 *ts,
				  struct ptp_system_timestamp *sts)
{
	struct mlxsw_sp_ptp_clock *clock =
		container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
	u64 cycles, nsec;

	spin_lock_bh(&clock->lock);
	cycles = __mlxsw_sp1_ptp_read_frc(clock, sts);
	nsec = timecounter_cyc2time(&clock->tc, cycles);
	spin_unlock_bh(&clock->lock);

	*ts = ns_to_timespec64(nsec);

	return 0;
}
211 
/* ptp_clock_info::settime64 - set the clock to @ts.
 *
 * Re-initializes the software timecounter at the new time, then propagates
 * the time to the hardware clock.
 */
static int mlxsw_sp1_ptp_settime(struct ptp_clock_info *ptp,
				 const struct timespec64 *ts)
{
	struct mlxsw_sp_ptp_clock *clock =
		container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
	u64 nsec = timespec64_to_ns(ts);

	spin_lock_bh(&clock->lock);
	timecounter_init(&clock->tc, &clock->cycles, nsec);
	nsec = timecounter_read(&clock->tc);
	spin_unlock_bh(&clock->lock);

	return mlxsw_sp1_ptp_phc_settime(clock, nsec);
}
226 
/* Operations exposed to the PTP core for the Spectrum-1 PHC. max_adj is
 * the largest frequency adjustment the core will accept for this clock.
 */
static const struct ptp_clock_info mlxsw_sp1_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.name		= "mlxsw_sp_clock",
	.max_adj	= 100000000,
	.adjfine	= mlxsw_sp1_ptp_adjfine,
	.adjtime	= mlxsw_sp1_ptp_adjtime,
	.gettimex64	= mlxsw_sp1_ptp_gettimex,
	.settime64	= mlxsw_sp1_ptp_settime,
};
236 
/* Periodic work that reads the timecounter so the hardware counter is
 * sampled well within every wrap-around period (see the overflow_period
 * computation in mlxsw_sp1_ptp_clock_init()), then re-arms itself.
 */
static void mlxsw_sp1_ptp_clock_overflow(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlxsw_sp_ptp_clock *clock;

	clock = container_of(dwork, struct mlxsw_sp_ptp_clock, overflow_work);

	spin_lock_bh(&clock->lock);
	timecounter_read(&clock->tc);
	spin_unlock_bh(&clock->lock);
	mlxsw_core_schedule_dw(&clock->overflow_work, clock->overflow_period);
}
249 
/* Allocate and initialize the Spectrum-1 PHC: set up a cyclecounter over
 * the hardware free-running counter, start the overflow watchdog, and
 * register the clock with the PTP core.
 *
 * Returns the new clock, or an ERR_PTR on failure.
 */
struct mlxsw_sp_ptp_clock *
mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
{
	u64 overflow_cycles, nsec, frac = 0;
	struct mlxsw_sp_ptp_clock *clock;
	int err;

	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
	if (!clock)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&clock->lock);
	clock->cycles.read = mlxsw_sp1_ptp_read_frc;
	clock->cycles.shift = MLXSW_SP1_PTP_CLOCK_CYCLES_SHIFT;
	clock->cycles.mult = clocksource_khz2mult(MLXSW_SP1_PTP_CLOCK_FREQ_KHZ,
						  clock->cycles.shift);
	/* Remember the unadjusted multiplier; adjfine works relative to it. */
	clock->nominal_c_mult = clock->cycles.mult;
	clock->cycles.mask = CLOCKSOURCE_MASK(MLXSW_SP1_PTP_CLOCK_MASK);
	clock->core = mlxsw_sp->core;

	/* Start the timecounter at the current wall-clock time. */
	timecounter_init(&clock->tc, &clock->cycles,
			 ktime_to_ns(ktime_get_real()));

	/* Calculate period in seconds to call the overflow watchdog - to make
	 * sure counter is checked at least twice every wrap around.
	 * The period is calculated as the minimum between max HW cycles count
	 * (The clock source mask) and max amount of cycles that can be
	 * multiplied by clock multiplier where the result doesn't exceed
	 * 64bits.
	 */
	overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
	overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));

	nsec = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles, 0, &frac);
	clock->overflow_period = nsecs_to_jiffies(nsec);

	INIT_DELAYED_WORK(&clock->overflow_work, mlxsw_sp1_ptp_clock_overflow);
	mlxsw_core_schedule_dw(&clock->overflow_work, 0);

	clock->ptp_info = mlxsw_sp1_ptp_clock_info;
	clock->ptp = ptp_clock_register(&clock->ptp_info, dev);
	if (IS_ERR(clock->ptp)) {
		err = PTR_ERR(clock->ptp);
		dev_err(dev, "ptp_clock_register failed %d\n", err);
		goto err_ptp_clock_register;
	}

	return clock;

err_ptp_clock_register:
	cancel_delayed_work_sync(&clock->overflow_work);
	kfree(clock);
	return ERR_PTR(err);
}
304 
/* Tear down the PHC in reverse order of mlxsw_sp1_ptp_clock_init():
 * unregister from the PTP core, stop the overflow watchdog, free the clock.
 */
void mlxsw_sp1_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock)
{
	ptp_clock_unregister(clock->ptp);
	cancel_delayed_work_sync(&clock->overflow_work);
	kfree(clock);
}
311 
312 static int mlxsw_sp_ptp_parse(struct sk_buff *skb,
313 			      u8 *p_domain_number,
314 			      u8 *p_message_type,
315 			      u16 *p_sequence_id)
316 {
317 	unsigned int ptp_class;
318 	struct ptp_header *hdr;
319 
320 	ptp_class = ptp_classify_raw(skb);
321 
322 	switch (ptp_class & PTP_CLASS_VMASK) {
323 	case PTP_CLASS_V1:
324 	case PTP_CLASS_V2:
325 		break;
326 	default:
327 		return -ERANGE;
328 	}
329 
330 	hdr = ptp_parse_header(skb, ptp_class);
331 	if (!hdr)
332 		return -EINVAL;
333 
334 	*p_message_type	 = ptp_get_msgtype(hdr, ptp_class);
335 	*p_domain_number = hdr->domain_number;
336 	*p_sequence_id	 = be16_to_cpu(hdr->sequence_id);
337 
338 	return 0;
339 }
340 
341 /* Returns NULL on successful insertion, a pointer on conflict, or an ERR_PTR on
342  * error.
343  */
344 static int
345 mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp,
346 			     struct mlxsw_sp1_ptp_key key,
347 			     struct sk_buff *skb,
348 			     u64 timestamp)
349 {
350 	int cycles = MLXSW_SP1_PTP_HT_GC_TIMEOUT / MLXSW_SP1_PTP_HT_GC_INTERVAL;
351 	struct mlxsw_sp_ptp_state *ptp_state = mlxsw_sp->ptp_state;
352 	struct mlxsw_sp1_ptp_unmatched *unmatched;
353 	int err;
354 
355 	unmatched = kzalloc(sizeof(*unmatched), GFP_ATOMIC);
356 	if (!unmatched)
357 		return -ENOMEM;
358 
359 	unmatched->key = key;
360 	unmatched->skb = skb;
361 	unmatched->timestamp = timestamp;
362 	unmatched->gc_cycle = mlxsw_sp->ptp_state->gc_cycle + cycles;
363 
364 	err = rhltable_insert(&ptp_state->unmatched_ht, &unmatched->ht_node,
365 			      mlxsw_sp1_ptp_unmatched_ht_params);
366 	if (err)
367 		kfree(unmatched);
368 
369 	return err;
370 }
371 
/* Look up unmatched entries for @key. Returns the last entry on the
 * key's chain (or NULL if there is none) and stores the chain length in
 * *p_length so the caller can bound how many entries pile up per key.
 *
 * Iterates the chain under RCU; the caller holds the RCU read lock.
 */
static struct mlxsw_sp1_ptp_unmatched *
mlxsw_sp1_ptp_unmatched_lookup(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp1_ptp_key key, int *p_length)
{
	struct mlxsw_sp1_ptp_unmatched *unmatched, *last = NULL;
	struct rhlist_head *tmp, *list;
	int length = 0;

	list = rhltable_lookup(&mlxsw_sp->ptp_state->unmatched_ht, &key,
			       mlxsw_sp1_ptp_unmatched_ht_params);
	rhl_for_each_entry_rcu(unmatched, tmp, list, ht_node) {
		last = unmatched;
		length++;
	}

	*p_length = length;
	return last;
}
390 
391 static int
392 mlxsw_sp1_ptp_unmatched_remove(struct mlxsw_sp *mlxsw_sp,
393 			       struct mlxsw_sp1_ptp_unmatched *unmatched)
394 {
395 	return rhltable_remove(&mlxsw_sp->ptp_state->unmatched_ht,
396 			       &unmatched->ht_node,
397 			       mlxsw_sp1_ptp_unmatched_ht_params);
398 }
399 
/* This function is called in the following scenarios:
 *
 * 1) When a packet is matched with its timestamp.
 * 2) In several situation when it is necessary to immediately pass on
 *    an SKB without a timestamp.
 * 3) From GC indirectly through mlxsw_sp1_ptp_unmatched_finish().
 *    This case is similar to 2) above.
 *
 * @hwtstamps may be NULL, in which case the packet is passed on (or its TX
 * timestamp reported) without a hardware timestamp.
 */
static void mlxsw_sp1_ptp_packet_finish(struct mlxsw_sp *mlxsw_sp,
					struct sk_buff *skb, u16 local_port,
					bool ingress,
					struct skb_shared_hwtstamps *hwtstamps)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	/* Between capturing the packet and finishing it, there is a window of
	 * opportunity for the originating port to go away (e.g. due to a
	 * split). Also make sure the SKB device reference is still valid.
	 */
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!(mlxsw_sp_port && (!skb->dev || skb->dev == mlxsw_sp_port->dev))) {
		dev_kfree_skb_any(skb);
		return;
	}

	/* Ingress: deliver the packet to the stack with the timestamp
	 * attached; egress: report the TX timestamp and release the SKB.
	 */
	if (ingress) {
		if (hwtstamps)
			*skb_hwtstamps(skb) = *hwtstamps;
		mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
	} else {
		/* skb_tstamp_tx() allows hwtstamps to be NULL. */
		skb_tstamp_tx(skb, hwtstamps);
		dev_kfree_skb_any(skb);
	}
}
435 
/* Convert the raw hardware counter value @timestamp to nanoseconds and
 * finish the packet with the resulting hardware timestamp attached.
 */
static void mlxsw_sp1_packet_timestamp(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp1_ptp_key key,
				       struct sk_buff *skb,
				       u64 timestamp)
{
	struct skb_shared_hwtstamps hwtstamps;
	u64 nsec;

	spin_lock_bh(&mlxsw_sp->clock->lock);
	nsec = timecounter_cyc2time(&mlxsw_sp->clock->tc, timestamp);
	spin_unlock_bh(&mlxsw_sp->clock->lock);

	hwtstamps.hwtstamp = ns_to_ktime(nsec);
	mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
				    key.local_port, key.ingress, &hwtstamps);
}
452 
453 static void
454 mlxsw_sp1_ptp_unmatched_finish(struct mlxsw_sp *mlxsw_sp,
455 			       struct mlxsw_sp1_ptp_unmatched *unmatched)
456 {
457 	if (unmatched->skb && unmatched->timestamp)
458 		mlxsw_sp1_packet_timestamp(mlxsw_sp, unmatched->key,
459 					   unmatched->skb,
460 					   unmatched->timestamp);
461 	else if (unmatched->skb)
462 		mlxsw_sp1_ptp_packet_finish(mlxsw_sp, unmatched->skb,
463 					    unmatched->key.local_port,
464 					    unmatched->key.ingress, NULL);
465 	kfree_rcu(unmatched, rcu);
466 }
467 
468 static void mlxsw_sp1_ptp_unmatched_free_fn(void *ptr, void *arg)
469 {
470 	struct mlxsw_sp1_ptp_unmatched *unmatched = ptr;
471 
472 	/* This is invoked at a point where the ports are gone already. Nothing
473 	 * to do with whatever is left in the HT but to free it.
474 	 */
475 	if (unmatched->skb)
476 		dev_kfree_skb_any(unmatched->skb);
477 	kfree_rcu(unmatched, rcu);
478 }
479 
/* Core matching logic. Called with exactly one piece — either an SKB
 * (@skb non-NULL) or a hardware timestamp (@timestamp non-zero) — and
 * tries to pair it with a previously saved counterpart for the same key.
 * A completed pair is removed from the hash table and finished; an
 * unpairable piece is saved for later, with the per-key chain length
 * bounded so a flood of one kind of piece cannot grow the table unbounded.
 */
static void mlxsw_sp1_ptp_got_piece(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp1_ptp_key key,
				    struct sk_buff *skb, u64 timestamp)
{
	struct mlxsw_sp1_ptp_unmatched *unmatched;
	int length;
	int err;

	/* RCU protects the lookup's list traversal; the spinlock serializes
	 * mutations of the hash table and of the entries themselves.
	 */
	rcu_read_lock();

	spin_lock(&mlxsw_sp->ptp_state->unmatched_lock);

	unmatched = mlxsw_sp1_ptp_unmatched_lookup(mlxsw_sp, key, &length);
	if (skb && unmatched && unmatched->timestamp) {
		/* Our SKB completes a waiting timestamp. */
		unmatched->skb = skb;
	} else if (timestamp && unmatched && unmatched->skb) {
		/* Our timestamp completes a waiting SKB. */
		unmatched->timestamp = timestamp;
	} else {
		/* Either there is no entry to match, or one that is there is
		 * incompatible.
		 */
		if (length < 100)
			err = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key,
							   skb, timestamp);
		else
			err = -E2BIG;
		if (err && skb)
			mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
						    key.local_port,
						    key.ingress, NULL);
		/* Nothing was completed; don't remove/finish anything. */
		unmatched = NULL;
	}

	if (unmatched) {
		err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp, unmatched);
		WARN_ON_ONCE(err);
	}

	spin_unlock(&mlxsw_sp->ptp_state->unmatched_lock);

	/* Finish outside the spinlock; the entry is already unlinked. */
	if (unmatched)
		mlxsw_sp1_ptp_unmatched_finish(mlxsw_sp, unmatched);

	rcu_read_unlock();
}
525 
/* Handle a trapped PTP packet in either direction: parse its PTP header,
 * and if timestamping is enabled for its message type on this port, hand
 * it to the matching logic. Anything that cannot or should not be matched
 * is passed on immediately without a timestamp.
 */
static void mlxsw_sp1_ptp_got_packet(struct mlxsw_sp *mlxsw_sp,
				     struct sk_buff *skb, u16 local_port,
				     bool ingress)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp1_ptp_key key;
	u8 types;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		goto immediate;

	types = ingress ? mlxsw_sp_port->ptp.ing_types :
			  mlxsw_sp_port->ptp.egr_types;
	if (!types)
		goto immediate;

	memset(&key, 0, sizeof(key));
	key.local_port = local_port;
	key.ingress = ingress;

	err = mlxsw_sp_ptp_parse(skb, &key.domain_number, &key.message_type,
				 &key.sequence_id);
	if (err)
		goto immediate;

	/* For packets whose timestamping was not enabled on this port, don't
	 * bother trying to match the timestamp.
	 */
	if (!((1 << key.message_type) & types))
		goto immediate;

	mlxsw_sp1_ptp_got_piece(mlxsw_sp, key, skb, 0);
	return;

immediate:
	mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb, local_port, ingress, NULL);
}
565 
/* Handle a hardware timestamp delivered by the device for a PTP packet.
 * Builds the matching key from the reported identifiers and hands the
 * timestamp to the matching logic, unless timestamping of this message
 * type is disabled on the port, in which case the timestamp is dropped.
 */
void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
				 u16 local_port, u8 message_type,
				 u8 domain_number, u16 sequence_id,
				 u64 timestamp)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp1_ptp_key key;
	u8 types;

	/* local_port comes from the device; guard against out-of-range
	 * values before indexing the ports array.
	 */
	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	types = ingress ? mlxsw_sp_port->ptp.ing_types :
			  mlxsw_sp_port->ptp.egr_types;

	/* For message types whose timestamping was not enabled on this port,
	 * don't bother with the timestamp.
	 */
	if (!((1 << message_type) & types))
		return;

	memset(&key, 0, sizeof(key));
	key.local_port = local_port;
	key.domain_number = domain_number;
	key.message_type = message_type;
	key.sequence_id = sequence_id;
	key.ingress = ingress;

	mlxsw_sp1_ptp_got_piece(mlxsw_sp, key, NULL, timestamp);
}
599 
/* RX trap handler for PTP packets. The MAC header offset is reset first,
 * since the PTP classifier expects it to be valid.
 */
void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			   u16 local_port)
{
	skb_reset_mac_header(skb);
	mlxsw_sp1_ptp_got_packet(mlxsw_sp, skb, local_port, true);
}
606 
/* TX trap handler for PTP packets (egress direction). */
void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
			       struct sk_buff *skb, u16 local_port)
{
	mlxsw_sp1_ptp_got_packet(mlxsw_sp, skb, local_port, false);
}
612 
/* Collect one expired unmatched entry: remove it from the hash table,
 * account it in the per-port GC statistics, and finish it — delivering
 * its SKB, if any, without a timestamp.
 */
static void
mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state,
			    struct mlxsw_sp1_ptp_unmatched *unmatched)
{
	struct mlxsw_sp_ptp_port_dir_stats *stats;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	/* If an unmatched entry has an SKB, it has to be handed over to the
	 * networking stack. This is usually done from a trap handler, which is
	 * invoked in a softirq context. Here we are going to do it in process
	 * context. If that were to be interrupted by a softirq, it could cause
	 * a deadlock when an attempt is made to take an already-taken lock
	 * somewhere along the sending path. Disable softirqs to prevent this.
	 */
	local_bh_disable();

	spin_lock(&ptp_state->unmatched_lock);
	err = rhltable_remove(&ptp_state->unmatched_ht, &unmatched->ht_node,
			      mlxsw_sp1_ptp_unmatched_ht_params);
	spin_unlock(&ptp_state->unmatched_lock);

	if (err)
		/* The packet was matched with timestamp during the walk. */
		goto out;

	/* Count the collected entry as a GC'd packet or timestamp in the
	 * originating port's stats, if the port still exists.
	 */
	mlxsw_sp_port = ptp_state->mlxsw_sp->ports[unmatched->key.local_port];
	if (mlxsw_sp_port) {
		stats = unmatched->key.ingress ?
			&mlxsw_sp_port->ptp.stats.rx_gcd :
			&mlxsw_sp_port->ptp.stats.tx_gcd;
		if (unmatched->skb)
			stats->packets++;
		else
			stats->timestamps++;
	}

	/* mlxsw_sp1_ptp_unmatched_finish() invokes netif_receive_skb(). While
	 * the comment at that function states that it can only be called in
	 * soft IRQ context, this pattern of local_bh_disable() +
	 * netif_receive_skb(), in process context, is seen elsewhere in the
	 * kernel, notably in pktgen.
	 */
	mlxsw_sp1_ptp_unmatched_finish(ptp_state->mlxsw_sp, unmatched);

out:
	local_bh_enable();
}
661 
/* Garbage-collector work item: walk the unmatched hash table, collect
 * every entry whose expiry cycle has been reached, and re-arm the work for
 * the next interval.
 */
static void mlxsw_sp1_ptp_ht_gc(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlxsw_sp1_ptp_unmatched *unmatched;
	struct mlxsw_sp_ptp_state *ptp_state;
	struct rhashtable_iter iter;
	u32 gc_cycle;
	void *obj;

	ptp_state = container_of(dwork, struct mlxsw_sp_ptp_state, ht_gc_dw);
	gc_cycle = ptp_state->gc_cycle++;

	rhltable_walk_enter(&ptp_state->unmatched_ht, &iter);
	rhashtable_walk_start(&iter);
	while ((obj = rhashtable_walk_next(&iter))) {
		/* The walk may return -EAGAIN (e.g. on a resize); skip and
		 * continue with the next object.
		 */
		if (IS_ERR(obj))
			continue;

		unmatched = obj;
		if (unmatched->gc_cycle <= gc_cycle)
			mlxsw_sp1_ptp_ht_gc_collect(ptp_state, unmatched);
	}
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);

	mlxsw_core_schedule_dw(&ptp_state->ht_gc_dw,
			       MLXSW_SP1_PTP_HT_GC_INTERVAL);
}
690 
/* Configure which PTP message types (bitmask) the device delivers via the
 * given trap ID (MTPTPT register).
 */
static int mlxsw_sp_ptp_mtptpt_set(struct mlxsw_sp *mlxsw_sp,
				   enum mlxsw_reg_mtptpt_trap_id trap_id,
				   u16 message_type)
{
	char mtptpt_pl[MLXSW_REG_MTPTPT_LEN];

	mlxsw_reg_mtptptp_pack(mtptpt_pl, trap_id, message_type);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mtptpt), mtptpt_pl);
}
700 
/* Set whether the ingress/egress timestamp FIFOs are cleared when their
 * contents are delivered via traps (MOGCR ptp_iftc/ptp_eftc bits).
 * Read-modify-write so unrelated MOGCR fields are preserved.
 */
static int mlxsw_sp1_ptp_set_fifo_clr_on_trap(struct mlxsw_sp *mlxsw_sp,
					      bool clr)
{
	char mogcr_pl[MLXSW_REG_MOGCR_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
	if (err)
		return err;

	mlxsw_reg_mogcr_ptp_iftc_set(mogcr_pl, clr);
	mlxsw_reg_mogcr_ptp_eftc_set(mogcr_pl, clr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
}
715 
/* Program the device-wide bitmasks of PTP message types to timestamp on
 * ingress and egress (MTPPPC register).
 */
static int mlxsw_sp1_ptp_mtpppc_set(struct mlxsw_sp *mlxsw_sp,
				    u16 ing_types, u16 egr_types)
{
	char mtpppc_pl[MLXSW_REG_MTPPPC_LEN];

	mlxsw_reg_mtpppc_pack(mtpppc_pl, ing_types, egr_types);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mtpppc), mtpppc_pl);
}
724 
/* Per-speed PTP shaper configuration, written to the QPSC register. */
struct mlxsw_sp1_ptp_shaper_params {
	u32 ethtool_speed;	/* SPEED_* value this entry applies to */
	enum mlxsw_reg_qpsc_port_speed port_speed;
	u8 shaper_time_exp;
	u8 shaper_time_mantissa;
	u8 shaper_inc;
	u8 shaper_bs;
	u8 port_to_shaper_credits;
	int ing_timestamp_inc;	/* ingress timestamp correction */
	int egr_timestamp_inc;	/* egress timestamp correction */
};
736 
/* PTP shaper parameters per supported port speed. Speeds not listed here
 * do not get the PTP shaper enabled (see mlxsw_sp1_ptp_port_shaper_check()).
 */
static const struct mlxsw_sp1_ptp_shaper_params
mlxsw_sp1_ptp_shaper_params[] = {
	{
		.ethtool_speed		= SPEED_100,
		.port_speed		= MLXSW_REG_QPSC_PORT_SPEED_100M,
		.shaper_time_exp	= 4,
		.shaper_time_mantissa	= 12,
		.shaper_inc		= 9,
		.shaper_bs		= 1,
		.port_to_shaper_credits	= 1,
		.ing_timestamp_inc	= -313,
		.egr_timestamp_inc	= 313,
	},
	{
		.ethtool_speed		= SPEED_1000,
		.port_speed		= MLXSW_REG_QPSC_PORT_SPEED_1G,
		.shaper_time_exp	= 0,
		.shaper_time_mantissa	= 12,
		.shaper_inc		= 6,
		.shaper_bs		= 0,
		.port_to_shaper_credits	= 1,
		.ing_timestamp_inc	= -35,
		.egr_timestamp_inc	= 35,
	},
	{
		.ethtool_speed		= SPEED_10000,
		.port_speed		= MLXSW_REG_QPSC_PORT_SPEED_10G,
		.shaper_time_exp	= 0,
		.shaper_time_mantissa	= 2,
		.shaper_inc		= 14,
		.shaper_bs		= 1,
		.port_to_shaper_credits	= 1,
		.ing_timestamp_inc	= -11,
		.egr_timestamp_inc	= 11,
	},
	{
		.ethtool_speed		= SPEED_25000,
		.port_speed		= MLXSW_REG_QPSC_PORT_SPEED_25G,
		.shaper_time_exp	= 0,
		.shaper_time_mantissa	= 0,
		.shaper_inc		= 11,
		.shaper_bs		= 1,
		.port_to_shaper_credits	= 1,
		.ing_timestamp_inc	= -14,
		.egr_timestamp_inc	= 14,
	},
};
784 
785 #define MLXSW_SP1_PTP_SHAPER_PARAMS_LEN ARRAY_SIZE(mlxsw_sp1_ptp_shaper_params)
786 
787 static int mlxsw_sp1_ptp_shaper_params_set(struct mlxsw_sp *mlxsw_sp)
788 {
789 	const struct mlxsw_sp1_ptp_shaper_params *params;
790 	char qpsc_pl[MLXSW_REG_QPSC_LEN];
791 	int i, err;
792 
793 	for (i = 0; i < MLXSW_SP1_PTP_SHAPER_PARAMS_LEN; i++) {
794 		params = &mlxsw_sp1_ptp_shaper_params[i];
795 		mlxsw_reg_qpsc_pack(qpsc_pl, params->port_speed,
796 				    params->shaper_time_exp,
797 				    params->shaper_time_mantissa,
798 				    params->shaper_inc, params->shaper_bs,
799 				    params->port_to_shaper_credits,
800 				    params->ing_timestamp_inc,
801 				    params->egr_timestamp_inc);
802 		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpsc), qpsc_pl);
803 		if (err)
804 			return err;
805 	}
806 
807 	return 0;
808 }
809 
/* Initialize Spectrum-1 PTP support: program the PTP shaper parameters,
 * set up the unmatched-entry hash table, route PTP message types to the
 * PTP0/PTP1 traps, enable FIFO-clear-on-trap, and start the garbage
 * collector. Returns the new state or an ERR_PTR on failure.
 */
struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_ptp_state *ptp_state;
	u16 message_type;
	int err;

	err = mlxsw_sp1_ptp_shaper_params_set(mlxsw_sp);
	if (err)
		return ERR_PTR(err);

	ptp_state = kzalloc(sizeof(*ptp_state), GFP_KERNEL);
	if (!ptp_state)
		return ERR_PTR(-ENOMEM);
	ptp_state->mlxsw_sp = mlxsw_sp;

	spin_lock_init(&ptp_state->unmatched_lock);

	err = rhltable_init(&ptp_state->unmatched_ht,
			    &mlxsw_sp1_ptp_unmatched_ht_params);
	if (err)
		goto err_hashtable_init;

	/* Deliver these message types as PTP0. */
	message_type = BIT(PTP_MSGTYPE_SYNC) |
		       BIT(PTP_MSGTYPE_DELAY_REQ) |
		       BIT(PTP_MSGTYPE_PDELAY_REQ) |
		       BIT(PTP_MSGTYPE_PDELAY_RESP);
	err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0,
				      message_type);
	if (err)
		goto err_mtptpt_set;

	/* Everything else is PTP1. */
	message_type = ~message_type;
	err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1,
				      message_type);
	if (err)
		goto err_mtptpt1_set;

	err = mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, true);
	if (err)
		goto err_fifo_clr;

	INIT_DELAYED_WORK(&ptp_state->ht_gc_dw, mlxsw_sp1_ptp_ht_gc);
	mlxsw_core_schedule_dw(&ptp_state->ht_gc_dw,
			       MLXSW_SP1_PTP_HT_GC_INTERVAL);
	return ptp_state;

err_fifo_clr:
	mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
err_mtptpt1_set:
	mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
err_mtptpt_set:
	rhltable_destroy(&ptp_state->unmatched_ht);
err_hashtable_init:
	kfree(ptp_state);
	return ERR_PTR(err);
}
868 
/* Tear down Spectrum-1 PTP support in reverse order of mlxsw_sp1_ptp_init():
 * stop GC, disable all timestamping, undo the trap configuration, and free
 * whatever is still left in the unmatched hash table.
 */
void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state)
{
	struct mlxsw_sp *mlxsw_sp = ptp_state->mlxsw_sp;

	cancel_delayed_work_sync(&ptp_state->ht_gc_dw);
	mlxsw_sp1_ptp_mtpppc_set(mlxsw_sp, 0, 0);
	mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, false);
	mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
	mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
	rhltable_free_and_destroy(&ptp_state->unmatched_ht,
				  &mlxsw_sp1_ptp_unmatched_free_fn, NULL);
	kfree(ptp_state);
}
882 
/* SIOCGHWTSTAMP handler: report the port's current hwtstamp config. */
int mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct hwtstamp_config *config)
{
	*config = mlxsw_sp_port->ptp.hwtstamp_config;
	return 0;
}
889 
890 static int mlxsw_sp_ptp_get_message_types(const struct hwtstamp_config *config,
891 					  u16 *p_ing_types, u16 *p_egr_types,
892 					  enum hwtstamp_rx_filters *p_rx_filter)
893 {
894 	enum hwtstamp_rx_filters rx_filter = config->rx_filter;
895 	enum hwtstamp_tx_types tx_type = config->tx_type;
896 	u16 ing_types = 0x00;
897 	u16 egr_types = 0x00;
898 
899 	switch (tx_type) {
900 	case HWTSTAMP_TX_OFF:
901 		egr_types = 0x00;
902 		break;
903 	case HWTSTAMP_TX_ON:
904 		egr_types = 0xff;
905 		break;
906 	case HWTSTAMP_TX_ONESTEP_SYNC:
907 	case HWTSTAMP_TX_ONESTEP_P2P:
908 		return -ERANGE;
909 	default:
910 		return -EINVAL;
911 	}
912 
913 	switch (rx_filter) {
914 	case HWTSTAMP_FILTER_NONE:
915 		ing_types = 0x00;
916 		break;
917 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
918 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
919 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
920 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
921 		ing_types = 0x01;
922 		break;
923 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
924 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
925 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
926 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
927 		ing_types = 0x02;
928 		break;
929 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
930 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
931 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
932 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
933 		ing_types = 0x0f;
934 		break;
935 	case HWTSTAMP_FILTER_ALL:
936 		ing_types = 0xff;
937 		break;
938 	case HWTSTAMP_FILTER_SOME:
939 	case HWTSTAMP_FILTER_NTP_ALL:
940 		return -ERANGE;
941 	default:
942 		return -EINVAL;
943 	}
944 
945 	*p_ing_types = ing_types;
946 	*p_egr_types = egr_types;
947 	*p_rx_filter = rx_filter;
948 	return 0;
949 }
950 
951 static int mlxsw_sp1_ptp_mtpppc_update(struct mlxsw_sp_port *mlxsw_sp_port,
952 				       u16 ing_types, u16 egr_types)
953 {
954 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
955 	struct mlxsw_sp_port *tmp;
956 	u16 orig_ing_types = 0;
957 	u16 orig_egr_types = 0;
958 	int err;
959 	int i;
960 
961 	/* MTPPPC configures timestamping globally, not per port. Find the
962 	 * configuration that contains all configured timestamping requests.
963 	 */
964 	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
965 		tmp = mlxsw_sp->ports[i];
966 		if (tmp) {
967 			orig_ing_types |= tmp->ptp.ing_types;
968 			orig_egr_types |= tmp->ptp.egr_types;
969 		}
970 		if (tmp && tmp != mlxsw_sp_port) {
971 			ing_types |= tmp->ptp.ing_types;
972 			egr_types |= tmp->ptp.egr_types;
973 		}
974 	}
975 
976 	if ((ing_types || egr_types) && !(orig_ing_types || orig_egr_types)) {
977 		err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
978 		if (err) {
979 			netdev_err(mlxsw_sp_port->dev, "Failed to increase parsing depth");
980 			return err;
981 		}
982 	}
983 	if (!(ing_types || egr_types) && (orig_ing_types || orig_egr_types))
984 		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
985 
986 	return mlxsw_sp1_ptp_mtpppc_set(mlxsw_sp_port->mlxsw_sp,
987 				       ing_types, egr_types);
988 }
989 
990 static bool mlxsw_sp1_ptp_hwtstamp_enabled(struct mlxsw_sp_port *mlxsw_sp_port)
991 {
992 	return mlxsw_sp_port->ptp.ing_types || mlxsw_sp_port->ptp.egr_types;
993 }
994 
/* Enable or disable the PTP shaper on the port (QEEC PTPS field). */
static int
mlxsw_sp1_ptp_port_shaper_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_ptps_pack(qeec_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1004 
1005 static int mlxsw_sp1_ptp_port_shaper_check(struct mlxsw_sp_port *mlxsw_sp_port)
1006 {
1007 	bool ptps = false;
1008 	int err, i;
1009 	u32 speed;
1010 
1011 	if (!mlxsw_sp1_ptp_hwtstamp_enabled(mlxsw_sp_port))
1012 		return mlxsw_sp1_ptp_port_shaper_set(mlxsw_sp_port, false);
1013 
1014 	err = mlxsw_sp_port_speed_get(mlxsw_sp_port, &speed);
1015 	if (err)
1016 		return err;
1017 
1018 	for (i = 0; i < MLXSW_SP1_PTP_SHAPER_PARAMS_LEN; i++) {
1019 		if (mlxsw_sp1_ptp_shaper_params[i].ethtool_speed == speed) {
1020 			ptps = true;
1021 			break;
1022 		}
1023 	}
1024 
1025 	return mlxsw_sp1_ptp_port_shaper_set(mlxsw_sp_port, ptps);
1026 }
1027 
/* Deferred work that re-evaluates the port's PTP shaper setting (e.g.
 * after a speed change). Does nothing when timestamping is disabled, since
 * the shaper is then off anyway.
 */
void mlxsw_sp1_ptp_shaper_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = container_of(dwork, struct mlxsw_sp_port,
				     ptp.shaper_dw);

	if (!mlxsw_sp1_ptp_hwtstamp_enabled(mlxsw_sp_port))
		return;

	err = mlxsw_sp1_ptp_port_shaper_check(mlxsw_sp_port);
	if (err)
		netdev_err(mlxsw_sp_port->dev, "Failed to set up PTP shaper\n");
}
1044 
/* SIOCSHWTSTAMP handler: validate and apply a new timestamping
 * configuration for the port — reprogram the global MTPPPC state, cache
 * the config on the port, and update the PTP shaper accordingly.
 */
int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct hwtstamp_config *config)
{
	enum hwtstamp_rx_filters rx_filter;
	u16 ing_types;
	u16 egr_types;
	int err;

	err = mlxsw_sp_ptp_get_message_types(config, &ing_types, &egr_types,
					     &rx_filter);
	if (err)
		return err;

	err = mlxsw_sp1_ptp_mtpppc_update(mlxsw_sp_port, ing_types, egr_types);
	if (err)
		return err;

	mlxsw_sp_port->ptp.hwtstamp_config = *config;
	mlxsw_sp_port->ptp.ing_types = ing_types;
	mlxsw_sp_port->ptp.egr_types = egr_types;

	err = mlxsw_sp1_ptp_port_shaper_check(mlxsw_sp_port);
	if (err)
		return err;

	/* Notify the ioctl caller what we are actually timestamping. */
	config->rx_filter = rx_filter;

	return 0;
}
1075 
1076 int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
1077 			      struct ethtool_ts_info *info)
1078 {
1079 	info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp);
1080 
1081 	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1082 				SOF_TIMESTAMPING_RX_HARDWARE |
1083 				SOF_TIMESTAMPING_RAW_HARDWARE;
1084 
1085 	info->tx_types = BIT(HWTSTAMP_TX_OFF) |
1086 			 BIT(HWTSTAMP_TX_ON);
1087 
1088 	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
1089 			   BIT(HWTSTAMP_FILTER_ALL);
1090 
1091 	return 0;
1092 }
1093 
/* One ethtool statistic: its name and its byte offset inside
 * struct mlxsw_sp_ptp_port_stats.
 */
struct mlxsw_sp_ptp_port_stat {
	char str[ETH_GSTRING_LEN];
	ptrdiff_t offset;
};

#define MLXSW_SP_PTP_PORT_STAT(NAME, FIELD)				\
	{								\
		.str = NAME,						\
		.offset = offsetof(struct mlxsw_sp_ptp_port_stats,	\
				    FIELD),				\
	}

/* Counters of unmatched packets/timestamps reaped by the garbage
 * collector, per direction.
 */
static const struct mlxsw_sp_ptp_port_stat mlxsw_sp_ptp_port_stats[] = {
	MLXSW_SP_PTP_PORT_STAT("ptp_rx_gcd_packets",    rx_gcd.packets),
	MLXSW_SP_PTP_PORT_STAT("ptp_rx_gcd_timestamps", rx_gcd.timestamps),
	MLXSW_SP_PTP_PORT_STAT("ptp_tx_gcd_packets",    tx_gcd.packets),
	MLXSW_SP_PTP_PORT_STAT("ptp_tx_gcd_timestamps", tx_gcd.timestamps),
};

#undef MLXSW_SP_PTP_PORT_STAT

#define MLXSW_SP_PTP_PORT_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_ptp_port_stats)
1117 
/* Number of per-port PTP ethtool statistics exposed by Spectrum-1. */
int mlxsw_sp1_get_stats_count(void)
{
	return MLXSW_SP_PTP_PORT_STATS_LEN;
}
1122 
1123 void mlxsw_sp1_get_stats_strings(u8 **p)
1124 {
1125 	int i;
1126 
1127 	for (i = 0; i < MLXSW_SP_PTP_PORT_STATS_LEN; i++) {
1128 		memcpy(*p, mlxsw_sp_ptp_port_stats[i].str,
1129 		       ETH_GSTRING_LEN);
1130 		*p += ETH_GSTRING_LEN;
1131 	}
1132 }
1133 
1134 void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
1135 			 u64 *data, int data_index)
1136 {
1137 	void *stats = &mlxsw_sp_port->ptp.stats;
1138 	ptrdiff_t offset;
1139 	int i;
1140 
1141 	data += data_index;
1142 	for (i = 0; i < MLXSW_SP_PTP_PORT_STATS_LEN; i++) {
1143 		offset = mlxsw_sp_ptp_port_stats[i].offset;
1144 		*data++ = *(u64 *)(stats + offset);
1145 	}
1146 }
1147