xref: /linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c (revision e1c4c5436b4ad579762fbe78bfabc8aef59bd5b1)
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/ptp_clock_kernel.h>
5 #include <linux/clocksource.h>
6 #include <linux/timecounter.h>
7 #include <linux/spinlock.h>
8 #include <linux/device.h>
9 #include <linux/rhashtable.h>
10 #include <linux/ptp_classify.h>
11 #include <linux/if_ether.h>
12 #include <linux/if_vlan.h>
13 #include <linux/net_tstamp.h>
14 #include <linux/refcount.h>
15 
16 #include "spectrum.h"
17 #include "spectrum_ptp.h"
18 #include "core.h"
19 
20 #define MLXSW_SP1_PTP_CLOCK_CYCLES_SHIFT	29
21 #define MLXSW_SP1_PTP_CLOCK_FREQ_KHZ		156257 /* 6.4 ns period */
22 #define MLXSW_SP1_PTP_CLOCK_MASK		64
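/* With a 156.257 MHz free-running counter, one cycle is roughly 6.4 ns. The
 * cyclecounter set up in mlxsw_sp1_ptp_clock_init() converts cycles to
 * nanoseconds as ns = (cycles * mult) >> shift, where
 * mult = (10^6 << shift) / freq_khz, i.e. roughly 3.44e9 for the values
 * above, so (1 * 3.44e9) >> 29 is again roughly 6.4 ns.
 */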
23 
24 #define MLXSW_SP1_PTP_HT_GC_INTERVAL		500 /* ms */
25 
26 /* Approximately how long unmatched entries should stay in the hash table
27  * before they are collected. Should be evenly divisible by the GC interval.
28  */
29 #define MLXSW_SP1_PTP_HT_GC_TIMEOUT		1000 /* ms */
30 
31 struct mlxsw_sp_ptp_state {
32 	struct mlxsw_sp *mlxsw_sp;
33 };
34 
35 struct mlxsw_sp1_ptp_state {
36 	struct mlxsw_sp_ptp_state common;
37 	struct rhltable unmatched_ht;
38 	spinlock_t unmatched_lock; /* protects the HT */
39 	struct delayed_work ht_gc_dw;
40 	u32 gc_cycle;
41 };
42 
43 struct mlxsw_sp2_ptp_state {
44 	struct mlxsw_sp_ptp_state common;
45 	refcount_t ptp_port_enabled_ref; /* Number of ports with time stamping
46 					  * enabled.
47 					  */
48 	struct hwtstamp_config config;
49 	struct mutex lock; /* Protects 'config' and HW configuration. */
50 };
51 
52 struct mlxsw_sp1_ptp_key {
53 	u16 local_port;
54 	u8 message_type;
55 	u16 sequence_id;
56 	u8 domain_number;
57 	bool ingress;
58 };
59 
60 struct mlxsw_sp1_ptp_unmatched {
61 	struct mlxsw_sp1_ptp_key key;
62 	struct rhlist_head ht_node;
63 	struct rcu_head rcu;
64 	struct sk_buff *skb;
65 	u64 timestamp;
66 	u32 gc_cycle;
67 };
68 
69 static const struct rhashtable_params mlxsw_sp1_ptp_unmatched_ht_params = {
70 	.key_len = sizeof_field(struct mlxsw_sp1_ptp_unmatched, key),
71 	.key_offset = offsetof(struct mlxsw_sp1_ptp_unmatched, key),
72 	.head_offset = offsetof(struct mlxsw_sp1_ptp_unmatched, ht_node),
73 };
74 
75 struct mlxsw_sp_ptp_clock {
76 	struct mlxsw_core *core;
77 	struct ptp_clock *ptp;
78 	struct ptp_clock_info ptp_info;
79 };
80 
81 struct mlxsw_sp1_ptp_clock {
82 	struct mlxsw_sp_ptp_clock common;
83 	spinlock_t lock; /* protect this structure */
84 	struct cyclecounter cycles;
85 	struct timecounter tc;
86 	u32 nominal_c_mult;
87 	unsigned long overflow_period;
88 	struct delayed_work overflow_work;
89 };
90 
91 static struct mlxsw_sp1_ptp_state *
92 mlxsw_sp1_ptp_state(struct mlxsw_sp *mlxsw_sp)
93 {
94 	return container_of(mlxsw_sp->ptp_state, struct mlxsw_sp1_ptp_state,
95 			    common);
96 }
97 
98 static struct mlxsw_sp2_ptp_state *
99 mlxsw_sp2_ptp_state(struct mlxsw_sp *mlxsw_sp)
100 {
101 	return container_of(mlxsw_sp->ptp_state, struct mlxsw_sp2_ptp_state,
102 			    common);
103 }
104 
105 static struct mlxsw_sp1_ptp_clock *
106 mlxsw_sp1_ptp_clock(struct ptp_clock_info *ptp)
107 {
108 	return container_of(ptp, struct mlxsw_sp1_ptp_clock, common.ptp_info);
109 }
110 
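/* The 64-bit free-running counter (FRC) is read as two 32-bit halves. The
 * high word is sampled before and after the low word; if the two samples
 * differ, the low word wrapped in between and is read again so that it is
 * consistent with the second high-word sample.
 */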
111 static u64 __mlxsw_sp1_ptp_read_frc(struct mlxsw_sp1_ptp_clock *clock,
112 				    struct ptp_system_timestamp *sts)
113 {
114 	struct mlxsw_core *mlxsw_core = clock->common.core;
115 	u32 frc_h1, frc_h2, frc_l;
116 
117 	frc_h1 = mlxsw_core_read_frc_h(mlxsw_core);
118 	ptp_read_system_prets(sts);
119 	frc_l = mlxsw_core_read_frc_l(mlxsw_core);
120 	ptp_read_system_postts(sts);
121 	frc_h2 = mlxsw_core_read_frc_h(mlxsw_core);
122 
123 	if (frc_h1 != frc_h2) {
124 		/* wrap around */
125 		ptp_read_system_prets(sts);
126 		frc_l = mlxsw_core_read_frc_l(mlxsw_core);
127 		ptp_read_system_postts(sts);
128 	}
129 
130 	return (u64) frc_l | (u64) frc_h2 << 32;
131 }
132 
133 static u64 mlxsw_sp1_ptp_read_frc(const struct cyclecounter *cc)
134 {
135 	struct mlxsw_sp1_ptp_clock *clock =
136 		container_of(cc, struct mlxsw_sp1_ptp_clock, cycles);
137 
138 	return __mlxsw_sp1_ptp_read_frc(clock, NULL) & cc->mask;
139 }
140 
141 static int
142 mlxsw_sp_ptp_phc_adjfreq(struct mlxsw_sp_ptp_clock *clock, int freq_adj)
143 {
144 	struct mlxsw_core *mlxsw_core = clock->core;
145 	char mtutc_pl[MLXSW_REG_MTUTC_LEN];
146 
147 	mlxsw_reg_mtutc_pack(mtutc_pl, MLXSW_REG_MTUTC_OPERATION_ADJUST_FREQ,
148 			     freq_adj, 0, 0, 0);
149 	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
150 }
151 
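/* Inverse of the cyclecounter conversion used by the timecounter: since
 * ns = (cycles * mult) >> shift, cycles = (ns << shift) / mult.
 */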
152 static u64 mlxsw_sp1_ptp_ns2cycles(const struct timecounter *tc, u64 nsec)
153 {
154 	u64 cycles = (u64) nsec;
155 
156 	cycles <<= tc->cc->shift;
157 	cycles = div_u64(cycles, tc->cc->mult);
158 
159 	return cycles;
160 }
161 
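/* On Spectrum-1, the hardware clock is not set immediately: MTPPS is
 * programmed with the FRC cycle count of the next whole-second boundary, and
 * MTUTC, using the SET_TIME_AT_NEXT_SEC operation, loads next_sec at that
 * boundary.
 */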
162 static int
163 mlxsw_sp1_ptp_phc_settime(struct mlxsw_sp1_ptp_clock *clock, u64 nsec)
164 {
165 	struct mlxsw_core *mlxsw_core = clock->common.core;
166 	u64 next_sec, next_sec_in_nsec, cycles;
167 	char mtutc_pl[MLXSW_REG_MTUTC_LEN];
168 	char mtpps_pl[MLXSW_REG_MTPPS_LEN];
169 	int err;
170 
171 	next_sec = div_u64(nsec, NSEC_PER_SEC) + 1;
172 	next_sec_in_nsec = next_sec * NSEC_PER_SEC;
173 
174 	spin_lock_bh(&clock->lock);
175 	cycles = mlxsw_sp1_ptp_ns2cycles(&clock->tc, next_sec_in_nsec);
176 	spin_unlock_bh(&clock->lock);
177 
178 	mlxsw_reg_mtpps_vpin_pack(mtpps_pl, cycles);
179 	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtpps), mtpps_pl);
180 	if (err)
181 		return err;
182 
183 	mlxsw_reg_mtutc_pack(mtutc_pl,
184 			     MLXSW_REG_MTUTC_OPERATION_SET_TIME_AT_NEXT_SEC,
185 			     0, next_sec, 0, 0);
186 	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
187 }
188 
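/* A rough worked example of the adjustment below, assuming a nominal mult of
 * about 3.44e9: for a scaled_ppm corresponding to +100 ppb,
 * diff = 3.44e9 * 100 / 1e9 ~= 344, so cycles.mult becomes nominal + 344,
 * which speeds the software timecounter up by ~100 ppb. The same ppb value
 * (sign-adjusted) is also programmed into the HW clock via MTUTC.
 */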
189 static int mlxsw_sp1_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
190 {
191 	struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
192 	int neg_adj = 0;
193 	u32 diff;
194 	u64 adj;
195 	s32 ppb;
196 
197 	ppb = scaled_ppm_to_ppb(scaled_ppm);
198 
199 	if (ppb < 0) {
200 		neg_adj = 1;
201 		ppb = -ppb;
202 	}
203 
204 	adj = clock->nominal_c_mult;
205 	adj *= ppb;
206 	diff = div_u64(adj, NSEC_PER_SEC);
207 
208 	spin_lock_bh(&clock->lock);
209 	timecounter_read(&clock->tc);
210 	clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
211 				       clock->nominal_c_mult + diff;
212 	spin_unlock_bh(&clock->lock);
213 
214 	return mlxsw_sp_ptp_phc_adjfreq(&clock->common, neg_adj ? -ppb : ppb);
215 }
216 
217 static int mlxsw_sp1_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
218 {
219 	struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
220 	u64 nsec;
221 
222 	spin_lock_bh(&clock->lock);
223 	timecounter_adjtime(&clock->tc, delta);
224 	nsec = timecounter_read(&clock->tc);
225 	spin_unlock_bh(&clock->lock);
226 
227 	return mlxsw_sp1_ptp_phc_settime(clock, nsec);
228 }
229 
230 static int mlxsw_sp1_ptp_gettimex(struct ptp_clock_info *ptp,
231 				  struct timespec64 *ts,
232 				  struct ptp_system_timestamp *sts)
233 {
234 	struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
235 	u64 cycles, nsec;
236 
237 	spin_lock_bh(&clock->lock);
238 	cycles = __mlxsw_sp1_ptp_read_frc(clock, sts);
239 	nsec = timecounter_cyc2time(&clock->tc, cycles);
240 	spin_unlock_bh(&clock->lock);
241 
242 	*ts = ns_to_timespec64(nsec);
243 
244 	return 0;
245 }
246 
247 static int mlxsw_sp1_ptp_settime(struct ptp_clock_info *ptp,
248 				 const struct timespec64 *ts)
249 {
250 	struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
251 	u64 nsec = timespec64_to_ns(ts);
252 
253 	spin_lock_bh(&clock->lock);
254 	timecounter_init(&clock->tc, &clock->cycles, nsec);
255 	nsec = timecounter_read(&clock->tc);
256 	spin_unlock_bh(&clock->lock);
257 
258 	return mlxsw_sp1_ptp_phc_settime(clock, nsec);
259 }
260 
261 static const struct ptp_clock_info mlxsw_sp1_ptp_clock_info = {
262 	.owner		= THIS_MODULE,
263 	.name		= "mlxsw_sp_clock",
264 	.max_adj	= 100000000,
265 	.adjfine	= mlxsw_sp1_ptp_adjfine,
266 	.adjtime	= mlxsw_sp1_ptp_adjtime,
267 	.gettimex64	= mlxsw_sp1_ptp_gettimex,
268 	.settime64	= mlxsw_sp1_ptp_settime,
269 };
270 
271 static void mlxsw_sp1_ptp_clock_overflow(struct work_struct *work)
272 {
273 	struct delayed_work *dwork = to_delayed_work(work);
274 	struct mlxsw_sp1_ptp_clock *clock;
275 
276 	clock = container_of(dwork, struct mlxsw_sp1_ptp_clock, overflow_work);
277 
278 	spin_lock_bh(&clock->lock);
279 	timecounter_read(&clock->tc);
280 	spin_unlock_bh(&clock->lock);
281 	mlxsw_core_schedule_dw(&clock->overflow_work, clock->overflow_period);
282 }
283 
284 struct mlxsw_sp_ptp_clock *
285 mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
286 {
287 	u64 overflow_cycles, nsec, frac = 0;
288 	struct mlxsw_sp1_ptp_clock *clock;
289 	int err;
290 
291 	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
292 	if (!clock)
293 		return ERR_PTR(-ENOMEM);
294 
295 	spin_lock_init(&clock->lock);
296 	clock->cycles.read = mlxsw_sp1_ptp_read_frc;
297 	clock->cycles.shift = MLXSW_SP1_PTP_CLOCK_CYCLES_SHIFT;
298 	clock->cycles.mult = clocksource_khz2mult(MLXSW_SP1_PTP_CLOCK_FREQ_KHZ,
299 						  clock->cycles.shift);
300 	clock->nominal_c_mult = clock->cycles.mult;
301 	clock->cycles.mask = CLOCKSOURCE_MASK(MLXSW_SP1_PTP_CLOCK_MASK);
302 	clock->common.core = mlxsw_sp->core;
303 
304 	timecounter_init(&clock->tc, &clock->cycles, 0);
305 
306 	/* Calculate the period at which to run the overflow watchdog, to make
307 	 * sure the counter is checked at least twice every wraparound.
308 	 * The period is calculated as the minimum between the max HW cycle
309 	 * count (the clock source mask) and the max number of cycles that can
310 	 * be multiplied by the clock multiplier without the result exceeding
311 	 * 64 bits.
312 	 */
313 	overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
314 	overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));
315 
316 	nsec = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles, 0, &frac);
317 	clock->overflow_period = nsecs_to_jiffies(nsec);
318 
319 	INIT_DELAYED_WORK(&clock->overflow_work, mlxsw_sp1_ptp_clock_overflow);
320 	mlxsw_core_schedule_dw(&clock->overflow_work, 0);
321 
322 	clock->common.ptp_info = mlxsw_sp1_ptp_clock_info;
323 	clock->common.ptp = ptp_clock_register(&clock->common.ptp_info, dev);
324 	if (IS_ERR(clock->common.ptp)) {
325 		err = PTR_ERR(clock->common.ptp);
326 		dev_err(dev, "ptp_clock_register failed %d\n", err);
327 		goto err_ptp_clock_register;
328 	}
329 
330 	return &clock->common;
331 
332 err_ptp_clock_register:
333 	cancel_delayed_work_sync(&clock->overflow_work);
334 	kfree(clock);
335 	return ERR_PTR(err);
336 }
337 
338 void mlxsw_sp1_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock_common)
339 {
340 	struct mlxsw_sp1_ptp_clock *clock =
341 		container_of(clock_common, struct mlxsw_sp1_ptp_clock, common);
342 
343 	ptp_clock_unregister(clock_common->ptp);
344 	cancel_delayed_work_sync(&clock->overflow_work);
345 	kfree(clock);
346 }
347 
348 static u64 mlxsw_sp2_ptp_read_utc(struct mlxsw_sp_ptp_clock *clock,
349 				  struct ptp_system_timestamp *sts)
350 {
351 	struct mlxsw_core *mlxsw_core = clock->core;
352 	u32 utc_sec1, utc_sec2, utc_nsec;
353 
354 	utc_sec1 = mlxsw_core_read_utc_sec(mlxsw_core);
355 	ptp_read_system_prets(sts);
356 	utc_nsec = mlxsw_core_read_utc_nsec(mlxsw_core);
357 	ptp_read_system_postts(sts);
358 	utc_sec2 = mlxsw_core_read_utc_sec(mlxsw_core);
359 
360 	if (utc_sec1 != utc_sec2) {
361 		/* Wrap around. */
362 		ptp_read_system_prets(sts);
363 		utc_nsec = mlxsw_core_read_utc_nsec(mlxsw_core);
364 		ptp_read_system_postts(sts);
365 	}
366 
367 	return (u64)utc_sec2 * NSEC_PER_SEC + utc_nsec;
368 }
369 
370 static int
371 mlxsw_sp2_ptp_phc_settime(struct mlxsw_sp_ptp_clock *clock, u64 nsec)
372 {
373 	struct mlxsw_core *mlxsw_core = clock->core;
374 	char mtutc_pl[MLXSW_REG_MTUTC_LEN];
375 	u32 sec, nsec_rem;
376 
377 	sec = div_u64_rem(nsec, NSEC_PER_SEC, &nsec_rem);
378 	mlxsw_reg_mtutc_pack(mtutc_pl,
379 			     MLXSW_REG_MTUTC_OPERATION_SET_TIME_IMMEDIATE,
380 			     0, sec, nsec_rem, 0);
381 	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
382 }
383 
384 static int mlxsw_sp2_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
385 {
386 	struct mlxsw_sp_ptp_clock *clock =
387 		container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
388 	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
389 
390 	/* In Spectrum-2 and newer ASICs, the frequency adjustment in MTUTC is
391 	 * reversed: positive values decrease the frequency. Adjust the sign of
392 	 * PPB to match this behavior.
393 	 */
394 	return mlxsw_sp_ptp_phc_adjfreq(clock, -ppb);
395 }
396 
397 static int mlxsw_sp2_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
398 {
399 	struct mlxsw_sp_ptp_clock *clock =
400 		container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
401 	struct mlxsw_core *mlxsw_core = clock->core;
402 	char mtutc_pl[MLXSW_REG_MTUTC_LEN];
403 
404 	/* HW time adjustment range is s16. If out of range, set time instead. */
405 	if (delta < S16_MIN || delta > S16_MAX) {
406 		u64 nsec;
407 
408 		nsec = mlxsw_sp2_ptp_read_utc(clock, NULL);
409 		nsec += delta;
410 
411 		return mlxsw_sp2_ptp_phc_settime(clock, nsec);
412 	}
413 
414 	mlxsw_reg_mtutc_pack(mtutc_pl,
415 			     MLXSW_REG_MTUTC_OPERATION_ADJUST_TIME,
416 			     0, 0, 0, delta);
417 	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
418 }
419 
420 static int mlxsw_sp2_ptp_gettimex(struct ptp_clock_info *ptp,
421 				  struct timespec64 *ts,
422 				  struct ptp_system_timestamp *sts)
423 {
424 	struct mlxsw_sp_ptp_clock *clock =
425 		container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
426 	u64 nsec;
427 
428 	nsec = mlxsw_sp2_ptp_read_utc(clock, sts);
429 	*ts = ns_to_timespec64(nsec);
430 
431 	return 0;
432 }
433 
434 static int mlxsw_sp2_ptp_settime(struct ptp_clock_info *ptp,
435 				 const struct timespec64 *ts)
436 {
437 	struct mlxsw_sp_ptp_clock *clock =
438 		container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
439 	u64 nsec = timespec64_to_ns(ts);
440 
441 	return mlxsw_sp2_ptp_phc_settime(clock, nsec);
442 }
443 
444 static const struct ptp_clock_info mlxsw_sp2_ptp_clock_info = {
445 	.owner		= THIS_MODULE,
446 	.name		= "mlxsw_sp_clock",
447 	.max_adj	= MLXSW_REG_MTUTC_MAX_FREQ_ADJ,
448 	.adjfine	= mlxsw_sp2_ptp_adjfine,
449 	.adjtime	= mlxsw_sp2_ptp_adjtime,
450 	.gettimex64	= mlxsw_sp2_ptp_gettimex,
451 	.settime64	= mlxsw_sp2_ptp_settime,
452 };
453 
454 struct mlxsw_sp_ptp_clock *
455 mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
456 {
457 	struct mlxsw_sp_ptp_clock *clock;
458 	int err;
459 
460 	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
461 	if (!clock)
462 		return ERR_PTR(-ENOMEM);
463 
464 	clock->core = mlxsw_sp->core;
465 
466 	clock->ptp_info = mlxsw_sp2_ptp_clock_info;
467 
468 	err = mlxsw_sp2_ptp_phc_settime(clock, 0);
469 	if (err) {
470 		dev_err(dev, "setting UTC time failed %d\n", err);
471 		goto err_ptp_phc_settime;
472 	}
473 
474 	clock->ptp = ptp_clock_register(&clock->ptp_info, dev);
475 	if (IS_ERR(clock->ptp)) {
476 		err = PTR_ERR(clock->ptp);
477 		dev_err(dev, "ptp_clock_register failed %d\n", err);
478 		goto err_ptp_clock_register;
479 	}
480 
481 	return clock;
482 
483 err_ptp_clock_register:
484 err_ptp_phc_settime:
485 	kfree(clock);
486 	return ERR_PTR(err);
487 }
488 
489 void mlxsw_sp2_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock)
490 {
491 	ptp_clock_unregister(clock->ptp);
492 	kfree(clock);
493 }
494 
495 static int mlxsw_sp_ptp_parse(struct sk_buff *skb,
496 			      u8 *p_domain_number,
497 			      u8 *p_message_type,
498 			      u16 *p_sequence_id)
499 {
500 	unsigned int ptp_class;
501 	struct ptp_header *hdr;
502 
503 	ptp_class = ptp_classify_raw(skb);
504 
505 	switch (ptp_class & PTP_CLASS_VMASK) {
506 	case PTP_CLASS_V1:
507 	case PTP_CLASS_V2:
508 		break;
509 	default:
510 		return -ERANGE;
511 	}
512 
513 	hdr = ptp_parse_header(skb, ptp_class);
514 	if (!hdr)
515 		return -EINVAL;
516 
517 	*p_message_type	 = ptp_get_msgtype(hdr, ptp_class);
518 	*p_domain_number = hdr->domain_number;
519 	*p_sequence_id	 = be16_to_cpu(hdr->sequence_id);
520 
521 	return 0;
522 }
523 
524 /* Returns 0 on successful insertion into the hash table, or a negative errno
525  * on allocation or insertion failure.
526  */
527 static int
528 mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp,
529 			     struct mlxsw_sp1_ptp_key key,
530 			     struct sk_buff *skb,
531 			     u64 timestamp)
532 {
533 	int cycles = MLXSW_SP1_PTP_HT_GC_TIMEOUT / MLXSW_SP1_PTP_HT_GC_INTERVAL;
534 	struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
535 	struct mlxsw_sp1_ptp_unmatched *unmatched;
536 	int err;
537 
538 	unmatched = kzalloc(sizeof(*unmatched), GFP_ATOMIC);
539 	if (!unmatched)
540 		return -ENOMEM;
541 
542 	unmatched->key = key;
543 	unmatched->skb = skb;
544 	unmatched->timestamp = timestamp;
545 	unmatched->gc_cycle = ptp_state->gc_cycle + cycles;
546 
547 	err = rhltable_insert(&ptp_state->unmatched_ht, &unmatched->ht_node,
548 			      mlxsw_sp1_ptp_unmatched_ht_params);
549 	if (err)
550 		kfree(unmatched);
551 
552 	return err;
553 }
554 
555 static struct mlxsw_sp1_ptp_unmatched *
556 mlxsw_sp1_ptp_unmatched_lookup(struct mlxsw_sp *mlxsw_sp,
557 			       struct mlxsw_sp1_ptp_key key, int *p_length)
558 {
559 	struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
560 	struct mlxsw_sp1_ptp_unmatched *unmatched, *last = NULL;
561 	struct rhlist_head *tmp, *list;
562 	int length = 0;
563 
564 	list = rhltable_lookup(&ptp_state->unmatched_ht, &key,
565 			       mlxsw_sp1_ptp_unmatched_ht_params);
566 	rhl_for_each_entry_rcu(unmatched, tmp, list, ht_node) {
567 		last = unmatched;
568 		length++;
569 	}
570 
571 	*p_length = length;
572 	return last;
573 }
574 
575 static int
576 mlxsw_sp1_ptp_unmatched_remove(struct mlxsw_sp *mlxsw_sp,
577 			       struct mlxsw_sp1_ptp_unmatched *unmatched)
578 {
579 	struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
580 
581 	return rhltable_remove(&ptp_state->unmatched_ht,
582 			       &unmatched->ht_node,
583 			       mlxsw_sp1_ptp_unmatched_ht_params);
584 }
585 
586 /* This function is called in the following scenarios:
587  *
588  * 1) When a packet is matched with its timestamp.
589  * 2) In several situations when it is necessary to immediately pass on
590  *    an SKB without a timestamp.
591  * 3) From GC indirectly through mlxsw_sp1_ptp_unmatched_finish().
592  *    This case is similar to 2) above.
593  */
594 static void mlxsw_sp1_ptp_packet_finish(struct mlxsw_sp *mlxsw_sp,
595 					struct sk_buff *skb, u16 local_port,
596 					bool ingress,
597 					struct skb_shared_hwtstamps *hwtstamps)
598 {
599 	struct mlxsw_sp_port *mlxsw_sp_port;
600 
601 	/* Between capturing the packet and finishing it, there is a window of
602 	 * opportunity for the originating port to go away (e.g. due to a
603 	 * split). Also make sure the SKB device reference is still valid.
604 	 */
605 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
606 	if (!(mlxsw_sp_port && (!skb->dev || skb->dev == mlxsw_sp_port->dev))) {
607 		dev_kfree_skb_any(skb);
608 		return;
609 	}
610 
611 	if (ingress) {
612 		if (hwtstamps)
613 			*skb_hwtstamps(skb) = *hwtstamps;
614 		mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
615 	} else {
616 		/* skb_tstamp_tx() allows hwtstamps to be NULL. */
617 		skb_tstamp_tx(skb, hwtstamps);
618 		dev_kfree_skb_any(skb);
619 	}
620 }
621 
622 static void mlxsw_sp1_packet_timestamp(struct mlxsw_sp *mlxsw_sp,
623 				       struct mlxsw_sp1_ptp_key key,
624 				       struct sk_buff *skb,
625 				       u64 timestamp)
626 {
627 	struct mlxsw_sp_ptp_clock *clock_common = mlxsw_sp->clock;
628 	struct mlxsw_sp1_ptp_clock *clock =
629 		container_of(clock_common, struct mlxsw_sp1_ptp_clock, common);
630 
631 	struct skb_shared_hwtstamps hwtstamps;
632 	u64 nsec;
633 
634 	spin_lock_bh(&clock->lock);
635 	nsec = timecounter_cyc2time(&clock->tc, timestamp);
636 	spin_unlock_bh(&clock->lock);
637 
638 	hwtstamps.hwtstamp = ns_to_ktime(nsec);
639 	mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
640 				    key.local_port, key.ingress, &hwtstamps);
641 }
642 
643 static void
644 mlxsw_sp1_ptp_unmatched_finish(struct mlxsw_sp *mlxsw_sp,
645 			       struct mlxsw_sp1_ptp_unmatched *unmatched)
646 {
647 	if (unmatched->skb && unmatched->timestamp)
648 		mlxsw_sp1_packet_timestamp(mlxsw_sp, unmatched->key,
649 					   unmatched->skb,
650 					   unmatched->timestamp);
651 	else if (unmatched->skb)
652 		mlxsw_sp1_ptp_packet_finish(mlxsw_sp, unmatched->skb,
653 					    unmatched->key.local_port,
654 					    unmatched->key.ingress, NULL);
655 	kfree_rcu(unmatched, rcu);
656 }
657 
658 static void mlxsw_sp1_ptp_unmatched_free_fn(void *ptr, void *arg)
659 {
660 	struct mlxsw_sp1_ptp_unmatched *unmatched = ptr;
661 
662 	/* This is invoked at a point where the ports are gone already. There is
663 	 * nothing to do with whatever is left in the HT but to free it.
664 	 */
665 	if (unmatched->skb)
666 		dev_kfree_skb_any(unmatched->skb);
667 	kfree_rcu(unmatched, rcu);
668 }
669 
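/* Each PTP event packet yields two pieces that arrive independently and in
 * either order: the SKB from the PTP trap and the timestamp from the
 * timestamp FIFO. The first piece of a pair is parked in the unmatched hash
 * table under its {port, direction, message type, domain, sequence ID} key;
 * the second piece completes it. Entries that never get completed are flushed
 * by the GC after roughly MLXSW_SP1_PTP_HT_GC_TIMEOUT.
 */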
670 static void mlxsw_sp1_ptp_got_piece(struct mlxsw_sp *mlxsw_sp,
671 				    struct mlxsw_sp1_ptp_key key,
672 				    struct sk_buff *skb, u64 timestamp)
673 {
674 	struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
675 	struct mlxsw_sp1_ptp_unmatched *unmatched;
676 	int length;
677 	int err;
678 
679 	rcu_read_lock();
680 
681 	spin_lock(&ptp_state->unmatched_lock);
682 
683 	unmatched = mlxsw_sp1_ptp_unmatched_lookup(mlxsw_sp, key, &length);
684 	if (skb && unmatched && unmatched->timestamp) {
685 		unmatched->skb = skb;
686 	} else if (timestamp && unmatched && unmatched->skb) {
687 		unmatched->timestamp = timestamp;
688 	} else {
689 		/* Either there is no entry to match, or the entry that is there
690 		 * is incompatible.
691 		 */
692 		if (length < 100)
693 			err = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key,
694 							   skb, timestamp);
695 		else
696 			err = -E2BIG;
697 		if (err && skb)
698 			mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
699 						    key.local_port,
700 						    key.ingress, NULL);
701 		unmatched = NULL;
702 	}
703 
704 	if (unmatched) {
705 		err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp, unmatched);
706 		WARN_ON_ONCE(err);
707 	}
708 
709 	spin_unlock(&ptp_state->unmatched_lock);
710 
711 	if (unmatched)
712 		mlxsw_sp1_ptp_unmatched_finish(mlxsw_sp, unmatched);
713 
714 	rcu_read_unlock();
715 }
716 
717 static void mlxsw_sp1_ptp_got_packet(struct mlxsw_sp *mlxsw_sp,
718 				     struct sk_buff *skb, u16 local_port,
719 				     bool ingress)
720 {
721 	struct mlxsw_sp_port *mlxsw_sp_port;
722 	struct mlxsw_sp1_ptp_key key;
723 	u8 types;
724 	int err;
725 
726 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
727 	if (!mlxsw_sp_port)
728 		goto immediate;
729 
730 	types = ingress ? mlxsw_sp_port->ptp.ing_types :
731 			  mlxsw_sp_port->ptp.egr_types;
732 	if (!types)
733 		goto immediate;
734 
735 	memset(&key, 0, sizeof(key));
736 	key.local_port = local_port;
737 	key.ingress = ingress;
738 
739 	err = mlxsw_sp_ptp_parse(skb, &key.domain_number, &key.message_type,
740 				 &key.sequence_id);
741 	if (err)
742 		goto immediate;
743 
744 	/* For packets whose timestamping was not enabled on this port, don't
745 	 * bother trying to match the timestamp.
746 	 */
747 	if (!((1 << key.message_type) & types))
748 		goto immediate;
749 
750 	mlxsw_sp1_ptp_got_piece(mlxsw_sp, key, skb, 0);
751 	return;
752 
753 immediate:
754 	mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb, local_port, ingress, NULL);
755 }
756 
757 void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
758 				 u16 local_port, u8 message_type,
759 				 u8 domain_number, u16 sequence_id,
760 				 u64 timestamp)
761 {
762 	struct mlxsw_sp_port *mlxsw_sp_port;
763 	struct mlxsw_sp1_ptp_key key;
764 	u8 types;
765 
766 	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
767 		return;
768 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
769 	if (!mlxsw_sp_port)
770 		return;
771 
772 	types = ingress ? mlxsw_sp_port->ptp.ing_types :
773 			  mlxsw_sp_port->ptp.egr_types;
774 
775 	/* For message types whose timestamping was not enabled on this port,
776 	 * don't bother with the timestamp.
777 	 */
778 	if (!((1 << message_type) & types))
779 		return;
780 
781 	memset(&key, 0, sizeof(key));
782 	key.local_port = local_port;
783 	key.domain_number = domain_number;
784 	key.message_type = message_type;
785 	key.sequence_id = sequence_id;
786 	key.ingress = ingress;
787 
788 	mlxsw_sp1_ptp_got_piece(mlxsw_sp, key, NULL, timestamp);
789 }
790 
791 void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
792 			   u16 local_port)
793 {
794 	skb_reset_mac_header(skb);
795 	mlxsw_sp1_ptp_got_packet(mlxsw_sp, skb, local_port, true);
796 }
797 
798 void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
799 			       struct sk_buff *skb, u16 local_port)
800 {
801 	mlxsw_sp1_ptp_got_packet(mlxsw_sp, skb, local_port, false);
802 }
803 
804 static void
805 mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp1_ptp_state *ptp_state,
806 			    struct mlxsw_sp1_ptp_unmatched *unmatched)
807 {
808 	struct mlxsw_sp *mlxsw_sp = ptp_state->common.mlxsw_sp;
809 	struct mlxsw_sp_ptp_port_dir_stats *stats;
810 	struct mlxsw_sp_port *mlxsw_sp_port;
811 	int err;
812 
813 	/* If an unmatched entry has an SKB, it has to be handed over to the
814 	 * networking stack. This is usually done from a trap handler, which is
815 	 * invoked in a softirq context. Here we are going to do it in process
816 	 * context. If that were to be interrupted by a softirq, it could cause
817 	 * a deadlock when an attempt is made to take an already-taken lock
818 	 * somewhere along the sending path. Disable softirqs to prevent this.
819 	 */
820 	local_bh_disable();
821 
822 	spin_lock(&ptp_state->unmatched_lock);
823 	err = rhltable_remove(&ptp_state->unmatched_ht, &unmatched->ht_node,
824 			      mlxsw_sp1_ptp_unmatched_ht_params);
825 	spin_unlock(&ptp_state->unmatched_lock);
826 
827 	if (err)
828 		/* The packet was matched with its timestamp during the walk. */
829 		goto out;
830 
831 	mlxsw_sp_port = mlxsw_sp->ports[unmatched->key.local_port];
832 	if (mlxsw_sp_port) {
833 		stats = unmatched->key.ingress ?
834 			&mlxsw_sp_port->ptp.stats.rx_gcd :
835 			&mlxsw_sp_port->ptp.stats.tx_gcd;
836 		if (unmatched->skb)
837 			stats->packets++;
838 		else
839 			stats->timestamps++;
840 	}
841 
842 	/* mlxsw_sp1_ptp_unmatched_finish() invokes netif_receive_skb(). While
843 	 * the comment at that function states that it can only be called in
844 	 * soft IRQ context, this pattern of local_bh_disable() +
845 	 * netif_receive_skb(), in process context, is seen elsewhere in the
846 	 * kernel, notably in pktgen.
847 	 */
848 	mlxsw_sp1_ptp_unmatched_finish(mlxsw_sp, unmatched);
849 
850 out:
851 	local_bh_enable();
852 }
853 
854 static void mlxsw_sp1_ptp_ht_gc(struct work_struct *work)
855 {
856 	struct delayed_work *dwork = to_delayed_work(work);
857 	struct mlxsw_sp1_ptp_unmatched *unmatched;
858 	struct mlxsw_sp1_ptp_state *ptp_state;
859 	struct rhashtable_iter iter;
860 	u32 gc_cycle;
861 	void *obj;
862 
863 	ptp_state = container_of(dwork, struct mlxsw_sp1_ptp_state, ht_gc_dw);
864 	gc_cycle = ptp_state->gc_cycle++;
865 
866 	rhltable_walk_enter(&ptp_state->unmatched_ht, &iter);
867 	rhashtable_walk_start(&iter);
868 	while ((obj = rhashtable_walk_next(&iter))) {
869 		if (IS_ERR(obj))
870 			continue;
871 
872 		unmatched = obj;
873 		if (unmatched->gc_cycle <= gc_cycle)
874 			mlxsw_sp1_ptp_ht_gc_collect(ptp_state, unmatched);
875 	}
876 	rhashtable_walk_stop(&iter);
877 	rhashtable_walk_exit(&iter);
878 
879 	mlxsw_core_schedule_dw(&ptp_state->ht_gc_dw,
880 			       MLXSW_SP1_PTP_HT_GC_INTERVAL);
881 }
882 
883 static int mlxsw_sp_ptp_mtptpt_set(struct mlxsw_sp *mlxsw_sp,
884 				   enum mlxsw_reg_mtptpt_trap_id trap_id,
885 				   u16 message_type)
886 {
887 	char mtptpt_pl[MLXSW_REG_MTPTPT_LEN];
888 
889 	mlxsw_reg_mtptpt_pack(mtptpt_pl, trap_id, message_type);
890 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mtptpt), mtptpt_pl);
891 }
892 
893 static int mlxsw_sp1_ptp_set_fifo_clr_on_trap(struct mlxsw_sp *mlxsw_sp,
894 					      bool clr)
895 {
896 	char mogcr_pl[MLXSW_REG_MOGCR_LEN] = {0};
897 	int err;
898 
899 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
900 	if (err)
901 		return err;
902 
903 	mlxsw_reg_mogcr_ptp_iftc_set(mogcr_pl, clr);
904 	mlxsw_reg_mogcr_ptp_eftc_set(mogcr_pl, clr);
905 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
906 }
907 
908 static int mlxsw_sp1_ptp_mtpppc_set(struct mlxsw_sp *mlxsw_sp,
909 				    u16 ing_types, u16 egr_types)
910 {
911 	char mtpppc_pl[MLXSW_REG_MTPPPC_LEN];
912 
913 	mlxsw_reg_mtpppc_pack(mtpppc_pl, ing_types, egr_types);
914 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mtpppc), mtpppc_pl);
915 }
916 
917 struct mlxsw_sp1_ptp_shaper_params {
918 	u32 ethtool_speed;
919 	enum mlxsw_reg_qpsc_port_speed port_speed;
920 	u8 shaper_time_exp;
921 	u8 shaper_time_mantissa;
922 	u8 shaper_inc;
923 	u8 shaper_bs;
924 	u8 port_to_shaper_credits;
925 	int ing_timestamp_inc;
926 	int egr_timestamp_inc;
927 };
928 
929 static const struct mlxsw_sp1_ptp_shaper_params
930 mlxsw_sp1_ptp_shaper_params[] = {
931 	{
932 		.ethtool_speed		= SPEED_100,
933 		.port_speed		= MLXSW_REG_QPSC_PORT_SPEED_100M,
934 		.shaper_time_exp	= 4,
935 		.shaper_time_mantissa	= 12,
936 		.shaper_inc		= 9,
937 		.shaper_bs		= 1,
938 		.port_to_shaper_credits	= 1,
939 		.ing_timestamp_inc	= -313,
940 		.egr_timestamp_inc	= 313,
941 	},
942 	{
943 		.ethtool_speed		= SPEED_1000,
944 		.port_speed		= MLXSW_REG_QPSC_PORT_SPEED_1G,
945 		.shaper_time_exp	= 0,
946 		.shaper_time_mantissa	= 12,
947 		.shaper_inc		= 6,
948 		.shaper_bs		= 0,
949 		.port_to_shaper_credits	= 1,
950 		.ing_timestamp_inc	= -35,
951 		.egr_timestamp_inc	= 35,
952 	},
953 	{
954 		.ethtool_speed		= SPEED_10000,
955 		.port_speed		= MLXSW_REG_QPSC_PORT_SPEED_10G,
956 		.shaper_time_exp	= 0,
957 		.shaper_time_mantissa	= 2,
958 		.shaper_inc		= 14,
959 		.shaper_bs		= 1,
960 		.port_to_shaper_credits	= 1,
961 		.ing_timestamp_inc	= -11,
962 		.egr_timestamp_inc	= 11,
963 	},
964 	{
965 		.ethtool_speed		= SPEED_25000,
966 		.port_speed		= MLXSW_REG_QPSC_PORT_SPEED_25G,
967 		.shaper_time_exp	= 0,
968 		.shaper_time_mantissa	= 0,
969 		.shaper_inc		= 11,
970 		.shaper_bs		= 1,
971 		.port_to_shaper_credits	= 1,
972 		.ing_timestamp_inc	= -14,
973 		.egr_timestamp_inc	= 14,
974 	},
975 };
976 
977 #define MLXSW_SP1_PTP_SHAPER_PARAMS_LEN ARRAY_SIZE(mlxsw_sp1_ptp_shaper_params)
978 
979 static int mlxsw_sp1_ptp_shaper_params_set(struct mlxsw_sp *mlxsw_sp)
980 {
981 	const struct mlxsw_sp1_ptp_shaper_params *params;
982 	char qpsc_pl[MLXSW_REG_QPSC_LEN];
983 	int i, err;
984 
985 	for (i = 0; i < MLXSW_SP1_PTP_SHAPER_PARAMS_LEN; i++) {
986 		params = &mlxsw_sp1_ptp_shaper_params[i];
987 		mlxsw_reg_qpsc_pack(qpsc_pl, params->port_speed,
988 				    params->shaper_time_exp,
989 				    params->shaper_time_mantissa,
990 				    params->shaper_inc, params->shaper_bs,
991 				    params->port_to_shaper_credits,
992 				    params->ing_timestamp_inc,
993 				    params->egr_timestamp_inc);
994 		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpsc), qpsc_pl);
995 		if (err)
996 			return err;
997 	}
998 
999 	return 0;
1000 }
1001 
1002 static int mlxsw_sp_ptp_traps_set(struct mlxsw_sp *mlxsw_sp)
1003 {
1004 	u16 event_message_type;
1005 	int err;
1006 
1007 	/* Deliver these message types as PTP0. */
1008 	event_message_type = BIT(PTP_MSGTYPE_SYNC) |
1009 			     BIT(PTP_MSGTYPE_DELAY_REQ) |
1010 			     BIT(PTP_MSGTYPE_PDELAY_REQ) |
1011 			     BIT(PTP_MSGTYPE_PDELAY_RESP);
1012 
1013 	err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0,
1014 				      event_message_type);
1015 	if (err)
1016 		return err;
1017 
1018 	/* Everything else is PTP1. */
1019 	err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1,
1020 				      ~event_message_type);
1021 	if (err)
1022 		goto err_mtptpt1_set;
1023 
1024 	return 0;
1025 
1026 err_mtptpt1_set:
1027 	mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
1028 	return err;
1029 }
1030 
1031 static void mlxsw_sp_ptp_traps_unset(struct mlxsw_sp *mlxsw_sp)
1032 {
1033 	mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
1034 	mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
1035 }
1036 
1037 struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
1038 {
1039 	struct mlxsw_sp1_ptp_state *ptp_state;
1040 	int err;
1041 
1042 	err = mlxsw_sp1_ptp_shaper_params_set(mlxsw_sp);
1043 	if (err)
1044 		return ERR_PTR(err);
1045 
1046 	ptp_state = kzalloc(sizeof(*ptp_state), GFP_KERNEL);
1047 	if (!ptp_state)
1048 		return ERR_PTR(-ENOMEM);
1049 	ptp_state->common.mlxsw_sp = mlxsw_sp;
1050 
1051 	spin_lock_init(&ptp_state->unmatched_lock);
1052 
1053 	err = rhltable_init(&ptp_state->unmatched_ht,
1054 			    &mlxsw_sp1_ptp_unmatched_ht_params);
1055 	if (err)
1056 		goto err_hashtable_init;
1057 
1058 	err = mlxsw_sp_ptp_traps_set(mlxsw_sp);
1059 	if (err)
1060 		goto err_ptp_traps_set;
1061 
1062 	err = mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, true);
1063 	if (err)
1064 		goto err_fifo_clr;
1065 
1066 	INIT_DELAYED_WORK(&ptp_state->ht_gc_dw, mlxsw_sp1_ptp_ht_gc);
1067 	mlxsw_core_schedule_dw(&ptp_state->ht_gc_dw,
1068 			       MLXSW_SP1_PTP_HT_GC_INTERVAL);
1069 	return &ptp_state->common;
1070 
1071 err_fifo_clr:
1072 	mlxsw_sp_ptp_traps_unset(mlxsw_sp);
1073 err_ptp_traps_set:
1074 	rhltable_destroy(&ptp_state->unmatched_ht);
1075 err_hashtable_init:
1076 	kfree(ptp_state);
1077 	return ERR_PTR(err);
1078 }
1079 
1080 void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
1081 {
1082 	struct mlxsw_sp *mlxsw_sp = ptp_state_common->mlxsw_sp;
1083 	struct mlxsw_sp1_ptp_state *ptp_state;
1084 
1085 	ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
1086 
1087 	cancel_delayed_work_sync(&ptp_state->ht_gc_dw);
1088 	mlxsw_sp1_ptp_mtpppc_set(mlxsw_sp, 0, 0);
1089 	mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, false);
1090 	mlxsw_sp_ptp_traps_unset(mlxsw_sp);
1091 	rhltable_free_and_destroy(&ptp_state->unmatched_ht,
1092 				  &mlxsw_sp1_ptp_unmatched_free_fn, NULL);
1093 	kfree(ptp_state);
1094 }
1095 
1096 int mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
1097 			       struct hwtstamp_config *config)
1098 {
1099 	*config = mlxsw_sp_port->ptp.hwtstamp_config;
1100 	return 0;
1101 }
1102 
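/* The ing_types / egr_types values below are bitmasks over the PTP
 * messageType field: bit 0 is Sync, bit 1 Delay_Req, bit 2 Pdelay_Req and
 * bit 3 Pdelay_Resp, so 0x0f selects all event messages.
 */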
1103 static int
1104 mlxsw_sp1_ptp_get_message_types(const struct hwtstamp_config *config,
1105 				u16 *p_ing_types, u16 *p_egr_types,
1106 				enum hwtstamp_rx_filters *p_rx_filter)
1107 {
1108 	enum hwtstamp_rx_filters rx_filter = config->rx_filter;
1109 	enum hwtstamp_tx_types tx_type = config->tx_type;
1110 	u16 ing_types = 0x00;
1111 	u16 egr_types = 0x00;
1112 
1113 	switch (tx_type) {
1114 	case HWTSTAMP_TX_OFF:
1115 		egr_types = 0x00;
1116 		break;
1117 	case HWTSTAMP_TX_ON:
1118 		egr_types = 0xff;
1119 		break;
1120 	case HWTSTAMP_TX_ONESTEP_SYNC:
1121 	case HWTSTAMP_TX_ONESTEP_P2P:
1122 		return -ERANGE;
1123 	default:
1124 		return -EINVAL;
1125 	}
1126 
1127 	switch (rx_filter) {
1128 	case HWTSTAMP_FILTER_NONE:
1129 		ing_types = 0x00;
1130 		break;
1131 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1132 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1133 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1134 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
1135 		ing_types = 0x01;
1136 		break;
1137 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1138 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1139 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1140 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1141 		ing_types = 0x02;
1142 		break;
1143 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1144 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1145 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1146 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
1147 		ing_types = 0x0f;
1148 		break;
1149 	case HWTSTAMP_FILTER_ALL:
1150 		ing_types = 0xff;
1151 		break;
1152 	case HWTSTAMP_FILTER_SOME:
1153 	case HWTSTAMP_FILTER_NTP_ALL:
1154 		return -ERANGE;
1155 	default:
1156 		return -EINVAL;
1157 	}
1158 
1159 	*p_ing_types = ing_types;
1160 	*p_egr_types = egr_types;
1161 	*p_rx_filter = rx_filter;
1162 	return 0;
1163 }
1164 
1165 static int mlxsw_sp1_ptp_mtpppc_update(struct mlxsw_sp_port *mlxsw_sp_port,
1166 				       u16 ing_types, u16 egr_types)
1167 {
1168 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1169 	struct mlxsw_sp_port *tmp;
1170 	u16 orig_ing_types = 0;
1171 	u16 orig_egr_types = 0;
1172 	int err;
1173 	int i;
1174 
1175 	/* MTPPPC configures timestamping globally, not per port. Find the
1176 	 * configuration that contains all configured timestamping requests.
1177 	 */
1178 	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
1179 		tmp = mlxsw_sp->ports[i];
1180 		if (tmp) {
1181 			orig_ing_types |= tmp->ptp.ing_types;
1182 			orig_egr_types |= tmp->ptp.egr_types;
1183 		}
1184 		if (tmp && tmp != mlxsw_sp_port) {
1185 			ing_types |= tmp->ptp.ing_types;
1186 			egr_types |= tmp->ptp.egr_types;
1187 		}
1188 	}
1189 
1190 	if ((ing_types || egr_types) && !(orig_ing_types || orig_egr_types)) {
1191 		err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
1192 		if (err) {
1193 			netdev_err(mlxsw_sp_port->dev, "Failed to increase parsing depth\n");
1194 			return err;
1195 		}
1196 	}
1197 	if (!(ing_types || egr_types) && (orig_ing_types || orig_egr_types))
1198 		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
1199 
1200 	return mlxsw_sp1_ptp_mtpppc_set(mlxsw_sp_port->mlxsw_sp,
1201 				       ing_types, egr_types);
1202 }
1203 
1204 static bool mlxsw_sp1_ptp_hwtstamp_enabled(struct mlxsw_sp_port *mlxsw_sp_port)
1205 {
1206 	return mlxsw_sp_port->ptp.ing_types || mlxsw_sp_port->ptp.egr_types;
1207 }
1208 
1209 static int
1210 mlxsw_sp1_ptp_port_shaper_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
1211 {
1212 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1213 	char qeec_pl[MLXSW_REG_QEEC_LEN];
1214 
1215 	mlxsw_reg_qeec_ptps_pack(qeec_pl, mlxsw_sp_port->local_port, enable);
1216 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1217 }
1218 
1219 static int mlxsw_sp1_ptp_port_shaper_check(struct mlxsw_sp_port *mlxsw_sp_port)
1220 {
1221 	bool ptps = false;
1222 	int err, i;
1223 	u32 speed;
1224 
1225 	if (!mlxsw_sp1_ptp_hwtstamp_enabled(mlxsw_sp_port))
1226 		return mlxsw_sp1_ptp_port_shaper_set(mlxsw_sp_port, false);
1227 
1228 	err = mlxsw_sp_port_speed_get(mlxsw_sp_port, &speed);
1229 	if (err)
1230 		return err;
1231 
1232 	for (i = 0; i < MLXSW_SP1_PTP_SHAPER_PARAMS_LEN; i++) {
1233 		if (mlxsw_sp1_ptp_shaper_params[i].ethtool_speed == speed) {
1234 			ptps = true;
1235 			break;
1236 		}
1237 	}
1238 
1239 	return mlxsw_sp1_ptp_port_shaper_set(mlxsw_sp_port, ptps);
1240 }
1241 
1242 void mlxsw_sp1_ptp_shaper_work(struct work_struct *work)
1243 {
1244 	struct delayed_work *dwork = to_delayed_work(work);
1245 	struct mlxsw_sp_port *mlxsw_sp_port;
1246 	int err;
1247 
1248 	mlxsw_sp_port = container_of(dwork, struct mlxsw_sp_port,
1249 				     ptp.shaper_dw);
1250 
1251 	if (!mlxsw_sp1_ptp_hwtstamp_enabled(mlxsw_sp_port))
1252 		return;
1253 
1254 	err = mlxsw_sp1_ptp_port_shaper_check(mlxsw_sp_port);
1255 	if (err)
1256 		netdev_err(mlxsw_sp_port->dev, "Failed to set up PTP shaper\n");
1257 }
1258 
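/* A minimal sketch of the userspace request that ends up here, assuming a
 * port netdevice named "swp1" (the ioctl is issued on an ordinary socket fd):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = {};
 *
 *	strncpy(ifr.ifr_name, "swp1", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return, cfg.rx_filter reports what is actually being timestamped.
 */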
1259 int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
1260 			       struct hwtstamp_config *config)
1261 {
1262 	enum hwtstamp_rx_filters rx_filter;
1263 	u16 ing_types;
1264 	u16 egr_types;
1265 	int err;
1266 
1267 	err = mlxsw_sp1_ptp_get_message_types(config, &ing_types, &egr_types,
1268 					      &rx_filter);
1269 	if (err)
1270 		return err;
1271 
1272 	err = mlxsw_sp1_ptp_mtpppc_update(mlxsw_sp_port, ing_types, egr_types);
1273 	if (err)
1274 		return err;
1275 
1276 	mlxsw_sp_port->ptp.hwtstamp_config = *config;
1277 	mlxsw_sp_port->ptp.ing_types = ing_types;
1278 	mlxsw_sp_port->ptp.egr_types = egr_types;
1279 
1280 	err = mlxsw_sp1_ptp_port_shaper_check(mlxsw_sp_port);
1281 	if (err)
1282 		return err;
1283 
1284 	/* Notify the ioctl caller what we are actually timestamping. */
1285 	config->rx_filter = rx_filter;
1286 
1287 	return 0;
1288 }
1289 
1290 int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
1291 			      struct ethtool_ts_info *info)
1292 {
1293 	info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp);
1294 
1295 	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1296 				SOF_TIMESTAMPING_RX_HARDWARE |
1297 				SOF_TIMESTAMPING_RAW_HARDWARE;
1298 
1299 	info->tx_types = BIT(HWTSTAMP_TX_OFF) |
1300 			 BIT(HWTSTAMP_TX_ON);
1301 
1302 	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
1303 			   BIT(HWTSTAMP_FILTER_ALL);
1304 
1305 	return 0;
1306 }
1307 
1308 struct mlxsw_sp_ptp_port_stat {
1309 	char str[ETH_GSTRING_LEN];
1310 	ptrdiff_t offset;
1311 };
1312 
1313 #define MLXSW_SP_PTP_PORT_STAT(NAME, FIELD)				\
1314 	{								\
1315 		.str = NAME,						\
1316 		.offset = offsetof(struct mlxsw_sp_ptp_port_stats,	\
1317 				    FIELD),				\
1318 	}
1319 
1320 static const struct mlxsw_sp_ptp_port_stat mlxsw_sp_ptp_port_stats[] = {
1321 	MLXSW_SP_PTP_PORT_STAT("ptp_rx_gcd_packets",    rx_gcd.packets),
1322 	MLXSW_SP_PTP_PORT_STAT("ptp_rx_gcd_timestamps", rx_gcd.timestamps),
1323 	MLXSW_SP_PTP_PORT_STAT("ptp_tx_gcd_packets",    tx_gcd.packets),
1324 	MLXSW_SP_PTP_PORT_STAT("ptp_tx_gcd_timestamps", tx_gcd.timestamps),
1325 };
1326 
1327 #undef MLXSW_SP_PTP_PORT_STAT
1328 
1329 #define MLXSW_SP_PTP_PORT_STATS_LEN \
1330 	ARRAY_SIZE(mlxsw_sp_ptp_port_stats)
1331 
1332 int mlxsw_sp1_get_stats_count(void)
1333 {
1334 	return MLXSW_SP_PTP_PORT_STATS_LEN;
1335 }
1336 
1337 void mlxsw_sp1_get_stats_strings(u8 **p)
1338 {
1339 	int i;
1340 
1341 	for (i = 0; i < MLXSW_SP_PTP_PORT_STATS_LEN; i++) {
1342 		memcpy(*p, mlxsw_sp_ptp_port_stats[i].str,
1343 		       ETH_GSTRING_LEN);
1344 		*p += ETH_GSTRING_LEN;
1345 	}
1346 }
1347 
1348 void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
1349 			 u64 *data, int data_index)
1350 {
1351 	void *stats = &mlxsw_sp_port->ptp.stats;
1352 	ptrdiff_t offset;
1353 	int i;
1354 
1355 	data += data_index;
1356 	for (i = 0; i < MLXSW_SP_PTP_PORT_STATS_LEN; i++) {
1357 		offset = mlxsw_sp_ptp_port_stats[i].offset;
1358 		*data++ = *(u64 *)(stats + offset);
1359 	}
1360 }
1361 
1362 struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp)
1363 {
1364 	struct mlxsw_sp2_ptp_state *ptp_state;
1365 	int err;
1366 
1367 	ptp_state = kzalloc(sizeof(*ptp_state), GFP_KERNEL);
1368 	if (!ptp_state)
1369 		return ERR_PTR(-ENOMEM);
1370 
1371 	ptp_state->common.mlxsw_sp = mlxsw_sp;
1372 
1373 	err = mlxsw_sp_ptp_traps_set(mlxsw_sp);
1374 	if (err)
1375 		goto err_ptp_traps_set;
1376 
1377 	refcount_set(&ptp_state->ptp_port_enabled_ref, 0);
1378 	mutex_init(&ptp_state->lock);
1379 	return &ptp_state->common;
1380 
1381 err_ptp_traps_set:
1382 	kfree(ptp_state);
1383 	return ERR_PTR(err);
1384 }
1385 
1386 void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
1387 {
1388 	struct mlxsw_sp *mlxsw_sp = ptp_state_common->mlxsw_sp;
1389 	struct mlxsw_sp2_ptp_state *ptp_state;
1390 
1391 	ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
1392 
1393 	mutex_destroy(&ptp_state->lock);
1394 	mlxsw_sp_ptp_traps_unset(mlxsw_sp);
1395 	kfree(ptp_state);
1396 }
1397 
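/* A worked example of the reconstruction below: if the full UTC seconds read
 * 0x12345603 and the CQE carries 8-bit seconds 0xfe, then 0xfe > 0x03, so UTC
 * wrapped its low byte after the stamp was taken; 256 is subtracted and the
 * low byte replaced, yielding 0x123455fe.
 */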
1398 static u32 mlxsw_ptp_utc_time_stamp_sec_get(struct mlxsw_core *mlxsw_core,
1399 					    u8 cqe_ts_sec)
1400 {
1401 	u32 utc_sec = mlxsw_core_read_utc_sec(mlxsw_core);
1402 
1403 	if (cqe_ts_sec > (utc_sec & 0xff))
1404 		/* A time stamp above the low byte of UTC (UTC & 0xff) means the
1405 		 * latter has wrapped after the time stamp was collected.
1406 		 */
1407 		utc_sec -= 256;
1408 
1409 	utc_sec &= ~0xff;
1410 	utc_sec |= cqe_ts_sec;
1411 
1412 	return utc_sec;
1413 }
1414 
1415 static void mlxsw_sp2_ptp_hwtstamp_fill(struct mlxsw_core *mlxsw_core,
1416 					const struct mlxsw_skb_cb *cb,
1417 					struct skb_shared_hwtstamps *hwtstamps)
1418 {
1419 	u64 ts_sec, ts_nsec, nsec;
1420 
1421 	WARN_ON_ONCE(!cb->cqe_ts.sec && !cb->cqe_ts.nsec);
1422 
1423 	/* The time stamp in the CQE is represented by 38 bits, which is a short
1424 	 * representation of UTC time. Software should create the full time
1425 	 * stamp using the global UTC clock. The seconds field has only 8 bits
1426 	 * in the CQE; to create the full time stamp, use the current UTC time
1427 	 * and fix the seconds according to the relation between UTC seconds
1428 	 * and CQE seconds.
1429 	 */
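	/* Of the 38 bits, 8 carry seconds and the remaining 30 carry
	 * nanoseconds (10^9 < 2^30).
	 */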
1430 	ts_sec = mlxsw_ptp_utc_time_stamp_sec_get(mlxsw_core, cb->cqe_ts.sec);
1431 	ts_nsec = cb->cqe_ts.nsec;
1432 
1433 	nsec = ts_sec * NSEC_PER_SEC + ts_nsec;
1434 
1435 	hwtstamps->hwtstamp = ns_to_ktime(nsec);
1436 }
1437 
1438 void mlxsw_sp2_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
1439 			   u16 local_port)
1440 {
1441 	struct skb_shared_hwtstamps hwtstamps;
1442 
1443 	mlxsw_sp2_ptp_hwtstamp_fill(mlxsw_sp->core, mlxsw_skb_cb(skb),
1444 				    &hwtstamps);
1445 	*skb_hwtstamps(skb) = hwtstamps;
1446 	mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
1447 }
1448 
1449 void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
1450 			       struct sk_buff *skb, u16 local_port)
1451 {
1452 	struct skb_shared_hwtstamps hwtstamps;
1453 
1454 	mlxsw_sp2_ptp_hwtstamp_fill(mlxsw_sp->core, mlxsw_skb_cb(skb),
1455 				    &hwtstamps);
1456 	skb_tstamp_tx(skb, &hwtstamps);
1457 	dev_kfree_skb_any(skb);
1458 }
1459 
1460 int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
1461 			       struct hwtstamp_config *config)
1462 {
1463 	struct mlxsw_sp2_ptp_state *ptp_state;
1464 
1465 	ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
1466 
1467 	mutex_lock(&ptp_state->lock);
1468 	*config = ptp_state->config;
1469 	mutex_unlock(&ptp_state->lock);
1470 
1471 	return 0;
1472 }
1473 
1474 static int
1475 mlxsw_sp2_ptp_get_message_types(const struct hwtstamp_config *config,
1476 				u16 *p_ing_types, u16 *p_egr_types,
1477 				enum hwtstamp_rx_filters *p_rx_filter)
1478 {
1479 	enum hwtstamp_rx_filters rx_filter = config->rx_filter;
1480 	enum hwtstamp_tx_types tx_type = config->tx_type;
1481 	u16 ing_types = 0x00;
1482 	u16 egr_types = 0x00;
1483 
1484 	*p_rx_filter = rx_filter;
1485 
1486 	switch (rx_filter) {
1487 	case HWTSTAMP_FILTER_NONE:
1488 		ing_types = 0x00;
1489 		break;
1490 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1491 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1492 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1493 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
1494 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1495 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1496 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1497 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1498 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1499 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1500 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1501 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
1502 		/* In Spectrum-2 and above, all packets get a time stamp by
1503 		 * default and the driver fills the time stamp only for event
1504 		 * packets. Return all event types even if only specific types
1505 		 * were requested.
1506 		 */
1507 		ing_types = 0x0f;
1508 		*p_rx_filter = HWTSTAMP_FILTER_SOME;
1509 		break;
1510 	case HWTSTAMP_FILTER_ALL:
1511 	case HWTSTAMP_FILTER_SOME:
1512 	case HWTSTAMP_FILTER_NTP_ALL:
1513 		return -ERANGE;
1514 	default:
1515 		return -EINVAL;
1516 	}
1517 
1518 	switch (tx_type) {
1519 	case HWTSTAMP_TX_OFF:
1520 		egr_types = 0x00;
1521 		break;
1522 	case HWTSTAMP_TX_ON:
1523 		egr_types = 0x0f;
1524 		break;
1525 	case HWTSTAMP_TX_ONESTEP_SYNC:
1526 	case HWTSTAMP_TX_ONESTEP_P2P:
1527 		return -ERANGE;
1528 	default:
1529 		return -EINVAL;
1530 	}
1531 
1532 	if ((ing_types && !egr_types) || (!ing_types && egr_types))
1533 		return -EINVAL;
1534 
1535 	*p_ing_types = ing_types;
1536 	*p_egr_types = egr_types;
1537 	return 0;
1538 }
1539 
1540 static int mlxsw_sp2_ptp_mtpcpc_set(struct mlxsw_sp *mlxsw_sp, bool ptp_trap_en,
1541 				    u16 ing_types, u16 egr_types)
1542 {
1543 	char mtpcpc_pl[MLXSW_REG_MTPCPC_LEN];
1544 
1545 	mlxsw_reg_mtpcpc_pack(mtpcpc_pl, false, 0, ptp_trap_en, ing_types,
1546 			      egr_types);
1547 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mtpcpc), mtpcpc_pl);
1548 }
1549 
1550 static int mlxsw_sp2_ptp_enable(struct mlxsw_sp *mlxsw_sp, u16 ing_types,
1551 				u16 egr_types,
1552 				struct hwtstamp_config new_config)
1553 {
1554 	struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
1555 	int err;
1556 
1557 	err = mlxsw_sp2_ptp_mtpcpc_set(mlxsw_sp, true, ing_types, egr_types);
1558 	if (err)
1559 		return err;
1560 
1561 	ptp_state->config = new_config;
1562 	return 0;
1563 }
1564 
1565 static int mlxsw_sp2_ptp_disable(struct mlxsw_sp *mlxsw_sp,
1566 				 struct hwtstamp_config new_config)
1567 {
1568 	struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
1569 	int err;
1570 
1571 	err = mlxsw_sp2_ptp_mtpcpc_set(mlxsw_sp, false, 0, 0);
1572 	if (err)
1573 		return err;
1574 
1575 	ptp_state->config = new_config;
1576 	return 0;
1577 }
1578 
1579 static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
1580 					u16 ing_types, u16 egr_types,
1581 					struct hwtstamp_config new_config)
1582 {
1583 	struct mlxsw_sp2_ptp_state *ptp_state;
1584 	int err;
1585 
1586 	ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
1587 
1588 	if (refcount_inc_not_zero(&ptp_state->ptp_port_enabled_ref))
1589 		return 0;
1590 
1591 	err = mlxsw_sp2_ptp_enable(mlxsw_sp_port->mlxsw_sp, ing_types,
1592 				   egr_types, new_config);
1593 	if (err)
1594 		return err;
1595 
1596 	refcount_set(&ptp_state->ptp_port_enabled_ref, 1);
1597 
1598 	return 0;
1599 }
1600 
1601 static int mlxsw_sp2_ptp_deconfigure_port(struct mlxsw_sp_port *mlxsw_sp_port,
1602 					  struct hwtstamp_config new_config)
1603 {
1604 	struct mlxsw_sp2_ptp_state *ptp_state;
1605 	int err;
1606 
1607 	ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
1608 
1609 	if (!refcount_dec_and_test(&ptp_state->ptp_port_enabled_ref))
1610 		return 0;
1611 
1612 	err = mlxsw_sp2_ptp_disable(mlxsw_sp_port->mlxsw_sp, new_config);
1613 	if (err)
1614 		goto err_ptp_disable;
1615 
1616 	return 0;
1617 
1618 err_ptp_disable:
1619 	refcount_set(&ptp_state->ptp_port_enabled_ref, 1);
1620 	return err;
1621 }
1622 
1623 int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
1624 			       struct hwtstamp_config *config)
1625 {
1626 	struct mlxsw_sp2_ptp_state *ptp_state;
1627 	enum hwtstamp_rx_filters rx_filter;
1628 	struct hwtstamp_config new_config;
1629 	u16 new_ing_types, new_egr_types;
1630 	bool ptp_enabled;
1631 	int err;
1632 
1633 	ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
1634 	mutex_lock(&ptp_state->lock);
1635 
1636 	err = mlxsw_sp2_ptp_get_message_types(config, &new_ing_types,
1637 					      &new_egr_types, &rx_filter);
1638 	if (err)
1639 		goto err_get_message_types;
1640 
1641 	new_config.flags = config->flags;
1642 	new_config.tx_type = config->tx_type;
1643 	new_config.rx_filter = rx_filter;
1644 
1645 	ptp_enabled = mlxsw_sp_port->ptp.ing_types ||
1646 		      mlxsw_sp_port->ptp.egr_types;
1647 
1648 	if ((new_ing_types || new_egr_types) && !ptp_enabled) {
1649 		err = mlxsw_sp2_ptp_configure_port(mlxsw_sp_port, new_ing_types,
1650 						   new_egr_types, new_config);
1651 		if (err)
1652 			goto err_configure_port;
1653 	} else if (!new_ing_types && !new_egr_types && ptp_enabled) {
1654 		err = mlxsw_sp2_ptp_deconfigure_port(mlxsw_sp_port, new_config);
1655 		if (err)
1656 			goto err_deconfigure_port;
1657 	}
1658 
1659 	mlxsw_sp_port->ptp.ing_types = new_ing_types;
1660 	mlxsw_sp_port->ptp.egr_types = new_egr_types;
1661 
1662 	/* Notify the ioctl caller what we are actually timestamping. */
1663 	config->rx_filter = rx_filter;
1664 	mutex_unlock(&ptp_state->lock);
1665 
1666 	return 0;
1667 
1668 err_deconfigure_port:
1669 err_configure_port:
1670 err_get_message_types:
1671 	mutex_unlock(&ptp_state->lock);
1672 	return err;
1673 }
1674 
1675 int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
1676 			      struct ethtool_ts_info *info)
1677 {
1678 	info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp);
1679 
1680 	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1681 				SOF_TIMESTAMPING_RX_HARDWARE |
1682 				SOF_TIMESTAMPING_RAW_HARDWARE;
1683 
1684 	info->tx_types = BIT(HWTSTAMP_TX_OFF) |
1685 			 BIT(HWTSTAMP_TX_ON);
1686 
1687 	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
1688 			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
1689 			   BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
1690 
1691 	return 0;
1692 }
1693 
1694 int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
1695 				 struct mlxsw_sp_port *mlxsw_sp_port,
1696 				 struct sk_buff *skb,
1697 				 const struct mlxsw_tx_info *tx_info)
1698 {
1699 	mlxsw_sp_txhdr_construct(skb, tx_info);
1700 	return 0;
1701 }
1702 
1703 int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
1704 				  struct mlxsw_sp_port *mlxsw_sp_port,
1705 				  struct sk_buff *skb,
1706 				  const struct mlxsw_tx_info *tx_info)
1707 {
1708 	/* In Spectrum-2 and Spectrum-3, in order for PTP event packets to have
1709 	 * their correction field correctly set on the egress port, they must be
1710 	 * transmitted as data packets. Such packets ingress the ASIC via the
1711 	 * CPU port and must have a VLAN tag, as the CPU port is not configured
1712 	 * with a PVID. Push the default VLAN (4095), which is configured as
1713 	 * egress untagged on all the ports.
1714 	 */
1715 	if (!skb_vlan_tagged(skb)) {
1716 		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1717 						MLXSW_SP_DEFAULT_VID);
1718 		if (!skb) {
1719 			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
1720 			return -ENOMEM;
1721 		}
1722 	}
1723 
1724 	return mlxsw_sp_txhdr_ptp_data_construct(mlxsw_core, mlxsw_sp_port, skb,
1725 						 tx_info);
1726 }
1727