xref: /linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c (revision ae22a94997b8a03dcb3c922857c203246711f9d4)
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/ptp_clock_kernel.h>
5 #include <linux/clocksource.h>
6 #include <linux/timecounter.h>
7 #include <linux/spinlock.h>
8 #include <linux/device.h>
9 #include <linux/rhashtable.h>
10 #include <linux/ptp_classify.h>
11 #include <linux/if_ether.h>
12 #include <linux/if_vlan.h>
13 #include <linux/net_tstamp.h>
14 #include <linux/refcount.h>
15 
16 #include "spectrum.h"
17 #include "spectrum_ptp.h"
18 #include "core.h"
19 
20 #define MLXSW_SP1_PTP_CLOCK_CYCLES_SHIFT	29
21 #define MLXSW_SP1_PTP_CLOCK_FREQ_KHZ		156257 /* 6.4nSec */
22 #define MLXSW_SP1_PTP_CLOCK_MASK		64
23 
24 #define MLXSW_SP1_PTP_HT_GC_INTERVAL		500 /* ms */
25 
26 /* How long, approximately, should the unmatched entries stay in the hash table
27  * before they are collected. Should be evenly divisible by the GC interval.
28  */
29 #define MLXSW_SP1_PTP_HT_GC_TIMEOUT		1000 /* ms */
30 
/* ASIC-generation-agnostic PTP state; embedded as the first member of the
 * per-generation state structures below so they can be recovered with
 * container_of().
 */
struct mlxsw_sp_ptp_state {
	struct mlxsw_sp *mlxsw_sp;
};
34 
/* Spectrum-1 PTP state. On SP1, packets and their hardware timestamps arrive
 * through separate channels and are matched up in 'unmatched_ht'; a periodic
 * garbage collector evicts entries whose counterpart never showed up.
 */
struct mlxsw_sp1_ptp_state {
	struct mlxsw_sp_ptp_state common;
	struct rhltable unmatched_ht;
	spinlock_t unmatched_lock; /* protects the HT */
	struct delayed_work ht_gc_dw;
	u32 gc_cycle; /* Monotonic GC pass counter; entries are collected once
		       * this reaches the cycle recorded at insertion time.
		       */
};
42 
/* Spectrum-2 (and newer) PTP state. Timestamping is configured globally, so
 * only a refcount of enabled ports and the shared config are kept.
 */
struct mlxsw_sp2_ptp_state {
	struct mlxsw_sp_ptp_state common;
	refcount_t ptp_port_enabled_ref; /* Number of ports with time stamping
					  * enabled.
					  */
	struct hwtstamp_config config;
	struct mutex lock; /* Protects 'config' and HW configuration. */
};
51 
/* Hash table key identifying a PTP event: one packet and one timestamp with
 * the same key belong together.
 */
struct mlxsw_sp1_ptp_key {
	u16 local_port;
	u8 message_type;
	u16 sequence_id;
	u8 domain_number;
	bool ingress;
};
59 
/* An entry in 'unmatched_ht': either a packet waiting for its timestamp
 * (skb set, timestamp zero) or vice versa.
 */
struct mlxsw_sp1_ptp_unmatched {
	struct mlxsw_sp1_ptp_key key;
	struct rhlist_head ht_node;
	struct rcu_head rcu;
	struct sk_buff *skb;
	u64 timestamp;
	u32 gc_cycle; /* GC pass at which this entry expires. */
};
68 
/* rhltable layout: key is the 'key' member of an unmatched entry, linkage is
 * through 'ht_node'.
 */
static const struct rhashtable_params mlxsw_sp1_ptp_unmatched_ht_params = {
	.key_len = sizeof_field(struct mlxsw_sp1_ptp_unmatched, key),
	.key_offset = offsetof(struct mlxsw_sp1_ptp_unmatched, key),
	.head_offset = offsetof(struct mlxsw_sp1_ptp_unmatched, ht_node),
};
74 
/* Generation-agnostic PHC state; embedded in the SP1-specific clock below. */
struct mlxsw_sp_ptp_clock {
	struct mlxsw_core *core;
	struct ptp_clock *ptp;
	struct ptp_clock_info ptp_info;
};
80 
/* Spectrum-1 PHC: the HW free-running counter is turned into nanoseconds in
 * software via a cyclecounter/timecounter pair, which must be read
 * periodically ('overflow_work') so cycle wrap-arounds are not missed.
 */
struct mlxsw_sp1_ptp_clock {
	struct mlxsw_sp_ptp_clock common;
	spinlock_t lock; /* protect this structure */
	struct cyclecounter cycles;
	struct timecounter tc;
	u32 nominal_c_mult; /* Unadjusted multiplier, base for adjfine. */
	unsigned long overflow_period; /* In jiffies. */
	struct delayed_work overflow_work;
};
90 
/* Recover the SP1-specific PTP state from the common state pointer. */
static struct mlxsw_sp1_ptp_state *
mlxsw_sp1_ptp_state(struct mlxsw_sp *mlxsw_sp)
{
	return container_of(mlxsw_sp->ptp_state, struct mlxsw_sp1_ptp_state,
			    common);
}
97 
/* Recover the SP2-specific PTP state from the common state pointer. */
static struct mlxsw_sp2_ptp_state *
mlxsw_sp2_ptp_state(struct mlxsw_sp *mlxsw_sp)
{
	return container_of(mlxsw_sp->ptp_state, struct mlxsw_sp2_ptp_state,
			    common);
}
104 
/* Recover the SP1 clock from the ptp_clock_info embedded in its common part. */
static struct mlxsw_sp1_ptp_clock *
mlxsw_sp1_ptp_clock(struct ptp_clock_info *ptp)
{
	return container_of(ptp, struct mlxsw_sp1_ptp_clock, common.ptp_info);
}
110 
/* Read the 64-bit free-running counter from two 32-bit registers, optionally
 * capturing system timestamps around the low-word read for PTP_SYS_OFFSET
 * support ('sts' may be NULL). The high word is read before and after the low
 * word to detect a wrap of the low word in between.
 */
static u64 __mlxsw_sp1_ptp_read_frc(struct mlxsw_sp1_ptp_clock *clock,
				    struct ptp_system_timestamp *sts)
{
	struct mlxsw_core *mlxsw_core = clock->common.core;
	u32 frc_h1, frc_h2, frc_l;

	frc_h1 = mlxsw_core_read_frc_h(mlxsw_core);
	ptp_read_system_prets(sts);
	frc_l = mlxsw_core_read_frc_l(mlxsw_core);
	ptp_read_system_postts(sts);
	frc_h2 = mlxsw_core_read_frc_h(mlxsw_core);

	if (frc_h1 != frc_h2) {
		/* wrap around: the low word wrapped between the two high-word
		 * reads, so re-read it; it now belongs with frc_h2.
		 */
		ptp_read_system_prets(sts);
		frc_l = mlxsw_core_read_frc_l(mlxsw_core);
		ptp_read_system_postts(sts);
	}

	return (u64) frc_l | (u64) frc_h2 << 32;
}
132 
/* cyclecounter::read callback: read the free-running counter, masked to the
 * counter width.
 */
static u64 mlxsw_sp1_ptp_read_frc(const struct cyclecounter *cc)
{
	struct mlxsw_sp1_ptp_clock *clock =
		container_of(cc, struct mlxsw_sp1_ptp_clock, cycles);

	return __mlxsw_sp1_ptp_read_frc(clock, NULL) & cc->mask;
}
140 
/* Apply a frequency adjustment (in PPB) to the HW clock via the MTUTC
 * register. Shared by SP1 and SP2 paths.
 */
static int
mlxsw_sp_ptp_phc_adjfreq(struct mlxsw_sp_ptp_clock *clock, int freq_adj)
{
	struct mlxsw_core *mlxsw_core = clock->core;
	char mtutc_pl[MLXSW_REG_MTUTC_LEN];

	mlxsw_reg_mtutc_pack(mtutc_pl, MLXSW_REG_MTUTC_OPERATION_ADJUST_FREQ,
			     freq_adj, 0, 0, 0);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
}
151 
152 static u64 mlxsw_sp1_ptp_ns2cycles(const struct timecounter *tc, u64 nsec)
153 {
154 	u64 cycles = (u64) nsec;
155 
156 	cycles <<= tc->cc->shift;
157 	cycles = div_u64(cycles, tc->cc->mult);
158 
159 	return cycles;
160 }
161 
/* Program the HW UTC clock to 'nsec'. The HW can only latch a new time at a
 * full-second boundary: compute the next full second, convert it to a cycle
 * count, arm the MTPPS virtual pin with that cycle, and instruct MTUTC to set
 * the time when that event fires.
 *
 * Returns 0 on success or a negative errno from the register writes.
 */
static int
mlxsw_sp1_ptp_phc_settime(struct mlxsw_sp1_ptp_clock *clock, u64 nsec)
{
	struct mlxsw_core *mlxsw_core = clock->common.core;
	u64 next_sec, next_sec_in_nsec, cycles;
	char mtutc_pl[MLXSW_REG_MTUTC_LEN];
	char mtpps_pl[MLXSW_REG_MTPPS_LEN];
	int err;

	next_sec = div_u64(nsec, NSEC_PER_SEC) + 1;
	next_sec_in_nsec = next_sec * NSEC_PER_SEC;

	/* The cycle conversion reads the timecounter's multiplier, which
	 * adjfine can change concurrently; take the clock lock.
	 */
	spin_lock_bh(&clock->lock);
	cycles = mlxsw_sp1_ptp_ns2cycles(&clock->tc, next_sec_in_nsec);
	spin_unlock_bh(&clock->lock);

	mlxsw_reg_mtpps_vpin_pack(mtpps_pl, cycles);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtpps), mtpps_pl);
	if (err)
		return err;

	mlxsw_reg_mtutc_pack(mtutc_pl,
			     MLXSW_REG_MTUTC_OPERATION_SET_TIME_AT_NEXT_SEC,
			     0, next_sec, 0, 0);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
}
188 
/* ptp_clock_info::adjfine: adjust the SW timecounter multiplier and mirror
 * the adjustment into the HW clock frequency.
 */
static int mlxsw_sp1_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
	s32 ppb;

	ppb = scaled_ppm_to_ppb(scaled_ppm);

	spin_lock_bh(&clock->lock);
	/* Flush accumulated time at the old rate before switching mult, so
	 * the new multiplier only applies to cycles from this point on.
	 */
	timecounter_read(&clock->tc);
	clock->cycles.mult = adjust_by_scaled_ppm(clock->nominal_c_mult,
						  scaled_ppm);
	spin_unlock_bh(&clock->lock);

	return mlxsw_sp_ptp_phc_adjfreq(&clock->common, ppb);
}
204 
/* ptp_clock_info::adjtime: offset the SW timecounter by 'delta' and re-sync
 * the HW clock to the resulting time.
 */
static int mlxsw_sp1_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
	u64 nsec;

	spin_lock_bh(&clock->lock);
	timecounter_adjtime(&clock->tc, delta);
	nsec = timecounter_read(&clock->tc);
	spin_unlock_bh(&clock->lock);

	return mlxsw_sp1_ptp_phc_settime(clock, nsec);
}
217 
/* ptp_clock_info::gettimex64: sample the free-running counter (bracketed by
 * system timestamps in 'sts') and convert it to a timespec via the SW
 * timecounter.
 */
static int mlxsw_sp1_ptp_gettimex(struct ptp_clock_info *ptp,
				  struct timespec64 *ts,
				  struct ptp_system_timestamp *sts)
{
	struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
	u64 cycles, nsec;

	spin_lock_bh(&clock->lock);
	cycles = __mlxsw_sp1_ptp_read_frc(clock, sts);
	nsec = timecounter_cyc2time(&clock->tc, cycles);
	spin_unlock_bh(&clock->lock);

	*ts = ns_to_timespec64(nsec);

	return 0;
}
234 
/* ptp_clock_info::settime64: re-base the SW timecounter at the requested time
 * and program the HW clock to match.
 */
static int mlxsw_sp1_ptp_settime(struct ptp_clock_info *ptp,
				 const struct timespec64 *ts)
{
	struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
	u64 nsec = timespec64_to_ns(ts);

	spin_lock_bh(&clock->lock);
	timecounter_init(&clock->tc, &clock->cycles, nsec);
	nsec = timecounter_read(&clock->tc);
	spin_unlock_bh(&clock->lock);

	return mlxsw_sp1_ptp_phc_settime(clock, nsec);
}
248 
/* PHC operations exposed to the PTP core for Spectrum-1. */
static const struct ptp_clock_info mlxsw_sp1_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.name		= "mlxsw_sp_clock",
	.max_adj	= 100000000,
	.adjfine	= mlxsw_sp1_ptp_adjfine,
	.adjtime	= mlxsw_sp1_ptp_adjtime,
	.gettimex64	= mlxsw_sp1_ptp_gettimex,
	.settime64	= mlxsw_sp1_ptp_settime,
};
258 
259 static void mlxsw_sp1_ptp_clock_overflow(struct work_struct *work)
260 {
261 	struct delayed_work *dwork = to_delayed_work(work);
262 	struct mlxsw_sp1_ptp_clock *clock;
263 
264 	clock = container_of(dwork, struct mlxsw_sp1_ptp_clock, overflow_work);
265 
266 	spin_lock_bh(&clock->lock);
267 	timecounter_read(&clock->tc);
268 	spin_unlock_bh(&clock->lock);
269 	mlxsw_core_schedule_dw(&clock->overflow_work, clock->overflow_period);
270 }
271 
/* Allocate and register the Spectrum-1 PHC: set up the cyclecounter over the
 * HW free-running counter, the timecounter on top of it, the periodic
 * overflow worker, and finally the PTP clock device.
 *
 * Returns the common clock pointer on success or an ERR_PTR on failure.
 */
struct mlxsw_sp_ptp_clock *
mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
{
	u64 overflow_cycles, nsec, frac = 0;
	struct mlxsw_sp1_ptp_clock *clock;
	int err;

	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
	if (!clock)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&clock->lock);
	clock->cycles.read = mlxsw_sp1_ptp_read_frc;
	clock->cycles.shift = MLXSW_SP1_PTP_CLOCK_CYCLES_SHIFT;
	clock->cycles.mult = clocksource_khz2mult(MLXSW_SP1_PTP_CLOCK_FREQ_KHZ,
						  clock->cycles.shift);
	clock->nominal_c_mult = clock->cycles.mult;
	clock->cycles.mask = CLOCKSOURCE_MASK(MLXSW_SP1_PTP_CLOCK_MASK);
	clock->common.core = mlxsw_sp->core;

	timecounter_init(&clock->tc, &clock->cycles, 0);

	/* Calculate period in seconds to call the overflow watchdog - to make
	 * sure counter is checked at least twice every wrap around.
	 * The period is calculated as the minimum between max HW cycles count
	 * (The clock source mask) and max amount of cycles that can be
	 * multiplied by clock multiplier where the result doesn't exceed
	 * 64bits.
	 */
	overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
	overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));

	nsec = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles, 0, &frac);
	clock->overflow_period = nsecs_to_jiffies(nsec);

	INIT_DELAYED_WORK(&clock->overflow_work, mlxsw_sp1_ptp_clock_overflow);
	mlxsw_core_schedule_dw(&clock->overflow_work, 0);

	clock->common.ptp_info = mlxsw_sp1_ptp_clock_info;
	clock->common.ptp = ptp_clock_register(&clock->common.ptp_info, dev);
	if (IS_ERR(clock->common.ptp)) {
		err = PTR_ERR(clock->common.ptp);
		dev_err(dev, "ptp_clock_register failed %d\n", err);
		goto err_ptp_clock_register;
	}

	return &clock->common;

err_ptp_clock_register:
	/* The overflow worker was already scheduled above; stop it before
	 * freeing the structure it operates on.
	 */
	cancel_delayed_work_sync(&clock->overflow_work);
	kfree(clock);
	return ERR_PTR(err);
}
325 
/* Tear down the Spectrum-1 PHC: unregister the clock device, stop the
 * overflow worker, and free the state.
 */
void mlxsw_sp1_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock_common)
{
	struct mlxsw_sp1_ptp_clock *clock =
		container_of(clock_common, struct mlxsw_sp1_ptp_clock, common);

	ptp_clock_unregister(clock_common->ptp);
	cancel_delayed_work_sync(&clock->overflow_work);
	kfree(clock);
}
335 
/* Read the HW UTC clock (seconds and nanoseconds registers) as a single
 * nanosecond value, optionally bracketing the nanoseconds read with system
 * timestamps ('sts' may be NULL). The seconds register is read before and
 * after to detect a second rollover in between.
 */
static u64 mlxsw_sp2_ptp_read_utc(struct mlxsw_sp_ptp_clock *clock,
				  struct ptp_system_timestamp *sts)
{
	struct mlxsw_core *mlxsw_core = clock->core;
	u32 utc_sec1, utc_sec2, utc_nsec;

	utc_sec1 = mlxsw_core_read_utc_sec(mlxsw_core);
	ptp_read_system_prets(sts);
	utc_nsec = mlxsw_core_read_utc_nsec(mlxsw_core);
	ptp_read_system_postts(sts);
	utc_sec2 = mlxsw_core_read_utc_sec(mlxsw_core);

	if (utc_sec1 != utc_sec2) {
		/* Wrap around: re-read nanoseconds; they now belong with
		 * utc_sec2.
		 */
		ptp_read_system_prets(sts);
		utc_nsec = mlxsw_core_read_utc_nsec(mlxsw_core);
		ptp_read_system_postts(sts);
	}

	return (u64)utc_sec2 * NSEC_PER_SEC + utc_nsec;
}
357 
/* Set the HW UTC clock immediately to 'nsec' via MTUTC. Unlike Spectrum-1,
 * newer ASICs can latch an arbitrary time without waiting for a second
 * boundary.
 */
static int
mlxsw_sp2_ptp_phc_settime(struct mlxsw_sp_ptp_clock *clock, u64 nsec)
{
	struct mlxsw_core *mlxsw_core = clock->core;
	char mtutc_pl[MLXSW_REG_MTUTC_LEN];
	u32 sec, nsec_rem;

	sec = div_u64_rem(nsec, NSEC_PER_SEC, &nsec_rem);
	mlxsw_reg_mtutc_pack(mtutc_pl,
			     MLXSW_REG_MTUTC_OPERATION_SET_TIME_IMMEDIATE,
			     0, sec, nsec_rem, 0);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
}
371 
/* ptp_clock_info::adjfine for Spectrum-2+: forward the adjustment to HW. */
static int mlxsw_sp2_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct mlxsw_sp_ptp_clock *clock =
		container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);

	/* In Spectrum-2 and newer ASICs, the frequency adjustment in MTUTC is
	 * reversed, positive values mean to decrease the frequency. Adjust the
	 * sign of PPB to this behavior.
	 */
	return mlxsw_sp_ptp_phc_adjfreq(clock, -ppb);
}
384 
/* ptp_clock_info::adjtime for Spectrum-2+: apply the delta in HW when it fits
 * the register's s16 range, otherwise fall back to read-modify-set of the
 * absolute time.
 */
static int mlxsw_sp2_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlxsw_sp_ptp_clock *clock =
		container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
	struct mlxsw_core *mlxsw_core = clock->core;
	char mtutc_pl[MLXSW_REG_MTUTC_LEN];

	/* HW time adjustment range is s16. If out of range, set time instead. */
	if (delta < S16_MIN || delta > S16_MAX) {
		u64 nsec;

		nsec = mlxsw_sp2_ptp_read_utc(clock, NULL);
		nsec += delta;

		return mlxsw_sp2_ptp_phc_settime(clock, nsec);
	}

	mlxsw_reg_mtutc_pack(mtutc_pl,
			     MLXSW_REG_MTUTC_OPERATION_ADJUST_TIME,
			     0, 0, 0, delta);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
}
407 
/* ptp_clock_info::gettimex64 for Spectrum-2+: read the HW UTC clock with
 * system timestamps captured in 'sts'.
 */
static int mlxsw_sp2_ptp_gettimex(struct ptp_clock_info *ptp,
				  struct timespec64 *ts,
				  struct ptp_system_timestamp *sts)
{
	struct mlxsw_sp_ptp_clock *clock =
		container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
	u64 nsec;

	nsec = mlxsw_sp2_ptp_read_utc(clock, sts);
	*ts = ns_to_timespec64(nsec);

	return 0;
}
421 
422 static int mlxsw_sp2_ptp_settime(struct ptp_clock_info *ptp,
423 				 const struct timespec64 *ts)
424 {
425 	struct mlxsw_sp_ptp_clock *clock =
426 		container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
427 	u64 nsec = timespec64_to_ns(ts);
428 
429 	return mlxsw_sp2_ptp_phc_settime(clock, nsec);
430 }
431 
/* PHC operations exposed to the PTP core for Spectrum-2 and newer. */
static const struct ptp_clock_info mlxsw_sp2_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.name		= "mlxsw_sp_clock",
	.max_adj	= MLXSW_REG_MTUTC_MAX_FREQ_ADJ,
	.adjfine	= mlxsw_sp2_ptp_adjfine,
	.adjtime	= mlxsw_sp2_ptp_adjtime,
	.gettimex64	= mlxsw_sp2_ptp_gettimex,
	.settime64	= mlxsw_sp2_ptp_settime,
};
441 
/* Allocate and register the Spectrum-2+ PHC. The HW clock is zeroed first so
 * it starts from a known state.
 *
 * Returns the clock on success or an ERR_PTR on failure.
 */
struct mlxsw_sp_ptp_clock *
mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
{
	struct mlxsw_sp_ptp_clock *clock;
	int err;

	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
	if (!clock)
		return ERR_PTR(-ENOMEM);

	clock->core = mlxsw_sp->core;

	clock->ptp_info = mlxsw_sp2_ptp_clock_info;

	err = mlxsw_sp2_ptp_phc_settime(clock, 0);
	if (err) {
		dev_err(dev, "setting UTC time failed %d\n", err);
		goto err_ptp_phc_settime;
	}

	clock->ptp = ptp_clock_register(&clock->ptp_info, dev);
	if (IS_ERR(clock->ptp)) {
		err = PTR_ERR(clock->ptp);
		dev_err(dev, "ptp_clock_register failed %d\n", err);
		goto err_ptp_clock_register;
	}

	return clock;

err_ptp_clock_register:
err_ptp_phc_settime:
	kfree(clock);
	return ERR_PTR(err);
}
476 
/* Tear down the Spectrum-2+ PHC. */
void mlxsw_sp2_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock)
{
	ptp_clock_unregister(clock->ptp);
	kfree(clock);
}
482 
/* Extract the PTP domain number, message type and sequence ID from an SKB.
 * The MAC header must already be set (see mlxsw_sp1_ptp_receive()).
 *
 * Returns 0 on success, -ERANGE if the packet is not PTPv1/v2, or -EINVAL if
 * the PTP header cannot be parsed.
 */
static int mlxsw_sp_ptp_parse(struct sk_buff *skb,
			      u8 *p_domain_number,
			      u8 *p_message_type,
			      u16 *p_sequence_id)
{
	unsigned int ptp_class;
	struct ptp_header *hdr;

	ptp_class = ptp_classify_raw(skb);

	switch (ptp_class & PTP_CLASS_VMASK) {
	case PTP_CLASS_V1:
	case PTP_CLASS_V2:
		break;
	default:
		return -ERANGE;
	}

	hdr = ptp_parse_header(skb, ptp_class);
	if (!hdr)
		return -EINVAL;

	*p_message_type	 = ptp_get_msgtype(hdr, ptp_class);
	*p_domain_number = hdr->domain_number;
	*p_sequence_id	 = be16_to_cpu(hdr->sequence_id);

	return 0;
}
511 
/* Insert a new unmatched entry (a packet still missing its timestamp, or a
 * timestamp still missing its packet) into the hash table. The entry expires
 * after MLXSW_SP1_PTP_HT_GC_TIMEOUT worth of GC cycles.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or another negative
 * errno if the rhltable insertion fails. Caller holds 'unmatched_lock'.
 */
static int
mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp1_ptp_key key,
			     struct sk_buff *skb,
			     u64 timestamp)
{
	int cycles = MLXSW_SP1_PTP_HT_GC_TIMEOUT / MLXSW_SP1_PTP_HT_GC_INTERVAL;
	struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
	struct mlxsw_sp1_ptp_unmatched *unmatched;
	int err;

	/* GFP_ATOMIC: called from trap handlers under a spinlock. */
	unmatched = kzalloc(sizeof(*unmatched), GFP_ATOMIC);
	if (!unmatched)
		return -ENOMEM;

	unmatched->key = key;
	unmatched->skb = skb;
	unmatched->timestamp = timestamp;
	unmatched->gc_cycle = ptp_state->gc_cycle + cycles;

	err = rhltable_insert(&ptp_state->unmatched_ht, &unmatched->ht_node,
			      mlxsw_sp1_ptp_unmatched_ht_params);
	if (err)
		kfree(unmatched);

	return err;
}
542 
/* Look up unmatched entries for 'key'. Returns the last entry in the chain
 * (or NULL if none) and reports the chain length in '*p_length' so the caller
 * can bound the table size. Caller must be in an RCU read-side section.
 */
static struct mlxsw_sp1_ptp_unmatched *
mlxsw_sp1_ptp_unmatched_lookup(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp1_ptp_key key, int *p_length)
{
	struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
	struct mlxsw_sp1_ptp_unmatched *unmatched, *last = NULL;
	struct rhlist_head *tmp, *list;
	int length = 0;

	list = rhltable_lookup(&ptp_state->unmatched_ht, &key,
			       mlxsw_sp1_ptp_unmatched_ht_params);
	rhl_for_each_entry_rcu(unmatched, tmp, list, ht_node) {
		last = unmatched;
		length++;
	}

	*p_length = length;
	return last;
}
562 
/* Remove an unmatched entry from the hash table. Returns 0 on success or a
 * negative errno if the entry is no longer in the table. Caller holds
 * 'unmatched_lock' and remains responsible for freeing the entry.
 */
static int
mlxsw_sp1_ptp_unmatched_remove(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp1_ptp_unmatched *unmatched)
{
	struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);

	return rhltable_remove(&ptp_state->unmatched_ht,
			       &unmatched->ht_node,
			       mlxsw_sp1_ptp_unmatched_ht_params);
}
573 
/* This function is called in the following scenarios:
 *
 * 1) When a packet is matched with its timestamp.
 * 2) In several situation when it is necessary to immediately pass on
 *    an SKB without a timestamp.
 * 3) From GC indirectly through mlxsw_sp1_ptp_unmatched_finish().
 *    This case is similar to 2) above.
 *
 * Ingress packets are handed to the RX listener (with 'hwtstamps' applied if
 * provided); egress packets are delivered to the TX timestamp infrastructure
 * and then freed. Consumes the SKB in all cases.
 */
static void mlxsw_sp1_ptp_packet_finish(struct mlxsw_sp *mlxsw_sp,
					struct sk_buff *skb, u16 local_port,
					bool ingress,
					struct skb_shared_hwtstamps *hwtstamps)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	/* Between capturing the packet and finishing it, there is a window of
	 * opportunity for the originating port to go away (e.g. due to a
	 * split). Also make sure the SKB device reference is still valid.
	 */
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!(mlxsw_sp_port && (!skb->dev || skb->dev == mlxsw_sp_port->dev))) {
		dev_kfree_skb_any(skb);
		return;
	}

	if (ingress) {
		if (hwtstamps)
			*skb_hwtstamps(skb) = *hwtstamps;
		mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
	} else {
		/* skb_tstamp_tx() allows hwtstamps to be NULL. */
		skb_tstamp_tx(skb, hwtstamps);
		dev_kfree_skb_any(skb);
	}
}
609 
610 static void mlxsw_sp1_packet_timestamp(struct mlxsw_sp *mlxsw_sp,
611 				       struct mlxsw_sp1_ptp_key key,
612 				       struct sk_buff *skb,
613 				       u64 timestamp)
614 {
615 	struct mlxsw_sp_ptp_clock *clock_common = mlxsw_sp->clock;
616 	struct mlxsw_sp1_ptp_clock *clock =
617 		container_of(clock_common, struct mlxsw_sp1_ptp_clock, common);
618 
619 	struct skb_shared_hwtstamps hwtstamps;
620 	u64 nsec;
621 
622 	spin_lock_bh(&clock->lock);
623 	nsec = timecounter_cyc2time(&clock->tc, timestamp);
624 	spin_unlock_bh(&clock->lock);
625 
626 	hwtstamps.hwtstamp = ns_to_ktime(nsec);
627 	mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
628 				    key.local_port, key.ingress, &hwtstamps);
629 }
630 
/* Finish an unmatched entry after it has been removed from the hash table:
 * deliver the packet with its timestamp if both are present, without one if
 * only the packet is present, and in all cases free the entry via RCU (it may
 * still be visible to concurrent lockless walkers).
 */
static void
mlxsw_sp1_ptp_unmatched_finish(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp1_ptp_unmatched *unmatched)
{
	if (unmatched->skb && unmatched->timestamp)
		mlxsw_sp1_packet_timestamp(mlxsw_sp, unmatched->key,
					   unmatched->skb,
					   unmatched->timestamp);
	else if (unmatched->skb)
		mlxsw_sp1_ptp_packet_finish(mlxsw_sp, unmatched->skb,
					    unmatched->key.local_port,
					    unmatched->key.ingress, NULL);
	kfree_rcu(unmatched, rcu);
}
645 
/* rhltable free callback used at teardown to drop any leftover entries. */
static void mlxsw_sp1_ptp_unmatched_free_fn(void *ptr, void *arg)
{
	struct mlxsw_sp1_ptp_unmatched *unmatched = ptr;

	/* This is invoked at a point where the ports are gone already. Nothing
	 * to do with whatever is left in the HT but to free it.
	 */
	if (unmatched->skb)
		dev_kfree_skb_any(unmatched->skb);
	kfree_rcu(unmatched, rcu);
}
657 
/* Handle one half of a PTP event: either a packet ('skb' set, 'timestamp'
 * zero) or a timestamp ('skb' NULL, 'timestamp' set). If the other half is
 * already waiting in the hash table, complete the pair and deliver it;
 * otherwise store this half for later matching (bounded to 100 entries per
 * key; on overflow or save failure a packet is delivered untimestamped).
 */
static void mlxsw_sp1_ptp_got_piece(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp1_ptp_key key,
				    struct sk_buff *skb, u64 timestamp)
{
	struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
	struct mlxsw_sp1_ptp_unmatched *unmatched;
	int length;
	int err;

	/* RCU protects the lookup; the spinlock serializes mutations against
	 * other trap handlers and the GC.
	 */
	rcu_read_lock();

	spin_lock(&ptp_state->unmatched_lock);

	unmatched = mlxsw_sp1_ptp_unmatched_lookup(mlxsw_sp, key, &length);
	if (skb && unmatched && unmatched->timestamp) {
		unmatched->skb = skb;
	} else if (timestamp && unmatched && unmatched->skb) {
		unmatched->timestamp = timestamp;
	} else {
		/* Either there is no entry to match, or one that is there is
		 * incompatible.
		 */
		if (length < 100)
			err = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key,
							   skb, timestamp);
		else
			err = -E2BIG;
		if (err && skb)
			mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
						    key.local_port,
						    key.ingress, NULL);
		unmatched = NULL;
	}

	if (unmatched) {
		err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp, unmatched);
		WARN_ON_ONCE(err);
	}

	spin_unlock(&ptp_state->unmatched_lock);

	/* Deliver outside the spinlock but still under RCU, since the entry
	 * is freed with kfree_rcu().
	 */
	if (unmatched)
		mlxsw_sp1_ptp_unmatched_finish(mlxsw_sp, unmatched);

	rcu_read_unlock();
}
704 
/* Entry point for a trapped PTP packet (RX or TX). Parse it into a match key
 * and hand it to the matching machinery; if the port is gone, timestamping is
 * disabled for this message type, or parsing fails, pass the packet on
 * immediately without a timestamp.
 */
static void mlxsw_sp1_ptp_got_packet(struct mlxsw_sp *mlxsw_sp,
				     struct sk_buff *skb, u16 local_port,
				     bool ingress)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp1_ptp_key key;
	u8 types;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		goto immediate;

	types = ingress ? mlxsw_sp_port->ptp.ing_types :
			  mlxsw_sp_port->ptp.egr_types;
	if (!types)
		goto immediate;

	memset(&key, 0, sizeof(key));
	key.local_port = local_port;
	key.ingress = ingress;

	err = mlxsw_sp_ptp_parse(skb, &key.domain_number, &key.message_type,
				 &key.sequence_id);
	if (err)
		goto immediate;

	/* For packets whose timestamping was not enabled on this port, don't
	 * bother trying to match the timestamp.
	 */
	if (!((1 << key.message_type) & types))
		goto immediate;

	mlxsw_sp1_ptp_got_piece(mlxsw_sp, key, skb, 0);
	return;

immediate:
	mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb, local_port, ingress, NULL);
}
744 
/* Entry point for a HW timestamp event delivered separately from its packet.
 * Build the match key from the event fields and hand the timestamp to the
 * matching machinery; drop it silently if the port is gone or timestamping of
 * this message type is not enabled.
 */
void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
				 u16 local_port, u8 message_type,
				 u8 domain_number, u16 sequence_id,
				 u64 timestamp)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp1_ptp_key key;
	u8 types;

	/* The local port comes from HW; guard against an out-of-range index
	 * before using it to index the ports array.
	 */
	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	types = ingress ? mlxsw_sp_port->ptp.ing_types :
			  mlxsw_sp_port->ptp.egr_types;

	/* For message types whose timestamping was not enabled on this port,
	 * don't bother with the timestamp.
	 */
	if (!((1 << message_type) & types))
		return;

	memset(&key, 0, sizeof(key));
	key.local_port = local_port;
	key.domain_number = domain_number;
	key.message_type = message_type;
	key.sequence_id = sequence_id;
	key.ingress = ingress;

	mlxsw_sp1_ptp_got_piece(mlxsw_sp, key, NULL, timestamp);
}
778 
/* RX trap handler: reset the MAC header (required by the PTP classifier) and
 * process the packet as an ingress event.
 */
void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			   u16 local_port)
{
	skb_reset_mac_header(skb);
	mlxsw_sp1_ptp_got_packet(mlxsw_sp, skb, local_port, true);
}
785 
/* TX completion handler: process the transmitted packet as an egress event. */
void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
			       struct sk_buff *skb, u16 local_port)
{
	mlxsw_sp1_ptp_got_packet(mlxsw_sp, skb, local_port, false);
}
791 
/* Collect one expired unmatched entry: remove it from the hash table, account
 * it in the per-port GC-drop statistics, and deliver/free whatever half of
 * the event it holds. Runs in process context (GC work item).
 */
static void
mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp1_ptp_state *ptp_state,
			    struct mlxsw_sp1_ptp_unmatched *unmatched)
{
	struct mlxsw_sp *mlxsw_sp = ptp_state->common.mlxsw_sp;
	struct mlxsw_sp_ptp_port_dir_stats *stats;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	/* If an unmatched entry has an SKB, it has to be handed over to the
	 * networking stack. This is usually done from a trap handler, which is
	 * invoked in a softirq context. Here we are going to do it in process
	 * context. If that were to be interrupted by a softirq, it could cause
	 * a deadlock when an attempt is made to take an already-taken lock
	 * somewhere along the sending path. Disable softirqs to prevent this.
	 */
	local_bh_disable();

	spin_lock(&ptp_state->unmatched_lock);
	err = rhltable_remove(&ptp_state->unmatched_ht, &unmatched->ht_node,
			      mlxsw_sp1_ptp_unmatched_ht_params);
	spin_unlock(&ptp_state->unmatched_lock);

	if (err)
		/* The packet was matched with timestamp during the walk. */
		goto out;

	mlxsw_sp_port = mlxsw_sp->ports[unmatched->key.local_port];
	if (mlxsw_sp_port) {
		stats = unmatched->key.ingress ?
			&mlxsw_sp_port->ptp.stats.rx_gcd :
			&mlxsw_sp_port->ptp.stats.tx_gcd;
		if (unmatched->skb)
			stats->packets++;
		else
			stats->timestamps++;
	}

	/* mlxsw_sp1_ptp_unmatched_finish() invokes netif_receive_skb(). While
	 * the comment at that function states that it can only be called in
	 * soft IRQ context, this pattern of local_bh_disable() +
	 * netif_receive_skb(), in process context, is seen elsewhere in the
	 * kernel, notably in pktgen.
	 */
	mlxsw_sp1_ptp_unmatched_finish(mlxsw_sp, unmatched);

out:
	local_bh_enable();
}
841 
/* Periodic GC work item: walk the unmatched hash table and collect entries
 * whose expiry cycle has been reached, then re-arm for the next interval.
 */
static void mlxsw_sp1_ptp_ht_gc(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlxsw_sp1_ptp_unmatched *unmatched;
	struct mlxsw_sp1_ptp_state *ptp_state;
	struct rhashtable_iter iter;
	u32 gc_cycle;
	void *obj;

	ptp_state = container_of(dwork, struct mlxsw_sp1_ptp_state, ht_gc_dw);
	gc_cycle = ptp_state->gc_cycle++;

	rhltable_walk_enter(&ptp_state->unmatched_ht, &iter);
	rhashtable_walk_start(&iter);
	while ((obj = rhashtable_walk_next(&iter))) {
		/* -EAGAIN from a concurrent resize; just keep walking. */
		if (IS_ERR(obj))
			continue;

		unmatched = obj;
		if (unmatched->gc_cycle <= gc_cycle)
			mlxsw_sp1_ptp_ht_gc_collect(ptp_state, unmatched);
	}
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);

	mlxsw_core_schedule_dw(&ptp_state->ht_gc_dw,
			       MLXSW_SP1_PTP_HT_GC_INTERVAL);
}
870 
/* Configure which PTP message types are delivered via the given trap ID
 * ('message_type' is a bitmask of PTP message types).
 */
static int mlxsw_sp_ptp_mtptpt_set(struct mlxsw_sp *mlxsw_sp,
				   enum mlxsw_reg_mtptpt_trap_id trap_id,
				   u16 message_type)
{
	char mtptpt_pl[MLXSW_REG_MTPTPT_LEN];

	mlxsw_reg_mtptpt_pack(mtptpt_pl, trap_id, message_type);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mtptpt), mtptpt_pl);
}
880 
/* Configure (via read-modify-write of MOGCR) whether the ingress and egress
 * timestamp FIFOs are cleared when a timestamp is trapped to the CPU.
 */
static int mlxsw_sp1_ptp_set_fifo_clr_on_trap(struct mlxsw_sp *mlxsw_sp,
					      bool clr)
{
	char mogcr_pl[MLXSW_REG_MOGCR_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
	if (err)
		return err;

	mlxsw_reg_mogcr_ptp_iftc_set(mogcr_pl, clr);
	mlxsw_reg_mogcr_ptp_eftc_set(mogcr_pl, clr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
}
895 
/* Set the global bitmasks of PTP message types to timestamp on ingress and
 * egress via the MTPPPC register.
 */
static int mlxsw_sp1_ptp_mtpppc_set(struct mlxsw_sp *mlxsw_sp,
				    u16 ing_types, u16 egr_types)
{
	char mtpppc_pl[MLXSW_REG_MTPPPC_LEN];

	mlxsw_reg_mtpppc_pack(mtpppc_pl, ing_types, egr_types);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mtpppc), mtpppc_pl);
}
904 
/* Per-speed PTP shaper configuration and timestamp corrections, written to HW
 * via the QPSC register.
 */
struct mlxsw_sp1_ptp_shaper_params {
	u32 ethtool_speed;
	enum mlxsw_reg_qpsc_port_speed port_speed;
	u8 shaper_time_exp;
	u8 shaper_time_mantissa;
	u8 shaper_inc;
	u8 shaper_bs;
	u8 port_to_shaper_credits;
	int ing_timestamp_inc;
	int egr_timestamp_inc;
};
916 
/* HW-tuned shaper parameters per port speed (100M/1G/10G/25G). */
static const struct mlxsw_sp1_ptp_shaper_params
mlxsw_sp1_ptp_shaper_params[] = {
	{
		.ethtool_speed		= SPEED_100,
		.port_speed		= MLXSW_REG_QPSC_PORT_SPEED_100M,
		.shaper_time_exp	= 4,
		.shaper_time_mantissa	= 12,
		.shaper_inc		= 9,
		.shaper_bs		= 1,
		.port_to_shaper_credits	= 1,
		.ing_timestamp_inc	= -313,
		.egr_timestamp_inc	= 313,
	},
	{
		.ethtool_speed		= SPEED_1000,
		.port_speed		= MLXSW_REG_QPSC_PORT_SPEED_1G,
		.shaper_time_exp	= 0,
		.shaper_time_mantissa	= 12,
		.shaper_inc		= 6,
		.shaper_bs		= 0,
		.port_to_shaper_credits	= 1,
		.ing_timestamp_inc	= -35,
		.egr_timestamp_inc	= 35,
	},
	{
		.ethtool_speed		= SPEED_10000,
		.port_speed		= MLXSW_REG_QPSC_PORT_SPEED_10G,
		.shaper_time_exp	= 0,
		.shaper_time_mantissa	= 2,
		.shaper_inc		= 14,
		.shaper_bs		= 1,
		.port_to_shaper_credits	= 1,
		.ing_timestamp_inc	= -11,
		.egr_timestamp_inc	= 11,
	},
	{
		.ethtool_speed		= SPEED_25000,
		.port_speed		= MLXSW_REG_QPSC_PORT_SPEED_25G,
		.shaper_time_exp	= 0,
		.shaper_time_mantissa	= 0,
		.shaper_inc		= 11,
		.shaper_bs		= 1,
		.port_to_shaper_credits	= 1,
		.ing_timestamp_inc	= -14,
		.egr_timestamp_inc	= 14,
	},
};
964 
965 #define MLXSW_SP1_PTP_SHAPER_PARAMS_LEN ARRAY_SIZE(mlxsw_sp1_ptp_shaper_params)
966 
967 static int mlxsw_sp1_ptp_shaper_params_set(struct mlxsw_sp *mlxsw_sp)
968 {
969 	const struct mlxsw_sp1_ptp_shaper_params *params;
970 	char qpsc_pl[MLXSW_REG_QPSC_LEN];
971 	int i, err;
972 
973 	for (i = 0; i < MLXSW_SP1_PTP_SHAPER_PARAMS_LEN; i++) {
974 		params = &mlxsw_sp1_ptp_shaper_params[i];
975 		mlxsw_reg_qpsc_pack(qpsc_pl, params->port_speed,
976 				    params->shaper_time_exp,
977 				    params->shaper_time_mantissa,
978 				    params->shaper_inc, params->shaper_bs,
979 				    params->port_to_shaper_credits,
980 				    params->ing_timestamp_inc,
981 				    params->egr_timestamp_inc);
982 		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpsc), qpsc_pl);
983 		if (err)
984 			return err;
985 	}
986 
987 	return 0;
988 }
989 
/* Split PTP message types between the two trap IDs: event messages (the ones
 * that carry timestamps) go to PTP0, everything else to PTP1. On failure the
 * already-programmed trap is rolled back.
 */
static int mlxsw_sp_ptp_traps_set(struct mlxsw_sp *mlxsw_sp)
{
	u16 event_message_type;
	int err;

	/* Deliver these message types as PTP0. */
	event_message_type = BIT(PTP_MSGTYPE_SYNC) |
			     BIT(PTP_MSGTYPE_DELAY_REQ) |
			     BIT(PTP_MSGTYPE_PDELAY_REQ) |
			     BIT(PTP_MSGTYPE_PDELAY_RESP);

	err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0,
				      event_message_type);
	if (err)
		return err;

	/* Everything else is PTP1. */
	err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1,
				      ~event_message_type);
	if (err)
		goto err_mtptpt1_set;

	return 0;

err_mtptpt1_set:
	mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
	return err;
}
1018 
/* Undo mlxsw_sp_ptp_traps_set(): clear both PTP trap message-type
 * assignments, in reverse order of how they were installed.
 */
static void mlxsw_sp_ptp_traps_unset(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
	mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
}
1024 
/* Initialize Spectrum-1 PTP support.
 *
 * Programs the per-speed PTP shapers, allocates the driver state,
 * creates the hash table that holds packets/time stamps which have not
 * yet been matched to each other, installs the PTP traps, enables
 * clearing of the time stamp FIFO on trap, and schedules the periodic
 * garbage collection of unmatched hash-table entries.
 *
 * Returns the common state on success, ERR_PTR() on failure; error
 * paths unwind in reverse order of construction.
 */
struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp1_ptp_state *ptp_state;
	int err;

	err = mlxsw_sp1_ptp_shaper_params_set(mlxsw_sp);
	if (err)
		return ERR_PTR(err);

	ptp_state = kzalloc(sizeof(*ptp_state), GFP_KERNEL);
	if (!ptp_state)
		return ERR_PTR(-ENOMEM);
	ptp_state->common.mlxsw_sp = mlxsw_sp;

	spin_lock_init(&ptp_state->unmatched_lock);

	err = rhltable_init(&ptp_state->unmatched_ht,
			    &mlxsw_sp1_ptp_unmatched_ht_params);
	if (err)
		goto err_hashtable_init;

	err = mlxsw_sp_ptp_traps_set(mlxsw_sp);
	if (err)
		goto err_ptp_traps_set;

	err = mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, true);
	if (err)
		goto err_fifo_clr;

	/* Start collecting stale unmatched entries. */
	INIT_DELAYED_WORK(&ptp_state->ht_gc_dw, mlxsw_sp1_ptp_ht_gc);
	mlxsw_core_schedule_dw(&ptp_state->ht_gc_dw,
			       MLXSW_SP1_PTP_HT_GC_INTERVAL);
	return &ptp_state->common;

err_fifo_clr:
	mlxsw_sp_ptp_traps_unset(mlxsw_sp);
err_ptp_traps_set:
	rhltable_destroy(&ptp_state->unmatched_ht);
err_hashtable_init:
	kfree(ptp_state);
	return ERR_PTR(err);
}
1067 
/* Tear down Spectrum-1 PTP support, roughly in reverse order of
 * mlxsw_sp1_ptp_init(): stop the GC work, clear the global
 * timestamping configuration, disable FIFO clearing on trap, remove
 * the PTP traps and free the unmatched hash table together with any
 * entries still in it.
 */
void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
{
	struct mlxsw_sp *mlxsw_sp = ptp_state_common->mlxsw_sp;
	struct mlxsw_sp1_ptp_state *ptp_state;

	ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);

	cancel_delayed_work_sync(&ptp_state->ht_gc_dw);
	/* Disable timestamping of all message types in both directions. */
	mlxsw_sp1_ptp_mtpppc_set(mlxsw_sp, 0, 0);
	mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, false);
	mlxsw_sp_ptp_traps_unset(mlxsw_sp);
	rhltable_free_and_destroy(&ptp_state->unmatched_ht,
				  &mlxsw_sp1_ptp_unmatched_free_fn, NULL);
	kfree(ptp_state);
}
1083 
/* Report the port's last applied hardware timestamping configuration. */
int mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct hwtstamp_config *config)
{
	*config = mlxsw_sp_port->ptp.hwtstamp_config;
	return 0;
}
1090 
1091 static int
1092 mlxsw_sp1_ptp_get_message_types(const struct hwtstamp_config *config,
1093 				u16 *p_ing_types, u16 *p_egr_types,
1094 				enum hwtstamp_rx_filters *p_rx_filter)
1095 {
1096 	enum hwtstamp_rx_filters rx_filter = config->rx_filter;
1097 	enum hwtstamp_tx_types tx_type = config->tx_type;
1098 	u16 ing_types = 0x00;
1099 	u16 egr_types = 0x00;
1100 
1101 	switch (tx_type) {
1102 	case HWTSTAMP_TX_OFF:
1103 		egr_types = 0x00;
1104 		break;
1105 	case HWTSTAMP_TX_ON:
1106 		egr_types = 0xff;
1107 		break;
1108 	case HWTSTAMP_TX_ONESTEP_SYNC:
1109 	case HWTSTAMP_TX_ONESTEP_P2P:
1110 		return -ERANGE;
1111 	default:
1112 		return -EINVAL;
1113 	}
1114 
1115 	switch (rx_filter) {
1116 	case HWTSTAMP_FILTER_NONE:
1117 		ing_types = 0x00;
1118 		break;
1119 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1120 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1121 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1122 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
1123 		ing_types = 0x01;
1124 		break;
1125 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1126 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1127 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1128 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1129 		ing_types = 0x02;
1130 		break;
1131 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1132 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1133 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1134 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
1135 		ing_types = 0x0f;
1136 		break;
1137 	case HWTSTAMP_FILTER_ALL:
1138 		ing_types = 0xff;
1139 		break;
1140 	case HWTSTAMP_FILTER_SOME:
1141 	case HWTSTAMP_FILTER_NTP_ALL:
1142 		return -ERANGE;
1143 	default:
1144 		return -EINVAL;
1145 	}
1146 
1147 	*p_ing_types = ing_types;
1148 	*p_egr_types = egr_types;
1149 	*p_rx_filter = rx_filter;
1150 	return 0;
1151 }
1152 
1153 static int mlxsw_sp1_ptp_mtpppc_update(struct mlxsw_sp_port *mlxsw_sp_port,
1154 				       u16 ing_types, u16 egr_types)
1155 {
1156 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1157 	struct mlxsw_sp_port *tmp;
1158 	u16 orig_ing_types = 0;
1159 	u16 orig_egr_types = 0;
1160 	int err;
1161 	int i;
1162 
1163 	/* MTPPPC configures timestamping globally, not per port. Find the
1164 	 * configuration that contains all configured timestamping requests.
1165 	 */
1166 	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
1167 		tmp = mlxsw_sp->ports[i];
1168 		if (tmp) {
1169 			orig_ing_types |= tmp->ptp.ing_types;
1170 			orig_egr_types |= tmp->ptp.egr_types;
1171 		}
1172 		if (tmp && tmp != mlxsw_sp_port) {
1173 			ing_types |= tmp->ptp.ing_types;
1174 			egr_types |= tmp->ptp.egr_types;
1175 		}
1176 	}
1177 
1178 	if ((ing_types || egr_types) && !(orig_ing_types || orig_egr_types)) {
1179 		err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
1180 		if (err) {
1181 			netdev_err(mlxsw_sp_port->dev, "Failed to increase parsing depth");
1182 			return err;
1183 		}
1184 	}
1185 	if (!(ing_types || egr_types) && (orig_ing_types || orig_egr_types))
1186 		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
1187 
1188 	return mlxsw_sp1_ptp_mtpppc_set(mlxsw_sp_port->mlxsw_sp,
1189 				       ing_types, egr_types);
1190 }
1191 
1192 static bool mlxsw_sp1_ptp_hwtstamp_enabled(struct mlxsw_sp_port *mlxsw_sp_port)
1193 {
1194 	return mlxsw_sp_port->ptp.ing_types || mlxsw_sp_port->ptp.egr_types;
1195 }
1196 
/* Enable or disable the port's PTP shaper via the QEEC register. */
static int
mlxsw_sp1_ptp_port_shaper_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_ptps_pack(qeec_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1206 
1207 static int mlxsw_sp1_ptp_port_shaper_check(struct mlxsw_sp_port *mlxsw_sp_port)
1208 {
1209 	bool ptps = false;
1210 	int err, i;
1211 	u32 speed;
1212 
1213 	if (!mlxsw_sp1_ptp_hwtstamp_enabled(mlxsw_sp_port))
1214 		return mlxsw_sp1_ptp_port_shaper_set(mlxsw_sp_port, false);
1215 
1216 	err = mlxsw_sp_port_speed_get(mlxsw_sp_port, &speed);
1217 	if (err)
1218 		return err;
1219 
1220 	for (i = 0; i < MLXSW_SP1_PTP_SHAPER_PARAMS_LEN; i++) {
1221 		if (mlxsw_sp1_ptp_shaper_params[i].ethtool_speed == speed) {
1222 			ptps = true;
1223 			break;
1224 		}
1225 	}
1226 
1227 	return mlxsw_sp1_ptp_port_shaper_set(mlxsw_sp_port, ptps);
1228 }
1229 
/* Delayed work (ptp.shaper_dw) that re-evaluates the port's PTP shaper
 * state. Does nothing while timestamping is disabled on the port.
 */
void mlxsw_sp1_ptp_shaper_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = container_of(dwork, struct mlxsw_sp_port,
				     ptp.shaper_dw);

	if (!mlxsw_sp1_ptp_hwtstamp_enabled(mlxsw_sp_port))
		return;

	err = mlxsw_sp1_ptp_port_shaper_check(mlxsw_sp_port);
	if (err)
		netdev_err(mlxsw_sp_port->dev, "Failed to set up PTP shaper\n");
}
1246 
/* Apply a hardware timestamping configuration to a Spectrum-1 port.
 *
 * Translates the requested config into ingress/egress message-type
 * masks, merges them into the global MTPPPC configuration, caches the
 * config on the port, and adjusts the PTP shaper to match.
 */
int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct hwtstamp_config *config)
{
	enum hwtstamp_rx_filters rx_filter;
	u16 ing_types;
	u16 egr_types;
	int err;

	err = mlxsw_sp1_ptp_get_message_types(config, &ing_types, &egr_types,
					      &rx_filter);
	if (err)
		return err;

	err = mlxsw_sp1_ptp_mtpppc_update(mlxsw_sp_port, ing_types, egr_types);
	if (err)
		return err;

	/* Only cache the new state once the hardware accepted it. */
	mlxsw_sp_port->ptp.hwtstamp_config = *config;
	mlxsw_sp_port->ptp.ing_types = ing_types;
	mlxsw_sp_port->ptp.egr_types = egr_types;

	err = mlxsw_sp1_ptp_port_shaper_check(mlxsw_sp_port);
	if (err)
		return err;

	/* Notify the ioctl caller what we are actually timestamping. */
	config->rx_filter = rx_filter;

	return 0;
}
1277 
1278 int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
1279 			      struct ethtool_ts_info *info)
1280 {
1281 	info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp);
1282 
1283 	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1284 				SOF_TIMESTAMPING_RX_HARDWARE |
1285 				SOF_TIMESTAMPING_RAW_HARDWARE;
1286 
1287 	info->tx_types = BIT(HWTSTAMP_TX_OFF) |
1288 			 BIT(HWTSTAMP_TX_ON);
1289 
1290 	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
1291 			   BIT(HWTSTAMP_FILTER_ALL);
1292 
1293 	return 0;
1294 }
1295 
/* Descriptor of one per-port PTP ethtool statistic: its string name and
 * the byte offset of the corresponding u64 counter within
 * struct mlxsw_sp_ptp_port_stats.
 */
struct mlxsw_sp_ptp_port_stat {
	char str[ETH_GSTRING_LEN];
	ptrdiff_t offset;
};

/* Shorthand for building one mlxsw_sp_ptp_port_stat table entry. */
#define MLXSW_SP_PTP_PORT_STAT(NAME, FIELD)				\
	{								\
		.str = NAME,						\
		.offset = offsetof(struct mlxsw_sp_ptp_port_stats,	\
				    FIELD),				\
	}

/* Exposes the rx_gcd/tx_gcd packet and time stamp counters; "gcd"
 * presumably counts entries handled by the unmatched-entry garbage
 * collector -- confirm against the GC implementation.
 */
static const struct mlxsw_sp_ptp_port_stat mlxsw_sp_ptp_port_stats[] = {
	MLXSW_SP_PTP_PORT_STAT("ptp_rx_gcd_packets",    rx_gcd.packets),
	MLXSW_SP_PTP_PORT_STAT("ptp_rx_gcd_timestamps", rx_gcd.timestamps),
	MLXSW_SP_PTP_PORT_STAT("ptp_tx_gcd_packets",    tx_gcd.packets),
	MLXSW_SP_PTP_PORT_STAT("ptp_tx_gcd_timestamps", tx_gcd.timestamps),
};

#undef MLXSW_SP_PTP_PORT_STAT

#define MLXSW_SP_PTP_PORT_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_ptp_port_stats)
1319 
1320 int mlxsw_sp1_get_stats_count(void)
1321 {
1322 	return MLXSW_SP_PTP_PORT_STATS_LEN;
1323 }
1324 
1325 void mlxsw_sp1_get_stats_strings(u8 **p)
1326 {
1327 	int i;
1328 
1329 	for (i = 0; i < MLXSW_SP_PTP_PORT_STATS_LEN; i++) {
1330 		memcpy(*p, mlxsw_sp_ptp_port_stats[i].str,
1331 		       ETH_GSTRING_LEN);
1332 		*p += ETH_GSTRING_LEN;
1333 	}
1334 }
1335 
1336 void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
1337 			 u64 *data, int data_index)
1338 {
1339 	void *stats = &mlxsw_sp_port->ptp.stats;
1340 	ptrdiff_t offset;
1341 	int i;
1342 
1343 	data += data_index;
1344 	for (i = 0; i < MLXSW_SP_PTP_PORT_STATS_LEN; i++) {
1345 		offset = mlxsw_sp_ptp_port_stats[i].offset;
1346 		*data++ = *(u64 *)(stats + offset);
1347 	}
1348 }
1349 
/* Initialize Spectrum-2 PTP support: allocate the state, install the
 * PTP traps and prepare the reference count that tracks how many ports
 * have timestamping enabled.
 */
struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp2_ptp_state *ptp_state;
	int err;

	ptp_state = kzalloc(sizeof(*ptp_state), GFP_KERNEL);
	if (!ptp_state)
		return ERR_PTR(-ENOMEM);

	ptp_state->common.mlxsw_sp = mlxsw_sp;

	err = mlxsw_sp_ptp_traps_set(mlxsw_sp);
	if (err)
		goto err_ptp_traps_set;

	refcount_set(&ptp_state->ptp_port_enabled_ref, 0);
	mutex_init(&ptp_state->lock);
	return &ptp_state->common;

err_ptp_traps_set:
	kfree(ptp_state);
	return ERR_PTR(err);
}
1373 
/* Tear down Spectrum-2 PTP support: destroy the config mutex, remove
 * the PTP traps and free the state.
 */
void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
{
	struct mlxsw_sp *mlxsw_sp = ptp_state_common->mlxsw_sp;
	struct mlxsw_sp2_ptp_state *ptp_state;

	ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);

	mutex_destroy(&ptp_state->lock);
	mlxsw_sp_ptp_traps_unset(mlxsw_sp);
	kfree(ptp_state);
}
1385 
1386 static u32 mlxsw_ptp_utc_time_stamp_sec_get(struct mlxsw_core *mlxsw_core,
1387 					    u8 cqe_ts_sec)
1388 {
1389 	u32 utc_sec = mlxsw_core_read_utc_sec(mlxsw_core);
1390 
1391 	if (cqe_ts_sec > (utc_sec & 0xff))
1392 		/* Time stamp above the last bits of UTC (UTC & 0xff) means the
1393 		 * latter has wrapped after the time stamp was collected.
1394 		 */
1395 		utc_sec -= 256;
1396 
1397 	utc_sec &= ~0xff;
1398 	utc_sec |= cqe_ts_sec;
1399 
1400 	return utc_sec;
1401 }
1402 
1403 static void mlxsw_sp2_ptp_hwtstamp_fill(struct mlxsw_core *mlxsw_core,
1404 					const struct mlxsw_skb_cb *cb,
1405 					struct skb_shared_hwtstamps *hwtstamps)
1406 {
1407 	u64 ts_sec, ts_nsec, nsec;
1408 
1409 	WARN_ON_ONCE(!cb->cqe_ts.sec && !cb->cqe_ts.nsec);
1410 
1411 	/* The time stamp in the CQE is represented by 38 bits, which is a short
1412 	 * representation of UTC time. Software should create the full time
1413 	 * stamp using the global UTC clock. The seconds have only 8 bits in the
1414 	 * CQE, to create the full time stamp, use the current UTC time and fix
1415 	 * the seconds according to the relation between UTC seconds and CQE
1416 	 * seconds.
1417 	 */
1418 	ts_sec = mlxsw_ptp_utc_time_stamp_sec_get(mlxsw_core, cb->cqe_ts.sec);
1419 	ts_nsec = cb->cqe_ts.nsec;
1420 
1421 	nsec = ts_sec * NSEC_PER_SEC + ts_nsec;
1422 
1423 	hwtstamps->hwtstamp = ns_to_ktime(nsec);
1424 }
1425 
/* RX handler for Spectrum-2 PTP packets: reconstruct the full hardware
 * time stamp from the CQE, attach it to the skb and hand the packet to
 * the regular receive path.
 */
void mlxsw_sp2_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			   u16 local_port)
{
	struct skb_shared_hwtstamps hwtstamps;

	mlxsw_sp2_ptp_hwtstamp_fill(mlxsw_sp->core, mlxsw_skb_cb(skb),
				    &hwtstamps);
	*skb_hwtstamps(skb) = hwtstamps;
	mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
}
1436 
/* TX-completion handler for Spectrum-2 PTP packets: reconstruct the
 * hardware transmit time stamp from the CQE, report it via
 * skb_tstamp_tx() and release the skb.
 */
void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
			       struct sk_buff *skb, u16 local_port)
{
	struct skb_shared_hwtstamps hwtstamps;

	mlxsw_sp2_ptp_hwtstamp_fill(mlxsw_sp->core, mlxsw_skb_cb(skb),
				    &hwtstamps);
	skb_tstamp_tx(skb, &hwtstamps);
	dev_kfree_skb_any(skb);
}
1447 
/* Report the device-wide Spectrum-2 timestamping configuration; the
 * cached config is protected by ptp_state->lock.
 */
int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct hwtstamp_config *config)
{
	struct mlxsw_sp2_ptp_state *ptp_state;

	ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);

	mutex_lock(&ptp_state->lock);
	*config = ptp_state->config;
	mutex_unlock(&ptp_state->lock);

	return 0;
}
1461 
1462 static int
1463 mlxsw_sp2_ptp_get_message_types(const struct hwtstamp_config *config,
1464 				u16 *p_ing_types, u16 *p_egr_types,
1465 				enum hwtstamp_rx_filters *p_rx_filter)
1466 {
1467 	enum hwtstamp_rx_filters rx_filter = config->rx_filter;
1468 	enum hwtstamp_tx_types tx_type = config->tx_type;
1469 	u16 ing_types = 0x00;
1470 	u16 egr_types = 0x00;
1471 
1472 	*p_rx_filter = rx_filter;
1473 
1474 	switch (rx_filter) {
1475 	case HWTSTAMP_FILTER_NONE:
1476 		ing_types = 0x00;
1477 		break;
1478 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1479 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1480 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1481 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
1482 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1483 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1484 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1485 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1486 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1487 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1488 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1489 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
1490 		/* In Spectrum-2 and above, all packets get time stamp by
1491 		 * default and the driver fill the time stamp only for event
1492 		 * packets. Return all event types even if only specific types
1493 		 * were required.
1494 		 */
1495 		ing_types = 0x0f;
1496 		*p_rx_filter = HWTSTAMP_FILTER_SOME;
1497 		break;
1498 	case HWTSTAMP_FILTER_ALL:
1499 	case HWTSTAMP_FILTER_SOME:
1500 	case HWTSTAMP_FILTER_NTP_ALL:
1501 		return -ERANGE;
1502 	default:
1503 		return -EINVAL;
1504 	}
1505 
1506 	switch (tx_type) {
1507 	case HWTSTAMP_TX_OFF:
1508 		egr_types = 0x00;
1509 		break;
1510 	case HWTSTAMP_TX_ON:
1511 		egr_types = 0x0f;
1512 		break;
1513 	case HWTSTAMP_TX_ONESTEP_SYNC:
1514 	case HWTSTAMP_TX_ONESTEP_P2P:
1515 		return -ERANGE;
1516 	default:
1517 		return -EINVAL;
1518 	}
1519 
1520 	if ((ing_types && !egr_types) || (!ing_types && egr_types))
1521 		return -EINVAL;
1522 
1523 	*p_ing_types = ing_types;
1524 	*p_egr_types = egr_types;
1525 	return 0;
1526 }
1527 
/* Program the MTPCPC register: toggle trapping of PTP packets to the
 * CPU and set the ingress/egress message-type masks to time stamp.
 */
static int mlxsw_sp2_ptp_mtpcpc_set(struct mlxsw_sp *mlxsw_sp, bool ptp_trap_en,
				    u16 ing_types, u16 egr_types)
{
	char mtpcpc_pl[MLXSW_REG_MTPCPC_LEN];

	mlxsw_reg_mtpcpc_pack(mtpcpc_pl, false, 0, ptp_trap_en, ing_types,
			      egr_types);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mtpcpc), mtpcpc_pl);
}
1537 
/* Enable the device-wide PTP configuration with the given message-type
 * masks and cache the new config. Called with ptp_state->lock held
 * (see mlxsw_sp2_ptp_hwtstamp_set()).
 */
static int mlxsw_sp2_ptp_enable(struct mlxsw_sp *mlxsw_sp, u16 ing_types,
				u16 egr_types,
				struct hwtstamp_config new_config)
{
	struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
	int err;

	err = mlxsw_sp2_ptp_mtpcpc_set(mlxsw_sp, true, ing_types, egr_types);
	if (err)
		return err;

	ptp_state->config = new_config;
	return 0;
}
1552 
/* Disable the device-wide PTP configuration and cache the new config.
 * Called with ptp_state->lock held (see mlxsw_sp2_ptp_hwtstamp_set()).
 */
static int mlxsw_sp2_ptp_disable(struct mlxsw_sp *mlxsw_sp,
				 struct hwtstamp_config new_config)
{
	struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
	int err;

	err = mlxsw_sp2_ptp_mtpcpc_set(mlxsw_sp, false, 0, 0);
	if (err)
		return err;

	ptp_state->config = new_config;
	return 0;
}
1566 
/* Account a port enabling timestamping. The hardware configuration is
 * global, so only the first port to enable actually programs the
 * device; later ports merely take a reference. Called with
 * ptp_state->lock held.
 */
static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 ing_types, u16 egr_types,
					struct hwtstamp_config new_config)
{
	struct mlxsw_sp2_ptp_state *ptp_state;
	int err;

	ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);

	/* Some port already enabled timestamping; just take a reference. */
	if (refcount_inc_not_zero(&ptp_state->ptp_port_enabled_ref))
		return 0;

	err = mlxsw_sp2_ptp_enable(mlxsw_sp_port->mlxsw_sp, ing_types,
				   egr_types, new_config);
	if (err)
		return err;

	refcount_set(&ptp_state->ptp_port_enabled_ref, 1);

	return 0;
}
1588 
/* Account a port disabling timestamping. Only when the last reference
 * drops is the device-wide configuration actually disabled; if that
 * fails, the reference is restored. Called with ptp_state->lock held.
 */
static int mlxsw_sp2_ptp_deconfigure_port(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct hwtstamp_config new_config)
{
	struct mlxsw_sp2_ptp_state *ptp_state;
	int err;

	ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);

	if (!refcount_dec_and_test(&ptp_state->ptp_port_enabled_ref))
		return 0;

	err = mlxsw_sp2_ptp_disable(mlxsw_sp_port->mlxsw_sp, new_config);
	if (err)
		goto err_ptp_disable;

	return 0;

err_ptp_disable:
	/* Disabling failed; keep the configuration referenced. */
	refcount_set(&ptp_state->ptp_port_enabled_ref, 1);
	return err;
}
1610 
/* Apply a hardware timestamping configuration on a Spectrum-2 port.
 *
 * The device configuration is global: this function translates the
 * request, enables or disables the shared configuration when the port
 * transitions between timestamping off and on, and caches the per-port
 * message-type masks. Everything runs under ptp_state->lock.
 */
int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct hwtstamp_config *config)
{
	struct mlxsw_sp2_ptp_state *ptp_state;
	enum hwtstamp_rx_filters rx_filter;
	struct hwtstamp_config new_config;
	u16 new_ing_types, new_egr_types;
	bool ptp_enabled;
	int err;

	ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
	mutex_lock(&ptp_state->lock);

	err = mlxsw_sp2_ptp_get_message_types(config, &new_ing_types,
					      &new_egr_types, &rx_filter);
	if (err)
		goto err_get_message_types;

	new_config.flags = config->flags;
	new_config.tx_type = config->tx_type;
	new_config.rx_filter = rx_filter;

	/* Was timestamping active on this port before the change? */
	ptp_enabled = mlxsw_sp_port->ptp.ing_types ||
		      mlxsw_sp_port->ptp.egr_types;

	if ((new_ing_types || new_egr_types) && !ptp_enabled) {
		err = mlxsw_sp2_ptp_configure_port(mlxsw_sp_port, new_ing_types,
						   new_egr_types, new_config);
		if (err)
			goto err_configure_port;
	} else if (!new_ing_types && !new_egr_types && ptp_enabled) {
		err = mlxsw_sp2_ptp_deconfigure_port(mlxsw_sp_port, new_config);
		if (err)
			goto err_deconfigure_port;
	}

	mlxsw_sp_port->ptp.ing_types = new_ing_types;
	mlxsw_sp_port->ptp.egr_types = new_egr_types;

	/* Notify the ioctl caller what we are actually timestamping. */
	config->rx_filter = rx_filter;
	mutex_unlock(&ptp_state->lock);

	return 0;

err_deconfigure_port:
err_configure_port:
err_get_message_types:
	mutex_unlock(&ptp_state->lock);
	return err;
}
1662 
1663 int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
1664 			      struct ethtool_ts_info *info)
1665 {
1666 	info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp);
1667 
1668 	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1669 				SOF_TIMESTAMPING_RX_HARDWARE |
1670 				SOF_TIMESTAMPING_RAW_HARDWARE;
1671 
1672 	info->tx_types = BIT(HWTSTAMP_TX_OFF) |
1673 			 BIT(HWTSTAMP_TX_ON);
1674 
1675 	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
1676 			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
1677 			   BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
1678 
1679 	return 0;
1680 }
1681 
/* Build the TX header for a PTP packet on ASICs that need no special
 * PTP handling: simply build the regular TX header.
 */
int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
				 struct mlxsw_sp_port *mlxsw_sp_port,
				 struct sk_buff *skb,
				 const struct mlxsw_tx_info *tx_info)
{
	mlxsw_sp_txhdr_construct(skb, tx_info);
	return 0;
}
1690 
/* Build the TX header for a PTP packet on Spectrum-2/3.
 *
 * In Spectrum-2 and Spectrum-3, in order for PTP event packets to have
 * their correction field correctly set on the egress port they must be
 * transmitted as data packets. Such packets ingress the ASIC via the
 * CPU port and must have a VLAN tag, as the CPU port is not configured
 * with a PVID. Push the default VLAN (4095), which is configured as
 * egress untagged on all the ports.
 */
int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
				  struct mlxsw_sp_port *mlxsw_sp_port,
				  struct sk_buff *skb,
				  const struct mlxsw_tx_info *tx_info)
{
	if (!skb_vlan_tagged(skb)) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						MLXSW_SP_DEFAULT_VID);
		if (!skb) {
			/* vlan_insert_tag_set_proto() freed the skb on
			 * failure, so only account the drop here.
			 */
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			return -ENOMEM;
		}
	}

	return mlxsw_sp_txhdr_ptp_data_construct(mlxsw_core, mlxsw_sp_port, skb,
						 tx_info);
}
1715