xref: /linux/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c (revision 93a3545d812ae7cfe4426374e00a7d8f64ac02e0)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Atlantic Network Driver
3  *
4  * Copyright (C) 2014-2019 aQuantia Corporation
5  * Copyright (C) 2019-2020 Marvell International Ltd.
6  */
7 
8 /* File aq_ptp.c:
9  * Definition of functions for Linux PTP support.
10  */
11 
12 #include <linux/ptp_clock_kernel.h>
13 #include <linux/ptp_classify.h>
14 #include <linux/interrupt.h>
15 #include <linux/clocksource.h>
16 
17 #include "aq_nic.h"
18 #include "aq_ptp.h"
19 #include "aq_ring.h"
20 #include "aq_phy.h"
21 #include "aq_filters.h"
22 
23 #if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
24 
/* How long (in jiffies) an skb may wait for its Tx hardware timestamp
 * before the watchdog in aq_ptp_tx_timeout_check() discards it.
 */
#define AQ_PTP_TX_TIMEOUT        (HZ *  10)

/* Period of the delayed work that polls for PHY sync (ext. timestamp) events. */
#define POLL_SYNC_TIMER_MS 15

/* Indices into the ptp_offset[] table, one entry per supported link speed. */
enum ptp_speed_offsets {
	ptp_offset_idx_10 = 0,
	ptp_offset_idx_100,
	ptp_offset_idx_1000,
	ptp_offset_idx_2500,
	ptp_offset_idx_5000,
	ptp_offset_idx_10000,
};

/* Spinlock-protected circular buffer of skbs awaiting a Tx HW timestamp.
 * One slot is always kept free so that head == tail means "empty"
 * (see __aq_ptp_skb_put()/__aq_ptp_skb_get()).
 */
struct ptp_skb_ring {
	struct sk_buff **buff;
	spinlock_t lock;
	unsigned int size;
	unsigned int head;
	unsigned int tail;
};

/* Watchdog state for outstanding Tx timestamp requests. */
struct ptp_tx_timeout {
	spinlock_t lock;
	bool active;		/* armed while timestamps are pending */
	unsigned long tx_start;	/* jiffies when the oldest request started */
};

/* Per-NIC PTP context, allocated in aq_ptp_init(). */
struct aq_ptp_s {
	struct aq_nic_s *aq_nic;
	struct hwtstamp_config hwtstamp_config;
	spinlock_t ptp_lock;		/* serializes HW clock get/adjust ops */
	spinlock_t ptp_ring_lock;	/* serializes PTP Tx ring submission */
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info ptp_info;

	/* Per-direction timestamp corrections for the current link speed,
	 * applied in aq_ptp_tx_hwtstamp()/aq_ptp_rx_hwtstamp().
	 */
	atomic_t offset_egress;
	atomic_t offset_ingress;

	struct aq_ring_param_s ptp_ring_param;

	struct ptp_tx_timeout ptp_tx_timeout;

	unsigned int idx_vector;	/* IRQ vector index for the PTP rings */
	struct napi_struct napi;

	/* Dedicated descriptor rings: PTP Tx, PTP Rx, and HW timestamp Rx. */
	struct aq_ring_s ptp_tx;
	struct aq_ring_s ptp_rx;
	struct aq_ring_s hwts_rx;

	struct ptp_skb_ring skb_ring;	/* skbs waiting for Tx timestamps */

	/* Rx filters steering PTP traffic into the PTP Rx queue. */
	struct aq_rx_filter_l3l4 udp_filter;
	struct aq_rx_filter_l2 eth_type_filter;

	struct delayed_work poll_sync;
	u32 poll_timeout_ms;

	bool extts_pin_enabled;
	u64 last_sync1588_ts;
};

/* One per-speed timestamp correction entry, loaded from firmware. */
struct ptp_tm_offset {
	unsigned int mbps;
	int egress;
	int ingress;
};

/* Table with one entry per ptp_speed_offsets index; filled by
 * aq_ptp_offset_init_from_fw().
 */
static struct ptp_tm_offset ptp_offset[6];
93 
94 void aq_ptp_tm_offset_set(struct aq_nic_s *aq_nic, unsigned int mbps)
95 {
96 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
97 	int i, egress, ingress;
98 
99 	if (!aq_ptp)
100 		return;
101 
102 	egress = 0;
103 	ingress = 0;
104 
105 	for (i = 0; i < ARRAY_SIZE(ptp_offset); i++) {
106 		if (mbps == ptp_offset[i].mbps) {
107 			egress = ptp_offset[i].egress;
108 			ingress = ptp_offset[i].ingress;
109 			break;
110 		}
111 	}
112 
113 	atomic_set(&aq_ptp->offset_egress, egress);
114 	atomic_set(&aq_ptp->offset_ingress, ingress);
115 }
116 
117 static int __aq_ptp_skb_put(struct ptp_skb_ring *ring, struct sk_buff *skb)
118 {
119 	unsigned int next_head = (ring->head + 1) % ring->size;
120 
121 	if (next_head == ring->tail)
122 		return -ENOMEM;
123 
124 	ring->buff[ring->head] = skb_get(skb);
125 	ring->head = next_head;
126 
127 	return 0;
128 }
129 
130 static int aq_ptp_skb_put(struct ptp_skb_ring *ring, struct sk_buff *skb)
131 {
132 	unsigned long flags;
133 	int ret;
134 
135 	spin_lock_irqsave(&ring->lock, flags);
136 	ret = __aq_ptp_skb_put(ring, skb);
137 	spin_unlock_irqrestore(&ring->lock, flags);
138 
139 	return ret;
140 }
141 
142 static struct sk_buff *__aq_ptp_skb_get(struct ptp_skb_ring *ring)
143 {
144 	struct sk_buff *skb;
145 
146 	if (ring->tail == ring->head)
147 		return NULL;
148 
149 	skb = ring->buff[ring->tail];
150 	ring->tail = (ring->tail + 1) % ring->size;
151 
152 	return skb;
153 }
154 
155 static struct sk_buff *aq_ptp_skb_get(struct ptp_skb_ring *ring)
156 {
157 	unsigned long flags;
158 	struct sk_buff *skb;
159 
160 	spin_lock_irqsave(&ring->lock, flags);
161 	skb = __aq_ptp_skb_get(ring);
162 	spin_unlock_irqrestore(&ring->lock, flags);
163 
164 	return skb;
165 }
166 
167 static unsigned int aq_ptp_skb_buf_len(struct ptp_skb_ring *ring)
168 {
169 	unsigned long flags;
170 	unsigned int len;
171 
172 	spin_lock_irqsave(&ring->lock, flags);
173 	len = (ring->head >= ring->tail) ?
174 	ring->head - ring->tail :
175 	ring->size - ring->tail + ring->head;
176 	spin_unlock_irqrestore(&ring->lock, flags);
177 
178 	return len;
179 }
180 
181 static int aq_ptp_skb_ring_init(struct ptp_skb_ring *ring, unsigned int size)
182 {
183 	struct sk_buff **buff = kmalloc(sizeof(*buff) * size, GFP_KERNEL);
184 
185 	if (!buff)
186 		return -ENOMEM;
187 
188 	spin_lock_init(&ring->lock);
189 
190 	ring->buff = buff;
191 	ring->size = size;
192 	ring->head = 0;
193 	ring->tail = 0;
194 
195 	return 0;
196 }
197 
/* Drop every skb still queued in the ring, releasing their references. */
static void aq_ptp_skb_ring_clean(struct ptp_skb_ring *ring)
{
	struct sk_buff *skb;

	for (skb = aq_ptp_skb_get(ring); skb; skb = aq_ptp_skb_get(ring))
		dev_kfree_skb_any(skb);
}
205 
206 static void aq_ptp_skb_ring_release(struct ptp_skb_ring *ring)
207 {
208 	if (ring->buff) {
209 		aq_ptp_skb_ring_clean(ring);
210 		kfree(ring->buff);
211 		ring->buff = NULL;
212 	}
213 }
214 
215 static void aq_ptp_tx_timeout_init(struct ptp_tx_timeout *timeout)
216 {
217 	spin_lock_init(&timeout->lock);
218 	timeout->active = false;
219 }
220 
221 static void aq_ptp_tx_timeout_start(struct aq_ptp_s *aq_ptp)
222 {
223 	struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
224 	unsigned long flags;
225 
226 	spin_lock_irqsave(&timeout->lock, flags);
227 	timeout->active = true;
228 	timeout->tx_start = jiffies;
229 	spin_unlock_irqrestore(&timeout->lock, flags);
230 }
231 
232 static void aq_ptp_tx_timeout_update(struct aq_ptp_s *aq_ptp)
233 {
234 	if (!aq_ptp_skb_buf_len(&aq_ptp->skb_ring)) {
235 		struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
236 		unsigned long flags;
237 
238 		spin_lock_irqsave(&timeout->lock, flags);
239 		timeout->active = false;
240 		spin_unlock_irqrestore(&timeout->lock, flags);
241 	}
242 }
243 
244 static void aq_ptp_tx_timeout_check(struct aq_ptp_s *aq_ptp)
245 {
246 	struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
247 	unsigned long flags;
248 	bool timeout_flag;
249 
250 	timeout_flag = false;
251 
252 	spin_lock_irqsave(&timeout->lock, flags);
253 	if (timeout->active) {
254 		timeout_flag = time_is_before_jiffies(timeout->tx_start +
255 						      AQ_PTP_TX_TIMEOUT);
256 		/* reset active flag if timeout detected */
257 		if (timeout_flag)
258 			timeout->active = false;
259 	}
260 	spin_unlock_irqrestore(&timeout->lock, flags);
261 
262 	if (timeout_flag) {
263 		aq_ptp_skb_ring_clean(&aq_ptp->skb_ring);
264 		netdev_err(aq_ptp->aq_nic->ndev,
265 			   "PTP Timeout. Clearing Tx Timestamp SKBs\n");
266 	}
267 }
268 
/* aq_ptp_adjfine
 * @ptp: the ptp clock structure
 * @scaled_ppm: desired frequency offset in parts per million, with a
 *	16-bit binary fractional field (ppm << 16)
 *
 * adjust the frequency of the ptp cycle counter by the
 * indicated offset from the base frequency.  The value is converted to
 * plain ppb via scaled_ppm_to_ppb() before being handed to firmware.
 */
static int aq_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;

	/* firmware requests are serialized by fwreq_mutex */
	mutex_lock(&aq_nic->fwreq_mutex);
	aq_nic->aq_hw_ops->hw_adj_clock_freq(aq_nic->aq_hw,
					     scaled_ppm_to_ppb(scaled_ppm));
	mutex_unlock(&aq_nic->fwreq_mutex);

	return 0;
}
288 
289 /* aq_ptp_adjtime
290  * @ptp: the ptp clock structure
291  * @delta: offset to adjust the cycle counter by
292  *
293  * adjust the timer by resetting the timecounter structure.
294  */
295 static int aq_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
296 {
297 	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
298 	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
299 	unsigned long flags;
300 
301 	spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
302 	aq_nic->aq_hw_ops->hw_adj_sys_clock(aq_nic->aq_hw, delta);
303 	spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);
304 
305 	return 0;
306 }
307 
308 /* aq_ptp_gettime
309  * @ptp: the ptp clock structure
310  * @ts: timespec structure to hold the current time value
311  *
312  * read the timecounter and return the correct value on ns,
313  * after converting it into a struct timespec.
314  */
315 static int aq_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
316 {
317 	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
318 	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
319 	unsigned long flags;
320 	u64 ns;
321 
322 	spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
323 	aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &ns);
324 	spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);
325 
326 	*ts = ns_to_timespec64(ns);
327 
328 	return 0;
329 }
330 
331 /* aq_ptp_settime
332  * @ptp: the ptp clock structure
333  * @ts: the timespec containing the new time for the cycle counter
334  *
335  * reset the timecounter to use a new base value instead of the kernel
336  * wall timer value.
337  */
338 static int aq_ptp_settime(struct ptp_clock_info *ptp,
339 			  const struct timespec64 *ts)
340 {
341 	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
342 	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
343 	unsigned long flags;
344 	u64 ns = timespec64_to_ns(ts);
345 	u64 now;
346 
347 	spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
348 	aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &now);
349 	aq_nic->aq_hw_ops->hw_adj_sys_clock(aq_nic->aq_hw, (s64)ns - (s64)now);
350 
351 	spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);
352 
353 	return 0;
354 }
355 
356 static void aq_ptp_convert_to_hwtstamp(struct aq_ptp_s *aq_ptp,
357 				       struct skb_shared_hwtstamps *hwtstamp,
358 				       u64 timestamp)
359 {
360 	memset(hwtstamp, 0, sizeof(*hwtstamp));
361 	hwtstamp->hwtstamp = ns_to_ktime(timestamp);
362 }
363 
364 static int aq_ptp_hw_pin_conf(struct aq_nic_s *aq_nic, u32 pin_index, u64 start,
365 			      u64 period)
366 {
367 	if (period)
368 		netdev_dbg(aq_nic->ndev,
369 			   "Enable GPIO %d pulsing, start time %llu, period %u\n",
370 			   pin_index, start, (u32)period);
371 	else
372 		netdev_dbg(aq_nic->ndev,
373 			   "Disable GPIO %d pulsing, start time %llu, period %u\n",
374 			   pin_index, start, (u32)period);
375 
376 	/* Notify hardware of request to being sending pulses.
377 	 * If period is ZERO then pulsen is disabled.
378 	 */
379 	mutex_lock(&aq_nic->fwreq_mutex);
380 	aq_nic->aq_hw_ops->hw_gpio_pulse(aq_nic->aq_hw, pin_index,
381 					 start, (u32)period);
382 	mutex_unlock(&aq_nic->fwreq_mutex);
383 
384 	return 0;
385 }
386 
387 static int aq_ptp_perout_pin_configure(struct ptp_clock_info *ptp,
388 				       struct ptp_clock_request *rq, int on)
389 {
390 	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
391 	struct ptp_clock_time *t = &rq->perout.period;
392 	struct ptp_clock_time *s = &rq->perout.start;
393 	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
394 	u64 start, period;
395 	u32 pin_index = rq->perout.index;
396 
397 	/* verify the request channel is there */
398 	if (pin_index >= ptp->n_per_out)
399 		return -EINVAL;
400 
401 	/* we cannot support periods greater
402 	 * than 4 seconds due to reg limit
403 	 */
404 	if (t->sec > 4 || t->sec < 0)
405 		return -ERANGE;
406 
407 	/* convert to unsigned 64b ns,
408 	 * verify we can put it in a 32b register
409 	 */
410 	period = on ? t->sec * NSEC_PER_SEC + t->nsec : 0;
411 
412 	/* verify the value is in range supported by hardware */
413 	if (period > U32_MAX)
414 		return -ERANGE;
415 	/* convert to unsigned 64b ns */
416 	/* TODO convert to AQ time */
417 	start = on ? s->sec * NSEC_PER_SEC + s->nsec : 0;
418 
419 	aq_ptp_hw_pin_conf(aq_nic, pin_index, start, period);
420 
421 	return 0;
422 }
423 
424 static int aq_ptp_pps_pin_configure(struct ptp_clock_info *ptp,
425 				    struct ptp_clock_request *rq, int on)
426 {
427 	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
428 	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
429 	u64 start, period;
430 	u32 pin_index = 0;
431 	u32 rest = 0;
432 
433 	/* verify the request channel is there */
434 	if (pin_index >= ptp->n_per_out)
435 		return -EINVAL;
436 
437 	aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &start);
438 	div_u64_rem(start, NSEC_PER_SEC, &rest);
439 	period = on ? NSEC_PER_SEC : 0; /* PPS - pulse per second */
440 	start = on ? start - rest + NSEC_PER_SEC *
441 		(rest > 990000000LL ? 2 : 1) : 0;
442 
443 	aq_ptp_hw_pin_conf(aq_nic, pin_index, start, period);
444 
445 	return 0;
446 }
447 
448 static void aq_ptp_extts_pin_ctrl(struct aq_ptp_s *aq_ptp)
449 {
450 	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
451 	u32 enable = aq_ptp->extts_pin_enabled;
452 
453 	if (aq_nic->aq_hw_ops->hw_extts_gpio_enable)
454 		aq_nic->aq_hw_ops->hw_extts_gpio_enable(aq_nic->aq_hw, 0,
455 							enable);
456 }
457 
458 static int aq_ptp_extts_pin_configure(struct ptp_clock_info *ptp,
459 				      struct ptp_clock_request *rq, int on)
460 {
461 	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
462 
463 	u32 pin_index = rq->extts.index;
464 
465 	if (pin_index >= ptp->n_ext_ts)
466 		return -EINVAL;
467 
468 	aq_ptp->extts_pin_enabled = !!on;
469 	if (on) {
470 		aq_ptp->poll_timeout_ms = POLL_SYNC_TIMER_MS;
471 		cancel_delayed_work_sync(&aq_ptp->poll_sync);
472 		schedule_delayed_work(&aq_ptp->poll_sync,
473 				      msecs_to_jiffies(aq_ptp->poll_timeout_ms));
474 	}
475 
476 	aq_ptp_extts_pin_ctrl(aq_ptp);
477 	return 0;
478 }
479 
480 /* aq_ptp_gpio_feature_enable
481  * @ptp: the ptp clock structure
482  * @rq: the requested feature to change
483  * @on: whether to enable or disable the feature
484  */
485 static int aq_ptp_gpio_feature_enable(struct ptp_clock_info *ptp,
486 				      struct ptp_clock_request *rq, int on)
487 {
488 	switch (rq->type) {
489 	case PTP_CLK_REQ_EXTTS:
490 		return aq_ptp_extts_pin_configure(ptp, rq, on);
491 	case PTP_CLK_REQ_PEROUT:
492 		return aq_ptp_perout_pin_configure(ptp, rq, on);
493 	case PTP_CLK_REQ_PPS:
494 		return aq_ptp_pps_pin_configure(ptp, rq, on);
495 	default:
496 		return -EOPNOTSUPP;
497 	}
498 
499 	return 0;
500 }
501 
502 /* aq_ptp_verify
503  * @ptp: the ptp clock structure
504  * @pin: index of the pin in question
505  * @func: the desired function to use
506  * @chan: the function channel index to use
507  */
508 static int aq_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
509 			 enum ptp_pin_function func, unsigned int chan)
510 {
511 	/* verify the requested pin is there */
512 	if (!ptp->pin_config || pin >= ptp->n_pins)
513 		return -EINVAL;
514 
515 	/* enforce locked channels, no changing them */
516 	if (chan != ptp->pin_config[pin].chan)
517 		return -EINVAL;
518 
519 	/* we want to keep the functions locked as well */
520 	if (func != ptp->pin_config[pin].func)
521 		return -EINVAL;
522 
523 	return 0;
524 }
525 
526 /* aq_ptp_tx_hwtstamp - utility function which checks for TX time stamp
527  * @adapter: the private adapter struct
528  *
529  * if the timestamp is valid, we convert it into the timecounter ns
530  * value, then store that result into the hwtstamps structure which
531  * is passed up the network stack
532  */
533 void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp)
534 {
535 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
536 	struct sk_buff *skb = aq_ptp_skb_get(&aq_ptp->skb_ring);
537 	struct skb_shared_hwtstamps hwtstamp;
538 
539 	if (!skb) {
540 		netdev_err(aq_nic->ndev, "have timestamp but tx_queues empty\n");
541 		return;
542 	}
543 
544 	timestamp += atomic_read(&aq_ptp->offset_egress);
545 	aq_ptp_convert_to_hwtstamp(aq_ptp, &hwtstamp, timestamp);
546 	skb_tstamp_tx(skb, &hwtstamp);
547 	dev_kfree_skb_any(skb);
548 
549 	aq_ptp_tx_timeout_update(aq_ptp);
550 }
551 
552 /* aq_ptp_rx_hwtstamp - utility function which checks for RX time stamp
553  * @adapter: pointer to adapter struct
554  * @skb: particular skb to send timestamp with
555  *
556  * if the timestamp is valid, we convert it into the timecounter ns
557  * value, then store that result into the hwtstamps structure which
558  * is passed up the network stack
559  */
560 static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct sk_buff *skb,
561 			       u64 timestamp)
562 {
563 	timestamp -= atomic_read(&aq_ptp->offset_ingress);
564 	aq_ptp_convert_to_hwtstamp(aq_ptp, skb_hwtstamps(skb), timestamp);
565 }
566 
567 void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
568 				struct hwtstamp_config *config)
569 {
570 	*config = aq_ptp->hwtstamp_config;
571 }
572 
/* Build the Rx filters that steer PTP traffic into the dedicated PTP Rx
 * queue: an L3/L4 UDP filter matched on the PTP event port, and an L2
 * filter matched on the IEEE 1588 EtherType.
 */
static void aq_ptp_prepare_filters(struct aq_ptp_s *aq_ptp)
{
	/* Command word: enable the L3/L4 filter, compare L4 protocol (UDP)
	 * and L4 destination port, and send matches to the PTP Rx queue.
	 */
	aq_ptp->udp_filter.cmd = HW_ATL_RX_ENABLE_FLTR_L3L4 |
			       HW_ATL_RX_ENABLE_CMP_PROT_L4 |
			       HW_ATL_RX_UDP |
			       HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 |
			       HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT |
			       HW_ATL_RX_ENABLE_QUEUE_L3L4 |
			       aq_ptp->ptp_rx.idx << HW_ATL_RX_QUEUE_FL3L4_SHIFT;
	/* PTP event messages arrive on this UDP destination port */
	aq_ptp->udp_filter.p_dst = PTP_EV_PORT;

	/* Layer-2 PTP frames are matched by EtherType */
	aq_ptp->eth_type_filter.ethertype = ETH_P_1588;
	aq_ptp->eth_type_filter.queue = aq_ptp->ptp_rx.idx;
}
587 
/* Apply a new hardware timestamping configuration.
 *
 * When Tx timestamping or PTP v2 event Rx filtering is requested, the
 * PTP steering filters are programmed into hardware and the PTP
 * datapath flag is set; otherwise the filters are removed and the flag
 * cleared.  The new config is cached only if hardware accepted it.
 *
 * Returns 0 on success or -EREMOTEIO if programming a filter failed.
 */
int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
			       struct hwtstamp_config *config)
{
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	const struct aq_hw_ops *hw_ops;
	int err = 0;

	hw_ops = aq_nic->aq_hw_ops;
	if (config->tx_type == HWTSTAMP_TX_ON ||
	    config->rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT) {
		/* enable path: install UDP then L2 filters */
		aq_ptp_prepare_filters(aq_ptp);
		if (hw_ops->hw_filter_l3l4_set) {
			err = hw_ops->hw_filter_l3l4_set(aq_nic->aq_hw,
							 &aq_ptp->udp_filter);
		}
		if (!err && hw_ops->hw_filter_l2_set) {
			err = hw_ops->hw_filter_l2_set(aq_nic->aq_hw,
						       &aq_ptp->eth_type_filter);
		}
		aq_utils_obj_set(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP);
	} else {
		/* disable path: clear the enable bit and remove filters */
		aq_ptp->udp_filter.cmd &= ~HW_ATL_RX_ENABLE_FLTR_L3L4;
		if (hw_ops->hw_filter_l3l4_set) {
			err = hw_ops->hw_filter_l3l4_set(aq_nic->aq_hw,
							 &aq_ptp->udp_filter);
		}
		if (!err && hw_ops->hw_filter_l2_clear) {
			err = hw_ops->hw_filter_l2_clear(aq_nic->aq_hw,
							&aq_ptp->eth_type_filter);
		}
		aq_utils_obj_clear(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP);
	}

	if (err)
		return -EREMOTEIO;

	aq_ptp->hwtstamp_config = *config;

	return 0;
}
628 
629 bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
630 {
631 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
632 
633 	if (!aq_ptp)
634 		return false;
635 
636 	return &aq_ptp->ptp_tx == ring ||
637 	       &aq_ptp->ptp_rx == ring || &aq_ptp->hwts_rx == ring;
638 }
639 
640 u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
641 		      unsigned int len)
642 {
643 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
644 	u64 timestamp = 0;
645 	u16 ret = aq_nic->aq_hw_ops->rx_extract_ts(aq_nic->aq_hw,
646 						   p, len, &timestamp);
647 
648 	if (ret > 0)
649 		aq_ptp_rx_hwtstamp(aq_ptp, skb, timestamp);
650 
651 	return ret;
652 }
653 
/* NAPI poll handler for the PTP vector: services the PTP Tx ring, the
 * hardware-timestamp Rx ring and the PTP Rx ring in that order, then
 * re-enables the interrupt when under budget.
 */
static int aq_ptp_poll(struct napi_struct *napi, int budget)
{
	struct aq_ptp_s *aq_ptp = container_of(napi, struct aq_ptp_s, napi);
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	bool was_cleaned = false;
	int work_done = 0;
	int err;

	/* Processing PTP TX traffic */
	err = aq_nic->aq_hw_ops->hw_ring_tx_head_update(aq_nic->aq_hw,
							&aq_ptp->ptp_tx);
	if (err < 0)
		goto err_exit;

	if (aq_ptp->ptp_tx.sw_head != aq_ptp->ptp_tx.hw_head) {
		aq_ring_tx_clean(&aq_ptp->ptp_tx);

		was_cleaned = true;
	}

	/* Processing HW_TIMESTAMP RX traffic */
	err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_receive(aq_nic->aq_hw,
							 &aq_ptp->hwts_rx);
	if (err < 0)
		goto err_exit;

	if (aq_ptp->hwts_rx.sw_head != aq_ptp->hwts_rx.hw_head) {
		aq_ring_hwts_rx_clean(&aq_ptp->hwts_rx, aq_nic);

		/* refill descriptors consumed by the clean above */
		err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_fill(aq_nic->aq_hw,
							      &aq_ptp->hwts_rx);
		if (err < 0)
			goto err_exit;

		was_cleaned = true;
	}

	/* Processing PTP RX traffic */
	err = aq_nic->aq_hw_ops->hw_ring_rx_receive(aq_nic->aq_hw,
						    &aq_ptp->ptp_rx);
	if (err < 0)
		goto err_exit;

	if (aq_ptp->ptp_rx.sw_head != aq_ptp->ptp_rx.hw_head) {
		unsigned int sw_tail_old;

		err = aq_ring_rx_clean(&aq_ptp->ptp_rx, napi, &work_done, budget);
		if (err < 0)
			goto err_exit;

		/* refill and tell hardware about the new tail */
		sw_tail_old = aq_ptp->ptp_rx.sw_tail;
		err = aq_ring_rx_fill(&aq_ptp->ptp_rx);
		if (err < 0)
			goto err_exit;

		err = aq_nic->aq_hw_ops->hw_ring_rx_fill(aq_nic->aq_hw,
							 &aq_ptp->ptp_rx,
							 sw_tail_old);
		if (err < 0)
			goto err_exit;
	}

	/* Tx/hwts work is not counted in work_done; claim the full budget
	 * when any of it was serviced so NAPI polls us again.
	 */
	if (was_cleaned)
		work_done = budget;

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		aq_nic->aq_hw_ops->hw_irq_enable(aq_nic->aq_hw,
					BIT_ULL(aq_ptp->ptp_ring_param.vec_idx));
	}

err_exit:
	return work_done;
}
728 
729 static irqreturn_t aq_ptp_isr(int irq, void *private)
730 {
731 	struct aq_ptp_s *aq_ptp = private;
732 	int err = 0;
733 
734 	if (!aq_ptp) {
735 		err = -EINVAL;
736 		goto err_exit;
737 	}
738 	napi_schedule(&aq_ptp->napi);
739 
740 err_exit:
741 	return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
742 }
743 
/* Transmit @skb on the dedicated PTP Tx ring, queueing it to await a
 * hardware Tx timestamp.  Returns NETDEV_TX_OK (dropping the skb on
 * oversized/empty input) or NETDEV_TX_BUSY when the ring is full.
 */
int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	struct aq_ring_s *ring = &aq_ptp->ptp_tx;
	unsigned long irq_flags;
	int err = NETDEV_TX_OK;
	unsigned int frags;

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	frags = skb_shinfo(skb)->nr_frags + 1;
	/* Fragment count is bounded because PTP traffic normally
	 * does not use jumbo frames.
	 */
	if (frags > AQ_CFG_SKB_FRAGS_MAX || frags > aq_ring_avail_dx(ring)) {
		/* Drop the packet: delaying PTP traffic makes no sense */
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	/* Queue the skb (with an extra reference) so the timestamp
	 * handler can match it when the hardware reports the Tx time.
	 */
	err = aq_ptp_skb_put(&aq_ptp->skb_ring, skb);
	if (err) {
		netdev_err(aq_nic->ndev, "SKB Ring is overflow (%u)!\n",
			   ring->size);
		return NETDEV_TX_BUSY;
	}
	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	aq_ptp_tx_timeout_start(aq_ptp);
	skb_tx_timestamp(skb);

	/* ptp_ring_lock serializes descriptor writes to the PTP Tx ring */
	spin_lock_irqsave(&aq_nic->aq_ptp->ptp_ring_lock, irq_flags);
	frags = aq_nic_map_skb(aq_nic, skb, ring);

	if (likely(frags)) {
		err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw,
						       ring, frags);
		if (err >= 0) {
			++ring->stats.tx.packets;
			ring->stats.tx.bytes += skb->len;
		}
	} else {
		/* NOTE(review): on this BUSY path the reference queued into
		 * skb_ring above is not removed, and the stack may resubmit
		 * the skb, adding a second entry — verify against upstream.
		 */
		err = NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&aq_nic->aq_ptp->ptp_ring_lock, irq_flags);

err_exit:
	return err;
}
796 
797 void aq_ptp_service_task(struct aq_nic_s *aq_nic)
798 {
799 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
800 
801 	if (!aq_ptp)
802 		return;
803 
804 	aq_ptp_tx_timeout_check(aq_ptp);
805 }
806 
807 int aq_ptp_irq_alloc(struct aq_nic_s *aq_nic)
808 {
809 	struct pci_dev *pdev = aq_nic->pdev;
810 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
811 	int err = 0;
812 
813 	if (!aq_ptp)
814 		return 0;
815 
816 	if (pdev->msix_enabled || pdev->msi_enabled) {
817 		err = request_irq(pci_irq_vector(pdev, aq_ptp->idx_vector),
818 				  aq_ptp_isr, 0, aq_nic->ndev->name, aq_ptp);
819 	} else {
820 		err = -EINVAL;
821 		goto err_exit;
822 	}
823 
824 err_exit:
825 	return err;
826 }
827 
828 void aq_ptp_irq_free(struct aq_nic_s *aq_nic)
829 {
830 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
831 	struct pci_dev *pdev = aq_nic->pdev;
832 
833 	if (!aq_ptp)
834 		return;
835 
836 	free_irq(pci_irq_vector(pdev, aq_ptp->idx_vector), aq_ptp);
837 }
838 
/* Initialize and fill the three PTP rings (Tx, Rx, hwts Rx) in both
 * software and hardware.  Returns 0 or a negative error; on Rx-fill
 * failure the PTP Rx ring is unwound via err_rx_free.
 */
int aq_ptp_ring_init(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	int err = 0;

	if (!aq_ptp)
		return 0;

	err = aq_ring_init(&aq_ptp->ptp_tx);
	if (err < 0)
		goto err_exit;
	err = aq_nic->aq_hw_ops->hw_ring_tx_init(aq_nic->aq_hw,
						 &aq_ptp->ptp_tx,
						 &aq_ptp->ptp_ring_param);
	if (err < 0)
		goto err_exit;

	err = aq_ring_init(&aq_ptp->ptp_rx);
	if (err < 0)
		goto err_exit;
	err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw,
						 &aq_ptp->ptp_rx,
						 &aq_ptp->ptp_ring_param);
	if (err < 0)
		goto err_exit;

	err = aq_ring_rx_fill(&aq_ptp->ptp_rx);
	if (err < 0)
		goto err_rx_free;
	err = aq_nic->aq_hw_ops->hw_ring_rx_fill(aq_nic->aq_hw,
						 &aq_ptp->ptp_rx,
						 0U);
	if (err < 0)
		goto err_rx_free;

	/* NOTE(review): failures below jump to err_exit, leaving the
	 * already-filled ptp_rx ring intact — confirm the caller's error
	 * path performs the corresponding deinit.
	 */
	err = aq_ring_init(&aq_ptp->hwts_rx);
	if (err < 0)
		goto err_rx_free;
	err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw,
						 &aq_ptp->hwts_rx,
						 &aq_ptp->ptp_ring_param);
	if (err < 0)
		goto err_exit;
	err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_fill(aq_nic->aq_hw,
						      &aq_ptp->hwts_rx);
	if (err < 0)
		goto err_exit;

	return err;

err_rx_free:
	aq_ring_rx_deinit(&aq_ptp->ptp_rx);
err_exit:
	return err;
}
894 
/* Start the PTP Tx, Rx and hwts Rx rings in hardware, then enable NAPI.
 * On failure, rings already started are left running (the caller's
 * teardown path stops them).
 */
int aq_ptp_ring_start(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	int err = 0;

	if (!aq_ptp)
		return 0;

	err = aq_nic->aq_hw_ops->hw_ring_tx_start(aq_nic->aq_hw, &aq_ptp->ptp_tx);
	if (err < 0)
		goto err_exit;

	err = aq_nic->aq_hw_ops->hw_ring_rx_start(aq_nic->aq_hw, &aq_ptp->ptp_rx);
	if (err < 0)
		goto err_exit;

	err = aq_nic->aq_hw_ops->hw_ring_rx_start(aq_nic->aq_hw,
						  &aq_ptp->hwts_rx);
	if (err < 0)
		goto err_exit;

	/* NAPI is enabled only after every ring started successfully */
	napi_enable(&aq_ptp->napi);

err_exit:
	return err;
}
921 
/* Stop the PTP rings in hardware, then disable NAPI processing —
 * the reverse of aq_ptp_ring_start().
 */
void aq_ptp_ring_stop(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp)
		return;

	aq_nic->aq_hw_ops->hw_ring_tx_stop(aq_nic->aq_hw, &aq_ptp->ptp_tx);
	aq_nic->aq_hw_ops->hw_ring_rx_stop(aq_nic->aq_hw, &aq_ptp->ptp_rx);

	aq_nic->aq_hw_ops->hw_ring_rx_stop(aq_nic->aq_hw, &aq_ptp->hwts_rx);

	napi_disable(&aq_ptp->napi);
}
936 
937 void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic)
938 {
939 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
940 
941 	if (!aq_ptp || !aq_ptp->ptp_tx.aq_nic || !aq_ptp->ptp_rx.aq_nic)
942 		return;
943 
944 	aq_ring_tx_clean(&aq_ptp->ptp_tx);
945 	aq_ring_rx_deinit(&aq_ptp->ptp_rx);
946 }
947 
948 #define PTP_8TC_RING_IDX             8
949 #define PTP_4TC_RING_IDX            16
950 #define PTP_HWST_RING_IDX           31
951 
952 /* Index must be 8 (8 TCs) or 16 (4 TCs).
953  * It depends on Traffic Class mode.
954  */
955 static unsigned int ptp_ring_idx(const enum aq_tc_mode tc_mode)
956 {
957 	if (tc_mode == AQ_TC_MODE_8TCS)
958 		return PTP_8TC_RING_IDX;
959 
960 	return PTP_4TC_RING_IDX;
961 }
962 
/* Allocate the PTP Tx/Rx/hwts descriptor rings and the pending-skb
 * ring, and bind the PTP vector to a CPU.  On failure, every resource
 * acquired so far is released via the goto-unwind chain.
 */
int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	unsigned int tx_ring_idx, rx_ring_idx;
	struct aq_ring_s *hwts;
	struct aq_ring_s *ring;
	int err;

	if (!aq_ptp)
		return 0;

	tx_ring_idx = ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);

	ring = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic,
				tx_ring_idx, &aq_nic->aq_nic_cfg);
	if (!ring) {
		err = -ENOMEM;
		goto err_exit;
	}

	rx_ring_idx = ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);

	ring = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic,
				rx_ring_idx, &aq_nic->aq_nic_cfg);
	if (!ring) {
		err = -ENOMEM;
		goto err_exit_ptp_tx;
	}

	hwts = aq_ring_hwts_rx_alloc(&aq_ptp->hwts_rx, aq_nic, PTP_HWST_RING_IDX,
				     aq_nic->aq_nic_cfg.rxds,
				     aq_nic->aq_nic_cfg.aq_hw_caps->rxd_size);
	if (!hwts) {
		err = -ENOMEM;
		goto err_exit_ptp_rx;
	}

	/* skb ring sized to match the Rx descriptor count */
	err = aq_ptp_skb_ring_init(&aq_ptp->skb_ring, aq_nic->aq_nic_cfg.rxds);
	if (err != 0) {
		err = -ENOMEM;
		goto err_exit_hwts_rx;
	}

	/* pin the PTP vector's ring processing to one CPU */
	aq_ptp->ptp_ring_param.vec_idx = aq_ptp->idx_vector;
	aq_ptp->ptp_ring_param.cpu = aq_ptp->ptp_ring_param.vec_idx +
			aq_nic_get_cfg(aq_nic)->aq_rss.base_cpu_number;
	cpumask_set_cpu(aq_ptp->ptp_ring_param.cpu,
			&aq_ptp->ptp_ring_param.affinity_mask);

	return 0;

err_exit_hwts_rx:
	aq_ring_free(&aq_ptp->hwts_rx);
err_exit_ptp_rx:
	aq_ring_free(&aq_ptp->ptp_rx);
err_exit_ptp_tx:
	aq_ring_free(&aq_ptp->ptp_tx);
err_exit:
	return err;
}
1023 
1024 void aq_ptp_ring_free(struct aq_nic_s *aq_nic)
1025 {
1026 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
1027 
1028 	if (!aq_ptp)
1029 		return;
1030 
1031 	aq_ring_free(&aq_ptp->ptp_tx);
1032 	aq_ring_free(&aq_ptp->ptp_rx);
1033 	aq_ring_free(&aq_ptp->hwts_rx);
1034 
1035 	aq_ptp_skb_ring_release(&aq_ptp->skb_ring);
1036 }
1037 
#define MAX_PTP_GPIO_COUNT 4

/* Template ptp_clock_info copied into each aq_ptp_s at init time.
 * The pin counts (n_per_out, n_ext_ts, n_pins) and pin_config start
 * at zero/NULL and are filled in by aq_ptp_gpio_init() from the
 * firmware-reported GPIO capabilities.
 */
static struct ptp_clock_info aq_ptp_clock = {
	.owner		= THIS_MODULE,
	.name		= "atlantic ptp",
	.max_adj	= 999999999,
	.n_ext_ts	= 0,
	.pps		= 0,
	.adjfine	= aq_ptp_adjfine,
	.adjtime	= aq_ptp_adjtime,
	.gettime64	= aq_ptp_gettime,
	.settime64	= aq_ptp_settime,
	.n_per_out	= 0,
	.enable		= aq_ptp_gpio_feature_enable,
	.n_pins		= 0,
	.verify		= aq_ptp_verify,
	.pin_config	= NULL,
};
1056 
/* Fill one ptp_offset[] entry; the multi-statement body is wrapped in
 * do { } while (0) so the macro behaves as a single statement.
 */
#define ptp_offset_init(__idx, __mbps, __egress, __ingress)   do { \
		ptp_offset[__idx].mbps = (__mbps); \
		ptp_offset[__idx].egress = (__egress); \
		ptp_offset[__idx].ingress = (__ingress); } \
		while (0)
1062 
/* Populate the per-speed timestamp correction table from the values
 * firmware reports.  Note: ptp_offset_idx_10 (10M) has no case here,
 * so that entry keeps the zeros set by aq_ptp_offset_init().
 */
static void aq_ptp_offset_init_from_fw(const struct hw_atl_ptp_offset *offsets)
{
	int i;

	/* Load offsets for PTP */
	for (i = 0; i < ARRAY_SIZE(ptp_offset); i++) {
		switch (i) {
		/* 100M */
		case ptp_offset_idx_100:
			ptp_offset_init(i, 100,
					offsets->egress_100,
					offsets->ingress_100);
			break;
		/* 1G */
		case ptp_offset_idx_1000:
			ptp_offset_init(i, 1000,
					offsets->egress_1000,
					offsets->ingress_1000);
			break;
		/* 2.5G */
		case ptp_offset_idx_2500:
			ptp_offset_init(i, 2500,
					offsets->egress_2500,
					offsets->ingress_2500);
			break;
		/* 5G */
		case ptp_offset_idx_5000:
			ptp_offset_init(i, 5000,
					offsets->egress_5000,
					offsets->ingress_5000);
			break;
		/* 10G */
		case ptp_offset_idx_10000:
			ptp_offset_init(i, 10000,
					offsets->egress_10000,
					offsets->ingress_10000);
			break;
		}
	}
}
1103 
1104 static void aq_ptp_offset_init(const struct hw_atl_ptp_offset *offsets)
1105 {
1106 	memset(ptp_offset, 0, sizeof(ptp_offset));
1107 
1108 	aq_ptp_offset_init_from_fw(offsets);
1109 }
1110 
/* Build the ptp_pin_desc table from the firmware-reported GPIO
 * functions: periodic-output pins first, then an optional external
 * timestamp pin, and publish the counts in @info.
 */
static void aq_ptp_gpio_init(struct ptp_clock_info *info,
			     struct hw_atl_info *hw_info)
{
	struct ptp_pin_desc pin_desc[MAX_PTP_GPIO_COUNT];
	u32 extts_pin_cnt = 0;
	u32 out_pin_cnt = 0;
	u32 i;

	memset(pin_desc, 0, sizeof(pin_desc));

	/* Probe the first MAX_PTP_GPIO_COUNT-1 GPIOs for PTP output
	 * functions; out_pin_cnt thus stays below MAX_PTP_GPIO_COUNT,
	 * leaving one pin_desc slot for the optional EXTTS pin below.
	 */
	for (i = 0; i < MAX_PTP_GPIO_COUNT - 1; i++) {
		if (hw_info->gpio_pin[i] ==
		    (GPIO_PIN_FUNCTION_PTP0 + out_pin_cnt)) {
			snprintf(pin_desc[out_pin_cnt].name,
				 sizeof(pin_desc[out_pin_cnt].name),
				 "AQ_GPIO%d", i);
			pin_desc[out_pin_cnt].index = out_pin_cnt;
			pin_desc[out_pin_cnt].chan = out_pin_cnt;
			pin_desc[out_pin_cnt++].func = PTP_PF_PEROUT;
		}
	}

	info->n_per_out = out_pin_cnt;

	/* Optional PHY-controlled timestamp pin, advertised as EXTTS */
	if (hw_info->caps_ex & BIT(CAPS_EX_PHY_CTRL_TS_PIN)) {
		extts_pin_cnt += 1;

		snprintf(pin_desc[out_pin_cnt].name,
			 sizeof(pin_desc[out_pin_cnt].name),
			  "AQ_GPIO%d", out_pin_cnt);
		pin_desc[out_pin_cnt].index = out_pin_cnt;
		pin_desc[out_pin_cnt].chan = 0;
		pin_desc[out_pin_cnt].func = PTP_PF_EXTTS;
	}

	info->n_pins = out_pin_cnt + extts_pin_cnt;
	info->n_ext_ts = extts_pin_cnt;

	if (!info->n_pins)
		return;

	/* On allocation failure pin_config stays NULL; aq_ptp_verify()
	 * rejects all pin requests in that case.
	 */
	info->pin_config = kcalloc(info->n_pins, sizeof(struct ptp_pin_desc),
				   GFP_KERNEL);

	if (!info->pin_config)
		return;

	memcpy(info->pin_config, &pin_desc,
	       sizeof(struct ptp_pin_desc) * info->n_pins);
}
1161 
1162 void aq_ptp_clock_init(struct aq_nic_s *aq_nic)
1163 {
1164 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
1165 	struct timespec64 ts;
1166 
1167 	ktime_get_real_ts64(&ts);
1168 	aq_ptp_settime(&aq_ptp->ptp_info, &ts);
1169 }
1170 
1171 static void aq_ptp_poll_sync_work_cb(struct work_struct *w);
1172 
/* Probe-time PTP setup: allocate the aq_ptp context, register the PTP
 * clock with the kernel, enable the PTP counter in firmware and reserve
 * the RX filters used for PTP traffic steering.
 *
 * Returns 0 on success (including the "PTP not supported" cases, which
 * leave aq_nic->aq_ptp NULL) or a negative errno on failure.
 */
int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
{
	struct hw_atl_utils_mbox mbox;
	struct ptp_clock *clock;
	struct aq_ptp_s *aq_ptp;
	int err = 0;

	/* PTP requires both a HW timestamp read op ... */
	if (!aq_nic->aq_hw_ops->hw_get_ptp_ts) {
		aq_nic->aq_ptp = NULL;
		return 0;
	}

	/* ... and a firmware op to switch the PTP counter on/off. */
	if (!aq_nic->aq_fw_ops->enable_ptp) {
		aq_nic->aq_ptp = NULL;
		return 0;
	}

	/* Read the firmware mailbox to learn PTP capabilities and the
	 * per-speed timestamp offsets before allocating anything.
	 */
	hw_atl_utils_mpi_read_stats(aq_nic->aq_hw, &mbox);

	if (!(mbox.info.caps_ex & BIT(CAPS_EX_PHY_PTP_EN))) {
		aq_nic->aq_ptp = NULL;
		return 0;
	}

	aq_ptp_offset_init(&mbox.info.ptp_offset);

	aq_ptp = kzalloc(sizeof(*aq_ptp), GFP_KERNEL);
	if (!aq_ptp) {
		err = -ENOMEM;
		goto err_exit;
	}

	aq_ptp->aq_nic = aq_nic;

	spin_lock_init(&aq_ptp->ptp_lock);
	spin_lock_init(&aq_ptp->ptp_ring_lock);

	/* Start from the template clock info, then fill in the GPIO pin
	 * table (may allocate ptp_info.pin_config) before registering.
	 */
	aq_ptp->ptp_info = aq_ptp_clock;
	aq_ptp_gpio_init(&aq_ptp->ptp_info, &mbox.info);
	clock = ptp_clock_register(&aq_ptp->ptp_info, &aq_nic->ndev->dev);
	if (IS_ERR(clock)) {
		netdev_err(aq_nic->ndev, "ptp_clock_register failed\n");
		err = PTR_ERR(clock);
		goto err_exit;
	}
	aq_ptp->ptp_clock = clock;
	aq_ptp_tx_timeout_init(&aq_ptp->ptp_tx_timeout);

	atomic_set(&aq_ptp->offset_egress, 0);
	atomic_set(&aq_ptp->offset_ingress, 0);

	netif_napi_add(aq_nic_get_ndev(aq_nic), &aq_ptp->napi,
		       aq_ptp_poll, AQ_CFG_NAPI_WEIGHT);

	aq_ptp->idx_vector = idx_vec;

	aq_nic->aq_ptp = aq_ptp;

	/* enable ptp counter */
	aq_utils_obj_set(&aq_nic->aq_hw->flags, AQ_HW_PTP_AVAILABLE);
	/* Firmware requests are serialized via fwreq_mutex; the clock is
	 * set to wall-clock time right after the counter is enabled.
	 */
	mutex_lock(&aq_nic->fwreq_mutex);
	aq_nic->aq_fw_ops->enable_ptp(aq_nic->aq_hw, 1);
	aq_ptp_clock_init(aq_nic);
	mutex_unlock(&aq_nic->fwreq_mutex);

	INIT_DELAYED_WORK(&aq_ptp->poll_sync, &aq_ptp_poll_sync_work_cb);
	/* Reserve RX filter slots for PTP over L2 (ethertype) and over
	 * UDP (L3/L4); released again in aq_ptp_free().
	 */
	aq_ptp->eth_type_filter.location =
			aq_nic_reserve_filter(aq_nic, aq_rx_filter_ethertype);
	aq_ptp->udp_filter.location =
			aq_nic_reserve_filter(aq_nic, aq_rx_filter_l3l4);

	return 0;

err_exit:
	/* aq_ptp may still be NULL here (kzalloc failure); pin_config was
	 * allocated by aq_ptp_gpio_init() and must be freed separately.
	 */
	if (aq_ptp)
		kfree(aq_ptp->ptp_info.pin_config);
	kfree(aq_ptp);
	aq_nic->aq_ptp = NULL;
	return err;
}
1253 
1254 void aq_ptp_unregister(struct aq_nic_s *aq_nic)
1255 {
1256 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
1257 
1258 	if (!aq_ptp)
1259 		return;
1260 
1261 	ptp_clock_unregister(aq_ptp->ptp_clock);
1262 }
1263 
/* Tear down PTP state created by aq_ptp_init(): release reserved RX
 * filters, stop the sync-poll work, disable the firmware PTP counter
 * and free all memory. Safe to call when PTP was never initialized.
 */
void aq_ptp_free(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp)
		return;

	aq_nic_release_filter(aq_nic, aq_rx_filter_ethertype,
			      aq_ptp->eth_type_filter.location);
	aq_nic_release_filter(aq_nic, aq_rx_filter_l3l4,
			      aq_ptp->udp_filter.location);
	/* Stop the poll work before disabling PTP so the callback cannot
	 * run against a disabled counter.
	 */
	cancel_delayed_work_sync(&aq_ptp->poll_sync);
	/* disable ptp */
	mutex_lock(&aq_nic->fwreq_mutex);
	aq_nic->aq_fw_ops->enable_ptp(aq_nic->aq_hw, 0);
	mutex_unlock(&aq_nic->fwreq_mutex);

	/* Allocated by aq_ptp_gpio_init(); may be NULL (kfree handles that). */
	kfree(aq_ptp->ptp_info.pin_config);

	netif_napi_del(&aq_ptp->napi);
	kfree(aq_ptp);
	aq_nic->aq_ptp = NULL;
}
1287 
/* Accessor for the registered PTP clock handle. */
struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp)
{
	return aq_ptp->ptp_clock;
}
1292 
1293 /* PTP external GPIO nanoseconds count */
1294 static uint64_t aq_ptp_get_sync1588_ts(struct aq_nic_s *aq_nic)
1295 {
1296 	u64 ts = 0;
1297 
1298 	if (aq_nic->aq_hw_ops->hw_get_sync_ts)
1299 		aq_nic->aq_hw_ops->hw_get_sync_ts(aq_nic->aq_hw, &ts);
1300 
1301 	return ts;
1302 }
1303 
1304 static void aq_ptp_start_work(struct aq_ptp_s *aq_ptp)
1305 {
1306 	if (aq_ptp->extts_pin_enabled) {
1307 		aq_ptp->poll_timeout_ms = POLL_SYNC_TIMER_MS;
1308 		aq_ptp->last_sync1588_ts =
1309 				aq_ptp_get_sync1588_ts(aq_ptp->aq_nic);
1310 		schedule_delayed_work(&aq_ptp->poll_sync,
1311 				      msecs_to_jiffies(aq_ptp->poll_timeout_ms));
1312 	}
1313 }
1314 
1315 int aq_ptp_link_change(struct aq_nic_s *aq_nic)
1316 {
1317 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
1318 
1319 	if (!aq_ptp)
1320 		return 0;
1321 
1322 	if (aq_nic->aq_hw->aq_link_status.mbps)
1323 		aq_ptp_start_work(aq_ptp);
1324 	else
1325 		cancel_delayed_work_sync(&aq_ptp->poll_sync);
1326 
1327 	return 0;
1328 }
1329 
/* Check whether the SYNC1588 GPIO timestamp changed since the last poll.
 * Returns true and stores the new value in *new_ts when it did (a value
 * of 0 is stored if a stable reading could not be obtained); returns
 * false, leaving *new_ts untouched, when nothing changed.
 */
static bool aq_ptp_sync_ts_updated(struct aq_ptp_s *aq_ptp, u64 *new_ts)
{
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	u64 sync_ts2;
	u64 sync_ts;

	sync_ts = aq_ptp_get_sync1588_ts(aq_nic);

	if (sync_ts != aq_ptp->last_sync1588_ts) {
		/* The counter may be caught mid-update; re-read until two
		 * consecutive reads agree, giving up after three reads.
		 */
		sync_ts2 = aq_ptp_get_sync1588_ts(aq_nic);
		if (sync_ts != sync_ts2) {
			sync_ts = sync_ts2;
			sync_ts2 = aq_ptp_get_sync1588_ts(aq_nic);
			if (sync_ts != sync_ts2) {
				netdev_err(aq_nic->ndev,
					   "%s: Unable to get correct GPIO TS",
					   __func__);
				sync_ts = 0;
			}
		}

		*new_ts = sync_ts;
		return true;
	}
	return false;
}
1356 
1357 static int aq_ptp_check_sync1588(struct aq_ptp_s *aq_ptp)
1358 {
1359 	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
1360 	u64 sync_ts;
1361 
1362 	 /* Sync1588 pin was triggered */
1363 	if (aq_ptp_sync_ts_updated(aq_ptp, &sync_ts)) {
1364 		if (aq_ptp->extts_pin_enabled) {
1365 			struct ptp_clock_event ptp_event;
1366 			u64 time = 0;
1367 
1368 			aq_nic->aq_hw_ops->hw_ts_to_sys_clock(aq_nic->aq_hw,
1369 							      sync_ts, &time);
1370 			ptp_event.index = aq_ptp->ptp_info.n_pins - 1;
1371 			ptp_event.timestamp = time;
1372 
1373 			ptp_event.type = PTP_CLOCK_EXTTS;
1374 			ptp_clock_event(aq_ptp->ptp_clock, &ptp_event);
1375 		}
1376 
1377 		aq_ptp->last_sync1588_ts = sync_ts;
1378 	}
1379 
1380 	return 0;
1381 }
1382 
1383 static void aq_ptp_poll_sync_work_cb(struct work_struct *w)
1384 {
1385 	struct delayed_work *dw = to_delayed_work(w);
1386 	struct aq_ptp_s *aq_ptp = container_of(dw, struct aq_ptp_s, poll_sync);
1387 
1388 	aq_ptp_check_sync1588(aq_ptp);
1389 
1390 	if (aq_ptp->extts_pin_enabled) {
1391 		unsigned long timeout = msecs_to_jiffies(aq_ptp->poll_timeout_ms);
1392 
1393 		schedule_delayed_work(&aq_ptp->poll_sync, timeout);
1394 	}
1395 }
1396 #endif
1397