xref: /linux/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c (revision 79790b6818e96c58fe2bffee1b418c16e64e7b80)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Atlantic Network Driver
3  *
4  * Copyright (C) 2014-2019 aQuantia Corporation
5  * Copyright (C) 2019-2020 Marvell International Ltd.
6  */
7 
8 /* File aq_ptp.c:
9  * Definition of functions for Linux PTP support.
10  */
11 
12 #include <linux/ptp_clock_kernel.h>
13 #include <linux/ptp_classify.h>
14 #include <linux/interrupt.h>
15 #include <linux/clocksource.h>
16 
17 #include "aq_nic.h"
18 #include "aq_ptp.h"
19 #include "aq_ring.h"
20 #include "aq_phy.h"
21 #include "aq_filters.h"
22 
23 #if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
24 
25 #define AQ_PTP_TX_TIMEOUT        (HZ *  10)
26 
27 #define POLL_SYNC_TIMER_MS 15
28 
29 enum ptp_speed_offsets {
30 	ptp_offset_idx_10 = 0,
31 	ptp_offset_idx_100,
32 	ptp_offset_idx_1000,
33 	ptp_offset_idx_2500,
34 	ptp_offset_idx_5000,
35 	ptp_offset_idx_10000,
36 };
37 
38 struct ptp_skb_ring {
39 	struct sk_buff **buff;
40 	spinlock_t lock;
41 	unsigned int size;
42 	unsigned int head;
43 	unsigned int tail;
44 };
45 
46 struct ptp_tx_timeout {
47 	spinlock_t lock;
48 	bool active;
49 	unsigned long tx_start;
50 };
51 
52 struct aq_ptp_s {
53 	struct aq_nic_s *aq_nic;
54 	struct hwtstamp_config hwtstamp_config;
55 	spinlock_t ptp_lock;
56 	spinlock_t ptp_ring_lock;
57 	struct ptp_clock *ptp_clock;
58 	struct ptp_clock_info ptp_info;
59 
60 	atomic_t offset_egress;
61 	atomic_t offset_ingress;
62 
63 	struct aq_ring_param_s ptp_ring_param;
64 
65 	struct ptp_tx_timeout ptp_tx_timeout;
66 
67 	unsigned int idx_vector;
68 	struct napi_struct napi;
69 
70 	struct aq_ring_s ptp_tx;
71 	struct aq_ring_s ptp_rx;
72 	struct aq_ring_s hwts_rx;
73 
74 	struct ptp_skb_ring skb_ring;
75 
76 	struct aq_rx_filter_l3l4 udp_filter;
77 	struct aq_rx_filter_l2 eth_type_filter;
78 
79 	struct delayed_work poll_sync;
80 	u32 poll_timeout_ms;
81 
82 	bool extts_pin_enabled;
83 	u64 last_sync1588_ts;
84 
85 	bool a1_ptp;
86 };
87 
88 struct ptp_tm_offset {
89 	unsigned int mbps;
90 	int egress;
91 	int ingress;
92 };
93 
94 static struct ptp_tm_offset ptp_offset[6];
95 
96 void aq_ptp_tm_offset_set(struct aq_nic_s *aq_nic, unsigned int mbps)
97 {
98 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
99 	int i, egress, ingress;
100 
101 	if (!aq_ptp)
102 		return;
103 
104 	egress = 0;
105 	ingress = 0;
106 
107 	for (i = 0; i < ARRAY_SIZE(ptp_offset); i++) {
108 		if (mbps == ptp_offset[i].mbps) {
109 			egress = ptp_offset[i].egress;
110 			ingress = ptp_offset[i].ingress;
111 			break;
112 		}
113 	}
114 
115 	atomic_set(&aq_ptp->offset_egress, egress);
116 	atomic_set(&aq_ptp->offset_ingress, ingress);
117 }
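/* The per-speed offsets cached above are consumed on the timestamping path:
 * the egress value is added to every TX hardware timestamp in
 * aq_ptp_tx_hwtstamp() and the ingress value is subtracted from every RX
 * timestamp in aq_ptp_rx_hwtstamp(). As a rough, illustrative example
 * (the real numbers come from firmware via struct hw_atl_ptp_offset):
 *
 *	aq_ptp_tm_offset_set(aq_nic, 1000);	// link negotiated at 1G
 *	// if ptp_offset[ptp_offset_idx_1000] = { .egress = 300, .ingress = 250 },
 *	// a raw TX timestamp T is reported to the stack as T + 300 ns and
 *	// a raw RX timestamp T as T - 250 ns.
 */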
118 
119 static int __aq_ptp_skb_put(struct ptp_skb_ring *ring, struct sk_buff *skb)
120 {
121 	unsigned int next_head = (ring->head + 1) % ring->size;
122 
123 	if (next_head == ring->tail)
124 		return -ENOMEM;
125 
126 	ring->buff[ring->head] = skb_get(skb);
127 	ring->head = next_head;
128 
129 	return 0;
130 }
131 
132 static int aq_ptp_skb_put(struct ptp_skb_ring *ring, struct sk_buff *skb)
133 {
134 	unsigned long flags;
135 	int ret;
136 
137 	spin_lock_irqsave(&ring->lock, flags);
138 	ret = __aq_ptp_skb_put(ring, skb);
139 	spin_unlock_irqrestore(&ring->lock, flags);
140 
141 	return ret;
142 }
143 
144 static struct sk_buff *__aq_ptp_skb_get(struct ptp_skb_ring *ring)
145 {
146 	struct sk_buff *skb;
147 
148 	if (ring->tail == ring->head)
149 		return NULL;
150 
151 	skb = ring->buff[ring->tail];
152 	ring->tail = (ring->tail + 1) % ring->size;
153 
154 	return skb;
155 }
156 
157 static struct sk_buff *aq_ptp_skb_get(struct ptp_skb_ring *ring)
158 {
159 	unsigned long flags;
160 	struct sk_buff *skb;
161 
162 	spin_lock_irqsave(&ring->lock, flags);
163 	skb = __aq_ptp_skb_get(ring);
164 	spin_unlock_irqrestore(&ring->lock, flags);
165 
166 	return skb;
167 }
168 
169 static unsigned int aq_ptp_skb_buf_len(struct ptp_skb_ring *ring)
170 {
171 	unsigned long flags;
172 	unsigned int len;
173 
174 	spin_lock_irqsave(&ring->lock, flags);
175 	len = (ring->head >= ring->tail) ?
176 	      ring->head - ring->tail :
177 	      ring->size - ring->tail + ring->head;
178 	spin_unlock_irqrestore(&ring->lock, flags);
179 
180 	return len;
181 }
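/* A short worked example of the occupancy math above (illustrative only):
 * with size = 8, head = 2 and tail = 6, head < tail, so
 * len = size - tail + head = 8 - 6 + 2 = 4 queued skbs. Because
 * __aq_ptp_skb_put() refuses to advance head onto tail, at most
 * size - 1 skbs can be outstanding at any time.
 */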
182 
183 static int aq_ptp_skb_ring_init(struct ptp_skb_ring *ring, unsigned int size)
184 {
185 	struct sk_buff **buff = kmalloc(sizeof(*buff) * size, GFP_KERNEL);
186 
187 	if (!buff)
188 		return -ENOMEM;
189 
190 	spin_lock_init(&ring->lock);
191 
192 	ring->buff = buff;
193 	ring->size = size;
194 	ring->head = 0;
195 	ring->tail = 0;
196 
197 	return 0;
198 }
199 
200 static void aq_ptp_skb_ring_clean(struct ptp_skb_ring *ring)
201 {
202 	struct sk_buff *skb;
203 
204 	while ((skb = aq_ptp_skb_get(ring)) != NULL)
205 		dev_kfree_skb_any(skb);
206 }
207 
208 static void aq_ptp_skb_ring_release(struct ptp_skb_ring *ring)
209 {
210 	if (ring->buff) {
211 		aq_ptp_skb_ring_clean(ring);
212 		kfree(ring->buff);
213 		ring->buff = NULL;
214 	}
215 }
216 
217 static void aq_ptp_tx_timeout_init(struct ptp_tx_timeout *timeout)
218 {
219 	spin_lock_init(&timeout->lock);
220 	timeout->active = false;
221 }
222 
223 static void aq_ptp_tx_timeout_start(struct aq_ptp_s *aq_ptp)
224 {
225 	struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
226 	unsigned long flags;
227 
228 	spin_lock_irqsave(&timeout->lock, flags);
229 	timeout->active = true;
230 	timeout->tx_start = jiffies;
231 	spin_unlock_irqrestore(&timeout->lock, flags);
232 }
233 
234 static void aq_ptp_tx_timeout_update(struct aq_ptp_s *aq_ptp)
235 {
236 	if (!aq_ptp_skb_buf_len(&aq_ptp->skb_ring)) {
237 		struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
238 		unsigned long flags;
239 
240 		spin_lock_irqsave(&timeout->lock, flags);
241 		timeout->active = false;
242 		spin_unlock_irqrestore(&timeout->lock, flags);
243 	}
244 }
245 
246 static void aq_ptp_tx_timeout_check(struct aq_ptp_s *aq_ptp)
247 {
248 	struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
249 	unsigned long flags;
250 	bool timeout_flag;
251 
252 	timeout_flag = false;
253 
254 	spin_lock_irqsave(&timeout->lock, flags);
255 	if (timeout->active) {
256 		timeout_flag = time_is_before_jiffies(timeout->tx_start +
257 						      AQ_PTP_TX_TIMEOUT);
258 		/* reset active flag if timeout detected */
259 		if (timeout_flag)
260 			timeout->active = false;
261 	}
262 	spin_unlock_irqrestore(&timeout->lock, flags);
263 
264 	if (timeout_flag) {
265 		aq_ptp_skb_ring_clean(&aq_ptp->skb_ring);
266 		netdev_err(aq_ptp->aq_nic->ndev,
267 			   "PTP Timeout. Clearing Tx Timestamp SKBs\n");
268 	}
269 }
270 
271 /* aq_ptp_adjfine
272  * @ptp: the ptp clock structure
273  * @scaled_ppm: parts per million adjustment from base, with a 16-bit binary fraction
274  *
275  * adjust the frequency of the ptp cycle counter by the
276  * requested amount, converted from scaled_ppm to parts per billion.
277  */
278 static int aq_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
279 {
280 	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
281 	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
282 
283 	mutex_lock(&aq_nic->fwreq_mutex);
284 	aq_nic->aq_hw_ops->hw_adj_clock_freq(aq_nic->aq_hw,
285 					     scaled_ppm_to_ppb(scaled_ppm));
286 	mutex_unlock(&aq_nic->fwreq_mutex);
287 
288 	return 0;
289 }
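/* A worked example of the conversion above (illustrative): scaled_ppm is
 * parts per million with a 16-bit binary fraction, so scaled_ppm = 65536
 * means +1 ppm and scaled_ppm_to_ppb() passes roughly +1000 ppb to
 * hw_adj_clock_freq(), i.e. the PTP counter gains about one microsecond
 * per second relative to its base frequency.
 */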
290 
291 /* aq_ptp_adjtime
292  * @ptp: the ptp clock structure
293  * @delta: offset to adjust the cycle counter by
294  *
295  * adjust the hardware clock by the requested delta.
296  */
297 static int aq_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
298 {
299 	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
300 	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
301 	unsigned long flags;
302 
303 	spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
304 	aq_nic->aq_hw_ops->hw_adj_sys_clock(aq_nic->aq_hw, delta);
305 	spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);
306 
307 	return 0;
308 }
309 
310 /* aq_ptp_gettime
311  * @ptp: the ptp clock structure
312  * @ts: timespec structure to hold the current time value
313  *
314  * read the hardware clock and return the correct value in ns,
315  * after converting it into a struct timespec.
316  */
317 static int aq_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
318 {
319 	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
320 	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
321 	unsigned long flags;
322 	u64 ns;
323 
324 	spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
325 	aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &ns);
326 	spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);
327 
328 	*ts = ns_to_timespec64(ns);
329 
330 	return 0;
331 }
332 
333 /* aq_ptp_settime
334  * @ptp: the ptp clock structure
335  * @ts: the timespec containing the new time for the cycle counter
336  *
337  * set the hardware clock to the requested time by applying the
338  * difference from the current hardware time as an adjustment.
339  */
340 static int aq_ptp_settime(struct ptp_clock_info *ptp,
341 			  const struct timespec64 *ts)
342 {
343 	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
344 	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
345 	unsigned long flags;
346 	u64 ns = timespec64_to_ns(ts);
347 	u64 now;
348 
349 	spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
350 	aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &now);
351 	aq_nic->aq_hw_ops->hw_adj_sys_clock(aq_nic->aq_hw, (s64)ns - (s64)now);
352 
353 	spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);
354 
355 	return 0;
356 }
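/* Setting the clock is implemented as a relative step: if the caller asks
 * for 1000.000000500 s while the hardware currently reads 999.999999900 s,
 * hw_adj_sys_clock() is handed a delta of +600 ns (illustrative numbers
 * only).
 */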
357 
358 static void aq_ptp_convert_to_hwtstamp(struct aq_ptp_s *aq_ptp,
359 				       struct skb_shared_hwtstamps *hwtstamp,
360 				       u64 timestamp)
361 {
362 	memset(hwtstamp, 0, sizeof(*hwtstamp));
363 	hwtstamp->hwtstamp = ns_to_ktime(timestamp);
364 }
365 
366 static int aq_ptp_hw_pin_conf(struct aq_nic_s *aq_nic, u32 pin_index, u64 start,
367 			      u64 period)
368 {
369 	if (period)
370 		netdev_dbg(aq_nic->ndev,
371 			   "Enable GPIO %d pulsing, start time %llu, period %u\n",
372 			   pin_index, start, (u32)period);
373 	else
374 		netdev_dbg(aq_nic->ndev,
375 			   "Disable GPIO %d pulsing, start time %llu, period %u\n",
376 			   pin_index, start, (u32)period);
377 
378 	/* Notify hardware of the request to begin sending pulses.
379 	 * If period is ZERO then pulsing is disabled.
380 	 */
381 	mutex_lock(&aq_nic->fwreq_mutex);
382 	aq_nic->aq_hw_ops->hw_gpio_pulse(aq_nic->aq_hw, pin_index,
383 					 start, (u32)period);
384 	mutex_unlock(&aq_nic->fwreq_mutex);
385 
386 	return 0;
387 }
388 
389 static int aq_ptp_perout_pin_configure(struct ptp_clock_info *ptp,
390 				       struct ptp_clock_request *rq, int on)
391 {
392 	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
393 	struct ptp_clock_time *t = &rq->perout.period;
394 	struct ptp_clock_time *s = &rq->perout.start;
395 	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
396 	u64 start, period;
397 	u32 pin_index = rq->perout.index;
398 
399 	/* verify the request channel is there */
400 	if (pin_index >= ptp->n_per_out)
401 		return -EINVAL;
402 
403 	/* we cannot support periods greater
404 	 * than 4 seconds due to reg limit
405 	 */
406 	if (t->sec > 4 || t->sec < 0)
407 		return -ERANGE;
408 
409 	/* convert to unsigned 64b ns,
410 	 * verify we can put it in a 32b register
411 	 */
412 	period = on ? t->sec * NSEC_PER_SEC + t->nsec : 0;
413 
414 	/* verify the value is in range supported by hardware */
415 	if (period > U32_MAX)
416 		return -ERANGE;
417 	/* convert to unsigned 64b ns */
418 	/* TODO convert to AQ time */
419 	start = on ? s->sec * NSEC_PER_SEC + s->nsec : 0;
420 
421 	aq_ptp_hw_pin_conf(aq_nic, pin_index, start, period);
422 
423 	return 0;
424 }
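/* Why the 4 second cap above keeps the period inside a 32-bit register
 * (a sanity check, not driver logic): 4 s = 4,000,000,000 ns, which is
 * below U32_MAX = 4,294,967,295 ns, whereas 5 s = 5,000,000,000 ns would
 * already overflow it.
 */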
425 
426 static int aq_ptp_pps_pin_configure(struct ptp_clock_info *ptp,
427 				    struct ptp_clock_request *rq, int on)
428 {
429 	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
430 	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
431 	u64 start, period;
432 	u32 pin_index = 0;
433 	u32 rest = 0;
434 
435 	/* verify the request channel is there */
436 	if (pin_index >= ptp->n_per_out)
437 		return -EINVAL;
438 
439 	aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &start);
440 	div_u64_rem(start, NSEC_PER_SEC, &rest);
441 	period = on ? NSEC_PER_SEC : 0; /* PPS - pulse per second */
442 	start = on ? start - rest + NSEC_PER_SEC *
443 		(rest > 990000000LL ? 2 : 1) : 0;
444 
445 	aq_ptp_hw_pin_conf(aq_nic, pin_index, start, period);
446 
447 	return 0;
448 }
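/* PPS start-time rounding, by example (illustrative numbers): if the PTP
 * clock reads 123.400000000 s, rest = 400000000 ns and the first pulse is
 * scheduled for 124.000000000 s; if it reads 123.995000000 s, rest exceeds
 * 990 ms and the pulse is pushed out to 125.000000000 s to leave the
 * hardware enough setup margin.
 */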
449 
450 static void aq_ptp_extts_pin_ctrl(struct aq_ptp_s *aq_ptp)
451 {
452 	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
453 	u32 enable = aq_ptp->extts_pin_enabled;
454 
455 	if (aq_nic->aq_hw_ops->hw_extts_gpio_enable)
456 		aq_nic->aq_hw_ops->hw_extts_gpio_enable(aq_nic->aq_hw, 0,
457 							enable);
458 }
459 
460 static int aq_ptp_extts_pin_configure(struct ptp_clock_info *ptp,
461 				      struct ptp_clock_request *rq, int on)
462 {
463 	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
464 
465 	u32 pin_index = rq->extts.index;
466 
467 	if (pin_index >= ptp->n_ext_ts)
468 		return -EINVAL;
469 
470 	aq_ptp->extts_pin_enabled = !!on;
471 	if (on) {
472 		aq_ptp->poll_timeout_ms = POLL_SYNC_TIMER_MS;
473 		cancel_delayed_work_sync(&aq_ptp->poll_sync);
474 		schedule_delayed_work(&aq_ptp->poll_sync,
475 				      msecs_to_jiffies(aq_ptp->poll_timeout_ms));
476 	}
477 
478 	aq_ptp_extts_pin_ctrl(aq_ptp);
479 	return 0;
480 }
481 
482 /* aq_ptp_gpio_feature_enable
483  * @ptp: the ptp clock structure
484  * @rq: the requested feature to change
485  * @on: whether to enable or disable the feature
486  */
487 static int aq_ptp_gpio_feature_enable(struct ptp_clock_info *ptp,
488 				      struct ptp_clock_request *rq, int on)
489 {
490 	switch (rq->type) {
491 	case PTP_CLK_REQ_EXTTS:
492 		return aq_ptp_extts_pin_configure(ptp, rq, on);
493 	case PTP_CLK_REQ_PEROUT:
494 		return aq_ptp_perout_pin_configure(ptp, rq, on);
495 	case PTP_CLK_REQ_PPS:
496 		return aq_ptp_pps_pin_configure(ptp, rq, on);
497 	default:
498 		return -EOPNOTSUPP;
499 	}
500 
501 	return 0;
502 }
503 
504 /* aq_ptp_verify
505  * @ptp: the ptp clock structure
506  * @pin: index of the pin in question
507  * @func: the desired function to use
508  * @chan: the function channel index to use
509  */
510 static int aq_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
511 			 enum ptp_pin_function func, unsigned int chan)
512 {
513 	/* verify the requested pin is there */
514 	if (!ptp->pin_config || pin >= ptp->n_pins)
515 		return -EINVAL;
516 
517 	/* enforce locked channels, no changing them */
518 	if (chan != ptp->pin_config[pin].chan)
519 		return -EINVAL;
520 
521 	/* we want to keep the functions locked as well */
522 	if (func != ptp->pin_config[pin].func)
523 		return -EINVAL;
524 
525 	return 0;
526 }
527 
528 /* aq_ptp_tx_hwtstamp - utility function which checks for TX time stamp
529  * @aq_nic: the private adapter struct
530  *
531  * if the timestamp is valid, we convert it into the timecounter ns
532  * value, then store that result into the hwtstamps structure which
533  * is passed up the network stack
534  */
535 void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp)
536 {
537 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
538 	struct sk_buff *skb = aq_ptp_skb_get(&aq_ptp->skb_ring);
539 	struct skb_shared_hwtstamps hwtstamp;
540 
541 	if (!skb) {
542 		netdev_err(aq_nic->ndev, "have timestamp but tx_queues empty\n");
543 		return;
544 	}
545 
546 	timestamp += atomic_read(&aq_ptp->offset_egress);
547 	aq_ptp_convert_to_hwtstamp(aq_ptp, &hwtstamp, timestamp);
548 	skb_tstamp_tx(skb, &hwtstamp);
549 	dev_kfree_skb_any(skb);
550 
551 	aq_ptp_tx_timeout_update(aq_ptp);
552 }
553 
554 /* aq_ptp_rx_hwtstamp - utility function which checks for RX time stamp
555  * @aq_ptp: pointer to the driver's PTP state
556  * @shhwtstamps: particular skb_shared_hwtstamps to save timestamp
557  *
558  * if the timestamp is valid, we convert it into the timecounter ns
559  * value, then store that result into the hwtstamps structure which
560  * is passed up the network stack
561  */
562 static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct skb_shared_hwtstamps *shhwtstamps,
563 			       u64 timestamp)
564 {
565 	timestamp -= atomic_read(&aq_ptp->offset_ingress);
566 	aq_ptp_convert_to_hwtstamp(aq_ptp, shhwtstamps, timestamp);
567 }
568 
569 void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
570 				struct hwtstamp_config *config)
571 {
572 	*config = aq_ptp->hwtstamp_config;
573 }
574 
575 static void aq_ptp_prepare_filters(struct aq_ptp_s *aq_ptp)
576 {
577 	aq_ptp->udp_filter.cmd = HW_ATL_RX_ENABLE_FLTR_L3L4 |
578 			       HW_ATL_RX_ENABLE_CMP_PROT_L4 |
579 			       HW_ATL_RX_UDP |
580 			       HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 |
581 			       HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT |
582 			       HW_ATL_RX_ENABLE_QUEUE_L3L4 |
583 			       aq_ptp->ptp_rx.idx << HW_ATL_RX_QUEUE_FL3L4_SHIFT;
584 	aq_ptp->udp_filter.p_dst = PTP_EV_PORT;
585 
586 	aq_ptp->eth_type_filter.ethertype = ETH_P_1588;
587 	aq_ptp->eth_type_filter.queue = aq_ptp->ptp_rx.idx;
588 }
589 
590 int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
591 			       struct hwtstamp_config *config)
592 {
593 	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
594 	const struct aq_hw_ops *hw_ops;
595 	int err = 0;
596 
597 	hw_ops = aq_nic->aq_hw_ops;
598 	if (config->tx_type == HWTSTAMP_TX_ON ||
599 	    config->rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT) {
600 		aq_ptp_prepare_filters(aq_ptp);
601 		if (hw_ops->hw_filter_l3l4_set) {
602 			err = hw_ops->hw_filter_l3l4_set(aq_nic->aq_hw,
603 							 &aq_ptp->udp_filter);
604 		}
605 		if (!err && hw_ops->hw_filter_l2_set) {
606 			err = hw_ops->hw_filter_l2_set(aq_nic->aq_hw,
607 						       &aq_ptp->eth_type_filter);
608 		}
609 		aq_utils_obj_set(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP);
610 	} else {
611 		aq_ptp->udp_filter.cmd &= ~HW_ATL_RX_ENABLE_FLTR_L3L4;
612 		if (hw_ops->hw_filter_l3l4_set) {
613 			err = hw_ops->hw_filter_l3l4_set(aq_nic->aq_hw,
614 							 &aq_ptp->udp_filter);
615 		}
616 		if (!err && hw_ops->hw_filter_l2_clear) {
617 			err = hw_ops->hw_filter_l2_clear(aq_nic->aq_hw,
618 							&aq_ptp->eth_type_filter);
619 		}
620 		aq_utils_obj_clear(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP);
621 	}
622 
623 	if (err)
624 		return -EREMOTEIO;
625 
626 	aq_ptp->hwtstamp_config = *config;
627 
628 	return 0;
629 }
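/* A minimal user-space sketch of how this function is typically reached
 * (assumes the standard SIOCSHWTSTAMP/net_tstamp UAPI; the socket fd,
 * interface name and error handling are placeholders):
 *
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *	#include <net/if.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * With these values the first branch above installs the UDP event-port and
 * ETH_P_1588 filters and marks the PTP datapath as up.
 */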
630 
631 bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
632 {
633 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
634 
635 	if (!aq_ptp)
636 		return false;
637 
638 	return &aq_ptp->ptp_tx == ring ||
639 	       &aq_ptp->ptp_rx == ring || &aq_ptp->hwts_rx == ring;
640 }
641 
642 u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct skb_shared_hwtstamps *shhwtstamps, u8 *p,
643 		      unsigned int len)
644 {
645 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
646 	u64 timestamp = 0;
647 	u16 ret = aq_nic->aq_hw_ops->rx_extract_ts(aq_nic->aq_hw,
648 						   p, len, &timestamp);
649 
650 	if (ret > 0)
651 		aq_ptp_rx_hwtstamp(aq_ptp, shhwtstamps, timestamp);
652 
653 	return ret;
654 }
655 
656 static int aq_ptp_poll(struct napi_struct *napi, int budget)
657 {
658 	struct aq_ptp_s *aq_ptp = container_of(napi, struct aq_ptp_s, napi);
659 	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
660 	bool was_cleaned = false;
661 	int work_done = 0;
662 	int err;
663 
664 	/* Processing PTP TX traffic */
665 	err = aq_nic->aq_hw_ops->hw_ring_tx_head_update(aq_nic->aq_hw,
666 							&aq_ptp->ptp_tx);
667 	if (err < 0)
668 		goto err_exit;
669 
670 	if (aq_ptp->ptp_tx.sw_head != aq_ptp->ptp_tx.hw_head) {
671 		aq_ring_tx_clean(&aq_ptp->ptp_tx);
672 
673 		was_cleaned = true;
674 	}
675 
676 	/* Processing HW_TIMESTAMP RX traffic */
677 	err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_receive(aq_nic->aq_hw,
678 							 &aq_ptp->hwts_rx);
679 	if (err < 0)
680 		goto err_exit;
681 
682 	if (aq_ptp->hwts_rx.sw_head != aq_ptp->hwts_rx.hw_head) {
683 		aq_ring_hwts_rx_clean(&aq_ptp->hwts_rx, aq_nic);
684 
685 		err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_fill(aq_nic->aq_hw,
686 							      &aq_ptp->hwts_rx);
687 		if (err < 0)
688 			goto err_exit;
689 
690 		was_cleaned = true;
691 	}
692 
693 	/* Processing PTP RX traffic */
694 	err = aq_nic->aq_hw_ops->hw_ring_rx_receive(aq_nic->aq_hw,
695 						    &aq_ptp->ptp_rx);
696 	if (err < 0)
697 		goto err_exit;
698 
699 	if (aq_ptp->ptp_rx.sw_head != aq_ptp->ptp_rx.hw_head) {
700 		unsigned int sw_tail_old;
701 
702 		err = aq_ring_rx_clean(&aq_ptp->ptp_rx, napi, &work_done, budget);
703 		if (err < 0)
704 			goto err_exit;
705 
706 		sw_tail_old = aq_ptp->ptp_rx.sw_tail;
707 		err = aq_ring_rx_fill(&aq_ptp->ptp_rx);
708 		if (err < 0)
709 			goto err_exit;
710 
711 		err = aq_nic->aq_hw_ops->hw_ring_rx_fill(aq_nic->aq_hw,
712 							 &aq_ptp->ptp_rx,
713 							 sw_tail_old);
714 		if (err < 0)
715 			goto err_exit;
716 	}
717 
718 	if (was_cleaned)
719 		work_done = budget;
720 
721 	if (work_done < budget) {
722 		napi_complete_done(napi, work_done);
723 		aq_nic->aq_hw_ops->hw_irq_enable(aq_nic->aq_hw,
724 					BIT_ULL(aq_ptp->ptp_ring_param.vec_idx));
725 	}
726 
727 err_exit:
728 	return work_done;
729 }
730 
731 static irqreturn_t aq_ptp_isr(int irq, void *private)
732 {
733 	struct aq_ptp_s *aq_ptp = private;
734 	int err = 0;
735 
736 	if (!aq_ptp) {
737 		err = -EINVAL;
738 		goto err_exit;
739 	}
740 	napi_schedule(&aq_ptp->napi);
741 
742 err_exit:
743 	return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
744 }
745 
746 int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb)
747 {
748 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
749 	struct aq_ring_s *ring = &aq_ptp->ptp_tx;
750 	unsigned long irq_flags;
751 	int err = NETDEV_TX_OK;
752 	unsigned int frags;
753 
754 	if (skb->len <= 0) {
755 		dev_kfree_skb_any(skb);
756 		goto err_exit;
757 	}
758 
759 	frags = skb_shinfo(skb)->nr_frags + 1;
760 	/* Frags cannot be bigger than 16KB,
761 	 * because PTP usually works
762 	 * without jumbo frames even in the background.
763 	 */
764 	if (frags > AQ_CFG_SKB_FRAGS_MAX || frags > aq_ring_avail_dx(ring)) {
765 		/* Drop packet because it doesn't make sense to delay it */
766 		dev_kfree_skb_any(skb);
767 		goto err_exit;
768 	}
769 
770 	err = aq_ptp_skb_put(&aq_ptp->skb_ring, skb);
771 	if (err) {
772 		netdev_err(aq_nic->ndev, "SKB ring overflow (%u)!\n",
773 			   ring->size);
774 		return NETDEV_TX_BUSY;
775 	}
776 	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
777 	aq_ptp_tx_timeout_start(aq_ptp);
778 	skb_tx_timestamp(skb);
779 
780 	spin_lock_irqsave(&aq_nic->aq_ptp->ptp_ring_lock, irq_flags);
781 	frags = aq_nic_map_skb(aq_nic, skb, ring);
782 
783 	if (likely(frags)) {
784 		err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw,
785 						       ring, frags);
786 		if (err >= 0) {
787 			u64_stats_update_begin(&ring->stats.tx.syncp);
788 			++ring->stats.tx.packets;
789 			ring->stats.tx.bytes += skb->len;
790 			u64_stats_update_end(&ring->stats.tx.syncp);
791 		}
792 	} else {
793 		err = NETDEV_TX_BUSY;
794 	}
795 	spin_unlock_irqrestore(&aq_nic->aq_ptp->ptp_ring_lock, irq_flags);
796 
797 err_exit:
798 	return err;
799 }
800 
801 void aq_ptp_service_task(struct aq_nic_s *aq_nic)
802 {
803 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
804 
805 	if (!aq_ptp)
806 		return;
807 
808 	aq_ptp_tx_timeout_check(aq_ptp);
809 }
810 
811 int aq_ptp_irq_alloc(struct aq_nic_s *aq_nic)
812 {
813 	struct pci_dev *pdev = aq_nic->pdev;
814 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
815 	int err = 0;
816 
817 	if (!aq_ptp)
818 		return 0;
819 
820 	if (pdev->msix_enabled || pdev->msi_enabled) {
821 		err = request_irq(pci_irq_vector(pdev, aq_ptp->idx_vector),
822 				  aq_ptp_isr, 0, aq_nic->ndev->name, aq_ptp);
823 	} else {
824 		err = -EINVAL;
825 		goto err_exit;
826 	}
827 
828 err_exit:
829 	return err;
830 }
831 
832 void aq_ptp_irq_free(struct aq_nic_s *aq_nic)
833 {
834 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
835 	struct pci_dev *pdev = aq_nic->pdev;
836 
837 	if (!aq_ptp)
838 		return;
839 
840 	free_irq(pci_irq_vector(pdev, aq_ptp->idx_vector), aq_ptp);
841 }
842 
843 int aq_ptp_ring_init(struct aq_nic_s *aq_nic)
844 {
845 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
846 	int err = 0;
847 
848 	if (!aq_ptp)
849 		return 0;
850 
851 	err = aq_ring_init(&aq_ptp->ptp_tx, ATL_RING_TX);
852 	if (err < 0)
853 		goto err_exit;
854 	err = aq_nic->aq_hw_ops->hw_ring_tx_init(aq_nic->aq_hw,
855 						 &aq_ptp->ptp_tx,
856 						 &aq_ptp->ptp_ring_param);
857 	if (err < 0)
858 		goto err_exit;
859 
860 	err = aq_ring_init(&aq_ptp->ptp_rx, ATL_RING_RX);
861 	if (err < 0)
862 		goto err_exit;
863 	err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw,
864 						 &aq_ptp->ptp_rx,
865 						 &aq_ptp->ptp_ring_param);
866 	if (err < 0)
867 		goto err_exit;
868 
869 	err = aq_ring_rx_fill(&aq_ptp->ptp_rx);
870 	if (err < 0)
871 		goto err_rx_free;
872 	err = aq_nic->aq_hw_ops->hw_ring_rx_fill(aq_nic->aq_hw,
873 						 &aq_ptp->ptp_rx,
874 						 0U);
875 	if (err < 0)
876 		goto err_rx_free;
877 
878 	err = aq_ring_init(&aq_ptp->hwts_rx, ATL_RING_RX);
879 	if (err < 0)
880 		goto err_rx_free;
881 	err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw,
882 						 &aq_ptp->hwts_rx,
883 						 &aq_ptp->ptp_ring_param);
884 	if (err < 0)
885 		goto err_exit;
886 	err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_fill(aq_nic->aq_hw,
887 						      &aq_ptp->hwts_rx);
888 	if (err < 0)
889 		goto err_exit;
890 
891 	return err;
892 
893 err_rx_free:
894 	aq_ring_rx_deinit(&aq_ptp->ptp_rx);
895 err_exit:
896 	return err;
897 }
898 
899 int aq_ptp_ring_start(struct aq_nic_s *aq_nic)
900 {
901 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
902 	int err = 0;
903 
904 	if (!aq_ptp)
905 		return 0;
906 
907 	err = aq_nic->aq_hw_ops->hw_ring_tx_start(aq_nic->aq_hw, &aq_ptp->ptp_tx);
908 	if (err < 0)
909 		goto err_exit;
910 
911 	err = aq_nic->aq_hw_ops->hw_ring_rx_start(aq_nic->aq_hw, &aq_ptp->ptp_rx);
912 	if (err < 0)
913 		goto err_exit;
914 
915 	err = aq_nic->aq_hw_ops->hw_ring_rx_start(aq_nic->aq_hw,
916 						  &aq_ptp->hwts_rx);
917 	if (err < 0)
918 		goto err_exit;
919 
920 	napi_enable(&aq_ptp->napi);
921 
922 err_exit:
923 	return err;
924 }
925 
926 void aq_ptp_ring_stop(struct aq_nic_s *aq_nic)
927 {
928 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
929 
930 	if (!aq_ptp)
931 		return;
932 
933 	aq_nic->aq_hw_ops->hw_ring_tx_stop(aq_nic->aq_hw, &aq_ptp->ptp_tx);
934 	aq_nic->aq_hw_ops->hw_ring_rx_stop(aq_nic->aq_hw, &aq_ptp->ptp_rx);
935 
936 	aq_nic->aq_hw_ops->hw_ring_rx_stop(aq_nic->aq_hw, &aq_ptp->hwts_rx);
937 
938 	napi_disable(&aq_ptp->napi);
939 }
940 
941 void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic)
942 {
943 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
944 
945 	if (!aq_ptp || !aq_ptp->ptp_tx.aq_nic || !aq_ptp->ptp_rx.aq_nic)
946 		return;
947 
948 	aq_ring_tx_clean(&aq_ptp->ptp_tx);
949 	aq_ring_rx_deinit(&aq_ptp->ptp_rx);
950 }
951 
952 int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
953 {
954 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
955 	unsigned int tx_ring_idx, rx_ring_idx;
956 	int err;
957 
958 	if (!aq_ptp)
959 		return 0;
960 
961 	tx_ring_idx = aq_ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
962 
963 	err = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic,
964 			       tx_ring_idx, &aq_nic->aq_nic_cfg);
965 	if (err)
966 		goto err_exit;
967 
968 	rx_ring_idx = aq_ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
969 
970 	err = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic,
971 			       rx_ring_idx, &aq_nic->aq_nic_cfg);
972 	if (err)
973 		goto err_exit_ptp_tx;
974 
975 	err = aq_ring_hwts_rx_alloc(&aq_ptp->hwts_rx, aq_nic, PTP_HWST_RING_IDX,
976 				    aq_nic->aq_nic_cfg.rxds,
977 				    aq_nic->aq_nic_cfg.aq_hw_caps->rxd_size);
978 	if (err)
979 		goto err_exit_ptp_rx;
980 
981 	err = aq_ptp_skb_ring_init(&aq_ptp->skb_ring, aq_nic->aq_nic_cfg.rxds);
982 	if (err != 0) {
983 		err = -ENOMEM;
984 		goto err_exit_hwts_rx;
985 	}
986 
987 	aq_ptp->ptp_ring_param.vec_idx = aq_ptp->idx_vector;
988 	aq_ptp->ptp_ring_param.cpu = aq_ptp->ptp_ring_param.vec_idx +
989 			aq_nic_get_cfg(aq_nic)->aq_rss.base_cpu_number;
990 	cpumask_set_cpu(aq_ptp->ptp_ring_param.cpu,
991 			&aq_ptp->ptp_ring_param.affinity_mask);
992 
993 	return 0;
994 
995 err_exit_hwts_rx:
996 	aq_ring_hwts_rx_free(&aq_ptp->hwts_rx);
997 err_exit_ptp_rx:
998 	aq_ring_free(&aq_ptp->ptp_rx);
999 err_exit_ptp_tx:
1000 	aq_ring_free(&aq_ptp->ptp_tx);
1001 err_exit:
1002 	return err;
1003 }
1004 
1005 void aq_ptp_ring_free(struct aq_nic_s *aq_nic)
1006 {
1007 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
1008 
1009 	if (!aq_ptp)
1010 		return;
1011 
1012 	aq_ring_free(&aq_ptp->ptp_tx);
1013 	aq_ring_free(&aq_ptp->ptp_rx);
1014 	aq_ring_hwts_rx_free(&aq_ptp->hwts_rx);
1015 
1016 	aq_ptp_skb_ring_release(&aq_ptp->skb_ring);
1017 }
1018 
1019 #define MAX_PTP_GPIO_COUNT 4
1020 
1021 static struct ptp_clock_info aq_ptp_clock = {
1022 	.owner		= THIS_MODULE,
1023 	.name		= "atlantic ptp",
1024 	.max_adj	= 999999999,
1025 	.n_ext_ts	= 0,
1026 	.pps		= 0,
1027 	.adjfine	= aq_ptp_adjfine,
1028 	.adjtime	= aq_ptp_adjtime,
1029 	.gettime64	= aq_ptp_gettime,
1030 	.settime64	= aq_ptp_settime,
1031 	.n_per_out	= 0,
1032 	.enable		= aq_ptp_gpio_feature_enable,
1033 	.n_pins		= 0,
1034 	.verify		= aq_ptp_verify,
1035 	.pin_config	= NULL,
1036 };
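/* Once registered via ptp_clock_register(), this clock shows up as
 * /dev/ptpN and the callbacks above are driven through the dynamic
 * posix-clock interface. A minimal user-space sketch (the FD_TO_CLOCKID
 * convention is the one used by tools like testptp; the device path is
 * illustrative):
 *
 *	#define CLOCKFD 3
 *	#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)
 *
 *	int fd = open("/dev/ptp0", O_RDWR);
 *	struct timespec ts;
 *
 *	clock_gettime(FD_TO_CLOCKID(fd), &ts);	// ends up in aq_ptp_gettime()
 *
 * .max_adj advertises that aq_ptp_adjfine() accepts frequency offsets of
 * up to +/-999999999 ppb.
 */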
1037 
1038 #define ptp_offset_init(__idx, __mbps, __egress, __ingress)   do { \
1039 		ptp_offset[__idx].mbps = (__mbps); \
1040 		ptp_offset[__idx].egress = (__egress); \
1041 		ptp_offset[__idx].ingress = (__ingress); } \
1042 		while (0)
1043 
1044 static void aq_ptp_offset_init_from_fw(const struct hw_atl_ptp_offset *offsets)
1045 {
1046 	int i;
1047 
1048 	/* Load offsets for PTP */
1049 	for (i = 0; i < ARRAY_SIZE(ptp_offset); i++) {
1050 		switch (i) {
1051 		/* 100M */
1052 		case ptp_offset_idx_100:
1053 			ptp_offset_init(i, 100,
1054 					offsets->egress_100,
1055 					offsets->ingress_100);
1056 			break;
1057 		/* 1G */
1058 		case ptp_offset_idx_1000:
1059 			ptp_offset_init(i, 1000,
1060 					offsets->egress_1000,
1061 					offsets->ingress_1000);
1062 			break;
1063 		/* 2.5G */
1064 		case ptp_offset_idx_2500:
1065 			ptp_offset_init(i, 2500,
1066 					offsets->egress_2500,
1067 					offsets->ingress_2500);
1068 			break;
1069 		/* 5G */
1070 		case ptp_offset_idx_5000:
1071 			ptp_offset_init(i, 5000,
1072 					offsets->egress_5000,
1073 					offsets->ingress_5000);
1074 			break;
1075 		/* 10G */
1076 		case ptp_offset_idx_10000:
1077 			ptp_offset_init(i, 10000,
1078 					offsets->egress_10000,
1079 					offsets->ingress_10000);
1080 			break;
1081 		}
1082 	}
1083 }
1084 
1085 static void aq_ptp_offset_init(const struct hw_atl_ptp_offset *offsets)
1086 {
1087 	memset(ptp_offset, 0, sizeof(ptp_offset));
1088 
1089 	aq_ptp_offset_init_from_fw(offsets);
1090 }
1091 
1092 static void aq_ptp_gpio_init(struct ptp_clock_info *info,
1093 			     struct hw_atl_info *hw_info)
1094 {
1095 	struct ptp_pin_desc pin_desc[MAX_PTP_GPIO_COUNT];
1096 	u32 extts_pin_cnt = 0;
1097 	u32 out_pin_cnt = 0;
1098 	u32 i;
1099 
1100 	memset(pin_desc, 0, sizeof(pin_desc));
1101 
1102 	for (i = 0; i < MAX_PTP_GPIO_COUNT - 1; i++) {
1103 		if (hw_info->gpio_pin[i] ==
1104 		    (GPIO_PIN_FUNCTION_PTP0 + out_pin_cnt)) {
1105 			snprintf(pin_desc[out_pin_cnt].name,
1106 				 sizeof(pin_desc[out_pin_cnt].name),
1107 				 "AQ_GPIO%d", i);
1108 			pin_desc[out_pin_cnt].index = out_pin_cnt;
1109 			pin_desc[out_pin_cnt].chan = out_pin_cnt;
1110 			pin_desc[out_pin_cnt++].func = PTP_PF_PEROUT;
1111 		}
1112 	}
1113 
1114 	info->n_per_out = out_pin_cnt;
1115 
1116 	if (hw_info->caps_ex & BIT(CAPS_EX_PHY_CTRL_TS_PIN)) {
1117 		extts_pin_cnt += 1;
1118 
1119 		snprintf(pin_desc[out_pin_cnt].name,
1120 			 sizeof(pin_desc[out_pin_cnt].name),
1121 			  "AQ_GPIO%d", out_pin_cnt);
1122 		pin_desc[out_pin_cnt].index = out_pin_cnt;
1123 		pin_desc[out_pin_cnt].chan = 0;
1124 		pin_desc[out_pin_cnt].func = PTP_PF_EXTTS;
1125 	}
1126 
1127 	info->n_pins = out_pin_cnt + extts_pin_cnt;
1128 	info->n_ext_ts = extts_pin_cnt;
1129 
1130 	if (!info->n_pins)
1131 		return;
1132 
1133 	info->pin_config = kcalloc(info->n_pins, sizeof(struct ptp_pin_desc),
1134 				   GFP_KERNEL);
1135 
1136 	if (!info->pin_config)
1137 		return;
1138 
1139 	memcpy(info->pin_config, &pin_desc,
1140 	       sizeof(struct ptp_pin_desc) * info->n_pins);
1141 }
1142 
1143 void aq_ptp_clock_init(struct aq_nic_s *aq_nic)
1144 {
1145 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
1146 	struct timespec64 ts;
1147 
1148 	ktime_get_real_ts64(&ts);
1149 	aq_ptp_settime(&aq_ptp->ptp_info, &ts);
1150 }
1151 
1152 static void aq_ptp_poll_sync_work_cb(struct work_struct *w);
1153 
1154 int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
1155 {
1156 	bool a1_ptp = ATL_HW_IS_CHIP_FEATURE(aq_nic->aq_hw, ATLANTIC);
1157 	struct hw_atl_utils_mbox mbox;
1158 	struct ptp_clock *clock;
1159 	struct aq_ptp_s *aq_ptp;
1160 	int err = 0;
1161 
1162 	if (!a1_ptp) {
1163 		aq_nic->aq_ptp = NULL;
1164 		return 0;
1165 	}
1166 
1167 	if (!aq_nic->aq_hw_ops->hw_get_ptp_ts) {
1168 		aq_nic->aq_ptp = NULL;
1169 		return 0;
1170 	}
1171 
1172 	if (!aq_nic->aq_fw_ops->enable_ptp) {
1173 		aq_nic->aq_ptp = NULL;
1174 		return 0;
1175 	}
1176 
1177 	hw_atl_utils_mpi_read_stats(aq_nic->aq_hw, &mbox);
1178 
1179 	if (!(mbox.info.caps_ex & BIT(CAPS_EX_PHY_PTP_EN))) {
1180 		aq_nic->aq_ptp = NULL;
1181 		return 0;
1182 	}
1183 
1184 	aq_ptp_offset_init(&mbox.info.ptp_offset);
1185 
1186 	aq_ptp = kzalloc(sizeof(*aq_ptp), GFP_KERNEL);
1187 	if (!aq_ptp) {
1188 		err = -ENOMEM;
1189 		goto err_exit;
1190 	}
1191 
1192 	aq_ptp->aq_nic = aq_nic;
1193 	aq_ptp->a1_ptp = a1_ptp;
1194 
1195 	spin_lock_init(&aq_ptp->ptp_lock);
1196 	spin_lock_init(&aq_ptp->ptp_ring_lock);
1197 
1198 	aq_ptp->ptp_info = aq_ptp_clock;
1199 	aq_ptp_gpio_init(&aq_ptp->ptp_info, &mbox.info);
1200 	clock = ptp_clock_register(&aq_ptp->ptp_info, &aq_nic->ndev->dev);
1201 	if (IS_ERR(clock)) {
1202 		netdev_err(aq_nic->ndev, "ptp_clock_register failed\n");
1203 		err = PTR_ERR(clock);
1204 		goto err_exit;
1205 	}
1206 	aq_ptp->ptp_clock = clock;
1207 	aq_ptp_tx_timeout_init(&aq_ptp->ptp_tx_timeout);
1208 
1209 	atomic_set(&aq_ptp->offset_egress, 0);
1210 	atomic_set(&aq_ptp->offset_ingress, 0);
1211 
1212 	netif_napi_add(aq_nic_get_ndev(aq_nic), &aq_ptp->napi, aq_ptp_poll);
1213 
1214 	aq_ptp->idx_vector = idx_vec;
1215 
1216 	aq_nic->aq_ptp = aq_ptp;
1217 
1218 	/* enable ptp counter */
1219 	aq_utils_obj_set(&aq_nic->aq_hw->flags, AQ_HW_PTP_AVAILABLE);
1220 	mutex_lock(&aq_nic->fwreq_mutex);
1221 	aq_nic->aq_fw_ops->enable_ptp(aq_nic->aq_hw, 1);
1222 	aq_ptp_clock_init(aq_nic);
1223 	mutex_unlock(&aq_nic->fwreq_mutex);
1224 
1225 	INIT_DELAYED_WORK(&aq_ptp->poll_sync, &aq_ptp_poll_sync_work_cb);
1226 	aq_ptp->eth_type_filter.location =
1227 			aq_nic_reserve_filter(aq_nic, aq_rx_filter_ethertype);
1228 	aq_ptp->udp_filter.location =
1229 			aq_nic_reserve_filter(aq_nic, aq_rx_filter_l3l4);
1230 
1231 	return 0;
1232 
1233 err_exit:
1234 	if (aq_ptp)
1235 		kfree(aq_ptp->ptp_info.pin_config);
1236 	kfree(aq_ptp);
1237 	aq_nic->aq_ptp = NULL;
1238 	return err;
1239 }
1240 
1241 void aq_ptp_unregister(struct aq_nic_s *aq_nic)
1242 {
1243 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
1244 
1245 	if (!aq_ptp)
1246 		return;
1247 
1248 	ptp_clock_unregister(aq_ptp->ptp_clock);
1249 }
1250 
1251 void aq_ptp_free(struct aq_nic_s *aq_nic)
1252 {
1253 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
1254 
1255 	if (!aq_ptp)
1256 		return;
1257 
1258 	aq_nic_release_filter(aq_nic, aq_rx_filter_ethertype,
1259 			      aq_ptp->eth_type_filter.location);
1260 	aq_nic_release_filter(aq_nic, aq_rx_filter_l3l4,
1261 			      aq_ptp->udp_filter.location);
1262 	cancel_delayed_work_sync(&aq_ptp->poll_sync);
1263 	/* disable ptp */
1264 	mutex_lock(&aq_nic->fwreq_mutex);
1265 	aq_nic->aq_fw_ops->enable_ptp(aq_nic->aq_hw, 0);
1266 	mutex_unlock(&aq_nic->fwreq_mutex);
1267 
1268 	kfree(aq_ptp->ptp_info.pin_config);
1269 
1270 	netif_napi_del(&aq_ptp->napi);
1271 	kfree(aq_ptp);
1272 	aq_nic->aq_ptp = NULL;
1273 }
1274 
1275 struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp)
1276 {
1277 	return aq_ptp->ptp_clock;
1278 }
1279 
1280 /* PTP external GPIO nanoseconds count */
1281 static uint64_t aq_ptp_get_sync1588_ts(struct aq_nic_s *aq_nic)
1282 {
1283 	u64 ts = 0;
1284 
1285 	if (aq_nic->aq_hw_ops->hw_get_sync_ts)
1286 		aq_nic->aq_hw_ops->hw_get_sync_ts(aq_nic->aq_hw, &ts);
1287 
1288 	return ts;
1289 }
1290 
1291 static void aq_ptp_start_work(struct aq_ptp_s *aq_ptp)
1292 {
1293 	if (aq_ptp->extts_pin_enabled) {
1294 		aq_ptp->poll_timeout_ms = POLL_SYNC_TIMER_MS;
1295 		aq_ptp->last_sync1588_ts =
1296 				aq_ptp_get_sync1588_ts(aq_ptp->aq_nic);
1297 		schedule_delayed_work(&aq_ptp->poll_sync,
1298 				      msecs_to_jiffies(aq_ptp->poll_timeout_ms));
1299 	}
1300 }
1301 
1302 int aq_ptp_link_change(struct aq_nic_s *aq_nic)
1303 {
1304 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
1305 
1306 	if (!aq_ptp)
1307 		return 0;
1308 
1309 	if (aq_nic->aq_hw->aq_link_status.mbps)
1310 		aq_ptp_start_work(aq_ptp);
1311 	else
1312 		cancel_delayed_work_sync(&aq_ptp->poll_sync);
1313 
1314 	return 0;
1315 }
1316 
1317 static bool aq_ptp_sync_ts_updated(struct aq_ptp_s *aq_ptp, u64 *new_ts)
1318 {
1319 	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
1320 	u64 sync_ts2;
1321 	u64 sync_ts;
1322 
1323 	sync_ts = aq_ptp_get_sync1588_ts(aq_nic);
1324 
1325 	if (sync_ts != aq_ptp->last_sync1588_ts) {
1326 		sync_ts2 = aq_ptp_get_sync1588_ts(aq_nic);
1327 		if (sync_ts != sync_ts2) {
1328 			sync_ts = sync_ts2;
1329 			sync_ts2 = aq_ptp_get_sync1588_ts(aq_nic);
1330 			if (sync_ts != sync_ts2) {
1331 				netdev_err(aq_nic->ndev,
1332 					   "%s: Unable to get correct GPIO TS",
1333 					   __func__);
1334 				sync_ts = 0;
1335 			}
1336 		}
1337 
1338 		*new_ts = sync_ts;
1339 		return true;
1340 	}
1341 	return false;
1342 }
1343 
1344 static int aq_ptp_check_sync1588(struct aq_ptp_s *aq_ptp)
1345 {
1346 	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
1347 	u64 sync_ts;
1348 
1349 	 /* Sync1588 pin was triggered */
1350 	if (aq_ptp_sync_ts_updated(aq_ptp, &sync_ts)) {
1351 		if (aq_ptp->extts_pin_enabled) {
1352 			struct ptp_clock_event ptp_event;
1353 			u64 time = 0;
1354 
1355 			aq_nic->aq_hw_ops->hw_ts_to_sys_clock(aq_nic->aq_hw,
1356 							      sync_ts, &time);
1357 			ptp_event.index = aq_ptp->ptp_info.n_pins - 1;
1358 			ptp_event.timestamp = time;
1359 
1360 			ptp_event.type = PTP_CLOCK_EXTTS;
1361 			ptp_clock_event(aq_ptp->ptp_clock, &ptp_event);
1362 		}
1363 
1364 		aq_ptp->last_sync1588_ts = sync_ts;
1365 	}
1366 
1367 	return 0;
1368 }
1369 
1370 static void aq_ptp_poll_sync_work_cb(struct work_struct *w)
1371 {
1372 	struct delayed_work *dw = to_delayed_work(w);
1373 	struct aq_ptp_s *aq_ptp = container_of(dw, struct aq_ptp_s, poll_sync);
1374 
1375 	aq_ptp_check_sync1588(aq_ptp);
1376 
1377 	if (aq_ptp->extts_pin_enabled) {
1378 		unsigned long timeout = msecs_to_jiffies(aq_ptp->poll_timeout_ms);
1379 
1380 		schedule_delayed_work(&aq_ptp->poll_sync, timeout);
1381 	}
1382 }
1383 
1384 int aq_ptp_get_ring_cnt(struct aq_nic_s *aq_nic, const enum atl_ring_type ring_type)
1385 {
1386 	if (!aq_nic->aq_ptp)
1387 		return 0;
1388 
1389 	/* Additional RX ring is allocated for PTP HWTS on A1 */
1390 	return (aq_nic->aq_ptp->a1_ptp && ring_type == ATL_RING_RX) ? 2 : 1;
1391 }
1392 
1393 u64 *aq_ptp_get_stats(struct aq_nic_s *aq_nic, u64 *data)
1394 {
1395 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
1396 	unsigned int count = 0U;
1397 
1398 	if (!aq_ptp)
1399 		return data;
1400 
1401 	count = aq_ring_fill_stats_data(&aq_ptp->ptp_rx, data);
1402 	data += count;
1403 	count = aq_ring_fill_stats_data(&aq_ptp->ptp_tx, data);
1404 	data += count;
1405 
1406 	if (aq_ptp->a1_ptp) {
1407 		/* Only Receive ring for HWTS */
1408 		count = aq_ring_fill_stats_data(&aq_ptp->hwts_rx, data);
1409 		data += count;
1410 	}
1411 
1412 	return data;
1413 }
1414 
1415 #endif
1416