// SPDX-License-Identifier: GPL-2.0-only
/*
 * 1588 PTP support for Cadence GEM device.
 *
 * Copyright (C) 2017 Cadence Design Systems - https://www.cadence.com
 *
 * Authors: Rafal Ozieblo <rafalo@cadence.com>
 *          Bartosz Folta <bfolta@cadence.com>
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/time64.h>
#include <linux/ptp_classify.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/net_tstamp.h>
#include <linux/circ_buf.h>
#include <linux/spinlock.h>

#include "macb.h"

#define  GEM_PTP_TIMER_NAME "gem-ptp-timer"

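/* Return the PTP extension of a DMA descriptor; its offset depends on
 * whether the optional 64-bit address words are also present. Returns NULL
 * if the descriptors carry no timestamp words at all.
 */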
static struct macb_dma_desc_ptp *macb_ptp_desc(struct macb *bp,
					       struct macb_dma_desc *desc)
{
	if (bp->hw_dma_cap == HW_DMA_CAP_PTP)
		return (struct macb_dma_desc_ptp *)
				((u8 *)desc + sizeof(struct macb_dma_desc));
	if (bp->hw_dma_cap == HW_DMA_CAP_64B_PTP)
		return (struct macb_dma_desc_ptp *)
				((u8 *)desc + sizeof(struct macb_dma_desc)
				+ sizeof(struct macb_dma_desc_64));
	return NULL;
}

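/* Read the current TSU time into @ts. The nanoseconds register is sampled
 * before and after the seconds registers so that a nanosecond rollover in
 * between can be detected and the registers re-read.
 */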
static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
	unsigned long flags;
	long first, second;
	u32 secl, sech;

	spin_lock_irqsave(&bp->tsu_clk_lock, flags);
	first = gem_readl(bp, TN);
	secl = gem_readl(bp, TSL);
	sech = gem_readl(bp, TSH);
	second = gem_readl(bp, TN);

	/* test for nsec rollover */
	if (first > second) {
		/* if so, use later read & re-read seconds
		 * (assume all done within 1s)
		 */
		ts->tv_nsec = gem_readl(bp, TN);
		secl = gem_readl(bp, TSL);
		sech = gem_readl(bp, TSH);
	} else {
		ts->tv_nsec = first;
	}

	spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);
	ts->tv_sec = (((u64)sech << GEM_TSL_SIZE) | secl)
			& TSU_SEC_MAX_VAL;
	return 0;
}

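/* Load @ts into the TSU. The nanoseconds register is cleared first and the
 * seconds are written upper half before lower half, since the hardware does
 * not latch the individual writes (see the comments below).
 */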
static int gem_tsu_set_time(struct ptp_clock_info *ptp,
			    const struct timespec64 *ts)
{
	struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
	unsigned long flags;
	u32 ns, sech, secl;

	secl = (u32)ts->tv_sec;
	sech = (ts->tv_sec >> GEM_TSL_SIZE) & ((1 << GEM_TSH_SIZE) - 1);
	ns = ts->tv_nsec;

	spin_lock_irqsave(&bp->tsu_clk_lock, flags);

	/* TSH doesn't latch the time and there is no atomicity! */
	gem_writel(bp, TN, 0); /* clear to avoid overflow */
	gem_writel(bp, TSH, sech);
	/* write lower bits 2nd, for synchronized secs update */
	gem_writel(bp, TSL, secl);
	gem_writel(bp, TN, ns);

	spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);

	return 0;
}

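/* Program the TSU increment (integer nanoseconds plus a binary sub-ns
 * fraction) that the timer advances by on each TSU clock tick.
 */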
static int gem_tsu_incr_set(struct macb *bp, struct tsu_incr *incr_spec)
{
	unsigned long flags;

	/* The tsu_timer_incr register must be written after the
	 * tsu_timer_incr_sub_ns register; writing tsu_timer_incr is what
	 * makes the value written to tsu_timer_incr_sub_ns take effect.
	 */
	spin_lock_irqsave(&bp->tsu_clk_lock, flags);
	/* RegBit[15:0] = Subns[23:8]; RegBit[31:24] = Subns[7:0] */
	gem_writel(bp, TISUBN, GEM_BF(SUBNSINCRL, incr_spec->sub_ns) |
		   GEM_BF(SUBNSINCRH, (incr_spec->sub_ns >>
			  GEM_SUBNSINCRL_SIZE)));
	gem_writel(bp, TI, GEM_BF(NSINCR, incr_spec->ns));
	spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);

	return 0;
}

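/* Adjust the TSU rate by @scaled_ppm (ppm with a 16-bit binary fraction).
 * The base increment computed at init time is adjusted by roughly
 * (base * scaled_ppm) / (2^16 * 10^6) and written back to the increment
 * registers.
 */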
static int gem_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
	struct tsu_incr incr_spec;
	bool neg_adj = false;
	u32 word;
	u64 adj;

	if (scaled_ppm < 0) {
		neg_adj = true;
		scaled_ppm = -scaled_ppm;
	}

	/* Adjustment is relative to base frequency */
	incr_spec.sub_ns = bp->tsu_incr.sub_ns;
	incr_spec.ns = bp->tsu_incr.ns;

	/* scaling: ns(8bit) | fractions(24bit) */
	word = ((u64)incr_spec.ns << GEM_SUBNSINCR_SIZE) + incr_spec.sub_ns;
	adj = (u64)scaled_ppm * word;
	/* Divide with rounding, equivalent to floating-point division:
	 * (adj / USEC_PER_SEC) + 0.5
	 */
	adj += (USEC_PER_SEC >> 1);
	adj >>= PPM_FRACTION; /* remove fractions */
	adj = div_u64(adj, USEC_PER_SEC);
	adj = neg_adj ? (word - adj) : (word + adj);

	incr_spec.ns = (adj >> GEM_SUBNSINCR_SIZE)
			& ((1 << GEM_NSINCR_SIZE) - 1);
	incr_spec.sub_ns = adj & ((1 << GEM_SUBNSINCR_SIZE) - 1);
	gem_tsu_incr_set(bp, &incr_spec);
	return 0;
}

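/* Shift the TSU by @delta nanoseconds. Small offsets are applied through
 * the hardware adjust register (TA); offsets too large for that register
 * fall back to a read-modify-write of the full TSU time.
 */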
static int gem_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
	struct timespec64 now, then = ns_to_timespec64(delta);
	u32 adj, sign = 0;

	if (delta < 0) {
		sign = 1;
		delta = -delta;
	}

	if (delta > TSU_NSEC_MAX_VAL) {
		gem_tsu_get_time(&bp->ptp_clock_info, &now);
		now = timespec64_add(now, then);

		gem_tsu_set_time(&bp->ptp_clock_info,
				 (const struct timespec64 *)&now);
	} else {
		adj = (sign << GEM_ADDSUB_OFFSET) | delta;

		gem_writel(bp, TA, adj);
	}

	return 0;
}

static int gem_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}

static const struct ptp_clock_info gem_ptp_caps_template = {
	.owner		= THIS_MODULE,
	.name		= GEM_PTP_TIMER_NAME,
	.max_adj	= 0,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.n_pins		= 0,
	.pps		= 1,
	.adjfine	= gem_ptp_adjfine,
	.adjtime	= gem_ptp_adjtime,
	.gettime64	= gem_tsu_get_time,
	.settime64	= gem_tsu_set_time,
	.enable		= gem_ptp_enable,
};

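/* Derive the base TSU increment from the TSU clock rate: the integer number
 * of nanoseconds per clock tick plus the remainder as a binary fraction.
 */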
static void gem_ptp_init_timer(struct macb *bp)
{
	u32 rem = 0;
	u64 adj;

	bp->tsu_incr.ns = div_u64_rem(NSEC_PER_SEC, bp->tsu_rate, &rem);
	if (rem) {
		adj = rem;
		adj <<= GEM_SUBNSINCR_SIZE;
		bp->tsu_incr.sub_ns = div_u64(adj, bp->tsu_rate);
	} else {
		bp->tsu_incr.sub_ns = 0;
	}
}

static void gem_ptp_init_tsu(struct macb *bp)
{
	struct timespec64 ts;

	/* 1. get current system time */
	ts = ns_to_timespec64(ktime_to_ns(ktime_get_real()));

	/* 2. set ptp timer */
	gem_tsu_set_time(&bp->ptp_clock_info, &ts);

	/* 3. set PTP timer increment value to BASE_INCREMENT */
	gem_tsu_incr_set(bp, &bp->tsu_incr);

	gem_writel(bp, TA, 0);
}

static void gem_ptp_clear_timer(struct macb *bp)
{
	bp->tsu_incr.sub_ns = 0;
	bp->tsu_incr.ns = 0;

	gem_writel(bp, TISUBN, GEM_BF(SUBNSINCR, 0));
	gem_writel(bp, TI, GEM_BF(NSINCR, 0));
	gem_writel(bp, TA, 0);
}

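/* Combine the two descriptor timestamp words into a timespec64. The
 * descriptor only carries the low-order seconds bits, so the upper bits are
 * taken from the TSU, compensating when the partial value has wrapped.
 */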
static int gem_hw_timestamp(struct macb *bp, u32 dma_desc_ts_1,
			    u32 dma_desc_ts_2, struct timespec64 *ts)
{
	struct timespec64 tsu;

	ts->tv_sec = (GEM_BFEXT(DMA_SECH, dma_desc_ts_2) << GEM_DMA_SECL_SIZE) |
			GEM_BFEXT(DMA_SECL, dma_desc_ts_1);
	ts->tv_nsec = GEM_BFEXT(DMA_NSEC, dma_desc_ts_1);

	/* TSU overlapping workaround
	 * The timestamp only contains lower few bits of seconds,
	 * so add value from 1588 timer
	 */
	gem_tsu_get_time(&bp->ptp_clock_info, &tsu);

	/* If the top bit is set in the timestamp,
	 * but not in 1588 timer, it has rolled over,
	 * so subtract max size
	 */
	if ((ts->tv_sec & (GEM_DMA_SEC_TOP >> 1)) &&
	    !(tsu.tv_sec & (GEM_DMA_SEC_TOP >> 1)))
		ts->tv_sec -= GEM_DMA_SEC_TOP;

	ts->tv_sec += ((~GEM_DMA_SEC_MASK) & tsu.tv_sec);

	return 0;
}

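/* Attach the RX hardware timestamp carried in @desc to @skb, provided the
 * descriptor indicates that a valid timestamp was captured.
 */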
void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb,
		     struct macb_dma_desc *desc)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
	struct macb_dma_desc_ptp *desc_ptp;
	struct timespec64 ts;

	if (GEM_BFEXT(DMA_RXVALID, desc->addr)) {
		desc_ptp = macb_ptp_desc(bp, desc);
		gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
		memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
	}
}

static void gem_tstamp_tx(struct macb *bp, struct sk_buff *skb,
			  struct macb_dma_desc_ptp *desc_ptp)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct timespec64 ts;

	gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
	skb_tstamp_tx(skb, &shhwtstamps);
}

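/* Queue a TX timestamp for delivery: copy the descriptor timestamp words
 * and the skb into the per-queue ring and kick the worker, which reports
 * the timestamp and frees the skb outside the TX completion path.
 */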
int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
		    struct macb_dma_desc *desc)
{
	unsigned long tail = READ_ONCE(queue->tx_ts_tail);
	unsigned long head = queue->tx_ts_head;
	struct macb_dma_desc_ptp *desc_ptp;
	struct gem_tx_ts *tx_timestamp;

	if (!GEM_BFEXT(DMA_TXVALID, desc->ctrl))
		return -EINVAL;

	if (CIRC_SPACE(head, tail, PTP_TS_BUFFER_SIZE) == 0)
		return -ENOMEM;

	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	desc_ptp = macb_ptp_desc(queue->bp, desc);
	tx_timestamp = &queue->tx_timestamps[head];
	tx_timestamp->skb = skb;
	/* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */
	dma_rmb();
	tx_timestamp->desc_ptp.ts_1 = desc_ptp->ts_1;
	tx_timestamp->desc_ptp.ts_2 = desc_ptp->ts_2;
	/* move head */
	smp_store_release(&queue->tx_ts_head,
			  (head + 1) & (PTP_TS_BUFFER_SIZE - 1));

	schedule_work(&queue->tx_ts_task);
	return 0;
}

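/* Work item that drains the per-queue TX timestamp ring, reporting each
 * queued timestamp to the stack and freeing the corresponding skb.
 */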
static void gem_tx_timestamp_flush(struct work_struct *work)
{
	struct macb_queue *queue =
			container_of(work, struct macb_queue, tx_ts_task);
	unsigned long head, tail;
	struct gem_tx_ts *tx_ts;

	/* take current head */
	head = smp_load_acquire(&queue->tx_ts_head);
	tail = queue->tx_ts_tail;

	while (CIRC_CNT(head, tail, PTP_TS_BUFFER_SIZE)) {
		tx_ts = &queue->tx_timestamps[tail];
		gem_tstamp_tx(queue->bp, tx_ts->skb, &tx_ts->desc_ptp);
		/* cleanup */
		dev_kfree_skb_any(tx_ts->skb);
		/* remove old tail */
		smp_store_release(&queue->tx_ts_tail,
				  (tail + 1) & (PTP_TS_BUFFER_SIZE - 1));
		tail = queue->tx_ts_tail;
	}
}

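/* Register the PTP clock: pick up the TSU rate and maximum adjustment from
 * the platform hooks, derive the base increment, set up the per-queue TX
 * timestamp rings and start the TSU from the current system time.
 */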
void gem_ptp_init(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned int q;

	bp->ptp_clock_info = gem_ptp_caps_template;

	/* nominal frequency and maximum adjustment in ppb */
	bp->tsu_rate = bp->ptp_info->get_tsu_rate(bp);
	bp->ptp_clock_info.max_adj = bp->ptp_info->get_ptp_max_adj();
	gem_ptp_init_timer(bp);
	bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &dev->dev);
	if (IS_ERR(bp->ptp_clock)) {
		pr_err("ptp clock register failed: %ld\n",
			PTR_ERR(bp->ptp_clock));
		bp->ptp_clock = NULL;
		return;
	} else if (bp->ptp_clock == NULL) {
		pr_err("ptp clock register failed\n");
		return;
	}

	spin_lock_init(&bp->tsu_clk_lock);
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue->tx_ts_head = 0;
		queue->tx_ts_tail = 0;
		INIT_WORK(&queue->tx_ts_task, gem_tx_timestamp_flush);
	}

	gem_ptp_init_tsu(bp);

	dev_info(&bp->pdev->dev, "%s ptp clock registered.\n",
		 GEM_PTP_TIMER_NAME);
}

void gem_ptp_remove(struct net_device *ndev)
{
	struct macb *bp = netdev_priv(ndev);

	if (bp->ptp_clock)
		ptp_clock_unregister(bp->ptp_clock);

	gem_ptp_clear_timer(bp);

	dev_info(&bp->pdev->dev, "%s ptp clock unregistered.\n",
		 GEM_PTP_TIMER_NAME);
}

static int gem_ptp_set_ts_mode(struct macb *bp,
			       enum macb_bd_control tx_bd_control,
			       enum macb_bd_control rx_bd_control)
{
	gem_writel(bp, TXBDCTRL, GEM_BF(TXTSMODE, tx_bd_control));
	gem_writel(bp, RXBDCTRL, GEM_BF(RXTSMODE, rx_bd_control));

	return 0;
}

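/* SIOCGHWTSTAMP handler: copy the currently configured hardware
 * timestamping settings back to user space.
 */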
int gem_get_hwtst(struct net_device *dev, struct ifreq *rq)
{
	struct hwtstamp_config *tstamp_config;
	struct macb *bp = netdev_priv(dev);

	tstamp_config = &bp->tstamp_config;
	if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0)
		return -EOPNOTSUPP;

	if (copy_to_user(rq->ifr_data, tstamp_config, sizeof(*tstamp_config)))
		return -EFAULT;

	return 0;
}

static int gem_ptp_set_one_step_sync(struct macb *bp, u8 enable)
{
	u32 reg_val;

	reg_val = macb_readl(bp, NCR);

	if (enable)
		macb_writel(bp, NCR, reg_val | MACB_BIT(OSSMODE));
	else
		macb_writel(bp, NCR, reg_val & ~MACB_BIT(OSSMODE));

	return 0;
}

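/* SIOCSHWTSTAMP handler: translate the requested TX/RX timestamping modes
 * into descriptor timestamp capture modes (plus NCR bits such as one-step
 * sync) and program the hardware accordingly.
 */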
int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	enum macb_bd_control tx_bd_control = TSTAMP_DISABLED;
	enum macb_bd_control rx_bd_control = TSTAMP_DISABLED;
	struct hwtstamp_config *tstamp_config;
	struct macb *bp = netdev_priv(dev);
	u32 regval;

	tstamp_config = &bp->tstamp_config;
	if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0)
		return -EOPNOTSUPP;

	if (copy_from_user(tstamp_config, ifr->ifr_data,
			   sizeof(*tstamp_config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (tstamp_config->flags)
		return -EINVAL;

	switch (tstamp_config->tx_type) {
	case HWTSTAMP_TX_OFF:
		break;
	case HWTSTAMP_TX_ONESTEP_SYNC:
		if (gem_ptp_set_one_step_sync(bp, 1) != 0)
			return -ERANGE;
		fallthrough;
	case HWTSTAMP_TX_ON:
		tx_bd_control = TSTAMP_ALL_FRAMES;
		break;
	default:
		return -ERANGE;
	}

	switch (tstamp_config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		rx_bd_control = TSTAMP_ALL_PTP_FRAMES;
		tstamp_config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		regval = macb_readl(bp, NCR);
		macb_writel(bp, NCR, (regval | MACB_BIT(SRTSM)));
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_ALL:
		rx_bd_control = TSTAMP_ALL_FRAMES;
		tstamp_config->rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		tstamp_config->rx_filter = HWTSTAMP_FILTER_NONE;
		return -ERANGE;
	}

	if (gem_ptp_set_ts_mode(bp, tx_bd_control, rx_bd_control) != 0)
		return -ERANGE;

	if (copy_to_user(ifr->ifr_data, tstamp_config, sizeof(*tstamp_config)))
		return -EFAULT;

	return 0;
}