1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Fast Ethernet Controller (ENET) PTP driver for MX6x.
4 *
5 * Copyright (C) 2012 Freescale Semiconductor, Inc.
6 */
7
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10 #include <linux/bitops.h>
11 #include <linux/clk.h>
12 #include <linux/delay.h>
13 #include <linux/errno.h>
14 #include <linux/etherdevice.h>
15 #include <linux/fec.h>
16 #include <linux/interrupt.h>
17 #include <linux/io.h>
18 #include <linux/ioport.h>
19 #include <linux/irq.h>
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/netdevice.h>
23 #include <linux/of.h>
24 #include <linux/of_net.h>
25 #include <linux/pci.h>
26 #include <linux/phy.h>
27 #include <linux/platform_device.h>
28 #include <linux/ptrace.h>
29 #include <linux/skbuff.h>
30 #include <linux/slab.h>
31 #include <linux/spinlock.h>
32 #include <linux/string.h>
33 #include <linux/workqueue.h>
34
35 #include "fec.h"
36
37 /* FEC 1588 register bits */
38 #define FEC_T_CTRL_SLAVE 0x00002000
39 #define FEC_T_CTRL_CAPTURE 0x00000800
40 #define FEC_T_CTRL_RESTART 0x00000200
41 #define FEC_T_CTRL_PERIOD_RST 0x00000030
42 #define FEC_T_CTRL_PERIOD_EN 0x00000010
43 #define FEC_T_CTRL_ENABLE 0x00000001
44
45 #define FEC_T_INC_MASK 0x0000007f
46 #define FEC_T_INC_OFFSET 0
47 #define FEC_T_INC_CORR_MASK 0x00007f00
48 #define FEC_T_INC_CORR_OFFSET 8
49
50 #define FEC_T_CTRL_PINPER 0x00000080
51 #define FEC_T_TF0_MASK 0x00000001
52 #define FEC_T_TF0_OFFSET 0
53 #define FEC_T_TF1_MASK 0x00000002
54 #define FEC_T_TF1_OFFSET 1
55 #define FEC_T_TF2_MASK 0x00000004
56 #define FEC_T_TF2_OFFSET 2
57 #define FEC_T_TF3_MASK 0x00000008
58 #define FEC_T_TF3_OFFSET 3
59 #define FEC_T_TDRE_MASK 0x00000001
60 #define FEC_T_TDRE_OFFSET 0
61 #define FEC_T_TMODE_MASK 0x0000003C
62 #define FEC_T_TMODE_OFFSET 2
63 #define FEC_T_TIE_MASK 0x00000040
64 #define FEC_T_TIE_OFFSET 6
65 #define FEC_T_TF_MASK 0x00000080
66 #define FEC_T_TF_OFFSET 7
67
68 #define FEC_ATIME_CTRL 0x400
69 #define FEC_ATIME 0x404
70 #define FEC_ATIME_EVT_OFFSET 0x408
71 #define FEC_ATIME_EVT_PERIOD 0x40c
72 #define FEC_ATIME_CORR 0x410
73 #define FEC_ATIME_INC 0x414
74 #define FEC_TS_TIMESTAMP 0x418
75
76 #define FEC_TGSR 0x604
77 #define FEC_TCSR(n) (0x608 + n * 0x08)
78 #define FEC_TCCR(n) (0x60C + n * 0x08)
79 #define MAX_TIMER_CHANNEL 3
80 #define FEC_TMODE_TOGGLE 0x05
81 #define FEC_HIGH_PULSE 0x0F
82
83 #define FEC_CC_MULT (1 << 31)
84 #define FEC_COUNTER_PERIOD (1 << 31)
85 #define PPS_OUPUT_RELOAD_PERIOD NSEC_PER_SEC
86 #define DEFAULT_PPS_CHANNEL 0
87
88 #define FEC_PTP_MAX_NSEC_PERIOD 4000000000ULL
89 #define FEC_PTP_MAX_NSEC_COUNTER 0x80000000ULL
90
91 /**
92 * fec_ptp_read - read raw cycle counter (to be used by time counter)
93 * @cc: the cyclecounter structure
94 *
95 * this function reads the cyclecounter registers and is called by the
96 * cyclecounter structure used to construct a ns counter from the
97 * arbitrary fixed point registers
98 */
fec_ptp_read(struct cyclecounter * cc)99 static u64 fec_ptp_read(struct cyclecounter *cc)
100 {
101 struct fec_enet_private *fep =
102 container_of(cc, struct fec_enet_private, cc);
103 u32 tempval;
104
105 tempval = readl(fep->hwp + FEC_ATIME_CTRL);
106 tempval |= FEC_T_CTRL_CAPTURE;
107 writel(tempval, fep->hwp + FEC_ATIME_CTRL);
108
109 if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
110 udelay(1);
111
112 return readl(fep->hwp + FEC_ATIME);
113 }
114
115 /**
116 * fec_ptp_enable_pps
117 * @fep: the fec_enet_private structure handle
118 * @enable: enable the channel pps output
119 *
120 * This function enables the PPS output on the timer channel.
121 */
fec_ptp_enable_pps(struct fec_enet_private * fep,uint enable)122 static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
123 {
124 unsigned long flags;
125 u32 val, tempval;
126 struct timespec64 ts;
127 u64 ns;
128
129 spin_lock_irqsave(&fep->tmreg_lock, flags);
130
131 if (fep->perout_enable) {
132 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
133 dev_err(&fep->pdev->dev, "PEROUT is running");
134 return -EBUSY;
135 }
136
137 if (fep->pps_enable == enable) {
138 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
139 return 0;
140 }
141
142 if (enable) {
143 /* clear capture or output compare interrupt status if have.
144 */
145 writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel));
146
147 /* It is recommended to double check the TMODE field in the
148 * TCSR register to be cleared before the first compare counter
149 * is written into TCCR register. Just add a double check.
150 */
151 val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
152 do {
153 val &= ~(FEC_T_TMODE_MASK);
154 writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));
155 val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
156 } while (val & FEC_T_TMODE_MASK);
157
158 /* Dummy read counter to update the counter */
159 timecounter_read(&fep->tc);
160 /* We want to find the first compare event in the next
161 * second point. So we need to know what the ptp time
162 * is now and how many nanoseconds is ahead to get next second.
163 * The remaining nanosecond ahead before the next second would be
164 * NSEC_PER_SEC - ts.tv_nsec. Add the remaining nanoseconds
165 * to current timer would be next second.
166 */
167 tempval = fec_ptp_read(&fep->cc);
168 /* Convert the ptp local counter to 1588 timestamp */
169 ns = timecounter_cyc2time(&fep->tc, tempval);
170 ts = ns_to_timespec64(ns);
171
172 /* The tempval is less than 3 seconds, and so val is less than
173 * 4 seconds. No overflow for 32bit calculation.
174 */
175 val = NSEC_PER_SEC - (u32)ts.tv_nsec + tempval;
176
177 /* Need to consider the situation that the current time is
178 * very close to the second point, which means NSEC_PER_SEC
179 * - ts.tv_nsec is close to be zero(For example 20ns); Since the timer
180 * is still running when we calculate the first compare event, it is
181 * possible that the remaining nanoseconds run out before the compare
182 * counter is calculated and written into TCCR register. To avoid
183 * this possibility, we will set the compare event to be the next
184 * of next second. The current setting is 31-bit timer and wrap
185 * around over 2 seconds. So it is okay to set the next of next
186 * seond for the timer.
187 */
188 val += NSEC_PER_SEC;
189
190 /* We add (2 * NSEC_PER_SEC - (u32)ts.tv_nsec) to current
191 * ptp counter, which maybe cause 32-bit wrap. Since the
192 * (NSEC_PER_SEC - (u32)ts.tv_nsec) is less than 2 second.
193 * We can ensure the wrap will not cause issue. If the offset
194 * is bigger than fep->cc.mask would be a error.
195 */
196 val &= fep->cc.mask;
197 writel(val, fep->hwp + FEC_TCCR(fep->pps_channel));
198
199 /* Calculate the second the compare event timestamp */
200 fep->next_counter = (val + fep->reload_period) & fep->cc.mask;
201
202 /* * Enable compare event when overflow */
203 val = readl(fep->hwp + FEC_ATIME_CTRL);
204 val |= FEC_T_CTRL_PINPER;
205 writel(val, fep->hwp + FEC_ATIME_CTRL);
206
207 /* Compare channel setting. */
208 val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
209 val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
210 val &= ~(1 << FEC_T_TDRE_OFFSET);
211 val &= ~(FEC_T_TMODE_MASK);
212 val |= (FEC_HIGH_PULSE << FEC_T_TMODE_OFFSET);
213 writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));
214
215 /* Write the second compare event timestamp and calculate
216 * the third timestamp. Refer the TCCR register detail in the spec.
217 */
218 writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
219 fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
220 } else {
221 writel(0, fep->hwp + FEC_TCSR(fep->pps_channel));
222 }
223
224 fep->pps_enable = enable;
225 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
226
227 return 0;
228 }
229
/**
 * fec_ptp_pps_perout - program the periodic output compare event
 * @fep: the fec_enet_private structure handle
 *
 * Translate the requested PEROUT start time (fep->perout_stime) into a
 * raw hardware counter value, write it to the channel compare register
 * and configure the channel for toggle-on-compare output mode.
 *
 * Return: 0 on success, -1 when the start time is already too close to
 * (or behind) the current PTP time to be programmed reliably.
 */
static int fec_ptp_pps_perout(struct fec_enet_private *fep)
{
	u32 compare_val, ptp_hc, temp_val;
	u64 curr_time;
	unsigned long flags;

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	/* Update time counter */
	timecounter_read(&fep->tc);

	/* Get the current ptp hardware time counter */
	ptp_hc = fec_ptp_read(&fep->cc);

	/* Convert the ptp local counter to 1588 timestamp */
	curr_time = timecounter_cyc2time(&fep->tc, ptp_hc);

	/* If the pps start time less than current time add 100ms, just return.
	 * Because the software might not able to set the comparison time into
	 * the FEC_TCCR register in time and missed the start time.
	 */
	if (fep->perout_stime < curr_time + 100 * NSEC_PER_MSEC) {
		fep->perout_enable = false;
		dev_err(&fep->pdev->dev, "Current time is too close to the start time!\n");
		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
		return -1;
	}

	/* Offset from "now" in ns, rebased onto the raw hardware counter */
	compare_val = fep->perout_stime - curr_time + ptp_hc;
	compare_val &= fep->cc.mask;

	writel(compare_val, fep->hwp + FEC_TCCR(fep->pps_channel));
	fep->next_counter = (compare_val + fep->reload_period) & fep->cc.mask;

	/* Enable compare event when overflow */
	temp_val = readl(fep->hwp + FEC_ATIME_CTRL);
	temp_val |= FEC_T_CTRL_PINPER;
	writel(temp_val, fep->hwp + FEC_ATIME_CTRL);

	/* Compare channel setting: clear the event flag, enable the compare
	 * interrupt, and select toggle-on-compare output mode.
	 */
	temp_val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
	temp_val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
	temp_val &= ~(1 << FEC_T_TDRE_OFFSET);
	temp_val &= ~(FEC_T_TMODE_MASK);
	temp_val |= (FEC_TMODE_TOGGLE << FEC_T_TMODE_OFFSET);
	writel(temp_val, fep->hwp + FEC_TCSR(fep->pps_channel));

	/* Write the second compare event timestamp and calculate
	 * the third timestamp. Refer the TCCR register detail in the spec.
	 */
	writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
	fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}
286
fec_ptp_pps_perout_handler(struct hrtimer * timer)287 static enum hrtimer_restart fec_ptp_pps_perout_handler(struct hrtimer *timer)
288 {
289 struct fec_enet_private *fep = container_of(timer,
290 struct fec_enet_private, perout_timer);
291
292 fec_ptp_pps_perout(fep);
293
294 return HRTIMER_NORESTART;
295 }
296
297 /**
298 * fec_ptp_start_cyclecounter - create the cycle counter from hw
299 * @ndev: network device
300 *
301 * this function initializes the timecounter and cyclecounter
302 * structures for use in generated a ns counter from the arbitrary
303 * fixed point cycles registers in the hardware.
304 */
fec_ptp_start_cyclecounter(struct net_device * ndev)305 void fec_ptp_start_cyclecounter(struct net_device *ndev)
306 {
307 struct fec_enet_private *fep = netdev_priv(ndev);
308 unsigned long flags;
309 int inc;
310
311 inc = 1000000000 / fep->cycle_speed;
312
313 /* grab the ptp lock */
314 spin_lock_irqsave(&fep->tmreg_lock, flags);
315
316 /* 1ns counter */
317 writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC);
318
319 /* use 31-bit timer counter */
320 writel(FEC_COUNTER_PERIOD, fep->hwp + FEC_ATIME_EVT_PERIOD);
321
322 writel(FEC_T_CTRL_ENABLE | FEC_T_CTRL_PERIOD_RST,
323 fep->hwp + FEC_ATIME_CTRL);
324
325 memset(&fep->cc, 0, sizeof(fep->cc));
326 fep->cc.read = fec_ptp_read;
327 fep->cc.mask = CLOCKSOURCE_MASK(31);
328 fep->cc.shift = 31;
329 fep->cc.mult = FEC_CC_MULT;
330
331 /* reset the ns time counter */
332 timecounter_init(&fep->tc, &fep->cc, 0);
333
334 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
335 }
336
337 /**
338 * fec_ptp_adjfine - adjust ptp cycle frequency
339 * @ptp: the ptp clock structure
340 * @scaled_ppm: scaled parts per million adjustment from base
341 *
342 * Adjust the frequency of the ptp cycle counter by the
343 * indicated amount from the base frequency.
344 *
345 * Scaled parts per million is ppm with a 16-bit binary fractional field.
346 *
347 * Because ENET hardware frequency adjust is complex,
348 * using software method to do that.
349 */
fec_ptp_adjfine(struct ptp_clock_info * ptp,long scaled_ppm)350 static int fec_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
351 {
352 s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
353 unsigned long flags;
354 int neg_adj = 0;
355 u32 i, tmp;
356 u32 corr_inc, corr_period;
357 u32 corr_ns;
358 u64 lhs, rhs;
359
360 struct fec_enet_private *fep =
361 container_of(ptp, struct fec_enet_private, ptp_caps);
362
363 if (ppb == 0)
364 return 0;
365
366 if (ppb < 0) {
367 ppb = -ppb;
368 neg_adj = 1;
369 }
370
371 /* In theory, corr_inc/corr_period = ppb/NSEC_PER_SEC;
372 * Try to find the corr_inc between 1 to fep->ptp_inc to
373 * meet adjustment requirement.
374 */
375 lhs = NSEC_PER_SEC;
376 rhs = (u64)ppb * (u64)fep->ptp_inc;
377 for (i = 1; i <= fep->ptp_inc; i++) {
378 if (lhs >= rhs) {
379 corr_inc = i;
380 corr_period = div_u64(lhs, rhs);
381 break;
382 }
383 lhs += NSEC_PER_SEC;
384 }
385 /* Not found? Set it to high value - double speed
386 * correct in every clock step.
387 */
388 if (i > fep->ptp_inc) {
389 corr_inc = fep->ptp_inc;
390 corr_period = 1;
391 }
392
393 if (neg_adj)
394 corr_ns = fep->ptp_inc - corr_inc;
395 else
396 corr_ns = fep->ptp_inc + corr_inc;
397
398 spin_lock_irqsave(&fep->tmreg_lock, flags);
399
400 tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
401 tmp |= corr_ns << FEC_T_INC_CORR_OFFSET;
402 writel(tmp, fep->hwp + FEC_ATIME_INC);
403 corr_period = corr_period > 1 ? corr_period - 1 : corr_period;
404 writel(corr_period, fep->hwp + FEC_ATIME_CORR);
405 /* dummy read to update the timer. */
406 timecounter_read(&fep->tc);
407
408 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
409
410 return 0;
411 }
412
413 /**
414 * fec_ptp_adjtime
415 * @ptp: the ptp clock structure
416 * @delta: offset to adjust the cycle counter by
417 *
418 * adjust the timer by resetting the timecounter structure.
419 */
fec_ptp_adjtime(struct ptp_clock_info * ptp,s64 delta)420 static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
421 {
422 struct fec_enet_private *fep =
423 container_of(ptp, struct fec_enet_private, ptp_caps);
424 unsigned long flags;
425
426 spin_lock_irqsave(&fep->tmreg_lock, flags);
427 timecounter_adjtime(&fep->tc, delta);
428 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
429
430 return 0;
431 }
432
433 /**
434 * fec_ptp_gettime
435 * @ptp: the ptp clock structure
436 * @ts: timespec structure to hold the current time value
437 *
438 * read the timecounter and return the correct value on ns,
439 * after converting it into a struct timespec.
440 */
fec_ptp_gettime(struct ptp_clock_info * ptp,struct timespec64 * ts)441 static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
442 {
443 struct fec_enet_private *fep =
444 container_of(ptp, struct fec_enet_private, ptp_caps);
445 u64 ns;
446 unsigned long flags;
447
448 mutex_lock(&fep->ptp_clk_mutex);
449 /* Check the ptp clock */
450 if (!fep->ptp_clk_on) {
451 mutex_unlock(&fep->ptp_clk_mutex);
452 return -EINVAL;
453 }
454 spin_lock_irqsave(&fep->tmreg_lock, flags);
455 ns = timecounter_read(&fep->tc);
456 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
457 mutex_unlock(&fep->ptp_clk_mutex);
458
459 *ts = ns_to_timespec64(ns);
460
461 return 0;
462 }
463
464 /**
465 * fec_ptp_settime
466 * @ptp: the ptp clock structure
467 * @ts: the timespec containing the new time for the cycle counter
468 *
469 * reset the timecounter to use a new base value instead of the kernel
470 * wall timer value.
471 */
fec_ptp_settime(struct ptp_clock_info * ptp,const struct timespec64 * ts)472 static int fec_ptp_settime(struct ptp_clock_info *ptp,
473 const struct timespec64 *ts)
474 {
475 struct fec_enet_private *fep =
476 container_of(ptp, struct fec_enet_private, ptp_caps);
477
478 u64 ns;
479 unsigned long flags;
480 u32 counter;
481
482 mutex_lock(&fep->ptp_clk_mutex);
483 /* Check the ptp clock */
484 if (!fep->ptp_clk_on) {
485 mutex_unlock(&fep->ptp_clk_mutex);
486 return -EINVAL;
487 }
488
489 ns = timespec64_to_ns(ts);
490 /* Get the timer value based on timestamp.
491 * Update the counter with the masked value.
492 */
493 counter = ns & fep->cc.mask;
494
495 spin_lock_irqsave(&fep->tmreg_lock, flags);
496 writel(counter, fep->hwp + FEC_ATIME);
497 timecounter_init(&fep->tc, &fep->cc, ns);
498 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
499 mutex_unlock(&fep->ptp_clk_mutex);
500 return 0;
501 }
502
/* Tear down periodic output on @channel: cancel any deferred start,
 * clear the enable flag and reset the channel control/status register.
 */
static int fec_ptp_pps_disable(struct fec_enet_private *fep, uint channel)
{
	unsigned long lock_flags;

	/* Make sure a pending deferred start cannot re-arm the channel */
	hrtimer_cancel(&fep->perout_timer);

	spin_lock_irqsave(&fep->tmreg_lock, lock_flags);
	fep->perout_enable = false;
	writel(0, fep->hwp + FEC_TCSR(channel));
	spin_unlock_irqrestore(&fep->tmreg_lock, lock_flags);

	return 0;
}
516
517 /**
518 * fec_ptp_enable
519 * @ptp: the ptp clock structure
520 * @rq: the requested feature to change
521 * @on: whether to enable or disable the feature
522 *
523 */
fec_ptp_enable(struct ptp_clock_info * ptp,struct ptp_clock_request * rq,int on)524 static int fec_ptp_enable(struct ptp_clock_info *ptp,
525 struct ptp_clock_request *rq, int on)
526 {
527 struct fec_enet_private *fep =
528 container_of(ptp, struct fec_enet_private, ptp_caps);
529 ktime_t timeout;
530 struct timespec64 start_time, period;
531 u64 curr_time, delta, period_ns;
532 unsigned long flags;
533 int ret = 0;
534
535 if (rq->type == PTP_CLK_REQ_PPS) {
536 fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
537
538 ret = fec_ptp_enable_pps(fep, on);
539
540 return ret;
541 } else if (rq->type == PTP_CLK_REQ_PEROUT) {
542 u32 reload_period;
543
544 /* Reject requests with unsupported flags */
545 if (rq->perout.flags)
546 return -EOPNOTSUPP;
547
548 if (rq->perout.index != fep->pps_channel)
549 return -EOPNOTSUPP;
550
551 period.tv_sec = rq->perout.period.sec;
552 period.tv_nsec = rq->perout.period.nsec;
553 period_ns = timespec64_to_ns(&period);
554
555 /* FEC PTP timer only has 31 bits, so if the period exceed
556 * 4s is not supported.
557 */
558 if (period_ns > FEC_PTP_MAX_NSEC_PERIOD) {
559 dev_err(&fep->pdev->dev, "The period must equal to or less than 4s!\n");
560 return -EOPNOTSUPP;
561 }
562
563 reload_period = div_u64(period_ns, 2);
564 if (on && reload_period) {
565 u64 perout_stime;
566
567 /* Convert 1588 timestamp to ns*/
568 start_time.tv_sec = rq->perout.start.sec;
569 start_time.tv_nsec = rq->perout.start.nsec;
570 perout_stime = timespec64_to_ns(&start_time);
571
572 mutex_lock(&fep->ptp_clk_mutex);
573 if (!fep->ptp_clk_on) {
574 dev_err(&fep->pdev->dev, "Error: PTP clock is closed!\n");
575 mutex_unlock(&fep->ptp_clk_mutex);
576 return -EOPNOTSUPP;
577 }
578 spin_lock_irqsave(&fep->tmreg_lock, flags);
579
580 if (fep->pps_enable) {
581 dev_err(&fep->pdev->dev, "PPS is running");
582 ret = -EBUSY;
583 goto unlock;
584 }
585
586 if (fep->perout_enable) {
587 dev_err(&fep->pdev->dev,
588 "PEROUT has been enabled\n");
589 ret = -EBUSY;
590 goto unlock;
591 }
592
593 /* Read current timestamp */
594 curr_time = timecounter_read(&fep->tc);
595 if (perout_stime <= curr_time) {
596 dev_err(&fep->pdev->dev,
597 "Start time must be greater than current time\n");
598 ret = -EINVAL;
599 goto unlock;
600 }
601
602 /* Calculate time difference */
603 delta = perout_stime - curr_time;
604 fep->reload_period = reload_period;
605 fep->perout_stime = perout_stime;
606 fep->perout_enable = true;
607
608 unlock:
609 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
610 mutex_unlock(&fep->ptp_clk_mutex);
611
612 if (ret)
613 return ret;
614
615 /* Because the timer counter of FEC only has 31-bits, correspondingly,
616 * the time comparison register FEC_TCCR also only low 31 bits can be
617 * set. If the start time of pps signal exceeds current time more than
618 * 0x80000000 ns, a software timer is used and the timer expires about
619 * 1 second before the start time to be able to set FEC_TCCR.
620 */
621 if (delta > FEC_PTP_MAX_NSEC_COUNTER) {
622 timeout = ns_to_ktime(delta - NSEC_PER_SEC);
623 hrtimer_start(&fep->perout_timer, timeout, HRTIMER_MODE_REL);
624 } else {
625 return fec_ptp_pps_perout(fep);
626 }
627 } else {
628 fec_ptp_pps_disable(fep, fep->pps_channel);
629 }
630
631 return 0;
632 } else {
633 return -EOPNOTSUPP;
634 }
635 }
636
fec_ptp_set(struct net_device * ndev,struct kernel_hwtstamp_config * config,struct netlink_ext_ack * extack)637 int fec_ptp_set(struct net_device *ndev, struct kernel_hwtstamp_config *config,
638 struct netlink_ext_ack *extack)
639 {
640 struct fec_enet_private *fep = netdev_priv(ndev);
641
642 switch (config->tx_type) {
643 case HWTSTAMP_TX_OFF:
644 fep->hwts_tx_en = 0;
645 break;
646 case HWTSTAMP_TX_ON:
647 fep->hwts_tx_en = 1;
648 break;
649 default:
650 return -ERANGE;
651 }
652
653 switch (config->rx_filter) {
654 case HWTSTAMP_FILTER_NONE:
655 fep->hwts_rx_en = 0;
656 break;
657
658 default:
659 fep->hwts_rx_en = 1;
660 config->rx_filter = HWTSTAMP_FILTER_ALL;
661 break;
662 }
663
664 return 0;
665 }
666
fec_ptp_get(struct net_device * ndev,struct kernel_hwtstamp_config * config)667 void fec_ptp_get(struct net_device *ndev, struct kernel_hwtstamp_config *config)
668 {
669 struct fec_enet_private *fep = netdev_priv(ndev);
670
671 config->flags = 0;
672 config->tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
673 config->rx_filter = (fep->hwts_rx_en ?
674 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
675 }
676
677 /*
678 * fec_time_keep - call timecounter_read every second to avoid timer overrun
679 * because ENET just support 32bit counter, will timeout in 4s
680 */
fec_time_keep(struct work_struct * work)681 static void fec_time_keep(struct work_struct *work)
682 {
683 struct delayed_work *dwork = to_delayed_work(work);
684 struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
685 unsigned long flags;
686
687 mutex_lock(&fep->ptp_clk_mutex);
688 if (fep->ptp_clk_on) {
689 spin_lock_irqsave(&fep->tmreg_lock, flags);
690 timecounter_read(&fep->tc);
691 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
692 }
693 mutex_unlock(&fep->ptp_clk_mutex);
694
695 schedule_delayed_work(&fep->time_keep, HZ);
696 }
697
/* This function checks the pps event and reloads the timer compare counter. */
static irqreturn_t fec_pps_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 val;
	u8 channel = fep->pps_channel;
	struct ptp_clock_event event;

	val = readl(fep->hwp + FEC_TCSR(channel));
	if (val & FEC_T_TF_MASK) {
		/* Write the next next compare(not the next according the spec)
		 * value to the register
		 */
		writel(fep->next_counter, fep->hwp + FEC_TCCR(channel));
		/* Writing the flag back clears it; repeat until the hardware
		 * confirms the event flag is gone.
		 */
		do {
			writel(val, fep->hwp + FEC_TCSR(channel));
		} while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK);

		/* Update the counter; */
		fep->next_counter = (fep->next_counter + fep->reload_period) &
				fep->cc.mask;

		/* Only PPS mode (not PEROUT) reports events to the PTP core */
		if (fep->pps_enable) {
			event.type = PTP_CLOCK_PPS;
			ptp_clock_event(fep->ptp_clock, &event);
		}

		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
731
732 /**
733 * fec_ptp_init
734 * @pdev: The FEC network adapter
735 * @irq_idx: the interrupt index
736 *
737 * This function performs the required steps for enabling ptp
738 * support. If ptp support has already been loaded it simply calls the
739 * cyclecounter init routine and exits.
740 */
741
fec_ptp_init(struct platform_device * pdev,int irq_idx)742 void fec_ptp_init(struct platform_device *pdev, int irq_idx)
743 {
744 struct net_device *ndev = platform_get_drvdata(pdev);
745 struct fec_enet_private *fep = netdev_priv(ndev);
746 struct device_node *np = fep->pdev->dev.of_node;
747 int irq;
748 int ret;
749
750 fep->ptp_caps.owner = THIS_MODULE;
751 strscpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));
752
753 fep->pps_channel = DEFAULT_PPS_CHANNEL;
754 of_property_read_u32(np, "fsl,pps-channel", &fep->pps_channel);
755
756 fep->ptp_caps.max_adj = 250000000;
757 fep->ptp_caps.n_alarm = 0;
758 fep->ptp_caps.n_ext_ts = 0;
759 fep->ptp_caps.n_per_out = 1;
760 fep->ptp_caps.n_pins = 0;
761 fep->ptp_caps.pps = 1;
762 fep->ptp_caps.adjfine = fec_ptp_adjfine;
763 fep->ptp_caps.adjtime = fec_ptp_adjtime;
764 fep->ptp_caps.gettime64 = fec_ptp_gettime;
765 fep->ptp_caps.settime64 = fec_ptp_settime;
766 fep->ptp_caps.enable = fec_ptp_enable;
767
768 fep->cycle_speed = clk_get_rate(fep->clk_ptp);
769 if (!fep->cycle_speed) {
770 fep->cycle_speed = NSEC_PER_SEC;
771 dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n");
772 }
773 fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
774
775 spin_lock_init(&fep->tmreg_lock);
776
777 fec_ptp_start_cyclecounter(ndev);
778
779 INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
780
781 hrtimer_setup(&fep->perout_timer, fec_ptp_pps_perout_handler, CLOCK_REALTIME,
782 HRTIMER_MODE_REL);
783
784 irq = platform_get_irq_byname_optional(pdev, "pps");
785 if (irq < 0)
786 irq = platform_get_irq_optional(pdev, irq_idx);
787 /* Failure to get an irq is not fatal,
788 * only the PTP_CLOCK_PPS clock events should stop
789 */
790 if (irq >= 0) {
791 ret = devm_request_irq(&pdev->dev, irq, fec_pps_interrupt,
792 0, pdev->name, ndev);
793 if (ret < 0)
794 dev_warn(&pdev->dev, "request for pps irq failed(%d)\n",
795 ret);
796 }
797
798 fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
799 if (IS_ERR(fep->ptp_clock)) {
800 fep->ptp_clock = NULL;
801 dev_err(&pdev->dev, "ptp_clock_register failed\n");
802 }
803
804 schedule_delayed_work(&fep->time_keep, HZ);
805 }
806
fec_ptp_save_state(struct fec_enet_private * fep)807 void fec_ptp_save_state(struct fec_enet_private *fep)
808 {
809 unsigned long flags;
810 u32 atime_inc_corr;
811
812 spin_lock_irqsave(&fep->tmreg_lock, flags);
813
814 fep->ptp_saved_state.pps_enable = fep->pps_enable;
815
816 fep->ptp_saved_state.ns_phc = timecounter_read(&fep->tc);
817 fep->ptp_saved_state.ns_sys = ktime_get_ns();
818
819 fep->ptp_saved_state.at_corr = readl(fep->hwp + FEC_ATIME_CORR);
820 atime_inc_corr = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_CORR_MASK;
821 fep->ptp_saved_state.at_inc_corr = (u8)(atime_inc_corr >> FEC_T_INC_CORR_OFFSET);
822
823 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
824 }
825
/* Restore PTP functionality after a reset */
void fec_ptp_restore_state(struct fec_enet_private *fep)
{
	u32 atime_inc = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
	unsigned long flags;
	u32 counter;
	u64 ns;

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	/* Reset turned it off, so adjust our status flag */
	fep->pps_enable = 0;

	/* Re-apply the saved frequency correction registers */
	writel(fep->ptp_saved_state.at_corr, fep->hwp + FEC_ATIME_CORR);
	atime_inc |= ((u32)fep->ptp_saved_state.at_inc_corr) << FEC_T_INC_CORR_OFFSET;
	writel(atime_inc, fep->hwp + FEC_ATIME_INC);

	/* Advance the saved PHC time by the system time elapsed since save */
	ns = ktime_get_ns() - fep->ptp_saved_state.ns_sys + fep->ptp_saved_state.ns_phc;
	counter = ns & fep->cc.mask;
	writel(counter, fep->hwp + FEC_ATIME);
	timecounter_init(&fep->tc, &fep->cc, ns);

	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	/* Restart PPS if needed */
	if (fep->ptp_saved_state.pps_enable) {
		/* Re-enable PPS */
		fec_ptp_enable_pps(fep, 1);
	}
}
856
fec_ptp_stop(struct platform_device * pdev)857 void fec_ptp_stop(struct platform_device *pdev)
858 {
859 struct net_device *ndev = platform_get_drvdata(pdev);
860 struct fec_enet_private *fep = netdev_priv(ndev);
861
862 if (fep->pps_enable)
863 fec_ptp_enable_pps(fep, 0);
864
865 cancel_delayed_work_sync(&fep->time_keep);
866 hrtimer_cancel(&fep->perout_timer);
867 if (fep->ptp_clock)
868 ptp_clock_unregister(fep->ptp_clock);
869 }
870