// SPDX-License-Identifier: GPL-2.0
/*
 * Fast Ethernet Controller (ENET) PTP driver for MX6x.
 *
 * Copyright (C) 2012 Freescale Semiconductor, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fec.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/ptrace.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/workqueue.h>

#include "fec.h"

/* FEC 1588 register bits */
#define FEC_T_CTRL_SLAVE		0x00002000
#define FEC_T_CTRL_CAPTURE		0x00000800
#define FEC_T_CTRL_RESTART		0x00000200
#define FEC_T_CTRL_PERIOD_RST		0x00000030
#define FEC_T_CTRL_PERIOD_EN		0x00000010
#define FEC_T_CTRL_ENABLE		0x00000001

#define FEC_T_INC_MASK			0x0000007f
#define FEC_T_INC_OFFSET		0
#define FEC_T_INC_CORR_MASK		0x00007f00
#define FEC_T_INC_CORR_OFFSET		8

#define FEC_T_CTRL_PINPER		0x00000080
#define FEC_T_TF0_MASK			0x00000001
#define FEC_T_TF0_OFFSET		0
#define FEC_T_TF1_MASK			0x00000002
#define FEC_T_TF1_OFFSET		1
#define FEC_T_TF2_MASK			0x00000004
#define FEC_T_TF2_OFFSET		2
#define FEC_T_TF3_MASK			0x00000008
#define FEC_T_TF3_OFFSET		3
#define FEC_T_TDRE_MASK			0x00000001
#define FEC_T_TDRE_OFFSET		0
#define FEC_T_TMODE_MASK		0x0000003C
#define FEC_T_TMODE_OFFSET		2
#define FEC_T_TIE_MASK			0x00000040
#define FEC_T_TIE_OFFSET		6
#define FEC_T_TF_MASK			0x00000080
#define FEC_T_TF_OFFSET			7

#define FEC_ATIME_CTRL			0x400
#define FEC_ATIME			0x404
#define FEC_ATIME_EVT_OFFSET		0x408
#define FEC_ATIME_EVT_PERIOD		0x40c
#define FEC_ATIME_CORR			0x410
#define FEC_ATIME_INC			0x414
#define FEC_TS_TIMESTAMP		0x418

#define FEC_TGSR			0x604
#define FEC_TCSR(n)			(0x608 + n * 0x08)
#define FEC_TCCR(n)			(0x60C + n * 0x08)
#define MAX_TIMER_CHANNEL		3
#define FEC_TMODE_TOGGLE		0x05
#define FEC_HIGH_PULSE			0x0F

#define FEC_CC_MULT			(1 << 31)
#define FEC_COUNTER_PERIOD		(1 << 31)
#define PPS_OUPUT_RELOAD_PERIOD		NSEC_PER_SEC
#define DEFAULT_PPS_CHANNEL		0

#define FEC_PTP_MAX_NSEC_PERIOD		4000000000ULL
#define FEC_PTP_MAX_NSEC_COUNTER	0x80000000ULL

/**
 * fec_ptp_read - read raw cycle counter (to be used by time counter)
 * @cc: the cyclecounter structure
 *
 * this function reads the cyclecounter registers and is called by the
 * cyclecounter structure used to construct a ns counter from the
 * arbitrary fixed point registers
 */
static u64 fec_ptp_read(const struct cyclecounter *cc)
{
	struct fec_enet_private *fep =
		container_of(cc, struct fec_enet_private, cc);
	u32 tempval;

	tempval = readl(fep->hwp + FEC_ATIME_CTRL);
	tempval |= FEC_T_CTRL_CAPTURE;
	writel(tempval, fep->hwp + FEC_ATIME_CTRL);

	if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
		udelay(1);

	return readl(fep->hwp + FEC_ATIME);
}

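/* Overview of the PPS output scheme implemented below (explanatory summary;
 * see the i.MX reference manual for the authoritative timer channel
 * description):
 *
 *  - fec_ptp_enable_pps() puts timer channel fep->pps_channel into
 *    pulse-on-compare mode (FEC_HIGH_PULSE) and arms the first compare
 *    value in FEC_TCCR on a whole-second boundary of the PTP time.
 *  - Each time the compare flag (TF) fires, fec_pps_interrupt() writes the
 *    pre-computed fep->next_counter into FEC_TCCR and advances it by
 *    fep->reload_period (one second for PPS), so the pin keeps pulsing once
 *    per second without further scheduling work.
 *  - From user space this path is reached through the PTP_ENABLE_PPS ioctl
 *    on the /dev/ptpN character device (PTP_CLK_REQ_PPS in fec_ptp_enable()).
 */
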
/**
 * fec_ptp_enable_pps
 * @fep: the fec_enet_private structure handle
 * @enable: enable the channel pps output
 *
 * This function enables the PPS output on the timer channel.
 */
static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
{
	unsigned long flags;
	u32 val, tempval;
	struct timespec64 ts;
	u64 ns;

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	if (fep->pps_enable == enable) {
		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
		return 0;
	}

	if (enable) {
		/* clear the capture or output compare interrupt status if
		 * there is any.
		 */
		writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel));

		/* It is recommended to double-check that the TMODE field in
		 * the TCSR register is cleared before the first compare
		 * counter is written into the TCCR register. Just add a
		 * double check.
		 */
		val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		do {
			val &= ~(FEC_T_TMODE_MASK);
			writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));
			val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		} while (val & FEC_T_TMODE_MASK);

		/* Dummy read to update the time counter */
		timecounter_read(&fep->tc);
		/* We want the first compare event to land on the next second
		 * boundary. So we need to know the current ptp time and how
		 * many nanoseconds remain until the next second. The
		 * remaining nanoseconds are NSEC_PER_SEC - ts.tv_nsec;
		 * adding them to the current timer value gives the next
		 * second boundary.
		 */
		tempval = fec_ptp_read(&fep->cc);
		/* Convert the ptp local counter to 1588 timestamp */
		ns = timecounter_cyc2time(&fep->tc, tempval);
		ts = ns_to_timespec64(ns);

		/* The tempval is less than 3 seconds, so val is less than
		 * 4 seconds. No overflow in the 32-bit calculation.
		 */
		val = NSEC_PER_SEC - (u32)ts.tv_nsec + tempval;

		/* We need to consider the case where the current time is
		 * very close to the second boundary, so that NSEC_PER_SEC
		 * - ts.tv_nsec is close to zero (for example 20 ns). Since
		 * the timer keeps running while we calculate the first
		 * compare event, the remaining nanoseconds may run out
		 * before the compare counter is calculated and written into
		 * the TCCR register. To avoid this, set the compare event to
		 * the second after next. The timer is 31 bits wide and wraps
		 * after a bit more than 2 seconds, so it is safe to target
		 * the second after next.
		 */
		val += NSEC_PER_SEC;

		/* We add (2 * NSEC_PER_SEC - (u32)ts.tv_nsec) to the current
		 * ptp counter, which may cause a 32-bit wrap. Since that
		 * offset is less than 2 seconds, the wrap does not cause a
		 * problem. An offset bigger than fep->cc.mask would be an
		 * error.
		 */
		val &= fep->cc.mask;
		writel(val, fep->hwp + FEC_TCCR(fep->pps_channel));

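		/* Worked example (illustrative numbers only): if the captured
		 * counter is tempval = 1,500,000,000 and the converted PTP
		 * time has ts.tv_nsec = 700,000,000, then the next second
		 * boundary is 300,000,000 ns away, giving
		 * val = 300,000,000 + 1,500,000,000 + NSEC_PER_SEC
		 *     = 2,800,000,000, which masked to 31 bits becomes
		 * 652,516,352, the compare value programmed above.
		 */
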
		/* Calculate the second compare event timestamp */
		fep->next_counter = (val + fep->reload_period) & fep->cc.mask;

		/* Enable compare event when overflow */
		val = readl(fep->hwp + FEC_ATIME_CTRL);
		val |= FEC_T_CTRL_PINPER;
		writel(val, fep->hwp + FEC_ATIME_CTRL);

		/* Compare channel setting. */
		val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
		val &= ~(1 << FEC_T_TDRE_OFFSET);
		val &= ~(FEC_T_TMODE_MASK);
		val |= (FEC_HIGH_PULSE << FEC_T_TMODE_OFFSET);
		writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));

		/* Write the second compare event timestamp and calculate
		 * the third timestamp. Refer to the TCCR register details
		 * in the spec.
		 */
		writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
		fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
	} else {
		writel(0, fep->hwp + FEC_TCSR(fep->pps_channel));
	}

	fep->pps_enable = enable;
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

static int fec_ptp_pps_perout(struct fec_enet_private *fep)
{
	u32 compare_val, ptp_hc, temp_val;
	u64 curr_time;
	unsigned long flags;

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	/* Update time counter */
	timecounter_read(&fep->tc);

	/* Get the current ptp hardware time counter */
	ptp_hc = fec_ptp_read(&fep->cc);

	/* Convert the ptp local counter to 1588 timestamp */
	curr_time = timecounter_cyc2time(&fep->tc, ptp_hc);

	/* If the pps start time is less than the current time plus 100 ms,
	 * just bail out: the software might not manage to write the
	 * comparison value into the FEC_TCCR register in time and would
	 * miss the start time.
	 */
	if (fep->perout_stime < curr_time + 100 * NSEC_PER_MSEC) {
		dev_err(&fep->pdev->dev, "Current time is too close to the start time!\n");
		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
		return -1;
	}

	compare_val = fep->perout_stime - curr_time + ptp_hc;
	compare_val &= fep->cc.mask;

	writel(compare_val, fep->hwp + FEC_TCCR(fep->pps_channel));
	fep->next_counter = (compare_val + fep->reload_period) & fep->cc.mask;

	/* Enable compare event when overflow */
	temp_val = readl(fep->hwp + FEC_ATIME_CTRL);
	temp_val |= FEC_T_CTRL_PINPER;
	writel(temp_val, fep->hwp + FEC_ATIME_CTRL);

	/* Compare channel setting. */
	temp_val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
	temp_val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
	temp_val &= ~(1 << FEC_T_TDRE_OFFSET);
	temp_val &= ~(FEC_T_TMODE_MASK);
	temp_val |= (FEC_TMODE_TOGGLE << FEC_T_TMODE_OFFSET);
	writel(temp_val, fep->hwp + FEC_TCSR(fep->pps_channel));

	/* Write the second compare event timestamp and calculate
	 * the third timestamp. Refer to the TCCR register details in
	 * the spec.
	 */
	writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
	fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

static enum hrtimer_restart fec_ptp_pps_perout_handler(struct hrtimer *timer)
{
	struct fec_enet_private *fep = container_of(timer,
					struct fec_enet_private, perout_timer);

	fec_ptp_pps_perout(fep);

	return HRTIMER_NORESTART;
}

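/* Note on the cyclecounter parameters chosen below (explanatory summary):
 * the hardware timer is programmed to count nanoseconds (FEC_ATIME_INC adds
 * "inc" ns on every timer clock), so mult = 1 << 31 and shift = 31 make the
 * timecounter conversion ns = (cycles * mult) >> shift an exact 1:1 mapping.
 * With mask = CLOCKSOURCE_MASK(31) and FEC_COUNTER_PERIOD = 1 << 31 the
 * counter wraps roughly every 2.1 seconds, which is why fec_time_keep()
 * reads the timecounter at least once per second.
 */
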
/**
 * fec_ptp_start_cyclecounter - create the cycle counter from hw
 * @ndev: network device
 *
 * this function initializes the timecounter and cyclecounter
 * structures for use in generating a ns counter from the arbitrary
 * fixed point cycle registers in the hardware.
 */
void fec_ptp_start_cyclecounter(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned long flags;
	int inc;

	inc = 1000000000 / fep->cycle_speed;

	/* grab the ptp lock */
	spin_lock_irqsave(&fep->tmreg_lock, flags);

	/* 1ns counter */
	writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC);

	/* use 31-bit timer counter */
	writel(FEC_COUNTER_PERIOD, fep->hwp + FEC_ATIME_EVT_PERIOD);

	writel(FEC_T_CTRL_ENABLE | FEC_T_CTRL_PERIOD_RST,
	       fep->hwp + FEC_ATIME_CTRL);

	memset(&fep->cc, 0, sizeof(fep->cc));
	fep->cc.read = fec_ptp_read;
	fep->cc.mask = CLOCKSOURCE_MASK(31);
	fep->cc.shift = 31;
	fep->cc.mult = FEC_CC_MULT;

	/* reset the ns time counter */
	timecounter_init(&fep->tc, &fep->cc, 0);

	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}

/**
 * fec_ptp_adjfine - adjust ptp cycle frequency
 * @ptp: the ptp clock structure
 * @scaled_ppm: scaled parts per million adjustment from base
 *
 * Adjust the frequency of the ptp cycle counter by the
 * indicated amount from the base frequency.
 *
 * Scaled parts per million is ppm with a 16-bit binary fractional field.
 *
 * Because the ENET hardware frequency adjustment is complex,
 * a software method is used to calculate the correction.
 */
static int fec_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	unsigned long flags;
	int neg_adj = 0;
	u32 i, tmp;
	u32 corr_inc, corr_period;
	u32 corr_ns;
	u64 lhs, rhs;

	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);

	if (ppb == 0)
		return 0;

	if (ppb < 0) {
		ppb = -ppb;
		neg_adj = 1;
	}

	/* In theory, corr_inc/corr_period = ppb/NSEC_PER_SEC;
	 * Try to find a corr_inc between 1 and fep->ptp_inc that
	 * meets the adjustment requirement.
	 */
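	/* Worked example (illustrative numbers only, assuming a 125 MHz ptp
	 * clock, i.e. fep->ptp_inc = 8): for ppb = 100 the loop below exits
	 * at i = 1 because NSEC_PER_SEC >= 100 * 8, giving corr_inc = 1 and
	 * corr_period = NSEC_PER_SEC / 800 = 1,250,000. The hardware then
	 * substitutes corr_ns (9 or 7 ns) for the normal 8 ns increment once
	 * every corr_period timer clocks: 125,000,000 / 1,250,000 = 100
	 * corrections per second of 1 ns each, i.e. the requested 100 ppb.
	 */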
	lhs = NSEC_PER_SEC;
	rhs = (u64)ppb * (u64)fep->ptp_inc;
	for (i = 1; i <= fep->ptp_inc; i++) {
		if (lhs >= rhs) {
			corr_inc = i;
			corr_period = div_u64(lhs, rhs);
			break;
		}
		lhs += NSEC_PER_SEC;
	}
	/* Not found? Set it to the maximum value: correct at double speed
	 * on every clock step.
	 */
	if (i > fep->ptp_inc) {
		corr_inc = fep->ptp_inc;
		corr_period = 1;
	}

	if (neg_adj)
		corr_ns = fep->ptp_inc - corr_inc;
	else
		corr_ns = fep->ptp_inc + corr_inc;

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
	tmp |= corr_ns << FEC_T_INC_CORR_OFFSET;
	writel(tmp, fep->hwp + FEC_ATIME_INC);
	corr_period = corr_period > 1 ? corr_period - 1 : corr_period;
	writel(corr_period, fep->hwp + FEC_ATIME_CORR);
	/* dummy read to update the timer. */
	timecounter_read(&fep->tc);

	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

/**
 * fec_ptp_adjtime
 * @ptp: the ptp clock structure
 * @delta: offset to adjust the cycle counter by
 *
 * adjust the timer by resetting the timecounter structure.
 */
static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);
	unsigned long flags;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	timecounter_adjtime(&fep->tc, delta);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

/**
 * fec_ptp_gettime
 * @ptp: the ptp clock structure
 * @ts: timespec structure to hold the current time value
 *
 * read the timecounter and return the correct value in ns,
 * after converting it into a struct timespec.
 */
static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);
	u64 ns;
	unsigned long flags;

	mutex_lock(&fep->ptp_clk_mutex);
	/* Check the ptp clock */
	if (!fep->ptp_clk_on) {
		mutex_unlock(&fep->ptp_clk_mutex);
		return -EINVAL;
	}
	spin_lock_irqsave(&fep->tmreg_lock, flags);
	ns = timecounter_read(&fep->tc);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
	mutex_unlock(&fep->ptp_clk_mutex);

	*ts = ns_to_timespec64(ns);

	return 0;
}

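/* How these callbacks are reached (illustrative userspace sketch; the device
 * node name depends on registration order and applications such as ptp4l
 * normally handle this):
 *
 *	int fd = open("/dev/ptp0", O_RDWR);
 *	clockid_t clkid = ((clockid_t)~fd << 3) | 3;	// FD_TO_CLOCKID()
 *	struct timespec ts;
 *
 *	clock_gettime(clkid, &ts);	// ends up in fec_ptp_gettime()
 *	clock_settime(clkid, &ts);	// ends up in fec_ptp_settime()
 *
 * clock_adjtime() with ADJ_FREQUENCY or ADJ_SETOFFSET maps to
 * fec_ptp_adjfine() or fec_ptp_adjtime() in the same way.
 */
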
/**
 * fec_ptp_settime
 * @ptp: the ptp clock structure
 * @ts: the timespec containing the new time for the cycle counter
 *
 * reset the timecounter to use a new base value instead of the kernel
 * wall timer value.
 */
static int fec_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec64 *ts)
{
	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);

	u64 ns;
	unsigned long flags;
	u32 counter;

	mutex_lock(&fep->ptp_clk_mutex);
	/* Check the ptp clock */
	if (!fep->ptp_clk_on) {
		mutex_unlock(&fep->ptp_clk_mutex);
		return -EINVAL;
	}

	ns = timespec64_to_ns(ts);
	/* Get the timer value based on the timestamp.
	 * Update the counter with the masked value.
	 */
	counter = ns & fep->cc.mask;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	writel(counter, fep->hwp + FEC_ATIME);
	timecounter_init(&fep->tc, &fep->cc, ns);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
	mutex_unlock(&fep->ptp_clk_mutex);
	return 0;
}

static int fec_ptp_pps_disable(struct fec_enet_private *fep, uint channel)
{
	unsigned long flags;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	writel(0, fep->hwp + FEC_TCSR(channel));
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

/**
 * fec_ptp_enable
 * @ptp: the ptp clock structure
 * @rq: the requested feature to change
 * @on: whether to enable or disable the feature
 */
static int fec_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);
	ktime_t timeout;
	struct timespec64 start_time, period;
	u64 curr_time, delta, period_ns;
	unsigned long flags;
	int ret = 0;

	if (rq->type == PTP_CLK_REQ_PPS) {
		fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;

		ret = fec_ptp_enable_pps(fep, on);

		return ret;
	} else if (rq->type == PTP_CLK_REQ_PEROUT) {
		/* Reject requests with unsupported flags */
		if (rq->perout.flags)
			return -EOPNOTSUPP;

		if (rq->perout.index != fep->pps_channel)
			return -EOPNOTSUPP;

		period.tv_sec = rq->perout.period.sec;
		period.tv_nsec = rq->perout.period.nsec;
		period_ns = timespec64_to_ns(&period);

		/* The FEC PTP timer only has 31 bits, so a period longer
		 * than 4s is not supported.
		 */
		if (period_ns > FEC_PTP_MAX_NSEC_PERIOD) {
			dev_err(&fep->pdev->dev, "The period must be equal to or less than 4s!\n");
			return -EOPNOTSUPP;
		}

		fep->reload_period = div_u64(period_ns, 2);
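		/* The compare channel is used in toggle mode for periodic
		 * output (FEC_TMODE_TOGGLE in fec_ptp_pps_perout()), so the
		 * pin changes level on every compare match; two matches make
		 * one full output period, hence the reload value is half the
		 * requested period.
		 */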
		if (on && fep->reload_period) {
			/* Convert 1588 timestamp to ns */
			start_time.tv_sec = rq->perout.start.sec;
			start_time.tv_nsec = rq->perout.start.nsec;
			fep->perout_stime = timespec64_to_ns(&start_time);

			mutex_lock(&fep->ptp_clk_mutex);
			if (!fep->ptp_clk_on) {
				dev_err(&fep->pdev->dev, "Error: PTP clock is closed!\n");
				mutex_unlock(&fep->ptp_clk_mutex);
				return -EOPNOTSUPP;
			}
			spin_lock_irqsave(&fep->tmreg_lock, flags);
			/* Read current timestamp */
			curr_time = timecounter_read(&fep->tc);
			spin_unlock_irqrestore(&fep->tmreg_lock, flags);
			mutex_unlock(&fep->ptp_clk_mutex);

			/* Calculate time difference */
			delta = fep->perout_stime - curr_time;

			if (fep->perout_stime <= curr_time) {
				dev_err(&fep->pdev->dev, "Start time must be larger than current time!\n");
				return -EINVAL;
			}

			/* Because the FEC timer counter only has 31 bits,
			 * only the low 31 bits of the comparison register
			 * FEC_TCCR can be used. If the start time of the pps
			 * signal is more than 0x80000000 ns ahead of the
			 * current time, a software timer is used instead; it
			 * expires about 1 second before the start time so
			 * that FEC_TCCR can still be set in time.
			 */
			if (delta > FEC_PTP_MAX_NSEC_COUNTER) {
				timeout = ns_to_ktime(delta - NSEC_PER_SEC);
				hrtimer_start(&fep->perout_timer, timeout, HRTIMER_MODE_REL);
			} else {
				return fec_ptp_pps_perout(fep);
			}
		} else {
			fec_ptp_pps_disable(fep, fep->pps_channel);
		}

		return 0;
	} else {
		return -EOPNOTSUPP;
	}
}

int fec_ptp_set(struct net_device *ndev, struct kernel_hwtstamp_config *config,
		struct netlink_ext_ack *extack)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
		fep->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		fep->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		fep->hwts_rx_en = 0;
		break;

	default:
		fep->hwts_rx_en = 1;
		config->rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return 0;
}

void fec_ptp_get(struct net_device *ndev, struct kernel_hwtstamp_config *config)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	config->flags = 0;
	config->tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config->rx_filter = (fep->hwts_rx_en ?
			     HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
}

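/* fec_ptp_set()/fec_ptp_get() implement the driver's hardware timestamping
 * configuration. A typical userspace request looks roughly like this
 * (illustrative only; the interface name is an example and applications
 * such as ptp4l normally do this for you):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *
 *	strcpy(ifr.ifr_name, "eth0");
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * Note that any RX filter other than HWTSTAMP_FILTER_NONE is upgraded to
 * HWTSTAMP_FILTER_ALL above, since the driver timestamps all received
 * frames.
 */
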
/*
 * fec_time_keep - call timecounter_read every second to avoid a timer overrun
 * because the ENET timer is only a 31-bit nanosecond counter and wraps
 * after roughly 2 seconds
 */
static void fec_time_keep(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
	unsigned long flags;

	mutex_lock(&fep->ptp_clk_mutex);
	if (fep->ptp_clk_on) {
		spin_lock_irqsave(&fep->tmreg_lock, flags);
		timecounter_read(&fep->tc);
		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
	}
	mutex_unlock(&fep->ptp_clk_mutex);

	schedule_delayed_work(&fep->time_keep, HZ);
}

/* This function checks the pps event and reloads the timer compare counter. */
static irqreturn_t fec_pps_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 val;
	u8 channel = fep->pps_channel;
	struct ptp_clock_event event;

	val = readl(fep->hwp + FEC_TCSR(channel));
	if (val & FEC_T_TF_MASK) {
		/* Write the next-next compare value (not the next one,
		 * per the spec) to the register
		 */
		writel(fep->next_counter, fep->hwp + FEC_TCCR(channel));
		do {
			writel(val, fep->hwp + FEC_TCSR(channel));
		} while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK);

		/* Update the counter */
		fep->next_counter = (fep->next_counter + fep->reload_period) &
				fep->cc.mask;

		event.type = PTP_CLOCK_PPS;
		ptp_clock_event(fep->ptp_clock, &event);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

/**
 * fec_ptp_init
 * @pdev: The FEC network adapter
 * @irq_idx: the interrupt index
 *
 * This function performs the required steps for enabling ptp
 * support. If ptp support has already been loaded it simply calls the
 * cyclecounter init routine and exits.
 */
void fec_ptp_init(struct platform_device *pdev, int irq_idx)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct device_node *np = fep->pdev->dev.of_node;
	int irq;
	int ret;

	fep->ptp_caps.owner = THIS_MODULE;
	strscpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));

	fep->pps_channel = DEFAULT_PPS_CHANNEL;
	of_property_read_u32(np, "fsl,pps-channel", &fep->pps_channel);

	fep->ptp_caps.max_adj = 250000000;
	fep->ptp_caps.n_alarm = 0;
	fep->ptp_caps.n_ext_ts = 0;
	fep->ptp_caps.n_per_out = 1;
	fep->ptp_caps.n_pins = 0;
	fep->ptp_caps.pps = 1;
	fep->ptp_caps.adjfine = fec_ptp_adjfine;
	fep->ptp_caps.adjtime = fec_ptp_adjtime;
	fep->ptp_caps.gettime64 = fec_ptp_gettime;
	fep->ptp_caps.settime64 = fec_ptp_settime;
	fep->ptp_caps.enable = fec_ptp_enable;

	fep->cycle_speed = clk_get_rate(fep->clk_ptp);
	if (!fep->cycle_speed) {
		fep->cycle_speed = NSEC_PER_SEC;
		dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n");
	}
	fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;

	spin_lock_init(&fep->tmreg_lock);

	fec_ptp_start_cyclecounter(ndev);

	INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);

	hrtimer_setup(&fep->perout_timer, fec_ptp_pps_perout_handler, CLOCK_REALTIME,
		      HRTIMER_MODE_REL);

	irq = platform_get_irq_byname_optional(pdev, "pps");
	if (irq < 0)
		irq = platform_get_irq_optional(pdev, irq_idx);
	/* Failure to get an irq is not fatal,
	 * only the PTP_CLOCK_PPS clock events should stop
	 */
	if (irq >= 0) {
		ret = devm_request_irq(&pdev->dev, irq, fec_pps_interrupt,
				       0, pdev->name, ndev);
		if (ret < 0)
			dev_warn(&pdev->dev, "request for pps irq failed(%d)\n",
				 ret);
	}

	fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
	if (IS_ERR(fep->ptp_clock)) {
		fep->ptp_clock = NULL;
		dev_err(&pdev->dev, "ptp_clock_register failed\n");
	}

	schedule_delayed_work(&fep->time_keep, HZ);
}

void fec_ptp_save_state(struct fec_enet_private *fep)
{
	unsigned long flags;
	u32 atime_inc_corr;

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	fep->ptp_saved_state.pps_enable = fep->pps_enable;

	fep->ptp_saved_state.ns_phc = timecounter_read(&fep->tc);
	fep->ptp_saved_state.ns_sys = ktime_get_ns();

	fep->ptp_saved_state.at_corr = readl(fep->hwp + FEC_ATIME_CORR);
	atime_inc_corr = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_CORR_MASK;
	fep->ptp_saved_state.at_inc_corr = (u8)(atime_inc_corr >> FEC_T_INC_CORR_OFFSET);

	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}

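/* Explanatory note: fec_ptp_save_state() records the PHC time (ns_phc) and
 * the system time (ns_sys, from ktime_get_ns()) at the same instant.
 * fec_ptp_restore_state() below then rebuilds the PHC as
 * saved ns_phc + (current system time - saved ns_sys), so the PTP clock
 * keeps advancing across a MAC reset instead of jumping back to the saved
 * value.
 */
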
/* Restore PTP functionality after a reset */
void fec_ptp_restore_state(struct fec_enet_private *fep)
{
	u32 atime_inc = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
	unsigned long flags;
	u32 counter;
	u64 ns;

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	/* Reset turned it off, so adjust our status flag */
	fep->pps_enable = 0;

	writel(fep->ptp_saved_state.at_corr, fep->hwp + FEC_ATIME_CORR);
	atime_inc |= ((u32)fep->ptp_saved_state.at_inc_corr) << FEC_T_INC_CORR_OFFSET;
	writel(atime_inc, fep->hwp + FEC_ATIME_INC);

	ns = ktime_get_ns() - fep->ptp_saved_state.ns_sys + fep->ptp_saved_state.ns_phc;
	counter = ns & fep->cc.mask;
	writel(counter, fep->hwp + FEC_ATIME);
	timecounter_init(&fep->tc, &fep->cc, ns);

	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	/* Restart PPS if needed */
	if (fep->ptp_saved_state.pps_enable) {
		/* Re-enable PPS */
		fec_ptp_enable_pps(fep, 1);
	}
}

void fec_ptp_stop(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (fep->pps_enable)
		fec_ptp_enable_pps(fep, 0);

	cancel_delayed_work_sync(&fep->time_keep);
	hrtimer_cancel(&fep->perout_timer);
	if (fep->ptp_clock)
		ptp_clock_unregister(fep->ptp_clock);
}