// SPDX-License-Identifier: GPL-2.0
/*
 * Fast Ethernet Controller (ENET) PTP driver for MX6x.
 *
 * Copyright (C) 2012 Freescale Semiconductor, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>

#include "fec.h"

/* FEC 1588 register bits */
#define FEC_T_CTRL_SLAVE		0x00002000
#define FEC_T_CTRL_CAPTURE		0x00000800
#define FEC_T_CTRL_RESTART		0x00000200
#define FEC_T_CTRL_PERIOD_RST		0x00000030
#define FEC_T_CTRL_PERIOD_EN		0x00000010
#define FEC_T_CTRL_ENABLE		0x00000001

#define FEC_T_INC_MASK			0x0000007f
#define FEC_T_INC_OFFSET		0
#define FEC_T_INC_CORR_MASK		0x00007f00
#define FEC_T_INC_CORR_OFFSET		8

#define FEC_T_CTRL_PINPER		0x00000080
#define FEC_T_TF0_MASK			0x00000001
#define FEC_T_TF0_OFFSET		0
#define FEC_T_TF1_MASK			0x00000002
#define FEC_T_TF1_OFFSET		1
#define FEC_T_TF2_MASK			0x00000004
#define FEC_T_TF2_OFFSET		2
#define FEC_T_TF3_MASK			0x00000008
#define FEC_T_TF3_OFFSET		3
#define FEC_T_TDRE_MASK			0x00000001
#define FEC_T_TDRE_OFFSET		0
#define FEC_T_TMODE_MASK		0x0000003C
#define FEC_T_TMODE_OFFSET		2
#define FEC_T_TIE_MASK			0x00000040
#define FEC_T_TIE_OFFSET		6
#define FEC_T_TF_MASK			0x00000080
#define FEC_T_TF_OFFSET			7

#define FEC_ATIME_CTRL			0x400
#define FEC_ATIME			0x404
#define FEC_ATIME_EVT_OFFSET		0x408
#define FEC_ATIME_EVT_PERIOD		0x40c
#define FEC_ATIME_CORR			0x410
#define FEC_ATIME_INC			0x414
#define FEC_TS_TIMESTAMP		0x418

#define FEC_TGSR			0x604
#define FEC_TCSR(n)			(0x608 + (n) * 0x08)
#define FEC_TCCR(n)			(0x60C + (n) * 0x08)
#define MAX_TIMER_CHANNEL		3
#define FEC_TMODE_TOGGLE		0x05
#define FEC_HIGH_PULSE			0x0F

#define FEC_CC_MULT			(1 << 31)
#define FEC_COUNTER_PERIOD		(1 << 31)
#define PPS_OUPUT_RELOAD_PERIOD		NSEC_PER_SEC
#define DEFAULT_PPS_CHANNEL		0

#define FEC_PTP_MAX_NSEC_PERIOD		4000000000ULL
#define FEC_PTP_MAX_NSEC_COUNTER	0x80000000ULL

/**
 * fec_ptp_read - read raw cycle counter (to be used by time counter)
 * @cc: the cyclecounter structure
 *
 * this function reads the cyclecounter registers and is called by the
 * cyclecounter structure used to construct a ns counter from the
 * arbitrary fixed point registers
 */
static u64 fec_ptp_read(const struct cyclecounter *cc)
{
	struct fec_enet_private *fep =
		container_of(cc, struct fec_enet_private, cc);
	u32 tempval;

	tempval = readl(fep->hwp + FEC_ATIME_CTRL);
	tempval |= FEC_T_CTRL_CAPTURE;
	writel(tempval, fep->hwp + FEC_ATIME_CTRL);

	if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
		udelay(1);

	return readl(fep->hwp + FEC_ATIME);
}
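
/* Illustrative note: the raw value returned by fec_ptp_read() is fed to the
 * timecounter, which converts cycles to nanoseconds as
 * ns = (cycles * cc.mult) >> cc.shift. With the values programmed in
 * fec_ptp_start_cyclecounter() below (mult = 1 << 31, shift = 31) the
 * conversion is 1:1, e.g. a captured counter of 0x2540BE40 (625,000,000)
 * corresponds to 625,000,000 ns.
 */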

/**
 * fec_ptp_enable_pps
 * @fep: the fec_enet_private structure handle
 * @enable: enable the channel pps output
 *
 * This function enables the PPS output on the timer channel.
 */
static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
{
	unsigned long flags;
	u32 val, tempval;
	struct timespec64 ts;
	u64 ns;

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	if (fep->pps_enable == enable) {
		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
		return 0;
	}

	if (enable) {
		/* Clear the capture or output compare interrupt status if
		 * there is any.
		 */
		writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel));

		/* It is recommended to double check that the TMODE field in
		 * the TCSR register is cleared before the first compare
		 * counter is written into the TCCR register. Just add a
		 * double check.
		 */
		val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		do {
			val &= ~(FEC_T_TMODE_MASK);
			writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));
			val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		} while (val & FEC_T_TMODE_MASK);

		/* Dummy read counter to update the counter */
		timecounter_read(&fep->tc);
		/* We want to find the first compare event at the next second
		 * boundary. So we need to know what the ptp time is now and
		 * how many nanoseconds remain until the next second. The
		 * remaining nanoseconds before the next second are
		 * NSEC_PER_SEC - ts.tv_nsec; adding them to the current timer
		 * value gives the next second.
		 */
		tempval = fec_ptp_read(&fep->cc);
		/* Convert the ptp local counter to 1588 timestamp */
		ns = timecounter_cyc2time(&fep->tc, tempval);
		ts = ns_to_timespec64(ns);

		/* The tempval is less than 3 seconds, and so val is less than
		 * 4 seconds. No overflow for 32bit calculation.
		 */
		val = NSEC_PER_SEC - (u32)ts.tv_nsec + tempval;

		/* Need to consider the situation that the current time is
		 * very close to the second boundary, which means
		 * NSEC_PER_SEC - ts.tv_nsec is close to zero (for example
		 * 20 ns). Since the timer is still running while we calculate
		 * the first compare event, it is possible that the remaining
		 * nanoseconds run out before the compare counter is
		 * calculated and written into the TCCR register. To avoid
		 * this possibility, we set the compare event to the second
		 * after next. The current setting is a 31-bit timer that
		 * wraps around after about 2 seconds, so it is okay to target
		 * the second after next.
		 */
		val += NSEC_PER_SEC;

		/* We add (2 * NSEC_PER_SEC - (u32)ts.tv_nsec) to the current
		 * ptp counter, which may cause a 32-bit wrap. Since
		 * (NSEC_PER_SEC - (u32)ts.tv_nsec) is less than 2 seconds,
		 * we can ensure the wrap does not cause an issue. An offset
		 * bigger than fep->cc.mask would be an error.
		 */
		val &= fep->cc.mask;
		writel(val, fep->hwp + FEC_TCCR(fep->pps_channel));

		/* Calculate the second compare event timestamp */
		fep->next_counter = (val + fep->reload_period) & fep->cc.mask;

		/* Enable compare event when overflow */
		val = readl(fep->hwp + FEC_ATIME_CTRL);
		val |= FEC_T_CTRL_PINPER;
		writel(val, fep->hwp + FEC_ATIME_CTRL);

		/* Compare channel setting. */
		val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
		val &= ~(1 << FEC_T_TDRE_OFFSET);
		val &= ~(FEC_T_TMODE_MASK);
		val |= (FEC_HIGH_PULSE << FEC_T_TMODE_OFFSET);
		writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));

		/* Write the second compare event timestamp and calculate
		 * the third timestamp. Refer to the TCCR register details in
		 * the spec.
		 */
		writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
		fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
	} else {
		writel(0, fep->hwp + FEC_TCSR(fep->pps_channel));
	}

	fep->pps_enable = enable;
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}
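
/* Worked example of the first compare computation above (numbers are
 * illustrative only): if the captured counter tempval is 0x30000000 and the
 * converted 1588 time has ts.tv_nsec = 999999980, then
 * NSEC_PER_SEC - ts.tv_nsec = 20 ns, so the next second boundary is only
 * 20 ns away and could easily be missed. After adding the extra
 * NSEC_PER_SEC, val = 0x30000000 + 20 + 1000000000 = 0x6B9ACA14, which
 * still fits in the 31-bit counter so the mask leaves it unchanged, and
 * next_counter is then one reload_period (one second for PPS) further on.
 */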

static int fec_ptp_pps_perout(struct fec_enet_private *fep)
{
	u32 compare_val, ptp_hc, temp_val;
	u64 curr_time;
	unsigned long flags;

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	/* Update time counter */
	timecounter_read(&fep->tc);

	/* Get the current ptp hardware time counter */
	ptp_hc = fec_ptp_read(&fep->cc);

	/* Convert the ptp local counter to 1588 timestamp */
	curr_time = timecounter_cyc2time(&fep->tc, ptp_hc);

	/* If the pps start time is less than the current time plus 100 ms,
	 * just return an error, because the software might not be able to
	 * write the comparison time into the FEC_TCCR register in time and
	 * would miss the start time.
	 */
	if (fep->perout_stime < curr_time + 100 * NSEC_PER_MSEC) {
		dev_err(&fep->pdev->dev, "Current time is too close to the start time!\n");
		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
		return -1;
	}

	compare_val = fep->perout_stime - curr_time + ptp_hc;
	compare_val &= fep->cc.mask;

	writel(compare_val, fep->hwp + FEC_TCCR(fep->pps_channel));
	fep->next_counter = (compare_val + fep->reload_period) & fep->cc.mask;

	/* Enable compare event when overflow */
	temp_val = readl(fep->hwp + FEC_ATIME_CTRL);
	temp_val |= FEC_T_CTRL_PINPER;
	writel(temp_val, fep->hwp + FEC_ATIME_CTRL);

	/* Compare channel setting. */
	temp_val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
	temp_val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
	temp_val &= ~(1 << FEC_T_TDRE_OFFSET);
	temp_val &= ~(FEC_T_TMODE_MASK);
	temp_val |= (FEC_TMODE_TOGGLE << FEC_T_TMODE_OFFSET);
	writel(temp_val, fep->hwp + FEC_TCSR(fep->pps_channel));

	/* Write the second compare event timestamp and calculate
	 * the third timestamp. Refer to the TCCR register details in the
	 * spec.
	 */
	writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
	fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

static enum hrtimer_restart fec_ptp_pps_perout_handler(struct hrtimer *timer)
{
	struct fec_enet_private *fep = container_of(timer,
					struct fec_enet_private, perout_timer);

	fec_ptp_pps_perout(fep);

	return HRTIMER_NORESTART;
}
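
/* Worked example of the compare value computed above (numbers are
 * illustrative only): with curr_time = 107.0 s, perout_stime = 108.5 s and
 * a captured hardware counter ptp_hc = 0x70000000 (1,879,048,192), the
 * difference is 1,500,000,000 ns, so compare_val is 3,379,048,192 before
 * masking and 1,231,564,544 after the 31-bit mask. The masked value is what
 * the counter will actually read at the start time, because the counter
 * itself wraps at 2^31.
 */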

/**
 * fec_ptp_start_cyclecounter - create the cycle counter from hw
 * @ndev: network device
 *
 * this function initializes the timecounter and cyclecounter
 * structures for use in generating a ns counter from the arbitrary
 * fixed point cycles registers in the hardware.
 */
void fec_ptp_start_cyclecounter(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned long flags;
	int inc;

	inc = 1000000000 / fep->cycle_speed;

	/* grab the ptp lock */
	spin_lock_irqsave(&fep->tmreg_lock, flags);

	/* 1ns counter */
	writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC);

	/* use 31-bit timer counter */
	writel(FEC_COUNTER_PERIOD, fep->hwp + FEC_ATIME_EVT_PERIOD);

	writel(FEC_T_CTRL_ENABLE | FEC_T_CTRL_PERIOD_RST,
	       fep->hwp + FEC_ATIME_CTRL);

	memset(&fep->cc, 0, sizeof(fep->cc));
	fep->cc.read = fec_ptp_read;
	fep->cc.mask = CLOCKSOURCE_MASK(31);
	fep->cc.shift = 31;
	fep->cc.mult = FEC_CC_MULT;

	/* reset the ns time counter */
	timecounter_init(&fep->tc, &fep->cc, 0);

	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}

/**
 * fec_ptp_adjfine - adjust ptp cycle frequency
 * @ptp: the ptp clock structure
 * @scaled_ppm: scaled parts per million adjustment from base
 *
 * Adjust the frequency of the ptp cycle counter by the
 * indicated amount from the base frequency.
 *
 * Scaled parts per million is ppm with a 16-bit binary fractional field.
 *
 * Because the ENET hardware frequency adjustment is complex,
 * a software method is used to do it.
 */
static int fec_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	unsigned long flags;
	int neg_adj = 0;
	u32 i, tmp;
	u32 corr_inc, corr_period;
	u32 corr_ns;
	u64 lhs, rhs;

	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);

	if (ppb == 0)
		return 0;

	if (ppb < 0) {
		ppb = -ppb;
		neg_adj = 1;
	}

	/* In theory, corr_inc/corr_period = ppb/NSEC_PER_SEC;
	 * Try to find a corr_inc between 1 and fep->ptp_inc that
	 * meets the adjustment requirement.
	 */
	lhs = NSEC_PER_SEC;
	rhs = (u64)ppb * (u64)fep->ptp_inc;
	for (i = 1; i <= fep->ptp_inc; i++) {
		if (lhs >= rhs) {
			corr_inc = i;
			corr_period = div_u64(lhs, rhs);
			break;
		}
		lhs += NSEC_PER_SEC;
	}
	/* Not found? Set it to a high value - double speed
	 * correction in every clock step.
	 */
	if (i > fep->ptp_inc) {
		corr_inc = fep->ptp_inc;
		corr_period = 1;
	}

	if (neg_adj)
		corr_ns = fep->ptp_inc - corr_inc;
	else
		corr_ns = fep->ptp_inc + corr_inc;

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
	tmp |= corr_ns << FEC_T_INC_CORR_OFFSET;
	writel(tmp, fep->hwp + FEC_ATIME_INC);
	corr_period = corr_period > 1 ? corr_period - 1 : corr_period;
	writel(corr_period, fep->hwp + FEC_ATIME_CORR);
	/* dummy read to update the timer. */
	timecounter_read(&fep->tc);

	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}
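
/* Worked example of the correction search above (values are illustrative
 * only, assuming a 125 MHz ptp clock so that fep->ptp_inc = 8 ns): for a
 * requested adjustment of ppb = 100, rhs = 100 * 8 = 800 and the loop exits
 * on the first iteration with corr_inc = 1 and
 * corr_period = NSEC_PER_SEC / 800 = 1,250,000. The hardware is then told
 * to use a 9 ns increment (corr_ns = 8 + 1) roughly once every 1,250,000
 * normal 8 ns increments, which adds about 100 ns per second, i.e. the
 * requested 100 ppb speed-up. A negative ppb would use a 7 ns correction
 * increment instead.
 */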

/**
 * fec_ptp_adjtime
 * @ptp: the ptp clock structure
 * @delta: offset to adjust the cycle counter by
 *
 * adjust the timer by resetting the timecounter structure.
 */
static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);
	unsigned long flags;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	timecounter_adjtime(&fep->tc, delta);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

/**
 * fec_ptp_gettime
 * @ptp: the ptp clock structure
 * @ts: timespec structure to hold the current time value
 *
 * read the timecounter and return the correct value in ns,
 * after converting it into a struct timespec.
 */
static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);
	u64 ns;
	unsigned long flags;

	mutex_lock(&fep->ptp_clk_mutex);
	/* Check the ptp clock */
	if (!fep->ptp_clk_on) {
		mutex_unlock(&fep->ptp_clk_mutex);
		return -EINVAL;
	}
	spin_lock_irqsave(&fep->tmreg_lock, flags);
	ns = timecounter_read(&fep->tc);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
	mutex_unlock(&fep->ptp_clk_mutex);

	*ts = ns_to_timespec64(ns);

	return 0;
}
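
/* Note on the two callbacks above: adjtime only shifts the software
 * timecounter; no hardware register is written. For example, a delta of
 * -500 makes every subsequent gettime report a time 500 ns earlier while
 * the free-running hardware counter keeps counting unchanged. gettime takes
 * ptp_clk_mutex first so the registers are only read while the ptp clock is
 * enabled, and tmreg_lock serializes the actual counter read.
 */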

/**
 * fec_ptp_settime
 * @ptp: the ptp clock structure
 * @ts: the timespec containing the new time for the cycle counter
 *
 * reset the timecounter to use a new base value instead of the kernel
 * wall timer value.
 */
static int fec_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec64 *ts)
{
	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);

	u64 ns;
	unsigned long flags;
	u32 counter;

	mutex_lock(&fep->ptp_clk_mutex);
	/* Check the ptp clock */
	if (!fep->ptp_clk_on) {
		mutex_unlock(&fep->ptp_clk_mutex);
		return -EINVAL;
	}

	ns = timespec64_to_ns(ts);
	/* Get the timer value based on the timestamp.
	 * Update the counter with the masked value.
	 */
	counter = ns & fep->cc.mask;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	writel(counter, fep->hwp + FEC_ATIME);
	timecounter_init(&fep->tc, &fep->cc, ns);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
	mutex_unlock(&fep->ptp_clk_mutex);
	return 0;
}

static int fec_ptp_pps_disable(struct fec_enet_private *fep, uint channel)
{
	unsigned long flags;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	writel(0, fep->hwp + FEC_TCSR(channel));
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}
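
/* Worked example for the settime path above (illustrative only): setting
 * the clock to exactly 10 s gives ns = 10,000,000,000, of which only the
 * low 31 bits (1,410,065,408) fit in the hardware counter and are written
 * to FEC_ATIME, while timecounter_init() records the full ns value. The
 * software timecounter therefore carries the upper bits that the 31-bit
 * hardware counter cannot hold.
 */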

/**
 * fec_ptp_enable
 * @ptp: the ptp clock structure
 * @rq: the requested feature to change
 * @on: whether to enable or disable the feature
 *
 */
static int fec_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);
	ktime_t timeout;
	struct timespec64 start_time, period;
	u64 curr_time, delta, period_ns;
	unsigned long flags;
	int ret = 0;

	if (rq->type == PTP_CLK_REQ_PPS) {
		fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;

		ret = fec_ptp_enable_pps(fep, on);

		return ret;
	} else if (rq->type == PTP_CLK_REQ_PEROUT) {
		/* Reject requests with unsupported flags */
		if (rq->perout.flags)
			return -EOPNOTSUPP;

		if (rq->perout.index != fep->pps_channel)
			return -EOPNOTSUPP;

		period.tv_sec = rq->perout.period.sec;
		period.tv_nsec = rq->perout.period.nsec;
		period_ns = timespec64_to_ns(&period);

		/* The FEC PTP timer only has 31 bits, so a period that
		 * exceeds 4 s is not supported.
		 */
		if (period_ns > FEC_PTP_MAX_NSEC_PERIOD) {
			dev_err(&fep->pdev->dev, "The period must be equal to or less than 4s!\n");
			return -EOPNOTSUPP;
		}

		fep->reload_period = div_u64(period_ns, 2);
		if (on && fep->reload_period) {
			/* Convert 1588 timestamp to ns */
			start_time.tv_sec = rq->perout.start.sec;
			start_time.tv_nsec = rq->perout.start.nsec;
			fep->perout_stime = timespec64_to_ns(&start_time);

			mutex_lock(&fep->ptp_clk_mutex);
			if (!fep->ptp_clk_on) {
				dev_err(&fep->pdev->dev, "Error: PTP clock is closed!\n");
				mutex_unlock(&fep->ptp_clk_mutex);
				return -EOPNOTSUPP;
			}
			spin_lock_irqsave(&fep->tmreg_lock, flags);
			/* Read the current timestamp */
			curr_time = timecounter_read(&fep->tc);
			spin_unlock_irqrestore(&fep->tmreg_lock, flags);
			mutex_unlock(&fep->ptp_clk_mutex);

			/* Calculate the time difference */
			delta = fep->perout_stime - curr_time;

			if (fep->perout_stime <= curr_time) {
				dev_err(&fep->pdev->dev, "Start time must be larger than current time!\n");
				return -EINVAL;
			}

			/* Because the timer counter of the FEC only has 31
			 * bits, only the low 31 bits of the time comparison
			 * register FEC_TCCR can be set accordingly. If the
			 * start time of the pps signal exceeds the current
			 * time by more than 0x80000000 ns, a software timer
			 * is used; it expires about 1 second before the
			 * start time so that FEC_TCCR can then be set in
			 * time.
			 */
			if (delta > FEC_PTP_MAX_NSEC_COUNTER) {
				timeout = ns_to_ktime(delta - NSEC_PER_SEC);
				hrtimer_start(&fep->perout_timer, timeout,
					      HRTIMER_MODE_REL);
			} else {
				return fec_ptp_pps_perout(fep);
			}
		} else {
			fec_ptp_pps_disable(fep, fep->pps_channel);
		}

		return 0;
	} else {
		return -EOPNOTSUPP;
	}
}
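
/* Worked example of the deferral above (illustrative only): if the
 * requested start time is 5.0 s ahead of the current PHC time, delta
 * exceeds FEC_PTP_MAX_NSEC_COUNTER (0x80000000 ns, about 2.1 s), so an
 * hrtimer is armed to fire 4.0 s from now. When it fires, roughly 1 s
 * remains until the start time, which fits comfortably within the 31-bit
 * counter range, and fec_ptp_pps_perout() can then program FEC_TCCR
 * directly.
 */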

int fec_ptp_set(struct net_device *ndev, struct kernel_hwtstamp_config *config,
		struct netlink_ext_ack *extack)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
		fep->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		fep->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		fep->hwts_rx_en = 0;
		break;

	default:
		fep->hwts_rx_en = 1;
		config->rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return 0;
}

void fec_ptp_get(struct net_device *ndev, struct kernel_hwtstamp_config *config)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	config->flags = 0;
	config->tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config->rx_filter = (fep->hwts_rx_en ?
			     HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
}

/*
 * fec_time_keep - call timecounter_read every second to avoid timer overrun
 * because ENET only supports a 32-bit counter, which would overrun in 4s
 */
static void fec_time_keep(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
	unsigned long flags;

	mutex_lock(&fep->ptp_clk_mutex);
	if (fep->ptp_clk_on) {
		spin_lock_irqsave(&fep->tmreg_lock, flags);
		timecounter_read(&fep->tc);
		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
	}
	mutex_unlock(&fep->ptp_clk_mutex);

	schedule_delayed_work(&fep->time_keep, HZ);
}

/* This function checks the pps event and reloads the timer compare counter. */
static irqreturn_t fec_pps_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 val;
	u8 channel = fep->pps_channel;
	struct ptp_clock_event event;

	val = readl(fep->hwp + FEC_TCSR(channel));
	if (val & FEC_T_TF_MASK) {
		/* Write the next-next compare value (not the next one,
		 * according to the spec) to the register.
		 */
		writel(fep->next_counter, fep->hwp + FEC_TCCR(channel));
		do {
			writel(val, fep->hwp + FEC_TCSR(channel));
		} while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK);

		/* Update the counter */
		fep->next_counter = (fep->next_counter + fep->reload_period) &
				    fep->cc.mask;

		event.type = PTP_CLOCK_PPS;
		ptp_clock_event(fep->ptp_clock, &event);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
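
/* Worked example of the reload scheme above (illustrative only, for the
 * 1 Hz PPS case where reload_period = NSEC_PER_SEC): when the compare event
 * for second N fires, FEC_TCCR already holds the value for second N + 1
 * (written by the previous interrupt), so the handler only has to queue the
 * value for second N + 2 and clear the TF flag. This keeps one compare
 * value in flight at all times, which is why the code always programs the
 * "next-next" event.
 */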

/**
 * fec_ptp_init
 * @pdev: The FEC network adapter
 * @irq_idx: the interrupt index
 *
 * This function performs the required steps for enabling ptp
 * support. If ptp support has already been loaded it simply calls the
 * cyclecounter init routine and exits.
 */
void fec_ptp_init(struct platform_device *pdev, int irq_idx)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct device_node *np = fep->pdev->dev.of_node;
	int irq;
	int ret;

	fep->ptp_caps.owner = THIS_MODULE;
	strscpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));

	fep->pps_channel = DEFAULT_PPS_CHANNEL;
	of_property_read_u32(np, "fsl,pps-channel", &fep->pps_channel);

	fep->ptp_caps.max_adj = 250000000;
	fep->ptp_caps.n_alarm = 0;
	fep->ptp_caps.n_ext_ts = 0;
	fep->ptp_caps.n_per_out = 1;
	fep->ptp_caps.n_pins = 0;
	fep->ptp_caps.pps = 1;
	fep->ptp_caps.adjfine = fec_ptp_adjfine;
	fep->ptp_caps.adjtime = fec_ptp_adjtime;
	fep->ptp_caps.gettime64 = fec_ptp_gettime;
	fep->ptp_caps.settime64 = fec_ptp_settime;
	fep->ptp_caps.enable = fec_ptp_enable;

	fep->cycle_speed = clk_get_rate(fep->clk_ptp);
	if (!fep->cycle_speed) {
		fep->cycle_speed = NSEC_PER_SEC;
		dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n");
	}
	fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;

	spin_lock_init(&fep->tmreg_lock);

	fec_ptp_start_cyclecounter(ndev);

	INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);

	hrtimer_init(&fep->perout_timer, CLOCK_REALTIME, HRTIMER_MODE_REL);
	fep->perout_timer.function = fec_ptp_pps_perout_handler;

	irq = platform_get_irq_byname_optional(pdev, "pps");
	if (irq < 0)
		irq = platform_get_irq_optional(pdev, irq_idx);
	/* Failure to get an irq is not fatal,
	 * only the PTP_CLOCK_PPS clock events should stop
	 */
	if (irq >= 0) {
		ret = devm_request_irq(&pdev->dev, irq, fec_pps_interrupt,
				       0, pdev->name, ndev);
		if (ret < 0)
			dev_warn(&pdev->dev, "request for pps irq failed(%d)\n",
				 ret);
	}

	fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
	if (IS_ERR(fep->ptp_clock)) {
		fep->ptp_clock = NULL;
		dev_err(&pdev->dev, "ptp_clock_register failed\n");
	}

	schedule_delayed_work(&fep->time_keep, HZ);
}

void fec_ptp_save_state(struct fec_enet_private *fep)
{
	unsigned long flags;
	u32 atime_inc_corr;

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	fep->ptp_saved_state.pps_enable = fep->pps_enable;

	fep->ptp_saved_state.ns_phc = timecounter_read(&fep->tc);
	fep->ptp_saved_state.ns_sys = ktime_get_ns();

	fep->ptp_saved_state.at_corr = readl(fep->hwp + FEC_ATIME_CORR);
	atime_inc_corr = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_CORR_MASK;
	fep->ptp_saved_state.at_inc_corr = (u8)(atime_inc_corr >> FEC_T_INC_CORR_OFFSET);

	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}

/* Restore PTP functionality after a reset */
void fec_ptp_restore_state(struct fec_enet_private *fep)
{
	u32 atime_inc = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
	unsigned long flags;
	u32 counter;
	u64 ns;

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	/* Reset turned it off, so adjust our status flag */
	fep->pps_enable = 0;

	writel(fep->ptp_saved_state.at_corr, fep->hwp + FEC_ATIME_CORR);
	atime_inc |= ((u32)fep->ptp_saved_state.at_inc_corr) << FEC_T_INC_CORR_OFFSET;
	writel(atime_inc, fep->hwp + FEC_ATIME_INC);

	ns = ktime_get_ns() - fep->ptp_saved_state.ns_sys + fep->ptp_saved_state.ns_phc;
	counter = ns & fep->cc.mask;
	writel(counter, fep->hwp + FEC_ATIME);
	timecounter_init(&fep->tc, &fep->cc, ns);

	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	/* Restart PPS if needed */
	if (fep->ptp_saved_state.pps_enable) {
		/* Re-enable PPS */
		fec_ptp_enable_pps(fep, 1);
	}
}

void fec_ptp_stop(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (fep->pps_enable)
		fec_ptp_enable_pps(fep, 0);

	cancel_delayed_work_sync(&fep->time_keep);
	hrtimer_cancel(&fep->perout_timer);
	if (fep->ptp_clock)
		ptp_clock_unregister(fep->ptp_clock);
}