// SPDX-License-Identifier: GPL-2.0
/*
 * Fast Ethernet Controller (ENET) PTP driver for MX6x.
 *
 * Copyright (C) 2012 Freescale Semiconductor, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>

#include "fec.h"

/* FEC 1588 register bits */
#define FEC_T_CTRL_SLAVE		0x00002000
#define FEC_T_CTRL_CAPTURE		0x00000800
#define FEC_T_CTRL_RESTART		0x00000200
#define FEC_T_CTRL_PERIOD_RST		0x00000030
#define FEC_T_CTRL_PERIOD_EN		0x00000010
#define FEC_T_CTRL_ENABLE		0x00000001

#define FEC_T_INC_MASK			0x0000007f
#define FEC_T_INC_OFFSET		0
#define FEC_T_INC_CORR_MASK		0x00007f00
#define FEC_T_INC_CORR_OFFSET		8

#define FEC_T_CTRL_PINPER		0x00000080
#define FEC_T_TF0_MASK			0x00000001
#define FEC_T_TF0_OFFSET		0
#define FEC_T_TF1_MASK			0x00000002
#define FEC_T_TF1_OFFSET		1
#define FEC_T_TF2_MASK			0x00000004
#define FEC_T_TF2_OFFSET		2
#define FEC_T_TF3_MASK			0x00000008
#define FEC_T_TF3_OFFSET		3
#define FEC_T_TDRE_MASK			0x00000001
#define FEC_T_TDRE_OFFSET		0
#define FEC_T_TMODE_MASK		0x0000003C
#define FEC_T_TMODE_OFFSET		2
#define FEC_T_TIE_MASK			0x00000040
#define FEC_T_TIE_OFFSET		6
#define FEC_T_TF_MASK			0x00000080
#define FEC_T_TF_OFFSET			7

#define FEC_ATIME_CTRL			0x400
#define FEC_ATIME			0x404
#define FEC_ATIME_EVT_OFFSET		0x408
#define FEC_ATIME_EVT_PERIOD		0x40c
#define FEC_ATIME_CORR			0x410
#define FEC_ATIME_INC			0x414
#define FEC_TS_TIMESTAMP		0x418

#define FEC_TGSR			0x604
#define FEC_TCSR(n)			(0x608 + n * 0x08)
#define FEC_TCCR(n)			(0x60C + n * 0x08)
#define MAX_TIMER_CHANNEL		3
#define FEC_TMODE_TOGGLE		0x05
#define FEC_HIGH_PULSE			0x0F

#define FEC_CC_MULT			(1 << 31)
#define FEC_COUNTER_PERIOD		(1 << 31)
#define PPS_OUPUT_RELOAD_PERIOD		NSEC_PER_SEC
#define FEC_CHANNLE_0			0
#define DEFAULT_PPS_CHANNEL		FEC_CHANNLE_0

#define FEC_PTP_MAX_NSEC_PERIOD		4000000000ULL
#define FEC_PTP_MAX_NSEC_COUNTER	0x80000000ULL

/**
 * fec_ptp_read - read raw cycle counter (to be used by time counter)
 * @cc: the cyclecounter structure
 *
 * this function reads the cyclecounter registers and is called by the
 * cyclecounter structure used to construct a ns counter from the
 * arbitrary fixed point registers
 */
static u64 fec_ptp_read(const struct cyclecounter *cc)
{
	struct fec_enet_private *fep =
		container_of(cc, struct fec_enet_private, cc);
	u32 tempval;

	tempval = readl(fep->hwp + FEC_ATIME_CTRL);
	tempval |= FEC_T_CTRL_CAPTURE;
	writel(tempval, fep->hwp + FEC_ATIME_CTRL);

	if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
		udelay(1);

	return readl(fep->hwp + FEC_ATIME);
}

/**
 * fec_ptp_enable_pps
 * @fep: the fec_enet_private structure handle
 * @enable: enable the channel pps output
 *
 * This function enables the PPS output on the timer channel.
 */
static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
{
	unsigned long flags;
	u32 val, tempval;
	struct timespec64 ts;
	u64 ns;

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	if (fep->pps_enable == enable) {
		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
		return 0;
	}

	if (enable) {
		/* Clear the capture or output compare interrupt status,
		 * if any.
		 */
		writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel));

		/* It is recommended to double-check that the TMODE field in
		 * the TCSR register is cleared before the first compare
		 * counter is written into the TCCR register. Just add a
		 * double check.
		 */
		val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		do {
			val &= ~(FEC_T_TMODE_MASK);
			writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));
			val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		} while (val & FEC_T_TMODE_MASK);

		/* Dummy read to update the time counter */
		timecounter_read(&fep->tc);
		/* We want to place the first compare event at the next
		 * second boundary. So we need to know what the PTP time
		 * is now and how many nanoseconds remain until the next
		 * second. The remaining nanoseconds before the next second
		 * are NSEC_PER_SEC - ts.tv_nsec. Adding them to the current
		 * timer value gives the next second boundary.
		 */
		tempval = fec_ptp_read(&fep->cc);
		/* Convert the ptp local counter to 1588 timestamp */
		ns = timecounter_cyc2time(&fep->tc, tempval);
		ts = ns_to_timespec64(ns);

		/* tempval is less than 3 seconds, so val is less than
		 * 4 seconds: no overflow in the 32-bit calculation.
		 */
		val = NSEC_PER_SEC - (u32)ts.tv_nsec + tempval;

		/* We need to consider the case where the current time is
		 * very close to the second boundary, which means NSEC_PER_SEC
		 * - ts.tv_nsec is close to zero (for example 20 ns). Since the
		 * timer is still running while we calculate the first compare
		 * event, it is possible that the remaining nanoseconds run out
		 * before the compare counter is calculated and written into the
		 * TCCR register. To avoid this, we set the compare event to the
		 * second after next. The timer is 31 bits wide and wraps around
		 * after more than 2 seconds, so targeting the second after next
		 * is safe.
		 */
		val += NSEC_PER_SEC;

		/* We add (2 * NSEC_PER_SEC - (u32)ts.tv_nsec) to the current
		 * PTP counter, which may cause a 32-bit wrap. Since
		 * (2 * NSEC_PER_SEC - (u32)ts.tv_nsec) is less than 2 seconds,
		 * the wrap will not cause a problem. An offset bigger than
		 * fep->cc.mask would be an error.
		 */
		val &= fep->cc.mask;
		writel(val, fep->hwp + FEC_TCCR(fep->pps_channel));

		/* Calculate the second compare event timestamp */
		fep->next_counter = (val + fep->reload_period) & fep->cc.mask;

		/* Enable compare event when overflow */
		val = readl(fep->hwp + FEC_ATIME_CTRL);
		val |= FEC_T_CTRL_PINPER;
		writel(val, fep->hwp + FEC_ATIME_CTRL);

		/* Compare channel setting.
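		 * What the bit twiddling below does: FEC_HIGH_PULSE selects an
		 * output-compare mode that pulses the output high on the
		 * compare event, TIE enables the compare interrupt so
		 * fec_pps_interrupt() can rearm the next event, and the TDRE
		 * bit is kept cleared.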
		 */
		val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
		val &= ~(1 << FEC_T_TDRE_OFFSET);
		val &= ~(FEC_T_TMODE_MASK);
		val |= (FEC_HIGH_PULSE << FEC_T_TMODE_OFFSET);
		writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));

		/* Write the second compare event timestamp and calculate
		 * the third timestamp. Refer to the TCCR register details
		 * in the spec.
		 */
		writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
		fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
	} else {
		writel(0, fep->hwp + FEC_TCSR(fep->pps_channel));
	}

	fep->pps_enable = enable;
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

static int fec_ptp_pps_perout(struct fec_enet_private *fep)
{
	u32 compare_val, ptp_hc, temp_val;
	u64 curr_time;
	unsigned long flags;

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	/* Update time counter */
	timecounter_read(&fep->tc);

	/* Get the current ptp hardware time counter */
	ptp_hc = fec_ptp_read(&fep->cc);

	/* Convert the ptp local counter to 1588 timestamp */
	curr_time = timecounter_cyc2time(&fep->tc, ptp_hc);

	/* If the PPS start time is less than the current time plus 100 ms,
	 * just return: the software might not be able to write the
	 * comparison time into the FEC_TCCR register in time and would
	 * miss the start time.
	 */
	if (fep->perout_stime < curr_time + 100 * NSEC_PER_MSEC) {
		dev_err(&fep->pdev->dev, "Current time is too close to the start time!\n");
		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
		return -1;
	}

	compare_val = fep->perout_stime - curr_time + ptp_hc;
	compare_val &= fep->cc.mask;

	writel(compare_val, fep->hwp + FEC_TCCR(fep->pps_channel));
	fep->next_counter = (compare_val + fep->reload_period) & fep->cc.mask;

	/* Enable compare event when overflow */
	temp_val = readl(fep->hwp + FEC_ATIME_CTRL);
	temp_val |= FEC_T_CTRL_PINPER;
	writel(temp_val, fep->hwp + FEC_ATIME_CTRL);

	/* Compare channel setting. */
	temp_val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
	temp_val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
	temp_val &= ~(1 << FEC_T_TDRE_OFFSET);
	temp_val &= ~(FEC_T_TMODE_MASK);
	temp_val |= (FEC_TMODE_TOGGLE << FEC_T_TMODE_OFFSET);
	writel(temp_val, fep->hwp + FEC_TCSR(fep->pps_channel));

	/* Write the second compare event timestamp and calculate
	 * the third timestamp. Refer to the TCCR register details
	 * in the spec.
	 */
	writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
	fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

static enum hrtimer_restart fec_ptp_pps_perout_handler(struct hrtimer *timer)
{
	struct fec_enet_private *fep = container_of(timer,
				struct fec_enet_private, perout_timer);

	fec_ptp_pps_perout(fep);

	return HRTIMER_NORESTART;
}

/**
 * fec_ptp_start_cyclecounter - create the cycle counter from hw
 * @ndev: network device
 *
 * this function initializes the timecounter and cyclecounter
 * structures for use in generating a ns counter from the arbitrary
 * fixed point cycle registers in the hardware.
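 *
 * As a rough example (the actual clk_ptp rate is board specific, so this
 * is only an illustration): with a 125 MHz clk_ptp, the programmed
 * increment is 1000000000 / 125000000 = 8, so the ATIME counter advances
 * in nanoseconds, and mult = 2^31 with shift = 31 makes the
 * cyclecounter-to-ns conversion 1:1.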
 */
void fec_ptp_start_cyclecounter(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned long flags;
	int inc;

	inc = 1000000000 / fep->cycle_speed;

	/* grab the ptp lock */
	spin_lock_irqsave(&fep->tmreg_lock, flags);

	/* 1ns counter */
	writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC);

	/* use 31-bit timer counter */
	writel(FEC_COUNTER_PERIOD, fep->hwp + FEC_ATIME_EVT_PERIOD);

	writel(FEC_T_CTRL_ENABLE | FEC_T_CTRL_PERIOD_RST,
	       fep->hwp + FEC_ATIME_CTRL);

	memset(&fep->cc, 0, sizeof(fep->cc));
	fep->cc.read = fec_ptp_read;
	fep->cc.mask = CLOCKSOURCE_MASK(31);
	fep->cc.shift = 31;
	fep->cc.mult = FEC_CC_MULT;

	/* reset the ns time counter */
	timecounter_init(&fep->tc, &fep->cc, 0);

	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}

/**
 * fec_ptp_adjfine - adjust ptp cycle frequency
 * @ptp: the ptp clock structure
 * @scaled_ppm: scaled parts per million adjustment from base
 *
 * Adjust the frequency of the ptp cycle counter by the
 * indicated amount from the base frequency.
 *
 * Scaled parts per million is ppm with a 16-bit binary fractional field.
 *
 * Because the ENET hardware frequency adjustment is complex,
 * a software method is used to do it.
 */
static int fec_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	unsigned long flags;
	int neg_adj = 0;
	u32 i, tmp;
	u32 corr_inc, corr_period;
	u32 corr_ns;
	u64 lhs, rhs;

	struct fec_enet_private *fep =
		container_of(ptp, struct fec_enet_private, ptp_caps);

	if (ppb == 0)
		return 0;

	if (ppb < 0) {
		ppb = -ppb;
		neg_adj = 1;
	}

	/* In theory, corr_inc/corr_period = ppb/NSEC_PER_SEC.
	 * Try to find a corr_inc between 1 and fep->ptp_inc that
	 * meets the adjustment requirement.
	 */
	lhs = NSEC_PER_SEC;
	rhs = (u64)ppb * (u64)fep->ptp_inc;
	for (i = 1; i <= fep->ptp_inc; i++) {
		if (lhs >= rhs) {
			corr_inc = i;
			corr_period = div_u64(lhs, rhs);
			break;
		}
		lhs += NSEC_PER_SEC;
	}
	/* Not found? Set it to the maximum value: double-speed
	 * correction in every clock step.
	 */
	if (i > fep->ptp_inc) {
		corr_inc = fep->ptp_inc;
		corr_period = 1;
	}

	if (neg_adj)
		corr_ns = fep->ptp_inc - corr_inc;
	else
		corr_ns = fep->ptp_inc + corr_inc;

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
	tmp |= corr_ns << FEC_T_INC_CORR_OFFSET;
	writel(tmp, fep->hwp + FEC_ATIME_INC);
	corr_period = corr_period > 1 ? corr_period - 1 : corr_period;
	writel(corr_period, fep->hwp + FEC_ATIME_CORR);
	/* dummy read to update the timer. */
	timecounter_read(&fep->tc);

	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

/**
 * fec_ptp_adjtime
 * @ptp: the ptp clock structure
 * @delta: offset to adjust the cycle counter by
 *
 * adjust the timer by resetting the timecounter structure.
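 *
 * timecounter_adjtime() only adds @delta to the timecounter's software
 * nanosecond offset; the hardware counter itself is not modified.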
 */
static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);
	unsigned long flags;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	timecounter_adjtime(&fep->tc, delta);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

/**
 * fec_ptp_gettime
 * @ptp: the ptp clock structure
 * @ts: timespec structure to hold the current time value
 *
 * read the timecounter and return the correct value in ns,
 * after converting it into a struct timespec64.
 */
static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);
	u64 ns;
	unsigned long flags;

	mutex_lock(&fep->ptp_clk_mutex);
	/* Check the ptp clock */
	if (!fep->ptp_clk_on) {
		mutex_unlock(&fep->ptp_clk_mutex);
		return -EINVAL;
	}
	spin_lock_irqsave(&fep->tmreg_lock, flags);
	ns = timecounter_read(&fep->tc);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
	mutex_unlock(&fep->ptp_clk_mutex);

	*ts = ns_to_timespec64(ns);

	return 0;
}

/**
 * fec_ptp_settime
 * @ptp: the ptp clock structure
 * @ts: the timespec containing the new time for the cycle counter
 *
 * reset the timecounter to use a new base value instead of the kernel
 * wall timer value.
 */
static int fec_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec64 *ts)
{
	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);

	u64 ns;
	unsigned long flags;
	u32 counter;

	mutex_lock(&fep->ptp_clk_mutex);
	/* Check the ptp clock */
	if (!fep->ptp_clk_on) {
		mutex_unlock(&fep->ptp_clk_mutex);
		return -EINVAL;
	}

	ns = timespec64_to_ns(ts);
	/* Get the timer value based on timestamp.
	 * Update the counter with the masked value.
	 */
	counter = ns & fep->cc.mask;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	writel(counter, fep->hwp + FEC_ATIME);
	timecounter_init(&fep->tc, &fep->cc, ns);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
	mutex_unlock(&fep->ptp_clk_mutex);
	return 0;
}

static int fec_ptp_pps_disable(struct fec_enet_private *fep, uint channel)
{
	unsigned long flags;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	writel(0, fep->hwp + FEC_TCSR(channel));
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

/**
 * fec_ptp_enable
 * @ptp: the ptp clock structure
 * @rq: the requested feature to change
 * @on: whether to enable or disable the feature
 *
 */
static int fec_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);
	ktime_t timeout;
	struct timespec64 start_time, period;
	u64 curr_time, delta, period_ns;
	unsigned long flags;
	int ret = 0;

	if (rq->type == PTP_CLK_REQ_PPS) {
		fep->pps_channel = DEFAULT_PPS_CHANNEL;
		fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;

		ret = fec_ptp_enable_pps(fep, on);

		return ret;
	} else if (rq->type == PTP_CLK_REQ_PEROUT) {
		/* Reject requests with unsupported flags */
		if (rq->perout.flags)
			return -EOPNOTSUPP;

		if (rq->perout.index != DEFAULT_PPS_CHANNEL)
			return -EOPNOTSUPP;

		fep->pps_channel = DEFAULT_PPS_CHANNEL;
		period.tv_sec = rq->perout.period.sec;
		period.tv_nsec = rq->perout.period.nsec;
		period_ns = timespec64_to_ns(&period);

		/* The FEC PTP timer only has 31 bits, so a period that
		 * exceeds 4 s is not supported.
		 */
		if (period_ns > FEC_PTP_MAX_NSEC_PERIOD) {
			dev_err(&fep->pdev->dev, "The period must be equal to or less than 4s!\n");
			return -EOPNOTSUPP;
		}

		fep->reload_period = div_u64(period_ns, 2);
		if (on && fep->reload_period) {
			/* Convert 1588 timestamp to ns */
			start_time.tv_sec = rq->perout.start.sec;
			start_time.tv_nsec = rq->perout.start.nsec;
			fep->perout_stime = timespec64_to_ns(&start_time);

			mutex_lock(&fep->ptp_clk_mutex);
			if (!fep->ptp_clk_on) {
				dev_err(&fep->pdev->dev, "Error: PTP clock is closed!\n");
				mutex_unlock(&fep->ptp_clk_mutex);
				return -EOPNOTSUPP;
			}
			spin_lock_irqsave(&fep->tmreg_lock, flags);
			/* Read current timestamp */
			curr_time = timecounter_read(&fep->tc);
			spin_unlock_irqrestore(&fep->tmreg_lock, flags);
			mutex_unlock(&fep->ptp_clk_mutex);

			/* Calculate time difference */
			delta = fep->perout_stime - curr_time;

			if (fep->perout_stime <= curr_time) {
				dev_err(&fep->pdev->dev, "Start time must be larger than current time!\n");
				return -EINVAL;
			}

			/* Because the FEC timer counter only has 31 bits, only
			 * the low 31 bits of the time comparison register
			 * FEC_TCCR can be set as well. If the start time of the
			 * PPS signal is more than 0x80000000 ns ahead of the
			 * current time, a software timer is used; it expires
			 * about 1 second before the start time so that FEC_TCCR
			 * can still be set in time.
			 */
			if (delta > FEC_PTP_MAX_NSEC_COUNTER) {
				timeout = ns_to_ktime(delta - NSEC_PER_SEC);
				hrtimer_start(&fep->perout_timer, timeout, HRTIMER_MODE_REL);
			} else {
				return fec_ptp_pps_perout(fep);
			}
		} else {
			fec_ptp_pps_disable(fep, fep->pps_channel);
		}

		return 0;
	} else {
		return -EOPNOTSUPP;
	}
}

int fec_ptp_set(struct net_device *ndev, struct kernel_hwtstamp_config *config,
		struct netlink_ext_ack *extack)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
		fep->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		fep->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		fep->hwts_rx_en = 0;
		break;

	default:
		fep->hwts_rx_en = 1;
		config->rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return 0;
}

void fec_ptp_get(struct net_device *ndev, struct kernel_hwtstamp_config *config)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	config->flags = 0;
	config->tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config->rx_filter = (fep->hwts_rx_en ?
			     HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
}

/*
 * fec_time_keep - call timecounter_read every second to avoid timer overrun,
 * because ENET only supports a 32-bit counter and would time out in 4 s
 */
static void fec_time_keep(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
	unsigned long flags;

	mutex_lock(&fep->ptp_clk_mutex);
	if (fep->ptp_clk_on) {
		spin_lock_irqsave(&fep->tmreg_lock, flags);
		timecounter_read(&fep->tc);
		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
	}
	mutex_unlock(&fep->ptp_clk_mutex);

	schedule_delayed_work(&fep->time_keep, HZ);
}

/* This function checks the pps event and reloads the timer compare counter. */
static irqreturn_t fec_pps_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 val;
	u8 channel = fep->pps_channel;
	struct ptp_clock_event event;

	val = readl(fep->hwp + FEC_TCSR(channel));
	if (val & FEC_T_TF_MASK) {
		/* Write the next-next compare value (not the next one,
		 * according to the spec) to the register
		 */
		writel(fep->next_counter, fep->hwp + FEC_TCCR(channel));
		do {
			writel(val, fep->hwp + FEC_TCSR(channel));
		} while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK);

		/* Update the counter */
		fep->next_counter = (fep->next_counter + fep->reload_period) &
				fep->cc.mask;

		event.type = PTP_CLOCK_PPS;
		ptp_clock_event(fep->ptp_clock, &event);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

/**
 * fec_ptp_init
 * @pdev: The FEC network adapter
 * @irq_idx: the interrupt index
 *
 * This function performs the required steps for enabling ptp
 * support. If ptp support has already been loaded it simply calls the
 * cyclecounter init routine and exits.
 */

void fec_ptp_init(struct platform_device *pdev, int irq_idx)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int irq;
	int ret;

	fep->ptp_caps.owner = THIS_MODULE;
	strscpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));

	fep->ptp_caps.max_adj = 250000000;
	fep->ptp_caps.n_alarm = 0;
	fep->ptp_caps.n_ext_ts = 0;
	fep->ptp_caps.n_per_out = 1;
	fep->ptp_caps.n_pins = 0;
	fep->ptp_caps.pps = 1;
	fep->ptp_caps.adjfine = fec_ptp_adjfine;
	fep->ptp_caps.adjtime = fec_ptp_adjtime;
	fep->ptp_caps.gettime64 = fec_ptp_gettime;
	fep->ptp_caps.settime64 = fec_ptp_settime;
	fep->ptp_caps.enable = fec_ptp_enable;

	fep->cycle_speed = clk_get_rate(fep->clk_ptp);
	if (!fep->cycle_speed) {
		fep->cycle_speed = NSEC_PER_SEC;
		dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n");
	}
	fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;

	spin_lock_init(&fep->tmreg_lock);

	fec_ptp_start_cyclecounter(ndev);

	INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);

	hrtimer_init(&fep->perout_timer, CLOCK_REALTIME, HRTIMER_MODE_REL);
	fep->perout_timer.function = fec_ptp_pps_perout_handler;

	irq = platform_get_irq_byname_optional(pdev, "pps");
	if (irq < 0)
		irq = platform_get_irq_optional(pdev, irq_idx);
	/* Failure to get an irq is not fatal,
	 * only the PTP_CLOCK_PPS clock events should stop
	 */
	if (irq >= 0) {
		ret = devm_request_irq(&pdev->dev, irq, fec_pps_interrupt,
				       0, pdev->name, ndev);
		if (ret < 0)
			dev_warn(&pdev->dev, "request for pps irq failed(%d)\n",
				 ret);
	}

	fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
	if (IS_ERR(fep->ptp_clock)) {
		fep->ptp_clock = NULL;
		dev_err(&pdev->dev, "ptp_clock_register failed\n");
	}

	schedule_delayed_work(&fep->time_keep, HZ);
}

void fec_ptp_stop(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (fep->pps_enable)
		fec_ptp_enable_pps(fep, 0);

	cancel_delayed_work_sync(&fep->time_keep);
	hrtimer_cancel(&fep->perout_timer);
	if (fep->ptp_clock)
		ptp_clock_unregister(fep->ptp_clock);
}