// SPDX-License-Identifier: GPL-2.0
/*
 * Fast Ethernet Controller (ENET) PTP driver for MX6x.
 *
 * Copyright (C) 2012 Freescale Semiconductor, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>

#include "fec.h"

/* FEC 1588 register bits */
#define FEC_T_CTRL_SLAVE		0x00002000
#define FEC_T_CTRL_CAPTURE		0x00000800
#define FEC_T_CTRL_RESTART		0x00000200
#define FEC_T_CTRL_PERIOD_RST		0x00000030
#define FEC_T_CTRL_PERIOD_EN		0x00000010
#define FEC_T_CTRL_ENABLE		0x00000001

#define FEC_T_INC_MASK			0x0000007f
#define FEC_T_INC_OFFSET		0
#define FEC_T_INC_CORR_MASK		0x00007f00
#define FEC_T_INC_CORR_OFFSET		8

#define FEC_T_CTRL_PINPER		0x00000080
#define FEC_T_TF0_MASK			0x00000001
#define FEC_T_TF0_OFFSET		0
#define FEC_T_TF1_MASK			0x00000002
#define FEC_T_TF1_OFFSET		1
#define FEC_T_TF2_MASK			0x00000004
#define FEC_T_TF2_OFFSET		2
#define FEC_T_TF3_MASK			0x00000008
#define FEC_T_TF3_OFFSET		3
#define FEC_T_TDRE_MASK			0x00000001
#define FEC_T_TDRE_OFFSET		0
#define FEC_T_TMODE_MASK		0x0000003C
#define FEC_T_TMODE_OFFSET		2
#define FEC_T_TIE_MASK			0x00000040
#define FEC_T_TIE_OFFSET		6
#define FEC_T_TF_MASK			0x00000080
#define FEC_T_TF_OFFSET			7

#define FEC_ATIME_CTRL			0x400
#define FEC_ATIME			0x404
#define FEC_ATIME_EVT_OFFSET		0x408
#define FEC_ATIME_EVT_PERIOD		0x40c
#define FEC_ATIME_CORR			0x410
#define FEC_ATIME_INC			0x414
#define FEC_TS_TIMESTAMP		0x418

#define FEC_TGSR			0x604
#define FEC_TCSR(n)			(0x608 + (n) * 0x08)
#define FEC_TCCR(n)			(0x60C + (n) * 0x08)
#define MAX_TIMER_CHANNEL		3
#define FEC_TMODE_TOGGLE		0x05
#define FEC_HIGH_PULSE			0x0F

#define FEC_CC_MULT			(1 << 31)
#define FEC_COUNTER_PERIOD		(1 << 31)
#define PPS_OUPUT_RELOAD_PERIOD		NSEC_PER_SEC
#define FEC_CHANNLE_0			0
#define DEFAULT_PPS_CHANNEL		FEC_CHANNLE_0

/**
 * fec_ptp_enable_pps
 * @fep: the fec_enet_private structure handle
 * @enable: enable the channel pps output
 *
 * This function enables the PPS output on the timer channel.
 */
static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
{
	unsigned long flags;
	u32 val, tempval;
	int inc;
	struct timespec64 ts;
	u64 ns;

	val = 0;

	if (!(fep->hwts_tx_en || fep->hwts_rx_en)) {
		dev_err(&fep->pdev->dev, "No ptp stack is running\n");
		return -EINVAL;
	}

	if (fep->pps_enable == enable)
		return 0;

	fep->pps_channel = DEFAULT_PPS_CHANNEL;
	fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
	inc = fep->ptp_inc;

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	if (enable) {
		/* Clear the capture or output compare interrupt status,
		 * if any.
		 */
		writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel));

		/* It is recommended to double-check that the TMODE field in
		 * the TCSR register is cleared before the first compare
		 * counter is written into the TCCR register. Just add a
		 * double check.
		 */
		val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		do {
			val &= ~(FEC_T_TMODE_MASK);
			writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));
			val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		} while (val & FEC_T_TMODE_MASK);

		/* Dummy read counter to update the counter */
		timecounter_read(&fep->tc);
		/* We want to place the first compare event at the next second
		 * boundary. So we need to know what the ptp time is now and
		 * how many nanoseconds remain until the next second. The
		 * remaining nanoseconds before the next second are
		 * NSEC_PER_SEC - ts.tv_nsec; adding them to the current
		 * timer value gives the next second.
		 */
		tempval = readl(fep->hwp + FEC_ATIME_CTRL);
		tempval |= FEC_T_CTRL_CAPTURE;
		writel(tempval, fep->hwp + FEC_ATIME_CTRL);

		tempval = readl(fep->hwp + FEC_ATIME);
		/* Convert the ptp local counter to 1588 timestamp */
		ns = timecounter_cyc2time(&fep->tc, tempval);
		ts = ns_to_timespec64(ns);

		/* The tempval is less than 3 seconds, and so val is less than
		 * 4 seconds. No overflow for 32-bit calculation.
		 */
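		/* Worked example with illustrative (assumed, not captured)
		 * values: if the captured counter tempval is 0x10000000 and
		 * ts.tv_nsec is 999999980 (20 ns before the second boundary),
		 * the line below gives val = 0x10000000 + 20, i.e. the
		 * counter value at the upcoming second boundary.
		 */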
		val = NSEC_PER_SEC - (u32)ts.tv_nsec + tempval;

		/* Need to consider the situation where the current time is
		 * very close to the second boundary, which means
		 * NSEC_PER_SEC - ts.tv_nsec is close to zero (for example
		 * 20 ns); since the timer is still running while we calculate
		 * the first compare event, it is possible that the remaining
		 * nanoseconds run out before the compare counter is
		 * calculated and written into the TCCR register. To avoid
		 * this possibility, we set the compare event to the second
		 * after next. The current setting is a 31-bit timer that
		 * wraps around after more than 2 seconds, so it is okay to
		 * set the timer to the second after next.
		 */
		val += NSEC_PER_SEC;

		/* We add (2 * NSEC_PER_SEC - (u32)ts.tv_nsec) to the current
		 * ptp counter, which may cause a 32-bit wrap. Since
		 * (2 * NSEC_PER_SEC - (u32)ts.tv_nsec) is less than 2
		 * seconds, the wrap will not cause a problem. An offset
		 * bigger than fep->cc.mask would be an error.
		 */
		val &= fep->cc.mask;
		writel(val, fep->hwp + FEC_TCCR(fep->pps_channel));

		/* Calculate the second compare event timestamp */
		fep->next_counter = (val + fep->reload_period) & fep->cc.mask;

		/* Enable compare event when overflow */
		val = readl(fep->hwp + FEC_ATIME_CTRL);
		val |= FEC_T_CTRL_PINPER;
		writel(val, fep->hwp + FEC_ATIME_CTRL);

		/* Compare channel setting. */
		val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
		val &= ~(1 << FEC_T_TDRE_OFFSET);
		val &= ~(FEC_T_TMODE_MASK);
		val |= (FEC_HIGH_PULSE << FEC_T_TMODE_OFFSET);
		writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));

		/* Write the second compare event timestamp and calculate
		 * the third timestamp. Refer to the TCCR register details
		 * in the spec.
		 */
		writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
		fep->next_counter = (fep->next_counter + fep->reload_period) &
				fep->cc.mask;
	} else {
		writel(0, fep->hwp + FEC_TCSR(fep->pps_channel));
	}

	fep->pps_enable = enable;
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

/**
 * fec_ptp_read - read raw cycle counter (to be used by time counter)
 * @cc: the cyclecounter structure
 *
 * This function reads the cyclecounter registers and is called by the
 * cyclecounter structure used to construct a ns counter from the
 * arbitrary fixed point registers.
 */
static u64 fec_ptp_read(const struct cyclecounter *cc)
{
	struct fec_enet_private *fep =
		container_of(cc, struct fec_enet_private, cc);
	const struct platform_device_id *id_entry =
		platform_get_device_id(fep->pdev);
	u32 tempval;

	tempval = readl(fep->hwp + FEC_ATIME_CTRL);
	tempval |= FEC_T_CTRL_CAPTURE;
	writel(tempval, fep->hwp + FEC_ATIME_CTRL);

	if (id_entry->driver_data & FEC_QUIRK_BUG_CAPTURE)
		udelay(1);

	return readl(fep->hwp + FEC_ATIME);
}

/**
 * fec_ptp_start_cyclecounter - create the cycle counter from hw
 * @ndev: network device
 *
 * This function initializes the timecounter and cyclecounter
 * structures for use in generating a ns counter from the arbitrary
 * fixed point registers in the hardware.
 */
void fec_ptp_start_cyclecounter(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned long flags;
	int inc;

	inc = 1000000000 / fep->cycle_speed;

	/* grab the ptp lock */
	spin_lock_irqsave(&fep->tmreg_lock, flags);

	/* 1ns counter */
	writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC);

	/* use 31-bit timer counter */
	writel(FEC_COUNTER_PERIOD, fep->hwp + FEC_ATIME_EVT_PERIOD);

	writel(FEC_T_CTRL_ENABLE | FEC_T_CTRL_PERIOD_RST,
	       fep->hwp + FEC_ATIME_CTRL);

	memset(&fep->cc, 0, sizeof(fep->cc));
	fep->cc.read = fec_ptp_read;
	fep->cc.mask = CLOCKSOURCE_MASK(31);
	fep->cc.shift = 31;
	fep->cc.mult = FEC_CC_MULT;
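	/* Note: with mult = 2^31 and shift = 31 the cyclecounter conversion
	 * (cycles * mult) >> shift is an identity, i.e. one timer cycle is
	 * exactly one nanosecond, and the 31-bit mask matches the
	 * FEC_COUNTER_PERIOD wrap value programmed above.
	 */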

	/* reset the ns time counter */
	timecounter_init(&fep->tc, &fep->cc, ktime_to_ns(ktime_get_real()));

	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}

/**
 * fec_ptp_adjfreq - adjust ptp cycle frequency
 * @ptp: the ptp clock structure
 * @ppb: parts per billion adjustment from base
 *
 * Adjust the frequency of the ptp cycle counter by the
 * indicated ppb from the base frequency.
 *
 * Because ENET hardware frequency adjustment is complex,
 * a software method is used to do it.
 */
static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	unsigned long flags;
	int neg_adj = 0;
	u32 i, tmp;
	u32 corr_inc, corr_period;
	u32 corr_ns;
	u64 lhs, rhs;

	struct fec_enet_private *fep =
		container_of(ptp, struct fec_enet_private, ptp_caps);

	if (ppb == 0)
		return 0;

	if (ppb < 0) {
		ppb = -ppb;
		neg_adj = 1;
	}

	/* In theory, corr_inc/corr_period = ppb / NSEC_PER_SEC;
	 * try to find a corr_inc between 1 and fep->ptp_inc that
	 * meets the adjustment requirement.
	 */
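	/* Illustrative example (values assumed, not taken from hardware):
	 * with a 125 MHz ptp clock, fep->ptp_inc = 8. For ppb = 100 the
	 * loop below gives rhs = 800, so corr_inc = 1 and
	 * corr_period = 1000000000 / 800 = 1250000: one extra (or one
	 * missing) nanosecond every 1250000 ticks of 8 ns, i.e. 100 ns
	 * per second, which is the requested 100 ppb.
	 */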
	lhs = NSEC_PER_SEC;
	rhs = (u64)ppb * (u64)fep->ptp_inc;
	for (i = 1; i <= fep->ptp_inc; i++) {
		if (lhs >= rhs) {
			corr_inc = i;
			corr_period = div_u64(lhs, rhs);
			break;
		}
		lhs += NSEC_PER_SEC;
	}
	/* Not found? Set it to the maximum value: correct on every
	 * clock step (double speed).
	 */
	if (i > fep->ptp_inc) {
		corr_inc = fep->ptp_inc;
		corr_period = 1;
	}

	if (neg_adj)
		corr_ns = fep->ptp_inc - corr_inc;
	else
		corr_ns = fep->ptp_inc + corr_inc;

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
	tmp |= corr_ns << FEC_T_INC_CORR_OFFSET;
	writel(tmp, fep->hwp + FEC_ATIME_INC);
	corr_period = corr_period > 1 ? corr_period - 1 : corr_period;
	writel(corr_period, fep->hwp + FEC_ATIME_CORR);
	/* dummy read to update the timer. */
	timecounter_read(&fep->tc);

	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

/**
 * fec_ptp_adjtime
 * @ptp: the ptp clock structure
 * @delta: offset to adjust the cycle counter by
 *
 * Adjust the timer by resetting the timecounter structure.
 */
static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct fec_enet_private *fep =
		container_of(ptp, struct fec_enet_private, ptp_caps);
	unsigned long flags;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	timecounter_adjtime(&fep->tc, delta);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

/**
 * fec_ptp_gettime
 * @ptp: the ptp clock structure
 * @ts: timespec structure to hold the current time value
 *
 * Read the timecounter and return the correct value in ns,
 * after converting it into a struct timespec64.
 */
static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	struct fec_enet_private *adapter =
		container_of(ptp, struct fec_enet_private, ptp_caps);
	u64 ns;
	unsigned long flags;

	spin_lock_irqsave(&adapter->tmreg_lock, flags);
	ns = timecounter_read(&adapter->tc);
	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}

/**
 * fec_ptp_settime
 * @ptp: the ptp clock structure
 * @ts: the timespec containing the new time for the cycle counter
 *
 * Reset the timecounter to use a new base value instead of the kernel
 * wall timer value.
 */
static int fec_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec64 *ts)
{
	struct fec_enet_private *fep =
		container_of(ptp, struct fec_enet_private, ptp_caps);

	u64 ns;
	unsigned long flags;
	u32 counter;

	mutex_lock(&fep->ptp_clk_mutex);
	/* Check the ptp clock */
	if (!fep->ptp_clk_on) {
		mutex_unlock(&fep->ptp_clk_mutex);
		return -EINVAL;
	}

	ns = timespec64_to_ns(ts);
	/* Get the timer value based on timestamp.
	 * Update the counter with the masked value.
	 */
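	/* Only the low 31 bits of the new time fit in the hardware counter;
	 * the timecounter initialized below keeps the full 64-bit nanosecond
	 * value and accounts for counter wrap-around on subsequent reads.
	 */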
	counter = ns & fep->cc.mask;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	writel(counter, fep->hwp + FEC_ATIME);
	timecounter_init(&fep->tc, &fep->cc, ns);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
	mutex_unlock(&fep->ptp_clk_mutex);
	return 0;
}

/**
 * fec_ptp_enable
 * @ptp: the ptp clock structure
 * @rq: the requested feature to change
 * @on: whether to enable or disable the feature
 *
 */
static int fec_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	struct fec_enet_private *fep =
		container_of(ptp, struct fec_enet_private, ptp_caps);
	int ret = 0;

	if (rq->type == PTP_CLK_REQ_PPS) {
		ret = fec_ptp_enable_pps(fep, on);

		return ret;
	}
	return -EOPNOTSUPP;
}

int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		fep->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		fep->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (fep->hwts_rx_en)
			fep->hwts_rx_en = 0;
		config.rx_filter = HWTSTAMP_FILTER_NONE;
		break;

	default:
		fep->hwts_rx_en = 1;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}

int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct hwtstamp_config config;

	config.flags = 0;
	config.tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = (fep->hwts_rx_en ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}

/**
 * fec_time_keep - call timecounter_read every second to avoid timer overrun
 *                 because the ENET only supports a 32-bit counter, which
 *                 would time out in about 4 seconds
 * @work: the work_struct embedded in struct fec_enet_private
 */
static void fec_time_keep(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct fec_enet_private *fep =
		container_of(dwork, struct fec_enet_private, time_keep);
	u64 ns;
	unsigned long flags;

	mutex_lock(&fep->ptp_clk_mutex);
	if (fep->ptp_clk_on) {
		spin_lock_irqsave(&fep->tmreg_lock, flags);
		ns = timecounter_read(&fep->tc);
		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
	}
	mutex_unlock(&fep->ptp_clk_mutex);

	schedule_delayed_work(&fep->time_keep, HZ);
}

/* This function checks the pps event and reloads the timer compare counter. */
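/* As set up in fec_ptp_enable_pps() above, TCCR always has one compare value
 * queued ahead: when the flag fires for event N, the handler below writes the
 * value for event N + 2 (fep->next_counter) into TCCR, clears the flag, and
 * then advances next_counter by the reload period.
 */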
static irqreturn_t fec_pps_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 val;
	u8 channel = fep->pps_channel;
	struct ptp_clock_event event;

	val = readl(fep->hwp + FEC_TCSR(channel));
	if (val & FEC_T_TF_MASK) {
		/* Write the next-next compare value (not the next one,
		 * per the spec) to the register.
		 */
		writel(fep->next_counter, fep->hwp + FEC_TCCR(channel));
		do {
			writel(val, fep->hwp + FEC_TCSR(channel));
		} while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK);

		/* Update the counter. */
		fep->next_counter = (fep->next_counter + fep->reload_period) &
				fep->cc.mask;

		event.type = PTP_CLOCK_PPS;
		ptp_clock_event(fep->ptp_clock, &event);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

/**
 * fec_ptp_init
 * @pdev: the platform device of the FEC network adapter
 * @irq_idx: fallback interrupt index used when no "pps" interrupt is found
 *
 * This function performs the required steps for enabling ptp
 * support. If ptp support has already been loaded it simply calls the
 * cyclecounter init routine and exits.
 */
void fec_ptp_init(struct platform_device *pdev, int irq_idx)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int irq;
	int ret;

	fep->ptp_caps.owner = THIS_MODULE;
	snprintf(fep->ptp_caps.name, 16, "fec ptp");

	fep->ptp_caps.max_adj = 250000000;
	fep->ptp_caps.n_alarm = 0;
	fep->ptp_caps.n_ext_ts = 0;
	fep->ptp_caps.n_per_out = 0;
	fep->ptp_caps.n_pins = 0;
	fep->ptp_caps.pps = 1;
	fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
	fep->ptp_caps.adjtime = fec_ptp_adjtime;
	fep->ptp_caps.gettime64 = fec_ptp_gettime;
	fep->ptp_caps.settime64 = fec_ptp_settime;
	fep->ptp_caps.enable = fec_ptp_enable;

	fep->cycle_speed = clk_get_rate(fep->clk_ptp);
	fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;

	spin_lock_init(&fep->tmreg_lock);

	fec_ptp_start_cyclecounter(ndev);

	INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);

	irq = platform_get_irq_byname(pdev, "pps");
	if (irq < 0)
		irq = platform_get_irq(pdev, irq_idx);
	/* Failure to get an irq is not fatal; only the PTP_CLOCK_PPS
	 * clock events will stop.
	 */
	if (irq >= 0) {
		ret = devm_request_irq(&pdev->dev, irq, fec_pps_interrupt,
				       0, pdev->name, ndev);
		if (ret < 0)
			dev_warn(&pdev->dev, "request for pps irq failed(%d)\n",
				 ret);
	}

	fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
	if (IS_ERR(fep->ptp_clock)) {
		fep->ptp_clock = NULL;
		pr_err("ptp_clock_register failed\n");
	}

	schedule_delayed_work(&fep->time_keep, HZ);
}

void fec_ptp_stop(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	cancel_delayed_work_sync(&fep->time_keep);
	if (fep->ptp_clock)
		ptp_clock_unregister(fep->ptp_clock);
}