// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * NXP NETC V4 Timer driver
 * Copyright 2025 NXP
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/fsl/netc_global.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/ptp_clock_kernel.h>

#define NETC_TMR_PCI_VENDOR_NXP	0x1131

#define NETC_TMR_CTRL		0x0080
#define TMR_CTRL_CK_SEL		GENMASK(1, 0)
#define TMR_CTRL_TE		BIT(2)
#define TMR_ETEP(i)		BIT(8 + (i))
#define TMR_COMP_MODE		BIT(15)
#define TMR_CTRL_TCLK_PERIOD	GENMASK(25, 16)
#define TMR_CTRL_PPL(i)		BIT(27 - (i))
#define TMR_CTRL_FS		BIT(28)

#define NETC_TMR_TEVENT		0x0084
#define TMR_TEVENT_PPEN(i)	BIT(7 - (i))
#define TMR_TEVENT_PPEN_ALL	GENMASK(7, 5)
#define TMR_TEVENT_ALMEN(i)	BIT(16 + (i))
#define TMR_TEVENT_ETS_THREN(i)	BIT(20 + (i))
#define TMR_TEVENT_ETSEN(i)	BIT(24 + (i))
#define TMR_TEVENT_ETS_OVEN(i)	BIT(28 + (i))
#define TMR_TEVENT_ETS(i)	(TMR_TEVENT_ETS_THREN(i) | \
				 TMR_TEVENT_ETSEN(i) | \
				 TMR_TEVENT_ETS_OVEN(i))

#define NETC_TMR_TEMASK		0x0088
#define NETC_TMR_STAT		0x0094
#define TMR_STAT_ETS_VLD(i)	BIT(24 + (i))

#define NETC_TMR_CNT_L		0x0098
#define NETC_TMR_CNT_H		0x009c
#define NETC_TMR_ADD		0x00a0
#define NETC_TMR_PRSC		0x00a8
#define NETC_TMR_ECTRL		0x00ac
#define NETC_TMR_OFF_L		0x00b0
#define NETC_TMR_OFF_H		0x00b4

/* i = 0, 1. i indicates the index of TMR_ALARM. */
#define NETC_TMR_ALARM_L(i)	(0x00b8 + (i) * 8)
#define NETC_TMR_ALARM_H(i)	(0x00bc + (i) * 8)

/* i = 0, 1, 2. i indicates the index of TMR_FIPER. */
#define NETC_TMR_FIPER(i)	(0x00d0 + (i) * 4)

#define NETC_TMR_FIPER_CTRL	0x00dc
#define FIPER_CTRL_DIS(i)	(BIT(7) << (i) * 8)
#define FIPER_CTRL_PG(i)	(BIT(6) << (i) * 8)
#define FIPER_CTRL_FS_ALARM(i)	(BIT(5) << (i) * 8)
#define FIPER_CTRL_PW(i)	(GENMASK(4, 0) << (i) * 8)
#define FIPER_CTRL_SET_PW(i, v)	(((v) & GENMASK(4, 0)) << 8 * (i))

/* i = 0, 1. i indicates the index of TMR_ETTS. */
#define NETC_TMR_ETTS_L(i)	(0x00e0 + (i) * 8)
#define NETC_TMR_ETTS_H(i)	(0x00e4 + (i) * 8)
#define NETC_TMR_CUR_TIME_L	0x00f0
#define NETC_TMR_CUR_TIME_H	0x00f4

#define NETC_TMR_REGS_BAR	0
#define NETC_GLOBAL_OFFSET	0x10000
#define NETC_GLOBAL_IPBRR0	0xbf8
#define IPBRR0_IP_REV		GENMASK(15, 0)
#define NETC_REV_4_1		0x0401

#define NETC_TMR_FIPER_NUM	3
#define NETC_TMR_INVALID_CHANNEL	NETC_TMR_FIPER_NUM
#define NETC_TMR_DEFAULT_PRSC	2
#define NETC_TMR_DEFAULT_ALARM	GENMASK_ULL(63, 0)
#define NETC_TMR_DEFAULT_FIPER	GENMASK(31, 0)
#define NETC_TMR_FIPER_MAX_PW	GENMASK(4, 0)
#define NETC_TMR_ALARM_NUM	2
#define NETC_TMR_DEFAULT_ETTF_THR	7

/* 1588 timer reference clock source select */
#define NETC_TMR_CCM_TIMER1	0	/* enet_timer1_clk_root, from CCM */
#define NETC_TMR_SYSTEM_CLK	1	/* enet_clk_root/2, from CCM */
#define NETC_TMR_EXT_OSC	2	/* tmr_1588_clk, from IO pins */

#define NETC_TMR_SYSCLK_333M	333333333U

enum netc_pp_type {
	NETC_PP_PPS = 1,
	NETC_PP_PEROUT,
};

struct netc_pp {
	enum netc_pp_type type;
	bool enabled;
	int alarm_id;
	u32 period; /* pulse period, ns */
	u64 stime; /* start time, ns */
};

struct netc_timer {
	void __iomem *base;
	struct pci_dev *pdev;
	spinlock_t lock; /* Prevent concurrent access to registers */

	struct ptp_clock *clock;
	struct ptp_clock_info caps;
	u32 clk_select;
	u32 clk_freq;
	u32 oclk_prsc;
	/* Upper 32 bits: integer part, lower 32 bits: fractional part */
	u64 period;

	int irq;
	char irq_name[24];
	int revision;
	u32 tmr_emask;
	u8 pps_channel;
	u8 fs_alarm_num;
	u8 fs_alarm_bitmap;
	struct netc_pp pp[NETC_TMR_FIPER_NUM]; /* periodic pulse */
};

#define netc_timer_rd(p, o)	netc_read((p)->base + (o))
#define netc_timer_wr(p, o, v)	netc_write((p)->base + (o), v)
#define ptp_to_netc_timer(ptp)	container_of((ptp), struct netc_timer, caps)

static const char *const timer_clk_src[] = {
	"ccm",
	"ext"
};

static void netc_timer_cnt_write(struct netc_timer *priv, u64 ns)
{
	u32 tmr_cnt_h = upper_32_bits(ns);
	u32 tmr_cnt_l = lower_32_bits(ns);

	/* A write to the TMR_CNT_L register copies the written value
	 * into the shadow TMR_CNT_L register, and a write to the
	 * TMR_CNT_H register copies the written value into the shadow
	 * TMR_CNT_H register. The contents of the shadow registers are
	 * transferred to TMR_CNT_L and TMR_CNT_H following the write to
	 * TMR_CNT_H, so the user must write to the TMR_CNT_L register
	 * first. Other H/L register pairs behave the same way.
	 */
	netc_timer_wr(priv, NETC_TMR_CNT_L, tmr_cnt_l);
	netc_timer_wr(priv, NETC_TMR_CNT_H, tmr_cnt_h);
}

static u64 netc_timer_offset_read(struct netc_timer *priv)
{
	u32 tmr_off_l, tmr_off_h;
	u64 offset;

	tmr_off_l = netc_timer_rd(priv, NETC_TMR_OFF_L);
	tmr_off_h = netc_timer_rd(priv, NETC_TMR_OFF_H);
	offset = (((u64)tmr_off_h) << 32) | tmr_off_l;

	return offset;
}

static void netc_timer_offset_write(struct netc_timer *priv, u64 offset)
{
	u32 tmr_off_h = upper_32_bits(offset);
	u32 tmr_off_l = lower_32_bits(offset);

	netc_timer_wr(priv, NETC_TMR_OFF_L, tmr_off_l);
	netc_timer_wr(priv, NETC_TMR_OFF_H, tmr_off_h);
}

static u64 netc_timer_cur_time_read(struct netc_timer *priv)
{
	u32 time_h, time_l;
	u64 ns;

	/* The user must read NETC_TMR_CUR_TIME_L first to get the
	 * correct current time.
	 */
	time_l = netc_timer_rd(priv, NETC_TMR_CUR_TIME_L);
	time_h = netc_timer_rd(priv, NETC_TMR_CUR_TIME_H);
	ns = (u64)time_h << 32 | time_l;

	return ns;
}

static void netc_timer_alarm_write(struct netc_timer *priv,
				   u64 alarm, int index)
{
	u32 alarm_h = upper_32_bits(alarm);
	u32 alarm_l = lower_32_bits(alarm);

	netc_timer_wr(priv, NETC_TMR_ALARM_L(index), alarm_l);
	netc_timer_wr(priv, NETC_TMR_ALARM_H(index), alarm_h);
}

static u32 netc_timer_get_integral_period(struct netc_timer *priv)
{
	u32 tmr_ctrl, integral_period;

	tmr_ctrl = netc_timer_rd(priv, NETC_TMR_CTRL);
	integral_period = FIELD_GET(TMR_CTRL_TCLK_PERIOD, tmr_ctrl);

	return integral_period;
}

static u32 netc_timer_calculate_fiper_pw(struct netc_timer *priv,
					 u32 fiper)
{
	u64 divisor, pulse_width;

	/* Set the FIPER pulse width to half the FIPER interval by default.
	 * pulse_width = (fiper / 2) / TMR_GCLK_period,
	 * TMR_GCLK_period = NSEC_PER_SEC / TMR_GCLK_freq,
	 * TMR_GCLK_freq = (clk_freq / oclk_prsc) Hz,
	 * so pulse_width = fiper * clk_freq / (2 * NSEC_PER_SEC * oclk_prsc).
	 */
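	/* Worked example (illustrative numbers only): for a ~1 s FIPER
	 * interval with the default 333333333 Hz reference clock and
	 * oclk_prsc = 2, this yields ~83333333 TMR_GCLK cycles, far above
	 * the 5-bit maximum, so the result is clamped below.
	 */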
	divisor = mul_u32_u32(2 * NSEC_PER_SEC, priv->oclk_prsc);
	pulse_width = div64_u64(mul_u32_u32(fiper, priv->clk_freq), divisor);

	/* The FIPER_PW field is only 5 bits wide, so clamp the pulse
	 * width (otherwise oclk_prsc would need to be updated).
	 */
	if (pulse_width > NETC_TMR_FIPER_MAX_PW)
		pulse_width = NETC_TMR_FIPER_MAX_PW;

	return pulse_width;
}

static void netc_timer_set_pps_alarm(struct netc_timer *priv, int channel,
				     u32 integral_period)
{
	struct netc_pp *pp = &priv->pp[channel];
	u64 alarm;

	/* Get the alarm value */
	alarm = netc_timer_cur_time_read(priv) + NSEC_PER_MSEC;
	alarm = roundup_u64(alarm, NSEC_PER_SEC);
	alarm = roundup_u64(alarm, integral_period);

	netc_timer_alarm_write(priv, alarm, pp->alarm_id);
}

static void netc_timer_set_perout_alarm(struct netc_timer *priv, int channel,
					u32 integral_period)
{
	u64 cur_time = netc_timer_cur_time_read(priv);
	struct netc_pp *pp = &priv->pp[channel];
	u64 alarm, delta, min_time;
	u32 period = pp->period;
	u64 stime = pp->stime;

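	/* If the requested start time is in the past or too close to now,
	 * advance it by a whole number of periods so the output edges still
	 * fall on stime + N * period. Illustrative example (made-up values):
	 * period = 1 s, cur_time = 100.5 s, stime = 50 s => min_time is
	 * ~101.5 s, so stime is advanced by roundup(~51.5 s, 1 s) = 52 s
	 * to 102 s.
	 */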
	min_time = cur_time + NSEC_PER_MSEC + period;
	if (stime < min_time) {
		delta = min_time - stime;
		stime += roundup_u64(delta, period);
	}

	alarm = roundup_u64(stime - period, integral_period);
	netc_timer_alarm_write(priv, alarm, pp->alarm_id);
}

static int netc_timer_get_alarm_id(struct netc_timer *priv)
{
	int i;

	for (i = 0; i < priv->fs_alarm_num; i++) {
		if (!(priv->fs_alarm_bitmap & BIT(i))) {
			priv->fs_alarm_bitmap |= BIT(i);
			break;
		}
	}

	return i;
}

static u64 netc_timer_get_gclk_period(struct netc_timer *priv)
{
	/* TMR_GCLK_freq = (clk_freq / oclk_prsc) Hz.
	 * TMR_GCLK_period = NSEC_PER_SEC / TMR_GCLK_freq.
	 * TMR_GCLK_period = (NSEC_PER_SEC * oclk_prsc) / clk_freq
	 */
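	/* For example (illustrative values): with the default 333333333 Hz
	 * reference clock and oclk_prsc = 2, TMR_GCLK runs at ~166.67 MHz,
	 * i.e. a TMR_GCLK period of ~6 ns.
	 */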

	return div_u64(mul_u32_u32(NSEC_PER_SEC, priv->oclk_prsc),
		       priv->clk_freq);
}

static void netc_timer_enable_periodic_pulse(struct netc_timer *priv,
					     u8 channel)
{
	u32 fiper_pw, fiper, fiper_ctrl, integral_period;
	struct netc_pp *pp = &priv->pp[channel];
	int alarm_id = pp->alarm_id;

	integral_period = netc_timer_get_integral_period(priv);
	/* Set to the desired FIPER interval in ns minus TCLK_PERIOD */
	fiper = pp->period - integral_period;
	fiper_pw = netc_timer_calculate_fiper_pw(priv, fiper);

	fiper_ctrl = netc_timer_rd(priv, NETC_TMR_FIPER_CTRL);
	fiper_ctrl &= ~(FIPER_CTRL_DIS(channel) | FIPER_CTRL_PW(channel) |
			FIPER_CTRL_FS_ALARM(channel));
	fiper_ctrl |= FIPER_CTRL_SET_PW(channel, fiper_pw);
	fiper_ctrl |= alarm_id ? FIPER_CTRL_FS_ALARM(channel) : 0;

	priv->tmr_emask |= TMR_TEVENT_PPEN(channel) |
			   TMR_TEVENT_ALMEN(alarm_id);

	if (pp->type == NETC_PP_PPS)
		netc_timer_set_pps_alarm(priv, channel, integral_period);
	else
		netc_timer_set_perout_alarm(priv, channel, integral_period);

	netc_timer_wr(priv, NETC_TMR_TEMASK, priv->tmr_emask);
	netc_timer_wr(priv, NETC_TMR_FIPER(channel), fiper);
	netc_timer_wr(priv, NETC_TMR_FIPER_CTRL, fiper_ctrl);
}

static void netc_timer_disable_periodic_pulse(struct netc_timer *priv,
					      u8 channel)
{
	struct netc_pp *pp = &priv->pp[channel];
	int alarm_id = pp->alarm_id;
	u32 fiper_ctrl;

	if (!pp->enabled)
		return;

	priv->tmr_emask &= ~(TMR_TEVENT_PPEN(channel) |
			     TMR_TEVENT_ALMEN(alarm_id));

	fiper_ctrl = netc_timer_rd(priv, NETC_TMR_FIPER_CTRL);
	fiper_ctrl |= FIPER_CTRL_DIS(channel);

	netc_timer_alarm_write(priv, NETC_TMR_DEFAULT_ALARM, alarm_id);
	netc_timer_wr(priv, NETC_TMR_TEMASK, priv->tmr_emask);
	netc_timer_wr(priv, NETC_TMR_FIPER(channel), NETC_TMR_DEFAULT_FIPER);
	netc_timer_wr(priv, NETC_TMR_FIPER_CTRL, fiper_ctrl);
}

static u8 netc_timer_select_pps_channel(struct netc_timer *priv)
{
	int i;

	for (i = 0; i < NETC_TMR_FIPER_NUM; i++) {
		if (!priv->pp[i].enabled)
			return i;
	}

	return NETC_TMR_INVALID_CHANNEL;
}

/* Note that users should not use this API to output a PPS signal on
 * external pins, because PTP_CLK_REQ_PPS triggers an internal PPS event
 * that is fed into the kernel PPS subsystem. See:
 * https://lore.kernel.org/r/20201117213826.18235-1-a.fatoum@pengutronix.de
 */
static int netc_timer_enable_pps(struct netc_timer *priv,
				 struct ptp_clock_request *rq, int on)
{
	struct device *dev = &priv->pdev->dev;
	unsigned long flags;
	struct netc_pp *pp;
	int err = 0;

	spin_lock_irqsave(&priv->lock, flags);

	if (on) {
		int alarm_id;
		u8 channel;

		if (priv->pps_channel < NETC_TMR_FIPER_NUM) {
			channel = priv->pps_channel;
		} else {
			channel = netc_timer_select_pps_channel(priv);
			if (channel == NETC_TMR_INVALID_CHANNEL) {
				dev_err(dev, "No available FIPERs\n");
				err = -EBUSY;
				goto unlock_spinlock;
			}
		}

		pp = &priv->pp[channel];
		if (pp->enabled)
			goto unlock_spinlock;

		alarm_id = netc_timer_get_alarm_id(priv);
		if (alarm_id == priv->fs_alarm_num) {
			dev_err(dev, "No available ALARMs\n");
			err = -EBUSY;
			goto unlock_spinlock;
		}

		pp->enabled = true;
		pp->type = NETC_PP_PPS;
		pp->alarm_id = alarm_id;
		pp->period = NSEC_PER_SEC;
		priv->pps_channel = channel;

		netc_timer_enable_periodic_pulse(priv, channel);
	} else {
		/* pps_channel is invalid if PPS is not enabled, so no
		 * processing is needed.
		 */
		if (priv->pps_channel >= NETC_TMR_FIPER_NUM)
			goto unlock_spinlock;

		netc_timer_disable_periodic_pulse(priv, priv->pps_channel);
		pp = &priv->pp[priv->pps_channel];
		priv->fs_alarm_bitmap &= ~BIT(pp->alarm_id);
		memset(pp, 0, sizeof(*pp));
		priv->pps_channel = NETC_TMR_INVALID_CHANNEL;
	}

unlock_spinlock:
	spin_unlock_irqrestore(&priv->lock, flags);

	return err;
}

static int netc_timer_enable_perout(struct netc_timer *priv,
				    struct ptp_clock_request *rq, int on)
{
	struct device *dev = &priv->pdev->dev;
	u32 channel = rq->perout.index;
	unsigned long flags;
	struct netc_pp *pp;
	int err = 0;

	spin_lock_irqsave(&priv->lock, flags);

	pp = &priv->pp[channel];
	if (pp->type == NETC_PP_PPS) {
		dev_err(dev, "FIPER%u is being used for PPS\n", channel);
		err = -EBUSY;
		goto unlock_spinlock;
	}

	if (on) {
		u64 period_ns, gclk_period, max_period, min_period;
		struct timespec64 period, stime;
		u32 integral_period;
		int alarm_id;

		period.tv_sec = rq->perout.period.sec;
		period.tv_nsec = rq->perout.period.nsec;
		period_ns = timespec64_to_ns(&period);

		integral_period = netc_timer_get_integral_period(priv);
		max_period = (u64)NETC_TMR_DEFAULT_FIPER + integral_period;
		gclk_period = netc_timer_get_gclk_period(priv);
		min_period = gclk_period * 4 + integral_period;
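		/* Illustrative bounds, assuming the default 333333333 Hz
		 * clock, oclk_prsc = 2 and a 3 ns TCLK_PERIOD: min_period =
		 * 4 * 6 ns + 3 ns = 27 ns, max_period = 0xffffffff ns + 3 ns
		 * (roughly 4.29 s).
		 */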
		if (period_ns > max_period || period_ns < min_period) {
			dev_err(dev, "The period range is %llu ~ %llu\n",
				min_period, max_period);
			err = -EINVAL;
			goto unlock_spinlock;
		}

		if (pp->enabled) {
			alarm_id = pp->alarm_id;
		} else {
			alarm_id = netc_timer_get_alarm_id(priv);
			if (alarm_id == priv->fs_alarm_num) {
				dev_err(dev, "No available ALARMs\n");
				err = -EBUSY;
				goto unlock_spinlock;
			}

			pp->type = NETC_PP_PEROUT;
			pp->enabled = true;
			pp->alarm_id = alarm_id;
		}

		stime.tv_sec = rq->perout.start.sec;
		stime.tv_nsec = rq->perout.start.nsec;
		pp->stime = timespec64_to_ns(&stime);
		pp->period = period_ns;

		netc_timer_enable_periodic_pulse(priv, channel);
	} else {
		netc_timer_disable_periodic_pulse(priv, channel);
		priv->fs_alarm_bitmap &= ~BIT(pp->alarm_id);
		memset(pp, 0, sizeof(*pp));
	}

unlock_spinlock:
	spin_unlock_irqrestore(&priv->lock, flags);

	return err;
}

static void netc_timer_handle_etts_event(struct netc_timer *priv, int index,
					 bool update_event)
{
	struct ptp_clock_event event;
	u32 etts_l = 0, etts_h = 0;

	while (netc_timer_rd(priv, NETC_TMR_STAT) & TMR_STAT_ETS_VLD(index)) {
		etts_l = netc_timer_rd(priv, NETC_TMR_ETTS_L(index));
		etts_h = netc_timer_rd(priv, NETC_TMR_ETTS_H(index));
	}

	/* Invalid time stamp */
	if (!etts_l && !etts_h)
		return;

	if (update_event) {
		event.type = PTP_CLOCK_EXTTS;
		event.index = index;
		event.timestamp = (u64)etts_h << 32;
		event.timestamp |= etts_l;
		ptp_clock_event(priv->clock, &event);
	}
}

static int netc_timer_enable_extts(struct netc_timer *priv,
				   struct ptp_clock_request *rq, int on)
{
	int index = rq->extts.index;
	unsigned long flags;
	u32 tmr_ctrl;

	/* Reject requests to enable time stamping on both edges */
	if ((rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
		return -EOPNOTSUPP;

	spin_lock_irqsave(&priv->lock, flags);

	netc_timer_handle_etts_event(priv, rq->extts.index, false);
	if (on) {
		tmr_ctrl = netc_timer_rd(priv, NETC_TMR_CTRL);
		if (rq->extts.flags & PTP_FALLING_EDGE)
			tmr_ctrl |= TMR_ETEP(index);
		else
			tmr_ctrl &= ~TMR_ETEP(index);

		netc_timer_wr(priv, NETC_TMR_CTRL, tmr_ctrl);
		priv->tmr_emask |= TMR_TEVENT_ETS(index);
	} else {
		priv->tmr_emask &= ~TMR_TEVENT_ETS(index);
	}

	netc_timer_wr(priv, NETC_TMR_TEMASK, priv->tmr_emask);

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

static void netc_timer_disable_fiper(struct netc_timer *priv)
{
	u32 fiper_ctrl = netc_timer_rd(priv, NETC_TMR_FIPER_CTRL);
	int i;

	for (i = 0; i < NETC_TMR_FIPER_NUM; i++) {
		if (!priv->pp[i].enabled)
			continue;

		fiper_ctrl |= FIPER_CTRL_DIS(i);
		netc_timer_wr(priv, NETC_TMR_FIPER(i), NETC_TMR_DEFAULT_FIPER);
	}

	netc_timer_wr(priv, NETC_TMR_FIPER_CTRL, fiper_ctrl);
}

static void netc_timer_enable_fiper(struct netc_timer *priv)
{
	u32 integral_period = netc_timer_get_integral_period(priv);
	u32 fiper_ctrl = netc_timer_rd(priv, NETC_TMR_FIPER_CTRL);
	int i;

	for (i = 0; i < NETC_TMR_FIPER_NUM; i++) {
		struct netc_pp *pp = &priv->pp[i];
		u32 fiper;

		if (!pp->enabled)
			continue;

		fiper_ctrl &= ~FIPER_CTRL_DIS(i);

		if (pp->type == NETC_PP_PPS)
			netc_timer_set_pps_alarm(priv, i, integral_period);
		else if (pp->type == NETC_PP_PEROUT)
			netc_timer_set_perout_alarm(priv, i, integral_period);

		fiper = pp->period - integral_period;
		netc_timer_wr(priv, NETC_TMR_FIPER(i), fiper);
	}

	netc_timer_wr(priv, NETC_TMR_FIPER_CTRL, fiper_ctrl);
}

static int netc_timer_enable(struct ptp_clock_info *ptp,
			     struct ptp_clock_request *rq, int on)
{
	struct netc_timer *priv = ptp_to_netc_timer(ptp);

	switch (rq->type) {
	case PTP_CLK_REQ_PPS:
		return netc_timer_enable_pps(priv, rq, on);
	case PTP_CLK_REQ_PEROUT:
		return netc_timer_enable_perout(priv, rq, on);
	case PTP_CLK_REQ_EXTTS:
		return netc_timer_enable_extts(priv, rq, on);
	default:
		return -EOPNOTSUPP;
	}
}

static int netc_timer_perout_loopback(struct ptp_clock_info *ptp,
				      unsigned int index, int on)
{
	struct netc_timer *priv = ptp_to_netc_timer(ptp);
	unsigned long flags;
	u32 tmr_ctrl;

	spin_lock_irqsave(&priv->lock, flags);

	tmr_ctrl = netc_timer_rd(priv, NETC_TMR_CTRL);
	if (on)
		tmr_ctrl |= TMR_CTRL_PPL(index);
	else
		tmr_ctrl &= ~TMR_CTRL_PPL(index);

	netc_timer_wr(priv, NETC_TMR_CTRL, tmr_ctrl);

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

static void netc_timer_adjust_period(struct netc_timer *priv, u64 period)
{
	u32 fractional_period = lower_32_bits(period);
	u32 integral_period = upper_32_bits(period);
	u32 tmr_ctrl, old_tmr_ctrl;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	old_tmr_ctrl = netc_timer_rd(priv, NETC_TMR_CTRL);
	tmr_ctrl = u32_replace_bits(old_tmr_ctrl, integral_period,
				    TMR_CTRL_TCLK_PERIOD);
	if (tmr_ctrl != old_tmr_ctrl) {
		netc_timer_disable_fiper(priv);
		netc_timer_wr(priv, NETC_TMR_CTRL, tmr_ctrl);
		netc_timer_enable_fiper(priv);
	}

	netc_timer_wr(priv, NETC_TMR_ADD, fractional_period);

	spin_unlock_irqrestore(&priv->lock, flags);
}

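/* Illustrative example: scaled_ppm = 65536 requests a +1 ppm frequency
 * adjustment, so adjust_by_scaled_ppm() scales the nominal 32.32 fixed-point
 * period (~3 ns with the default clock) by 1.000001 before it is programmed
 * through netc_timer_adjust_period().
 */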
static int netc_timer_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct netc_timer *priv = ptp_to_netc_timer(ptp);
	u64 new_period;

	new_period = adjust_by_scaled_ppm(priv->period, scaled_ppm);
	netc_timer_adjust_period(priv, new_period);

	return 0;
}

static int netc_timer_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct netc_timer *priv = ptp_to_netc_timer(ptp);
	unsigned long flags;
	s64 tmr_off;

	spin_lock_irqsave(&priv->lock, flags);

	netc_timer_disable_fiper(priv);

	/* The reason for adjusting TMR_OFF instead of TMR_CNT is that
	 * the timer counter keeps running while TMR_CNT is read and
	 * written, which would introduce latency.
	 */
	tmr_off = netc_timer_offset_read(priv);
	tmr_off += delta;
	netc_timer_offset_write(priv, tmr_off);

	netc_timer_enable_fiper(priv);

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

static int netc_timer_gettimex64(struct ptp_clock_info *ptp,
				 struct timespec64 *ts,
				 struct ptp_system_timestamp *sts)
{
	struct netc_timer *priv = ptp_to_netc_timer(ptp);
	unsigned long flags;
	u64 ns;

	spin_lock_irqsave(&priv->lock, flags);

	ptp_read_system_prets(sts);
	ns = netc_timer_cur_time_read(priv);
	ptp_read_system_postts(sts);

	spin_unlock_irqrestore(&priv->lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}

static int netc_timer_settime64(struct ptp_clock_info *ptp,
				const struct timespec64 *ts)
{
	struct netc_timer *priv = ptp_to_netc_timer(ptp);
	u64 ns = timespec64_to_ns(ts);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	netc_timer_disable_fiper(priv);
	netc_timer_offset_write(priv, 0);
	netc_timer_cnt_write(priv, ns);
	netc_timer_enable_fiper(priv);

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

static const struct ptp_clock_info netc_timer_ptp_caps = {
	.owner = THIS_MODULE,
	.name = "NETC Timer PTP clock",
	.max_adj = 500000000,
	.n_pins = 0,
	.n_alarm = 2,
	.pps = 1,
	.n_per_out = 3,
	.n_ext_ts = 2,
	.n_per_lp = 2,
	.supported_extts_flags = PTP_RISING_EDGE | PTP_FALLING_EDGE |
				 PTP_STRICT_FLAGS,
	.adjfine = netc_timer_adjfine,
	.adjtime = netc_timer_adjtime,
	.gettimex64 = netc_timer_gettimex64,
	.settime64 = netc_timer_settime64,
	.enable = netc_timer_enable,
	.perout_loopback = netc_timer_perout_loopback,
};

static void netc_timer_init(struct netc_timer *priv)
{
	u32 fractional_period = lower_32_bits(priv->period);
	u32 integral_period = upper_32_bits(priv->period);
	u32 tmr_ctrl, fiper_ctrl;
	struct timespec64 now;
	u64 ns;
	int i;

	/* Software must enable the timer first, and the selected clock
	 * must be active; otherwise, the registers in the timer clock
	 * domain are not accessible.
	 */
	tmr_ctrl = FIELD_PREP(TMR_CTRL_CK_SEL, priv->clk_select) |
		   TMR_CTRL_TE | TMR_CTRL_FS;
	netc_timer_wr(priv, NETC_TMR_CTRL, tmr_ctrl);
	netc_timer_wr(priv, NETC_TMR_PRSC, priv->oclk_prsc);

	/* Disable FIPER by default */
	fiper_ctrl = netc_timer_rd(priv, NETC_TMR_FIPER_CTRL);
	for (i = 0; i < NETC_TMR_FIPER_NUM; i++) {
		fiper_ctrl |= FIPER_CTRL_DIS(i);
		fiper_ctrl &= ~FIPER_CTRL_PG(i);
	}
	netc_timer_wr(priv, NETC_TMR_FIPER_CTRL, fiper_ctrl);
	netc_timer_wr(priv, NETC_TMR_ECTRL, NETC_TMR_DEFAULT_ETTF_THR);

	ktime_get_real_ts64(&now);
	ns = timespec64_to_ns(&now);
	netc_timer_cnt_write(priv, ns);

	/* Allow atomic updates of TCLK_PERIOD and TMR_ADD: an update to
	 * TCLK_PERIOD does not take effect until TMR_ADD is written.
	 */
	tmr_ctrl |= FIELD_PREP(TMR_CTRL_TCLK_PERIOD, integral_period) |
		    TMR_COMP_MODE;
	netc_timer_wr(priv, NETC_TMR_CTRL, tmr_ctrl);
	netc_timer_wr(priv, NETC_TMR_ADD, fractional_period);
}

static int netc_timer_pci_probe(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct netc_timer *priv;
	int err;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	pcie_flr(pdev);
	err = pci_enable_device_mem(pdev);
	if (err)
		return dev_err_probe(dev, err, "Failed to enable device\n");

	dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	err = pci_request_mem_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(dev, "pci_request_mem_regions() failed, err:%pe\n",
			ERR_PTR(err));
		goto disable_dev;
	}

	pci_set_master(pdev);

	priv->pdev = pdev;
	priv->base = pci_ioremap_bar(pdev, NETC_TMR_REGS_BAR);
	if (!priv->base) {
		err = -ENOMEM;
		goto release_mem_regions;
	}

	pci_set_drvdata(pdev, priv);

	return 0;

release_mem_regions:
	pci_release_mem_regions(pdev);
disable_dev:
	pci_disable_device(pdev);

	return err;
}

static void netc_timer_pci_remove(struct pci_dev *pdev)
{
	struct netc_timer *priv = pci_get_drvdata(pdev);

	iounmap(priv->base);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

static int netc_timer_get_reference_clk_source(struct netc_timer *priv)
{
	struct device *dev = &priv->pdev->dev;
	struct clk *clk;
	int i;

	/* Select the NETC system clock as the reference clock by default */
	priv->clk_select = NETC_TMR_SYSTEM_CLK;
	priv->clk_freq = NETC_TMR_SYSCLK_333M;

	/* Update the clock source of the reference clock if the clock
	 * is specified in the DT node.
	 */
	for (i = 0; i < ARRAY_SIZE(timer_clk_src); i++) {
		clk = devm_clk_get_optional_enabled(dev, timer_clk_src[i]);
		if (IS_ERR(clk))
			return dev_err_probe(dev, PTR_ERR(clk),
					     "Failed to enable clock\n");

		if (clk) {
			priv->clk_freq = clk_get_rate(clk);
			priv->clk_select = i ? NETC_TMR_EXT_OSC :
					       NETC_TMR_CCM_TIMER1;
			break;
		}
	}

	/* The period is a 64-bit number: the upper 32 bits are the integer
	 * part and the lower 32 bits are the fractional part. To get the
	 * desired 32.32 fixed-point format, multiply the numerator by 2^32
	 * before dividing.
	 */
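	/* For example (illustrative): with the default 333333333 Hz clock,
	 * NSEC_PER_SEC / clk_freq is ~3.000000003 ns, stored as 0x3 in the
	 * upper 32 bits and ~0xc in the lower 32 bits.
	 */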
	priv->period = div_u64((u64)NSEC_PER_SEC << 32, priv->clk_freq);

	return 0;
}

static int netc_timer_parse_dt(struct netc_timer *priv)
{
	return netc_timer_get_reference_clk_source(priv);
}

static irqreturn_t netc_timer_isr(int irq, void *data)
{
	struct netc_timer *priv = data;
	struct ptp_clock_event event;
	u32 tmr_event;

	spin_lock(&priv->lock);

	tmr_event = netc_timer_rd(priv, NETC_TMR_TEVENT);
	tmr_event &= priv->tmr_emask;
	/* Clear interrupt status */
	netc_timer_wr(priv, NETC_TMR_TEVENT, tmr_event);

	if (tmr_event & TMR_TEVENT_ALMEN(0))
		netc_timer_alarm_write(priv, NETC_TMR_DEFAULT_ALARM, 0);

	if (tmr_event & TMR_TEVENT_ALMEN(1))
		netc_timer_alarm_write(priv, NETC_TMR_DEFAULT_ALARM, 1);

	if (tmr_event & TMR_TEVENT_PPEN_ALL) {
		event.type = PTP_CLOCK_PPS;
		ptp_clock_event(priv->clock, &event);
	}

	if (tmr_event & TMR_TEVENT_ETS(0))
		netc_timer_handle_etts_event(priv, 0, true);

	if (tmr_event & TMR_TEVENT_ETS(1))
		netc_timer_handle_etts_event(priv, 1, true);

	spin_unlock(&priv->lock);

	return IRQ_HANDLED;
}

static int netc_timer_init_msix_irq(struct netc_timer *priv)
{
	struct pci_dev *pdev = priv->pdev;
	int err, n;

	n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
	if (n != 1) {
		err = (n < 0) ? n : -EPERM;
		dev_err(&pdev->dev, "pci_alloc_irq_vectors() failed\n");
		return err;
	}

	priv->irq = pci_irq_vector(pdev, 0);
	err = request_irq(priv->irq, netc_timer_isr, 0, priv->irq_name, priv);
	if (err) {
		dev_err(&pdev->dev, "request_irq() failed\n");
		pci_free_irq_vectors(pdev);

		return err;
	}

	return 0;
}

static void netc_timer_free_msix_irq(struct netc_timer *priv)
{
	struct pci_dev *pdev = priv->pdev;

	disable_irq(priv->irq);
	free_irq(priv->irq, priv);
	pci_free_irq_vectors(pdev);
}

static int netc_timer_get_global_ip_rev(struct netc_timer *priv)
{
	u32 val;

	val = netc_timer_rd(priv, NETC_GLOBAL_OFFSET + NETC_GLOBAL_IPBRR0);

	return val & IPBRR0_IP_REV;
}

static int netc_timer_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct netc_timer *priv;
	int err;

	err = netc_timer_pci_probe(pdev);
	if (err)
		return err;

	priv = pci_get_drvdata(pdev);
	priv->revision = netc_timer_get_global_ip_rev(priv);
	if (priv->revision == NETC_REV_4_1)
		priv->fs_alarm_num = 1;
	else
		priv->fs_alarm_num = NETC_TMR_ALARM_NUM;

	err = netc_timer_parse_dt(priv);
	if (err)
		goto timer_pci_remove;

	priv->caps = netc_timer_ptp_caps;
	priv->oclk_prsc = NETC_TMR_DEFAULT_PRSC;
	priv->pps_channel = NETC_TMR_INVALID_CHANNEL;
	spin_lock_init(&priv->lock);
	snprintf(priv->irq_name, sizeof(priv->irq_name), "ptp-netc %s",
		 pci_name(pdev));

	err = netc_timer_init_msix_irq(priv);
	if (err)
		goto timer_pci_remove;

	netc_timer_init(priv);
	priv->clock = ptp_clock_register(&priv->caps, dev);
	if (IS_ERR(priv->clock)) {
		err = PTR_ERR(priv->clock);
		goto free_msix_irq;
	}

	return 0;

free_msix_irq:
	netc_timer_free_msix_irq(priv);
timer_pci_remove:
	netc_timer_pci_remove(pdev);

	return err;
}

static void netc_timer_remove(struct pci_dev *pdev)
{
	struct netc_timer *priv = pci_get_drvdata(pdev);

	netc_timer_wr(priv, NETC_TMR_TEMASK, 0);
	netc_timer_wr(priv, NETC_TMR_CTRL, 0);
	ptp_clock_unregister(priv->clock);
	netc_timer_free_msix_irq(priv);
	netc_timer_pci_remove(pdev);
}

static const struct pci_device_id netc_timer_id_table[] = {
	{ PCI_DEVICE(NETC_TMR_PCI_VENDOR_NXP, 0xee02) },
	{ }
};
MODULE_DEVICE_TABLE(pci, netc_timer_id_table);

static struct pci_driver netc_timer_driver = {
	.name = KBUILD_MODNAME,
	.id_table = netc_timer_id_table,
	.probe = netc_timer_probe,
	.remove = netc_timer_remove,
};
module_pci_driver(netc_timer_driver);

MODULE_DESCRIPTION("NXP NETC Timer PTP Driver");
MODULE_LICENSE("Dual BSD/GPL");