// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * NXP NETC V4 Timer driver
 * Copyright 2025 NXP
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/fsl/netc_global.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/ptp_clock_kernel.h>

#define NETC_TMR_PCI_VENDOR_NXP		0x1131

#define NETC_TMR_CTRL			0x0080
#define TMR_CTRL_CK_SEL			GENMASK(1, 0)
#define TMR_CTRL_TE			BIT(2)
#define TMR_ETEP(i)			BIT(8 + (i))
#define TMR_COMP_MODE			BIT(15)
#define TMR_CTRL_TCLK_PERIOD		GENMASK(25, 16)
#define TMR_CTRL_PPL(i)			BIT(27 - (i))
#define TMR_CTRL_FS			BIT(28)

#define NETC_TMR_TEVENT			0x0084
#define TMR_TEVNET_PPEN(i)		BIT(7 - (i))
#define TMR_TEVENT_PPEN_ALL		GENMASK(7, 5)
#define TMR_TEVENT_ALMEN(i)		BIT(16 + (i))
#define TMR_TEVENT_ETS_THREN(i)		BIT(20 + (i))
#define TMR_TEVENT_ETSEN(i)		BIT(24 + (i))
#define TMR_TEVENT_ETS_OVEN(i)		BIT(28 + (i))
#define TMR_TEVENT_ETS(i)		(TMR_TEVENT_ETS_THREN(i) | \
					 TMR_TEVENT_ETSEN(i) | \
					 TMR_TEVENT_ETS_OVEN(i))

#define NETC_TMR_TEMASK			0x0088
#define NETC_TMR_STAT			0x0094
#define TMR_STAT_ETS_VLD(i)		BIT(24 + (i))

#define NETC_TMR_CNT_L			0x0098
#define NETC_TMR_CNT_H			0x009c
#define NETC_TMR_ADD			0x00a0
#define NETC_TMR_PRSC			0x00a8
#define NETC_TMR_ECTRL			0x00ac
#define NETC_TMR_OFF_L			0x00b0
#define NETC_TMR_OFF_H			0x00b4

/* i = 0, 1, i indicates the index of TMR_ALARM */
#define NETC_TMR_ALARM_L(i)		(0x00b8 + (i) * 8)
#define NETC_TMR_ALARM_H(i)		(0x00bc + (i) * 8)

/* i = 0, 1, 2. i indicates the index of TMR_FIPER. */
#define NETC_TMR_FIPER(i)		(0x00d0 + (i) * 4)

#define NETC_TMR_FIPER_CTRL		0x00dc
#define FIPER_CTRL_DIS(i)		(BIT(7) << (i) * 8)
#define FIPER_CTRL_PG(i)		(BIT(6) << (i) * 8)
#define FIPER_CTRL_FS_ALARM(i)		(BIT(5) << (i) * 8)
#define FIPER_CTRL_PW(i)		(GENMASK(4, 0) << (i) * 8)
#define FIPER_CTRL_SET_PW(i, v)		(((v) & GENMASK(4, 0)) << 8 * (i))

/* i = 0, 1, i indicates the index of TMR_ETTS */
#define NETC_TMR_ETTS_L(i)		(0x00e0 + (i) * 8)
#define NETC_TMR_ETTS_H(i)		(0x00e4 + (i) * 8)
#define NETC_TMR_CUR_TIME_L		0x00f0
#define NETC_TMR_CUR_TIME_H		0x00f4

#define NETC_TMR_REGS_BAR		0
#define NETC_GLOBAL_OFFSET		0x10000
#define NETC_GLOBAL_IPBRR0		0xbf8
#define IPBRR0_IP_REV			GENMASK(15, 0)
#define NETC_REV_4_1			0x0401

#define NETC_TMR_FIPER_NUM		3
#define NETC_TMR_INVALID_CHANNEL	NETC_TMR_FIPER_NUM
#define NETC_TMR_DEFAULT_PRSC		2
#define NETC_TMR_DEFAULT_ALARM		GENMASK_ULL(63, 0)
#define NETC_TMR_DEFAULT_FIPER		GENMASK(31, 0)
#define NETC_TMR_FIPER_MAX_PW		GENMASK(4, 0)
#define NETC_TMR_ALARM_NUM		2
#define NETC_TMR_DEFAULT_ETTF_THR	7

/* 1588 timer reference clock source select */
#define NETC_TMR_CCM_TIMER1		0 /* enet_timer1_clk_root, from CCM */
#define NETC_TMR_SYSTEM_CLK		1 /* enet_clk_root/2, from CCM */
#define NETC_TMR_EXT_OSC		2 /* tmr_1588_clk, from IO pins */

#define NETC_TMR_SYSCLK_333M		333333333U

enum netc_pp_type {
	NETC_PP_PPS = 1,
	NETC_PP_PEROUT,
};

struct netc_pp {
	enum netc_pp_type type;
	bool enabled;
	int alarm_id;
	u32 period; /* pulse period, ns */
	u64 stime; /* start time, ns */
};

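/* The timer's nominal period is kept as a 32.32 fixed-point value (the
 * "period" field below). Worked example, assuming the default
 * 333333333 Hz reference clock (illustrative arithmetic only, not a
 * datasheet value): 10^9 / 333333333 ns ~= 3.000000003 ns per tick,
 * stored as (10^9 << 32) / 333333333 = 0x3_0000000C, so the integer
 * part 3 is programmed into TMR_CTRL[TCLK_PERIOD] and the fractional
 * part 0xC into TMR_ADD.
 */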
struct netc_timer {
	void __iomem *base;
	struct pci_dev *pdev;
	spinlock_t lock; /* Prevent concurrent access to registers */

	struct ptp_clock *clock;
	struct ptp_clock_info caps;
	u32 clk_select;
	u32 clk_freq;
	u32 oclk_prsc;
	/* High 32-bit is integer part, low 32-bit is fractional part */
	u64 period;

	int irq;
	char irq_name[24];
	int revision;
	u32 tmr_emask;
	u8 pps_channel;
	u8 fs_alarm_num;
	u8 fs_alarm_bitmap;
	struct netc_pp pp[NETC_TMR_FIPER_NUM]; /* periodic pulse */
};

#define netc_timer_rd(p, o)	netc_read((p)->base + (o))
#define netc_timer_wr(p, o, v)	netc_write((p)->base + (o), v)
#define ptp_to_netc_timer(ptp)	container_of((ptp), struct netc_timer, caps)

static const char *const timer_clk_src[] = {
	"ccm",
	"ext"
};

static void netc_timer_cnt_write(struct netc_timer *priv, u64 ns)
{
	u32 tmr_cnt_h = upper_32_bits(ns);
	u32 tmr_cnt_l = lower_32_bits(ns);

	/* A write to the TMR_CNT_L register copies the written value
	 * into the shadow TMR_CNT_L register. A write to the TMR_CNT_H
	 * register copies the written value into the shadow TMR_CNT_H
	 * register. The contents of the shadow registers are copied
	 * into the TMR_CNT_L and TMR_CNT_H registers following a write
	 * to the TMR_CNT_H register. So the user must write to the
	 * TMR_CNT_L register first. The other H/L register pairs behave
	 * the same way.
	 */
	netc_timer_wr(priv, NETC_TMR_CNT_L, tmr_cnt_l);
	netc_timer_wr(priv, NETC_TMR_CNT_H, tmr_cnt_h);
}

static u64 netc_timer_offset_read(struct netc_timer *priv)
{
	u32 tmr_off_l, tmr_off_h;
	u64 offset;

	tmr_off_l = netc_timer_rd(priv, NETC_TMR_OFF_L);
	tmr_off_h = netc_timer_rd(priv, NETC_TMR_OFF_H);
	offset = (((u64)tmr_off_h) << 32) | tmr_off_l;

	return offset;
}

static void netc_timer_offset_write(struct netc_timer *priv, u64 offset)
{
	u32 tmr_off_h = upper_32_bits(offset);
	u32 tmr_off_l = lower_32_bits(offset);

	netc_timer_wr(priv, NETC_TMR_OFF_L, tmr_off_l);
	netc_timer_wr(priv, NETC_TMR_OFF_H, tmr_off_h);
}

static u64 netc_timer_cur_time_read(struct netc_timer *priv)
{
	u32 time_h, time_l;
	u64 ns;

	/* The user should read NETC_TMR_CUR_TIME_L first to
	 * get the correct current time.
	 */
	time_l = netc_timer_rd(priv, NETC_TMR_CUR_TIME_L);
	time_h = netc_timer_rd(priv, NETC_TMR_CUR_TIME_H);
	ns = (u64)time_h << 32 | time_l;

	return ns;
}

static void netc_timer_alarm_write(struct netc_timer *priv,
				   u64 alarm, int index)
{
	u32 alarm_h = upper_32_bits(alarm);
	u32 alarm_l = lower_32_bits(alarm);

	netc_timer_wr(priv, NETC_TMR_ALARM_L(index), alarm_l);
	netc_timer_wr(priv, NETC_TMR_ALARM_H(index), alarm_h);
}

static u32 netc_timer_get_integral_period(struct netc_timer *priv)
{
	u32 tmr_ctrl, integral_period;

	tmr_ctrl = netc_timer_rd(priv, NETC_TMR_CTRL);
	integral_period = FIELD_GET(TMR_CTRL_TCLK_PERIOD, tmr_ctrl);

	return integral_period;
}

static u32 netc_timer_calculate_fiper_pw(struct netc_timer *priv,
					 u32 fiper)
{
	u64 divisor, pulse_width;

	/* Set the FIPER pulse width to half the FIPER interval by default.
	 * pulse_width = (fiper / 2) / TMR_GCLK_period,
	 * TMR_GCLK_period = NSEC_PER_SEC / TMR_GCLK_freq,
	 * TMR_GCLK_freq = (clk_freq / oclk_prsc) Hz,
	 * so pulse_width = fiper * clk_freq / (2 * NSEC_PER_SEC * oclk_prsc).
	 */
	divisor = mul_u32_u32(2 * NSEC_PER_SEC, priv->oclk_prsc);
	pulse_width = div64_u64(mul_u32_u32(fiper, priv->clk_freq), divisor);

	/* The FIPER_PW field only has 5 bits, so oclk_prsc may need to
	 * be updated.
	 */
	if (pulse_width > NETC_TMR_FIPER_MAX_PW)
		pulse_width = NETC_TMR_FIPER_MAX_PW;

	return pulse_width;
}

static void netc_timer_set_pps_alarm(struct netc_timer *priv, int channel,
				     u32 integral_period)
{
	struct netc_pp *pp = &priv->pp[channel];
	u64 alarm;

	/* Get the alarm value */
	alarm = netc_timer_cur_time_read(priv) + NSEC_PER_MSEC;
	alarm = roundup_u64(alarm, NSEC_PER_SEC);
	alarm = roundup_u64(alarm, integral_period);

	netc_timer_alarm_write(priv, alarm, pp->alarm_id);
}

static void netc_timer_set_perout_alarm(struct netc_timer *priv, int channel,
					u32 integral_period)
{
	u64 cur_time = netc_timer_cur_time_read(priv);
	struct netc_pp *pp = &priv->pp[channel];
	u64 alarm, delta, min_time;
	u32 period = pp->period;
	u64 stime = pp->stime;

	min_time = cur_time + NSEC_PER_MSEC + period;
	if (stime < min_time) {
		delta = min_time - stime;
		stime += roundup_u64(delta, period);
	}

	alarm = roundup_u64(stime - period, integral_period);
	netc_timer_alarm_write(priv, alarm, pp->alarm_id);
}

static int netc_timer_get_alarm_id(struct netc_timer *priv)
{
	int i;

	for (i = 0; i < priv->fs_alarm_num; i++) {
		if (!(priv->fs_alarm_bitmap & BIT(i))) {
			priv->fs_alarm_bitmap |= BIT(i);
			break;
		}
	}

	return i;
}

static u64 netc_timer_get_gclk_period(struct netc_timer *priv)
{
	/* TMR_GCLK_freq = (clk_freq / oclk_prsc) Hz.
	 * TMR_GCLK_period = NSEC_PER_SEC / TMR_GCLK_freq.
	 * TMR_GCLK_period = (NSEC_PER_SEC * oclk_prsc) / clk_freq
	 */

	return div_u64(mul_u32_u32(NSEC_PER_SEC, priv->oclk_prsc),
		       priv->clk_freq);
}

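/* Worked example for the two helpers above, assuming the default
 * 333333333 Hz reference clock and oclk_prsc = NETC_TMR_DEFAULT_PRSC (2);
 * the numbers are illustrative only:
 * TMR_GCLK_freq = 333333333 / 2 Hz, so TMR_GCLK_period ~= 6 ns.
 * For a 1 s FIPER interval, fiper / 2 / TMR_GCLK_period is roughly
 * 83 million TMR_GCLK cycles, far more than the 5-bit FIPER_PW field
 * can hold, so the pulse width is clamped to NETC_TMR_FIPER_MAX_PW (31),
 * i.e. about a 186 ns output pulse.
 */
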
static void netc_timer_enable_periodic_pulse(struct netc_timer *priv,
					     u8 channel)
{
	u32 fiper_pw, fiper, fiper_ctrl, integral_period;
	struct netc_pp *pp = &priv->pp[channel];
	int alarm_id = pp->alarm_id;

	integral_period = netc_timer_get_integral_period(priv);
	/* Set FIPER to the desired interval in ns minus TCLK_PERIOD */
	fiper = pp->period - integral_period;
	fiper_pw = netc_timer_calculate_fiper_pw(priv, fiper);

	fiper_ctrl = netc_timer_rd(priv, NETC_TMR_FIPER_CTRL);
	fiper_ctrl &= ~(FIPER_CTRL_DIS(channel) | FIPER_CTRL_PW(channel) |
			FIPER_CTRL_FS_ALARM(channel));
	fiper_ctrl |= FIPER_CTRL_SET_PW(channel, fiper_pw);
	fiper_ctrl |= alarm_id ? FIPER_CTRL_FS_ALARM(channel) : 0;

	priv->tmr_emask |= TMR_TEVENT_ALMEN(alarm_id);

	if (pp->type == NETC_PP_PPS) {
		priv->tmr_emask |= TMR_TEVNET_PPEN(channel);
		netc_timer_set_pps_alarm(priv, channel, integral_period);
	} else {
		netc_timer_set_perout_alarm(priv, channel, integral_period);
	}

	netc_timer_wr(priv, NETC_TMR_TEMASK, priv->tmr_emask);
	netc_timer_wr(priv, NETC_TMR_FIPER(channel), fiper);
	netc_timer_wr(priv, NETC_TMR_FIPER_CTRL, fiper_ctrl);
}

static void netc_timer_disable_periodic_pulse(struct netc_timer *priv,
					      u8 channel)
{
	struct netc_pp *pp = &priv->pp[channel];
	int alarm_id = pp->alarm_id;
	u32 fiper_ctrl;

	if (!pp->enabled)
		return;

	priv->tmr_emask &= ~(TMR_TEVNET_PPEN(channel) |
			     TMR_TEVENT_ALMEN(alarm_id));

	fiper_ctrl = netc_timer_rd(priv, NETC_TMR_FIPER_CTRL);
	fiper_ctrl |= FIPER_CTRL_DIS(channel);

	netc_timer_alarm_write(priv, NETC_TMR_DEFAULT_ALARM, alarm_id);
	netc_timer_wr(priv, NETC_TMR_TEMASK, priv->tmr_emask);
	netc_timer_wr(priv, NETC_TMR_FIPER(channel), NETC_TMR_DEFAULT_FIPER);
	netc_timer_wr(priv, NETC_TMR_FIPER_CTRL, fiper_ctrl);
}

static u8 netc_timer_select_pps_channel(struct netc_timer *priv)
{
	int i;

	for (i = 0; i < NETC_TMR_FIPER_NUM; i++) {
		if (!priv->pp[i].enabled)
			return i;
	}

	return NETC_TMR_INVALID_CHANNEL;
}

/* Note that users should not use this API to output a PPS signal on
 * external pins, because PTP_CLK_REQ_PPS triggers an internal PPS event
 * for input into the kernel PPS subsystem. See:
 * https://lore.kernel.org/r/20201117213826.18235-1-a.fatoum@pengutronix.de
 */
static int netc_timer_enable_pps(struct netc_timer *priv,
				 struct ptp_clock_request *rq, int on)
{
	struct device *dev = &priv->pdev->dev;
	unsigned long flags;
	struct netc_pp *pp;
	int err = 0;

	spin_lock_irqsave(&priv->lock, flags);

	if (on) {
		int alarm_id;
		u8 channel;

		if (priv->pps_channel < NETC_TMR_FIPER_NUM) {
			channel = priv->pps_channel;
		} else {
			channel = netc_timer_select_pps_channel(priv);
			if (channel == NETC_TMR_INVALID_CHANNEL) {
				dev_err(dev, "No available FIPERs\n");
				err = -EBUSY;
				goto unlock_spinlock;
			}
		}

		pp = &priv->pp[channel];
		if (pp->enabled)
			goto unlock_spinlock;

		alarm_id = netc_timer_get_alarm_id(priv);
		if (alarm_id == priv->fs_alarm_num) {
			dev_err(dev, "No available ALARMs\n");
			err = -EBUSY;
			goto unlock_spinlock;
		}

		pp->enabled = true;
		pp->type = NETC_PP_PPS;
		pp->alarm_id = alarm_id;
		pp->period = NSEC_PER_SEC;
		priv->pps_channel = channel;

		netc_timer_enable_periodic_pulse(priv, channel);
	} else {
		/* pps_channel is invalid if PPS is not enabled, so no
		 * processing is needed.
		 */
		if (priv->pps_channel >= NETC_TMR_FIPER_NUM)
			goto unlock_spinlock;

		netc_timer_disable_periodic_pulse(priv, priv->pps_channel);
		pp = &priv->pp[priv->pps_channel];
		priv->fs_alarm_bitmap &= ~BIT(pp->alarm_id);
		memset(pp, 0, sizeof(*pp));
		priv->pps_channel = NETC_TMR_INVALID_CHANNEL;
	}

unlock_spinlock:
	spin_unlock_irqrestore(&priv->lock, flags);

	return err;
}

static int net_timer_enable_perout(struct netc_timer *priv,
				   struct ptp_clock_request *rq, int on)
{
	struct device *dev = &priv->pdev->dev;
	u32 channel = rq->perout.index;
	unsigned long flags;
	struct netc_pp *pp;
	int err = 0;

	spin_lock_irqsave(&priv->lock, flags);

	pp = &priv->pp[channel];
	if (pp->type == NETC_PP_PPS) {
		dev_err(dev, "FIPER%u is being used for PPS\n", channel);
		err = -EBUSY;
		goto unlock_spinlock;
	}

	if (on) {
		u64 period_ns, gclk_period, max_period, min_period;
		struct timespec64 period, stime;
		u32 integral_period;
		int alarm_id;

		period.tv_sec = rq->perout.period.sec;
		period.tv_nsec = rq->perout.period.nsec;
		period_ns = timespec64_to_ns(&period);

		integral_period = netc_timer_get_integral_period(priv);
		max_period = (u64)NETC_TMR_DEFAULT_FIPER + integral_period;
		gclk_period = netc_timer_get_gclk_period(priv);
		min_period = gclk_period * 4 + integral_period;
		if (period_ns > max_period || period_ns < min_period) {
			dev_err(dev, "The period range is %llu ~ %llu\n",
				min_period, max_period);
			err = -EINVAL;
			goto unlock_spinlock;
		}

		if (pp->enabled) {
			alarm_id = pp->alarm_id;
		} else {
			alarm_id = netc_timer_get_alarm_id(priv);
			if (alarm_id == priv->fs_alarm_num) {
				dev_err(dev, "No available ALARMs\n");
				err = -EBUSY;
				goto unlock_spinlock;
			}

			pp->type = NETC_PP_PEROUT;
			pp->enabled = true;
			pp->alarm_id = alarm_id;
		}

		stime.tv_sec = rq->perout.start.sec;
		stime.tv_nsec = rq->perout.start.nsec;
		pp->stime = timespec64_to_ns(&stime);
		pp->period = period_ns;

		netc_timer_enable_periodic_pulse(priv, channel);
	} else {
		netc_timer_disable_periodic_pulse(priv, channel);
		priv->fs_alarm_bitmap &= ~BIT(pp->alarm_id);
		memset(pp, 0, sizeof(*pp));
	}

unlock_spinlock:
	spin_unlock_irqrestore(&priv->lock, flags);

	return err;
}

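/* Illustration of the period bounds checked in net_timer_enable_perout()
 * above, assuming the default 333333333 Hz reference clock, oclk_prsc = 2
 * and TCLK_PERIOD = 3 ns (example values, not hardware limits from a
 * datasheet): min_period = 4 * TMR_GCLK_period + TCLK_PERIOD ~= 27 ns,
 * max_period = 0xffffffff + 3 ns ~= 4.29 s, so a standard 1 Hz or
 * 10 kHz PEROUT request falls comfortably within the accepted range.
 */
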
static void netc_timer_handle_etts_event(struct netc_timer *priv, int index,
					 bool update_event)
{
	struct ptp_clock_event event;
	u32 etts_l = 0, etts_h = 0;

	while (netc_timer_rd(priv, NETC_TMR_STAT) & TMR_STAT_ETS_VLD(index)) {
		etts_l = netc_timer_rd(priv, NETC_TMR_ETTS_L(index));
		etts_h = netc_timer_rd(priv, NETC_TMR_ETTS_H(index));
	}

	/* Invalid time stamp */
	if (!etts_l && !etts_h)
		return;

	if (update_event) {
		event.type = PTP_CLOCK_EXTTS;
		event.index = index;
		event.timestamp = (u64)etts_h << 32;
		event.timestamp |= etts_l;
		ptp_clock_event(priv->clock, &event);
	}
}

static int netc_timer_enable_extts(struct netc_timer *priv,
				   struct ptp_clock_request *rq, int on)
{
	int index = rq->extts.index;
	unsigned long flags;
	u32 tmr_ctrl;

	/* Reject requests to enable time stamping on both edges */
	if ((rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
		return -EOPNOTSUPP;

	spin_lock_irqsave(&priv->lock, flags);

	netc_timer_handle_etts_event(priv, rq->extts.index, false);
	if (on) {
		tmr_ctrl = netc_timer_rd(priv, NETC_TMR_CTRL);
		if (rq->extts.flags & PTP_FALLING_EDGE)
			tmr_ctrl |= TMR_ETEP(index);
		else
			tmr_ctrl &= ~TMR_ETEP(index);

		netc_timer_wr(priv, NETC_TMR_CTRL, tmr_ctrl);
		priv->tmr_emask |= TMR_TEVENT_ETS(index);
	} else {
		priv->tmr_emask &= ~TMR_TEVENT_ETS(index);
	}

	netc_timer_wr(priv, NETC_TMR_TEMASK, priv->tmr_emask);

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

static void netc_timer_disable_fiper(struct netc_timer *priv)
{
	u32 fiper_ctrl = netc_timer_rd(priv, NETC_TMR_FIPER_CTRL);
	int i;

	for (i = 0; i < NETC_TMR_FIPER_NUM; i++) {
		if (!priv->pp[i].enabled)
			continue;

		fiper_ctrl |= FIPER_CTRL_DIS(i);
		netc_timer_wr(priv, NETC_TMR_FIPER(i), NETC_TMR_DEFAULT_FIPER);
	}

	netc_timer_wr(priv, NETC_TMR_FIPER_CTRL, fiper_ctrl);
}

static void netc_timer_enable_fiper(struct netc_timer *priv)
{
	u32 integral_period = netc_timer_get_integral_period(priv);
	u32 fiper_ctrl = netc_timer_rd(priv, NETC_TMR_FIPER_CTRL);
	int i;

	for (i = 0; i < NETC_TMR_FIPER_NUM; i++) {
		struct netc_pp *pp = &priv->pp[i];
		u32 fiper;

		if (!pp->enabled)
			continue;

		fiper_ctrl &= ~FIPER_CTRL_DIS(i);

		if (pp->type == NETC_PP_PPS)
			netc_timer_set_pps_alarm(priv, i, integral_period);
		else if (pp->type == NETC_PP_PEROUT)
			netc_timer_set_perout_alarm(priv, i, integral_period);

		fiper = pp->period - integral_period;
		netc_timer_wr(priv, NETC_TMR_FIPER(i), fiper);
	}

	netc_timer_wr(priv, NETC_TMR_FIPER_CTRL, fiper_ctrl);
}

static int netc_timer_enable(struct ptp_clock_info *ptp,
			     struct ptp_clock_request *rq, int on)
{
	struct netc_timer *priv = ptp_to_netc_timer(ptp);

	switch (rq->type) {
	case PTP_CLK_REQ_PPS:
		return netc_timer_enable_pps(priv, rq, on);
	case PTP_CLK_REQ_PEROUT:
		return net_timer_enable_perout(priv, rq, on);
	case PTP_CLK_REQ_EXTTS:
		return netc_timer_enable_extts(priv, rq, on);
	default:
		return -EOPNOTSUPP;
	}
}

static int netc_timer_perout_loopback(struct ptp_clock_info *ptp,
				      unsigned int index, int on)
{
	struct netc_timer *priv = ptp_to_netc_timer(ptp);
	unsigned long flags;
	u32 tmr_ctrl;

	spin_lock_irqsave(&priv->lock, flags);

	tmr_ctrl = netc_timer_rd(priv, NETC_TMR_CTRL);
	if (on)
		tmr_ctrl |= TMR_CTRL_PPL(index);
	else
		tmr_ctrl &= ~TMR_CTRL_PPL(index);

	netc_timer_wr(priv, NETC_TMR_CTRL, tmr_ctrl);

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

static void netc_timer_adjust_period(struct netc_timer *priv, u64 period)
{
	u32 fractional_period = lower_32_bits(period);
	u32 integral_period = upper_32_bits(period);
	u32 tmr_ctrl, old_tmr_ctrl;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	old_tmr_ctrl = netc_timer_rd(priv, NETC_TMR_CTRL);
	tmr_ctrl = u32_replace_bits(old_tmr_ctrl, integral_period,
				    TMR_CTRL_TCLK_PERIOD);
	if (tmr_ctrl != old_tmr_ctrl) {
		netc_timer_disable_fiper(priv);
		netc_timer_wr(priv, NETC_TMR_CTRL, tmr_ctrl);
		netc_timer_enable_fiper(priv);
	}

	netc_timer_wr(priv, NETC_TMR_ADD, fractional_period);

	spin_unlock_irqrestore(&priv->lock, flags);
}

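/* adjust_by_scaled_ppm() scales the nominal 32.32 fixed-point period by
 * scaled_ppm (parts per million with a 16-bit binary fractional part).
 * A rough example, assuming the default ~3.000000003 ns period:
 * scaled_ppm = 6553600 (+100 ppm) yields a new period of ~3.0003 ns, so
 * the integer part stays at 3 and only TMR_ADD changes, while an
 * adjustment large enough to move the integer part makes
 * netc_timer_adjust_period() also rewrite TMR_CTRL[TCLK_PERIOD] around
 * a FIPER disable/enable cycle.
 */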
static int netc_timer_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct netc_timer *priv = ptp_to_netc_timer(ptp);
	u64 new_period;

	new_period = adjust_by_scaled_ppm(priv->period, scaled_ppm);
	netc_timer_adjust_period(priv, new_period);

	return 0;
}

static int netc_timer_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct netc_timer *priv = ptp_to_netc_timer(ptp);
	unsigned long flags;
	s64 tmr_off;

	spin_lock_irqsave(&priv->lock, flags);

	netc_timer_disable_fiper(priv);

	/* Adjust TMROFF instead of TMR_CNT because the timer counter
	 * keeps increasing while TMR_CNT is read and written back,
	 * which would introduce latency.
	 */
	tmr_off = netc_timer_offset_read(priv);
	tmr_off += delta;
	netc_timer_offset_write(priv, tmr_off);

	netc_timer_enable_fiper(priv);

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

static int netc_timer_gettimex64(struct ptp_clock_info *ptp,
				 struct timespec64 *ts,
				 struct ptp_system_timestamp *sts)
{
	struct netc_timer *priv = ptp_to_netc_timer(ptp);
	unsigned long flags;
	u64 ns;

	spin_lock_irqsave(&priv->lock, flags);

	ptp_read_system_prets(sts);
	ns = netc_timer_cur_time_read(priv);
	ptp_read_system_postts(sts);

	spin_unlock_irqrestore(&priv->lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}

static int netc_timer_settime64(struct ptp_clock_info *ptp,
				const struct timespec64 *ts)
{
	struct netc_timer *priv = ptp_to_netc_timer(ptp);
	u64 ns = timespec64_to_ns(ts);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	netc_timer_disable_fiper(priv);
	netc_timer_offset_write(priv, 0);
	netc_timer_cnt_write(priv, ns);
	netc_timer_enable_fiper(priv);

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

static const struct ptp_clock_info netc_timer_ptp_caps = {
	.owner = THIS_MODULE,
	.name = "NETC Timer PTP clock",
	.max_adj = 500000000,
	.n_pins = 0,
	.n_alarm = 2,
	.pps = 1,
	.n_per_out = 3,
	.n_ext_ts = 2,
	.n_per_lp = 2,
	.supported_extts_flags = PTP_RISING_EDGE | PTP_FALLING_EDGE |
				 PTP_STRICT_FLAGS,
	.adjfine = netc_timer_adjfine,
	.adjtime = netc_timer_adjtime,
	.gettimex64 = netc_timer_gettimex64,
	.settime64 = netc_timer_settime64,
	.enable = netc_timer_enable,
	.perout_loopback = netc_timer_perout_loopback,
};

static void netc_timer_init(struct netc_timer *priv)
{
	u32 fractional_period = lower_32_bits(priv->period);
	u32 integral_period = upper_32_bits(priv->period);
	u32 tmr_ctrl, fiper_ctrl;
	struct timespec64 now;
	u64 ns;
	int i;

	/* Software must enable the timer first, and the selected clock
	 * must be active; otherwise, the registers in the timer clock
	 * domain are not accessible.
	 */
	tmr_ctrl = FIELD_PREP(TMR_CTRL_CK_SEL, priv->clk_select) |
		   TMR_CTRL_TE | TMR_CTRL_FS;
	netc_timer_wr(priv, NETC_TMR_CTRL, tmr_ctrl);
	netc_timer_wr(priv, NETC_TMR_PRSC, priv->oclk_prsc);

	/* Disable FIPER by default */
	fiper_ctrl = netc_timer_rd(priv, NETC_TMR_FIPER_CTRL);
	for (i = 0; i < NETC_TMR_FIPER_NUM; i++) {
		fiper_ctrl |= FIPER_CTRL_DIS(i);
		fiper_ctrl &= ~FIPER_CTRL_PG(i);
	}
	netc_timer_wr(priv, NETC_TMR_FIPER_CTRL, fiper_ctrl);
	netc_timer_wr(priv, NETC_TMR_ECTRL, NETC_TMR_DEFAULT_ETTF_THR);

	ktime_get_real_ts64(&now);
	ns = timespec64_to_ns(&now);
	netc_timer_cnt_write(priv, ns);

	/* Allow atomic writes to TCLK_PERIOD and TMR_ADD: an update to
	 * TCLK_PERIOD does not take effect until TMR_ADD is written.
	 */
	tmr_ctrl |= FIELD_PREP(TMR_CTRL_TCLK_PERIOD, integral_period) |
		    TMR_COMP_MODE;
	netc_timer_wr(priv, NETC_TMR_CTRL, tmr_ctrl);
	netc_timer_wr(priv, NETC_TMR_ADD, fractional_period);
}

static int netc_timer_pci_probe(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct netc_timer *priv;
	int err;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	pcie_flr(pdev);
	err = pci_enable_device_mem(pdev);
	if (err)
		return dev_err_probe(dev, err, "Failed to enable device\n");

	dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	err = pci_request_mem_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(dev, "pci_request_regions() failed, err:%pe\n",
			ERR_PTR(err));
		goto disable_dev;
	}

	pci_set_master(pdev);

	priv->pdev = pdev;
	priv->base = pci_ioremap_bar(pdev, NETC_TMR_REGS_BAR);
	if (!priv->base) {
		err = -ENOMEM;
		goto release_mem_regions;
	}

	pci_set_drvdata(pdev, priv);

	return 0;

release_mem_regions:
	pci_release_mem_regions(pdev);
disable_dev:
	pci_disable_device(pdev);

	return err;
}

static void netc_timer_pci_remove(struct pci_dev *pdev)
{
	struct netc_timer *priv = pci_get_drvdata(pdev);

	iounmap(priv->base);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

static int netc_timer_get_reference_clk_source(struct netc_timer *priv)
{
	struct device *dev = &priv->pdev->dev;
	struct clk *clk;
	int i;

	/* Select NETC system clock as the reference clock by default */
	priv->clk_select = NETC_TMR_SYSTEM_CLK;
	priv->clk_freq = NETC_TMR_SYSCLK_333M;

	/* Update the clock source of the reference clock if the clock
	 * is specified in the DT node.
	 */
	for (i = 0; i < ARRAY_SIZE(timer_clk_src); i++) {
		clk = devm_clk_get_optional_enabled(dev, timer_clk_src[i]);
		if (IS_ERR(clk))
			return dev_err_probe(dev, PTR_ERR(clk),
					     "Failed to enable clock\n");

		if (clk) {
			priv->clk_freq = clk_get_rate(clk);
			priv->clk_select = i ? NETC_TMR_EXT_OSC :
					       NETC_TMR_CCM_TIMER1;
			break;
		}
	}

	/* The period is a 64-bit number: the high 32 bits are the
	 * integer part of the period and the low 32 bits are the
	 * fractional part. To get the desired 32-bit fixed-point
	 * format, multiply the numerator of the fraction by 2^32.
	 */
	priv->period = div_u64((u64)NSEC_PER_SEC << 32, priv->clk_freq);

	return 0;
}

static int netc_timer_parse_dt(struct netc_timer *priv)
{
	return netc_timer_get_reference_clk_source(priv);
}

static irqreturn_t netc_timer_isr(int irq, void *data)
{
	struct netc_timer *priv = data;
	struct ptp_clock_event event;
	u32 tmr_event;

	spin_lock(&priv->lock);

	tmr_event = netc_timer_rd(priv, NETC_TMR_TEVENT);
	tmr_event &= priv->tmr_emask;
	/* Clear interrupt status */
	netc_timer_wr(priv, NETC_TMR_TEVENT, tmr_event);

	if (tmr_event & TMR_TEVENT_ALMEN(0))
		netc_timer_alarm_write(priv, NETC_TMR_DEFAULT_ALARM, 0);

	if (tmr_event & TMR_TEVENT_ALMEN(1))
		netc_timer_alarm_write(priv, NETC_TMR_DEFAULT_ALARM, 1);

	if (tmr_event & TMR_TEVENT_PPEN_ALL) {
		event.type = PTP_CLOCK_PPS;
		ptp_clock_event(priv->clock, &event);
	}

	if (tmr_event & TMR_TEVENT_ETS(0))
		netc_timer_handle_etts_event(priv, 0, true);

	if (tmr_event & TMR_TEVENT_ETS(1))
		netc_timer_handle_etts_event(priv, 1, true);

	spin_unlock(&priv->lock);

	return IRQ_HANDLED;
}

static int netc_timer_init_msix_irq(struct netc_timer *priv)
{
	struct pci_dev *pdev = priv->pdev;
	int err, n;

	n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
	if (n != 1) {
		err = (n < 0) ? n : -EPERM;
		dev_err(&pdev->dev, "pci_alloc_irq_vectors() failed\n");
		return err;
	}

	priv->irq = pci_irq_vector(pdev, 0);
	err = request_irq(priv->irq, netc_timer_isr, 0, priv->irq_name, priv);
	if (err) {
		dev_err(&pdev->dev, "request_irq() failed\n");
		pci_free_irq_vectors(pdev);

		return err;
	}

	return 0;
}

static void netc_timer_free_msix_irq(struct netc_timer *priv)
{
	struct pci_dev *pdev = priv->pdev;

	disable_irq(priv->irq);
	free_irq(priv->irq, priv);
	pci_free_irq_vectors(pdev);
}

static int netc_timer_get_global_ip_rev(struct netc_timer *priv)
{
	u32 val;

	val = netc_timer_rd(priv, NETC_GLOBAL_OFFSET + NETC_GLOBAL_IPBRR0);

	return val & IPBRR0_IP_REV;
}

static int netc_timer_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct netc_timer *priv;
	int err;

	err = netc_timer_pci_probe(pdev);
	if (err)
		return err;

	priv = pci_get_drvdata(pdev);
	priv->revision = netc_timer_get_global_ip_rev(priv);
	if (priv->revision == NETC_REV_4_1)
		priv->fs_alarm_num = 1;
	else
		priv->fs_alarm_num = NETC_TMR_ALARM_NUM;

	err = netc_timer_parse_dt(priv);
	if (err)
		goto timer_pci_remove;

	priv->caps = netc_timer_ptp_caps;
	priv->oclk_prsc = NETC_TMR_DEFAULT_PRSC;
	priv->pps_channel = NETC_TMR_INVALID_CHANNEL;
	spin_lock_init(&priv->lock);
	snprintf(priv->irq_name, sizeof(priv->irq_name), "ptp-netc %s",
		 pci_name(pdev));

	err = netc_timer_init_msix_irq(priv);
	if (err)
		goto timer_pci_remove;

	netc_timer_init(priv);
	priv->clock = ptp_clock_register(&priv->caps, dev);
	if (IS_ERR(priv->clock)) {
		err = PTR_ERR(priv->clock);
		goto free_msix_irq;
	}

	return 0;

free_msix_irq:
	netc_timer_free_msix_irq(priv);
timer_pci_remove:
	netc_timer_pci_remove(pdev);

	return err;
}

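/* On removal, mask all timer interrupts and clear TMR_CTRL (including the
 * TE enable bit) before unregistering the PTP clock and freeing the MSI-X
 * vector, so that no timer events fire during teardown.
 */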
static void netc_timer_remove(struct pci_dev *pdev)
{
	struct netc_timer *priv = pci_get_drvdata(pdev);

	netc_timer_wr(priv, NETC_TMR_TEMASK, 0);
	netc_timer_wr(priv, NETC_TMR_CTRL, 0);
	ptp_clock_unregister(priv->clock);
	netc_timer_free_msix_irq(priv);
	netc_timer_pci_remove(pdev);
}

static const struct pci_device_id netc_timer_id_table[] = {
	{ PCI_DEVICE(NETC_TMR_PCI_VENDOR_NXP, 0xee02) },
	{ }
};
MODULE_DEVICE_TABLE(pci, netc_timer_id_table);

static struct pci_driver netc_timer_driver = {
	.name = KBUILD_MODNAME,
	.id_table = netc_timer_id_table,
	.probe = netc_timer_probe,
	.remove = netc_timer_remove,
};
module_pci_driver(netc_timer_driver);

MODULE_DESCRIPTION("NXP NETC Timer PTP Driver");
MODULE_LICENSE("Dual BSD/GPL");