// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2024 Microchip Technology

#include "microchip_rds_ptp.h"

static int mchp_rds_phy_read_mmd(struct mchp_rds_ptp_clock *clock,
				 u32 offset, enum mchp_rds_ptp_base base)
{
	struct phy_device *phydev = clock->phydev;
	u32 addr;

	addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
			  BASE_CLK(clock)));

	return phy_read_mmd(phydev, PTP_MMD(clock), addr);
}

static int mchp_rds_phy_write_mmd(struct mchp_rds_ptp_clock *clock,
				  u32 offset, enum mchp_rds_ptp_base base,
				  u16 val)
{
	struct phy_device *phydev = clock->phydev;
	u32 addr;

	addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
			  BASE_CLK(clock)));

	return phy_write_mmd(phydev, PTP_MMD(clock), addr, val);
}

static int mchp_rds_phy_modify_mmd(struct mchp_rds_ptp_clock *clock,
				   u32 offset, enum mchp_rds_ptp_base base,
				   u16 mask, u16 val)
{
	struct phy_device *phydev = clock->phydev;
	u32 addr;

	addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
			  BASE_CLK(clock)));

	return phy_modify_mmd(phydev, PTP_MMD(clock), addr, mask, val);
}

static int mchp_rds_phy_set_bits_mmd(struct mchp_rds_ptp_clock *clock,
				     u32 offset, enum mchp_rds_ptp_base base,
				     u16 val)
{
	struct phy_device *phydev = clock->phydev;
	u32 addr;

	addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
			  BASE_CLK(clock)));

	return phy_set_bits_mmd(phydev, PTP_MMD(clock), addr, val);
}

static int mchp_rds_ptp_flush_fifo(struct mchp_rds_ptp_clock *clock,
				   enum mchp_rds_ptp_fifo_dir dir)
{
	int rc;

	if (dir == MCHP_RDS_PTP_EGRESS_FIFO)
		skb_queue_purge(&clock->tx_queue);
	else
		skb_queue_purge(&clock->rx_queue);

	for (int i = 0; i < MCHP_RDS_PTP_FIFO_SIZE; ++i) {
		rc = mchp_rds_phy_read_mmd(clock,
					   dir == MCHP_RDS_PTP_EGRESS_FIFO ?
					   MCHP_RDS_PTP_TX_MSG_HDR2 :
					   MCHP_RDS_PTP_RX_MSG_HDR2,
					   MCHP_RDS_PTP_PORT);
		if (rc < 0)
			return rc;
	}
	return mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_INT_STS,
				     MCHP_RDS_PTP_PORT);
}

static int mchp_rds_ptp_config_intr(struct mchp_rds_ptp_clock *clock,
				    bool enable)
{
	/* Enable or disable PTP interrupts */
	return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_INT_EN,
				      MCHP_RDS_PTP_PORT,
				      enable ?
				      MCHP_RDS_PTP_INT_ALL_MSK : 0);
}

static void mchp_rds_ptp_txtstamp(struct mii_timestamper *mii_ts,
				  struct sk_buff *skb, int type)
{
	struct mchp_rds_ptp_clock *clock = container_of(mii_ts,
							struct mchp_rds_ptp_clock,
							mii_ts);

	switch (clock->hwts_tx_type) {
	case HWTSTAMP_TX_ONESTEP_SYNC:
		if (ptp_msg_is_sync(skb, type)) {
			kfree_skb(skb);
			return;
		}
		fallthrough;
	case HWTSTAMP_TX_ON:
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		skb_queue_tail(&clock->tx_queue, skb);
		break;
	case HWTSTAMP_TX_OFF:
	default:
		kfree_skb(skb);
		break;
	}
}

static bool mchp_rds_ptp_get_sig_rx(struct sk_buff *skb, u16 *sig)
{
	struct ptp_header *ptp_header;
	int type;

	skb_push(skb, ETH_HLEN);
	type = ptp_classify_raw(skb);
	if (type == PTP_CLASS_NONE)
		return false;

	ptp_header = ptp_parse_header(skb, type);
	if (!ptp_header)
		return false;

	skb_pull_inline(skb, ETH_HLEN);

	*sig = (__force u16)(ntohs(ptp_header->sequence_id));

	return true;
}

static bool mchp_rds_ptp_match_skb(struct mchp_rds_ptp_clock *clock,
				   struct mchp_rds_ptp_rx_ts *rx_ts)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct sk_buff *skb, *skb_tmp;
	unsigned long flags;
	bool rc = false;
	u16 skb_sig;

	spin_lock_irqsave(&clock->rx_queue.lock, flags);
	skb_queue_walk_safe(&clock->rx_queue, skb, skb_tmp) {
		if (!mchp_rds_ptp_get_sig_rx(skb, &skb_sig))
			continue;

		if (skb_sig != rx_ts->seq_id)
			continue;

		__skb_unlink(skb, &clock->rx_queue);

		rc = true;
		break;
	}
	spin_unlock_irqrestore(&clock->rx_queue.lock, flags);

	if (rc) {
		shhwtstamps = skb_hwtstamps(skb);
		shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds, rx_ts->nsec);
		netif_rx(skb);
	}

	return rc;
}

static void mchp_rds_ptp_match_rx_ts(struct mchp_rds_ptp_clock *clock,
				     struct mchp_rds_ptp_rx_ts *rx_ts)
{
	unsigned long flags;

	/* If we failed to match the timestamp to a queued skb, add it to the
	 * list so it can be matched when the frame arrives
	 */
	if (!mchp_rds_ptp_match_skb(clock, rx_ts)) {
		spin_lock_irqsave(&clock->rx_ts_lock, flags);
		list_add(&rx_ts->list, &clock->rx_ts_list);
		spin_unlock_irqrestore(&clock->rx_ts_lock, flags);
	} else {
		kfree(rx_ts);
	}
}

static void mchp_rds_ptp_match_rx_skb(struct mchp_rds_ptp_clock *clock,
				      struct sk_buff *skb)
{
	struct mchp_rds_ptp_rx_ts *rx_ts, *tmp, *rx_ts_var = NULL;
	struct skb_shared_hwtstamps *shhwtstamps;
	unsigned long flags;
	u16 skb_sig;

	if (!mchp_rds_ptp_get_sig_rx(skb, &skb_sig))
		return;

	/* Iterate over all RX timestamps and match them with the received skbs */
	spin_lock_irqsave(&clock->rx_ts_lock, flags);
	list_for_each_entry_safe(rx_ts, tmp, &clock->rx_ts_list, list) {
		/* Check if we found the signature we were looking for.
		 */
		if (skb_sig != rx_ts->seq_id)
			continue;

		shhwtstamps = skb_hwtstamps(skb);
		shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds, rx_ts->nsec);
		netif_rx(skb);

		rx_ts_var = rx_ts;

		break;
	}
	spin_unlock_irqrestore(&clock->rx_ts_lock, flags);

	if (rx_ts_var) {
		list_del(&rx_ts_var->list);
		kfree(rx_ts_var);
	} else {
		skb_queue_tail(&clock->rx_queue, skb);
	}
}

static bool mchp_rds_ptp_rxtstamp(struct mii_timestamper *mii_ts,
				  struct sk_buff *skb, int type)
{
	struct mchp_rds_ptp_clock *clock = container_of(mii_ts,
							struct mchp_rds_ptp_clock,
							mii_ts);

	if (clock->rx_filter == HWTSTAMP_FILTER_NONE ||
	    type == PTP_CLASS_NONE)
		return false;

	if ((type & clock->version) == 0 || (type & clock->layer) == 0)
		return false;

	/* If a match occurs here, the skb is delivered to the application
	 * right away. Otherwise the skb is queued and will be delivered from
	 * the interrupt handler once its timestamp arrives. Either way the
	 * skb reaches the application, so do not return false when it does
	 * not match.
	 */
	mchp_rds_ptp_match_rx_skb(clock, skb);

	return true;
}

static int mchp_rds_ptp_hwtstamp(struct mii_timestamper *mii_ts,
				 struct kernel_hwtstamp_config *config,
				 struct netlink_ext_ack *extack)
{
	struct mchp_rds_ptp_clock *clock =
				container_of(mii_ts, struct mchp_rds_ptp_clock,
					     mii_ts);
	struct mchp_rds_ptp_rx_ts *rx_ts, *tmp;
	int txcfg = 0, rxcfg = 0;
	unsigned long flags;
	int rc;

	clock->hwts_tx_type = config->tx_type;
	clock->rx_filter = config->rx_filter;

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		clock->layer = 0;
		clock->version = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		clock->layer = PTP_CLASS_L4;
		clock->version = PTP_CLASS_V2;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		clock->layer = PTP_CLASS_L2;
		clock->version = PTP_CLASS_V2;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		clock->layer = PTP_CLASS_L4 | PTP_CLASS_L2;
		clock->version = PTP_CLASS_V2;
		break;
	default:
		return -ERANGE;
	}

	/* Set up parsing of the frames and enable timestamping for PTP
	 * frames
	 */
	if (clock->layer & PTP_CLASS_L2) {
		rxcfg = MCHP_RDS_PTP_PARSE_CONFIG_LAYER2_EN;
		txcfg = MCHP_RDS_PTP_PARSE_CONFIG_LAYER2_EN;
	}
	if (clock->layer & PTP_CLASS_L4) {
		rxcfg |= MCHP_RDS_PTP_PARSE_CONFIG_IPV4_EN |
			 MCHP_RDS_PTP_PARSE_CONFIG_IPV6_EN;
		txcfg |= MCHP_RDS_PTP_PARSE_CONFIG_IPV4_EN |
			 MCHP_RDS_PTP_PARSE_CONFIG_IPV6_EN;
	}
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_CONFIG,
				    MCHP_RDS_PTP_PORT, rxcfg);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_CONFIG,
				    MCHP_RDS_PTP_PORT, txcfg);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_TIMESTAMP_EN,
				    MCHP_RDS_PTP_PORT,
				    MCHP_RDS_PTP_TIMESTAMP_EN_ALL);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_TIMESTAMP_EN,
				    MCHP_RDS_PTP_PORT,
				    MCHP_RDS_PTP_TIMESTAMP_EN_ALL);
	if (rc < 0)
		return rc;

	if (clock->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
		/* Enable or disable insertion of the TX timestamp in SYNC
		 * frames
		 */
		rc = mchp_rds_phy_modify_mmd(clock, MCHP_RDS_PTP_TX_MOD,
					     MCHP_RDS_PTP_PORT,
					     MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT,
					     MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT);
	else
		rc = mchp_rds_phy_modify_mmd(clock, MCHP_RDS_PTP_TX_MOD,
					     MCHP_RDS_PTP_PORT,
					     MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT,
					     (u16)~MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT);

	if (rc < 0)
		return rc;

	/* In case of multiple starts and stops, these need to be cleared */
	spin_lock_irqsave(&clock->rx_ts_lock, flags);
	list_for_each_entry_safe(rx_ts, tmp, &clock->rx_ts_list, list) {
		list_del(&rx_ts->list);
		kfree(rx_ts);
	}
	spin_unlock_irqrestore(&clock->rx_ts_lock, flags);

	rc = mchp_rds_ptp_flush_fifo(clock, MCHP_RDS_PTP_INGRESS_FIFO);
	if (rc < 0)
		return rc;

	rc = mchp_rds_ptp_flush_fifo(clock, MCHP_RDS_PTP_EGRESS_FIFO);
	if (rc < 0)
		return rc;

	/* Now enable the timestamping interrupts */
	rc = mchp_rds_ptp_config_intr(clock,
				      config->rx_filter != HWTSTAMP_FILTER_NONE);

	return rc < 0 ? rc : 0;
}

static int mchp_rds_ptp_ts_info(struct mii_timestamper *mii_ts,
				struct kernel_ethtool_ts_info *info)
{
	struct mchp_rds_ptp_clock *clock = container_of(mii_ts,
							struct mchp_rds_ptp_clock,
							mii_ts);

	info->phc_index = ptp_clock_index(clock->ptp_clock);

	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
			 BIT(HWTSTAMP_TX_ONESTEP_SYNC);

	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);

	return 0;
}

static int mchp_rds_ptp_ltc_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct mchp_rds_ptp_clock *clock = container_of(info,
							struct mchp_rds_ptp_clock,
							caps);
	struct timespec64 ts;
	bool add = true;
	int rc = 0;
	u32 nsec;
	s32 sec;

	/* The HW allows up to 15 sec to adjust the time, but here we limit
	 * the adjustment to 10 sec. The reason is that if the adjustment is,
	 * for example, 14 sec and 999999999 nsec, the 8 ns added to
	 * compensate for the actual increment can push the value above 15 sec.
	 * Therefore limit the possible adjustments so we do not hit these
	 * corner cases
	 */
	if (delta > 10000000000LL || delta < -10000000000LL) {
		/* The time adjustment is too big, so fall back to set time */
		u64 now;

		info->gettime64(info, &ts);

		now = ktime_to_ns(timespec64_to_ktime(ts));
		ts = ns_to_timespec64(now + delta);

		info->settime64(info, &ts);
		return 0;
	}
	sec = div_u64_rem(abs(delta), NSEC_PER_SEC, &nsec);
	if (delta < 0 && nsec != 0) {
		/* It is not allowed to adjust the nsec part downwards,
		 * therefore subtract more from the seconds part and add the
		 * complement to the nanoseconds so that it rolls over, which
		 * increases the seconds part
		 */
		sec--;
		nsec = NSEC_PER_SEC - nsec;
	}

	/* Calculate the adjustments and the direction */
	if (delta < 0)
		add = false;

	if (nsec > 0) {
		/* add 8 ns to cover the likely normal increment */
		nsec += 8;

		if (nsec >= NSEC_PER_SEC) {
			/* carry into seconds */
			sec++;
			nsec -= NSEC_PER_SEC;
		}
	}

	mutex_lock(&clock->ptp_lock);
	if (sec) {
		sec = abs(sec);

		rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_LO,
					    MCHP_RDS_PTP_CLOCK, sec);
		if (rc < 0)
			goto out_unlock;

		rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_HI,
					       MCHP_RDS_PTP_CLOCK,
					       ((add ?
						 MCHP_RDS_PTP_STEP_ADJ_HI_DIR :
						 0) | ((sec >> 16) &
						       GENMASK(13, 0))));
		if (rc < 0)
			goto out_unlock;

		rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
					       MCHP_RDS_PTP_CLOCK,
					       MCHP_RDS_PTP_CMD_CTL_LTC_STEP_SEC);
		if (rc < 0)
			goto out_unlock;
	}

	if (nsec) {
		rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_LO,
					    MCHP_RDS_PTP_CLOCK,
					    nsec & GENMASK(15, 0));
		if (rc < 0)
			goto out_unlock;

		rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_HI,
					    MCHP_RDS_PTP_CLOCK,
					    (nsec >> 16) & GENMASK(13, 0));
		if (rc < 0)
			goto out_unlock;

		rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
					       MCHP_RDS_PTP_CLOCK,
					       MCHP_RDS_PTP_CMD_CTL_LTC_STEP_NSEC);
	}

out_unlock:
	mutex_unlock(&clock->ptp_lock);

	return rc;
}

static int mchp_rds_ptp_ltc_adjfine(struct ptp_clock_info *info,
				    long scaled_ppm)
{
	struct mchp_rds_ptp_clock *clock = container_of(info,
							struct mchp_rds_ptp_clock,
							caps);
	u16 rate_lo, rate_hi;
	bool faster = true;
	u32 rate;
	int rc;

	if (!scaled_ppm)
		return 0;

	if (scaled_ppm < 0) {
		scaled_ppm = -scaled_ppm;
		faster = false;
	}

	rate = MCHP_RDS_PTP_1PPM_FORMAT * (upper_16_bits(scaled_ppm));
	rate += (MCHP_RDS_PTP_1PPM_FORMAT * (lower_16_bits(scaled_ppm))) >> 16;

	rate_lo = rate & GENMASK(15, 0);
	rate_hi = (rate >> 16) & GENMASK(13, 0);

	if (faster)
		rate_hi |= MCHP_RDS_PTP_LTC_RATE_ADJ_HI_DIR;

	mutex_lock(&clock->ptp_lock);
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_RATE_ADJ_HI,
				    MCHP_RDS_PTP_CLOCK, rate_hi);
	if (rc < 0)
		goto error;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_RATE_ADJ_LO,
				    MCHP_RDS_PTP_CLOCK, rate_lo);
	if (rc > 0)
		rc = 0;
error:
	mutex_unlock(&clock->ptp_lock);

	return rc;
}

static int mchp_rds_ptp_ltc_gettime64(struct ptp_clock_info *info,
				      struct timespec64 *ts)
{
	struct mchp_rds_ptp_clock *clock = container_of(info,
							struct mchp_rds_ptp_clock,
							caps);
	time64_t secs;
	int rc = 0;
	s64 nsecs;

	mutex_lock(&clock->ptp_lock);
	/* Set read bit to 1 to save current values of 1588 local time counter
	 * into PTP LTC seconds and nanoseconds registers.
	 */
	rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
				       MCHP_RDS_PTP_CLOCK,
				       MCHP_RDS_PTP_CMD_CTL_CLOCK_READ);
	if (rc < 0)
		goto out_unlock;

	/* Get LTC clock values */
	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_HI,
				   MCHP_RDS_PTP_CLOCK);
	if (rc < 0)
		goto out_unlock;
	secs = rc << 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_MID,
				   MCHP_RDS_PTP_CLOCK);
	if (rc < 0)
		goto out_unlock;
	secs |= rc;
	secs <<= 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_LO,
				   MCHP_RDS_PTP_CLOCK);
	if (rc < 0)
		goto out_unlock;
	secs |= rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_NS_HI,
				   MCHP_RDS_PTP_CLOCK);
	if (rc < 0)
		goto out_unlock;
	nsecs = (rc & GENMASK(13, 0));
	nsecs <<= 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_NS_LO,
				   MCHP_RDS_PTP_CLOCK);
	if (rc < 0)
		goto out_unlock;
	nsecs |= rc;

	set_normalized_timespec64(ts, secs, nsecs);

	if (rc > 0)
		rc = 0;
out_unlock:
	mutex_unlock(&clock->ptp_lock);

	return rc;
}

static int mchp_rds_ptp_ltc_settime64(struct ptp_clock_info *info,
				      const struct timespec64 *ts)
{
	struct mchp_rds_ptp_clock *clock = container_of(info,
							struct mchp_rds_ptp_clock,
							caps);
	int rc;

	mutex_lock(&clock->ptp_lock);
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_LO,
				    MCHP_RDS_PTP_CLOCK,
				    lower_16_bits(ts->tv_sec));
	if (rc < 0)
		goto out_unlock;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_MID,
				    MCHP_RDS_PTP_CLOCK,
				    upper_16_bits(ts->tv_sec));
	if (rc < 0)
		goto out_unlock;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_HI,
				    MCHP_RDS_PTP_CLOCK,
				    upper_32_bits(ts->tv_sec) & GENMASK(15, 0));
	if (rc < 0)
		goto out_unlock;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_NS_LO,
				    MCHP_RDS_PTP_CLOCK,
				    lower_16_bits(ts->tv_nsec));
	if (rc < 0)
		goto out_unlock;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_NS_HI,
				    MCHP_RDS_PTP_CLOCK,
				    upper_16_bits(ts->tv_nsec) & GENMASK(13, 0));
	if (rc < 0)
		goto out_unlock;

	/* Set load bit to 1 to write PTP LTC seconds and nanoseconds
	 * registers to 1588 local time counter.
	 */
	rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
				       MCHP_RDS_PTP_CLOCK,
				       MCHP_RDS_PTP_CMD_CTL_CLOCK_LOAD);
	if (rc > 0)
		rc = 0;
out_unlock:
	mutex_unlock(&clock->ptp_lock);

	return rc;
}

static bool mchp_rds_ptp_get_sig_tx(struct sk_buff *skb, u16 *sig)
{
	struct ptp_header *ptp_header;
	int type;

	type = ptp_classify_raw(skb);
	if (type == PTP_CLASS_NONE)
		return false;

	ptp_header = ptp_parse_header(skb, type);
	if (!ptp_header)
		return false;

	*sig = (__force u16)(ntohs(ptp_header->sequence_id));

	return true;
}

static void mchp_rds_ptp_match_tx_skb(struct mchp_rds_ptp_clock *clock,
				      u32 seconds, u32 nsec, u16 seq_id)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct sk_buff *skb, *skb_tmp;
	unsigned long flags;
	bool rc = false;
	u16 skb_sig;

	spin_lock_irqsave(&clock->tx_queue.lock, flags);
	skb_queue_walk_safe(&clock->tx_queue, skb, skb_tmp) {
		if (!mchp_rds_ptp_get_sig_tx(skb, &skb_sig))
			continue;

		if (skb_sig != seq_id)
			continue;

		__skb_unlink(skb, &clock->tx_queue);
		rc = true;
		break;
	}
	spin_unlock_irqrestore(&clock->tx_queue.lock, flags);

	if (rc) {
		shhwtstamps.hwtstamp = ktime_set(seconds, nsec);
		skb_complete_tx_timestamp(skb, &shhwtstamps);
	}
}

static struct mchp_rds_ptp_rx_ts
	*mchp_rds_ptp_get_rx_ts(struct mchp_rds_ptp_clock *clock)
{
	struct phy_device *phydev = clock->phydev;
	struct mchp_rds_ptp_rx_ts *rx_ts = NULL;
	u32 sec, nsec;
	int rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_NS_HI,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		goto error;
	if (!(rc & MCHP_RDS_PTP_RX_INGRESS_NS_HI_TS_VALID)) {
		phydev_err(phydev, "RX Timestamp is not valid!\n");
		goto error;
	}
	nsec = (rc & GENMASK(13, 0)) << 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_NS_LO,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		goto error;
	nsec |= rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_SEC_HI,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		goto error;
	sec = rc << 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_SEC_LO,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		goto error;
	sec |= rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_MSG_HDR2,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		goto error;

	rx_ts = kmalloc(sizeof(*rx_ts), GFP_KERNEL);
	if (!rx_ts)
		return NULL;

	rx_ts->seconds = sec;
	rx_ts->nsec = nsec;
	rx_ts->seq_id = rc;

error:
	return rx_ts;
}

static void mchp_rds_ptp_process_rx_ts(struct mchp_rds_ptp_clock *clock)
{
	int caps;

	do {
		struct mchp_rds_ptp_rx_ts *rx_ts;

		rx_ts = mchp_rds_ptp_get_rx_ts(clock);
		if (rx_ts)
			mchp_rds_ptp_match_rx_ts(clock, rx_ts);

		caps = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_CAP_INFO,
					     MCHP_RDS_PTP_PORT);
		if (caps < 0)
			return;
	} while (MCHP_RDS_PTP_RX_TS_CNT(caps) > 0);
}

static bool mchp_rds_ptp_get_tx_ts(struct mchp_rds_ptp_clock *clock,
				   u32 *sec, u32 *nsec, u16 *seq)
{
	int rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_NS_HI,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		return false;
	if (!(rc & MCHP_RDS_PTP_TX_EGRESS_NS_HI_TS_VALID))
		return false;
	*nsec = (rc & GENMASK(13, 0)) << 16;

	rc = mchp_rds_phy_read_mmd(clock,
				   MCHP_RDS_PTP_TX_EGRESS_NS_LO,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		return false;
	*nsec = *nsec | rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_SEC_HI,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		return false;
	*sec = rc << 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_SEC_LO,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		return false;
	*sec = *sec | rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_MSG_HDR2,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		return false;

	*seq = rc;

	return true;
}

static void mchp_rds_ptp_process_tx_ts(struct mchp_rds_ptp_clock *clock)
{
	int caps;

	do {
		u32 sec, nsec;
		u16 seq;

		if (mchp_rds_ptp_get_tx_ts(clock, &sec, &nsec, &seq))
			mchp_rds_ptp_match_tx_skb(clock, sec, nsec, seq);

		caps = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_CAP_INFO,
					     MCHP_RDS_PTP_PORT);
		if (caps < 0)
			return;
	} while (MCHP_RDS_PTP_TX_TS_CNT(caps) > 0);
}

int mchp_rds_ptp_top_config_intr(struct mchp_rds_ptp_clock *clock,
				 u16 reg, u16 val, bool clear)
{
	if (clear)
		return phy_clear_bits_mmd(clock->phydev, PTP_MMD(clock), reg,
					  val);
	else
		return phy_set_bits_mmd(clock->phydev, PTP_MMD(clock), reg,
					val);
}
EXPORT_SYMBOL_GPL(mchp_rds_ptp_top_config_intr);

irqreturn_t mchp_rds_ptp_handle_interrupt(struct mchp_rds_ptp_clock *clock)
{
	int irq_sts;

	/* To handle rogue interrupt scenarios */
	if (!clock)
		return IRQ_NONE;

	do {
		irq_sts = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_INT_STS,
						MCHP_RDS_PTP_PORT);
		if (irq_sts < 0)
			return IRQ_NONE;

		if (irq_sts & MCHP_RDS_PTP_INT_RX_TS_EN)
			mchp_rds_ptp_process_rx_ts(clock);

		if (irq_sts & MCHP_RDS_PTP_INT_TX_TS_EN)
			mchp_rds_ptp_process_tx_ts(clock);

		if (irq_sts & MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN)
			mchp_rds_ptp_flush_fifo(clock,
						MCHP_RDS_PTP_EGRESS_FIFO);

		if (irq_sts & MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN)
			mchp_rds_ptp_flush_fifo(clock,
						MCHP_RDS_PTP_INGRESS_FIFO);
	} while (irq_sts & (MCHP_RDS_PTP_INT_RX_TS_EN |
			    MCHP_RDS_PTP_INT_TX_TS_EN |
			    MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN |
			    MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN));

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mchp_rds_ptp_handle_interrupt);

static int mchp_rds_ptp_init(struct mchp_rds_ptp_clock *clock)
{
	int rc;

	/* Disable PTP */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
				    MCHP_RDS_PTP_CLOCK,
				    MCHP_RDS_PTP_CMD_CTL_DIS);
	if (rc < 0)
		return rc;

	/* Disable TSU */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_GEN_CONFIG,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	/* Clear PTP interrupt status registers */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_HARD_RESET,
				    MCHP_RDS_PTP_PORT,
				    MCHP_RDS_PTP_TSU_HARDRESET);
	if (rc < 0)
		return rc;

	/* Predictor enable */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LATENCY_CORRECTION_CTL,
				    MCHP_RDS_PTP_CLOCK,
				    MCHP_RDS_PTP_LATENCY_SETTING);
	if (rc < 0)
		return rc;

	/* Configure PTP operational mode */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_OP_MODE,
				    MCHP_RDS_PTP_CLOCK,
				    MCHP_RDS_PTP_OP_MODE_STANDALONE);
	if (rc < 0)
		return rc;

	/* Reference clock configuration */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_REF_CLK_CFG,
				    MCHP_RDS_PTP_CLOCK,
				    MCHP_RDS_PTP_REF_CLK_CFG_SET);
	if (rc < 0)
		return rc;

	/* Classifier configurations */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_CONFIG,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_CONFIG,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_L2_ADDR_EN,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_L2_ADDR_EN,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_IPV4_ADDR_EN,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_IPV4_ADDR_EN,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_VERSION,
				    MCHP_RDS_PTP_PORT,
				    MCHP_RDS_PTP_MAX_VERSION(0xff) |
				    MCHP_RDS_PTP_MIN_VERSION(0x0));
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_VERSION,
				    MCHP_RDS_PTP_PORT,
				    MCHP_RDS_PTP_MAX_VERSION(0xff) |
				    MCHP_RDS_PTP_MIN_VERSION(0x0));
	if (rc < 0)
		return rc;

	/* Enable TSU */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_GEN_CONFIG,
				    MCHP_RDS_PTP_PORT,
				    MCHP_RDS_PTP_TSU_GEN_CFG_TSU_EN);
	if (rc < 0)
		return rc;

	/* Enable PTP */
	return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
				      MCHP_RDS_PTP_CLOCK,
				      MCHP_RDS_PTP_CMD_CTL_EN);
}

struct mchp_rds_ptp_clock *mchp_rds_ptp_probe(struct phy_device *phydev, u8 mmd,
					      u16 clk_base_addr,
					      u16 port_base_addr)
{
	struct mchp_rds_ptp_clock *clock;
	int rc;

	clock = devm_kzalloc(&phydev->mdio.dev, sizeof(*clock), GFP_KERNEL);
	if (!clock)
		return ERR_PTR(-ENOMEM);

	clock->port_base_addr = port_base_addr;
	clock->clk_base_addr = clk_base_addr;
	clock->mmd = mmd;

	mutex_init(&clock->ptp_lock);
	/* Register PTP clock */
	clock->caps.owner = THIS_MODULE;
	snprintf(clock->caps.name, 30, "%s", phydev->drv->name);
	clock->caps.max_adj = MCHP_RDS_PTP_MAX_ADJ;
	clock->caps.n_ext_ts = 0;
	clock->caps.pps = 0;
	clock->caps.adjfine = mchp_rds_ptp_ltc_adjfine;
	clock->caps.adjtime = mchp_rds_ptp_ltc_adjtime;
	clock->caps.gettime64 = mchp_rds_ptp_ltc_gettime64;
	clock->caps.settime64 = mchp_rds_ptp_ltc_settime64;
	clock->ptp_clock = ptp_clock_register(&clock->caps,
					      &phydev->mdio.dev);
	if (IS_ERR(clock->ptp_clock))
		return ERR_PTR(-EINVAL);

	/* Check if PHC support is missing at the configuration level */
	if (!clock->ptp_clock)
		return NULL;

	/* Initialize the SW */
	skb_queue_head_init(&clock->tx_queue);
	skb_queue_head_init(&clock->rx_queue);
	INIT_LIST_HEAD(&clock->rx_ts_list);
	spin_lock_init(&clock->rx_ts_lock);

	clock->mii_ts.rxtstamp = mchp_rds_ptp_rxtstamp;
	clock->mii_ts.txtstamp = mchp_rds_ptp_txtstamp;
	clock->mii_ts.hwtstamp = mchp_rds_ptp_hwtstamp;
	clock->mii_ts.ts_info = mchp_rds_ptp_ts_info;

	phydev->mii_ts = &clock->mii_ts;

	/* Timestamp selected by default to keep legacy API */
	phydev->default_timestamp = true;

	clock->phydev = phydev;

	rc = mchp_rds_ptp_init(clock);
	if (rc < 0)
		return ERR_PTR(rc);

	return clock;
}
EXPORT_SYMBOL_GPL(mchp_rds_ptp_probe);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MICROCHIP PHY RDS PTP driver");
MODULE_AUTHOR("Divya Koppera");