// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2024 Microchip Technology

#include "microchip_rds_ptp.h"

static int mchp_rds_phy_read_mmd(struct mchp_rds_ptp_clock *clock,
				 u32 offset, enum mchp_rds_ptp_base base)
{
	struct phy_device *phydev = clock->phydev;
	u32 addr;

	addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
			  BASE_CLK(clock)));

	return phy_read_mmd(phydev, PTP_MMD(clock), addr);
}

static int mchp_rds_phy_write_mmd(struct mchp_rds_ptp_clock *clock,
				  u32 offset, enum mchp_rds_ptp_base base,
				  u16 val)
{
	struct phy_device *phydev = clock->phydev;
	u32 addr;

	addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
			  BASE_CLK(clock)));

	return phy_write_mmd(phydev, PTP_MMD(clock), addr, val);
}

static int mchp_rds_phy_modify_mmd(struct mchp_rds_ptp_clock *clock,
				   u32 offset, enum mchp_rds_ptp_base base,
				   u16 mask, u16 val)
{
	struct phy_device *phydev = clock->phydev;
	u32 addr;

	addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
			  BASE_CLK(clock)));

	return phy_modify_mmd(phydev, PTP_MMD(clock), addr, mask, val);
}

static int mchp_rds_phy_set_bits_mmd(struct mchp_rds_ptp_clock *clock,
				     u32 offset, enum mchp_rds_ptp_base base,
				     u16 val)
{
	struct phy_device *phydev = clock->phydev;
	u32 addr;

	addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
			  BASE_CLK(clock)));

	return phy_set_bits_mmd(phydev, PTP_MMD(clock), addr, val);
}

static int mchp_get_pulsewidth(struct phy_device *phydev,
			       struct ptp_perout_request *perout_request,
			       int *pulse_width)
{
	struct timespec64 ts_period;
	s64 ts_on_nsec, period_nsec;
	struct timespec64 ts_on;
	static const s64 sup_on_necs[] = {
		100,		/* 100ns */
		500,		/* 500ns */
		1000,		/* 1us */
		5000,		/* 5us */
		10000,		/* 10us */
		50000,		/* 50us */
		100000,		/* 100us */
		500000,		/* 500us */
		1000000,	/* 1ms */
		5000000,	/* 5ms */
		10000000,	/* 10ms */
		50000000,	/* 50ms */
		100000000,	/* 100ms */
		200000000,	/* 200ms */
	};

	ts_period.tv_sec = perout_request->period.sec;
	ts_period.tv_nsec = perout_request->period.nsec;

	ts_on.tv_sec = perout_request->on.sec;
	ts_on.tv_nsec = perout_request->on.nsec;
	ts_on_nsec = timespec64_to_ns(&ts_on);
	period_nsec = timespec64_to_ns(&ts_period);

	if (period_nsec < 200) {
		phydev_warn(phydev, "perout period small, minimum is 200ns\n");
		return -EOPNOTSUPP;
	}

	/* Default to the widest supported pulse so *pulse_width is never left
	 * uninitialized when the requested on-time exceeds 200ms.
	 */
	*pulse_width = ARRAY_SIZE(sup_on_necs) - 1;
	for (int i = 0; i < ARRAY_SIZE(sup_on_necs); i++) {
		if (ts_on_nsec <= sup_on_necs[i]) {
			*pulse_width = i;
			break;
		}
	}

	phydev_info(phydev, "pulse width is %d\n", *pulse_width);
	return 0;
}

static int mchp_general_event_config(struct mchp_rds_ptp_clock *clock,
				     int pulse_width)
{
	int general_config;

	general_config = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_GEN_CFG,
					       MCHP_RDS_PTP_CLOCK);
	if (general_config < 0)
		return general_config;

	general_config &= ~MCHP_RDS_PTP_GEN_CFG_LTC_EVT_MASK;
	general_config |= MCHP_RDS_PTP_GEN_CFG_LTC_EVT_SET(pulse_width);
	general_config &= ~MCHP_RDS_PTP_GEN_CFG_RELOAD_ADD;
	general_config |= MCHP_RDS_PTP_GEN_CFG_POLARITY;

	return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_GEN_CFG,
				      MCHP_RDS_PTP_CLOCK, general_config);
}

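/* Program the clock-target reload registers with the perout period. Once the
 * LTC reaches the current target time the hardware is expected to re-arm the
 * target by adding this reload value, which is what produces a periodic
 * output. Seconds are split into two 16-bit writes, nanoseconds into a
 * 16-bit low part and a 14-bit high part.
 */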
static int mchp_set_clock_reload(struct mchp_rds_ptp_clock *clock,
				 s64 period_sec, u32 period_nsec)
{
	int rc;

	rc = mchp_rds_phy_write_mmd(clock,
				    MCHP_RDS_PTP_CLK_TRGT_RELOAD_SEC_LO,
				    MCHP_RDS_PTP_CLOCK,
				    lower_16_bits(period_sec));
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock,
				    MCHP_RDS_PTP_CLK_TRGT_RELOAD_SEC_HI,
				    MCHP_RDS_PTP_CLOCK,
				    upper_16_bits(period_sec));
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock,
				    MCHP_RDS_PTP_CLK_TRGT_RELOAD_NS_LO,
				    MCHP_RDS_PTP_CLOCK,
				    lower_16_bits(period_nsec));
	if (rc < 0)
		return rc;

	return mchp_rds_phy_write_mmd(clock,
				      MCHP_RDS_PTP_CLK_TRGT_RELOAD_NS_HI,
				      MCHP_RDS_PTP_CLOCK,
				      upper_16_bits(period_nsec) & 0x3fff);
}

static int mchp_set_clock_target(struct mchp_rds_ptp_clock *clock,
				 s64 start_sec, u32 start_nsec)
{
	int rc;

	/* Set the start time */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CLK_TRGT_SEC_LO,
				    MCHP_RDS_PTP_CLOCK,
				    lower_16_bits(start_sec));
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CLK_TRGT_SEC_HI,
				    MCHP_RDS_PTP_CLOCK,
				    upper_16_bits(start_sec));
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CLK_TRGT_NS_LO,
				    MCHP_RDS_PTP_CLOCK,
				    lower_16_bits(start_nsec));
	if (rc < 0)
		return rc;

	return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CLK_TRGT_NS_HI,
				      MCHP_RDS_PTP_CLOCK,
				      upper_16_bits(start_nsec) & 0x3fff);
}

static int mchp_rds_ptp_perout_off(struct mchp_rds_ptp_clock *clock)
{
	int general_config;
	int rc;

	/* Set target to too far in the future, effectively disabling it */
	rc = mchp_set_clock_target(clock, 0xFFFFFFFF, 0);
	if (rc < 0)
		return rc;

	general_config = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_GEN_CFG,
					       MCHP_RDS_PTP_CLOCK);
	if (general_config < 0)
		return general_config;

	general_config |= MCHP_RDS_PTP_GEN_CFG_RELOAD_ADD;
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_GEN_CFG,
				    MCHP_RDS_PTP_CLOCK, general_config);
	if (rc < 0)
		return rc;

	clock->mchp_rds_ptp_event = -1;

	return 0;
}

static bool mchp_get_event(struct mchp_rds_ptp_clock *clock, int pin)
{
	if (clock->mchp_rds_ptp_event < 0 && pin == clock->event_pin) {
		clock->mchp_rds_ptp_event = pin;
		return true;
	}

	return false;
}

static int mchp_rds_ptp_perout(struct ptp_clock_info *ptpci,
			       struct ptp_perout_request *perout, int on)
{
	struct mchp_rds_ptp_clock *clock = container_of(ptpci,
							struct mchp_rds_ptp_clock,
							caps);
	struct phy_device *phydev = clock->phydev;
	int ret, event_pin, pulsewidth;

	event_pin = ptp_find_pin(clock->ptp_clock, PTP_PF_PEROUT,
				 perout->index);
	if (event_pin != clock->event_pin)
		return -EINVAL;

	if (!on) {
		ret = mchp_rds_ptp_perout_off(clock);
		return ret;
	}

	if (!mchp_get_event(clock, event_pin))
		return -EINVAL;

	ret = mchp_get_pulsewidth(phydev, perout, &pulsewidth);
	if (ret < 0)
		return ret;

	/* Configure to pulse every period */
	ret = mchp_general_event_config(clock, pulsewidth);
	if (ret < 0)
		return ret;

	ret = mchp_set_clock_target(clock, perout->start.sec,
				    perout->start.nsec);
	if (ret < 0)
		return ret;

	return mchp_set_clock_reload(clock, perout->period.sec,
				     perout->period.nsec);
}

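/* PTP clock .enable callback. Only periodic output (PEROUT) requests are
 * supported on the single event pin; all other request types are rejected.
 */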
static int mchp_rds_ptpci_enable(struct ptp_clock_info *ptpci,
				 struct ptp_clock_request *request, int on)
{
	switch (request->type) {
	case PTP_CLK_REQ_PEROUT:
		return mchp_rds_ptp_perout(ptpci, &request->perout, on);
	default:
		return -EINVAL;
	}
}

static int mchp_rds_ptpci_verify(struct ptp_clock_info *ptpci, unsigned int pin,
				 enum ptp_pin_function func, unsigned int chan)
{
	struct mchp_rds_ptp_clock *clock = container_of(ptpci,
							struct mchp_rds_ptp_clock,
							caps);

	if (!(pin == clock->event_pin && chan == 0))
		return -1;

	switch (func) {
	case PTP_PF_NONE:
	case PTP_PF_PEROUT:
		break;
	default:
		return -1;
	}

	return 0;
}

static int mchp_rds_ptp_flush_fifo(struct mchp_rds_ptp_clock *clock,
				   enum mchp_rds_ptp_fifo_dir dir)
{
	int rc;

	if (dir == MCHP_RDS_PTP_EGRESS_FIFO)
		skb_queue_purge(&clock->tx_queue);
	else
		skb_queue_purge(&clock->rx_queue);

	for (int i = 0; i < MCHP_RDS_PTP_FIFO_SIZE; ++i) {
		rc = mchp_rds_phy_read_mmd(clock,
					   dir == MCHP_RDS_PTP_EGRESS_FIFO ?
					   MCHP_RDS_PTP_TX_MSG_HDR2 :
					   MCHP_RDS_PTP_RX_MSG_HDR2,
					   MCHP_RDS_PTP_PORT);
		if (rc < 0)
			return rc;
	}
	return mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_INT_STS,
				     MCHP_RDS_PTP_PORT);
}

static int mchp_rds_ptp_config_intr(struct mchp_rds_ptp_clock *clock,
				    bool enable)
{
	/* Enable or disable ptp interrupts */
	return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_INT_EN,
				      MCHP_RDS_PTP_PORT,
				      enable ? MCHP_RDS_PTP_INT_ALL_MSK : 0);
}

static void mchp_rds_ptp_txtstamp(struct mii_timestamper *mii_ts,
				  struct sk_buff *skb, int type)
{
	struct mchp_rds_ptp_clock *clock = container_of(mii_ts,
							struct mchp_rds_ptp_clock,
							mii_ts);

	switch (clock->hwts_tx_type) {
	case HWTSTAMP_TX_ONESTEP_SYNC:
		if (ptp_msg_is_sync(skb, type)) {
			kfree_skb(skb);
			return;
		}
		fallthrough;
	case HWTSTAMP_TX_ON:
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		skb_queue_tail(&clock->tx_queue, skb);
		break;
	case HWTSTAMP_TX_OFF:
	default:
		kfree_skb(skb);
		break;
	}
}

static bool mchp_rds_ptp_get_sig_rx(struct sk_buff *skb, u16 *sig)
{
	struct ptp_header *ptp_header;
	int type;

	/* Always undo the push, even when classification fails, because the
	 * skb may stay on the RX queue and be delivered later.
	 */
	skb_push(skb, ETH_HLEN);
	type = ptp_classify_raw(skb);
	ptp_header = ptp_parse_header(skb, type);
	skb_pull_inline(skb, ETH_HLEN);

	if (type == PTP_CLASS_NONE || !ptp_header)
		return false;

	*sig = (__force u16)(ntohs(ptp_header->sequence_id));

	return true;
}

static bool mchp_rds_ptp_match_skb(struct mchp_rds_ptp_clock *clock,
				   struct mchp_rds_ptp_rx_ts *rx_ts)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct sk_buff *skb, *skb_tmp;
	unsigned long flags;
	bool rc = false;
	u16 skb_sig;

	spin_lock_irqsave(&clock->rx_queue.lock, flags);
	skb_queue_walk_safe(&clock->rx_queue, skb, skb_tmp) {
		if (!mchp_rds_ptp_get_sig_rx(skb, &skb_sig))
			continue;

		if (skb_sig != rx_ts->seq_id)
			continue;

		__skb_unlink(skb, &clock->rx_queue);

		rc = true;
		break;
	}
	spin_unlock_irqrestore(&clock->rx_queue.lock, flags);

	if (rc) {
		shhwtstamps = skb_hwtstamps(skb);
		shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds, rx_ts->nsec);
		netif_rx(skb);
	}

	return rc;
}

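/* Ownership note: rx_ts is either freed here (frame already seen) or handed
 * over to rx_ts_list, where it is freed later by mchp_rds_ptp_match_rx_skb()
 * or by mchp_rds_ptp_hwtstamp_set() when the list is cleared.
 */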
static void mchp_rds_ptp_match_rx_ts(struct mchp_rds_ptp_clock *clock,
				     struct mchp_rds_ptp_rx_ts *rx_ts)
{
	unsigned long flags;

	/* If we failed to match the skb, keep the timestamp on the list so it
	 * can be matched once the frame arrives
	 */
	if (!mchp_rds_ptp_match_skb(clock, rx_ts)) {
		spin_lock_irqsave(&clock->rx_ts_lock, flags);
		list_add(&rx_ts->list, &clock->rx_ts_list);
		spin_unlock_irqrestore(&clock->rx_ts_lock, flags);
	} else {
		kfree(rx_ts);
	}
}

static void mchp_rds_ptp_match_rx_skb(struct mchp_rds_ptp_clock *clock,
				      struct sk_buff *skb)
{
	struct mchp_rds_ptp_rx_ts *rx_ts, *tmp, *rx_ts_var = NULL;
	struct skb_shared_hwtstamps *shhwtstamps;
	unsigned long flags;
	u16 skb_sig;

	if (!mchp_rds_ptp_get_sig_rx(skb, &skb_sig))
		return;

	/* Iterate over all RX timestamps and match them with the received skb */
	spin_lock_irqsave(&clock->rx_ts_lock, flags);
	list_for_each_entry_safe(rx_ts, tmp, &clock->rx_ts_list, list) {
		/* Check if we found the signature we were looking for. */
		if (skb_sig != rx_ts->seq_id)
			continue;

		shhwtstamps = skb_hwtstamps(skb);
		shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds, rx_ts->nsec);
		netif_rx(skb);

		/* Unlink the entry while rx_ts_lock is still held */
		list_del(&rx_ts->list);
		rx_ts_var = rx_ts;

		break;
	}
	spin_unlock_irqrestore(&clock->rx_ts_lock, flags);

	if (rx_ts_var)
		kfree(rx_ts_var);
	else
		skb_queue_tail(&clock->rx_queue, skb);
}

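/* mii_timestamper .rxtstamp callback. Returning true means this driver took
 * ownership of the skb and will (eventually) deliver it via netif_rx().
 */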
static bool mchp_rds_ptp_rxtstamp(struct mii_timestamper *mii_ts,
				  struct sk_buff *skb, int type)
{
	struct mchp_rds_ptp_clock *clock = container_of(mii_ts,
							struct mchp_rds_ptp_clock,
							mii_ts);

	if (clock->rx_filter == HWTSTAMP_FILTER_NONE ||
	    type == PTP_CLASS_NONE)
		return false;

	if ((type & clock->version) == 0 || (type & clock->layer) == 0)
		return false;

	/* If a matching timestamp is found here, the skb is delivered to the
	 * stack right away. If not, the skb is queued and will be delivered
	 * from the interrupt handler once its timestamp arrives. Either way
	 * the skb reaches the application, so do not return false when no
	 * match is found here.
	 */
	mchp_rds_ptp_match_rx_skb(clock, skb);

	return true;
}

static int mchp_rds_ptp_hwtstamp_get(struct mii_timestamper *mii_ts,
				     struct kernel_hwtstamp_config *config)
{
	struct mchp_rds_ptp_clock *clock =
				container_of(mii_ts, struct mchp_rds_ptp_clock,
					     mii_ts);

	config->tx_type = clock->hwts_tx_type;
	config->rx_filter = clock->rx_filter;

	return 0;
}

static int mchp_rds_ptp_hwtstamp_set(struct mii_timestamper *mii_ts,
				     struct kernel_hwtstamp_config *config,
				     struct netlink_ext_ack *extack)
{
	struct mchp_rds_ptp_clock *clock =
				container_of(mii_ts, struct mchp_rds_ptp_clock,
					     mii_ts);
	struct mchp_rds_ptp_rx_ts *rx_ts, *tmp;
	int txcfg = 0, rxcfg = 0;
	unsigned long flags;
	int rc;

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		clock->layer = 0;
		clock->version = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		clock->layer = PTP_CLASS_L4;
		clock->version = PTP_CLASS_V2;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		clock->layer = PTP_CLASS_L2;
		clock->version = PTP_CLASS_V2;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		clock->layer = PTP_CLASS_L4 | PTP_CLASS_L2;
		clock->version = PTP_CLASS_V2;
		break;
	default:
		return -ERANGE;
	}

	switch (config->tx_type) {
	case HWTSTAMP_TX_ONESTEP_SYNC:
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_OFF:
		break;
	default:
		return -ERANGE;
	}

	/* Set up frame parsing and enable timestamping for PTP frames */
	if (clock->layer & PTP_CLASS_L2) {
		rxcfg = MCHP_RDS_PTP_PARSE_CONFIG_LAYER2_EN;
		txcfg = MCHP_RDS_PTP_PARSE_CONFIG_LAYER2_EN;
	}
	if (clock->layer & PTP_CLASS_L4) {
		rxcfg |= MCHP_RDS_PTP_PARSE_CONFIG_IPV4_EN |
			 MCHP_RDS_PTP_PARSE_CONFIG_IPV6_EN;
		txcfg |= MCHP_RDS_PTP_PARSE_CONFIG_IPV4_EN |
			 MCHP_RDS_PTP_PARSE_CONFIG_IPV6_EN;
	}
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_CONFIG,
				    MCHP_RDS_PTP_PORT, rxcfg);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_CONFIG,
				    MCHP_RDS_PTP_PORT, txcfg);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_TIMESTAMP_EN,
				    MCHP_RDS_PTP_PORT,
				    MCHP_RDS_PTP_TIMESTAMP_EN_ALL);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_TIMESTAMP_EN,
				    MCHP_RDS_PTP_PORT,
				    MCHP_RDS_PTP_TIMESTAMP_EN_ALL);
	if (rc < 0)
		return rc;

	if (config->tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
		/* Enable insertion of the TX timestamp into SYNC frames */
		rc = mchp_rds_phy_modify_mmd(clock, MCHP_RDS_PTP_TX_MOD,
					     MCHP_RDS_PTP_PORT,
					     MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT,
					     MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT);
	else
		/* Disable insertion of the TX timestamp into SYNC frames */
		rc = mchp_rds_phy_modify_mmd(clock, MCHP_RDS_PTP_TX_MOD,
					     MCHP_RDS_PTP_PORT,
					     MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT,
					     (u16)~MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT);

	if (rc < 0)
		return rc;

	/* In case of multiple starts and stops, these need to be cleared */
	spin_lock_irqsave(&clock->rx_ts_lock, flags);
	list_for_each_entry_safe(rx_ts, tmp, &clock->rx_ts_list, list) {
		list_del(&rx_ts->list);
		kfree(rx_ts);
	}
	spin_unlock_irqrestore(&clock->rx_ts_lock, flags);

	rc = mchp_rds_ptp_flush_fifo(clock, MCHP_RDS_PTP_INGRESS_FIFO);
	if (rc < 0)
		return rc;

	rc = mchp_rds_ptp_flush_fifo(clock, MCHP_RDS_PTP_EGRESS_FIFO);
	if (rc < 0)
		return rc;

	/* Now enable the timestamping interrupts */
	rc = mchp_rds_ptp_config_intr(clock,
				      config->rx_filter != HWTSTAMP_FILTER_NONE);
	if (rc < 0)
		return rc;

	clock->hwts_tx_type = config->tx_type;
	clock->rx_filter = config->rx_filter;

	return 0;
}

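/* Report the PHC index and the supported tx_types/rx_filters to ethtool
 * (get_ts_info, i.e. "ethtool -T <dev>").
 */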
static int mchp_rds_ptp_ts_info(struct mii_timestamper *mii_ts,
				struct kernel_ethtool_ts_info *info)
{
	struct mchp_rds_ptp_clock *clock = container_of(mii_ts,
							struct mchp_rds_ptp_clock,
							mii_ts);

	info->phc_index = ptp_clock_index(clock->ptp_clock);

	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
			 BIT(HWTSTAMP_TX_ONESTEP_SYNC);

	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);

	return 0;
}

static int mchp_rds_ptp_ltc_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct mchp_rds_ptp_clock *clock = container_of(info,
							struct mchp_rds_ptp_clock,
							caps);
	struct timespec64 ts;
	bool add = true;
	int rc = 0;
	u32 nsec;
	s32 sec;

	/* The HW allows stepping the time by up to 15 sec at once, but limit
	 * the adjustment to 10 sec here. The reason is that for an adjustment
	 * of e.g. 14 sec and 999999999 nsec, the 8 ns added below to
	 * compensate for the normal increment could push the value past
	 * 15 sec. Limiting the range avoids these corner cases.
	 */
	if (delta > 10000000000LL || delta < -10000000000LL) {
		/* The adjustment is too big, so fall back to setting the time */
		u64 now;

		info->gettime64(info, &ts);

		now = ktime_to_ns(timespec64_to_ktime(ts));
		ts = ns_to_timespec64(now + delta);

		info->settime64(info, &ts);
		return 0;
	}

	sec = div_u64_rem(abs(delta), NSEC_PER_SEC, &nsec);
	if (delta < 0 && nsec != 0) {
		/* The nsec part cannot be stepped backwards, therefore take
		 * one more second from the seconds part and add the
		 * complement to the nanoseconds, so the overall value rolls
		 * over correctly.
		 */
		sec--;
		nsec = NSEC_PER_SEC - nsec;
	}

	/* Calculate the adjustments and the direction */
	if (delta < 0)
		add = false;

	if (nsec > 0) {
		/* add 8 ns to cover the likely normal increment */
		nsec += 8;

		if (nsec >= NSEC_PER_SEC) {
			/* carry into seconds */
			sec++;
			nsec -= NSEC_PER_SEC;
		}
	}

	mutex_lock(&clock->ptp_lock);
	if (sec) {
		sec = abs(sec);

		rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_LO,
					    MCHP_RDS_PTP_CLOCK, sec);
		if (rc < 0)
			goto out_unlock;

		rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_HI,
					       MCHP_RDS_PTP_CLOCK,
					       ((add ? MCHP_RDS_PTP_STEP_ADJ_HI_DIR : 0) |
						((sec >> 16) & GENMASK(13, 0))));
		if (rc < 0)
			goto out_unlock;

		rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
					       MCHP_RDS_PTP_CLOCK,
					       MCHP_RDS_PTP_CMD_CTL_LTC_STEP_SEC);
		if (rc < 0)
			goto out_unlock;
	}

	if (nsec) {
		rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_LO,
					    MCHP_RDS_PTP_CLOCK,
					    nsec & GENMASK(15, 0));
		if (rc < 0)
			goto out_unlock;

		rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_HI,
					    MCHP_RDS_PTP_CLOCK,
					    (nsec >> 16) & GENMASK(13, 0));
		if (rc < 0)
			goto out_unlock;

		rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
					       MCHP_RDS_PTP_CLOCK,
					       MCHP_RDS_PTP_CMD_CTL_LTC_STEP_NSEC);
	}

	mutex_unlock(&clock->ptp_lock);
	info->gettime64(info, &ts);
	mutex_lock(&clock->ptp_lock);

	/* Target update is required for pulse generation on events that
	 * are enabled
	 */
	if (clock->mchp_rds_ptp_event >= 0)
		mchp_set_clock_target(clock,
				      ts.tv_sec + MCHP_RDS_PTP_BUFFER_TIME, 0);
out_unlock:
	mutex_unlock(&clock->ptp_lock);

	return rc;
}

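/* Frequency adjustment. scaled_ppm is the requested offset in ppm with a
 * 16-bit fractional part; it is converted to the hardware rate word using
 * MCHP_RDS_PTP_1PPM_FORMAT, presumably the register value that corresponds
 * to a 1 ppm offset. The direction bit lives in the HI register.
 */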
static int mchp_rds_ptp_ltc_adjfine(struct ptp_clock_info *info,
				    long scaled_ppm)
{
	struct mchp_rds_ptp_clock *clock = container_of(info,
							struct mchp_rds_ptp_clock,
							caps);
	u16 rate_lo, rate_hi;
	bool faster = true;
	u32 rate;
	int rc;

	if (!scaled_ppm)
		return 0;

	if (scaled_ppm < 0) {
		scaled_ppm = -scaled_ppm;
		faster = false;
	}

	rate = MCHP_RDS_PTP_1PPM_FORMAT * (upper_16_bits(scaled_ppm));
	rate += (MCHP_RDS_PTP_1PPM_FORMAT * (lower_16_bits(scaled_ppm))) >> 16;

	rate_lo = rate & GENMASK(15, 0);
	rate_hi = (rate >> 16) & GENMASK(13, 0);

	if (faster)
		rate_hi |= MCHP_RDS_PTP_LTC_RATE_ADJ_HI_DIR;

	mutex_lock(&clock->ptp_lock);
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_RATE_ADJ_HI,
				    MCHP_RDS_PTP_CLOCK, rate_hi);
	if (rc < 0)
		goto error;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_RATE_ADJ_LO,
				    MCHP_RDS_PTP_CLOCK, rate_lo);
	if (rc > 0)
		rc = 0;
error:
	mutex_unlock(&clock->ptp_lock);

	return rc;
}

static int mchp_rds_ptp_ltc_gettime64(struct ptp_clock_info *info,
				      struct timespec64 *ts)
{
	struct mchp_rds_ptp_clock *clock = container_of(info,
							struct mchp_rds_ptp_clock,
							caps);
	time64_t secs;
	int rc = 0;
	s64 nsecs;

	mutex_lock(&clock->ptp_lock);
	/* Set read bit to 1 to save current values of 1588 local time counter
	 * into PTP LTC seconds and nanoseconds registers.
	 */
	rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
				       MCHP_RDS_PTP_CLOCK,
				       MCHP_RDS_PTP_CMD_CTL_CLOCK_READ);
	if (rc < 0)
		goto out_unlock;

	/* Get LTC clock values */
	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_HI,
				   MCHP_RDS_PTP_CLOCK);
	if (rc < 0)
		goto out_unlock;
	secs = rc << 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_MID,
				   MCHP_RDS_PTP_CLOCK);
	if (rc < 0)
		goto out_unlock;
	secs |= rc;
	secs <<= 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_LO,
				   MCHP_RDS_PTP_CLOCK);
	if (rc < 0)
		goto out_unlock;
	secs |= rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_NS_HI,
				   MCHP_RDS_PTP_CLOCK);
	if (rc < 0)
		goto out_unlock;
	nsecs = (rc & GENMASK(13, 0));
	nsecs <<= 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_NS_LO,
				   MCHP_RDS_PTP_CLOCK);
	if (rc < 0)
		goto out_unlock;
	nsecs |= rc;

	set_normalized_timespec64(ts, secs, nsecs);

	if (rc > 0)
		rc = 0;
out_unlock:
	mutex_unlock(&clock->ptp_lock);

	return rc;
}

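/* Set the LTC to an absolute time: stage the 48-bit seconds and 30-bit
 * nanoseconds in the LTC load registers, then latch them with CLOCK_LOAD.
 */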
static int mchp_rds_ptp_ltc_settime64(struct ptp_clock_info *info,
				      const struct timespec64 *ts)
{
	struct mchp_rds_ptp_clock *clock = container_of(info,
							struct mchp_rds_ptp_clock,
							caps);
	int rc;

	mutex_lock(&clock->ptp_lock);
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_LO,
				    MCHP_RDS_PTP_CLOCK,
				    lower_16_bits(ts->tv_sec));
	if (rc < 0)
		goto out_unlock;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_MID,
				    MCHP_RDS_PTP_CLOCK,
				    upper_16_bits(ts->tv_sec));
	if (rc < 0)
		goto out_unlock;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_HI,
				    MCHP_RDS_PTP_CLOCK,
				    upper_32_bits(ts->tv_sec) & GENMASK(15, 0));
	if (rc < 0)
		goto out_unlock;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_NS_LO,
				    MCHP_RDS_PTP_CLOCK,
				    lower_16_bits(ts->tv_nsec));
	if (rc < 0)
		goto out_unlock;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_NS_HI,
				    MCHP_RDS_PTP_CLOCK,
				    upper_16_bits(ts->tv_nsec) & GENMASK(13, 0));
	if (rc < 0)
		goto out_unlock;

	/* Set load bit to 1 to write PTP LTC seconds and nanoseconds
	 * registers to 1588 local time counter.
	 */
	rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
				       MCHP_RDS_PTP_CLOCK,
				       MCHP_RDS_PTP_CMD_CTL_CLOCK_LOAD);
	if (rc > 0)
		rc = 0;
out_unlock:
	mutex_unlock(&clock->ptp_lock);

	return rc;
}

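/* TX timestamp path: frames queued by the txtstamp() callback are matched
 * against timestamps popped from the egress FIFO, using the PTP sequenceId
 * as the signature.
 */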
static bool mchp_rds_ptp_get_sig_tx(struct sk_buff *skb, u16 *sig)
{
	struct ptp_header *ptp_header;
	int type;

	type = ptp_classify_raw(skb);
	if (type == PTP_CLASS_NONE)
		return false;

	ptp_header = ptp_parse_header(skb, type);
	if (!ptp_header)
		return false;

	*sig = (__force u16)(ntohs(ptp_header->sequence_id));

	return true;
}

static void mchp_rds_ptp_match_tx_skb(struct mchp_rds_ptp_clock *clock,
				      u32 seconds, u32 nsec, u16 seq_id)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct sk_buff *skb, *skb_tmp;
	unsigned long flags;
	bool rc = false;
	u16 skb_sig;

	spin_lock_irqsave(&clock->tx_queue.lock, flags);
	skb_queue_walk_safe(&clock->tx_queue, skb, skb_tmp) {
		if (!mchp_rds_ptp_get_sig_tx(skb, &skb_sig))
			continue;

		if (skb_sig != seq_id)
			continue;

		__skb_unlink(skb, &clock->tx_queue);
		rc = true;
		break;
	}
	spin_unlock_irqrestore(&clock->tx_queue.lock, flags);

	if (rc) {
		shhwtstamps.hwtstamp = ktime_set(seconds, nsec);
		skb_complete_tx_timestamp(skb, &shhwtstamps);
	}
}

static struct mchp_rds_ptp_rx_ts
		*mchp_rds_ptp_get_rx_ts(struct mchp_rds_ptp_clock *clock)
{
	struct phy_device *phydev = clock->phydev;
	struct mchp_rds_ptp_rx_ts *rx_ts = NULL;
	u32 sec, nsec;
	int rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_NS_HI,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		goto error;
	if (!(rc & MCHP_RDS_PTP_RX_INGRESS_NS_HI_TS_VALID)) {
		phydev_err(phydev, "RX Timestamp is not valid!\n");
		goto error;
	}
	nsec = (rc & GENMASK(13, 0)) << 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_NS_LO,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		goto error;
	nsec |= rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_SEC_HI,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		goto error;
	sec = rc << 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_SEC_LO,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		goto error;
	sec |= rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_MSG_HDR2,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		goto error;

	rx_ts = kmalloc(sizeof(*rx_ts), GFP_KERNEL);
	if (!rx_ts)
		return NULL;

	rx_ts->seconds = sec;
	rx_ts->nsec = nsec;
	rx_ts->seq_id = rc;

error:
	return rx_ts;
}

static void mchp_rds_ptp_process_rx_ts(struct mchp_rds_ptp_clock *clock)
{
	int caps;

	do {
		struct mchp_rds_ptp_rx_ts *rx_ts;

		rx_ts = mchp_rds_ptp_get_rx_ts(clock);
		if (rx_ts)
			mchp_rds_ptp_match_rx_ts(clock, rx_ts);

		caps = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_CAP_INFO,
					     MCHP_RDS_PTP_PORT);
		if (caps < 0)
			return;
	} while (MCHP_RDS_PTP_RX_TS_CNT(caps) > 0);
}

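/* Read one TX timestamp (seconds, nanoseconds, sequenceId) from the egress
 * FIFO registers. Returns false if no valid timestamp is available or a
 * register read fails.
 */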
static bool mchp_rds_ptp_get_tx_ts(struct mchp_rds_ptp_clock *clock,
				   u32 *sec, u32 *nsec, u16 *seq)
{
	int rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_NS_HI,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		return false;
	if (!(rc & MCHP_RDS_PTP_TX_EGRESS_NS_HI_TS_VALID))
		return false;
	*nsec = (rc & GENMASK(13, 0)) << 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_NS_LO,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		return false;
	*nsec = *nsec | rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_SEC_HI,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		return false;
	*sec = rc << 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_SEC_LO,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		return false;
	*sec = *sec | rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_MSG_HDR2,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		return false;

	*seq = rc;

	return true;
}

static void mchp_rds_ptp_process_tx_ts(struct mchp_rds_ptp_clock *clock)
{
	int caps;

	do {
		u32 sec, nsec;
		u16 seq;

		if (mchp_rds_ptp_get_tx_ts(clock, &sec, &nsec, &seq))
			mchp_rds_ptp_match_tx_skb(clock, sec, nsec, seq);

		caps = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_CAP_INFO,
					     MCHP_RDS_PTP_PORT);
		if (caps < 0)
			return;
	} while (MCHP_RDS_PTP_TX_TS_CNT(caps) > 0);
}

int mchp_rds_ptp_top_config_intr(struct mchp_rds_ptp_clock *clock,
				 u16 reg, u16 val, bool clear)
{
	if (clear)
		return phy_clear_bits_mmd(clock->phydev, PTP_MMD(clock), reg,
					  val);
	else
		return phy_set_bits_mmd(clock->phydev, PTP_MMD(clock), reg,
					val);
}
EXPORT_SYMBOL_GPL(mchp_rds_ptp_top_config_intr);

irqreturn_t mchp_rds_ptp_handle_interrupt(struct mchp_rds_ptp_clock *clock)
{
	int irq_sts;

	/* To handle rogue interrupt scenarios */
	if (!clock)
		return IRQ_NONE;

	do {
		irq_sts = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_INT_STS,
						MCHP_RDS_PTP_PORT);
		if (irq_sts < 0)
			return IRQ_NONE;

		if (irq_sts & MCHP_RDS_PTP_INT_RX_TS_EN)
			mchp_rds_ptp_process_rx_ts(clock);

		if (irq_sts & MCHP_RDS_PTP_INT_TX_TS_EN)
			mchp_rds_ptp_process_tx_ts(clock);

		if (irq_sts & MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN)
			mchp_rds_ptp_flush_fifo(clock,
						MCHP_RDS_PTP_EGRESS_FIFO);

		if (irq_sts & MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN)
			mchp_rds_ptp_flush_fifo(clock,
						MCHP_RDS_PTP_INGRESS_FIFO);
	} while (irq_sts & (MCHP_RDS_PTP_INT_RX_TS_EN |
			    MCHP_RDS_PTP_INT_TX_TS_EN |
			    MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN |
			    MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN));

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mchp_rds_ptp_handle_interrupt);

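/* One-time hardware bring-up: disable PTP and the TSU, reset the TSU to clear
 * stale interrupt status, program latency correction, operating mode,
 * reference clock and parser defaults, then re-enable the TSU and PTP.
 */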
static int mchp_rds_ptp_init(struct mchp_rds_ptp_clock *clock)
{
	int rc;

	/* Disable PTP */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
				    MCHP_RDS_PTP_CLOCK,
				    MCHP_RDS_PTP_CMD_CTL_DIS);
	if (rc < 0)
		return rc;

	/* Disable TSU */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_GEN_CONFIG,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	/* Clear PTP interrupt status registers */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_HARD_RESET,
				    MCHP_RDS_PTP_PORT,
				    MCHP_RDS_PTP_TSU_HARDRESET);
	if (rc < 0)
		return rc;

	/* Predictor enable */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LATENCY_CORRECTION_CTL,
				    MCHP_RDS_PTP_CLOCK,
				    MCHP_RDS_PTP_LATENCY_SETTING);
	if (rc < 0)
		return rc;

	/* Configure PTP operational mode */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_OP_MODE,
				    MCHP_RDS_PTP_CLOCK,
				    MCHP_RDS_PTP_OP_MODE_STANDALONE);
	if (rc < 0)
		return rc;

	/* Reference clock configuration */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_REF_CLK_CFG,
				    MCHP_RDS_PTP_CLOCK,
				    MCHP_RDS_PTP_REF_CLK_CFG_SET);
	if (rc < 0)
		return rc;

	/* Classifier configurations */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_CONFIG,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_CONFIG,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_L2_ADDR_EN,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_L2_ADDR_EN,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_IPV4_ADDR_EN,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_IPV4_ADDR_EN,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_VERSION,
				    MCHP_RDS_PTP_PORT,
				    MCHP_RDS_PTP_MAX_VERSION(0xff) |
				    MCHP_RDS_PTP_MIN_VERSION(0x0));
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_VERSION,
				    MCHP_RDS_PTP_PORT,
				    MCHP_RDS_PTP_MAX_VERSION(0xff) |
				    MCHP_RDS_PTP_MIN_VERSION(0x0));
	if (rc < 0)
		return rc;

	/* Enable TSU */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_GEN_CONFIG,
				    MCHP_RDS_PTP_PORT,
				    MCHP_RDS_PTP_TSU_GEN_CFG_TSU_EN);
	if (rc < 0)
		return rc;

	/* Enable PTP */
	return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
				      MCHP_RDS_PTP_CLOCK,
				      MCHP_RDS_PTP_CMD_CTL_EN);
}

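/* Allocate the clock state, register the PTP clock and hook up the
 * mii_timestamper for an RDS PHY. Returns the clock on success, NULL when
 * PHC support is disabled in the kernel configuration, or an ERR_PTR() on
 * failure.
 */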
struct mchp_rds_ptp_clock *mchp_rds_ptp_probe(struct phy_device *phydev, u8 mmd,
					      u16 clk_base_addr,
					      u16 port_base_addr)
{
	struct mchp_rds_ptp_clock *clock;
	int rc;

	clock = devm_kzalloc(&phydev->mdio.dev, sizeof(*clock), GFP_KERNEL);
	if (!clock)
		return ERR_PTR(-ENOMEM);

	clock->port_base_addr = port_base_addr;
	clock->clk_base_addr = clk_base_addr;
	clock->mmd = mmd;

	mutex_init(&clock->ptp_lock);
	clock->pin_config = devm_kmalloc_array(&phydev->mdio.dev,
					       MCHP_RDS_PTP_N_PIN,
					       sizeof(*clock->pin_config),
					       GFP_KERNEL);
	if (!clock->pin_config)
		return ERR_PTR(-ENOMEM);

	for (int i = 0; i < MCHP_RDS_PTP_N_PIN; ++i) {
		struct ptp_pin_desc *p = &clock->pin_config[i];

		memset(p, 0, sizeof(*p));
		snprintf(p->name, sizeof(p->name), "pin%d", i);
		p->index = i;
		p->func = PTP_PF_NONE;
	}

	/* Register PTP clock */
	clock->caps.owner = THIS_MODULE;
	snprintf(clock->caps.name, 30, "%s", phydev->drv->name);
	clock->caps.max_adj = MCHP_RDS_PTP_MAX_ADJ;
	clock->caps.n_ext_ts = 0;
	clock->caps.pps = 0;
	clock->caps.n_pins = MCHP_RDS_PTP_N_PIN;
	clock->caps.n_per_out = MCHP_RDS_PTP_N_PEROUT;
	clock->caps.supported_perout_flags = PTP_PEROUT_DUTY_CYCLE;
	clock->caps.pin_config = clock->pin_config;
	clock->caps.adjfine = mchp_rds_ptp_ltc_adjfine;
	clock->caps.adjtime = mchp_rds_ptp_ltc_adjtime;
	clock->caps.gettime64 = mchp_rds_ptp_ltc_gettime64;
	clock->caps.settime64 = mchp_rds_ptp_ltc_settime64;
	clock->caps.enable = mchp_rds_ptpci_enable;
	clock->caps.verify = mchp_rds_ptpci_verify;
	clock->caps.getcrosststamp = NULL;
	clock->ptp_clock = ptp_clock_register(&clock->caps,
					      &phydev->mdio.dev);
	if (IS_ERR(clock->ptp_clock))
		return ERR_PTR(-EINVAL);

	/* Check if PHC support is missing at the configuration level */
	if (!clock->ptp_clock)
		return NULL;

	/* Initialize the SW */
	skb_queue_head_init(&clock->tx_queue);
	skb_queue_head_init(&clock->rx_queue);
	INIT_LIST_HEAD(&clock->rx_ts_list);
	spin_lock_init(&clock->rx_ts_lock);

	clock->mii_ts.rxtstamp = mchp_rds_ptp_rxtstamp;
	clock->mii_ts.txtstamp = mchp_rds_ptp_txtstamp;
	clock->mii_ts.hwtstamp_set = mchp_rds_ptp_hwtstamp_set;
	clock->mii_ts.hwtstamp_get = mchp_rds_ptp_hwtstamp_get;
	clock->mii_ts.ts_info = mchp_rds_ptp_ts_info;

	phydev->mii_ts = &clock->mii_ts;

	clock->mchp_rds_ptp_event = -1;

	/* Timestamp selected by default to keep legacy API */
	phydev->default_timestamp = true;

	clock->phydev = phydev;

	rc = mchp_rds_ptp_init(clock);
	if (rc < 0)
		return ERR_PTR(rc);

	return clock;
}
EXPORT_SYMBOL_GPL(mchp_rds_ptp_probe);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MICROCHIP PHY RDS PTP driver");
MODULE_AUTHOR("Divya Koppera");