// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2024 Microchip Technology

#include "microchip_rds_ptp.h"

static int mchp_rds_phy_read_mmd(struct mchp_rds_ptp_clock *clock,
				 u32 offset, enum mchp_rds_ptp_base base)
{
	struct phy_device *phydev = clock->phydev;
	u32 addr;

	addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
			  BASE_CLK(clock)));

	return phy_read_mmd(phydev, PTP_MMD(clock), addr);
}

static int mchp_rds_phy_write_mmd(struct mchp_rds_ptp_clock *clock,
				  u32 offset, enum mchp_rds_ptp_base base,
				  u16 val)
{
	struct phy_device *phydev = clock->phydev;
	u32 addr;

	addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
			  BASE_CLK(clock)));

	return phy_write_mmd(phydev, PTP_MMD(clock), addr, val);
}

static int mchp_rds_phy_modify_mmd(struct mchp_rds_ptp_clock *clock,
				   u32 offset, enum mchp_rds_ptp_base base,
				   u16 mask, u16 val)
{
	struct phy_device *phydev = clock->phydev;
	u32 addr;

	addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
			  BASE_CLK(clock)));

	return phy_modify_mmd(phydev, PTP_MMD(clock), addr, mask, val);
}

static int mchp_rds_phy_set_bits_mmd(struct mchp_rds_ptp_clock *clock,
				     u32 offset, enum mchp_rds_ptp_base base,
				     u16 val)
{
	struct phy_device *phydev = clock->phydev;
	u32 addr;

	addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
			  BASE_CLK(clock)));

	return phy_set_bits_mmd(phydev, PTP_MMD(clock), addr, val);
}

static int mchp_get_pulsewidth(struct phy_device *phydev,
			       struct ptp_perout_request *perout_request,
			       int *pulse_width)
{
	struct timespec64 ts_period;
	s64 ts_on_nsec, period_nsec;
	struct timespec64 ts_on;
	static const s64 sup_on_necs[] = {
		100,		/* 100ns */
		500,		/* 500ns */
		1000,		/* 1us */
		5000,		/* 5us */
		10000,		/* 10us */
		50000,		/* 50us */
		100000,		/* 100us */
		500000,		/* 500us */
		1000000,	/* 1ms */
		5000000,	/* 5ms */
		10000000,	/* 10ms */
		50000000,	/* 50ms */
		100000000,	/* 100ms */
		200000000,	/* 200ms */
	};

	ts_period.tv_sec = perout_request->period.sec;
	ts_period.tv_nsec = perout_request->period.nsec;

	ts_on.tv_sec = perout_request->on.sec;
	ts_on.tv_nsec = perout_request->on.nsec;
	ts_on_nsec = timespec64_to_ns(&ts_on);
	period_nsec = timespec64_to_ns(&ts_period);

	if (period_nsec < 200) {
		phydev_warn(phydev, "perout period small, minimum is 200ns\n");
		return -EOPNOTSUPP;
	}

	/* Reject on-times above the largest supported pulse width, otherwise
	 * *pulse_width would be used uninitialized below
	 */
	if (ts_on_nsec > sup_on_necs[ARRAY_SIZE(sup_on_necs) - 1]) {
		phydev_warn(phydev, "perout on time too long, maximum is 200ms\n");
		return -EOPNOTSUPP;
	}

	for (int i = 0; i < ARRAY_SIZE(sup_on_necs); i++) {
		if (ts_on_nsec <= sup_on_necs[i]) {
			*pulse_width = i;
			break;
		}
	}

	phydev_info(phydev, "pulse width is %d\n", *pulse_width);
	return 0;
}

static int mchp_general_event_config(struct mchp_rds_ptp_clock *clock,
				     int pulse_width)
{
	int general_config;

	general_config = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_GEN_CFG,
					       MCHP_RDS_PTP_CLOCK);
	if (general_config < 0)
		return general_config;

	general_config &= ~MCHP_RDS_PTP_GEN_CFG_LTC_EVT_MASK;
	general_config |= MCHP_RDS_PTP_GEN_CFG_LTC_EVT_SET(pulse_width);
	general_config &= ~MCHP_RDS_PTP_GEN_CFG_RELOAD_ADD;
	general_config |= MCHP_RDS_PTP_GEN_CFG_POLARITY;

	return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_GEN_CFG,
				      MCHP_RDS_PTP_CLOCK, general_config);
}
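
/*
 * The LTC target and reload (period) values below are programmed through
 * pairs of 16-bit registers: seconds are split across a LO/HI pair, and
 * nanoseconds across a LO/HI pair of which only the low 14 bits of the HI
 * half are used (a nanosecond count below NSEC_PER_SEC fits in 30 bits).
 * For example, a 1 s period is written as SEC_LO = 1, SEC_HI = 0,
 * NS_LO = 0, NS_HI = 0.
 */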

static int mchp_set_clock_reload(struct mchp_rds_ptp_clock *clock,
				 s64 period_sec, u32 period_nsec)
{
	int rc;

	rc = mchp_rds_phy_write_mmd(clock,
				    MCHP_RDS_PTP_CLK_TRGT_RELOAD_SEC_LO,
				    MCHP_RDS_PTP_CLOCK,
				    lower_16_bits(period_sec));
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock,
				    MCHP_RDS_PTP_CLK_TRGT_RELOAD_SEC_HI,
				    MCHP_RDS_PTP_CLOCK,
				    upper_16_bits(period_sec));
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock,
				    MCHP_RDS_PTP_CLK_TRGT_RELOAD_NS_LO,
				    MCHP_RDS_PTP_CLOCK,
				    lower_16_bits(period_nsec));
	if (rc < 0)
		return rc;

	return mchp_rds_phy_write_mmd(clock,
				      MCHP_RDS_PTP_CLK_TRGT_RELOAD_NS_HI,
				      MCHP_RDS_PTP_CLOCK,
				      upper_16_bits(period_nsec) & 0x3fff);
}

static int mchp_set_clock_target(struct mchp_rds_ptp_clock *clock,
				 s64 start_sec, u32 start_nsec)
{
	int rc;

	/* Set the start time */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CLK_TRGT_SEC_LO,
				    MCHP_RDS_PTP_CLOCK,
				    lower_16_bits(start_sec));
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CLK_TRGT_SEC_HI,
				    MCHP_RDS_PTP_CLOCK,
				    upper_16_bits(start_sec));
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CLK_TRGT_NS_LO,
				    MCHP_RDS_PTP_CLOCK,
				    lower_16_bits(start_nsec));
	if (rc < 0)
		return rc;

	return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CLK_TRGT_NS_HI,
				      MCHP_RDS_PTP_CLOCK,
				      upper_16_bits(start_nsec) & 0x3fff);
}

static int mchp_rds_ptp_perout_off(struct mchp_rds_ptp_clock *clock)
{
	int general_config;
	int rc;

	/* Set the target time so far in the future that the periodic output
	 * is effectively disabled
	 */
	rc = mchp_set_clock_target(clock, 0xFFFFFFFF, 0);
	if (rc < 0)
		return rc;

	general_config = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_GEN_CFG,
					       MCHP_RDS_PTP_CLOCK);
	if (general_config < 0)
		return general_config;

	general_config |= MCHP_RDS_PTP_GEN_CFG_RELOAD_ADD;
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_GEN_CFG,
				    MCHP_RDS_PTP_CLOCK, general_config);
	if (rc < 0)
		return rc;

	clock->mchp_rds_ptp_event = -1;

	return 0;
}

static bool mchp_get_event(struct mchp_rds_ptp_clock *clock, int pin)
{
	if (clock->mchp_rds_ptp_event < 0 && pin == clock->event_pin) {
		clock->mchp_rds_ptp_event = pin;
		return true;
	}

	return false;
}

static int mchp_rds_ptp_perout(struct ptp_clock_info *ptpci,
			       struct ptp_perout_request *perout, int on)
{
	struct mchp_rds_ptp_clock *clock = container_of(ptpci,
							struct mchp_rds_ptp_clock,
							caps);
	struct phy_device *phydev = clock->phydev;
	int ret, event_pin, pulsewidth;

	/* Reject requests with unsupported flags */
	if (perout->flags & ~PTP_PEROUT_DUTY_CYCLE)
		return -EOPNOTSUPP;

	event_pin = ptp_find_pin(clock->ptp_clock, PTP_PF_PEROUT,
				 perout->index);
	if (event_pin != clock->event_pin)
		return -EINVAL;

	if (!on) {
		ret = mchp_rds_ptp_perout_off(clock);
		return ret;
	}

	if (!mchp_get_event(clock, event_pin))
		return -EINVAL;

	ret = mchp_get_pulsewidth(phydev, perout, &pulsewidth);
	if (ret < 0)
		return ret;

	/* Configure to pulse every period */
	ret = mchp_general_event_config(clock, pulsewidth);
	if (ret < 0)
		return ret;

	ret = mchp_set_clock_target(clock, perout->start.sec,
				    perout->start.nsec);
	if (ret < 0)
		return ret;

	return mchp_set_clock_reload(clock, perout->period.sec,
				     perout->period.nsec);
}
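
/*
 * Periodic output requests reach mchp_rds_ptp_perout() above through the
 * .enable callback below (PTP_CLK_REQ_PEROUT). As an illustration only (not
 * part of this driver), such a request can be generated from user space with
 * the testptp selftest tool, e.g. "testptp -d /dev/ptp0 -p 1000000000
 * -w 100000" for a 1 Hz output with a 100 us pulse, assuming the event pin
 * has been assigned the PTP_PF_PEROUT function.
 */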

static int mchp_rds_ptpci_enable(struct ptp_clock_info *ptpci,
				 struct ptp_clock_request *request, int on)
{
	switch (request->type) {
	case PTP_CLK_REQ_PEROUT:
		return mchp_rds_ptp_perout(ptpci, &request->perout, on);
	default:
		return -EINVAL;
	}
}

static int mchp_rds_ptpci_verify(struct ptp_clock_info *ptpci, unsigned int pin,
				 enum ptp_pin_function func, unsigned int chan)
{
	struct mchp_rds_ptp_clock *clock = container_of(ptpci,
							struct mchp_rds_ptp_clock,
							caps);

	if (!(pin == clock->event_pin && chan == 0))
		return -1;

	switch (func) {
	case PTP_PF_NONE:
	case PTP_PF_PEROUT:
		break;
	default:
		return -1;
	}

	return 0;
}

static int mchp_rds_ptp_flush_fifo(struct mchp_rds_ptp_clock *clock,
				   enum mchp_rds_ptp_fifo_dir dir)
{
	int rc;

	if (dir == MCHP_RDS_PTP_EGRESS_FIFO)
		skb_queue_purge(&clock->tx_queue);
	else
		skb_queue_purge(&clock->rx_queue);

	for (int i = 0; i < MCHP_RDS_PTP_FIFO_SIZE; ++i) {
		rc = mchp_rds_phy_read_mmd(clock,
					   dir == MCHP_RDS_PTP_EGRESS_FIFO ?
					   MCHP_RDS_PTP_TX_MSG_HDR2 :
					   MCHP_RDS_PTP_RX_MSG_HDR2,
					   MCHP_RDS_PTP_PORT);
		if (rc < 0)
			return rc;
	}
	return mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_INT_STS,
				     MCHP_RDS_PTP_PORT);
}

static int mchp_rds_ptp_config_intr(struct mchp_rds_ptp_clock *clock,
				    bool enable)
{
	/* Enable or disable PTP interrupts */
	return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_INT_EN,
				      MCHP_RDS_PTP_PORT,
				      enable ? MCHP_RDS_PTP_INT_ALL_MSK : 0);
}

static void mchp_rds_ptp_txtstamp(struct mii_timestamper *mii_ts,
				  struct sk_buff *skb, int type)
{
	struct mchp_rds_ptp_clock *clock = container_of(mii_ts,
							struct mchp_rds_ptp_clock,
							mii_ts);

	switch (clock->hwts_tx_type) {
	case HWTSTAMP_TX_ONESTEP_SYNC:
		if (ptp_msg_is_sync(skb, type)) {
			kfree_skb(skb);
			return;
		}
		fallthrough;
	case HWTSTAMP_TX_ON:
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		skb_queue_tail(&clock->tx_queue, skb);
		break;
	case HWTSTAMP_TX_OFF:
	default:
		kfree_skb(skb);
		break;
	}
}

static bool mchp_rds_ptp_get_sig_rx(struct sk_buff *skb, u16 *sig)
{
	struct ptp_header *ptp_header;
	int type;

	skb_push(skb, ETH_HLEN);
	type = ptp_classify_raw(skb);
	if (type == PTP_CLASS_NONE)
		return false;

	ptp_header = ptp_parse_header(skb, type);
	if (!ptp_header)
		return false;

	skb_pull_inline(skb, ETH_HLEN);

	*sig = (__force u16)(ntohs(ptp_header->sequence_id));

	return true;
}

static bool mchp_rds_ptp_match_skb(struct mchp_rds_ptp_clock *clock,
				   struct mchp_rds_ptp_rx_ts *rx_ts)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct sk_buff *skb, *skb_tmp;
	unsigned long flags;
	bool rc = false;
	u16 skb_sig;

	spin_lock_irqsave(&clock->rx_queue.lock, flags);
	skb_queue_walk_safe(&clock->rx_queue, skb, skb_tmp) {
		if (!mchp_rds_ptp_get_sig_rx(skb, &skb_sig))
			continue;

		if (skb_sig != rx_ts->seq_id)
			continue;

		__skb_unlink(skb, &clock->rx_queue);

		rc = true;
		break;
	}
	spin_unlock_irqrestore(&clock->rx_queue.lock, flags);

	if (rc) {
		shhwtstamps = skb_hwtstamps(skb);
		shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds, rx_ts->nsec);
		netif_rx(skb);
	}

	return rc;
}
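
/*
 * RX timestamps and RX frames can arrive in either order: the timestamp is
 * read from the ingress FIFO in the interrupt handler, while the frame is
 * delivered through the rxtstamp hook. The two are paired purely by the PTP
 * sequenceId: unmatched timestamps are parked on rx_ts_list and unmatched
 * frames on rx_queue until their counterpart shows up.
 */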

static void mchp_rds_ptp_match_rx_ts(struct mchp_rds_ptp_clock *clock,
				     struct mchp_rds_ptp_rx_ts *rx_ts)
{
	unsigned long flags;

	/* If the timestamp could not be matched to an already received frame,
	 * keep it on the list until that frame arrives
	 */
	if (!mchp_rds_ptp_match_skb(clock, rx_ts)) {
		spin_lock_irqsave(&clock->rx_ts_lock, flags);
		list_add(&rx_ts->list, &clock->rx_ts_list);
		spin_unlock_irqrestore(&clock->rx_ts_lock, flags);
	} else {
		kfree(rx_ts);
	}
}

static void mchp_rds_ptp_match_rx_skb(struct mchp_rds_ptp_clock *clock,
				      struct sk_buff *skb)
{
	struct mchp_rds_ptp_rx_ts *rx_ts, *tmp, *rx_ts_var = NULL;
	struct skb_shared_hwtstamps *shhwtstamps;
	unsigned long flags;
	u16 skb_sig;

	if (!mchp_rds_ptp_get_sig_rx(skb, &skb_sig))
		return;

	/* Iterate over all pending RX timestamps and match them against the
	 * received skb
	 */
	spin_lock_irqsave(&clock->rx_ts_lock, flags);
	list_for_each_entry_safe(rx_ts, tmp, &clock->rx_ts_list, list) {
		/* Check if we found the signature we were looking for. */
		if (skb_sig != rx_ts->seq_id)
			continue;

		shhwtstamps = skb_hwtstamps(skb);
		shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds, rx_ts->nsec);
		netif_rx(skb);

		rx_ts_var = rx_ts;

		break;
	}
	spin_unlock_irqrestore(&clock->rx_ts_lock, flags);

	if (rx_ts_var) {
		list_del(&rx_ts_var->list);
		kfree(rx_ts_var);
	} else {
		skb_queue_tail(&clock->rx_queue, skb);
	}
}

static bool mchp_rds_ptp_rxtstamp(struct mii_timestamper *mii_ts,
				  struct sk_buff *skb, int type)
{
	struct mchp_rds_ptp_clock *clock = container_of(mii_ts,
							struct mchp_rds_ptp_clock,
							mii_ts);

	if (clock->rx_filter == HWTSTAMP_FILTER_NONE ||
	    type == PTP_CLASS_NONE)
		return false;

	if ((type & clock->version) == 0 || (type & clock->layer) == 0)
		return false;

	/* If a timestamp match is found here, the skb is delivered to the
	 * application immediately. Otherwise the skb is queued and will be
	 * delivered from the interrupt handler once its timestamp arrives.
	 * Either way the skb reaches the application, so do not return false
	 * just because no match was found here.
	 */
	mchp_rds_ptp_match_rx_skb(clock, skb);

	return true;
}

static int mchp_rds_ptp_hwtstamp(struct mii_timestamper *mii_ts,
				 struct kernel_hwtstamp_config *config,
				 struct netlink_ext_ack *extack)
{
	struct mchp_rds_ptp_clock *clock =
				container_of(mii_ts, struct mchp_rds_ptp_clock,
					     mii_ts);
	struct mchp_rds_ptp_rx_ts *rx_ts, *tmp;
	int txcfg = 0, rxcfg = 0;
	unsigned long flags;
	int rc;

	clock->hwts_tx_type = config->tx_type;
	clock->rx_filter = config->rx_filter;

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		clock->layer = 0;
		clock->version = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		clock->layer = PTP_CLASS_L4;
		clock->version = PTP_CLASS_V2;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		clock->layer = PTP_CLASS_L2;
		clock->version = PTP_CLASS_V2;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		clock->layer = PTP_CLASS_L4 | PTP_CLASS_L2;
		clock->version = PTP_CLASS_V2;
		break;
	default:
		return -ERANGE;
	}

	/* Setup parsing of the frames and enable the timestamping for PTP
	 * frames
	 */
	if (clock->layer & PTP_CLASS_L2) {
		rxcfg = MCHP_RDS_PTP_PARSE_CONFIG_LAYER2_EN;
		txcfg = MCHP_RDS_PTP_PARSE_CONFIG_LAYER2_EN;
	}
	if (clock->layer & PTP_CLASS_L4) {
		rxcfg |= MCHP_RDS_PTP_PARSE_CONFIG_IPV4_EN |
			 MCHP_RDS_PTP_PARSE_CONFIG_IPV6_EN;
		txcfg |= MCHP_RDS_PTP_PARSE_CONFIG_IPV4_EN |
			 MCHP_RDS_PTP_PARSE_CONFIG_IPV6_EN;
	}
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_CONFIG,
				    MCHP_RDS_PTP_PORT, rxcfg);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_CONFIG,
				    MCHP_RDS_PTP_PORT, txcfg);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_TIMESTAMP_EN,
				    MCHP_RDS_PTP_PORT,
				    MCHP_RDS_PTP_TIMESTAMP_EN_ALL);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_TIMESTAMP_EN,
				    MCHP_RDS_PTP_PORT,
				    MCHP_RDS_PTP_TIMESTAMP_EN_ALL);
	if (rc < 0)
		return rc;

	if (clock->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
		/* Enable insertion of the TX timestamp into SYNC frames */
		rc = mchp_rds_phy_modify_mmd(clock, MCHP_RDS_PTP_TX_MOD,
					     MCHP_RDS_PTP_PORT,
					     MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT,
					     MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT);
	else
		/* Disable insertion of the TX timestamp into SYNC frames */
		rc = mchp_rds_phy_modify_mmd(clock, MCHP_RDS_PTP_TX_MOD,
					     MCHP_RDS_PTP_PORT,
					     MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT,
					     (u16)~MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT);

	if (rc < 0)
		return rc;

	/* In case of multiple starts and stops, these need to be cleared */
	spin_lock_irqsave(&clock->rx_ts_lock, flags);
	list_for_each_entry_safe(rx_ts, tmp, &clock->rx_ts_list, list) {
		list_del(&rx_ts->list);
		kfree(rx_ts);
	}
	spin_unlock_irqrestore(&clock->rx_ts_lock, flags);

	rc = mchp_rds_ptp_flush_fifo(clock, MCHP_RDS_PTP_INGRESS_FIFO);
	if (rc < 0)
		return rc;

	rc = mchp_rds_ptp_flush_fifo(clock, MCHP_RDS_PTP_EGRESS_FIFO);
	if (rc < 0)
		return rc;

	/* Now enable the timestamping interrupts */
	rc = mchp_rds_ptp_config_intr(clock,
				      config->rx_filter != HWTSTAMP_FILTER_NONE);

	return rc < 0 ? rc : 0;
}
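
/*
 * The capabilities reported below mirror what mchp_rds_ptp_hwtstamp()
 * accepts: TX timestamping can be off, on, or one-step sync, and RX
 * filtering covers PTPv2 over L2 and/or UDP. User space typically
 * discovers these via ethtool (e.g. "ethtool -T <netdev>", shown here
 * only as an illustration).
 */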

static int mchp_rds_ptp_ts_info(struct mii_timestamper *mii_ts,
				struct kernel_ethtool_ts_info *info)
{
	struct mchp_rds_ptp_clock *clock = container_of(mii_ts,
							struct mchp_rds_ptp_clock,
							mii_ts);

	info->phc_index = ptp_clock_index(clock->ptp_clock);

	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
			 BIT(HWTSTAMP_TX_ONESTEP_SYNC);

	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);

	return 0;
}

static int mchp_rds_ptp_ltc_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct mchp_rds_ptp_clock *clock = container_of(info,
							struct mchp_rds_ptp_clock,
							caps);
	struct timespec64 ts;
	bool add = true;
	int rc = 0;
	u32 nsec;
	s32 sec;

	/* The HW allows stepping the time by up to 15 s, but limit the
	 * adjustment to 10 s here. Otherwise, for an adjustment of e.g.
	 * 14 s and 999999999 ns, the 8 ns added below to compensate for the
	 * normal increment could push the value beyond 15 s. Limiting the
	 * range avoids these corner cases.
	 */
	if (delta > 10000000000LL || delta < -10000000000LL) {
		/* The adjustment is too large, fall back to setting the time */
		u64 now;

		info->gettime64(info, &ts);

		now = ktime_to_ns(timespec64_to_ktime(ts));
		ts = ns_to_timespec64(now + delta);

		info->settime64(info, &ts);
		return 0;
	}
	sec = div_u64_rem(abs(delta), NSEC_PER_SEC, &nsec);
	if (delta < 0 && nsec != 0) {
		/* The nanosecond part cannot be stepped down, so subtract
		 * more from the seconds part and add to the nanoseconds such
		 * that it rolls over and the seconds part increases
		 */
		sec--;
		nsec = NSEC_PER_SEC - nsec;
	}

	/* Calculate the adjustments and the direction */
	if (delta < 0)
		add = false;

	if (nsec > 0) {
		/* add 8 ns to cover the likely normal increment */
		nsec += 8;

		if (nsec >= NSEC_PER_SEC) {
			/* carry into seconds */
			sec++;
			nsec -= NSEC_PER_SEC;
		}
	}

	mutex_lock(&clock->ptp_lock);
	if (sec) {
		sec = abs(sec);

		rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_LO,
					    MCHP_RDS_PTP_CLOCK, sec);
		if (rc < 0)
			goto out_unlock;

		rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_HI,
					       MCHP_RDS_PTP_CLOCK,
					       ((add ?
						 MCHP_RDS_PTP_STEP_ADJ_HI_DIR :
						 0) | ((sec >> 16) &
						       GENMASK(13, 0))));
		if (rc < 0)
			goto out_unlock;

		rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
					       MCHP_RDS_PTP_CLOCK,
					       MCHP_RDS_PTP_CMD_CTL_LTC_STEP_SEC);
		if (rc < 0)
			goto out_unlock;
	}

	if (nsec) {
		rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_LO,
					    MCHP_RDS_PTP_CLOCK,
					    nsec & GENMASK(15, 0));
		if (rc < 0)
			goto out_unlock;

		rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_HI,
					    MCHP_RDS_PTP_CLOCK,
					    (nsec >> 16) & GENMASK(13, 0));
		if (rc < 0)
			goto out_unlock;

		rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
					       MCHP_RDS_PTP_CLOCK,
					       MCHP_RDS_PTP_CMD_CTL_LTC_STEP_NSEC);
	}

	mutex_unlock(&clock->ptp_lock);
	info->gettime64(info, &ts);
	mutex_lock(&clock->ptp_lock);

	/* Target update is required for pulse generation on events that
	 * are enabled
	 */
	if (clock->mchp_rds_ptp_event >= 0)
		mchp_set_clock_target(clock,
				      ts.tv_sec + MCHP_RDS_PTP_BUFFER_TIME, 0);
out_unlock:
	mutex_unlock(&clock->ptp_lock);

	return rc;
}
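
/*
 * Frequency adjustment: scaled_ppm carries parts-per-million with a 16-bit
 * fractional part, so the computation below amounts to
 * rate ~= MCHP_RDS_PTP_1PPM_FORMAT * (scaled_ppm / 65536). For example,
 * scaled_ppm = 1 << 16 (exactly 1 ppm) programs a rate adjustment of
 * MCHP_RDS_PTP_1PPM_FORMAT, split across the HI/LO rate registers with the
 * direction bit selecting speed-up or slow-down.
 */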

static int mchp_rds_ptp_ltc_adjfine(struct ptp_clock_info *info,
				    long scaled_ppm)
{
	struct mchp_rds_ptp_clock *clock = container_of(info,
							struct mchp_rds_ptp_clock,
							caps);
	u16 rate_lo, rate_hi;
	bool faster = true;
	u32 rate;
	int rc;

	if (!scaled_ppm)
		return 0;

	if (scaled_ppm < 0) {
		scaled_ppm = -scaled_ppm;
		faster = false;
	}

	rate = MCHP_RDS_PTP_1PPM_FORMAT * (upper_16_bits(scaled_ppm));
	rate += (MCHP_RDS_PTP_1PPM_FORMAT * (lower_16_bits(scaled_ppm))) >> 16;

	rate_lo = rate & GENMASK(15, 0);
	rate_hi = (rate >> 16) & GENMASK(13, 0);

	if (faster)
		rate_hi |= MCHP_RDS_PTP_LTC_RATE_ADJ_HI_DIR;

	mutex_lock(&clock->ptp_lock);
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_RATE_ADJ_HI,
				    MCHP_RDS_PTP_CLOCK, rate_hi);
	if (rc < 0)
		goto error;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_RATE_ADJ_LO,
				    MCHP_RDS_PTP_CLOCK, rate_lo);
	if (rc > 0)
		rc = 0;
error:
	mutex_unlock(&clock->ptp_lock);

	return rc;
}

static int mchp_rds_ptp_ltc_gettime64(struct ptp_clock_info *info,
				      struct timespec64 *ts)
{
	struct mchp_rds_ptp_clock *clock = container_of(info,
							struct mchp_rds_ptp_clock,
							caps);
	time64_t secs;
	int rc = 0;
	s64 nsecs;

	mutex_lock(&clock->ptp_lock);
	/* Set read bit to 1 to save current values of 1588 local time counter
	 * into PTP LTC seconds and nanoseconds registers.
	 */
	rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
				       MCHP_RDS_PTP_CLOCK,
				       MCHP_RDS_PTP_CMD_CTL_CLOCK_READ);
	if (rc < 0)
		goto out_unlock;

	/* Get LTC clock values */
	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_HI,
				   MCHP_RDS_PTP_CLOCK);
	if (rc < 0)
		goto out_unlock;
	secs = rc << 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_MID,
				   MCHP_RDS_PTP_CLOCK);
	if (rc < 0)
		goto out_unlock;
	secs |= rc;
	secs <<= 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_LO,
				   MCHP_RDS_PTP_CLOCK);
	if (rc < 0)
		goto out_unlock;
	secs |= rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_NS_HI,
				   MCHP_RDS_PTP_CLOCK);
	if (rc < 0)
		goto out_unlock;
	nsecs = (rc & GENMASK(13, 0));
	nsecs <<= 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_NS_LO,
				   MCHP_RDS_PTP_CLOCK);
	if (rc < 0)
		goto out_unlock;
	nsecs |= rc;

	set_normalized_timespec64(ts, secs, nsecs);

	if (rc > 0)
		rc = 0;
out_unlock:
	mutex_unlock(&clock->ptp_lock);

	return rc;
}
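
/*
 * Setting the clock follows the same staging scheme as the read above: the
 * new time is first written into the LTC seconds (LO/MID/HI, 48 bits) and
 * nanoseconds (LO/HI, 30 bits) holding registers, and the CLOCK_LOAD command
 * then transfers the whole set into the 1588 local time counter in one go.
 */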

static int mchp_rds_ptp_ltc_settime64(struct ptp_clock_info *info,
				      const struct timespec64 *ts)
{
	struct mchp_rds_ptp_clock *clock = container_of(info,
							struct mchp_rds_ptp_clock,
							caps);
	int rc;

	mutex_lock(&clock->ptp_lock);
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_LO,
				    MCHP_RDS_PTP_CLOCK,
				    lower_16_bits(ts->tv_sec));
	if (rc < 0)
		goto out_unlock;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_MID,
				    MCHP_RDS_PTP_CLOCK,
				    upper_16_bits(ts->tv_sec));
	if (rc < 0)
		goto out_unlock;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_HI,
				    MCHP_RDS_PTP_CLOCK,
				    upper_32_bits(ts->tv_sec) & GENMASK(15, 0));
	if (rc < 0)
		goto out_unlock;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_NS_LO,
				    MCHP_RDS_PTP_CLOCK,
				    lower_16_bits(ts->tv_nsec));
	if (rc < 0)
		goto out_unlock;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_NS_HI,
				    MCHP_RDS_PTP_CLOCK,
				    upper_16_bits(ts->tv_nsec) & GENMASK(13, 0));
	if (rc < 0)
		goto out_unlock;

	/* Set load bit to 1 to write PTP LTC seconds and nanoseconds
	 * registers to 1588 local time counter.
	 */
	rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
				       MCHP_RDS_PTP_CLOCK,
				       MCHP_RDS_PTP_CMD_CTL_CLOCK_LOAD);
	if (rc > 0)
		rc = 0;
out_unlock:
	mutex_unlock(&clock->ptp_lock);

	return rc;
}

static bool mchp_rds_ptp_get_sig_tx(struct sk_buff *skb, u16 *sig)
{
	struct ptp_header *ptp_header;
	int type;

	type = ptp_classify_raw(skb);
	if (type == PTP_CLASS_NONE)
		return false;

	ptp_header = ptp_parse_header(skb, type);
	if (!ptp_header)
		return false;

	*sig = (__force u16)(ntohs(ptp_header->sequence_id));

	return true;
}

static void mchp_rds_ptp_match_tx_skb(struct mchp_rds_ptp_clock *clock,
				      u32 seconds, u32 nsec, u16 seq_id)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct sk_buff *skb, *skb_tmp;
	unsigned long flags;
	bool rc = false;
	u16 skb_sig;

	spin_lock_irqsave(&clock->tx_queue.lock, flags);
	skb_queue_walk_safe(&clock->tx_queue, skb, skb_tmp) {
		if (!mchp_rds_ptp_get_sig_tx(skb, &skb_sig))
			continue;

		if (skb_sig != seq_id)
			continue;

		__skb_unlink(skb, &clock->tx_queue);
		rc = true;
		break;
	}
	spin_unlock_irqrestore(&clock->tx_queue.lock, flags);

	if (rc) {
		shhwtstamps.hwtstamp = ktime_set(seconds, nsec);
		skb_complete_tx_timestamp(skb, &shhwtstamps);
	}
}
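
/*
 * Each entry popped from the ingress timestamp FIFO consists of a 30-bit
 * nanosecond value (with a validity flag in the NS_HI word), a 32-bit
 * seconds value and a header word carrying the PTP sequenceId, which is all
 * that is needed to pair it with the corresponding frame.
 */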

static struct mchp_rds_ptp_rx_ts
		*mchp_rds_ptp_get_rx_ts(struct mchp_rds_ptp_clock *clock)
{
	struct phy_device *phydev = clock->phydev;
	struct mchp_rds_ptp_rx_ts *rx_ts = NULL;
	u32 sec, nsec;
	int rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_NS_HI,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		goto error;
	if (!(rc & MCHP_RDS_PTP_RX_INGRESS_NS_HI_TS_VALID)) {
		phydev_err(phydev, "RX Timestamp is not valid!\n");
		goto error;
	}
	nsec = (rc & GENMASK(13, 0)) << 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_NS_LO,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		goto error;
	nsec |= rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_SEC_HI,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		goto error;
	sec = rc << 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_SEC_LO,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		goto error;
	sec |= rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_MSG_HDR2,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		goto error;

	rx_ts = kmalloc(sizeof(*rx_ts), GFP_KERNEL);
	if (!rx_ts)
		return NULL;

	rx_ts->seconds = sec;
	rx_ts->nsec = nsec;
	rx_ts->seq_id = rc;

error:
	return rx_ts;
}

static void mchp_rds_ptp_process_rx_ts(struct mchp_rds_ptp_clock *clock)
{
	int caps;

	do {
		struct mchp_rds_ptp_rx_ts *rx_ts;

		rx_ts = mchp_rds_ptp_get_rx_ts(clock);
		if (rx_ts)
			mchp_rds_ptp_match_rx_ts(clock, rx_ts);

		caps = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_CAP_INFO,
					     MCHP_RDS_PTP_PORT);
		if (caps < 0)
			return;
	} while (MCHP_RDS_PTP_RX_TS_CNT(caps) > 0);
}

static bool mchp_rds_ptp_get_tx_ts(struct mchp_rds_ptp_clock *clock,
				   u32 *sec, u32 *nsec, u16 *seq)
{
	int rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_NS_HI,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		return false;
	if (!(rc & MCHP_RDS_PTP_TX_EGRESS_NS_HI_TS_VALID))
		return false;
	*nsec = (rc & GENMASK(13, 0)) << 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_NS_LO,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		return false;
	*nsec = *nsec | rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_SEC_HI,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		return false;
	*sec = rc << 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_SEC_LO,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		return false;
	*sec = *sec | rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_MSG_HDR2,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		return false;

	*seq = rc;

	return true;
}

static void mchp_rds_ptp_process_tx_ts(struct mchp_rds_ptp_clock *clock)
{
	int caps;

	do {
		u32 sec, nsec;
		u16 seq;

		if (mchp_rds_ptp_get_tx_ts(clock, &sec, &nsec, &seq))
			mchp_rds_ptp_match_tx_skb(clock, sec, nsec, seq);

		caps = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_CAP_INFO,
					     MCHP_RDS_PTP_PORT);
		if (caps < 0)
			return;
	} while (MCHP_RDS_PTP_TX_TS_CNT(caps) > 0);
}

int mchp_rds_ptp_top_config_intr(struct mchp_rds_ptp_clock *clock,
				 u16 reg, u16 val, bool clear)
{
	if (clear)
		return phy_clear_bits_mmd(clock->phydev, PTP_MMD(clock), reg,
					  val);
	else
		return phy_set_bits_mmd(clock->phydev, PTP_MMD(clock), reg,
					val);
}
EXPORT_SYMBOL_GPL(mchp_rds_ptp_top_config_intr);

irqreturn_t mchp_rds_ptp_handle_interrupt(struct mchp_rds_ptp_clock *clock)
{
	int irq_sts;

	/* To handle rogue interrupt scenarios */
	if (!clock)
		return IRQ_NONE;

	do {
		irq_sts = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_INT_STS,
						MCHP_RDS_PTP_PORT);
		if (irq_sts < 0)
			return IRQ_NONE;

		if (irq_sts & MCHP_RDS_PTP_INT_RX_TS_EN)
			mchp_rds_ptp_process_rx_ts(clock);

		if (irq_sts & MCHP_RDS_PTP_INT_TX_TS_EN)
			mchp_rds_ptp_process_tx_ts(clock);

		if (irq_sts & MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN)
			mchp_rds_ptp_flush_fifo(clock,
						MCHP_RDS_PTP_EGRESS_FIFO);

		if (irq_sts & MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN)
			mchp_rds_ptp_flush_fifo(clock,
						MCHP_RDS_PTP_INGRESS_FIFO);
	} while (irq_sts & (MCHP_RDS_PTP_INT_RX_TS_EN |
			    MCHP_RDS_PTP_INT_TX_TS_EN |
			    MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN |
			    MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN));

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mchp_rds_ptp_handle_interrupt);
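
/*
 * One-time hardware bring-up: PTP and the TSU are first disabled and the TSU
 * is hard-reset (which also clears the interrupt status), then the latency
 * predictor, standalone operating mode and reference clock are configured,
 * all classifier filters are cleared, every PTP version is accepted, and
 * finally the TSU and the PTP block are enabled again.
 */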

static int mchp_rds_ptp_init(struct mchp_rds_ptp_clock *clock)
{
	int rc;

	/* Disable PTP */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
				    MCHP_RDS_PTP_CLOCK,
				    MCHP_RDS_PTP_CMD_CTL_DIS);
	if (rc < 0)
		return rc;

	/* Disable TSU */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_GEN_CONFIG,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	/* Clear PTP interrupt status registers */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_HARD_RESET,
				    MCHP_RDS_PTP_PORT,
				    MCHP_RDS_PTP_TSU_HARDRESET);
	if (rc < 0)
		return rc;

	/* Predictor enable */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LATENCY_CORRECTION_CTL,
				    MCHP_RDS_PTP_CLOCK,
				    MCHP_RDS_PTP_LATENCY_SETTING);
	if (rc < 0)
		return rc;

	/* Configure PTP operational mode */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_OP_MODE,
				    MCHP_RDS_PTP_CLOCK,
				    MCHP_RDS_PTP_OP_MODE_STANDALONE);
	if (rc < 0)
		return rc;

	/* Reference clock configuration */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_REF_CLK_CFG,
				    MCHP_RDS_PTP_CLOCK,
				    MCHP_RDS_PTP_REF_CLK_CFG_SET);
	if (rc < 0)
		return rc;

	/* Classifier configurations */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_CONFIG,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_CONFIG,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_L2_ADDR_EN,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_L2_ADDR_EN,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_IPV4_ADDR_EN,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_IPV4_ADDR_EN,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_VERSION,
				    MCHP_RDS_PTP_PORT,
				    MCHP_RDS_PTP_MAX_VERSION(0xff) |
				    MCHP_RDS_PTP_MIN_VERSION(0x0));
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_VERSION,
				    MCHP_RDS_PTP_PORT,
				    MCHP_RDS_PTP_MAX_VERSION(0xff) |
				    MCHP_RDS_PTP_MIN_VERSION(0x0));
	if (rc < 0)
		return rc;

	/* Enable TSU */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_GEN_CONFIG,
				    MCHP_RDS_PTP_PORT,
				    MCHP_RDS_PTP_TSU_GEN_CFG_TSU_EN);
	if (rc < 0)
		return rc;

	/* Enable PTP */
	return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
				      MCHP_RDS_PTP_CLOCK,
				      MCHP_RDS_PTP_CMD_CTL_EN);
}
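
/*
 * mchp_rds_ptp_probe() is the entry point for PHY drivers built on the RDS
 * PTP block. A rough usage sketch from a hypothetical PHY driver (names
 * below are illustrative, not defined by this file):
 *
 *	priv->clock = mchp_rds_ptp_probe(phydev, MDIO_MMD_VEND1,
 *					 MY_PHY_PTP_CLOCK_BASE,
 *					 MY_PHY_PTP_PORT_BASE);
 *	if (IS_ERR(priv->clock))
 *		return PTR_ERR(priv->clock);
 *
 * with mchp_rds_ptp_handle_interrupt() called from the driver's
 * handle_interrupt callback and mchp_rds_ptp_top_config_intr() used to
 * gate the top-level PTP interrupt.
 */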

struct mchp_rds_ptp_clock *mchp_rds_ptp_probe(struct phy_device *phydev, u8 mmd,
					      u16 clk_base_addr,
					      u16 port_base_addr)
{
	struct mchp_rds_ptp_clock *clock;
	int rc;

	clock = devm_kzalloc(&phydev->mdio.dev, sizeof(*clock), GFP_KERNEL);
	if (!clock)
		return ERR_PTR(-ENOMEM);

	clock->port_base_addr = port_base_addr;
	clock->clk_base_addr = clk_base_addr;
	clock->mmd = mmd;

	mutex_init(&clock->ptp_lock);
	clock->pin_config = devm_kmalloc_array(&phydev->mdio.dev,
					       MCHP_RDS_PTP_N_PIN,
					       sizeof(*clock->pin_config),
					       GFP_KERNEL);
	if (!clock->pin_config)
		return ERR_PTR(-ENOMEM);

	for (int i = 0; i < MCHP_RDS_PTP_N_PIN; ++i) {
		struct ptp_pin_desc *p = &clock->pin_config[i];

		memset(p, 0, sizeof(*p));
		snprintf(p->name, sizeof(p->name), "pin%d", i);
		p->index = i;
		p->func = PTP_PF_NONE;
	}
	/* Register PTP clock */
	clock->caps.owner = THIS_MODULE;
	snprintf(clock->caps.name, 30, "%s", phydev->drv->name);
	clock->caps.max_adj = MCHP_RDS_PTP_MAX_ADJ;
	clock->caps.n_ext_ts = 0;
	clock->caps.pps = 0;
	clock->caps.n_pins = MCHP_RDS_PTP_N_PIN;
	clock->caps.n_per_out = MCHP_RDS_PTP_N_PEROUT;
	clock->caps.pin_config = clock->pin_config;
	clock->caps.adjfine = mchp_rds_ptp_ltc_adjfine;
	clock->caps.adjtime = mchp_rds_ptp_ltc_adjtime;
	clock->caps.gettime64 = mchp_rds_ptp_ltc_gettime64;
	clock->caps.settime64 = mchp_rds_ptp_ltc_settime64;
	clock->caps.enable = mchp_rds_ptpci_enable;
	clock->caps.verify = mchp_rds_ptpci_verify;
	clock->caps.getcrosststamp = NULL;
	clock->ptp_clock = ptp_clock_register(&clock->caps,
					      &phydev->mdio.dev);
	if (IS_ERR(clock->ptp_clock))
		return ERR_PTR(-EINVAL);

	/* Check if PHC support is missing at the configuration level */
	if (!clock->ptp_clock)
		return NULL;

	/* Initialize the SW */
	skb_queue_head_init(&clock->tx_queue);
	skb_queue_head_init(&clock->rx_queue);
	INIT_LIST_HEAD(&clock->rx_ts_list);
	spin_lock_init(&clock->rx_ts_lock);

	clock->mii_ts.rxtstamp = mchp_rds_ptp_rxtstamp;
	clock->mii_ts.txtstamp = mchp_rds_ptp_txtstamp;
	clock->mii_ts.hwtstamp = mchp_rds_ptp_hwtstamp;
	clock->mii_ts.ts_info = mchp_rds_ptp_ts_info;

	phydev->mii_ts = &clock->mii_ts;

	clock->mchp_rds_ptp_event = -1;

	/* Timestamp selected by default to keep legacy API */
	phydev->default_timestamp = true;

	clock->phydev = phydev;

	rc = mchp_rds_ptp_init(clock);
	if (rc < 0)
		return ERR_PTR(rc);

	return clock;
}
EXPORT_SYMBOL_GPL(mchp_rds_ptp_probe);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MICROCHIP PHY RDS PTP driver");
MODULE_AUTHOR("Divya Koppera");