// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 *
 * The Sparx5 Chip Register Model can be browsed at this location:
 * https://github.com/microchip-ung/sparx-5_reginfo
 */
#include <linux/ptp_classify.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

#define TOD_ACC_PIN 0x4

enum {
	PTP_PIN_ACTION_IDLE = 0,
	PTP_PIN_ACTION_LOAD,
	PTP_PIN_ACTION_SAVE,
	PTP_PIN_ACTION_CLOCK,
	PTP_PIN_ACTION_DELTA,
	PTP_PIN_ACTION_TOD
};

static u64 sparx5_ptp_get_1ppm(struct sparx5 *sparx5)
{
	/* Represents a 1 ppm adjustment, expressed in units of 2^-59 ns, using
	 * the nominal clock periods 1.59687500000 ns (625 MHz),
	 * 1.99609375000 ns (500 MHz) and 3.99218750000 ns (250 MHz) as
	 * reference. The value is calculated as follows:
	 * (1/1000000)/((2^-59)/X)
	 */

	u64 res = 0;

	switch (sparx5->coreclock) {
	case SPX5_CORE_CLOCK_250MHZ:
		res = 2301339409586;
		break;
	case SPX5_CORE_CLOCK_328MHZ:
		res = 1756832768924;
		break;
	case SPX5_CORE_CLOCK_500MHZ:
		res = 1150669704793;
		break;
	case SPX5_CORE_CLOCK_625MHZ:
		res = 920535763834;
		break;
	default:
		WARN(1, "Invalid core clock");
		break;
	}

	return res;
}

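/* Nominal TOD increment per core clock cycle, in units of 2^-59 ns
 * (59 fractional bits of a nanosecond). For example, the 250 MHz value
 * 0x1FF0000000000000 equals 511 * 2^52, i.e. 511/128 = 3.99218750000 ns
 * per cycle, which matches the reference period above; 1 ppm of that
 * period is 3.9921875e-6 * 2^59 ~= 2301339409586, the value returned by
 * sparx5_ptp_get_1ppm() for the 250 MHz core clock.
 */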
static u64 sparx5_ptp_get_nominal_value(struct sparx5 *sparx5)
{
	u64 res = 0;

	switch (sparx5->coreclock) {
	case SPX5_CORE_CLOCK_250MHZ:
		res = 0x1FF0000000000000;
		break;
	case SPX5_CORE_CLOCK_328MHZ:
		res = 0x18604697DD0F9B5B;
		break;
	case SPX5_CORE_CLOCK_500MHZ:
		res = 0x0FF8000000000000;
		break;
	case SPX5_CORE_CLOCK_625MHZ:
		res = 0x0CC6666666666666;
		break;
	default:
		WARN(1, "Invalid core clock");
		break;
	}

	return res;
}

int sparx5_ptp_hwtstamp_set(struct sparx5_port *port,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct sparx5 *sparx5 = port->sparx5;
	struct sparx5_phc *phc;

	/* For now, don't allow PTP to run on ports that are part of a bridge:
	 * with a transparent clock the HW would still forward the frames, so
	 * there would be duplicate frames.
	 */
	if (test_bit(port->portno, sparx5->bridge_mask))
		return -EINVAL;

	switch (cfg->tx_type) {
	case HWTSTAMP_TX_ON:
		port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
		break;
	case HWTSTAMP_TX_ONESTEP_SYNC:
		port->ptp_cmd = IFH_REW_OP_ONE_STEP_PTP;
		break;
	case HWTSTAMP_TX_OFF:
		port->ptp_cmd = IFH_REW_OP_NOOP;
		break;
	default:
		return -ERANGE;
	}

	switch (cfg->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		cfg->rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	/* Commit back the result & save it */
	mutex_lock(&sparx5->ptp_lock);
	phc = &sparx5->phc[SPARX5_PHC_PORT];
	phc->hwtstamp_config = *cfg;
	mutex_unlock(&sparx5->ptp_lock);

	return 0;
}

void sparx5_ptp_hwtstamp_get(struct sparx5_port *port,
			     struct kernel_hwtstamp_config *cfg)
{
	struct sparx5 *sparx5 = port->sparx5;
	struct sparx5_phc *phc;

	phc = &sparx5->phc[SPARX5_PHC_PORT];
	*cfg = phc->hwtstamp_config;
}

static void sparx5_ptp_classify(struct sparx5_port *port, struct sk_buff *skb,
				u8 *rew_op, u8 *pdu_type, u8 *pdu_w16_offset)
{
	struct ptp_header *header;
	u8 msgtype;
	int type;

	if (port->ptp_cmd == IFH_REW_OP_NOOP) {
		*rew_op = IFH_REW_OP_NOOP;
		*pdu_type = IFH_PDU_TYPE_NONE;
		*pdu_w16_offset = 0;
		return;
	}

	type = ptp_classify_raw(skb);
	if (type == PTP_CLASS_NONE) {
		*rew_op = IFH_REW_OP_NOOP;
		*pdu_type = IFH_PDU_TYPE_NONE;
		*pdu_w16_offset = 0;
		return;
	}

	header = ptp_parse_header(skb, type);
	if (!header) {
		*rew_op = IFH_REW_OP_NOOP;
		*pdu_type = IFH_PDU_TYPE_NONE;
		*pdu_w16_offset = 0;
		return;
	}

	*pdu_w16_offset = 7;
	if (type & PTP_CLASS_L2)
		*pdu_type = IFH_PDU_TYPE_PTP;
	if (type & PTP_CLASS_IPV4)
		*pdu_type = IFH_PDU_TYPE_IPV4_UDP_PTP;
	if (type & PTP_CLASS_IPV6)
		*pdu_type = IFH_PDU_TYPE_IPV6_UDP_PTP;

	if (port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
		*rew_op = IFH_REW_OP_TWO_STEP_PTP;
		return;
	}

	/* If it is a sync frame and one-step is configured, set the one-step
	 * operation; otherwise run as two-step.
	 */
	msgtype = ptp_get_msgtype(header, type);
	if ((msgtype & 0xf) == 0) {
		*rew_op = IFH_REW_OP_ONE_STEP_PTP;
		return;
	}

	*rew_op = IFH_REW_OP_TWO_STEP_PTP;
}

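/* Two-step Tx timestamping: each frame that needs a two-step timestamp is
 * queued on the port's tx_skbs list, tagged with a small sequential ts_id in
 * its skb cb. sparx5_ptp_irq_handler() later pairs hardware timestamp FIFO
 * entries with the queued skbs by that id. Frames that never receive a
 * timestamp are aged out below after SPARX5_PTP_TIMEOUT jiffies.
 */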
static void sparx5_ptp_txtstamp_old_release(struct sparx5_port *port)
{
	struct sk_buff *skb, *skb_tmp;
	unsigned long flags;

	spin_lock_irqsave(&port->tx_skbs.lock, flags);
	skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
		if (time_after(SPARX5_SKB_CB(skb)->jiffies + SPARX5_PTP_TIMEOUT,
			       jiffies))
			break;

		__skb_unlink(skb, &port->tx_skbs);
		dev_kfree_skb_any(skb);
	}
	spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
}

int sparx5_ptp_txtstamp_request(struct sparx5_port *port,
				struct sk_buff *skb)
{
	struct sparx5 *sparx5 = port->sparx5;
	u8 rew_op, pdu_type, pdu_w16_offset;
	unsigned long flags;

	sparx5_ptp_classify(port, skb, &rew_op, &pdu_type, &pdu_w16_offset);
	SPARX5_SKB_CB(skb)->rew_op = rew_op;
	SPARX5_SKB_CB(skb)->pdu_type = pdu_type;
	SPARX5_SKB_CB(skb)->pdu_w16_offset = pdu_w16_offset;

	if (rew_op != IFH_REW_OP_TWO_STEP_PTP)
		return 0;

	sparx5_ptp_txtstamp_old_release(port);

	spin_lock_irqsave(&sparx5->ptp_ts_id_lock, flags);
	if (sparx5->ptp_skbs == SPARX5_MAX_PTP_ID) {
		spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);
		return -EBUSY;
	}

	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	skb_queue_tail(&port->tx_skbs, skb);
	SPARX5_SKB_CB(skb)->ts_id = port->ts_id;
	SPARX5_SKB_CB(skb)->jiffies = jiffies;

	sparx5->ptp_skbs++;
	port->ts_id++;
	if (port->ts_id == SPARX5_MAX_PTP_ID)
		port->ts_id = 0;

	spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);

	return 0;
}

void sparx5_ptp_txtstamp_release(struct sparx5_port *port,
				 struct sk_buff *skb)
{
	struct sparx5 *sparx5 = port->sparx5;
	unsigned long flags;

	spin_lock_irqsave(&sparx5->ptp_ts_id_lock, flags);
	port->ts_id--;
	sparx5->ptp_skbs--;
	skb_unlink(skb, &port->tx_skbs);
	spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);
}

void sparx5_get_hwtimestamp(struct sparx5 *sparx5,
			    struct timespec64 *ts,
			    u32 nsec)
{
	/* Read current PTP time to get seconds */
	const struct sparx5_consts *consts = sparx5->data->consts;
	unsigned long flags;
	u32 curr_nsec;

	spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);

	spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(SPARX5_PHC_PORT) |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
		 PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
		 sparx5, PTP_PTP_PIN_CFG(consts->tod_pin));

	ts->tv_sec = spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(consts->tod_pin));
	curr_nsec = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(consts->tod_pin));

	ts->tv_nsec = nsec;

	/* Sec has incremented since the ts was registered */
	if (curr_nsec < nsec)
		ts->tv_sec--;

	spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
}
EXPORT_SYMBOL_GPL(sparx5_get_hwtimestamp);

irqreturn_t sparx5_ptp_irq_handler(int irq, void *args)
{
	int budget = SPARX5_MAX_PTP_ID;
	struct sparx5 *sparx5 = args;

	while (budget--) {
		struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
		struct skb_shared_hwtstamps shhwtstamps;
		struct sparx5_port *port;
		struct timespec64 ts;
		unsigned long flags;
		u32 val, id, txport;
		u32 delay;

		val = spx5_rd(sparx5, REW_PTP_TWOSTEP_CTRL);

		/* Check if a timestamp can be retrieved */
		if (!(val & REW_PTP_TWOSTEP_CTRL_PTP_VLD))
			break;

		WARN_ON(val & REW_PTP_TWOSTEP_CTRL_PTP_OVFL);

		if (!(val & REW_PTP_TWOSTEP_CTRL_STAMP_TX))
			continue;

		/* Retrieve the ts Tx port */
		txport = REW_PTP_TWOSTEP_CTRL_STAMP_PORT_GET(val);

		/* Retrieve its associated skb */
		port = sparx5->ports[txport];

		/* Retrieve the delay */
		delay = spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP);
		delay = REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(delay);

		/* Get next timestamp from fifo, which needs to be the
		 * rx timestamp which represents the id of the frame
		 */
		spx5_rmw(REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(1),
			 REW_PTP_TWOSTEP_CTRL_PTP_NXT,
			 sparx5, REW_PTP_TWOSTEP_CTRL);

		val = spx5_rd(sparx5, REW_PTP_TWOSTEP_CTRL);

		/* Check if a timestamp can be retrieved */
		if (!(val & REW_PTP_TWOSTEP_CTRL_PTP_VLD))
			break;

		/* Read RX timestamping to get the ID */
		id = spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP);
		id <<= 8;
		id |= spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP_SUBNS);

		spin_lock_irqsave(&port->tx_skbs.lock, flags);
		skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
			if (SPARX5_SKB_CB(skb)->ts_id != id)
				continue;

			__skb_unlink(skb, &port->tx_skbs);
			skb_match = skb;
			break;
		}
		spin_unlock_irqrestore(&port->tx_skbs.lock, flags);

		/* Next ts */
		spx5_rmw(REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(1),
			 REW_PTP_TWOSTEP_CTRL_PTP_NXT,
			 sparx5, REW_PTP_TWOSTEP_CTRL);

		if (WARN_ON(!skb_match))
			continue;

		spin_lock(&sparx5->ptp_ts_id_lock);
		sparx5->ptp_skbs--;
		spin_unlock(&sparx5->ptp_ts_id_lock);

		/* Get the h/w timestamp */
		sparx5_get_hwtimestamp(sparx5, &ts, delay);

		/* Set the timestamp into the skb */
		shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
		skb_tstamp_tx(skb_match, &shhwtstamps);

		dev_kfree_skb_any(skb_match);
	}

	return IRQ_HANDLED;
}

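/* scaled_ppm, as passed by the PTP core, is a ppm value with a 16-bit binary
 * fraction: e.g. scaled_ppm = 0x18000 requests a +1.5 ppm adjustment. The
 * integer and fractional ppm parts are multiplied by the 1 ppm constant
 * separately and summed, and the result is added to (or subtracted from) the
 * nominal per-cycle TOD increment.
 */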
static int sparx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
	struct sparx5 *sparx5 = phc->sparx5;
	unsigned long flags;
	bool neg_adj = false;
	u64 tod_inc;
	u64 ref;

	if (!scaled_ppm)
		return 0;

	if (scaled_ppm < 0) {
		neg_adj = true;
		scaled_ppm = -scaled_ppm;
	}

	tod_inc = sparx5_ptp_get_nominal_value(sparx5);

	/* The multiplication is split into two separate additions because of
	 * overflow issues: if scaled_ppm, with its 16-bit fractional part,
	 * were bigger than 20 ppm, a single multiplication would overflow.
	 */
	ref = sparx5_ptp_get_1ppm(sparx5) * (scaled_ppm >> 16);
	ref += (sparx5_ptp_get_1ppm(sparx5) * (0xffff & scaled_ppm)) >> 16;
	tod_inc = neg_adj ? tod_inc - ref : tod_inc + ref;

	spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);

	spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(1 << BIT(phc->index)),
		 PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS,
		 sparx5, PTP_PTP_DOM_CFG);

	spx5_wr((u32)tod_inc & 0xFFFFFFFF, sparx5,
		PTP_CLK_PER_CFG(phc->index, 0));
	spx5_wr((u32)(tod_inc >> 32), sparx5,
		PTP_CLK_PER_CFG(phc->index, 1));

	spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0),
		 PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, sparx5,
		 PTP_PTP_DOM_CFG);

	spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);

	return 0;
}

static int sparx5_ptp_settime64(struct ptp_clock_info *ptp,
				const struct timespec64 *ts)
{
	struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
	struct sparx5 *sparx5 = phc->sparx5;
	const struct sparx5_consts *consts;
	unsigned long flags;

	consts = sparx5->data->consts;

	spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);

	/* Must be in IDLE mode before the time can be loaded */
	spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
		 PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
		 sparx5, PTP_PTP_PIN_CFG(consts->tod_pin));

	/* Set new value */
	spx5_wr(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_SET(upper_32_bits(ts->tv_sec)),
		sparx5, PTP_PTP_TOD_SEC_MSB(consts->tod_pin));
	spx5_wr(lower_32_bits(ts->tv_sec),
		sparx5, PTP_PTP_TOD_SEC_LSB(consts->tod_pin));
	spx5_wr(ts->tv_nsec, sparx5, PTP_PTP_TOD_NSEC(consts->tod_pin));

	/* Apply new values */
	spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_LOAD) |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
		 PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
		 sparx5, PTP_PTP_PIN_CFG(consts->tod_pin));

	spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);

	return 0;
}

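/* The SAVE pin action latches the current TOD of the domain into the pin's
 * SEC/NSEC registers, which are then read out below. Raw nanosecond values in
 * the 0x3FFFFFF0..0x3FFFFFFF range encode the small negative offsets
 * -16..-1 ns: e.g. a raw value of 0x3FFFFFFF is returned as
 * (tv_sec - 1, 999999999).
 */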
int sparx5_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
	struct sparx5 *sparx5 = phc->sparx5;
	const struct sparx5_consts *consts;
	unsigned long flags;
	time64_t s;
	s64 ns;

	consts = sparx5->data->consts;

	spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);

	spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
		 PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
		 sparx5, PTP_PTP_PIN_CFG(consts->tod_pin));

	s = spx5_rd(sparx5, PTP_PTP_TOD_SEC_MSB(consts->tod_pin));
	s <<= 32;
	s |= spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(consts->tod_pin));
	ns = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(consts->tod_pin));
	ns &= PTP_PTP_TOD_NSEC_PTP_TOD_NSEC;

	spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);

	/* Deal with negative values */
	if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) {
		s--;
		ns &= 0xf;
		ns += 999999984;
	}

	set_normalized_timespec64(ts, s, ns);
	return 0;
}

static int sparx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
	struct sparx5 *sparx5 = phc->sparx5;
	const struct sparx5_consts *consts;

	consts = sparx5->data->consts;

	if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
		unsigned long flags;

		spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);

		/* Must be in IDLE mode before the time can be loaded */
		spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
			 PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
			 PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
			 PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
			 PTP_PTP_PIN_CFG_PTP_PIN_DOM |
			 PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
			 sparx5, PTP_PTP_PIN_CFG(consts->tod_pin));

		spx5_wr(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_SET(delta),
			sparx5, PTP_PTP_TOD_NSEC(consts->tod_pin));

		/* Adjust time with the value of PTP_TOD_NSEC */
		spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_DELTA) |
			 PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
			 PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
			 PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
			 PTP_PTP_PIN_CFG_PTP_PIN_DOM |
			 PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
			 sparx5, PTP_PTP_PIN_CFG(consts->tod_pin));

		spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
	} else {
		/* Fall back using sparx5_ptp_settime64 which is not exact */
		struct timespec64 ts;
		u64 now;

		sparx5_ptp_gettime64(ptp, &ts);

		now = ktime_to_ns(timespec64_to_ktime(ts));
		ts = ns_to_timespec64(now + delta);

		sparx5_ptp_settime64(ptp, &ts);
	}

	return 0;
}

static struct ptp_clock_info sparx5_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.name		= "sparx5 ptp",
	.max_adj	= 200000,
	.gettime64	= sparx5_ptp_gettime64,
	.settime64	= sparx5_ptp_settime64,
	.adjtime	= sparx5_ptp_adjtime,
	.adjfine	= sparx5_ptp_adjfine,
};

static int sparx5_ptp_phc_init(struct sparx5 *sparx5,
			       int index,
			       struct ptp_clock_info *clock_info)
{
	struct sparx5_phc *phc = &sparx5->phc[index];

	phc->info = *clock_info;
	phc->clock = ptp_clock_register(&phc->info, sparx5->dev);
	if (IS_ERR(phc->clock))
		return PTR_ERR(phc->clock);

	phc->index = index;
	phc->sparx5 = sparx5;

	/* PTP Rx stamping is always enabled */
	phc->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	return 0;
}

int sparx5_ptp_init(struct sparx5 *sparx5)
{
	u64 tod_adj = sparx5_ptp_get_nominal_value(sparx5);
	struct sparx5_port *port;
	int err, i;

	if (!sparx5->ptp)
		return 0;

	for (i = 0; i < SPARX5_PHC_COUNT; ++i) {
		err = sparx5_ptp_phc_init(sparx5, i, &sparx5_ptp_clock_info);
		if (err)
			return err;
	}

	spin_lock_init(&sparx5->ptp_clock_lock);
	spin_lock_init(&sparx5->ptp_ts_id_lock);
	mutex_init(&sparx5->ptp_lock);

	/* Disable master counters */
	spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0), sparx5, PTP_PTP_DOM_CFG);

	/* Configure the nominal TOD increment per clock cycle */
	spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0x7),
		 PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS,
		 sparx5, PTP_PTP_DOM_CFG);

	for (i = 0; i < SPARX5_PHC_COUNT; ++i) {
		spx5_wr((u32)tod_adj & 0xFFFFFFFF, sparx5,
			PTP_CLK_PER_CFG(i, 0));
		spx5_wr((u32)(tod_adj >> 32), sparx5,
			PTP_CLK_PER_CFG(i, 1));
	}

	spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0),
		 PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS,
		 sparx5, PTP_PTP_DOM_CFG);

	/* Enable master counters */
	spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0x7), sparx5, PTP_PTP_DOM_CFG);

	for (i = 0; i < sparx5->data->consts->n_ports; i++) {
		port = sparx5->ports[i];
		if (!port)
			continue;

		skb_queue_head_init(&port->tx_skbs);
	}

	return 0;
}

void sparx5_ptp_deinit(struct sparx5 *sparx5)
{
	struct sparx5_port *port;
	int i;

	for (i = 0; i < sparx5->data->consts->n_ports; i++) {
		port = sparx5->ports[i];
		if (!port)
			continue;

		skb_queue_purge(&port->tx_skbs);
	}

	for (i = 0; i < SPARX5_PHC_COUNT; ++i)
		ptp_clock_unregister(sparx5->phc[i].clock);
}

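/* Rx timestamps from the hardware carry only nanoseconds; rebuild a full
 * timestamp by pairing them with the seconds counter of the port PHC read
 * here, decrementing the seconds value when the nanosecond counter has
 * already wrapped past the stamped value.
 */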
void sparx5_ptp_rxtstamp(struct sparx5 *sparx5, struct sk_buff *skb,
			 u64 timestamp)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct sparx5_phc *phc;
	struct timespec64 ts;
	u64 full_ts_in_ns;

	if (!sparx5->ptp)
		return;

	phc = &sparx5->phc[SPARX5_PHC_PORT];
	sparx5_ptp_gettime64(&phc->info, &ts);

	if (ts.tv_nsec < timestamp)
		ts.tv_sec--;
	ts.tv_nsec = timestamp;
	full_ts_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);

	shhwtstamps = skb_hwtstamps(skb);
	shhwtstamps->hwtstamp = full_ts_in_ns;
}