// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2021 Hisilicon Limited.

#include <linux/skbuff.h>
#include "hclge_main.h"
#include "hnae3.h"

static int hclge_ptp_get_cycle(struct hclge_dev *hdev)
{
	struct hclge_ptp *ptp = hdev->ptp;

	ptp->cycle.quo = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG) &
			 HCLGE_PTP_CYCLE_QUO_MASK;
	ptp->cycle.numer = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG);
	ptp->cycle.den = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);

	if (ptp->cycle.den == 0) {
		dev_err(&hdev->pdev->dev, "invalid ptp cycle denominator!\n");
		return -EINVAL;
	}

	return 0;
}
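
/* The cycle read back by hclge_ptp_get_cycle() is expressed as
 * quotient + numerator / denominator nanoseconds, and hclge_ptp_adjfine()
 * below rescales that value by scaled_ppm before splitting it back into
 * the three register fields.  Illustrative example (register values are
 * hypothetical): with cycle.quo = 2, cycle.den = 0x1000000 and
 * cycle.numer = 0x800000 (a 2.5 ns cycle),
 * adj_base = 2 * 0x1000000 + 0x800000 = 41943040.  A scaled_ppm of
 * +65536 (+1 ppm) makes adjust_by_scaled_ppm() return 41943081, which
 * div_u64_rem() then splits into quo = 2 and numerator = 8388649.
 */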

static int hclge_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
	struct hclge_ptp_cycle *cycle = &hdev->ptp->cycle;
	u64 adj_val, adj_base;
	unsigned long flags;
	u32 quo, numerator;

	adj_base = (u64)cycle->quo * (u64)cycle->den + (u64)cycle->numer;
	adj_val = adjust_by_scaled_ppm(adj_base, scaled_ppm);

	/* The clock cycle is defined by three parts: quotient, numerator
	 * and denominator. For example, for a 2.5 ns cycle the quotient
	 * is 2, the denominator is fixed to ptp->cycle.den, and the
	 * numerator is 0.5 * ptp->cycle.den.
	 */
	quo = div_u64_rem(adj_val, cycle->den, &numerator);

	spin_lock_irqsave(&hdev->ptp->lock, flags);
	writel(quo & HCLGE_PTP_CYCLE_QUO_MASK,
	       hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG);
	writel(numerator, hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG);
	writel(cycle->den, hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
	writel(HCLGE_PTP_CYCLE_ADJ_EN,
	       hdev->ptp->io_base + HCLGE_PTP_CYCLE_CFG_REG);
	spin_unlock_irqrestore(&hdev->ptp->lock, flags);

	return 0;
}

bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_ptp *ptp = hdev->ptp;

	if (!test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ||
	    test_and_set_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) {
		ptp->tx_skipped++;
		return false;
	}

	ptp->tx_start = jiffies;
	ptp->tx_skb = skb_get(skb);
	ptp->tx_cnt++;

	return true;
}

void hclge_ptp_clean_tx_hwts(struct hclge_dev *hdev)
{
	struct sk_buff *skb = hdev->ptp->tx_skb;
	struct skb_shared_hwtstamps hwts;
	u32 hi, lo;
	u64 ns;

	ns = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_NSEC_REG) &
	     HCLGE_PTP_TX_TS_NSEC_MASK;
	lo = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_SEC_L_REG);
	hi = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_SEC_H_REG) &
	     HCLGE_PTP_TX_TS_SEC_H_MASK;
	hdev->ptp->last_tx_seqid = readl(hdev->ptp->io_base +
					 HCLGE_PTP_TX_TS_SEQID_REG);

	if (skb) {
		hdev->ptp->tx_skb = NULL;
		hdev->ptp->tx_cleaned++;

		ns += (((u64)hi) << 32 | lo) * NSEC_PER_SEC;
		hwts.hwtstamp = ns_to_ktime(ns);
		skb_tstamp_tx(skb, &hwts);
		dev_kfree_skb_any(skb);
	}

	clear_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state);
}

void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb,
			   u32 nsec, u32 sec)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned long flags;
	u64 ns = nsec;
	u32 sec_h;

	if (!hdev->ptp || !test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags))
		return;

	/* The BD does not have enough room for the upper 16 bits of the
	 * seconds value, and that part changes infrequently, so read it
	 * from the register instead.
	 */
	spin_lock_irqsave(&hdev->ptp->lock, flags);
	sec_h = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_H_REG);
	spin_unlock_irqrestore(&hdev->ptp->lock, flags);

	ns += (((u64)sec_h) << HCLGE_PTP_SEC_H_OFFSET | sec) * NSEC_PER_SEC;
	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
	hdev->ptp->last_rx = jiffies;
	hdev->ptp->rx_cnt++;
}

static int hclge_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
			      struct ptp_system_timestamp *sts)
{
	struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
	unsigned long flags;
	u32 hi, lo;
	u64 ns;

	spin_lock_irqsave(&hdev->ptp->lock, flags);
	ns = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_NSEC_REG);
	hi = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_H_REG);
	lo = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_L_REG);
	spin_unlock_irqrestore(&hdev->ptp->lock, flags);

	ns += (((u64)hi) << HCLGE_PTP_SEC_H_OFFSET | lo) * NSEC_PER_SEC;
	*ts = ns_to_timespec64(ns);

	return 0;
}

static int hclge_ptp_settime(struct ptp_clock_info *ptp,
			     const struct timespec64 *ts)
{
	struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
	unsigned long flags;

	spin_lock_irqsave(&hdev->ptp->lock, flags);
	writel(ts->tv_nsec, hdev->ptp->io_base + HCLGE_PTP_TIME_NSEC_REG);
	writel(ts->tv_sec >> HCLGE_PTP_SEC_H_OFFSET,
	       hdev->ptp->io_base + HCLGE_PTP_TIME_SEC_H_REG);
	writel(ts->tv_sec & HCLGE_PTP_SEC_L_MASK,
	       hdev->ptp->io_base + HCLGE_PTP_TIME_SEC_L_REG);
	/* synchronize the time of the PHC */
	writel(HCLGE_PTP_TIME_SYNC_EN,
	       hdev->ptp->io_base + HCLGE_PTP_TIME_SYNC_REG);
	spin_unlock_irqrestore(&hdev->ptp->lock, flags);

	return 0;
}

static int hclge_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
	unsigned long flags;
	bool is_neg = false;
	u32 adj_val = 0;

	if (delta < 0) {
		adj_val |= HCLGE_PTP_TIME_NSEC_NEG;
		delta = -delta;
		is_neg = true;
	}

	if (delta > HCLGE_PTP_TIME_NSEC_MASK) {
		struct timespec64 ts;
		s64 ns;

		hclge_ptp_gettimex(ptp, &ts, NULL);
		ns = timespec64_to_ns(&ts);
		ns = is_neg ? ns - delta : ns + delta;
		ts = ns_to_timespec64(ns);
		return hclge_ptp_settime(ptp, &ts);
	}

	adj_val |= delta & HCLGE_PTP_TIME_NSEC_MASK;

	spin_lock_irqsave(&hdev->ptp->lock, flags);
	writel(adj_val, hdev->ptp->io_base + HCLGE_PTP_TIME_NSEC_REG);
	writel(HCLGE_PTP_TIME_ADJ_EN,
	       hdev->ptp->io_base + HCLGE_PTP_TIME_ADJ_REG);
	spin_unlock_irqrestore(&hdev->ptp->lock, flags);

	return 0;
}

int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr)
{
	if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, &hdev->ptp->ts_cfg,
			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}

static int hclge_ptp_int_en(struct hclge_dev *hdev, bool en)
{
	struct hclge_ptp_int_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_ptp_int_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_INT_EN, false);
	req->int_en = en ? 1 : 0;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to %s ptp interrupt, ret = %d\n",
			en ? "enable" : "disable", ret);

	return ret;
}

int hclge_ptp_cfg_qry(struct hclge_dev *hdev, u32 *cfg)
{
	struct hclge_ptp_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_ptp_cfg_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_MODE_CFG, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query ptp config, ret = %d\n", ret);
		return ret;
	}

	*cfg = le32_to_cpu(req->cfg);

	return 0;
}

static int hclge_ptp_cfg(struct hclge_dev *hdev, u32 cfg)
{
	struct hclge_ptp_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_ptp_cfg_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_MODE_CFG, false);
	req->cfg = cpu_to_le32(cfg);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to config ptp, ret = %d\n", ret);

	return ret;
}

static int hclge_ptp_set_tx_mode(struct hwtstamp_config *cfg,
				 unsigned long *flags, u32 *ptp_cfg)
{
	switch (cfg->tx_type) {
	case HWTSTAMP_TX_OFF:
		clear_bit(HCLGE_PTP_FLAG_TX_EN, flags);
		break;
	case HWTSTAMP_TX_ON:
		set_bit(HCLGE_PTP_FLAG_TX_EN, flags);
		*ptp_cfg |= HCLGE_PTP_TX_EN_B;
		break;
	default:
		return -ERANGE;
	}

	return 0;
}

static int hclge_ptp_set_rx_mode(struct hwtstamp_config *cfg,
				 unsigned long *flags, u32 *ptp_cfg)
{
	int rx_filter = cfg->rx_filter;

	switch (cfg->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		clear_bit(HCLGE_PTP_FLAG_RX_EN, flags);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		set_bit(HCLGE_PTP_FLAG_RX_EN, flags);
		*ptp_cfg |= HCLGE_PTP_RX_EN_B;
		*ptp_cfg |= HCLGE_PTP_UDP_FULL_TYPE << HCLGE_PTP_UDP_EN_SHIFT;
		rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		set_bit(HCLGE_PTP_FLAG_RX_EN, flags);
		*ptp_cfg |= HCLGE_PTP_RX_EN_B;
		*ptp_cfg |= HCLGE_PTP_UDP_FULL_TYPE << HCLGE_PTP_UDP_EN_SHIFT;
		*ptp_cfg |= HCLGE_PTP_MSG1_V2_DEFAULT << HCLGE_PTP_MSG1_SHIFT;
		*ptp_cfg |= HCLGE_PTP_MSG0_V2_EVENT << HCLGE_PTP_MSG0_SHIFT;
		*ptp_cfg |= HCLGE_PTP_MSG_TYPE_V2 << HCLGE_PTP_MSG_TYPE_SHIFT;
		rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	case HWTSTAMP_FILTER_ALL:
	default:
		return -ERANGE;
	}

	cfg->rx_filter = rx_filter;

	return 0;
}

static int hclge_ptp_set_ts_mode(struct hclge_dev *hdev,
				 struct hwtstamp_config *cfg)
{
	unsigned long flags = hdev->ptp->flags;
	u32 ptp_cfg = 0;
	int ret;

	if (test_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags))
		ptp_cfg |= HCLGE_PTP_EN_B;

	ret = hclge_ptp_set_tx_mode(cfg, &flags, &ptp_cfg);
	if (ret)
		return ret;

	ret = hclge_ptp_set_rx_mode(cfg, &flags, &ptp_cfg);
	if (ret)
		return ret;

	ret = hclge_ptp_cfg(hdev, ptp_cfg);
	if (ret)
		return ret;

	hdev->ptp->flags = flags;
	hdev->ptp->ptp_cfg = ptp_cfg;

	return 0;
}

int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	int ret;

	if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state)) {
		dev_err(&hdev->pdev->dev, "phc is unsupported\n");
		return -EOPNOTSUPP;
	}

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	ret = hclge_ptp_set_ts_mode(hdev, &cfg);
	if (ret)
		return ret;

	hdev->ptp->ts_cfg = cfg;

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

int hclge_ptp_get_ts_info(struct hnae3_handle *handle,
			  struct ethtool_ts_info *info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state)) {
		dev_err(&hdev->pdev->dev, "phc is unsupported\n");
		return -EOPNOTSUPP;
	}

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	if (hdev->ptp->clock)
		info->phc_index = ptp_clock_index(hdev->ptp->clock);
	else
		info->phc_index = -1;

	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);

	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ);

	info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			    BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			    BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
			    BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
			    BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
			    BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
			    BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
			    BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);

	return 0;
}
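
/* ptp_clock_register() returns an ERR_PTR() on failure and NULL when PTP
 * clock support is not built into the kernel (CONFIG_PTP_1588_CLOCK
 * disabled); hclge_ptp_create_clock() below treats both outcomes as
 * "no usable PHC" and fails with -ENODEV.
 */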

static int hclge_ptp_create_clock(struct hclge_dev *hdev)
{
	struct hclge_ptp *ptp;

	ptp = devm_kzalloc(&hdev->pdev->dev, sizeof(*ptp), GFP_KERNEL);
	if (!ptp)
		return -ENOMEM;

	ptp->hdev = hdev;
	snprintf(ptp->info.name, sizeof(ptp->info.name), "%s",
		 HCLGE_DRIVER_NAME);
	ptp->info.owner = THIS_MODULE;
	ptp->info.max_adj = HCLGE_PTP_CYCLE_ADJ_MAX;
	ptp->info.n_ext_ts = 0;
	ptp->info.pps = 0;
	ptp->info.adjfine = hclge_ptp_adjfine;
	ptp->info.adjtime = hclge_ptp_adjtime;
	ptp->info.gettimex64 = hclge_ptp_gettimex;
	ptp->info.settime64 = hclge_ptp_settime;

	ptp->info.n_alarm = 0;
	ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev);
	if (IS_ERR(ptp->clock)) {
		dev_err(&hdev->pdev->dev,
			"%d failed to register ptp clock, ret = %ld\n",
			ptp->info.n_alarm, PTR_ERR(ptp->clock));
		return -ENODEV;
	} else if (!ptp->clock) {
		dev_err(&hdev->pdev->dev, "failed to register ptp clock\n");
		return -ENODEV;
	}

	spin_lock_init(&ptp->lock);
	ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
	ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
	ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
	hdev->ptp = ptp;

	return 0;
}

static void hclge_ptp_destroy_clock(struct hclge_dev *hdev)
{
	ptp_clock_unregister(hdev->ptp->clock);
	hdev->ptp->clock = NULL;
	devm_kfree(&hdev->pdev->dev, hdev->ptp);
	hdev->ptp = NULL;
}

int hclge_ptp_init(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct timespec64 ts;
	int ret;

	if (!test_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps))
		return 0;

	if (!hdev->ptp) {
		ret = hclge_ptp_create_clock(hdev);
		if (ret)
			return ret;

		ret = hclge_ptp_get_cycle(hdev);
		if (ret)
			return ret;
	}

	ret = hclge_ptp_int_en(hdev, true);
	if (ret)
		goto out;

	set_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags);
	ret = hclge_ptp_adjfine(&hdev->ptp->info, 0);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to init freq, ret = %d\n", ret);
		goto out;
	}

	ret = hclge_ptp_set_ts_mode(hdev, &hdev->ptp->ts_cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to init ts mode, ret = %d\n", ret);
		goto out;
	}

	ktime_get_real_ts64(&ts);
	ret = hclge_ptp_settime(&hdev->ptp->info, &ts);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to init ts time, ret = %d\n", ret);
		goto out;
	}

	set_bit(HCLGE_STATE_PTP_EN, &hdev->state);
	dev_info(&hdev->pdev->dev, "phc initializes ok!\n");

	return 0;

out:
	hclge_ptp_destroy_clock(hdev);

	return ret;
}

void hclge_ptp_uninit(struct hclge_dev *hdev)
{
	struct hclge_ptp *ptp = hdev->ptp;

	if (!ptp)
		return;

	hclge_ptp_int_en(hdev, false);
	clear_bit(HCLGE_STATE_PTP_EN, &hdev->state);
	clear_bit(HCLGE_PTP_FLAG_EN, &ptp->flags);
	ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
	ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;

	if (hclge_ptp_set_ts_mode(hdev, &ptp->ts_cfg))
		dev_err(&hdev->pdev->dev, "failed to disable phc\n");

	if (ptp->tx_skb) {
		struct sk_buff *skb = ptp->tx_skb;

		ptp->tx_skb = NULL;
		dev_kfree_skb_any(skb);
	}

	hclge_ptp_destroy_clock(hdev);
}
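
/* Usage sketch (illustrative): the clock registered by hclge_ptp_init()
 * is exposed to user space as /dev/ptpN, where N matches the phc_index
 * reported through hclge_ptp_get_ts_info(); hardware timestamping itself
 * is switched on via the standard SIOCSHWTSTAMP ioctl
 * (struct hwtstamp_config), which ends up in hclge_ptp_set_cfg() above.
 */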