1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell RVU Ethernet driver 3 * 4 * Copyright (C) 2020 Marvell. 5 * 6 */ 7 8 #include <linux/interrupt.h> 9 #include <linux/pci.h> 10 #include <net/page_pool/helpers.h> 11 #include <net/tso.h> 12 #include <linux/bitfield.h> 13 #include <net/xfrm.h> 14 15 #include "otx2_reg.h" 16 #include "otx2_common.h" 17 #include "otx2_struct.h" 18 #include "cn10k.h" 19 20 static bool otx2_is_pfc_enabled(struct otx2_nic *pfvf) 21 { 22 return IS_ENABLED(CONFIG_DCB) && !!pfvf->pfc_en; 23 } 24 25 static void otx2_nix_rq_op_stats(struct queue_stats *stats, 26 struct otx2_nic *pfvf, int qidx) 27 { 28 u64 incr = (u64)qidx << 32; 29 u64 *ptr; 30 31 ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS); 32 stats->bytes = otx2_atomic64_add(incr, ptr); 33 34 ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS); 35 stats->pkts = otx2_atomic64_add(incr, ptr); 36 } 37 38 static void otx2_nix_sq_op_stats(struct queue_stats *stats, 39 struct otx2_nic *pfvf, int qidx) 40 { 41 u64 incr = (u64)qidx << 32; 42 u64 *ptr; 43 44 ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS); 45 stats->bytes = otx2_atomic64_add(incr, ptr); 46 47 ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS); 48 stats->pkts = otx2_atomic64_add(incr, ptr); 49 } 50 51 void otx2_update_lmac_stats(struct otx2_nic *pfvf) 52 { 53 struct msg_req *req; 54 55 if (!netif_running(pfvf->netdev)) 56 return; 57 58 mutex_lock(&pfvf->mbox.lock); 59 req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox); 60 if (!req) { 61 mutex_unlock(&pfvf->mbox.lock); 62 return; 63 } 64 65 otx2_sync_mbox_msg(&pfvf->mbox); 66 mutex_unlock(&pfvf->mbox.lock); 67 } 68 69 void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf) 70 { 71 struct msg_req *req; 72 73 if (!netif_running(pfvf->netdev)) 74 return; 75 mutex_lock(&pfvf->mbox.lock); 76 req = otx2_mbox_alloc_msg_cgx_fec_stats(&pfvf->mbox); 77 if (req) 78 otx2_sync_mbox_msg(&pfvf->mbox); 79 mutex_unlock(&pfvf->mbox.lock); 80 } 81 82 int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx) 83 { 84 struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx]; 85 86 if (!pfvf->qset.rq) 87 return 0; 88 89 otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx); 90 return 1; 91 } 92 EXPORT_SYMBOL(otx2_update_rq_stats); 93 94 int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx) 95 { 96 struct otx2_snd_queue *sq = &pfvf->qset.sq[qidx]; 97 98 if (!pfvf->qset.sq) 99 return 0; 100 101 if (qidx >= pfvf->hw.non_qos_queues) { 102 if (!test_bit(qidx - pfvf->hw.non_qos_queues, pfvf->qos.qos_sq_bmap)) 103 return 0; 104 } 105 106 otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx); 107 return 1; 108 } 109 EXPORT_SYMBOL(otx2_update_sq_stats); 110 111 void otx2_get_dev_stats(struct otx2_nic *pfvf) 112 { 113 struct otx2_dev_stats *dev_stats = &pfvf->hw.dev_stats; 114 115 dev_stats->rx_bytes = OTX2_GET_RX_STATS(RX_OCTS); 116 dev_stats->rx_drops = OTX2_GET_RX_STATS(RX_DROP); 117 dev_stats->rx_bcast_frames = OTX2_GET_RX_STATS(RX_BCAST); 118 dev_stats->rx_mcast_frames = OTX2_GET_RX_STATS(RX_MCAST); 119 dev_stats->rx_ucast_frames = OTX2_GET_RX_STATS(RX_UCAST); 120 dev_stats->rx_frames = dev_stats->rx_bcast_frames + 121 dev_stats->rx_mcast_frames + 122 dev_stats->rx_ucast_frames; 123 124 dev_stats->tx_bytes = OTX2_GET_TX_STATS(TX_OCTS); 125 dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP); 126 dev_stats->tx_bcast_frames = OTX2_GET_TX_STATS(TX_BCAST); 127 dev_stats->tx_mcast_frames = OTX2_GET_TX_STATS(TX_MCAST); 128 dev_stats->tx_ucast_frames = OTX2_GET_TX_STATS(TX_UCAST); 129 dev_stats->tx_frames = dev_stats->tx_bcast_frames + 130 
dev_stats->tx_mcast_frames + 131 dev_stats->tx_ucast_frames; 132 } 133 134 void otx2_get_stats64(struct net_device *netdev, 135 struct rtnl_link_stats64 *stats) 136 { 137 struct otx2_nic *pfvf = netdev_priv(netdev); 138 struct otx2_dev_stats *dev_stats; 139 140 otx2_get_dev_stats(pfvf); 141 142 dev_stats = &pfvf->hw.dev_stats; 143 stats->rx_bytes = dev_stats->rx_bytes; 144 stats->rx_packets = dev_stats->rx_frames; 145 stats->rx_dropped = dev_stats->rx_drops; 146 stats->multicast = dev_stats->rx_mcast_frames; 147 148 stats->tx_bytes = dev_stats->tx_bytes; 149 stats->tx_packets = dev_stats->tx_frames; 150 stats->tx_dropped = dev_stats->tx_drops; 151 } 152 EXPORT_SYMBOL(otx2_get_stats64); 153 154 /* Sync MAC address with RVU AF */ 155 static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac) 156 { 157 struct nix_set_mac_addr *req; 158 int err; 159 160 mutex_lock(&pfvf->mbox.lock); 161 req = otx2_mbox_alloc_msg_nix_set_mac_addr(&pfvf->mbox); 162 if (!req) { 163 mutex_unlock(&pfvf->mbox.lock); 164 return -ENOMEM; 165 } 166 167 ether_addr_copy(req->mac_addr, mac); 168 169 err = otx2_sync_mbox_msg(&pfvf->mbox); 170 mutex_unlock(&pfvf->mbox.lock); 171 return err; 172 } 173 174 static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf, 175 struct net_device *netdev) 176 { 177 struct nix_get_mac_addr_rsp *rsp; 178 struct mbox_msghdr *msghdr; 179 struct msg_req *req; 180 int err; 181 182 mutex_lock(&pfvf->mbox.lock); 183 req = otx2_mbox_alloc_msg_nix_get_mac_addr(&pfvf->mbox); 184 if (!req) { 185 mutex_unlock(&pfvf->mbox.lock); 186 return -ENOMEM; 187 } 188 189 err = otx2_sync_mbox_msg(&pfvf->mbox); 190 if (err) { 191 mutex_unlock(&pfvf->mbox.lock); 192 return err; 193 } 194 195 msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); 196 if (IS_ERR(msghdr)) { 197 mutex_unlock(&pfvf->mbox.lock); 198 return PTR_ERR(msghdr); 199 } 200 rsp = (struct nix_get_mac_addr_rsp *)msghdr; 201 eth_hw_addr_set(netdev, rsp->mac_addr); 202 mutex_unlock(&pfvf->mbox.lock); 203 204 return 0; 205 } 206 207 int otx2_set_mac_address(struct net_device *netdev, void *p) 208 { 209 struct otx2_nic *pfvf = netdev_priv(netdev); 210 struct sockaddr *addr = p; 211 212 if (!is_valid_ether_addr(addr->sa_data)) 213 return -EADDRNOTAVAIL; 214 215 if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) { 216 eth_hw_addr_set(netdev, addr->sa_data); 217 /* update dmac field in vlan offload rule */ 218 if (netif_running(netdev) && 219 pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) 220 otx2_install_rxvlan_offload_flow(pfvf); 221 /* update dmac address in ntuple and DMAC filter list */ 222 if (pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT) 223 otx2_dmacflt_update_pfmac_flow(pfvf); 224 } else { 225 return -EPERM; 226 } 227 228 return 0; 229 } 230 EXPORT_SYMBOL(otx2_set_mac_address); 231 232 int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu) 233 { 234 struct nix_frs_cfg *req; 235 u16 maxlen; 236 int err; 237 238 maxlen = pfvf->hw.max_mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN; 239 240 mutex_lock(&pfvf->mbox.lock); 241 req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox); 242 if (!req) { 243 mutex_unlock(&pfvf->mbox.lock); 244 return -ENOMEM; 245 } 246 247 req->maxlen = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN; 248 249 /* Use max receive length supported by hardware for loopback devices */ 250 if (is_otx2_lbkvf(pfvf->pdev)) 251 req->maxlen = maxlen; 252 253 err = otx2_sync_mbox_msg(&pfvf->mbox); 254 mutex_unlock(&pfvf->mbox.lock); 255 return err; 256 } 257 EXPORT_SYMBOL(otx2_hw_set_mtu); 258 259 int otx2_config_pause_frm(struct otx2_nic *pfvf) 260 { 
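	/* Reflect the current RX/TX pause-frame flags to the AF over the
	 * mailbox. LBK and SDP representor devices have no CGX/RPM MAC,
	 * so there is nothing to configure and the request is skipped.
	 */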
261 struct cgx_pause_frm_cfg *req; 262 int err; 263 264 if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) 265 return 0; 266 267 mutex_lock(&pfvf->mbox.lock); 268 req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox); 269 if (!req) { 270 err = -ENOMEM; 271 goto unlock; 272 } 273 274 req->rx_pause = !!(pfvf->flags & OTX2_FLAG_RX_PAUSE_ENABLED); 275 req->tx_pause = !!(pfvf->flags & OTX2_FLAG_TX_PAUSE_ENABLED); 276 req->set = 1; 277 278 err = otx2_sync_mbox_msg(&pfvf->mbox); 279 unlock: 280 mutex_unlock(&pfvf->mbox.lock); 281 return err; 282 } 283 EXPORT_SYMBOL(otx2_config_pause_frm); 284 285 int otx2_set_flowkey_cfg(struct otx2_nic *pfvf) 286 { 287 struct otx2_rss_info *rss = &pfvf->hw.rss_info; 288 struct nix_rss_flowkey_cfg_rsp *rsp; 289 struct nix_rss_flowkey_cfg *req; 290 int err; 291 292 mutex_lock(&pfvf->mbox.lock); 293 req = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(&pfvf->mbox); 294 if (!req) { 295 mutex_unlock(&pfvf->mbox.lock); 296 return -ENOMEM; 297 } 298 req->mcam_index = -1; /* Default or reserved index */ 299 req->flowkey_cfg = rss->flowkey_cfg; 300 req->group = DEFAULT_RSS_CONTEXT_GROUP; 301 302 err = otx2_sync_mbox_msg(&pfvf->mbox); 303 if (err) 304 goto fail; 305 306 rsp = (struct nix_rss_flowkey_cfg_rsp *) 307 otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); 308 if (IS_ERR(rsp)) { 309 err = PTR_ERR(rsp); 310 goto fail; 311 } 312 313 pfvf->hw.flowkey_alg_idx = rsp->alg_idx; 314 fail: 315 mutex_unlock(&pfvf->mbox.lock); 316 return err; 317 } 318 319 int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id) 320 { 321 struct otx2_rss_info *rss = &pfvf->hw.rss_info; 322 const int index = rss->rss_size * ctx_id; 323 struct mbox *mbox = &pfvf->mbox; 324 struct otx2_rss_ctx *rss_ctx; 325 struct nix_aq_enq_req *aq; 326 int idx, err; 327 328 mutex_lock(&mbox->lock); 329 rss_ctx = rss->rss_ctx[ctx_id]; 330 /* Get memory to put this msg */ 331 for (idx = 0; idx < rss->rss_size; idx++) { 332 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); 333 if (!aq) { 334 /* The shared memory buffer can be full. 
335 * Flush it and retry 336 */ 337 err = otx2_sync_mbox_msg(mbox); 338 if (err) { 339 mutex_unlock(&mbox->lock); 340 return err; 341 } 342 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); 343 if (!aq) { 344 mutex_unlock(&mbox->lock); 345 return -ENOMEM; 346 } 347 } 348 349 aq->rss.rq = rss_ctx->ind_tbl[idx]; 350 351 /* Fill AQ info */ 352 aq->qidx = index + idx; 353 aq->ctype = NIX_AQ_CTYPE_RSS; 354 aq->op = NIX_AQ_INSTOP_INIT; 355 } 356 err = otx2_sync_mbox_msg(mbox); 357 mutex_unlock(&mbox->lock); 358 return err; 359 } 360 361 void otx2_set_rss_key(struct otx2_nic *pfvf) 362 { 363 struct otx2_rss_info *rss = &pfvf->hw.rss_info; 364 u64 *key = (u64 *)&rss->key[4]; 365 int idx; 366 367 /* 352bit or 44byte key needs to be configured as below 368 * NIX_LF_RX_SECRETX0 = key<351:288> 369 * NIX_LF_RX_SECRETX1 = key<287:224> 370 * NIX_LF_RX_SECRETX2 = key<223:160> 371 * NIX_LF_RX_SECRETX3 = key<159:96> 372 * NIX_LF_RX_SECRETX4 = key<95:32> 373 * NIX_LF_RX_SECRETX5<63:32> = key<31:0> 374 */ 375 otx2_write64(pfvf, NIX_LF_RX_SECRETX(5), 376 (u64)(*((u32 *)&rss->key)) << 32); 377 idx = sizeof(rss->key) / sizeof(u64); 378 while (idx > 0) { 379 idx--; 380 otx2_write64(pfvf, NIX_LF_RX_SECRETX(idx), *key++); 381 } 382 } 383 384 int otx2_rss_init(struct otx2_nic *pfvf) 385 { 386 struct otx2_rss_info *rss = &pfvf->hw.rss_info; 387 struct otx2_rss_ctx *rss_ctx; 388 int idx, ret = 0; 389 390 rss->rss_size = sizeof(*rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]); 391 392 /* Init RSS key if it is not setup already */ 393 if (!rss->enable) 394 netdev_rss_key_fill(rss->key, sizeof(rss->key)); 395 otx2_set_rss_key(pfvf); 396 397 if (!netif_is_rxfh_configured(pfvf->netdev)) { 398 /* Set RSS group 0 as default indirection table */ 399 rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP] = kzalloc(rss->rss_size, 400 GFP_KERNEL); 401 if (!rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]) 402 return -ENOMEM; 403 404 rss_ctx = rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]; 405 for (idx = 0; idx < rss->rss_size; idx++) 406 rss_ctx->ind_tbl[idx] = 407 ethtool_rxfh_indir_default(idx, 408 pfvf->hw.rx_queues); 409 } 410 ret = otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP); 411 if (ret) 412 return ret; 413 414 /* Flowkey or hash config to be used for generating flow tag */ 415 rss->flowkey_cfg = rss->enable ? rss->flowkey_cfg : 416 NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 | 417 NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP | 418 NIX_FLOW_KEY_TYPE_SCTP | NIX_FLOW_KEY_TYPE_VLAN | 419 NIX_FLOW_KEY_TYPE_IPV4_PROTO; 420 421 ret = otx2_set_flowkey_cfg(pfvf); 422 if (ret) 423 return ret; 424 425 rss->enable = true; 426 return 0; 427 } 428 429 /* Setup UDP segmentation algorithm in HW */ 430 static void otx2_setup_udp_segmentation(struct nix_lso_format_cfg *lso, bool v4) 431 { 432 struct nix_lso_format *field; 433 434 field = (struct nix_lso_format *)&lso->fields[0]; 435 lso->field_mask = GENMASK(18, 0); 436 437 /* IP's Length field */ 438 field->layer = NIX_TXLAYER_OL3; 439 /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */ 440 field->offset = v4 ? 
2 : 4; 441 field->sizem1 = 1; /* i.e 2 bytes */ 442 field->alg = NIX_LSOALG_ADD_PAYLEN; 443 field++; 444 445 /* No ID field in IPv6 header */ 446 if (v4) { 447 /* Increment IPID */ 448 field->layer = NIX_TXLAYER_OL3; 449 field->offset = 4; 450 field->sizem1 = 1; /* i.e 2 bytes */ 451 field->alg = NIX_LSOALG_ADD_SEGNUM; 452 field++; 453 } 454 455 /* Update length in UDP header */ 456 field->layer = NIX_TXLAYER_OL4; 457 field->offset = 4; 458 field->sizem1 = 1; 459 field->alg = NIX_LSOALG_ADD_PAYLEN; 460 } 461 462 /* Setup segmentation algorithms in HW and retrieve algorithm index */ 463 void otx2_setup_segmentation(struct otx2_nic *pfvf) 464 { 465 struct nix_lso_format_cfg_rsp *rsp; 466 struct nix_lso_format_cfg *lso; 467 struct otx2_hw *hw = &pfvf->hw; 468 int err; 469 470 mutex_lock(&pfvf->mbox.lock); 471 472 /* UDPv4 segmentation */ 473 lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox); 474 if (!lso) 475 goto fail; 476 477 /* Setup UDP/IP header fields that HW should update per segment */ 478 otx2_setup_udp_segmentation(lso, true); 479 480 err = otx2_sync_mbox_msg(&pfvf->mbox); 481 if (err) 482 goto fail; 483 484 rsp = (struct nix_lso_format_cfg_rsp *) 485 otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr); 486 if (IS_ERR(rsp)) 487 goto fail; 488 489 hw->lso_udpv4_idx = rsp->lso_format_idx; 490 491 /* UDPv6 segmentation */ 492 lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox); 493 if (!lso) 494 goto fail; 495 496 /* Setup UDP/IP header fields that HW should update per segment */ 497 otx2_setup_udp_segmentation(lso, false); 498 499 err = otx2_sync_mbox_msg(&pfvf->mbox); 500 if (err) 501 goto fail; 502 503 rsp = (struct nix_lso_format_cfg_rsp *) 504 otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr); 505 if (IS_ERR(rsp)) 506 goto fail; 507 508 hw->lso_udpv6_idx = rsp->lso_format_idx; 509 mutex_unlock(&pfvf->mbox.lock); 510 return; 511 fail: 512 mutex_unlock(&pfvf->mbox.lock); 513 netdev_info(pfvf->netdev, 514 "Failed to get LSO index for UDP GSO offload, disabling\n"); 515 pfvf->netdev->hw_features &= ~NETIF_F_GSO_UDP_L4; 516 } 517 518 void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx) 519 { 520 /* Configure CQE interrupt coalescing parameters 521 * 522 * HW triggers an irq when ECOUNT > cq_ecount_wait, hence 523 * set 1 less than cq_ecount_wait. And cq_time_wait is in 524 * usecs, convert that to 100ns count. 
525 */ 526 otx2_write64(pfvf, NIX_LF_CINTX_WAIT(qidx), 527 ((u64)(pfvf->hw.cq_time_wait * 10) << 48) | 528 ((u64)pfvf->hw.cq_qcount_wait << 32) | 529 (pfvf->hw.cq_ecount_wait - 1)); 530 } 531 532 static int otx2_alloc_pool_buf(struct otx2_nic *pfvf, struct otx2_pool *pool, 533 dma_addr_t *dma) 534 { 535 unsigned int offset = 0; 536 struct page *page; 537 size_t sz; 538 539 sz = SKB_DATA_ALIGN(pool->rbsize); 540 sz = ALIGN(sz, OTX2_ALIGN); 541 542 page = page_pool_alloc_frag(pool->page_pool, &offset, sz, GFP_ATOMIC); 543 if (unlikely(!page)) 544 return -ENOMEM; 545 546 *dma = page_pool_get_dma_addr(page) + offset; 547 return 0; 548 } 549 550 static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, 551 dma_addr_t *dma) 552 { 553 u8 *buf; 554 555 if (pool->page_pool) 556 return otx2_alloc_pool_buf(pfvf, pool, dma); 557 558 buf = napi_alloc_frag_align(pool->rbsize, OTX2_ALIGN); 559 if (unlikely(!buf)) 560 return -ENOMEM; 561 562 *dma = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize, 563 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); 564 if (unlikely(dma_mapping_error(pfvf->dev, *dma))) { 565 page_frag_free(buf); 566 return -ENOMEM; 567 } 568 569 return 0; 570 } 571 572 int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, 573 dma_addr_t *dma) 574 { 575 int ret; 576 577 local_bh_disable(); 578 ret = __otx2_alloc_rbuf(pfvf, pool, dma); 579 local_bh_enable(); 580 return ret; 581 } 582 583 int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, 584 dma_addr_t *dma) 585 { 586 if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) 587 return -ENOMEM; 588 return 0; 589 } 590 591 void otx2_tx_timeout(struct net_device *netdev, unsigned int txq) 592 { 593 struct otx2_nic *pfvf = netdev_priv(netdev); 594 595 schedule_work(&pfvf->reset_task); 596 } 597 EXPORT_SYMBOL(otx2_tx_timeout); 598 599 void otx2_get_mac_from_af(struct net_device *netdev) 600 { 601 struct otx2_nic *pfvf = netdev_priv(netdev); 602 int err; 603 604 err = otx2_hw_get_mac_addr(pfvf, netdev); 605 if (err) 606 dev_warn(pfvf->dev, "Failed to read mac from hardware\n"); 607 608 /* If AF doesn't provide a valid MAC, generate a random one */ 609 if (!is_valid_ether_addr(netdev->dev_addr)) 610 eth_hw_addr_random(netdev); 611 } 612 EXPORT_SYMBOL(otx2_get_mac_from_af); 613 614 int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for_pfc) 615 { 616 u16 (*schq_list)[MAX_TXSCHQ_PER_FUNC]; 617 struct otx2_hw *hw = &pfvf->hw; 618 struct nix_txschq_config *req; 619 u64 schq, parent; 620 u64 dwrr_val; 621 622 dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); 623 624 req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox); 625 if (!req) 626 return -ENOMEM; 627 628 req->lvl = lvl; 629 req->num_regs = 1; 630 631 schq_list = hw->txschq_list; 632 #ifdef CONFIG_DCB 633 if (txschq_for_pfc) 634 schq_list = pfvf->pfc_schq_list; 635 #endif 636 637 schq = schq_list[lvl][prio]; 638 /* Set topology e.t.c configuration */ 639 if (lvl == NIX_TXSCH_LVL_SMQ) { 640 req->reg[0] = NIX_AF_SMQX_CFG(schq); 641 req->regval[0] = ((u64)pfvf->tx_max_pktlen << 8) | OTX2_MIN_MTU; 642 req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) | 643 (0x2ULL << 36); 644 /* Set link type for DWRR MTU selection on CN10K silicons */ 645 if (!is_dev_otx2(pfvf->pdev)) 646 req->regval[0] |= FIELD_PREP(GENMASK_ULL(58, 57), 647 (u64)hw->smq_link_type); 648 req->num_regs++; 649 /* MDQ config */ 650 parent = schq_list[NIX_TXSCH_LVL_TL4][prio]; 651 req->reg[1] = NIX_AF_MDQX_PARENT(schq); 652 req->regval[1] = parent << 16; 653 
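	/* dwrr_val was derived from tx_max_pktlen via mtu_to_dwrr_weight()
	 * at the top of this function; the same quantum is reused for the
	 * TL4/TL3/TL2 SCHEDULE registers in the branches below.
	 */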
req->num_regs++; 654 /* Set DWRR quantum */ 655 req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq); 656 req->regval[2] = dwrr_val; 657 } else if (lvl == NIX_TXSCH_LVL_TL4) { 658 int sdp_chan = hw->tx_chan_base + prio; 659 660 if (is_otx2_sdp_rep(pfvf->pdev)) 661 prio = 0; 662 parent = schq_list[NIX_TXSCH_LVL_TL3][prio]; 663 req->reg[0] = NIX_AF_TL4X_PARENT(schq); 664 req->regval[0] = (u64)parent << 16; 665 req->num_regs++; 666 req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq); 667 req->regval[1] = dwrr_val; 668 if (is_otx2_sdp_rep(pfvf->pdev)) { 669 req->num_regs++; 670 req->reg[2] = NIX_AF_TL4X_SDP_LINK_CFG(schq); 671 req->regval[2] = BIT_ULL(12) | BIT_ULL(13) | 672 (sdp_chan & 0xff); 673 } 674 } else if (lvl == NIX_TXSCH_LVL_TL3) { 675 parent = schq_list[NIX_TXSCH_LVL_TL2][prio]; 676 req->reg[0] = NIX_AF_TL3X_PARENT(schq); 677 req->regval[0] = (u64)parent << 16; 678 req->num_regs++; 679 req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq); 680 req->regval[1] = dwrr_val; 681 if (lvl == hw->txschq_link_cfg_lvl && 682 !is_otx2_sdp_rep(pfvf->pdev)) { 683 req->num_regs++; 684 req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link); 685 /* Enable this queue and backpressure 686 * and set relative channel 687 */ 688 req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio; 689 } 690 } else if (lvl == NIX_TXSCH_LVL_TL2) { 691 parent = schq_list[NIX_TXSCH_LVL_TL1][prio]; 692 req->reg[0] = NIX_AF_TL2X_PARENT(schq); 693 req->regval[0] = (u64)parent << 16; 694 695 req->num_regs++; 696 req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq); 697 req->regval[1] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 | dwrr_val; 698 699 if (lvl == hw->txschq_link_cfg_lvl && 700 !is_otx2_sdp_rep(pfvf->pdev)) { 701 req->num_regs++; 702 req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link); 703 /* Enable this queue and backpressure 704 * and set relative channel 705 */ 706 req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio; 707 } 708 } else if (lvl == NIX_TXSCH_LVL_TL1) { 709 /* Default config for TL1. 710 * For VF this is always ignored. 711 */ 712 713 /* On CN10K, if RR_WEIGHT is greater than 16384, HW will 714 * clip it to 16384, so configuring a 24bit max value 715 * will work on both OTx2 and CN10K. 
716 */ 717 req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq); 718 req->regval[0] = TXSCH_TL1_DFLT_RR_QTM; 719 720 req->num_regs++; 721 req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq); 722 req->regval[1] = hw->txschq_aggr_lvl_rr_prio << 1; 723 724 req->num_regs++; 725 req->reg[2] = NIX_AF_TL1X_CIR(schq); 726 req->regval[2] = 0; 727 } 728 729 return otx2_sync_mbox_msg(&pfvf->mbox); 730 } 731 EXPORT_SYMBOL(otx2_txschq_config); 732 733 int otx2_smq_flush(struct otx2_nic *pfvf, int smq) 734 { 735 struct nix_txschq_config *req; 736 int rc; 737 738 mutex_lock(&pfvf->mbox.lock); 739 740 req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox); 741 if (!req) { 742 mutex_unlock(&pfvf->mbox.lock); 743 return -ENOMEM; 744 } 745 746 req->lvl = NIX_TXSCH_LVL_SMQ; 747 req->reg[0] = NIX_AF_SMQX_CFG(smq); 748 req->regval[0] |= BIT_ULL(49); 749 req->num_regs++; 750 751 rc = otx2_sync_mbox_msg(&pfvf->mbox); 752 mutex_unlock(&pfvf->mbox.lock); 753 return rc; 754 } 755 EXPORT_SYMBOL(otx2_smq_flush); 756 757 int otx2_txsch_alloc(struct otx2_nic *pfvf) 758 { 759 int chan_cnt = pfvf->hw.tx_chan_cnt; 760 struct nix_txsch_alloc_req *req; 761 struct nix_txsch_alloc_rsp *rsp; 762 int lvl, schq, rc; 763 764 /* Get memory to put this msg */ 765 req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox); 766 if (!req) 767 return -ENOMEM; 768 769 /* Request one schq per level */ 770 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) 771 req->schq[lvl] = 1; 772 773 if (is_otx2_sdp_rep(pfvf->pdev) && chan_cnt > 1) { 774 req->schq[NIX_TXSCH_LVL_SMQ] = chan_cnt; 775 req->schq[NIX_TXSCH_LVL_TL4] = chan_cnt; 776 } 777 778 rc = otx2_sync_mbox_msg(&pfvf->mbox); 779 if (rc) 780 return rc; 781 782 rsp = (struct nix_txsch_alloc_rsp *) 783 otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); 784 if (IS_ERR(rsp)) 785 return PTR_ERR(rsp); 786 787 /* Setup transmit scheduler list */ 788 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 789 pfvf->hw.txschq_cnt[lvl] = rsp->schq[lvl]; 790 for (schq = 0; schq < rsp->schq[lvl]; schq++) 791 pfvf->hw.txschq_list[lvl][schq] = 792 rsp->schq_list[lvl][schq]; 793 } 794 795 pfvf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl; 796 pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio; 797 798 return 0; 799 } 800 801 void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq) 802 { 803 struct nix_txsch_free_req *free_req; 804 int err; 805 806 mutex_lock(&pfvf->mbox.lock); 807 808 free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox); 809 if (!free_req) { 810 mutex_unlock(&pfvf->mbox.lock); 811 netdev_err(pfvf->netdev, 812 "Failed alloc txschq free req\n"); 813 return; 814 } 815 816 free_req->schq_lvl = lvl; 817 free_req->schq = schq; 818 819 err = otx2_sync_mbox_msg(&pfvf->mbox); 820 if (err) { 821 netdev_err(pfvf->netdev, 822 "Failed stop txschq %d at level %d\n", schq, lvl); 823 } 824 825 mutex_unlock(&pfvf->mbox.lock); 826 } 827 EXPORT_SYMBOL(otx2_txschq_free_one); 828 829 void otx2_txschq_stop(struct otx2_nic *pfvf) 830 { 831 int lvl, schq, idx; 832 833 /* free non QOS TLx nodes */ 834 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 835 for (idx = 0; idx < pfvf->hw.txschq_cnt[lvl]; idx++) { 836 otx2_txschq_free_one(pfvf, lvl, 837 pfvf->hw.txschq_list[lvl][idx]); 838 } 839 } 840 841 /* Clear the txschq list */ 842 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 843 for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++) 844 pfvf->hw.txschq_list[lvl][schq] = 0; 845 } 846 847 } 848 849 void otx2_sqb_flush(struct otx2_nic *pfvf) 850 { 851 int qidx, sqe_tail, sqe_head; 852 struct otx2_snd_queue *sq; 853 u64 incr, *ptr, val; 
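	/* Read NIX_LF_SQ_OP_STATUS for each active SQ: the atomic add with
	 * the queue index in the upper 32 bits selects the SQ, and the
	 * returned head/tail pointers are compared so that in-flight SQEs
	 * get a short (50-60us) window to drain.
	 */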
	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
	for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
		sq = &pfvf->qset.sq[qidx];
		if (!sq->sqb_ptrs)
			continue;

		incr = (u64)qidx << 32;
		val = otx2_atomic64_add(incr, ptr);
		sqe_head = (val >> 20) & 0x3F;
		sqe_tail = (val >> 28) & 0x3F;
		if (sqe_head != sqe_tail)
			usleep_range(50, 60);
	}
}

/* RED and drop levels of CQ on packet reception.
 * For CQ, level is a measure of emptiness (0x0 = full, 255 = empty).
 */
#define RQ_PASS_LVL_CQ(skid, qsize)	((((skid) + 16) * 256) / (qsize))
#define RQ_DROP_LVL_CQ(skid, qsize)	(((skid) * 256) / (qsize))

/* RED and drop levels of AURA for packet reception.
 * For AURA, level is a measure of fullness (0x0 = empty, 255 = full).
 * E.g. for an RQ of length 1K with pass/drop levels 204/230:
 * RED accepts pkts if free pointers > 102 and <= 205,
 * and drops pkts if free pointers < 102.
 */
#define RQ_BP_LVL_AURA   (255 - ((85 * 256) / 100)) /* BP when 85% is full */
#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */

static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct nix_aq_enq_req *aq;

	/* Get memory to put this msg */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	aq->rq.cq = qidx;
	aq->rq.ena = 1;
	aq->rq.pb_caching = 1;
	aq->rq.lpb_aura = lpb_aura; /* Use large packet buffer aura */
	aq->rq.lpb_sizem1 = (DMA_BUFFER_LEN(pfvf->rbsize) / 8) - 1;
	aq->rq.xqe_imm_size = 0; /* Copying of packet to CQE not needed */
	aq->rq.flow_tagw = 32; /* Copy full 32bit flow_tag to CQE header */
	aq->rq.qint_idx = 0;
	aq->rq.lpb_drop_ena = 1; /* Enable RED dropping for AURA */
	aq->rq.xqe_drop_ena = 1; /* Enable RED dropping for CQ/SSO */
	aq->rq.xqe_pass = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
	aq->rq.xqe_drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
	aq->rq.lpb_aura_pass = RQ_PASS_LVL_AURA;
	aq->rq.lpb_aura_drop = RQ_DROP_LVL_AURA;

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

int otx2_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura)
{
	struct otx2_nic *pfvf = dev;
	struct otx2_snd_queue *sq;
	struct nix_aq_enq_req *aq;

	sq = &pfvf->qset.sq[qidx];
	sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx));
	/* Get memory to put this msg */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	aq->sq.cq = pfvf->hw.rx_queues + qidx;
	aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
	aq->sq.cq_ena = 1;
	aq->sq.ena = 1;
	aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
	aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
	aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset;
	aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
	aq->sq.sqb_aura = sqb_aura;
	aq->sq.sq_int_ena = NIX_SQINT_BITS;
	aq->sq.qint_idx = 0;
	/* Due to pipelining, a minimum of 2000 unused SQ CQEs
	 * must be maintained to avoid CQ overflow.
945 */ 946 aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt)); 947 948 /* Fill AQ info */ 949 aq->qidx = qidx; 950 aq->ctype = NIX_AQ_CTYPE_SQ; 951 aq->op = NIX_AQ_INSTOP_INIT; 952 953 return otx2_sync_mbox_msg(&pfvf->mbox); 954 } 955 956 int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) 957 { 958 struct otx2_qset *qset = &pfvf->qset; 959 struct otx2_snd_queue *sq; 960 struct otx2_pool *pool; 961 u8 chan_offset; 962 int err; 963 964 pool = &pfvf->qset.pool[sqb_aura]; 965 sq = &qset->sq[qidx]; 966 sq->sqe_size = NIX_SQESZ_W16 ? 64 : 128; 967 sq->sqe_cnt = qset->sqe_cnt; 968 969 err = qmem_alloc(pfvf->dev, &sq->sqe, 1, sq->sqe_size); 970 if (err) 971 return err; 972 973 /* Allocate memory for NIX SQE (which includes NIX SG) and CPT SG. 974 * SG of NIX and CPT are same in size. Allocate memory for CPT SG 975 * same as NIX SQE for base address alignment. 976 * Layout of a NIX SQE and CPT SG entry: 977 * ----------------------------- 978 * | CPT Scatter Gather | 979 * | (SQE SIZE) | 980 * | | 981 * ----------------------------- 982 * | NIX SQE | 983 * | (SQE SIZE) | 984 * | | 985 * ----------------------------- 986 */ 987 err = qmem_alloc(pfvf->dev, &sq->sqe_ring, qset->sqe_cnt, 988 sq->sqe_size * 2); 989 if (err) 990 return err; 991 992 err = qmem_alloc(pfvf->dev, &sq->cpt_resp, qset->sqe_cnt, 64); 993 if (err) 994 return err; 995 996 if (qidx < pfvf->hw.tx_queues) { 997 err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt, 998 TSO_HEADER_SIZE); 999 if (err) 1000 return err; 1001 } 1002 1003 sq->sqe_base = sq->sqe->base; 1004 sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL); 1005 if (!sq->sg) 1006 return -ENOMEM; 1007 1008 if (pfvf->ptp && qidx < pfvf->hw.tx_queues) { 1009 err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt, 1010 sizeof(*sq->timestamps)); 1011 if (err) { 1012 kfree(sq->sg); 1013 sq->sg = NULL; 1014 return err; 1015 } 1016 } 1017 1018 sq->head = 0; 1019 sq->cons_head = 0; 1020 sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1; 1021 sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb; 1022 /* Set SQE threshold to 10% of total SQEs */ 1023 sq->sqe_thresh = ((sq->num_sqbs * sq->sqe_per_sqb) * 10) / 100; 1024 sq->aura_id = sqb_aura; 1025 sq->aura_fc_addr = pool->fc_addr->base; 1026 sq->io_addr = (__force u64)otx2_get_regaddr(pfvf, NIX_LF_OP_SENDX(0)); 1027 1028 sq->stats.bytes = 0; 1029 sq->stats.pkts = 0; 1030 1031 chan_offset = qidx % pfvf->hw.tx_chan_cnt; 1032 err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, chan_offset, sqb_aura); 1033 if (err) { 1034 kfree(sq->sg); 1035 sq->sg = NULL; 1036 return err; 1037 } 1038 1039 return 0; 1040 1041 } 1042 1043 static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) 1044 { 1045 struct otx2_qset *qset = &pfvf->qset; 1046 int err, pool_id, non_xdp_queues; 1047 struct nix_aq_enq_req *aq; 1048 struct otx2_cq_queue *cq; 1049 1050 cq = &qset->cq[qidx]; 1051 cq->cq_idx = qidx; 1052 non_xdp_queues = pfvf->hw.rx_queues + pfvf->hw.tx_queues; 1053 if (qidx < pfvf->hw.rx_queues) { 1054 cq->cq_type = CQ_RX; 1055 cq->cint_idx = qidx; 1056 cq->cqe_cnt = qset->rqe_cnt; 1057 if (pfvf->xdp_prog) 1058 xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0); 1059 } else if (qidx < non_xdp_queues) { 1060 cq->cq_type = CQ_TX; 1061 cq->cint_idx = qidx - pfvf->hw.rx_queues; 1062 cq->cqe_cnt = qset->sqe_cnt; 1063 } else { 1064 if (pfvf->hw.xdp_queues && 1065 qidx < non_xdp_queues + pfvf->hw.xdp_queues) { 1066 cq->cq_type = CQ_XDP; 1067 cq->cint_idx = qidx - non_xdp_queues; 1068 cq->cqe_cnt = 
qset->sqe_cnt; 1069 } else { 1070 cq->cq_type = CQ_QOS; 1071 cq->cint_idx = qidx - non_xdp_queues - 1072 pfvf->hw.xdp_queues; 1073 cq->cqe_cnt = qset->sqe_cnt; 1074 } 1075 } 1076 cq->cqe_size = pfvf->qset.xqe_size; 1077 1078 /* Allocate memory for CQEs */ 1079 err = qmem_alloc(pfvf->dev, &cq->cqe, cq->cqe_cnt, cq->cqe_size); 1080 if (err) 1081 return err; 1082 1083 /* Save CQE CPU base for faster reference */ 1084 cq->cqe_base = cq->cqe->base; 1085 /* In case where all RQs auras point to single pool, 1086 * all CQs receive buffer pool also point to same pool. 1087 */ 1088 pool_id = ((cq->cq_type == CQ_RX) && 1089 (pfvf->hw.rqpool_cnt != pfvf->hw.rx_queues)) ? 0 : qidx; 1090 cq->rbpool = &qset->pool[pool_id]; 1091 cq->refill_task_sched = false; 1092 1093 /* Get memory to put this msg */ 1094 aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); 1095 if (!aq) 1096 return -ENOMEM; 1097 1098 aq->cq.ena = 1; 1099 aq->cq.qsize = Q_SIZE(cq->cqe_cnt, 4); 1100 aq->cq.caching = 1; 1101 aq->cq.base = cq->cqe->iova; 1102 aq->cq.cint_idx = cq->cint_idx; 1103 aq->cq.cq_err_int_ena = NIX_CQERRINT_BITS; 1104 aq->cq.qint_idx = 0; 1105 aq->cq.avg_level = 255; 1106 1107 if (qidx < pfvf->hw.rx_queues) { 1108 aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt); 1109 aq->cq.drop_ena = 1; 1110 1111 if (!is_otx2_lbkvf(pfvf->pdev)) { 1112 /* Enable receive CQ backpressure */ 1113 aq->cq.bp_ena = 1; 1114 #ifdef CONFIG_DCB 1115 aq->cq.bpid = pfvf->bpid[pfvf->queue_to_pfc_map[qidx]]; 1116 #else 1117 aq->cq.bpid = pfvf->bpid[0]; 1118 #endif 1119 1120 /* Set backpressure level is same as cq pass level */ 1121 aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt); 1122 } 1123 } 1124 1125 /* Fill AQ info */ 1126 aq->qidx = qidx; 1127 aq->ctype = NIX_AQ_CTYPE_CQ; 1128 aq->op = NIX_AQ_INSTOP_INIT; 1129 1130 return otx2_sync_mbox_msg(&pfvf->mbox); 1131 } 1132 1133 static void otx2_pool_refill_task(struct work_struct *work) 1134 { 1135 struct otx2_cq_queue *cq; 1136 struct refill_work *wrk; 1137 struct otx2_nic *pfvf; 1138 int qidx; 1139 1140 wrk = container_of(work, struct refill_work, pool_refill_work.work); 1141 pfvf = wrk->pf; 1142 qidx = wrk - pfvf->refill_wrk; 1143 cq = &pfvf->qset.cq[qidx]; 1144 1145 cq->refill_task_sched = false; 1146 1147 local_bh_disable(); 1148 napi_schedule(wrk->napi); 1149 local_bh_enable(); 1150 } 1151 1152 int otx2_config_nix_queues(struct otx2_nic *pfvf) 1153 { 1154 int qidx, err; 1155 1156 /* Initialize RX queues */ 1157 for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) { 1158 u16 lpb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx); 1159 1160 err = otx2_rq_init(pfvf, qidx, lpb_aura); 1161 if (err) 1162 return err; 1163 } 1164 1165 /* Initialize TX queues */ 1166 for (qidx = 0; qidx < pfvf->hw.non_qos_queues; qidx++) { 1167 u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); 1168 1169 err = otx2_sq_init(pfvf, qidx, sqb_aura); 1170 if (err) 1171 return err; 1172 } 1173 1174 /* Initialize completion queues */ 1175 for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) { 1176 err = otx2_cq_init(pfvf, qidx); 1177 if (err) 1178 return err; 1179 } 1180 1181 pfvf->cq_op_addr = (__force u64 *)otx2_get_regaddr(pfvf, 1182 NIX_LF_CQ_OP_STATUS); 1183 1184 /* Initialize work queue for receive buffer refill */ 1185 pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt, 1186 sizeof(struct refill_work), GFP_KERNEL); 1187 if (!pfvf->refill_wrk) 1188 return -ENOMEM; 1189 1190 for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) { 1191 pfvf->refill_wrk[qidx].pf = pfvf; 1192 
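	/* Per-CQ deferred work for receive buffer refill: when scheduled,
	 * otx2_pool_refill_task() above clears refill_task_sched and
	 * re-runs NAPI so the datapath can retry buffer allocation.
	 */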
INIT_DELAYED_WORK(&pfvf->refill_wrk[qidx].pool_refill_work, 1193 otx2_pool_refill_task); 1194 } 1195 return 0; 1196 } 1197 1198 int otx2_config_nix(struct otx2_nic *pfvf) 1199 { 1200 struct nix_lf_alloc_req *nixlf; 1201 struct nix_lf_alloc_rsp *rsp; 1202 int err; 1203 1204 pfvf->qset.xqe_size = pfvf->hw.xqe_size; 1205 1206 /* Get memory to put this msg */ 1207 nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox); 1208 if (!nixlf) 1209 return -ENOMEM; 1210 1211 /* Set RQ/SQ/CQ counts */ 1212 nixlf->rq_cnt = pfvf->hw.rx_queues; 1213 nixlf->sq_cnt = otx2_get_total_tx_queues(pfvf); 1214 nixlf->cq_cnt = pfvf->qset.cq_cnt; 1215 nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE; 1216 nixlf->rss_grps = MAX_RSS_GROUPS; 1217 nixlf->xqe_sz = pfvf->hw.xqe_size == 128 ? NIX_XQESZ_W16 : NIX_XQESZ_W64; 1218 /* We don't know absolute NPA LF idx attached. 1219 * AF will replace 'RVU_DEFAULT_PF_FUNC' with 1220 * NPA LF attached to this RVU PF/VF. 1221 */ 1222 nixlf->npa_func = RVU_DEFAULT_PF_FUNC; 1223 /* Disable alignment pad, enable L2 length check, 1224 * enable L4 TCP/UDP checksum verification. 1225 */ 1226 nixlf->rx_cfg = BIT_ULL(33) | BIT_ULL(35) | BIT_ULL(37); 1227 1228 err = otx2_sync_mbox_msg(&pfvf->mbox); 1229 if (err) 1230 return err; 1231 1232 rsp = (struct nix_lf_alloc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, 1233 &nixlf->hdr); 1234 if (IS_ERR(rsp)) 1235 return PTR_ERR(rsp); 1236 1237 if (rsp->qints < 1) 1238 return -ENXIO; 1239 1240 return rsp->hdr.rc; 1241 } 1242 1243 void otx2_sq_free_sqbs(struct otx2_nic *pfvf) 1244 { 1245 struct otx2_qset *qset = &pfvf->qset; 1246 struct otx2_hw *hw = &pfvf->hw; 1247 struct otx2_snd_queue *sq; 1248 int sqb, qidx; 1249 u64 iova, pa; 1250 1251 for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) { 1252 sq = &qset->sq[qidx]; 1253 if (!sq->sqb_ptrs) 1254 continue; 1255 for (sqb = 0; sqb < sq->sqb_count; sqb++) { 1256 if (!sq->sqb_ptrs[sqb]) 1257 continue; 1258 iova = sq->sqb_ptrs[sqb]; 1259 pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); 1260 dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size, 1261 DMA_FROM_DEVICE, 1262 DMA_ATTR_SKIP_CPU_SYNC); 1263 put_page(virt_to_page(phys_to_virt(pa))); 1264 } 1265 sq->sqb_count = 0; 1266 } 1267 } 1268 1269 void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool, 1270 u64 iova, int size) 1271 { 1272 struct page *page; 1273 u64 pa; 1274 1275 pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); 1276 page = virt_to_head_page(phys_to_virt(pa)); 1277 1278 if (pool->page_pool) { 1279 page_pool_put_full_page(pool->page_pool, page, true); 1280 } else { 1281 dma_unmap_page_attrs(pfvf->dev, iova, size, 1282 DMA_FROM_DEVICE, 1283 DMA_ATTR_SKIP_CPU_SYNC); 1284 1285 put_page(page); 1286 } 1287 } 1288 1289 void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type) 1290 { 1291 int pool_id, pool_start = 0, pool_end = 0, size = 0; 1292 struct otx2_pool *pool; 1293 u64 iova; 1294 1295 if (type == AURA_NIX_SQ) { 1296 pool_start = otx2_get_pool_idx(pfvf, type, 0); 1297 pool_end = pool_start + pfvf->hw.sqpool_cnt; 1298 size = pfvf->hw.sqb_size; 1299 } 1300 if (type == AURA_NIX_RQ) { 1301 pool_start = otx2_get_pool_idx(pfvf, type, 0); 1302 pool_end = pfvf->hw.rqpool_cnt; 1303 size = pfvf->rbsize; 1304 } 1305 1306 /* Free SQB and RQB pointers from the aura pool */ 1307 for (pool_id = pool_start; pool_id < pool_end; pool_id++) { 1308 iova = otx2_aura_allocptr(pfvf, pool_id); 1309 pool = &pfvf->qset.pool[pool_id]; 1310 while (iova) { 1311 if (type == AURA_NIX_RQ) 1312 iova -= OTX2_HEAD_ROOM; 1313 1314 otx2_free_bufs(pfvf, pool, iova, 
size); 1315 1316 iova = otx2_aura_allocptr(pfvf, pool_id); 1317 } 1318 } 1319 } 1320 1321 void otx2_aura_pool_free(struct otx2_nic *pfvf) 1322 { 1323 struct otx2_pool *pool; 1324 int pool_id; 1325 1326 if (!pfvf->qset.pool) 1327 return; 1328 1329 for (pool_id = 0; pool_id < pfvf->hw.pool_cnt; pool_id++) { 1330 pool = &pfvf->qset.pool[pool_id]; 1331 qmem_free(pfvf->dev, pool->stack); 1332 qmem_free(pfvf->dev, pool->fc_addr); 1333 page_pool_destroy(pool->page_pool); 1334 pool->page_pool = NULL; 1335 } 1336 devm_kfree(pfvf->dev, pfvf->qset.pool); 1337 pfvf->qset.pool = NULL; 1338 } 1339 1340 int otx2_aura_init(struct otx2_nic *pfvf, int aura_id, 1341 int pool_id, int numptrs) 1342 { 1343 struct npa_aq_enq_req *aq; 1344 struct otx2_pool *pool; 1345 int err; 1346 1347 pool = &pfvf->qset.pool[pool_id]; 1348 1349 /* Allocate memory for HW to update Aura count. 1350 * Alloc one cache line, so that it fits all FC_STYPE modes. 1351 */ 1352 if (!pool->fc_addr) { 1353 err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN); 1354 if (err) 1355 return err; 1356 } 1357 1358 /* Initialize this aura's context via AF */ 1359 aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); 1360 if (!aq) { 1361 /* Shared mbox memory buffer is full, flush it and retry */ 1362 err = otx2_sync_mbox_msg(&pfvf->mbox); 1363 if (err) 1364 return err; 1365 aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); 1366 if (!aq) 1367 return -ENOMEM; 1368 } 1369 1370 aq->aura_id = aura_id; 1371 /* Will be filled by AF with correct pool context address */ 1372 aq->aura.pool_addr = pool_id; 1373 aq->aura.pool_caching = 1; 1374 aq->aura.shift = ilog2(numptrs) - 8; 1375 aq->aura.count = numptrs; 1376 aq->aura.limit = numptrs; 1377 aq->aura.avg_level = 255; 1378 aq->aura.ena = 1; 1379 aq->aura.fc_ena = 1; 1380 aq->aura.fc_addr = pool->fc_addr->iova; 1381 aq->aura.fc_hyst_bits = 0; /* Store count on all updates */ 1382 1383 /* Enable backpressure for RQ aura */ 1384 if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) { 1385 aq->aura.bp_ena = 0; 1386 /* If NIX1 LF is attached then specify NIX1_RX. 1387 * 1388 * Below NPA_AURA_S[BP_ENA] is set according to the 1389 * NPA_BPINTF_E enumeration given as: 1390 * 0x0 + a*0x1 where 'a' is 0 for NIX0_RX and 1 for NIX1_RX so 1391 * NIX0_RX is 0x0 + 0*0x1 = 0 1392 * NIX1_RX is 0x0 + 1*0x1 = 1 1393 * But in HRM it is given that 1394 * "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to 1395 * NIX-RX based on [BP] level. One bit per NIX-RX; index 1396 * enumerated by NPA_BPINTF_E." 
1397 */ 1398 if (pfvf->nix_blkaddr == BLKADDR_NIX1) 1399 aq->aura.bp_ena = 1; 1400 #ifdef CONFIG_DCB 1401 aq->aura.nix0_bpid = pfvf->bpid[pfvf->queue_to_pfc_map[aura_id]]; 1402 #else 1403 aq->aura.nix0_bpid = pfvf->bpid[0]; 1404 #endif 1405 1406 /* Set backpressure level for RQ's Aura */ 1407 aq->aura.bp = RQ_BP_LVL_AURA; 1408 } 1409 1410 /* Fill AQ info */ 1411 aq->ctype = NPA_AQ_CTYPE_AURA; 1412 aq->op = NPA_AQ_INSTOP_INIT; 1413 1414 return 0; 1415 } 1416 1417 int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id, 1418 int stack_pages, int numptrs, int buf_size, int type) 1419 { 1420 struct page_pool_params pp_params = { 0 }; 1421 struct npa_aq_enq_req *aq; 1422 struct otx2_pool *pool; 1423 int err; 1424 1425 pool = &pfvf->qset.pool[pool_id]; 1426 /* Alloc memory for stack which is used to store buffer pointers */ 1427 err = qmem_alloc(pfvf->dev, &pool->stack, 1428 stack_pages, pfvf->hw.stack_pg_bytes); 1429 if (err) 1430 return err; 1431 1432 pool->rbsize = buf_size; 1433 1434 /* Initialize this pool's context via AF */ 1435 aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); 1436 if (!aq) { 1437 /* Shared mbox memory buffer is full, flush it and retry */ 1438 err = otx2_sync_mbox_msg(&pfvf->mbox); 1439 if (err) { 1440 qmem_free(pfvf->dev, pool->stack); 1441 return err; 1442 } 1443 aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); 1444 if (!aq) { 1445 qmem_free(pfvf->dev, pool->stack); 1446 return -ENOMEM; 1447 } 1448 } 1449 1450 aq->aura_id = pool_id; 1451 aq->pool.stack_base = pool->stack->iova; 1452 aq->pool.stack_caching = 1; 1453 aq->pool.ena = 1; 1454 aq->pool.buf_size = buf_size / 128; 1455 aq->pool.stack_max_pages = stack_pages; 1456 aq->pool.shift = ilog2(numptrs) - 8; 1457 aq->pool.ptr_start = 0; 1458 aq->pool.ptr_end = ~0ULL; 1459 1460 /* Fill AQ info */ 1461 aq->ctype = NPA_AQ_CTYPE_POOL; 1462 aq->op = NPA_AQ_INSTOP_INIT; 1463 1464 if (type != AURA_NIX_RQ) { 1465 pool->page_pool = NULL; 1466 return 0; 1467 } 1468 1469 pp_params.order = get_order(buf_size); 1470 pp_params.flags = PP_FLAG_DMA_MAP; 1471 pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs); 1472 pp_params.nid = NUMA_NO_NODE; 1473 pp_params.dev = pfvf->dev; 1474 pp_params.dma_dir = DMA_FROM_DEVICE; 1475 pool->page_pool = page_pool_create(&pp_params); 1476 if (IS_ERR(pool->page_pool)) { 1477 netdev_err(pfvf->netdev, "Creation of page pool failed\n"); 1478 return PTR_ERR(pool->page_pool); 1479 } 1480 1481 return 0; 1482 } 1483 1484 int otx2_sq_aura_pool_init(struct otx2_nic *pfvf) 1485 { 1486 int qidx, pool_id, stack_pages, num_sqbs; 1487 struct otx2_qset *qset = &pfvf->qset; 1488 struct otx2_hw *hw = &pfvf->hw; 1489 struct otx2_snd_queue *sq; 1490 struct otx2_pool *pool; 1491 dma_addr_t bufptr; 1492 int err, ptr; 1493 1494 /* Calculate number of SQBs needed. 1495 * 1496 * For a 128byte SQE, and 4K size SQB, 31 SQEs will fit in one SQB. 1497 * Last SQE is used for pointing to next SQB. 
1498 */ 1499 num_sqbs = (hw->sqb_size / 128) - 1; 1500 num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs; 1501 1502 /* Get no of stack pages needed */ 1503 stack_pages = 1504 (num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs; 1505 1506 for (qidx = 0; qidx < hw->non_qos_queues; qidx++) { 1507 pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); 1508 /* Initialize aura context */ 1509 err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs); 1510 if (err) 1511 goto fail; 1512 1513 /* Initialize pool context */ 1514 err = otx2_pool_init(pfvf, pool_id, stack_pages, 1515 num_sqbs, hw->sqb_size, AURA_NIX_SQ); 1516 if (err) 1517 goto fail; 1518 } 1519 1520 /* Flush accumulated messages */ 1521 err = otx2_sync_mbox_msg(&pfvf->mbox); 1522 if (err) 1523 goto fail; 1524 1525 /* Allocate pointers and free them to aura/pool */ 1526 for (qidx = 0; qidx < hw->non_qos_queues; qidx++) { 1527 pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); 1528 pool = &pfvf->qset.pool[pool_id]; 1529 1530 sq = &qset->sq[qidx]; 1531 sq->sqb_count = 0; 1532 sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL); 1533 if (!sq->sqb_ptrs) { 1534 err = -ENOMEM; 1535 goto err_mem; 1536 } 1537 1538 for (ptr = 0; ptr < num_sqbs; ptr++) { 1539 err = otx2_alloc_rbuf(pfvf, pool, &bufptr); 1540 if (err) 1541 goto err_mem; 1542 pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr); 1543 sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr; 1544 } 1545 } 1546 1547 err_mem: 1548 return err ? -ENOMEM : 0; 1549 1550 fail: 1551 otx2_mbox_reset(&pfvf->mbox.mbox, 0); 1552 otx2_aura_pool_free(pfvf); 1553 return err; 1554 } 1555 1556 int otx2_rq_aura_pool_init(struct otx2_nic *pfvf) 1557 { 1558 struct otx2_hw *hw = &pfvf->hw; 1559 int stack_pages, pool_id, rq; 1560 struct otx2_pool *pool; 1561 int err, ptr, num_ptrs; 1562 dma_addr_t bufptr; 1563 1564 num_ptrs = pfvf->qset.rqe_cnt; 1565 1566 stack_pages = 1567 (num_ptrs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs; 1568 1569 for (rq = 0; rq < hw->rx_queues; rq++) { 1570 pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, rq); 1571 /* Initialize aura context */ 1572 err = otx2_aura_init(pfvf, pool_id, pool_id, num_ptrs); 1573 if (err) 1574 goto fail; 1575 } 1576 for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) { 1577 err = otx2_pool_init(pfvf, pool_id, stack_pages, 1578 num_ptrs, pfvf->rbsize, AURA_NIX_RQ); 1579 if (err) 1580 goto fail; 1581 } 1582 1583 /* Flush accumulated messages */ 1584 err = otx2_sync_mbox_msg(&pfvf->mbox); 1585 if (err) 1586 goto fail; 1587 1588 /* Allocate pointers and free them to aura/pool */ 1589 for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) { 1590 pool = &pfvf->qset.pool[pool_id]; 1591 for (ptr = 0; ptr < num_ptrs; ptr++) { 1592 err = otx2_alloc_rbuf(pfvf, pool, &bufptr); 1593 if (err) 1594 return -ENOMEM; 1595 pfvf->hw_ops->aura_freeptr(pfvf, pool_id, 1596 bufptr + OTX2_HEAD_ROOM); 1597 } 1598 } 1599 return 0; 1600 fail: 1601 otx2_mbox_reset(&pfvf->mbox.mbox, 0); 1602 otx2_aura_pool_free(pfvf); 1603 return err; 1604 } 1605 1606 int otx2_config_npa(struct otx2_nic *pfvf) 1607 { 1608 struct otx2_qset *qset = &pfvf->qset; 1609 struct npa_lf_alloc_req *npalf; 1610 struct otx2_hw *hw = &pfvf->hw; 1611 int aura_cnt; 1612 1613 /* Pool - Stack of free buffer pointers 1614 * Aura - Alloc/frees pointers from/to pool for NIX DMA. 
1615 */ 1616 1617 if (!hw->pool_cnt) 1618 return -EINVAL; 1619 1620 qset->pool = devm_kcalloc(pfvf->dev, hw->pool_cnt, 1621 sizeof(struct otx2_pool), GFP_KERNEL); 1622 if (!qset->pool) 1623 return -ENOMEM; 1624 1625 /* Get memory to put this msg */ 1626 npalf = otx2_mbox_alloc_msg_npa_lf_alloc(&pfvf->mbox); 1627 if (!npalf) 1628 return -ENOMEM; 1629 1630 /* Set aura and pool counts */ 1631 npalf->nr_pools = hw->pool_cnt; 1632 aura_cnt = ilog2(roundup_pow_of_two(hw->pool_cnt)); 1633 npalf->aura_sz = (aura_cnt >= ilog2(128)) ? (aura_cnt - 6) : 1; 1634 1635 return otx2_sync_mbox_msg(&pfvf->mbox); 1636 } 1637 1638 int otx2_detach_resources(struct mbox *mbox) 1639 { 1640 struct rsrc_detach *detach; 1641 1642 mutex_lock(&mbox->lock); 1643 detach = otx2_mbox_alloc_msg_detach_resources(mbox); 1644 if (!detach) { 1645 mutex_unlock(&mbox->lock); 1646 return -ENOMEM; 1647 } 1648 1649 /* detach all */ 1650 detach->partial = false; 1651 1652 /* Send detach request to AF */ 1653 otx2_sync_mbox_msg(mbox); 1654 mutex_unlock(&mbox->lock); 1655 return 0; 1656 } 1657 EXPORT_SYMBOL(otx2_detach_resources); 1658 1659 int otx2_attach_npa_nix(struct otx2_nic *pfvf) 1660 { 1661 struct rsrc_attach *attach; 1662 struct msg_req *msix; 1663 int err; 1664 1665 mutex_lock(&pfvf->mbox.lock); 1666 /* Get memory to put this msg */ 1667 attach = otx2_mbox_alloc_msg_attach_resources(&pfvf->mbox); 1668 if (!attach) { 1669 mutex_unlock(&pfvf->mbox.lock); 1670 return -ENOMEM; 1671 } 1672 1673 attach->npalf = true; 1674 attach->nixlf = true; 1675 1676 /* Send attach request to AF */ 1677 err = otx2_sync_mbox_msg(&pfvf->mbox); 1678 if (err) { 1679 mutex_unlock(&pfvf->mbox.lock); 1680 return err; 1681 } 1682 1683 pfvf->nix_blkaddr = BLKADDR_NIX0; 1684 1685 /* If the platform has two NIX blocks then LF may be 1686 * allocated from NIX1. 
1687 */ 1688 if (otx2_read64(pfvf, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_NIX1)) & 0x1FFULL) 1689 pfvf->nix_blkaddr = BLKADDR_NIX1; 1690 1691 /* Get NPA and NIX MSIX vector offsets */ 1692 msix = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox); 1693 if (!msix) { 1694 mutex_unlock(&pfvf->mbox.lock); 1695 return -ENOMEM; 1696 } 1697 1698 err = otx2_sync_mbox_msg(&pfvf->mbox); 1699 if (err) { 1700 mutex_unlock(&pfvf->mbox.lock); 1701 return err; 1702 } 1703 mutex_unlock(&pfvf->mbox.lock); 1704 1705 if (pfvf->hw.npa_msixoff == MSIX_VECTOR_INVALID || 1706 pfvf->hw.nix_msixoff == MSIX_VECTOR_INVALID) { 1707 dev_err(pfvf->dev, 1708 "RVUPF: Invalid MSIX vector offset for NPA/NIX\n"); 1709 return -EINVAL; 1710 } 1711 1712 return 0; 1713 } 1714 EXPORT_SYMBOL(otx2_attach_npa_nix); 1715 1716 void otx2_ctx_disable(struct mbox *mbox, int type, bool npa) 1717 { 1718 struct hwctx_disable_req *req; 1719 1720 mutex_lock(&mbox->lock); 1721 /* Request AQ to disable this context */ 1722 if (npa) 1723 req = otx2_mbox_alloc_msg_npa_hwctx_disable(mbox); 1724 else 1725 req = otx2_mbox_alloc_msg_nix_hwctx_disable(mbox); 1726 1727 if (!req) { 1728 mutex_unlock(&mbox->lock); 1729 return; 1730 } 1731 1732 req->ctype = type; 1733 1734 if (otx2_sync_mbox_msg(mbox)) 1735 dev_err(mbox->pfvf->dev, "%s failed to disable context\n", 1736 __func__); 1737 1738 mutex_unlock(&mbox->lock); 1739 } 1740 1741 int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable) 1742 { 1743 struct nix_bp_cfg_req *req; 1744 1745 if (enable) 1746 req = otx2_mbox_alloc_msg_nix_bp_enable(&pfvf->mbox); 1747 else 1748 req = otx2_mbox_alloc_msg_nix_bp_disable(&pfvf->mbox); 1749 1750 if (!req) 1751 return -ENOMEM; 1752 1753 req->chan_base = 0; 1754 if (otx2_is_pfc_enabled(pfvf)) { 1755 req->chan_cnt = IEEE_8021QAZ_MAX_TCS; 1756 req->bpid_per_chan = 1; 1757 } else { 1758 req->chan_cnt = 1; 1759 req->bpid_per_chan = 0; 1760 } 1761 1762 return otx2_sync_mbox_msg(&pfvf->mbox); 1763 } 1764 EXPORT_SYMBOL(otx2_nix_config_bp); 1765 1766 int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable) 1767 { 1768 struct nix_bp_cfg_req *req; 1769 1770 if (enable) 1771 req = otx2_mbox_alloc_msg_nix_cpt_bp_enable(&pfvf->mbox); 1772 else 1773 req = otx2_mbox_alloc_msg_nix_cpt_bp_disable(&pfvf->mbox); 1774 1775 if (!req) 1776 return -ENOMEM; 1777 1778 req->chan_base = 0; 1779 if (otx2_is_pfc_enabled(pfvf)) { 1780 req->chan_cnt = IEEE_8021QAZ_MAX_TCS; 1781 req->bpid_per_chan = 1; 1782 } else { 1783 req->chan_cnt = 1; 1784 req->bpid_per_chan = 0; 1785 } 1786 1787 return otx2_sync_mbox_msg(&pfvf->mbox); 1788 } 1789 EXPORT_SYMBOL(otx2_nix_cpt_config_bp); 1790 1791 /* Mbox message handlers */ 1792 void mbox_handler_cgx_stats(struct otx2_nic *pfvf, 1793 struct cgx_stats_rsp *rsp) 1794 { 1795 int id; 1796 1797 for (id = 0; id < CGX_RX_STATS_COUNT; id++) 1798 pfvf->hw.cgx_rx_stats[id] = rsp->rx_stats[id]; 1799 for (id = 0; id < CGX_TX_STATS_COUNT; id++) 1800 pfvf->hw.cgx_tx_stats[id] = rsp->tx_stats[id]; 1801 } 1802 1803 void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf, 1804 struct cgx_fec_stats_rsp *rsp) 1805 { 1806 pfvf->hw.cgx_fec_corr_blks += rsp->fec_corr_blks; 1807 pfvf->hw.cgx_fec_uncorr_blks += rsp->fec_uncorr_blks; 1808 } 1809 1810 void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf, 1811 struct npa_lf_alloc_rsp *rsp) 1812 { 1813 pfvf->hw.stack_pg_ptrs = rsp->stack_pg_ptrs; 1814 pfvf->hw.stack_pg_bytes = rsp->stack_pg_bytes; 1815 } 1816 EXPORT_SYMBOL(mbox_handler_npa_lf_alloc); 1817 1818 void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf, 1819 struct nix_lf_alloc_rsp 
*rsp) 1820 { 1821 pfvf->hw.sqb_size = rsp->sqb_size; 1822 pfvf->hw.rx_chan_base = rsp->rx_chan_base; 1823 pfvf->hw.tx_chan_base = rsp->tx_chan_base; 1824 pfvf->hw.rx_chan_cnt = rsp->rx_chan_cnt; 1825 pfvf->hw.tx_chan_cnt = rsp->tx_chan_cnt; 1826 pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx; 1827 pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx; 1828 pfvf->hw.cgx_links = rsp->cgx_links; 1829 pfvf->hw.lbk_links = rsp->lbk_links; 1830 pfvf->hw.tx_link = rsp->tx_link; 1831 } 1832 EXPORT_SYMBOL(mbox_handler_nix_lf_alloc); 1833 1834 void mbox_handler_msix_offset(struct otx2_nic *pfvf, 1835 struct msix_offset_rsp *rsp) 1836 { 1837 pfvf->hw.npa_msixoff = rsp->npa_msixoff; 1838 pfvf->hw.nix_msixoff = rsp->nix_msixoff; 1839 } 1840 EXPORT_SYMBOL(mbox_handler_msix_offset); 1841 1842 void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf, 1843 struct nix_bp_cfg_rsp *rsp) 1844 { 1845 int chan, chan_id; 1846 1847 for (chan = 0; chan < rsp->chan_cnt; chan++) { 1848 chan_id = ((rsp->chan_bpid[chan] >> 10) & 0x7F); 1849 pfvf->bpid[chan_id] = rsp->chan_bpid[chan] & 0x3FF; 1850 } 1851 } 1852 EXPORT_SYMBOL(mbox_handler_nix_bp_enable); 1853 1854 void otx2_free_cints(struct otx2_nic *pfvf, int n) 1855 { 1856 struct otx2_qset *qset = &pfvf->qset; 1857 struct otx2_hw *hw = &pfvf->hw; 1858 int irq, qidx; 1859 1860 for (qidx = 0, irq = hw->nix_msixoff + NIX_LF_CINT_VEC_START; 1861 qidx < n; 1862 qidx++, irq++) { 1863 int vector = pci_irq_vector(pfvf->pdev, irq); 1864 1865 irq_set_affinity_hint(vector, NULL); 1866 free_cpumask_var(hw->affinity_mask[irq]); 1867 free_irq(vector, &qset->napi[qidx]); 1868 } 1869 } 1870 EXPORT_SYMBOL(otx2_free_cints); 1871 1872 void otx2_set_cints_affinity(struct otx2_nic *pfvf) 1873 { 1874 struct otx2_hw *hw = &pfvf->hw; 1875 int vec, cpu, irq, cint; 1876 1877 vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START; 1878 cpu = cpumask_first(cpu_online_mask); 1879 1880 /* CQ interrupts */ 1881 for (cint = 0; cint < pfvf->hw.cint_cnt; cint++, vec++) { 1882 if (!alloc_cpumask_var(&hw->affinity_mask[vec], GFP_KERNEL)) 1883 return; 1884 1885 cpumask_set_cpu(cpu, hw->affinity_mask[vec]); 1886 1887 irq = pci_irq_vector(pfvf->pdev, vec); 1888 irq_set_affinity_hint(irq, hw->affinity_mask[vec]); 1889 1890 cpu = cpumask_next(cpu, cpu_online_mask); 1891 if (unlikely(cpu >= nr_cpu_ids)) 1892 cpu = 0; 1893 } 1894 } 1895 1896 static u32 get_dwrr_mtu(struct otx2_nic *pfvf, struct nix_hw_info *hw) 1897 { 1898 if (is_otx2_lbkvf(pfvf->pdev)) { 1899 pfvf->hw.smq_link_type = SMQ_LINK_TYPE_LBK; 1900 return hw->lbk_dwrr_mtu; 1901 } 1902 1903 pfvf->hw.smq_link_type = SMQ_LINK_TYPE_RPM; 1904 return hw->rpm_dwrr_mtu; 1905 } 1906 1907 u16 otx2_get_max_mtu(struct otx2_nic *pfvf) 1908 { 1909 struct nix_hw_info *rsp; 1910 struct msg_req *req; 1911 u16 max_mtu; 1912 int rc; 1913 1914 mutex_lock(&pfvf->mbox.lock); 1915 1916 req = otx2_mbox_alloc_msg_nix_get_hw_info(&pfvf->mbox); 1917 if (!req) { 1918 rc = -ENOMEM; 1919 goto out; 1920 } 1921 1922 rc = otx2_sync_mbox_msg(&pfvf->mbox); 1923 if (!rc) { 1924 rsp = (struct nix_hw_info *) 1925 otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); 1926 if (IS_ERR(rsp)) { 1927 rc = PTR_ERR(rsp); 1928 goto out; 1929 } 1930 1931 /* HW counts VLAN insertion bytes (8 for double tag) 1932 * irrespective of whether SQE is requesting to insert VLAN 1933 * in the packet or not. 
Hence these 8 bytes have to be 1934 * discounted from max packet size otherwise HW will throw 1935 * SMQ errors 1936 */ 1937 max_mtu = rsp->max_mtu - 8 - OTX2_ETH_HLEN; 1938 1939 /* Also save DWRR MTU, needed for DWRR weight calculation */ 1940 pfvf->hw.dwrr_mtu = get_dwrr_mtu(pfvf, rsp); 1941 if (!pfvf->hw.dwrr_mtu) 1942 pfvf->hw.dwrr_mtu = 1; 1943 } 1944 1945 out: 1946 mutex_unlock(&pfvf->mbox.lock); 1947 if (rc) { 1948 dev_warn(pfvf->dev, 1949 "Failed to get MTU from hardware setting default value(1500)\n"); 1950 max_mtu = 1500; 1951 } 1952 return max_mtu; 1953 } 1954 EXPORT_SYMBOL(otx2_get_max_mtu); 1955 1956 int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t features) 1957 { 1958 netdev_features_t changed = features ^ netdev->features; 1959 struct otx2_nic *pfvf = netdev_priv(netdev); 1960 bool ntuple = !!(features & NETIF_F_NTUPLE); 1961 bool tc = !!(features & NETIF_F_HW_TC); 1962 1963 if ((changed & NETIF_F_NTUPLE) && !ntuple) 1964 otx2_destroy_ntuple_flows(pfvf); 1965 1966 if ((changed & NETIF_F_NTUPLE) && ntuple) { 1967 if (!pfvf->flow_cfg->max_flows) { 1968 netdev_err(netdev, 1969 "Can't enable NTUPLE, MCAM entries not allocated\n"); 1970 return -EINVAL; 1971 } 1972 } 1973 1974 if ((changed & NETIF_F_HW_TC) && !tc && 1975 otx2_tc_flower_rule_cnt(pfvf)) { 1976 netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n"); 1977 return -EBUSY; 1978 } 1979 1980 if ((changed & NETIF_F_NTUPLE) && ntuple && 1981 otx2_tc_flower_rule_cnt(pfvf) && !(changed & NETIF_F_HW_TC)) { 1982 netdev_err(netdev, 1983 "Can't enable NTUPLE when TC flower offload is active, disable TC rules and retry\n"); 1984 return -EINVAL; 1985 } 1986 1987 return 0; 1988 } 1989 EXPORT_SYMBOL(otx2_handle_ntuple_tc_features); 1990 1991 #define M(_name, _id, _fn_name, _req_type, _rsp_type) \ 1992 int __weak \ 1993 otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \ 1994 struct _req_type *req, \ 1995 struct _rsp_type *rsp) \ 1996 { \ 1997 /* Nothing to do here */ \ 1998 return 0; \ 1999 } \ 2000 EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name); 2001 MBOX_UP_CGX_MESSAGES 2002 MBOX_UP_MCS_MESSAGES 2003 #undef M 2004 2005 dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf, 2006 struct sk_buff *skb, int seg, int *len) 2007 { 2008 enum dma_data_direction dir = DMA_TO_DEVICE; 2009 const skb_frag_t *frag; 2010 struct page *page; 2011 int offset; 2012 2013 /* Crypto hardware need write permission for ipsec crypto offload */ 2014 if (unlikely(xfrm_offload(skb))) { 2015 dir = DMA_BIDIRECTIONAL; 2016 skb = skb_unshare(skb, GFP_ATOMIC); 2017 } 2018 2019 /* First segment is always skb->data */ 2020 if (!seg) { 2021 page = virt_to_page(skb->data); 2022 offset = offset_in_page(skb->data); 2023 *len = skb_headlen(skb); 2024 } else { 2025 frag = &skb_shinfo(skb)->frags[seg - 1]; 2026 page = skb_frag_page(frag); 2027 offset = skb_frag_off(frag); 2028 *len = skb_frag_size(frag); 2029 } 2030 return otx2_dma_map_page(pfvf, page, offset, *len, dir); 2031 } 2032 2033 void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg) 2034 { 2035 enum dma_data_direction dir = DMA_TO_DEVICE; 2036 struct sk_buff *skb = NULL; 2037 int seg; 2038 2039 skb = (struct sk_buff *)sg->skb; 2040 if (unlikely(xfrm_offload(skb))) 2041 dir = DMA_BIDIRECTIONAL; 2042 2043 for (seg = 0; seg < sg->num_segs; seg++) { 2044 otx2_dma_unmap_page(pfvf, sg->dma_addr[seg], 2045 sg->size[seg], dir); 2046 } 2047 sg->num_segs = 0; 2048 } 2049