1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright (c) 2021, Microsoft Corporation. */ 3 4 #include <uapi/linux/bpf.h> 5 6 #include <linux/debugfs.h> 7 #include <linux/inetdevice.h> 8 #include <linux/etherdevice.h> 9 #include <linux/ethtool.h> 10 #include <linux/filter.h> 11 #include <linux/mm.h> 12 #include <linux/pci.h> 13 #include <linux/export.h> 14 #include <linux/skbuff.h> 15 16 #include <net/checksum.h> 17 #include <net/ip6_checksum.h> 18 #include <net/netdev_lock.h> 19 #include <net/page_pool/helpers.h> 20 #include <net/xdp.h> 21 22 #include <net/mana/mana.h> 23 #include <net/mana/mana_auxiliary.h> 24 #include <net/mana/hw_channel.h> 25 26 static DEFINE_IDA(mana_adev_ida); 27 28 static int mana_adev_idx_alloc(void) 29 { 30 return ida_alloc(&mana_adev_ida, GFP_KERNEL); 31 } 32 33 static void mana_adev_idx_free(int idx) 34 { 35 ida_free(&mana_adev_ida, idx); 36 } 37 38 static ssize_t mana_dbg_q_read(struct file *filp, char __user *buf, size_t count, 39 loff_t *pos) 40 { 41 struct gdma_queue *gdma_q = filp->private_data; 42 43 return simple_read_from_buffer(buf, count, pos, gdma_q->queue_mem_ptr, 44 gdma_q->queue_size); 45 } 46 47 static const struct file_operations mana_dbg_q_fops = { 48 .owner = THIS_MODULE, 49 .open = simple_open, 50 .read = mana_dbg_q_read, 51 }; 52 53 static bool mana_en_need_log(struct mana_port_context *apc, int err) 54 { 55 if (apc && apc->ac && apc->ac->gdma_dev && 56 apc->ac->gdma_dev->gdma_context) 57 return mana_need_log(apc->ac->gdma_dev->gdma_context, err); 58 else 59 return true; 60 } 61 62 static void mana_put_rx_page(struct mana_rxq *rxq, struct page *page, 63 bool from_pool) 64 { 65 if (from_pool) 66 page_pool_put_full_page(rxq->page_pool, page, false); 67 else 68 put_page(page); 69 } 70 71 /* Microsoft Azure Network Adapter (MANA) functions */ 72 73 static int mana_open(struct net_device *ndev) 74 { 75 struct mana_port_context *apc = netdev_priv(ndev); 76 int err; 77 err = mana_alloc_queues(ndev); 78 79 if (err) { 80 netdev_err(ndev, "%s failed to allocate queues: %d\n", __func__, err); 81 return err; 82 } 83 84 apc->port_is_up = true; 85 86 /* Ensure port state updated before txq state */ 87 smp_wmb(); 88 89 netif_tx_wake_all_queues(ndev); 90 netdev_dbg(ndev, "%s successful\n", __func__); 91 return 0; 92 } 93 94 static int mana_close(struct net_device *ndev) 95 { 96 struct mana_port_context *apc = netdev_priv(ndev); 97 98 if (!apc->port_is_up) 99 return 0; 100 101 return mana_detach(ndev, true); 102 } 103 104 static void mana_link_state_handle(struct work_struct *w) 105 { 106 struct mana_context *ac; 107 struct net_device *ndev; 108 u32 link_event; 109 bool link_up; 110 int i; 111 112 ac = container_of(w, struct mana_context, link_change_work); 113 114 rtnl_lock(); 115 116 link_event = READ_ONCE(ac->link_event); 117 118 if (link_event == HWC_DATA_HW_LINK_CONNECT) 119 link_up = true; 120 else if (link_event == HWC_DATA_HW_LINK_DISCONNECT) 121 link_up = false; 122 else 123 goto out; 124 125 /* Process all ports */ 126 for (i = 0; i < ac->num_ports; i++) { 127 ndev = ac->ports[i]; 128 if (!ndev) 129 continue; 130 131 if (link_up) { 132 netif_carrier_on(ndev); 133 134 __netdev_notify_peers(ndev); 135 } else { 136 netif_carrier_off(ndev); 137 } 138 } 139 140 out: 141 rtnl_unlock(); 142 } 143 144 static bool mana_can_tx(struct gdma_queue *wq) 145 { 146 return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE; 147 } 148 149 static unsigned int mana_checksum_info(struct sk_buff *skb) 150 { 151 if (skb->protocol == htons(ETH_P_IP)) { 152 
struct iphdr *ip = ip_hdr(skb); 153 154 if (ip->protocol == IPPROTO_TCP) 155 return IPPROTO_TCP; 156 157 if (ip->protocol == IPPROTO_UDP) 158 return IPPROTO_UDP; 159 } else if (skb->protocol == htons(ETH_P_IPV6)) { 160 struct ipv6hdr *ip6 = ipv6_hdr(skb); 161 162 if (ip6->nexthdr == IPPROTO_TCP) 163 return IPPROTO_TCP; 164 165 if (ip6->nexthdr == IPPROTO_UDP) 166 return IPPROTO_UDP; 167 } 168 169 /* No csum offloading */ 170 return 0; 171 } 172 173 static void mana_add_sge(struct mana_tx_package *tp, struct mana_skb_head *ash, 174 int sg_i, dma_addr_t da, int sge_len, u32 gpa_mkey) 175 { 176 ash->dma_handle[sg_i] = da; 177 ash->size[sg_i] = sge_len; 178 179 tp->wqe_req.sgl[sg_i].address = da; 180 tp->wqe_req.sgl[sg_i].mem_key = gpa_mkey; 181 tp->wqe_req.sgl[sg_i].size = sge_len; 182 } 183 184 static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc, 185 struct mana_tx_package *tp, int gso_hs) 186 { 187 struct mana_skb_head *ash = (struct mana_skb_head *)skb->head; 188 int hsg = 1; /* num of SGEs of linear part */ 189 struct gdma_dev *gd = apc->ac->gdma_dev; 190 int skb_hlen = skb_headlen(skb); 191 int sge0_len, sge1_len = 0; 192 struct gdma_context *gc; 193 struct device *dev; 194 skb_frag_t *frag; 195 dma_addr_t da; 196 int sg_i; 197 int i; 198 199 gc = gd->gdma_context; 200 dev = gc->dev; 201 202 if (gso_hs && gso_hs < skb_hlen) { 203 sge0_len = gso_hs; 204 sge1_len = skb_hlen - gso_hs; 205 } else { 206 sge0_len = skb_hlen; 207 } 208 209 da = dma_map_single(dev, skb->data, sge0_len, DMA_TO_DEVICE); 210 if (dma_mapping_error(dev, da)) 211 return -ENOMEM; 212 213 mana_add_sge(tp, ash, 0, da, sge0_len, gd->gpa_mkey); 214 215 if (sge1_len) { 216 sg_i = 1; 217 da = dma_map_single(dev, skb->data + sge0_len, sge1_len, 218 DMA_TO_DEVICE); 219 if (dma_mapping_error(dev, da)) 220 goto frag_err; 221 222 mana_add_sge(tp, ash, sg_i, da, sge1_len, gd->gpa_mkey); 223 hsg = 2; 224 } 225 226 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 227 sg_i = hsg + i; 228 229 frag = &skb_shinfo(skb)->frags[i]; 230 da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag), 231 DMA_TO_DEVICE); 232 if (dma_mapping_error(dev, da)) 233 goto frag_err; 234 235 mana_add_sge(tp, ash, sg_i, da, skb_frag_size(frag), 236 gd->gpa_mkey); 237 } 238 239 return 0; 240 241 frag_err: 242 if (net_ratelimit()) 243 netdev_err(apc->ndev, "Failed to map skb of size %u to DMA\n", 244 skb->len); 245 for (i = sg_i - 1; i >= hsg; i--) 246 dma_unmap_page(dev, ash->dma_handle[i], ash->size[i], 247 DMA_TO_DEVICE); 248 249 for (i = hsg - 1; i >= 0; i--) 250 dma_unmap_single(dev, ash->dma_handle[i], ash->size[i], 251 DMA_TO_DEVICE); 252 253 return -ENOMEM; 254 } 255 256 /* Handle the case when GSO SKB linear length is too large. 257 * MANA NIC requires GSO packets to put only the packet header to SGE0. 258 * So, we need 2 SGEs for the skb linear part which contains more than the 259 * header. 260 * Return a positive value for the number of SGEs, or a negative value 261 * for an error. 
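 * E.g. a GSO skb whose linear area holds payload bytes beyond gso_hs needs
 * one extra SGE for that payload, so num_sge becomes 2 + nr_frags.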
262 */ 263 static int mana_fix_skb_head(struct net_device *ndev, struct sk_buff *skb, 264 int gso_hs) 265 { 266 int num_sge = 1 + skb_shinfo(skb)->nr_frags; 267 int skb_hlen = skb_headlen(skb); 268 269 if (gso_hs < skb_hlen) { 270 num_sge++; 271 } else if (gso_hs > skb_hlen) { 272 if (net_ratelimit()) 273 netdev_err(ndev, 274 "TX nonlinear head: hs:%d, skb_hlen:%d\n", 275 gso_hs, skb_hlen); 276 277 return -EINVAL; 278 } 279 280 return num_sge; 281 } 282 283 /* Get the GSO packet's header size */ 284 static int mana_get_gso_hs(struct sk_buff *skb) 285 { 286 int gso_hs; 287 288 if (skb->encapsulation) { 289 gso_hs = skb_inner_tcp_all_headers(skb); 290 } else { 291 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { 292 gso_hs = skb_transport_offset(skb) + 293 sizeof(struct udphdr); 294 } else { 295 gso_hs = skb_tcp_all_headers(skb); 296 } 297 } 298 299 return gso_hs; 300 } 301 302 static void mana_per_port_queue_reset_work_handler(struct work_struct *work) 303 { 304 struct mana_port_context *apc = container_of(work, 305 struct mana_port_context, 306 queue_reset_work); 307 struct net_device *ndev = apc->ndev; 308 int err; 309 310 rtnl_lock(); 311 312 /* Pre-allocate buffers to prevent failure in mana_attach later */ 313 err = mana_pre_alloc_rxbufs(apc, ndev->mtu, apc->num_queues); 314 if (err) { 315 netdev_err(ndev, "Insufficient memory for reset post tx stall detection\n"); 316 goto out; 317 } 318 319 err = mana_detach(ndev, false); 320 if (err) { 321 netdev_err(ndev, "mana_detach failed: %d\n", err); 322 goto dealloc_pre_rxbufs; 323 } 324 325 err = mana_attach(ndev); 326 if (err) 327 netdev_err(ndev, "mana_attach failed: %d\n", err); 328 329 dealloc_pre_rxbufs: 330 mana_pre_dealloc_rxbufs(apc); 331 out: 332 rtnl_unlock(); 333 } 334 335 netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) 336 { 337 enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT; 338 struct mana_port_context *apc = netdev_priv(ndev); 339 int gso_hs = 0; /* zero for non-GSO pkts */ 340 u16 txq_idx = skb_get_queue_mapping(skb); 341 struct gdma_dev *gd = apc->ac->gdma_dev; 342 bool ipv4 = false, ipv6 = false; 343 struct mana_tx_package pkg = {}; 344 struct netdev_queue *net_txq; 345 struct mana_stats_tx *tx_stats; 346 struct gdma_queue *gdma_sq; 347 int err, len, num_gso_seg; 348 unsigned int csum_type; 349 struct mana_txq *txq; 350 struct mana_cq *cq; 351 352 if (unlikely(!apc->port_is_up)) 353 goto tx_drop; 354 355 if (skb_cow_head(skb, MANA_HEADROOM)) 356 goto tx_drop_count; 357 358 txq = &apc->tx_qp[txq_idx].txq; 359 gdma_sq = txq->gdma_sq; 360 cq = &apc->tx_qp[txq_idx].tx_cq; 361 tx_stats = &txq->stats; 362 363 BUILD_BUG_ON(MAX_TX_WQE_SGL_ENTRIES != MANA_MAX_TX_WQE_SGL_ENTRIES); 364 if (MAX_SKB_FRAGS + 2 > MAX_TX_WQE_SGL_ENTRIES && 365 skb_shinfo(skb)->nr_frags + 2 > MAX_TX_WQE_SGL_ENTRIES) { 366 /* GSO skb with Hardware SGE limit exceeded is not expected here 367 * as they are handled in mana_features_check() callback 368 */ 369 if (skb_linearize(skb)) { 370 netdev_warn_once(ndev, "Failed to linearize skb with nr_frags=%d and is_gso=%d\n", 371 skb_shinfo(skb)->nr_frags, 372 skb_is_gso(skb)); 373 goto tx_drop_count; 374 } 375 apc->eth_stats.tx_linear_pkt_cnt++; 376 } 377 378 pkg.tx_oob.s_oob.vcq_num = cq->gdma_id; 379 pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame; 380 381 if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) { 382 pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset; 383 pkt_fmt = MANA_LONG_PKT_FMT; 384 } else { 385 pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset; 386 } 387 388 if 
(skb_vlan_tag_present(skb)) { 389 pkt_fmt = MANA_LONG_PKT_FMT; 390 pkg.tx_oob.l_oob.inject_vlan_pri_tag = 1; 391 pkg.tx_oob.l_oob.pcp = skb_vlan_tag_get_prio(skb); 392 pkg.tx_oob.l_oob.dei = skb_vlan_tag_get_cfi(skb); 393 pkg.tx_oob.l_oob.vlan_id = skb_vlan_tag_get_id(skb); 394 } 395 396 pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt; 397 398 if (pkt_fmt == MANA_SHORT_PKT_FMT) { 399 pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob); 400 u64_stats_update_begin(&tx_stats->syncp); 401 tx_stats->short_pkt_fmt++; 402 u64_stats_update_end(&tx_stats->syncp); 403 } else { 404 pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob); 405 u64_stats_update_begin(&tx_stats->syncp); 406 tx_stats->long_pkt_fmt++; 407 u64_stats_update_end(&tx_stats->syncp); 408 } 409 410 pkg.wqe_req.inline_oob_data = &pkg.tx_oob; 411 pkg.wqe_req.flags = 0; 412 pkg.wqe_req.client_data_unit = 0; 413 414 pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags; 415 416 if (skb->protocol == htons(ETH_P_IP)) 417 ipv4 = true; 418 else if (skb->protocol == htons(ETH_P_IPV6)) 419 ipv6 = true; 420 421 if (skb_is_gso(skb)) { 422 int num_sge; 423 424 gso_hs = mana_get_gso_hs(skb); 425 426 num_sge = mana_fix_skb_head(ndev, skb, gso_hs); 427 if (num_sge > 0) 428 pkg.wqe_req.num_sge = num_sge; 429 else 430 goto tx_drop_count; 431 432 u64_stats_update_begin(&tx_stats->syncp); 433 if (skb->encapsulation) { 434 tx_stats->tso_inner_packets++; 435 tx_stats->tso_inner_bytes += skb->len - gso_hs; 436 } else { 437 tx_stats->tso_packets++; 438 tx_stats->tso_bytes += skb->len - gso_hs; 439 } 440 u64_stats_update_end(&tx_stats->syncp); 441 442 pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4; 443 pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6; 444 445 pkg.tx_oob.s_oob.comp_iphdr_csum = 1; 446 pkg.tx_oob.s_oob.comp_tcp_csum = 1; 447 pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb); 448 449 pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size; 450 pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0; 451 if (ipv4) { 452 ip_hdr(skb)->tot_len = 0; 453 ip_hdr(skb)->check = 0; 454 tcp_hdr(skb)->check = 455 ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 456 ip_hdr(skb)->daddr, 0, 457 IPPROTO_TCP, 0); 458 } else { 459 ipv6_hdr(skb)->payload_len = 0; 460 tcp_hdr(skb)->check = 461 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 462 &ipv6_hdr(skb)->daddr, 0, 463 IPPROTO_TCP, 0); 464 } 465 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 466 csum_type = mana_checksum_info(skb); 467 468 u64_stats_update_begin(&tx_stats->syncp); 469 tx_stats->csum_partial++; 470 u64_stats_update_end(&tx_stats->syncp); 471 472 if (csum_type == IPPROTO_TCP) { 473 pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4; 474 pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6; 475 476 pkg.tx_oob.s_oob.comp_tcp_csum = 1; 477 pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb); 478 479 } else if (csum_type == IPPROTO_UDP) { 480 pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4; 481 pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6; 482 483 pkg.tx_oob.s_oob.comp_udp_csum = 1; 484 } else { 485 /* Can't do offload of this type of checksum */ 486 if (skb_checksum_help(skb)) 487 goto tx_drop_count; 488 } 489 } 490 491 if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) { 492 pkg.wqe_req.sgl = pkg.sgl_array; 493 } else { 494 pkg.sgl_ptr = kmalloc_objs(struct gdma_sge, pkg.wqe_req.num_sge, 495 GFP_ATOMIC); 496 if (!pkg.sgl_ptr) 497 goto tx_drop_count; 498 499 pkg.wqe_req.sgl = pkg.sgl_ptr; 500 } 501 502 if (mana_map_skb(skb, apc, &pkg, gso_hs)) { 503 u64_stats_update_begin(&tx_stats->syncp); 504 tx_stats->mana_map_err++; 505 
u64_stats_update_end(&tx_stats->syncp); 506 goto free_sgl_ptr; 507 } 508 509 skb_queue_tail(&txq->pending_skbs, skb); 510 511 len = skb->len; 512 num_gso_seg = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; 513 net_txq = netdev_get_tx_queue(ndev, txq_idx); 514 515 err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req, 516 (struct gdma_posted_wqe_info *)skb->cb); 517 if (!mana_can_tx(gdma_sq)) { 518 netif_tx_stop_queue(net_txq); 519 apc->eth_stats.stop_queue++; 520 } 521 522 if (err) { 523 (void)skb_dequeue_tail(&txq->pending_skbs); 524 mana_unmap_skb(skb, apc); 525 netdev_warn(ndev, "Failed to post TX OOB: %d\n", err); 526 goto free_sgl_ptr; 527 } 528 529 err = NETDEV_TX_OK; 530 atomic_inc(&txq->pending_sends); 531 532 mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq); 533 534 /* skb may be freed after mana_gd_post_work_request. Do not use it. */ 535 skb = NULL; 536 537 /* Populated the packet and bytes counters based on post GSO packet 538 * calculations 539 */ 540 tx_stats = &txq->stats; 541 u64_stats_update_begin(&tx_stats->syncp); 542 tx_stats->packets += num_gso_seg; 543 tx_stats->bytes += len + ((num_gso_seg - 1) * gso_hs); 544 u64_stats_update_end(&tx_stats->syncp); 545 546 if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) { 547 netif_tx_wake_queue(net_txq); 548 apc->eth_stats.wake_queue++; 549 } 550 551 kfree(pkg.sgl_ptr); 552 return err; 553 554 free_sgl_ptr: 555 kfree(pkg.sgl_ptr); 556 tx_drop_count: 557 ndev->stats.tx_dropped++; 558 tx_drop: 559 dev_kfree_skb_any(skb); 560 return NETDEV_TX_OK; 561 } 562 563 #if (MAX_SKB_FRAGS + 2 > MANA_MAX_TX_WQE_SGL_ENTRIES) 564 static netdev_features_t mana_features_check(struct sk_buff *skb, 565 struct net_device *ndev, 566 netdev_features_t features) 567 { 568 if (skb_shinfo(skb)->nr_frags + 2 > MAX_TX_WQE_SGL_ENTRIES) { 569 /* Exceeds HW SGE limit. 570 * GSO case: 571 * Disable GSO so the stack will software-segment the skb 572 * into smaller skbs that fit the SGE budget. 573 * Non-GSO case: 574 * The xmit path will attempt skb_linearize() as a fallback. 
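 * (counted in eth_stats.tx_linear_pkt_cnt).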
575 */ 576 features &= ~NETIF_F_GSO_MASK; 577 } 578 return features; 579 } 580 #endif 581 582 static void mana_get_stats64(struct net_device *ndev, 583 struct rtnl_link_stats64 *st) 584 { 585 struct mana_port_context *apc = netdev_priv(ndev); 586 unsigned int num_queues = apc->num_queues; 587 struct mana_stats_rx *rx_stats; 588 struct mana_stats_tx *tx_stats; 589 unsigned int start; 590 u64 packets, bytes; 591 int q; 592 593 if (!apc->port_is_up) 594 return; 595 596 netdev_stats_to_stats64(st, &ndev->stats); 597 598 if (apc->ac->hwc_timeout_occurred) 599 netdev_warn_once(ndev, "HWC timeout occurred\n"); 600 601 st->rx_missed_errors = apc->ac->hc_stats.hc_rx_discards_no_wqe; 602 603 for (q = 0; q < num_queues; q++) { 604 rx_stats = &apc->rxqs[q]->stats; 605 606 do { 607 start = u64_stats_fetch_begin(&rx_stats->syncp); 608 packets = rx_stats->packets; 609 bytes = rx_stats->bytes; 610 } while (u64_stats_fetch_retry(&rx_stats->syncp, start)); 611 612 st->rx_packets += packets; 613 st->rx_bytes += bytes; 614 } 615 616 for (q = 0; q < num_queues; q++) { 617 tx_stats = &apc->tx_qp[q].txq.stats; 618 619 do { 620 start = u64_stats_fetch_begin(&tx_stats->syncp); 621 packets = tx_stats->packets; 622 bytes = tx_stats->bytes; 623 } while (u64_stats_fetch_retry(&tx_stats->syncp, start)); 624 625 st->tx_packets += packets; 626 st->tx_bytes += bytes; 627 } 628 } 629 630 static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb, 631 int old_q) 632 { 633 struct mana_port_context *apc = netdev_priv(ndev); 634 u32 hash = skb_get_hash(skb); 635 struct sock *sk = skb->sk; 636 int txq; 637 638 txq = apc->indir_table[hash & (apc->indir_table_sz - 1)]; 639 640 if (txq != old_q && sk && sk_fullsock(sk) && 641 rcu_access_pointer(sk->sk_dst_cache)) 642 sk_tx_queue_set(sk, txq); 643 644 return txq; 645 } 646 647 static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb, 648 struct net_device *sb_dev) 649 { 650 int txq; 651 652 if (ndev->real_num_tx_queues == 1) 653 return 0; 654 655 txq = sk_tx_queue_get(skb->sk); 656 657 if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) { 658 if (skb_rx_queue_recorded(skb)) 659 txq = skb_get_rx_queue(skb); 660 else 661 txq = mana_get_tx_queue(ndev, skb, txq); 662 } 663 664 return txq; 665 } 666 667 /* Release pre-allocated RX buffers */ 668 void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc) 669 { 670 struct device *dev; 671 int i; 672 673 dev = mpc->ac->gdma_dev->gdma_context->dev; 674 675 if (!mpc->rxbufs_pre) 676 goto out1; 677 678 if (!mpc->das_pre) 679 goto out2; 680 681 while (mpc->rxbpre_total) { 682 i = --mpc->rxbpre_total; 683 dma_unmap_single(dev, mpc->das_pre[i], mpc->rxbpre_datasize, 684 DMA_FROM_DEVICE); 685 put_page(virt_to_head_page(mpc->rxbufs_pre[i])); 686 } 687 688 kfree(mpc->das_pre); 689 mpc->das_pre = NULL; 690 691 out2: 692 kfree(mpc->rxbufs_pre); 693 mpc->rxbufs_pre = NULL; 694 695 out1: 696 mpc->rxbpre_datasize = 0; 697 mpc->rxbpre_alloc_size = 0; 698 mpc->rxbpre_headroom = 0; 699 } 700 701 /* Get a buffer from the pre-allocated RX buffers */ 702 static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da) 703 { 704 struct net_device *ndev = rxq->ndev; 705 struct mana_port_context *mpc; 706 void *va; 707 708 mpc = netdev_priv(ndev); 709 710 if (!mpc->rxbufs_pre || !mpc->das_pre || !mpc->rxbpre_total) { 711 netdev_err(ndev, "No RX pre-allocated bufs\n"); 712 return NULL; 713 } 714 715 /* Check sizes to catch unexpected coding error */ 716 if (mpc->rxbpre_datasize != rxq->datasize) { 717 
netdev_err(ndev, "rxbpre_datasize mismatch: %u: %u\n", 718 mpc->rxbpre_datasize, rxq->datasize); 719 return NULL; 720 } 721 722 if (mpc->rxbpre_alloc_size != rxq->alloc_size) { 723 netdev_err(ndev, "rxbpre_alloc_size mismatch: %u: %u\n", 724 mpc->rxbpre_alloc_size, rxq->alloc_size); 725 return NULL; 726 } 727 728 if (mpc->rxbpre_headroom != rxq->headroom) { 729 netdev_err(ndev, "rxbpre_headroom mismatch: %u: %u\n", 730 mpc->rxbpre_headroom, rxq->headroom); 731 return NULL; 732 } 733 734 mpc->rxbpre_total--; 735 736 *da = mpc->das_pre[mpc->rxbpre_total]; 737 va = mpc->rxbufs_pre[mpc->rxbpre_total]; 738 mpc->rxbufs_pre[mpc->rxbpre_total] = NULL; 739 740 /* Deallocate the array after all buffers are gone */ 741 if (!mpc->rxbpre_total) 742 mana_pre_dealloc_rxbufs(mpc); 743 744 return va; 745 } 746 747 /* Get RX buffer's data size, alloc size, XDP headroom based on MTU */ 748 static void mana_get_rxbuf_cfg(struct mana_port_context *apc, 749 int mtu, u32 *datasize, u32 *alloc_size, 750 u32 *headroom, u32 *frag_count) 751 { 752 u32 len, buf_size; 753 754 /* Calculate datasize first (consistent across all cases) */ 755 *datasize = mtu + ETH_HLEN; 756 757 /* For xdp and jumbo frames make sure only one packet fits per page */ 758 if (mtu + MANA_RXBUF_PAD > PAGE_SIZE / 2 || mana_xdp_get(apc)) { 759 if (mana_xdp_get(apc)) { 760 *headroom = XDP_PACKET_HEADROOM; 761 *alloc_size = PAGE_SIZE; 762 } else { 763 *headroom = 0; /* no support for XDP */ 764 *alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + 765 *headroom); 766 } 767 768 *frag_count = 1; 769 return; 770 } 771 772 /* Standard MTU case - optimize for multiple packets per page */ 773 *headroom = 0; 774 775 /* Calculate base buffer size needed */ 776 len = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom); 777 buf_size = ALIGN(len, MANA_RX_FRAG_ALIGNMENT); 778 779 /* Calculate how many packets can fit in a page */ 780 *frag_count = PAGE_SIZE / buf_size; 781 *alloc_size = buf_size; 782 } 783 784 int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_queues) 785 { 786 struct device *dev; 787 struct page *page; 788 dma_addr_t da; 789 int num_rxb; 790 void *va; 791 int i; 792 793 mana_get_rxbuf_cfg(mpc, new_mtu, &mpc->rxbpre_datasize, 794 &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom, 795 &mpc->rxbpre_frag_count); 796 797 dev = mpc->ac->gdma_dev->gdma_context->dev; 798 799 num_rxb = num_queues * mpc->rx_queue_size; 800 801 WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n"); 802 mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL); 803 if (!mpc->rxbufs_pre) 804 goto error; 805 806 mpc->das_pre = kmalloc_objs(dma_addr_t, num_rxb); 807 if (!mpc->das_pre) 808 goto error; 809 810 mpc->rxbpre_total = 0; 811 812 for (i = 0; i < num_rxb; i++) { 813 page = dev_alloc_pages(get_order(mpc->rxbpre_alloc_size)); 814 if (!page) 815 goto error; 816 817 va = page_to_virt(page); 818 819 da = dma_map_single(dev, va + mpc->rxbpre_headroom, 820 mpc->rxbpre_datasize, DMA_FROM_DEVICE); 821 if (dma_mapping_error(dev, da)) { 822 put_page(page); 823 goto error; 824 } 825 826 mpc->rxbufs_pre[i] = va; 827 mpc->das_pre[i] = da; 828 mpc->rxbpre_total = i + 1; 829 } 830 831 return 0; 832 833 error: 834 netdev_err(mpc->ndev, "Failed to pre-allocate RX buffers for %d queues\n", num_queues); 835 mana_pre_dealloc_rxbufs(mpc); 836 return -ENOMEM; 837 } 838 839 static int mana_change_mtu(struct net_device *ndev, int new_mtu) 840 { 841 struct mana_port_context *mpc = netdev_priv(ndev); 842 unsigned int old_mtu = ndev->mtu; 843 int err; 844 845 /* 
Pre-allocate buffers to prevent failure in mana_attach later */ 846 err = mana_pre_alloc_rxbufs(mpc, new_mtu, mpc->num_queues); 847 if (err) { 848 netdev_err(ndev, "Insufficient memory for new MTU\n"); 849 return err; 850 } 851 852 err = mana_detach(ndev, false); 853 if (err) { 854 netdev_err(ndev, "mana_detach failed: %d\n", err); 855 goto out; 856 } 857 858 WRITE_ONCE(ndev->mtu, new_mtu); 859 860 err = mana_attach(ndev); 861 if (err) { 862 netdev_err(ndev, "mana_attach failed: %d\n", err); 863 WRITE_ONCE(ndev->mtu, old_mtu); 864 } 865 866 out: 867 mana_pre_dealloc_rxbufs(mpc); 868 return err; 869 } 870 871 static void mana_tx_timeout(struct net_device *netdev, unsigned int txqueue) 872 { 873 struct mana_port_context *apc = netdev_priv(netdev); 874 struct mana_context *ac = apc->ac; 875 struct gdma_context *gc = ac->gdma_dev->gdma_context; 876 877 /* Already in service, hence tx queue reset is not required.*/ 878 if (gc->in_service) 879 return; 880 881 /* Note: If there are pending queue reset work for this port(apc), 882 * subsequent request queued up from here are ignored. This is because 883 * we are using the same work instance per port(apc). 884 */ 885 queue_work(ac->per_port_queue_reset_wq, &apc->queue_reset_work); 886 } 887 888 static int mana_shaper_set(struct net_shaper_binding *binding, 889 const struct net_shaper *shaper, 890 struct netlink_ext_ack *extack) 891 { 892 struct mana_port_context *apc = netdev_priv(binding->netdev); 893 u32 old_speed, rate; 894 int err; 895 896 if (shaper->handle.scope != NET_SHAPER_SCOPE_NETDEV) { 897 NL_SET_ERR_MSG_MOD(extack, "net shaper scope should be netdev"); 898 return -EINVAL; 899 } 900 901 if (apc->handle.id && shaper->handle.id != apc->handle.id) { 902 NL_SET_ERR_MSG_MOD(extack, "Cannot create multiple shapers"); 903 return -EOPNOTSUPP; 904 } 905 906 if (!shaper->bw_max || (shaper->bw_max % 100000000)) { 907 NL_SET_ERR_MSG_MOD(extack, "Please use multiples of 100Mbps for bandwidth"); 908 return -EINVAL; 909 } 910 911 rate = div_u64(shaper->bw_max, 1000); /* Convert bps to Kbps */ 912 rate = div_u64(rate, 1000); /* Convert Kbps to Mbps */ 913 914 /* Get current speed */ 915 err = mana_query_link_cfg(apc); 916 old_speed = (err) ? SPEED_UNKNOWN : apc->speed; 917 918 if (!err) { 919 err = mana_set_bw_clamp(apc, rate, TRI_STATE_TRUE); 920 apc->speed = (err) ? old_speed : rate; 921 apc->handle = (err) ? 
apc->handle : shaper->handle; 922 } 923 924 return err; 925 } 926 927 static int mana_shaper_del(struct net_shaper_binding *binding, 928 const struct net_shaper_handle *handle, 929 struct netlink_ext_ack *extack) 930 { 931 struct mana_port_context *apc = netdev_priv(binding->netdev); 932 int err; 933 934 err = mana_set_bw_clamp(apc, 0, TRI_STATE_FALSE); 935 936 if (!err) { 937 /* Reset mana port context parameters */ 938 apc->handle.id = 0; 939 apc->handle.scope = NET_SHAPER_SCOPE_UNSPEC; 940 apc->speed = apc->max_speed; 941 } 942 943 return err; 944 } 945 946 static void mana_shaper_cap(struct net_shaper_binding *binding, 947 enum net_shaper_scope scope, 948 unsigned long *flags) 949 { 950 *flags = BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX) | 951 BIT(NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS); 952 } 953 954 static const struct net_shaper_ops mana_shaper_ops = { 955 .set = mana_shaper_set, 956 .delete = mana_shaper_del, 957 .capabilities = mana_shaper_cap, 958 }; 959 960 static const struct net_device_ops mana_devops = { 961 .ndo_open = mana_open, 962 .ndo_stop = mana_close, 963 .ndo_select_queue = mana_select_queue, 964 #if (MAX_SKB_FRAGS + 2 > MANA_MAX_TX_WQE_SGL_ENTRIES) 965 .ndo_features_check = mana_features_check, 966 #endif 967 .ndo_start_xmit = mana_start_xmit, 968 .ndo_validate_addr = eth_validate_addr, 969 .ndo_get_stats64 = mana_get_stats64, 970 .ndo_bpf = mana_bpf, 971 .ndo_xdp_xmit = mana_xdp_xmit, 972 .ndo_change_mtu = mana_change_mtu, 973 .ndo_tx_timeout = mana_tx_timeout, 974 .net_shaper_ops = &mana_shaper_ops, 975 }; 976 977 static void mana_cleanup_port_context(struct mana_port_context *apc) 978 { 979 /* 980 * make sure subsequent cleanup attempts don't end up removing already 981 * cleaned dentry pointer 982 */ 983 debugfs_remove(apc->mana_port_debugfs); 984 apc->mana_port_debugfs = NULL; 985 kfree(apc->rxqs); 986 apc->rxqs = NULL; 987 } 988 989 static void mana_cleanup_indir_table(struct mana_port_context *apc) 990 { 991 apc->indir_table_sz = 0; 992 kfree(apc->indir_table); 993 kfree(apc->rxobj_table); 994 } 995 996 static int mana_init_port_context(struct mana_port_context *apc) 997 { 998 apc->rxqs = kzalloc_objs(struct mana_rxq *, apc->num_queues); 999 1000 return !apc->rxqs ? -ENOMEM : 0; 1001 } 1002 1003 static int mana_send_request(struct mana_context *ac, void *in_buf, 1004 u32 in_len, void *out_buf, u32 out_len) 1005 { 1006 struct gdma_context *gc = ac->gdma_dev->gdma_context; 1007 struct gdma_resp_hdr *resp = out_buf; 1008 struct gdma_req_hdr *req = in_buf; 1009 struct device *dev = gc->dev; 1010 static atomic_t activity_id; 1011 int err; 1012 1013 req->dev_id = gc->mana.dev_id; 1014 req->activity_id = atomic_inc_return(&activity_id); 1015 1016 err = mana_gd_send_request(gc, in_len, in_buf, out_len, 1017 out_buf); 1018 if (err || resp->status) { 1019 if (err == -EOPNOTSUPP) 1020 return err; 1021 1022 if (req->req.msg_type != MANA_QUERY_PHY_STAT && 1023 mana_need_log(gc, err)) 1024 dev_err(dev, "Failed to send mana message: %d, 0x%x\n", 1025 err, resp->status); 1026 return err ? 
err : -EPROTO; 1027 } 1028 1029 if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 || 1030 req->activity_id != resp->activity_id) { 1031 dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n", 1032 req->dev_id.as_uint32, resp->dev_id.as_uint32, 1033 req->activity_id, resp->activity_id); 1034 return -EPROTO; 1035 } 1036 1037 return 0; 1038 } 1039 1040 static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr, 1041 const enum mana_command_code expected_code, 1042 const u32 min_size) 1043 { 1044 if (resp_hdr->response.msg_type != expected_code) 1045 return -EPROTO; 1046 1047 if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1) 1048 return -EPROTO; 1049 1050 if (resp_hdr->response.msg_size < min_size) 1051 return -EPROTO; 1052 1053 return 0; 1054 } 1055 1056 static int mana_pf_register_hw_vport(struct mana_port_context *apc) 1057 { 1058 struct mana_register_hw_vport_resp resp = {}; 1059 struct mana_register_hw_vport_req req = {}; 1060 int err; 1061 1062 mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_HW_PORT, 1063 sizeof(req), sizeof(resp)); 1064 req.attached_gfid = 1; 1065 req.is_pf_default_vport = 1; 1066 req.allow_all_ether_types = 1; 1067 1068 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, 1069 sizeof(resp)); 1070 if (err) { 1071 netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err); 1072 return err; 1073 } 1074 1075 err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_HW_PORT, 1076 sizeof(resp)); 1077 if (err || resp.hdr.status) { 1078 netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n", 1079 err, resp.hdr.status); 1080 return err ? err : -EPROTO; 1081 } 1082 1083 apc->port_handle = resp.hw_vport_handle; 1084 return 0; 1085 } 1086 1087 static void mana_pf_deregister_hw_vport(struct mana_port_context *apc) 1088 { 1089 struct mana_deregister_hw_vport_resp resp = {}; 1090 struct mana_deregister_hw_vport_req req = {}; 1091 int err; 1092 1093 mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_HW_PORT, 1094 sizeof(req), sizeof(resp)); 1095 req.hw_vport_handle = apc->port_handle; 1096 1097 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, 1098 sizeof(resp)); 1099 if (err) { 1100 if (mana_en_need_log(apc, err)) 1101 netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n", 1102 err); 1103 1104 return; 1105 } 1106 1107 err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_HW_PORT, 1108 sizeof(resp)); 1109 if (err || resp.hdr.status) 1110 netdev_err(apc->ndev, 1111 "Failed to deregister hw vPort: %d, 0x%x\n", 1112 err, resp.hdr.status); 1113 } 1114 1115 static int mana_pf_register_filter(struct mana_port_context *apc) 1116 { 1117 struct mana_register_filter_resp resp = {}; 1118 struct mana_register_filter_req req = {}; 1119 int err; 1120 1121 mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_FILTER, 1122 sizeof(req), sizeof(resp)); 1123 req.vport = apc->port_handle; 1124 memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN); 1125 1126 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, 1127 sizeof(resp)); 1128 if (err) { 1129 netdev_err(apc->ndev, "Failed to register filter: %d\n", err); 1130 return err; 1131 } 1132 1133 err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_FILTER, 1134 sizeof(resp)); 1135 if (err || resp.hdr.status) { 1136 netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n", 1137 err, resp.hdr.status); 1138 return err ? 
err : -EPROTO; 1139 } 1140 1141 apc->pf_filter_handle = resp.filter_handle; 1142 return 0; 1143 } 1144 1145 static void mana_pf_deregister_filter(struct mana_port_context *apc) 1146 { 1147 struct mana_deregister_filter_resp resp = {}; 1148 struct mana_deregister_filter_req req = {}; 1149 int err; 1150 1151 mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_FILTER, 1152 sizeof(req), sizeof(resp)); 1153 req.filter_handle = apc->pf_filter_handle; 1154 1155 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, 1156 sizeof(resp)); 1157 if (err) { 1158 if (mana_en_need_log(apc, err)) 1159 netdev_err(apc->ndev, "Failed to unregister filter: %d\n", 1160 err); 1161 1162 return; 1163 } 1164 1165 err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_FILTER, 1166 sizeof(resp)); 1167 if (err || resp.hdr.status) 1168 netdev_err(apc->ndev, 1169 "Failed to deregister filter: %d, 0x%x\n", 1170 err, resp.hdr.status); 1171 } 1172 1173 static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver, 1174 u32 proto_minor_ver, u32 proto_micro_ver, 1175 u16 *max_num_vports, u8 *bm_hostmode) 1176 { 1177 struct gdma_context *gc = ac->gdma_dev->gdma_context; 1178 struct mana_query_device_cfg_resp resp = {}; 1179 struct mana_query_device_cfg_req req = {}; 1180 struct device *dev = gc->dev; 1181 int err = 0; 1182 1183 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG, 1184 sizeof(req), sizeof(resp)); 1185 1186 req.hdr.resp.msg_version = GDMA_MESSAGE_V3; 1187 1188 req.proto_major_ver = proto_major_ver; 1189 req.proto_minor_ver = proto_minor_ver; 1190 req.proto_micro_ver = proto_micro_ver; 1191 1192 err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp)); 1193 if (err) { 1194 dev_err(dev, "Failed to query config: %d", err); 1195 return err; 1196 } 1197 1198 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG, 1199 sizeof(resp)); 1200 if (err || resp.hdr.status) { 1201 dev_err(dev, "Invalid query result: %d, 0x%x\n", err, 1202 resp.hdr.status); 1203 if (!err) 1204 err = -EPROTO; 1205 return err; 1206 } 1207 1208 *max_num_vports = resp.max_num_vports; 1209 1210 if (resp.hdr.response.msg_version >= GDMA_MESSAGE_V2) 1211 gc->adapter_mtu = resp.adapter_mtu; 1212 else 1213 gc->adapter_mtu = ETH_FRAME_LEN; 1214 1215 if (resp.hdr.response.msg_version >= GDMA_MESSAGE_V3) 1216 *bm_hostmode = resp.bm_hostmode; 1217 else 1218 *bm_hostmode = 0; 1219 1220 debugfs_create_u16("adapter-MTU", 0400, gc->mana_pci_debugfs, &gc->adapter_mtu); 1221 1222 return 0; 1223 } 1224 1225 static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index, 1226 u32 *max_sq, u32 *max_rq, u32 *num_indir_entry) 1227 { 1228 struct mana_query_vport_cfg_resp resp = {}; 1229 struct mana_query_vport_cfg_req req = {}; 1230 int err; 1231 1232 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG, 1233 sizeof(req), sizeof(resp)); 1234 1235 req.vport_index = vport_index; 1236 1237 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, 1238 sizeof(resp)); 1239 if (err) 1240 return err; 1241 1242 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG, 1243 sizeof(resp)); 1244 if (err) 1245 return err; 1246 1247 if (resp.hdr.status) 1248 return -EPROTO; 1249 1250 *max_sq = resp.max_num_sq; 1251 *max_rq = resp.max_num_rq; 1252 if (resp.num_indirection_ent > 0 && 1253 resp.num_indirection_ent <= MANA_INDIRECT_TABLE_MAX_SIZE && 1254 is_power_of_2(resp.num_indirection_ent)) { 1255 *num_indir_entry = resp.num_indirection_ent; 1256 } else { 1257 netdev_warn(apc->ndev, 1258 "Setting indirection table size to 
default %d for vPort %d\n", 1259 MANA_INDIRECT_TABLE_DEF_SIZE, apc->port_idx); 1260 *num_indir_entry = MANA_INDIRECT_TABLE_DEF_SIZE; 1261 } 1262 1263 apc->port_handle = resp.vport; 1264 ether_addr_copy(apc->mac_addr, resp.mac_addr); 1265 1266 return 0; 1267 } 1268 1269 void mana_uncfg_vport(struct mana_port_context *apc) 1270 { 1271 mutex_lock(&apc->vport_mutex); 1272 apc->vport_use_count--; 1273 WARN_ON(apc->vport_use_count < 0); 1274 mutex_unlock(&apc->vport_mutex); 1275 } 1276 EXPORT_SYMBOL_NS(mana_uncfg_vport, "NET_MANA"); 1277 1278 int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id, 1279 u32 doorbell_pg_id) 1280 { 1281 struct mana_config_vport_resp resp = {}; 1282 struct mana_config_vport_req req = {}; 1283 int err; 1284 1285 /* This function is used to program the Ethernet port in the hardware 1286 * table. It can be called from the Ethernet driver or the RDMA driver. 1287 * 1288 * For Ethernet usage, the hardware supports only one active user on a 1289 * physical port. The driver checks on the port usage before programming 1290 * the hardware when creating the RAW QP (RDMA driver) or exposing the 1291 * device to kernel NET layer (Ethernet driver). 1292 * 1293 * Because the RDMA driver doesn't know in advance which QP type the 1294 * user will create, it exposes the device with all its ports. The user 1295 * may not be able to create RAW QP on a port if this port is already 1296 * in used by the Ethernet driver from the kernel. 1297 * 1298 * This physical port limitation only applies to the RAW QP. For RC QP, 1299 * the hardware doesn't have this limitation. The user can create RC 1300 * QPs on a physical port up to the hardware limits independent of the 1301 * Ethernet usage on the same port. 1302 */ 1303 mutex_lock(&apc->vport_mutex); 1304 if (apc->vport_use_count > 0) { 1305 mutex_unlock(&apc->vport_mutex); 1306 return -EBUSY; 1307 } 1308 apc->vport_use_count++; 1309 mutex_unlock(&apc->vport_mutex); 1310 1311 mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX, 1312 sizeof(req), sizeof(resp)); 1313 req.vport = apc->port_handle; 1314 req.pdid = protection_dom_id; 1315 req.doorbell_pageid = doorbell_pg_id; 1316 1317 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, 1318 sizeof(resp)); 1319 if (err) { 1320 netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err); 1321 goto out; 1322 } 1323 1324 err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX, 1325 sizeof(resp)); 1326 if (err || resp.hdr.status) { 1327 netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n", 1328 err, resp.hdr.status); 1329 if (!err) 1330 err = -EPROTO; 1331 1332 goto out; 1333 } 1334 1335 apc->tx_shortform_allowed = resp.short_form_allowed; 1336 apc->tx_vp_offset = resp.tx_vport_offset; 1337 1338 netdev_info(apc->ndev, "Configured vPort %llu PD %u DB %u\n", 1339 apc->port_handle, protection_dom_id, doorbell_pg_id); 1340 out: 1341 if (err) 1342 mana_uncfg_vport(apc); 1343 1344 return err; 1345 } 1346 EXPORT_SYMBOL_NS(mana_cfg_vport, "NET_MANA"); 1347 1348 static int mana_cfg_vport_steering(struct mana_port_context *apc, 1349 enum TRI_STATE rx, 1350 bool update_default_rxobj, bool update_key, 1351 bool update_tab) 1352 { 1353 struct mana_cfg_rx_steer_req_v2 *req; 1354 struct mana_cfg_rx_steer_resp resp = {}; 1355 struct net_device *ndev = apc->ndev; 1356 u32 req_buf_size; 1357 int err; 1358 1359 req_buf_size = struct_size(req, indir_tab, apc->indir_table_sz); 1360 req = kzalloc(req_buf_size, GFP_KERNEL); 1361 if (!req) 1362 return -ENOMEM; 1363 1364 
mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size, 1365 sizeof(resp)); 1366 1367 req->hdr.req.msg_version = GDMA_MESSAGE_V2; 1368 1369 req->vport = apc->port_handle; 1370 req->num_indir_entries = apc->indir_table_sz; 1371 req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2, 1372 indir_tab); 1373 req->rx_enable = rx; 1374 req->rss_enable = apc->rss_state; 1375 req->update_default_rxobj = update_default_rxobj; 1376 req->update_hashkey = update_key; 1377 req->update_indir_tab = update_tab; 1378 req->default_rxobj = apc->default_rxobj; 1379 req->cqe_coalescing_enable = 0; 1380 1381 if (update_key) 1382 memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE); 1383 1384 if (update_tab) 1385 memcpy(req->indir_tab, apc->rxobj_table, 1386 flex_array_size(req, indir_tab, req->num_indir_entries)); 1387 1388 err = mana_send_request(apc->ac, req, req_buf_size, &resp, 1389 sizeof(resp)); 1390 if (err) { 1391 if (mana_en_need_log(apc, err)) 1392 netdev_err(ndev, "Failed to configure vPort RX: %d\n", err); 1393 1394 goto out; 1395 } 1396 1397 err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX, 1398 sizeof(resp)); 1399 if (err) { 1400 netdev_err(ndev, "vPort RX configuration failed: %d\n", err); 1401 goto out; 1402 } 1403 1404 if (resp.hdr.status) { 1405 netdev_err(ndev, "vPort RX configuration failed: 0x%x\n", 1406 resp.hdr.status); 1407 err = -EPROTO; 1408 } 1409 1410 netdev_info(ndev, "Configured steering vPort %llu entries %u\n", 1411 apc->port_handle, apc->indir_table_sz); 1412 out: 1413 kfree(req); 1414 return err; 1415 } 1416 1417 int mana_query_link_cfg(struct mana_port_context *apc) 1418 { 1419 struct net_device *ndev = apc->ndev; 1420 struct mana_query_link_config_resp resp = {}; 1421 struct mana_query_link_config_req req = {}; 1422 int err; 1423 1424 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_LINK_CONFIG, 1425 sizeof(req), sizeof(resp)); 1426 1427 req.vport = apc->port_handle; 1428 req.hdr.resp.msg_version = GDMA_MESSAGE_V2; 1429 1430 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, 1431 sizeof(resp)); 1432 1433 if (err) { 1434 if (err == -EOPNOTSUPP) { 1435 netdev_info_once(ndev, "MANA_QUERY_LINK_CONFIG not supported\n"); 1436 return err; 1437 } 1438 netdev_err(ndev, "Failed to query link config: %d\n", err); 1439 return err; 1440 } 1441 1442 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_LINK_CONFIG, 1443 sizeof(resp)); 1444 1445 if (err || resp.hdr.status) { 1446 netdev_err(ndev, "Failed to query link config: %d, 0x%x\n", err, 1447 resp.hdr.status); 1448 if (!err) 1449 err = -EOPNOTSUPP; 1450 return err; 1451 } 1452 1453 if (resp.qos_unconfigured) { 1454 err = -EINVAL; 1455 return err; 1456 } 1457 apc->speed = resp.link_speed_mbps; 1458 apc->max_speed = resp.qos_speed_mbps; 1459 return 0; 1460 } 1461 1462 int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed, 1463 int enable_clamping) 1464 { 1465 struct mana_set_bw_clamp_resp resp = {}; 1466 struct mana_set_bw_clamp_req req = {}; 1467 struct net_device *ndev = apc->ndev; 1468 int err; 1469 1470 mana_gd_init_req_hdr(&req.hdr, MANA_SET_BW_CLAMP, 1471 sizeof(req), sizeof(resp)); 1472 req.vport = apc->port_handle; 1473 req.link_speed_mbps = speed; 1474 req.enable_clamping = enable_clamping; 1475 1476 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, 1477 sizeof(resp)); 1478 1479 if (err) { 1480 if (err == -EOPNOTSUPP) { 1481 netdev_info_once(ndev, "MANA_SET_BW_CLAMP not supported\n"); 1482 return err; 1483 } 1484 netdev_err(ndev, "Failed to set bandwidth clamp for speed %u, err = 
%d", 1485 speed, err); 1486 return err; 1487 } 1488 1489 err = mana_verify_resp_hdr(&resp.hdr, MANA_SET_BW_CLAMP, 1490 sizeof(resp)); 1491 1492 if (err || resp.hdr.status) { 1493 netdev_err(ndev, "Failed to set bandwidth clamp: %d, 0x%x\n", err, 1494 resp.hdr.status); 1495 if (!err) 1496 err = -EOPNOTSUPP; 1497 return err; 1498 } 1499 1500 if (resp.qos_unconfigured) 1501 netdev_info(ndev, "QoS is unconfigured\n"); 1502 1503 return 0; 1504 } 1505 1506 int mana_create_wq_obj(struct mana_port_context *apc, 1507 mana_handle_t vport, 1508 u32 wq_type, struct mana_obj_spec *wq_spec, 1509 struct mana_obj_spec *cq_spec, 1510 mana_handle_t *wq_obj) 1511 { 1512 struct mana_create_wqobj_resp resp = {}; 1513 struct mana_create_wqobj_req req = {}; 1514 struct net_device *ndev = apc->ndev; 1515 int err; 1516 1517 mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ, 1518 sizeof(req), sizeof(resp)); 1519 req.vport = vport; 1520 req.wq_type = wq_type; 1521 req.wq_gdma_region = wq_spec->gdma_region; 1522 req.cq_gdma_region = cq_spec->gdma_region; 1523 req.wq_size = wq_spec->queue_size; 1524 req.cq_size = cq_spec->queue_size; 1525 req.cq_moderation_ctx_id = cq_spec->modr_ctx_id; 1526 req.cq_parent_qid = cq_spec->attached_eq; 1527 1528 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, 1529 sizeof(resp)); 1530 if (err) { 1531 netdev_err(ndev, "Failed to create WQ object: %d\n", err); 1532 goto out; 1533 } 1534 1535 err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ, 1536 sizeof(resp)); 1537 if (err || resp.hdr.status) { 1538 netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err, 1539 resp.hdr.status); 1540 if (!err) 1541 err = -EPROTO; 1542 goto out; 1543 } 1544 1545 if (resp.wq_obj == INVALID_MANA_HANDLE) { 1546 netdev_err(ndev, "Got an invalid WQ object handle\n"); 1547 err = -EPROTO; 1548 goto out; 1549 } 1550 1551 *wq_obj = resp.wq_obj; 1552 wq_spec->queue_index = resp.wq_id; 1553 cq_spec->queue_index = resp.cq_id; 1554 1555 return 0; 1556 out: 1557 return err; 1558 } 1559 EXPORT_SYMBOL_NS(mana_create_wq_obj, "NET_MANA"); 1560 1561 void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type, 1562 mana_handle_t wq_obj) 1563 { 1564 struct mana_destroy_wqobj_resp resp = {}; 1565 struct mana_destroy_wqobj_req req = {}; 1566 struct net_device *ndev = apc->ndev; 1567 int err; 1568 1569 mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ, 1570 sizeof(req), sizeof(resp)); 1571 req.wq_type = wq_type; 1572 req.wq_obj_handle = wq_obj; 1573 1574 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, 1575 sizeof(resp)); 1576 if (err) { 1577 if (mana_en_need_log(apc, err)) 1578 netdev_err(ndev, "Failed to destroy WQ object: %d\n", err); 1579 1580 return; 1581 } 1582 1583 err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ, 1584 sizeof(resp)); 1585 if (err || resp.hdr.status) 1586 netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err, 1587 resp.hdr.status); 1588 } 1589 EXPORT_SYMBOL_NS(mana_destroy_wq_obj, "NET_MANA"); 1590 1591 static void mana_destroy_eq(struct mana_context *ac) 1592 { 1593 struct gdma_context *gc = ac->gdma_dev->gdma_context; 1594 struct gdma_queue *eq; 1595 int i; 1596 1597 if (!ac->eqs) 1598 return; 1599 1600 debugfs_remove_recursive(ac->mana_eqs_debugfs); 1601 ac->mana_eqs_debugfs = NULL; 1602 1603 for (i = 0; i < gc->max_num_queues; i++) { 1604 eq = ac->eqs[i].eq; 1605 if (!eq) 1606 continue; 1607 1608 mana_gd_destroy_queue(gc, eq); 1609 } 1610 1611 kfree(ac->eqs); 1612 ac->eqs = NULL; 1613 } 1614 1615 static void mana_create_eq_debugfs(struct 
mana_context *ac, int i) 1616 { 1617 struct mana_eq eq = ac->eqs[i]; 1618 char eqnum[32]; 1619 1620 sprintf(eqnum, "eq%d", i); 1621 eq.mana_eq_debugfs = debugfs_create_dir(eqnum, ac->mana_eqs_debugfs); 1622 debugfs_create_u32("head", 0400, eq.mana_eq_debugfs, &eq.eq->head); 1623 debugfs_create_u32("tail", 0400, eq.mana_eq_debugfs, &eq.eq->tail); 1624 debugfs_create_file("eq_dump", 0400, eq.mana_eq_debugfs, eq.eq, &mana_dbg_q_fops); 1625 } 1626 1627 static int mana_create_eq(struct mana_context *ac) 1628 { 1629 struct gdma_dev *gd = ac->gdma_dev; 1630 struct gdma_context *gc = gd->gdma_context; 1631 struct gdma_queue_spec spec = {}; 1632 int err; 1633 int i; 1634 1635 ac->eqs = kzalloc_objs(struct mana_eq, gc->max_num_queues); 1636 if (!ac->eqs) 1637 return -ENOMEM; 1638 1639 spec.type = GDMA_EQ; 1640 spec.monitor_avl_buf = false; 1641 spec.queue_size = EQ_SIZE; 1642 spec.eq.callback = NULL; 1643 spec.eq.context = ac->eqs; 1644 spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE; 1645 1646 ac->mana_eqs_debugfs = debugfs_create_dir("EQs", gc->mana_pci_debugfs); 1647 1648 for (i = 0; i < gc->max_num_queues; i++) { 1649 spec.eq.msix_index = (i + 1) % gc->num_msix_usable; 1650 err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq); 1651 if (err) { 1652 dev_err(gc->dev, "Failed to create EQ %d : %d\n", i, err); 1653 goto out; 1654 } 1655 mana_create_eq_debugfs(ac, i); 1656 } 1657 1658 return 0; 1659 out: 1660 mana_destroy_eq(ac); 1661 return err; 1662 } 1663 1664 static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq) 1665 { 1666 struct mana_fence_rq_resp resp = {}; 1667 struct mana_fence_rq_req req = {}; 1668 int err; 1669 1670 init_completion(&rxq->fence_event); 1671 1672 mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ, 1673 sizeof(req), sizeof(resp)); 1674 req.wq_obj_handle = rxq->rxobj; 1675 1676 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, 1677 sizeof(resp)); 1678 if (err) { 1679 netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n", 1680 rxq->rxq_idx, err); 1681 return err; 1682 } 1683 1684 err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp)); 1685 if (err || resp.hdr.status) { 1686 netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n", 1687 rxq->rxq_idx, err, resp.hdr.status); 1688 if (!err) 1689 err = -EPROTO; 1690 1691 return err; 1692 } 1693 1694 if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) { 1695 netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n", 1696 rxq->rxq_idx); 1697 return -ETIMEDOUT; 1698 } 1699 1700 return 0; 1701 } 1702 1703 static void mana_fence_rqs(struct mana_port_context *apc) 1704 { 1705 unsigned int rxq_idx; 1706 struct mana_rxq *rxq; 1707 int err; 1708 1709 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) { 1710 rxq = apc->rxqs[rxq_idx]; 1711 err = mana_fence_rq(apc, rxq); 1712 1713 /* In case of any error, use sleep instead. 
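 * A 100 ms delay is used as a best-effort substitute for the fence
 * completion.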
*/ 1714 if (err) 1715 msleep(100); 1716 } 1717 } 1718 1719 static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units) 1720 { 1721 u32 used_space_old; 1722 u32 used_space_new; 1723 1724 used_space_old = wq->head - wq->tail; 1725 used_space_new = wq->head - (wq->tail + num_units); 1726 1727 if (WARN_ON_ONCE(used_space_new > used_space_old)) 1728 return -ERANGE; 1729 1730 wq->tail += num_units; 1731 return 0; 1732 } 1733 1734 void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc) 1735 { 1736 struct mana_skb_head *ash = (struct mana_skb_head *)skb->head; 1737 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; 1738 struct device *dev = gc->dev; 1739 int hsg, i; 1740 1741 /* Number of SGEs of linear part */ 1742 hsg = (skb_is_gso(skb) && skb_headlen(skb) > ash->size[0]) ? 2 : 1; 1743 1744 for (i = 0; i < hsg; i++) 1745 dma_unmap_single(dev, ash->dma_handle[i], ash->size[i], 1746 DMA_TO_DEVICE); 1747 1748 for (i = hsg; i < skb_shinfo(skb)->nr_frags + hsg; i++) 1749 dma_unmap_page(dev, ash->dma_handle[i], ash->size[i], 1750 DMA_TO_DEVICE); 1751 } 1752 1753 static void mana_poll_tx_cq(struct mana_cq *cq) 1754 { 1755 struct gdma_comp *completions = cq->gdma_comp_buf; 1756 struct gdma_posted_wqe_info *wqe_info; 1757 unsigned int pkt_transmitted = 0; 1758 unsigned int wqe_unit_cnt = 0; 1759 struct mana_txq *txq = cq->txq; 1760 struct mana_port_context *apc; 1761 struct netdev_queue *net_txq; 1762 struct gdma_queue *gdma_wq; 1763 unsigned int avail_space; 1764 struct net_device *ndev; 1765 struct sk_buff *skb; 1766 bool txq_stopped; 1767 int comp_read; 1768 int i; 1769 1770 ndev = txq->ndev; 1771 apc = netdev_priv(ndev); 1772 1773 comp_read = mana_gd_poll_cq(cq->gdma_cq, completions, 1774 CQE_POLLING_BUFFER); 1775 1776 if (comp_read < 1) 1777 return; 1778 1779 for (i = 0; i < comp_read; i++) { 1780 struct mana_tx_comp_oob *cqe_oob; 1781 1782 if (WARN_ON_ONCE(!completions[i].is_sq)) 1783 return; 1784 1785 cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data; 1786 if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type != 1787 MANA_CQE_COMPLETION)) 1788 return; 1789 1790 switch (cqe_oob->cqe_hdr.cqe_type) { 1791 case CQE_TX_OKAY: 1792 break; 1793 1794 case CQE_TX_SA_DROP: 1795 case CQE_TX_MTU_DROP: 1796 case CQE_TX_INVALID_OOB: 1797 case CQE_TX_INVALID_ETH_TYPE: 1798 case CQE_TX_HDR_PROCESSING_ERROR: 1799 case CQE_TX_VF_DISABLED: 1800 case CQE_TX_VPORT_IDX_OUT_OF_RANGE: 1801 case CQE_TX_VPORT_DISABLED: 1802 case CQE_TX_VLAN_TAGGING_VIOLATION: 1803 if (net_ratelimit()) 1804 netdev_err(ndev, "TX: CQE error %d\n", 1805 cqe_oob->cqe_hdr.cqe_type); 1806 1807 apc->eth_stats.tx_cqe_err++; 1808 break; 1809 1810 default: 1811 /* If the CQE type is unknown, log an error, 1812 * and still free the SKB, update tail, etc. 
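 * so that the WQ tail and pending_sends accounting stay consistent.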
1813 */ 1814 if (net_ratelimit()) 1815 netdev_err(ndev, "TX: unknown CQE type %d\n", 1816 cqe_oob->cqe_hdr.cqe_type); 1817 1818 apc->eth_stats.tx_cqe_unknown_type++; 1819 break; 1820 } 1821 1822 if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num)) 1823 return; 1824 1825 skb = skb_dequeue(&txq->pending_skbs); 1826 if (WARN_ON_ONCE(!skb)) 1827 return; 1828 1829 wqe_info = (struct gdma_posted_wqe_info *)skb->cb; 1830 wqe_unit_cnt += wqe_info->wqe_size_in_bu; 1831 1832 mana_unmap_skb(skb, apc); 1833 1834 napi_consume_skb(skb, cq->budget); 1835 1836 pkt_transmitted++; 1837 } 1838 1839 if (WARN_ON_ONCE(wqe_unit_cnt == 0)) 1840 return; 1841 1842 mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt); 1843 1844 gdma_wq = txq->gdma_sq; 1845 avail_space = mana_gd_wq_avail_space(gdma_wq); 1846 1847 /* Ensure tail updated before checking q stop */ 1848 smp_mb(); 1849 1850 net_txq = txq->net_txq; 1851 txq_stopped = netif_tx_queue_stopped(net_txq); 1852 1853 /* Ensure checking txq_stopped before apc->port_is_up. */ 1854 smp_rmb(); 1855 1856 if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) { 1857 netif_tx_wake_queue(net_txq); 1858 apc->eth_stats.wake_queue++; 1859 } 1860 1861 if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0) 1862 WARN_ON_ONCE(1); 1863 1864 cq->work_done = pkt_transmitted; 1865 } 1866 1867 static void mana_post_pkt_rxq(struct mana_rxq *rxq) 1868 { 1869 struct mana_recv_buf_oob *recv_buf_oob; 1870 u32 curr_index; 1871 int err; 1872 1873 curr_index = rxq->buf_index++; 1874 if (rxq->buf_index == rxq->num_rx_buf) 1875 rxq->buf_index = 0; 1876 1877 recv_buf_oob = &rxq->rx_oobs[curr_index]; 1878 1879 err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req, 1880 &recv_buf_oob->wqe_inf); 1881 if (WARN_ON_ONCE(err)) 1882 return; 1883 1884 WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1); 1885 } 1886 1887 static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va, 1888 uint pkt_len, struct xdp_buff *xdp) 1889 { 1890 struct sk_buff *skb = napi_build_skb(buf_va, rxq->alloc_size); 1891 1892 if (!skb) 1893 return NULL; 1894 1895 if (xdp->data_hard_start) { 1896 u32 metasize = xdp->data - xdp->data_meta; 1897 1898 skb_reserve(skb, xdp->data - xdp->data_hard_start); 1899 skb_put(skb, xdp->data_end - xdp->data); 1900 if (metasize) 1901 skb_metadata_set(skb, metasize); 1902 return skb; 1903 } 1904 1905 skb_reserve(skb, rxq->headroom); 1906 skb_put(skb, pkt_len); 1907 1908 return skb; 1909 } 1910 1911 static void mana_rx_skb(void *buf_va, bool from_pool, 1912 struct mana_rxcomp_oob *cqe, struct mana_rxq *rxq) 1913 { 1914 struct mana_stats_rx *rx_stats = &rxq->stats; 1915 struct net_device *ndev = rxq->ndev; 1916 uint pkt_len = cqe->ppi[0].pkt_len; 1917 u16 rxq_idx = rxq->rxq_idx; 1918 struct napi_struct *napi; 1919 struct xdp_buff xdp = {}; 1920 struct sk_buff *skb; 1921 u32 hash_value; 1922 u32 act; 1923 1924 rxq->rx_cq.work_done++; 1925 napi = &rxq->rx_cq.napi; 1926 1927 if (!buf_va) { 1928 ++ndev->stats.rx_dropped; 1929 return; 1930 } 1931 1932 act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len); 1933 1934 if (act == XDP_REDIRECT && !rxq->xdp_rc) 1935 return; 1936 1937 if (act != XDP_PASS && act != XDP_TX) 1938 goto drop_xdp; 1939 1940 skb = mana_build_skb(rxq, buf_va, pkt_len, &xdp); 1941 1942 if (!skb) 1943 goto drop; 1944 1945 if (from_pool) 1946 skb_mark_for_recycle(skb); 1947 1948 skb->dev = napi->dev; 1949 1950 skb->protocol = eth_type_trans(skb, ndev); 1951 skb_checksum_none_assert(skb); 1952 skb_record_rx_queue(skb, rxq_idx); 
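/* Propagate checksum validation, RSS hash and VLAN tag info from the CQE to the skb */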
1953 1954 if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) { 1955 if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed) 1956 skb->ip_summed = CHECKSUM_UNNECESSARY; 1957 } 1958 1959 if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) { 1960 hash_value = cqe->ppi[0].pkt_hash; 1961 1962 if (cqe->rx_hashtype & MANA_HASH_L4) 1963 skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4); 1964 else 1965 skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3); 1966 } 1967 1968 if (cqe->rx_vlantag_present) { 1969 u16 vlan_tci = cqe->rx_vlan_id; 1970 1971 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); 1972 } 1973 1974 u64_stats_update_begin(&rx_stats->syncp); 1975 rx_stats->packets++; 1976 rx_stats->bytes += pkt_len; 1977 1978 if (act == XDP_TX) 1979 rx_stats->xdp_tx++; 1980 u64_stats_update_end(&rx_stats->syncp); 1981 1982 if (act == XDP_TX) { 1983 skb_set_queue_mapping(skb, rxq_idx); 1984 mana_xdp_tx(skb, ndev); 1985 return; 1986 } 1987 1988 napi_gro_receive(napi, skb); 1989 1990 return; 1991 1992 drop_xdp: 1993 u64_stats_update_begin(&rx_stats->syncp); 1994 rx_stats->xdp_drop++; 1995 u64_stats_update_end(&rx_stats->syncp); 1996 1997 drop: 1998 if (from_pool) { 1999 if (rxq->frag_count == 1) 2000 page_pool_recycle_direct(rxq->page_pool, 2001 virt_to_head_page(buf_va)); 2002 else 2003 page_pool_free_va(rxq->page_pool, buf_va, true); 2004 } else { 2005 WARN_ON_ONCE(rxq->xdp_save_va); 2006 /* Save for reuse */ 2007 rxq->xdp_save_va = buf_va; 2008 } 2009 2010 ++ndev->stats.rx_dropped; 2011 2012 return; 2013 } 2014 2015 static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev, 2016 dma_addr_t *da, bool *from_pool) 2017 { 2018 struct page *page; 2019 u32 offset; 2020 void *va; 2021 *from_pool = false; 2022 2023 /* Don't use fragments for jumbo frames or XDP where it's 1 fragment 2024 * per page. 
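 * In that case a full page is allocated (or a page saved from an XDP drop
 * is reused) and DMA-mapped for each RX buffer.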
2025 */ 2026 if (rxq->frag_count == 1) { 2027 /* Reuse XDP dropped page if available */ 2028 if (rxq->xdp_save_va) { 2029 va = rxq->xdp_save_va; 2030 page = virt_to_head_page(va); 2031 rxq->xdp_save_va = NULL; 2032 } else { 2033 page = page_pool_dev_alloc_pages(rxq->page_pool); 2034 if (!page) 2035 return NULL; 2036 2037 *from_pool = true; 2038 va = page_to_virt(page); 2039 } 2040 2041 *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize, 2042 DMA_FROM_DEVICE); 2043 if (dma_mapping_error(dev, *da)) { 2044 mana_put_rx_page(rxq, page, *from_pool); 2045 return NULL; 2046 } 2047 2048 return va; 2049 } 2050 2051 page = page_pool_dev_alloc_frag(rxq->page_pool, &offset, 2052 rxq->alloc_size); 2053 if (!page) 2054 return NULL; 2055 2056 va = page_to_virt(page) + offset; 2057 *da = page_pool_get_dma_addr(page) + offset + rxq->headroom; 2058 *from_pool = true; 2059 2060 return va; 2061 } 2062 2063 /* Allocate frag for rx buffer, and save the old buf */ 2064 static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq, 2065 struct mana_recv_buf_oob *rxoob, void **old_buf, 2066 bool *old_fp) 2067 { 2068 bool from_pool; 2069 dma_addr_t da; 2070 void *va; 2071 2072 va = mana_get_rxfrag(rxq, dev, &da, &from_pool); 2073 if (!va) 2074 return; 2075 if (!rxoob->from_pool || rxq->frag_count == 1) 2076 dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize, 2077 DMA_FROM_DEVICE); 2078 *old_buf = rxoob->buf_va; 2079 *old_fp = rxoob->from_pool; 2080 2081 rxoob->buf_va = va; 2082 rxoob->sgl[0].address = da; 2083 rxoob->from_pool = from_pool; 2084 } 2085 2086 static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq, 2087 struct gdma_comp *cqe) 2088 { 2089 struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data; 2090 struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context; 2091 struct net_device *ndev = rxq->ndev; 2092 struct mana_recv_buf_oob *rxbuf_oob; 2093 struct mana_port_context *apc; 2094 struct device *dev = gc->dev; 2095 void *old_buf = NULL; 2096 u32 curr, pktlen; 2097 bool old_fp; 2098 2099 apc = netdev_priv(ndev); 2100 2101 switch (oob->cqe_hdr.cqe_type) { 2102 case CQE_RX_OKAY: 2103 break; 2104 2105 case CQE_RX_TRUNCATED: 2106 ++ndev->stats.rx_dropped; 2107 rxbuf_oob = &rxq->rx_oobs[rxq->buf_index]; 2108 netdev_warn_once(ndev, "Dropped a truncated packet\n"); 2109 goto drop; 2110 2111 case CQE_RX_COALESCED_4: 2112 netdev_err(ndev, "RX coalescing is unsupported\n"); 2113 apc->eth_stats.rx_coalesced_err++; 2114 return; 2115 2116 case CQE_RX_OBJECT_FENCE: 2117 complete(&rxq->fence_event); 2118 return; 2119 2120 default: 2121 netdev_err(ndev, "Unknown RX CQE type = %d\n", 2122 oob->cqe_hdr.cqe_type); 2123 apc->eth_stats.rx_cqe_unknown_type++; 2124 return; 2125 } 2126 2127 pktlen = oob->ppi[0].pkt_len; 2128 2129 if (pktlen == 0) { 2130 /* data packets should never have packetlength of zero */ 2131 netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n", 2132 rxq->gdma_id, cq->gdma_id, rxq->rxobj); 2133 return; 2134 } 2135 2136 curr = rxq->buf_index; 2137 rxbuf_oob = &rxq->rx_oobs[curr]; 2138 WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1); 2139 2140 mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf, &old_fp); 2141 2142 /* Unsuccessful refill will have old_buf == NULL. 2143 * In this case, mana_rx_skb() will drop the packet. 
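 * The existing buffer is then re-posted to the RQ unchanged.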
2144 */ 2145 mana_rx_skb(old_buf, old_fp, oob, rxq); 2146 2147 drop: 2148 mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu); 2149 2150 mana_post_pkt_rxq(rxq); 2151 } 2152 2153 static void mana_poll_rx_cq(struct mana_cq *cq) 2154 { 2155 struct gdma_comp *comp = cq->gdma_comp_buf; 2156 struct mana_rxq *rxq = cq->rxq; 2157 int comp_read, i; 2158 2159 comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER); 2160 WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER); 2161 2162 rxq->xdp_flush = false; 2163 2164 for (i = 0; i < comp_read; i++) { 2165 if (WARN_ON_ONCE(comp[i].is_sq)) 2166 return; 2167 2168 /* verify recv cqe references the right rxq */ 2169 if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id)) 2170 return; 2171 2172 mana_process_rx_cqe(rxq, cq, &comp[i]); 2173 } 2174 2175 if (comp_read > 0) { 2176 struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context; 2177 2178 mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq); 2179 } 2180 2181 if (rxq->xdp_flush) 2182 xdp_do_flush(); 2183 } 2184 2185 static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue) 2186 { 2187 struct mana_cq *cq = context; 2188 int w; 2189 2190 WARN_ON_ONCE(cq->gdma_cq != gdma_queue); 2191 2192 if (cq->type == MANA_CQ_TYPE_RX) 2193 mana_poll_rx_cq(cq); 2194 else 2195 mana_poll_tx_cq(cq); 2196 2197 w = cq->work_done; 2198 cq->work_done_since_doorbell += w; 2199 2200 if (w < cq->budget) { 2201 mana_gd_ring_cq(gdma_queue, SET_ARM_BIT); 2202 cq->work_done_since_doorbell = 0; 2203 napi_complete_done(&cq->napi, w); 2204 } else if (cq->work_done_since_doorbell > 2205 cq->gdma_cq->queue_size / COMP_ENTRY_SIZE * 4) { 2206 /* MANA hardware requires at least one doorbell ring every 8 2207 * wraparounds of CQ even if there is no need to arm the CQ. 2208 * This driver rings the doorbell as soon as we have exceeded 2209 * 4 wraparounds. 
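 * Ringing with arm_bit 0 only updates the doorbell; the CQ interrupt is
 * re-armed in the branch above once work done drops below the budget.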
2210 */ 2211 mana_gd_ring_cq(gdma_queue, 0); 2212 cq->work_done_since_doorbell = 0; 2213 } 2214 2215 return w; 2216 } 2217 2218 static int mana_poll(struct napi_struct *napi, int budget) 2219 { 2220 struct mana_cq *cq = container_of(napi, struct mana_cq, napi); 2221 int w; 2222 2223 cq->work_done = 0; 2224 cq->budget = budget; 2225 2226 w = mana_cq_handler(cq, cq->gdma_cq); 2227 2228 return min(w, budget); 2229 } 2230 2231 static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue) 2232 { 2233 struct mana_cq *cq = context; 2234 2235 napi_schedule_irqoff(&cq->napi); 2236 } 2237 2238 static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq) 2239 { 2240 struct gdma_dev *gd = apc->ac->gdma_dev; 2241 2242 if (!cq->gdma_cq) 2243 return; 2244 2245 mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq); 2246 } 2247 2248 static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq) 2249 { 2250 struct gdma_dev *gd = apc->ac->gdma_dev; 2251 2252 if (!txq->gdma_sq) 2253 return; 2254 2255 mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq); 2256 } 2257 2258 static void mana_destroy_txq(struct mana_port_context *apc) 2259 { 2260 struct napi_struct *napi; 2261 int i; 2262 2263 if (!apc->tx_qp) 2264 return; 2265 2266 for (i = 0; i < apc->num_queues; i++) { 2267 debugfs_remove_recursive(apc->tx_qp[i].mana_tx_debugfs); 2268 apc->tx_qp[i].mana_tx_debugfs = NULL; 2269 2270 napi = &apc->tx_qp[i].tx_cq.napi; 2271 if (apc->tx_qp[i].txq.napi_initialized) { 2272 napi_synchronize(napi); 2273 napi_disable_locked(napi); 2274 netif_napi_del_locked(napi); 2275 apc->tx_qp[i].txq.napi_initialized = false; 2276 } 2277 mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object); 2278 2279 mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq); 2280 2281 mana_deinit_txq(apc, &apc->tx_qp[i].txq); 2282 } 2283 2284 kfree(apc->tx_qp); 2285 apc->tx_qp = NULL; 2286 } 2287 2288 static void mana_create_txq_debugfs(struct mana_port_context *apc, int idx) 2289 { 2290 struct mana_tx_qp *tx_qp = &apc->tx_qp[idx]; 2291 char qnum[32]; 2292 2293 sprintf(qnum, "TX-%d", idx); 2294 tx_qp->mana_tx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs); 2295 debugfs_create_u32("sq_head", 0400, tx_qp->mana_tx_debugfs, 2296 &tx_qp->txq.gdma_sq->head); 2297 debugfs_create_u32("sq_tail", 0400, tx_qp->mana_tx_debugfs, 2298 &tx_qp->txq.gdma_sq->tail); 2299 debugfs_create_u32("sq_pend_skb_qlen", 0400, tx_qp->mana_tx_debugfs, 2300 &tx_qp->txq.pending_skbs.qlen); 2301 debugfs_create_u32("cq_head", 0400, tx_qp->mana_tx_debugfs, 2302 &tx_qp->tx_cq.gdma_cq->head); 2303 debugfs_create_u32("cq_tail", 0400, tx_qp->mana_tx_debugfs, 2304 &tx_qp->tx_cq.gdma_cq->tail); 2305 debugfs_create_u32("cq_budget", 0400, tx_qp->mana_tx_debugfs, 2306 &tx_qp->tx_cq.budget); 2307 debugfs_create_file("txq_dump", 0400, tx_qp->mana_tx_debugfs, 2308 tx_qp->txq.gdma_sq, &mana_dbg_q_fops); 2309 debugfs_create_file("cq_dump", 0400, tx_qp->mana_tx_debugfs, 2310 tx_qp->tx_cq.gdma_cq, &mana_dbg_q_fops); 2311 } 2312 2313 static int mana_create_txq(struct mana_port_context *apc, 2314 struct net_device *net) 2315 { 2316 struct mana_context *ac = apc->ac; 2317 struct gdma_dev *gd = ac->gdma_dev; 2318 struct mana_obj_spec wq_spec; 2319 struct mana_obj_spec cq_spec; 2320 struct gdma_queue_spec spec; 2321 struct gdma_context *gc; 2322 struct mana_txq *txq; 2323 struct mana_cq *cq; 2324 u32 txq_size; 2325 u32 cq_size; 2326 int err; 2327 int i; 2328 2329 apc->tx_qp = kzalloc_objs(struct mana_tx_qp, apc->num_queues); 2330 if (!apc->tx_qp) 2331 
return -ENOMEM; 2332 2333 /* The minimum size of the WQE is 32 bytes, hence 2334 * apc->tx_queue_size represents the maximum number of WQEs 2335 * the SQ can store. This value is then used to size other queues 2336 * to prevent overflow. 2337 * Also note that the txq_size is always going to be MANA_PAGE_ALIGNED, 2338 * as min val of apc->tx_queue_size is 128 and that would make 2339 * txq_size 128*32 = 4096 and the other higher values of apc->tx_queue_size 2340 * are always power of two 2341 */ 2342 txq_size = apc->tx_queue_size * 32; 2343 2344 cq_size = apc->tx_queue_size * COMP_ENTRY_SIZE; 2345 2346 gc = gd->gdma_context; 2347 2348 for (i = 0; i < apc->num_queues; i++) { 2349 apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE; 2350 2351 /* Create SQ */ 2352 txq = &apc->tx_qp[i].txq; 2353 2354 u64_stats_init(&txq->stats.syncp); 2355 txq->ndev = net; 2356 txq->net_txq = netdev_get_tx_queue(net, i); 2357 txq->vp_offset = apc->tx_vp_offset; 2358 txq->napi_initialized = false; 2359 skb_queue_head_init(&txq->pending_skbs); 2360 2361 memset(&spec, 0, sizeof(spec)); 2362 spec.type = GDMA_SQ; 2363 spec.monitor_avl_buf = true; 2364 spec.queue_size = txq_size; 2365 err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq); 2366 if (err) 2367 goto out; 2368 2369 /* Create SQ's CQ */ 2370 cq = &apc->tx_qp[i].tx_cq; 2371 cq->type = MANA_CQ_TYPE_TX; 2372 2373 cq->txq = txq; 2374 2375 memset(&spec, 0, sizeof(spec)); 2376 spec.type = GDMA_CQ; 2377 spec.monitor_avl_buf = false; 2378 spec.queue_size = cq_size; 2379 spec.cq.callback = mana_schedule_napi; 2380 spec.cq.parent_eq = ac->eqs[i].eq; 2381 spec.cq.context = cq; 2382 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq); 2383 if (err) 2384 goto out; 2385 2386 memset(&wq_spec, 0, sizeof(wq_spec)); 2387 memset(&cq_spec, 0, sizeof(cq_spec)); 2388 2389 wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle; 2390 wq_spec.queue_size = txq->gdma_sq->queue_size; 2391 2392 cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle; 2393 cq_spec.queue_size = cq->gdma_cq->queue_size; 2394 cq_spec.modr_ctx_id = 0; 2395 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id; 2396 2397 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ, 2398 &wq_spec, &cq_spec, 2399 &apc->tx_qp[i].tx_object); 2400 2401 if (err) 2402 goto out; 2403 2404 txq->gdma_sq->id = wq_spec.queue_index; 2405 cq->gdma_cq->id = cq_spec.queue_index; 2406 2407 txq->gdma_sq->mem_info.dma_region_handle = 2408 GDMA_INVALID_DMA_REGION; 2409 cq->gdma_cq->mem_info.dma_region_handle = 2410 GDMA_INVALID_DMA_REGION; 2411 2412 txq->gdma_txq_id = txq->gdma_sq->id; 2413 2414 cq->gdma_id = cq->gdma_cq->id; 2415 2416 if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) { 2417 err = -EINVAL; 2418 goto out; 2419 } 2420 2421 gc->cq_table[cq->gdma_id] = cq->gdma_cq; 2422 2423 mana_create_txq_debugfs(apc, i); 2424 2425 set_bit(NAPI_STATE_NO_BUSY_POLL, &cq->napi.state); 2426 netif_napi_add_locked(net, &cq->napi, mana_poll); 2427 napi_enable_locked(&cq->napi); 2428 txq->napi_initialized = true; 2429 2430 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); 2431 } 2432 2433 return 0; 2434 out: 2435 netdev_err(net, "Failed to create %d TX queues, %d\n", 2436 apc->num_queues, err); 2437 mana_destroy_txq(apc); 2438 return err; 2439 } 2440 2441 static void mana_destroy_rxq(struct mana_port_context *apc, 2442 struct mana_rxq *rxq, bool napi_initialized) 2443 2444 { 2445 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; 2446 struct mana_recv_buf_oob *rx_oob; 2447 struct device *dev = gc->dev; 2448 struct napi_struct *napi; 
2449 struct page *page; 2450 int i; 2451 2452 if (!rxq) 2453 return; 2454 2455 debugfs_remove_recursive(rxq->mana_rx_debugfs); 2456 rxq->mana_rx_debugfs = NULL; 2457 2458 napi = &rxq->rx_cq.napi; 2459 2460 if (napi_initialized) { 2461 napi_synchronize(napi); 2462 2463 napi_disable_locked(napi); 2464 netif_napi_del_locked(napi); 2465 } 2466 xdp_rxq_info_unreg(&rxq->xdp_rxq); 2467 2468 mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj); 2469 2470 mana_deinit_cq(apc, &rxq->rx_cq); 2471 2472 if (rxq->xdp_save_va) 2473 put_page(virt_to_head_page(rxq->xdp_save_va)); 2474 2475 for (i = 0; i < rxq->num_rx_buf; i++) { 2476 rx_oob = &rxq->rx_oobs[i]; 2477 2478 if (!rx_oob->buf_va) 2479 continue; 2480 2481 page = virt_to_head_page(rx_oob->buf_va); 2482 2483 if (rxq->frag_count == 1 || !rx_oob->from_pool) { 2484 dma_unmap_single(dev, rx_oob->sgl[0].address, 2485 rx_oob->sgl[0].size, DMA_FROM_DEVICE); 2486 mana_put_rx_page(rxq, page, rx_oob->from_pool); 2487 } else { 2488 page_pool_free_va(rxq->page_pool, rx_oob->buf_va, true); 2489 } 2490 2491 rx_oob->buf_va = NULL; 2492 } 2493 2494 page_pool_destroy(rxq->page_pool); 2495 2496 if (rxq->gdma_rq) 2497 mana_gd_destroy_queue(gc, rxq->gdma_rq); 2498 2499 kfree(rxq); 2500 } 2501 2502 static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key, 2503 struct mana_rxq *rxq, struct device *dev) 2504 { 2505 struct mana_port_context *mpc = netdev_priv(rxq->ndev); 2506 bool from_pool = false; 2507 dma_addr_t da; 2508 void *va; 2509 2510 if (mpc->rxbufs_pre) 2511 va = mana_get_rxbuf_pre(rxq, &da); 2512 else 2513 va = mana_get_rxfrag(rxq, dev, &da, &from_pool); 2514 2515 if (!va) 2516 return -ENOMEM; 2517 2518 rx_oob->buf_va = va; 2519 rx_oob->from_pool = from_pool; 2520 2521 rx_oob->sgl[0].address = da; 2522 rx_oob->sgl[0].size = rxq->datasize; 2523 rx_oob->sgl[0].mem_key = mem_key; 2524 2525 return 0; 2526 } 2527 2528 #define MANA_WQE_HEADER_SIZE 16 2529 #define MANA_WQE_SGE_SIZE 16 2530 2531 static int mana_alloc_rx_wqe(struct mana_port_context *apc, 2532 struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size) 2533 { 2534 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; 2535 struct mana_recv_buf_oob *rx_oob; 2536 struct device *dev = gc->dev; 2537 u32 buf_idx; 2538 int ret; 2539 2540 WARN_ON(rxq->datasize == 0); 2541 2542 *rxq_size = 0; 2543 *cq_size = 0; 2544 2545 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) { 2546 rx_oob = &rxq->rx_oobs[buf_idx]; 2547 memset(rx_oob, 0, sizeof(*rx_oob)); 2548 2549 rx_oob->num_sge = 1; 2550 2551 ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq, 2552 dev); 2553 if (ret) 2554 return ret; 2555 2556 rx_oob->wqe_req.sgl = rx_oob->sgl; 2557 rx_oob->wqe_req.num_sge = rx_oob->num_sge; 2558 rx_oob->wqe_req.inline_oob_size = 0; 2559 rx_oob->wqe_req.inline_oob_data = NULL; 2560 rx_oob->wqe_req.flags = 0; 2561 rx_oob->wqe_req.client_data_unit = 0; 2562 2563 *rxq_size += ALIGN(MANA_WQE_HEADER_SIZE + 2564 MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32); 2565 *cq_size += COMP_ENTRY_SIZE; 2566 } 2567 2568 return 0; 2569 } 2570 2571 static int mana_push_wqe(struct mana_rxq *rxq) 2572 { 2573 struct mana_recv_buf_oob *rx_oob; 2574 u32 buf_idx; 2575 int err; 2576 2577 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) { 2578 rx_oob = &rxq->rx_oobs[buf_idx]; 2579 2580 err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req, 2581 &rx_oob->wqe_inf); 2582 if (err) 2583 return -ENOSPC; 2584 } 2585 2586 return 0; 2587 } 2588 2589 static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc) 2590 { 
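/* One pool page backs rxq->frag_count receive buffers, so the pool is
 * sized to cover the whole RX queue plus one spare page.
 */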
2591 struct mana_port_context *mpc = netdev_priv(rxq->ndev); 2592 struct page_pool_params pprm = {}; 2593 int ret; 2594 2595 pprm.pool_size = mpc->rx_queue_size / rxq->frag_count + 1; 2596 pprm.nid = gc->numa_node; 2597 pprm.napi = &rxq->rx_cq.napi; 2598 pprm.netdev = rxq->ndev; 2599 pprm.order = get_order(rxq->alloc_size); 2600 pprm.queue_idx = rxq->rxq_idx; 2601 pprm.dev = gc->dev; 2602 2603 /* Let the page pool do the dma map when page sharing with multiple 2604 * fragments enabled for rx buffers. 2605 */ 2606 if (rxq->frag_count > 1) { 2607 pprm.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; 2608 pprm.max_len = PAGE_SIZE; 2609 pprm.dma_dir = DMA_FROM_DEVICE; 2610 } 2611 2612 rxq->page_pool = page_pool_create(&pprm); 2613 2614 if (IS_ERR(rxq->page_pool)) { 2615 ret = PTR_ERR(rxq->page_pool); 2616 rxq->page_pool = NULL; 2617 return ret; 2618 } 2619 2620 return 0; 2621 } 2622 2623 static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, 2624 u32 rxq_idx, struct mana_eq *eq, 2625 struct net_device *ndev) 2626 { 2627 struct gdma_dev *gd = apc->ac->gdma_dev; 2628 struct mana_obj_spec wq_spec; 2629 struct mana_obj_spec cq_spec; 2630 struct gdma_queue_spec spec; 2631 struct mana_cq *cq = NULL; 2632 struct gdma_context *gc; 2633 u32 cq_size, rq_size; 2634 struct mana_rxq *rxq; 2635 int err; 2636 2637 gc = gd->gdma_context; 2638 2639 rxq = kzalloc_flex(*rxq, rx_oobs, apc->rx_queue_size); 2640 if (!rxq) 2641 return NULL; 2642 2643 rxq->ndev = ndev; 2644 rxq->num_rx_buf = apc->rx_queue_size; 2645 rxq->rxq_idx = rxq_idx; 2646 rxq->rxobj = INVALID_MANA_HANDLE; 2647 2648 mana_get_rxbuf_cfg(apc, ndev->mtu, &rxq->datasize, &rxq->alloc_size, 2649 &rxq->headroom, &rxq->frag_count); 2650 /* Create page pool for RX queue */ 2651 err = mana_create_page_pool(rxq, gc); 2652 if (err) { 2653 netdev_err(ndev, "Create page pool err:%d\n", err); 2654 goto out; 2655 } 2656 2657 err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size); 2658 if (err) 2659 goto out; 2660 2661 rq_size = MANA_PAGE_ALIGN(rq_size); 2662 cq_size = MANA_PAGE_ALIGN(cq_size); 2663 2664 /* Create RQ */ 2665 memset(&spec, 0, sizeof(spec)); 2666 spec.type = GDMA_RQ; 2667 spec.monitor_avl_buf = true; 2668 spec.queue_size = rq_size; 2669 err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq); 2670 if (err) 2671 goto out; 2672 2673 /* Create RQ's CQ */ 2674 cq = &rxq->rx_cq; 2675 cq->type = MANA_CQ_TYPE_RX; 2676 cq->rxq = rxq; 2677 2678 memset(&spec, 0, sizeof(spec)); 2679 spec.type = GDMA_CQ; 2680 spec.monitor_avl_buf = false; 2681 spec.queue_size = cq_size; 2682 spec.cq.callback = mana_schedule_napi; 2683 spec.cq.parent_eq = eq->eq; 2684 spec.cq.context = cq; 2685 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq); 2686 if (err) 2687 goto out; 2688 2689 memset(&wq_spec, 0, sizeof(wq_spec)); 2690 memset(&cq_spec, 0, sizeof(cq_spec)); 2691 wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle; 2692 wq_spec.queue_size = rxq->gdma_rq->queue_size; 2693 2694 cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle; 2695 cq_spec.queue_size = cq->gdma_cq->queue_size; 2696 cq_spec.modr_ctx_id = 0; 2697 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id; 2698 2699 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ, 2700 &wq_spec, &cq_spec, &rxq->rxobj); 2701 if (err) 2702 goto out; 2703 2704 rxq->gdma_rq->id = wq_spec.queue_index; 2705 cq->gdma_cq->id = cq_spec.queue_index; 2706 2707 rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION; 2708 cq->gdma_cq->mem_info.dma_region_handle = 
GDMA_INVALID_DMA_REGION; 2709 2710 rxq->gdma_id = rxq->gdma_rq->id; 2711 cq->gdma_id = cq->gdma_cq->id; 2712 2713 err = mana_push_wqe(rxq); 2714 if (err) 2715 goto out; 2716 2717 if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) { 2718 err = -EINVAL; 2719 goto out; 2720 } 2721 2722 gc->cq_table[cq->gdma_id] = cq->gdma_cq; 2723 2724 netif_napi_add_weight_locked(ndev, &cq->napi, mana_poll, 1); 2725 2726 WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx, 2727 cq->napi.napi_id)); 2728 WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, 2729 rxq->page_pool)); 2730 2731 napi_enable_locked(&cq->napi); 2732 2733 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); 2734 out: 2735 if (!err) 2736 return rxq; 2737 2738 netdev_err(ndev, "Failed to create RXQ: err = %d\n", err); 2739 2740 mana_destroy_rxq(apc, rxq, false); 2741 2742 if (cq) 2743 mana_deinit_cq(apc, cq); 2744 2745 return NULL; 2746 } 2747 2748 static void mana_create_rxq_debugfs(struct mana_port_context *apc, int idx) 2749 { 2750 struct mana_rxq *rxq; 2751 char qnum[32]; 2752 2753 rxq = apc->rxqs[idx]; 2754 2755 sprintf(qnum, "RX-%d", idx); 2756 rxq->mana_rx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs); 2757 debugfs_create_u32("rq_head", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->head); 2758 debugfs_create_u32("rq_tail", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->tail); 2759 debugfs_create_u32("rq_nbuf", 0400, rxq->mana_rx_debugfs, &rxq->num_rx_buf); 2760 debugfs_create_u32("cq_head", 0400, rxq->mana_rx_debugfs, 2761 &rxq->rx_cq.gdma_cq->head); 2762 debugfs_create_u32("cq_tail", 0400, rxq->mana_rx_debugfs, 2763 &rxq->rx_cq.gdma_cq->tail); 2764 debugfs_create_u32("cq_budget", 0400, rxq->mana_rx_debugfs, &rxq->rx_cq.budget); 2765 debugfs_create_file("rxq_dump", 0400, rxq->mana_rx_debugfs, rxq->gdma_rq, &mana_dbg_q_fops); 2766 debugfs_create_file("cq_dump", 0400, rxq->mana_rx_debugfs, rxq->rx_cq.gdma_cq, 2767 &mana_dbg_q_fops); 2768 } 2769 2770 static int mana_add_rx_queues(struct mana_port_context *apc, 2771 struct net_device *ndev) 2772 { 2773 struct mana_context *ac = apc->ac; 2774 struct mana_rxq *rxq; 2775 int err = 0; 2776 int i; 2777 2778 for (i = 0; i < apc->num_queues; i++) { 2779 rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev); 2780 if (!rxq) { 2781 err = -ENOMEM; 2782 netdev_err(ndev, "Failed to create rxq %d : %d\n", i, err); 2783 goto out; 2784 } 2785 2786 u64_stats_init(&rxq->stats.syncp); 2787 2788 apc->rxqs[i] = rxq; 2789 2790 mana_create_rxq_debugfs(apc, i); 2791 } 2792 2793 apc->default_rxobj = apc->rxqs[0]->rxobj; 2794 out: 2795 return err; 2796 } 2797 2798 static void mana_destroy_vport(struct mana_port_context *apc) 2799 { 2800 struct gdma_dev *gd = apc->ac->gdma_dev; 2801 struct mana_rxq *rxq; 2802 u32 rxq_idx; 2803 2804 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) { 2805 rxq = apc->rxqs[rxq_idx]; 2806 if (!rxq) 2807 continue; 2808 2809 mana_destroy_rxq(apc, rxq, true); 2810 apc->rxqs[rxq_idx] = NULL; 2811 } 2812 2813 mana_destroy_txq(apc); 2814 mana_uncfg_vport(apc); 2815 2816 if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) 2817 mana_pf_deregister_hw_vport(apc); 2818 } 2819 2820 static int mana_create_vport(struct mana_port_context *apc, 2821 struct net_device *net) 2822 { 2823 struct gdma_dev *gd = apc->ac->gdma_dev; 2824 int err; 2825 2826 apc->default_rxobj = INVALID_MANA_HANDLE; 2827 2828 if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) { 2829 err = mana_pf_register_hw_vport(apc); 2830 if (err) 2831 return err; 2832 } 2833 2834 err = mana_cfg_vport(apc, gd->pdid, 
gd->doorbell); 2835 if (err) 2836 return err; 2837 2838 return mana_create_txq(apc, net); 2839 } 2840 2841 static int mana_rss_table_alloc(struct mana_port_context *apc) 2842 { 2843 if (!apc->indir_table_sz) { 2844 netdev_err(apc->ndev, 2845 "Indirection table size not set for vPort %d\n", 2846 apc->port_idx); 2847 return -EINVAL; 2848 } 2849 2850 apc->indir_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL); 2851 if (!apc->indir_table) 2852 return -ENOMEM; 2853 2854 apc->rxobj_table = kzalloc_objs(mana_handle_t, apc->indir_table_sz); 2855 if (!apc->rxobj_table) { 2856 kfree(apc->indir_table); 2857 return -ENOMEM; 2858 } 2859 2860 return 0; 2861 } 2862 2863 static void mana_rss_table_init(struct mana_port_context *apc) 2864 { 2865 int i; 2866 2867 for (i = 0; i < apc->indir_table_sz; i++) 2868 apc->indir_table[i] = 2869 ethtool_rxfh_indir_default(i, apc->num_queues); 2870 } 2871 2872 int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx, 2873 bool update_hash, bool update_tab) 2874 { 2875 u32 queue_idx; 2876 int err; 2877 int i; 2878 2879 if (update_tab) { 2880 for (i = 0; i < apc->indir_table_sz; i++) { 2881 queue_idx = apc->indir_table[i]; 2882 apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj; 2883 } 2884 } 2885 2886 err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab); 2887 if (err) 2888 return err; 2889 2890 mana_fence_rqs(apc); 2891 2892 return 0; 2893 } 2894 2895 int mana_query_gf_stats(struct mana_context *ac) 2896 { 2897 struct gdma_context *gc = ac->gdma_dev->gdma_context; 2898 struct mana_query_gf_stat_resp resp = {}; 2899 struct mana_query_gf_stat_req req = {}; 2900 struct device *dev = gc->dev; 2901 int err; 2902 2903 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT, 2904 sizeof(req), sizeof(resp)); 2905 req.hdr.resp.msg_version = GDMA_MESSAGE_V2; 2906 req.req_stats = STATISTICS_FLAGS_RX_DISCARDS_NO_WQE | 2907 STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED | 2908 STATISTICS_FLAGS_HC_RX_BYTES | 2909 STATISTICS_FLAGS_HC_RX_UCAST_PACKETS | 2910 STATISTICS_FLAGS_HC_RX_UCAST_BYTES | 2911 STATISTICS_FLAGS_HC_RX_MCAST_PACKETS | 2912 STATISTICS_FLAGS_HC_RX_MCAST_BYTES | 2913 STATISTICS_FLAGS_HC_RX_BCAST_PACKETS | 2914 STATISTICS_FLAGS_HC_RX_BCAST_BYTES | 2915 STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED | 2916 STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED | 2917 STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS | 2918 STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT | 2919 STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT | 2920 STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT | 2921 STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT | 2922 STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT | 2923 STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION | 2924 STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB | 2925 STATISTICS_FLAGS_HC_TX_BYTES | 2926 STATISTICS_FLAGS_HC_TX_UCAST_PACKETS | 2927 STATISTICS_FLAGS_HC_TX_UCAST_BYTES | 2928 STATISTICS_FLAGS_HC_TX_MCAST_PACKETS | 2929 STATISTICS_FLAGS_HC_TX_MCAST_BYTES | 2930 STATISTICS_FLAGS_HC_TX_BCAST_PACKETS | 2931 STATISTICS_FLAGS_HC_TX_BCAST_BYTES | 2932 STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR; 2933 2934 err = mana_send_request(ac, &req, sizeof(req), &resp, 2935 sizeof(resp)); 2936 if (err) { 2937 dev_err(dev, "Failed to query GF stats: %d\n", err); 2938 return err; 2939 } 2940 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_GF_STAT, 2941 sizeof(resp)); 2942 if (err || resp.hdr.status) { 2943 dev_err(dev, "Failed to query GF stats: %d, 0x%x\n", err, 2944 resp.hdr.status); 2945 return err; 2946 } 2947 2948 ac->hc_stats.hc_rx_discards_no_wqe = 
resp.rx_discards_nowqe; 2949 ac->hc_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled; 2950 ac->hc_stats.hc_rx_bytes = resp.hc_rx_bytes; 2951 ac->hc_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts; 2952 ac->hc_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes; 2953 ac->hc_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts; 2954 ac->hc_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes; 2955 ac->hc_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts; 2956 ac->hc_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes; 2957 ac->hc_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled; 2958 ac->hc_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled; 2959 ac->hc_stats.hc_tx_err_inval_vportoffset_pkt = 2960 resp.tx_err_inval_vport_offset_pkt; 2961 ac->hc_stats.hc_tx_err_vlan_enforcement = 2962 resp.tx_err_vlan_enforcement; 2963 ac->hc_stats.hc_tx_err_eth_type_enforcement = 2964 resp.tx_err_ethtype_enforcement; 2965 ac->hc_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement; 2966 ac->hc_stats.hc_tx_err_sqpdid_enforcement = 2967 resp.tx_err_SQPDID_enforcement; 2968 ac->hc_stats.hc_tx_err_cqpdid_enforcement = 2969 resp.tx_err_CQPDID_enforcement; 2970 ac->hc_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation; 2971 ac->hc_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob; 2972 ac->hc_stats.hc_tx_bytes = resp.hc_tx_bytes; 2973 ac->hc_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts; 2974 ac->hc_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes; 2975 ac->hc_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts; 2976 ac->hc_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes; 2977 ac->hc_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts; 2978 ac->hc_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes; 2979 ac->hc_stats.hc_tx_err_gdma = resp.tx_err_gdma; 2980 2981 return 0; 2982 } 2983 2984 void mana_query_phy_stats(struct mana_port_context *apc) 2985 { 2986 struct mana_query_phy_stat_resp resp = {}; 2987 struct mana_query_phy_stat_req req = {}; 2988 struct net_device *ndev = apc->ndev; 2989 int err; 2990 2991 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_PHY_STAT, 2992 sizeof(req), sizeof(resp)); 2993 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, 2994 sizeof(resp)); 2995 if (err) 2996 return; 2997 2998 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_PHY_STAT, 2999 sizeof(resp)); 3000 if (err || resp.hdr.status) { 3001 netdev_err(ndev, 3002 "Failed to query PHY stats: %d, resp:0x%x\n", 3003 err, resp.hdr.status); 3004 return; 3005 } 3006 3007 /* Aggregate drop counters */ 3008 apc->phy_stats.rx_pkt_drop_phy = resp.rx_pkt_drop_phy; 3009 apc->phy_stats.tx_pkt_drop_phy = resp.tx_pkt_drop_phy; 3010 3011 /* Per TC traffic Counters */ 3012 apc->phy_stats.rx_pkt_tc0_phy = resp.rx_pkt_tc0_phy; 3013 apc->phy_stats.tx_pkt_tc0_phy = resp.tx_pkt_tc0_phy; 3014 apc->phy_stats.rx_pkt_tc1_phy = resp.rx_pkt_tc1_phy; 3015 apc->phy_stats.tx_pkt_tc1_phy = resp.tx_pkt_tc1_phy; 3016 apc->phy_stats.rx_pkt_tc2_phy = resp.rx_pkt_tc2_phy; 3017 apc->phy_stats.tx_pkt_tc2_phy = resp.tx_pkt_tc2_phy; 3018 apc->phy_stats.rx_pkt_tc3_phy = resp.rx_pkt_tc3_phy; 3019 apc->phy_stats.tx_pkt_tc3_phy = resp.tx_pkt_tc3_phy; 3020 apc->phy_stats.rx_pkt_tc4_phy = resp.rx_pkt_tc4_phy; 3021 apc->phy_stats.tx_pkt_tc4_phy = resp.tx_pkt_tc4_phy; 3022 apc->phy_stats.rx_pkt_tc5_phy = resp.rx_pkt_tc5_phy; 3023 apc->phy_stats.tx_pkt_tc5_phy = resp.tx_pkt_tc5_phy; 3024 apc->phy_stats.rx_pkt_tc6_phy = resp.rx_pkt_tc6_phy; 3025 apc->phy_stats.tx_pkt_tc6_phy = resp.tx_pkt_tc6_phy; 3026 apc->phy_stats.rx_pkt_tc7_phy = resp.rx_pkt_tc7_phy; 3027 
apc->phy_stats.tx_pkt_tc7_phy = resp.tx_pkt_tc7_phy; 3028 3029 /* Per TC byte Counters */ 3030 apc->phy_stats.rx_byte_tc0_phy = resp.rx_byte_tc0_phy; 3031 apc->phy_stats.tx_byte_tc0_phy = resp.tx_byte_tc0_phy; 3032 apc->phy_stats.rx_byte_tc1_phy = resp.rx_byte_tc1_phy; 3033 apc->phy_stats.tx_byte_tc1_phy = resp.tx_byte_tc1_phy; 3034 apc->phy_stats.rx_byte_tc2_phy = resp.rx_byte_tc2_phy; 3035 apc->phy_stats.tx_byte_tc2_phy = resp.tx_byte_tc2_phy; 3036 apc->phy_stats.rx_byte_tc3_phy = resp.rx_byte_tc3_phy; 3037 apc->phy_stats.tx_byte_tc3_phy = resp.tx_byte_tc3_phy; 3038 apc->phy_stats.rx_byte_tc4_phy = resp.rx_byte_tc4_phy; 3039 apc->phy_stats.tx_byte_tc4_phy = resp.tx_byte_tc4_phy; 3040 apc->phy_stats.rx_byte_tc5_phy = resp.rx_byte_tc5_phy; 3041 apc->phy_stats.tx_byte_tc5_phy = resp.tx_byte_tc5_phy; 3042 apc->phy_stats.rx_byte_tc6_phy = resp.rx_byte_tc6_phy; 3043 apc->phy_stats.tx_byte_tc6_phy = resp.tx_byte_tc6_phy; 3044 apc->phy_stats.rx_byte_tc7_phy = resp.rx_byte_tc7_phy; 3045 apc->phy_stats.tx_byte_tc7_phy = resp.tx_byte_tc7_phy; 3046 3047 /* Per TC pause Counters */ 3048 apc->phy_stats.rx_pause_tc0_phy = resp.rx_pause_tc0_phy; 3049 apc->phy_stats.tx_pause_tc0_phy = resp.tx_pause_tc0_phy; 3050 apc->phy_stats.rx_pause_tc1_phy = resp.rx_pause_tc1_phy; 3051 apc->phy_stats.tx_pause_tc1_phy = resp.tx_pause_tc1_phy; 3052 apc->phy_stats.rx_pause_tc2_phy = resp.rx_pause_tc2_phy; 3053 apc->phy_stats.tx_pause_tc2_phy = resp.tx_pause_tc2_phy; 3054 apc->phy_stats.rx_pause_tc3_phy = resp.rx_pause_tc3_phy; 3055 apc->phy_stats.tx_pause_tc3_phy = resp.tx_pause_tc3_phy; 3056 apc->phy_stats.rx_pause_tc4_phy = resp.rx_pause_tc4_phy; 3057 apc->phy_stats.tx_pause_tc4_phy = resp.tx_pause_tc4_phy; 3058 apc->phy_stats.rx_pause_tc5_phy = resp.rx_pause_tc5_phy; 3059 apc->phy_stats.tx_pause_tc5_phy = resp.tx_pause_tc5_phy; 3060 apc->phy_stats.rx_pause_tc6_phy = resp.rx_pause_tc6_phy; 3061 apc->phy_stats.tx_pause_tc6_phy = resp.tx_pause_tc6_phy; 3062 apc->phy_stats.rx_pause_tc7_phy = resp.rx_pause_tc7_phy; 3063 apc->phy_stats.tx_pause_tc7_phy = resp.tx_pause_tc7_phy; 3064 } 3065 3066 static int mana_init_port(struct net_device *ndev) 3067 { 3068 struct mana_port_context *apc = netdev_priv(ndev); 3069 struct gdma_dev *gd = apc->ac->gdma_dev; 3070 u32 max_txq, max_rxq, max_queues; 3071 int port_idx = apc->port_idx; 3072 struct gdma_context *gc; 3073 char vport[32]; 3074 int err; 3075 3076 err = mana_init_port_context(apc); 3077 if (err) 3078 return err; 3079 3080 gc = gd->gdma_context; 3081 3082 err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq, 3083 &apc->indir_table_sz); 3084 if (err) { 3085 netdev_err(ndev, "Failed to query info for vPort %d\n", 3086 port_idx); 3087 goto reset_apc; 3088 } 3089 3090 max_queues = min_t(u32, max_txq, max_rxq); 3091 if (apc->max_queues > max_queues) 3092 apc->max_queues = max_queues; 3093 3094 if (apc->num_queues > apc->max_queues) 3095 apc->num_queues = apc->max_queues; 3096 3097 eth_hw_addr_set(ndev, apc->mac_addr); 3098 sprintf(vport, "vport%d", port_idx); 3099 apc->mana_port_debugfs = debugfs_create_dir(vport, gc->mana_pci_debugfs); 3100 return 0; 3101 3102 reset_apc: 3103 mana_cleanup_port_context(apc); 3104 return err; 3105 } 3106 3107 int mana_alloc_queues(struct net_device *ndev) 3108 { 3109 struct mana_port_context *apc = netdev_priv(ndev); 3110 struct gdma_dev *gd = apc->ac->gdma_dev; 3111 int err; 3112 3113 err = mana_create_vport(apc, ndev); 3114 if (err) { 3115 netdev_err(ndev, "Failed to create vPort %u : %d\n", apc->port_idx, err); 3116 return err; 3117 } 
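/* TX queues were created by mana_create_vport() above; report the real
 * TX/RX queue counts to the stack before configuring RSS.
 */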
3118 3119 err = netif_set_real_num_tx_queues(ndev, apc->num_queues); 3120 if (err) { 3121 netdev_err(ndev, 3122 "netif_set_real_num_tx_queues () failed for ndev with num_queues %u : %d\n", 3123 apc->num_queues, err); 3124 goto destroy_vport; 3125 } 3126 3127 err = mana_add_rx_queues(apc, ndev); 3128 if (err) 3129 goto destroy_vport; 3130 3131 apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE; 3132 3133 err = netif_set_real_num_rx_queues(ndev, apc->num_queues); 3134 if (err) { 3135 netdev_err(ndev, 3136 "netif_set_real_num_rx_queues () failed for ndev with num_queues %u : %d\n", 3137 apc->num_queues, err); 3138 goto destroy_vport; 3139 } 3140 3141 mana_rss_table_init(apc); 3142 3143 err = mana_config_rss(apc, TRI_STATE_TRUE, true, true); 3144 if (err) { 3145 netdev_err(ndev, "Failed to configure RSS table: %d\n", err); 3146 goto destroy_vport; 3147 } 3148 3149 if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) { 3150 err = mana_pf_register_filter(apc); 3151 if (err) 3152 goto destroy_vport; 3153 } 3154 3155 mana_chn_setxdp(apc, mana_xdp_get(apc)); 3156 3157 return 0; 3158 3159 destroy_vport: 3160 mana_destroy_vport(apc); 3161 return err; 3162 } 3163 3164 int mana_attach(struct net_device *ndev) 3165 { 3166 struct mana_port_context *apc = netdev_priv(ndev); 3167 int err; 3168 3169 ASSERT_RTNL(); 3170 3171 err = mana_init_port(ndev); 3172 if (err) 3173 return err; 3174 3175 if (apc->port_st_save) { 3176 err = mana_alloc_queues(ndev); 3177 if (err) { 3178 mana_cleanup_port_context(apc); 3179 return err; 3180 } 3181 } 3182 3183 apc->port_is_up = apc->port_st_save; 3184 3185 /* Ensure port state updated before txq state */ 3186 smp_wmb(); 3187 3188 netif_device_attach(ndev); 3189 3190 return 0; 3191 } 3192 3193 static int mana_dealloc_queues(struct net_device *ndev) 3194 { 3195 struct mana_port_context *apc = netdev_priv(ndev); 3196 unsigned long timeout = jiffies + 120 * HZ; 3197 struct gdma_dev *gd = apc->ac->gdma_dev; 3198 struct mana_txq *txq; 3199 struct sk_buff *skb; 3200 int i, err; 3201 u32 tsleep; 3202 3203 if (apc->port_is_up) 3204 return -EINVAL; 3205 3206 mana_chn_setxdp(apc, NULL); 3207 3208 if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) 3209 mana_pf_deregister_filter(apc); 3210 3211 /* No packet can be transmitted now since apc->port_is_up is false. 3212 * There is still a tiny chance that mana_poll_tx_cq() can re-enable 3213 * a txq because it may not timely see apc->port_is_up being cleared 3214 * to false, but it doesn't matter since mana_start_xmit() drops any 3215 * new packets due to apc->port_is_up being false. 3216 * 3217 * Drain all the in-flight TX packets. 3218 * A timeout of 120 seconds for all the queues is used. 3219 * This will break the while loop when h/w is not responding. 3220 * This value of 120 has been decided here considering max 3221 * number of queues. 
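 * If packets are still pending when the timeout expires, a PCIe function
 * level reset is issued below to recover the hardware.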
3222 */ 3223 3224 for (i = 0; i < apc->num_queues; i++) { 3225 txq = &apc->tx_qp[i].txq; 3226 tsleep = 1000; 3227 while (atomic_read(&txq->pending_sends) > 0 && 3228 time_before(jiffies, timeout)) { 3229 usleep_range(tsleep, tsleep + 1000); 3230 tsleep <<= 1; 3231 } 3232 if (atomic_read(&txq->pending_sends)) { 3233 err = pcie_flr(to_pci_dev(gd->gdma_context->dev)); 3234 if (err) { 3235 netdev_err(ndev, "flr failed %d with %d pkts pending in txq %u\n", 3236 err, atomic_read(&txq->pending_sends), 3237 txq->gdma_txq_id); 3238 } 3239 break; 3240 } 3241 } 3242 3243 for (i = 0; i < apc->num_queues; i++) { 3244 txq = &apc->tx_qp[i].txq; 3245 while ((skb = skb_dequeue(&txq->pending_skbs))) { 3246 mana_unmap_skb(skb, apc); 3247 dev_kfree_skb_any(skb); 3248 } 3249 atomic_set(&txq->pending_sends, 0); 3250 } 3251 /* We're 100% sure the queues can no longer be woken up, because 3252 * we're sure now mana_poll_tx_cq() can't be running. 3253 */ 3254 3255 apc->rss_state = TRI_STATE_FALSE; 3256 err = mana_config_rss(apc, TRI_STATE_FALSE, false, false); 3257 if (err && mana_en_need_log(apc, err)) 3258 netdev_err(ndev, "Failed to disable vPort: %d\n", err); 3259 3260 /* Even in err case, still need to cleanup the vPort */ 3261 mana_destroy_vport(apc); 3262 3263 return 0; 3264 } 3265 3266 int mana_detach(struct net_device *ndev, bool from_close) 3267 { 3268 struct mana_port_context *apc = netdev_priv(ndev); 3269 int err; 3270 3271 ASSERT_RTNL(); 3272 3273 apc->port_st_save = apc->port_is_up; 3274 apc->port_is_up = false; 3275 3276 /* Ensure port state updated before txq state */ 3277 smp_wmb(); 3278 3279 netif_tx_disable(ndev); 3280 3281 if (apc->port_st_save) { 3282 err = mana_dealloc_queues(ndev); 3283 if (err) { 3284 netdev_err(ndev, "%s failed to deallocate queues: %d\n", __func__, err); 3285 return err; 3286 } 3287 } 3288 3289 if (!from_close) { 3290 netif_device_detach(ndev); 3291 mana_cleanup_port_context(apc); 3292 } 3293 3294 return 0; 3295 } 3296 3297 static int mana_probe_port(struct mana_context *ac, int port_idx, 3298 struct net_device **ndev_storage) 3299 { 3300 struct gdma_context *gc = ac->gdma_dev->gdma_context; 3301 struct mana_port_context *apc; 3302 struct net_device *ndev; 3303 int err; 3304 3305 ndev = alloc_etherdev_mq(sizeof(struct mana_port_context), 3306 gc->max_num_queues); 3307 if (!ndev) 3308 return -ENOMEM; 3309 3310 *ndev_storage = ndev; 3311 3312 apc = netdev_priv(ndev); 3313 apc->ac = ac; 3314 apc->ndev = ndev; 3315 apc->max_queues = gc->max_num_queues; 3316 apc->num_queues = gc->max_num_queues; 3317 apc->tx_queue_size = DEF_TX_BUFFERS_PER_QUEUE; 3318 apc->rx_queue_size = DEF_RX_BUFFERS_PER_QUEUE; 3319 apc->port_handle = INVALID_MANA_HANDLE; 3320 apc->pf_filter_handle = INVALID_MANA_HANDLE; 3321 apc->port_idx = port_idx; 3322 3323 mutex_init(&apc->vport_mutex); 3324 apc->vport_use_count = 0; 3325 3326 ndev->netdev_ops = &mana_devops; 3327 ndev->ethtool_ops = &mana_ethtool_ops; 3328 ndev->mtu = ETH_DATA_LEN; 3329 ndev->max_mtu = gc->adapter_mtu - ETH_HLEN; 3330 ndev->min_mtu = ETH_MIN_MTU; 3331 ndev->needed_headroom = MANA_HEADROOM; 3332 ndev->dev_port = port_idx; 3333 /* Recommended timeout based on HW FPGA re-config scenario. 
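 * The stack's TX watchdog reports a timeout if a queue stays stalled
 * longer than this.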
*/ 3334 ndev->watchdog_timeo = 15 * HZ; 3335 SET_NETDEV_DEV(ndev, gc->dev); 3336 3337 netif_set_tso_max_size(ndev, GSO_MAX_SIZE); 3338 3339 netif_carrier_off(ndev); 3340 3341 netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE); 3342 3343 err = mana_init_port(ndev); 3344 if (err) 3345 goto free_net; 3346 3347 err = mana_rss_table_alloc(apc); 3348 if (err) 3349 goto reset_apc; 3350 3351 /* Initialize the per port queue reset work.*/ 3352 INIT_WORK(&apc->queue_reset_work, 3353 mana_per_port_queue_reset_work_handler); 3354 3355 netdev_lockdep_set_classes(ndev); 3356 3357 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 3358 ndev->hw_features |= NETIF_F_RXCSUM; 3359 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; 3360 ndev->hw_features |= NETIF_F_RXHASH; 3361 ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_TX | 3362 NETIF_F_HW_VLAN_CTAG_RX; 3363 ndev->vlan_features = ndev->features; 3364 xdp_set_features_flag(ndev, NETDEV_XDP_ACT_BASIC | 3365 NETDEV_XDP_ACT_REDIRECT | 3366 NETDEV_XDP_ACT_NDO_XMIT); 3367 3368 err = register_netdev(ndev); 3369 if (err) { 3370 netdev_err(ndev, "Unable to register netdev.\n"); 3371 goto free_indir; 3372 } 3373 3374 netif_carrier_on(ndev); 3375 3376 debugfs_create_u32("current_speed", 0400, apc->mana_port_debugfs, &apc->speed); 3377 3378 return 0; 3379 3380 free_indir: 3381 mana_cleanup_indir_table(apc); 3382 reset_apc: 3383 mana_cleanup_port_context(apc); 3384 free_net: 3385 *ndev_storage = NULL; 3386 netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err); 3387 free_netdev(ndev); 3388 return err; 3389 } 3390 3391 static void adev_release(struct device *dev) 3392 { 3393 struct mana_adev *madev = container_of(dev, struct mana_adev, adev.dev); 3394 3395 kfree(madev); 3396 } 3397 3398 static void remove_adev(struct gdma_dev *gd) 3399 { 3400 struct auxiliary_device *adev = gd->adev; 3401 int id = adev->id; 3402 3403 auxiliary_device_delete(adev); 3404 auxiliary_device_uninit(adev); 3405 3406 mana_adev_idx_free(id); 3407 gd->adev = NULL; 3408 } 3409 3410 static int add_adev(struct gdma_dev *gd, const char *name) 3411 { 3412 struct auxiliary_device *adev; 3413 struct mana_adev *madev; 3414 int ret; 3415 3416 madev = kzalloc_obj(*madev); 3417 if (!madev) 3418 return -ENOMEM; 3419 3420 adev = &madev->adev; 3421 ret = mana_adev_idx_alloc(); 3422 if (ret < 0) 3423 goto idx_fail; 3424 adev->id = ret; 3425 3426 adev->name = name; 3427 adev->dev.parent = gd->gdma_context->dev; 3428 adev->dev.release = adev_release; 3429 madev->mdev = gd; 3430 3431 ret = auxiliary_device_init(adev); 3432 if (ret) 3433 goto init_fail; 3434 3435 /* madev is owned by the auxiliary device */ 3436 madev = NULL; 3437 ret = auxiliary_device_add(adev); 3438 if (ret) 3439 goto add_fail; 3440 3441 gd->adev = adev; 3442 dev_dbg(gd->gdma_context->dev, 3443 "Auxiliary device added successfully\n"); 3444 return 0; 3445 3446 add_fail: 3447 auxiliary_device_uninit(adev); 3448 3449 init_fail: 3450 mana_adev_idx_free(adev->id); 3451 3452 idx_fail: 3453 kfree(madev); 3454 3455 return ret; 3456 } 3457 3458 static void mana_rdma_service_handle(struct work_struct *work) 3459 { 3460 struct mana_service_work *serv_work = 3461 container_of(work, struct mana_service_work, work); 3462 struct gdma_dev *gd = serv_work->gdma_dev; 3463 struct device *dev = gd->gdma_context->dev; 3464 int ret; 3465 3466 if (READ_ONCE(gd->rdma_teardown)) 3467 goto out; 3468 3469 switch (serv_work->event) { 3470 case GDMA_SERVICE_TYPE_RDMA_SUSPEND: 3471 if (!gd->adev || gd->is_suspended) 3472 break; 
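/* Suspend: remove the auxiliary RDMA device; it is re-added on RESUME. */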
3473 3474 remove_adev(gd); 3475 gd->is_suspended = true; 3476 break; 3477 3478 case GDMA_SERVICE_TYPE_RDMA_RESUME: 3479 if (!gd->is_suspended) 3480 break; 3481 3482 ret = add_adev(gd, "rdma"); 3483 if (ret) 3484 dev_err(dev, "Failed to add adev on resume: %d\n", ret); 3485 else 3486 gd->is_suspended = false; 3487 break; 3488 3489 default: 3490 dev_warn(dev, "unknown adev service event %u\n", 3491 serv_work->event); 3492 break; 3493 } 3494 3495 out: 3496 kfree(serv_work); 3497 } 3498 3499 int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event) 3500 { 3501 struct gdma_dev *gd = &gc->mana_ib; 3502 struct mana_service_work *serv_work; 3503 3504 if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) { 3505 /* RDMA device is not detected on pci */ 3506 return 0; 3507 } 3508 3509 serv_work = kzalloc_obj(*serv_work, GFP_ATOMIC); 3510 if (!serv_work) 3511 return -ENOMEM; 3512 3513 serv_work->event = event; 3514 serv_work->gdma_dev = gd; 3515 3516 INIT_WORK(&serv_work->work, mana_rdma_service_handle); 3517 queue_work(gc->service_wq, &serv_work->work); 3518 3519 return 0; 3520 } 3521 3522 #define MANA_GF_STATS_PERIOD (2 * HZ) 3523 3524 static void mana_gf_stats_work_handler(struct work_struct *work) 3525 { 3526 struct mana_context *ac = 3527 container_of(to_delayed_work(work), struct mana_context, gf_stats_work); 3528 int err; 3529 3530 err = mana_query_gf_stats(ac); 3531 if (err == -ETIMEDOUT) { 3532 /* HWC timeout detected - reset stats and stop rescheduling */ 3533 ac->hwc_timeout_occurred = true; 3534 memset(&ac->hc_stats, 0, sizeof(ac->hc_stats)); 3535 return; 3536 } 3537 schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD); 3538 } 3539 3540 int mana_probe(struct gdma_dev *gd, bool resuming) 3541 { 3542 struct gdma_context *gc = gd->gdma_context; 3543 struct mana_context *ac = gd->driver_data; 3544 struct mana_port_context *apc = NULL; 3545 struct device *dev = gc->dev; 3546 u8 bm_hostmode = 0; 3547 u16 num_ports = 0; 3548 int err; 3549 int i; 3550 3551 dev_info(dev, 3552 "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n", 3553 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION); 3554 3555 err = mana_gd_register_device(gd); 3556 if (err) 3557 return err; 3558 3559 if (!resuming) { 3560 ac = kzalloc_obj(*ac); 3561 if (!ac) 3562 return -ENOMEM; 3563 3564 ac->gdma_dev = gd; 3565 gd->driver_data = ac; 3566 } 3567 3568 err = mana_create_eq(ac); 3569 if (err) { 3570 dev_err(dev, "Failed to create EQs: %d\n", err); 3571 goto out; 3572 } 3573 3574 err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION, 3575 MANA_MICRO_VERSION, &num_ports, &bm_hostmode); 3576 if (err) 3577 goto out; 3578 3579 ac->bm_hostmode = bm_hostmode; 3580 3581 if (!resuming) { 3582 ac->num_ports = num_ports; 3583 3584 INIT_WORK(&ac->link_change_work, mana_link_state_handle); 3585 } else { 3586 if (ac->num_ports != num_ports) { 3587 dev_err(dev, "The number of vPorts changed: %d->%d\n", 3588 ac->num_ports, num_ports); 3589 err = -EPROTO; 3590 goto out; 3591 } 3592 3593 enable_work(&ac->link_change_work); 3594 } 3595 3596 if (ac->num_ports == 0) 3597 dev_err(dev, "Failed to detect any vPort\n"); 3598 3599 if (ac->num_ports > MAX_PORTS_IN_MANA_DEV) 3600 ac->num_ports = MAX_PORTS_IN_MANA_DEV; 3601 3602 ac->per_port_queue_reset_wq = 3603 create_singlethread_workqueue("mana_per_port_queue_reset_wq"); 3604 if (!ac->per_port_queue_reset_wq) { 3605 dev_err(dev, "Failed to allocate per port queue reset workqueue\n"); 3606 err = -ENOMEM; 3607 goto out; 3608 } 3609 3610 if 
(!resuming) { 3611 for (i = 0; i < ac->num_ports; i++) { 3612 err = mana_probe_port(ac, i, &ac->ports[i]); 3613 /* we log the port for which the probe failed and stop 3614 * probes for subsequent ports. 3615 * Note that we keep running ports, for which the probes 3616 * were successful, unless add_adev fails too 3617 */ 3618 if (err) { 3619 dev_err(dev, "Probe Failed for port %d\n", i); 3620 break; 3621 } 3622 } 3623 } else { 3624 for (i = 0; i < ac->num_ports; i++) { 3625 rtnl_lock(); 3626 apc = netdev_priv(ac->ports[i]); 3627 enable_work(&apc->queue_reset_work); 3628 err = mana_attach(ac->ports[i]); 3629 rtnl_unlock(); 3630 /* we log the port for which the attach failed and stop 3631 * attach for subsequent ports 3632 * Note that we keep running ports, for which the attach 3633 * were successful, unless add_adev fails too 3634 */ 3635 if (err) { 3636 dev_err(dev, "Attach Failed for port %d\n", i); 3637 break; 3638 } 3639 } 3640 } 3641 3642 err = add_adev(gd, "eth"); 3643 3644 INIT_DELAYED_WORK(&ac->gf_stats_work, mana_gf_stats_work_handler); 3645 schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD); 3646 3647 out: 3648 if (err) { 3649 mana_remove(gd, false); 3650 } else { 3651 dev_dbg(dev, "gd=%p, id=%u, num_ports=%d, type=%u, instance=%u\n", 3652 gd, gd->dev_id.as_uint32, ac->num_ports, 3653 gd->dev_id.type, gd->dev_id.instance); 3654 dev_dbg(dev, "%s succeeded\n", __func__); 3655 } 3656 3657 return err; 3658 } 3659 3660 void mana_remove(struct gdma_dev *gd, bool suspending) 3661 { 3662 struct gdma_context *gc = gd->gdma_context; 3663 struct mana_context *ac = gd->driver_data; 3664 struct mana_port_context *apc; 3665 struct device *dev = gc->dev; 3666 struct net_device *ndev; 3667 int err; 3668 int i; 3669 3670 disable_work_sync(&ac->link_change_work); 3671 cancel_delayed_work_sync(&ac->gf_stats_work); 3672 3673 /* adev currently doesn't support suspending, always remove it */ 3674 if (gd->adev) 3675 remove_adev(gd); 3676 3677 for (i = 0; i < ac->num_ports; i++) { 3678 ndev = ac->ports[i]; 3679 if (!ndev) { 3680 if (i == 0) 3681 dev_err(dev, "No net device to remove\n"); 3682 goto out; 3683 } 3684 3685 apc = netdev_priv(ndev); 3686 disable_work_sync(&apc->queue_reset_work); 3687 3688 /* All cleanup actions should stay after rtnl_lock(), otherwise 3689 * other functions may access partially cleaned up data. 3690 */ 3691 rtnl_lock(); 3692 3693 err = mana_detach(ndev, false); 3694 if (err) 3695 netdev_err(ndev, "Failed to detach vPort %d: %d\n", 3696 i, err); 3697 3698 if (suspending) { 3699 /* No need to unregister the ndev. 
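 * The netdev stays registered so mana_attach() can bring the port back
 * up on resume.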
*/ 3700 rtnl_unlock(); 3701 continue; 3702 } 3703 3704 unregister_netdevice(ndev); 3705 mana_cleanup_indir_table(apc); 3706 3707 rtnl_unlock(); 3708 3709 free_netdev(ndev); 3710 } 3711 3712 mana_destroy_eq(ac); 3713 out: 3714 if (ac->per_port_queue_reset_wq) { 3715 destroy_workqueue(ac->per_port_queue_reset_wq); 3716 ac->per_port_queue_reset_wq = NULL; 3717 } 3718 3719 mana_gd_deregister_device(gd); 3720 3721 if (suspending) 3722 return; 3723 3724 gd->driver_data = NULL; 3725 gd->gdma_context = NULL; 3726 kfree(ac); 3727 dev_dbg(dev, "%s succeeded\n", __func__); 3728 } 3729 3730 int mana_rdma_probe(struct gdma_dev *gd) 3731 { 3732 int err = 0; 3733 3734 if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) { 3735 /* RDMA device is not detected on pci */ 3736 return err; 3737 } 3738 3739 err = mana_gd_register_device(gd); 3740 if (err) 3741 return err; 3742 3743 err = add_adev(gd, "rdma"); 3744 if (err) 3745 mana_gd_deregister_device(gd); 3746 3747 return err; 3748 } 3749 3750 void mana_rdma_remove(struct gdma_dev *gd) 3751 { 3752 struct gdma_context *gc = gd->gdma_context; 3753 3754 if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) { 3755 /* RDMA device is not detected on pci */ 3756 return; 3757 } 3758 3759 WRITE_ONCE(gd->rdma_teardown, true); 3760 3761 if (gc->service_wq) 3762 flush_workqueue(gc->service_wq); 3763 3764 if (gd->adev) 3765 remove_adev(gd); 3766 3767 mana_gd_deregister_device(gd); 3768 } 3769 3770 struct net_device *mana_get_primary_netdev(struct mana_context *ac, 3771 u32 port_index, 3772 netdevice_tracker *tracker) 3773 { 3774 struct net_device *ndev; 3775 3776 if (port_index >= ac->num_ports) 3777 return NULL; 3778 3779 rcu_read_lock(); 3780 3781 /* If mana is used in netvsc, the upper netdevice should be returned. */ 3782 ndev = netdev_master_upper_dev_get_rcu(ac->ports[port_index]); 3783 3784 /* If there is no upper device, use the parent Ethernet device */ 3785 if (!ndev) 3786 ndev = ac->ports[port_index]; 3787 3788 netdev_hold(ndev, tracker, GFP_ATOMIC); 3789 rcu_read_unlock(); 3790 3791 return ndev; 3792 } 3793 EXPORT_SYMBOL_NS(mana_get_primary_netdev, "NET_MANA"); 3794