// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <uapi/linux/bpf.h>

#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/mm.h>

#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xdp.h>

#include <net/mana/mana.h>
#include <net/mana/mana_auxiliary.h>

static DEFINE_IDA(mana_adev_ida);

static int mana_adev_idx_alloc(void)
{
	return ida_alloc(&mana_adev_ida, GFP_KERNEL);
}

static void mana_adev_idx_free(int idx)
{
	ida_free(&mana_adev_ida, idx);
}

/* Microsoft Azure Network Adapter (MANA) functions */

static int mana_open(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int err;

	err = mana_alloc_queues(ndev);
	if (err)
		return err;

	apc->port_is_up = true;

	/* Ensure port state updated before txq state */
	smp_wmb();

	netif_carrier_on(ndev);
	netif_tx_wake_all_queues(ndev);

	return 0;
}

static int mana_close(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	if (!apc->port_is_up)
		return 0;

	return mana_detach(ndev, true);
}

static bool mana_can_tx(struct gdma_queue *wq)
{
	return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
}

static unsigned int mana_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return IPPROTO_TCP;

		if (ip->protocol == IPPROTO_UDP)
			return IPPROTO_UDP;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return IPPROTO_TCP;

		if (ip6->nexthdr == IPPROTO_UDP)
			return IPPROTO_UDP;
	}

	/* No csum offloading */
	return 0;
}

static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
			struct mana_tx_package *tp)
{
	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
	struct gdma_dev *gd = apc->ac->gdma_dev;
	struct gdma_context *gc;
	struct device *dev;
	skb_frag_t *frag;
	dma_addr_t da;
	int i;

	gc = gd->gdma_context;
	dev = gc->dev;
	da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, da))
		return -ENOMEM;

	ash->dma_handle[0] = da;
	ash->size[0] = skb_headlen(skb);

	tp->wqe_req.sgl[0].address = ash->dma_handle[0];
	tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey;
	tp->wqe_req.sgl[0].size = ash->size[0];

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				      DMA_TO_DEVICE);

		if (dma_mapping_error(dev, da))
			goto frag_err;

		ash->dma_handle[i + 1] = da;
		ash->size[i + 1] = skb_frag_size(frag);

		tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1];
		tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey;
		tp->wqe_req.sgl[i + 1].size = ash->size[i + 1];
	}

	return 0;

frag_err:
	for (i = i - 1; i >= 0; i--)
		dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1],
			       DMA_TO_DEVICE);

	dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);

	return -ENOMEM;
}
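
/* Main TX entry point: build the per-packet OOB (checksum/TSO/VLAN
 * metadata), DMA-map the skb, post a work request on the SQ and ring the
 * doorbell. The queue is stopped when WQE space runs low and is woken
 * again from the TX completion path.
 */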
netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
	struct mana_port_context *apc = netdev_priv(ndev);
	u16 txq_idx = skb_get_queue_mapping(skb);
	struct gdma_dev *gd = apc->ac->gdma_dev;
	bool ipv4 = false, ipv6 = false;
	struct mana_tx_package pkg = {};
	struct netdev_queue *net_txq;
	struct mana_stats_tx *tx_stats;
	struct gdma_queue *gdma_sq;
	unsigned int csum_type;
	struct mana_txq *txq;
	struct mana_cq *cq;
	int err, len;
	u16 ihs;

	if (unlikely(!apc->port_is_up))
		goto tx_drop;

	if (skb_cow_head(skb, MANA_HEADROOM))
		goto tx_drop_count;

	txq = &apc->tx_qp[txq_idx].txq;
	gdma_sq = txq->gdma_sq;
	cq = &apc->tx_qp[txq_idx].tx_cq;
	tx_stats = &txq->stats;

	pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
	pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;

	if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
		pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
		pkt_fmt = MANA_LONG_PKT_FMT;
	} else {
		pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
	}

	if (skb_vlan_tag_present(skb)) {
		pkt_fmt = MANA_LONG_PKT_FMT;
		pkg.tx_oob.l_oob.inject_vlan_pri_tag = 1;
		pkg.tx_oob.l_oob.pcp = skb_vlan_tag_get_prio(skb);
		pkg.tx_oob.l_oob.dei = skb_vlan_tag_get_cfi(skb);
		pkg.tx_oob.l_oob.vlan_id = skb_vlan_tag_get_id(skb);
	}

	pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;

	if (pkt_fmt == MANA_SHORT_PKT_FMT) {
		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->short_pkt_fmt++;
		u64_stats_update_end(&tx_stats->syncp);
	} else {
		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->long_pkt_fmt++;
		u64_stats_update_end(&tx_stats->syncp);
	}

	pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
	pkg.wqe_req.flags = 0;
	pkg.wqe_req.client_data_unit = 0;

	pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
	WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);

	if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
		pkg.wqe_req.sgl = pkg.sgl_array;
	} else {
		pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
					    sizeof(struct gdma_sge),
					    GFP_ATOMIC);
		if (!pkg.sgl_ptr)
			goto tx_drop_count;

		pkg.wqe_req.sgl = pkg.sgl_ptr;
	}

	if (skb->protocol == htons(ETH_P_IP))
		ipv4 = true;
	else if (skb->protocol == htons(ETH_P_IPV6))
		ipv6 = true;

	if (skb_is_gso(skb)) {
		pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
		pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;

		pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
		pkg.tx_oob.s_oob.comp_tcp_csum = 1;
		pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);

		pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size;
		pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
		if (ipv4) {
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0,
						   IPPROTO_TCP, 0);
		} else {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0,
						 IPPROTO_TCP, 0);
		}

		if (skb->encapsulation) {
			ihs = skb_inner_tcp_all_headers(skb);
			u64_stats_update_begin(&tx_stats->syncp);
			tx_stats->tso_inner_packets++;
			tx_stats->tso_inner_bytes += skb->len - ihs;
			u64_stats_update_end(&tx_stats->syncp);
		} else {
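			/* Non-encapsulated GSO: ihs is the header length
			 * excluded from the TSO/USO byte counters below.
			 */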
			if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
				ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
			} else {
				ihs = skb_tcp_all_headers(skb);
				if (ipv6_has_hopopt_jumbo(skb))
					ihs -= sizeof(struct hop_jumbo_hdr);
			}

			u64_stats_update_begin(&tx_stats->syncp);
			tx_stats->tso_packets++;
			tx_stats->tso_bytes += skb->len - ihs;
			u64_stats_update_end(&tx_stats->syncp);
		}

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		csum_type = mana_checksum_info(skb);

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->csum_partial++;
		u64_stats_update_end(&tx_stats->syncp);

		if (csum_type == IPPROTO_TCP) {
			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;

			pkg.tx_oob.s_oob.comp_tcp_csum = 1;
			pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);

		} else if (csum_type == IPPROTO_UDP) {
			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;

			pkg.tx_oob.s_oob.comp_udp_csum = 1;
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto free_sgl_ptr;
		}
	}

	if (mana_map_skb(skb, apc, &pkg)) {
		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->mana_map_err++;
		u64_stats_update_end(&tx_stats->syncp);
		goto free_sgl_ptr;
	}

	skb_queue_tail(&txq->pending_skbs, skb);

	len = skb->len;
	net_txq = netdev_get_tx_queue(ndev, txq_idx);

	err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
					(struct gdma_posted_wqe_info *)skb->cb);
	if (!mana_can_tx(gdma_sq)) {
		netif_tx_stop_queue(net_txq);
		apc->eth_stats.stop_queue++;
	}

	if (err) {
		(void)skb_dequeue_tail(&txq->pending_skbs);
		netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
		err = NETDEV_TX_BUSY;
		goto tx_busy;
	}

	err = NETDEV_TX_OK;
	atomic_inc(&txq->pending_sends);

	mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);

	/* skb may be freed after mana_gd_post_work_request. Do not use it.
	 */
	skb = NULL;

	tx_stats = &txq->stats;
	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->packets++;
	tx_stats->bytes += len;
	u64_stats_update_end(&tx_stats->syncp);

tx_busy:
	if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
		netif_tx_wake_queue(net_txq);
		apc->eth_stats.wake_queue++;
	}

	kfree(pkg.sgl_ptr);
	return err;

free_sgl_ptr:
	kfree(pkg.sgl_ptr);
tx_drop_count:
	ndev->stats.tx_dropped++;
tx_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static void mana_get_stats64(struct net_device *ndev,
			     struct rtnl_link_stats64 *st)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	struct mana_stats_rx *rx_stats;
	struct mana_stats_tx *tx_stats;
	unsigned int start;
	u64 packets, bytes;
	int q;

	if (!apc->port_is_up)
		return;

	netdev_stats_to_stats64(st, &ndev->stats);

	for (q = 0; q < num_queues; q++) {
		rx_stats = &apc->rxqs[q]->stats;

		do {
			start = u64_stats_fetch_begin(&rx_stats->syncp);
			packets = rx_stats->packets;
			bytes = rx_stats->bytes;
		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));

		st->rx_packets += packets;
		st->rx_bytes += bytes;
	}

	for (q = 0; q < num_queues; q++) {
		tx_stats = &apc->tx_qp[q].txq.stats;

		do {
			start = u64_stats_fetch_begin(&tx_stats->syncp);
			packets = tx_stats->packets;
			bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));

		st->tx_packets += packets;
		st->tx_bytes += bytes;
	}
}

static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
			     int old_q)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	u32 hash = skb_get_hash(skb);
	struct sock *sk = skb->sk;
	int txq;

	txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK];

	if (txq != old_q && sk && sk_fullsock(sk) &&
	    rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, txq);

	return txq;
}

static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
			     struct net_device *sb_dev)
{
	int txq;

	if (ndev->real_num_tx_queues == 1)
		return 0;

	txq = sk_tx_queue_get(skb->sk);

	if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) {
		if (skb_rx_queue_recorded(skb))
			txq = skb_get_rx_queue(skb);
		else
			txq = mana_get_tx_queue(ndev, skb, txq);
	}

	return txq;
}

/* Release pre-allocated RX buffers */
static void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc)
{
	struct device *dev;
	int i;

	dev = mpc->ac->gdma_dev->gdma_context->dev;

	if (!mpc->rxbufs_pre)
		goto out1;

	if (!mpc->das_pre)
		goto out2;

	while (mpc->rxbpre_total) {
		i = --mpc->rxbpre_total;
		dma_unmap_single(dev, mpc->das_pre[i], mpc->rxbpre_datasize,
				 DMA_FROM_DEVICE);
		put_page(virt_to_head_page(mpc->rxbufs_pre[i]));
	}

	kfree(mpc->das_pre);
	mpc->das_pre = NULL;

out2:
	kfree(mpc->rxbufs_pre);
	mpc->rxbufs_pre = NULL;

out1:
	mpc->rxbpre_datasize = 0;
	mpc->rxbpre_alloc_size = 0;
	mpc->rxbpre_headroom = 0;
}

/* Get a buffer from the pre-allocated RX buffers */
static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)
{
	struct net_device *ndev = rxq->ndev;
	struct mana_port_context *mpc;
	void *va;

	mpc = netdev_priv(ndev);

	if (!mpc->rxbufs_pre || !mpc->das_pre || !mpc->rxbpre_total) {
		netdev_err(ndev, "No RX pre-allocated bufs\n");
		return NULL;
	}

	/* Check sizes to catch unexpected coding error */
	if (mpc->rxbpre_datasize != rxq->datasize) {
		netdev_err(ndev, "rxbpre_datasize mismatch: %u: %u\n",
			   mpc->rxbpre_datasize, rxq->datasize);
		return NULL;
	}

	if (mpc->rxbpre_alloc_size != rxq->alloc_size) {
		netdev_err(ndev, "rxbpre_alloc_size mismatch: %u: %u\n",
			   mpc->rxbpre_alloc_size, rxq->alloc_size);
		return NULL;
	}

	if (mpc->rxbpre_headroom != rxq->headroom) {
		netdev_err(ndev, "rxbpre_headroom mismatch: %u: %u\n",
			   mpc->rxbpre_headroom, rxq->headroom);
		return NULL;
	}

	mpc->rxbpre_total--;

	*da = mpc->das_pre[mpc->rxbpre_total];
	va = mpc->rxbufs_pre[mpc->rxbpre_total];
	mpc->rxbufs_pre[mpc->rxbpre_total] = NULL;

	/* Deallocate the array after all buffers are gone */
	if (!mpc->rxbpre_total)
		mana_pre_dealloc_rxbufs(mpc);

	return va;
}

/* Get RX buffer's data size, alloc size, XDP headroom based on MTU */
static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
			       u32 *headroom)
{
	if (mtu > MANA_XDP_MTU_MAX)
		*headroom = 0; /* no support for XDP */
	else
		*headroom = XDP_PACKET_HEADROOM;

	*alloc_size = mtu + MANA_RXBUF_PAD + *headroom;

	*datasize = ALIGN(mtu + ETH_HLEN, MANA_RX_DATA_ALIGN);
}

static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
{
	struct device *dev;
	struct page *page;
	dma_addr_t da;
	int num_rxb;
	void *va;
	int i;

	mana_get_rxbuf_cfg(new_mtu, &mpc->rxbpre_datasize,
			   &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom);

	dev = mpc->ac->gdma_dev->gdma_context->dev;

	num_rxb = mpc->num_queues * RX_BUFFERS_PER_QUEUE;

	WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n");
	mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL);
	if (!mpc->rxbufs_pre)
		goto error;

	mpc->das_pre = kmalloc_array(num_rxb, sizeof(dma_addr_t), GFP_KERNEL);
	if (!mpc->das_pre)
		goto error;

	mpc->rxbpre_total = 0;

	for (i = 0; i < num_rxb; i++) {
		if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
			va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
			if (!va)
				goto error;

			page = virt_to_head_page(va);
			/* Check if the frag falls back to single page */
			if (compound_order(page) <
			    get_order(mpc->rxbpre_alloc_size)) {
				put_page(page);
				goto error;
			}
		} else {
			page = dev_alloc_page();
			if (!page)
				goto error;

			va = page_to_virt(page);
		}

		da = dma_map_single(dev, va + mpc->rxbpre_headroom,
				    mpc->rxbpre_datasize, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, da)) {
			put_page(virt_to_head_page(va));
			goto error;
		}

		mpc->rxbufs_pre[i] = va;
		mpc->das_pre[i] = da;
		mpc->rxbpre_total = i + 1;
	}

	return 0;

error:
	mana_pre_dealloc_rxbufs(mpc);
	return -ENOMEM;
}

static int mana_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct mana_port_context *mpc = netdev_priv(ndev);
	unsigned int old_mtu = ndev->mtu;
	int err;

	/* Pre-allocate buffers to prevent failure in mana_attach later */
	err = mana_pre_alloc_rxbufs(mpc, new_mtu);
	if (err) {
		netdev_err(ndev, "Insufficient memory for new MTU\n");
		return err;
	}
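
	/* Changing the MTU requires tearing down and re-creating all queues:
	 * detach the port, update the MTU, then re-attach so the RX buffers
	 * are re-sized for the new MTU.
	 */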
	err = mana_detach(ndev, false);
	if (err) {
		netdev_err(ndev, "mana_detach failed: %d\n", err);
		goto out;
	}

	ndev->mtu = new_mtu;

	err = mana_attach(ndev);
	if (err) {
		netdev_err(ndev, "mana_attach failed: %d\n", err);
		ndev->mtu = old_mtu;
	}

out:
	mana_pre_dealloc_rxbufs(mpc);
	return err;
}

static const struct net_device_ops mana_devops = {
	.ndo_open = mana_open,
	.ndo_stop = mana_close,
	.ndo_select_queue = mana_select_queue,
	.ndo_start_xmit = mana_start_xmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_get_stats64 = mana_get_stats64,
	.ndo_bpf = mana_bpf,
	.ndo_xdp_xmit = mana_xdp_xmit,
	.ndo_change_mtu = mana_change_mtu,
};

static void mana_cleanup_port_context(struct mana_port_context *apc)
{
	kfree(apc->rxqs);
	apc->rxqs = NULL;
}

static int mana_init_port_context(struct mana_port_context *apc)
{
	apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
			    GFP_KERNEL);

	return !apc->rxqs ? -ENOMEM : 0;
}

static int mana_send_request(struct mana_context *ac, void *in_buf,
			     u32 in_len, void *out_buf, u32 out_len)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct gdma_resp_hdr *resp = out_buf;
	struct gdma_req_hdr *req = in_buf;
	struct device *dev = gc->dev;
	static atomic_t activity_id;
	int err;

	req->dev_id = gc->mana.dev_id;
	req->activity_id = atomic_inc_return(&activity_id);

	err = mana_gd_send_request(gc, in_len, in_buf, out_len,
				   out_buf);
	if (err || resp->status) {
		dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
			err, resp->status);
		return err ? err : -EPROTO;
	}

	if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
	    req->activity_id != resp->activity_id) {
		dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n",
			req->dev_id.as_uint32, resp->dev_id.as_uint32,
			req->activity_id, resp->activity_id);
		return -EPROTO;
	}

	return 0;
}

static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
				const enum mana_command_code expected_code,
				const u32 min_size)
{
	if (resp_hdr->response.msg_type != expected_code)
		return -EPROTO;

	if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
		return -EPROTO;

	if (resp_hdr->response.msg_size < min_size)
		return -EPROTO;

	return 0;
}

static int mana_pf_register_hw_vport(struct mana_port_context *apc)
{
	struct mana_register_hw_vport_resp resp = {};
	struct mana_register_hw_vport_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_HW_PORT,
			     sizeof(req), sizeof(resp));
	req.attached_gfid = 1;
	req.is_pf_default_vport = 1;
	req.allow_all_ether_types = 1;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_HW_PORT,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n",
			   err, resp.hdr.status);
		return err ? err : -EPROTO;
	}

	apc->port_handle = resp.hw_vport_handle;
	return 0;
}

static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
{
	struct mana_deregister_hw_vport_resp resp = {};
	struct mana_deregister_hw_vport_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_HW_PORT,
			     sizeof(req), sizeof(resp));
	req.hw_vport_handle = apc->port_handle;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
			   err);
		return;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_HW_PORT,
				   sizeof(resp));
	if (err || resp.hdr.status)
		netdev_err(apc->ndev,
			   "Failed to deregister hw vPort: %d, 0x%x\n",
			   err, resp.hdr.status);
}

static int mana_pf_register_filter(struct mana_port_context *apc)
{
	struct mana_register_filter_resp resp = {};
	struct mana_register_filter_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_FILTER,
			     sizeof(req), sizeof(resp));
	req.vport = apc->port_handle;
	memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN);

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to register filter: %d\n", err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_FILTER,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n",
			   err, resp.hdr.status);
		return err ? err : -EPROTO;
	}

	apc->pf_filter_handle = resp.filter_handle;
	return 0;
}

static void mana_pf_deregister_filter(struct mana_port_context *apc)
{
	struct mana_deregister_filter_resp resp = {};
	struct mana_deregister_filter_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_FILTER,
			     sizeof(req), sizeof(resp));
	req.filter_handle = apc->pf_filter_handle;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
			   err);
		return;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_FILTER,
				   sizeof(resp));
	if (err || resp.hdr.status)
		netdev_err(apc->ndev,
			   "Failed to deregister filter: %d, 0x%x\n",
			   err, resp.hdr.status);
}

static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
				 u32 proto_minor_ver, u32 proto_micro_ver,
				 u16 *max_num_vports)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct mana_query_device_cfg_resp resp = {};
	struct mana_query_device_cfg_req req = {};
	struct device *dev = gc->dev;
	int err = 0;

	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
			     sizeof(req), sizeof(resp));

	req.hdr.resp.msg_version = GDMA_MESSAGE_V2;

	req.proto_major_ver = proto_major_ver;
	req.proto_minor_ver = proto_minor_ver;
	req.proto_micro_ver = proto_micro_ver;

	err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
	if (err) {
		dev_err(dev, "Failed to query config: %d", err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		dev_err(dev, "Invalid query result: %d, 0x%x\n", err,
			resp.hdr.status);
		if (!err)
			err = -EPROTO;
		return err;
	}

	*max_num_vports = resp.max_num_vports;

	if (resp.hdr.response.msg_version == GDMA_MESSAGE_V2)
		gc->adapter_mtu = resp.adapter_mtu;
	else
		gc->adapter_mtu = ETH_FRAME_LEN;

	return 0;
}

static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
				u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
{
	struct mana_query_vport_cfg_resp resp = {};
	struct mana_query_vport_cfg_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
			     sizeof(req), sizeof(resp));

	req.vport_index = vport_index;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err)
		return err;

	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
				   sizeof(resp));
	if (err)
		return err;

	if (resp.hdr.status)
		return -EPROTO;

	*max_sq = resp.max_num_sq;
	*max_rq = resp.max_num_rq;
	*num_indir_entry = resp.num_indirection_ent;

	apc->port_handle = resp.vport;
	ether_addr_copy(apc->mac_addr, resp.mac_addr);

	return 0;
}

void mana_uncfg_vport(struct mana_port_context *apc)
{
	mutex_lock(&apc->vport_mutex);
	apc->vport_use_count--;
	WARN_ON(apc->vport_use_count < 0);
	mutex_unlock(&apc->vport_mutex);
}
EXPORT_SYMBOL_NS(mana_uncfg_vport, NET_MANA);

int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
		   u32 doorbell_pg_id)
{
	struct mana_config_vport_resp resp = {};
	struct mana_config_vport_req req = {};
	int err;

	/* This function is used to program the Ethernet port in the hardware
	 * table. It can be called from the Ethernet driver or the RDMA driver.
	 *
	 * For Ethernet usage, the hardware supports only one active user on a
	 * physical port. The driver checks on the port usage before programming
	 * the hardware when creating the RAW QP (RDMA driver) or exposing the
	 * device to kernel NET layer (Ethernet driver).
	 *
	 * Because the RDMA driver doesn't know in advance which QP type the
	 * user will create, it exposes the device with all its ports. The user
	 * may not be able to create RAW QP on a port if this port is already
	 * in use by the Ethernet driver from the kernel.
	 *
	 * This physical port limitation only applies to the RAW QP. For RC QP,
	 * the hardware doesn't have this limitation. The user can create RC
	 * QPs on a physical port up to the hardware limits independent of the
	 * Ethernet usage on the same port.
	 */
	mutex_lock(&apc->vport_mutex);
	if (apc->vport_use_count > 0) {
		mutex_unlock(&apc->vport_mutex);
		return -EBUSY;
	}
	apc->vport_use_count++;
	mutex_unlock(&apc->vport_mutex);

	mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
			     sizeof(req), sizeof(resp));
	req.vport = apc->port_handle;
	req.pdid = protection_dom_id;
	req.doorbell_pageid = doorbell_pg_id;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err);
		goto out;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
			   err, resp.hdr.status);
		if (!err)
			err = -EPROTO;

		goto out;
	}

	apc->tx_shortform_allowed = resp.short_form_allowed;
	apc->tx_vp_offset = resp.tx_vport_offset;

	netdev_info(apc->ndev, "Configured vPort %llu PD %u DB %u\n",
		    apc->port_handle, protection_dom_id, doorbell_pg_id);
out:
	if (err)
		mana_uncfg_vport(apc);

	return err;
}
EXPORT_SYMBOL_NS(mana_cfg_vport, NET_MANA);

static int mana_cfg_vport_steering(struct mana_port_context *apc,
				   enum TRI_STATE rx,
				   bool update_default_rxobj, bool update_key,
				   bool update_tab)
{
	u16 num_entries = MANA_INDIRECT_TABLE_SIZE;
	struct mana_cfg_rx_steer_req_v2 *req;
	struct mana_cfg_rx_steer_resp resp = {};
	struct net_device *ndev = apc->ndev;
	mana_handle_t *req_indir_tab;
	u32 req_buf_size;
	int err;

	req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
	req = kzalloc(req_buf_size, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
			     sizeof(resp));

	req->hdr.req.msg_version = GDMA_MESSAGE_V2;

	req->vport = apc->port_handle;
	req->num_indir_entries = num_entries;
	req->indir_tab_offset = sizeof(*req);
	req->rx_enable = rx;
	req->rss_enable = apc->rss_state;
	req->update_default_rxobj = update_default_rxobj;
	req->update_hashkey = update_key;
	req->update_indir_tab = update_tab;
	req->default_rxobj = apc->default_rxobj;
	req->cqe_coalescing_enable = 0;

	if (update_key)
		memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);

	if (update_tab) {
		req_indir_tab = (mana_handle_t *)(req + 1);
		memcpy(req_indir_tab, apc->rxobj_table,
		       req->num_indir_entries * sizeof(mana_handle_t));
	}

	err = mana_send_request(apc->ac, req, req_buf_size, &resp,
				sizeof(resp));
	if (err) {
		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
		goto out;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
				   sizeof(resp));
	if (err) {
		netdev_err(ndev, "vPort RX configuration failed: %d\n", err);
		goto out;
	}

	if (resp.hdr.status) {
		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
			   resp.hdr.status);
		err = -EPROTO;
	}

	netdev_info(ndev, "Configured steering vPort %llu entries %u\n",
		    apc->port_handle, num_entries);
out:
	kfree(req);
	return err;
}
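
/* Create a hardware WQ/CQ pair object (SQ or RQ) from the given GDMA
 * regions. On success the object handle and the hardware queue IDs are
 * returned through wq_obj, wq_spec and cq_spec.
 */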
int mana_create_wq_obj(struct mana_port_context *apc,
		       mana_handle_t vport,
		       u32 wq_type, struct mana_obj_spec *wq_spec,
		       struct mana_obj_spec *cq_spec,
		       mana_handle_t *wq_obj)
{
	struct mana_create_wqobj_resp resp = {};
	struct mana_create_wqobj_req req = {};
	struct net_device *ndev = apc->ndev;
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
			     sizeof(req), sizeof(resp));
	req.vport = vport;
	req.wq_type = wq_type;
	req.wq_gdma_region = wq_spec->gdma_region;
	req.cq_gdma_region = cq_spec->gdma_region;
	req.wq_size = wq_spec->queue_size;
	req.cq_size = cq_spec->queue_size;
	req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
	req.cq_parent_qid = cq_spec->attached_eq;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(ndev, "Failed to create WQ object: %d\n", err);
		goto out;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
			   resp.hdr.status);
		if (!err)
			err = -EPROTO;
		goto out;
	}

	if (resp.wq_obj == INVALID_MANA_HANDLE) {
		netdev_err(ndev, "Got an invalid WQ object handle\n");
		err = -EPROTO;
		goto out;
	}

	*wq_obj = resp.wq_obj;
	wq_spec->queue_index = resp.wq_id;
	cq_spec->queue_index = resp.cq_id;

	return 0;
out:
	return err;
}
EXPORT_SYMBOL_NS(mana_create_wq_obj, NET_MANA);

void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
			 mana_handle_t wq_obj)
{
	struct mana_destroy_wqobj_resp resp = {};
	struct mana_destroy_wqobj_req req = {};
	struct net_device *ndev = apc->ndev;
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
			     sizeof(req), sizeof(resp));
	req.wq_type = wq_type;
	req.wq_obj_handle = wq_obj;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
		return;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
				   sizeof(resp));
	if (err || resp.hdr.status)
		netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
			   resp.hdr.status);
}
EXPORT_SYMBOL_NS(mana_destroy_wq_obj, NET_MANA);

static void mana_destroy_eq(struct mana_context *ac)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct gdma_queue *eq;
	int i;

	if (!ac->eqs)
		return;

	for (i = 0; i < gc->max_num_queues; i++) {
		eq = ac->eqs[i].eq;
		if (!eq)
			continue;

		mana_gd_destroy_queue(gc, eq);
	}

	kfree(ac->eqs);
	ac->eqs = NULL;
}

static int mana_create_eq(struct mana_context *ac)
{
	struct gdma_dev *gd = ac->gdma_dev;
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_queue_spec spec = {};
	int err;
	int i;

	ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq),
			  GFP_KERNEL);
	if (!ac->eqs)
		return -ENOMEM;

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = EQ_SIZE;
	spec.eq.callback = NULL;
	spec.eq.context = ac->eqs;
	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;

	for (i = 0; i < gc->max_num_queues; i++) {
		err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
		if (err)
			goto out;
	}

	return 0;
out:
	mana_destroy_eq(ac);
	return err;
}
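
/* Fence an RQ: send a fence request and wait for the CQE_RX_OBJECT_FENCE
 * completion on that RQ, so the caller knows the hardware has finished
 * delivering completions for it (used after RSS/steering updates).
 */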
static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
{
	struct mana_fence_rq_resp resp = {};
	struct mana_fence_rq_req req = {};
	int err;

	init_completion(&rxq->fence_event);

	mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
			     sizeof(req), sizeof(resp));
	req.wq_obj_handle = rxq->rxobj;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n",
			   rxq->rxq_idx, err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
			   rxq->rxq_idx, err, resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
		netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n",
			   rxq->rxq_idx);
		return -ETIMEDOUT;
	}

	return 0;
}

static void mana_fence_rqs(struct mana_port_context *apc)
{
	unsigned int rxq_idx;
	struct mana_rxq *rxq;
	int err;

	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
		rxq = apc->rxqs[rxq_idx];
		err = mana_fence_rq(apc, rxq);

		/* In case of any error, use sleep instead. */
		if (err)
			msleep(100);
	}
}

static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
{
	u32 used_space_old;
	u32 used_space_new;

	used_space_old = wq->head - wq->tail;
	used_space_new = wq->head - (wq->tail + num_units);

	if (WARN_ON_ONCE(used_space_new > used_space_old))
		return -ERANGE;

	wq->tail += num_units;
	return 0;
}

static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
{
	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
	struct device *dev = gc->dev;
	int i;

	dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);

	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
			       DMA_TO_DEVICE);
}
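
/* TX completion handler: reap completed send WQEs, unmap and free their
 * skbs, advance the SQ tail, and wake the netdev queue if it was stopped
 * and enough WQE space is available again.
 */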
static void mana_poll_tx_cq(struct mana_cq *cq)
{
	struct gdma_comp *completions = cq->gdma_comp_buf;
	struct gdma_posted_wqe_info *wqe_info;
	unsigned int pkt_transmitted = 0;
	unsigned int wqe_unit_cnt = 0;
	struct mana_txq *txq = cq->txq;
	struct mana_port_context *apc;
	struct netdev_queue *net_txq;
	struct gdma_queue *gdma_wq;
	unsigned int avail_space;
	struct net_device *ndev;
	struct sk_buff *skb;
	bool txq_stopped;
	int comp_read;
	int i;

	ndev = txq->ndev;
	apc = netdev_priv(ndev);

	comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
				    CQE_POLLING_BUFFER);

	if (comp_read < 1)
		return;

	for (i = 0; i < comp_read; i++) {
		struct mana_tx_comp_oob *cqe_oob;

		if (WARN_ON_ONCE(!completions[i].is_sq))
			return;

		cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
		if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type !=
				 MANA_CQE_COMPLETION))
			return;

		switch (cqe_oob->cqe_hdr.cqe_type) {
		case CQE_TX_OKAY:
			break;

		case CQE_TX_SA_DROP:
		case CQE_TX_MTU_DROP:
		case CQE_TX_INVALID_OOB:
		case CQE_TX_INVALID_ETH_TYPE:
		case CQE_TX_HDR_PROCESSING_ERROR:
		case CQE_TX_VF_DISABLED:
		case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
		case CQE_TX_VPORT_DISABLED:
		case CQE_TX_VLAN_TAGGING_VIOLATION:
			WARN_ONCE(1, "TX: CQE error %d: ignored.\n",
				  cqe_oob->cqe_hdr.cqe_type);
			apc->eth_stats.tx_cqe_err++;
			break;

		default:
			/* If the CQE type is unexpected, log an error, assert,
			 * and go through the error path.
			 */
			WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n",
				  cqe_oob->cqe_hdr.cqe_type);
			apc->eth_stats.tx_cqe_unknown_type++;
			return;
		}

		if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
			return;

		skb = skb_dequeue(&txq->pending_skbs);
		if (WARN_ON_ONCE(!skb))
			return;

		wqe_info = (struct gdma_posted_wqe_info *)skb->cb;
		wqe_unit_cnt += wqe_info->wqe_size_in_bu;

		mana_unmap_skb(skb, apc);

		napi_consume_skb(skb, cq->budget);

		pkt_transmitted++;
	}

	if (WARN_ON_ONCE(wqe_unit_cnt == 0))
		return;

	mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);

	gdma_wq = txq->gdma_sq;
	avail_space = mana_gd_wq_avail_space(gdma_wq);

	/* Ensure tail updated before checking q stop */
	smp_mb();

	net_txq = txq->net_txq;
	txq_stopped = netif_tx_queue_stopped(net_txq);

	/* Ensure checking txq_stopped before apc->port_is_up. */
	smp_rmb();

	if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
		netif_tx_wake_queue(net_txq);
		apc->eth_stats.wake_queue++;
	}

	if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
		WARN_ON_ONCE(1);

	cq->work_done = pkt_transmitted;
}

static void mana_post_pkt_rxq(struct mana_rxq *rxq)
{
	struct mana_recv_buf_oob *recv_buf_oob;
	u32 curr_index;
	int err;

	curr_index = rxq->buf_index++;
	if (rxq->buf_index == rxq->num_rx_buf)
		rxq->buf_index = 0;

	recv_buf_oob = &rxq->rx_oobs[curr_index];

	err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req,
					&recv_buf_oob->wqe_inf);
	if (WARN_ON_ONCE(err))
		return;

	WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
}

static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
				      uint pkt_len, struct xdp_buff *xdp)
{
	struct sk_buff *skb = napi_build_skb(buf_va, rxq->alloc_size);

	if (!skb)
		return NULL;

	if (xdp->data_hard_start) {
		skb_reserve(skb, xdp->data - xdp->data_hard_start);
		skb_put(skb, xdp->data_end - xdp->data);
		return skb;
	}

	skb_reserve(skb, rxq->headroom);
	skb_put(skb, pkt_len);

	return skb;
}
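
/* Deliver one received buffer: run the XDP program if attached, build an
 * skb, fill in checksum/RSS-hash/VLAN metadata from the CQE, and hand the
 * packet to GRO (or to mana_xdp_tx() for XDP_TX).
 */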
static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
			struct mana_rxq *rxq)
{
	struct mana_stats_rx *rx_stats = &rxq->stats;
	struct net_device *ndev = rxq->ndev;
	uint pkt_len = cqe->ppi[0].pkt_len;
	u16 rxq_idx = rxq->rxq_idx;
	struct napi_struct *napi;
	struct xdp_buff xdp = {};
	struct sk_buff *skb;
	u32 hash_value;
	u32 act;

	rxq->rx_cq.work_done++;
	napi = &rxq->rx_cq.napi;

	if (!buf_va) {
		++ndev->stats.rx_dropped;
		return;
	}

	act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);

	if (act == XDP_REDIRECT && !rxq->xdp_rc)
		return;

	if (act != XDP_PASS && act != XDP_TX)
		goto drop_xdp;

	skb = mana_build_skb(rxq, buf_va, pkt_len, &xdp);

	if (!skb)
		goto drop;

	skb->dev = napi->dev;

	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);
	skb_record_rx_queue(skb, rxq_idx);

	if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) {
		if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) {
		hash_value = cqe->ppi[0].pkt_hash;

		if (cqe->rx_hashtype & MANA_HASH_L4)
			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
		else
			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
	}

	if (cqe->rx_vlantag_present) {
		u16 vlan_tci = cqe->rx_vlan_id;

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
	}

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += pkt_len;

	if (act == XDP_TX)
		rx_stats->xdp_tx++;
	u64_stats_update_end(&rx_stats->syncp);

	if (act == XDP_TX) {
		skb_set_queue_mapping(skb, rxq_idx);
		mana_xdp_tx(skb, ndev);
		return;
	}

	napi_gro_receive(napi, skb);

	return;

drop_xdp:
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->xdp_drop++;
	u64_stats_update_end(&rx_stats->syncp);

drop:
	WARN_ON_ONCE(rxq->xdp_save_va);
	/* Save for reuse */
	rxq->xdp_save_va = buf_va;

	++ndev->stats.rx_dropped;

	return;
}

static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
			     dma_addr_t *da, bool is_napi)
{
	struct page *page;
	void *va;

	/* Reuse XDP dropped page if available */
	if (rxq->xdp_save_va) {
		va = rxq->xdp_save_va;
		rxq->xdp_save_va = NULL;
	} else if (rxq->alloc_size > PAGE_SIZE) {
		if (is_napi)
			va = napi_alloc_frag(rxq->alloc_size);
		else
			va = netdev_alloc_frag(rxq->alloc_size);

		if (!va)
			return NULL;

		page = virt_to_head_page(va);
		/* Check if the frag falls back to single page */
		if (compound_order(page) < get_order(rxq->alloc_size)) {
			put_page(page);
			return NULL;
		}
	} else {
		page = dev_alloc_page();
		if (!page)
			return NULL;

		va = page_to_virt(page);
	}

	*da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
			     DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *da)) {
		put_page(virt_to_head_page(va));
		return NULL;
	}

	return va;
}

/* Allocate frag for rx buffer, and save the old buf */
static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
			       struct mana_recv_buf_oob *rxoob, void **old_buf)
{
	dma_addr_t da;
	void *va;

	va = mana_get_rxfrag(rxq, dev, &da, true);
	if (!va)
		return;

	dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
			 DMA_FROM_DEVICE);
	*old_buf = rxoob->buf_va;

	rxoob->buf_va = va;
	rxoob->sgl[0].address = da;
}
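
/* Process a single RX CQE: refill the consumed buffer slot with a fresh
 * one, pass the old buffer up the stack via mana_rx_skb(), then advance
 * the RQ tail and repost a receive WQE.
 */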
static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
				struct gdma_comp *cqe)
{
	struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
	struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
	struct net_device *ndev = rxq->ndev;
	struct mana_recv_buf_oob *rxbuf_oob;
	struct mana_port_context *apc;
	struct device *dev = gc->dev;
	void *old_buf = NULL;
	u32 curr, pktlen;

	apc = netdev_priv(ndev);

	switch (oob->cqe_hdr.cqe_type) {
	case CQE_RX_OKAY:
		break;

	case CQE_RX_TRUNCATED:
		++ndev->stats.rx_dropped;
		rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
		netdev_warn_once(ndev, "Dropped a truncated packet\n");
		goto drop;

	case CQE_RX_COALESCED_4:
		netdev_err(ndev, "RX coalescing is unsupported\n");
		apc->eth_stats.rx_coalesced_err++;
		return;

	case CQE_RX_OBJECT_FENCE:
		complete(&rxq->fence_event);
		return;

	default:
		netdev_err(ndev, "Unknown RX CQE type = %d\n",
			   oob->cqe_hdr.cqe_type);
		apc->eth_stats.rx_cqe_unknown_type++;
		return;
	}

	pktlen = oob->ppi[0].pkt_len;

	if (pktlen == 0) {
		/* data packets should never have a packet length of zero */
		netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
			   rxq->gdma_id, cq->gdma_id, rxq->rxobj);
		return;
	}

	curr = rxq->buf_index;
	rxbuf_oob = &rxq->rx_oobs[curr];
	WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);

	mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf);

	/* Unsuccessful refill will have old_buf == NULL.
	 * In this case, mana_rx_skb() will drop the packet.
	 */
	mana_rx_skb(old_buf, oob, rxq);

drop:
	mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);

	mana_post_pkt_rxq(rxq);
}

static void mana_poll_rx_cq(struct mana_cq *cq)
{
	struct gdma_comp *comp = cq->gdma_comp_buf;
	struct mana_rxq *rxq = cq->rxq;
	int comp_read, i;

	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
	WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);

	rxq->xdp_flush = false;

	for (i = 0; i < comp_read; i++) {
		if (WARN_ON_ONCE(comp[i].is_sq))
			return;

		/* verify recv cqe references the right rxq */
		if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
			return;

		mana_process_rx_cqe(rxq, cq, &comp[i]);
	}

	if (comp_read > 0) {
		struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;

		mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq);
	}

	if (rxq->xdp_flush)
		xdp_do_flush();
}
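
/* Common CQ handler used by both the EQ callback path and NAPI poll:
 * polls TX or RX completions and re-arms the CQ only when NAPI has
 * completed within budget.
 */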
static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
{
	struct mana_cq *cq = context;
	u8 arm_bit;
	int w;

	WARN_ON_ONCE(cq->gdma_cq != gdma_queue);

	if (cq->type == MANA_CQ_TYPE_RX)
		mana_poll_rx_cq(cq);
	else
		mana_poll_tx_cq(cq);

	w = cq->work_done;

	if (w < cq->budget &&
	    napi_complete_done(&cq->napi, w)) {
		arm_bit = SET_ARM_BIT;
	} else {
		arm_bit = 0;
	}

	mana_gd_ring_cq(gdma_queue, arm_bit);

	return w;
}

static int mana_poll(struct napi_struct *napi, int budget)
{
	struct mana_cq *cq = container_of(napi, struct mana_cq, napi);
	int w;

	cq->work_done = 0;
	cq->budget = budget;

	w = mana_cq_handler(cq, cq->gdma_cq);

	return min(w, budget);
}

static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue)
{
	struct mana_cq *cq = context;

	napi_schedule_irqoff(&cq->napi);
}

static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;

	if (!cq->gdma_cq)
		return;

	mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
}

static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;

	if (!txq->gdma_sq)
		return;

	mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
}

static void mana_destroy_txq(struct mana_port_context *apc)
{
	struct napi_struct *napi;
	int i;

	if (!apc->tx_qp)
		return;

	for (i = 0; i < apc->num_queues; i++) {
		napi = &apc->tx_qp[i].tx_cq.napi;
		napi_synchronize(napi);
		napi_disable(napi);
		netif_napi_del(napi);

		mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);

		mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);

		mana_deinit_txq(apc, &apc->tx_qp[i].txq);
	}

	kfree(apc->tx_qp);
	apc->tx_qp = NULL;
}

static int mana_create_txq(struct mana_port_context *apc,
			   struct net_device *net)
{
	struct mana_context *ac = apc->ac;
	struct gdma_dev *gd = ac->gdma_dev;
	struct mana_obj_spec wq_spec;
	struct mana_obj_spec cq_spec;
	struct gdma_queue_spec spec;
	struct gdma_context *gc;
	struct mana_txq *txq;
	struct mana_cq *cq;
	u32 txq_size;
	u32 cq_size;
	int err;
	int i;

	apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp),
			     GFP_KERNEL);
	if (!apc->tx_qp)
		return -ENOMEM;

	/* The minimum size of the WQE is 32 bytes, hence
	 * MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
	 * the SQ can store. This value is then used to size other queues
	 * to prevent overflow.
	 */
	txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
	BUILD_BUG_ON(!PAGE_ALIGNED(txq_size));

	cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
	cq_size = PAGE_ALIGN(cq_size);

	gc = gd->gdma_context;

	for (i = 0; i < apc->num_queues; i++) {
		apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;

		/* Create SQ */
		txq = &apc->tx_qp[i].txq;

		u64_stats_init(&txq->stats.syncp);
		txq->ndev = net;
		txq->net_txq = netdev_get_tx_queue(net, i);
		txq->vp_offset = apc->tx_vp_offset;
		skb_queue_head_init(&txq->pending_skbs);

		memset(&spec, 0, sizeof(spec));
		spec.type = GDMA_SQ;
		spec.monitor_avl_buf = true;
		spec.queue_size = txq_size;
		err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
		if (err)
			goto out;

		/* Create SQ's CQ */
		cq = &apc->tx_qp[i].tx_cq;
		cq->type = MANA_CQ_TYPE_TX;

		cq->txq = txq;

		memset(&spec, 0, sizeof(spec));
		spec.type = GDMA_CQ;
		spec.monitor_avl_buf = false;
		spec.queue_size = cq_size;
		spec.cq.callback = mana_schedule_napi;
		spec.cq.parent_eq = ac->eqs[i].eq;
		spec.cq.context = cq;
		err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
		if (err)
			goto out;

		memset(&wq_spec, 0, sizeof(wq_spec));
		memset(&cq_spec, 0, sizeof(cq_spec));

		wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
		wq_spec.queue_size = txq->gdma_sq->queue_size;

		cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
		cq_spec.queue_size = cq->gdma_cq->queue_size;
		cq_spec.modr_ctx_id = 0;
		cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;

		err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
					 &wq_spec, &cq_spec,
					 &apc->tx_qp[i].tx_object);

		if (err)
			goto out;

		txq->gdma_sq->id = wq_spec.queue_index;
		cq->gdma_cq->id = cq_spec.queue_index;

		txq->gdma_sq->mem_info.dma_region_handle =
			GDMA_INVALID_DMA_REGION;
		cq->gdma_cq->mem_info.dma_region_handle =
			GDMA_INVALID_DMA_REGION;

		txq->gdma_txq_id = txq->gdma_sq->id;

		cq->gdma_id = cq->gdma_cq->id;

		if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
			err = -EINVAL;
			goto out;
		}

		gc->cq_table[cq->gdma_id] = cq->gdma_cq;

		netif_napi_add_tx(net, &cq->napi, mana_poll);
		napi_enable(&cq->napi);

		mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
	}

	return 0;
out:
	mana_destroy_txq(apc);
	return err;
}

static void mana_destroy_rxq(struct mana_port_context *apc,
			     struct mana_rxq *rxq, bool validate_state)

{
	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
	struct mana_recv_buf_oob *rx_oob;
	struct device *dev = gc->dev;
	struct napi_struct *napi;
	int i;

	if (!rxq)
		return;

	napi = &rxq->rx_cq.napi;

	if (validate_state)
		napi_synchronize(napi);

	napi_disable(napi);

	xdp_rxq_info_unreg(&rxq->xdp_rxq);

	netif_napi_del(napi);

	mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);

	mana_deinit_cq(apc, &rxq->rx_cq);

	if (rxq->xdp_save_va)
		put_page(virt_to_head_page(rxq->xdp_save_va));

	for (i = 0; i < rxq->num_rx_buf; i++) {
		rx_oob = &rxq->rx_oobs[i];

		if (!rx_oob->buf_va)
			continue;

		dma_unmap_single(dev, rx_oob->sgl[0].address,
				 rx_oob->sgl[0].size, DMA_FROM_DEVICE);

		put_page(virt_to_head_page(rx_oob->buf_va));
		rx_oob->buf_va = NULL;
	}

	if (rxq->gdma_rq)
		mana_gd_destroy_queue(gc, rxq->gdma_rq);

	kfree(rxq);
}

static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
			    struct mana_rxq *rxq, struct device *dev)
{
	struct mana_port_context *mpc = netdev_priv(rxq->ndev);
	dma_addr_t da;
	void *va;

	if (mpc->rxbufs_pre)
		va = mana_get_rxbuf_pre(rxq, &da);
	else
		va = mana_get_rxfrag(rxq, dev, &da, false);

	if (!va)
		return -ENOMEM;

	rx_oob->buf_va = va;

	rx_oob->sgl[0].address = da;
	rx_oob->sgl[0].size = rxq->datasize;
	rx_oob->sgl[0].mem_key = mem_key;

	return 0;
}

#define MANA_WQE_HEADER_SIZE 16
#define MANA_WQE_SGE_SIZE 16
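
/* Initialize the receive WQEs for a queue and compute how much WQ and CQ
 * space they need: each RX WQE is a 16-byte header plus one 16-byte SGE,
 * rounded up to a 32-byte unit, with one completion entry per buffer.
 */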
static int mana_alloc_rx_wqe(struct mana_port_context *apc,
			     struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)
{
	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
	struct mana_recv_buf_oob *rx_oob;
	struct device *dev = gc->dev;
	u32 buf_idx;
	int ret;

	WARN_ON(rxq->datasize == 0);

	*rxq_size = 0;
	*cq_size = 0;

	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
		rx_oob = &rxq->rx_oobs[buf_idx];
		memset(rx_oob, 0, sizeof(*rx_oob));

		rx_oob->num_sge = 1;

		ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq,
				       dev);
		if (ret)
			return ret;

		rx_oob->wqe_req.sgl = rx_oob->sgl;
		rx_oob->wqe_req.num_sge = rx_oob->num_sge;
		rx_oob->wqe_req.inline_oob_size = 0;
		rx_oob->wqe_req.inline_oob_data = NULL;
		rx_oob->wqe_req.flags = 0;
		rx_oob->wqe_req.client_data_unit = 0;

		*rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
				   MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
		*cq_size += COMP_ENTRY_SIZE;
	}

	return 0;
}

static int mana_push_wqe(struct mana_rxq *rxq)
{
	struct mana_recv_buf_oob *rx_oob;
	u32 buf_idx;
	int err;

	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
		rx_oob = &rxq->rx_oobs[buf_idx];

		err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
					    &rx_oob->wqe_inf);
		if (err)
			return -ENOSPC;
	}

	return 0;
}

static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
					u32 rxq_idx, struct mana_eq *eq,
					struct net_device *ndev)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;
	struct mana_obj_spec wq_spec;
	struct mana_obj_spec cq_spec;
	struct gdma_queue_spec spec;
	struct mana_cq *cq = NULL;
	struct gdma_context *gc;
	u32 cq_size, rq_size;
	struct mana_rxq *rxq;
	int err;

	gc = gd->gdma_context;

	rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
		      GFP_KERNEL);
	if (!rxq)
		return NULL;

	rxq->ndev = ndev;
	rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
	rxq->rxq_idx = rxq_idx;
	rxq->rxobj = INVALID_MANA_HANDLE;

	mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size,
			   &rxq->headroom);

	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
	if (err)
		goto out;

	rq_size = PAGE_ALIGN(rq_size);
	cq_size = PAGE_ALIGN(cq_size);

	/* Create RQ */
	memset(&spec, 0, sizeof(spec));
	spec.type = GDMA_RQ;
	spec.monitor_avl_buf = true;
	spec.queue_size = rq_size;
	err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
	if (err)
		goto out;

	/* Create RQ's CQ */
	cq = &rxq->rx_cq;
	cq->type = MANA_CQ_TYPE_RX;
	cq->rxq = rxq;

	memset(&spec, 0, sizeof(spec));
	spec.type = GDMA_CQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = cq_size;
	spec.cq.callback = mana_schedule_napi;
	spec.cq.parent_eq = eq->eq;
	spec.cq.context = cq;
	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
	if (err)
		goto out;

	memset(&wq_spec, 0, sizeof(wq_spec));
	memset(&cq_spec, 0, sizeof(cq_spec));
	wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
	wq_spec.queue_size = rxq->gdma_rq->queue_size;

	cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
	cq_spec.queue_size = cq->gdma_cq->queue_size;
	cq_spec.modr_ctx_id = 0;
	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;

	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
				 &wq_spec, &cq_spec, &rxq->rxobj);
	if (err)
		goto out;

	rxq->gdma_rq->id = wq_spec.queue_index;
	cq->gdma_cq->id = cq_spec.queue_index;

	rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
	cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;

	rxq->gdma_id = rxq->gdma_rq->id;
	cq->gdma_id = cq->gdma_cq->id;

	err = mana_push_wqe(rxq);
	if (err)
		goto out;

	if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
		err = -EINVAL;
		goto out;
	}

	gc->cq_table[cq->gdma_id] = cq->gdma_cq;

	netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1);

	WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
				 cq->napi.napi_id));
	WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
					   MEM_TYPE_PAGE_SHARED, NULL));

	napi_enable(&cq->napi);

	mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
out:
	if (!err)
		return rxq;

	netdev_err(ndev, "Failed to create RXQ: err = %d\n", err);

	mana_destroy_rxq(apc, rxq, false);

	if (cq)
		mana_deinit_cq(apc, cq);

	return NULL;
}

static int mana_add_rx_queues(struct mana_port_context *apc,
			      struct net_device *ndev)
{
	struct mana_context *ac = apc->ac;
	struct mana_rxq *rxq;
	int err = 0;
	int i;

	for (i = 0; i < apc->num_queues; i++) {
		rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
		if (!rxq) {
			err = -ENOMEM;
			goto out;
		}

		u64_stats_init(&rxq->stats.syncp);

		apc->rxqs[i] = rxq;
	}

	apc->default_rxobj = apc->rxqs[0]->rxobj;
out:
	return err;
}

static void mana_destroy_vport(struct mana_port_context *apc)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;
	struct mana_rxq *rxq;
	u32 rxq_idx;

	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
		rxq = apc->rxqs[rxq_idx];
		if (!rxq)
			continue;

		mana_destroy_rxq(apc, rxq, true);
		apc->rxqs[rxq_idx] = NULL;
	}

	mana_destroy_txq(apc);
	mana_uncfg_vport(apc);

	if (gd->gdma_context->is_pf)
		mana_pf_deregister_hw_vport(apc);
}

static int mana_create_vport(struct mana_port_context *apc,
			     struct net_device *net)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;
	int err;

	apc->default_rxobj = INVALID_MANA_HANDLE;

	if (gd->gdma_context->is_pf) {
		err = mana_pf_register_hw_vport(apc);
		if (err)
			return err;
	}

	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
	if (err)
		return err;

	return mana_create_txq(apc, net);
}

static void mana_rss_table_init(struct mana_port_context *apc)
{
	int i;

	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
		apc->indir_table[i] =
			ethtool_rxfh_indir_default(i, apc->num_queues);
}

int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
		    bool update_hash, bool update_tab)
{
	u32 queue_idx;
	int err;
	int i;

	if (update_tab) {
		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
			queue_idx = apc->indir_table[i];
			apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
		}
	}

	err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
	if (err)
		return err;

	mana_fence_rqs(apc);

	return 0;
}

static int mana_init_port(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	u32 max_txq, max_rxq, max_queues;
	int port_idx = apc->port_idx;
	u32 num_indirect_entries;
	int err;

	err = mana_init_port_context(apc);
	if (err)
		return err;

	err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
				   &num_indirect_entries);
	if (err) {
		netdev_err(ndev, "Failed to query info for vPort %d\n",
			   port_idx);
		goto reset_apc;
	}

	max_queues = min_t(u32, max_txq, max_rxq);
	if (apc->max_queues > max_queues)
		apc->max_queues = max_queues;

	if (apc->num_queues > apc->max_queues)
		apc->num_queues = apc->max_queues;

	eth_hw_addr_set(ndev, apc->mac_addr);

	return 0;

reset_apc:
	kfree(apc->rxqs);
	apc->rxqs = NULL;
	return err;
}
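
/* Bring up the data path for a port: create the vPort with its TX queues,
 * create the RX queues, program the RSS indirection table and steering, and,
 * on a PF, register the packet filter. Any failure destroys the vPort again.
 * A previously attached XDP program is re-applied at the end.
 */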
int mana_alloc_queues(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	struct gdma_dev *gd = apc->ac->gdma_dev;
	int err;

	err = mana_create_vport(apc, ndev);
	if (err)
		return err;

	err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
	if (err)
		goto destroy_vport;

	err = mana_add_rx_queues(apc, ndev);
	if (err)
		goto destroy_vport;

	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;

	err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
	if (err)
		goto destroy_vport;

	mana_rss_table_init(apc);

	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
	if (err)
		goto destroy_vport;

	if (gd->gdma_context->is_pf) {
		err = mana_pf_register_filter(apc);
		if (err)
			goto destroy_vport;
	}

	mana_chn_setxdp(apc, mana_xdp_get(apc));

	return 0;

destroy_vport:
	mana_destroy_vport(apc);
	return err;
}

int mana_attach(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int err;

	ASSERT_RTNL();

	err = mana_init_port(ndev);
	if (err)
		return err;

	if (apc->port_st_save) {
		err = mana_alloc_queues(ndev);
		if (err) {
			mana_cleanup_port_context(apc);
			return err;
		}
	}

	apc->port_is_up = apc->port_st_save;

	/* Ensure port state updated before txq state */
	smp_wmb();

	if (apc->port_is_up)
		netif_carrier_on(ndev);

	netif_device_attach(ndev);

	return 0;
}
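
/* Tear down the data path of a port that has already been marked down:
 * detach XDP, deregister the PF filter (on a PF), wait for in-flight TX
 * packets to drain, disable RSS steering, then destroy the vPort.
 */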
static int mana_dealloc_queues(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	struct gdma_dev *gd = apc->ac->gdma_dev;
	struct mana_txq *txq;
	int i, err;

	if (apc->port_is_up)
		return -EINVAL;

	mana_chn_setxdp(apc, NULL);

	if (gd->gdma_context->is_pf)
		mana_pf_deregister_filter(apc);

	/* No packet can be transmitted now since apc->port_is_up is false.
	 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
	 * a txq because it may not timely see apc->port_is_up being cleared
	 * to false, but it doesn't matter since mana_start_xmit() drops any
	 * new packets due to apc->port_is_up being false.
	 *
	 * Drain all the in-flight TX packets
	 */
	for (i = 0; i < apc->num_queues; i++) {
		txq = &apc->tx_qp[i].txq;

		while (atomic_read(&txq->pending_sends) > 0)
			usleep_range(1000, 2000);
	}

	/* We're 100% sure the queues can no longer be woken up, because
	 * we're sure now mana_poll_tx_cq() can't be running.
	 */

	apc->rss_state = TRI_STATE_FALSE;
	err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
	if (err) {
		netdev_err(ndev, "Failed to disable vPort: %d\n", err);
		return err;
	}

	mana_destroy_vport(apc);

	return 0;
}

int mana_detach(struct net_device *ndev, bool from_close)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int err;

	ASSERT_RTNL();

	apc->port_st_save = apc->port_is_up;
	apc->port_is_up = false;

	/* Ensure port state updated before txq state */
	smp_wmb();

	netif_tx_disable(ndev);
	netif_carrier_off(ndev);

	if (apc->port_st_save) {
		err = mana_dealloc_queues(ndev);
		if (err)
			return err;
	}

	if (!from_close) {
		netif_device_detach(ndev);
		mana_cleanup_port_context(apc);
	}

	return 0;
}

static int mana_probe_port(struct mana_context *ac, int port_idx,
			   struct net_device **ndev_storage)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct mana_port_context *apc;
	struct net_device *ndev;
	int err;

	ndev = alloc_etherdev_mq(sizeof(struct mana_port_context),
				 gc->max_num_queues);
	if (!ndev)
		return -ENOMEM;

	*ndev_storage = ndev;

	apc = netdev_priv(ndev);
	apc->ac = ac;
	apc->ndev = ndev;
	apc->max_queues = gc->max_num_queues;
	apc->num_queues = gc->max_num_queues;
	apc->port_handle = INVALID_MANA_HANDLE;
	apc->pf_filter_handle = INVALID_MANA_HANDLE;
	apc->port_idx = port_idx;

	mutex_init(&apc->vport_mutex);
	apc->vport_use_count = 0;

	ndev->netdev_ops = &mana_devops;
	ndev->ethtool_ops = &mana_ethtool_ops;
	ndev->mtu = ETH_DATA_LEN;
	ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
	ndev->min_mtu = ETH_MIN_MTU;
	ndev->needed_headroom = MANA_HEADROOM;
	ndev->dev_port = port_idx;
	SET_NETDEV_DEV(ndev, gc->dev);

	netif_carrier_off(ndev);

	netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);

	err = mana_init_port(ndev);
	if (err)
		goto free_net;

	netdev_lockdep_set_classes(ndev);

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	ndev->hw_features |= NETIF_F_RXCSUM;
	ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
	ndev->hw_features |= NETIF_F_RXHASH;
	ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;
	ndev->vlan_features = ndev->features;
	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			     NETDEV_XDP_ACT_NDO_XMIT;

	err = register_netdev(ndev);
	if (err) {
		netdev_err(ndev, "Unable to register netdev.\n");
		goto reset_apc;
	}

	return 0;

reset_apc:
	kfree(apc->rxqs);
	apc->rxqs = NULL;
free_net:
	*ndev_storage = NULL;
	netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
	free_netdev(ndev);
	return err;
}

static void adev_release(struct device *dev)
{
	struct mana_adev *madev = container_of(dev, struct mana_adev, adev.dev);

	kfree(madev);
}

static void remove_adev(struct gdma_dev *gd)
{
	struct auxiliary_device *adev = gd->adev;
	int id = adev->id;

	auxiliary_device_delete(adev);
	auxiliary_device_uninit(adev);

	mana_adev_idx_free(id);
	gd->adev = NULL;
}
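
/* Create and register the auxiliary device for this GDMA device so that an
 * auxiliary driver (such as the MANA RDMA driver) can bind to it; the index
 * allocated from mana_adev_ida becomes the auxiliary device id.
 */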
static int add_adev(struct gdma_dev *gd)
{
	struct auxiliary_device *adev;
	struct mana_adev *madev;
	int ret;

	madev = kzalloc(sizeof(*madev), GFP_KERNEL);
	if (!madev)
		return -ENOMEM;

	adev = &madev->adev;
	ret = mana_adev_idx_alloc();
	if (ret < 0)
		goto idx_fail;
	adev->id = ret;

	adev->name = "rdma";
	adev->dev.parent = gd->gdma_context->dev;
	adev->dev.release = adev_release;
	madev->mdev = gd;

	ret = auxiliary_device_init(adev);
	if (ret)
		goto init_fail;

	ret = auxiliary_device_add(adev);
	if (ret)
		goto add_fail;

	gd->adev = adev;
	return 0;

add_fail:
	auxiliary_device_uninit(adev);

init_fail:
	mana_adev_idx_free(adev->id);

idx_fail:
	kfree(madev);

	return ret;
}

int mana_probe(struct gdma_dev *gd, bool resuming)
{
	struct gdma_context *gc = gd->gdma_context;
	struct mana_context *ac = gd->driver_data;
	struct device *dev = gc->dev;
	u16 num_ports = 0;
	int err;
	int i;

	dev_info(dev,
		 "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n",
		 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);

	err = mana_gd_register_device(gd);
	if (err)
		return err;

	if (!resuming) {
		ac = kzalloc(sizeof(*ac), GFP_KERNEL);
		if (!ac)
			return -ENOMEM;

		ac->gdma_dev = gd;
		gd->driver_data = ac;
	}

	err = mana_create_eq(ac);
	if (err)
		goto out;

	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
				    MANA_MICRO_VERSION, &num_ports);
	if (err)
		goto out;

	if (!resuming) {
		ac->num_ports = num_ports;
	} else {
		if (ac->num_ports != num_ports) {
			dev_err(dev, "The number of vPorts changed: %d->%d\n",
				ac->num_ports, num_ports);
			err = -EPROTO;
			goto out;
		}
	}

	if (ac->num_ports == 0)
		dev_err(dev, "Failed to detect any vPort\n");

	if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
		ac->num_ports = MAX_PORTS_IN_MANA_DEV;

	if (!resuming) {
		for (i = 0; i < ac->num_ports; i++) {
			err = mana_probe_port(ac, i, &ac->ports[i]);
			if (err)
				break;
		}
	} else {
		for (i = 0; i < ac->num_ports; i++) {
			rtnl_lock();
			err = mana_attach(ac->ports[i]);
			rtnl_unlock();
			if (err)
				break;
		}
	}

	err = add_adev(gd);
out:
	if (err)
		mana_remove(gd, false);

	return err;
}
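
/* Counterpart of mana_probe(): remove the auxiliary device, detach (and,
 * unless suspending, unregister and free) each port's net device, destroy
 * the EQs and deregister from the GDMA layer.
 */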
void mana_remove(struct gdma_dev *gd, bool suspending)
{
	struct gdma_context *gc = gd->gdma_context;
	struct mana_context *ac = gd->driver_data;
	struct device *dev = gc->dev;
	struct net_device *ndev;
	int err;
	int i;

	/* adev currently doesn't support suspending, always remove it */
	if (gd->adev)
		remove_adev(gd);

	for (i = 0; i < ac->num_ports; i++) {
		ndev = ac->ports[i];
		if (!ndev) {
			if (i == 0)
				dev_err(dev, "No net device to remove\n");
			goto out;
		}

		/* All cleanup actions should stay after rtnl_lock(), otherwise
		 * other functions may access partially cleaned up data.
		 */
		rtnl_lock();

		err = mana_detach(ndev, false);
		if (err)
			netdev_err(ndev, "Failed to detach vPort %d: %d\n",
				   i, err);

		if (suspending) {
			/* No need to unregister the ndev. */
			rtnl_unlock();
			continue;
		}

		unregister_netdevice(ndev);

		rtnl_unlock();

		free_netdev(ndev);
	}

	mana_destroy_eq(ac);
out:
	mana_gd_deregister_device(gd);

	if (suspending)
		return;

	gd->driver_data = NULL;
	gd->gdma_context = NULL;
	kfree(ac);
}