1 /* 2 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 4 * 5 * This program is free software; you may redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by 7 * the Free Software Foundation; version 2 of the License. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 16 * SOFTWARE. 17 * 18 */ 19 20 #include <linux/module.h> 21 #include <linux/kernel.h> 22 #include <linux/string.h> 23 #include <linux/errno.h> 24 #include <linux/types.h> 25 #include <linux/init.h> 26 #include <linux/interrupt.h> 27 #include <linux/workqueue.h> 28 #include <linux/pci.h> 29 #include <linux/netdevice.h> 30 #include <linux/etherdevice.h> 31 #include <linux/if.h> 32 #include <linux/if_ether.h> 33 #include <linux/if_vlan.h> 34 #include <linux/in.h> 35 #include <linux/ip.h> 36 #include <linux/ipv6.h> 37 #include <linux/tcp.h> 38 #include <linux/rtnetlink.h> 39 #include <linux/prefetch.h> 40 #include <net/ip6_checksum.h> 41 #include <linux/ktime.h> 42 #include <linux/numa.h> 43 #ifdef CONFIG_RFS_ACCEL 44 #include <linux/cpu_rmap.h> 45 #endif 46 #include <linux/crash_dump.h> 47 #include <net/busy_poll.h> 48 #include <net/vxlan.h> 49 #include <net/netdev_queues.h> 50 51 #include "cq_enet_desc.h" 52 #include "vnic_dev.h" 53 #include "vnic_intr.h" 54 #include "vnic_stats.h" 55 #include "vnic_vic.h" 56 #include "enic_res.h" 57 #include "enic.h" 58 #include "enic_dev.h" 59 #include "enic_pp.h" 60 #include "enic_clsf.h" 61 62 #define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ) 63 #define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS) 64 #define MAX_TSO (1 << 16) 65 #define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1) 66 67 #define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */ 68 #define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */ 69 #define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */ 70 71 #define RX_COPYBREAK_DEFAULT 256 72 73 /* Supported devices */ 74 static const struct pci_device_id enic_id_table[] = { 75 { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) }, 76 { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) }, 77 { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) }, 78 { 0, } /* end of table */ 79 }; 80 81 MODULE_DESCRIPTION(DRV_DESCRIPTION); 82 MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>"); 83 MODULE_LICENSE("GPL"); 84 MODULE_DEVICE_TABLE(pci, enic_id_table); 85 86 #define ENIC_LARGE_PKT_THRESHOLD 1000 87 #define ENIC_MAX_COALESCE_TIMERS 10 88 /* Interrupt moderation table, which will be used to decide the 89 * coalescing timer values 90 * {rx_rate in Mbps, mapping percentage of the range} 91 */ 92 static struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = { 93 {4000, 0}, 94 {4400, 10}, 95 {5060, 20}, 96 {5230, 30}, 97 {5540, 40}, 98 {5820, 50}, 99 {6120, 60}, 100 {6435, 70}, 101 {6745, 80}, 102 {7000, 90}, 103 {0xFFFFFFFF, 100} 104 }; 105 106 /* This table helps the driver to pick different ranges for rx coalescing 107 * timer depending on the link speed. 
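 * Each row holds the small and large packet range start values that
 * enic_set_rx_coal_setting() copies into enic->rx_coalesce_setting based on
 * the link speed; enic_calc_int_moderation() then interpolates between that
 * range start and rx_coal->range_end using the percentages in mod_table.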
108 */ 109 static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = { 110 {0, 0}, /* 0 - 4 Gbps */ 111 {0, 3}, /* 4 - 10 Gbps */ 112 {3, 6}, /* 10 - 40 Gbps */ 113 }; 114 115 static void enic_init_affinity_hint(struct enic *enic) 116 { 117 int numa_node = dev_to_node(&enic->pdev->dev); 118 int i; 119 120 for (i = 0; i < enic->intr_count; i++) { 121 if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) || 122 (cpumask_available(enic->msix[i].affinity_mask) && 123 !cpumask_empty(enic->msix[i].affinity_mask))) 124 continue; 125 if (zalloc_cpumask_var(&enic->msix[i].affinity_mask, 126 GFP_KERNEL)) 127 cpumask_set_cpu(cpumask_local_spread(i, numa_node), 128 enic->msix[i].affinity_mask); 129 } 130 } 131 132 static void enic_free_affinity_hint(struct enic *enic) 133 { 134 int i; 135 136 for (i = 0; i < enic->intr_count; i++) { 137 if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i)) 138 continue; 139 free_cpumask_var(enic->msix[i].affinity_mask); 140 } 141 } 142 143 static void enic_set_affinity_hint(struct enic *enic) 144 { 145 int i; 146 int err; 147 148 for (i = 0; i < enic->intr_count; i++) { 149 if (enic_is_err_intr(enic, i) || 150 enic_is_notify_intr(enic, i) || 151 !cpumask_available(enic->msix[i].affinity_mask) || 152 cpumask_empty(enic->msix[i].affinity_mask)) 153 continue; 154 err = irq_update_affinity_hint(enic->msix_entry[i].vector, 155 enic->msix[i].affinity_mask); 156 if (err) 157 netdev_warn(enic->netdev, "irq_update_affinity_hint failed, err %d\n", 158 err); 159 } 160 161 for (i = 0; i < enic->wq_count; i++) { 162 int wq_intr = enic_msix_wq_intr(enic, i); 163 164 if (cpumask_available(enic->msix[wq_intr].affinity_mask) && 165 !cpumask_empty(enic->msix[wq_intr].affinity_mask)) 166 netif_set_xps_queue(enic->netdev, 167 enic->msix[wq_intr].affinity_mask, 168 i); 169 } 170 } 171 172 static void enic_unset_affinity_hint(struct enic *enic) 173 { 174 int i; 175 176 for (i = 0; i < enic->intr_count; i++) 177 irq_update_affinity_hint(enic->msix_entry[i].vector, NULL); 178 } 179 180 static int enic_udp_tunnel_set_port(struct net_device *netdev, 181 unsigned int table, unsigned int entry, 182 struct udp_tunnel_info *ti) 183 { 184 struct enic *enic = netdev_priv(netdev); 185 int err; 186 187 spin_lock_bh(&enic->devcmd_lock); 188 189 err = vnic_dev_overlay_offload_cfg(enic->vdev, 190 OVERLAY_CFG_VXLAN_PORT_UPDATE, 191 ntohs(ti->port)); 192 if (err) 193 goto error; 194 195 err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN, 196 enic->vxlan.patch_level); 197 if (err) 198 goto error; 199 200 enic->vxlan.vxlan_udp_port_number = ntohs(ti->port); 201 error: 202 spin_unlock_bh(&enic->devcmd_lock); 203 204 return err; 205 } 206 207 static int enic_udp_tunnel_unset_port(struct net_device *netdev, 208 unsigned int table, unsigned int entry, 209 struct udp_tunnel_info *ti) 210 { 211 struct enic *enic = netdev_priv(netdev); 212 int err; 213 214 spin_lock_bh(&enic->devcmd_lock); 215 216 err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN, 217 OVERLAY_OFFLOAD_DISABLE); 218 if (err) 219 goto unlock; 220 221 enic->vxlan.vxlan_udp_port_number = 0; 222 223 unlock: 224 spin_unlock_bh(&enic->devcmd_lock); 225 226 return err; 227 } 228 229 static const struct udp_tunnel_nic_info enic_udp_tunnels = { 230 .set_port = enic_udp_tunnel_set_port, 231 .unset_port = enic_udp_tunnel_unset_port, 232 .tables = { 233 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 234 }, 235 }, enic_udp_tunnels_v4 = { 236 .set_port = enic_udp_tunnel_set_port, 237 
.unset_port = enic_udp_tunnel_unset_port, 238 .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY, 239 .tables = { 240 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 241 }, 242 }; 243 244 static netdev_features_t enic_features_check(struct sk_buff *skb, 245 struct net_device *dev, 246 netdev_features_t features) 247 { 248 const struct ethhdr *eth = (struct ethhdr *)skb_inner_mac_header(skb); 249 struct enic *enic = netdev_priv(dev); 250 struct udphdr *udph; 251 u16 port = 0; 252 u8 proto; 253 254 if (!skb->encapsulation) 255 return features; 256 257 features = vxlan_features_check(skb, features); 258 259 switch (vlan_get_protocol(skb)) { 260 case htons(ETH_P_IPV6): 261 if (!(enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6)) 262 goto out; 263 proto = ipv6_hdr(skb)->nexthdr; 264 break; 265 case htons(ETH_P_IP): 266 proto = ip_hdr(skb)->protocol; 267 break; 268 default: 269 goto out; 270 } 271 272 switch (eth->h_proto) { 273 case ntohs(ETH_P_IPV6): 274 if (!(enic->vxlan.flags & ENIC_VXLAN_INNER_IPV6)) 275 goto out; 276 fallthrough; 277 case ntohs(ETH_P_IP): 278 break; 279 default: 280 goto out; 281 } 282 283 284 if (proto == IPPROTO_UDP) { 285 udph = udp_hdr(skb); 286 port = be16_to_cpu(udph->dest); 287 } 288 289 /* HW supports offload of only one UDP port. Remove CSUM and GSO MASK 290 * for other UDP port tunnels 291 */ 292 if (port != enic->vxlan.vxlan_udp_port_number) 293 goto out; 294 295 return features; 296 297 out: 298 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 299 } 300 301 int enic_is_dynamic(struct enic *enic) 302 { 303 return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN; 304 } 305 306 int enic_sriov_enabled(struct enic *enic) 307 { 308 return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0; 309 } 310 311 static int enic_is_sriov_vf(struct enic *enic) 312 { 313 return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF; 314 } 315 316 int enic_is_valid_vf(struct enic *enic, int vf) 317 { 318 #ifdef CONFIG_PCI_IOV 319 return vf >= 0 && vf < enic->num_vfs; 320 #else 321 return 0; 322 #endif 323 } 324 325 static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) 326 { 327 struct enic *enic = vnic_dev_priv(wq->vdev); 328 329 if (buf->sop) 330 dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len, 331 DMA_TO_DEVICE); 332 else 333 dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len, 334 DMA_TO_DEVICE); 335 336 if (buf->os_buf) 337 dev_kfree_skb_any(buf->os_buf); 338 } 339 340 static void enic_wq_free_buf(struct vnic_wq *wq, 341 struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque) 342 { 343 struct enic *enic = vnic_dev_priv(wq->vdev); 344 345 enic->wq_stats[wq->index].cq_work++; 346 enic->wq_stats[wq->index].cq_bytes += buf->len; 347 enic_free_wq_buf(wq, buf); 348 } 349 350 static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, 351 u8 type, u16 q_number, u16 completed_index, void *opaque) 352 { 353 struct enic *enic = vnic_dev_priv(vdev); 354 355 spin_lock(&enic->wq_lock[q_number]); 356 357 vnic_wq_service(&enic->wq[q_number], cq_desc, 358 completed_index, enic_wq_free_buf, 359 opaque); 360 361 if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) && 362 vnic_wq_desc_avail(&enic->wq[q_number]) >= 363 (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) { 364 netif_wake_subqueue(enic->netdev, q_number); 365 enic->wq_stats[q_number].wake++; 366 } 367 368 spin_unlock(&enic->wq_lock[q_number]); 369 370 return 0; 371 } 372 373 static bool enic_log_q_error(struct enic *enic) 374 { 375 unsigned int i; 376 u32 
error_status; 377 bool err = false; 378 379 for (i = 0; i < enic->wq_count; i++) { 380 error_status = vnic_wq_error_status(&enic->wq[i]); 381 err |= error_status; 382 if (error_status) 383 netdev_err(enic->netdev, "WQ[%d] error_status %d\n", 384 i, error_status); 385 } 386 387 for (i = 0; i < enic->rq_count; i++) { 388 error_status = vnic_rq_error_status(&enic->rq[i]); 389 err |= error_status; 390 if (error_status) 391 netdev_err(enic->netdev, "RQ[%d] error_status %d\n", 392 i, error_status); 393 } 394 395 return err; 396 } 397 398 static void enic_msglvl_check(struct enic *enic) 399 { 400 u32 msg_enable = vnic_dev_msg_lvl(enic->vdev); 401 402 if (msg_enable != enic->msg_enable) { 403 netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n", 404 enic->msg_enable, msg_enable); 405 enic->msg_enable = msg_enable; 406 } 407 } 408 409 static void enic_mtu_check(struct enic *enic) 410 { 411 u32 mtu = vnic_dev_mtu(enic->vdev); 412 struct net_device *netdev = enic->netdev; 413 414 if (mtu && mtu != enic->port_mtu) { 415 enic->port_mtu = mtu; 416 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) { 417 mtu = max_t(int, ENIC_MIN_MTU, 418 min_t(int, ENIC_MAX_MTU, mtu)); 419 if (mtu != netdev->mtu) 420 schedule_work(&enic->change_mtu_work); 421 } else { 422 if (mtu < netdev->mtu) 423 netdev_warn(netdev, 424 "interface MTU (%d) set higher " 425 "than switch port MTU (%d)\n", 426 netdev->mtu, mtu); 427 } 428 } 429 } 430 431 static void enic_link_check(struct enic *enic) 432 { 433 int link_status = vnic_dev_link_status(enic->vdev); 434 int carrier_ok = netif_carrier_ok(enic->netdev); 435 436 if (link_status && !carrier_ok) { 437 netdev_info(enic->netdev, "Link UP\n"); 438 netif_carrier_on(enic->netdev); 439 } else if (!link_status && carrier_ok) { 440 netdev_info(enic->netdev, "Link DOWN\n"); 441 netif_carrier_off(enic->netdev); 442 } 443 } 444 445 static void enic_notify_check(struct enic *enic) 446 { 447 enic_msglvl_check(enic); 448 enic_mtu_check(enic); 449 enic_link_check(enic); 450 } 451 452 #define ENIC_TEST_INTR(pba, i) (pba & (1 << i)) 453 454 static irqreturn_t enic_isr_legacy(int irq, void *data) 455 { 456 struct net_device *netdev = data; 457 struct enic *enic = netdev_priv(netdev); 458 unsigned int io_intr = ENIC_LEGACY_IO_INTR; 459 unsigned int err_intr = ENIC_LEGACY_ERR_INTR; 460 unsigned int notify_intr = ENIC_LEGACY_NOTIFY_INTR; 461 u32 pba; 462 463 vnic_intr_mask(&enic->intr[io_intr]); 464 465 pba = vnic_intr_legacy_pba(enic->legacy_pba); 466 if (!pba) { 467 vnic_intr_unmask(&enic->intr[io_intr]); 468 return IRQ_NONE; /* not our interrupt */ 469 } 470 471 if (ENIC_TEST_INTR(pba, notify_intr)) { 472 enic_notify_check(enic); 473 vnic_intr_return_all_credits(&enic->intr[notify_intr]); 474 } 475 476 if (ENIC_TEST_INTR(pba, err_intr)) { 477 vnic_intr_return_all_credits(&enic->intr[err_intr]); 478 enic_log_q_error(enic); 479 /* schedule recovery from WQ/RQ error */ 480 schedule_work(&enic->reset); 481 return IRQ_HANDLED; 482 } 483 484 if (ENIC_TEST_INTR(pba, io_intr)) 485 napi_schedule_irqoff(&enic->napi[0]); 486 else 487 vnic_intr_unmask(&enic->intr[io_intr]); 488 489 return IRQ_HANDLED; 490 } 491 492 static irqreturn_t enic_isr_msi(int irq, void *data) 493 { 494 struct enic *enic = data; 495 496 /* With MSI, there is no sharing of interrupts, so this is 497 * our interrupt and there is no need to ack it. The device 498 * is not providing per-vector masking, so the OS will not 499 * write to PCI config space to mask/unmask the interrupt. 
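	 * (Per-vector masking is an optional capability for plain MSI, and
	 * this vNIC does not implement it.)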
500 * We're using mask_on_assertion for MSI, so the device 501 * automatically masks the interrupt when the interrupt is 502 * generated. Later, when exiting polling, the interrupt 503 * will be unmasked (see enic_poll). 504 * 505 * Also, the device uses the same PCIe Traffic Class (TC) 506 * for Memory Write data and MSI, so there are no ordering 507 * issues; the MSI will always arrive at the Root Complex 508 * _after_ corresponding Memory Writes (i.e. descriptor 509 * writes). 510 */ 511 512 napi_schedule_irqoff(&enic->napi[0]); 513 514 return IRQ_HANDLED; 515 } 516 517 static irqreturn_t enic_isr_msix(int irq, void *data) 518 { 519 struct napi_struct *napi = data; 520 521 napi_schedule_irqoff(napi); 522 523 return IRQ_HANDLED; 524 } 525 526 static irqreturn_t enic_isr_msix_err(int irq, void *data) 527 { 528 struct enic *enic = data; 529 unsigned int intr = enic_msix_err_intr(enic); 530 531 vnic_intr_return_all_credits(&enic->intr[intr]); 532 533 if (enic_log_q_error(enic)) 534 /* schedule recovery from WQ/RQ error */ 535 schedule_work(&enic->reset); 536 537 return IRQ_HANDLED; 538 } 539 540 static irqreturn_t enic_isr_msix_notify(int irq, void *data) 541 { 542 struct enic *enic = data; 543 unsigned int intr = enic_msix_notify_intr(enic); 544 545 enic_notify_check(enic); 546 vnic_intr_return_all_credits(&enic->intr[intr]); 547 548 return IRQ_HANDLED; 549 } 550 551 static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq, 552 struct sk_buff *skb, unsigned int len_left, 553 int loopback) 554 { 555 const skb_frag_t *frag; 556 dma_addr_t dma_addr; 557 558 /* Queue additional data fragments */ 559 for (frag = skb_shinfo(skb)->frags; len_left; frag++) { 560 len_left -= skb_frag_size(frag); 561 dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0, 562 skb_frag_size(frag), 563 DMA_TO_DEVICE); 564 if (unlikely(enic_dma_map_check(enic, dma_addr))) 565 return -ENOMEM; 566 enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag), 567 (len_left == 0), /* EOP? */ 568 loopback); 569 } 570 571 return 0; 572 } 573 574 static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq, 575 struct sk_buff *skb, int vlan_tag_insert, 576 unsigned int vlan_tag, int loopback) 577 { 578 unsigned int head_len = skb_headlen(skb); 579 unsigned int len_left = skb->len - head_len; 580 int eop = (len_left == 0); 581 dma_addr_t dma_addr; 582 int err = 0; 583 584 dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len, 585 DMA_TO_DEVICE); 586 if (unlikely(enic_dma_map_check(enic, dma_addr))) 587 return -ENOMEM; 588 589 /* Queue the main skb fragment. The fragments are no larger 590 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less 591 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor 592 * per fragment is queued. 
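	 * (WQ_ENET_MAX_DESC_LEN is (1 << WQ_ENET_LEN_BITS); see the define
	 * near the top of this file.)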
593 */ 594 enic_queue_wq_desc(wq, skb, dma_addr, head_len, vlan_tag_insert, 595 vlan_tag, eop, loopback); 596 597 if (!eop) 598 err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); 599 600 /* The enic_queue_wq_desc() above does not do HW checksum */ 601 enic->wq_stats[wq->index].csum_none++; 602 enic->wq_stats[wq->index].packets++; 603 enic->wq_stats[wq->index].bytes += skb->len; 604 605 return err; 606 } 607 608 static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq, 609 struct sk_buff *skb, int vlan_tag_insert, 610 unsigned int vlan_tag, int loopback) 611 { 612 unsigned int head_len = skb_headlen(skb); 613 unsigned int len_left = skb->len - head_len; 614 unsigned int hdr_len = skb_checksum_start_offset(skb); 615 unsigned int csum_offset = hdr_len + skb->csum_offset; 616 int eop = (len_left == 0); 617 dma_addr_t dma_addr; 618 int err = 0; 619 620 dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len, 621 DMA_TO_DEVICE); 622 if (unlikely(enic_dma_map_check(enic, dma_addr))) 623 return -ENOMEM; 624 625 /* Queue the main skb fragment. The fragments are no larger 626 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less 627 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor 628 * per fragment is queued. 629 */ 630 enic_queue_wq_desc_csum_l4(wq, skb, dma_addr, head_len, csum_offset, 631 hdr_len, vlan_tag_insert, vlan_tag, eop, 632 loopback); 633 634 if (!eop) 635 err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); 636 637 enic->wq_stats[wq->index].csum_partial++; 638 enic->wq_stats[wq->index].packets++; 639 enic->wq_stats[wq->index].bytes += skb->len; 640 641 return err; 642 } 643 644 static void enic_preload_tcp_csum_encap(struct sk_buff *skb) 645 { 646 const struct ethhdr *eth = (struct ethhdr *)skb_inner_mac_header(skb); 647 648 switch (eth->h_proto) { 649 case ntohs(ETH_P_IP): 650 inner_ip_hdr(skb)->check = 0; 651 inner_tcp_hdr(skb)->check = 652 ~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr, 653 inner_ip_hdr(skb)->daddr, 0, 654 IPPROTO_TCP, 0); 655 break; 656 case ntohs(ETH_P_IPV6): 657 inner_tcp_hdr(skb)->check = 658 ~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr, 659 &inner_ipv6_hdr(skb)->daddr, 0, 660 IPPROTO_TCP, 0); 661 break; 662 default: 663 WARN_ONCE(1, "Non ipv4/ipv6 inner pkt for encap offload"); 664 break; 665 } 666 } 667 668 static void enic_preload_tcp_csum(struct sk_buff *skb) 669 { 670 /* Preload TCP csum field with IP pseudo hdr calculated 671 * with IP length set to zero. HW will later add in length 672 * to each TCP segment resulting from the TSO. 
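	 * For IPv4 this is done below by zeroing ip_hdr(skb)->check and
	 * storing ~csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0);
	 * for IPv6, tcp_v6_gso_csum_prep() performs the equivalent preload.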
673 */ 674 675 if (skb->protocol == cpu_to_be16(ETH_P_IP)) { 676 ip_hdr(skb)->check = 0; 677 tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 678 ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); 679 } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) { 680 tcp_v6_gso_csum_prep(skb); 681 } 682 } 683 684 static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq, 685 struct sk_buff *skb, unsigned int mss, 686 int vlan_tag_insert, unsigned int vlan_tag, 687 int loopback) 688 { 689 unsigned int frag_len_left = skb_headlen(skb); 690 unsigned int len_left = skb->len - frag_len_left; 691 int eop = (len_left == 0); 692 unsigned int offset = 0; 693 unsigned int hdr_len; 694 dma_addr_t dma_addr; 695 unsigned int pkts; 696 unsigned int len; 697 skb_frag_t *frag; 698 699 if (skb->encapsulation) { 700 hdr_len = skb_inner_tcp_all_headers(skb); 701 enic_preload_tcp_csum_encap(skb); 702 enic->wq_stats[wq->index].encap_tso++; 703 } else { 704 hdr_len = skb_tcp_all_headers(skb); 705 enic_preload_tcp_csum(skb); 706 enic->wq_stats[wq->index].tso++; 707 } 708 709 /* Queue WQ_ENET_MAX_DESC_LEN length descriptors 710 * for the main skb fragment 711 */ 712 while (frag_len_left) { 713 len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN); 714 dma_addr = dma_map_single(&enic->pdev->dev, 715 skb->data + offset, len, 716 DMA_TO_DEVICE); 717 if (unlikely(enic_dma_map_check(enic, dma_addr))) 718 return -ENOMEM; 719 enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len, 720 vlan_tag_insert, vlan_tag, 721 eop && (len == frag_len_left), loopback); 722 frag_len_left -= len; 723 offset += len; 724 } 725 726 if (eop) 727 goto tso_out_stats; 728 729 /* Queue WQ_ENET_MAX_DESC_LEN length descriptors 730 * for additional data fragments 731 */ 732 for (frag = skb_shinfo(skb)->frags; len_left; frag++) { 733 len_left -= skb_frag_size(frag); 734 frag_len_left = skb_frag_size(frag); 735 offset = 0; 736 737 while (frag_len_left) { 738 len = min(frag_len_left, 739 (unsigned int)WQ_ENET_MAX_DESC_LEN); 740 dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 741 offset, len, 742 DMA_TO_DEVICE); 743 if (unlikely(enic_dma_map_check(enic, dma_addr))) 744 return -ENOMEM; 745 enic_queue_wq_desc_cont(wq, skb, dma_addr, len, 746 (len_left == 0) && 747 (len == frag_len_left),/*EOP*/ 748 loopback); 749 frag_len_left -= len; 750 offset += len; 751 } 752 } 753 754 tso_out_stats: 755 /* calculate how many packets tso sent */ 756 len = skb->len - hdr_len; 757 pkts = len / mss; 758 if ((len % mss) > 0) 759 pkts++; 760 enic->wq_stats[wq->index].packets += pkts; 761 enic->wq_stats[wq->index].bytes += (len + (pkts * hdr_len)); 762 763 return 0; 764 } 765 766 static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq, 767 struct sk_buff *skb, 768 int vlan_tag_insert, 769 unsigned int vlan_tag, int loopback) 770 { 771 unsigned int head_len = skb_headlen(skb); 772 unsigned int len_left = skb->len - head_len; 773 /* Hardware will overwrite the checksum fields, calculating from 774 * scratch and ignoring the value placed by software. 
775 * Offload mode = 00 776 * mss[2], mss[1], mss[0] bits are set 777 */ 778 unsigned int mss_or_csum = 7; 779 int eop = (len_left == 0); 780 dma_addr_t dma_addr; 781 int err = 0; 782 783 dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len, 784 DMA_TO_DEVICE); 785 if (unlikely(enic_dma_map_check(enic, dma_addr))) 786 return -ENOMEM; 787 788 enic_queue_wq_desc_ex(wq, skb, dma_addr, head_len, mss_or_csum, 0, 789 vlan_tag_insert, vlan_tag, 790 WQ_ENET_OFFLOAD_MODE_CSUM, eop, 1 /* SOP */, eop, 791 loopback); 792 if (!eop) 793 err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); 794 795 enic->wq_stats[wq->index].encap_csum++; 796 enic->wq_stats[wq->index].packets++; 797 enic->wq_stats[wq->index].bytes += skb->len; 798 799 return err; 800 } 801 802 static inline int enic_queue_wq_skb(struct enic *enic, 803 struct vnic_wq *wq, struct sk_buff *skb) 804 { 805 unsigned int mss = skb_shinfo(skb)->gso_size; 806 unsigned int vlan_tag = 0; 807 int vlan_tag_insert = 0; 808 int loopback = 0; 809 int err; 810 811 if (skb_vlan_tag_present(skb)) { 812 /* VLAN tag from trunking driver */ 813 vlan_tag_insert = 1; 814 vlan_tag = skb_vlan_tag_get(skb); 815 enic->wq_stats[wq->index].add_vlan++; 816 } else if (enic->loop_enable) { 817 vlan_tag = enic->loop_tag; 818 loopback = 1; 819 } 820 821 if (mss) 822 err = enic_queue_wq_skb_tso(enic, wq, skb, mss, 823 vlan_tag_insert, vlan_tag, 824 loopback); 825 else if (skb->encapsulation) 826 err = enic_queue_wq_skb_encap(enic, wq, skb, vlan_tag_insert, 827 vlan_tag, loopback); 828 else if (skb->ip_summed == CHECKSUM_PARTIAL) 829 err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert, 830 vlan_tag, loopback); 831 else 832 err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert, 833 vlan_tag, loopback); 834 if (unlikely(err)) { 835 struct vnic_wq_buf *buf; 836 837 buf = wq->to_use->prev; 838 /* while not EOP of previous pkt && queue not empty. 839 * For all non EOP bufs, os_buf is NULL. 840 */ 841 while (!buf->os_buf && (buf->next != wq->to_clean)) { 842 enic_free_wq_buf(wq, buf); 843 wq->ring.desc_avail++; 844 buf = buf->prev; 845 } 846 wq->to_use = buf->next; 847 dev_kfree_skb(skb); 848 } 849 return err; 850 } 851 852 /* netif_tx_lock held, process context with BHs disabled, or BH */ 853 static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, 854 struct net_device *netdev) 855 { 856 struct enic *enic = netdev_priv(netdev); 857 struct vnic_wq *wq; 858 unsigned int txq_map; 859 struct netdev_queue *txq; 860 861 txq_map = skb_get_queue_mapping(skb) % enic->wq_count; 862 wq = &enic->wq[txq_map]; 863 864 if (skb->len <= 0) { 865 dev_kfree_skb_any(skb); 866 enic->wq_stats[wq->index].null_pkt++; 867 return NETDEV_TX_OK; 868 } 869 870 txq = netdev_get_tx_queue(netdev, txq_map); 871 872 /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs, 873 * which is very likely. In the off chance it's going to take 874 * more than * ENIC_NON_TSO_MAX_DESC, linearize the skb. 875 */ 876 877 if (skb_shinfo(skb)->gso_size == 0 && 878 skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC && 879 skb_linearize(skb)) { 880 dev_kfree_skb_any(skb); 881 enic->wq_stats[wq->index].skb_linear_fail++; 882 return NETDEV_TX_OK; 883 } 884 885 spin_lock(&enic->wq_lock[txq_map]); 886 887 if (vnic_wq_desc_avail(wq) < 888 skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) { 889 netif_tx_stop_queue(txq); 890 /* This is a hard error, log it */ 891 netdev_err(netdev, "BUG! 
Tx ring full when queue awake!\n"); 892 spin_unlock(&enic->wq_lock[txq_map]); 893 enic->wq_stats[wq->index].desc_full_awake++; 894 return NETDEV_TX_BUSY; 895 } 896 897 if (enic_queue_wq_skb(enic, wq, skb)) 898 goto error; 899 900 if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) { 901 netif_tx_stop_queue(txq); 902 enic->wq_stats[wq->index].stopped++; 903 } 904 skb_tx_timestamp(skb); 905 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) 906 vnic_wq_doorbell(wq); 907 908 error: 909 spin_unlock(&enic->wq_lock[txq_map]); 910 911 return NETDEV_TX_OK; 912 } 913 914 /* rcu_read_lock potentially held, nominally process context */ 915 static void enic_get_stats(struct net_device *netdev, 916 struct rtnl_link_stats64 *net_stats) 917 { 918 struct enic *enic = netdev_priv(netdev); 919 struct vnic_stats *stats; 920 u64 pkt_truncated = 0; 921 u64 bad_fcs = 0; 922 int err; 923 int i; 924 925 err = enic_dev_stats_dump(enic, &stats); 926 /* return only when dma_alloc_coherent fails in vnic_dev_stats_dump 927 * For other failures, like devcmd failure, we return previously 928 * recorded stats. 929 */ 930 if (err == -ENOMEM) 931 return; 932 933 net_stats->tx_packets = stats->tx.tx_frames_ok; 934 net_stats->tx_bytes = stats->tx.tx_bytes_ok; 935 net_stats->tx_errors = stats->tx.tx_errors; 936 net_stats->tx_dropped = stats->tx.tx_drops; 937 938 net_stats->rx_packets = stats->rx.rx_frames_ok; 939 net_stats->rx_bytes = stats->rx.rx_bytes_ok; 940 net_stats->rx_errors = stats->rx.rx_errors; 941 net_stats->multicast = stats->rx.rx_multicast_frames_ok; 942 943 for (i = 0; i < ENIC_RQ_MAX; i++) { 944 struct enic_rq_stats *rqs = &enic->rq_stats[i]; 945 946 if (!enic->rq->ctrl) 947 break; 948 pkt_truncated += rqs->pkt_truncated; 949 bad_fcs += rqs->bad_fcs; 950 } 951 net_stats->rx_over_errors = pkt_truncated; 952 net_stats->rx_crc_errors = bad_fcs; 953 net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop; 954 } 955 956 static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr) 957 { 958 struct enic *enic = netdev_priv(netdev); 959 960 if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) { 961 unsigned int mc_count = netdev_mc_count(netdev); 962 963 netdev_warn(netdev, "Registering only %d out of %d multicast addresses\n", 964 ENIC_MULTICAST_PERFECT_FILTERS, mc_count); 965 966 return -ENOSPC; 967 } 968 969 enic_dev_add_addr(enic, mc_addr); 970 enic->mc_count++; 971 972 return 0; 973 } 974 975 static int enic_mc_unsync(struct net_device *netdev, const u8 *mc_addr) 976 { 977 struct enic *enic = netdev_priv(netdev); 978 979 enic_dev_del_addr(enic, mc_addr); 980 enic->mc_count--; 981 982 return 0; 983 } 984 985 static int enic_uc_sync(struct net_device *netdev, const u8 *uc_addr) 986 { 987 struct enic *enic = netdev_priv(netdev); 988 989 if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) { 990 unsigned int uc_count = netdev_uc_count(netdev); 991 992 netdev_warn(netdev, "Registering only %d out of %d unicast addresses\n", 993 ENIC_UNICAST_PERFECT_FILTERS, uc_count); 994 995 return -ENOSPC; 996 } 997 998 enic_dev_add_addr(enic, uc_addr); 999 enic->uc_count++; 1000 1001 return 0; 1002 } 1003 1004 static int enic_uc_unsync(struct net_device *netdev, const u8 *uc_addr) 1005 { 1006 struct enic *enic = netdev_priv(netdev); 1007 1008 enic_dev_del_addr(enic, uc_addr); 1009 enic->uc_count--; 1010 1011 return 0; 1012 } 1013 1014 void enic_reset_addr_lists(struct enic *enic) 1015 { 1016 struct net_device *netdev = enic->netdev; 1017 1018 __dev_uc_unsync(netdev, NULL); 1019 
__dev_mc_unsync(netdev, NULL); 1020 1021 enic->mc_count = 0; 1022 enic->uc_count = 0; 1023 enic->flags = 0; 1024 } 1025 1026 static int enic_set_mac_addr(struct net_device *netdev, char *addr) 1027 { 1028 struct enic *enic = netdev_priv(netdev); 1029 1030 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) { 1031 if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr)) 1032 return -EADDRNOTAVAIL; 1033 } else { 1034 if (!is_valid_ether_addr(addr)) 1035 return -EADDRNOTAVAIL; 1036 } 1037 1038 eth_hw_addr_set(netdev, addr); 1039 1040 return 0; 1041 } 1042 1043 static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p) 1044 { 1045 struct enic *enic = netdev_priv(netdev); 1046 struct sockaddr *saddr = p; 1047 char *addr = saddr->sa_data; 1048 int err; 1049 1050 if (netif_running(enic->netdev)) { 1051 err = enic_dev_del_station_addr(enic); 1052 if (err) 1053 return err; 1054 } 1055 1056 err = enic_set_mac_addr(netdev, addr); 1057 if (err) 1058 return err; 1059 1060 if (netif_running(enic->netdev)) { 1061 err = enic_dev_add_station_addr(enic); 1062 if (err) 1063 return err; 1064 } 1065 1066 return err; 1067 } 1068 1069 static int enic_set_mac_address(struct net_device *netdev, void *p) 1070 { 1071 struct sockaddr *saddr = p; 1072 char *addr = saddr->sa_data; 1073 struct enic *enic = netdev_priv(netdev); 1074 int err; 1075 1076 err = enic_dev_del_station_addr(enic); 1077 if (err) 1078 return err; 1079 1080 err = enic_set_mac_addr(netdev, addr); 1081 if (err) 1082 return err; 1083 1084 return enic_dev_add_station_addr(enic); 1085 } 1086 1087 /* netif_tx_lock held, BHs disabled */ 1088 static void enic_set_rx_mode(struct net_device *netdev) 1089 { 1090 struct enic *enic = netdev_priv(netdev); 1091 int directed = 1; 1092 int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0; 1093 int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0; 1094 int promisc = (netdev->flags & IFF_PROMISC) || 1095 netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS; 1096 int allmulti = (netdev->flags & IFF_ALLMULTI) || 1097 netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS; 1098 unsigned int flags = netdev->flags | 1099 (allmulti ? IFF_ALLMULTI : 0) | 1100 (promisc ? 
IFF_PROMISC : 0); 1101 1102 if (enic->flags != flags) { 1103 enic->flags = flags; 1104 enic_dev_packet_filter(enic, directed, 1105 multicast, broadcast, promisc, allmulti); 1106 } 1107 1108 if (!promisc) { 1109 __dev_uc_sync(netdev, enic_uc_sync, enic_uc_unsync); 1110 if (!allmulti) 1111 __dev_mc_sync(netdev, enic_mc_sync, enic_mc_unsync); 1112 } 1113 } 1114 1115 /* netif_tx_lock held, BHs disabled */ 1116 static void enic_tx_timeout(struct net_device *netdev, unsigned int txqueue) 1117 { 1118 struct enic *enic = netdev_priv(netdev); 1119 schedule_work(&enic->tx_hang_reset); 1120 } 1121 1122 static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) 1123 { 1124 struct enic *enic = netdev_priv(netdev); 1125 struct enic_port_profile *pp; 1126 int err; 1127 1128 ENIC_PP_BY_INDEX(enic, vf, pp, &err); 1129 if (err) 1130 return err; 1131 1132 if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) { 1133 if (vf == PORT_SELF_VF) { 1134 memcpy(pp->vf_mac, mac, ETH_ALEN); 1135 return 0; 1136 } else { 1137 /* 1138 * For sriov vf's set the mac in hw 1139 */ 1140 ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, 1141 vnic_dev_set_mac_addr, mac); 1142 return enic_dev_status_to_errno(err); 1143 } 1144 } else 1145 return -EINVAL; 1146 } 1147 1148 static int enic_set_vf_port(struct net_device *netdev, int vf, 1149 struct nlattr *port[]) 1150 { 1151 static const u8 zero_addr[ETH_ALEN] = {}; 1152 struct enic *enic = netdev_priv(netdev); 1153 struct enic_port_profile prev_pp; 1154 struct enic_port_profile *pp; 1155 int err = 0, restore_pp = 1; 1156 1157 ENIC_PP_BY_INDEX(enic, vf, pp, &err); 1158 if (err) 1159 return err; 1160 1161 if (!port[IFLA_PORT_REQUEST]) 1162 return -EOPNOTSUPP; 1163 1164 memcpy(&prev_pp, pp, sizeof(*enic->pp)); 1165 memset(pp, 0, sizeof(*enic->pp)); 1166 1167 pp->set |= ENIC_SET_REQUEST; 1168 pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]); 1169 1170 if (port[IFLA_PORT_PROFILE]) { 1171 if (nla_len(port[IFLA_PORT_PROFILE]) != PORT_PROFILE_MAX) { 1172 memcpy(pp, &prev_pp, sizeof(*pp)); 1173 return -EINVAL; 1174 } 1175 pp->set |= ENIC_SET_NAME; 1176 memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]), 1177 PORT_PROFILE_MAX); 1178 } 1179 1180 if (port[IFLA_PORT_INSTANCE_UUID]) { 1181 if (nla_len(port[IFLA_PORT_INSTANCE_UUID]) != PORT_UUID_MAX) { 1182 memcpy(pp, &prev_pp, sizeof(*pp)); 1183 return -EINVAL; 1184 } 1185 pp->set |= ENIC_SET_INSTANCE; 1186 memcpy(pp->instance_uuid, 1187 nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX); 1188 } 1189 1190 if (port[IFLA_PORT_HOST_UUID]) { 1191 if (nla_len(port[IFLA_PORT_HOST_UUID]) != PORT_UUID_MAX) { 1192 memcpy(pp, &prev_pp, sizeof(*pp)); 1193 return -EINVAL; 1194 } 1195 pp->set |= ENIC_SET_HOST; 1196 memcpy(pp->host_uuid, 1197 nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX); 1198 } 1199 1200 if (vf == PORT_SELF_VF) { 1201 /* Special case handling: mac came from IFLA_VF_MAC */ 1202 if (!is_zero_ether_addr(prev_pp.vf_mac)) 1203 memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN); 1204 1205 if (is_zero_ether_addr(netdev->dev_addr)) 1206 eth_hw_addr_random(netdev); 1207 } else { 1208 /* SR-IOV VF: get mac from adapter */ 1209 ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, 1210 vnic_dev_get_mac_addr, pp->mac_addr); 1211 if (err) { 1212 netdev_err(netdev, "Error getting mac for vf %d\n", vf); 1213 memcpy(pp, &prev_pp, sizeof(*pp)); 1214 return enic_dev_status_to_errno(err); 1215 } 1216 } 1217 1218 err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp); 1219 if (err) { 1220 if (restore_pp) { 1221 /* Things are still the way they 
were: Implicit 1222 * DISASSOCIATE failed 1223 */ 1224 memcpy(pp, &prev_pp, sizeof(*pp)); 1225 } else { 1226 memset(pp, 0, sizeof(*pp)); 1227 if (vf == PORT_SELF_VF) 1228 eth_hw_addr_set(netdev, zero_addr); 1229 } 1230 } else { 1231 /* Set flag to indicate that the port assoc/disassoc 1232 * request has been sent out to fw 1233 */ 1234 pp->set |= ENIC_PORT_REQUEST_APPLIED; 1235 1236 /* If DISASSOCIATE, clean up all assigned/saved macaddresses */ 1237 if (pp->request == PORT_REQUEST_DISASSOCIATE) { 1238 eth_zero_addr(pp->mac_addr); 1239 if (vf == PORT_SELF_VF) 1240 eth_hw_addr_set(netdev, zero_addr); 1241 } 1242 } 1243 1244 if (vf == PORT_SELF_VF) 1245 eth_zero_addr(pp->vf_mac); 1246 1247 return err; 1248 } 1249 1250 static int enic_get_vf_port(struct net_device *netdev, int vf, 1251 struct sk_buff *skb) 1252 { 1253 struct enic *enic = netdev_priv(netdev); 1254 u16 response = PORT_PROFILE_RESPONSE_SUCCESS; 1255 struct enic_port_profile *pp; 1256 int err; 1257 1258 ENIC_PP_BY_INDEX(enic, vf, pp, &err); 1259 if (err) 1260 return err; 1261 1262 if (!(pp->set & ENIC_PORT_REQUEST_APPLIED)) 1263 return -ENODATA; 1264 1265 err = enic_process_get_pp_request(enic, vf, pp->request, &response); 1266 if (err) 1267 return err; 1268 1269 if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) || 1270 nla_put_u16(skb, IFLA_PORT_RESPONSE, response) || 1271 ((pp->set & ENIC_SET_NAME) && 1272 nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) || 1273 ((pp->set & ENIC_SET_INSTANCE) && 1274 nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX, 1275 pp->instance_uuid)) || 1276 ((pp->set & ENIC_SET_HOST) && 1277 nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid))) 1278 goto nla_put_failure; 1279 return 0; 1280 1281 nla_put_failure: 1282 return -EMSGSIZE; 1283 } 1284 1285 static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) 1286 { 1287 struct enic *enic = vnic_dev_priv(rq->vdev); 1288 1289 if (!buf->os_buf) 1290 return; 1291 1292 dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len, 1293 DMA_FROM_DEVICE); 1294 dev_kfree_skb_any(buf->os_buf); 1295 buf->os_buf = NULL; 1296 } 1297 1298 static int enic_rq_alloc_buf(struct vnic_rq *rq) 1299 { 1300 struct enic *enic = vnic_dev_priv(rq->vdev); 1301 struct net_device *netdev = enic->netdev; 1302 struct sk_buff *skb; 1303 unsigned int len = netdev->mtu + VLAN_ETH_HLEN; 1304 unsigned int os_buf_index = 0; 1305 dma_addr_t dma_addr; 1306 struct vnic_rq_buf *buf = rq->to_use; 1307 1308 if (buf->os_buf) { 1309 enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr, 1310 buf->len); 1311 1312 return 0; 1313 } 1314 skb = netdev_alloc_skb_ip_align(netdev, len); 1315 if (!skb) { 1316 enic->rq_stats[rq->index].no_skb++; 1317 return -ENOMEM; 1318 } 1319 1320 dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len, 1321 DMA_FROM_DEVICE); 1322 if (unlikely(enic_dma_map_check(enic, dma_addr))) { 1323 dev_kfree_skb(skb); 1324 return -ENOMEM; 1325 } 1326 1327 enic_queue_rq_desc(rq, skb, os_buf_index, 1328 dma_addr, len); 1329 1330 return 0; 1331 } 1332 1333 static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size, 1334 u32 pkt_len) 1335 { 1336 if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len) 1337 pkt_size->large_pkt_bytes_cnt += pkt_len; 1338 else 1339 pkt_size->small_pkt_bytes_cnt += pkt_len; 1340 } 1341 1342 static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb, 1343 struct vnic_rq_buf *buf, u16 len) 1344 { 1345 struct enic *enic = netdev_priv(netdev); 1346 struct sk_buff *new_skb; 1347 1348 if 
(len > enic->rx_copybreak) 1349 return false; 1350 new_skb = netdev_alloc_skb_ip_align(netdev, len); 1351 if (!new_skb) 1352 return false; 1353 dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len, 1354 DMA_FROM_DEVICE); 1355 memcpy(new_skb->data, (*skb)->data, len); 1356 *skb = new_skb; 1357 1358 return true; 1359 } 1360 1361 static void enic_rq_indicate_buf(struct vnic_rq *rq, 1362 struct cq_desc *cq_desc, struct vnic_rq_buf *buf, 1363 int skipped, void *opaque) 1364 { 1365 struct enic *enic = vnic_dev_priv(rq->vdev); 1366 struct net_device *netdev = enic->netdev; 1367 struct sk_buff *skb; 1368 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; 1369 struct enic_rq_stats *rqstats = &enic->rq_stats[rq->index]; 1370 1371 u8 type, color, eop, sop, ingress_port, vlan_stripped; 1372 u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof; 1373 u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; 1374 u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc; 1375 u8 packet_error; 1376 u16 q_number, completed_index, bytes_written, vlan_tci, checksum; 1377 u32 rss_hash; 1378 bool outer_csum_ok = true, encap = false; 1379 1380 rqstats->packets++; 1381 if (skipped) { 1382 rqstats->desc_skip++; 1383 return; 1384 } 1385 1386 skb = buf->os_buf; 1387 1388 cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, 1389 &type, &color, &q_number, &completed_index, 1390 &ingress_port, &fcoe, &eop, &sop, &rss_type, 1391 &csum_not_calc, &rss_hash, &bytes_written, 1392 &packet_error, &vlan_stripped, &vlan_tci, &checksum, 1393 &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error, 1394 &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp, 1395 &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment, 1396 &fcs_ok); 1397 1398 if (packet_error) { 1399 1400 if (!fcs_ok) { 1401 if (bytes_written > 0) 1402 rqstats->bad_fcs++; 1403 else if (bytes_written == 0) 1404 rqstats->pkt_truncated++; 1405 } 1406 1407 dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len, 1408 DMA_FROM_DEVICE); 1409 dev_kfree_skb_any(skb); 1410 buf->os_buf = NULL; 1411 1412 return; 1413 } 1414 1415 if (eop && bytes_written > 0) { 1416 1417 /* Good receive 1418 */ 1419 rqstats->bytes += bytes_written; 1420 if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) { 1421 buf->os_buf = NULL; 1422 dma_unmap_single(&enic->pdev->dev, buf->dma_addr, 1423 buf->len, DMA_FROM_DEVICE); 1424 } 1425 prefetch(skb->data - NET_IP_ALIGN); 1426 1427 skb_put(skb, bytes_written); 1428 skb->protocol = eth_type_trans(skb, netdev); 1429 skb_record_rx_queue(skb, q_number); 1430 if ((netdev->features & NETIF_F_RXHASH) && rss_hash && 1431 (type == 3)) { 1432 switch (rss_type) { 1433 case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4: 1434 case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6: 1435 case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX: 1436 skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4); 1437 rqstats->l4_rss_hash++; 1438 break; 1439 case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4: 1440 case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6: 1441 case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX: 1442 skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3); 1443 rqstats->l3_rss_hash++; 1444 break; 1445 } 1446 } 1447 if (enic->vxlan.vxlan_udp_port_number) { 1448 switch (enic->vxlan.patch_level) { 1449 case 0: 1450 if (fcoe) { 1451 encap = true; 1452 outer_csum_ok = fcoe_fc_crc_ok; 1453 } 1454 break; 1455 case 2: 1456 if ((type == 7) && 1457 (rss_hash & BIT(0))) { 1458 encap = true; 1459 outer_csum_ok = (rss_hash & BIT(1)) && 1460 (rss_hash & BIT(2)); 1461 } 1462 break; 1463 } 1464 } 1465 1466 /* Hardware does not provide whole packet checksum. 
It only 1467 * provides pseudo checksum. Since hw validates the packet 1468 * checksum but not provide us the checksum value. use 1469 * CHECSUM_UNNECESSARY. 1470 * 1471 * In case of encap pkt tcp_udp_csum_ok/tcp_udp_csum_ok is 1472 * inner csum_ok. outer_csum_ok is set by hw when outer udp 1473 * csum is correct or is zero. 1474 */ 1475 if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc && 1476 tcp_udp_csum_ok && outer_csum_ok && 1477 (ipv4_csum_ok || ipv6)) { 1478 skb->ip_summed = CHECKSUM_UNNECESSARY; 1479 skb->csum_level = encap; 1480 if (encap) 1481 rqstats->csum_unnecessary_encap++; 1482 else 1483 rqstats->csum_unnecessary++; 1484 } 1485 1486 if (vlan_stripped) { 1487 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); 1488 rqstats->vlan_stripped++; 1489 } 1490 skb_mark_napi_id(skb, &enic->napi[rq->index]); 1491 if (!(netdev->features & NETIF_F_GRO)) 1492 netif_receive_skb(skb); 1493 else 1494 napi_gro_receive(&enic->napi[q_number], skb); 1495 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) 1496 enic_intr_update_pkt_size(&cq->pkt_size_counter, 1497 bytes_written); 1498 } else { 1499 1500 /* Buffer overflow 1501 */ 1502 rqstats->pkt_truncated++; 1503 dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len, 1504 DMA_FROM_DEVICE); 1505 dev_kfree_skb_any(skb); 1506 buf->os_buf = NULL; 1507 } 1508 } 1509 1510 static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, 1511 u8 type, u16 q_number, u16 completed_index, void *opaque) 1512 { 1513 struct enic *enic = vnic_dev_priv(vdev); 1514 1515 vnic_rq_service(&enic->rq[q_number], cq_desc, 1516 completed_index, VNIC_RQ_RETURN_DESC, 1517 enic_rq_indicate_buf, opaque); 1518 1519 return 0; 1520 } 1521 1522 static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq) 1523 { 1524 unsigned int intr = enic_msix_rq_intr(enic, rq->index); 1525 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; 1526 u32 timer = cq->tobe_rx_coal_timeval; 1527 1528 if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) { 1529 vnic_intr_coalescing_timer_set(&enic->intr[intr], timer); 1530 cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval; 1531 } 1532 } 1533 1534 static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq) 1535 { 1536 struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; 1537 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; 1538 struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter; 1539 int index; 1540 u32 timer; 1541 u32 range_start; 1542 u32 traffic; 1543 u64 delta; 1544 ktime_t now = ktime_get(); 1545 1546 delta = ktime_us_delta(now, cq->prev_ts); 1547 if (delta < ENIC_AIC_TS_BREAK) 1548 return; 1549 cq->prev_ts = now; 1550 1551 traffic = pkt_size_counter->large_pkt_bytes_cnt + 1552 pkt_size_counter->small_pkt_bytes_cnt; 1553 /* The table takes Mbps 1554 * traffic *= 8 => bits 1555 * traffic *= (10^6 / delta) => bps 1556 * traffic /= 10^6 => Mbps 1557 * 1558 * Combining, traffic *= (8 / delta) 1559 */ 1560 1561 traffic <<= 3; 1562 traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta; 1563 1564 for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++) 1565 if (traffic < mod_table[index].rx_rate) 1566 break; 1567 range_start = (pkt_size_counter->small_pkt_bytes_cnt > 1568 pkt_size_counter->large_pkt_bytes_cnt << 1) ? 
1569 rx_coal->small_pkt_range_start : 1570 rx_coal->large_pkt_range_start; 1571 timer = range_start + ((rx_coal->range_end - range_start) * 1572 mod_table[index].range_percent / 100); 1573 /* Damping */ 1574 cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1; 1575 1576 pkt_size_counter->large_pkt_bytes_cnt = 0; 1577 pkt_size_counter->small_pkt_bytes_cnt = 0; 1578 } 1579 1580 static int enic_poll(struct napi_struct *napi, int budget) 1581 { 1582 struct net_device *netdev = napi->dev; 1583 struct enic *enic = netdev_priv(netdev); 1584 unsigned int cq_rq = enic_cq_rq(enic, 0); 1585 unsigned int cq_wq = enic_cq_wq(enic, 0); 1586 unsigned int intr = ENIC_LEGACY_IO_INTR; 1587 unsigned int rq_work_to_do = budget; 1588 unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET; 1589 unsigned int work_done, rq_work_done = 0, wq_work_done; 1590 int err; 1591 1592 wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do, 1593 enic_wq_service, NULL); 1594 1595 if (budget > 0) 1596 rq_work_done = vnic_cq_service(&enic->cq[cq_rq], 1597 rq_work_to_do, enic_rq_service, NULL); 1598 1599 /* Accumulate intr event credits for this polling 1600 * cycle. An intr event is the completion of a 1601 * a WQ or RQ packet. 1602 */ 1603 1604 work_done = rq_work_done + wq_work_done; 1605 1606 if (work_done > 0) 1607 vnic_intr_return_credits(&enic->intr[intr], 1608 work_done, 1609 0 /* don't unmask intr */, 1610 0 /* don't reset intr timer */); 1611 1612 err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); 1613 1614 /* Buffer allocation failed. Stay in polling 1615 * mode so we can try to fill the ring again. 1616 */ 1617 1618 if (err) 1619 rq_work_done = rq_work_to_do; 1620 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) 1621 /* Call the function which refreshes the intr coalescing timer 1622 * value based on the traffic. 
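	 * Once this poll cycle completes, enic_set_int_moderation() below
	 * programs the recalculated value via
	 * vnic_intr_coalescing_timer_set().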
1623 */ 1624 enic_calc_int_moderation(enic, &enic->rq[0]); 1625 1626 if ((rq_work_done < budget) && napi_complete_done(napi, rq_work_done)) { 1627 1628 /* Some work done, but not enough to stay in polling, 1629 * exit polling 1630 */ 1631 1632 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) 1633 enic_set_int_moderation(enic, &enic->rq[0]); 1634 vnic_intr_unmask(&enic->intr[intr]); 1635 enic->rq_stats[0].napi_complete++; 1636 } else { 1637 enic->rq_stats[0].napi_repoll++; 1638 } 1639 1640 return rq_work_done; 1641 } 1642 1643 #ifdef CONFIG_RFS_ACCEL 1644 static void enic_free_rx_cpu_rmap(struct enic *enic) 1645 { 1646 free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap); 1647 enic->netdev->rx_cpu_rmap = NULL; 1648 } 1649 1650 static void enic_set_rx_cpu_rmap(struct enic *enic) 1651 { 1652 int i, res; 1653 1654 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) { 1655 enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count); 1656 if (unlikely(!enic->netdev->rx_cpu_rmap)) 1657 return; 1658 for (i = 0; i < enic->rq_count; i++) { 1659 res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap, 1660 enic->msix_entry[i].vector); 1661 if (unlikely(res)) { 1662 enic_free_rx_cpu_rmap(enic); 1663 return; 1664 } 1665 } 1666 } 1667 } 1668 1669 #else 1670 1671 static void enic_free_rx_cpu_rmap(struct enic *enic) 1672 { 1673 } 1674 1675 static void enic_set_rx_cpu_rmap(struct enic *enic) 1676 { 1677 } 1678 1679 #endif /* CONFIG_RFS_ACCEL */ 1680 1681 static int enic_poll_msix_wq(struct napi_struct *napi, int budget) 1682 { 1683 struct net_device *netdev = napi->dev; 1684 struct enic *enic = netdev_priv(netdev); 1685 unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count; 1686 struct vnic_wq *wq = &enic->wq[wq_index]; 1687 unsigned int cq; 1688 unsigned int intr; 1689 unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET; 1690 unsigned int wq_work_done; 1691 unsigned int wq_irq; 1692 1693 wq_irq = wq->index; 1694 cq = enic_cq_wq(enic, wq_irq); 1695 intr = enic_msix_wq_intr(enic, wq_irq); 1696 wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do, 1697 enic_wq_service, NULL); 1698 1699 vnic_intr_return_credits(&enic->intr[intr], wq_work_done, 1700 0 /* don't unmask intr */, 1701 1 /* reset intr timer */); 1702 if (!wq_work_done) { 1703 napi_complete(napi); 1704 vnic_intr_unmask(&enic->intr[intr]); 1705 return 0; 1706 } 1707 1708 return budget; 1709 } 1710 1711 static int enic_poll_msix_rq(struct napi_struct *napi, int budget) 1712 { 1713 struct net_device *netdev = napi->dev; 1714 struct enic *enic = netdev_priv(netdev); 1715 unsigned int rq = (napi - &enic->napi[0]); 1716 unsigned int cq = enic_cq_rq(enic, rq); 1717 unsigned int intr = enic_msix_rq_intr(enic, rq); 1718 unsigned int work_to_do = budget; 1719 unsigned int work_done = 0; 1720 int err; 1721 1722 /* Service RQ 1723 */ 1724 1725 if (budget > 0) 1726 work_done = vnic_cq_service(&enic->cq[cq], 1727 work_to_do, enic_rq_service, NULL); 1728 1729 /* Return intr event credits for this polling 1730 * cycle. An intr event is the completion of a 1731 * RQ packet. 1732 */ 1733 1734 if (work_done > 0) 1735 vnic_intr_return_credits(&enic->intr[intr], 1736 work_done, 1737 0 /* don't unmask intr */, 1738 0 /* don't reset intr timer */); 1739 1740 err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf); 1741 1742 /* Buffer allocation failed. Stay in polling mode 1743 * so we can try to fill the ring again. 
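	 * Reporting the full budget below keeps napi_complete_done() from
	 * running, so napi continues to poll this queue.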
1744 */ 1745 1746 if (err) 1747 work_done = work_to_do; 1748 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) 1749 /* Call the function which refreshes the intr coalescing timer 1750 * value based on the traffic. 1751 */ 1752 enic_calc_int_moderation(enic, &enic->rq[rq]); 1753 1754 if ((work_done < budget) && napi_complete_done(napi, work_done)) { 1755 1756 /* Some work done, but not enough to stay in polling, 1757 * exit polling 1758 */ 1759 1760 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) 1761 enic_set_int_moderation(enic, &enic->rq[rq]); 1762 vnic_intr_unmask(&enic->intr[intr]); 1763 enic->rq_stats[rq].napi_complete++; 1764 } else { 1765 enic->rq_stats[rq].napi_repoll++; 1766 } 1767 1768 return work_done; 1769 } 1770 1771 static void enic_notify_timer(struct timer_list *t) 1772 { 1773 struct enic *enic = from_timer(enic, t, notify_timer); 1774 1775 enic_notify_check(enic); 1776 1777 mod_timer(&enic->notify_timer, 1778 round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD)); 1779 } 1780 1781 static void enic_free_intr(struct enic *enic) 1782 { 1783 struct net_device *netdev = enic->netdev; 1784 unsigned int i; 1785 1786 enic_free_rx_cpu_rmap(enic); 1787 switch (vnic_dev_get_intr_mode(enic->vdev)) { 1788 case VNIC_DEV_INTR_MODE_INTX: 1789 free_irq(enic->pdev->irq, netdev); 1790 break; 1791 case VNIC_DEV_INTR_MODE_MSI: 1792 free_irq(enic->pdev->irq, enic); 1793 break; 1794 case VNIC_DEV_INTR_MODE_MSIX: 1795 for (i = 0; i < ARRAY_SIZE(enic->msix); i++) 1796 if (enic->msix[i].requested) 1797 free_irq(enic->msix_entry[i].vector, 1798 enic->msix[i].devid); 1799 break; 1800 default: 1801 break; 1802 } 1803 } 1804 1805 static int enic_request_intr(struct enic *enic) 1806 { 1807 struct net_device *netdev = enic->netdev; 1808 unsigned int i, intr; 1809 int err = 0; 1810 1811 enic_set_rx_cpu_rmap(enic); 1812 switch (vnic_dev_get_intr_mode(enic->vdev)) { 1813 1814 case VNIC_DEV_INTR_MODE_INTX: 1815 1816 err = request_irq(enic->pdev->irq, enic_isr_legacy, 1817 IRQF_SHARED, netdev->name, netdev); 1818 break; 1819 1820 case VNIC_DEV_INTR_MODE_MSI: 1821 1822 err = request_irq(enic->pdev->irq, enic_isr_msi, 1823 0, netdev->name, enic); 1824 break; 1825 1826 case VNIC_DEV_INTR_MODE_MSIX: 1827 1828 for (i = 0; i < enic->rq_count; i++) { 1829 intr = enic_msix_rq_intr(enic, i); 1830 snprintf(enic->msix[intr].devname, 1831 sizeof(enic->msix[intr].devname), 1832 "%s-rx-%u", netdev->name, i); 1833 enic->msix[intr].isr = enic_isr_msix; 1834 enic->msix[intr].devid = &enic->napi[i]; 1835 } 1836 1837 for (i = 0; i < enic->wq_count; i++) { 1838 int wq = enic_cq_wq(enic, i); 1839 1840 intr = enic_msix_wq_intr(enic, i); 1841 snprintf(enic->msix[intr].devname, 1842 sizeof(enic->msix[intr].devname), 1843 "%s-tx-%u", netdev->name, i); 1844 enic->msix[intr].isr = enic_isr_msix; 1845 enic->msix[intr].devid = &enic->napi[wq]; 1846 } 1847 1848 intr = enic_msix_err_intr(enic); 1849 snprintf(enic->msix[intr].devname, 1850 sizeof(enic->msix[intr].devname), 1851 "%s-err", netdev->name); 1852 enic->msix[intr].isr = enic_isr_msix_err; 1853 enic->msix[intr].devid = enic; 1854 1855 intr = enic_msix_notify_intr(enic); 1856 snprintf(enic->msix[intr].devname, 1857 sizeof(enic->msix[intr].devname), 1858 "%s-notify", netdev->name); 1859 enic->msix[intr].isr = enic_isr_msix_notify; 1860 enic->msix[intr].devid = enic; 1861 1862 for (i = 0; i < ARRAY_SIZE(enic->msix); i++) 1863 enic->msix[i].requested = 0; 1864 1865 for (i = 0; i < enic->intr_count; i++) { 1866 err = request_irq(enic->msix_entry[i].vector, 1867 
enic->msix[i].isr, 0, 1868 enic->msix[i].devname, 1869 enic->msix[i].devid); 1870 if (err) { 1871 enic_free_intr(enic); 1872 break; 1873 } 1874 enic->msix[i].requested = 1; 1875 } 1876 1877 break; 1878 1879 default: 1880 break; 1881 } 1882 1883 return err; 1884 } 1885 1886 static void enic_synchronize_irqs(struct enic *enic) 1887 { 1888 unsigned int i; 1889 1890 switch (vnic_dev_get_intr_mode(enic->vdev)) { 1891 case VNIC_DEV_INTR_MODE_INTX: 1892 case VNIC_DEV_INTR_MODE_MSI: 1893 synchronize_irq(enic->pdev->irq); 1894 break; 1895 case VNIC_DEV_INTR_MODE_MSIX: 1896 for (i = 0; i < enic->intr_count; i++) 1897 synchronize_irq(enic->msix_entry[i].vector); 1898 break; 1899 default: 1900 break; 1901 } 1902 } 1903 1904 static void enic_set_rx_coal_setting(struct enic *enic) 1905 { 1906 unsigned int speed; 1907 int index = -1; 1908 struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; 1909 1910 /* 1. Read the link speed from fw 1911 * 2. Pick the default range for the speed 1912 * 3. Update it in enic->rx_coalesce_setting 1913 */ 1914 speed = vnic_dev_port_speed(enic->vdev); 1915 if (ENIC_LINK_SPEED_10G < speed) 1916 index = ENIC_LINK_40G_INDEX; 1917 else if (ENIC_LINK_SPEED_4G < speed) 1918 index = ENIC_LINK_10G_INDEX; 1919 else 1920 index = ENIC_LINK_4G_INDEX; 1921 1922 rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start; 1923 rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start; 1924 rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END; 1925 1926 /* Start with the value provided by UCSM */ 1927 for (index = 0; index < enic->rq_count; index++) 1928 enic->cq[index].cur_rx_coal_timeval = 1929 enic->config.intr_timer_usec; 1930 1931 rx_coal->use_adaptive_rx_coalesce = 1; 1932 } 1933 1934 static int enic_dev_notify_set(struct enic *enic) 1935 { 1936 int err; 1937 1938 spin_lock_bh(&enic->devcmd_lock); 1939 switch (vnic_dev_get_intr_mode(enic->vdev)) { 1940 case VNIC_DEV_INTR_MODE_INTX: 1941 err = vnic_dev_notify_set(enic->vdev, ENIC_LEGACY_NOTIFY_INTR); 1942 break; 1943 case VNIC_DEV_INTR_MODE_MSIX: 1944 err = vnic_dev_notify_set(enic->vdev, 1945 enic_msix_notify_intr(enic)); 1946 break; 1947 default: 1948 err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */); 1949 break; 1950 } 1951 spin_unlock_bh(&enic->devcmd_lock); 1952 1953 return err; 1954 } 1955 1956 static void enic_notify_timer_start(struct enic *enic) 1957 { 1958 switch (vnic_dev_get_intr_mode(enic->vdev)) { 1959 case VNIC_DEV_INTR_MODE_MSI: 1960 mod_timer(&enic->notify_timer, jiffies); 1961 break; 1962 default: 1963 /* Using intr for notification for INTx/MSI-X */ 1964 break; 1965 } 1966 } 1967 1968 /* rtnl lock is held, process context */ 1969 static int enic_open(struct net_device *netdev) 1970 { 1971 struct enic *enic = netdev_priv(netdev); 1972 unsigned int i; 1973 int err, ret; 1974 1975 err = enic_request_intr(enic); 1976 if (err) { 1977 netdev_err(netdev, "Unable to request irq.\n"); 1978 return err; 1979 } 1980 enic_init_affinity_hint(enic); 1981 enic_set_affinity_hint(enic); 1982 1983 err = enic_dev_notify_set(enic); 1984 if (err) { 1985 netdev_err(netdev, 1986 "Failed to alloc notify buffer, aborting.\n"); 1987 goto err_out_free_intr; 1988 } 1989 1990 for (i = 0; i < enic->rq_count; i++) { 1991 /* enable rq before updating rq desc */ 1992 vnic_rq_enable(&enic->rq[i]); 1993 vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf); 1994 /* Need at least one buffer on ring to get going */ 1995 if (vnic_rq_desc_used(&enic->rq[i]) == 0) { 1996 netdev_err(netdev, "Unable to alloc receive buffers\n"); 1997 
err = -ENOMEM; 1998 goto err_out_free_rq; 1999 } 2000 } 2001 2002 for (i = 0; i < enic->wq_count; i++) 2003 vnic_wq_enable(&enic->wq[i]); 2004 2005 if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) 2006 enic_dev_add_station_addr(enic); 2007 2008 enic_set_rx_mode(netdev); 2009 2010 netif_tx_wake_all_queues(netdev); 2011 2012 for (i = 0; i < enic->rq_count; i++) 2013 napi_enable(&enic->napi[i]); 2014 2015 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) 2016 for (i = 0; i < enic->wq_count; i++) 2017 napi_enable(&enic->napi[enic_cq_wq(enic, i)]); 2018 enic_dev_enable(enic); 2019 2020 for (i = 0; i < enic->intr_count; i++) 2021 vnic_intr_unmask(&enic->intr[i]); 2022 2023 enic_notify_timer_start(enic); 2024 enic_rfs_timer_start(enic); 2025 2026 return 0; 2027 2028 err_out_free_rq: 2029 for (i = 0; i < enic->rq_count; i++) { 2030 ret = vnic_rq_disable(&enic->rq[i]); 2031 if (!ret) 2032 vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); 2033 } 2034 enic_dev_notify_unset(enic); 2035 err_out_free_intr: 2036 enic_unset_affinity_hint(enic); 2037 enic_free_intr(enic); 2038 2039 return err; 2040 } 2041 2042 /* rtnl lock is held, process context */ 2043 static int enic_stop(struct net_device *netdev) 2044 { 2045 struct enic *enic = netdev_priv(netdev); 2046 unsigned int i; 2047 int err; 2048 2049 for (i = 0; i < enic->intr_count; i++) { 2050 vnic_intr_mask(&enic->intr[i]); 2051 (void)vnic_intr_masked(&enic->intr[i]); /* flush write */ 2052 } 2053 2054 enic_synchronize_irqs(enic); 2055 2056 del_timer_sync(&enic->notify_timer); 2057 enic_rfs_flw_tbl_free(enic); 2058 2059 enic_dev_disable(enic); 2060 2061 for (i = 0; i < enic->rq_count; i++) 2062 napi_disable(&enic->napi[i]); 2063 2064 netif_carrier_off(netdev); 2065 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) 2066 for (i = 0; i < enic->wq_count; i++) 2067 napi_disable(&enic->napi[enic_cq_wq(enic, i)]); 2068 netif_tx_disable(netdev); 2069 2070 if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) 2071 enic_dev_del_station_addr(enic); 2072 2073 for (i = 0; i < enic->wq_count; i++) { 2074 err = vnic_wq_disable(&enic->wq[i]); 2075 if (err) 2076 return err; 2077 } 2078 for (i = 0; i < enic->rq_count; i++) { 2079 err = vnic_rq_disable(&enic->rq[i]); 2080 if (err) 2081 return err; 2082 } 2083 2084 enic_dev_notify_unset(enic); 2085 enic_unset_affinity_hint(enic); 2086 enic_free_intr(enic); 2087 2088 for (i = 0; i < enic->wq_count; i++) 2089 vnic_wq_clean(&enic->wq[i], enic_free_wq_buf); 2090 for (i = 0; i < enic->rq_count; i++) 2091 vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); 2092 for (i = 0; i < enic->cq_count; i++) 2093 vnic_cq_clean(&enic->cq[i]); 2094 for (i = 0; i < enic->intr_count; i++) 2095 vnic_intr_clean(&enic->intr[i]); 2096 2097 return 0; 2098 } 2099 2100 static int _enic_change_mtu(struct net_device *netdev, int new_mtu) 2101 { 2102 bool running = netif_running(netdev); 2103 int err = 0; 2104 2105 ASSERT_RTNL(); 2106 if (running) { 2107 err = enic_stop(netdev); 2108 if (err) 2109 return err; 2110 } 2111 2112 WRITE_ONCE(netdev->mtu, new_mtu); 2113 2114 if (running) { 2115 err = enic_open(netdev); 2116 if (err) 2117 return err; 2118 } 2119 2120 return 0; 2121 } 2122 2123 static int enic_change_mtu(struct net_device *netdev, int new_mtu) 2124 { 2125 struct enic *enic = netdev_priv(netdev); 2126 2127 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) 2128 return -EOPNOTSUPP; 2129 2130 if (netdev->mtu > enic->port_mtu) 2131 netdev_warn(netdev, 2132 "interface MTU (%d) set higher than port MTU (%d)\n", 2133 
netdev->mtu, enic->port_mtu); 2134 2135 return _enic_change_mtu(netdev, new_mtu); 2136 } 2137 2138 static void enic_change_mtu_work(struct work_struct *work) 2139 { 2140 struct enic *enic = container_of(work, struct enic, change_mtu_work); 2141 struct net_device *netdev = enic->netdev; 2142 int new_mtu = vnic_dev_mtu(enic->vdev); 2143 2144 rtnl_lock(); 2145 (void)_enic_change_mtu(netdev, new_mtu); 2146 rtnl_unlock(); 2147 2148 netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu); 2149 } 2150 2151 #ifdef CONFIG_NET_POLL_CONTROLLER 2152 static void enic_poll_controller(struct net_device *netdev) 2153 { 2154 struct enic *enic = netdev_priv(netdev); 2155 struct vnic_dev *vdev = enic->vdev; 2156 unsigned int i, intr; 2157 2158 switch (vnic_dev_get_intr_mode(vdev)) { 2159 case VNIC_DEV_INTR_MODE_MSIX: 2160 for (i = 0; i < enic->rq_count; i++) { 2161 intr = enic_msix_rq_intr(enic, i); 2162 enic_isr_msix(enic->msix_entry[intr].vector, 2163 &enic->napi[i]); 2164 } 2165 2166 for (i = 0; i < enic->wq_count; i++) { 2167 intr = enic_msix_wq_intr(enic, i); 2168 enic_isr_msix(enic->msix_entry[intr].vector, 2169 &enic->napi[enic_cq_wq(enic, i)]); 2170 } 2171 2172 break; 2173 case VNIC_DEV_INTR_MODE_MSI: 2174 enic_isr_msi(enic->pdev->irq, enic); 2175 break; 2176 case VNIC_DEV_INTR_MODE_INTX: 2177 enic_isr_legacy(enic->pdev->irq, netdev); 2178 break; 2179 default: 2180 break; 2181 } 2182 } 2183 #endif 2184 2185 static int enic_dev_wait(struct vnic_dev *vdev, 2186 int (*start)(struct vnic_dev *, int), 2187 int (*finished)(struct vnic_dev *, int *), 2188 int arg) 2189 { 2190 unsigned long time; 2191 int done; 2192 int err; 2193 2194 err = start(vdev, arg); 2195 if (err) 2196 return err; 2197 2198 /* Wait for func to complete...2 seconds max 2199 */ 2200 2201 time = jiffies + (HZ * 2); 2202 do { 2203 2204 err = finished(vdev, &done); 2205 if (err) 2206 return err; 2207 2208 if (done) 2209 return 0; 2210 2211 schedule_timeout_uninterruptible(HZ / 10); 2212 2213 } while (time_after(time, jiffies)); 2214 2215 return -ETIMEDOUT; 2216 } 2217 2218 static int enic_dev_open(struct enic *enic) 2219 { 2220 int err; 2221 u32 flags = CMD_OPENF_IG_DESCCACHE; 2222 2223 err = enic_dev_wait(enic->vdev, vnic_dev_open, 2224 vnic_dev_open_done, flags); 2225 if (err) 2226 dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n", 2227 err); 2228 2229 return err; 2230 } 2231 2232 static int enic_dev_soft_reset(struct enic *enic) 2233 { 2234 int err; 2235 2236 err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset, 2237 vnic_dev_soft_reset_done, 0); 2238 if (err) 2239 netdev_err(enic->netdev, "vNIC soft reset failed, err %d\n", 2240 err); 2241 2242 return err; 2243 } 2244 2245 static int enic_dev_hang_reset(struct enic *enic) 2246 { 2247 int err; 2248 2249 err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset, 2250 vnic_dev_hang_reset_done, 0); 2251 if (err) 2252 netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n", 2253 err); 2254 2255 return err; 2256 } 2257 2258 int __enic_set_rsskey(struct enic *enic) 2259 { 2260 union vnic_rss_key *rss_key_buf_va; 2261 dma_addr_t rss_key_buf_pa; 2262 int i, kidx, bidx, err; 2263 2264 rss_key_buf_va = dma_alloc_coherent(&enic->pdev->dev, 2265 sizeof(union vnic_rss_key), 2266 &rss_key_buf_pa, GFP_ATOMIC); 2267 if (!rss_key_buf_va) 2268 return -ENOMEM; 2269 2270 for (i = 0; i < ENIC_RSS_LEN; i++) { 2271 kidx = i / ENIC_RSS_BYTES_PER_KEY; 2272 bidx = i % ENIC_RSS_BYTES_PER_KEY; 2273 rss_key_buf_va->key[kidx].b[bidx] = enic->rss_key[i]; 2274 } 2275 spin_lock_bh(&enic->devcmd_lock); 
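	/* The key was staged above in a DMA-coherent buffer, byte by byte,
	 * following the key[kidx].b[bidx] layout of union vnic_rss_key.
	 * As with the other devcmd calls in this file, the firmware command
	 * itself is issued under devcmd_lock.
	 */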
2276 err = enic_set_rss_key(enic, 2277 rss_key_buf_pa, 2278 sizeof(union vnic_rss_key)); 2279 spin_unlock_bh(&enic->devcmd_lock); 2280 2281 dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_key), 2282 rss_key_buf_va, rss_key_buf_pa); 2283 2284 return err; 2285 } 2286 2287 static int enic_set_rsskey(struct enic *enic) 2288 { 2289 netdev_rss_key_fill(enic->rss_key, ENIC_RSS_LEN); 2290 2291 return __enic_set_rsskey(enic); 2292 } 2293 2294 static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits) 2295 { 2296 dma_addr_t rss_cpu_buf_pa; 2297 union vnic_rss_cpu *rss_cpu_buf_va = NULL; 2298 unsigned int i; 2299 int err; 2300 2301 rss_cpu_buf_va = dma_alloc_coherent(&enic->pdev->dev, 2302 sizeof(union vnic_rss_cpu), 2303 &rss_cpu_buf_pa, GFP_ATOMIC); 2304 if (!rss_cpu_buf_va) 2305 return -ENOMEM; 2306 2307 for (i = 0; i < (1 << rss_hash_bits); i++) 2308 (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count; 2309 2310 spin_lock_bh(&enic->devcmd_lock); 2311 err = enic_set_rss_cpu(enic, 2312 rss_cpu_buf_pa, 2313 sizeof(union vnic_rss_cpu)); 2314 spin_unlock_bh(&enic->devcmd_lock); 2315 2316 dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_cpu), 2317 rss_cpu_buf_va, rss_cpu_buf_pa); 2318 2319 return err; 2320 } 2321 2322 static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu, 2323 u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable) 2324 { 2325 const u8 tso_ipid_split_en = 0; 2326 const u8 ig_vlan_strip_en = 1; 2327 int err; 2328 2329 /* Enable VLAN tag stripping. 2330 */ 2331 2332 spin_lock_bh(&enic->devcmd_lock); 2333 err = enic_set_nic_cfg(enic, 2334 rss_default_cpu, rss_hash_type, 2335 rss_hash_bits, rss_base_cpu, 2336 rss_enable, tso_ipid_split_en, 2337 ig_vlan_strip_en); 2338 spin_unlock_bh(&enic->devcmd_lock); 2339 2340 return err; 2341 } 2342 2343 static int enic_set_rss_nic_cfg(struct enic *enic) 2344 { 2345 struct device *dev = enic_get_dev(enic); 2346 const u8 rss_default_cpu = 0; 2347 const u8 rss_hash_bits = 7; 2348 const u8 rss_base_cpu = 0; 2349 u8 rss_hash_type; 2350 int res; 2351 u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1); 2352 2353 spin_lock_bh(&enic->devcmd_lock); 2354 res = vnic_dev_capable_rss_hash_type(enic->vdev, &rss_hash_type); 2355 spin_unlock_bh(&enic->devcmd_lock); 2356 if (res) { 2357 /* defaults for old adapters 2358 */ 2359 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 | 2360 NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 | 2361 NIC_CFG_RSS_HASH_TYPE_IPV6 | 2362 NIC_CFG_RSS_HASH_TYPE_TCP_IPV6; 2363 } 2364 2365 if (rss_enable) { 2366 if (!enic_set_rsskey(enic)) { 2367 if (enic_set_rsscpu(enic, rss_hash_bits)) { 2368 rss_enable = 0; 2369 dev_warn(dev, "RSS disabled, " 2370 "Failed to set RSS cpu indirection table."); 2371 } 2372 } else { 2373 rss_enable = 0; 2374 dev_warn(dev, "RSS disabled, Failed to set RSS key.\n"); 2375 } 2376 } 2377 2378 return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type, 2379 rss_hash_bits, rss_base_cpu, rss_enable); 2380 } 2381 2382 static void enic_set_api_busy(struct enic *enic, bool busy) 2383 { 2384 spin_lock(&enic->enic_api_lock); 2385 enic->enic_api_busy = busy; 2386 spin_unlock(&enic->enic_api_lock); 2387 } 2388 2389 static void enic_reset(struct work_struct *work) 2390 { 2391 struct enic *enic = container_of(work, struct enic, reset); 2392 2393 if (!netif_running(enic->netdev)) 2394 return; 2395 2396 rtnl_lock(); 2397 2398 /* Stop any activity from infiniband */ 2399 enic_set_api_busy(enic, true); 2400 2401 enic_stop(enic->netdev); 2402 enic_dev_soft_reset(enic); 2403 
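	/* The soft reset discards the device-side runtime state, so rebuild
	 * it before reopening: address lists, vNIC resources, RSS
	 * configuration and the ingress VLAN rewrite mode.
	 */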
enic_reset_addr_lists(enic); 2404 enic_init_vnic_resources(enic); 2405 enic_set_rss_nic_cfg(enic); 2406 enic_dev_set_ig_vlan_rewrite_mode(enic); 2407 enic_open(enic->netdev); 2408 2409 /* Allow infiniband to fiddle with the device again */ 2410 enic_set_api_busy(enic, false); 2411 2412 call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev); 2413 2414 rtnl_unlock(); 2415 } 2416 2417 static void enic_tx_hang_reset(struct work_struct *work) 2418 { 2419 struct enic *enic = container_of(work, struct enic, tx_hang_reset); 2420 2421 rtnl_lock(); 2422 2423 /* Stop any activity from infiniband */ 2424 enic_set_api_busy(enic, true); 2425 2426 enic_dev_hang_notify(enic); 2427 enic_stop(enic->netdev); 2428 enic_dev_hang_reset(enic); 2429 enic_reset_addr_lists(enic); 2430 enic_init_vnic_resources(enic); 2431 enic_set_rss_nic_cfg(enic); 2432 enic_dev_set_ig_vlan_rewrite_mode(enic); 2433 enic_open(enic->netdev); 2434 2435 /* Allow infiniband to fiddle with the device again */ 2436 enic_set_api_busy(enic, false); 2437 2438 call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev); 2439 2440 rtnl_unlock(); 2441 } 2442 2443 static int enic_set_intr_mode(struct enic *enic) 2444 { 2445 unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX); 2446 unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX); 2447 unsigned int i; 2448 2449 /* Set interrupt mode (INTx, MSI, MSI-X) depending 2450 * on system capabilities. 2451 * 2452 * Try MSI-X first 2453 * 2454 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs 2455 * (the second to last INTR is used for WQ/RQ errors) 2456 * (the last INTR is used for notifications) 2457 */ 2458 2459 BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2); 2460 for (i = 0; i < n + m + 2; i++) 2461 enic->msix_entry[i].entry = i; 2462 2463 /* Use multiple RQs if RSS is enabled 2464 */ 2465 2466 if (ENIC_SETTING(enic, RSS) && 2467 enic->config.intr_mode < 1 && 2468 enic->rq_count >= n && 2469 enic->wq_count >= m && 2470 enic->cq_count >= n + m && 2471 enic->intr_count >= n + m + 2) { 2472 2473 if (pci_enable_msix_range(enic->pdev, enic->msix_entry, 2474 n + m + 2, n + m + 2) > 0) { 2475 2476 enic->rq_count = n; 2477 enic->wq_count = m; 2478 enic->cq_count = n + m; 2479 enic->intr_count = n + m + 2; 2480 2481 vnic_dev_set_intr_mode(enic->vdev, 2482 VNIC_DEV_INTR_MODE_MSIX); 2483 2484 return 0; 2485 } 2486 } 2487 2488 if (enic->config.intr_mode < 1 && 2489 enic->rq_count >= 1 && 2490 enic->wq_count >= m && 2491 enic->cq_count >= 1 + m && 2492 enic->intr_count >= 1 + m + 2) { 2493 if (pci_enable_msix_range(enic->pdev, enic->msix_entry, 2494 1 + m + 2, 1 + m + 2) > 0) { 2495 2496 enic->rq_count = 1; 2497 enic->wq_count = m; 2498 enic->cq_count = 1 + m; 2499 enic->intr_count = 1 + m + 2; 2500 2501 vnic_dev_set_intr_mode(enic->vdev, 2502 VNIC_DEV_INTR_MODE_MSIX); 2503 2504 return 0; 2505 } 2506 } 2507 2508 /* Next try MSI 2509 * 2510 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR 2511 */ 2512 2513 if (enic->config.intr_mode < 2 && 2514 enic->rq_count >= 1 && 2515 enic->wq_count >= 1 && 2516 enic->cq_count >= 2 && 2517 enic->intr_count >= 1 && 2518 !pci_enable_msi(enic->pdev)) { 2519 2520 enic->rq_count = 1; 2521 enic->wq_count = 1; 2522 enic->cq_count = 2; 2523 enic->intr_count = 1; 2524 2525 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI); 2526 2527 return 0; 2528 } 2529 2530 /* Next try INTx 2531 * 2532 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs 2533 * (the first INTR is used for WQ/RQ) 2534 * (the second INTR is used for WQ/RQ errors) 2535 * (the last INTR is used for 
notifications) 2536 */ 2537 2538 if (enic->config.intr_mode < 3 && 2539 enic->rq_count >= 1 && 2540 enic->wq_count >= 1 && 2541 enic->cq_count >= 2 && 2542 enic->intr_count >= 3) { 2543 2544 enic->rq_count = 1; 2545 enic->wq_count = 1; 2546 enic->cq_count = 2; 2547 enic->intr_count = 3; 2548 2549 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX); 2550 2551 return 0; 2552 } 2553 2554 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); 2555 2556 return -EINVAL; 2557 } 2558 2559 static void enic_clear_intr_mode(struct enic *enic) 2560 { 2561 switch (vnic_dev_get_intr_mode(enic->vdev)) { 2562 case VNIC_DEV_INTR_MODE_MSIX: 2563 pci_disable_msix(enic->pdev); 2564 break; 2565 case VNIC_DEV_INTR_MODE_MSI: 2566 pci_disable_msi(enic->pdev); 2567 break; 2568 default: 2569 break; 2570 } 2571 2572 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); 2573 } 2574 2575 static void enic_get_queue_stats_rx(struct net_device *dev, int idx, 2576 struct netdev_queue_stats_rx *rxs) 2577 { 2578 struct enic *enic = netdev_priv(dev); 2579 struct enic_rq_stats *rqstats = &enic->rq_stats[idx]; 2580 2581 rxs->bytes = rqstats->bytes; 2582 rxs->packets = rqstats->packets; 2583 rxs->hw_drops = rqstats->bad_fcs + rqstats->pkt_truncated; 2584 rxs->hw_drop_overruns = rqstats->pkt_truncated; 2585 rxs->csum_unnecessary = rqstats->csum_unnecessary + 2586 rqstats->csum_unnecessary_encap; 2587 } 2588 2589 static void enic_get_queue_stats_tx(struct net_device *dev, int idx, 2590 struct netdev_queue_stats_tx *txs) 2591 { 2592 struct enic *enic = netdev_priv(dev); 2593 struct enic_wq_stats *wqstats = &enic->wq_stats[idx]; 2594 2595 txs->bytes = wqstats->bytes; 2596 txs->packets = wqstats->packets; 2597 txs->csum_none = wqstats->csum_none; 2598 txs->needs_csum = wqstats->csum_partial + wqstats->encap_csum + 2599 wqstats->tso; 2600 txs->hw_gso_packets = wqstats->tso; 2601 txs->stop = wqstats->stopped; 2602 txs->wake = wqstats->wake; 2603 } 2604 2605 static void enic_get_base_stats(struct net_device *dev, 2606 struct netdev_queue_stats_rx *rxs, 2607 struct netdev_queue_stats_tx *txs) 2608 { 2609 rxs->bytes = 0; 2610 rxs->packets = 0; 2611 rxs->hw_drops = 0; 2612 rxs->hw_drop_overruns = 0; 2613 rxs->csum_unnecessary = 0; 2614 txs->bytes = 0; 2615 txs->packets = 0; 2616 txs->csum_none = 0; 2617 txs->needs_csum = 0; 2618 txs->hw_gso_packets = 0; 2619 txs->stop = 0; 2620 txs->wake = 0; 2621 } 2622 2623 static const struct net_device_ops enic_netdev_dynamic_ops = { 2624 .ndo_open = enic_open, 2625 .ndo_stop = enic_stop, 2626 .ndo_start_xmit = enic_hard_start_xmit, 2627 .ndo_get_stats64 = enic_get_stats, 2628 .ndo_validate_addr = eth_validate_addr, 2629 .ndo_set_rx_mode = enic_set_rx_mode, 2630 .ndo_set_mac_address = enic_set_mac_address_dynamic, 2631 .ndo_change_mtu = enic_change_mtu, 2632 .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid, 2633 .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid, 2634 .ndo_tx_timeout = enic_tx_timeout, 2635 .ndo_set_vf_port = enic_set_vf_port, 2636 .ndo_get_vf_port = enic_get_vf_port, 2637 .ndo_set_vf_mac = enic_set_vf_mac, 2638 #ifdef CONFIG_NET_POLL_CONTROLLER 2639 .ndo_poll_controller = enic_poll_controller, 2640 #endif 2641 #ifdef CONFIG_RFS_ACCEL 2642 .ndo_rx_flow_steer = enic_rx_flow_steer, 2643 #endif 2644 .ndo_features_check = enic_features_check, 2645 }; 2646 2647 static const struct net_device_ops enic_netdev_ops = { 2648 .ndo_open = enic_open, 2649 .ndo_stop = enic_stop, 2650 .ndo_start_xmit = enic_hard_start_xmit, 2651 .ndo_get_stats64 = enic_get_stats, 2652 
.ndo_validate_addr = eth_validate_addr, 2653 .ndo_set_mac_address = enic_set_mac_address, 2654 .ndo_set_rx_mode = enic_set_rx_mode, 2655 .ndo_change_mtu = enic_change_mtu, 2656 .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid, 2657 .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid, 2658 .ndo_tx_timeout = enic_tx_timeout, 2659 .ndo_set_vf_port = enic_set_vf_port, 2660 .ndo_get_vf_port = enic_get_vf_port, 2661 .ndo_set_vf_mac = enic_set_vf_mac, 2662 #ifdef CONFIG_NET_POLL_CONTROLLER 2663 .ndo_poll_controller = enic_poll_controller, 2664 #endif 2665 #ifdef CONFIG_RFS_ACCEL 2666 .ndo_rx_flow_steer = enic_rx_flow_steer, 2667 #endif 2668 .ndo_features_check = enic_features_check, 2669 }; 2670 2671 static const struct netdev_stat_ops enic_netdev_stat_ops = { 2672 .get_queue_stats_rx = enic_get_queue_stats_rx, 2673 .get_queue_stats_tx = enic_get_queue_stats_tx, 2674 .get_base_stats = enic_get_base_stats, 2675 }; 2676 2677 static void enic_dev_deinit(struct enic *enic) 2678 { 2679 unsigned int i; 2680 2681 for (i = 0; i < enic->rq_count; i++) 2682 __netif_napi_del(&enic->napi[i]); 2683 2684 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) 2685 for (i = 0; i < enic->wq_count; i++) 2686 __netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]); 2687 2688 /* observe RCU grace period after __netif_napi_del() calls */ 2689 synchronize_net(); 2690 2691 enic_free_vnic_resources(enic); 2692 enic_clear_intr_mode(enic); 2693 enic_free_affinity_hint(enic); 2694 } 2695 2696 static void enic_kdump_kernel_config(struct enic *enic) 2697 { 2698 if (is_kdump_kernel()) { 2699 dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n"); 2700 enic->rq_count = 1; 2701 enic->wq_count = 1; 2702 enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS; 2703 enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS; 2704 enic->config.mtu = min_t(u16, 1500, enic->config.mtu); 2705 } 2706 } 2707 2708 static int enic_dev_init(struct enic *enic) 2709 { 2710 struct device *dev = enic_get_dev(enic); 2711 struct net_device *netdev = enic->netdev; 2712 unsigned int i; 2713 int err; 2714 2715 /* Get interrupt coalesce timer info */ 2716 err = enic_dev_intr_coal_timer_info(enic); 2717 if (err) { 2718 dev_warn(dev, "Using default conversion factor for " 2719 "interrupt coalesce timer\n"); 2720 vnic_dev_intr_coal_timer_info_default(enic->vdev); 2721 } 2722 2723 /* Get vNIC configuration 2724 */ 2725 2726 err = enic_get_vnic_config(enic); 2727 if (err) { 2728 dev_err(dev, "Get vNIC configuration failed, aborting\n"); 2729 return err; 2730 } 2731 2732 /* Get available resource counts 2733 */ 2734 2735 enic_get_res_counts(enic); 2736 2737 /* modify resource count if we are in kdump_kernel 2738 */ 2739 enic_kdump_kernel_config(enic); 2740 2741 /* Set interrupt mode based on resource counts and system 2742 * capabilities 2743 */ 2744 2745 err = enic_set_intr_mode(enic); 2746 if (err) { 2747 dev_err(dev, "Failed to set intr mode based on resource " 2748 "counts and system capabilities, aborting\n"); 2749 return err; 2750 } 2751 2752 /* Allocate and configure vNIC resources 2753 */ 2754 2755 err = enic_alloc_vnic_resources(enic); 2756 if (err) { 2757 dev_err(dev, "Failed to alloc vNIC resources, aborting\n"); 2758 goto err_out_free_vnic_resources; 2759 } 2760 2761 enic_init_vnic_resources(enic); 2762 2763 err = enic_set_rss_nic_cfg(enic); 2764 if (err) { 2765 dev_err(dev, "Failed to config nic, aborting\n"); 2766 goto err_out_free_vnic_resources; 2767 } 2768 2769 switch (vnic_dev_get_intr_mode(enic->vdev)) { 2770 default: 2771 
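		/* INTx and MSI end up here: a single NAPI context running
		 * enic_poll() services both receive and transmit
		 * completions.
		 */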
netif_napi_add(netdev, &enic->napi[0], enic_poll); 2772 break; 2773 case VNIC_DEV_INTR_MODE_MSIX: 2774 for (i = 0; i < enic->rq_count; i++) { 2775 netif_napi_add(netdev, &enic->napi[i], 2776 enic_poll_msix_rq); 2777 } 2778 for (i = 0; i < enic->wq_count; i++) 2779 netif_napi_add(netdev, 2780 &enic->napi[enic_cq_wq(enic, i)], 2781 enic_poll_msix_wq); 2782 break; 2783 } 2784 2785 return 0; 2786 2787 err_out_free_vnic_resources: 2788 enic_free_affinity_hint(enic); 2789 enic_clear_intr_mode(enic); 2790 enic_free_vnic_resources(enic); 2791 2792 return err; 2793 } 2794 2795 static void enic_iounmap(struct enic *enic) 2796 { 2797 unsigned int i; 2798 2799 for (i = 0; i < ARRAY_SIZE(enic->bar); i++) 2800 if (enic->bar[i].vaddr) 2801 iounmap(enic->bar[i].vaddr); 2802 } 2803 2804 static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 2805 { 2806 struct device *dev = &pdev->dev; 2807 struct net_device *netdev; 2808 struct enic *enic; 2809 int using_dac = 0; 2810 unsigned int i; 2811 int err; 2812 #ifdef CONFIG_PCI_IOV 2813 int pos = 0; 2814 #endif 2815 int num_pps = 1; 2816 2817 /* Allocate net device structure and initialize. Private 2818 * instance data is initialized to zero. 2819 */ 2820 2821 netdev = alloc_etherdev_mqs(sizeof(struct enic), 2822 ENIC_RQ_MAX, ENIC_WQ_MAX); 2823 if (!netdev) 2824 return -ENOMEM; 2825 2826 pci_set_drvdata(pdev, netdev); 2827 2828 SET_NETDEV_DEV(netdev, &pdev->dev); 2829 2830 enic = netdev_priv(netdev); 2831 enic->netdev = netdev; 2832 enic->pdev = pdev; 2833 2834 /* Setup PCI resources 2835 */ 2836 2837 err = pci_enable_device_mem(pdev); 2838 if (err) { 2839 dev_err(dev, "Cannot enable PCI device, aborting\n"); 2840 goto err_out_free_netdev; 2841 } 2842 2843 err = pci_request_regions(pdev, DRV_NAME); 2844 if (err) { 2845 dev_err(dev, "Cannot request PCI regions, aborting\n"); 2846 goto err_out_disable_device; 2847 } 2848 2849 pci_set_master(pdev); 2850 2851 /* Query PCI controller on system for DMA addressing 2852 * limitation for the device. Try 47-bit first, and 2853 * fail to 32-bit. 
2854 */ 2855 2856 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47)); 2857 if (err) { 2858 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 2859 if (err) { 2860 dev_err(dev, "No usable DMA configuration, aborting\n"); 2861 goto err_out_release_regions; 2862 } 2863 } else { 2864 using_dac = 1; 2865 } 2866 2867 /* Map vNIC resources from BAR0-5 2868 */ 2869 2870 for (i = 0; i < ARRAY_SIZE(enic->bar); i++) { 2871 if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM)) 2872 continue; 2873 enic->bar[i].len = pci_resource_len(pdev, i); 2874 enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len); 2875 if (!enic->bar[i].vaddr) { 2876 dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i); 2877 err = -ENODEV; 2878 goto err_out_iounmap; 2879 } 2880 enic->bar[i].bus_addr = pci_resource_start(pdev, i); 2881 } 2882 2883 /* Register vNIC device 2884 */ 2885 2886 enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar, 2887 ARRAY_SIZE(enic->bar)); 2888 if (!enic->vdev) { 2889 dev_err(dev, "vNIC registration failed, aborting\n"); 2890 err = -ENODEV; 2891 goto err_out_iounmap; 2892 } 2893 2894 err = vnic_devcmd_init(enic->vdev); 2895 2896 if (err) 2897 goto err_out_vnic_unregister; 2898 2899 #ifdef CONFIG_PCI_IOV 2900 /* Get number of subvnics */ 2901 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 2902 if (pos) { 2903 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, 2904 &enic->num_vfs); 2905 if (enic->num_vfs) { 2906 err = pci_enable_sriov(pdev, enic->num_vfs); 2907 if (err) { 2908 dev_err(dev, "SRIOV enable failed, aborting." 2909 " pci_enable_sriov() returned %d\n", 2910 err); 2911 goto err_out_vnic_unregister; 2912 } 2913 enic->priv_flags |= ENIC_SRIOV_ENABLED; 2914 num_pps = enic->num_vfs; 2915 } 2916 } 2917 #endif 2918 2919 /* Allocate structure for port profiles */ 2920 enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL); 2921 if (!enic->pp) { 2922 err = -ENOMEM; 2923 goto err_out_disable_sriov_pp; 2924 } 2925 2926 /* Issue device open to get device in known state 2927 */ 2928 2929 err = enic_dev_open(enic); 2930 if (err) { 2931 dev_err(dev, "vNIC dev open failed, aborting\n"); 2932 goto err_out_disable_sriov; 2933 } 2934 2935 /* Setup devcmd lock 2936 */ 2937 2938 spin_lock_init(&enic->devcmd_lock); 2939 spin_lock_init(&enic->enic_api_lock); 2940 2941 /* 2942 * Set ingress vlan rewrite mode before vnic initialization 2943 */ 2944 2945 err = enic_dev_set_ig_vlan_rewrite_mode(enic); 2946 if (err) { 2947 dev_err(dev, 2948 "Failed to set ingress vlan rewrite mode, aborting.\n"); 2949 goto err_out_dev_close; 2950 } 2951 2952 /* Issue device init to initialize the vnic-to-switch link. 2953 * We'll start with carrier off and wait for link UP 2954 * notification later to turn on carrier. We don't need 2955 * to wait here for the vnic-to-switch link initialization 2956 * to complete; link UP notification is the indication that 2957 * the process is complete. 2958 */ 2959 2960 netif_carrier_off(netdev); 2961 2962 /* Do not call dev_init for a dynamic vnic. 2963 * For a dynamic vnic, init_prov_info will be 2964 * called later by an upper layer. 
2965 */ 2966 2967 if (!enic_is_dynamic(enic)) { 2968 err = vnic_dev_init(enic->vdev, 0); 2969 if (err) { 2970 dev_err(dev, "vNIC dev init failed, aborting\n"); 2971 goto err_out_dev_close; 2972 } 2973 } 2974 2975 err = enic_dev_init(enic); 2976 if (err) { 2977 dev_err(dev, "Device initialization failed, aborting\n"); 2978 goto err_out_dev_close; 2979 } 2980 2981 netif_set_real_num_tx_queues(netdev, enic->wq_count); 2982 netif_set_real_num_rx_queues(netdev, enic->rq_count); 2983 2984 /* Setup notification timer, HW reset task, and wq locks 2985 */ 2986 2987 timer_setup(&enic->notify_timer, enic_notify_timer, 0); 2988 2989 enic_rfs_flw_tbl_init(enic); 2990 enic_set_rx_coal_setting(enic); 2991 INIT_WORK(&enic->reset, enic_reset); 2992 INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset); 2993 INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work); 2994 2995 for (i = 0; i < enic->wq_count; i++) 2996 spin_lock_init(&enic->wq_lock[i]); 2997 2998 /* Register net device 2999 */ 3000 3001 enic->port_mtu = enic->config.mtu; 3002 3003 err = enic_set_mac_addr(netdev, enic->mac_addr); 3004 if (err) { 3005 dev_err(dev, "Invalid MAC address, aborting\n"); 3006 goto err_out_dev_deinit; 3007 } 3008 3009 enic->tx_coalesce_usecs = enic->config.intr_timer_usec; 3010 /* rx coalesce time already got initialized. This gets used 3011 * if adaptive coal is turned off 3012 */ 3013 enic->rx_coalesce_usecs = enic->tx_coalesce_usecs; 3014 3015 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) 3016 netdev->netdev_ops = &enic_netdev_dynamic_ops; 3017 else 3018 netdev->netdev_ops = &enic_netdev_ops; 3019 netdev->stat_ops = &enic_netdev_stat_ops; 3020 3021 netdev->watchdog_timeo = 2 * HZ; 3022 enic_set_ethtool_ops(netdev); 3023 3024 netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; 3025 if (ENIC_SETTING(enic, LOOP)) { 3026 netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX; 3027 enic->loop_enable = 1; 3028 enic->loop_tag = enic->config.loop_tag; 3029 dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag); 3030 } 3031 if (ENIC_SETTING(enic, TXCSUM)) 3032 netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM; 3033 if (ENIC_SETTING(enic, TSO)) 3034 netdev->hw_features |= NETIF_F_TSO | 3035 NETIF_F_TSO6 | NETIF_F_TSO_ECN; 3036 if (ENIC_SETTING(enic, RSS)) 3037 netdev->hw_features |= NETIF_F_RXHASH; 3038 if (ENIC_SETTING(enic, RXCSUM)) 3039 netdev->hw_features |= NETIF_F_RXCSUM; 3040 if (ENIC_SETTING(enic, VXLAN)) { 3041 u64 patch_level; 3042 u64 a1 = 0; 3043 3044 netdev->hw_enc_features |= NETIF_F_RXCSUM | 3045 NETIF_F_TSO | 3046 NETIF_F_TSO6 | 3047 NETIF_F_TSO_ECN | 3048 NETIF_F_GSO_UDP_TUNNEL | 3049 NETIF_F_HW_CSUM | 3050 NETIF_F_GSO_UDP_TUNNEL_CSUM; 3051 netdev->hw_features |= netdev->hw_enc_features; 3052 /* get bit mask from hw about supported offload bit level 3053 * BIT(0) = fw supports patch_level 0 3054 * fcoe bit = encap 3055 * fcoe_fc_crc_ok = outer csum ok 3056 * BIT(1) = always set by fw 3057 * BIT(2) = fw supports patch_level 2 3058 * BIT(0) in rss_hash = encap 3059 * BIT(1,2) in rss_hash = outer_ip_csum_ok/ 3060 * outer_tcp_csum_ok 3061 * used in enic_rq_indicate_buf 3062 */ 3063 err = vnic_dev_get_supported_feature_ver(enic->vdev, 3064 VIC_FEATURE_VXLAN, 3065 &patch_level, &a1); 3066 if (err) 3067 patch_level = 0; 3068 enic->vxlan.flags = (u8)a1; 3069 /* mask bits that are supported by driver 3070 */ 3071 patch_level &= BIT_ULL(0) | BIT_ULL(2); 3072 patch_level = fls(patch_level); 3073 patch_level = patch_level ? 
patch_level - 1 : 0; 3074 enic->vxlan.patch_level = patch_level; 3075 3076 if (vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ) == 1 || 3077 enic->vxlan.flags & ENIC_VXLAN_MULTI_WQ) { 3078 netdev->udp_tunnel_nic_info = &enic_udp_tunnels_v4; 3079 if (enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6) 3080 netdev->udp_tunnel_nic_info = &enic_udp_tunnels; 3081 } 3082 } 3083 3084 netdev->features |= netdev->hw_features; 3085 netdev->vlan_features |= netdev->features; 3086 3087 #ifdef CONFIG_RFS_ACCEL 3088 netdev->hw_features |= NETIF_F_NTUPLE; 3089 #endif 3090 3091 if (using_dac) 3092 netdev->features |= NETIF_F_HIGHDMA; 3093 3094 netdev->priv_flags |= IFF_UNICAST_FLT; 3095 3096 /* MTU range: 68 - 9000 */ 3097 netdev->min_mtu = ENIC_MIN_MTU; 3098 netdev->max_mtu = ENIC_MAX_MTU; 3099 netdev->mtu = enic->port_mtu; 3100 3101 err = register_netdev(netdev); 3102 if (err) { 3103 dev_err(dev, "Cannot register net device, aborting\n"); 3104 goto err_out_dev_deinit; 3105 } 3106 enic->rx_copybreak = RX_COPYBREAK_DEFAULT; 3107 3108 return 0; 3109 3110 err_out_dev_deinit: 3111 enic_dev_deinit(enic); 3112 err_out_dev_close: 3113 vnic_dev_close(enic->vdev); 3114 err_out_disable_sriov: 3115 kfree(enic->pp); 3116 err_out_disable_sriov_pp: 3117 #ifdef CONFIG_PCI_IOV 3118 if (enic_sriov_enabled(enic)) { 3119 pci_disable_sriov(pdev); 3120 enic->priv_flags &= ~ENIC_SRIOV_ENABLED; 3121 } 3122 #endif 3123 err_out_vnic_unregister: 3124 vnic_dev_unregister(enic->vdev); 3125 err_out_iounmap: 3126 enic_iounmap(enic); 3127 err_out_release_regions: 3128 pci_release_regions(pdev); 3129 err_out_disable_device: 3130 pci_disable_device(pdev); 3131 err_out_free_netdev: 3132 free_netdev(netdev); 3133 3134 return err; 3135 } 3136 3137 static void enic_remove(struct pci_dev *pdev) 3138 { 3139 struct net_device *netdev = pci_get_drvdata(pdev); 3140 3141 if (netdev) { 3142 struct enic *enic = netdev_priv(netdev); 3143 3144 cancel_work_sync(&enic->reset); 3145 cancel_work_sync(&enic->change_mtu_work); 3146 unregister_netdev(netdev); 3147 enic_dev_deinit(enic); 3148 vnic_dev_close(enic->vdev); 3149 #ifdef CONFIG_PCI_IOV 3150 if (enic_sriov_enabled(enic)) { 3151 pci_disable_sriov(pdev); 3152 enic->priv_flags &= ~ENIC_SRIOV_ENABLED; 3153 } 3154 #endif 3155 kfree(enic->pp); 3156 vnic_dev_unregister(enic->vdev); 3157 enic_iounmap(enic); 3158 pci_release_regions(pdev); 3159 pci_disable_device(pdev); 3160 free_netdev(netdev); 3161 } 3162 } 3163 3164 static struct pci_driver enic_driver = { 3165 .name = DRV_NAME, 3166 .id_table = enic_id_table, 3167 .probe = enic_probe, 3168 .remove = enic_remove, 3169 }; 3170 3171 module_pci_driver(enic_driver); 3172
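/* Reference note on the interrupt fallback implemented by
 * enic_set_intr_mode() above.  With n RQs and m WQs the first MSI-X
 * attempt asks for n + m + 2 vectors: one per RQ, one per WQ, one for
 * WQ/RQ errors and one for notifications.  Purely as an illustration,
 * a configuration advertising 8 RQs and 8 WQs requests 18 vectors; if
 * that fails the driver retries with a single RQ (1 + 8 + 2 = 11),
 * then falls back to MSI (1 RQ, 1 WQ, 2 CQs, 1 vector) and finally to
 * INTx (1 RQ, 1 WQ, 2 CQs, 3 vectors).
 */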