/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2024, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: pv-drivers@vmware.com
 *
 */

#ifndef _VMXNET3_INT_H
#define _VMXNET3_INT_H

#include <linux/bitops.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/ioport.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/uaccess.h>
#include <asm/dma.h>
#include <asm/page.h>

#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <asm/checksum.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/log2.h>
#include <linux/bpf.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>

#include "vmxnet3_defs.h"

#ifdef DEBUG
# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI(debug)"
#else
# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI"
#endif


/*
 * Version numbers
 */
#define VMXNET3_DRIVER_VERSION_STRING	"1.9.0.0-k"

/* Each byte of this 32-bit integer encodes a version number in
 * VMXNET3_DRIVER_VERSION_STRING.
 */
#define VMXNET3_DRIVER_VERSION_NUM	0x01090000
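/* Worked example of the encoding above: 0x01090000 splits into the bytes
 * 0x01, 0x09, 0x00, 0x00 (most significant byte first), i.e. version 1.9.0.0.
 */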
#if defined(CONFIG_PCI_MSI)
	/* RSS only makes sense if MSI-X is supported. */
	#define VMXNET3_RSS
#endif

#define VMXNET3_REV_9		8	/* Vmxnet3 Rev. 9 */
#define VMXNET3_REV_8		7	/* Vmxnet3 Rev. 8 */
#define VMXNET3_REV_7		6	/* Vmxnet3 Rev. 7 */
#define VMXNET3_REV_6		5	/* Vmxnet3 Rev. 6 */
#define VMXNET3_REV_5		4	/* Vmxnet3 Rev. 5 */
#define VMXNET3_REV_4		3	/* Vmxnet3 Rev. 4 */
#define VMXNET3_REV_3		2	/* Vmxnet3 Rev. 3 */
#define VMXNET3_REV_2		1	/* Vmxnet3 Rev. 2 */
#define VMXNET3_REV_1		0	/* Vmxnet3 Rev. 1 */

/*
 * Capabilities
 */

enum {
	VMNET_CAP_SG		= 0x0001, /* Can do scatter-gather transmits. */
	VMNET_CAP_IP4_CSUM	= 0x0002, /* Can checksum only TCP/UDP over
					   * IPv4 */
	VMNET_CAP_HW_CSUM	= 0x0004, /* Can checksum all packets. */
	VMNET_CAP_HIGH_DMA	= 0x0008, /* Can DMA to high memory. */
	VMNET_CAP_TOE		= 0x0010, /* Supports TCP/IP offload. */
	VMNET_CAP_TSO		= 0x0020, /* Supports TCP Segmentation
					   * offload */
	VMNET_CAP_SW_TSO	= 0x0040, /* Supports SW TCP Segmentation */
	VMNET_CAP_VMXNET_APROM	= 0x0080, /* Vmxnet APROM support */
	VMNET_CAP_HW_TX_VLAN	= 0x0100, /* Can we do VLAN tagging in HW */
	VMNET_CAP_HW_RX_VLAN	= 0x0200, /* Can we do VLAN untagging in HW */
	VMNET_CAP_SW_VLAN	= 0x0400, /* VLAN tagging/untagging in SW */
	VMNET_CAP_WAKE_PCKT_RCV	= 0x0800, /* Can wake on network packet recv? */
	VMNET_CAP_ENABLE_INT_INLINE = 0x1000,	/* Enable Interrupt Inline */
	VMNET_CAP_ENABLE_HEADER_COPY = 0x2000,	/* copy header for vmkernel */
	VMNET_CAP_TX_CHAIN	= 0x4000,	/* Guest can use multiple tx entries
						 * for a pkt */
	VMNET_CAP_RX_CHAIN	= 0x8000,	/* pkt can span multiple rx entries */
	VMNET_CAP_LPD		= 0x10000,	/* large pkt delivery */
	VMNET_CAP_BPF		= 0x20000,	/* BPF Support in VMXNET Virtual HW*/
	VMNET_CAP_SG_SPAN_PAGES = 0x40000,	/* Scatter-gather can span multiple*/
						/* pages transmits */
	VMNET_CAP_IP6_CSUM	= 0x80000,	/* Can do IPv6 csum offload. */
	VMNET_CAP_TSO6		= 0x100000,	/* TSO seg. offload for IPv6 pkts. */
	VMNET_CAP_TSO256k	= 0x200000,	/* Can do TSO seg offload for */
						/* pkts up to 256kB. */
	VMNET_CAP_UPT		= 0x400000	/* Support UPT */
};

/*
 * Maximum devices supported.
 */
#define MAX_ETHERNET_CARDS		10
#define MAX_PCI_PASSTHRU_DEVICE		6

struct vmxnet3_cmd_ring {
	union Vmxnet3_GenericDesc *base;
	u32		size;
	u32		next2fill;
	u32		next2comp;
	u8		gen;
	u8		isOutOfOrder;
	dma_addr_t	basePA;
};

static inline void
vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring)
{
	ring->next2fill++;
	if (unlikely(ring->next2fill == ring->size)) {
		ring->next2fill = 0;
		VMXNET3_FLIP_RING_GEN(ring->gen);
	}
}

static inline void
vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring)
{
	VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size);
}
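/* Number of descriptors the producer may still fill: effectively
 * (next2comp - next2fill - 1) reduced modulo ring->size, so one slot is
 * always held back (presumably so that a completely full ring is never
 * confused with an empty one).  For example, with size = 8, next2comp = 2
 * and next2fill = 6 this evaluates to 8 + 2 - 6 - 1 = 3.
 */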
static inline int
vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring)
{
	return (ring->next2comp > ring->next2fill ? 0 : ring->size) +
		ring->next2comp - ring->next2fill - 1;
}

struct vmxnet3_comp_ring {
	union Vmxnet3_GenericDesc *base;
	u32		size;
	u32		next2proc;
	u8		gen;
	u8		intr_idx;
	dma_addr_t	basePA;
};

static inline void
vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring)
{
	ring->next2proc++;
	if (unlikely(ring->next2proc == ring->size)) {
		ring->next2proc = 0;
		VMXNET3_FLIP_RING_GEN(ring->gen);
	}
}

struct vmxnet3_tx_data_ring {
	struct Vmxnet3_TxDataDesc *base;
	u32		size;
	dma_addr_t	basePA;
};

struct vmxnet3_tx_ts_ring {
	struct Vmxnet3_TxTSDesc *base;
	dma_addr_t	basePA;
};

#define VMXNET3_MAP_NONE	0
#define VMXNET3_MAP_SINGLE	BIT(0)
#define VMXNET3_MAP_PAGE	BIT(1)
#define VMXNET3_MAP_XDP		BIT(2)

struct vmxnet3_tx_buf_info {
	u32		map_type;
	u16		len;
	u16		sop_idx;
	dma_addr_t	dma_addr;
	union {
		struct sk_buff   *skb;
		struct xdp_frame *xdpf;
	};
};

struct vmxnet3_tq_driver_stats {
	u64 drop_total;     /* # of pkts dropped by the driver, the
			     * counters below track droppings due to
			     * different reasons
			     */
	u64 drop_too_many_frags;
	u64 drop_oversized_hdr;
	u64 drop_hdr_inspect_err;
	u64 drop_tso;

	u64 tx_ring_full;
	u64 linearized;         /* # of pkts linearized */
	u64 copy_skb_header;    /* # of times we have to copy skb header */
	u64 oversized_hdr;

	u64 xdp_xmit;
	u64 xdp_xmit_err;
};

struct vmxnet3_tx_ctx {
	bool	ipv4;
	bool	ipv6;
	u16	mss;
	u32	l4_offset;	/* only valid for pkts requesting tso or csum
				 * offloading. For encap offload, it refers to
				 * inner L4 offset i.e. it includes outer header
				 * encap header and inner eth and ip header size
				 */

	u32	l4_hdr_size;	/* only valid if mss != 0
				 * Refers to inner L4 hdr size for encap
				 * offload
				 */
	u32	copy_size;	/* # of bytes copied into the data ring */
	union Vmxnet3_GenericDesc *sop_txd;
	union Vmxnet3_GenericDesc *eop_txd;
	struct Vmxnet3_TxTSDesc *ts_txd;
};

struct vmxnet3_tx_queue {
	char			name[IFNAMSIZ+8]; /* To identify interrupt */
	struct vmxnet3_adapter		*adapter;
	spinlock_t			tx_lock;
	struct vmxnet3_cmd_ring		tx_ring;
	struct vmxnet3_tx_buf_info	*buf_info;
	struct vmxnet3_tx_data_ring	data_ring;
	struct vmxnet3_tx_ts_ring	ts_ring;
	struct vmxnet3_comp_ring	comp_ring;
	struct Vmxnet3_TxQueueCtrl	*shared;
	struct vmxnet3_tq_driver_stats	stats;
	bool				stopped;
	int				num_stop;  /* # of times the queue is
						    * stopped */
	int				qid;
	u16				txdata_desc_size;
	u16				tx_ts_desc_size;
	u16				tsPktCount;
} ____cacheline_aligned;

enum vmxnet3_rx_buf_type {
	VMXNET3_RX_BUF_NONE = 0,
	VMXNET3_RX_BUF_SKB = 1,
	VMXNET3_RX_BUF_PAGE = 2,
	VMXNET3_RX_BUF_XDP = 3,
};

#define VMXNET3_RXD_COMP_PENDING        0
#define VMXNET3_RXD_COMP_DONE           1

struct vmxnet3_rx_buf_info {
	enum vmxnet3_rx_buf_type buf_type;
	u16	len;
	u8	comp_state;
	union {
		struct sk_buff *skb;
		struct page    *page;
	};
	dma_addr_t dma_addr;
};

struct vmxnet3_rx_ctx {
	struct sk_buff *skb;
	u32 sop_idx;
};

struct vmxnet3_rq_driver_stats {
	u64 drop_total;
	u64 drop_err;
	u64 drop_fcs;
	u64 rx_buf_alloc_failure;

	u64 xdp_packets;	/* Total packets processed by XDP. */
	u64 xdp_tx;
	u64 xdp_redirects;
	u64 xdp_drops;
	u64 xdp_aborted;
};

struct vmxnet3_rx_data_ring {
	Vmxnet3_RxDataDesc *base;
	dma_addr_t basePA;
	u16 desc_size;
};

struct vmxnet3_rx_ts_ring {
	struct Vmxnet3_RxTSDesc *base;
	dma_addr_t basePA;
};
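/* Each rx queue drives two hardware command rings plus an optional data
 * ring; the qid/qid2/dataRingQid fields below hold the rqID values that
 * receive completions use to indicate which of the three a buffer came
 * from.  (rx_ring[1] is typically used for the extra body buffers of
 * packets that span multiple descriptors.)
 */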
struct vmxnet3_rx_queue {
	char			name[IFNAMSIZ + 8]; /* To identify interrupt */
	struct vmxnet3_adapter	  *adapter;
	struct napi_struct        napi;
	struct vmxnet3_cmd_ring   rx_ring[2];
	struct vmxnet3_rx_data_ring data_ring;
	struct vmxnet3_comp_ring  comp_ring;
	struct vmxnet3_rx_ts_ring ts_ring;
	struct vmxnet3_rx_ctx     rx_ctx;
	u32 qid;            /* rqID in RCD for buffer from 1st ring */
	u32 qid2;           /* rqID in RCD for buffer from 2nd ring */
	u32 dataRingQid;    /* rqID in RCD for buffer from data ring */
	struct vmxnet3_rx_buf_info     *buf_info[2];
	struct Vmxnet3_RxQueueCtrl     *shared;
	struct vmxnet3_rq_driver_stats  stats;
	struct page_pool *page_pool;
	struct xdp_rxq_info xdp_rxq;
	u16 rx_ts_desc_size;
} ____cacheline_aligned;

#define VMXNET3_DEVICE_MAX_TX_QUEUES 32
#define VMXNET3_DEVICE_MAX_RX_QUEUES 32   /* Keep this value as a power of 2 */

#define VMXNET3_DEVICE_DEFAULT_TX_QUEUES 8
#define VMXNET3_DEVICE_DEFAULT_RX_QUEUES 8   /* Keep this value as a power of 2 */

/* Should be less than UPT1_RSS_MAX_IND_TABLE_SIZE */
#define VMXNET3_RSS_IND_TABLE_SIZE  (VMXNET3_DEVICE_MAX_RX_QUEUES * 4)

#define VMXNET3_LINUX_MAX_MSIX_VECT     (VMXNET3_DEVICE_MAX_TX_QUEUES + \
					 VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
#define VMXNET3_LINUX_MIN_MSIX_VECT     3 /* 1 for tx, 1 for rx pair and 1 for event */


struct vmxnet3_intr {
	enum vmxnet3_intr_mask_mode  mask_mode;
	enum vmxnet3_intr_type       type;	/* MSI-X, MSI, or INTx? */
	u8  num_intrs;			/* # of intr vectors */
	u8  event_intr_idx;		/* idx of the intr vector for event */
	u8  mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
	char	event_msi_vector_name[IFNAMSIZ+17];
#ifdef CONFIG_PCI_MSI
	struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
#endif
};

/* Interrupt sharing schemes, share_intr */
#define VMXNET3_INTR_BUDDYSHARE 0    /* Corresponding tx,rx queues share irq */
#define VMXNET3_INTR_TXSHARE 1	     /* All tx queues share one irq */
#define VMXNET3_INTR_DONTSHARE 2     /* each queue has its own irq */


#define VMXNET3_STATE_BIT_RESETTING   0
#define VMXNET3_STATE_BIT_QUIESCED    1
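/* Per-device driver state.  One instance exists for every vmxnet3 NIC and,
 * as elsewhere in this driver, it is assumed to live in the net_device
 * private area (netdev_priv()).
 */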
struct vmxnet3_adapter {
	struct vmxnet3_tx_queue		tx_queue[VMXNET3_DEVICE_MAX_TX_QUEUES];
	struct vmxnet3_rx_queue		rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
	unsigned long			active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	struct vmxnet3_intr		intr;
	spinlock_t			cmd_lock;
	struct Vmxnet3_DriverShared	*shared;
	struct Vmxnet3_PMConf		*pm_conf;
	struct Vmxnet3_TxQueueDesc	*tqd_start;     /* all tx queue desc */
	struct Vmxnet3_RxQueueDesc	*rqd_start;	/* all rx queue desc */
	struct net_device		*netdev;
	struct pci_dev			*pdev;

	u8			__iomem *hw_addr0; /* for BAR 0 */
	u8			__iomem *hw_addr1; /* for BAR 1 */
	u8				version;

#ifdef VMXNET3_RSS
	struct UPT1_RSSConf		*rss_conf;
	bool				rss;
#endif
	u32				num_rx_queues;
	u32				num_tx_queues;

	/* rx buffer related */
	unsigned			skb_buf_size;
	int		rx_buf_per_pkt;  /* only apply to the 1st ring */
	dma_addr_t			shared_pa;
	dma_addr_t queue_desc_pa;
	dma_addr_t coal_conf_pa;

	/* Wake-on-LAN */
	u32     wol;

	/* Link speed */
	u32     link_speed; /* in mbps */

	u64     tx_timeout_count;

	/* Ring sizes */
	u32 tx_ring_size;
	u32 rx_ring_size;
	u32 rx_ring2_size;

	/* Size of buffer in the data ring */
	u16 txdata_desc_size;
	u16 rxdata_desc_size;

	bool rxdataring_enabled;
	bool default_rss_fields;
	enum Vmxnet3_RSSField rss_fields;

	struct work_struct work;

	unsigned long  state;    /* VMXNET3_STATE_BIT_xxx */

	int share_intr;

	struct Vmxnet3_CoalesceScheme *coal_conf;
	bool   default_coal_mode;

	dma_addr_t adapter_pa;
	dma_addr_t pm_conf_pa;
	dma_addr_t rss_conf_pa;
	bool   queuesExtEnabled;
	struct Vmxnet3_RingBufferSize     ringBufSize;
	u32    devcap_supported[8];
	u32    ptcap_supported[8];
	u32    dev_caps[8];
	u16    tx_prod_offset;
	u16    rx_prod_offset;
	u16    rx_prod2_offset;
	struct bpf_prog __rcu *xdp_bpf_prog;
	struct Vmxnet3_LatencyConf *latencyConf;
	/* Size of buffer in the ts ring */
	u16 tx_ts_desc_size;
	u16 rx_ts_desc_size;
	u32 disabledOffloads;
};

#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val)  \
	writel((val), (adapter)->hw_addr0 + (reg))
#define VMXNET3_READ_BAR0_REG(adapter, reg)        \
	readl((adapter)->hw_addr0 + (reg))

#define VMXNET3_WRITE_BAR1_REG(adapter, reg, val)  \
	writel((val), (adapter)->hw_addr1 + (reg))
#define VMXNET3_READ_BAR1_REG(adapter, reg)        \
	readl((adapter)->hw_addr1 + (reg))

#define VMXNET3_WAKE_QUEUE_THRESHOLD(tq)  (5)
#define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \
	((rq)->rx_ring[ring_idx].size >> 3)

#define VMXNET3_GET_ADDR_LO(dma)   ((u32)(dma))
#define VMXNET3_GET_ADDR_HI(dma)   ((u32)(((u64)(dma)) >> 32))
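/* The checks below treat adapter->version as a 1-based revision number,
 * i.e. VMXNET3_REV_x + 1 (VMXNET3_REV_1 itself is defined as 0 above), so
 * e.g. VMXNET3_VERSION_GE_4 is true for device version 4 and later.
 */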
#define VMXNET3_VERSION_GE_2(adapter) \
	(adapter->version >= VMXNET3_REV_2 + 1)
#define VMXNET3_VERSION_GE_3(adapter) \
	(adapter->version >= VMXNET3_REV_3 + 1)
#define VMXNET3_VERSION_GE_4(adapter) \
	(adapter->version >= VMXNET3_REV_4 + 1)
#define VMXNET3_VERSION_GE_5(adapter) \
	(adapter->version >= VMXNET3_REV_5 + 1)
#define VMXNET3_VERSION_GE_6(adapter) \
	(adapter->version >= VMXNET3_REV_6 + 1)
#define VMXNET3_VERSION_GE_7(adapter) \
	(adapter->version >= VMXNET3_REV_7 + 1)
#define VMXNET3_VERSION_GE_8(adapter) \
	(adapter->version >= VMXNET3_REV_8 + 1)
#define VMXNET3_VERSION_GE_9(adapter) \
	(adapter->version >= VMXNET3_REV_9 + 1)

/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
#define VMXNET3_DEF_TX_RING_SIZE    512
#define VMXNET3_DEF_RX_RING_SIZE    1024
#define VMXNET3_DEF_RX_RING2_SIZE   512

#define VMXNET3_DEF_RXDATA_DESC_SIZE 128

#define VMXNET3_MAX_ETH_HDR_SIZE    22
#define VMXNET3_MAX_SKB_BUF_SIZE    (3*1024)

#define VMXNET3_GET_RING_IDX(adapter, rqID)		\
	((rqID >= adapter->num_rx_queues &&		\
	 rqID < 2 * adapter->num_rx_queues) ? 1 : 0)	\

#define VMXNET3_RX_DATA_RING(adapter, rqID)		\
	(rqID >= 2 * adapter->num_rx_queues &&		\
	rqID < 3 * adapter->num_rx_queues)		\

#define VMXNET3_COAL_STATIC_DEFAULT_DEPTH	64

#define VMXNET3_COAL_RBC_RATE(usecs) (1000000 / usecs)
#define VMXNET3_COAL_RBC_USECS(rbc_rate) (1000000 / rbc_rate)
#define VMXNET3_RSS_FIELDS_DEFAULT (VMXNET3_RSS_FIELDS_TCPIP4 | \
				    VMXNET3_RSS_FIELDS_TCPIP6)

int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);

int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter);

void
vmxnet3_force_close(struct vmxnet3_adapter *adapter);

void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter);

void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter);

void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);

int
vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter);

void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter);

netdev_features_t
vmxnet3_fix_features(struct net_device *netdev, netdev_features_t features);

netdev_features_t
vmxnet3_features_check(struct sk_buff *skb,
		       struct net_device *netdev, netdev_features_t features);

int
vmxnet3_set_features(struct net_device *netdev, netdev_features_t features);

int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
		      u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size,
		      u16 txdata_desc_size, u16 rxdata_desc_size);

void vmxnet3_set_ethtool_ops(struct net_device *netdev);

void vmxnet3_get_stats64(struct net_device *dev,
			 struct rtnl_link_stats64 *stats);
bool vmxnet3_check_ptcapability(u32 cap_supported, u32 cap);

extern char vmxnet3_driver_name[];
#endif