/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
 *
 */

#ifndef _VMXNET3_INT_H
#define _VMXNET3_INT_H

#include <linux/types.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/ioport.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/uaccess.h>
#include <asm/dma.h>
#include <asm/page.h>

#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <asm/checksum.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>

#include "vmxnet3_defs.h"

#ifdef DEBUG
# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI(debug)"
#else
# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI"
#endif


/*
 * Version numbers
 */
#define VMXNET3_DRIVER_VERSION_STRING	"1.0.5.0-k"

/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
#define VMXNET3_DRIVER_VERSION_NUM	0x01000500


/*
 * Capabilities
 */

enum {
	VMNET_CAP_SG		= 0x0001, /* Can do scatter-gather transmits. */
	VMNET_CAP_IP4_CSUM	= 0x0002, /* Can checksum only TCP/UDP over
					   * IPv4 */
	VMNET_CAP_HW_CSUM	= 0x0004, /* Can checksum all packets. */
	VMNET_CAP_HIGH_DMA	= 0x0008, /* Can DMA to high memory. */
	VMNET_CAP_TOE		= 0x0010, /* Supports TCP/IP offload. */
	VMNET_CAP_TSO		= 0x0020, /* Supports TCP Segmentation
					   * offload */
	VMNET_CAP_SW_TSO	= 0x0040, /* Supports SW TCP Segmentation */
	VMNET_CAP_VMXNET_APROM	= 0x0080, /* Vmxnet APROM support */
	VMNET_CAP_HW_TX_VLAN	= 0x0100, /* Can we do VLAN tagging in HW */
	VMNET_CAP_HW_RX_VLAN	= 0x0200, /* Can we do VLAN untagging in HW */
	VMNET_CAP_SW_VLAN	= 0x0400, /* VLAN tagging/untagging in SW */
	VMNET_CAP_WAKE_PCKT_RCV	= 0x0800, /* Can wake on network packet recv? */
	VMNET_CAP_ENABLE_INT_INLINE = 0x1000,	/* Enable Interrupt Inline */
	VMNET_CAP_ENABLE_HEADER_COPY = 0x2000,	/* copy header for vmkernel */
	VMNET_CAP_TX_CHAIN	= 0x4000, /* Guest can use multiple tx entries
					   * for a pkt */
	VMNET_CAP_RX_CHAIN	= 0x8000, /* pkt can span multiple rx entries */
	VMNET_CAP_LPD		= 0x10000, /* large pkt delivery */
	VMNET_CAP_BPF		= 0x20000, /* BPF Support in VMXNET Virtual HW */
	VMNET_CAP_SG_SPAN_PAGES	= 0x40000, /* Scatter-gather can span multiple
					    * pages for transmits */
	VMNET_CAP_IP6_CSUM	= 0x80000, /* Can do IPv6 csum offload. */
	VMNET_CAP_TSO6		= 0x100000, /* TSO seg. offload for IPv6 pkts. */
	VMNET_CAP_TSO256k	= 0x200000, /* Can do TSO seg offload for
					     * pkts up to 256kB. */
	VMNET_CAP_UPT		= 0x400000  /* Support UPT */
};

/*
 * PCI vendor and device IDs.
 */
#define PCI_VENDOR_ID_VMWARE		0x15AD
#define PCI_DEVICE_ID_VMWARE_VMXNET3	0x07B0
#define MAX_ETHERNET_CARDS		10
#define MAX_PCI_PASSTHRU_DEVICE		6

struct vmxnet3_cmd_ring {
	union Vmxnet3_GenericDesc	*base;
	u32				size;
	u32				next2fill;
	u32				next2comp;
	u8				gen;
	dma_addr_t			basePA;
};

static inline void
vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring)
{
	ring->next2fill++;
	if (unlikely(ring->next2fill == ring->size)) {
		ring->next2fill = 0;
		VMXNET3_FLIP_RING_GEN(ring->gen);
	}
}

static inline void
vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring)
{
	VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size);
}

static inline int
vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring)
{
	return (ring->next2comp > ring->next2fill ? 0 : ring->size) +
		ring->next2comp - ring->next2fill - 1;
}
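
/*
 * Worked example (editor's illustration, not part of the original header):
 * the helpers above keep next2fill from ever catching up with next2comp,
 * so one descriptor slot always stays unused and a full ring can be told
 * apart from an empty one.  For instance, with size = 8, next2comp = 2 and
 * next2fill = 5, three descriptors are outstanding and
 * vmxnet3_cmd_ring_desc_avail() returns 8 + 2 - 5 - 1 = 4 free slots.
 */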

struct vmxnet3_comp_ring {
	union Vmxnet3_GenericDesc	*base;
	u32				size;
	u32				next2proc;
	u8				gen;
	u8				intr_idx;
	dma_addr_t			basePA;
};

static inline void
vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring)
{
	ring->next2proc++;
	if (unlikely(ring->next2proc == ring->size)) {
		ring->next2proc = 0;
		VMXNET3_FLIP_RING_GEN(ring->gen);
	}
}

struct vmxnet3_tx_data_ring {
	struct Vmxnet3_TxDataDesc	*base;
	u32				size;
	dma_addr_t			basePA;
};

enum vmxnet3_buf_map_type {
	VMXNET3_MAP_INVALID = 0,
	VMXNET3_MAP_NONE,
	VMXNET3_MAP_SINGLE,
	VMXNET3_MAP_PAGE,
};

struct vmxnet3_tx_buf_info {
	u32		map_type;
	u16		len;
	u16		sop_idx;
	dma_addr_t	dma_addr;
	struct sk_buff	*skb;
};

struct vmxnet3_tq_driver_stats {
	u64 drop_total;		/* # of pkts dropped by the driver; the
				 * counters below track drops due to
				 * different reasons
				 */
	u64 drop_too_many_frags;
	u64 drop_oversized_hdr;
	u64 drop_hdr_inspect_err;
	u64 drop_tso;

	u64 tx_ring_full;
	u64 linearized;		/* # of pkts linearized */
	u64 copy_skb_header;	/* # of times we have to copy skb header */
	u64 oversized_hdr;
};

struct vmxnet3_tx_ctx {
	bool	ipv4;
	u16	mss;
	u32	eth_ip_hdr_size; /* only valid for pkts requesting tso or csum
				  * offloading
				  */
	u32	l4_hdr_size;	/* only valid if mss != 0 */
	u32	copy_size;	/* # of bytes copied into the data ring */
	union Vmxnet3_GenericDesc *sop_txd;
	union Vmxnet3_GenericDesc *eop_txd;
};

struct vmxnet3_tx_queue {
	spinlock_t			tx_lock;
	struct vmxnet3_cmd_ring		tx_ring;
	struct vmxnet3_tx_buf_info	*buf_info;
	struct vmxnet3_tx_data_ring	data_ring;
	struct vmxnet3_comp_ring	comp_ring;
	struct Vmxnet3_TxQueueCtrl	*shared;
	struct vmxnet3_tq_driver_stats	stats;
	bool				stopped;
	int				num_stop; /* # of times the queue is
						   * stopped */
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
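
/*
 * Illustrative sketch (not part of the original header): the transmit path
 * in vmxnet3_drv.c is expected to stop the queue when the command ring
 * cannot hold the descriptors needed for the next packet ("count" below
 * being that hypothetical descriptor count), roughly along the lines of:
 *
 *	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
 *		tq->stats.tx_ring_full++;
 *		tq->stopped = true;
 *		tq->num_stop++;
 *		netif_stop_queue(adapter->netdev);
 *	}
 *
 * The exact policy lives in the .c file; this only shows how the fields of
 * struct vmxnet3_tx_queue fit together.
 */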

enum vmxnet3_rx_buf_type {
	VMXNET3_RX_BUF_NONE = 0,
	VMXNET3_RX_BUF_SKB = 1,
	VMXNET3_RX_BUF_PAGE = 2
};

struct vmxnet3_rx_buf_info {
	enum vmxnet3_rx_buf_type buf_type;
	u16	len;
	union {
		struct sk_buff	*skb;
		struct page	*page;
	};
	dma_addr_t dma_addr;
};

struct vmxnet3_rx_ctx {
	struct sk_buff *skb;
	u32 sop_idx;
};

struct vmxnet3_rq_driver_stats {
	u64 drop_total;
	u64 drop_err;
	u64 drop_fcs;
	u64 rx_buf_alloc_failure;
};

struct vmxnet3_rx_queue {
	struct vmxnet3_cmd_ring		rx_ring[2];
	struct vmxnet3_comp_ring	comp_ring;
	struct vmxnet3_rx_ctx		rx_ctx;
	u32 qid;		/* rqID in RCD for buffer from 1st ring */
	u32 qid2;		/* rqID in RCD for buffer from 2nd ring */
	u32 uncommitted[2];	/* # of buffers allocated since last RXPROD
				 * update */
	struct vmxnet3_rx_buf_info	*buf_info[2];
	struct Vmxnet3_RxQueueCtrl	*shared;
	struct vmxnet3_rq_driver_stats	stats;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define VMXNET3_LINUX_MAX_MSIX_VECT	1

struct vmxnet3_intr {
	enum vmxnet3_intr_mask_mode	mask_mode;
	enum vmxnet3_intr_type		type;	/* MSI-X, MSI, or INTx? */
	u8	num_intrs;		/* # of intr vectors */
	u8	event_intr_idx;		/* idx of the intr vector for event */
	u8	mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
#ifdef CONFIG_PCI_MSI
	struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
#endif
};

#define VMXNET3_STATE_BIT_RESETTING	0
#define VMXNET3_STATE_BIT_QUIESCED	1
struct vmxnet3_adapter {
	struct vmxnet3_tx_queue		tx_queue;
	struct vmxnet3_rx_queue		rx_queue;
	struct napi_struct		napi;
	struct vlan_group		*vlan_grp;

	struct vmxnet3_intr		intr;

	struct Vmxnet3_DriverShared	*shared;
	struct Vmxnet3_PMConf		*pm_conf;
	struct Vmxnet3_TxQueueDesc	*tqd_start;	/* first tx queue desc */
	struct Vmxnet3_RxQueueDesc	*rqd_start;	/* first rx queue desc */
	struct net_device		*netdev;
	struct pci_dev			*pdev;

	u8				*hw_addr0; /* for BAR 0 */
	u8				*hw_addr1; /* for BAR 1 */

	/* feature control */
	bool				rxcsum;
	bool				lro;
	bool				jumbo_frame;

	/* rx buffer related */
	unsigned			skb_buf_size;
	int				rx_buf_per_pkt; /* only applies to the 1st ring */
	dma_addr_t			shared_pa;
	dma_addr_t			queue_desc_pa;

	/* Wake-on-LAN */
	u32				wol;

	/* Link speed */
	u32				link_speed; /* in mbps */

	u64				tx_timeout_count;
	struct work_struct		work;

	unsigned long			state;	/* VMXNET3_STATE_BIT_xxx */

	int				dev_number;
};

#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val)  \
	writel((val), (adapter)->hw_addr0 + (reg))
#define VMXNET3_READ_BAR0_REG(adapter, reg)        \
	readl((adapter)->hw_addr0 + (reg))

#define VMXNET3_WRITE_BAR1_REG(adapter, reg, val)  \
	writel((val), (adapter)->hw_addr1 + (reg))
#define VMXNET3_READ_BAR1_REG(adapter, reg)        \
	readl((adapter)->hw_addr1 + (reg))
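
/*
 * Usage sketch (editor's illustration, not part of the original header):
 * BAR 1 (hw_addr1) holds the configuration registers, so reading the
 * device revision would look roughly like
 *
 *	u32 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
 *
 * assuming the VMXNET3_REG_VRRS offset from vmxnet3_defs.h, while BAR 0
 * (hw_addr0) is used for the frequently written registers such as the
 * rx/tx producer indices.
 */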

#define VMXNET3_WAKE_QUEUE_THRESHOLD(tq)  (5)
#define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \
	((rq)->rx_ring[ring_idx].size >> 3)

#define VMXNET3_GET_ADDR_LO(dma)   ((u32)(dma))
#define VMXNET3_GET_ADDR_HI(dma)   ((u32)(((u64)(dma)) >> 32))

/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
#define VMXNET3_DEF_TX_RING_SIZE    512
#define VMXNET3_DEF_RX_RING_SIZE    256

#define VMXNET3_MAX_ETH_HDR_SIZE    22
#define VMXNET3_MAX_SKB_BUF_SIZE    (3*1024)

int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);

int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter);

void
vmxnet3_force_close(struct vmxnet3_adapter *adapter);

void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter);

void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter);

void
vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
		   struct vmxnet3_adapter *adapter);

int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
		      u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size);

extern void vmxnet3_set_ethtool_ops(struct net_device *netdev);
extern struct net_device_stats *vmxnet3_get_stats(struct net_device *netdev);

extern char vmxnet3_driver_name[];
#endif