/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: pv-drivers@vmware.com
 *
 */

#ifndef _VMXNET3_INT_H
#define _VMXNET3_INT_H

#include <linux/bitops.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/ioport.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/uaccess.h>
#include <asm/dma.h>
#include <asm/page.h>

#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <asm/checksum.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/log2.h>

#include "vmxnet3_defs.h"

#ifdef DEBUG
# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI(debug)"
#else
# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI"
#endif


/*
 * Version numbers
 */
#define VMXNET3_DRIVER_VERSION_STRING   "1.5.0.0-k"

/* Each byte of this 32-bit integer encodes a version number in
 * VMXNET3_DRIVER_VERSION_STRING.
 */
#define VMXNET3_DRIVER_VERSION_NUM      0x01050000
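/* For example, 0x01050000 decodes byte by byte to 1.5.0.0. */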

#if defined(CONFIG_PCI_MSI)
	/* RSS only makes sense if MSI-X is supported. */
	#define VMXNET3_RSS
#endif

#define VMXNET3_REV_4		3	/* Vmxnet3 Rev. 4 */
#define VMXNET3_REV_3		2	/* Vmxnet3 Rev. 3 */
#define VMXNET3_REV_2		1	/* Vmxnet3 Rev. 2 */
#define VMXNET3_REV_1		0	/* Vmxnet3 Rev. 1 */

/*
 * Capabilities
 */

enum {
	VMNET_CAP_SG	        = 0x0001, /* Can do scatter-gather transmits. */
	VMNET_CAP_IP4_CSUM      = 0x0002, /* Can checksum only TCP/UDP over
					   * IPv4 */
	VMNET_CAP_HW_CSUM       = 0x0004, /* Can checksum all packets. */
	VMNET_CAP_HIGH_DMA      = 0x0008, /* Can DMA to high memory. */
	VMNET_CAP_TOE	        = 0x0010, /* Supports TCP/IP offload. */
	VMNET_CAP_TSO	        = 0x0020, /* Supports TCP Segmentation
					   * offload */
	VMNET_CAP_SW_TSO        = 0x0040, /* Supports SW TCP Segmentation */
	VMNET_CAP_VMXNET_APROM  = 0x0080, /* Vmxnet APROM support */
	VMNET_CAP_HW_TX_VLAN    = 0x0100, /* Can we do VLAN tagging in HW */
	VMNET_CAP_HW_RX_VLAN    = 0x0200, /* Can we do VLAN untagging in HW */
	VMNET_CAP_SW_VLAN       = 0x0400, /* VLAN tagging/untagging in SW */
	VMNET_CAP_WAKE_PCKT_RCV = 0x0800, /* Can wake on network packet recv? */
	VMNET_CAP_ENABLE_INT_INLINE = 0x1000,  /* Enable Interrupt Inline */
	VMNET_CAP_ENABLE_HEADER_COPY = 0x2000,  /* copy header for vmkernel */
	VMNET_CAP_TX_CHAIN      = 0x4000, /* Guest can use multiple tx entries
					  * for a pkt */
	VMNET_CAP_RX_CHAIN      = 0x8000, /* pkt can span multiple rx entries */
	VMNET_CAP_LPD           = 0x10000, /* large pkt delivery */
	VMNET_CAP_BPF           = 0x20000, /* BPF Support in VMXNET Virtual HW*/
	VMNET_CAP_SG_SPAN_PAGES = 0x40000, /* Scatter-gather transmits can
					    * span multiple pages */
	VMNET_CAP_IP6_CSUM      = 0x80000, /* Can do IPv6 csum offload. */
	VMNET_CAP_TSO6         = 0x100000, /* TSO seg. offload for IPv6 pkts. */
	VMNET_CAP_TSO256k      = 0x200000, /* Can do TSO seg offload for
					    * pkts up to 256kB. */
	VMNET_CAP_UPT          = 0x400000  /* Support UPT */
};

/*
 * Maximum devices supported.
 */
#define MAX_ETHERNET_CARDS		10
#define MAX_PCI_PASSTHRU_DEVICE		6

struct vmxnet3_cmd_ring {
	union Vmxnet3_GenericDesc *base;
	u32		size;
	u32		next2fill;
	u32		next2comp;
	u8		gen;
	dma_addr_t	basePA;
};

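/* Advance the producer index; on wrap-around the ring's generation bit is
 * flipped so that descriptors filled after the wrap can be told apart from
 * those of the previous pass.
 */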
static inline void
vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring)
{
	ring->next2fill++;
	if (unlikely(ring->next2fill == ring->size)) {
		ring->next2fill = 0;
		VMXNET3_FLIP_RING_GEN(ring->gen);
	}
}

static inline void
vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring)
{
	VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size);
}

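/* Number of free descriptors in the ring.  One slot is always left unused so
 * that a full ring can be distinguished from an empty one.  For example, with
 * size = 8, next2comp = 2 and next2fill = 5, three descriptors are
 * outstanding and 8 - 3 - 1 = 4 are available.
 */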
static inline int
vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring)
{
	return (ring->next2comp > ring->next2fill ? 0 : ring->size) +
		ring->next2comp - ring->next2fill - 1;
}

struct vmxnet3_comp_ring {
	union Vmxnet3_GenericDesc *base;
	u32               size;
	u32               next2proc;
	u8                gen;
	u8                intr_idx;
	dma_addr_t           basePA;
};

static inline void
vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring)
{
	ring->next2proc++;
	if (unlikely(ring->next2proc == ring->size)) {
		ring->next2proc = 0;
		VMXNET3_FLIP_RING_GEN(ring->gen);
	}
}

struct vmxnet3_tx_data_ring {
	struct Vmxnet3_TxDataDesc *base;
	u32              size;
	dma_addr_t          basePA;
};

enum vmxnet3_buf_map_type {
	VMXNET3_MAP_INVALID = 0,
	VMXNET3_MAP_NONE,
	VMXNET3_MAP_SINGLE,
	VMXNET3_MAP_PAGE,
};

struct vmxnet3_tx_buf_info {
	u32      map_type;
	u16      len;
	u16      sop_idx;
	dma_addr_t  dma_addr;
	struct sk_buff *skb;
};

struct vmxnet3_tq_driver_stats {
	u64 drop_total;     /* # of pkts dropped by the driver; the
			     * counters below break the drops down
			     * by reason
			     */
	u64 drop_too_many_frags;
	u64 drop_oversized_hdr;
	u64 drop_hdr_inspect_err;
	u64 drop_tso;

	u64 tx_ring_full;
	u64 linearized;         /* # of pkts linearized */
	u64 copy_skb_header;    /* # of times we have to copy skb header */
	u64 oversized_hdr;
};

struct vmxnet3_tx_ctx {
	bool   ipv4;
	bool   ipv6;
	u16 mss;
	u32    l4_offset;	/* only valid for pkts requesting tso or csum
				 * offloading. For encap offload it is the
				 * inner L4 offset, i.e. it includes the outer
				 * headers, the encap header and the inner eth
				 * and ip headers
				 */

	u32	l4_hdr_size;	/* only valid if mss != 0
				 * Refers to inner L4 hdr size for encap
				 * offload
				 */
	u32 copy_size;       /* # of bytes copied into the data ring */
	union Vmxnet3_GenericDesc *sop_txd;
	union Vmxnet3_GenericDesc *eop_txd;
};

struct vmxnet3_tx_queue {
	char			name[IFNAMSIZ+8]; /* To identify interrupt */
	struct vmxnet3_adapter		*adapter;
	spinlock_t                      tx_lock;
	struct vmxnet3_cmd_ring         tx_ring;
	struct vmxnet3_tx_buf_info      *buf_info;
	struct vmxnet3_tx_data_ring     data_ring;
	struct vmxnet3_comp_ring        comp_ring;
	struct Vmxnet3_TxQueueCtrl      *shared;
	struct vmxnet3_tq_driver_stats  stats;
	bool                            stopped;
	int                             num_stop;  /* # of times the queue is
						    * stopped */
	int				qid;
	u16				txdata_desc_size;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
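/* Both vmxnet3_tx_queue and vmxnet3_rx_queue are cache-line aligned,
 * presumably so that queues serviced by different CPUs do not share
 * cache lines.
 */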

enum vmxnet3_rx_buf_type {
	VMXNET3_RX_BUF_NONE = 0,
	VMXNET3_RX_BUF_SKB = 1,
	VMXNET3_RX_BUF_PAGE = 2
};

struct vmxnet3_rx_buf_info {
	enum vmxnet3_rx_buf_type buf_type;
	u16     len;
	union {
		struct sk_buff *skb;
		struct page    *page;
	};
	dma_addr_t dma_addr;
};

struct vmxnet3_rx_ctx {
	struct sk_buff *skb;
	u32 sop_idx;
};

struct vmxnet3_rq_driver_stats {
	u64 drop_total;
	u64 drop_err;
	u64 drop_fcs;
	u64 rx_buf_alloc_failure;
};

struct vmxnet3_rx_data_ring {
	Vmxnet3_RxDataDesc *base;
	dma_addr_t basePA;
	u16 desc_size;
};

struct vmxnet3_rx_queue {
	char			name[IFNAMSIZ + 8]; /* To identify interrupt */
	struct vmxnet3_adapter	  *adapter;
	struct napi_struct        napi;
	struct vmxnet3_cmd_ring   rx_ring[2];
	struct vmxnet3_rx_data_ring data_ring;
	struct vmxnet3_comp_ring  comp_ring;
	struct vmxnet3_rx_ctx     rx_ctx;
	u32 qid;            /* rqID in RCD for buffer from 1st ring */
	u32 qid2;           /* rqID in RCD for buffer from 2nd ring */
	u32 dataRingQid;    /* rqID in RCD for buffer from data ring */
	struct vmxnet3_rx_buf_info     *buf_info[2];
	struct Vmxnet3_RxQueueCtrl            *shared;
	struct vmxnet3_rq_driver_stats  stats;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define VMXNET3_DEVICE_MAX_TX_QUEUES 8
#define VMXNET3_DEVICE_MAX_RX_QUEUES 8   /* Keep this value as a power of 2 */

/* Should be less than UPT1_RSS_MAX_IND_TABLE_SIZE */
#define VMXNET3_RSS_IND_TABLE_SIZE (VMXNET3_DEVICE_MAX_RX_QUEUES * 4)
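/* With the maximums above this is a 32-entry indirection table; each entry
 * selects the rx queue used for a given RSS hash bucket.
 */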

#define VMXNET3_LINUX_MAX_MSIX_VECT     (VMXNET3_DEVICE_MAX_TX_QUEUES + \
					 VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
#define VMXNET3_LINUX_MIN_MSIX_VECT     2 /* 1 for tx-rx pair and 1 for event */
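/* With the default maximums the upper bound is 8 + 8 + 1 = 17 vectors. */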


struct vmxnet3_intr {
	enum vmxnet3_intr_mask_mode  mask_mode;
	enum vmxnet3_intr_type       type;	/* MSI-X, MSI, or INTx? */
	u8  num_intrs;			/* # of intr vectors */
	u8  event_intr_idx;		/* idx of the intr vector for event */
	u8  mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
	char	event_msi_vector_name[IFNAMSIZ+17];
#ifdef CONFIG_PCI_MSI
	struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
#endif
};

/* Interrupt sharing schemes, share_intr */
#define VMXNET3_INTR_BUDDYSHARE 0    /* Corresponding tx,rx queues share irq */
#define VMXNET3_INTR_TXSHARE 1	     /* All tx queues share one irq */
#define VMXNET3_INTR_DONTSHARE 2     /* each queue has its own irq */


#define VMXNET3_STATE_BIT_RESETTING   0
#define VMXNET3_STATE_BIT_QUIESCED    1
struct vmxnet3_adapter {
	struct vmxnet3_tx_queue		tx_queue[VMXNET3_DEVICE_MAX_TX_QUEUES];
	struct vmxnet3_rx_queue		rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
	unsigned long			active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	struct vmxnet3_intr		intr;
	spinlock_t			cmd_lock;
	struct Vmxnet3_DriverShared	*shared;
	struct Vmxnet3_PMConf		*pm_conf;
	struct Vmxnet3_TxQueueDesc	*tqd_start;     /* all tx queue desc */
	struct Vmxnet3_RxQueueDesc	*rqd_start;	/* all rx queue desc */
	struct net_device		*netdev;
	struct pci_dev			*pdev;

	u8			__iomem *hw_addr0; /* for BAR 0 */
	u8			__iomem *hw_addr1; /* for BAR 1 */
	u8                              version;

#ifdef VMXNET3_RSS
	struct UPT1_RSSConf		*rss_conf;
	bool				rss;
#endif
	u32				num_rx_queues;
	u32				num_tx_queues;

	/* rx buffer related */
	unsigned			skb_buf_size;
	int		rx_buf_per_pkt;  /* only applies to the 1st ring */
	dma_addr_t			shared_pa;
	dma_addr_t queue_desc_pa;
	dma_addr_t coal_conf_pa;

	/* Wake-on-LAN */
	u32     wol;

	/* Link speed */
	u32     link_speed; /* in mbps */

	u64     tx_timeout_count;

	/* Ring sizes */
	u32 tx_ring_size;
	u32 rx_ring_size;
	u32 rx_ring2_size;

	/* Size of buffer in the data ring */
	u16 txdata_desc_size;
	u16 rxdata_desc_size;

	bool rxdataring_enabled;
	bool default_rss_fields;
	enum Vmxnet3_RSSField rss_fields;

	struct work_struct work;

	unsigned long  state;    /* VMXNET3_STATE_BIT_xxx */

	int share_intr;

	struct Vmxnet3_CoalesceScheme *coal_conf;
	bool   default_coal_mode;

	dma_addr_t adapter_pa;
	dma_addr_t pm_conf_pa;
	dma_addr_t rss_conf_pa;
};

#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val)  \
	writel((val), (adapter)->hw_addr0 + (reg))
#define VMXNET3_READ_BAR0_REG(adapter, reg)        \
	readl((adapter)->hw_addr0 + (reg))

#define VMXNET3_WRITE_BAR1_REG(adapter, reg, val)  \
	writel((val), (adapter)->hw_addr1 + (reg))
#define VMXNET3_READ_BAR1_REG(adapter, reg)        \
	readl((adapter)->hw_addr1 + (reg))

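/* Heuristic thresholds: a stopped tx queue is only woken once more than
 * VMXNET3_WAKE_QUEUE_THRESHOLD descriptors are free again; the rx threshold
 * evaluates to one eighth of the ring size.
 */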
#define VMXNET3_WAKE_QUEUE_THRESHOLD(tq)  (5)
#define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \
	((rq)->rx_ring[ring_idx].size >> 3)

#define VMXNET3_GET_ADDR_LO(dma)   ((u32)(dma))
#define VMXNET3_GET_ADDR_HI(dma)   ((u32)(((u64)(dma)) >> 32))
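/* Split a 64-bit DMA address into the 32-bit low/high halves that the
 * device's registers and descriptors expect.
 */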

#define VMXNET3_VERSION_GE_2(adapter) \
	(adapter->version >= VMXNET3_REV_2 + 1)
#define VMXNET3_VERSION_GE_3(adapter) \
	(adapter->version >= VMXNET3_REV_3 + 1)
#define VMXNET3_VERSION_GE_4(adapter) \
	(adapter->version >= VMXNET3_REV_4 + 1)
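/* adapter->version stores the negotiated device revision as a 1-based
 * number, while the VMXNET3_REV_* constants above are 0-based (they are bit
 * positions in the device's version-report register), hence the "+ 1".
 */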

/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
#define VMXNET3_DEF_TX_RING_SIZE    512
#define VMXNET3_DEF_RX_RING_SIZE    1024
#define VMXNET3_DEF_RX_RING2_SIZE   256

#define VMXNET3_DEF_RXDATA_DESC_SIZE 128

#define VMXNET3_MAX_ETH_HDR_SIZE    22
#define VMXNET3_MAX_SKB_BUF_SIZE    (3*1024)

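/* Completion descriptors identify their source via rqID: with N rx queues,
 * rqIDs [0, N) come from the first command ring, [N, 2N) from the second
 * ring, and [2N, 3N) from the rx data ring.
 */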
#define VMXNET3_GET_RING_IDX(adapter, rqID)		\
	((rqID >= adapter->num_rx_queues &&		\
	 rqID < 2 * adapter->num_rx_queues) ? 1 : 0)

#define VMXNET3_RX_DATA_RING(adapter, rqID)		\
	(rqID >= 2 * adapter->num_rx_queues &&		\
	rqID < 3 * adapter->num_rx_queues)

#define VMXNET3_COAL_STATIC_DEFAULT_DEPTH	64

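/* Rate-based coalescing conversions: interrupts per second and microseconds
 * between interrupts are reciprocals of each other (integer division), e.g.
 * 250 usecs corresponds to a rate of 4000 interrupts/sec.
 */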
#define VMXNET3_COAL_RBC_RATE(usecs) (1000000 / (usecs))
#define VMXNET3_COAL_RBC_USECS(rbc_rate) (1000000 / (rbc_rate))
#define VMXNET3_RSS_FIELDS_DEFAULT (VMXNET3_RSS_FIELDS_TCPIP4 | \
				    VMXNET3_RSS_FIELDS_TCPIP6)

int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);

int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter);

void
vmxnet3_force_close(struct vmxnet3_adapter *adapter);

void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter);

void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter);

void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);

netdev_features_t
vmxnet3_fix_features(struct net_device *netdev, netdev_features_t features);

netdev_features_t
vmxnet3_features_check(struct sk_buff *skb,
		       struct net_device *netdev, netdev_features_t features);

int
vmxnet3_set_features(struct net_device *netdev, netdev_features_t features);

int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
		      u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size,
		      u16 txdata_desc_size, u16 rxdata_desc_size);

void vmxnet3_set_ethtool_ops(struct net_device *netdev);

void vmxnet3_get_stats64(struct net_device *dev,
			 struct rtnl_link_stats64 *stats);

extern char vmxnet3_driver_name[];
#endif