/* SPDX-License-Identifier: GPL-2.0-or-later */
/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                   */
/*  Copyright (C) 2014 IBM Corp.                                          */
/*  Santiago Leon (santi_leon@yahoo.com)                                  */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
/*  John Allen (jallen@linux.vnet.ibm.com)                                */
/*                                                                        */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/pSeries LPAR Linux.  It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/**************************************************************************/

#define IBMVNIC_NAME		"ibmvnic"
#define IBMVNIC_DRIVER_VERSION	"1.0.1"
#define IBMVNIC_INVALID_MAP	-1
#define IBMVNIC_OPEN_FAILED	3

/* basic structures plus 100 2k buffers */
#define IBMVNIC_IO_ENTITLEMENT_DEFAULT	610305

/* Initial module_parameters */
#define IBMVNIC_RX_WEIGHT		16
/* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */
#define IBMVNIC_BUFFS_PER_POOL	100
#define IBMVNIC_MAX_QUEUES	16
#define IBMVNIC_MAX_QUEUE_SZ   4096
#define IBMVNIC_MAX_IND_DESCS 128
#define IBMVNIC_SAFE_IND_DESC 16
#define IBMVNIC_IND_MAX_ARR_SZ (IBMVNIC_MAX_IND_DESCS * 32)
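/* Note: sizeof(union sub_crq) is 32 bytes, so IBMVNIC_IND_MAX_ARR_SZ
 * above is the byte size of an indirect-descriptor array that holds
 * IBMVNIC_MAX_IND_DESCS descriptors.
 */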

#define IBMVNIC_TSO_BUF_SZ	65536
#define IBMVNIC_TSO_BUFS	64
#define IBMVNIC_TSO_POOL_MASK	0x80000000

/* A VNIC adapter has a set of Rx and Tx pools (aka queues). Each Rx/Tx
 * pool has a set of buffers. The size of each buffer is determined by
 * the MTU.
 *
 * Each Rx/Tx pool is also associated with a DMA region that is shared
 * with the "hardware" (VIOS) and used to send/receive packets. The DMA
 * region is also referred to as a Long Term Buffer or LTB.
 *
 * The size of the DMA region required for an Rx/Tx pool depends on the
 * number and size (MTU) of the buffers in the pool. At the max levels
 * of 4096 jumbo frames (MTU=9000) we will need about 9K*4K = 36MB plus
 * some padding.
 *
 * But the size of a single DMA region is limited by MAX_PAGE_ORDER in
 * the kernel (about 16MB currently). To support, say, 4K jumbo frames,
 * we use a set of LTBs (struct ltb_set) per pool.
 *
 * IBMVNIC_ONE_LTB_MAX  - max size of each LTB supported by kernel
 * IBMVNIC_ONE_LTB_SIZE - current max size of each LTB in an ltb_set
 * (must be <= IBMVNIC_ONE_LTB_MAX)
 * IBMVNIC_LTB_SET_SIZE - current size of all LTBs in an ltb_set
 *
 * Each VNIC can have up to 16 Rx, 16 Tx and 16 TSO pools. The TSO pools
 * are of fixed length (IBMVNIC_TSO_BUF_SZ * IBMVNIC_TSO_BUFS = 4MB).
 *
 * The Rx and Tx pools can have up to 4096 buffers. The max size of
 * these buffers is about 9588 (for jumbo frames, including
 * IBMVNIC_BUFFER_HLEN). So we set IBMVNIC_LTB_SET_SIZE for a pool to
 * 4096 * 9588 ~= 38MB.
 *
 * There is a trade-off in setting IBMVNIC_ONE_LTB_SIZE. If it is large,
 * the allocation of the LTB can fail when the system is low on memory.
 * If it is too small, we would need several mappings for each of the
 * Rx/Tx/TSO pools, but there is a limit of 255 mappings per VNIC in the
 * VNIC protocol.
 *
 * So we set IBMVNIC_ONE_LTB_SIZE to 8MB. With IBMVNIC_LTB_SET_SIZE set
 * to 38MB, we need 5 LTBs per Rx and Tx pool and 1 LTB per TSO pool
 * for its 4MB. Thus the 16 Rx and 16 Tx queues require 32 * 5 = 160
 * LTBs, plus 16 for the TSO pools, for a total of 176 LTB mappings per
 * VNIC.
 */
#define IBMVNIC_ONE_LTB_MAX	((u32)((1 << MAX_PAGE_ORDER) * PAGE_SIZE))
#define IBMVNIC_ONE_LTB_SIZE	min((u32)(8 << 20), IBMVNIC_ONE_LTB_MAX)
#define IBMVNIC_LTB_SET_SIZE	(38 << 20)
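
/* Illustrative sketch (not part of the driver API): the number of LTBs
 * an ltb_set needs follows from the two sizes above. With the 38MB set
 * size and 8MB LTBs this gives the 5 LTBs per Rx/Tx pool described in
 * the comment. The helper name is hypothetical.
 */
static inline u32 ibmvnic_example_ltbs_per_set(u32 set_size)
{
	/* round up: a partial final LTB still costs one mapping */
	return (set_size + IBMVNIC_ONE_LTB_SIZE - 1) / IBMVNIC_ONE_LTB_SIZE;
}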

#define IBMVNIC_BUFFER_HLEN		500
#define IBMVNIC_RESET_DELAY 100

struct ibmvnic_login_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_LB 1
	__be32 num_txcomp_subcrqs;
	__be32 off_txcomp_subcrqs;
	__be32 num_rxcomp_subcrqs;
	__be32 off_rxcomp_subcrqs;
	__be32 login_rsp_ioba;
	__be32 login_rsp_len;
	__be32 client_data_offset;
	__be32 client_data_len;
} __packed __aligned(8);
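
/* Illustrative sketch, assuming the layout the driver builds at login:
 * the __be64 tx/rx sub-CRQ handle arrays follow this fixed header at
 * the offsets recorded above, so (ignoring the optional client data)
 * the total buffer length is roughly the header plus both arrays.
 * Hypothetical helper, shown only to clarify the num/off pairs.
 */
static inline u32 ibmvnic_example_login_buf_len(u32 num_tx, u32 num_rx)
{
	return sizeof(struct ibmvnic_login_buffer) +
	       sizeof(__be64) * (num_tx + num_rx);
}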

struct ibmvnic_login_rsp_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_LRB 1
	__be32 num_txsubm_subcrqs;
	__be32 off_txsubm_subcrqs;
	__be32 num_rxadd_subcrqs;
	__be32 off_rxadd_subcrqs;
	__be32 off_rxadd_buff_size;
	__be32 num_supp_tx_desc;
	__be32 off_supp_tx_desc;
} __packed __aligned(8);

struct ibmvnic_query_ip_offload_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_IOB 1
	u8 ipv4_chksum;
	u8 ipv6_chksum;
	u8 tcp_ipv4_chksum;
	u8 tcp_ipv6_chksum;
	u8 udp_ipv4_chksum;
	u8 udp_ipv6_chksum;
	u8 large_tx_ipv4;
	u8 large_tx_ipv6;
	u8 large_rx_ipv4;
	u8 large_rx_ipv6;
	u8 reserved1[14];
	__be16 max_ipv4_header_size;
	__be16 max_ipv6_header_size;
	__be16 max_tcp_header_size;
	__be16 max_udp_header_size;
	__be32 max_large_tx_size;
	__be32 max_large_rx_size;
	u8 reserved2[16];
	u8 ipv6_extension_header;
#define IPV6_EH_NOT_SUPPORTED	0x00
#define IPV6_EH_SUPPORTED_LIM	0x01
#define IPV6_EH_SUPPORTED	0xFF
	u8 tcp_pseudosum_req;
#define TCP_PS_NOT_REQUIRED	0x00
#define TCP_PS_REQUIRED		0x01
	u8 reserved3[30];
	__be16 num_ipv6_ext_headers;
	__be32 off_ipv6_ext_headers;
	u8 reserved4[154];
} __packed __aligned(8);

struct ibmvnic_control_ip_offload_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_IOB 1
	u8 ipv4_chksum;
	u8 ipv6_chksum;
	u8 tcp_ipv4_chksum;
	u8 tcp_ipv6_chksum;
	u8 udp_ipv4_chksum;
	u8 udp_ipv6_chksum;
	u8 large_tx_ipv4;
	u8 large_tx_ipv6;
	u8 bad_packet_rx;
	u8 large_rx_ipv4;
	u8 large_rx_ipv6;
	u8 reserved4[111];
} __packed __aligned(8);

struct ibmvnic_fw_component {
	u8 name[48];
	__be32 trace_buff_size;
	u8 correlator;
	u8 trace_level;
	u8 parent_correlator;
	u8 error_check_level;
	u8 trace_on;
	u8 reserved[7];
	u8 description[192];
} __packed __aligned(8);

struct ibmvnic_fw_trace_entry {
	__be32 trace_id;
	u8 num_valid_data;
	u8 reserved[3];
	__be64 pmc_registers;
	__be64 timebase;
	__be64 trace_data[5];
} __packed __aligned(8);

struct ibmvnic_statistics {
	__be32 version;
	__be32 promiscuous;
	__be64 rx_packets;
	__be64 rx_bytes;
	__be64 tx_packets;
	__be64 tx_bytes;
	__be64 ucast_tx_packets;
	__be64 ucast_rx_packets;
	__be64 mcast_tx_packets;
	__be64 mcast_rx_packets;
	__be64 bcast_tx_packets;
	__be64 bcast_rx_packets;
	__be64 align_errors;
	__be64 fcs_errors;
	__be64 single_collision_frames;
	__be64 multi_collision_frames;
	__be64 sqe_test_errors;
	__be64 deferred_tx;
	__be64 late_collisions;
	__be64 excess_collisions;
	__be64 internal_mac_tx_errors;
	__be64 carrier_sense;
	__be64 too_long_frames;
	__be64 internal_mac_rx_errors;
	u8 reserved[72];
} __packed __aligned(8);

struct ibmvnic_tx_queue_stats {
	u64 batched_packets;
	u64 direct_packets;
	u64 bytes;
	u64 dropped_packets;
};

#define NUM_TX_STATS \
	(sizeof(struct ibmvnic_tx_queue_stats) / sizeof(u64))

struct ibmvnic_rx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 interrupts;
};

#define NUM_RX_STATS \
	(sizeof(struct ibmvnic_rx_queue_stats) / sizeof(u64))
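
/* Illustrative sketch: both per-queue stats structs are plain arrays of
 * u64, which is what NUM_TX_STATS/NUM_RX_STATS express, so an
 * ethtool-style dump can walk them generically. Hypothetical helper.
 */
static inline u64 ibmvnic_example_rx_stat(const struct ibmvnic_rx_queue_stats *s,
					  unsigned int i)
{
	/* caller must ensure i < NUM_RX_STATS */
	return ((const u64 *)s)[i];
}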

struct ibmvnic_acl_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_IOB 1
	u8 mac_acls_restrict;
	u8 vlan_acls_restrict;
	u8 reserved1[22];
	__be32 num_mac_addrs;
	__be32 offset_mac_addrs;
	__be32 num_vlan_ids;
	__be32 offset_vlan_ids;
	u8 reserved2[80];
} __packed __aligned(8);

/* descriptors have been changed, how should this be defined?  1? 4? */

#define IBMVNIC_TX_DESC_VERSIONS 3

/* is this still needed? */
struct ibmvnic_tx_comp_desc {
	u8 first;
	u8 num_comps;
	__be16 rcs[5];
	__be32 correlators[5];
} __packed __aligned(8);

/* Some flags that were included in the v0 descriptor, which is gone.
 * They are only used for IBMVNIC_TCP_CHKSUM and IBMVNIC_UDP_CHKSUM,
 * and only in an offload_flags variable that doesn't seem to be used
 * anywhere, so they can probably be removed.
 */

#define IBMVNIC_TCP_CHKSUM		0x20
#define IBMVNIC_UDP_CHKSUM		0x08

struct ibmvnic_tx_desc {
	u8 first;
	u8 type;

#define IBMVNIC_TX_DESC 0x10
	u8 n_crq_elem;
	u8 n_sge;
	u8 flags1;
#define IBMVNIC_TX_COMP_NEEDED		0x80
#define IBMVNIC_TX_CHKSUM_OFFLOAD	0x40
#define IBMVNIC_TX_LSO			0x20
#define IBMVNIC_TX_PROT_TCP		0x10
#define IBMVNIC_TX_PROT_UDP		0x08
#define IBMVNIC_TX_PROT_IPV4		0x04
#define IBMVNIC_TX_PROT_IPV6		0x02
#define IBMVNIC_TX_VLAN_PRESENT		0x01
	u8 flags2;
#define IBMVNIC_TX_VLAN_INSERT		0x80
	__be16 mss;
	u8 reserved[4];
	__be32 correlator;
	__be16 vlan_id;
	__be16 dma_reg;
	__be32 sge_len;
	__be64 ioba;
} __packed __aligned(8);

struct ibmvnic_hdr_desc {
	u8 first;
	u8 type;
#define IBMVNIC_HDR_DESC		0x11
	u8 len;
	u8 l2_len;
	__be16 l3_len;
	u8 l4_len;
	u8 flag;
	u8 data[24];
} __packed __aligned(8);

struct ibmvnic_hdr_ext_desc {
	u8 first;
	u8 type;
#define IBMVNIC_HDR_EXT_DESC		0x12
	u8 len;
	u8 data[29];
} __packed __aligned(8);

struct ibmvnic_sge_desc {
	u8 first;
	u8 type;
#define IBMVNIC_SGE_DESC		0x30
	__be16 sge1_dma_reg;
	__be32 sge1_len;
	__be64 sge1_ioba;
	__be16 reserved;
	__be16 sge2_dma_reg;
	__be32 sge2_len;
	__be64 sge2_ioba;
} __packed __aligned(8);

struct ibmvnic_rx_comp_desc {
	u8 first;
	u8 flags;
#define IBMVNIC_IP_CHKSUM_GOOD		0x80
#define IBMVNIC_TCP_UDP_CHKSUM_GOOD	0x40
#define IBMVNIC_END_FRAME			0x20
#define IBMVNIC_EXACT_MC			0x10
#define IBMVNIC_VLAN_STRIPPED			0x08
	__be16 off_frame_data;
	__be32 len;
	__be64 correlator;
	__be16 vlan_tci;
	__be16 rc;
	u8 reserved[12];
} __packed __aligned(8);

struct ibmvnic_generic_scrq {
	u8 first;
	u8 reserved[31];
} __packed __aligned(8);

struct ibmvnic_rx_buff_add_desc {
	u8 first;
	u8 reserved[7];
	__be64 correlator;
	__be32 ioba;
	u8 map_id;
	__be32 len:24;
	u8 reserved2[8];
} __packed __aligned(8);

struct ibmvnic_rc {
	u8 code; /* one of enum ibmvnic_rc_codes */
	u8 detailed_data[3];
} __packed __aligned(4);

struct ibmvnic_generic_crq {
	u8 first;
	u8 cmd;
	u8 params[10];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_version_exchange {
	u8 first;
	u8 cmd;
	__be16 version;
#define IBMVNIC_INITIAL_VERSION 1
	u8 reserved[8];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_capability {
	u8 first;
	u8 cmd;
	__be16 capability; /* one of ibmvnic_capabilities */
	__be64 number;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_login {
	u8 first;
	u8 cmd;
	u8 reserved[6];
	__be32 ioba;
	__be32 len;
} __packed __aligned(8);

struct ibmvnic_phys_parms {
	u8 first;
	u8 cmd;
	u8 flags1;
#define IBMVNIC_EXTERNAL_LOOPBACK	0x80
#define IBMVNIC_INTERNAL_LOOPBACK	0x40
#define IBMVNIC_PROMISC		0x20
#define IBMVNIC_PHYS_LINK_ACTIVE	0x10
#define IBMVNIC_AUTONEG_DUPLEX	0x08
#define IBMVNIC_FULL_DUPLEX	0x04
#define IBMVNIC_HALF_DUPLEX	0x02
#define IBMVNIC_CAN_CHG_PHYS_PARMS	0x01
	u8 flags2;
#define IBMVNIC_LOGICAL_LNK_ACTIVE 0x80
	__be32 speed;
#define IBMVNIC_AUTONEG		0x80000000
#define IBMVNIC_10MBPS		0x40000000
#define IBMVNIC_100MBPS		0x20000000
#define IBMVNIC_1GBPS		0x10000000
#define IBMVNIC_10GBPS		0x08000000
#define IBMVNIC_40GBPS		0x04000000
#define IBMVNIC_100GBPS		0x02000000
#define IBMVNIC_25GBPS		0x01000000
#define IBMVNIC_50GBPS		0x00800000
#define IBMVNIC_200GBPS		0x00400000
	__be32 mtu;
	struct ibmvnic_rc rc;
} __packed __aligned(8);
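
/* Illustrative sketch of decoding the one-hot speed code carried in a
 * QUERY_PHYS_PARMS response into Mbps (0 here meaning unknown/autoneg).
 * Hypothetical helper, not the driver's own conversion routine.
 */
static inline u32 ibmvnic_example_speed_mbps(__be32 speed)
{
	switch (be32_to_cpu(speed)) {
	case IBMVNIC_10MBPS:	return 10;
	case IBMVNIC_100MBPS:	return 100;
	case IBMVNIC_1GBPS:	return 1000;
	case IBMVNIC_10GBPS:	return 10000;
	case IBMVNIC_25GBPS:	return 25000;
	case IBMVNIC_40GBPS:	return 40000;
	case IBMVNIC_50GBPS:	return 50000;
	case IBMVNIC_100GBPS:	return 100000;
	case IBMVNIC_200GBPS:	return 200000;
	default:		return 0;
	}
}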

struct ibmvnic_logical_link_state {
	u8 first;
	u8 cmd;
	u8 link_state;
#define IBMVNIC_LOGICAL_LNK_DN 0x00
#define IBMVNIC_LOGICAL_LNK_UP 0x01
#define IBMVNIC_LOGICAL_LNK_QUERY 0xff
	u8 reserved[9];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_query_ip_offload {
	u8 first;
	u8 cmd;
	u8 reserved[2];
	__be32 len;
	__be32 ioba;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_control_ip_offload {
	u8 first;
	u8 cmd;
	u8 reserved[2];
	__be32 ioba;
	__be32 len;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_request_statistics {
	u8 first;
	u8 cmd;
	u8 flags;
#define IBMVNIC_PHYSICAL_PORT	0x80
	u8 reserved1;
	__be32 ioba;
	__be32 len;
	u8 reserved[4];
} __packed __aligned(8);

struct ibmvnic_error_indication {
	u8 first;
	u8 cmd;
	u8 flags;
#define IBMVNIC_FATAL_ERROR	0x80
	u8 reserved1;
	__be32 error_id;
	__be32 detail_error_sz;
	__be16 error_cause;
	u8 reserved2[2];
} __packed __aligned(8);

struct ibmvnic_link_state_indication {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	u8 phys_link_state;
	u8 logical_link_state;
	u8 reserved2[10];
} __packed __aligned(8);

struct ibmvnic_change_mac_addr {
	u8 first;
	u8 cmd;
	u8 mac_addr[6];
	u8 reserved[4];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_multicast_ctrl {
	u8 first;
	u8 cmd;
	u8 mac_addr[6];
	u8 flags;
#define IBMVNIC_ENABLE_MC		0x80
#define IBMVNIC_DISABLE_MC		0x40
#define IBMVNIC_ENABLE_ALL		0x20
#define IBMVNIC_DISABLE_ALL	0x10
	u8 reserved1;
	__be16 reserved2; /* was num_enabled_mc_addr; */
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_get_vpd_size {
	u8 first;
	u8 cmd;
	u8 reserved[14];
} __packed __aligned(8);

struct ibmvnic_get_vpd_size_rsp {
	u8 first;
	u8 cmd;
	u8 reserved[2];
	__be64 len;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_get_vpd {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	__be32 ioba;
	__be32 len;
	u8 reserved[4];
} __packed __aligned(8);

struct ibmvnic_get_vpd_rsp {
	u8 first;
	u8 cmd;
	u8 reserved[10];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_acl_change_indication {
	u8 first;
	u8 cmd;
	__be16 change_type;
#define IBMVNIC_MAC_ACL 0
#define IBMVNIC_VLAN_ACL 1
	u8 reserved[12];
} __packed __aligned(8);

struct ibmvnic_acl_query {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	__be32 ioba;
	__be32 len;
	u8 reserved2[4];
} __packed __aligned(8);

struct ibmvnic_tune {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	__be32 ioba;
	__be32 len;
	u8 reserved2[4];
} __packed __aligned(8);

struct ibmvnic_request_map {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	__be32 ioba;
	__be32 len;
	u8 reserved2[4];
} __packed __aligned(8);

struct ibmvnic_request_map_rsp {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	u8 reserved2[8];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_request_unmap {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	u8 reserved2[12];
} __packed __aligned(8);

struct ibmvnic_request_unmap_rsp {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	u8 reserved2[8];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_query_map {
	u8 first;
	u8 cmd;
	u8 reserved[14];
} __packed __aligned(8);

struct ibmvnic_query_map_rsp {
	u8 first;
	u8 cmd;
	u8 reserved;
	u8 page_size;
	__be32 tot_pages;
	__be32 free_pages;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

union ibmvnic_crq {
	struct ibmvnic_generic_crq generic;
	struct ibmvnic_version_exchange version_exchange;
	struct ibmvnic_version_exchange version_exchange_rsp;
	struct ibmvnic_capability query_capability;
	struct ibmvnic_capability query_capability_rsp;
	struct ibmvnic_capability request_capability;
	struct ibmvnic_capability request_capability_rsp;
	struct ibmvnic_login login;
	struct ibmvnic_generic_crq login_rsp;
	struct ibmvnic_phys_parms query_phys_parms;
	struct ibmvnic_phys_parms query_phys_parms_rsp;
	struct ibmvnic_phys_parms query_phys_capabilities;
	struct ibmvnic_phys_parms query_phys_capabilities_rsp;
	struct ibmvnic_phys_parms set_phys_parms;
	struct ibmvnic_phys_parms set_phys_parms_rsp;
	struct ibmvnic_logical_link_state logical_link_state;
	struct ibmvnic_logical_link_state logical_link_state_rsp;
	struct ibmvnic_query_ip_offload query_ip_offload;
	struct ibmvnic_query_ip_offload query_ip_offload_rsp;
	struct ibmvnic_control_ip_offload control_ip_offload;
	struct ibmvnic_control_ip_offload control_ip_offload_rsp;
	struct ibmvnic_request_statistics request_statistics;
	struct ibmvnic_generic_crq request_statistics_rsp;
	struct ibmvnic_error_indication error_indication;
	struct ibmvnic_link_state_indication link_state_indication;
	struct ibmvnic_change_mac_addr change_mac_addr;
	struct ibmvnic_change_mac_addr change_mac_addr_rsp;
	struct ibmvnic_multicast_ctrl multicast_ctrl;
	struct ibmvnic_multicast_ctrl multicast_ctrl_rsp;
	struct ibmvnic_get_vpd_size get_vpd_size;
	struct ibmvnic_get_vpd_size_rsp get_vpd_size_rsp;
	struct ibmvnic_get_vpd get_vpd;
	struct ibmvnic_get_vpd_rsp get_vpd_rsp;
	struct ibmvnic_acl_change_indication acl_change_indication;
	struct ibmvnic_acl_query acl_query;
	struct ibmvnic_generic_crq acl_query_rsp;
	struct ibmvnic_tune tune;
	struct ibmvnic_generic_crq tune_rsp;
	struct ibmvnic_request_map request_map;
	struct ibmvnic_request_map_rsp request_map_rsp;
	struct ibmvnic_request_unmap request_unmap;
	struct ibmvnic_request_unmap_rsp request_unmap_rsp;
	struct ibmvnic_query_map query_map;
	struct ibmvnic_query_map_rsp query_map_rsp;
};

enum ibmvnic_rc_codes {
	SUCCESS = 0,
	PARTIALSUCCESS = 1,
	PERMISSION = 2,
	NOMEMORY = 3,
	PARAMETER = 4,
	UNKNOWNCOMMAND = 5,
	ABORTED = 6,
	INVALIDSTATE = 7,
	INVALIDIOBA = 8,
	INVALIDLENGTH = 9,
	UNSUPPORTEDOPTION = 10,
};
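
/* Illustrative sketch: response CRQs embed a struct ibmvnic_rc whose
 * code field takes one of the values above. Hypothetical helper showing
 * the intended check; the driver open-codes this per command.
 */
static inline bool ibmvnic_example_rc_ok(const struct ibmvnic_rc *rc)
{
	return rc->code == SUCCESS;
}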

enum ibmvnic_capabilities {
	MIN_TX_QUEUES = 1,
	MIN_RX_QUEUES = 2,
	MIN_RX_ADD_QUEUES = 3,
	MAX_TX_QUEUES = 4,
	MAX_RX_QUEUES = 5,
	MAX_RX_ADD_QUEUES = 6,
	REQ_TX_QUEUES = 7,
	REQ_RX_QUEUES = 8,
	REQ_RX_ADD_QUEUES = 9,
	MIN_TX_ENTRIES_PER_SUBCRQ = 10,
	MIN_RX_ADD_ENTRIES_PER_SUBCRQ = 11,
	MAX_TX_ENTRIES_PER_SUBCRQ = 12,
	MAX_RX_ADD_ENTRIES_PER_SUBCRQ = 13,
	REQ_TX_ENTRIES_PER_SUBCRQ = 14,
	REQ_RX_ADD_ENTRIES_PER_SUBCRQ = 15,
	TCP_IP_OFFLOAD = 16,
	PROMISC_REQUESTED = 17,
	PROMISC_SUPPORTED = 18,
	MIN_MTU = 19,
	MAX_MTU = 20,
	REQ_MTU = 21,
	MAX_MULTICAST_FILTERS = 22,
	VLAN_HEADER_INSERTION = 23,
	RX_VLAN_HEADER_INSERTION = 24,
	MAX_TX_SG_ENTRIES = 25,
	RX_SG_SUPPORTED = 26,
	RX_SG_REQUESTED = 27,
	OPT_TX_COMP_SUB_QUEUES = 28,
	OPT_RX_COMP_QUEUES = 29,
	OPT_RX_BUFADD_Q_PER_RX_COMP_Q = 30,
	OPT_TX_ENTRIES_PER_SUBCRQ = 31,
	OPT_RXBA_ENTRIES_PER_SUBCRQ = 32,
	TX_RX_DESC_REQ = 33,
};

enum ibmvnic_error_cause {
	ADAPTER_PROBLEM = 0,
	BUS_PROBLEM = 1,
	FW_PROBLEM = 2,
	DD_PROBLEM = 3,
	EEH_RECOVERY = 4,
	FW_UPDATED = 5,
	LOW_MEMORY = 6,
};

enum ibmvnic_commands {
	VERSION_EXCHANGE = 0x01,
	VERSION_EXCHANGE_RSP = 0x81,
	QUERY_CAPABILITY = 0x02,
	QUERY_CAPABILITY_RSP = 0x82,
	REQUEST_CAPABILITY = 0x03,
	REQUEST_CAPABILITY_RSP = 0x83,
	LOGIN = 0x04,
	LOGIN_RSP = 0x84,
	QUERY_PHYS_PARMS = 0x05,
	QUERY_PHYS_PARMS_RSP = 0x85,
	QUERY_PHYS_CAPABILITIES = 0x06,
	QUERY_PHYS_CAPABILITIES_RSP = 0x86,
	SET_PHYS_PARMS = 0x07,
	SET_PHYS_PARMS_RSP = 0x87,
	ERROR_INDICATION = 0x08,
	LOGICAL_LINK_STATE = 0x0C,
	LOGICAL_LINK_STATE_RSP = 0x8C,
	REQUEST_STATISTICS = 0x0D,
	REQUEST_STATISTICS_RSP = 0x8D,
	COLLECT_FW_TRACE = 0x11,
	COLLECT_FW_TRACE_RSP = 0x91,
	LINK_STATE_INDICATION = 0x12,
	CHANGE_MAC_ADDR = 0x13,
	CHANGE_MAC_ADDR_RSP = 0x93,
	MULTICAST_CTRL = 0x14,
	MULTICAST_CTRL_RSP = 0x94,
	GET_VPD_SIZE = 0x15,
	GET_VPD_SIZE_RSP = 0x95,
	GET_VPD = 0x16,
	GET_VPD_RSP = 0x96,
	TUNE = 0x17,
	TUNE_RSP = 0x97,
	QUERY_IP_OFFLOAD = 0x18,
	QUERY_IP_OFFLOAD_RSP = 0x98,
	CONTROL_IP_OFFLOAD = 0x19,
	CONTROL_IP_OFFLOAD_RSP = 0x99,
	ACL_CHANGE_INDICATION = 0x1A,
	ACL_QUERY = 0x1B,
	ACL_QUERY_RSP = 0x9B,
	QUERY_MAP = 0x1D,
	QUERY_MAP_RSP = 0x9D,
	REQUEST_MAP = 0x1E,
	REQUEST_MAP_RSP = 0x9E,
	REQUEST_UNMAP = 0x1F,
	REQUEST_UNMAP_RSP = 0x9F,
	VLAN_CTRL = 0x20,
	VLAN_CTRL_RSP = 0xA0,
};

enum ibmvnic_crq_type {
	IBMVNIC_CRQ_CMD			= 0x80,
	IBMVNIC_CRQ_CMD_RSP		= 0x80,
	IBMVNIC_CRQ_INIT_CMD		= 0xC0,
	IBMVNIC_CRQ_INIT_RSP		= 0xC0,
	IBMVNIC_CRQ_XPORT_EVENT		= 0xFF,
};

enum ibmvfc_crq_format {
	IBMVNIC_CRQ_INIT                 = 0x01,
	IBMVNIC_CRQ_INIT_COMPLETE        = 0x02,
	IBMVNIC_PARTITION_MIGRATED       = 0x06,
	IBMVNIC_DEVICE_FAILOVER          = 0x08,
};
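
/* Illustrative sketch of classifying an incoming CRQ before command
 * dispatch: the first byte separates transport events and init messages
 * (see the two enums above) from ordinary commands, whose generic.cmd
 * then selects a handler. Hypothetical helpers.
 */
static inline bool ibmvnic_example_is_xport_event(const union ibmvnic_crq *crq)
{
	return crq->generic.first == IBMVNIC_CRQ_XPORT_EVENT;
}

static inline bool ibmvnic_example_is_init_msg(const union ibmvnic_crq *crq)
{
	return crq->generic.first == IBMVNIC_CRQ_INIT_CMD &&
	       crq->generic.cmd == IBMVNIC_CRQ_INIT;
}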

struct ibmvnic_crq_queue {
	union ibmvnic_crq *msgs;
	int size, cur;
	dma_addr_t msg_token;
	/* Used for serialization of msgs, cur */
	spinlock_t lock;
	bool active;
	char name[32];
};

union sub_crq {
	struct ibmvnic_generic_scrq generic;
	struct ibmvnic_tx_comp_desc tx_comp;
	struct ibmvnic_tx_desc v1;
	struct ibmvnic_hdr_desc hdr;
	struct ibmvnic_hdr_ext_desc hdr_ext;
	struct ibmvnic_sge_desc sge;
	struct ibmvnic_rx_comp_desc rx_comp;
	struct ibmvnic_rx_buff_add_desc rx_add;
};

struct ibmvnic_ind_xmit_queue {
	union sub_crq *indir_arr;
	dma_addr_t indir_dma;
	int index;
};

struct ibmvnic_sub_crq_queue {
	union sub_crq *msgs;
	int size, cur;
	dma_addr_t msg_token;
	unsigned long crq_num;
	unsigned long hw_irq;
	unsigned int irq;
	unsigned int pool_index;
	int scrq_num;
	/* Used for serialization of msgs, cur */
	spinlock_t lock;
	struct sk_buff *rx_skb_top;
	struct ibmvnic_adapter *adapter;
	struct ibmvnic_ind_xmit_queue ind_buf;
	atomic_t used;
	char name[32];
	u64 handle;
	cpumask_var_t affinity_mask;
} ____cacheline_aligned;

struct ibmvnic_long_term_buff {
	unsigned char *buff;
	dma_addr_t addr;
	u64 size;
	u8 map_id;
};

struct ibmvnic_ltb_set {
	int num_ltbs;
	struct ibmvnic_long_term_buff *ltbs;
};
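
/* Illustrative sketch, assuming buffers of size bufsz are packed
 * consecutively across the LTBs of a set and that each LTB was sized as
 * a multiple of bufsz (so no buffer straddles two LTBs): map a pool-wide
 * buffer index to its backing LTB and the offset within it. Hypothetical
 * helper; the driver keeps its own mapping logic in ibmvnic.c.
 */
static inline unsigned char *
ibmvnic_example_buf_addr(const struct ibmvnic_ltb_set *set,
			 u32 idx, u32 bufsz)
{
	u64 off = (u64)idx * bufsz;
	int i;

	for (i = 0; i < set->num_ltbs; i++) {
		if (off < set->ltbs[i].size)
			return set->ltbs[i].buff + off;
		off -= set->ltbs[i].size;
	}
	return NULL;	/* index beyond the pool */
}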

struct ibmvnic_tx_buff {
	struct sk_buff *skb;
	int index;
	int pool_index;
	int num_entries;
};

struct ibmvnic_tx_pool {
	struct ibmvnic_tx_buff *tx_buff;
	int *free_map;
	int consumer_index;
	int producer_index;
	struct ibmvnic_ltb_set ltb_set;
	int num_buffers;
	int buf_size;
} ____cacheline_aligned;
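
/* Illustrative sketch of the free_map convention implied by the fields
 * above: consumer_index hands out free buffer slots and producer_index
 * returns completed ones, both wrapping at num_buffers, with
 * IBMVNIC_INVALID_MAP marking an empty free_map entry. Hypothetical
 * helper; the driver's locking and batching are omitted.
 */
static inline int ibmvnic_example_claim_tx_slot(struct ibmvnic_tx_pool *pool)
{
	int idx = pool->free_map[pool->consumer_index];

	if (idx == IBMVNIC_INVALID_MAP)
		return -1;	/* pool exhausted */
	pool->free_map[pool->consumer_index] = IBMVNIC_INVALID_MAP;
	pool->consumer_index = (pool->consumer_index + 1) % pool->num_buffers;
	return idx;
}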

struct ibmvnic_rx_buff {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned char *data;
	int size;
	int pool_index;
};

struct ibmvnic_rx_pool {
	struct ibmvnic_rx_buff *rx_buff;
	int size;			/* # of buffers in the pool */
	int index;
	int buff_size;
	atomic_t available;
	int *free_map;
	int next_free;
	int next_alloc;
	int active;
	struct ibmvnic_ltb_set ltb_set;
} ____cacheline_aligned;

struct ibmvnic_vpd {
	unsigned char *buff;
	dma_addr_t dma_addr;
	u64 len;
};

enum vnic_state {VNIC_PROBING = 1,
		 VNIC_PROBED,
		 VNIC_OPENING,
		 VNIC_OPEN,
		 VNIC_CLOSING,
		 VNIC_CLOSED,
		 VNIC_REMOVING,
		 VNIC_REMOVED,
		 VNIC_DOWN};

enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
			   VNIC_RESET_MOBILITY,
			   VNIC_RESET_FATAL,
			   VNIC_RESET_NON_FATAL,
			   VNIC_RESET_TIMEOUT,
			   VNIC_RESET_CHANGE_PARAM,
			   VNIC_RESET_PASSIVE_INIT};

struct ibmvnic_rwi {
	enum ibmvnic_reset_reason reset_reason;
	struct list_head list;
};

struct ibmvnic_tunables {
	u64 rx_queues;
	u64 tx_queues;
	u64 rx_entries;
	u64 tx_entries;
	u64 mtu;
};

struct ibmvnic_adapter {
	struct vio_dev *vdev;
	struct net_device *netdev;
	struct ibmvnic_crq_queue crq;
	u8 mac_addr[ETH_ALEN];
	struct ibmvnic_query_ip_offload_buffer ip_offload_buf;
	dma_addr_t ip_offload_tok;
	struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
	dma_addr_t ip_offload_ctrl_tok;
	u32 msg_enable;
	u32 cur_max_ind_descs;

	/* Vital Product Data (VPD) */
	struct ibmvnic_vpd *vpd;
	char fw_version[32];

	/* Statistics */
	struct ibmvnic_statistics stats;
	dma_addr_t stats_token;
	struct completion stats_done;
	int replenish_no_mem;
	int replenish_add_buff_success;
	int replenish_add_buff_failure;
	int replenish_task_cycles;
	int tx_send_failed;
	int tx_map_failed;

	struct ibmvnic_tx_queue_stats *tx_stats_buffers;
	struct ibmvnic_rx_queue_stats *rx_stats_buffers;

	int phys_link_state;
	int logical_link_state;

	u32 speed;
	u8 duplex;

	/* login data */
	struct ibmvnic_login_buffer *login_buf;
	dma_addr_t login_buf_token;
	int login_buf_sz;

	struct ibmvnic_login_rsp_buffer *login_rsp_buf;
	dma_addr_t login_rsp_buf_token;
	int login_rsp_buf_sz;

	atomic_t running_cap_crqs;

	struct ibmvnic_sub_crq_queue **tx_scrq ____cacheline_aligned;
	struct ibmvnic_sub_crq_queue **rx_scrq ____cacheline_aligned;

	/* rx structs */
	struct napi_struct *napi;
	struct ibmvnic_rx_pool *rx_pool;
	u64 promisc;

	struct ibmvnic_tx_pool *tx_pool;
	struct ibmvnic_tx_pool *tso_pool;
	struct completion probe_done;
	struct completion init_done;
	int init_done_rc;

	struct completion fw_done;
	/* Used for serialization of device commands */
	struct mutex fw_lock;
	int fw_done_rc;

	struct completion reset_done;
	int reset_done_rc;
	bool wait_for_reset;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	/* partner capabilities */
	u64 min_tx_queues;
	u64 min_rx_queues;
	u64 min_rx_add_queues;
	u64 max_tx_queues;
	u64 max_rx_queues;
	u64 max_rx_add_queues;
	u64 req_tx_queues;
	u64 req_rx_queues;
	u64 req_rx_add_queues;
	u64 min_tx_entries_per_subcrq;
	u64 min_rx_add_entries_per_subcrq;
	u64 max_tx_entries_per_subcrq;
	u64 max_rx_add_entries_per_subcrq;
	u64 req_tx_entries_per_subcrq;
	u64 req_rx_add_entries_per_subcrq;
	u64 tcp_ip_offload;
	u64 promisc_requested;
	u64 promisc_supported;
	u64 min_mtu;
	u64 max_mtu;
	u64 req_mtu;
	u64 prev_mtu;
	u64 max_multicast_filters;
	u64 vlan_header_insertion;
	u64 rx_vlan_header_insertion;
	u64 max_tx_sg_entries;
	u64 rx_sg_supported;
	u64 rx_sg_requested;
	u64 opt_tx_comp_sub_queues;
	u64 opt_rx_comp_queues;
	u64 opt_rx_bufadd_q_per_rx_comp_q;
	u64 opt_tx_entries_per_subcrq;
	u64 opt_rxba_entries_per_subcrq;
	__be64 tx_rx_desc_req;
#define MAX_MAP_ID	255
	DECLARE_BITMAP(map_ids, MAX_MAP_ID);
	u32 num_active_rx_scrqs;
	u32 num_active_rx_pools;
	u32 num_active_rx_napi;
	u32 num_active_tx_scrqs;
	u32 num_active_tx_pools;

	u32 prev_rx_pool_size;
	u32 prev_tx_pool_size;
	u32 cur_rx_buf_sz;
	u32 prev_rx_buf_sz;

	struct tasklet_struct tasklet;
	enum vnic_state state;
	/* Used for serialization of state field. When taking both state
	 * and rwi locks, take state lock first.
	 */
	spinlock_t state_lock;
	enum ibmvnic_reset_reason reset_reason;
	struct list_head rwi_list;
	/* Used for serialization of rwi_list. When taking both state
	 * and rwi locks, take state lock first.
	 */
	spinlock_t rwi_lock;
	struct work_struct ibmvnic_reset;
	struct delayed_work ibmvnic_delayed_reset;
	unsigned long resetting;
	/* last device reset time */
	unsigned long last_reset_time;

	bool napi_enabled;
	bool from_passive_init;
	bool login_pending;
	/* protected by rcu */
	bool tx_queues_active;
	bool failover_pending;
	bool force_reset_recovery;

	struct ibmvnic_tunables desired;
	struct ibmvnic_tunables fallback;
};
1075