xref: /linux/drivers/net/ethernet/ibm/ibmvnic.h (revision cbac924200b838cfb8d8b1415113d788089dc50b)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /**************************************************************************/
3 /*                                                                        */
4 /*  IBM System i and System p Virtual NIC Device Driver                   */
5 /*  Copyright (C) 2014 IBM Corp.                                          */
6 /*  Santiago Leon (santi_leon@yahoo.com)                                  */
7 /*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
8 /*  John Allen (jallen@linux.vnet.ibm.com)                                */
9 /*                                                                        */
10 /*                                                                        */
11 /* This module contains the implementation of a virtual ethernet device   */
12 /* for use with IBM i/pSeries LPAR Linux.  It utilizes the logical LAN    */
13 /* option of the RS/6000 Platform Architecture to interface with virtual */
14 /* ethernet NICs that are presented to the partition by the hypervisor.   */
15 /*                                                                        */
16 /**************************************************************************/
17 
#define IBMVNIC_NAME		"ibmvnic"
#define IBMVNIC_DRIVER_VERSION	"1.0.1"
/* Sentinel map id for a long-term buffer not yet mapped with the hypervisor */
#define IBMVNIC_INVALID_MAP	-1
/* Driver-internal status code for a failed open attempt */
#define IBMVNIC_OPEN_FAILED	3

/* basic structures plus 100 2k buffers */
#define IBMVNIC_IO_ENTITLEMENT_DEFAULT	610305

/* Initial module_parameters */
#define IBMVNIC_RX_WEIGHT		16
/* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */
#define IBMVNIC_BUFFS_PER_POOL	100
#define IBMVNIC_MAX_QUEUES	16
#define IBMVNIC_MAX_QUEUE_SZ   4096
#define IBMVNIC_MAX_IND_DESCS  16
/* 32 == sizeof(union sub_crq); the indirect array holds raw descriptors */
#define IBMVNIC_IND_ARR_SZ	(IBMVNIC_MAX_IND_DESCS * 32)

#define IBMVNIC_TSO_BUF_SZ	65536
#define IBMVNIC_TSO_BUFS	64
/* High bit of a TX correlator marks a buffer from the TSO pool */
#define IBMVNIC_TSO_POOL_MASK	0x80000000

/* Largest long-term buffer: the biggest physically contiguous allocation
 * the page allocator can provide (MAX_ORDER - 1 pages).
 */
#define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE)
/* Extra headroom reserved per RX buffer, on top of the MTU */
#define IBMVNIC_BUFFER_HLEN 500

/* Delay between reset attempts; presumably milliseconds — confirm at callers */
#define IBMVNIC_RESET_DELAY 100

/* ethtool private flag strings; bit positions defined inline.
 * NOTE(review): defining a static array in a header duplicates it in every
 * translation unit that includes this file.
 */
static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = {
#define IBMVNIC_USE_SERVER_MAXES 0x1
	"use-server-maxes"
};
48 
/* LOGIN request buffer, DMA-mapped and referenced by ioba/len in the LOGIN
 * CRQ.  The off_* fields are byte offsets (from the start of this buffer)
 * of variable-length arrays that follow this fixed header; the num_* fields
 * give their element counts.
 */
struct ibmvnic_login_buffer {
	__be32 len;		/* total length of the buffer, header + arrays */
	__be32 version;
#define INITIAL_VERSION_LB 1
	__be32 num_txcomp_subcrqs;	/* requested TX completion sub-CRQs */
	__be32 off_txcomp_subcrqs;
	__be32 num_rxcomp_subcrqs;	/* requested RX completion sub-CRQs */
	__be32 off_rxcomp_subcrqs;
	__be32 login_rsp_ioba;	/* I/O bus address of the response buffer */
	__be32 login_rsp_len;
	__be32 client_data_offset;
	__be32 client_data_len;
} __packed __aligned(8);

/* LOGIN response buffer filled in by the vNIC server; same offset/count
 * convention as the request buffer above.
 */
struct ibmvnic_login_rsp_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_LRB 1
	__be32 num_txsubm_subcrqs;	/* granted TX submission sub-CRQs */
	__be32 off_txsubm_subcrqs;
	__be32 num_rxadd_subcrqs;	/* granted RX buffer-add sub-CRQs */
	__be32 off_rxadd_subcrqs;
	__be32 off_rxadd_buff_size;
	__be32 num_supp_tx_desc;	/* supported TX descriptor versions */
	__be32 off_supp_tx_desc;
} __packed __aligned(8);
75 
/* Buffer filled in by the server in response to QUERY_IP_OFFLOAD.
 * The u8 capability fields act as booleans (non-zero = supported),
 * except where explicit values are #defined below.
 */
struct ibmvnic_query_ip_offload_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_IOB 1
	u8 ipv4_chksum;
	u8 ipv6_chksum;
	u8 tcp_ipv4_chksum;
	u8 tcp_ipv6_chksum;
	u8 udp_ipv4_chksum;
	u8 udp_ipv6_chksum;
	u8 large_tx_ipv4;	/* TSO support */
	u8 large_tx_ipv6;
	u8 large_rx_ipv4;	/* large receive support */
	u8 large_rx_ipv6;
	u8 reserved1[14];
	__be16 max_ipv4_header_size;
	__be16 max_ipv6_header_size;
	__be16 max_tcp_header_size;
	__be16 max_udp_header_size;
	__be32 max_large_tx_size;
	__be32 max_large_rx_size;
	u8 reserved2[16];
	u8 ipv6_extension_header;
#define IPV6_EH_NOT_SUPPORTED	0x00
#define IPV6_EH_SUPPORTED_LIM	0x01
#define IPV6_EH_SUPPORTED	0xFF
	u8 tcp_pseudosum_req;	/* whether TX checksum offload needs a pseudo-header sum */
#define TCP_PS_NOT_REQUIRED	0x00
#define TCP_PS_REQUIRED		0x01
	u8 reserved3[30];
	__be16 num_ipv6_ext_headers;
	__be32 off_ipv6_ext_headers;
	u8 reserved4[154];
} __packed __aligned(8);

/* Buffer sent with CONTROL_IP_OFFLOAD to enable/disable the offloads
 * advertised by the query buffer above (non-zero = enable).
 */
struct ibmvnic_control_ip_offload_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_IOB 1
	u8 ipv4_chksum;
	u8 ipv6_chksum;
	u8 tcp_ipv4_chksum;
	u8 tcp_ipv6_chksum;
	u8 udp_ipv4_chksum;
	u8 udp_ipv6_chksum;
	u8 large_tx_ipv4;
	u8 large_tx_ipv6;
	u8 bad_packet_rx;	/* deliver packets with bad checksums too */
	u8 large_rx_ipv4;
	u8 large_rx_ipv6;
	u8 reserved4[111];
} __packed __aligned(8);
128 
/* Description of a firmware component whose trace data can be collected
 * (see COLLECT_FW_TRACE in enum ibmvnic_commands).
 */
struct ibmvnic_fw_component {
	u8 name[48];
	__be32 trace_buff_size;
	u8 correlator;		/* identifies this component in trace requests */
	u8 trace_level;
	u8 parent_correlator;
	u8 error_check_level;
	u8 trace_on;		/* non-zero when tracing is active */
	u8 reserved[7];
	u8 description[192];
} __packed __aligned(8);

/* One entry in a firmware trace buffer */
struct ibmvnic_fw_trace_entry {
	__be32 trace_id;
	u8 num_valid_data;	/* count of valid trace_data[] words */
	u8 reserved[3];
	__be64 pmc_registers;
	__be64 timebase;	/* timestamp, presumably PowerPC timebase ticks */
	__be64 trace_data[5];
} __packed __aligned(8);
149 
/* Device statistics buffer (256 bytes) DMA'd in by the server in response
 * to REQUEST_STATISTICS.  All counters are big-endian on the wire.
 */
struct ibmvnic_statistics {
	__be32 version;
	__be32 promiscuous;
	__be64 rx_packets;
	__be64 rx_bytes;
	__be64 tx_packets;
	__be64 tx_bytes;
	__be64 ucast_tx_packets;
	__be64 ucast_rx_packets;
	__be64 mcast_tx_packets;
	__be64 mcast_rx_packets;
	__be64 bcast_tx_packets;
	__be64 bcast_rx_packets;
	__be64 align_errors;
	__be64 fcs_errors;
	__be64 single_collision_frames;
	__be64 multi_collision_frames;
	__be64 sqe_test_errors;
	__be64 deferred_tx;
	__be64 late_collisions;
	__be64 excess_collisions;
	__be64 internal_mac_tx_errors;
	__be64 carrier_sense;
	__be64 too_long_frames;
	__be64 internal_mac_rx_errors;
	u8 reserved[72];
} __packed __aligned(8);

/* Per-TX-queue software counters; NUM_TX_STATS must equal the number of
 * u64 members below (used for ethtool stats sizing).
 */
#define NUM_TX_STATS 3
struct ibmvnic_tx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 dropped_packets;
};

/* Per-RX-queue software counters; NUM_RX_STATS must equal the number of
 * u64 members below.
 */
#define NUM_RX_STATS 3
struct ibmvnic_rx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 interrupts;
};
191 
/* Buffer returned for ACL_QUERY: MAC/VLAN access-control restrictions.
 * num_*/offset_* describe variable-length arrays following this header.
 */
struct ibmvnic_acl_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_IOB 1
	u8 mac_acls_restrict;	/* non-zero when MAC addresses are restricted */
	u8 vlan_acls_restrict;	/* non-zero when VLAN ids are restricted */
	u8 reserved1[22];
	__be32 num_mac_addrs;
	__be32 offset_mac_addrs;
	__be32 num_vlan_ids;
	__be32 offset_vlan_ids;
	u8 reserved2[80];
} __packed __aligned(8);
205 
/* descriptors have been changed, how should this be defined?  1? 4? */

#define IBMVNIC_TX_DESC_VERSIONS 3

/* is this still needed? */
/* TX completion sub-CRQ element: up to 5 completed TX correlators with
 * their per-frame return codes.
 */
struct ibmvnic_tx_comp_desc {
	u8 first;
	u8 num_comps;		/* how many of rcs[]/correlators[] are valid */
	__be16 rcs[5];
	__be32 correlators[5];
} __packed __aligned(8);
217 
/* some flags that were included in the v0 descriptor, which is gone.
 * Only IBMVNIC_TCP_CHKSUM and IBMVNIC_UDP_CHKSUM are still referenced,
 * and only by an offload_flags variable that does not appear to be
 * used anywhere; these can probably be removed.
 */
223 
224 #define IBMVNIC_TCP_CHKSUM		0x20
225 #define IBMVNIC_UDP_CHKSUM		0x08
226 
/* Version-1 TX submission descriptor (32 bytes), queued on a TX sub-CRQ.
 * Describes one scatter-gather entry; additional SGEs and header data go
 * in ibmvnic_sge_desc / ibmvnic_hdr_desc elements that follow.
 */
struct ibmvnic_tx_desc {
	u8 first;
	u8 type;

#define IBMVNIC_TX_DESC 0x10
	u8 n_crq_elem;		/* total sub-CRQ elements in this submission */
	u8 n_sge;		/* total scatter-gather entries */
	u8 flags1;
#define IBMVNIC_TX_COMP_NEEDED		0x80
#define IBMVNIC_TX_CHKSUM_OFFLOAD	0x40
#define IBMVNIC_TX_LSO			0x20
#define IBMVNIC_TX_PROT_TCP		0x10
#define IBMVNIC_TX_PROT_UDP		0x08
#define IBMVNIC_TX_PROT_IPV4		0x04
#define IBMVNIC_TX_PROT_IPV6		0x02
#define IBMVNIC_TX_VLAN_PRESENT		0x01
	u8 flags2;
#define IBMVNIC_TX_VLAN_INSERT		0x80
	__be16 mss;		/* segment size when IBMVNIC_TX_LSO is set */
	u8 reserved[4];
	__be32 correlator;	/* echoed back in the TX completion */
	__be16 vlan_id;
	__be16 dma_reg;		/* DMA map id (window) for ioba */
	__be32 sge_len;
	__be64 ioba;		/* I/O bus address of the frame data */
} __packed __aligned(8);
253 
/* Inline packet-header descriptor: carries up to 24 bytes of L2/L3/L4
 * header data alongside a TX submission (needed for LSO/checksum offload).
 */
struct ibmvnic_hdr_desc {
	u8 first;
	u8 type;
#define IBMVNIC_HDR_DESC		0x11
	u8 len;			/* total header bytes across hdr + ext descs */
	u8 l2_len;
	__be16 l3_len;
	u8 l4_len;
	u8 flag;
	u8 data[24];
} __packed __aligned(8);

/* Continuation of an ibmvnic_hdr_desc when the headers exceed 24 bytes */
struct ibmvnic_hdr_ext_desc {
	u8 first;
	u8 type;
#define IBMVNIC_HDR_EXT_DESC		0x12
	u8 len;			/* bytes of data[] that are valid */
	u8 data[29];
} __packed __aligned(8);

/* Two additional scatter-gather entries for a multi-fragment TX frame */
struct ibmvnic_sge_desc {
	u8 first;
	u8 type;
#define IBMVNIC_SGE_DESC		0x30
	__be16 sge1_dma_reg;
	__be32 sge1_len;
	__be64 sge1_ioba;
	__be16 reserved;
	__be16 sge2_dma_reg;
	__be32 sge2_len;
	__be64 sge2_ioba;
} __packed __aligned(8);
286 
/* RX completion sub-CRQ element: one received frame, identified by the
 * correlator the driver supplied when posting the buffer.
 */
struct ibmvnic_rx_comp_desc {
	u8 first;
	u8 flags;
#define IBMVNIC_IP_CHKSUM_GOOD		0x80
#define IBMVNIC_TCP_UDP_CHKSUM_GOOD	0x40
#define IBMVNIC_END_FRAME			0x20
#define IBMVNIC_EXACT_MC			0x10
#define IBMVNIC_VLAN_STRIPPED			0x08
	__be16 off_frame_data;	/* offset of packet data within the buffer */
	__be32 len;		/* received frame length */
	__be64 correlator;
	__be16 vlan_tci;	/* valid when IBMVNIC_VLAN_STRIPPED is set */
	__be16 rc;
	u8 reserved[12];
} __packed __aligned(8);

/* Generic view of any 32-byte sub-CRQ element; 'first' selects the type */
struct ibmvnic_generic_scrq {
	u8 first;
	u8 reserved[31];
} __packed __aligned(8);

/* RX buffer-add element posted by the driver to hand a receive buffer
 * to the server.
 */
struct ibmvnic_rx_buff_add_desc {
	u8 first;
	u8 reserved[7];
	__be64 correlator;	/* driver cookie, echoed in the completion */
	__be32 ioba;
	u8 map_id;		/* long-term-buffer map id */
	__be32 len:24;		/* buffer length; 24-bit big-endian field */
	u8 reserved2[8];
} __packed __aligned(8);
317 
/* Return-code trailer common to most CRQ responses */
struct ibmvnic_rc {
	u8 code; /* one of enum ibmvnic_rc_codes */
	u8 detailed_data[3];
} __packed __aligned(4);

/* Generic view of any 16-byte CRQ element: type byte, command byte,
 * command-specific payload, and the return-code trailer.
 */
struct ibmvnic_generic_crq {
	u8 first;
	u8 cmd;
	u8 params[10];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

/* VERSION_EXCHANGE / VERSION_EXCHANGE_RSP payload */
struct ibmvnic_version_exchange {
	u8 first;
	u8 cmd;
	__be16 version;
#define IBMVNIC_INITIAL_VERSION 1
	u8 reserved[8];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

/* QUERY/REQUEST_CAPABILITY payload: one capability id and its value */
struct ibmvnic_capability {
	u8 first;
	u8 cmd;
	__be16 capability; /* one of ibmvnic_capabilities */
	__be64 number;
	struct ibmvnic_rc rc;
} __packed __aligned(8);
346 
/* LOGIN CRQ: points the server at the DMA-mapped ibmvnic_login_buffer */
struct ibmvnic_login {
	u8 first;
	u8 cmd;
	u8 reserved[6];
	__be32 ioba;	/* I/O bus address of the login buffer */
	__be32 len;
} __packed __aligned(8);

/* QUERY/SET_PHYS_PARMS payload: physical port state, speed and MTU */
struct ibmvnic_phys_parms {
	u8 first;
	u8 cmd;
	u8 flags1;
#define IBMVNIC_EXTERNAL_LOOPBACK	0x80
#define IBMVNIC_INTERNAL_LOOPBACK	0x40
#define IBMVNIC_PROMISC		0x20
#define IBMVNIC_PHYS_LINK_ACTIVE	0x10
#define IBMVNIC_AUTONEG_DUPLEX	0x08
#define IBMVNIC_FULL_DUPLEX	0x04
#define IBMVNIC_HALF_DUPLEX	0x02
#define IBMVNIC_CAN_CHG_PHYS_PARMS	0x01
	u8 flags2;
#define IBMVNIC_LOGICAL_LNK_ACTIVE 0x80
	__be32 speed;	/* bitmask of the link-speed flags below */
#define IBMVNIC_AUTONEG		0x80000000
#define IBMVNIC_10MBPS		0x40000000
#define IBMVNIC_100MBPS		0x20000000
#define IBMVNIC_1GBPS		0x10000000
#define IBMVNIC_10GBPS		0x08000000
#define IBMVNIC_40GBPS		0x04000000
#define IBMVNIC_100GBPS		0x02000000
#define IBMVNIC_25GBPS		0x01000000
#define IBMVNIC_50GBPS		0x00800000
#define IBMVNIC_200GBPS		0x00400000
	__be32 mtu;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

/* LOGICAL_LINK_STATE payload: set or query the logical link */
struct ibmvnic_logical_link_state {
	u8 first;
	u8 cmd;
	u8 link_state;
#define IBMVNIC_LOGICAL_LNK_DN 0x00
#define IBMVNIC_LOGICAL_LNK_UP 0x01
#define IBMVNIC_LOGICAL_LNK_QUERY 0xff
	u8 reserved[9];
	struct ibmvnic_rc rc;
} __packed __aligned(8);
394 
/* QUERY_IP_OFFLOAD CRQ: server fills the buffer at ioba/len with an
 * ibmvnic_query_ip_offload_buffer.
 */
struct ibmvnic_query_ip_offload {
	u8 first;
	u8 cmd;
	u8 reserved[2];
	__be32 len;
	__be32 ioba;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

/* CONTROL_IP_OFFLOAD CRQ: applies the settings in the
 * ibmvnic_control_ip_offload_buffer at ioba/len.
 * Note ioba precedes len here, the reverse of the query CRQ above.
 */
struct ibmvnic_control_ip_offload {
	u8 first;
	u8 cmd;
	u8 reserved[2];
	__be32 ioba;
	__be32 len;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

/* REQUEST_STATISTICS CRQ: server DMAs an ibmvnic_statistics buffer
 * to ioba/len.
 */
struct ibmvnic_request_statistics {
	u8 first;
	u8 cmd;
	u8 flags;
#define IBMVNIC_PHYSICAL_PORT	0x80	/* physical- vs logical-port stats */
	u8 reserved1;
	__be32 ioba;
	__be32 len;
	u8 reserved[4];
} __packed __aligned(8);

/* ERROR_INDICATION CRQ: unsolicited error report from the server */
struct ibmvnic_error_indication {
	u8 first;
	u8 cmd;
	u8 flags;
#define IBMVNIC_FATAL_ERROR	0x80
	u8 reserved1;
	__be32 error_id;
	__be32 detail_error_sz;
	__be16 error_cause;	/* one of enum ibmvnic_error_cause */
	u8 reserved2[2];
} __packed __aligned(8);
435 
/* LINK_STATE_INDICATION CRQ: unsolicited link state change notification */
struct ibmvnic_link_state_indication {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	u8 phys_link_state;
	u8 logical_link_state;
	u8 reserved2[10];
} __packed __aligned(8);

/* CHANGE_MAC_ADDR CRQ; the response carries the (possibly firmware-chosen)
 * address back in mac_addr.
 */
struct ibmvnic_change_mac_addr {
	u8 first;
	u8 cmd;
	u8 mac_addr[6];
	u8 reserved[4];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

/* MULTICAST_CTRL CRQ: add/remove one multicast address, or toggle
 * all-multicast reception via the ENABLE_ALL/DISABLE_ALL flags.
 */
struct ibmvnic_multicast_ctrl {
	u8 first;
	u8 cmd;
	u8 mac_addr[6];
	u8 flags;
#define IBMVNIC_ENABLE_MC		0x80
#define IBMVNIC_DISABLE_MC		0x40
#define IBMVNIC_ENABLE_ALL		0x20
#define IBMVNIC_DISABLE_ALL	0x10
	u8 reserved1;
	__be16 reserved2; /* was num_enabled_mc_addr; */
	struct ibmvnic_rc rc;
} __packed __aligned(8);
466 
/* GET_VPD_SIZE CRQ: ask how large a buffer the VPD needs */
struct ibmvnic_get_vpd_size {
	u8 first;
	u8 cmd;
	u8 reserved[14];
} __packed __aligned(8);

/* GET_VPD_SIZE_RSP CRQ: required VPD buffer length */
struct ibmvnic_get_vpd_size_rsp {
	u8 first;
	u8 cmd;
	u8 reserved[2];
	__be64 len;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

/* GET_VPD CRQ: server DMAs the Vital Product Data into ioba/len */
struct ibmvnic_get_vpd {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	__be32 ioba;
	__be32 len;
	u8 reserved[4];
} __packed __aligned(8);

/* GET_VPD_RSP CRQ: completion status for GET_VPD */
struct ibmvnic_get_vpd_rsp {
	u8 first;
	u8 cmd;
	u8 reserved[10];
	struct ibmvnic_rc rc;
} __packed __aligned(8);
496 
/* ACL_CHANGE_INDICATION CRQ: notifies that the MAC or VLAN ACLs changed */
struct ibmvnic_acl_change_indication {
	u8 first;
	u8 cmd;
	__be16 change_type;
#define IBMVNIC_MAC_ACL 0
#define IBMVNIC_VLAN_ACL 1
	u8 reserved[12];
} __packed __aligned(8);

/* ACL_QUERY CRQ: server fills ioba/len with an ibmvnic_acl_buffer */
struct ibmvnic_acl_query {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	__be32 ioba;
	__be32 len;
	u8 reserved2[4];
} __packed __aligned(8);

/* TUNE CRQ: passes an opaque tuning buffer at ioba/len to the server */
struct ibmvnic_tune {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	__be32 ioba;
	__be32 len;
	u8 reserved2[4];
} __packed __aligned(8);
523 
/* REQUEST_MAP CRQ: register a long-term buffer (at ioba, len bytes) with
 * the server under the driver-chosen map_id.
 */
struct ibmvnic_request_map {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	__be32 ioba;
	__be32 len;
	u8 reserved2[4];
} __packed __aligned(8);

/* REQUEST_MAP_RSP CRQ: completion status for REQUEST_MAP */
struct ibmvnic_request_map_rsp {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	u8 reserved2[8];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

/* REQUEST_UNMAP CRQ: release the long-term buffer registered as map_id */
struct ibmvnic_request_unmap {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	u8 reserved2[12];
} __packed __aligned(8);

/* REQUEST_UNMAP_RSP CRQ: completion status for REQUEST_UNMAP */
struct ibmvnic_request_unmap_rsp {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	u8 reserved2[8];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

/* QUERY_MAP CRQ: ask the server for mapping-resource availability */
struct ibmvnic_query_map {
	u8 first;
	u8 cmd;
	u8 reserved[14];
} __packed __aligned(8);

/* QUERY_MAP_RSP CRQ: page size and total/free mappable pages */
struct ibmvnic_query_map_rsp {
	u8 first;
	u8 cmd;
	u8 reserved;
	u8 page_size;
	__be32 tot_pages;
	__be32 free_pages;
	struct ibmvnic_rc rc;
} __packed __aligned(8);
575 
/* One 16-byte CRQ element, viewed as whichever command-specific layout
 * matches its cmd byte.  Every member struct is exactly 16 bytes; the
 * generic view is used to read 'first'/'cmd' before dispatching.
 */
union ibmvnic_crq {
	struct ibmvnic_generic_crq generic;
	struct ibmvnic_version_exchange version_exchange;
	struct ibmvnic_version_exchange version_exchange_rsp;
	struct ibmvnic_capability query_capability;
	struct ibmvnic_capability query_capability_rsp;
	struct ibmvnic_capability request_capability;
	struct ibmvnic_capability request_capability_rsp;
	struct ibmvnic_login login;
	struct ibmvnic_generic_crq login_rsp;
	struct ibmvnic_phys_parms query_phys_parms;
	struct ibmvnic_phys_parms query_phys_parms_rsp;
	struct ibmvnic_phys_parms query_phys_capabilities;
	struct ibmvnic_phys_parms query_phys_capabilities_rsp;
	struct ibmvnic_phys_parms set_phys_parms;
	struct ibmvnic_phys_parms set_phys_parms_rsp;
	struct ibmvnic_logical_link_state logical_link_state;
	struct ibmvnic_logical_link_state logical_link_state_rsp;
	struct ibmvnic_query_ip_offload query_ip_offload;
	struct ibmvnic_query_ip_offload query_ip_offload_rsp;
	struct ibmvnic_control_ip_offload control_ip_offload;
	struct ibmvnic_control_ip_offload control_ip_offload_rsp;
	struct ibmvnic_request_statistics request_statistics;
	struct ibmvnic_generic_crq request_statistics_rsp;
	struct ibmvnic_error_indication error_indication;
	struct ibmvnic_link_state_indication link_state_indication;
	struct ibmvnic_change_mac_addr change_mac_addr;
	struct ibmvnic_change_mac_addr change_mac_addr_rsp;
	struct ibmvnic_multicast_ctrl multicast_ctrl;
	struct ibmvnic_multicast_ctrl multicast_ctrl_rsp;
	struct ibmvnic_get_vpd_size get_vpd_size;
	struct ibmvnic_get_vpd_size_rsp get_vpd_size_rsp;
	struct ibmvnic_get_vpd get_vpd;
	struct ibmvnic_get_vpd_rsp get_vpd_rsp;
	struct ibmvnic_acl_change_indication acl_change_indication;
	struct ibmvnic_acl_query acl_query;
	struct ibmvnic_generic_crq acl_query_rsp;
	struct ibmvnic_tune tune;
	struct ibmvnic_generic_crq tune_rsp;
	struct ibmvnic_request_map request_map;
	struct ibmvnic_request_map_rsp request_map_rsp;
	struct ibmvnic_request_unmap request_unmap;
	struct ibmvnic_request_unmap_rsp request_unmap_rsp;
	struct ibmvnic_query_map query_map;
	struct ibmvnic_query_map_rsp query_map_rsp;
};
622 
/* Values carried in ibmvnic_rc.code */
enum ibmvnic_rc_codes {
	SUCCESS = 0,
	PARTIALSUCCESS = 1,
	PERMISSION = 2,
	NOMEMORY = 3,
	PARAMETER = 4,
	UNKNOWNCOMMAND = 5,
	ABORTED = 6,
	INVALIDSTATE = 7,
	INVALIDIOBA = 8,
	INVALIDLENGTH = 9,
	UNSUPPORTEDOPTION = 10,
};

/* Capability ids for QUERY/REQUEST_CAPABILITY; each maps 1:1 onto a u64
 * field of struct ibmvnic_adapter (see the "partner capabilities" section).
 */
enum ibmvnic_capabilities {
	MIN_TX_QUEUES = 1,
	MIN_RX_QUEUES = 2,
	MIN_RX_ADD_QUEUES = 3,
	MAX_TX_QUEUES = 4,
	MAX_RX_QUEUES = 5,
	MAX_RX_ADD_QUEUES = 6,
	REQ_TX_QUEUES = 7,
	REQ_RX_QUEUES = 8,
	REQ_RX_ADD_QUEUES = 9,
	MIN_TX_ENTRIES_PER_SUBCRQ = 10,
	MIN_RX_ADD_ENTRIES_PER_SUBCRQ = 11,
	MAX_TX_ENTRIES_PER_SUBCRQ = 12,
	MAX_RX_ADD_ENTRIES_PER_SUBCRQ = 13,
	REQ_TX_ENTRIES_PER_SUBCRQ = 14,
	REQ_RX_ADD_ENTRIES_PER_SUBCRQ = 15,
	TCP_IP_OFFLOAD = 16,
	PROMISC_REQUESTED = 17,
	PROMISC_SUPPORTED = 18,
	MIN_MTU = 19,
	MAX_MTU = 20,
	REQ_MTU = 21,
	MAX_MULTICAST_FILTERS = 22,
	VLAN_HEADER_INSERTION = 23,
	RX_VLAN_HEADER_INSERTION = 24,
	MAX_TX_SG_ENTRIES = 25,
	RX_SG_SUPPORTED = 26,
	RX_SG_REQUESTED = 27,
	OPT_TX_COMP_SUB_QUEUES = 28,
	OPT_RX_COMP_QUEUES = 29,
	OPT_RX_BUFADD_Q_PER_RX_COMP_Q = 30,
	OPT_TX_ENTRIES_PER_SUBCRQ = 31,
	OPT_RXBA_ENTRIES_PER_SUBCRQ = 32,
	TX_RX_DESC_REQ = 33,
};

/* Values for ibmvnic_error_indication.error_cause */
enum ibmvnic_error_cause {
	ADAPTER_PROBLEM = 0,
	BUS_PROBLEM = 1,
	FW_PROBLEM = 2,
	DD_PROBLEM = 3,
	EEH_RECOVERY = 4,
	FW_UPDATED = 5,
	LOW_MEMORY = 6,
};
682 
/* CRQ command codes (the 'cmd' byte).  Responses set the high bit of
 * the corresponding request code (0x04 LOGIN -> 0x84 LOGIN_RSP).
 */
enum ibmvnic_commands {
	VERSION_EXCHANGE = 0x01,
	VERSION_EXCHANGE_RSP = 0x81,
	QUERY_CAPABILITY = 0x02,
	QUERY_CAPABILITY_RSP = 0x82,
	REQUEST_CAPABILITY = 0x03,
	REQUEST_CAPABILITY_RSP = 0x83,
	LOGIN = 0x04,
	LOGIN_RSP = 0x84,
	QUERY_PHYS_PARMS = 0x05,
	QUERY_PHYS_PARMS_RSP = 0x85,
	QUERY_PHYS_CAPABILITIES = 0x06,
	QUERY_PHYS_CAPABILITIES_RSP = 0x86,
	SET_PHYS_PARMS = 0x07,
	SET_PHYS_PARMS_RSP = 0x87,
	ERROR_INDICATION = 0x08,
	LOGICAL_LINK_STATE = 0x0C,
	LOGICAL_LINK_STATE_RSP = 0x8C,
	REQUEST_STATISTICS = 0x0D,
	REQUEST_STATISTICS_RSP = 0x8D,
	COLLECT_FW_TRACE = 0x11,
	COLLECT_FW_TRACE_RSP = 0x91,
	LINK_STATE_INDICATION = 0x12,
	CHANGE_MAC_ADDR = 0x13,
	CHANGE_MAC_ADDR_RSP = 0x93,
	MULTICAST_CTRL = 0x14,
	MULTICAST_CTRL_RSP = 0x94,
	GET_VPD_SIZE = 0x15,
	GET_VPD_SIZE_RSP = 0x95,
	GET_VPD = 0x16,
	GET_VPD_RSP = 0x96,
	TUNE = 0x17,
	TUNE_RSP = 0x97,
	QUERY_IP_OFFLOAD = 0x18,
	QUERY_IP_OFFLOAD_RSP = 0x98,
	CONTROL_IP_OFFLOAD = 0x19,
	CONTROL_IP_OFFLOAD_RSP = 0x99,
	ACL_CHANGE_INDICATION = 0x1A,
	ACL_QUERY = 0x1B,
	ACL_QUERY_RSP = 0x9B,
	QUERY_MAP = 0x1D,
	QUERY_MAP_RSP = 0x9D,
	REQUEST_MAP = 0x1E,
	REQUEST_MAP_RSP = 0x9E,
	REQUEST_UNMAP = 0x1F,
	REQUEST_UNMAP_RSP = 0x9F,
	VLAN_CTRL = 0x20,
	VLAN_CTRL_RSP = 0xA0,
};
732 
/* CRQ element type codes (the 'first' byte).  CMD and CMD_RSP deliberately
 * share a value, as do INIT_CMD and INIT_RSP — direction disambiguates.
 */
enum ibmvnic_crq_type {
	IBMVNIC_CRQ_CMD			= 0x80,
	IBMVNIC_CRQ_CMD_RSP		= 0x80,
	IBMVNIC_CRQ_INIT_CMD		= 0xC0,
	IBMVNIC_CRQ_INIT_RSP		= 0xC0,
	IBMVNIC_CRQ_XPORT_EVENT		= 0xFF,
};

/* Second byte of init/transport-event CRQs.
 * NOTE(review): the "ibmvfc_" prefix looks like a slip from the ibmvfc
 * driver; renaming would touch callers, so it is documented rather than
 * changed here.
 */
enum ibmvfc_crq_format {
	IBMVNIC_CRQ_INIT                 = 0x01,
	IBMVNIC_CRQ_INIT_COMPLETE        = 0x02,
	IBMVNIC_PARTITION_MIGRATED       = 0x06,
	IBMVNIC_DEVICE_FAILOVER          = 0x08,
};
747 
/* Driver-side state for the main CRQ shared with the hypervisor */
struct ibmvnic_crq_queue {
	union ibmvnic_crq *msgs;	/* DMA-coherent ring of CRQ elements */
	int size, cur;			/* ring size and current read index */
	dma_addr_t msg_token;		/* DMA address of msgs */
	/* Used for serialization of msgs, cur */
	spinlock_t lock;
	bool active;			/* CRQ registered and usable */
	char name[32];			/* irq name */
};

/* One 32-byte sub-CRQ element, viewed as whichever descriptor layout
 * matches its 'first' byte.
 */
union sub_crq {
	struct ibmvnic_generic_scrq generic;
	struct ibmvnic_tx_comp_desc tx_comp;
	struct ibmvnic_tx_desc v1;
	struct ibmvnic_hdr_desc hdr;
	struct ibmvnic_hdr_ext_desc hdr_ext;
	struct ibmvnic_sge_desc sge;
	struct ibmvnic_rx_comp_desc rx_comp;
	struct ibmvnic_rx_buff_add_desc rx_add;
};

/* Staging array for batching descriptors into one H_SEND_SUB_CRQ_INDIRECT
 * hypervisor call (up to IBMVNIC_MAX_IND_DESCS entries).
 */
struct ibmvnic_ind_xmit_queue {
	union sub_crq *indir_arr;	/* DMA-mapped descriptor array */
	dma_addr_t indir_dma;
	int index;			/* next free slot in indir_arr */
};
774 
/* Driver-side state for one TX or RX sub-CRQ */
struct ibmvnic_sub_crq_queue {
	union sub_crq *msgs;		/* DMA-coherent ring of sub-CRQ elements */
	int size, cur;			/* ring size and current read index */
	dma_addr_t msg_token;		/* DMA address of msgs */
	unsigned long crq_num;		/* hypervisor handle for this sub-CRQ */
	unsigned long hw_irq;		/* hypervisor interrupt source number */
	unsigned int irq;		/* Linux irq number */
	unsigned int pool_index;	/* associated rx/tx pool */
	int scrq_num;			/* queue index within the adapter */
	/* Used for serialization of msgs, cur */
	spinlock_t lock;
	struct sk_buff *rx_skb_top;	/* partially assembled multi-desc skb */
	struct ibmvnic_adapter *adapter;
	struct ibmvnic_ind_xmit_queue ind_buf;
	atomic_t used;			/* in-flight entries on this queue */
	char name[32];			/* irq name */
	u64 handle;
} ____cacheline_aligned;
793 
/* A buffer registered with the server via REQUEST_MAP and referenced by
 * descriptors through its map_id.
 */
struct ibmvnic_long_term_buff {
	unsigned char *buff;	/* kernel virtual address */
	dma_addr_t addr;	/* DMA address passed in REQUEST_MAP */
	u64 size;
	u8 map_id;		/* id the server knows this buffer by */
};

/* Per-skb TX bookkeeping, indexed by the descriptor correlator */
struct ibmvnic_tx_buff {
	struct sk_buff *skb;
	int index;		/* slot within the owning tx pool */
	int pool_index;
	int num_entries;	/* sub-CRQ elements consumed by this skb */
};

/* A pool of TX buffers carved out of one long-term buffer */
struct ibmvnic_tx_pool {
	struct ibmvnic_tx_buff *tx_buff;
	int *free_map;		/* stack of free tx_buff indices */
	int consumer_index;	/* next free_map slot to take from */
	int producer_index;	/* next free_map slot to return to */
	struct ibmvnic_long_term_buff long_term_buff;
	int num_buffers;
	int buf_size;		/* bytes per buffer within the LTB */
} ____cacheline_aligned;

/* Per-buffer RX bookkeeping, referenced by the completion correlator */
struct ibmvnic_rx_buff {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned char *data;
	int size;
	int pool_index;
};

/* A pool of RX buffers backing one RX buffer-add sub-CRQ */
struct ibmvnic_rx_pool {
	struct ibmvnic_rx_buff *rx_buff;
	int size;			/* # of buffers in the pool */
	int index;			/* pool number within the adapter */
	int buff_size;
	atomic_t available;		/* buffers currently posted to the server */
	int *free_map;
	int next_free;
	int next_alloc;
	int active;
	struct ibmvnic_long_term_buff long_term_buff;
} ____cacheline_aligned;

/* Vital Product Data retrieved via GET_VPD */
struct ibmvnic_vpd {
	unsigned char *buff;
	dma_addr_t dma_addr;
	u64 len;
};
844 
/* Adapter lifecycle state, guarded by ibmvnic_adapter.state_lock */
enum vnic_state {VNIC_PROBING = 1,
		 VNIC_PROBED,
		 VNIC_OPENING,
		 VNIC_OPEN,
		 VNIC_CLOSING,
		 VNIC_CLOSED,
		 VNIC_REMOVING,
		 VNIC_REMOVED,
		 VNIC_DOWN};

/* Why a reset was requested; drives how much state is torn down */
enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
			   VNIC_RESET_MOBILITY,
			   VNIC_RESET_FATAL,
			   VNIC_RESET_NON_FATAL,
			   VNIC_RESET_TIMEOUT,
			   VNIC_RESET_CHANGE_PARAM,
			   VNIC_RESET_PASSIVE_INIT};

/* Reset work item ("reset work item"), queued on adapter->rwi_list */
struct ibmvnic_rwi {
	enum ibmvnic_reset_reason reset_reason;
	struct list_head list;
};

/* Queue/ring/MTU settings requested by the user vs. the fallback values
 * to restore if the device rejects them.
 */
struct ibmvnic_tunables {
	u64 rx_queues;
	u64 tx_queues;
	u64 rx_entries;
	u64 tx_entries;
	u64 mtu;
};
875 
/* Per-device driver state, allocated as netdev private data */
struct ibmvnic_adapter {
	struct vio_dev *vdev;
	struct net_device *netdev;
	struct ibmvnic_crq_queue crq;
	u8 mac_addr[ETH_ALEN];
	/* DMA-mapped buffers exchanged during IP offload negotiation */
	struct ibmvnic_query_ip_offload_buffer ip_offload_buf;
	dma_addr_t ip_offload_tok;
	struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
	dma_addr_t ip_offload_ctrl_tok;
	u32 msg_enable;
	u32 priv_flags;		/* ethtool private flags (IBMVNIC_USE_SERVER_MAXES) */

	/* Vital Product Data (VPD) */
	struct ibmvnic_vpd *vpd;
	char fw_version[32];

	/* Statistics */
	struct ibmvnic_statistics stats;	/* DMA target for REQUEST_STATISTICS */
	dma_addr_t stats_token;
	struct completion stats_done;		/* signalled on REQUEST_STATISTICS_RSP */
	int replenish_no_mem;
	int replenish_add_buff_success;
	int replenish_add_buff_failure;
	int replenish_task_cycles;
	int tx_send_failed;
	int tx_map_failed;

	/* per-queue counter arrays, one entry per tx/rx sub-CRQ */
	struct ibmvnic_tx_queue_stats *tx_stats_buffers;
	struct ibmvnic_rx_queue_stats *rx_stats_buffers;

	int phys_link_state;
	int logical_link_state;

	u32 speed;
	u8 duplex;

	/* login data */
	struct ibmvnic_login_buffer *login_buf;
	dma_addr_t login_buf_token;
	int login_buf_sz;

	struct ibmvnic_login_rsp_buffer *login_rsp_buf;
	dma_addr_t login_rsp_buf_token;
	int login_rsp_buf_sz;

	/* outstanding QUERY/REQUEST_CAPABILITY CRQs awaiting responses */
	atomic_t running_cap_crqs;

	struct ibmvnic_sub_crq_queue **tx_scrq ____cacheline_aligned;
	struct ibmvnic_sub_crq_queue **rx_scrq ____cacheline_aligned;

	/* rx structs */
	struct napi_struct *napi;
	struct ibmvnic_rx_pool *rx_pool;
	u64 promisc;

	struct ibmvnic_tx_pool *tx_pool;
	struct ibmvnic_tx_pool *tso_pool;	/* separate pool for TSO frames */
	struct completion probe_done;
	struct completion init_done;		/* signalled when CRQ init finishes */
	int init_done_rc;

	struct completion fw_done;		/* signalled on firmware command completion */
	/* Used for serialization of device commands */
	struct mutex fw_lock;
	int fw_done_rc;

	struct completion reset_done;
	int reset_done_rc;
	bool wait_for_reset;

	/* partner capabilities; each mirrors an enum ibmvnic_capabilities id */
	u64 min_tx_queues;
	u64 min_rx_queues;
	u64 min_rx_add_queues;
	u64 max_tx_queues;
	u64 max_rx_queues;
	u64 max_rx_add_queues;
	u64 req_tx_queues;
	u64 req_rx_queues;
	u64 req_rx_add_queues;
	u64 min_tx_entries_per_subcrq;
	u64 min_rx_add_entries_per_subcrq;
	u64 max_tx_entries_per_subcrq;
	u64 max_rx_add_entries_per_subcrq;
	u64 req_tx_entries_per_subcrq;
	u64 req_rx_add_entries_per_subcrq;
	u64 tcp_ip_offload;
	u64 promisc_requested;
	u64 promisc_supported;
	u64 min_mtu;
	u64 max_mtu;
	u64 req_mtu;
	u64 prev_mtu;
	u64 max_multicast_filters;
	u64 vlan_header_insertion;
	u64 rx_vlan_header_insertion;
	u64 max_tx_sg_entries;
	u64 rx_sg_supported;
	u64 rx_sg_requested;
	u64 opt_tx_comp_sub_queues;
	u64 opt_rx_comp_queues;
	u64 opt_rx_bufadd_q_per_rx_comp_q;
	u64 opt_tx_entries_per_subcrq;
	u64 opt_rxba_entries_per_subcrq;
	__be64 tx_rx_desc_req;
#define MAX_MAP_ID	255
	DECLARE_BITMAP(map_ids, MAX_MAP_ID);	/* allocated long-term-buffer map ids */
	u32 num_active_rx_scrqs;
	u32 num_active_rx_pools;
	u32 num_active_rx_napi;
	u32 num_active_tx_scrqs;
	u32 num_active_tx_pools;

	/* pre-reset sizes, used to decide whether pools can be reused */
	u32 prev_rx_pool_size;
	u32 prev_tx_pool_size;
	u32 cur_rx_buf_sz;
	u32 prev_rx_buf_sz;

	struct tasklet_struct tasklet;		/* CRQ servicing */
	enum vnic_state state;
	/* Used for serialization of state field. When taking both state
	 * and rwi locks, take state lock first.
	 */
	spinlock_t state_lock;
	enum ibmvnic_reset_reason reset_reason;
	struct list_head rwi_list;		/* pending ibmvnic_rwi reset work items */
	/* Used for serialization of rwi_list. When taking both state
	 * and rwi locks, take state lock first
	 */
	spinlock_t rwi_lock;
	struct work_struct ibmvnic_reset;
	struct delayed_work ibmvnic_delayed_reset;
	unsigned long resetting;		/* reset-in-progress flag bit */
	/* last device reset time */
	unsigned long last_reset_time;

	bool napi_enabled;
	bool from_passive_init;
	bool login_pending;			/* LOGIN sent, response not yet handled */
	/* protected by rcu */
	bool tx_queues_active;
	bool failover_pending;
	bool force_reset_recovery;

	struct ibmvnic_tunables desired;
	struct ibmvnic_tunables fallback;
};
1023