/* SPDX-License-Identifier: GPL-2.0-or-later */
/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                   */
/*  Copyright (C) 2014 IBM Corp.                                          */
/*  Santiago Leon (santi_leon@yahoo.com)                                  */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
/*  John Allen (jallen@linux.vnet.ibm.com)                                */
/*                                                                        */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN     */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/**************************************************************************/

#define IBMVNIC_NAME		"ibmvnic"
#define IBMVNIC_DRIVER_VERSION	"1.0.1"
#define IBMVNIC_INVALID_MAP	-1
#define IBMVNIC_OPEN_FAILED	3

/* basic structures plus 100 2k buffers */
#define IBMVNIC_IO_ENTITLEMENT_DEFAULT	610305

/* Initial module_parameters */
#define IBMVNIC_RX_WEIGHT		16
/* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */
#define IBMVNIC_BUFFS_PER_POOL	100
#define IBMVNIC_MAX_QUEUES	16
#define IBMVNIC_MAX_QUEUE_SZ   4096
#define IBMVNIC_MAX_IND_DESCS  16
#define IBMVNIC_IND_ARR_SZ	(IBMVNIC_MAX_IND_DESCS * 32)

#define IBMVNIC_TSO_BUF_SZ	65536
#define IBMVNIC_TSO_BUFS	64
#define IBMVNIC_TSO_POOL_MASK	0x80000000

/* A VNIC adapter has set of Rx and Tx pools (aka queues). Each Rx/Tx pool
 * has a set of buffers. The size of each buffer is determined by the MTU.
 *
 * Each Rx/Tx pool is also associated with a DMA region that is shared
 * with the "hardware" (VIOS) and used to send/receive packets. The DMA
 * region is also referred to as a Long Term Buffer or LTB.
 *
 * The size of the DMA region required for an Rx/Tx pool depends on the
 * number and size (MTU) of the buffers in the pool. At the max levels
 * of 4096 jumbo frames (MTU=9000) we will need about 9K*4K = 36MB plus
 * some padding.
 *
 * But the size of a single DMA region is limited by MAX_ORDER in the
 * kernel (about 16MB currently).  To support say 4K Jumbo frames, we
 * use a set of LTBs (struct ltb_set) per pool.
 *
 * IBMVNIC_ONE_LTB_MAX  - max size of each LTB supported by kernel
 * IBMVNIC_ONE_LTB_SIZE - current max size of each LTB in an ltb_set
 *			  (must be <= IBMVNIC_ONE_LTB_MAX)
 * IBMVNIC_LTB_SET_SIZE - current size of all LTBs in an ltb_set
 *
 * Each VNIC can have upto 16 Rx, 16 Tx and 16 TSO pools. The TSO pools
 * are of fixed length (IBMVNIC_TSO_BUF_SZ * IBMVNIC_TSO_BUFS) of 4MB.
 *
 * The Rx and Tx pools can have upto 4096 buffers. The max size of these
 * buffers is about 9588 (for jumbo frames, including IBMVNIC_BUFFER_HLEN).
 * So, setting the IBMVNIC_LTB_SET_SIZE for a pool to 4096 * 9588 ~= 38MB.
 *
 * There is a trade-off in setting IBMVNIC_ONE_LTB_SIZE. If it is large,
 * the allocation of the LTB can fail when system is low in memory. If
 * its too small, we would need several mappings for each of the Rx/
 * Tx/TSO pools but there is a limit of 255 mappings per vnic in the
 * VNIC protocol.
 *
 * So setting IBMVNIC_ONE_LTB_SIZE to 8MB. With IBMVNIC_LTB_SET_SIZE set
 * to 38MB, we will need 5 LTBs per Rx and Tx pool and 1 LTB per TSO
 * pool for the 4MB. Thus the 16 Rx and Tx queues require 32 * 5 = 160
 * plus 16 for the TSO pools for a total of 176 LTB mappings per VNIC.
 */
#define IBMVNIC_ONE_LTB_MAX	((u32)((1 << (MAX_ORDER - 1)) * PAGE_SIZE))
#define IBMVNIC_ONE_LTB_SIZE	min((u32)(8 << 20), IBMVNIC_ONE_LTB_MAX)
#define IBMVNIC_LTB_SET_SIZE	(38 << 20)

#define IBMVNIC_BUFFER_HLEN		500
#define IBMVNIC_RESET_DELAY 100

/* ethtool private-flag strings; bit positions defined inline */
static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = {
#define IBMVNIC_USE_SERVER_MAXES	0x1
	"use-server-maxes"
};

/* DMA-mapped buffer handed to firmware during LOGIN; offsets/counts of
 * the subCRQ arrays that follow the fixed header.
 */
struct ibmvnic_login_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_LB 1
	__be32 num_txcomp_subcrqs;
	__be32 off_txcomp_subcrqs;
	__be32 num_rxcomp_subcrqs;
	__be32 off_rxcomp_subcrqs;
	__be32 login_rsp_ioba;
	__be32 login_rsp_len;
	__be32 client_data_offset;
	__be32 client_data_len;
} __packed __aligned(8);

/* Firmware's response to LOGIN, written into the buffer described by
 * login_rsp_ioba/login_rsp_len above.
 */
struct ibmvnic_login_rsp_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_LRB 1
	__be32 num_txsubm_subcrqs;
	__be32 off_txsubm_subcrqs;
	__be32 num_rxadd_subcrqs;
	__be32 off_rxadd_subcrqs;
	__be32 off_rxadd_buff_size;
	__be32 num_supp_tx_desc;
	__be32 off_supp_tx_desc;
} __packed __aligned(8);

/* Checksum/TSO offload capabilities reported by firmware
 * (QUERY_IP_OFFLOAD); nonzero bytes indicate support.
 */
struct ibmvnic_query_ip_offload_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_IOB 1
	u8 ipv4_chksum;
	u8 ipv6_chksum;
	u8 tcp_ipv4_chksum;
	u8 tcp_ipv6_chksum;
	u8 udp_ipv4_chksum;
	u8 udp_ipv6_chksum;
	u8 large_tx_ipv4;
	u8 large_tx_ipv6;
	u8 large_rx_ipv4;
	u8 large_rx_ipv6;
	u8 reserved1[14];
	__be16 max_ipv4_header_size;
	__be16 max_ipv6_header_size;
	__be16 max_tcp_header_size;
	__be16 max_udp_header_size;
	__be32 max_large_tx_size;
	__be32 max_large_rx_size;
	u8 reserved2[16];
	u8 ipv6_extension_header;
#define IPV6_EH_NOT_SUPPORTED	0x00
#define IPV6_EH_SUPPORTED_LIM	0x01
#define IPV6_EH_SUPPORTED	0xFF
	u8 tcp_pseudosum_req;
#define TCP_PS_NOT_REQUIRED	0x00
#define TCP_PS_REQUIRED		0x01
	u8 reserved3[30];
	__be16 num_ipv6_ext_headers;
	__be32 off_ipv6_ext_headers;
	u8 reserved4[154];
} __packed __aligned(8);

/* Offload settings the driver requests via CONTROL_IP_OFFLOAD */
struct ibmvnic_control_ip_offload_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_IOB 1
	u8 ipv4_chksum;
	u8 ipv6_chksum;
	u8 tcp_ipv4_chksum;
	u8 tcp_ipv6_chksum;
	u8 udp_ipv4_chksum;
	u8 udp_ipv6_chksum;
	u8 large_tx_ipv4;
	u8 large_tx_ipv6;
	u8 bad_packet_rx;
	u8 large_rx_ipv4;
	u8 large_rx_ipv6;
	u8 reserved4[111];
} __packed __aligned(8);

/* Firmware trace component descriptor (COLLECT_FW_TRACE support) */
struct ibmvnic_fw_component {
	u8 name[48];
	__be32 trace_buff_size;
	u8 correlator;
	u8 trace_level;
	u8 parent_correlator;
	u8 error_check_level;
	u8 trace_on;
	u8 reserved[7];
	u8 description[192];
} __packed __aligned(8);

/* One entry in a firmware trace buffer */
struct ibmvnic_fw_trace_entry {
	__be32 trace_id;
	u8 num_valid_data;
	u8 reserved[3];
	__be64 pmc_registers;
	__be64 timebase;
	__be64 trace_data[5];
} __packed __aligned(8);

/* DMA buffer filled by firmware in response to REQUEST_STATISTICS */
struct ibmvnic_statistics {
	__be32 version;
	__be32 promiscuous;
	__be64 rx_packets;
	__be64 rx_bytes;
	__be64 tx_packets;
	__be64 tx_bytes;
	__be64 ucast_tx_packets;
	__be64 ucast_rx_packets;
	__be64 mcast_tx_packets;
	__be64 mcast_rx_packets;
	__be64 bcast_tx_packets;
	__be64 bcast_rx_packets;
	__be64 align_errors;
	__be64 fcs_errors;
	__be64 single_collision_frames;
	__be64 multi_collision_frames;
	__be64 sqe_test_errors;
	__be64 deferred_tx;
	__be64 late_collisions;
	__be64 excess_collisions;
	__be64 internal_mac_tx_errors;
	__be64 carrier_sense;
	__be64 too_long_frames;
	__be64 internal_mac_rx_errors;
	u8 reserved[72];
} __packed __aligned(8);

/* Driver-side per-queue counters; NUM_*_STATS must match the number of
 * u64 fields in the corresponding struct.
 */
#define NUM_TX_STATS 3
struct ibmvnic_tx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 dropped_packets;
};

#define NUM_RX_STATS 3
struct ibmvnic_rx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 interrupts;
};

/* MAC/VLAN access-control-list buffer returned by ACL_QUERY */
struct ibmvnic_acl_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_IOB 1
	u8 mac_acls_restrict;
	u8 vlan_acls_restrict;
	u8 reserved1[22];
	__be32 num_mac_addrs;
	__be32 offset_mac_addrs;
	__be32 num_vlan_ids;
	__be32 offset_vlan_ids;
	u8 reserved2[80];
} __packed __aligned(8);

/* descriptors have been changed, how should this be defined? 1? 4? */

#define IBMVNIC_TX_DESC_VERSIONS 3

/* is this still needed? */
struct ibmvnic_tx_comp_desc {
	u8 first;
	u8 num_comps;
	__be16 rcs[5];
	__be32 correlators[5];
} __packed __aligned(8);

/* some flags that included in v0 descriptor, which is gone
 * only used for IBMVNIC_TCP_CHKSUM and IBMVNIC_UDP_CHKSUM
 * and only in some offload_flags variable that doesn't seem
 * to be used anywhere, can probably be removed?
 */

#define IBMVNIC_TCP_CHKSUM		0x20
#define IBMVNIC_UDP_CHKSUM		0x08

/* Tx submission descriptor (v1) placed on a sub-CRQ */
struct ibmvnic_tx_desc {
	u8 first;
	u8 type;

#define IBMVNIC_TX_DESC 0x10
	u8 n_crq_elem;
	u8 n_sge;
	u8 flags1;
#define IBMVNIC_TX_COMP_NEEDED		0x80
#define IBMVNIC_TX_CHKSUM_OFFLOAD	0x40
#define IBMVNIC_TX_LSO			0x20
#define IBMVNIC_TX_PROT_TCP		0x10
#define IBMVNIC_TX_PROT_UDP		0x08
#define IBMVNIC_TX_PROT_IPV4		0x04
#define IBMVNIC_TX_PROT_IPV6		0x02
#define IBMVNIC_TX_VLAN_PRESENT		0x01
	u8 flags2;
#define IBMVNIC_TX_VLAN_INSERT		0x80
	__be16 mss;
	u8 reserved[4];
	__be32 correlator;
	__be16 vlan_id;
	__be16 dma_reg;
	__be32 sge_len;
	__be64 ioba;
} __packed __aligned(8);

/* Packet-header descriptor accompanying a Tx (carries up to 24 header
 * bytes inline)
 */
struct ibmvnic_hdr_desc {
	u8 first;
	u8 type;
#define IBMVNIC_HDR_DESC	0x11
	u8 len;
	u8 l2_len;
	__be16 l3_len;
	u8 l4_len;
	u8 flag;
	u8 data[24];
} __packed __aligned(8);

/* Continuation of ibmvnic_hdr_desc for headers longer than 24 bytes */
struct ibmvnic_hdr_ext_desc {
	u8 first;
	u8 type;
#define IBMVNIC_HDR_EXT_DESC	0x12
	u8 len;
	u8 data[29];
} __packed __aligned(8);

/* Scatter-gather descriptor carrying two additional Tx segments */
struct ibmvnic_sge_desc {
	u8 first;
	u8 type;
#define IBMVNIC_SGE_DESC	0x30
	__be16 sge1_dma_reg;
	__be32 sge1_len;
	__be64 sge1_ioba;
	__be16 reserved;
	__be16 sge2_dma_reg;
	__be32 sge2_len;
	__be64 sge2_ioba;
} __packed __aligned(8);

/* Rx completion descriptor delivered on an Rx sub-CRQ */
struct ibmvnic_rx_comp_desc {
	u8 first;
	u8 flags;
#define IBMVNIC_IP_CHKSUM_GOOD		0x80
#define IBMVNIC_TCP_UDP_CHKSUM_GOOD	0x40
#define IBMVNIC_END_FRAME		0x20
#define IBMVNIC_EXACT_MC		0x10
#define IBMVNIC_VLAN_STRIPPED		0x08
	__be16 off_frame_data;
	__be32 len;
	__be64 correlator;
	__be16 vlan_tci;
	__be16 rc;
	u8 reserved[12];
} __packed __aligned(8);

/* Generic 32-byte sub-CRQ element; first byte identifies the type */
struct ibmvnic_generic_scrq {
	u8 first;
	u8 reserved[31];
} __packed __aligned(8);

/* Descriptor used to hand an Rx buffer to firmware.
 * NOTE(review): len is a 24-bit bitfield of a __be32 base type — layout
 * matches the protocol but sparse may warn about bitfields on restricted
 * endian types; do not "fix" without checking the wire format.
 */
struct ibmvnic_rx_buff_add_desc {
	u8 first;
	u8 reserved[7];
	__be64 correlator;
	__be32 ioba;
	u8 map_id;
	__be32 len:24;
	u8 reserved2[8];
} __packed __aligned(8);

/* Return-code trailer common to most CRQ responses */
struct ibmvnic_rc {
	u8 code; /* one of enum ibmvnic_rc_codes */
	u8 detailed_data[3];
} __packed __aligned(4);

struct ibmvnic_generic_crq {
	u8 first;
	u8 cmd;
	u8 params[10];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_version_exchange {
	u8 first;
	u8 cmd;
	__be16 version;
#define IBMVNIC_INITIAL_VERSION 1
	u8 reserved[8];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_capability {
	u8 first;
	u8 cmd;
	__be16 capability; /* one of ibmvnic_capabilities */
	__be64 number;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_login {
	u8 first;
	u8 cmd;
	u8 reserved[6];
	__be32 ioba;
	__be32 len;
} __packed __aligned(8);

/* Physical-port parameters (link flags, speed, MTU) */
struct ibmvnic_phys_parms {
	u8 first;
	u8 cmd;
	u8 flags1;
#define IBMVNIC_EXTERNAL_LOOPBACK	0x80
#define IBMVNIC_INTERNAL_LOOPBACK	0x40
#define IBMVNIC_PROMISC		0x20
#define IBMVNIC_PHYS_LINK_ACTIVE	0x10
#define IBMVNIC_AUTONEG_DUPLEX	0x08
#define IBMVNIC_FULL_DUPLEX	0x04
#define IBMVNIC_HALF_DUPLEX	0x02
#define IBMVNIC_CAN_CHG_PHYS_PARMS	0x01
	u8 flags2;
#define IBMVNIC_LOGICAL_LNK_ACTIVE 0x80
	__be32 speed;
#define IBMVNIC_AUTONEG		0x80000000
#define IBMVNIC_10MBPS		0x40000000
#define IBMVNIC_100MBPS		0x20000000
#define IBMVNIC_1GBPS		0x10000000
#define IBMVNIC_10GBPS		0x08000000
#define IBMVNIC_40GBPS		0x04000000
#define IBMVNIC_100GBPS		0x02000000
#define IBMVNIC_25GBPS		0x01000000
#define IBMVNIC_50GBPS		0x00800000
#define IBMVNIC_200GBPS		0x00400000
	__be32 mtu;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_logical_link_state {
	u8 first;
	u8 cmd;
	u8 link_state;
#define IBMVNIC_LOGICAL_LNK_DN 0x00
#define IBMVNIC_LOGICAL_LNK_UP 0x01
#define IBMVNIC_LOGICAL_LNK_QUERY 0xff
	u8 reserved[9];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_query_ip_offload {
	u8 first;
	u8 cmd;
	u8 reserved[2];
	__be32 len;
	__be32 ioba;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_control_ip_offload {
	u8 first;
	u8 cmd;
	u8 reserved[2];
	__be32 ioba;
	__be32 len;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_request_statistics {
	u8 first;
	u8 cmd;
	u8 flags;
#define IBMVNIC_PHYSICAL_PORT	0x80
	u8 reserved1;
	__be32 ioba;
	__be32 len;
	u8 reserved[4];
} __packed __aligned(8);

struct ibmvnic_error_indication {
	u8 first;
	u8 cmd;
	u8 flags;
#define IBMVNIC_FATAL_ERROR	0x80
	u8 reserved1;
	__be32 error_id;
	__be32 detail_error_sz;
	__be16 error_cause;
	u8 reserved2[2];
} __packed __aligned(8);

struct ibmvnic_link_state_indication {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	u8 phys_link_state;
	u8 logical_link_state;
	u8 reserved2[10];
} __packed __aligned(8);

struct ibmvnic_change_mac_addr {
	u8 first;
	u8 cmd;
	u8 mac_addr[6];
	u8 reserved[4];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_multicast_ctrl {
	u8 first;
	u8 cmd;
	u8 mac_addr[6];
	u8 flags;
#define IBMVNIC_ENABLE_MC		0x80
#define IBMVNIC_DISABLE_MC		0x40
#define IBMVNIC_ENABLE_ALL		0x20
#define IBMVNIC_DISABLE_ALL	0x10
	u8 reserved1;
	__be16 reserved2; /* was num_enabled_mc_addr; */
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_get_vpd_size {
	u8 first;
	u8 cmd;
	u8 reserved[14];
} __packed __aligned(8);

struct ibmvnic_get_vpd_size_rsp {
	u8 first;
	u8 cmd;
	u8 reserved[2];
	__be64 len;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_get_vpd {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	__be32 ioba;
	__be32 len;
	u8 reserved[4];
} __packed __aligned(8);

struct ibmvnic_get_vpd_rsp {
	u8 first;
	u8 cmd;
	u8 reserved[10];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_acl_change_indication {
	u8 first;
	u8 cmd;
	__be16 change_type;
#define IBMVNIC_MAC_ACL 0
#define IBMVNIC_VLAN_ACL 1
	u8 reserved[12];
} __packed __aligned(8);

struct ibmvnic_acl_query {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	__be32 ioba;
	__be32 len;
	u8 reserved2[4];
} __packed __aligned(8);

struct ibmvnic_tune {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	__be32 ioba;
	__be32 len;
	u8 reserved2[4];
} __packed __aligned(8);

struct ibmvnic_request_map {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	__be32 ioba;
	__be32 len;
	u8 reserved2[4];
} __packed __aligned(8);

struct ibmvnic_request_map_rsp {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	u8 reserved2[8];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_request_unmap {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	u8 reserved2[12];
} __packed __aligned(8);

struct ibmvnic_request_unmap_rsp {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	u8 reserved2[8];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_query_map {
	u8 first;
	u8 cmd;
	u8 reserved[14];
} __packed __aligned(8);

struct ibmvnic_query_map_rsp {
	u8 first;
	u8 cmd;
	u8 reserved;
	u8 page_size;
	__be32 tot_pages;
	__be32 free_pages;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

/* One 16-byte CRQ element, viewed as whichever command/response it
 * carries; cmd (enum ibmvnic_commands) selects the member.
 */
union ibmvnic_crq {
	struct ibmvnic_generic_crq generic;
	struct ibmvnic_version_exchange version_exchange;
	struct ibmvnic_version_exchange version_exchange_rsp;
	struct ibmvnic_capability query_capability;
	struct ibmvnic_capability query_capability_rsp;
	struct ibmvnic_capability request_capability;
	struct ibmvnic_capability request_capability_rsp;
	struct ibmvnic_login login;
	struct ibmvnic_generic_crq login_rsp;
	struct ibmvnic_phys_parms query_phys_parms;
	struct ibmvnic_phys_parms query_phys_parms_rsp;
	struct ibmvnic_phys_parms query_phys_capabilities;
	struct ibmvnic_phys_parms query_phys_capabilities_rsp;
	struct ibmvnic_phys_parms set_phys_parms;
	struct ibmvnic_phys_parms set_phys_parms_rsp;
	struct ibmvnic_logical_link_state logical_link_state;
	struct ibmvnic_logical_link_state logical_link_state_rsp;
	struct ibmvnic_query_ip_offload query_ip_offload;
	struct ibmvnic_query_ip_offload query_ip_offload_rsp;
	struct ibmvnic_control_ip_offload control_ip_offload;
	struct ibmvnic_control_ip_offload control_ip_offload_rsp;
	struct ibmvnic_request_statistics request_statistics;
	struct ibmvnic_generic_crq request_statistics_rsp;
	struct ibmvnic_error_indication error_indication;
	struct ibmvnic_link_state_indication link_state_indication;
	struct ibmvnic_change_mac_addr change_mac_addr;
	struct ibmvnic_change_mac_addr change_mac_addr_rsp;
	struct ibmvnic_multicast_ctrl multicast_ctrl;
	struct ibmvnic_multicast_ctrl multicast_ctrl_rsp;
	struct ibmvnic_get_vpd_size get_vpd_size;
	struct ibmvnic_get_vpd_size_rsp get_vpd_size_rsp;
	struct ibmvnic_get_vpd get_vpd;
	struct ibmvnic_get_vpd_rsp get_vpd_rsp;
	struct ibmvnic_acl_change_indication acl_change_indication;
	struct ibmvnic_acl_query acl_query;
	struct ibmvnic_generic_crq acl_query_rsp;
	struct ibmvnic_tune tune;
	struct ibmvnic_generic_crq tune_rsp;
	struct ibmvnic_request_map request_map;
	struct ibmvnic_request_map_rsp request_map_rsp;
	struct ibmvnic_request_unmap request_unmap;
	struct ibmvnic_request_unmap_rsp request_unmap_rsp;
	struct ibmvnic_query_map query_map;
	struct ibmvnic_query_map_rsp query_map_rsp;
};

/* Values for ibmvnic_rc.code */
enum ibmvnic_rc_codes {
	SUCCESS = 0,
	PARTIALSUCCESS = 1,
	PERMISSION = 2,
	NOMEMORY = 3,
	PARAMETER = 4,
	UNKNOWNCOMMAND = 5,
	ABORTED = 6,
	INVALIDSTATE = 7,
	INVALIDIOBA = 8,
	INVALIDLENGTH = 9,
	UNSUPPORTEDOPTION = 10,
};

/* Capability IDs used in ibmvnic_capability.capability */
enum ibmvnic_capabilities {
	MIN_TX_QUEUES = 1,
	MIN_RX_QUEUES = 2,
	MIN_RX_ADD_QUEUES = 3,
	MAX_TX_QUEUES = 4,
	MAX_RX_QUEUES = 5,
	MAX_RX_ADD_QUEUES = 6,
	REQ_TX_QUEUES = 7,
	REQ_RX_QUEUES = 8,
	REQ_RX_ADD_QUEUES = 9,
	MIN_TX_ENTRIES_PER_SUBCRQ = 10,
	MIN_RX_ADD_ENTRIES_PER_SUBCRQ = 11,
	MAX_TX_ENTRIES_PER_SUBCRQ = 12,
	MAX_RX_ADD_ENTRIES_PER_SUBCRQ = 13,
	REQ_TX_ENTRIES_PER_SUBCRQ = 14,
	REQ_RX_ADD_ENTRIES_PER_SUBCRQ = 15,
	TCP_IP_OFFLOAD = 16,
	PROMISC_REQUESTED = 17,
	PROMISC_SUPPORTED = 18,
	MIN_MTU = 19,
	MAX_MTU = 20,
	REQ_MTU = 21,
	MAX_MULTICAST_FILTERS = 22,
	VLAN_HEADER_INSERTION = 23,
	RX_VLAN_HEADER_INSERTION = 24,
	MAX_TX_SG_ENTRIES = 25,
	RX_SG_SUPPORTED = 26,
	RX_SG_REQUESTED = 27,
	OPT_TX_COMP_SUB_QUEUES = 28,
	OPT_RX_COMP_QUEUES = 29,
	OPT_RX_BUFADD_Q_PER_RX_COMP_Q = 30,
	OPT_TX_ENTRIES_PER_SUBCRQ = 31,
	OPT_RXBA_ENTRIES_PER_SUBCRQ = 32,
	TX_RX_DESC_REQ = 33,
};

/* Values for ibmvnic_error_indication.error_cause */
enum ibmvnic_error_cause {
	ADAPTER_PROBLEM = 0,
	BUS_PROBLEM = 1,
	FW_PROBLEM = 2,
	DD_PROBLEM = 3,
	EEH_RECOVERY = 4,
	FW_UPDATED = 5,
	LOW_MEMORY = 6,
};

/* CRQ command opcodes; responses have bit 0x80 set on the request code */
enum ibmvnic_commands {
	VERSION_EXCHANGE = 0x01,
	VERSION_EXCHANGE_RSP = 0x81,
	QUERY_CAPABILITY = 0x02,
	QUERY_CAPABILITY_RSP = 0x82,
	REQUEST_CAPABILITY = 0x03,
	REQUEST_CAPABILITY_RSP = 0x83,
	LOGIN = 0x04,
	LOGIN_RSP = 0x84,
	QUERY_PHYS_PARMS = 0x05,
	QUERY_PHYS_PARMS_RSP = 0x85,
	QUERY_PHYS_CAPABILITIES = 0x06,
	QUERY_PHYS_CAPABILITIES_RSP = 0x86,
	SET_PHYS_PARMS = 0x07,
	SET_PHYS_PARMS_RSP = 0x87,
	ERROR_INDICATION = 0x08,
	LOGICAL_LINK_STATE = 0x0C,
	LOGICAL_LINK_STATE_RSP = 0x8C,
	REQUEST_STATISTICS = 0x0D,
	REQUEST_STATISTICS_RSP = 0x8D,
	COLLECT_FW_TRACE = 0x11,
	COLLECT_FW_TRACE_RSP = 0x91,
	LINK_STATE_INDICATION = 0x12,
	CHANGE_MAC_ADDR = 0x13,
	CHANGE_MAC_ADDR_RSP = 0x93,
	MULTICAST_CTRL = 0x14,
	MULTICAST_CTRL_RSP = 0x94,
	GET_VPD_SIZE = 0x15,
	GET_VPD_SIZE_RSP = 0x95,
	GET_VPD = 0x16,
	GET_VPD_RSP = 0x96,
	TUNE = 0x17,
	TUNE_RSP = 0x97,
	QUERY_IP_OFFLOAD = 0x18,
	QUERY_IP_OFFLOAD_RSP = 0x98,
	CONTROL_IP_OFFLOAD = 0x19,
	CONTROL_IP_OFFLOAD_RSP = 0x99,
	ACL_CHANGE_INDICATION = 0x1A,
	ACL_QUERY = 0x1B,
	ACL_QUERY_RSP = 0x9B,
	QUERY_MAP = 0x1D,
	QUERY_MAP_RSP = 0x9D,
	REQUEST_MAP = 0x1E,
	REQUEST_MAP_RSP = 0x9E,
	REQUEST_UNMAP = 0x1F,
	REQUEST_UNMAP_RSP = 0x9F,
	VLAN_CTRL = 0x20,
	VLAN_CTRL_RSP = 0xA0,
};

/* Values for the first byte of a CRQ element */
enum ibmvnic_crq_type {
	IBMVNIC_CRQ_CMD			= 0x80,
	IBMVNIC_CRQ_CMD_RSP		= 0x80,
	IBMVNIC_CRQ_INIT_CMD		= 0xC0,
	IBMVNIC_CRQ_INIT_RSP		= 0xC0,
	IBMVNIC_CRQ_XPORT_EVENT		= 0xFF,
};

/* Second byte of INIT/XPORT CRQ elements.
 * NOTE(review): the tag says "ibmvfc" (not "ibmvnic") in the upstream
 * header too — likely inherited from the ibmvfc driver; keep as-is.
 */
enum ibmvfc_crq_format {
	IBMVNIC_CRQ_INIT                 = 0x01,
	IBMVNIC_CRQ_INIT_COMPLETE        = 0x02,
	IBMVNIC_PARTITION_MIGRATED       = 0x06,
	IBMVNIC_DEVICE_FAILOVER          = 0x08,
};

/* Main command/response queue shared with the hypervisor */
struct ibmvnic_crq_queue {
	union ibmvnic_crq *msgs;
	int size, cur;
	dma_addr_t msg_token;
	/* Used for serialization of msgs, cur */
	spinlock_t lock;
	bool active;
	char name[32];
};

union sub_crq {
	struct ibmvnic_generic_scrq generic;
	struct ibmvnic_tx_comp_desc tx_comp;
	struct ibmvnic_tx_desc v1;
	struct ibmvnic_hdr_desc hdr;
	struct ibmvnic_hdr_ext_desc hdr_ext;
	struct ibmvnic_sge_desc sge;
	struct ibmvnic_rx_comp_desc rx_comp;
	struct ibmvnic_rx_buff_add_desc rx_add;
};

/* Staging array for batched (indirect) sub-CRQ submission */
struct ibmvnic_ind_xmit_queue {
	union sub_crq *indir_arr;
	dma_addr_t indir_dma;
	int index;
};

struct ibmvnic_sub_crq_queue {
	union sub_crq *msgs;
	int size, cur;
	dma_addr_t msg_token;
	unsigned long crq_num;
	unsigned long hw_irq;
	unsigned int irq;
	unsigned int pool_index;
	int scrq_num;
	/* Used for serialization of msgs, cur */
	spinlock_t lock;
	struct sk_buff *rx_skb_top;
	struct ibmvnic_adapter *adapter;
	struct ibmvnic_ind_xmit_queue ind_buf;
	atomic_t used;
	char name[32];
	u64 handle;
} ____cacheline_aligned;

/* One DMA region registered with firmware via REQUEST_MAP */
struct ibmvnic_long_term_buff {
	unsigned char *buff;
	dma_addr_t addr;
	u64 size;
	u8 map_id;
};

/* Set of LTBs backing one pool (see the LTB sizing comment above) */
struct ibmvnic_ltb_set {
	int num_ltbs;
	struct ibmvnic_long_term_buff *ltbs;
};

struct ibmvnic_tx_buff {
	struct sk_buff *skb;
	int index;
	int pool_index;
	int num_entries;
};

struct ibmvnic_tx_pool {
	struct ibmvnic_tx_buff *tx_buff;
	int *free_map;
	int consumer_index;
	int producer_index;
	struct ibmvnic_ltb_set ltb_set;
	int num_buffers;
	int buf_size;
} ____cacheline_aligned;

struct ibmvnic_rx_buff {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned char *data;
	int size;
	int pool_index;
};

struct ibmvnic_rx_pool {
	struct ibmvnic_rx_buff *rx_buff;
	int size;			/* # of buffers in the pool */
	int index;
	int buff_size;
	atomic_t available;
	int *free_map;
	int next_free;
	int next_alloc;
	int active;
	struct ibmvnic_ltb_set ltb_set;
} ____cacheline_aligned;

/* Vital Product Data buffer fetched via GET_VPD */
struct ibmvnic_vpd {
	unsigned char *buff;
	dma_addr_t dma_addr;
	u64 len;
};

/* Driver lifecycle state; protected by adapter->state_lock */
enum vnic_state {VNIC_PROBING = 1,
		 VNIC_PROBED,
		 VNIC_OPENING,
		 VNIC_OPEN,
		 VNIC_CLOSING,
		 VNIC_CLOSED,
		 VNIC_REMOVING,
		 VNIC_REMOVED,
		 VNIC_DOWN};

enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
			   VNIC_RESET_MOBILITY,
			   VNIC_RESET_FATAL,
			   VNIC_RESET_NON_FATAL,
			   VNIC_RESET_TIMEOUT,
			   VNIC_RESET_CHANGE_PARAM,
			   VNIC_RESET_PASSIVE_INIT};

/* One queued reset work item on adapter->rwi_list */
struct ibmvnic_rwi {
	enum ibmvnic_reset_reason reset_reason;
	struct list_head list;
};

/* User/driver-requested queue and MTU settings */
struct ibmvnic_tunables {
	u64 rx_queues;
	u64 tx_queues;
	u64 rx_entries;
	u64 tx_entries;
	u64 mtu;
};

/* Per-device driver state */
struct ibmvnic_adapter {
	struct vio_dev *vdev;
	struct net_device *netdev;
	struct ibmvnic_crq_queue crq;
	u8 mac_addr[ETH_ALEN];
	struct ibmvnic_query_ip_offload_buffer ip_offload_buf;
	dma_addr_t ip_offload_tok;
	struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
	dma_addr_t ip_offload_ctrl_tok;
	u32 msg_enable;
	u32 priv_flags;

	/* Vital Product Data (VPD) */
	struct ibmvnic_vpd *vpd;
	char fw_version[32];

	/* Statistics */
	struct ibmvnic_statistics stats;
	dma_addr_t stats_token;
	struct completion stats_done;
	int replenish_no_mem;
	int replenish_add_buff_success;
	int replenish_add_buff_failure;
	int replenish_task_cycles;
	int tx_send_failed;
	int tx_map_failed;

	struct ibmvnic_tx_queue_stats *tx_stats_buffers;
	struct ibmvnic_rx_queue_stats *rx_stats_buffers;

	int phys_link_state;
	int logical_link_state;

	u32 speed;
	u8 duplex;

	/* login data */
	struct ibmvnic_login_buffer *login_buf;
	dma_addr_t login_buf_token;
	int login_buf_sz;

	struct ibmvnic_login_rsp_buffer *login_rsp_buf;
	dma_addr_t login_rsp_buf_token;
	int login_rsp_buf_sz;

	atomic_t running_cap_crqs;

	struct ibmvnic_sub_crq_queue **tx_scrq ____cacheline_aligned;
	struct ibmvnic_sub_crq_queue **rx_scrq ____cacheline_aligned;

	/* rx structs */
	struct napi_struct *napi;
	struct ibmvnic_rx_pool *rx_pool;
	u64 promisc;

	struct ibmvnic_tx_pool *tx_pool;
	struct ibmvnic_tx_pool *tso_pool;
	struct completion probe_done;
	struct completion init_done;
	int init_done_rc;

	struct completion fw_done;
	/* Used for serialization of device commands */
	struct mutex fw_lock;
	int fw_done_rc;

	struct completion reset_done;
	int reset_done_rc;
	bool wait_for_reset;

	/* partner capabilities */
	u64 min_tx_queues;
	u64 min_rx_queues;
	u64 min_rx_add_queues;
	u64 max_tx_queues;
	u64 max_rx_queues;
	u64 max_rx_add_queues;
	u64 req_tx_queues;
	u64 req_rx_queues;
	u64 req_rx_add_queues;
	u64 min_tx_entries_per_subcrq;
	u64 min_rx_add_entries_per_subcrq;
	u64 max_tx_entries_per_subcrq;
	u64 max_rx_add_entries_per_subcrq;
	u64 req_tx_entries_per_subcrq;
	u64 req_rx_add_entries_per_subcrq;
	u64 tcp_ip_offload;
	u64 promisc_requested;
	u64 promisc_supported;
	u64 min_mtu;
	u64 max_mtu;
	u64 req_mtu;
	u64 prev_mtu;
	u64 max_multicast_filters;
	u64 vlan_header_insertion;
	u64 rx_vlan_header_insertion;
	u64 max_tx_sg_entries;
	u64 rx_sg_supported;
	u64 rx_sg_requested;
	u64 opt_tx_comp_sub_queues;
	u64 opt_rx_comp_queues;
	u64 opt_rx_bufadd_q_per_rx_comp_q;
	u64 opt_tx_entries_per_subcrq;
	u64 opt_rxba_entries_per_subcrq;
	__be64 tx_rx_desc_req;
/* Bitmap of LTB map ids; the protocol allows at most 255 mappings */
#define MAX_MAP_ID	255
	DECLARE_BITMAP(map_ids, MAX_MAP_ID);
	u32 num_active_rx_scrqs;
	u32 num_active_rx_pools;
	u32 num_active_rx_napi;
	u32 num_active_tx_scrqs;
	u32 num_active_tx_pools;

	u32 prev_rx_pool_size;
	u32 prev_tx_pool_size;
	u32 cur_rx_buf_sz;
	u32 prev_rx_buf_sz;

	struct tasklet_struct tasklet;
	enum vnic_state state;
	/* Used for serialization of state field. When taking both state
	 * and rwi locks, take state lock first.
	 */
	spinlock_t state_lock;
	enum ibmvnic_reset_reason reset_reason;
	struct list_head rwi_list;
	/* Used for serialization of rwi_list. When taking both state
	 * and rwi locks, take state lock first
	 */
	spinlock_t rwi_lock;
	struct work_struct ibmvnic_reset;
	struct delayed_work ibmvnic_delayed_reset;
	unsigned long resetting;
	/* last device reset time */
	unsigned long last_reset_time;

	bool napi_enabled;
	bool from_passive_init;
	bool login_pending;
	/* protected by rcu */
	bool tx_queues_active;
	bool failover_pending;
	bool force_reset_recovery;

	struct ibmvnic_tunables desired;
	struct ibmvnic_tunables fallback;
};