1 /* 2 * This file and its contents are supplied under the terms of the 3 * Common Development and Distribution License ("CDDL"), version 1.0. 4 * You may only use this file in accordance with the terms of version 5 * 1.0 of the CDDL. 6 * 7 * A full copy of the text of the CDDL should have accompanied this 8 * source. A copy of the CDDL is also available via the Internet at 9 * http://www.illumos.org/license/CDDL. 10 */ 11 12 /* 13 * Copyright 2020, The University of Queensland 14 * Copyright (c) 2018, Joyent, Inc. 15 * Copyright 2020 RackTop Systems, Inc. 16 */ 17 18 /* 19 * Mellanox Connect-X 4/5/6 driver. 20 * 21 * More details in mlxcx.c 22 */ 23 24 #ifndef _MLXCX_H 25 #define _MLXCX_H 26 27 /* 28 * mlxcx(7D) defintions 29 */ 30 31 #include <sys/ddi.h> 32 #include <sys/sunddi.h> 33 #include <sys/ddifm.h> 34 #include <sys/id_space.h> 35 #include <sys/list.h> 36 #include <sys/taskq_impl.h> 37 #include <sys/stddef.h> 38 #include <sys/stream.h> 39 #include <sys/strsun.h> 40 #include <sys/mac_provider.h> 41 #include <sys/mac_ether.h> 42 #include <sys/cpuvar.h> 43 #include <sys/ethernet.h> 44 45 #include <inet/ip.h> 46 #include <inet/ip6.h> 47 48 #include <sys/ddifm.h> 49 #include <sys/fm/protocol.h> 50 #include <sys/fm/util.h> 51 #include <sys/fm/io/ddi.h> 52 53 #include <mlxcx_reg.h> 54 55 #ifdef __cplusplus 56 extern "C" { 57 #endif 58 59 /* 60 * Get access to the first PCI BAR. 61 */ 62 #define MLXCX_REG_NUMBER 1 63 64 /* 65 * The command queue is supposed to be a page, which is 4k. 66 */ 67 #define MLXCX_CMD_DMA_PAGE_SIZE 4096 68 69 /* 70 * Queues can allocate in units of this much memory. 71 */ 72 #define MLXCX_QUEUE_DMA_PAGE_SIZE 4096 73 74 /* 75 * We advertise two sizes of groups to MAC -- a certain number of "large" 76 * groups (including the default group, which is sized to at least ncpus) 77 * followed by a certain number of "small" groups. 
78 * 79 * This allows us to have a larger amount of classification resources available 80 * for zones/VMs without resorting to software classification. 81 */ 82 #define MLXCX_RX_NGROUPS_LARGE_DFLT 2 83 #define MLXCX_RX_NRINGS_PER_LARGE_GROUP_DFLT 16 84 #define MLXCX_RX_NGROUPS_SMALL_DFLT 256 85 #define MLXCX_RX_NRINGS_PER_SMALL_GROUP_DFLT 4 86 87 #define MLXCX_TX_NGROUPS_DFLT 1 88 #define MLXCX_TX_NRINGS_PER_GROUP_DFLT 64 89 90 /* 91 * Queues will be sized to (1 << *Q_SIZE_SHIFT) entries long. 92 */ 93 #define MLXCX_EQ_SIZE_SHIFT_DFLT 9 94 95 /* 96 * The CQ, SQ and RQ sizes can effect throughput on higher speed interfaces. 97 * EQ less so, as it only takes a single EQ entry to indicate there are 98 * multiple completions on the CQ. 99 * 100 * Particularly on the Rx side, the RQ (and corresponding CQ) would run 101 * low on available entries. A symptom of this is the refill taskq running 102 * frequently. A larger RQ (and CQ) alleviates this, and as there is a 103 * close relationship between SQ and CQ size, the SQ is increased too. 104 */ 105 #define MLXCX_CQ_SIZE_SHIFT_DFLT 10 106 #define MLXCX_CQ_SIZE_SHIFT_25G 12 107 108 /* 109 * Default to making SQs bigger than RQs for 9k MTU, since most packets will 110 * spill over into more than one slot. RQ WQEs are always 1 slot. 
111 */ 112 #define MLXCX_SQ_SIZE_SHIFT_DFLT 11 113 #define MLXCX_SQ_SIZE_SHIFT_25G 13 114 115 #define MLXCX_RQ_SIZE_SHIFT_DFLT 10 116 #define MLXCX_RQ_SIZE_SHIFT_25G 12 117 118 #define MLXCX_CQ_HWM_GAP 16 119 #define MLXCX_CQ_LWM_GAP 24 120 121 #define MLXCX_WQ_HWM_GAP MLXCX_CQ_HWM_GAP 122 #define MLXCX_WQ_LWM_GAP MLXCX_CQ_LWM_GAP 123 124 #define MLXCX_RQ_REFILL_STEP 64 125 126 /* 127 * CQ event moderation 128 */ 129 #define MLXCX_CQEMOD_PERIOD_USEC_DFLT 50 130 #define MLXCX_CQEMOD_COUNT_DFLT \ 131 (8 * ((1 << MLXCX_CQ_SIZE_SHIFT_DFLT) / 10)) 132 133 /* 134 * EQ interrupt moderation 135 */ 136 #define MLXCX_INTRMOD_PERIOD_USEC_DFLT 10 137 138 /* Size of root flow tables */ 139 #define MLXCX_FTBL_ROOT_SIZE_SHIFT_DFLT 12 140 141 /* Size of 2nd level flow tables for VLAN filtering */ 142 #define MLXCX_FTBL_VLAN_SIZE_SHIFT_DFLT 4 143 144 /* 145 * How big does an mblk have to be before we dma_bind() it instead of 146 * bcopying? 147 */ 148 #define MLXCX_TX_BIND_THRESHOLD_DFLT 2048 149 150 /* 151 * How often to check the status of completion queues for overflow and 152 * other problems. 153 */ 154 #define MLXCX_WQ_CHECK_INTERVAL_SEC_DFLT 300 155 #define MLXCX_CQ_CHECK_INTERVAL_SEC_DFLT 300 156 #define MLXCX_EQ_CHECK_INTERVAL_SEC_DFLT 30 157 158 /* 159 * After this many packets, the packets received so far are passed to 160 * the mac layer. 161 */ 162 #define MLXCX_RX_PER_CQ_DEFAULT 256 163 #define MLXCX_RX_PER_CQ_MIN 16 164 #define MLXCX_RX_PER_CQ_MAX 4096 165 166 #define MLXCX_DOORBELL_TRIES_DFLT 3 167 extern uint_t mlxcx_doorbell_tries; 168 169 #define MLXCX_STUCK_INTR_COUNT_DFLT 128 170 extern uint_t mlxcx_stuck_intr_count; 171 172 #define MLXCX_BUF_BIND_MAX_ATTEMTPS 50 173 174 #define MLXCX_MTU_OFFSET \ 175 (sizeof (struct ether_vlan_header) + ETHERFCSL) 176 177 /* 178 * This is the current version of the command structure that the driver expects 179 * to be found in the ISS. 
180 */ 181 #define MLXCX_CMD_REVISION 5 182 183 #ifdef DEBUG 184 #define MLXCX_DMA_SYNC(dma, flag) VERIFY0(ddi_dma_sync( \ 185 (dma).mxdb_dma_handle, 0, 0, \ 186 (flag))) 187 #else 188 #define MLXCX_DMA_SYNC(dma, flag) (void) ddi_dma_sync( \ 189 (dma).mxdb_dma_handle, 0, 0, \ 190 (flag)) 191 #endif 192 193 #define MLXCX_FM_SERVICE_MLXCX "mlxcx" 194 195 /* 196 * This macro defines the expected value of the 'Interface Step Sequence ID' 197 * (issi) which represents the version of the start up and tear down sequence. 198 * We must check that hardware supports this and tell it which version we're 199 * using as well. 200 */ 201 #define MLXCX_CURRENT_ISSI 1 202 203 /* 204 * This is the size of a page that the hardware expects from us when 205 * manipulating pages. 206 */ 207 #define MLXCX_HW_PAGE_SIZE 4096 208 209 /* 210 * This is a special lkey value used to terminate a list of scatter pointers. 211 */ 212 #define MLXCX_NULL_LKEY 0x100 213 214 /* 215 * Forwards 216 */ 217 struct mlxcx; 218 typedef struct mlxcx mlxcx_t; 219 220 typedef enum { 221 MLXCX_DMABUF_HDL_ALLOC = 1 << 0, 222 MLXCX_DMABUF_MEM_ALLOC = 1 << 1, 223 MLXCX_DMABUF_BOUND = 1 << 2, 224 MLXCX_DMABUF_FOREIGN = 1 << 3, 225 } mlxcx_dma_buffer_flags_t; 226 227 typedef struct mlxcx_dma_buffer { 228 mlxcx_dma_buffer_flags_t mxdb_flags; 229 caddr_t mxdb_va; /* Buffer VA */ 230 size_t mxdb_len; /* Buffer logical len */ 231 ddi_acc_handle_t mxdb_acc_handle; 232 ddi_dma_handle_t mxdb_dma_handle; 233 uint_t mxdb_ncookies; 234 } mlxcx_dma_buffer_t; 235 236 typedef struct mlxcx_dev_page { 237 list_node_t mxdp_list; 238 avl_node_t mxdp_tree; 239 uintptr_t mxdp_pa; 240 mlxcx_dma_buffer_t mxdp_dma; 241 } mlxcx_dev_page_t; 242 243 /* 244 * Data structure to keep track of all information related to the command queue. 
 */
typedef enum {
	MLXCX_CMD_QUEUE_S_IDLE = 1,
	MLXCX_CMD_QUEUE_S_BUSY,
	MLXCX_CMD_QUEUE_S_BROKEN
} mlxcx_cmd_queue_status_t;

typedef struct mlxcx_cmd_queue {
	kmutex_t mcmd_lock;
	kcondvar_t mcmd_cv;
	mlxcx_dma_buffer_t mcmd_dma;
	mlxcx_cmd_ent_t *mcmd_ent;

	/* NOTE(review): _l2 suffix suggests log2 sizes — confirm in mlxcx_cmd.c */
	uint8_t mcmd_size_l2;
	uint8_t mcmd_stride_l2;

	mlxcx_cmd_queue_status_t mcmd_status;

	ddi_taskq_t *mcmd_taskq;
	id_space_t *mcmd_tokens;
} mlxcx_cmd_queue_t;

/* A single command mailbox, chained onto a command's in/out mailbox lists. */
typedef struct mlxcd_cmd_mbox {
	list_node_t mlbox_node;
	mlxcx_dma_buffer_t mlbox_dma;
	mlxcx_cmd_mailbox_t *mlbox_data;
} mlxcx_cmd_mbox_t;

typedef enum {
	MLXCX_EQ_ALLOC		= 1 << 0,	/* dma mem alloc'd, size set */
	MLXCX_EQ_CREATED	= 1 << 1,	/* CREATE_EQ sent to hw */
	MLXCX_EQ_DESTROYED	= 1 << 2,	/* DESTROY_EQ sent to hw */
	MLXCX_EQ_ARMED		= 1 << 3,	/* Armed through the UAR */
	MLXCX_EQ_POLLING	= 1 << 4,	/* Currently being polled */
} mlxcx_eventq_state_t;

/* BlueFlame register slot bookkeeping within a UAR. */
typedef struct mlxcx_bf {
	kmutex_t mbf_mtx;
	uint_t mbf_cnt;
	uint_t mbf_even;
	uint_t mbf_odd;
} mlxcx_bf_t;

typedef struct mlxcx_uar {
	boolean_t mlu_allocated;
	uint_t mlu_num;
	uint_t mlu_base;

	volatile uint_t mlu_bfcnt;
	mlxcx_bf_t mlu_bf[MLXCX_BF_PER_UAR];
} mlxcx_uar_t;

/* Protection Domain: only the hardware-assigned number is tracked. */
typedef struct mlxcx_pd {
	boolean_t mlpd_allocated;
	uint32_t mlpd_num;
} mlxcx_pd_t;

/* Transport Domain: only the hardware-assigned number is tracked. */
typedef struct mlxcx_tdom {
	boolean_t mltd_allocated;
	uint32_t mltd_num;
} mlxcx_tdom_t;

typedef enum {
	MLXCX_PORT_VPORT_PROMISC	= 1 << 0,
} mlxcx_port_flags_t;

typedef struct mlxcx_flow_table mlxcx_flow_table_t;
typedef struct mlxcx_flow_group mlxcx_flow_group_t;

typedef struct {
	uint64_t mlps_rx_drops;
} mlxcx_port_stats_t;

typedef enum {
	MLXCX_PORT_INIT		= 1 << 0
} mlxcx_port_init_t;

/* Per-port state; protected by mlp_mtx. */
typedef struct mlxcx_port {
	kmutex_t mlp_mtx;
	mlxcx_port_init_t mlp_init;
	mlxcx_t *mlp_mlx;
	/*
	 * The mlp_num we have here starts at zero (it's an index), but the
	 * numbering we have to use for register access starts at 1. We
	 * currently write mlp_num into the other_vport fields in mlxcx_cmd.c
	 * (where 0 is a magic number meaning "my vport") so if we ever add
	 * support for virtualisation features and deal with more than one
	 * vport, we will probably have to change this.
	 */
	uint_t mlp_num;
	mlxcx_port_flags_t mlp_flags;
	uint64_t mlp_guid;
	uint8_t mlp_mac_address[ETHERADDRL];

	uint_t mlp_mtu;
	uint_t mlp_max_mtu;

	mlxcx_port_status_t mlp_admin_status;
	mlxcx_port_status_t mlp_oper_status;

	boolean_t mlp_autoneg;
	mlxcx_eth_proto_t mlp_max_proto;
	mlxcx_eth_proto_t mlp_admin_proto;
	mlxcx_eth_proto_t mlp_oper_proto;

	mlxcx_eth_inline_mode_t mlp_wqe_min_inline;

	/* Root flow tables */
	mlxcx_flow_table_t *mlp_rx_flow;
	mlxcx_flow_table_t *mlp_tx_flow;

	mlxcx_flow_group_t *mlp_promisc;
	mlxcx_flow_group_t *mlp_bcast;
	mlxcx_flow_group_t *mlp_umcast;

	avl_tree_t mlp_dmac_fe;

	mlxcx_port_stats_t mlp_stats;

	mlxcx_module_status_t mlp_last_modstate;
	mlxcx_module_error_type_t mlp_last_moderr;
} mlxcx_port_t;

typedef enum {
	MLXCX_EQ_TYPE_ANY,
	MLXCX_EQ_TYPE_RX,
	MLXCX_EQ_TYPE_TX
} mlxcx_eventq_type_t;

/* An event queue; 1:1 with interrupt vectors (see mlx_eqs in struct mlxcx). */
typedef struct mlxcx_event_queue {
	kmutex_t mleq_mtx;
	mlxcx_t *mleq_mlx;
	mlxcx_eventq_state_t mleq_state;
	mlxcx_eventq_type_t mleq_type;

	mlxcx_dma_buffer_t mleq_dma;

	size_t mleq_entshift;
	size_t mleq_nents;
	mlxcx_eventq_ent_t *mleq_ent;
	uint32_t mleq_cc;	/* consumer counter */
	uint32_t mleq_cc_armed;

	uint32_t mleq_events;

	uint32_t mleq_badintrs;

	/* Hardware eq number */
	uint_t mleq_num;
	/* Index into the mlxcx_t's interrupts array */
	uint_t mleq_intr_index;

	/* UAR region that
	   has this EQ's doorbell in it */
	mlxcx_uar_t *mleq_uar;

	/* Tree of CQn => mlxcx_completion_queue_t */
	avl_tree_t mleq_cqs;

	uint32_t mleq_check_disarm_cc;
	uint_t mleq_check_disarm_cnt;
} mlxcx_event_queue_t;

typedef enum {
	MLXCX_TIS_CREATED	= 1 << 0,
	MLXCX_TIS_DESTROYED	= 1 << 1,
} mlxcx_tis_state_t;

/* Transport Interface Send context (required for send queues). */
typedef struct mlxcx_tis {
	mlxcx_tis_state_t mltis_state;
	list_node_t mltis_entry;
	uint_t mltis_num;
	mlxcx_tdom_t *mltis_tdom;
} mlxcx_tis_t;

typedef enum {
	MLXCX_BUFFER_INIT,
	MLXCX_BUFFER_FREE,
	MLXCX_BUFFER_ON_WQ,
	MLXCX_BUFFER_ON_LOAN,
	MLXCX_BUFFER_ON_CHAIN,
} mlxcx_buffer_state_t;

typedef enum {
	MLXCX_SHARD_READY,
	MLXCX_SHARD_DRAINING,
} mlxcx_shard_state_t;

/*
 * A shard of DMA buffers, split into busy/free/loaned lists under mlbs_mtx.
 * NOTE(review): mlbs_free_nonempty presumably signals waiters when the free
 * list gains an entry — verify against mlxcx_buf_take()/mlxcx_buf_return().
 */
typedef struct mlxcx_buf_shard {
	mlxcx_shard_state_t mlbs_state;
	list_node_t mlbs_entry;
	kmutex_t mlbs_mtx;
	list_t mlbs_busy;
	list_t mlbs_free;
	list_t mlbs_loaned;
	kcondvar_t mlbs_free_nonempty;
} mlxcx_buf_shard_t;

typedef struct mlxcx_buffer {
	mlxcx_buf_shard_t *mlb_shard;
	list_node_t mlb_entry;
	list_node_t mlb_cq_entry;

	struct mlxcx_buffer *mlb_tx_head;	/* head of tx chain */
	list_t mlb_tx_chain;
	list_node_t mlb_tx_chain_entry;

	boolean_t mlb_foreign;
	size_t mlb_used;
	mblk_t *mlb_tx_mp;

	/*
	 * The number of work queue basic blocks this buf uses.
457 */ 458 uint_t mlb_wqebbs; 459 460 mlxcx_t *mlb_mlx; 461 mlxcx_buffer_state_t mlb_state; 462 uint_t mlb_wqe_index; 463 mlxcx_dma_buffer_t mlb_dma; 464 mblk_t *mlb_mp; 465 frtn_t mlb_frtn; 466 } mlxcx_buffer_t; 467 468 typedef enum { 469 MLXCX_CQ_ALLOC = 1 << 0, 470 MLXCX_CQ_CREATED = 1 << 1, 471 MLXCX_CQ_DESTROYED = 1 << 2, 472 MLXCX_CQ_EQAVL = 1 << 3, 473 MLXCX_CQ_BLOCKED_MAC = 1 << 4, 474 MLXCX_CQ_TEARDOWN = 1 << 5, 475 MLXCX_CQ_POLLING = 1 << 6, 476 MLXCX_CQ_ARMED = 1 << 7, 477 } mlxcx_completionq_state_t; 478 479 typedef struct mlxcx_work_queue mlxcx_work_queue_t; 480 481 typedef struct mlxcx_completion_queue { 482 kmutex_t mlcq_mtx; 483 mlxcx_t *mlcq_mlx; 484 mlxcx_completionq_state_t mlcq_state; 485 486 mlxcx_port_stats_t *mlcq_stats; 487 488 list_node_t mlcq_entry; 489 avl_node_t mlcq_eq_entry; 490 491 uint_t mlcq_num; 492 493 mlxcx_work_queue_t *mlcq_wq; 494 mlxcx_event_queue_t *mlcq_eq; 495 496 /* UAR region that has this CQ's UAR doorbell in it */ 497 mlxcx_uar_t *mlcq_uar; 498 499 mlxcx_dma_buffer_t mlcq_dma; 500 501 size_t mlcq_entshift; 502 size_t mlcq_nents; 503 mlxcx_completionq_ent_t *mlcq_ent; 504 uint32_t mlcq_cc; /* consumer counter */ 505 uint32_t mlcq_cc_armed; /* cc at last arm */ 506 uint32_t mlcq_ec; /* event counter */ 507 uint32_t mlcq_ec_armed; /* ec at last arm */ 508 509 mlxcx_dma_buffer_t mlcq_doorbell_dma; 510 mlxcx_completionq_doorbell_t *mlcq_doorbell; 511 512 uint64_t mlcq_bufcnt; 513 size_t mlcq_bufhwm; 514 size_t mlcq_buflwm; 515 list_t mlcq_buffers; 516 kmutex_t mlcq_bufbmtx; 517 list_t mlcq_buffers_b; 518 519 uint_t mlcq_check_disarm_cnt; 520 uint64_t mlcq_check_disarm_cc; 521 522 uint_t mlcq_cqemod_period_usec; 523 uint_t mlcq_cqemod_count; 524 525 mac_ring_handle_t mlcq_mac_hdl; 526 uint64_t mlcq_mac_gen; 527 528 boolean_t mlcq_fm_repd_qstate; 529 } mlxcx_completion_queue_t; 530 531 typedef enum { 532 MLXCX_WQ_ALLOC = 1 << 0, 533 MLXCX_WQ_CREATED = 1 << 1, 534 MLXCX_WQ_STARTED = 1 << 2, 535 MLXCX_WQ_DESTROYED = 1 << 3, 536 
MLXCX_WQ_TEARDOWN = 1 << 4, 537 MLXCX_WQ_BUFFERS = 1 << 5, 538 MLXCX_WQ_REFILLING = 1 << 6, 539 MLXCX_WQ_BLOCKED_MAC = 1 << 7 540 } mlxcx_workq_state_t; 541 542 typedef enum { 543 MLXCX_WQ_TYPE_SENDQ = 1, 544 MLXCX_WQ_TYPE_RECVQ 545 } mlxcx_workq_type_t; 546 547 typedef struct mlxcx_ring_group mlxcx_ring_group_t; 548 549 struct mlxcx_work_queue { 550 kmutex_t mlwq_mtx; 551 mlxcx_t *mlwq_mlx; 552 mlxcx_workq_type_t mlwq_type; 553 mlxcx_workq_state_t mlwq_state; 554 555 list_node_t mlwq_entry; 556 list_node_t mlwq_group_entry; 557 558 mlxcx_ring_group_t *mlwq_group; 559 560 uint_t mlwq_num; 561 562 mlxcx_completion_queue_t *mlwq_cq; 563 mlxcx_pd_t *mlwq_pd; 564 565 /* Required for send queues */ 566 mlxcx_tis_t *mlwq_tis; 567 568 /* UAR region that has this WQ's blueflame buffers in it */ 569 mlxcx_uar_t *mlwq_uar; 570 571 mlxcx_dma_buffer_t mlwq_dma; 572 573 mlxcx_eth_inline_mode_t mlwq_inline_mode; 574 size_t mlwq_entshift; 575 size_t mlwq_nents; 576 /* Discriminate based on mwq_type */ 577 union { 578 mlxcx_sendq_ent_t *mlwq_send_ent; 579 mlxcx_sendq_extra_ent_t *mlwq_send_extra_ent; 580 mlxcx_recvq_ent_t *mlwq_recv_ent; 581 mlxcx_sendq_bf_t *mlwq_bf_ent; 582 }; 583 uint64_t mlwq_pc; /* producer counter */ 584 585 uint64_t mlwq_wqebb_used; 586 size_t mlwq_bufhwm; 587 size_t mlwq_buflwm; 588 589 mlxcx_dma_buffer_t mlwq_doorbell_dma; 590 mlxcx_workq_doorbell_t *mlwq_doorbell; 591 592 mlxcx_buf_shard_t *mlwq_bufs; 593 mlxcx_buf_shard_t *mlwq_foreign_bufs; 594 595 taskq_ent_t mlwq_tqe; 596 597 boolean_t mlwq_fm_repd_qstate; 598 }; 599 600 #define MLXCX_RQT_MAX_SIZE 64 601 602 typedef enum { 603 MLXCX_RQT_CREATED = 1 << 0, 604 MLXCX_RQT_DESTROYED = 1 << 1, 605 MLXCX_RQT_DIRTY = 1 << 2, 606 } mlxcx_rqtable_state_t; 607 608 typedef struct mlxcx_rqtable { 609 mlxcx_rqtable_state_t mlrqt_state; 610 list_node_t mlrqt_entry; 611 uint_t mlrqt_num; 612 613 size_t mlrqt_max; 614 size_t mlrqt_used; 615 616 size_t mlrqt_rq_size; 617 mlxcx_work_queue_t **mlrqt_rq; 618 } 
mlxcx_rqtable_t; 619 620 typedef enum { 621 MLXCX_TIR_CREATED = 1 << 0, 622 MLXCX_TIR_DESTROYED = 1 << 1, 623 } mlxcx_tir_state_t; 624 625 typedef struct mlxcx_tir { 626 mlxcx_tir_state_t mltir_state; 627 list_node_t mltir_entry; 628 uint_t mltir_num; 629 mlxcx_tdom_t *mltir_tdom; 630 mlxcx_tir_type_t mltir_type; 631 union { 632 mlxcx_rqtable_t *mltir_rqtable; 633 mlxcx_work_queue_t *mltir_rq; 634 }; 635 mlxcx_tir_hash_fn_t mltir_hash_fn; 636 uint8_t mltir_toeplitz_key[40]; 637 mlxcx_tir_rx_hash_l3_type_t mltir_l3_type; 638 mlxcx_tir_rx_hash_l4_type_t mltir_l4_type; 639 mlxcx_tir_rx_hash_fields_t mltir_hash_fields; 640 } mlxcx_tir_t; 641 642 typedef enum { 643 MLXCX_FLOW_GROUP_CREATED = 1 << 0, 644 MLXCX_FLOW_GROUP_BUSY = 1 << 1, 645 MLXCX_FLOW_GROUP_DESTROYED = 1 << 2, 646 } mlxcx_flow_group_state_t; 647 648 typedef enum { 649 MLXCX_FLOW_MATCH_SMAC = 1 << 0, 650 MLXCX_FLOW_MATCH_DMAC = 1 << 1, 651 MLXCX_FLOW_MATCH_VLAN = 1 << 2, 652 MLXCX_FLOW_MATCH_VID = 1 << 3, 653 MLXCX_FLOW_MATCH_IP_VER = 1 << 4, 654 MLXCX_FLOW_MATCH_SRCIP = 1 << 5, 655 MLXCX_FLOW_MATCH_DSTIP = 1 << 6, 656 MLXCX_FLOW_MATCH_IP_PROTO = 1 << 7, 657 MLXCX_FLOW_MATCH_SQN = 1 << 8, 658 MLXCX_FLOW_MATCH_VXLAN = 1 << 9, 659 } mlxcx_flow_mask_t; 660 661 struct mlxcx_flow_group { 662 list_node_t mlfg_entry; 663 list_node_t mlfg_role_entry; 664 mlxcx_flow_group_state_t mlfg_state; 665 mlxcx_flow_table_t *mlfg_table; 666 uint_t mlfg_num; 667 size_t mlfg_start_idx; 668 size_t mlfg_size; 669 size_t mlfg_avail; 670 list_t mlfg_entries; 671 mlxcx_flow_mask_t mlfg_mask; 672 }; 673 674 typedef enum { 675 MLXCX_FLOW_ENTRY_RESERVED = 1 << 0, 676 MLXCX_FLOW_ENTRY_CREATED = 1 << 1, 677 MLXCX_FLOW_ENTRY_DELETED = 1 << 2, 678 MLXCX_FLOW_ENTRY_DIRTY = 1 << 3, 679 } mlxcx_flow_entry_state_t; 680 681 typedef struct { 682 mlxcx_tir_t *mlfed_tir; 683 mlxcx_flow_table_t *mlfed_flow; 684 } mlxcx_flow_entry_dest_t; 685 686 typedef struct mlxcx_flow_entry { 687 list_node_t mlfe_group_entry; 688 avl_node_t mlfe_dmac_entry; 689 
mlxcx_flow_entry_state_t mlfe_state; 690 mlxcx_flow_table_t *mlfe_table; 691 mlxcx_flow_group_t *mlfe_group; 692 uint_t mlfe_index; 693 694 mlxcx_flow_action_t mlfe_action; 695 696 /* Criteria for match */ 697 uint8_t mlfe_smac[ETHERADDRL]; 698 uint8_t mlfe_dmac[ETHERADDRL]; 699 700 mlxcx_vlan_type_t mlfe_vlan_type; 701 uint16_t mlfe_vid; 702 703 uint_t mlfe_ip_version; 704 uint8_t mlfe_srcip[IPV6_ADDR_LEN]; 705 uint8_t mlfe_dstip[IPV6_ADDR_LEN]; 706 707 uint_t mlfe_ip_proto; 708 uint16_t mlfe_sport; 709 uint16_t mlfe_dport; 710 711 uint32_t mlfe_sqn; 712 uint32_t mlfe_vxlan_vni; 713 714 /* Destinations */ 715 size_t mlfe_ndest; 716 mlxcx_flow_entry_dest_t mlfe_dest[MLXCX_FLOW_MAX_DESTINATIONS]; 717 718 /* 719 * mlxcx_group_mac_ts joining this entry to N ring groups 720 * only used by FEs on the root rx flow table 721 */ 722 list_t mlfe_ring_groups; 723 } mlxcx_flow_entry_t; 724 725 typedef enum { 726 MLXCX_FLOW_TABLE_CREATED = 1 << 0, 727 MLXCX_FLOW_TABLE_DESTROYED = 1 << 1, 728 MLXCX_FLOW_TABLE_ROOT = 1 << 2 729 } mlxcx_flow_table_state_t; 730 731 struct mlxcx_flow_table { 732 kmutex_t mlft_mtx; 733 mlxcx_flow_table_state_t mlft_state; 734 uint_t mlft_level; 735 uint_t mlft_num; 736 mlxcx_flow_table_type_t mlft_type; 737 738 mlxcx_port_t *mlft_port; 739 740 size_t mlft_entshift; 741 size_t mlft_nents; 742 743 size_t mlft_entsize; 744 mlxcx_flow_entry_t *mlft_ent; 745 746 /* First entry not yet claimed by a group */ 747 size_t mlft_next_ent; 748 749 list_t mlft_groups; 750 }; 751 752 typedef enum { 753 MLXCX_GROUP_RX, 754 MLXCX_GROUP_TX 755 } mlxcx_group_type_t; 756 757 typedef enum { 758 MLXCX_GROUP_INIT = 1 << 0, 759 MLXCX_GROUP_WQS = 1 << 1, 760 MLXCX_GROUP_TIRTIS = 1 << 2, 761 MLXCX_GROUP_FLOWS = 1 << 3, 762 MLXCX_GROUP_RUNNING = 1 << 4, 763 MLXCX_GROUP_RQT = 1 << 5, 764 } mlxcx_group_state_t; 765 766 #define MLXCX_RX_HASH_FT_SIZE_SHIFT 4 767 768 typedef enum { 769 MLXCX_TIR_ROLE_IPv4 = 0, 770 MLXCX_TIR_ROLE_IPv6, 771 MLXCX_TIR_ROLE_TCPv4, 772 
MLXCX_TIR_ROLE_TCPv6, 773 MLXCX_TIR_ROLE_UDPv4, 774 MLXCX_TIR_ROLE_UDPv6, 775 MLXCX_TIR_ROLE_OTHER, 776 777 MLXCX_TIRS_PER_GROUP 778 } mlxcx_tir_role_t; 779 780 typedef struct { 781 avl_node_t mlgm_group_entry; 782 list_node_t mlgm_fe_entry; 783 mlxcx_ring_group_t *mlgm_group; 784 uint8_t mlgm_mac[6]; 785 mlxcx_flow_entry_t *mlgm_fe; 786 } mlxcx_group_mac_t; 787 788 typedef struct { 789 list_node_t mlgv_entry; 790 boolean_t mlgv_tagged; 791 uint16_t mlgv_vid; 792 mlxcx_flow_entry_t *mlgv_fe; 793 } mlxcx_group_vlan_t; 794 795 struct mlxcx_ring_group { 796 kmutex_t mlg_mtx; 797 mlxcx_t *mlg_mlx; 798 mlxcx_group_state_t mlg_state; 799 mlxcx_group_type_t mlg_type; 800 801 mac_group_handle_t mlg_mac_hdl; 802 803 union { 804 mlxcx_tis_t mlg_tis; 805 mlxcx_tir_t mlg_tir[MLXCX_TIRS_PER_GROUP]; 806 }; 807 mlxcx_port_t *mlg_port; 808 809 size_t mlg_nwqs; 810 size_t mlg_wqs_size; 811 mlxcx_work_queue_t *mlg_wqs; 812 813 mlxcx_rqtable_t *mlg_rqt; 814 815 /* 816 * Flow table for matching VLAN IDs 817 */ 818 mlxcx_flow_table_t *mlg_rx_vlan_ft; 819 mlxcx_flow_group_t *mlg_rx_vlan_fg; 820 mlxcx_flow_group_t *mlg_rx_vlan_def_fg; 821 mlxcx_flow_group_t *mlg_rx_vlan_promisc_fg; 822 list_t mlg_rx_vlans; 823 824 taskq_t *mlg_refill_tq; 825 826 /* 827 * Flow table for separating out by protocol before hashing 828 */ 829 mlxcx_flow_table_t *mlg_rx_hash_ft; 830 831 /* 832 * Links to flow entries on the root flow table which are pointing to 833 * our rx_vlan_ft. 834 */ 835 avl_tree_t mlg_rx_macs; 836 }; 837 838 typedef enum mlxcx_cmd_state { 839 MLXCX_CMD_S_DONE = 1 << 0, 840 MLXCX_CMD_S_ERROR = 1 << 1 841 } mlxcx_cmd_state_t; 842 843 typedef struct mlxcx_cmd { 844 struct mlxcx *mlcmd_mlxp; 845 kmutex_t mlcmd_lock; 846 kcondvar_t mlcmd_cv; 847 848 uint8_t mlcmd_token; 849 mlxcx_cmd_op_t mlcmd_op; 850 851 /* 852 * Command data and extended mailboxes for responses. 
853 */ 854 const void *mlcmd_in; 855 uint32_t mlcmd_inlen; 856 void *mlcmd_out; 857 uint32_t mlcmd_outlen; 858 list_t mlcmd_mbox_in; 859 uint8_t mlcmd_nboxes_in; 860 list_t mlcmd_mbox_out; 861 uint8_t mlcmd_nboxes_out; 862 /* 863 * Status information. 864 */ 865 mlxcx_cmd_state_t mlcmd_state; 866 uint8_t mlcmd_status; 867 } mlxcx_cmd_t; 868 869 /* 870 * Our view of capabilities. 871 */ 872 typedef struct mlxcx_hca_cap { 873 mlxcx_hca_cap_mode_t mhc_mode; 874 mlxcx_hca_cap_type_t mhc_type; 875 union { 876 uint8_t mhc_bulk[MLXCX_HCA_CAP_SIZE]; 877 mlxcx_hca_cap_general_caps_t mhc_general; 878 mlxcx_hca_cap_eth_caps_t mhc_eth; 879 mlxcx_hca_cap_flow_caps_t mhc_flow; 880 }; 881 } mlxcx_hca_cap_t; 882 883 typedef struct { 884 /* Cooked values */ 885 boolean_t mlc_checksum; 886 boolean_t mlc_lso; 887 boolean_t mlc_vxlan; 888 size_t mlc_max_lso_size; 889 size_t mlc_max_rqt_size; 890 891 size_t mlc_max_rx_ft_shift; 892 size_t mlc_max_rx_fe_dest; 893 size_t mlc_max_rx_flows; 894 895 size_t mlc_max_tir; 896 897 /* Raw caps data */ 898 mlxcx_hca_cap_t mlc_hca_cur; 899 mlxcx_hca_cap_t mlc_hca_max; 900 mlxcx_hca_cap_t mlc_ether_cur; 901 mlxcx_hca_cap_t mlc_ether_max; 902 mlxcx_hca_cap_t mlc_nic_flow_cur; 903 mlxcx_hca_cap_t mlc_nic_flow_max; 904 } mlxcx_caps_t; 905 906 typedef struct { 907 uint_t mldp_eq_size_shift; 908 uint_t mldp_cq_size_shift; 909 uint_t mldp_cq_size_shift_default; 910 uint_t mldp_rq_size_shift; 911 uint_t mldp_rq_size_shift_default; 912 uint_t mldp_sq_size_shift; 913 uint_t mldp_sq_size_shift_default; 914 uint_t mldp_cqemod_period_usec; 915 uint_t mldp_cqemod_count; 916 uint_t mldp_intrmod_period_usec; 917 uint_t mldp_rx_ngroups_large; 918 uint_t mldp_rx_ngroups_small; 919 uint_t mldp_rx_nrings_per_large_group; 920 uint_t mldp_rx_nrings_per_small_group; 921 uint_t mldp_rx_per_cq; 922 uint_t mldp_tx_ngroups; 923 uint_t mldp_tx_nrings_per_group; 924 uint_t mldp_ftbl_root_size_shift; 925 size_t mldp_tx_bind_threshold; 926 uint_t mldp_ftbl_vlan_size_shift; 927 
uint64_t mldp_eq_check_interval_sec; 928 uint64_t mldp_cq_check_interval_sec; 929 uint64_t mldp_wq_check_interval_sec; 930 } mlxcx_drv_props_t; 931 932 typedef enum { 933 MLXCX_ATTACH_FM = 1 << 0, 934 MLXCX_ATTACH_PCI_CONFIG = 1 << 1, 935 MLXCX_ATTACH_REGS = 1 << 2, 936 MLXCX_ATTACH_CMD = 1 << 3, 937 MLXCX_ATTACH_ENABLE_HCA = 1 << 4, 938 MLXCX_ATTACH_PAGE_LIST = 1 << 5, 939 MLXCX_ATTACH_INIT_HCA = 1 << 6, 940 MLXCX_ATTACH_UAR_PD_TD = 1 << 7, 941 MLXCX_ATTACH_INTRS = 1 << 8, 942 MLXCX_ATTACH_PORTS = 1 << 9, 943 MLXCX_ATTACH_MAC_HDL = 1 << 10, 944 MLXCX_ATTACH_CQS = 1 << 11, 945 MLXCX_ATTACH_WQS = 1 << 12, 946 MLXCX_ATTACH_GROUPS = 1 << 13, 947 MLXCX_ATTACH_BUFS = 1 << 14, 948 MLXCX_ATTACH_CAPS = 1 << 15, 949 MLXCX_ATTACH_CHKTIMERS = 1 << 16, 950 } mlxcx_attach_progress_t; 951 952 struct mlxcx { 953 /* entry on the mlxcx_glist */ 954 list_node_t mlx_gentry; 955 956 dev_info_t *mlx_dip; 957 int mlx_inst; 958 mlxcx_attach_progress_t mlx_attach; 959 960 mlxcx_drv_props_t mlx_props; 961 962 /* 963 * Misc. data 964 */ 965 uint16_t mlx_fw_maj; 966 uint16_t mlx_fw_min; 967 uint16_t mlx_fw_rev; 968 uint16_t mlx_cmd_rev; 969 970 /* 971 * Various capabilities of hardware. 972 */ 973 mlxcx_caps_t *mlx_caps; 974 975 uint_t mlx_max_sdu; 976 uint_t mlx_sdu; 977 978 /* 979 * FM State 980 */ 981 int mlx_fm_caps; 982 983 /* 984 * PCI Data 985 */ 986 ddi_acc_handle_t mlx_cfg_handle; 987 ddi_acc_handle_t mlx_regs_handle; 988 caddr_t mlx_regs_base; 989 990 /* 991 * MAC handle 992 */ 993 mac_handle_t mlx_mac_hdl; 994 995 /* 996 * Main command queue for issuing general FW control commands. 997 */ 998 mlxcx_cmd_queue_t mlx_cmd; 999 1000 /* 1001 * Interrupts 1002 */ 1003 uint_t mlx_intr_pri; 1004 uint_t mlx_intr_type; /* always MSI-X */ 1005 int mlx_intr_count; 1006 size_t mlx_intr_size; /* allocation size */ 1007 ddi_intr_handle_t *mlx_intr_handles; 1008 1009 /* 1010 * Basic firmware resources which we use for a variety of things. 
1011 * The UAR is a reference to a page where CQ and EQ doorbells are 1012 * located. It also holds all the BlueFlame stuff (which we don't 1013 * use). 1014 */ 1015 mlxcx_uar_t mlx_uar; 1016 /* 1017 * The PD (Protection Domain) and TDOM (Transport Domain) are opaque 1018 * entities to us (they're Infiniband constructs we don't actually care 1019 * about) -- we just allocate them and shove their ID numbers in 1020 * whenever we're asked for one. 1021 * 1022 * The "reserved" LKEY is what we should put in queue entries that 1023 * have references to memory to indicate that they're using linear 1024 * addresses (comes from the QUERY_SPECIAL_CONTEXTS cmd). 1025 */ 1026 mlxcx_pd_t mlx_pd; 1027 mlxcx_tdom_t mlx_tdom; 1028 uint_t mlx_rsvd_lkey; 1029 1030 /* 1031 * Our event queues. These are 1:1 with interrupts. 1032 */ 1033 size_t mlx_eqs_size; /* allocation size */ 1034 mlxcx_event_queue_t *mlx_eqs; 1035 1036 /* 1037 * Page list. These represent the set of 4k pages we've given to 1038 * hardware. 1039 * 1040 * We can add to this list at the request of hardware from interrupt 1041 * context (the PAGE_REQUEST event), so it's protected by pagemtx. 1042 */ 1043 kmutex_t mlx_pagemtx; 1044 uint_t mlx_npages; 1045 avl_tree_t mlx_pages; 1046 1047 /* 1048 * Port state 1049 */ 1050 uint_t mlx_nports; 1051 size_t mlx_ports_size; 1052 mlxcx_port_t *mlx_ports; 1053 1054 /* 1055 * Completion queues (CQs). These are also indexed off the 1056 * event_queue_ts that they each report to. 1057 */ 1058 list_t mlx_cqs; 1059 1060 uint_t mlx_next_eq; 1061 1062 /* 1063 * Work queues (WQs). 
1064 */ 1065 list_t mlx_wqs; 1066 1067 /* 1068 * Ring groups 1069 */ 1070 size_t mlx_rx_ngroups; 1071 size_t mlx_rx_groups_size; 1072 mlxcx_ring_group_t *mlx_rx_groups; 1073 1074 size_t mlx_tx_ngroups; 1075 size_t mlx_tx_groups_size; 1076 mlxcx_ring_group_t *mlx_tx_groups; 1077 1078 kmem_cache_t *mlx_bufs_cache; 1079 list_t mlx_buf_shards; 1080 1081 ddi_periodic_t mlx_eq_checktimer; 1082 ddi_periodic_t mlx_cq_checktimer; 1083 ddi_periodic_t mlx_wq_checktimer; 1084 }; 1085 1086 /* 1087 * Register access 1088 */ 1089 extern uint16_t mlxcx_get16(mlxcx_t *, uintptr_t); 1090 extern uint32_t mlxcx_get32(mlxcx_t *, uintptr_t); 1091 extern uint64_t mlxcx_get64(mlxcx_t *, uintptr_t); 1092 1093 extern void mlxcx_put32(mlxcx_t *, uintptr_t, uint32_t); 1094 extern void mlxcx_put64(mlxcx_t *, uintptr_t, uint64_t); 1095 1096 extern void mlxcx_uar_put32(mlxcx_t *, mlxcx_uar_t *, uintptr_t, uint32_t); 1097 extern void mlxcx_uar_put64(mlxcx_t *, mlxcx_uar_t *, uintptr_t, uint64_t); 1098 1099 /* 1100 * Logging functions. 
 */
extern void mlxcx_warn(mlxcx_t *, const char *, ...);
extern void mlxcx_note(mlxcx_t *, const char *, ...);
extern void mlxcx_panic(mlxcx_t *, const char *, ...);

extern void mlxcx_fm_ereport(mlxcx_t *, const char *);

extern void mlxcx_check_sq(mlxcx_t *, mlxcx_work_queue_t *);
extern void mlxcx_check_rq(mlxcx_t *, mlxcx_work_queue_t *);

/*
 * DMA Functions
 */
extern void mlxcx_dma_free(mlxcx_dma_buffer_t *);
extern boolean_t mlxcx_dma_alloc(mlxcx_t *, mlxcx_dma_buffer_t *,
    ddi_dma_attr_t *, ddi_device_acc_attr_t *, boolean_t, size_t, boolean_t);
extern boolean_t mlxcx_dma_init(mlxcx_t *, mlxcx_dma_buffer_t *,
    ddi_dma_attr_t *, boolean_t);
extern boolean_t mlxcx_dma_bind_mblk(mlxcx_t *, mlxcx_dma_buffer_t *,
    const mblk_t *, size_t, boolean_t);
extern boolean_t mlxcx_dma_alloc_offset(mlxcx_t *, mlxcx_dma_buffer_t *,
    ddi_dma_attr_t *, ddi_device_acc_attr_t *, boolean_t,
    size_t, size_t, boolean_t);
extern void mlxcx_dma_unbind(mlxcx_t *, mlxcx_dma_buffer_t *);
extern void mlxcx_dma_acc_attr(mlxcx_t *, ddi_device_acc_attr_t *);
extern void mlxcx_dma_page_attr(mlxcx_t *, ddi_dma_attr_t *);
extern void mlxcx_dma_queue_attr(mlxcx_t *, ddi_dma_attr_t *);
extern void mlxcx_dma_qdbell_attr(mlxcx_t *, ddi_dma_attr_t *);
extern void mlxcx_dma_buf_attr(mlxcx_t *, ddi_dma_attr_t *);

extern boolean_t mlxcx_give_pages(mlxcx_t *, int32_t);

/*
 * Iterate over the DMA cookies of a bound buffer by wrapping
 * ddi_dma_cookie_iter(9F): pass NULL as prev for the first cookie, then the
 * previous return value to advance. The buffer must be bound (asserted).
 */
static inline const ddi_dma_cookie_t *
mlxcx_dma_cookie_iter(const mlxcx_dma_buffer_t *db,
    const ddi_dma_cookie_t *prev)
{
	ASSERT(db->mxdb_flags & MLXCX_DMABUF_BOUND);
	return (ddi_dma_cookie_iter(db->mxdb_dma_handle, prev));
}

/*
 * Fetch the sole cookie of a bound buffer via ddi_dma_cookie_one(9F).
 * The buffer must be bound (asserted).
 */
static inline const ddi_dma_cookie_t *
mlxcx_dma_cookie_one(const mlxcx_dma_buffer_t *db)
{
	ASSERT(db->mxdb_flags & MLXCX_DMABUF_BOUND);
	return (ddi_dma_cookie_one(db->mxdb_dma_handle));
}

1148 /* 1149 * From mlxcx_intr.c 1150 */ 1151 extern boolean_t mlxcx_intr_setup(mlxcx_t *); 1152 extern void mlxcx_intr_teardown(mlxcx_t *); 1153 extern void mlxcx_arm_eq(mlxcx_t *, mlxcx_event_queue_t *); 1154 extern void mlxcx_arm_cq(mlxcx_t *, mlxcx_completion_queue_t *); 1155 extern void mlxcx_update_cqci(mlxcx_t *, mlxcx_completion_queue_t *); 1156 1157 extern mblk_t *mlxcx_rx_poll(mlxcx_t *, mlxcx_completion_queue_t *, size_t); 1158 1159 /* 1160 * From mlxcx_gld.c 1161 */ 1162 extern boolean_t mlxcx_register_mac(mlxcx_t *); 1163 1164 /* 1165 * From mlxcx_ring.c 1166 */ 1167 extern boolean_t mlxcx_wq_alloc_dma(mlxcx_t *, mlxcx_work_queue_t *); 1168 extern void mlxcx_wq_rele_dma(mlxcx_t *, mlxcx_work_queue_t *); 1169 1170 extern boolean_t mlxcx_buf_create(mlxcx_t *, mlxcx_buf_shard_t *, 1171 mlxcx_buffer_t **); 1172 extern boolean_t mlxcx_buf_create_foreign(mlxcx_t *, mlxcx_buf_shard_t *, 1173 mlxcx_buffer_t **); 1174 extern mlxcx_buffer_t *mlxcx_buf_take(mlxcx_t *, mlxcx_work_queue_t *); 1175 extern size_t mlxcx_buf_take_n(mlxcx_t *, mlxcx_work_queue_t *, 1176 mlxcx_buffer_t **, size_t); 1177 extern boolean_t mlxcx_buf_loan(mlxcx_t *, mlxcx_buffer_t *); 1178 extern void mlxcx_buf_return(mlxcx_t *, mlxcx_buffer_t *); 1179 extern void mlxcx_buf_return_chain(mlxcx_t *, mlxcx_buffer_t *, boolean_t); 1180 extern void mlxcx_buf_destroy(mlxcx_t *, mlxcx_buffer_t *); 1181 extern void mlxcx_shard_ready(mlxcx_buf_shard_t *); 1182 extern void mlxcx_shard_draining(mlxcx_buf_shard_t *); 1183 1184 extern uint_t mlxcx_buf_bind_or_copy(mlxcx_t *, mlxcx_work_queue_t *, 1185 mblk_t *, size_t, mlxcx_buffer_t **); 1186 1187 extern boolean_t mlxcx_rx_group_setup(mlxcx_t *, mlxcx_ring_group_t *); 1188 extern boolean_t mlxcx_tx_group_setup(mlxcx_t *, mlxcx_ring_group_t *); 1189 1190 extern boolean_t mlxcx_rx_group_start(mlxcx_t *, mlxcx_ring_group_t *); 1191 extern boolean_t mlxcx_tx_ring_start(mlxcx_t *, mlxcx_ring_group_t *, 1192 mlxcx_work_queue_t *); 1193 extern boolean_t 
mlxcx_rx_ring_start(mlxcx_t *, mlxcx_ring_group_t *,
    mlxcx_work_queue_t *);

/* Post buffers onto an RQ, singly or in batches. */
extern boolean_t mlxcx_rq_add_buffer(mlxcx_t *, mlxcx_work_queue_t *,
    mlxcx_buffer_t *);
extern boolean_t mlxcx_rq_add_buffers(mlxcx_t *, mlxcx_work_queue_t *,
    mlxcx_buffer_t **, size_t);
extern boolean_t mlxcx_sq_add_buffer(mlxcx_t *, mlxcx_work_queue_t *,
    uint8_t *, size_t, uint32_t, mlxcx_buffer_t *);
extern boolean_t mlxcx_sq_add_nop(mlxcx_t *, mlxcx_work_queue_t *);
extern void mlxcx_rq_refill(mlxcx_t *, mlxcx_work_queue_t *);

extern void mlxcx_teardown_groups(mlxcx_t *);
extern void mlxcx_wq_teardown(mlxcx_t *, mlxcx_work_queue_t *);
extern void mlxcx_cq_teardown(mlxcx_t *, mlxcx_completion_queue_t *);
extern void mlxcx_teardown_rx_group(mlxcx_t *, mlxcx_ring_group_t *);
extern void mlxcx_teardown_tx_group(mlxcx_t *, mlxcx_ring_group_t *);

/* Completion handlers; the Rx variant hands the received mblk back. */
extern void mlxcx_tx_completion(mlxcx_t *, mlxcx_completion_queue_t *,
    mlxcx_completionq_ent_t *, mlxcx_buffer_t *);
extern mblk_t *mlxcx_rx_completion(mlxcx_t *, mlxcx_completion_queue_t *,
    mlxcx_completionq_ent_t *, mlxcx_buffer_t *);

extern mlxcx_buf_shard_t *mlxcx_mlbs_create(mlxcx_t *);

/*
 * Flow mgmt
 */
extern boolean_t mlxcx_add_umcast_entry(mlxcx_t *, mlxcx_port_t *,
    mlxcx_ring_group_t *, const uint8_t *);
extern boolean_t mlxcx_remove_umcast_entry(mlxcx_t *, mlxcx_port_t *,
    mlxcx_ring_group_t *, const uint8_t *);
extern void mlxcx_remove_all_umcast_entries(mlxcx_t *, mlxcx_port_t *,
    mlxcx_ring_group_t *);
extern boolean_t mlxcx_setup_flow_group(mlxcx_t *, mlxcx_flow_table_t *,
    mlxcx_flow_group_t *);
extern void mlxcx_teardown_flow_table(mlxcx_t *, mlxcx_flow_table_t *);

extern void mlxcx_remove_all_vlan_entries(mlxcx_t *, mlxcx_ring_group_t *);
extern boolean_t mlxcx_remove_vlan_entry(mlxcx_t *, mlxcx_ring_group_t *,
    boolean_t, uint16_t);
extern boolean_t mlxcx_add_vlan_entry(mlxcx_t *, mlxcx_ring_group_t *,
    boolean_t, uint16_t);

/*
 * Command functions
 */
extern boolean_t mlxcx_cmd_queue_init(mlxcx_t *);
extern void mlxcx_cmd_queue_fini(mlxcx_t *);

extern boolean_t mlxcx_cmd_enable_hca(mlxcx_t *);
extern boolean_t mlxcx_cmd_disable_hca(mlxcx_t *);

/* ISSI: interface step sequence ID negotiation with firmware. */
extern boolean_t mlxcx_cmd_query_issi(mlxcx_t *, uint_t *);
extern boolean_t mlxcx_cmd_set_issi(mlxcx_t *, uint16_t);

/* Firmware page accounting (see also mlxcx_give_pages()). */
extern boolean_t mlxcx_cmd_query_pages(mlxcx_t *, uint_t, int32_t *);
extern boolean_t mlxcx_cmd_give_pages(mlxcx_t *, uint_t, int32_t,
    mlxcx_dev_page_t **);
extern boolean_t mlxcx_cmd_return_pages(mlxcx_t *, int32_t, uint64_t *,
    int32_t *);

extern boolean_t mlxcx_cmd_query_hca_cap(mlxcx_t *, mlxcx_hca_cap_type_t,
    mlxcx_hca_cap_mode_t, mlxcx_hca_cap_t *);

extern boolean_t mlxcx_cmd_set_driver_version(mlxcx_t *, const char *);

extern boolean_t mlxcx_cmd_init_hca(mlxcx_t *);
extern boolean_t mlxcx_cmd_teardown_hca(mlxcx_t *);

extern boolean_t mlxcx_cmd_alloc_uar(mlxcx_t *, mlxcx_uar_t *);
extern boolean_t mlxcx_cmd_dealloc_uar(mlxcx_t *, mlxcx_uar_t *);

extern boolean_t mlxcx_cmd_alloc_pd(mlxcx_t *, mlxcx_pd_t *);
extern boolean_t mlxcx_cmd_dealloc_pd(mlxcx_t *, mlxcx_pd_t *);

extern boolean_t mlxcx_cmd_alloc_tdom(mlxcx_t *, mlxcx_tdom_t *);
extern boolean_t mlxcx_cmd_dealloc_tdom(mlxcx_t *, mlxcx_tdom_t *);

extern boolean_t mlxcx_cmd_create_eq(mlxcx_t *, mlxcx_event_queue_t *);
extern boolean_t mlxcx_cmd_destroy_eq(mlxcx_t *, mlxcx_event_queue_t *);
extern boolean_t mlxcx_cmd_query_eq(mlxcx_t *, mlxcx_event_queue_t *,
    mlxcx_eventq_ctx_t *);

extern boolean_t mlxcx_cmd_create_cq(mlxcx_t *, mlxcx_completion_queue_t *);
extern boolean_t mlxcx_cmd_destroy_cq(mlxcx_t *, mlxcx_completion_queue_t *);
extern boolean_t
mlxcx_cmd_query_cq(mlxcx_t *, mlxcx_completion_queue_t *,
    mlxcx_completionq_ctx_t *);

/* Receive queue (RQ) lifecycle commands. */
extern boolean_t mlxcx_cmd_create_rq(mlxcx_t *, mlxcx_work_queue_t *);
extern boolean_t mlxcx_cmd_start_rq(mlxcx_t *, mlxcx_work_queue_t *);
extern boolean_t mlxcx_cmd_stop_rq(mlxcx_t *, mlxcx_work_queue_t *);
extern boolean_t mlxcx_cmd_destroy_rq(mlxcx_t *, mlxcx_work_queue_t *);
extern boolean_t mlxcx_cmd_query_rq(mlxcx_t *, mlxcx_work_queue_t *,
    mlxcx_rq_ctx_t *);

extern boolean_t mlxcx_cmd_create_tir(mlxcx_t *, mlxcx_tir_t *);
extern boolean_t mlxcx_cmd_destroy_tir(mlxcx_t *, mlxcx_tir_t *);

/* Send queue (SQ) lifecycle commands. */
extern boolean_t mlxcx_cmd_create_sq(mlxcx_t *, mlxcx_work_queue_t *);
extern boolean_t mlxcx_cmd_start_sq(mlxcx_t *, mlxcx_work_queue_t *);
extern boolean_t mlxcx_cmd_stop_sq(mlxcx_t *, mlxcx_work_queue_t *);
extern boolean_t mlxcx_cmd_destroy_sq(mlxcx_t *, mlxcx_work_queue_t *);
extern boolean_t mlxcx_cmd_query_sq(mlxcx_t *, mlxcx_work_queue_t *,
    mlxcx_sq_ctx_t *);

extern boolean_t mlxcx_cmd_create_tis(mlxcx_t *, mlxcx_tis_t *);
extern boolean_t mlxcx_cmd_destroy_tis(mlxcx_t *, mlxcx_tis_t *);

extern boolean_t mlxcx_cmd_query_nic_vport_ctx(mlxcx_t *, mlxcx_port_t *);
extern boolean_t mlxcx_cmd_query_special_ctxs(mlxcx_t *);

extern boolean_t mlxcx_cmd_modify_nic_vport_ctx(mlxcx_t *, mlxcx_port_t *,
    mlxcx_modify_nic_vport_ctx_fields_t);

/* Flow table / flow group / flow entry commands. */
extern boolean_t mlxcx_cmd_create_flow_table(mlxcx_t *, mlxcx_flow_table_t *);
extern boolean_t mlxcx_cmd_destroy_flow_table(mlxcx_t *, mlxcx_flow_table_t *);
extern boolean_t mlxcx_cmd_set_flow_table_root(mlxcx_t *, mlxcx_flow_table_t *);

extern boolean_t mlxcx_cmd_create_flow_group(mlxcx_t *, mlxcx_flow_group_t *);
extern boolean_t mlxcx_cmd_set_flow_table_entry(mlxcx_t *,
    mlxcx_flow_entry_t *);
extern boolean_t mlxcx_cmd_delete_flow_table_entry(mlxcx_t *,
    mlxcx_flow_entry_t *);
extern boolean_t mlxcx_cmd_destroy_flow_group(mlxcx_t *, mlxcx_flow_group_t *);

/* Port register access and port status/speed/MTU queries. */
extern boolean_t mlxcx_cmd_access_register(mlxcx_t *, mlxcx_cmd_reg_opmod_t,
    mlxcx_register_id_t, mlxcx_register_data_t *);
extern boolean_t mlxcx_cmd_query_port_mtu(mlxcx_t *, mlxcx_port_t *);
extern boolean_t mlxcx_cmd_query_port_status(mlxcx_t *, mlxcx_port_t *);
extern boolean_t mlxcx_cmd_query_port_speed(mlxcx_t *, mlxcx_port_t *);

extern boolean_t mlxcx_cmd_set_port_mtu(mlxcx_t *, mlxcx_port_t *);

extern boolean_t mlxcx_cmd_create_rqt(mlxcx_t *, mlxcx_rqtable_t *);
extern boolean_t mlxcx_cmd_destroy_rqt(mlxcx_t *, mlxcx_rqtable_t *);

/* Interrupt moderation settings. */
extern boolean_t mlxcx_cmd_set_int_mod(mlxcx_t *, uint_t, uint_t);

extern boolean_t mlxcx_cmd_query_module_status(mlxcx_t *, uint_t,
    mlxcx_module_status_t *, mlxcx_module_error_type_t *);
extern boolean_t mlxcx_cmd_set_port_led(mlxcx_t *, mlxcx_port_t *, uint16_t);

/* Comparators for avl_t trees. */
extern int mlxcx_cq_compare(const void *, const void *);
extern int mlxcx_dmac_fe_compare(const void *, const void *);
extern int mlxcx_grmac_compare(const void *, const void *);
extern int mlxcx_page_compare(const void *, const void *);

extern void mlxcx_update_link_state(mlxcx_t *, mlxcx_port_t *);

/* Human-readable string conversions for diagnostics. */
extern void mlxcx_eth_proto_to_string(mlxcx_eth_proto_t, char *, size_t);
extern const char *mlxcx_port_status_string(mlxcx_port_status_t);

extern const char *mlxcx_event_name(mlxcx_event_t);

#ifdef __cplusplus
}
#endif

#endif /* _MLXCX_H */