/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2020, The University of Queensland
 * Copyright (c) 2018, Joyent, Inc.
 * Copyright 2020 RackTop Systems, Inc.
 */

/*
 * Mellanox Connect-X 4/5/6 driver.
 *
 * More details in mlxcx.c
 */

#ifndef _MLXCX_H
#define	_MLXCX_H

/*
 * mlxcx(7D) definitions
 */

#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/ddifm.h>
#include <sys/id_space.h>
#include <sys/list.h>
#include <sys/taskq_impl.h>
#include <sys/stddef.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/mac_provider.h>
#include <sys/mac_ether.h>
#include <sys/cpuvar.h>
#include <sys/ethernet.h>

#include <inet/ip.h>
#include <inet/ip6.h>

#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>

#include <mlxcx_reg.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Get access to the first PCI BAR.
 */
#define	MLXCX_REG_NUMBER		1

/*
 * The command queue is supposed to be a page, which is 4k.
 */
#define	MLXCX_CMD_DMA_PAGE_SIZE		4096

/*
 * Queues can allocate in units of this much memory.
 */
#define	MLXCX_QUEUE_DMA_PAGE_SIZE	4096

/*
 * We advertise two sizes of groups to MAC -- a certain number of "large"
 * groups (including the default group, which is sized to at least ncpus)
 * followed by a certain number of "small" groups.
 *
 * This allows us to have a larger amount of classification resources available
 * for zones/VMs without resorting to software classification.
 */
#define	MLXCX_RX_NGROUPS_LARGE_DFLT		2
#define	MLXCX_RX_NRINGS_PER_LARGE_GROUP_DFLT	16
#define	MLXCX_RX_NGROUPS_SMALL_DFLT		256
#define	MLXCX_RX_NRINGS_PER_SMALL_GROUP_DFLT	4

#define	MLXCX_TX_NGROUPS_DFLT		1
#define	MLXCX_TX_NRINGS_PER_GROUP_DFLT	64
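/*
 * For illustration, the defaults above work out to (absent mlxcx.conf
 * overrides):
 *
 *	rx rings: 2 groups * 16 rings + 256 groups * 4 rings = 1056
 *	tx rings: 1 group * 64 rings = 64
 *
 * Each ring is a work queue paired with its own completion queue.
 */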
/*
 * Queues will be sized to (1 << *Q_SIZE_SHIFT) entries long.
 */
#define	MLXCX_EQ_SIZE_SHIFT_DFLT	9

/*
 * The CQ, SQ and RQ sizes can affect throughput on higher speed interfaces.
 * EQ less so, as it only takes a single EQ entry to indicate there are
 * multiple completions on the CQ.
 *
 * Particularly on the Rx side, the RQ (and corresponding CQ) can run
 * low on available entries. A symptom of this is the refill taskq running
 * frequently. A larger RQ (and CQ) alleviates this, and as there is a
 * close relationship between SQ and CQ size, the SQ is increased too.
 */
#define	MLXCX_CQ_SIZE_SHIFT_DFLT	10
#define	MLXCX_CQ_SIZE_SHIFT_25G		12

/*
 * Default to making SQs bigger than RQs for 9k MTU, since most packets will
 * spill over into more than one slot. RQ WQEs are always 1 slot.
 */
#define	MLXCX_SQ_SIZE_SHIFT_DFLT	11
#define	MLXCX_SQ_SIZE_SHIFT_25G		13

#define	MLXCX_RQ_SIZE_SHIFT_DFLT	10
#define	MLXCX_RQ_SIZE_SHIFT_25G		12

#define	MLXCX_CQ_HWM_GAP		16
#define	MLXCX_CQ_LWM_GAP		24

#define	MLXCX_WQ_HWM_GAP		MLXCX_CQ_HWM_GAP
#define	MLXCX_WQ_LWM_GAP		MLXCX_CQ_LWM_GAP

#define	MLXCX_RQ_REFILL_STEP		64

/*
 * CQ event moderation
 */
#define	MLXCX_CQEMOD_PERIOD_USEC_DFLT	50
#define	MLXCX_CQEMOD_COUNT_DFLT		\
	(8 * ((1 << MLXCX_CQ_SIZE_SHIFT_DFLT) / 10))
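/*
 * Worked examples from the defaults above: an EQ has 1 << 9 = 512
 * entries, a CQ and an RQ 1 << 10 = 1024 each, and an SQ 1 << 11 = 2048
 * (the 25G variants are four times larger). MLXCX_CQEMOD_COUNT_DFLT
 * comes out to 8 * (1024 / 10) = 816 under integer division, i.e. a CQ
 * generates an event after 816 completions or after
 * MLXCX_CQEMOD_PERIOD_USEC_DFLT, whichever comes first.
 */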
/*
 * EQ interrupt moderation
 */
#define	MLXCX_INTRMOD_PERIOD_USEC_DFLT	10

/* Size of root flow tables */
#define	MLXCX_FTBL_ROOT_SIZE_SHIFT_DFLT	12

/* Size of 2nd level flow tables for VLAN filtering */
#define	MLXCX_FTBL_VLAN_SIZE_SHIFT_DFLT	4

/*
 * How big does an mblk have to be before we dma_bind() it instead of
 * bcopying?
 */
#define	MLXCX_TX_BIND_THRESHOLD_DFLT	2048
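/*
 * A sketch of the decision this threshold drives (the authoritative
 * logic, including a copy fallback when binding fails, lives in
 * mlxcx_buf_bind_or_copy() in mlxcx_ring.c):
 *
 *	if (MBLKL(mp) <= mlxp->mlx_props.mldp_tx_bind_threshold)
 *		copy the payload into a pre-allocated DMA buffer;
 *	else
 *		dma_bind() the mblk's own data pages directly;
 */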
/*
 * How often to check the status of event, completion and work queues for
 * overflow and other problems.
 */
#define	MLXCX_WQ_CHECK_INTERVAL_SEC_DFLT	300
#define	MLXCX_CQ_CHECK_INTERVAL_SEC_DFLT	300
#define	MLXCX_EQ_CHECK_INTERVAL_SEC_DFLT	30

/*
 * After this many packets, the packets received so far are passed to
 * the mac layer.
 */
#define	MLXCX_RX_PER_CQ_DEFAULT			256
#define	MLXCX_RX_PER_CQ_MIN			16
#define	MLXCX_RX_PER_CQ_MAX			4096

#define	MLXCX_DOORBELL_TRIES_DFLT		3
extern uint_t mlxcx_doorbell_tries;

#define	MLXCX_STUCK_INTR_COUNT_DFLT		128
extern uint_t mlxcx_stuck_intr_count;

#define	MLXCX_BUF_BIND_MAX_ATTEMTPS		50

#define	MLXCX_MTU_OFFSET \
	(sizeof (struct ether_vlan_header) + ETHERFCSL)
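/*
 * Plugging in the sizes: sizeof (struct ether_vlan_header) is 18 and
 * ETHERFCSL is 4, so MLXCX_MTU_OFFSET is 22 -- the L2 header and FCS
 * overhead accounted for when converting between the port MTU and the
 * SDU we report to MAC.
 */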
/*
 * This is the current version of the command structure that the driver
 * expects to be found in the ISS.
 */
#define	MLXCX_CMD_REVISION	5

#ifdef DEBUG
#define	MLXCX_DMA_SYNC(dma, flag)	VERIFY0(ddi_dma_sync( \
					    (dma).mxdb_dma_handle, 0, 0, \
					    (flag)))
#else
#define	MLXCX_DMA_SYNC(dma, flag)	(void) ddi_dma_sync( \
					    (dma).mxdb_dma_handle, 0, 0, \
					    (flag))
#endif
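/*
 * Illustrative use of MLXCX_DMA_SYNC: the zero offset and length ask
 * ddi_dma_sync() to cover the entire buffer, and under DEBUG the
 * return value is VERIFYd. For example, before the device reads a
 * freshly written doorbell:
 *
 *	MLXCX_DMA_SYNC(mlwq->mlwq_doorbell_dma, DDI_DMA_SYNC_FORDEV);
 */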
#define	MLXCX_FM_SERVICE_MLXCX	"mlxcx"

/*
 * This macro defines the expected value of the 'Interface Step Sequence ID'
 * (ISSI), which represents the version of the start up and tear down
 * sequence. We must check that hardware supports this and tell it which
 * version we're using as well.
 */
#define	MLXCX_CURRENT_ISSI	1
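/*
 * A sketch of that negotiation during attach (assuming, as the
 * declarations further down suggest, that mlxcx_cmd_query_issi()
 * returns a mask of the ISSI versions the hardware supports):
 *
 *	uint_t issi;
 *
 *	if (!mlxcx_cmd_query_issi(mlxp, &issi) ||
 *	    !(issi & (1 << MLXCX_CURRENT_ISSI)) ||
 *	    !mlxcx_cmd_set_issi(mlxp, MLXCX_CURRENT_ISSI))
 *		... fail attach ...
 */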
/*
 * This is the size of a page that the hardware expects from us when
 * manipulating pages.
 */
#define	MLXCX_HW_PAGE_SIZE	4096

/*
 * This is a special lkey value used to terminate a list of scatter pointers.
 */
#define	MLXCX_NULL_LKEY		0x100
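/*
 * For example (a sketch; the scatter pointer layout itself is defined
 * in mlxcx_reg.h): a WQE with three scatter slots but only two
 * addresses to carry would mark the unused third slot with this lkey,
 * and the hardware stops walking the list there:
 *
 *	seg[2].mlds_lkey = MLXCX_NULL_LKEY;	(stored big-endian)
 */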
/*
 * Forwards
 */
struct mlxcx;
typedef struct mlxcx mlxcx_t;

typedef enum {
	MLXCX_DMABUF_HDL_ALLOC		= 1 << 0,
	MLXCX_DMABUF_MEM_ALLOC		= 1 << 1,
	MLXCX_DMABUF_BOUND		= 1 << 2,
	MLXCX_DMABUF_FOREIGN		= 1 << 3,
} mlxcx_dma_buffer_flags_t;

typedef struct mlxcx_dma_buffer {
	mlxcx_dma_buffer_flags_t	mxdb_flags;
	caddr_t				mxdb_va;	/* Buffer VA */
	size_t				mxdb_len;	/* Buffer logical len */
	ddi_acc_handle_t		mxdb_acc_handle;
	ddi_dma_handle_t		mxdb_dma_handle;
	uint_t				mxdb_ncookies;
} mlxcx_dma_buffer_t;

typedef struct mlxcx_dev_page {
	list_node_t		mxdp_list;
	avl_node_t		mxdp_tree;
	uintptr_t		mxdp_pa;
	mlxcx_dma_buffer_t	mxdp_dma;
} mlxcx_dev_page_t;

/*
 * Data structure to keep track of all information related to the command
 * queue.
 */
typedef enum {
	MLXCX_CMD_QUEUE_S_IDLE = 1,
	MLXCX_CMD_QUEUE_S_BUSY,
	MLXCX_CMD_QUEUE_S_BROKEN
} mlxcx_cmd_queue_status_t;

typedef struct mlxcx_cmd_queue {
	kmutex_t		mcmd_lock;
	kcondvar_t		mcmd_cv;
	mlxcx_dma_buffer_t	mcmd_dma;
	mlxcx_cmd_ent_t		*mcmd_ent;

	uint8_t			mcmd_size_l2;
	uint8_t			mcmd_stride_l2;

	mlxcx_cmd_queue_status_t	mcmd_status;

	ddi_taskq_t		*mcmd_taskq;
	id_space_t		*mcmd_tokens;
} mlxcx_cmd_queue_t;

typedef struct mlxcx_cmd_mbox {
	list_node_t		mlbox_node;
	mlxcx_dma_buffer_t	mlbox_dma;
	mlxcx_cmd_mailbox_t	*mlbox_data;
} mlxcx_cmd_mbox_t;

typedef enum {
	MLXCX_EQ_ALLOC		= 1 << 0,	/* dma mem alloc'd, size set */
	MLXCX_EQ_CREATED	= 1 << 1,	/* CREATE_EQ sent to hw */
	MLXCX_EQ_DESTROYED	= 1 << 2,	/* DESTROY_EQ sent to hw */
	MLXCX_EQ_ARMED		= 1 << 3,	/* Armed through the UAR */
	MLXCX_EQ_POLLING	= 1 << 4,	/* Currently being polled */
} mlxcx_eventq_state_t;

typedef struct mlxcx_bf {
	kmutex_t		mbf_mtx;
	uint_t			mbf_cnt;
	uint_t			mbf_even;
	uint_t			mbf_odd;
} mlxcx_bf_t;

typedef struct mlxcx_uar {
	boolean_t		mlu_allocated;
	uint_t			mlu_num;
	uint_t			mlu_base;

	volatile uint_t		mlu_bfcnt;
	mlxcx_bf_t		mlu_bf[MLXCX_BF_PER_UAR];
} mlxcx_uar_t;

typedef struct mlxcx_pd {
	boolean_t		mlpd_allocated;
	uint32_t		mlpd_num;
} mlxcx_pd_t;

typedef struct mlxcx_tdom {
	boolean_t		mltd_allocated;
	uint32_t		mltd_num;
} mlxcx_tdom_t;

typedef enum {
	MLXCX_PORT_VPORT_PROMISC	= 1 << 0,
} mlxcx_port_flags_t;

typedef struct mlxcx_flow_table mlxcx_flow_table_t;
typedef struct mlxcx_flow_group mlxcx_flow_group_t;

typedef struct {
	uint64_t		mlps_rx_drops;
} mlxcx_port_stats_t;

typedef enum {
	MLXCX_PORT_INIT		= 1 << 0
} mlxcx_port_init_t;

typedef struct mlxcx_port {
	kmutex_t		mlp_mtx;
	mlxcx_port_init_t	mlp_init;
	mlxcx_t			*mlp_mlx;
	/*
	 * The mlp_num we have here starts at zero (it's an index), but the
	 * numbering we have to use for register access starts at 1. We
	 * currently write mlp_num into the other_vport fields in mlxcx_cmd.c
	 * (where 0 is a magic number meaning "my vport") so if we ever add
	 * support for virtualisation features and deal with more than one
	 * vport, we will probably have to change this.
	 */
	uint_t			mlp_num;
	mlxcx_port_flags_t	mlp_flags;
	uint64_t		mlp_guid;
	uint8_t			mlp_mac_address[ETHERADDRL];

	uint_t			mlp_mtu;
	uint_t			mlp_max_mtu;

	mlxcx_port_status_t	mlp_admin_status;
	mlxcx_port_status_t	mlp_oper_status;

	boolean_t		mlp_autoneg;
	mlxcx_eth_proto_t	mlp_max_proto;
	mlxcx_eth_proto_t	mlp_admin_proto;
	mlxcx_eth_proto_t	mlp_oper_proto;
	mlxcx_pplm_fec_active_t	mlp_fec_active;
	link_fec_t		mlp_fec_requested;

	mlxcx_eth_inline_mode_t	mlp_wqe_min_inline;

	/* Root flow tables */
	mlxcx_flow_table_t	*mlp_rx_flow;
	mlxcx_flow_table_t	*mlp_tx_flow;

	mlxcx_flow_group_t	*mlp_promisc;
	mlxcx_flow_group_t	*mlp_bcast;
	mlxcx_flow_group_t	*mlp_umcast;

	avl_tree_t		mlp_dmac_fe;

	mlxcx_port_stats_t	mlp_stats;

	mlxcx_module_status_t	mlp_last_modstate;
	mlxcx_module_error_type_t	mlp_last_moderr;
} mlxcx_port_t;

typedef enum {
	MLXCX_EQ_TYPE_ANY,
	MLXCX_EQ_TYPE_RX,
	MLXCX_EQ_TYPE_TX
} mlxcx_eventq_type_t;

typedef struct mlxcx_event_queue {
	kmutex_t		mleq_mtx;
	mlxcx_t			*mleq_mlx;
	mlxcx_eventq_state_t	mleq_state;
	mlxcx_eventq_type_t	mleq_type;

	mlxcx_dma_buffer_t	mleq_dma;

	size_t			mleq_entshift;
	size_t			mleq_nents;
	mlxcx_eventq_ent_t	*mleq_ent;
	uint32_t		mleq_cc;	/* consumer counter */
	uint32_t		mleq_cc_armed;

	uint32_t		mleq_events;

	uint32_t		mleq_badintrs;

	/* Hardware eq number */
	uint_t			mleq_num;
	/* Index into the mlxcx_t's interrupts array */
	uint_t			mleq_intr_index;

	/* UAR region that has this EQ's doorbell in it */
	mlxcx_uar_t		*mleq_uar;

	/* Tree of CQn => mlxcx_completion_queue_t */
	avl_tree_t		mleq_cqs;

	uint32_t		mleq_check_disarm_cc;
	uint_t			mleq_check_disarm_cnt;
} mlxcx_event_queue_t;

typedef enum {
	MLXCX_TIS_CREATED	= 1 << 0,
	MLXCX_TIS_DESTROYED	= 1 << 1,
} mlxcx_tis_state_t;

typedef struct mlxcx_tis {
	mlxcx_tis_state_t	mltis_state;
	list_node_t		mltis_entry;
	uint_t			mltis_num;
	mlxcx_tdom_t		*mltis_tdom;
} mlxcx_tis_t;

typedef enum {
	MLXCX_BUFFER_INIT,
	MLXCX_BUFFER_FREE,
	MLXCX_BUFFER_ON_WQ,
	MLXCX_BUFFER_ON_LOAN,
	MLXCX_BUFFER_ON_CHAIN,
} mlxcx_buffer_state_t;

typedef enum {
	MLXCX_SHARD_READY,
	MLXCX_SHARD_DRAINING,
} mlxcx_shard_state_t;

typedef struct mlxcx_buf_shard {
	mlxcx_shard_state_t	mlbs_state;
	list_node_t		mlbs_entry;
	kmutex_t		mlbs_mtx;
	list_t			mlbs_busy;
	list_t			mlbs_free;
	list_t			mlbs_loaned;
	kcondvar_t		mlbs_free_nonempty;
} mlxcx_buf_shard_t;
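/*
 * A rough sketch of how a buffer moves between the shard lists above
 * (the authoritative transitions live in mlxcx_ring.c):
 *
 *	mlbs_free --mlxcx_buf_take()--> mlbs_busy (MLXCX_BUFFER_ON_WQ)
 *	mlbs_busy --tx completion--> mlbs_free (MLXCX_BUFFER_FREE)
 *	mlbs_busy --rx mblk loaned to MAC--> mlbs_loaned (ON_LOAN)
 *	mlbs_loaned --freemsg() fires mlb_frtn--> mlbs_free
 *
 * mlbs_free_nonempty is signalled as buffers return to mlbs_free, so a
 * shard in MLXCX_SHARD_DRAINING can wait for loaned buffers to come
 * back before being torn down.
 */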
typedef struct mlxcx_buffer {
	mlxcx_buf_shard_t	*mlb_shard;
	list_node_t		mlb_entry;
	list_node_t		mlb_cq_entry;

	struct mlxcx_buffer	*mlb_tx_head;	/* head of tx chain */
	list_t			mlb_tx_chain;
	list_node_t		mlb_tx_chain_entry;

	boolean_t		mlb_foreign;
	size_t			mlb_used;
	mblk_t			*mlb_tx_mp;

	/*
	 * The number of work queue basic blocks this buf uses.
	 */
	uint_t			mlb_wqebbs;

	mlxcx_t			*mlb_mlx;
	mlxcx_buffer_state_t	mlb_state;
	uint_t			mlb_wqe_index;
	mlxcx_dma_buffer_t	mlb_dma;
	mblk_t			*mlb_mp;
	frtn_t			mlb_frtn;
} mlxcx_buffer_t;

typedef enum {
	MLXCX_CQ_ALLOC		= 1 << 0,
	MLXCX_CQ_CREATED	= 1 << 1,
	MLXCX_CQ_DESTROYED	= 1 << 2,
	MLXCX_CQ_EQAVL		= 1 << 3,
	MLXCX_CQ_BLOCKED_MAC	= 1 << 4,
	MLXCX_CQ_TEARDOWN	= 1 << 5,
	MLXCX_CQ_POLLING	= 1 << 6,
	MLXCX_CQ_ARMED		= 1 << 7,
} mlxcx_completionq_state_t;

typedef struct mlxcx_work_queue mlxcx_work_queue_t;

typedef struct mlxcx_completion_queue {
	kmutex_t		mlcq_mtx;
	mlxcx_t			*mlcq_mlx;
	mlxcx_completionq_state_t	mlcq_state;

	mlxcx_port_stats_t	*mlcq_stats;

	list_node_t		mlcq_entry;
	avl_node_t		mlcq_eq_entry;

	uint_t			mlcq_num;

	mlxcx_work_queue_t	*mlcq_wq;
	mlxcx_event_queue_t	*mlcq_eq;

	/* UAR region that has this CQ's UAR doorbell in it */
	mlxcx_uar_t		*mlcq_uar;

	mlxcx_dma_buffer_t	mlcq_dma;

	size_t			mlcq_entshift;
	size_t			mlcq_nents;
	mlxcx_completionq_ent_t	*mlcq_ent;
	uint32_t		mlcq_cc;	/* consumer counter */
	uint32_t		mlcq_cc_armed;	/* cc at last arm */
	uint32_t		mlcq_ec;	/* event counter */
	uint32_t		mlcq_ec_armed;	/* ec at last arm */

	mlxcx_dma_buffer_t	mlcq_doorbell_dma;
	mlxcx_completionq_doorbell_t	*mlcq_doorbell;

	uint64_t		mlcq_bufcnt;
	size_t			mlcq_bufhwm;
	size_t			mlcq_buflwm;
	list_t			mlcq_buffers;
	kmutex_t		mlcq_bufbmtx;
	list_t			mlcq_buffers_b;

	uint_t			mlcq_check_disarm_cnt;
	uint64_t		mlcq_check_disarm_cc;

	uint_t			mlcq_cqemod_period_usec;
	uint_t			mlcq_cqemod_count;

	mac_ring_handle_t	mlcq_mac_hdl;
	uint64_t		mlcq_mac_gen;

	boolean_t		mlcq_fm_repd_qstate;
} mlxcx_completion_queue_t;

typedef enum {
	MLXCX_WQ_ALLOC		= 1 << 0,
	MLXCX_WQ_CREATED	= 1 << 1,
	MLXCX_WQ_STARTED	= 1 << 2,
	MLXCX_WQ_DESTROYED	= 1 << 3,
	MLXCX_WQ_TEARDOWN	= 1 << 4,
	MLXCX_WQ_BUFFERS	= 1 << 5,
	MLXCX_WQ_REFILLING	= 1 << 6,
	MLXCX_WQ_BLOCKED_MAC	= 1 << 7
} mlxcx_workq_state_t;

typedef enum {
	MLXCX_WQ_TYPE_SENDQ	= 1,
	MLXCX_WQ_TYPE_RECVQ
} mlxcx_workq_type_t;

typedef struct mlxcx_ring_group mlxcx_ring_group_t;

struct mlxcx_work_queue {
	kmutex_t		mlwq_mtx;
	mlxcx_t			*mlwq_mlx;
	mlxcx_workq_type_t	mlwq_type;
	mlxcx_workq_state_t	mlwq_state;

	list_node_t		mlwq_entry;
	list_node_t		mlwq_group_entry;

	mlxcx_ring_group_t	*mlwq_group;

	uint_t			mlwq_num;

	mlxcx_completion_queue_t	*mlwq_cq;
	mlxcx_pd_t		*mlwq_pd;

	/* Required for send queues */
	mlxcx_tis_t		*mlwq_tis;

	/* UAR region that has this WQ's blueflame buffers in it */
	mlxcx_uar_t		*mlwq_uar;

	mlxcx_dma_buffer_t	mlwq_dma;

	mlxcx_eth_inline_mode_t	mlwq_inline_mode;
	size_t			mlwq_entshift;
	size_t			mlwq_nents;
	/* Discriminate based on mlwq_type */
	union {
		mlxcx_sendq_ent_t	*mlwq_send_ent;
		mlxcx_sendq_extra_ent_t	*mlwq_send_extra_ent;
		mlxcx_recvq_ent_t	*mlwq_recv_ent;
		mlxcx_sendq_bf_t	*mlwq_bf_ent;
	};
	uint64_t		mlwq_pc;	/* producer counter */

	uint64_t		mlwq_wqebb_used;
	size_t			mlwq_bufhwm;
	size_t			mlwq_buflwm;

	mlxcx_dma_buffer_t	mlwq_doorbell_dma;
	mlxcx_workq_doorbell_t	*mlwq_doorbell;

	mlxcx_buf_shard_t	*mlwq_bufs;
	mlxcx_buf_shard_t	*mlwq_foreign_bufs;

	taskq_ent_t		mlwq_tqe;

	boolean_t		mlwq_fm_repd_qstate;
};
#define	MLXCX_RQT_MAX_SIZE	64

typedef enum {
	MLXCX_RQT_CREATED	= 1 << 0,
	MLXCX_RQT_DESTROYED	= 1 << 1,
	MLXCX_RQT_DIRTY		= 1 << 2,
} mlxcx_rqtable_state_t;

typedef struct mlxcx_rqtable {
	mlxcx_rqtable_state_t	mlrqt_state;
	list_node_t		mlrqt_entry;
	uint_t			mlrqt_num;

	size_t			mlrqt_max;
	size_t			mlrqt_used;

	size_t			mlrqt_rq_size;
	mlxcx_work_queue_t	**mlrqt_rq;
} mlxcx_rqtable_t;

typedef enum {
	MLXCX_TIR_CREATED	= 1 << 0,
	MLXCX_TIR_DESTROYED	= 1 << 1,
} mlxcx_tir_state_t;

typedef struct mlxcx_tir {
	mlxcx_tir_state_t	mltir_state;
	list_node_t		mltir_entry;
	uint_t			mltir_num;
	mlxcx_tdom_t		*mltir_tdom;
	mlxcx_tir_type_t	mltir_type;
	union {
		mlxcx_rqtable_t		*mltir_rqtable;
		mlxcx_work_queue_t	*mltir_rq;
	};
	mlxcx_tir_hash_fn_t	mltir_hash_fn;
	uint8_t			mltir_toeplitz_key[40];
	mlxcx_tir_rx_hash_l3_type_t	mltir_l3_type;
	mlxcx_tir_rx_hash_l4_type_t	mltir_l4_type;
	mlxcx_tir_rx_hash_fields_t	mltir_hash_fields;
} mlxcx_tir_t;

typedef enum {
	MLXCX_FLOW_GROUP_CREATED	= 1 << 0,
	MLXCX_FLOW_GROUP_BUSY		= 1 << 1,
	MLXCX_FLOW_GROUP_DESTROYED	= 1 << 2,
} mlxcx_flow_group_state_t;

typedef enum {
	MLXCX_FLOW_MATCH_SMAC		= 1 << 0,
	MLXCX_FLOW_MATCH_DMAC		= 1 << 1,
	MLXCX_FLOW_MATCH_VLAN		= 1 << 2,
	MLXCX_FLOW_MATCH_VID		= 1 << 3,
	MLXCX_FLOW_MATCH_IP_VER		= 1 << 4,
	MLXCX_FLOW_MATCH_SRCIP		= 1 << 5,
	MLXCX_FLOW_MATCH_DSTIP		= 1 << 6,
	MLXCX_FLOW_MATCH_IP_PROTO	= 1 << 7,
	MLXCX_FLOW_MATCH_SQN		= 1 << 8,
	MLXCX_FLOW_MATCH_VXLAN		= 1 << 9,
} mlxcx_flow_mask_t;

struct mlxcx_flow_group {
	list_node_t		mlfg_entry;
	list_node_t		mlfg_role_entry;
	mlxcx_flow_group_state_t	mlfg_state;
	mlxcx_flow_table_t	*mlfg_table;
	uint_t			mlfg_num;
	size_t			mlfg_start_idx;
	size_t			mlfg_size;
	size_t			mlfg_avail;
	list_t			mlfg_entries;
	mlxcx_flow_mask_t	mlfg_mask;
};

typedef enum {
	MLXCX_FLOW_ENTRY_RESERVED	= 1 << 0,
	MLXCX_FLOW_ENTRY_CREATED	= 1 << 1,
	MLXCX_FLOW_ENTRY_DELETED	= 1 << 2,
	MLXCX_FLOW_ENTRY_DIRTY		= 1 << 3,
} mlxcx_flow_entry_state_t;

typedef struct {
	mlxcx_tir_t		*mlfed_tir;
	mlxcx_flow_table_t	*mlfed_flow;
} mlxcx_flow_entry_dest_t;

typedef struct mlxcx_flow_entry {
	list_node_t		mlfe_group_entry;
	avl_node_t		mlfe_dmac_entry;
	mlxcx_flow_entry_state_t	mlfe_state;
	mlxcx_flow_table_t	*mlfe_table;
	mlxcx_flow_group_t	*mlfe_group;
	uint_t			mlfe_index;

	mlxcx_flow_action_t	mlfe_action;

	/* Criteria for match */
	uint8_t			mlfe_smac[ETHERADDRL];
	uint8_t			mlfe_dmac[ETHERADDRL];

	mlxcx_vlan_type_t	mlfe_vlan_type;
	uint16_t		mlfe_vid;

	uint_t			mlfe_ip_version;
	uint8_t			mlfe_srcip[IPV6_ADDR_LEN];
	uint8_t			mlfe_dstip[IPV6_ADDR_LEN];

	uint_t			mlfe_ip_proto;
	uint16_t		mlfe_sport;
	uint16_t		mlfe_dport;

	uint32_t		mlfe_sqn;
	uint32_t		mlfe_vxlan_vni;

	/* Destinations */
	size_t			mlfe_ndest;
	mlxcx_flow_entry_dest_t	mlfe_dest[MLXCX_FLOW_MAX_DESTINATIONS];

	/*
	 * mlxcx_group_mac_ts joining this entry to N ring groups
	 * only used by FEs on the root rx flow table
	 */
	list_t			mlfe_ring_groups;
} mlxcx_flow_entry_t;

typedef enum {
	MLXCX_FLOW_TABLE_CREATED	= 1 << 0,
	MLXCX_FLOW_TABLE_DESTROYED	= 1 << 1,
	MLXCX_FLOW_TABLE_ROOT		= 1 << 2
} mlxcx_flow_table_state_t;

struct mlxcx_flow_table {
	kmutex_t		mlft_mtx;
	mlxcx_flow_table_state_t	mlft_state;
	uint_t			mlft_level;
	uint_t			mlft_num;
	mlxcx_flow_table_type_t	mlft_type;

	mlxcx_port_t		*mlft_port;

	size_t			mlft_entshift;
	size_t			mlft_nents;

	size_t			mlft_entsize;
	mlxcx_flow_entry_t	*mlft_ent;

	/* First entry not yet claimed by a group */
	size_t			mlft_next_ent;

	list_t			mlft_groups;
};

typedef enum {
	MLXCX_GROUP_RX,
	MLXCX_GROUP_TX
} mlxcx_group_type_t;

typedef enum {
	MLXCX_GROUP_INIT		= 1 << 0,
	MLXCX_GROUP_WQS			= 1 << 1,
	MLXCX_GROUP_TIRTIS		= 1 << 2,
	MLXCX_GROUP_FLOWS		= 1 << 3,
	MLXCX_GROUP_RUNNING		= 1 << 4,
	MLXCX_GROUP_RQT			= 1 << 5,
} mlxcx_group_state_t;

#define	MLXCX_RX_HASH_FT_SIZE_SHIFT	4

typedef enum {
	MLXCX_TIR_ROLE_IPv4 = 0,
	MLXCX_TIR_ROLE_IPv6,
	MLXCX_TIR_ROLE_TCPv4,
	MLXCX_TIR_ROLE_TCPv6,
	MLXCX_TIR_ROLE_UDPv4,
	MLXCX_TIR_ROLE_UDPv6,
	MLXCX_TIR_ROLE_OTHER,

	MLXCX_TIRS_PER_GROUP
} mlxcx_tir_role_t;

typedef struct {
	avl_node_t		mlgm_group_entry;
	list_node_t		mlgm_fe_entry;
	mlxcx_ring_group_t	*mlgm_group;
	uint8_t			mlgm_mac[6];
	mlxcx_flow_entry_t	*mlgm_fe;
} mlxcx_group_mac_t;

typedef struct {
	list_node_t		mlgv_entry;
	boolean_t		mlgv_tagged;
	uint16_t		mlgv_vid;
	mlxcx_flow_entry_t	*mlgv_fe;
} mlxcx_group_vlan_t;

struct mlxcx_ring_group {
	kmutex_t			mlg_mtx;
	mlxcx_t				*mlg_mlx;
	mlxcx_group_state_t		mlg_state;
	mlxcx_group_type_t		mlg_type;

	mac_group_handle_t		mlg_mac_hdl;

	union {
		mlxcx_tis_t		mlg_tis;
		mlxcx_tir_t		mlg_tir[MLXCX_TIRS_PER_GROUP];
	};
	mlxcx_port_t			*mlg_port;

	size_t				mlg_nwqs;
	size_t				mlg_wqs_size;
	mlxcx_work_queue_t		*mlg_wqs;

	mlxcx_rqtable_t			*mlg_rqt;

	/*
	 * Flow table for matching VLAN IDs
	 */
	mlxcx_flow_table_t		*mlg_rx_vlan_ft;
	mlxcx_flow_group_t		*mlg_rx_vlan_fg;
	mlxcx_flow_group_t		*mlg_rx_vlan_def_fg;
	mlxcx_flow_group_t		*mlg_rx_vlan_promisc_fg;
	list_t				mlg_rx_vlans;

	taskq_t				*mlg_refill_tq;

	/*
	 * Flow table for separating out by protocol before hashing
	 */
	mlxcx_flow_table_t		*mlg_rx_hash_ft;

	/*
	 * Links to flow entries on the root flow table which are pointing to
	 * our rx_vlan_ft.
	 */
	avl_tree_t			mlg_rx_macs;
};
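/*
 * Pulling the structures above together, the rx classification path
 * for a ring group looks roughly like this (a sketch; the tables are
 * built at group setup time):
 *
 *	port root flow table (mlp_rx_flow, matches dest MAC)
 *	  -> group VLAN flow table (mlg_rx_vlan_ft, matches VLAN ID)
 *	    -> group hash flow table (mlg_rx_hash_ft, split by protocol)
 *	      -> TIR (one per mlxcx_tir_role_t, RSS Toeplitz hash)
 *	        -> RQT -> an RQ in mlg_wqs
 */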
typedef enum mlxcx_cmd_state {
	MLXCX_CMD_S_DONE	= 1 << 0,
	MLXCX_CMD_S_ERROR	= 1 << 1
} mlxcx_cmd_state_t;

typedef struct mlxcx_cmd {
	struct mlxcx		*mlcmd_mlxp;
	kmutex_t		mlcmd_lock;
	kcondvar_t		mlcmd_cv;

	uint8_t			mlcmd_token;
	mlxcx_cmd_op_t		mlcmd_op;

	/*
	 * Command data and extended mailboxes for responses.
	 */
	const void		*mlcmd_in;
	uint32_t		mlcmd_inlen;
	void			*mlcmd_out;
	uint32_t		mlcmd_outlen;
	list_t			mlcmd_mbox_in;
	uint8_t			mlcmd_nboxes_in;
	list_t			mlcmd_mbox_out;
	uint8_t			mlcmd_nboxes_out;
	/*
	 * Status information.
	 */
	mlxcx_cmd_state_t	mlcmd_state;
	uint8_t			mlcmd_status;
} mlxcx_cmd_t;

/*
 * Our view of capabilities.
 */
typedef struct mlxcx_hca_cap {
	mlxcx_hca_cap_mode_t	mhc_mode;
	mlxcx_hca_cap_type_t	mhc_type;
	union {
		uint8_t				mhc_bulk[MLXCX_HCA_CAP_SIZE];
		mlxcx_hca_cap_general_caps_t	mhc_general;
		mlxcx_hca_cap_eth_caps_t	mhc_eth;
		mlxcx_hca_cap_flow_caps_t	mhc_flow;
	};
} mlxcx_hca_cap_t;

typedef struct {
	/* Cooked values */
	boolean_t		mlc_checksum;
	boolean_t		mlc_lso;
	boolean_t		mlc_vxlan;
	size_t			mlc_max_lso_size;
	size_t			mlc_max_rqt_size;

	size_t			mlc_max_rx_ft_shift;
	size_t			mlc_max_rx_fe_dest;
	size_t			mlc_max_rx_flows;

	size_t			mlc_max_tir;

	/* Raw caps data */
	mlxcx_hca_cap_t		mlc_hca_cur;
	mlxcx_hca_cap_t		mlc_hca_max;
	mlxcx_hca_cap_t		mlc_ether_cur;
	mlxcx_hca_cap_t		mlc_ether_max;
	mlxcx_hca_cap_t		mlc_nic_flow_cur;
	mlxcx_hca_cap_t		mlc_nic_flow_max;
} mlxcx_caps_t;

typedef struct {
	uint_t			mldp_eq_size_shift;
	uint_t			mldp_cq_size_shift;
	uint_t			mldp_cq_size_shift_default;
	uint_t			mldp_rq_size_shift;
	uint_t			mldp_rq_size_shift_default;
	uint_t			mldp_sq_size_shift;
	uint_t			mldp_sq_size_shift_default;
	uint_t			mldp_cqemod_period_usec;
	uint_t			mldp_cqemod_count;
	uint_t			mldp_intrmod_period_usec;
	uint_t			mldp_rx_ngroups_large;
	uint_t			mldp_rx_ngroups_small;
	uint_t			mldp_rx_nrings_per_large_group;
	uint_t			mldp_rx_nrings_per_small_group;
	uint_t			mldp_rx_per_cq;
	uint_t			mldp_tx_ngroups;
	uint_t			mldp_tx_nrings_per_group;
	uint_t			mldp_ftbl_root_size_shift;
	size_t			mldp_tx_bind_threshold;
	uint_t			mldp_ftbl_vlan_size_shift;
	uint64_t		mldp_eq_check_interval_sec;
	uint64_t		mldp_cq_check_interval_sec;
	uint64_t		mldp_wq_check_interval_sec;
} mlxcx_drv_props_t;

typedef enum {
	MLXCX_ATTACH_FM		= 1 << 0,
	MLXCX_ATTACH_PCI_CONFIG	= 1 << 1,
	MLXCX_ATTACH_REGS	= 1 << 2,
	MLXCX_ATTACH_CMD	= 1 << 3,
	MLXCX_ATTACH_ENABLE_HCA	= 1 << 4,
	MLXCX_ATTACH_PAGE_LIST	= 1 << 5,
	MLXCX_ATTACH_INIT_HCA	= 1 << 6,
	MLXCX_ATTACH_UAR_PD_TD	= 1 << 7,
	MLXCX_ATTACH_INTRS	= 1 << 8,
	MLXCX_ATTACH_PORTS	= 1 << 9,
	MLXCX_ATTACH_MAC_HDL	= 1 << 10,
	MLXCX_ATTACH_CQS	= 1 << 11,
	MLXCX_ATTACH_WQS	= 1 << 12,
	MLXCX_ATTACH_GROUPS	= 1 << 13,
	MLXCX_ATTACH_BUFS	= 1 << 14,
	MLXCX_ATTACH_CAPS	= 1 << 15,
	MLXCX_ATTACH_CHKTIMERS	= 1 << 16,
} mlxcx_attach_progress_t;

struct mlxcx {
	/* entry on the mlxcx_glist */
	list_node_t		mlx_gentry;

	dev_info_t		*mlx_dip;
	int			mlx_inst;
	mlxcx_attach_progress_t	mlx_attach;

	mlxcx_drv_props_t	mlx_props;

	/*
	 * Misc. data
	 */
	uint16_t		mlx_fw_maj;
	uint16_t		mlx_fw_min;
	uint16_t		mlx_fw_rev;
	uint16_t		mlx_cmd_rev;

	/*
	 * Various capabilities of hardware.
	 */
	mlxcx_caps_t		*mlx_caps;

	uint_t			mlx_max_sdu;
	uint_t			mlx_sdu;

	/*
	 * FM State
	 */
	int			mlx_fm_caps;

	/*
	 * PCI Data
	 */
	ddi_acc_handle_t	mlx_cfg_handle;
	ddi_acc_handle_t	mlx_regs_handle;
	caddr_t			mlx_regs_base;

	/*
	 * MAC handle
	 */
	mac_handle_t		mlx_mac_hdl;

	/*
	 * Main command queue for issuing general FW control commands.
	 */
	mlxcx_cmd_queue_t	mlx_cmd;

	/*
	 * Interrupts
	 */
	uint_t			mlx_intr_pri;
	uint_t			mlx_intr_type;	/* always MSI-X */
	int			mlx_intr_count;
	size_t			mlx_intr_size;	/* allocation size */
	ddi_intr_handle_t	*mlx_intr_handles;

	/*
	 * Basic firmware resources which we use for a variety of things.
	 * The UAR is a reference to a page where CQ and EQ doorbells are
	 * located. It also holds all the BlueFlame stuff (which we don't
	 * use).
	 */
	mlxcx_uar_t		mlx_uar;
	/*
	 * The PD (Protection Domain) and TDOM (Transport Domain) are opaque
	 * entities to us (they're Infiniband constructs we don't actually care
	 * about) -- we just allocate them and shove their ID numbers in
	 * whenever we're asked for one.
	 *
	 * The "reserved" LKEY is what we should put in queue entries that
	 * have references to memory to indicate that they're using linear
	 * addresses (comes from the QUERY_SPECIAL_CONTEXTS cmd).
	 */
	mlxcx_pd_t		mlx_pd;
	mlxcx_tdom_t		mlx_tdom;
	uint_t			mlx_rsvd_lkey;

	/*
	 * Our event queues. These are 1:1 with interrupts.
	 */
	size_t			mlx_eqs_size;	/* allocation size */
	mlxcx_event_queue_t	*mlx_eqs;

	/*
	 * Page list. These represent the set of 4k pages we've given to
	 * hardware.
	 *
	 * We can add to this list at the request of hardware from interrupt
	 * context (the PAGE_REQUEST event), so it's protected by pagemtx.
	 */
	kmutex_t		mlx_pagemtx;
	uint_t			mlx_npages;
	avl_tree_t		mlx_pages;

	/*
	 * Port state
	 */
	uint_t			mlx_nports;
	size_t			mlx_ports_size;
	mlxcx_port_t		*mlx_ports;

	/*
	 * Completion queues (CQs). These are also indexed off the
	 * event_queue_ts that they each report to.
	 */
	list_t			mlx_cqs;

	uint_t			mlx_next_eq;

	/*
	 * Work queues (WQs).
	 */
	list_t			mlx_wqs;

	/*
	 * Ring groups
	 */
	size_t			mlx_rx_ngroups;
	size_t			mlx_rx_groups_size;
	mlxcx_ring_group_t	*mlx_rx_groups;

	size_t			mlx_tx_ngroups;
	size_t			mlx_tx_groups_size;
	mlxcx_ring_group_t	*mlx_tx_groups;

	kmem_cache_t		*mlx_bufs_cache;
	list_t			mlx_buf_shards;

	ddi_periodic_t		mlx_eq_checktimer;
	ddi_periodic_t		mlx_cq_checktimer;
	ddi_periodic_t		mlx_wq_checktimer;
};

/*
 * Register access
 */
extern uint16_t mlxcx_get16(mlxcx_t *, uintptr_t);
extern uint32_t mlxcx_get32(mlxcx_t *, uintptr_t);
extern uint64_t mlxcx_get64(mlxcx_t *, uintptr_t);

extern void mlxcx_put32(mlxcx_t *, uintptr_t, uint32_t);
extern void mlxcx_put64(mlxcx_t *, uintptr_t, uint64_t);

extern void mlxcx_uar_put32(mlxcx_t *, mlxcx_uar_t *, uintptr_t, uint32_t);
extern void mlxcx_uar_put64(mlxcx_t *, mlxcx_uar_t *, uintptr_t, uint64_t);

/*
 * Logging functions.
 */
extern void mlxcx_warn(mlxcx_t *, const char *, ...);
extern void mlxcx_note(mlxcx_t *, const char *, ...);
extern void mlxcx_panic(mlxcx_t *, const char *, ...);

extern void mlxcx_fm_ereport(mlxcx_t *, const char *);

extern void mlxcx_check_sq(mlxcx_t *, mlxcx_work_queue_t *);
extern void mlxcx_check_rq(mlxcx_t *, mlxcx_work_queue_t *);

/*
 * DMA Functions
 */
extern void mlxcx_dma_free(mlxcx_dma_buffer_t *);
extern boolean_t mlxcx_dma_alloc(mlxcx_t *, mlxcx_dma_buffer_t *,
    ddi_dma_attr_t *, ddi_device_acc_attr_t *, boolean_t, size_t, boolean_t);
extern boolean_t mlxcx_dma_init(mlxcx_t *, mlxcx_dma_buffer_t *,
    ddi_dma_attr_t *, boolean_t);
extern boolean_t mlxcx_dma_bind_mblk(mlxcx_t *, mlxcx_dma_buffer_t *,
    const mblk_t *, size_t, boolean_t);
extern boolean_t mlxcx_dma_alloc_offset(mlxcx_t *, mlxcx_dma_buffer_t *,
    ddi_dma_attr_t *, ddi_device_acc_attr_t *, boolean_t,
    size_t, size_t, boolean_t);
extern void mlxcx_dma_unbind(mlxcx_t *, mlxcx_dma_buffer_t *);
extern void mlxcx_dma_acc_attr(mlxcx_t *, ddi_device_acc_attr_t *);
extern void mlxcx_dma_page_attr(mlxcx_t *, ddi_dma_attr_t *);
extern void mlxcx_dma_queue_attr(mlxcx_t *, ddi_dma_attr_t *);
extern void mlxcx_dma_qdbell_attr(mlxcx_t *, ddi_dma_attr_t *);
extern void mlxcx_dma_buf_attr(mlxcx_t *, ddi_dma_attr_t *);

extern boolean_t mlxcx_give_pages(mlxcx_t *, int32_t);

static inline const ddi_dma_cookie_t *
mlxcx_dma_cookie_iter(const mlxcx_dma_buffer_t *db,
    const ddi_dma_cookie_t *prev)
{
	ASSERT(db->mxdb_flags & MLXCX_DMABUF_BOUND);
	return (ddi_dma_cookie_iter(db->mxdb_dma_handle, prev));
}

static inline const ddi_dma_cookie_t *
mlxcx_dma_cookie_one(const mlxcx_dma_buffer_t *db)
{
	ASSERT(db->mxdb_flags & MLXCX_DMABUF_BOUND);
	return (ddi_dma_cookie_one(db->mxdb_dma_handle));
}
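/*
 * Example (illustrative): walking every DMA cookie of a bound buffer.
 * consume_cookie() is a stand-in for whatever uses the addresses, e.g.
 * filling a WQE's scatter pointers.
 *
 *	const ddi_dma_cookie_t *ck = NULL;
 *
 *	while ((ck = mlxcx_dma_cookie_iter(&b->mlb_dma, ck)) != NULL)
 *		consume_cookie(ck->dmac_laddress, ck->dmac_size);
 *
 * Passing NULL as the previous cookie starts the walk; the iterator
 * returns NULL once the last cookie has been seen.
 */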
/*
 * From mlxcx_intr.c
 */
extern boolean_t mlxcx_intr_setup(mlxcx_t *);
extern void mlxcx_intr_teardown(mlxcx_t *);
extern void mlxcx_arm_eq(mlxcx_t *, mlxcx_event_queue_t *);
extern void mlxcx_arm_cq(mlxcx_t *, mlxcx_completion_queue_t *);
extern void mlxcx_update_cqci(mlxcx_t *, mlxcx_completion_queue_t *);

extern mblk_t *mlxcx_rx_poll(mlxcx_t *, mlxcx_completion_queue_t *, size_t);

/*
 * From mlxcx_gld.c
 */
extern boolean_t mlxcx_register_mac(mlxcx_t *);

/*
 * From mlxcx_ring.c
 */
extern boolean_t mlxcx_wq_alloc_dma(mlxcx_t *, mlxcx_work_queue_t *);
extern void mlxcx_wq_rele_dma(mlxcx_t *, mlxcx_work_queue_t *);

extern boolean_t mlxcx_buf_create(mlxcx_t *, mlxcx_buf_shard_t *,
    mlxcx_buffer_t **);
extern boolean_t mlxcx_buf_create_foreign(mlxcx_t *, mlxcx_buf_shard_t *,
    mlxcx_buffer_t **);
extern mlxcx_buffer_t *mlxcx_buf_take(mlxcx_t *, mlxcx_work_queue_t *);
extern size_t mlxcx_buf_take_n(mlxcx_t *, mlxcx_work_queue_t *,
    mlxcx_buffer_t **, size_t);
extern boolean_t mlxcx_buf_loan(mlxcx_t *, mlxcx_buffer_t *);
extern void mlxcx_buf_return(mlxcx_t *, mlxcx_buffer_t *);
extern void mlxcx_buf_return_chain(mlxcx_t *, mlxcx_buffer_t *, boolean_t);
extern void mlxcx_buf_destroy(mlxcx_t *, mlxcx_buffer_t *);
extern void mlxcx_shard_ready(mlxcx_buf_shard_t *);
extern void mlxcx_shard_draining(mlxcx_buf_shard_t *);

extern uint_t mlxcx_buf_bind_or_copy(mlxcx_t *, mlxcx_work_queue_t *,
    mblk_t *, size_t, mlxcx_buffer_t **);

extern boolean_t mlxcx_rx_group_setup(mlxcx_t *, mlxcx_ring_group_t *);
extern boolean_t mlxcx_tx_group_setup(mlxcx_t *, mlxcx_ring_group_t *);

extern boolean_t mlxcx_rx_group_start(mlxcx_t *, mlxcx_ring_group_t *);
extern boolean_t mlxcx_tx_ring_start(mlxcx_t *, mlxcx_ring_group_t *,
    mlxcx_work_queue_t *);
extern boolean_t mlxcx_rx_ring_start(mlxcx_t *, mlxcx_ring_group_t *,
    mlxcx_work_queue_t *);

extern boolean_t mlxcx_rq_add_buffer(mlxcx_t *, mlxcx_work_queue_t *,
    mlxcx_buffer_t *);
extern boolean_t mlxcx_rq_add_buffers(mlxcx_t *, mlxcx_work_queue_t *,
    mlxcx_buffer_t **, size_t);
extern boolean_t mlxcx_sq_add_buffer(mlxcx_t *, mlxcx_work_queue_t *,
    uint8_t *, size_t, uint32_t, mlxcx_buffer_t *);
extern boolean_t mlxcx_sq_add_nop(mlxcx_t *, mlxcx_work_queue_t *);
extern void mlxcx_rq_refill(mlxcx_t *, mlxcx_work_queue_t *);

extern void mlxcx_teardown_groups(mlxcx_t *);
extern void mlxcx_wq_teardown(mlxcx_t *, mlxcx_work_queue_t *);
extern void mlxcx_cq_teardown(mlxcx_t *, mlxcx_completion_queue_t *);
extern void mlxcx_teardown_rx_group(mlxcx_t *, mlxcx_ring_group_t *);
extern void mlxcx_teardown_tx_group(mlxcx_t *, mlxcx_ring_group_t *);

extern void mlxcx_tx_completion(mlxcx_t *, mlxcx_completion_queue_t *,
    mlxcx_completionq_ent_t *, mlxcx_buffer_t *);
extern mblk_t *mlxcx_rx_completion(mlxcx_t *, mlxcx_completion_queue_t *,
    mlxcx_completionq_ent_t *, mlxcx_buffer_t *);

extern mlxcx_buf_shard_t *mlxcx_mlbs_create(mlxcx_t *);

/*
 * Flow mgmt
 */
extern boolean_t mlxcx_add_umcast_entry(mlxcx_t *, mlxcx_port_t *,
    mlxcx_ring_group_t *, const uint8_t *);
extern boolean_t mlxcx_remove_umcast_entry(mlxcx_t *, mlxcx_port_t *,
    mlxcx_ring_group_t *, const uint8_t *);
extern void mlxcx_remove_all_umcast_entries(mlxcx_t *, mlxcx_port_t *,
    mlxcx_ring_group_t *);
extern boolean_t mlxcx_setup_flow_group(mlxcx_t *, mlxcx_flow_table_t *,
    mlxcx_flow_group_t *);
extern void mlxcx_teardown_flow_table(mlxcx_t *, mlxcx_flow_table_t *);

extern void mlxcx_remove_all_vlan_entries(mlxcx_t *, mlxcx_ring_group_t *);
extern boolean_t mlxcx_remove_vlan_entry(mlxcx_t *, mlxcx_ring_group_t *,
    boolean_t, uint16_t);
extern boolean_t mlxcx_add_vlan_entry(mlxcx_t *, mlxcx_ring_group_t *,
    boolean_t, uint16_t);

/*
 * Command functions
 */
extern boolean_t mlxcx_cmd_queue_init(mlxcx_t *);
extern void mlxcx_cmd_queue_fini(mlxcx_t *);

extern boolean_t mlxcx_cmd_enable_hca(mlxcx_t *);
extern boolean_t mlxcx_cmd_disable_hca(mlxcx_t *);

extern boolean_t mlxcx_cmd_query_issi(mlxcx_t *, uint_t *);
extern boolean_t mlxcx_cmd_set_issi(mlxcx_t *, uint16_t);

extern boolean_t mlxcx_cmd_query_pages(mlxcx_t *, uint_t, int32_t *);
extern boolean_t mlxcx_cmd_give_pages(mlxcx_t *, uint_t, int32_t,
    mlxcx_dev_page_t **);
extern boolean_t mlxcx_cmd_return_pages(mlxcx_t *, int32_t, uint64_t *,
    int32_t *);

extern boolean_t mlxcx_cmd_query_hca_cap(mlxcx_t *, mlxcx_hca_cap_type_t,
    mlxcx_hca_cap_mode_t, mlxcx_hca_cap_t *);

extern boolean_t mlxcx_cmd_set_driver_version(mlxcx_t *, const char *);
extern boolean_t mlxcx_cmd_init_hca(mlxcx_t *);
extern boolean_t mlxcx_cmd_teardown_hca(mlxcx_t *);

extern boolean_t mlxcx_cmd_alloc_uar(mlxcx_t *, mlxcx_uar_t *);
extern boolean_t mlxcx_cmd_dealloc_uar(mlxcx_t *, mlxcx_uar_t *);

extern boolean_t mlxcx_cmd_alloc_pd(mlxcx_t *, mlxcx_pd_t *);
extern boolean_t mlxcx_cmd_dealloc_pd(mlxcx_t *, mlxcx_pd_t *);

extern boolean_t mlxcx_cmd_alloc_tdom(mlxcx_t *, mlxcx_tdom_t *);
extern boolean_t mlxcx_cmd_dealloc_tdom(mlxcx_t *, mlxcx_tdom_t *);

extern boolean_t mlxcx_cmd_create_eq(mlxcx_t *, mlxcx_event_queue_t *);
extern boolean_t mlxcx_cmd_destroy_eq(mlxcx_t *, mlxcx_event_queue_t *);
extern boolean_t mlxcx_cmd_query_eq(mlxcx_t *, mlxcx_event_queue_t *,
    mlxcx_eventq_ctx_t *);

extern boolean_t mlxcx_cmd_create_cq(mlxcx_t *, mlxcx_completion_queue_t *);
extern boolean_t mlxcx_cmd_destroy_cq(mlxcx_t *, mlxcx_completion_queue_t *);
extern boolean_t mlxcx_cmd_query_cq(mlxcx_t *, mlxcx_completion_queue_t *,
    mlxcx_completionq_ctx_t *);

extern boolean_t mlxcx_cmd_create_rq(mlxcx_t *, mlxcx_work_queue_t *);
extern boolean_t mlxcx_cmd_start_rq(mlxcx_t *, mlxcx_work_queue_t *);
extern boolean_t mlxcx_cmd_stop_rq(mlxcx_t *, mlxcx_work_queue_t *);
extern boolean_t mlxcx_cmd_destroy_rq(mlxcx_t *, mlxcx_work_queue_t *);
extern boolean_t mlxcx_cmd_query_rq(mlxcx_t *, mlxcx_work_queue_t *,
    mlxcx_rq_ctx_t *);

extern boolean_t mlxcx_cmd_create_tir(mlxcx_t *, mlxcx_tir_t *);
extern boolean_t mlxcx_cmd_destroy_tir(mlxcx_t *, mlxcx_tir_t *);

extern boolean_t mlxcx_cmd_create_sq(mlxcx_t *, mlxcx_work_queue_t *);
extern boolean_t mlxcx_cmd_start_sq(mlxcx_t *, mlxcx_work_queue_t *);
extern boolean_t mlxcx_cmd_stop_sq(mlxcx_t *, mlxcx_work_queue_t *);
extern boolean_t mlxcx_cmd_destroy_sq(mlxcx_t *, mlxcx_work_queue_t *);
extern boolean_t mlxcx_cmd_query_sq(mlxcx_t *, mlxcx_work_queue_t *,
    mlxcx_sq_ctx_t *);

extern boolean_t mlxcx_cmd_create_tis(mlxcx_t *, mlxcx_tis_t *);
extern boolean_t mlxcx_cmd_destroy_tis(mlxcx_t *, mlxcx_tis_t *);

extern boolean_t mlxcx_cmd_query_nic_vport_ctx(mlxcx_t *, mlxcx_port_t *);
extern boolean_t mlxcx_cmd_query_special_ctxs(mlxcx_t *);

extern boolean_t mlxcx_cmd_modify_nic_vport_ctx(mlxcx_t *, mlxcx_port_t *,
    mlxcx_modify_nic_vport_ctx_fields_t);

extern boolean_t mlxcx_cmd_create_flow_table(mlxcx_t *, mlxcx_flow_table_t *);
extern boolean_t mlxcx_cmd_destroy_flow_table(mlxcx_t *, mlxcx_flow_table_t *);
extern boolean_t mlxcx_cmd_set_flow_table_root(mlxcx_t *, mlxcx_flow_table_t *);

extern boolean_t mlxcx_cmd_create_flow_group(mlxcx_t *, mlxcx_flow_group_t *);
extern boolean_t mlxcx_cmd_set_flow_table_entry(mlxcx_t *,
    mlxcx_flow_entry_t *);
extern boolean_t mlxcx_cmd_delete_flow_table_entry(mlxcx_t *,
    mlxcx_flow_entry_t *);
extern boolean_t mlxcx_cmd_destroy_flow_group(mlxcx_t *, mlxcx_flow_group_t *);

extern boolean_t mlxcx_cmd_access_register(mlxcx_t *, mlxcx_cmd_reg_opmod_t,
    mlxcx_register_id_t, mlxcx_register_data_t *);
extern boolean_t mlxcx_cmd_query_port_mtu(mlxcx_t *, mlxcx_port_t *);
extern boolean_t mlxcx_cmd_query_port_status(mlxcx_t *, mlxcx_port_t *);
extern boolean_t mlxcx_cmd_modify_port_status(mlxcx_t *, mlxcx_port_t *,
    mlxcx_port_status_t);
extern boolean_t mlxcx_cmd_query_port_speed(mlxcx_t *, mlxcx_port_t *);
extern boolean_t mlxcx_cmd_query_port_fec(mlxcx_t *, mlxcx_port_t *);
extern boolean_t mlxcx_cmd_modify_port_fec(mlxcx_t *, mlxcx_port_t *,
    mlxcx_pplm_fec_caps_t);

extern boolean_t mlxcx_cmd_set_port_mtu(mlxcx_t *, mlxcx_port_t *);

extern boolean_t mlxcx_cmd_create_rqt(mlxcx_t *, mlxcx_rqtable_t *);
extern boolean_t mlxcx_cmd_destroy_rqt(mlxcx_t *, mlxcx_rqtable_t *);

extern boolean_t mlxcx_cmd_set_int_mod(mlxcx_t *, uint_t, uint_t);

extern boolean_t mlxcx_cmd_query_module_status(mlxcx_t *, uint_t,
    mlxcx_module_status_t *, mlxcx_module_error_type_t *);
extern boolean_t mlxcx_cmd_set_port_led(mlxcx_t *, mlxcx_port_t *, uint16_t);

/* Comparators for avl_ts */
extern int mlxcx_cq_compare(const void *, const void *);
extern int mlxcx_dmac_fe_compare(const void *, const void *);
extern int mlxcx_grmac_compare(const void *, const void *);
extern int mlxcx_page_compare(const void *, const void *);

extern void mlxcx_update_link_state(mlxcx_t *, mlxcx_port_t *);

extern void mlxcx_eth_proto_to_string(mlxcx_eth_proto_t, char *, size_t);
extern const char *mlxcx_port_status_string(mlxcx_port_status_t);

extern const char *mlxcx_event_name(mlxcx_event_t);

#ifdef __cplusplus
}
#endif

#endif	/* _MLXCX_H */