/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2021, The University of Queensland
 * Copyright (c) 2018, Joyent, Inc.
 * Copyright 2020 RackTop Systems, Inc.
 */

/*
 * Mellanox Connect-X 4/5/6 driver.
 *
 * More details in mlxcx.c
 */

#ifndef _MLXCX_H
#define	_MLXCX_H

/*
 * mlxcx(4D) definitions
 */

#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/ddifm.h>
#include <sys/id_space.h>
#include <sys/list.h>
#include <sys/taskq_impl.h>
#include <sys/stddef.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/mac_provider.h>
#include <sys/mac_ether.h>
#include <sys/cpuvar.h>
#include <sys/ethernet.h>

#include <inet/ip.h>
#include <inet/ip6.h>

#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>

#include <mlxcx_reg.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Get access to the first PCI BAR.
 */
#define	MLXCX_REG_NUMBER		1

/*
 * The command queue is supposed to be a page, which is 4k.
 */
#define	MLXCX_CMD_DMA_PAGE_SIZE		4096

/*
 * Queues can allocate in units of this much memory.
 */
#define	MLXCX_QUEUE_DMA_PAGE_SIZE	4096

/*
 * We advertise two sizes of groups to MAC -- a certain number of "large"
 * groups (including the default group, which is sized to at least ncpus)
 * followed by a certain number of "small" groups.
 *
 * This allows us to have a larger amount of classification resources available
 * for zones/VMs without resorting to software classification.
 */
#define	MLXCX_RX_NGROUPS_LARGE_DFLT		2
#define	MLXCX_RX_NRINGS_PER_LARGE_GROUP_DFLT	16
#define	MLXCX_RX_NGROUPS_SMALL_DFLT		256
#define	MLXCX_RX_NRINGS_PER_SMALL_GROUP_DFLT	4

#define	MLXCX_TX_NGROUPS_DFLT		1
#define	MLXCX_TX_NRINGS_PER_GROUP_DFLT	64

/*
 * Queues will be sized to (1 << *Q_SIZE_SHIFT) entries.
 */
#define	MLXCX_EQ_SIZE_SHIFT_DFLT	9

/*
 * The CQ, SQ and RQ sizes can affect throughput on higher speed interfaces.
 * The EQ less so, as it only takes a single EQ entry to indicate there are
 * multiple completions on the CQ.
 *
 * Particularly on the Rx side, the RQ (and corresponding CQ) can run
 * low on available entries. A symptom of this is the refill taskq running
 * frequently. A larger RQ (and CQ) alleviates this, and as there is a
 * close relationship between SQ and CQ size, the SQ is increased too.
 */
#define	MLXCX_CQ_SIZE_SHIFT_DFLT	10
#define	MLXCX_CQ_SIZE_SHIFT_25G		12

/*
 * Default to making SQs bigger than RQs for 9k MTU, since most packets will
 * spill over into more than one slot. RQ WQEs are always 1 slot.
 */
#define	MLXCX_SQ_SIZE_SHIFT_DFLT	11
#define	MLXCX_SQ_SIZE_SHIFT_25G		13

#define	MLXCX_RQ_SIZE_SHIFT_DFLT	10
#define	MLXCX_RQ_SIZE_SHIFT_25G		12

#define	MLXCX_CQ_HWM_GAP		16
#define	MLXCX_CQ_LWM_GAP		24

#define	MLXCX_WQ_HWM_GAP		MLXCX_CQ_HWM_GAP
#define	MLXCX_WQ_LWM_GAP		MLXCX_CQ_LWM_GAP

#define	MLXCX_RQ_REFILL_STEP		64

/*
 * CQ event moderation
 */
#define	MLXCX_CQEMOD_PERIOD_USEC_DFLT	50
#define	MLXCX_CQEMOD_COUNT_DFLT		\
	(8 * ((1 << MLXCX_CQ_SIZE_SHIFT_DFLT) / 10))

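/*
 * With the default CQ size shift of 10 (1024 entries), the default
 * moderation count works out to 8 * (1024 / 10) = 816 entries -- just
 * under 80% of the CQ. These are only defaults: both the period and
 * count are carried per-CQ and come from mlxcx_drv_props_t (below).
 */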

/*
 * EQ interrupt moderation
 */
#define	MLXCX_INTRMOD_PERIOD_USEC_DFLT	10

/* Size of root flow tables */
#define	MLXCX_FTBL_ROOT_SIZE_SHIFT_DFLT	12

/* Size of 2nd level flow tables for VLAN filtering */
#define	MLXCX_FTBL_VLAN_SIZE_SHIFT_DFLT	4

/*
 * How big does an mblk have to be before we dma_bind() it instead of
 * bcopying?
 */
#define	MLXCX_TX_BIND_THRESHOLD_DFLT	2048

/*
 * How often to check the status of work, completion and event queues for
 * overflow and other problems.
 */
#define	MLXCX_WQ_CHECK_INTERVAL_SEC_DFLT	300
#define	MLXCX_CQ_CHECK_INTERVAL_SEC_DFLT	300
#define	MLXCX_EQ_CHECK_INTERVAL_SEC_DFLT	30

/*
 * After this many packets, the packets received so far are passed to
 * the mac layer.
 */
#define	MLXCX_RX_PER_CQ_DEFAULT		256
#define	MLXCX_RX_PER_CQ_MIN		16
#define	MLXCX_RX_PER_CQ_MAX		4096

#define	MLXCX_DOORBELL_TRIES_DFLT	3
extern uint_t mlxcx_doorbell_tries;

#define	MLXCX_STUCK_INTR_COUNT_DFLT	128
extern uint_t mlxcx_stuck_intr_count;

#define	MLXCX_BUF_BIND_MAX_ATTEMTPS	50

#define	MLXCX_MTU_OFFSET \
	(sizeof (struct ether_vlan_header) + ETHERFCSL)

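/*
 * MLXCX_MTU_OFFSET works out to sizeof (struct ether_vlan_header) +
 * ETHERFCSL = 18 + 4 = 22 bytes: the per-frame overhead that separates
 * the SDU we report to MAC from the MTU programmed into the hardware
 * (e.g. a 1500 byte SDU corresponds to a 1522 byte on-wire frame).
 */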

/*
 * This is the current version of the command structure that the driver
 * expects to find in the ISS.
 */
#define	MLXCX_CMD_REVISION	5

#ifdef DEBUG
#define	MLXCX_DMA_SYNC(dma, flag)	VERIFY0(ddi_dma_sync( \
					    (dma).mxdb_dma_handle, 0, 0, \
					    (flag)))
#else
#define	MLXCX_DMA_SYNC(dma, flag)	(void) ddi_dma_sync( \
					    (dma).mxdb_dma_handle, 0, 0, \
					    (flag))
#endif

#define	MLXCX_FM_SERVICE_MLXCX	"mlxcx"

/*
 * This macro defines the expected value of the 'Interface Step Sequence ID'
 * (issi) which represents the version of the start up and tear down sequence.
 * We must check that hardware supports this and tell it which version we're
 * using as well.
 */
#define	MLXCX_CURRENT_ISSI	1

/*
 * This is the size of a page that the hardware expects from us when
 * manipulating pages.
 */
#define	MLXCX_HW_PAGE_SIZE	4096

/*
 * This is a special lkey value used to terminate a list of scatter pointers.
 */
#define	MLXCX_NULL_LKEY		0x100

/*
 * The max function id we support in manage pages requests.
 * At the moment we only support/expect func 0 from manage pages, but
 * structures and code are in place to support any number.
 */
#define	MLXCX_FUNC_ID_MAX	0

/*
 * Forwards
 */
struct mlxcx;
typedef struct mlxcx mlxcx_t;
typedef struct mlxcx_cmd mlxcx_cmd_t;
typedef struct mlxcx_port mlxcx_port_t;

typedef struct {
	mlxcx_t		*mlp_mlx;
	int32_t		mlp_npages;
	uint16_t	mlp_func;
} mlxcx_pages_request_t;

typedef struct mlxcx_async_param {
	mlxcx_t		*mla_mlx;
	taskq_ent_t	mla_tqe;
	boolean_t	mla_pending;
	kmutex_t	mla_mtx;

	/*
	 * Parameters specific to the function dispatched.
	 */
	union {
		void			*mla_arg;
		mlxcx_pages_request_t	mla_pages;
		mlxcx_port_t		*mla_port;
	};
} mlxcx_async_param_t;

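/*
 * A minimal sketch of how an async event might be dispatched using this
 * structure (variable and handler names are illustrative only; the real
 * dispatch sites live elsewhere in the driver):
 *
 *	mutex_enter(&param->mla_mtx);
 *	if (!param->mla_pending) {
 *		param->mla_pending = B_TRUE;
 *		param->mla_port = port;
 *		taskq_dispatch_ent(mlxp->mlx_async_tq, handler, param,
 *		    0, &param->mla_tqe);
 *	}
 *	mutex_exit(&param->mla_mtx);
 *
 * mla_pending ensures at most one task per parameter block is ever
 * outstanding on the taskq.
 */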

typedef enum {
	MLXCX_DMABUF_HDL_ALLOC	= 1 << 0,
	MLXCX_DMABUF_MEM_ALLOC	= 1 << 1,
	MLXCX_DMABUF_BOUND	= 1 << 2,
	MLXCX_DMABUF_FOREIGN	= 1 << 3,
} mlxcx_dma_buffer_flags_t;

typedef struct mlxcx_dma_buffer {
	mlxcx_dma_buffer_flags_t	mxdb_flags;
	caddr_t				mxdb_va;	/* Buffer VA */
	size_t				mxdb_len;	/* Buffer logical len */
	ddi_acc_handle_t		mxdb_acc_handle;
	ddi_dma_handle_t		mxdb_dma_handle;
	uint_t				mxdb_ncookies;
} mlxcx_dma_buffer_t;

typedef struct mlxcx_dev_page {
	list_node_t		mxdp_list;
	avl_node_t		mxdp_tree;
	uintptr_t		mxdp_pa;
	mlxcx_dma_buffer_t	mxdp_dma;
} mlxcx_dev_page_t;

/*
 * Data structure to keep track of all information related to the command
 * queue.
 */
typedef enum {
	MLXCX_CMD_QUEUE_S_IDLE = 1,
	MLXCX_CMD_QUEUE_S_BUSY,
	MLXCX_CMD_QUEUE_S_BROKEN
} mlxcx_cmd_queue_status_t;

typedef struct mlxcx_cmd_queue {
	kmutex_t		mcmd_lock;
	kcondvar_t		mcmd_cv;
	mlxcx_dma_buffer_t	mcmd_dma;

	boolean_t		mcmd_polled;

	uint8_t			mcmd_size_l2;
	uint8_t			mcmd_stride_l2;
	uint_t			mcmd_size;
	/*
	 * The mask has a bit for each of the (at most 32) command slots.
	 * When a bit is set, the corresponding slot is available.
	 */
	uint32_t		mcmd_mask;

	mlxcx_cmd_t		*mcmd_active[MLXCX_CMD_MAX];

	ddi_taskq_t		*mcmd_taskq;
	id_space_t		*mcmd_tokens;
} mlxcx_cmd_queue_t;

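/*
 * A minimal sketch of claiming a free command slot from mcmd_mask
 * (assumes the caller holds mcmd_lock; variable names are illustrative
 * only -- see mlxcx_cmd.c for the real logic):
 *
 *	while (cmdq->mcmd_mask == 0)
 *		cv_wait(&cmdq->mcmd_cv, &cmdq->mcmd_lock);
 *	slot = ddi_ffs(cmdq->mcmd_mask) - 1;
 *	cmdq->mcmd_mask &= ~(1U << slot);
 *	cmdq->mcmd_active[slot] = cmd;
 *
 * Completion of the command sets the bit again and signals mcmd_cv.
 */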

typedef struct mlxcx_cmd_mbox {
	list_node_t		mlbox_node;
	mlxcx_dma_buffer_t	mlbox_dma;
	mlxcx_cmd_mailbox_t	*mlbox_data;
} mlxcx_cmd_mbox_t;

typedef enum {
	MLXCX_EQ_ALLOC		= 1 << 0,	/* dma mem alloc'd, size set */
	MLXCX_EQ_CREATED	= 1 << 1,	/* CREATE_EQ sent to hw */
	MLXCX_EQ_DESTROYED	= 1 << 2,	/* DESTROY_EQ sent to hw */
	MLXCX_EQ_ARMED		= 1 << 3,	/* Armed through the UAR */
	MLXCX_EQ_POLLING	= 1 << 4,	/* Currently being polled */
	MLXCX_EQ_INTR_ENABLED	= 1 << 5,	/* ddi_intr_enable()'d */
	MLXCX_EQ_INTR_ACTIVE	= 1 << 6,	/* 'rupt handler running */
	MLXCX_EQ_INTR_QUIESCE	= 1 << 7,	/* 'rupt handler to quiesce */
	MLXCX_EQ_ATTACHING	= 1 << 8,	/* mlxcx_attach still running */
} mlxcx_eventq_state_t;

typedef struct mlxcx_bf {
	kmutex_t	mbf_mtx;
	uint_t		mbf_cnt;
	uint_t		mbf_even;
	uint_t		mbf_odd;
} mlxcx_bf_t;

typedef struct mlxcx_uar {
	boolean_t	mlu_allocated;
	uint_t		mlu_num;
	uint_t		mlu_base;

	volatile uint_t	mlu_bfcnt;
	mlxcx_bf_t	mlu_bf[MLXCX_BF_PER_UAR];
} mlxcx_uar_t;

typedef struct mlxcx_pd {
	boolean_t	mlpd_allocated;
	uint32_t	mlpd_num;
} mlxcx_pd_t;

typedef struct mlxcx_tdom {
	boolean_t	mltd_allocated;
	uint32_t	mltd_num;
} mlxcx_tdom_t;

typedef enum {
	MLXCX_PORT_VPORT_PROMISC	= 1 << 0,
} mlxcx_port_flags_t;

typedef struct mlxcx_flow_table mlxcx_flow_table_t;
typedef struct mlxcx_flow_group mlxcx_flow_group_t;

typedef struct {
	uint64_t	mlps_rx_drops;
} mlxcx_port_stats_t;

typedef enum {
	MLXCX_PORT_INIT		= 1 << 0
} mlxcx_port_init_t;

struct mlxcx_port {
	kmutex_t		mlp_mtx;
	mlxcx_port_init_t	mlp_init;
	mlxcx_t			*mlp_mlx;
	/*
	 * The mlp_num we have here starts at zero (it's an index), but the
	 * numbering we have to use for register access starts at 1. We
	 * currently write mlp_num into the other_vport fields in mlxcx_cmd.c
	 * (where 0 is a magic number meaning "my vport") so if we ever add
	 * support for virtualisation features and deal with more than one
	 * vport, we will probably have to change this.
	 */
	uint_t			mlp_num;
	mlxcx_port_flags_t	mlp_flags;
	uint64_t		mlp_guid;
	uint8_t			mlp_mac_address[ETHERADDRL];

	uint_t			mlp_mtu;
	uint_t			mlp_max_mtu;

	mlxcx_port_status_t	mlp_admin_status;
	mlxcx_port_status_t	mlp_oper_status;

	boolean_t		mlp_autoneg;
	mlxcx_eth_proto_t	mlp_max_proto;
	mlxcx_eth_proto_t	mlp_admin_proto;
	mlxcx_eth_proto_t	mlp_oper_proto;
	mlxcx_pplm_fec_active_t	mlp_fec_active;
	link_fec_t		mlp_fec_requested;

	mlxcx_eth_inline_mode_t	mlp_wqe_min_inline;

	/* Root flow tables */
	mlxcx_flow_table_t	*mlp_rx_flow;
	mlxcx_flow_table_t	*mlp_tx_flow;

	mlxcx_flow_group_t	*mlp_promisc;
	mlxcx_flow_group_t	*mlp_bcast;
	mlxcx_flow_group_t	*mlp_umcast;

	avl_tree_t		mlp_dmac_fe;

	mlxcx_port_stats_t	mlp_stats;

	mlxcx_module_status_t		mlp_last_modstate;
	mlxcx_module_error_type_t	mlp_last_moderr;

	mlxcx_async_param_t	mlx_port_event;
};

typedef enum {
	MLXCX_EQ_TYPE_ANY,
	MLXCX_EQ_TYPE_RX,
	MLXCX_EQ_TYPE_TX
} mlxcx_eventq_type_t;

/*
 * mlxcx_event_queue_t is a representation of an event queue (EQ).
 * There is a 1-1 tie in between an EQ and an interrupt vector, and
 * knowledge of that affects how some members of the struct are used
 * and modified.
 *
 * Most of the struct members are immutable except during setup and
 * teardown; for those it is safe to access them without a mutex once
 * the driver is initialized.
 *
 * Members which are not immutable and are protected by mleq_mtx are:
 * * mleq_state - EQ state. Changes during transitions between
 *   polling modes.
 * * mleq_cqs - an AVL tree of the completion queues using this EQ.
 *
 * Another member which is not immutable is mleq_cc. This is the EQ
 * consumer counter; it *must* only be incremented in the EQ's interrupt
 * context. It is also fed back to the hardware during re-arming of
 * the EQ, and again this *must* only happen in the EQ's interrupt context.
 *
 * There are a couple of struct members (mleq_check_disarm_cc and
 * mleq_check_disarm_cnt) which are used to help monitor the health
 * and consistency of the EQ. They are only used and modified during health
 * monitoring, which is both infrequent and single threaded, consequently
 * no mutex guards are needed.
 *
 * Care is taken not to use the mleq_mtx when possible, both to avoid
 * contention in what is "hot" code and to avoid breaking requirements
 * of mac(9E).
 */
typedef struct mlxcx_event_queue {
	kmutex_t		mleq_mtx;
	kcondvar_t		mleq_cv;
	mlxcx_t			*mleq_mlx;
	mlxcx_eventq_state_t	mleq_state;
	mlxcx_eventq_type_t	mleq_type;

	mlxcx_dma_buffer_t	mleq_dma;

	size_t			mleq_entshift;
	size_t			mleq_nents;
	mlxcx_eventq_ent_t	*mleq_ent;
	uint32_t		mleq_cc;	/* consumer counter */
	uint32_t		mleq_cc_armed;

	uint32_t		mleq_events;

	uint32_t		mleq_badintrs;

	/* Hardware eq number */
	uint_t			mleq_num;
	/* Index into the mlxcx_t's interrupts array */
	uint_t			mleq_intr_index;

	/* UAR region that has this EQ's doorbell in it */
	mlxcx_uar_t		*mleq_uar;

	/* Tree of CQn => mlxcx_completion_queue_t */
	avl_tree_t		mleq_cqs;

	uint32_t		mleq_check_disarm_cc;
	uint_t			mleq_check_disarm_cnt;
} mlxcx_event_queue_t;

typedef enum {
	MLXCX_TIS_CREATED	= 1 << 0,
	MLXCX_TIS_DESTROYED	= 1 << 1,
} mlxcx_tis_state_t;

typedef struct mlxcx_tis {
	mlxcx_tis_state_t	mltis_state;
	list_node_t		mltis_entry;
	uint_t			mltis_num;
	mlxcx_tdom_t		*mltis_tdom;
} mlxcx_tis_t;

typedef enum {
	MLXCX_BUFFER_INIT,
	MLXCX_BUFFER_FREE,
	MLXCX_BUFFER_ON_WQ,
	MLXCX_BUFFER_ON_LOAN,
	MLXCX_BUFFER_ON_CHAIN,
} mlxcx_buffer_state_t;

typedef enum {
	MLXCX_SHARD_READY,
	MLXCX_SHARD_DRAINING,
} mlxcx_shard_state_t;

typedef struct mlxcx_buf_shard {
	mlxcx_shard_state_t	mlbs_state;
	list_node_t		mlbs_entry;
	kmutex_t		mlbs_mtx;
	list_t			mlbs_busy;
	list_t			mlbs_free;
	list_t			mlbs_loaned;
	kcondvar_t		mlbs_free_nonempty;
} mlxcx_buf_shard_t;

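/*
 * A minimal sketch of taking a buffer from a shard's free list
 * (illustrative only; see mlxcx_buf_take() and friends, declared below,
 * for the real implementations in mlxcx_ring.c):
 *
 *	mutex_enter(&s->mlbs_mtx);
 *	while (list_is_empty(&s->mlbs_free) &&
 *	    s->mlbs_state == MLXCX_SHARD_READY)
 *		cv_wait(&s->mlbs_free_nonempty, &s->mlbs_mtx);
 *	if ((b = list_remove_head(&s->mlbs_free)) != NULL)
 *		list_insert_tail(&s->mlbs_busy, b);
 *	mutex_exit(&s->mlbs_mtx);
 *
 * A DRAINING shard hands out no more buffers; waiters are woken so they
 * can notice the state change and give up.
 */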

typedef struct mlxcx_buffer {
	mlxcx_buf_shard_t	*mlb_shard;
	list_node_t		mlb_entry;
	list_node_t		mlb_cq_entry;

	struct mlxcx_buffer	*mlb_tx_head;	/* head of tx chain */
	list_t			mlb_tx_chain;
	list_node_t		mlb_tx_chain_entry;

	boolean_t		mlb_foreign;
	size_t			mlb_used;
	mblk_t			*mlb_tx_mp;

	/*
	 * The number of work queue basic blocks this buf uses.
	 */
	uint_t			mlb_wqebbs;

	mlxcx_t			*mlb_mlx;
	mlxcx_buffer_state_t	mlb_state;
	uint_t			mlb_wqe_index;
	mlxcx_dma_buffer_t	mlb_dma;
	mblk_t			*mlb_mp;
	frtn_t			mlb_frtn;
} mlxcx_buffer_t;

typedef enum {
	MLXCX_CQ_ALLOC		= 1 << 0,
	MLXCX_CQ_CREATED	= 1 << 1,
	MLXCX_CQ_DESTROYED	= 1 << 2,
	MLXCX_CQ_EQAVL		= 1 << 3,
	MLXCX_CQ_BLOCKED_MAC	= 1 << 4,
	MLXCX_CQ_TEARDOWN	= 1 << 5,
	MLXCX_CQ_POLLING	= 1 << 6,
	MLXCX_CQ_ARMED		= 1 << 7,
} mlxcx_completionq_state_t;

typedef struct mlxcx_work_queue mlxcx_work_queue_t;

typedef struct mlxcx_completion_queue {
	kmutex_t		mlcq_mtx;
	kmutex_t		mlcq_arm_mtx;
	mlxcx_t			*mlcq_mlx;
	mlxcx_completionq_state_t	mlcq_state;

	mlxcx_port_stats_t	*mlcq_stats;

	list_node_t		mlcq_entry;
	avl_node_t		mlcq_eq_entry;

	uint_t			mlcq_num;

	mlxcx_work_queue_t	*mlcq_wq;
	mlxcx_event_queue_t	*mlcq_eq;

	/* UAR region that has this CQ's UAR doorbell in it */
	mlxcx_uar_t		*mlcq_uar;

	mlxcx_dma_buffer_t	mlcq_dma;

	size_t			mlcq_entshift;
	size_t			mlcq_nents;
	mlxcx_completionq_ent_t	*mlcq_ent;
	uint32_t		mlcq_cc;	/* consumer counter */
	uint32_t		mlcq_cc_armed;	/* cc at last arm */
	uint32_t		mlcq_ec;	/* event counter */
	uint32_t		mlcq_ec_armed;	/* ec at last arm */

	mlxcx_dma_buffer_t	mlcq_doorbell_dma;
	mlxcx_completionq_doorbell_t	*mlcq_doorbell;

	uint64_t		mlcq_bufcnt;
	size_t			mlcq_bufhwm;
	size_t			mlcq_buflwm;
	list_t			mlcq_buffers;
	kmutex_t		mlcq_bufbmtx;
	list_t			mlcq_buffers_b;

	uint_t			mlcq_check_disarm_cnt;
	uint64_t		mlcq_check_disarm_cc;

	uint_t			mlcq_cqemod_period_usec;
	uint_t			mlcq_cqemod_count;

	mac_ring_handle_t	mlcq_mac_hdl;
	uint64_t		mlcq_mac_gen;

	boolean_t		mlcq_fm_repd_qstate;
} mlxcx_completion_queue_t;

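/*
 * The buffer watermarks are assumed to be derived from the CQ size using
 * the *_GAP defines earlier, e.g. a 1024-entry CQ giving mlcq_bufhwm =
 * 1024 - MLXCX_CQ_HWM_GAP = 1008 and mlcq_buflwm = 1024 -
 * MLXCX_CQ_LWM_GAP = 1000. When mlcq_bufcnt climbs past the high
 * watermark, the queue reports itself blocked to MAC
 * (MLXCX_CQ_BLOCKED_MAC), and it unblocks once the count falls back
 * below the low watermark.
 */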

typedef enum {
	MLXCX_WQ_ALLOC		= 1 << 0,
	MLXCX_WQ_CREATED	= 1 << 1,
	MLXCX_WQ_STARTED	= 1 << 2,
	MLXCX_WQ_DESTROYED	= 1 << 3,
	MLXCX_WQ_TEARDOWN	= 1 << 4,
	MLXCX_WQ_BUFFERS	= 1 << 5,
	MLXCX_WQ_REFILLING	= 1 << 6,
	MLXCX_WQ_BLOCKED_MAC	= 1 << 7
} mlxcx_workq_state_t;

typedef enum {
	MLXCX_WQ_TYPE_SENDQ = 1,
	MLXCX_WQ_TYPE_RECVQ
} mlxcx_workq_type_t;

typedef struct mlxcx_ring_group mlxcx_ring_group_t;

struct mlxcx_work_queue {
	kmutex_t		mlwq_mtx;
	mlxcx_t			*mlwq_mlx;
	mlxcx_workq_type_t	mlwq_type;
	mlxcx_workq_state_t	mlwq_state;

	list_node_t		mlwq_entry;
	list_node_t		mlwq_group_entry;

	mlxcx_ring_group_t	*mlwq_group;

	uint_t			mlwq_num;

	mlxcx_completion_queue_t	*mlwq_cq;
	mlxcx_pd_t		*mlwq_pd;

	/* Required for send queues */
	mlxcx_tis_t		*mlwq_tis;

	/* UAR region that has this WQ's blueflame buffers in it */
	mlxcx_uar_t		*mlwq_uar;

	mlxcx_dma_buffer_t	mlwq_dma;

	mlxcx_eth_inline_mode_t	mlwq_inline_mode;
	size_t			mlwq_entshift;
	size_t			mlwq_nents;
	/* Discriminate based on mlwq_type */
	union {
		mlxcx_sendq_ent_t	*mlwq_send_ent;
		mlxcx_sendq_extra_ent_t	*mlwq_send_extra_ent;
		mlxcx_recvq_ent_t	*mlwq_recv_ent;
		mlxcx_sendq_bf_t	*mlwq_bf_ent;
	};
	uint64_t		mlwq_pc;	/* producer counter */

	uint64_t		mlwq_wqebb_used;
	size_t			mlwq_bufhwm;
	size_t			mlwq_buflwm;

	mlxcx_dma_buffer_t	mlwq_doorbell_dma;
	mlxcx_workq_doorbell_t	*mlwq_doorbell;

	mlxcx_buf_shard_t	*mlwq_bufs;
	mlxcx_buf_shard_t	*mlwq_foreign_bufs;

	taskq_ent_t		mlwq_tqe;

	boolean_t		mlwq_fm_repd_qstate;
};

#define	MLXCX_RQT_MAX_SIZE	64

typedef enum {
	MLXCX_RQT_CREATED	= 1 << 0,
	MLXCX_RQT_DESTROYED	= 1 << 1,
	MLXCX_RQT_DIRTY		= 1 << 2,
} mlxcx_rqtable_state_t;

typedef struct mlxcx_rqtable {
	mlxcx_rqtable_state_t	mlrqt_state;
	list_node_t		mlrqt_entry;
	uint_t			mlrqt_num;

	size_t			mlrqt_max;
	size_t			mlrqt_used;

	size_t			mlrqt_rq_size;
	mlxcx_work_queue_t	**mlrqt_rq;
} mlxcx_rqtable_t;

typedef enum {
	MLXCX_TIR_CREATED	= 1 << 0,
	MLXCX_TIR_DESTROYED	= 1 << 1,
} mlxcx_tir_state_t;

typedef struct mlxcx_tir {
	mlxcx_tir_state_t	mltir_state;
	list_node_t		mltir_entry;
	uint_t			mltir_num;
	mlxcx_tdom_t		*mltir_tdom;
	mlxcx_tir_type_t	mltir_type;
	union {
		mlxcx_rqtable_t		*mltir_rqtable;
		mlxcx_work_queue_t	*mltir_rq;
	};
	mlxcx_tir_hash_fn_t	mltir_hash_fn;
	uint8_t			mltir_toeplitz_key[40];
	mlxcx_tir_rx_hash_l3_type_t	mltir_l3_type;
	mlxcx_tir_rx_hash_l4_type_t	mltir_l4_type;
	mlxcx_tir_rx_hash_fields_t	mltir_hash_fields;
} mlxcx_tir_t;

typedef enum {
	MLXCX_FLOW_GROUP_CREATED	= 1 << 0,
	MLXCX_FLOW_GROUP_BUSY		= 1 << 1,
	MLXCX_FLOW_GROUP_DESTROYED	= 1 << 2,
} mlxcx_flow_group_state_t;

typedef enum {
	MLXCX_FLOW_MATCH_SMAC		= 1 << 0,
	MLXCX_FLOW_MATCH_DMAC		= 1 << 1,
	MLXCX_FLOW_MATCH_VLAN		= 1 << 2,
	MLXCX_FLOW_MATCH_VID		= 1 << 3,
	MLXCX_FLOW_MATCH_IP_VER		= 1 << 4,
	MLXCX_FLOW_MATCH_SRCIP		= 1 << 5,
	MLXCX_FLOW_MATCH_DSTIP		= 1 << 6,
	MLXCX_FLOW_MATCH_IP_PROTO	= 1 << 7,
	MLXCX_FLOW_MATCH_SQN		= 1 << 8,
	MLXCX_FLOW_MATCH_VXLAN		= 1 << 9,
} mlxcx_flow_mask_t;

struct mlxcx_flow_group {
	list_node_t		mlfg_entry;
	list_node_t		mlfg_role_entry;
	mlxcx_flow_group_state_t	mlfg_state;
	mlxcx_flow_table_t	*mlfg_table;
	uint_t			mlfg_num;
	size_t			mlfg_start_idx;
	size_t			mlfg_size;
	size_t			mlfg_avail;
	list_t			mlfg_entries;
	mlxcx_flow_mask_t	mlfg_mask;
};

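/*
 * As an illustration of mlfg_mask (not an exhaustive account of the
 * group setup, which lives in the driver's .c files): a group used for
 * unicast MAC steering on the root rx table would match on
 * MLXCX_FLOW_MATCH_DMAC alone, while groups on a ring group's VLAN
 * table would match MLXCX_FLOW_MATCH_VLAN | MLXCX_FLOW_MATCH_VID.
 */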

typedef enum {
	MLXCX_FLOW_ENTRY_RESERVED	= 1 << 0,
	MLXCX_FLOW_ENTRY_CREATED	= 1 << 1,
	MLXCX_FLOW_ENTRY_DELETED	= 1 << 2,
	MLXCX_FLOW_ENTRY_DIRTY		= 1 << 3,
} mlxcx_flow_entry_state_t;

typedef struct {
	mlxcx_tir_t		*mlfed_tir;
	mlxcx_flow_table_t	*mlfed_flow;
} mlxcx_flow_entry_dest_t;

typedef struct mlxcx_flow_entry {
	list_node_t		mlfe_group_entry;
	avl_node_t		mlfe_dmac_entry;
	mlxcx_flow_entry_state_t	mlfe_state;
	mlxcx_flow_table_t	*mlfe_table;
	mlxcx_flow_group_t	*mlfe_group;
	uint_t			mlfe_index;

	mlxcx_flow_action_t	mlfe_action;

	/* Criteria for match */
	uint8_t			mlfe_smac[ETHERADDRL];
	uint8_t			mlfe_dmac[ETHERADDRL];

	mlxcx_vlan_type_t	mlfe_vlan_type;
	uint16_t		mlfe_vid;

	uint_t			mlfe_ip_version;
	uint8_t			mlfe_srcip[IPV6_ADDR_LEN];
	uint8_t			mlfe_dstip[IPV6_ADDR_LEN];

	uint_t			mlfe_ip_proto;
	uint16_t		mlfe_sport;
	uint16_t		mlfe_dport;

	uint32_t		mlfe_sqn;
	uint32_t		mlfe_vxlan_vni;

	/* Destinations */
	size_t			mlfe_ndest;
	mlxcx_flow_entry_dest_t	mlfe_dest[MLXCX_FLOW_MAX_DESTINATIONS];

	/*
	 * mlxcx_group_mac_ts joining this entry to N ring groups.
	 * Only used by FEs on the root rx flow table.
	 */
	list_t			mlfe_ring_groups;
} mlxcx_flow_entry_t;

typedef enum {
	MLXCX_FLOW_TABLE_CREATED	= 1 << 0,
	MLXCX_FLOW_TABLE_DESTROYED	= 1 << 1,
	MLXCX_FLOW_TABLE_ROOT		= 1 << 2
} mlxcx_flow_table_state_t;

struct mlxcx_flow_table {
	kmutex_t		mlft_mtx;
	mlxcx_flow_table_state_t	mlft_state;
	uint_t			mlft_level;
	uint_t			mlft_num;
	mlxcx_flow_table_type_t	mlft_type;

	mlxcx_port_t		*mlft_port;

	size_t			mlft_entshift;
	size_t			mlft_nents;

	size_t			mlft_entsize;
	mlxcx_flow_entry_t	*mlft_ent;

	/* First entry not yet claimed by a group */
	size_t			mlft_next_ent;

	list_t			mlft_groups;
};

typedef enum {
	MLXCX_GROUP_RX,
	MLXCX_GROUP_TX
} mlxcx_group_type_t;

typedef enum {
	MLXCX_GROUP_INIT	= 1 << 0,
	MLXCX_GROUP_WQS		= 1 << 1,
	MLXCX_GROUP_TIRTIS	= 1 << 2,
	MLXCX_GROUP_FLOWS	= 1 << 3,
	MLXCX_GROUP_RUNNING	= 1 << 4,
	MLXCX_GROUP_RQT		= 1 << 5,
} mlxcx_group_state_t;

#define	MLXCX_RX_HASH_FT_SIZE_SHIFT	4

typedef enum {
	MLXCX_TIR_ROLE_IPv4 = 0,
	MLXCX_TIR_ROLE_IPv6,
	MLXCX_TIR_ROLE_TCPv4,
	MLXCX_TIR_ROLE_TCPv6,
	MLXCX_TIR_ROLE_UDPv4,
	MLXCX_TIR_ROLE_UDPv6,
	MLXCX_TIR_ROLE_OTHER,

	MLXCX_TIRS_PER_GROUP
} mlxcx_tir_role_t;

typedef struct {
	avl_node_t		mlgm_group_entry;
	list_node_t		mlgm_fe_entry;
	mlxcx_ring_group_t	*mlgm_group;
	uint8_t			mlgm_mac[6];
	mlxcx_flow_entry_t	*mlgm_fe;
} mlxcx_group_mac_t;

typedef struct {
	list_node_t		mlgv_entry;
	boolean_t		mlgv_tagged;
	uint16_t		mlgv_vid;
	mlxcx_flow_entry_t	*mlgv_fe;
} mlxcx_group_vlan_t;

struct mlxcx_ring_group {
	kmutex_t		mlg_mtx;
	mlxcx_t			*mlg_mlx;
	mlxcx_group_state_t	mlg_state;
	mlxcx_group_type_t	mlg_type;

	mac_group_handle_t	mlg_mac_hdl;

	union {
		mlxcx_tis_t	mlg_tis;
		mlxcx_tir_t	mlg_tir[MLXCX_TIRS_PER_GROUP];
	};
	mlxcx_port_t		*mlg_port;

	size_t			mlg_nwqs;
	size_t			mlg_wqs_size;
	mlxcx_work_queue_t	*mlg_wqs;

	mlxcx_rqtable_t		*mlg_rqt;

	/*
	 * Flow table for matching VLAN IDs
	 */
	mlxcx_flow_table_t	*mlg_rx_vlan_ft;
	mlxcx_flow_group_t	*mlg_rx_vlan_fg;
	mlxcx_flow_group_t	*mlg_rx_vlan_def_fg;
	mlxcx_flow_group_t	*mlg_rx_vlan_promisc_fg;
	list_t			mlg_rx_vlans;

	taskq_t			*mlg_refill_tq;

	/*
	 * Flow table for separating out by protocol before hashing
	 */
	mlxcx_flow_table_t	*mlg_rx_hash_ft;

	/*
	 * Links to flow entries on the root flow table which are pointing to
	 * our rx_vlan_ft.
	 */
	avl_tree_t		mlg_rx_macs;
};

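/*
 * An rx group uses the mlg_tir side of the union above, one TIR per
 * mlxcx_tir_role_t: traffic split out by mlg_rx_hash_ft (sized by
 * MLXCX_RX_HASH_FT_SIZE_SHIFT, so 16 entries for the seven roles) is
 * hashed across the group's RQs via mlg_rqt. A tx group instead uses
 * the single mlg_tis.
 */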

typedef enum mlxcx_cmd_state {
	MLXCX_CMD_S_DONE	= 1 << 0,
	MLXCX_CMD_S_ERROR	= 1 << 1
} mlxcx_cmd_state_t;

struct mlxcx_cmd {
	struct mlxcx		*mlcmd_mlxp;
	kmutex_t		mlcmd_lock;
	kcondvar_t		mlcmd_cv;

	boolean_t		mlcmd_poll;
	uint8_t			mlcmd_token;
	mlxcx_cmd_op_t		mlcmd_op;

	/*
	 * Command data and extended mailboxes for responses.
	 */
	const void		*mlcmd_in;
	uint32_t		mlcmd_inlen;
	void			*mlcmd_out;
	uint32_t		mlcmd_outlen;
	list_t			mlcmd_mbox_in;
	uint8_t			mlcmd_nboxes_in;
	list_t			mlcmd_mbox_out;
	uint8_t			mlcmd_nboxes_out;
	/*
	 * Status information.
	 */
	mlxcx_cmd_state_t	mlcmd_state;
	uint8_t			mlcmd_status;
};

/*
 * Our view of capabilities.
 */
typedef struct mlxcx_hca_cap {
	mlxcx_hca_cap_mode_t	mhc_mode;
	mlxcx_hca_cap_type_t	mhc_type;
	union {
		uint8_t				mhc_bulk[MLXCX_HCA_CAP_SIZE];
		mlxcx_hca_cap_general_caps_t	mhc_general;
		mlxcx_hca_cap_eth_caps_t	mhc_eth;
		mlxcx_hca_cap_flow_caps_t	mhc_flow;
	};
} mlxcx_hca_cap_t;

typedef struct {
	/* Cooked values */
	boolean_t	mlc_checksum;
	boolean_t	mlc_lso;
	boolean_t	mlc_vxlan;
	size_t		mlc_max_lso_size;
	size_t		mlc_max_rqt_size;

	size_t		mlc_max_rx_ft_shift;
	size_t		mlc_max_rx_fe_dest;
	size_t		mlc_max_rx_flows;
	size_t		mlc_max_rx_ft;

	size_t		mlc_max_tir;

	/* Raw caps data */
	mlxcx_hca_cap_t	mlc_hca_cur;
	mlxcx_hca_cap_t	mlc_hca_max;
	mlxcx_hca_cap_t	mlc_ether_cur;
	mlxcx_hca_cap_t	mlc_ether_max;
	mlxcx_hca_cap_t	mlc_nic_flow_cur;
	mlxcx_hca_cap_t	mlc_nic_flow_max;
} mlxcx_caps_t;

typedef struct {
	uint_t		mldp_eq_size_shift;
	uint_t		mldp_cq_size_shift;
	uint_t		mldp_cq_size_shift_default;
	uint_t		mldp_rq_size_shift;
	uint_t		mldp_rq_size_shift_default;
	uint_t		mldp_sq_size_shift;
	uint_t		mldp_sq_size_shift_default;
	uint_t		mldp_cqemod_period_usec;
	uint_t		mldp_cqemod_count;
	uint_t		mldp_intrmod_period_usec;
	uint_t		mldp_rx_ngroups_large;
	uint_t		mldp_rx_ngroups_small;
	uint_t		mldp_rx_nrings_per_large_group;
	uint_t		mldp_rx_nrings_per_small_group;
	uint_t		mldp_rx_per_cq;
	uint_t		mldp_tx_ngroups;
	uint_t		mldp_tx_nrings_per_group;
	uint_t		mldp_ftbl_root_size_shift;
	size_t		mldp_tx_bind_threshold;
	uint_t		mldp_ftbl_vlan_size_shift;
	uint64_t	mldp_eq_check_interval_sec;
	uint64_t	mldp_cq_check_interval_sec;
	uint64_t	mldp_wq_check_interval_sec;
} mlxcx_drv_props_t;

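/*
 * These are populated at attach time, falling back to the *_DFLT values
 * defined earlier. A sketch of how one might be read from the driver's
 * .conf file (the property name here is an assumption, not necessarily
 * what mlxcx.conf uses):
 *
 *	p->mldp_cq_size_shift_default = ddi_prop_get_int(DDI_DEV_T_ANY,
 *	    mlxp->mlx_dip, DDI_PROP_DONTPASS, "cq_size_shift",
 *	    MLXCX_CQ_SIZE_SHIFT_DFLT);
 */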

typedef struct {
	mlxcx_t		*mlts_mlx;
	uint8_t		mlts_index;
	id_t		mlts_ksensor;
	int16_t		mlts_value;
	int16_t		mlts_max_value;
	uint8_t		mlts_name[MLXCX_MTMP_NAMELEN];
} mlxcx_temp_sensor_t;

typedef enum {
	MLXCX_ATTACH_FM		= 1 << 0,
	MLXCX_ATTACH_PCI_CONFIG	= 1 << 1,
	MLXCX_ATTACH_REGS	= 1 << 2,
	MLXCX_ATTACH_CMD	= 1 << 3,
	MLXCX_ATTACH_ENABLE_HCA	= 1 << 4,
	MLXCX_ATTACH_PAGE_LIST	= 1 << 5,
	MLXCX_ATTACH_INIT_HCA	= 1 << 6,
	MLXCX_ATTACH_UAR_PD_TD	= 1 << 7,
	MLXCX_ATTACH_INTRS	= 1 << 8,
	MLXCX_ATTACH_PORTS	= 1 << 9,
	MLXCX_ATTACH_MAC_HDL	= 1 << 10,
	MLXCX_ATTACH_CQS	= 1 << 11,
	MLXCX_ATTACH_WQS	= 1 << 12,
	MLXCX_ATTACH_GROUPS	= 1 << 13,
	MLXCX_ATTACH_BUFS	= 1 << 14,
	MLXCX_ATTACH_CAPS	= 1 << 15,
	MLXCX_ATTACH_CHKTIMERS	= 1 << 16,
	MLXCX_ATTACH_ASYNC_TQ	= 1 << 17,
	MLXCX_ATTACH_SENSORS	= 1 << 18
} mlxcx_attach_progress_t;

struct mlxcx {
	/* entry on the mlxcx_glist */
	list_node_t		mlx_gentry;

	dev_info_t		*mlx_dip;
	int			mlx_inst;
	mlxcx_attach_progress_t	mlx_attach;

	mlxcx_drv_props_t	mlx_props;

	/*
	 * Misc. data
	 */
	uint16_t		mlx_fw_maj;
	uint16_t		mlx_fw_min;
	uint16_t		mlx_fw_rev;
	uint16_t		mlx_cmd_rev;

	/*
	 * Various capabilities of hardware.
	 */
	mlxcx_caps_t		*mlx_caps;

	uint_t			mlx_max_sdu;
	uint_t			mlx_sdu;

	/*
	 * FM State
	 */
	int			mlx_fm_caps;

	/*
	 * PCI Data
	 */
	ddi_acc_handle_t	mlx_cfg_handle;
	ddi_acc_handle_t	mlx_regs_handle;
	caddr_t			mlx_regs_base;

	/*
	 * MAC handle
	 */
	mac_handle_t		mlx_mac_hdl;

	/*
	 * Main command queue for issuing general FW control commands.
	 */
	mlxcx_cmd_queue_t	mlx_cmd;

	/*
	 * Interrupts
	 */
	uint_t			mlx_intr_pri;
	uint_t			mlx_async_intr_pri;
	uint_t			mlx_intr_type;	/* always MSI-X */
	int			mlx_intr_count;
	size_t			mlx_intr_size;	/* allocation size */
	int			mlx_intr_cq0;
	ddi_intr_handle_t	*mlx_intr_handles;

	/*
	 * Basic firmware resources which we use for a variety of things.
	 * The UAR is a reference to a page where CQ and EQ doorbells are
	 * located. It also holds all the BlueFlame stuff (which we don't
	 * use).
	 */
	mlxcx_uar_t		mlx_uar;
	/*
	 * The PD (Protection Domain) and TDOM (Transport Domain) are opaque
	 * entities to us (they're Infiniband constructs we don't actually care
	 * about) -- we just allocate them and shove their ID numbers in
	 * whenever we're asked for one.
	 *
	 * The "reserved" LKEY is what we should put in queue entries that
	 * have references to memory to indicate that they're using linear
	 * addresses (comes from the QUERY_SPECIAL_CONTEXTS cmd).
	 */
	mlxcx_pd_t		mlx_pd;
	mlxcx_tdom_t		mlx_tdom;
	uint_t			mlx_rsvd_lkey;

	/*
	 * Our event queues. These are 1:1 with interrupts.
	 */
	size_t			mlx_eqs_size;	/* allocation size */
	mlxcx_event_queue_t	*mlx_eqs;

	/*
	 * Page list. These represent the set of 4k pages we've given to
	 * hardware.
	 *
	 * We can add to this list at the request of hardware from interrupt
	 * context (the PAGE_REQUEST event), so it's protected by pagemtx.
	 */
	kmutex_t		mlx_pagemtx;
	uint_t			mlx_npages;
	avl_tree_t		mlx_pages;

	mlxcx_async_param_t	mlx_npages_req[MLXCX_FUNC_ID_MAX + 1];

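	/*
	 * A minimal sketch of tracking a newly given page (illustrative
	 * only):
	 *
	 *	mutex_enter(&mlxp->mlx_pagemtx);
	 *	avl_add(&mlxp->mlx_pages, mdp);
	 *	mlxp->mlx_npages++;
	 *	mutex_exit(&mlxp->mlx_pagemtx);
	 *
	 * The tree is assumed to be indexed on mxdp_pa (see
	 * mlxcx_page_compare(), declared below), so a page can be looked
	 * up again by physical address when hardware returns it.
	 */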

	/*
	 * Taskq for processing asynchronous events which may issue
	 * commands to the HCA.
	 */
	taskq_t			*mlx_async_tq;

	/*
	 * Port state
	 */
	uint_t			mlx_nports;
	size_t			mlx_ports_size;
	mlxcx_port_t		*mlx_ports;

	/*
	 * Completion queues (CQs). These are also indexed off the
	 * event_queue_ts that they each report to.
	 */
	list_t			mlx_cqs;

	uint_t			mlx_next_eq;

	/*
	 * Work queues (WQs).
	 */
	list_t			mlx_wqs;

	/*
	 * Ring groups
	 */
	size_t			mlx_rx_ngroups;
	size_t			mlx_rx_groups_size;
	mlxcx_ring_group_t	*mlx_rx_groups;

	size_t			mlx_tx_ngroups;
	size_t			mlx_tx_groups_size;
	mlxcx_ring_group_t	*mlx_tx_groups;

	kmem_cache_t		*mlx_bufs_cache;
	list_t			mlx_buf_shards;

	ddi_periodic_t		mlx_eq_checktimer;
	ddi_periodic_t		mlx_cq_checktimer;
	ddi_periodic_t		mlx_wq_checktimer;

	/*
	 * Sensors
	 */
	uint8_t			mlx_temp_nsensors;
	mlxcx_temp_sensor_t	*mlx_temp_sensors;
};

/*
 * Register access
 */
extern uint16_t mlxcx_get16(mlxcx_t *, uintptr_t);
extern uint32_t mlxcx_get32(mlxcx_t *, uintptr_t);
extern uint64_t mlxcx_get64(mlxcx_t *, uintptr_t);

extern void mlxcx_put32(mlxcx_t *, uintptr_t, uint32_t);
extern void mlxcx_put64(mlxcx_t *, uintptr_t, uint64_t);

extern void mlxcx_uar_put32(mlxcx_t *, mlxcx_uar_t *, uintptr_t, uint32_t);
extern void mlxcx_uar_put64(mlxcx_t *, mlxcx_uar_t *, uintptr_t, uint64_t);

/*
 * Logging functions.
 */
extern void mlxcx_warn(mlxcx_t *, const char *, ...);
extern void mlxcx_note(mlxcx_t *, const char *, ...);
extern void mlxcx_panic(mlxcx_t *, const char *, ...);

extern void mlxcx_fm_ereport(mlxcx_t *, const char *);

extern void mlxcx_check_sq(mlxcx_t *, mlxcx_work_queue_t *);
extern void mlxcx_check_rq(mlxcx_t *, mlxcx_work_queue_t *);

/*
 * DMA Functions
 */
extern void mlxcx_dma_free(mlxcx_dma_buffer_t *);
extern boolean_t mlxcx_dma_alloc(mlxcx_t *, mlxcx_dma_buffer_t *,
    ddi_dma_attr_t *, ddi_device_acc_attr_t *, boolean_t, size_t, boolean_t);
extern boolean_t mlxcx_dma_init(mlxcx_t *, mlxcx_dma_buffer_t *,
    ddi_dma_attr_t *, boolean_t);
extern boolean_t mlxcx_dma_bind_mblk(mlxcx_t *, mlxcx_dma_buffer_t *,
    const mblk_t *, size_t, boolean_t);
extern boolean_t mlxcx_dma_alloc_offset(mlxcx_t *, mlxcx_dma_buffer_t *,
    ddi_dma_attr_t *, ddi_device_acc_attr_t *, boolean_t,
    size_t, size_t, boolean_t);
extern void mlxcx_dma_unbind(mlxcx_t *, mlxcx_dma_buffer_t *);
extern void mlxcx_dma_acc_attr(mlxcx_t *, ddi_device_acc_attr_t *);
extern void mlxcx_dma_page_attr(mlxcx_t *, ddi_dma_attr_t *);
extern void mlxcx_dma_queue_attr(mlxcx_t *, ddi_dma_attr_t *);
extern void mlxcx_dma_qdbell_attr(mlxcx_t *, ddi_dma_attr_t *);
extern void mlxcx_dma_buf_attr(mlxcx_t *, ddi_dma_attr_t *);

extern boolean_t mlxcx_give_pages(mlxcx_t *, int32_t, int32_t *);

static inline const ddi_dma_cookie_t *
mlxcx_dma_cookie_iter(const mlxcx_dma_buffer_t *db,
    const ddi_dma_cookie_t *prev)
{
	ASSERT(db->mxdb_flags & MLXCX_DMABUF_BOUND);
	return (ddi_dma_cookie_iter(db->mxdb_dma_handle, prev));
}

static inline const ddi_dma_cookie_t *
mlxcx_dma_cookie_one(const mlxcx_dma_buffer_t *db)
{
	ASSERT(db->mxdb_flags & MLXCX_DMABUF_BOUND);
	return (ddi_dma_cookie_one(db->mxdb_dma_handle));
}

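/*
 * Typical iteration over the cookies of a bound buffer (a sketch; the
 * scatter pointer layout itself lives in mlxcx_reg.h). Passing NULL as
 * the previous cookie yields the first one:
 *
 *	const ddi_dma_cookie_t *ck = NULL;
 *
 *	while ((ck = mlxcx_dma_cookie_iter(&b->mlb_dma, ck)) != NULL) {
 *		... use ck->dmac_laddress and ck->dmac_size to fill
 *		    out a scatter pointer ...
 *	}
 */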

/*
 * From mlxcx_intr.c
 */
extern boolean_t mlxcx_intr_setup(mlxcx_t *);
extern void mlxcx_intr_disable(mlxcx_t *);
extern void mlxcx_intr_teardown(mlxcx_t *);
extern void mlxcx_arm_eq(mlxcx_t *, mlxcx_event_queue_t *);
extern void mlxcx_arm_cq(mlxcx_t *, mlxcx_completion_queue_t *);
extern void mlxcx_update_cqci(mlxcx_t *, mlxcx_completion_queue_t *);

extern mblk_t *mlxcx_rx_poll(mlxcx_t *, mlxcx_completion_queue_t *, size_t);

/*
 * From mlxcx_gld.c
 */
extern boolean_t mlxcx_register_mac(mlxcx_t *);

/*
 * From mlxcx_ring.c
 */
extern boolean_t mlxcx_wq_alloc_dma(mlxcx_t *, mlxcx_work_queue_t *);
extern void mlxcx_wq_rele_dma(mlxcx_t *, mlxcx_work_queue_t *);

extern boolean_t mlxcx_buf_create(mlxcx_t *, mlxcx_buf_shard_t *,
    mlxcx_buffer_t **);
extern boolean_t mlxcx_buf_create_foreign(mlxcx_t *, mlxcx_buf_shard_t *,
    mlxcx_buffer_t **);
extern mlxcx_buffer_t *mlxcx_buf_take(mlxcx_t *, mlxcx_work_queue_t *);
extern size_t mlxcx_buf_take_n(mlxcx_t *, mlxcx_work_queue_t *,
    mlxcx_buffer_t **, size_t);
extern boolean_t mlxcx_buf_loan(mlxcx_t *, mlxcx_buffer_t *);
extern void mlxcx_buf_return(mlxcx_t *, mlxcx_buffer_t *);
extern void mlxcx_buf_return_chain(mlxcx_t *, mlxcx_buffer_t *, boolean_t);
extern void mlxcx_buf_destroy(mlxcx_t *, mlxcx_buffer_t *);
extern void mlxcx_shard_ready(mlxcx_buf_shard_t *);
extern void mlxcx_shard_draining(mlxcx_buf_shard_t *);

extern uint_t mlxcx_buf_bind_or_copy(mlxcx_t *, mlxcx_work_queue_t *,
    mblk_t *, size_t, mlxcx_buffer_t **);

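/*
 * In terms of the mlxcx_buffer_state_t values defined earlier:
 * mlxcx_buf_take() moves a FREE buffer from its shard to ON_WQ;
 * mlxcx_buf_loan() moves it to ON_LOAN when its mblk is handed up to
 * MAC; and mlxcx_buf_return() takes it back to FREE, typically via the
 * free routine stored in mlb_frtn.
 */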

extern boolean_t mlxcx_rx_group_setup(mlxcx_t *, mlxcx_ring_group_t *);
extern boolean_t mlxcx_tx_group_setup(mlxcx_t *, mlxcx_ring_group_t *);

extern boolean_t mlxcx_rx_group_start(mlxcx_t *, mlxcx_ring_group_t *);
extern boolean_t mlxcx_tx_ring_start(mlxcx_t *, mlxcx_ring_group_t *,
    mlxcx_work_queue_t *);
extern boolean_t mlxcx_rx_ring_start(mlxcx_t *, mlxcx_ring_group_t *,
    mlxcx_work_queue_t *);

extern boolean_t mlxcx_rq_add_buffer(mlxcx_t *, mlxcx_work_queue_t *,
    mlxcx_buffer_t *);
extern boolean_t mlxcx_rq_add_buffers(mlxcx_t *, mlxcx_work_queue_t *,
    mlxcx_buffer_t **, size_t);
extern boolean_t mlxcx_sq_add_buffer(mlxcx_t *, mlxcx_work_queue_t *,
    uint8_t *, size_t, uint32_t, mlxcx_buffer_t *);
extern boolean_t mlxcx_sq_add_nop(mlxcx_t *, mlxcx_work_queue_t *);
extern void mlxcx_rq_refill(mlxcx_t *, mlxcx_work_queue_t *);

extern void mlxcx_teardown_groups(mlxcx_t *);
extern void mlxcx_wq_teardown(mlxcx_t *, mlxcx_work_queue_t *);
extern void mlxcx_cq_teardown(mlxcx_t *, mlxcx_completion_queue_t *);
extern void mlxcx_teardown_rx_group(mlxcx_t *, mlxcx_ring_group_t *);
extern void mlxcx_teardown_tx_group(mlxcx_t *, mlxcx_ring_group_t *);

extern void mlxcx_tx_completion(mlxcx_t *, mlxcx_completion_queue_t *,
    mlxcx_completionq_ent_t *, mlxcx_buffer_t *);
extern mblk_t *mlxcx_rx_completion(mlxcx_t *, mlxcx_completion_queue_t *,
    mlxcx_completionq_ent_t *, mlxcx_buffer_t *);

extern mlxcx_buf_shard_t *mlxcx_mlbs_create(mlxcx_t *);

/*
 * Flow mgmt
 */
extern boolean_t mlxcx_add_umcast_entry(mlxcx_t *, mlxcx_port_t *,
    mlxcx_ring_group_t *, const uint8_t *);
extern boolean_t mlxcx_remove_umcast_entry(mlxcx_t *, mlxcx_port_t *,
    mlxcx_ring_group_t *, const uint8_t *);
extern void mlxcx_remove_all_umcast_entries(mlxcx_t *, mlxcx_port_t *,
    mlxcx_ring_group_t *);
extern boolean_t mlxcx_setup_flow_group(mlxcx_t *, mlxcx_flow_table_t *,
    mlxcx_flow_group_t *);
extern void mlxcx_teardown_flow_table(mlxcx_t *, mlxcx_flow_table_t *);

extern void mlxcx_remove_all_vlan_entries(mlxcx_t *, mlxcx_ring_group_t *);
extern boolean_t mlxcx_remove_vlan_entry(mlxcx_t *, mlxcx_ring_group_t *,
    boolean_t, uint16_t);
extern boolean_t mlxcx_add_vlan_entry(mlxcx_t *, mlxcx_ring_group_t *,
    boolean_t, uint16_t);

/*
 * Command functions
 */
extern boolean_t mlxcx_cmd_queue_init(mlxcx_t *);
extern void mlxcx_cmd_queue_fini(mlxcx_t *);

extern void mlxcx_cmd_completion(mlxcx_t *, mlxcx_eventq_ent_t *);
extern void mlxcx_cmd_eq_enable(mlxcx_t *);
extern void mlxcx_cmd_eq_disable(mlxcx_t *);

extern boolean_t mlxcx_cmd_enable_hca(mlxcx_t *);
extern boolean_t mlxcx_cmd_disable_hca(mlxcx_t *);

extern boolean_t mlxcx_cmd_query_issi(mlxcx_t *, uint_t *);
extern boolean_t mlxcx_cmd_set_issi(mlxcx_t *, uint16_t);

extern boolean_t mlxcx_cmd_query_pages(mlxcx_t *, uint_t, int32_t *);
extern boolean_t mlxcx_cmd_give_pages(mlxcx_t *, uint_t, int32_t,
    mlxcx_dev_page_t **);
extern boolean_t mlxcx_cmd_return_pages(mlxcx_t *, int32_t, uint64_t *,
    int32_t *);

extern boolean_t mlxcx_cmd_query_hca_cap(mlxcx_t *, mlxcx_hca_cap_type_t,
    mlxcx_hca_cap_mode_t, mlxcx_hca_cap_t *);

extern boolean_t mlxcx_cmd_set_driver_version(mlxcx_t *, const char *);

extern boolean_t mlxcx_cmd_init_hca(mlxcx_t *);
extern boolean_t mlxcx_cmd_teardown_hca(mlxcx_t *);

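/*
 * A rough sketch of the early bring-up these commands support (error
 * handling and the ISSI support check elided; see mlxcx.c for the
 * authoritative sequence):
 *
 *	uint_t issi;
 *
 *	(void) mlxcx_cmd_enable_hca(mlxp);
 *	(void) mlxcx_cmd_query_issi(mlxp, &issi);
 *	(void) mlxcx_cmd_set_issi(mlxp, MLXCX_CURRENT_ISSI);
 *	... give boot pages (mlxcx_cmd_give_pages), query caps ...
 *	(void) mlxcx_cmd_init_hca(mlxp);
 *	(void) mlxcx_cmd_set_driver_version(mlxp, version_string);
 */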

extern boolean_t mlxcx_cmd_alloc_uar(mlxcx_t *, mlxcx_uar_t *);
extern boolean_t mlxcx_cmd_dealloc_uar(mlxcx_t *, mlxcx_uar_t *);

extern boolean_t mlxcx_cmd_alloc_pd(mlxcx_t *, mlxcx_pd_t *);
extern boolean_t mlxcx_cmd_dealloc_pd(mlxcx_t *, mlxcx_pd_t *);

extern boolean_t mlxcx_cmd_alloc_tdom(mlxcx_t *, mlxcx_tdom_t *);
extern boolean_t mlxcx_cmd_dealloc_tdom(mlxcx_t *, mlxcx_tdom_t *);

extern boolean_t mlxcx_cmd_create_eq(mlxcx_t *, mlxcx_event_queue_t *);
extern boolean_t mlxcx_cmd_destroy_eq(mlxcx_t *, mlxcx_event_queue_t *);
extern boolean_t mlxcx_cmd_query_eq(mlxcx_t *, mlxcx_event_queue_t *,
    mlxcx_eventq_ctx_t *);

extern boolean_t mlxcx_cmd_create_cq(mlxcx_t *, mlxcx_completion_queue_t *);
extern boolean_t mlxcx_cmd_destroy_cq(mlxcx_t *, mlxcx_completion_queue_t *);
extern boolean_t mlxcx_cmd_query_cq(mlxcx_t *, mlxcx_completion_queue_t *,
    mlxcx_completionq_ctx_t *);

extern boolean_t mlxcx_cmd_create_rq(mlxcx_t *, mlxcx_work_queue_t *);
extern boolean_t mlxcx_cmd_start_rq(mlxcx_t *, mlxcx_work_queue_t *);
extern boolean_t mlxcx_cmd_stop_rq(mlxcx_t *, mlxcx_work_queue_t *);
extern boolean_t mlxcx_cmd_destroy_rq(mlxcx_t *, mlxcx_work_queue_t *);
extern boolean_t mlxcx_cmd_query_rq(mlxcx_t *, mlxcx_work_queue_t *,
    mlxcx_rq_ctx_t *);

extern boolean_t mlxcx_cmd_create_tir(mlxcx_t *, mlxcx_tir_t *);
extern boolean_t mlxcx_cmd_destroy_tir(mlxcx_t *, mlxcx_tir_t *);

extern boolean_t mlxcx_cmd_create_sq(mlxcx_t *, mlxcx_work_queue_t *);
extern boolean_t mlxcx_cmd_start_sq(mlxcx_t *, mlxcx_work_queue_t *);
extern boolean_t mlxcx_cmd_stop_sq(mlxcx_t *, mlxcx_work_queue_t *);
extern boolean_t mlxcx_cmd_destroy_sq(mlxcx_t *, mlxcx_work_queue_t *);
extern boolean_t mlxcx_cmd_query_sq(mlxcx_t *, mlxcx_work_queue_t *,
    mlxcx_sq_ctx_t *);

extern boolean_t mlxcx_cmd_create_tis(mlxcx_t *, mlxcx_tis_t *);
extern boolean_t mlxcx_cmd_destroy_tis(mlxcx_t *, mlxcx_tis_t *);

extern boolean_t mlxcx_cmd_query_nic_vport_ctx(mlxcx_t *, mlxcx_port_t *);
extern boolean_t mlxcx_cmd_query_special_ctxs(mlxcx_t *);

extern boolean_t mlxcx_cmd_modify_nic_vport_ctx(mlxcx_t *, mlxcx_port_t *,
    mlxcx_modify_nic_vport_ctx_fields_t);

extern boolean_t mlxcx_cmd_create_flow_table(mlxcx_t *, mlxcx_flow_table_t *);
extern boolean_t mlxcx_cmd_destroy_flow_table(mlxcx_t *, mlxcx_flow_table_t *);
extern boolean_t mlxcx_cmd_set_flow_table_root(mlxcx_t *, mlxcx_flow_table_t *);

extern boolean_t mlxcx_cmd_create_flow_group(mlxcx_t *, mlxcx_flow_group_t *);
extern boolean_t mlxcx_cmd_set_flow_table_entry(mlxcx_t *,
    mlxcx_flow_entry_t *);
extern boolean_t mlxcx_cmd_delete_flow_table_entry(mlxcx_t *,
    mlxcx_flow_entry_t *);
extern boolean_t mlxcx_cmd_destroy_flow_group(mlxcx_t *, mlxcx_flow_group_t *);

extern boolean_t mlxcx_cmd_access_register(mlxcx_t *, mlxcx_cmd_reg_opmod_t,
    mlxcx_register_id_t, mlxcx_register_data_t *);
extern boolean_t mlxcx_cmd_query_port_mtu(mlxcx_t *, mlxcx_port_t *);
extern boolean_t mlxcx_cmd_query_port_status(mlxcx_t *, mlxcx_port_t *);
extern boolean_t mlxcx_cmd_modify_port_status(mlxcx_t *, mlxcx_port_t *,
    mlxcx_port_status_t);
extern boolean_t mlxcx_cmd_query_port_speed(mlxcx_t *, mlxcx_port_t *);
extern boolean_t mlxcx_cmd_query_port_fec(mlxcx_t *, mlxcx_port_t *);
extern boolean_t mlxcx_cmd_modify_port_fec(mlxcx_t *, mlxcx_port_t *,
    mlxcx_pplm_fec_caps_t);

extern boolean_t mlxcx_cmd_set_port_mtu(mlxcx_t *, mlxcx_port_t *);

extern boolean_t mlxcx_cmd_create_rqt(mlxcx_t *, mlxcx_rqtable_t *);
extern boolean_t mlxcx_cmd_destroy_rqt(mlxcx_t *, mlxcx_rqtable_t *);

extern boolean_t mlxcx_cmd_set_int_mod(mlxcx_t *, uint_t, uint_t);

extern boolean_t mlxcx_cmd_query_module_status(mlxcx_t *, uint_t,
    mlxcx_module_status_t *, mlxcx_module_error_type_t *);
extern boolean_t mlxcx_cmd_set_port_led(mlxcx_t *, mlxcx_port_t *, uint16_t);

/* Comparators for avl_ts */
extern int mlxcx_cq_compare(const void *, const void *);
extern int mlxcx_dmac_fe_compare(const void *, const void *);
extern int mlxcx_grmac_compare(const void *, const void *);
extern int mlxcx_page_compare(const void *, const void *);

extern void mlxcx_update_link_state(mlxcx_t *, mlxcx_port_t *);

extern void mlxcx_eth_proto_to_string(mlxcx_eth_proto_t, char *, size_t);
extern const char *mlxcx_port_status_string(mlxcx_port_status_t);

extern const char *mlxcx_event_name(mlxcx_event_t);

/*
 * Sensor Functions
 */
extern boolean_t mlxcx_setup_sensors(mlxcx_t *);
extern void mlxcx_teardown_sensors(mlxcx_t *);

#ifdef __cplusplus
}
#endif

#endif /* _MLXCX_H */