/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * An implementation of the IPoIB standard based on PSARC 2001/289.
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/stropts.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/dlpi.h>
#include <sys/mac_provider.h>

#include <sys/pattr.h>		/* for HCK_FULLCKSUM */
#include <sys/sysmacros.h>	/* for offsetof */
#include <sys/disp.h>		/* for async thread pri */
#include <sys/atomic.h>		/* for atomic_add*() */
#include <sys/ethernet.h>	/* for ETHERTYPE_IPV6 */
#include <netinet/in.h>		/* for netinet/ip.h below */
#include <netinet/ip.h>		/* for struct ip */
#include <netinet/udp.h>	/* for struct udphdr */
#include <inet/common.h>	/* for inet/ip.h below */
#include <inet/ip.h>		/* for ipha_t */
#include <inet/ip6.h>		/* for ip6_t */
#include <inet/tcp.h>		/* for tcph_t */
#include <netinet/icmp6.h>	/* for icmp6_t */
#include <sys/callb.h>
#include <sys/modhash.h>

#include <sys/ib/clients/ibd/ibd.h>
#include <sys/ib/mgt/sm_attr.h>		/* for SM_INIT_TYPE_* */
#include <sys/note.h>
#include <sys/multidata.h>

#include <sys/ib/mgt/ibmf/ibmf.h>	/* for ibd_get_portspeed */

/*
 * Per-interface tunables (for developers)
 *
 * ibd_tx_copy_thresh
 *    This sets the threshold at which ibd will attempt to do a bcopy of the
 *    outgoing data into a pre-mapped buffer. The IPoIB driver's send behavior
 *    is restricted by various parameters, so this value should be changed
 *    only after careful consideration. For instance, IB HCAs currently
 *    impose a relatively small limit (when compared to ethernet NICs) on the
 *    length of the SGL for transmit. On the other hand, the ip stack could
 *    send down mp chains that are quite long when LSO is enabled.
 *
 * ibd_num_swqe
 *    Number of "send WQE" elements that will be allocated and used by ibd.
 *    When tuning this parameter, the size of the pre-allocated, pre-mapped
 *    copy buffer in each of these send wqes must be taken into account. This
 *    copy buffer size is determined by the value of IBD_TX_BUF_SZ (this is
 *    currently set to the same value as ibd_tx_copy_thresh, but may be
 *    changed independently if needed).
 *
 * ibd_num_rwqe
 *    Number of "receive WQE" elements that will be allocated and used by
 *    ibd. This parameter is limited by the maximum channel size of the HCA.
 *    Each buffer in the receive wqe will be of MTU size.
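 *    For example, with the default ibd_num_rwqe of 4000 and the default
 *    2044-byte MTU (IBD_DEF_MAX_SDU below), this amounts to roughly 8 MB of
 *    pre-allocated receive buffer memory per interface, not counting the
 *    per-buffer GRH space or any rounding done by the allocator.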
 *
 * ibd_num_lso_bufs
 *    Number of "larger-than-MTU" copy buffers to use for cases when the
 *    outgoing mblk chain is too fragmented to be used with ibt_map_mem_iov()
 *    and too large to be used with regular MTU-sized copy buffers. It is
 *    not recommended to tune this variable without understanding the
 *    application environment and/or memory resources. The size of each of
 *    these lso buffers is determined by the value of IBD_LSO_BUFSZ.
 *
 * ibd_num_ah
 *    Number of AH cache entries to allocate
 *
 * ibd_hash_size
 *    Hash table size for the active AH list
 *
 * ibd_tx_softintr
 * ibd_rx_softintr
 *    The softintr mechanism allows ibd to avoid event queue overflows if
 *    the receive/completion handlers are expected to be expensive. These
 *    are enabled by default.
 *
 * ibd_log_sz
 *    This specifies the size of the ibd log buffer in bytes. The buffer is
 *    allocated and logging is enabled only when IBD_LOGGING is defined.
 *
 */
uint_t ibd_tx_copy_thresh = 0x1000;
uint_t ibd_num_swqe = 4000;
uint_t ibd_num_rwqe = 4000;
uint_t ibd_num_lso_bufs = 0x400;
uint_t ibd_num_ah = 256;
uint_t ibd_hash_size = 32;
uint_t ibd_rx_softintr = 1;
uint_t ibd_tx_softintr = 1;
uint_t ibd_create_broadcast_group = 1;
#ifdef IBD_LOGGING
uint_t ibd_log_sz = 0x20000;
#endif

#define	IBD_TX_COPY_THRESH	ibd_tx_copy_thresh
#define	IBD_TX_BUF_SZ		ibd_tx_copy_thresh
#define	IBD_NUM_SWQE		ibd_num_swqe
#define	IBD_NUM_RWQE		ibd_num_rwqe
#define	IBD_NUM_LSO_BUFS	ibd_num_lso_bufs
#define	IBD_NUM_AH		ibd_num_ah
#define	IBD_HASH_SIZE		ibd_hash_size
#ifdef IBD_LOGGING
#define	IBD_LOG_SZ		ibd_log_sz
#endif

/*
 * ibd_rc_tx_copy_thresh
 *    This sets the threshold up to which ibd will attempt to do a bcopy of
 *    the outgoing data into a pre-mapped buffer.
 */
uint_t ibd_rc_tx_copy_thresh = 0x1000;

/*
 * Receive CQ moderation parameters: tunable (for developers)
 */
uint_t ibd_rxcomp_count = 4;
uint_t ibd_rxcomp_usec = 10;

/*
 * Send CQ moderation parameters: tunable (for developers)
 */
uint_t ibd_txcomp_count = 16;
uint_t ibd_txcomp_usec = 300;

/* Post IBD_RX_POST_CNT receive work requests at a time.
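 * With the default values below this means receives are replenished 8 at a
 * time across 1 << 4 == 16 rx post queues, so IBD_RWQE_MIN works out to
 * (8 << 4) * 4 == 512 receive work requests that the driver needs to keep
 * posted at a minimum.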
*/ 160 #define IBD_RX_POST_CNT 8 161 162 /* Hash into 1 << IBD_LOG_RX_POST number of rx post queues */ 163 #define IBD_LOG_RX_POST 4 164 165 /* Minimum number of receive work requests driver needs to always have */ 166 #define IBD_RWQE_MIN ((IBD_RX_POST_CNT << IBD_LOG_RX_POST) * 4) 167 168 /* 169 * LSO parameters 170 */ 171 #define IBD_LSO_MAXLEN 65536 172 #define IBD_LSO_BUFSZ 8192 173 #define IBD_PROP_LSO_POLICY "lso-policy" 174 175 /* 176 * Async operation states 177 */ 178 #define IBD_OP_NOTSTARTED 0 179 #define IBD_OP_ONGOING 1 180 #define IBD_OP_COMPLETED 2 181 #define IBD_OP_ERRORED 3 182 #define IBD_OP_ROUTERED 4 183 184 /* 185 * State of IBD driver initialization during attach/m_start 186 */ 187 #define IBD_DRV_STATE_INITIALIZED 0x00001 188 #define IBD_DRV_RXINTR_ADDED 0x00002 189 #define IBD_DRV_TXINTR_ADDED 0x00004 190 #define IBD_DRV_IBTL_ATTACH_DONE 0x00008 191 #define IBD_DRV_HCA_OPENED 0x00010 192 #define IBD_DRV_PD_ALLOCD 0x00020 193 #define IBD_DRV_MAC_REGISTERED 0x00040 194 #define IBD_DRV_PORT_DETAILS_OBTAINED 0x00080 195 #define IBD_DRV_BCAST_GROUP_FOUND 0x00100 196 #define IBD_DRV_ACACHE_INITIALIZED 0x00200 197 #define IBD_DRV_CQS_ALLOCD 0x00400 198 #define IBD_DRV_UD_CHANNEL_SETUP 0x00800 199 #define IBD_DRV_TXLIST_ALLOCD 0x01000 200 #define IBD_DRV_SCQ_NOTIFY_ENABLED 0x02000 201 #define IBD_DRV_RXLIST_ALLOCD 0x04000 202 #define IBD_DRV_BCAST_GROUP_JOINED 0x08000 203 #define IBD_DRV_ASYNC_THR_CREATED 0x10000 204 #define IBD_DRV_RCQ_NOTIFY_ENABLED 0x20000 205 #define IBD_DRV_SM_NOTICES_REGISTERED 0x40000 206 #define IBD_DRV_STARTED 0x80000 207 #define IBD_DRV_RC_SRQ_ALLOCD 0x100000 208 #define IBD_DRV_RC_LARGEBUF_ALLOCD 0x200000 209 #define IBD_DRV_RC_LISTEN 0x400000 210 #ifdef DEBUG 211 #define IBD_DRV_RC_PRIVATE_STATE 0x800000 212 #endif 213 214 /* 215 * Start/stop in-progress flags; note that restart must always remain 216 * the OR of start and stop flag values. 
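 * With the values below this means IBD_DRV_RESTART_IN_PROGRESS (0x30000000)
 * is exactly IBD_DRV_START_IN_PROGRESS (0x10000000) |
 * IBD_DRV_STOP_IN_PROGRESS (0x20000000).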
217 */ 218 #define IBD_DRV_START_IN_PROGRESS 0x10000000 219 #define IBD_DRV_STOP_IN_PROGRESS 0x20000000 220 #define IBD_DRV_RESTART_IN_PROGRESS 0x30000000 221 222 /* 223 * Miscellaneous constants 224 */ 225 #define IB_MGID_IPV4_LOWGRP_MASK 0xFFFFFFFF 226 #define IBD_DEF_MAX_SDU 2044 227 #define IBD_DEFAULT_QKEY 0xB1B 228 #ifdef IBD_LOGGING 229 #define IBD_DMAX_LINE 100 230 #endif 231 232 /* 233 * Enumerations for link states 234 */ 235 typedef enum { 236 IBD_LINK_DOWN, 237 IBD_LINK_UP, 238 IBD_LINK_UP_ABSENT 239 } ibd_link_op_t; 240 241 /* 242 * Driver State Pointer 243 */ 244 void *ibd_list; 245 246 /* 247 * Driver Global Data 248 */ 249 ibd_global_state_t ibd_gstate; 250 251 /* 252 * Logging 253 */ 254 #ifdef IBD_LOGGING 255 kmutex_t ibd_lbuf_lock; 256 uint8_t *ibd_lbuf; 257 uint32_t ibd_lbuf_ndx; 258 #endif 259 260 /* 261 * Required system entry points 262 */ 263 static int ibd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 264 static int ibd_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 265 266 /* 267 * Required driver entry points for GLDv3 268 */ 269 static int ibd_m_stat(void *, uint_t, uint64_t *); 270 static int ibd_m_start(void *); 271 static void ibd_m_stop(void *); 272 static int ibd_m_promisc(void *, boolean_t); 273 static int ibd_m_multicst(void *, boolean_t, const uint8_t *); 274 static int ibd_m_unicst(void *, const uint8_t *); 275 static mblk_t *ibd_m_tx(void *, mblk_t *); 276 static boolean_t ibd_m_getcapab(void *, mac_capab_t, void *); 277 278 /* 279 * Private driver entry points for GLDv3 280 */ 281 282 /* 283 * Initialization 284 */ 285 static int ibd_state_init(ibd_state_t *, dev_info_t *); 286 static int ibd_init_txlist(ibd_state_t *); 287 static int ibd_init_rxlist(ibd_state_t *); 288 static int ibd_acache_init(ibd_state_t *); 289 #ifdef IBD_LOGGING 290 static void ibd_log_init(void); 291 #endif 292 293 /* 294 * Termination/cleanup 295 */ 296 static void ibd_state_fini(ibd_state_t *); 297 static void ibd_fini_txlist(ibd_state_t *); 298 static void ibd_fini_rxlist(ibd_state_t *); 299 static void ibd_tx_cleanup(ibd_state_t *, ibd_swqe_t *); 300 static void ibd_tx_cleanup_list(ibd_state_t *, ibd_swqe_t *, ibd_swqe_t *); 301 static void ibd_acache_fini(ibd_state_t *); 302 #ifdef IBD_LOGGING 303 static void ibd_log_fini(void); 304 #endif 305 306 /* 307 * Allocation/acquire/map routines 308 */ 309 static int ibd_alloc_tx_copybufs(ibd_state_t *); 310 static int ibd_alloc_rx_copybufs(ibd_state_t *); 311 static int ibd_alloc_tx_lsobufs(ibd_state_t *); 312 static ibd_swqe_t *ibd_acquire_swqe(ibd_state_t *); 313 static int ibd_acquire_lsobufs(ibd_state_t *, uint_t, ibt_wr_ds_t *, 314 uint32_t *); 315 316 /* 317 * Free/release/unmap routines 318 */ 319 static void ibd_free_rwqe(ibd_state_t *, ibd_rwqe_t *); 320 static void ibd_free_tx_copybufs(ibd_state_t *); 321 static void ibd_free_rx_copybufs(ibd_state_t *); 322 static void ibd_free_rx_rsrcs(ibd_state_t *); 323 static void ibd_free_tx_lsobufs(ibd_state_t *); 324 static void ibd_release_swqe(ibd_state_t *, ibd_swqe_t *, ibd_swqe_t *, int); 325 static void ibd_release_lsobufs(ibd_state_t *, ibt_wr_ds_t *, uint32_t); 326 static void ibd_free_lsohdr(ibd_swqe_t *, mblk_t *); 327 328 /* 329 * Handlers/callback routines 330 */ 331 static uint_t ibd_intr(caddr_t); 332 static uint_t ibd_tx_recycle(caddr_t); 333 static void ibd_rcq_handler(ibt_cq_hdl_t, void *); 334 static void ibd_scq_handler(ibt_cq_hdl_t, void *); 335 static void ibd_poll_rcq(ibd_state_t *, ibt_cq_hdl_t); 336 static void ibd_poll_scq(ibd_state_t *, ibt_cq_hdl_t); 
337 static void ibd_drain_rcq(ibd_state_t *, ibt_cq_hdl_t); 338 static void ibd_drain_scq(ibd_state_t *, ibt_cq_hdl_t); 339 static void ibd_freemsg_cb(char *); 340 static void ibd_async_handler(void *, ibt_hca_hdl_t, ibt_async_code_t, 341 ibt_async_event_t *); 342 static void ibd_snet_notices_handler(void *, ib_gid_t, 343 ibt_subnet_event_code_t, ibt_subnet_event_t *); 344 345 /* 346 * Send/receive routines 347 */ 348 static boolean_t ibd_send(ibd_state_t *, mblk_t *); 349 static void ibd_post_send(ibd_state_t *, ibd_swqe_t *); 350 static void ibd_post_recv(ibd_state_t *, ibd_rwqe_t *); 351 static mblk_t *ibd_process_rx(ibd_state_t *, ibd_rwqe_t *, ibt_wc_t *); 352 353 /* 354 * Threads 355 */ 356 static void ibd_async_work(ibd_state_t *); 357 358 /* 359 * Async tasks 360 */ 361 static void ibd_async_acache(ibd_state_t *, ipoib_mac_t *); 362 static void ibd_async_multicast(ibd_state_t *, ib_gid_t, int); 363 static void ibd_async_setprom(ibd_state_t *); 364 static void ibd_async_unsetprom(ibd_state_t *); 365 static void ibd_async_reap_group(ibd_state_t *, ibd_mce_t *, ib_gid_t, uint8_t); 366 static void ibd_async_trap(ibd_state_t *, ibd_req_t *); 367 static void ibd_async_txsched(ibd_state_t *); 368 static void ibd_async_link(ibd_state_t *, ibd_req_t *); 369 370 /* 371 * Async task helpers 372 */ 373 static ibd_mce_t *ibd_async_mcache(ibd_state_t *, ipoib_mac_t *, boolean_t *); 374 static ibd_mce_t *ibd_join_group(ibd_state_t *, ib_gid_t, uint8_t); 375 static ibd_mce_t *ibd_mcache_find(ib_gid_t, struct list *); 376 static boolean_t ibd_get_allroutergroup(ibd_state_t *, 377 ipoib_mac_t *, ipoib_mac_t *); 378 static void ibd_leave_group(ibd_state_t *, ib_gid_t, uint8_t); 379 static void ibd_reacquire_group(ibd_state_t *, ibd_mce_t *); 380 static ibt_status_t ibd_iba_join(ibd_state_t *, ib_gid_t, ibd_mce_t *); 381 static ibt_status_t ibd_find_bgroup(ibd_state_t *); 382 static void ibd_n2h_gid(ipoib_mac_t *, ib_gid_t *); 383 static void ibd_h2n_mac(ipoib_mac_t *, ib_qpn_t, ib_sn_prefix_t, ib_guid_t); 384 static uint64_t ibd_get_portspeed(ibd_state_t *); 385 static boolean_t ibd_async_safe(ibd_state_t *); 386 static void ibd_async_done(ibd_state_t *); 387 static ibd_ace_t *ibd_acache_lookup(ibd_state_t *, ipoib_mac_t *, int *, int); 388 static ibd_ace_t *ibd_acache_get_unref(ibd_state_t *); 389 static void ibd_link_mod(ibd_state_t *, ibt_async_code_t); 390 static int ibd_locate_pkey(ib_pkey_t *, uint16_t, ib_pkey_t, uint16_t *); 391 392 /* 393 * Helpers for attach/start routines 394 */ 395 static int ibd_register_mac(ibd_state_t *, dev_info_t *); 396 static int ibd_record_capab(ibd_state_t *, dev_info_t *); 397 static int ibd_unattach(ibd_state_t *, dev_info_t *); 398 static int ibd_get_port_details(ibd_state_t *); 399 static int ibd_alloc_cqs(ibd_state_t *); 400 static int ibd_setup_ud_channel(ibd_state_t *); 401 static int ibd_start(ibd_state_t *); 402 static int ibd_undo_start(ibd_state_t *, link_state_t); 403 static void ibd_set_mac_progress(ibd_state_t *, uint_t); 404 static void ibd_clr_mac_progress(ibd_state_t *, uint_t); 405 406 407 /* 408 * Miscellaneous helpers 409 */ 410 static int ibd_sched_poll(ibd_state_t *, int, int); 411 static void ibd_resume_transmission(ibd_state_t *); 412 static int ibd_setup_lso(ibd_swqe_t *, mblk_t *, uint32_t, ibt_ud_dest_hdl_t); 413 static int ibd_prepare_sgl(ibd_state_t *, mblk_t *, ibd_swqe_t *, uint_t); 414 static void *list_get_head(list_t *); 415 static int ibd_hash_key_cmp(mod_hash_key_t, mod_hash_key_t); 416 static uint_t ibd_hash_by_id(void *, 
mod_hash_key_t); 417 #ifdef IBD_LOGGING 418 static void ibd_log(const char *, ...); 419 #endif 420 421 DDI_DEFINE_STREAM_OPS(ibd_dev_ops, nulldev, nulldev, ibd_attach, ibd_detach, 422 nodev, NULL, D_MP, NULL, ddi_quiesce_not_needed); 423 424 /* Module Driver Info */ 425 static struct modldrv ibd_modldrv = { 426 &mod_driverops, /* This one is a driver */ 427 "InfiniBand GLDv3 Driver", /* short description */ 428 &ibd_dev_ops /* driver specific ops */ 429 }; 430 431 /* Module Linkage */ 432 static struct modlinkage ibd_modlinkage = { 433 MODREV_1, (void *)&ibd_modldrv, NULL 434 }; 435 436 /* 437 * Module (static) info passed to IBTL during ibt_attach 438 */ 439 static struct ibt_clnt_modinfo_s ibd_clnt_modinfo = { 440 IBTI_V_CURR, 441 IBT_NETWORK, 442 ibd_async_handler, 443 NULL, 444 "IPIB" 445 }; 446 447 /* 448 * GLDv3 entry points 449 */ 450 #define IBD_M_CALLBACK_FLAGS (MC_GETCAPAB) 451 static mac_callbacks_t ibd_m_callbacks = { 452 IBD_M_CALLBACK_FLAGS, 453 ibd_m_stat, 454 ibd_m_start, 455 ibd_m_stop, 456 ibd_m_promisc, 457 ibd_m_multicst, 458 ibd_m_unicst, 459 ibd_m_tx, 460 NULL, 461 NULL, 462 ibd_m_getcapab 463 }; 464 465 /* 466 * Fill/clear <scope> and <p_key> in multicast/broadcast address 467 */ 468 #define IBD_FILL_SCOPE_PKEY(maddr, scope, pkey) \ 469 { \ 470 *(uint32_t *)((char *)(maddr) + 4) |= \ 471 htonl((uint32_t)(scope) << 16); \ 472 *(uint32_t *)((char *)(maddr) + 8) |= \ 473 htonl((uint32_t)(pkey) << 16); \ 474 } 475 476 #define IBD_CLEAR_SCOPE_PKEY(maddr) \ 477 { \ 478 *(uint32_t *)((char *)(maddr) + 4) &= \ 479 htonl(~((uint32_t)0xF << 16)); \ 480 *(uint32_t *)((char *)(maddr) + 8) &= \ 481 htonl(~((uint32_t)0xFFFF << 16)); \ 482 } 483 484 /* 485 * Rudimentary debugging support 486 */ 487 #ifdef DEBUG 488 int ibd_debuglevel = 100; 489 void 490 debug_print(int l, char *fmt, ...) 491 { 492 va_list ap; 493 494 if (l < ibd_debuglevel) 495 return; 496 va_start(ap, fmt); 497 vcmn_err(CE_CONT, fmt, ap); 498 va_end(ap); 499 } 500 #endif 501 502 /* 503 * Common routine to print warning messages; adds in hca guid, port number 504 * and pkey to be able to identify the IBA interface. 505 */ 506 void 507 ibd_print_warn(ibd_state_t *state, char *fmt, ...) 508 { 509 ib_guid_t hca_guid; 510 char ibd_print_buf[256]; 511 int len; 512 va_list ap; 513 514 hca_guid = ddi_prop_get_int64(DDI_DEV_T_ANY, state->id_dip, 515 0, "hca-guid", 0); 516 len = snprintf(ibd_print_buf, sizeof (ibd_print_buf), 517 "%s%d: HCA GUID %016llx port %d PKEY %02x ", 518 ddi_driver_name(state->id_dip), ddi_get_instance(state->id_dip), 519 (u_longlong_t)hca_guid, state->id_port, state->id_pkey); 520 va_start(ap, fmt); 521 (void) vsnprintf(ibd_print_buf + len, sizeof (ibd_print_buf) - len, 522 fmt, ap); 523 cmn_err(CE_NOTE, "!%s", ibd_print_buf); 524 va_end(ap); 525 } 526 527 /* 528 * Warlock directives 529 */ 530 531 /* 532 * id_lso_lock 533 * 534 * state->id_lso->bkt_nfree may be accessed without a lock to 535 * determine the threshold at which we have to ask the nw layer 536 * to resume transmission (see ibd_resume_transmission()). 
537 */ 538 _NOTE(MUTEX_PROTECTS_DATA(ibd_state_t::id_lso_lock, 539 ibd_state_t::id_lso)) 540 _NOTE(DATA_READABLE_WITHOUT_LOCK(ibd_state_t::id_lso)) 541 _NOTE(SCHEME_PROTECTS_DATA("init", ibd_state_t::id_lso_policy)) 542 _NOTE(DATA_READABLE_WITHOUT_LOCK(ibd_lsobkt_t::bkt_nfree)) 543 544 /* 545 * id_scq_poll_lock 546 */ 547 _NOTE(MUTEX_PROTECTS_DATA(ibd_state_t::id_scq_poll_lock, 548 ibd_state_t::id_scq_poll_busy)) 549 550 /* 551 * id_txpost_lock 552 */ 553 _NOTE(MUTEX_PROTECTS_DATA(ibd_state_t::id_txpost_lock, 554 ibd_state_t::id_tx_head)) 555 _NOTE(MUTEX_PROTECTS_DATA(ibd_state_t::id_txpost_lock, 556 ibd_state_t::id_tx_busy)) 557 558 /* 559 * id_acache_req_lock 560 */ 561 _NOTE(MUTEX_PROTECTS_DATA(ibd_state_t::id_acache_req_lock, 562 ibd_state_t::id_acache_req_cv)) 563 _NOTE(MUTEX_PROTECTS_DATA(ibd_state_t::id_acache_req_lock, 564 ibd_state_t::id_req_list)) 565 _NOTE(SCHEME_PROTECTS_DATA("atomic", 566 ibd_acache_s::ac_ref)) 567 568 /* 569 * id_ac_mutex 570 * 571 * This mutex is actually supposed to protect id_ah_op as well, 572 * but this path of the code isn't clean (see update of id_ah_op 573 * in ibd_async_acache(), immediately after the call to 574 * ibd_async_mcache()). For now, we'll skip this check by 575 * declaring that id_ah_op is protected by some internal scheme 576 * that warlock isn't aware of. 577 */ 578 _NOTE(MUTEX_PROTECTS_DATA(ibd_state_t::id_ac_mutex, 579 ibd_state_t::id_ah_active)) 580 _NOTE(MUTEX_PROTECTS_DATA(ibd_state_t::id_ac_mutex, 581 ibd_state_t::id_ah_free)) 582 _NOTE(MUTEX_PROTECTS_DATA(ibd_state_t::id_ac_mutex, 583 ibd_state_t::id_ah_addr)) 584 _NOTE(SCHEME_PROTECTS_DATA("ac mutex should protect this", 585 ibd_state_t::id_ah_op)) 586 _NOTE(MUTEX_PROTECTS_DATA(ibd_state_t::id_ac_mutex, 587 ibd_state_t::id_ah_error)) 588 _NOTE(MUTEX_PROTECTS_DATA(ibd_state_t::id_ac_mutex, 589 ibd_state_t::id_ac_hot_ace)) 590 _NOTE(DATA_READABLE_WITHOUT_LOCK(ibd_state_t::id_ah_error)) 591 592 /* 593 * id_mc_mutex 594 */ 595 _NOTE(MUTEX_PROTECTS_DATA(ibd_state_t::id_mc_mutex, 596 ibd_state_t::id_mc_full)) 597 _NOTE(MUTEX_PROTECTS_DATA(ibd_state_t::id_mc_mutex, 598 ibd_state_t::id_mc_non)) 599 600 /* 601 * id_trap_lock 602 */ 603 _NOTE(MUTEX_PROTECTS_DATA(ibd_state_t::id_trap_lock, 604 ibd_state_t::id_trap_cv)) 605 _NOTE(MUTEX_PROTECTS_DATA(ibd_state_t::id_trap_lock, 606 ibd_state_t::id_trap_stop)) 607 _NOTE(MUTEX_PROTECTS_DATA(ibd_state_t::id_trap_lock, 608 ibd_state_t::id_trap_inprog)) 609 610 /* 611 * id_prom_op 612 */ 613 _NOTE(SCHEME_PROTECTS_DATA("only by async thread", 614 ibd_state_t::id_prom_op)) 615 616 /* 617 * id_sched_lock 618 */ 619 _NOTE(MUTEX_PROTECTS_DATA(ibd_state_t::id_sched_lock, 620 ibd_state_t::id_sched_needed)) 621 622 /* 623 * id_link_mutex 624 */ 625 _NOTE(MUTEX_PROTECTS_DATA(ibd_state_t::id_link_mutex, 626 ibd_state_t::id_link_state)) 627 _NOTE(DATA_READABLE_WITHOUT_LOCK(ibd_state_t::id_link_state)) 628 _NOTE(SCHEME_PROTECTS_DATA("only async thr and ibd_m_start", 629 ibd_state_t::id_link_speed)) 630 _NOTE(DATA_READABLE_WITHOUT_LOCK(ibd_state_t::id_sgid)) 631 632 /* 633 * id_tx_list.dl_mutex 634 */ 635 _NOTE(MUTEX_PROTECTS_DATA(ibd_state_t::id_tx_list.dl_mutex, 636 ibd_state_t::id_tx_list.dl_head)) 637 _NOTE(MUTEX_PROTECTS_DATA(ibd_state_t::id_tx_list.dl_mutex, 638 ibd_state_t::id_tx_list.dl_pending_sends)) 639 _NOTE(MUTEX_PROTECTS_DATA(ibd_state_t::id_tx_list.dl_mutex, 640 ibd_state_t::id_tx_list.dl_cnt)) 641 642 /* 643 * id_rx_list.dl_mutex 644 */ 645 _NOTE(SCHEME_PROTECTS_DATA("atomic or dl mutex or single thr", 646 
ibd_state_t::id_rx_list.dl_bufs_outstanding)) 647 _NOTE(SCHEME_PROTECTS_DATA("atomic or dl mutex or single thr", 648 ibd_state_t::id_rx_list.dl_cnt)) 649 650 651 /* 652 * Items protected by atomic updates 653 */ 654 _NOTE(SCHEME_PROTECTS_DATA("atomic update only", 655 ibd_state_s::id_brd_rcv 656 ibd_state_s::id_brd_xmt 657 ibd_state_s::id_multi_rcv 658 ibd_state_s::id_multi_xmt 659 ibd_state_s::id_num_intrs 660 ibd_state_s::id_rcv_bytes 661 ibd_state_s::id_rcv_pkt 662 ibd_state_s::id_rx_post_queue_index 663 ibd_state_s::id_tx_short 664 ibd_state_s::id_xmt_bytes 665 ibd_state_s::id_xmt_pkt 666 ibd_state_s::rc_rcv_trans_byte 667 ibd_state_s::rc_rcv_trans_pkt 668 ibd_state_s::rc_rcv_copy_byte 669 ibd_state_s::rc_rcv_copy_pkt 670 ibd_state_s::rc_xmt_bytes 671 ibd_state_s::rc_xmt_small_pkt 672 ibd_state_s::rc_xmt_fragmented_pkt 673 ibd_state_s::rc_xmt_map_fail_pkt 674 ibd_state_s::rc_xmt_map_succ_pkt)) 675 676 /* 677 * Non-mutex protection schemes for data elements. Almost all of 678 * these are non-shared items. 679 */ 680 _NOTE(SCHEME_PROTECTS_DATA("unshared or single-threaded", 681 callb_cpr 682 ib_gid_s 683 ib_header_info 684 ibd_acache_rq 685 ibd_acache_s::ac_mce 686 ibd_acache_s::ac_chan 687 ibd_mcache::mc_fullreap 688 ibd_mcache::mc_jstate 689 ibd_mcache::mc_req 690 ibd_rwqe_s 691 ibd_swqe_s 692 ibd_wqe_s 693 ibt_wr_ds_s::ds_va 694 ibt_wr_lso_s 695 ipoib_mac::ipoib_qpn 696 mac_capab_lso_s 697 msgb::b_next 698 msgb::b_cont 699 msgb::b_rptr 700 msgb::b_wptr 701 ibd_state_s::id_bgroup_created 702 ibd_state_s::id_mac_state 703 ibd_state_s::id_mtu 704 ibd_state_s::id_num_rwqe 705 ibd_state_s::id_num_swqe 706 ibd_state_s::id_qpnum 707 ibd_state_s::id_rcq_hdl 708 ibd_state_s::id_rx_buf_sz 709 ibd_state_s::id_rx_bufs 710 ibd_state_s::id_rx_mr_hdl 711 ibd_state_s::id_rx_wqes 712 ibd_state_s::id_rxwcs 713 ibd_state_s::id_rxwcs_size 714 ibd_state_s::id_rx_nqueues 715 ibd_state_s::id_rx_queues 716 ibd_state_s::id_scope 717 ibd_state_s::id_scq_hdl 718 ibd_state_s::id_tx_buf_sz 719 ibd_state_s::id_tx_bufs 720 ibd_state_s::id_tx_mr_hdl 721 ibd_state_s::id_tx_rel_list.dl_cnt 722 ibd_state_s::id_tx_wqes 723 ibd_state_s::id_txwcs 724 ibd_state_s::id_txwcs_size 725 ibd_state_s::rc_listen_hdl 726 ibd_state_s::rc_listen_hdl_OFED_interop 727 ibd_state_s::rc_srq_size 728 ibd_state_s::rc_srq_rwqes 729 ibd_state_s::rc_srq_rx_bufs 730 ibd_state_s::rc_srq_rx_mr_hdl 731 ibd_state_s::rc_tx_largebuf_desc_base 732 ibd_state_s::rc_tx_mr_bufs 733 ibd_state_s::rc_tx_mr_hdl 734 ipha_s 735 icmph_s 736 ibt_path_info_s::pi_sid 737 ibd_rc_chan_s::ace 738 ibd_rc_chan_s::chan_hdl 739 ibd_rc_chan_s::state 740 ibd_rc_chan_s::chan_state 741 ibd_rc_chan_s::is_tx_chan 742 ibd_rc_chan_s::rcq_hdl 743 ibd_rc_chan_s::rcq_size 744 ibd_rc_chan_s::scq_hdl 745 ibd_rc_chan_s::scq_size 746 ibd_rc_chan_s::requester_gid 747 ibd_rc_chan_s::requester_pkey 748 ibd_rc_chan_s::rx_bufs 749 ibd_rc_chan_s::rx_mr_hdl 750 ibd_rc_chan_s::rx_rwqes 751 ibd_rc_chan_s::tx_wqes 752 ibd_rc_chan_s::tx_mr_bufs 753 ibd_rc_chan_s::tx_mr_hdl 754 ibd_rc_chan_s::tx_rel_list.dl_cnt 755 ibd_rc_chan_s::tx_trans_error_cnt 756 ibd_rc_tx_largebuf_s::lb_buf 757 ibd_rc_msg_hello_s 758 ibt_cm_return_args_s)) 759 760 /* 761 * ibd_rc_chan_s::next is protected by two mutexes: 762 * 1) ibd_state_s::rc_pass_chan_list.chan_list_mutex 763 * 2) ibd_state_s::rc_obs_act_chan_list.chan_list_mutex. 
 */
_NOTE(SCHEME_PROTECTS_DATA("protected by two mutexes",
    ibd_rc_chan_s::next))

/*
 * ibd_state_s.rc_tx_large_bufs_lock
 */
_NOTE(MUTEX_PROTECTS_DATA(ibd_state_s::rc_tx_large_bufs_lock,
    ibd_state_s::rc_tx_largebuf_free_head))
_NOTE(MUTEX_PROTECTS_DATA(ibd_state_s::rc_tx_large_bufs_lock,
    ibd_state_s::rc_tx_largebuf_nfree))
_NOTE(MUTEX_PROTECTS_DATA(ibd_state_s::rc_tx_large_bufs_lock,
    ibd_rc_tx_largebuf_s::lb_next))

/*
 * ibd_acache_s.tx_too_big_mutex
 */
_NOTE(MUTEX_PROTECTS_DATA(ibd_acache_s::tx_too_big_mutex,
    ibd_acache_s::tx_too_big_ongoing))

/*
 * tx_wqe_list.dl_mutex
 */
_NOTE(MUTEX_PROTECTS_DATA(ibd_rc_chan_s::tx_wqe_list.dl_mutex,
    ibd_rc_chan_s::tx_wqe_list.dl_head))
_NOTE(MUTEX_PROTECTS_DATA(ibd_rc_chan_s::tx_wqe_list.dl_mutex,
    ibd_rc_chan_s::tx_wqe_list.dl_pending_sends))
_NOTE(MUTEX_PROTECTS_DATA(ibd_rc_chan_s::tx_wqe_list.dl_mutex,
    ibd_rc_chan_s::tx_wqe_list.dl_cnt))

/*
 * ibd_state_s.rc_ace_recycle_lock
 */
_NOTE(MUTEX_PROTECTS_DATA(ibd_state_s::rc_ace_recycle_lock,
    ibd_state_s::rc_ace_recycle))

/*
 * rc_srq_rwqe_list.dl_mutex
 */
_NOTE(SCHEME_PROTECTS_DATA("atomic or dl mutex or single thr",
    ibd_state_t::rc_srq_rwqe_list.dl_bufs_outstanding))
_NOTE(SCHEME_PROTECTS_DATA("atomic or dl mutex or single thr",
    ibd_state_t::rc_srq_rwqe_list.dl_cnt))

/*
 * Non-mutex protection schemes for data elements. They are counters
 * for problem diagnosis and do not need to be protected.
 */
_NOTE(SCHEME_PROTECTS_DATA("counters for problem diagnosis",
    ibd_state_s::rc_rcv_alloc_fail
    ibd_state_s::rc_rcq_invoke
    ibd_state_s::rc_rcq_err
    ibd_state_s::rc_ace_not_found
    ibd_state_s::rc_xmt_drop_too_long_pkt
    ibd_state_s::rc_xmt_icmp_too_long_pkt
    ibd_state_s::rc_xmt_reenter_too_long_pkt
    ibd_state_s::rc_swqe_short
    ibd_state_s::rc_swqe_mac_update
    ibd_state_s::rc_xmt_buf_short
    ibd_state_s::rc_xmt_buf_mac_update
    ibd_state_s::rc_scq_no_swqe
    ibd_state_s::rc_scq_no_largebuf
    ibd_state_s::rc_scq_invoke
    ibd_state_s::rc_conn_succ
    ibd_state_s::rc_conn_fail
    ibd_state_s::rc_null_conn
    ibd_state_s::rc_no_estab_conn
    ibd_state_s::rc_act_close
    ibd_state_s::rc_pas_close
    ibd_state_s::rc_delay_ace_recycle
    ibd_state_s::rc_act_close_simultaneous
    ibd_state_s::rc_reset_cnt))

#ifdef DEBUG
/*
 * Non-mutex protection schemes for data elements. They are counters
 * for problem diagnosis and do not need to be protected.
841 */ 842 _NOTE(SCHEME_PROTECTS_DATA("counters for problem diagnosis", 843 ibd_state_s::rc_rwqe_short 844 ibd_rc_stat_s::rc_rcv_trans_byte 845 ibd_rc_stat_s::rc_rcv_trans_pkt 846 ibd_rc_stat_s::rc_rcv_copy_byte 847 ibd_rc_stat_s::rc_rcv_copy_pkt 848 ibd_rc_stat_s::rc_rcv_alloc_fail 849 ibd_rc_stat_s::rc_rcq_invoke 850 ibd_rc_stat_s::rc_rcq_err 851 ibd_rc_stat_s::rc_scq_invoke 852 ibd_rc_stat_s::rc_rwqe_short 853 ibd_rc_stat_s::rc_xmt_bytes 854 ibd_rc_stat_s::rc_xmt_small_pkt 855 ibd_rc_stat_s::rc_xmt_fragmented_pkt 856 ibd_rc_stat_s::rc_xmt_map_fail_pkt 857 ibd_rc_stat_s::rc_xmt_map_succ_pkt 858 ibd_rc_stat_s::rc_ace_not_found 859 ibd_rc_stat_s::rc_scq_no_swqe 860 ibd_rc_stat_s::rc_scq_no_largebuf 861 ibd_rc_stat_s::rc_swqe_short 862 ibd_rc_stat_s::rc_swqe_mac_update 863 ibd_rc_stat_s::rc_xmt_buf_short 864 ibd_rc_stat_s::rc_xmt_buf_mac_update 865 ibd_rc_stat_s::rc_conn_succ 866 ibd_rc_stat_s::rc_conn_fail 867 ibd_rc_stat_s::rc_null_conn 868 ibd_rc_stat_s::rc_no_estab_conn 869 ibd_rc_stat_s::rc_act_close 870 ibd_rc_stat_s::rc_pas_close 871 ibd_rc_stat_s::rc_delay_ace_recycle 872 ibd_rc_stat_s::rc_act_close_simultaneous 873 ibd_rc_stat_s::rc_reset_cnt)) 874 #endif 875 876 int 877 _init() 878 { 879 int status; 880 881 status = ddi_soft_state_init(&ibd_list, max(sizeof (ibd_state_t), 882 PAGESIZE), 0); 883 if (status != 0) { 884 DPRINT(10, "_init:failed in ddi_soft_state_init()"); 885 return (status); 886 } 887 888 mac_init_ops(&ibd_dev_ops, "ibd"); 889 status = mod_install(&ibd_modlinkage); 890 if (status != 0) { 891 DPRINT(10, "_init:failed in mod_install()"); 892 ddi_soft_state_fini(&ibd_list); 893 mac_fini_ops(&ibd_dev_ops); 894 return (status); 895 } 896 897 mutex_init(&ibd_gstate.ig_mutex, NULL, MUTEX_DRIVER, NULL); 898 mutex_enter(&ibd_gstate.ig_mutex); 899 ibd_gstate.ig_ibt_hdl = NULL; 900 ibd_gstate.ig_ibt_hdl_ref_cnt = 0; 901 ibd_gstate.ig_service_list = NULL; 902 mutex_exit(&ibd_gstate.ig_mutex); 903 904 #ifdef IBD_LOGGING 905 ibd_log_init(); 906 #endif 907 return (0); 908 } 909 910 int 911 _info(struct modinfo *modinfop) 912 { 913 return (mod_info(&ibd_modlinkage, modinfop)); 914 } 915 916 int 917 _fini() 918 { 919 int status; 920 921 status = mod_remove(&ibd_modlinkage); 922 if (status != 0) 923 return (status); 924 925 mac_fini_ops(&ibd_dev_ops); 926 ddi_soft_state_fini(&ibd_list); 927 mutex_destroy(&ibd_gstate.ig_mutex); 928 #ifdef IBD_LOGGING 929 ibd_log_fini(); 930 #endif 931 return (0); 932 } 933 934 /* 935 * Convert the GID part of the mac address from network byte order 936 * to host order. 937 */ 938 static void 939 ibd_n2h_gid(ipoib_mac_t *mac, ib_gid_t *dgid) 940 { 941 ib_sn_prefix_t nbopref; 942 ib_guid_t nboguid; 943 944 bcopy(mac->ipoib_gidpref, &nbopref, sizeof (ib_sn_prefix_t)); 945 bcopy(mac->ipoib_gidsuff, &nboguid, sizeof (ib_guid_t)); 946 dgid->gid_prefix = b2h64(nbopref); 947 dgid->gid_guid = b2h64(nboguid); 948 } 949 950 /* 951 * Create the IPoIB address in network byte order from host order inputs. 952 */ 953 static void 954 ibd_h2n_mac(ipoib_mac_t *mac, ib_qpn_t qpn, ib_sn_prefix_t prefix, 955 ib_guid_t guid) 956 { 957 ib_sn_prefix_t nbopref; 958 ib_guid_t nboguid; 959 960 mac->ipoib_qpn = htonl(qpn); 961 nbopref = h2b64(prefix); 962 nboguid = h2b64(guid); 963 bcopy(&nbopref, mac->ipoib_gidpref, sizeof (ib_sn_prefix_t)); 964 bcopy(&nboguid, mac->ipoib_gidsuff, sizeof (ib_guid_t)); 965 } 966 967 /* 968 * Send to the appropriate all-routers group when the IBA multicast group 969 * does not exist, based on whether the target group is v4 or v6. 
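 *
 * A sketch of the v4, link-local scope case: for a group MGID of the form
 * ff12:401b:<pkey>::<group bits>, the generated all-routers MGID is
 * ff12:401b:<pkey>::2, since INADDR_ALLRTRS_GROUP - INADDR_UNSPEC_GROUP
 * (224.0.0.2 - 224.0.0.0) leaves just 2 in the GUID half of the address.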
970 */ 971 static boolean_t 972 ibd_get_allroutergroup(ibd_state_t *state, ipoib_mac_t *mcmac, 973 ipoib_mac_t *rmac) 974 { 975 boolean_t retval = B_TRUE; 976 uint32_t adjscope = state->id_scope << 16; 977 uint32_t topword; 978 979 /* 980 * Copy the first 4 bytes in without assuming any alignment of 981 * input mac address; this will have IPoIB signature, flags and 982 * scope bits. 983 */ 984 bcopy(mcmac->ipoib_gidpref, &topword, sizeof (uint32_t)); 985 topword = ntohl(topword); 986 987 /* 988 * Generate proper address for IPv4/v6, adding in the Pkey properly. 989 */ 990 if ((topword == (IB_MCGID_IPV4_PREFIX | adjscope)) || 991 (topword == (IB_MCGID_IPV6_PREFIX | adjscope))) 992 ibd_h2n_mac(rmac, IB_MC_QPN, (((uint64_t)topword << 32) | 993 ((uint32_t)(state->id_pkey << 16))), 994 (INADDR_ALLRTRS_GROUP - INADDR_UNSPEC_GROUP)); 995 else 996 /* 997 * Does not have proper bits in the mgid address. 998 */ 999 retval = B_FALSE; 1000 1001 return (retval); 1002 } 1003 1004 /* 1005 * Membership states for different mcg's are tracked by two lists: 1006 * the "non" list is used for promiscuous mode, when all mcg traffic 1007 * needs to be inspected. This type of membership is never used for 1008 * transmission, so there can not be an AH in the active list 1009 * corresponding to a member in this list. This list does not need 1010 * any protection, since all operations are performed by the async 1011 * thread. 1012 * 1013 * "Full" and "SendOnly" membership is tracked using a single list, 1014 * the "full" list. This is because this single list can then be 1015 * searched during transmit to a multicast group (if an AH for the 1016 * mcg is not found in the active list), since at least one type 1017 * of membership must be present before initiating the transmit. 1018 * This list is also emptied during driver detach, since sendonly 1019 * membership acquired during transmit is dropped at detach time 1020 * along with ipv4 broadcast full membership. Insert/deletes to 1021 * this list are done only by the async thread, but it is also 1022 * searched in program context (see multicast disable case), thus 1023 * the id_mc_mutex protects the list. The driver detach path also 1024 * deconstructs the "full" list, but it ensures that the async 1025 * thread will not be accessing the list (by blocking out mcg 1026 * trap handling and making sure no more Tx reaping will happen). 1027 * 1028 * Currently, an IBA attach is done in the SendOnly case too, 1029 * although this is not required. 1030 */ 1031 #define IBD_MCACHE_INSERT_FULL(state, mce) \ 1032 list_insert_head(&state->id_mc_full, mce) 1033 #define IBD_MCACHE_INSERT_NON(state, mce) \ 1034 list_insert_head(&state->id_mc_non, mce) 1035 #define IBD_MCACHE_FIND_FULL(state, mgid) \ 1036 ibd_mcache_find(mgid, &state->id_mc_full) 1037 #define IBD_MCACHE_FIND_NON(state, mgid) \ 1038 ibd_mcache_find(mgid, &state->id_mc_non) 1039 #define IBD_MCACHE_PULLOUT_FULL(state, mce) \ 1040 list_remove(&state->id_mc_full, mce) 1041 #define IBD_MCACHE_PULLOUT_NON(state, mce) \ 1042 list_remove(&state->id_mc_non, mce) 1043 1044 static void * 1045 list_get_head(list_t *list) 1046 { 1047 list_node_t *lhead = list_head(list); 1048 1049 if (lhead != NULL) 1050 list_remove(list, lhead); 1051 return (lhead); 1052 } 1053 1054 /* 1055 * This is always guaranteed to be able to queue the work. 
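 *
 * A typical caller looks like the sketch below (this mirrors the
 * IBD_ASYNC_GETAH path in ibd_acache_lookup() later in this file):
 *
 *	req = kmem_cache_alloc(state->id_req_kmc, KM_NOSLEEP);
 *	if (req != NULL) {
 *		bcopy(mac, &req->rq_mac, IPOIB_ADDRL);
 *		ibd_queue_work_slot(state, req, IBD_ASYNC_GETAH);
 *	}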
 */
void
ibd_queue_work_slot(ibd_state_t *state, ibd_req_t *ptr, int op)
{
	/* Initialize request */
	DPRINT(1, "ibd_queue_work_slot : op: %d \n", op);
	ptr->rq_op = op;

	/*
	 * Queue provided slot onto request pool.
	 */
	mutex_enter(&state->id_acache_req_lock);
	list_insert_tail(&state->id_req_list, ptr);

	/* Go, fetch, async thread */
	cv_signal(&state->id_acache_req_cv);
	mutex_exit(&state->id_acache_req_lock);
}

/*
 * Main body of the per interface async thread.
 */
static void
ibd_async_work(ibd_state_t *state)
{
	ibd_req_t *ptr;
	callb_cpr_t cprinfo;

	mutex_enter(&state->id_acache_req_lock);
	CALLB_CPR_INIT(&cprinfo, &state->id_acache_req_lock,
	    callb_generic_cpr, "ibd_async_work");

	for (;;) {
		ptr = list_get_head(&state->id_req_list);
		if (ptr != NULL) {
			mutex_exit(&state->id_acache_req_lock);

			/*
			 * Once we have done the operation, there is no
			 * guarantee the request slot is going to be valid,
			 * it might be freed up (as in IBD_ASYNC_LEAVE, REAP,
			 * TRAP).
			 *
			 * Perform the request.
			 */
			switch (ptr->rq_op) {
			case IBD_ASYNC_GETAH:
				ibd_async_acache(state, &ptr->rq_mac);
				break;
			case IBD_ASYNC_JOIN:
			case IBD_ASYNC_LEAVE:
				ibd_async_multicast(state,
				    ptr->rq_gid, ptr->rq_op);
				break;
			case IBD_ASYNC_PROMON:
				ibd_async_setprom(state);
				break;
			case IBD_ASYNC_PROMOFF:
				ibd_async_unsetprom(state);
				break;
			case IBD_ASYNC_REAP:
				ibd_async_reap_group(state,
				    ptr->rq_ptr, ptr->rq_gid,
				    IB_MC_JSTATE_FULL);
				/*
				 * The req is embedded in the mce
				 * structure, so we do not need
				 * to free it here.
				 */
				ptr = NULL;
				break;
			case IBD_ASYNC_TRAP:
				ibd_async_trap(state, ptr);
				break;
			case IBD_ASYNC_SCHED:
				ibd_async_txsched(state);
				break;
			case IBD_ASYNC_LINK:
				ibd_async_link(state, ptr);
				break;
			case IBD_ASYNC_EXIT:
				mutex_enter(&state->id_acache_req_lock);
#ifndef __lock_lint
				CALLB_CPR_EXIT(&cprinfo);
#else
				mutex_exit(&state->id_acache_req_lock);
#endif
				return;
			case IBD_ASYNC_RC_TOO_BIG:
				ibd_async_rc_process_too_big(state,
				    ptr);
				break;
			case IBD_ASYNC_RC_CLOSE_ACT_CHAN:
				ibd_async_rc_close_act_chan(state, ptr);
				break;
			case IBD_ASYNC_RC_RECYCLE_ACE:
				ibd_async_rc_recycle_ace(state, ptr);
				break;
			}
			if (ptr != NULL)
				kmem_cache_free(state->id_req_kmc, ptr);

			mutex_enter(&state->id_acache_req_lock);
		} else {
#ifndef __lock_lint
			/*
			 * Nothing to do: wait till new request arrives.
			 */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&state->id_acache_req_cv,
			    &state->id_acache_req_lock);
			CALLB_CPR_SAFE_END(&cprinfo,
			    &state->id_acache_req_lock);
#endif
		}
	}

	/*NOTREACHED*/
	_NOTE(NOT_REACHED)
}

/*
 * Return when it is safe to queue requests to the async daemon; primarily
 * for subnet trap and async event handling. Disallow requests before the
 * daemon is created, and when interface deinitialization starts.
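 *
 * Callers bracket trap and async event work with ibd_async_safe() and
 * ibd_async_done(): the handler checks ibd_async_safe() before queueing the
 * request, and the async task calls ibd_async_done() when it has finished
 * processing it (see, e.g., ibd_async_link()). A sketch:
 *
 *	if (!ibd_async_safe(state))
 *		return;
 *	... allocate the ibd_req_t and ibd_queue_work_slot() it ...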
 */
static boolean_t
ibd_async_safe(ibd_state_t *state)
{
	mutex_enter(&state->id_trap_lock);
	if (state->id_trap_stop) {
		mutex_exit(&state->id_trap_lock);
		return (B_FALSE);
	}
	state->id_trap_inprog++;
	mutex_exit(&state->id_trap_lock);
	return (B_TRUE);
}

/*
 * Wake up ibd_m_stop() if the unplumb code is waiting for pending subnet
 * trap or event handling to complete to kill the async thread and deconstruct
 * the mcg/ace list.
 */
static void
ibd_async_done(ibd_state_t *state)
{
	mutex_enter(&state->id_trap_lock);
	if (--state->id_trap_inprog == 0)
		cv_signal(&state->id_trap_cv);
	mutex_exit(&state->id_trap_lock);
}

/*
 * Hash functions:
 * ibd_hash_by_id: Returns the qpn as the hash value used to select a bucket.
 * ibd_hash_key_cmp: Compares two keys; returns 0 if they match, else 1.
 * These operate on mac addresses input into ibd_send, but there is no
 * guarantee on the alignment of the ipoib_mac_t structure.
 */
/*ARGSUSED*/
static uint_t
ibd_hash_by_id(void *hash_data, mod_hash_key_t key)
{
	ulong_t ptraddr = (ulong_t)key;
	uint_t hval;

	/*
	 * If the input address is 4 byte aligned, we can just dereference
	 * it. This is most common, since IP will send in a 4 byte aligned
	 * IP header, which implies the 24 byte IPoIB pseudo header will be
	 * 4 byte aligned too.
	 */
	if ((ptraddr & 3) == 0)
		return ((uint_t)((ipoib_mac_t *)key)->ipoib_qpn);

	bcopy(&(((ipoib_mac_t *)key)->ipoib_qpn), &hval, sizeof (uint_t));
	return (hval);
}

static int
ibd_hash_key_cmp(mod_hash_key_t key1, mod_hash_key_t key2)
{
	if (bcmp((char *)key1, (char *)key2, IPOIB_ADDRL) == 0)
		return (0);
	else
		return (1);
}

/*
 * Initialize all the per interface caches and lists; AH cache,
 * MCG list etc.
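 *
 * With the default tunables this pre-allocates IBD_NUM_AH (256) address
 * cache entries, each with its own UD destination handle, and creates the
 * active-AH mod_hash with IBD_HASH_SIZE (32) buckets, using ibd_hash_by_id()
 * above to hash on the QPN word of the IPoIB address.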
1248 */ 1249 static int 1250 ibd_acache_init(ibd_state_t *state) 1251 { 1252 ibd_ace_t *ce; 1253 int i; 1254 1255 mutex_init(&state->id_acache_req_lock, NULL, MUTEX_DRIVER, NULL); 1256 cv_init(&state->id_acache_req_cv, NULL, CV_DEFAULT, NULL); 1257 1258 mutex_init(&state->id_ac_mutex, NULL, MUTEX_DRIVER, NULL); 1259 mutex_init(&state->id_mc_mutex, NULL, MUTEX_DRIVER, NULL); 1260 mutex_enter(&state->id_ac_mutex); 1261 list_create(&state->id_ah_free, sizeof (ibd_ace_t), 1262 offsetof(ibd_ace_t, ac_list)); 1263 list_create(&state->id_ah_active, sizeof (ibd_ace_t), 1264 offsetof(ibd_ace_t, ac_list)); 1265 state->id_ah_active_hash = mod_hash_create_extended("IBD AH hash", 1266 IBD_HASH_SIZE, mod_hash_null_keydtor, mod_hash_null_valdtor, 1267 ibd_hash_by_id, NULL, ibd_hash_key_cmp, KM_SLEEP); 1268 list_create(&state->id_mc_full, sizeof (ibd_mce_t), 1269 offsetof(ibd_mce_t, mc_list)); 1270 list_create(&state->id_mc_non, sizeof (ibd_mce_t), 1271 offsetof(ibd_mce_t, mc_list)); 1272 list_create(&state->id_req_list, sizeof (ibd_req_t), 1273 offsetof(ibd_req_t, rq_list)); 1274 state->id_ac_hot_ace = NULL; 1275 1276 state->id_ac_list = ce = (ibd_ace_t *)kmem_zalloc(sizeof (ibd_ace_t) * 1277 IBD_NUM_AH, KM_SLEEP); 1278 for (i = 0; i < IBD_NUM_AH; i++, ce++) { 1279 if (ibt_alloc_ud_dest(state->id_hca_hdl, IBT_UD_DEST_NO_FLAGS, 1280 state->id_pd_hdl, &ce->ac_dest) != IBT_SUCCESS) { 1281 mutex_exit(&state->id_ac_mutex); 1282 ibd_acache_fini(state); 1283 return (DDI_FAILURE); 1284 } else { 1285 CLEAR_REFCYCLE(ce); 1286 ce->ac_mce = NULL; 1287 mutex_init(&ce->tx_too_big_mutex, NULL, 1288 MUTEX_DRIVER, NULL); 1289 IBD_ACACHE_INSERT_FREE(state, ce); 1290 } 1291 } 1292 mutex_exit(&state->id_ac_mutex); 1293 return (DDI_SUCCESS); 1294 } 1295 1296 static void 1297 ibd_acache_fini(ibd_state_t *state) 1298 { 1299 ibd_ace_t *ptr; 1300 1301 mutex_enter(&state->id_ac_mutex); 1302 1303 while ((ptr = IBD_ACACHE_GET_ACTIVE(state)) != NULL) { 1304 ASSERT(GET_REF(ptr) == 0); 1305 mutex_destroy(&ptr->tx_too_big_mutex); 1306 (void) ibt_free_ud_dest(ptr->ac_dest); 1307 } 1308 1309 while ((ptr = IBD_ACACHE_GET_FREE(state)) != NULL) { 1310 ASSERT(GET_REF(ptr) == 0); 1311 mutex_destroy(&ptr->tx_too_big_mutex); 1312 (void) ibt_free_ud_dest(ptr->ac_dest); 1313 } 1314 1315 list_destroy(&state->id_ah_free); 1316 list_destroy(&state->id_ah_active); 1317 list_destroy(&state->id_mc_full); 1318 list_destroy(&state->id_mc_non); 1319 list_destroy(&state->id_req_list); 1320 kmem_free(state->id_ac_list, sizeof (ibd_ace_t) * IBD_NUM_AH); 1321 mutex_exit(&state->id_ac_mutex); 1322 mutex_destroy(&state->id_ac_mutex); 1323 mutex_destroy(&state->id_mc_mutex); 1324 mutex_destroy(&state->id_acache_req_lock); 1325 cv_destroy(&state->id_acache_req_cv); 1326 } 1327 1328 /* 1329 * Search AH active hash list for a cached path to input destination. 1330 * If we are "just looking", hold == F. When we are in the Tx path, 1331 * we set hold == T to grab a reference on the AH so that it can not 1332 * be recycled to a new destination while the Tx request is posted. 1333 */ 1334 ibd_ace_t * 1335 ibd_acache_find(ibd_state_t *state, ipoib_mac_t *mac, boolean_t hold, int num) 1336 { 1337 ibd_ace_t *ptr; 1338 1339 ASSERT(mutex_owned(&state->id_ac_mutex)); 1340 1341 /* 1342 * Do hash search. 
1343 */ 1344 if (mod_hash_find(state->id_ah_active_hash, 1345 (mod_hash_key_t)mac, (mod_hash_val_t)&ptr) == 0) { 1346 if (hold) 1347 INC_REF(ptr, num); 1348 return (ptr); 1349 } 1350 return (NULL); 1351 } 1352 1353 /* 1354 * This is called by the tx side; if an initialized AH is found in 1355 * the active list, it is locked down and can be used; if no entry 1356 * is found, an async request is queued to do path resolution. 1357 */ 1358 static ibd_ace_t * 1359 ibd_acache_lookup(ibd_state_t *state, ipoib_mac_t *mac, int *err, int numwqe) 1360 { 1361 ibd_ace_t *ptr; 1362 ibd_req_t *req; 1363 1364 /* 1365 * Only attempt to print when we can; in the mdt pattr case, the 1366 * address is not aligned properly. 1367 */ 1368 if (((ulong_t)mac & 3) == 0) { 1369 DPRINT(4, 1370 "ibd_acache_lookup : lookup for %08X:%08X:%08X:%08X:%08X", 1371 htonl(mac->ipoib_qpn), htonl(mac->ipoib_gidpref[0]), 1372 htonl(mac->ipoib_gidpref[1]), htonl(mac->ipoib_gidsuff[0]), 1373 htonl(mac->ipoib_gidsuff[1])); 1374 } 1375 1376 mutex_enter(&state->id_ac_mutex); 1377 1378 if (((ptr = state->id_ac_hot_ace) != NULL) && 1379 (memcmp(&ptr->ac_mac, mac, sizeof (*mac)) == 0)) { 1380 INC_REF(ptr, numwqe); 1381 mutex_exit(&state->id_ac_mutex); 1382 return (ptr); 1383 } 1384 if (((ptr = ibd_acache_find(state, mac, B_TRUE, numwqe)) != NULL)) { 1385 state->id_ac_hot_ace = ptr; 1386 mutex_exit(&state->id_ac_mutex); 1387 return (ptr); 1388 } 1389 1390 /* 1391 * Implementation of a single outstanding async request; if 1392 * the operation is not started yet, queue a request and move 1393 * to ongoing state. Remember in id_ah_addr for which address 1394 * we are queueing the request, in case we need to flag an error; 1395 * Any further requests, for the same or different address, until 1396 * the operation completes, is sent back to GLDv3 to be retried. 1397 * The async thread will update id_ah_op with an error indication 1398 * or will set it to indicate the next look up can start; either 1399 * way, it will mac_tx_update() so that all blocked requests come 1400 * back here. 1401 */ 1402 *err = EAGAIN; 1403 if (state->id_ah_op == IBD_OP_NOTSTARTED) { 1404 req = kmem_cache_alloc(state->id_req_kmc, KM_NOSLEEP); 1405 if (req != NULL) { 1406 /* 1407 * We did not even find the entry; queue a request 1408 * for it. 1409 */ 1410 bcopy(mac, &(req->rq_mac), IPOIB_ADDRL); 1411 ibd_queue_work_slot(state, req, IBD_ASYNC_GETAH); 1412 state->id_ah_op = IBD_OP_ONGOING; 1413 bcopy(mac, &state->id_ah_addr, IPOIB_ADDRL); 1414 } 1415 } else if ((state->id_ah_op != IBD_OP_ONGOING) && 1416 (bcmp(&state->id_ah_addr, mac, IPOIB_ADDRL) == 0)) { 1417 /* 1418 * Check the status of the pathrecord lookup request 1419 * we had queued before. 1420 */ 1421 if (state->id_ah_op == IBD_OP_ERRORED) { 1422 *err = EFAULT; 1423 state->id_ah_error++; 1424 } else { 1425 /* 1426 * IBD_OP_ROUTERED case: We need to send to the 1427 * all-router MCG. If we can find the AH for 1428 * the mcg, the Tx will be attempted. If we 1429 * do not find the AH, we return NORESOURCES 1430 * to retry. 1431 */ 1432 ipoib_mac_t routermac; 1433 1434 (void) ibd_get_allroutergroup(state, mac, &routermac); 1435 ptr = ibd_acache_find(state, &routermac, B_TRUE, 1436 numwqe); 1437 } 1438 state->id_ah_op = IBD_OP_NOTSTARTED; 1439 } else if ((state->id_ah_op != IBD_OP_ONGOING) && 1440 (bcmp(&state->id_ah_addr, mac, IPOIB_ADDRL) != 0)) { 1441 /* 1442 * This case can happen when we get a higher band 1443 * packet. 
The easiest way is to reset the state machine 1444 * to accommodate the higher priority packet. 1445 */ 1446 state->id_ah_op = IBD_OP_NOTSTARTED; 1447 } 1448 mutex_exit(&state->id_ac_mutex); 1449 1450 return (ptr); 1451 } 1452 1453 /* 1454 * Grab a not-currently-in-use AH/PathRecord from the active 1455 * list to recycle to a new destination. Only the async thread 1456 * executes this code. 1457 */ 1458 static ibd_ace_t * 1459 ibd_acache_get_unref(ibd_state_t *state) 1460 { 1461 ibd_ace_t *ptr = list_tail(&state->id_ah_active); 1462 boolean_t try_rc_chan_recycle = B_FALSE; 1463 1464 ASSERT(mutex_owned(&state->id_ac_mutex)); 1465 1466 /* 1467 * Do plain linear search. 1468 */ 1469 while (ptr != NULL) { 1470 /* 1471 * Note that it is possible that the "cycle" bit 1472 * is set on the AH w/o any reference count. The 1473 * mcg must have been deleted, and the tx cleanup 1474 * just decremented the reference count to 0, but 1475 * hasn't gotten around to grabbing the id_ac_mutex 1476 * to move the AH into the free list. 1477 */ 1478 if (GET_REF(ptr) == 0) { 1479 if (ptr->ac_chan != NULL) { 1480 ASSERT(state->id_enable_rc == B_TRUE); 1481 if (!try_rc_chan_recycle) { 1482 try_rc_chan_recycle = B_TRUE; 1483 ibd_rc_signal_ace_recycle(state, ptr); 1484 } 1485 } else { 1486 IBD_ACACHE_PULLOUT_ACTIVE(state, ptr); 1487 break; 1488 } 1489 } 1490 ptr = list_prev(&state->id_ah_active, ptr); 1491 } 1492 return (ptr); 1493 } 1494 1495 /* 1496 * Invoked to clean up AH from active list in case of multicast 1497 * disable and to handle sendonly memberships during mcg traps. 1498 * And for port up processing for multicast and unicast AHs. 1499 * Normally, the AH is taken off the active list, and put into 1500 * the free list to be recycled for a new destination. In case 1501 * Tx requests on the AH have not completed yet, the AH is marked 1502 * for reaping (which will put the AH on the free list) once the Tx's 1503 * complete; in this case, depending on the "force" input, we take 1504 * out the AH from the active list right now, or leave it also for 1505 * the reap operation. Returns TRUE if the AH is taken off the active 1506 * list (and either put into the free list right now, or arranged for 1507 * later), FALSE otherwise. 1508 */ 1509 boolean_t 1510 ibd_acache_recycle(ibd_state_t *state, ipoib_mac_t *acmac, boolean_t force) 1511 { 1512 ibd_ace_t *acactive; 1513 boolean_t ret = B_TRUE; 1514 1515 ASSERT(mutex_owned(&state->id_ac_mutex)); 1516 1517 if ((acactive = ibd_acache_find(state, acmac, B_FALSE, 0)) != NULL) { 1518 1519 /* 1520 * Note that the AH might already have the cycle bit set 1521 * on it; this might happen if sequences of multicast 1522 * enables and disables are coming so fast, that posted 1523 * Tx's to the mcg have not completed yet, and the cycle 1524 * bit is set successively by each multicast disable. 1525 */ 1526 if (SET_CYCLE_IF_REF(acactive)) { 1527 if (!force) { 1528 /* 1529 * The ace is kept on the active list, further 1530 * Tx's can still grab a reference on it; the 1531 * ace is reaped when all pending Tx's 1532 * referencing the AH complete. 1533 */ 1534 ret = B_FALSE; 1535 } else { 1536 /* 1537 * In the mcg trap case, we always pull the 1538 * AH from the active list. And also the port 1539 * up multi/unicast case. 
1540 */ 1541 ASSERT(acactive->ac_chan == NULL); 1542 IBD_ACACHE_PULLOUT_ACTIVE(state, acactive); 1543 acactive->ac_mce = NULL; 1544 } 1545 } else { 1546 /* 1547 * Determined the ref count is 0, thus reclaim 1548 * immediately after pulling out the ace from 1549 * the active list. 1550 */ 1551 ASSERT(acactive->ac_chan == NULL); 1552 IBD_ACACHE_PULLOUT_ACTIVE(state, acactive); 1553 acactive->ac_mce = NULL; 1554 IBD_ACACHE_INSERT_FREE(state, acactive); 1555 } 1556 1557 } 1558 return (ret); 1559 } 1560 1561 /* 1562 * Helper function for async path record lookup. If we are trying to 1563 * Tx to a MCG, check our membership, possibly trying to join the 1564 * group if required. If that fails, try to send the packet to the 1565 * all router group (indicated by the redirect output), pointing 1566 * the input mac address to the router mcg address. 1567 */ 1568 static ibd_mce_t * 1569 ibd_async_mcache(ibd_state_t *state, ipoib_mac_t *mac, boolean_t *redirect) 1570 { 1571 ib_gid_t mgid; 1572 ibd_mce_t *mce; 1573 ipoib_mac_t routermac; 1574 1575 *redirect = B_FALSE; 1576 ibd_n2h_gid(mac, &mgid); 1577 1578 /* 1579 * Check the FullMember+SendOnlyNonMember list. 1580 * Since we are the only one who manipulates the 1581 * id_mc_full list, no locks are needed. 1582 */ 1583 mce = IBD_MCACHE_FIND_FULL(state, mgid); 1584 if (mce != NULL) { 1585 DPRINT(4, "ibd_async_mcache : already joined to group"); 1586 return (mce); 1587 } 1588 1589 /* 1590 * Not found; try to join(SendOnlyNonMember) and attach. 1591 */ 1592 DPRINT(4, "ibd_async_mcache : not joined to group"); 1593 if ((mce = ibd_join_group(state, mgid, IB_MC_JSTATE_SEND_ONLY_NON)) != 1594 NULL) { 1595 DPRINT(4, "ibd_async_mcache : nonmem joined to group"); 1596 return (mce); 1597 } 1598 1599 /* 1600 * MCGroup not present; try to join the all-router group. If 1601 * any of the following steps succeed, we will be redirecting 1602 * to the all router group. 1603 */ 1604 DPRINT(4, "ibd_async_mcache : nonmem join failed"); 1605 if (!ibd_get_allroutergroup(state, mac, &routermac)) 1606 return (NULL); 1607 *redirect = B_TRUE; 1608 ibd_n2h_gid(&routermac, &mgid); 1609 bcopy(&routermac, mac, IPOIB_ADDRL); 1610 DPRINT(4, "ibd_async_mcache : router mgid : %016llx:%016llx\n", 1611 mgid.gid_prefix, mgid.gid_guid); 1612 1613 /* 1614 * Are we already joined to the router group? 1615 */ 1616 if ((mce = IBD_MCACHE_FIND_FULL(state, mgid)) != NULL) { 1617 DPRINT(4, "ibd_async_mcache : using already joined router" 1618 "group\n"); 1619 return (mce); 1620 } 1621 1622 /* 1623 * Can we join(SendOnlyNonMember) the router group? 1624 */ 1625 DPRINT(4, "ibd_async_mcache : attempting join to router grp"); 1626 if ((mce = ibd_join_group(state, mgid, IB_MC_JSTATE_SEND_ONLY_NON)) != 1627 NULL) { 1628 DPRINT(4, "ibd_async_mcache : joined to router grp"); 1629 return (mce); 1630 } 1631 1632 return (NULL); 1633 } 1634 1635 /* 1636 * Async path record lookup code. 1637 */ 1638 static void 1639 ibd_async_acache(ibd_state_t *state, ipoib_mac_t *mac) 1640 { 1641 ibd_ace_t *ce; 1642 ibd_mce_t *mce = NULL; 1643 ibt_path_attr_t path_attr; 1644 ibt_path_info_t path_info; 1645 ib_gid_t destgid; 1646 char ret = IBD_OP_NOTSTARTED; 1647 1648 DPRINT(4, "ibd_async_acache : %08X:%08X:%08X:%08X:%08X", 1649 htonl(mac->ipoib_qpn), htonl(mac->ipoib_gidpref[0]), 1650 htonl(mac->ipoib_gidpref[1]), htonl(mac->ipoib_gidsuff[0]), 1651 htonl(mac->ipoib_gidsuff[1])); 1652 1653 /* 1654 * Check whether we are trying to transmit to a MCG. 1655 * In that case, we need to make sure we are a member of 1656 * the MCG. 
1657 */ 1658 if (mac->ipoib_qpn == htonl(IB_MC_QPN)) { 1659 boolean_t redirected; 1660 1661 /* 1662 * If we can not find or join the group or even 1663 * redirect, error out. 1664 */ 1665 if ((mce = ibd_async_mcache(state, mac, &redirected)) == 1666 NULL) { 1667 state->id_ah_op = IBD_OP_ERRORED; 1668 return; 1669 } 1670 1671 /* 1672 * If we got redirected, we need to determine whether 1673 * the AH for the new mcg is in the cache already, and 1674 * not pull it in then; otherwise proceed to get the 1675 * path for the new mcg. There is no guarantee that 1676 * if the AH is currently in the cache, it will still be 1677 * there when we look in ibd_acache_lookup(), but that's 1678 * okay, we will come back here. 1679 */ 1680 if (redirected) { 1681 ret = IBD_OP_ROUTERED; 1682 DPRINT(4, "ibd_async_acache : redirected to " 1683 "%08X:%08X:%08X:%08X:%08X", 1684 htonl(mac->ipoib_qpn), htonl(mac->ipoib_gidpref[0]), 1685 htonl(mac->ipoib_gidpref[1]), 1686 htonl(mac->ipoib_gidsuff[0]), 1687 htonl(mac->ipoib_gidsuff[1])); 1688 1689 mutex_enter(&state->id_ac_mutex); 1690 if (ibd_acache_find(state, mac, B_FALSE, 0) != NULL) { 1691 state->id_ah_op = IBD_OP_ROUTERED; 1692 mutex_exit(&state->id_ac_mutex); 1693 DPRINT(4, "ibd_async_acache : router AH found"); 1694 return; 1695 } 1696 mutex_exit(&state->id_ac_mutex); 1697 } 1698 } 1699 1700 /* 1701 * Get an AH from the free list. 1702 */ 1703 mutex_enter(&state->id_ac_mutex); 1704 if ((ce = IBD_ACACHE_GET_FREE(state)) == NULL) { 1705 /* 1706 * No free ones; try to grab an unreferenced active 1707 * one. Maybe we need to make the active list LRU, 1708 * but that will create more work for Tx callbacks. 1709 * Is there a way of not having to pull out the 1710 * entry from the active list, but just indicate it 1711 * is being recycled? Yes, but that creates one more 1712 * check in the fast lookup path. 1713 */ 1714 if ((ce = ibd_acache_get_unref(state)) == NULL) { 1715 /* 1716 * Pretty serious shortage now. 1717 */ 1718 state->id_ah_op = IBD_OP_NOTSTARTED; 1719 mutex_exit(&state->id_ac_mutex); 1720 DPRINT(10, "ibd_async_acache : failed to find AH " 1721 "slot\n"); 1722 return; 1723 } 1724 /* 1725 * We could check whether ac_mce points to a SendOnly 1726 * member and drop that membership now. Or do it lazily 1727 * at detach time. 1728 */ 1729 ce->ac_mce = NULL; 1730 } 1731 mutex_exit(&state->id_ac_mutex); 1732 ASSERT(ce->ac_mce == NULL); 1733 1734 /* 1735 * Update the entry. 1736 */ 1737 bcopy((char *)mac, &ce->ac_mac, IPOIB_ADDRL); 1738 1739 bzero(&path_info, sizeof (path_info)); 1740 bzero(&path_attr, sizeof (ibt_path_attr_t)); 1741 path_attr.pa_sgid = state->id_sgid; 1742 path_attr.pa_num_dgids = 1; 1743 ibd_n2h_gid(&ce->ac_mac, &destgid); 1744 path_attr.pa_dgids = &destgid; 1745 path_attr.pa_sl = state->id_mcinfo->mc_adds_vect.av_srvl; 1746 if (ibt_get_paths(state->id_ibt_hdl, IBT_PATH_NO_FLAGS, 1747 &path_attr, 1, &path_info, NULL) != IBT_SUCCESS) { 1748 DPRINT(10, "ibd_async_acache : failed in ibt_get_paths"); 1749 goto error; 1750 } 1751 if (ibt_modify_ud_dest(ce->ac_dest, state->id_mcinfo->mc_qkey, 1752 ntohl(ce->ac_mac.ipoib_qpn), 1753 &path_info.pi_prim_cep_path.cep_adds_vect) != IBT_SUCCESS) { 1754 DPRINT(10, "ibd_async_acache : failed in ibt_modify_ud_dest"); 1755 goto error; 1756 } 1757 1758 /* 1759 * mce is set whenever an AH is being associated with a 1760 * MCG; this will come in handy when we leave the MCG. The 1761 * lock protects Tx fastpath from scanning the active list. 
1762 */ 1763 if (mce != NULL) 1764 ce->ac_mce = mce; 1765 1766 /* 1767 * initiate a RC mode connection for unicast address 1768 */ 1769 if (state->id_enable_rc && (mac->ipoib_qpn != htonl(IB_MC_QPN)) && 1770 (htonl(mac->ipoib_qpn) & IBD_MAC_ADDR_RC)) { 1771 ASSERT(ce->ac_chan == NULL); 1772 DPRINT(10, "ibd_async_acache: call " 1773 "ibd_rc_try_connect(ace=%p)", ce); 1774 ibd_rc_try_connect(state, ce, &path_info); 1775 if (ce->ac_chan == NULL) { 1776 DPRINT(10, "ibd_async_acache: fail to setup RC" 1777 " channel"); 1778 state->rc_conn_fail++; 1779 goto error; 1780 } 1781 } 1782 1783 mutex_enter(&state->id_ac_mutex); 1784 IBD_ACACHE_INSERT_ACTIVE(state, ce); 1785 state->id_ah_op = ret; 1786 mutex_exit(&state->id_ac_mutex); 1787 return; 1788 error: 1789 /* 1790 * We might want to drop SendOnly membership here if we 1791 * joined above. The lock protects Tx callbacks inserting 1792 * into the free list. 1793 */ 1794 mutex_enter(&state->id_ac_mutex); 1795 state->id_ah_op = IBD_OP_ERRORED; 1796 IBD_ACACHE_INSERT_FREE(state, ce); 1797 mutex_exit(&state->id_ac_mutex); 1798 } 1799 1800 /* 1801 * While restoring port's presence on the subnet on a port up, it is possible 1802 * that the port goes down again. 1803 */ 1804 static void 1805 ibd_async_link(ibd_state_t *state, ibd_req_t *req) 1806 { 1807 ibd_link_op_t opcode = (ibd_link_op_t)req->rq_ptr; 1808 link_state_t lstate = (opcode == IBD_LINK_DOWN) ? LINK_STATE_DOWN : 1809 LINK_STATE_UP; 1810 ibd_mce_t *mce, *pmce; 1811 ibd_ace_t *ace, *pace; 1812 1813 DPRINT(10, "ibd_async_link(): %d", opcode); 1814 1815 /* 1816 * On a link up, revalidate the link speed/width. No point doing 1817 * this on a link down, since we will be unable to do SA operations, 1818 * defaulting to the lowest speed. Also notice that we update our 1819 * notion of speed before calling mac_link_update(), which will do 1820 * necessary higher level notifications for speed changes. 1821 */ 1822 if ((opcode == IBD_LINK_UP_ABSENT) || (opcode == IBD_LINK_UP)) { 1823 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*state)) 1824 state->id_link_speed = ibd_get_portspeed(state); 1825 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*state)) 1826 } 1827 1828 /* 1829 * Do all the work required to establish our presence on 1830 * the subnet. 1831 */ 1832 if (opcode == IBD_LINK_UP_ABSENT) { 1833 /* 1834 * If in promiscuous mode ... 1835 */ 1836 if (state->id_prom_op == IBD_OP_COMPLETED) { 1837 /* 1838 * Drop all nonmembership. 1839 */ 1840 ibd_async_unsetprom(state); 1841 1842 /* 1843 * Then, try to regain nonmembership to all mcg's. 1844 */ 1845 ibd_async_setprom(state); 1846 1847 } 1848 1849 /* 1850 * Drop all sendonly membership (which also gets rid of the 1851 * AHs); try to reacquire all full membership. 1852 */ 1853 mce = list_head(&state->id_mc_full); 1854 while ((pmce = mce) != NULL) { 1855 mce = list_next(&state->id_mc_full, mce); 1856 if (pmce->mc_jstate == IB_MC_JSTATE_SEND_ONLY_NON) 1857 ibd_leave_group(state, 1858 pmce->mc_info.mc_adds_vect.av_dgid, 1859 IB_MC_JSTATE_SEND_ONLY_NON); 1860 else 1861 ibd_reacquire_group(state, pmce); 1862 } 1863 1864 /* 1865 * Recycle all active AHs to free list (and if there are 1866 * pending posts, make sure they will go into the free list 1867 * once the Tx's complete). Grab the lock to prevent 1868 * concurrent Tx's as well as Tx cleanups. 
1869 */ 1870 mutex_enter(&state->id_ac_mutex); 1871 ace = list_head(&state->id_ah_active); 1872 while ((pace = ace) != NULL) { 1873 boolean_t cycled; 1874 1875 ace = list_next(&state->id_ah_active, ace); 1876 mce = pace->ac_mce; 1877 if (pace->ac_chan != NULL) { 1878 ASSERT(mce == NULL); 1879 ASSERT(state->id_enable_rc == B_TRUE); 1880 if (pace->ac_chan->chan_state == 1881 IBD_RC_STATE_ACT_ESTAB) { 1882 INC_REF(pace, 1); 1883 IBD_ACACHE_PULLOUT_ACTIVE(state, pace); 1884 pace->ac_chan->chan_state = 1885 IBD_RC_STATE_ACT_CLOSING; 1886 ibd_rc_signal_act_close(state, pace); 1887 } else { 1888 state->rc_act_close_simultaneous++; 1889 DPRINT(40, "ibd_async_link: other " 1890 "thread is closing it, ace=%p, " 1891 "ac_chan=%p, chan_state=%d", 1892 pace, pace->ac_chan, 1893 pace->ac_chan->chan_state); 1894 } 1895 } else { 1896 cycled = ibd_acache_recycle(state, 1897 &pace->ac_mac, B_TRUE); 1898 } 1899 /* 1900 * If this is for an mcg, it must be for a fullmember, 1901 * since we got rid of send-only members above when 1902 * processing the mce list. 1903 */ 1904 ASSERT(cycled && ((mce == NULL) || (mce->mc_jstate == 1905 IB_MC_JSTATE_FULL))); 1906 1907 /* 1908 * Check if the fullmember mce needs to be torn down, 1909 * ie whether the DLPI disable has already been done. 1910 * If so, do some of the work of tx_cleanup, namely 1911 * causing leave (which will fail), detach and 1912 * mce-freeing. tx_cleanup will put the AH into free 1913 * list. The reason to duplicate some of this 1914 * tx_cleanup work is because we want to delete the 1915 * AH right now instead of waiting for tx_cleanup, to 1916 * force subsequent Tx's to reacquire an AH. 1917 */ 1918 if ((mce != NULL) && (mce->mc_fullreap)) 1919 ibd_async_reap_group(state, mce, 1920 mce->mc_info.mc_adds_vect.av_dgid, 1921 mce->mc_jstate); 1922 } 1923 mutex_exit(&state->id_ac_mutex); 1924 } 1925 1926 /* 1927 * mac handle is guaranteed to exist since driver does ibt_close_hca() 1928 * (which stops further events from being delivered) before 1929 * mac_unregister(). At this point, it is guaranteed that mac_register 1930 * has already been done. 1931 */ 1932 mutex_enter(&state->id_link_mutex); 1933 state->id_link_state = lstate; 1934 mac_link_update(state->id_mh, lstate); 1935 mutex_exit(&state->id_link_mutex); 1936 1937 ibd_async_done(state); 1938 } 1939 1940 /* 1941 * Check the pkey table to see if we can find the pkey we're looking for. 1942 * Set the pkey index in 'pkix' if found. Return 0 on success and -1 on 1943 * failure. 1944 */ 1945 static int 1946 ibd_locate_pkey(ib_pkey_t *pkey_tbl, uint16_t pkey_tbl_sz, ib_pkey_t pkey, 1947 uint16_t *pkix) 1948 { 1949 uint16_t ndx; 1950 1951 ASSERT(pkix != NULL); 1952 1953 for (ndx = 0; ndx < pkey_tbl_sz; ndx++) { 1954 if (pkey_tbl[ndx] == pkey) { 1955 *pkix = ndx; 1956 return (0); 1957 } 1958 } 1959 return (-1); 1960 } 1961 1962 /* 1963 * When the link is notified up, we need to do a few things, based 1964 * on the port's current p_init_type_reply claiming a reinit has been 1965 * done or not. The reinit steps are: 1966 * 1. If in InitTypeReply, NoLoadReply == PreserveContentReply == 0, verify 1967 * the old Pkey and GID0 are correct. 1968 * 2. Register for mcg traps (already done by ibmf). 1969 * 3. If PreservePresenceReply indicates the SM has restored port's presence 1970 * in subnet, nothing more to do. Else go to next steps (on async daemon). 1971 * 4. Give up all sendonly memberships. 1972 * 5. Acquire all full memberships. 1973 * 6. In promiscuous mode, acquire all non memberships. 1974 * 7. 
Recycle all AHs to free list. 1975 */ 1976 static void 1977 ibd_link_mod(ibd_state_t *state, ibt_async_code_t code) 1978 { 1979 ibt_hca_portinfo_t *port_infop = NULL; 1980 ibt_status_t ibt_status; 1981 uint_t psize, port_infosz; 1982 ibd_link_op_t opcode; 1983 ibd_req_t *req; 1984 link_state_t new_link_state = LINK_STATE_UP; 1985 uint8_t itreply; 1986 uint16_t pkix; 1987 int ret; 1988 1989 /* 1990 * Let's not race with a plumb or an unplumb; if we detect a 1991 * pkey relocation event later on here, we may have to restart. 1992 */ 1993 ibd_set_mac_progress(state, IBD_DRV_RESTART_IN_PROGRESS); 1994 1995 mutex_enter(&state->id_link_mutex); 1996 1997 /* 1998 * If the init code in ibd_m_start hasn't yet set up the 1999 * pkey/gid, nothing to do; that code will set the link state. 2000 */ 2001 if (state->id_link_state == LINK_STATE_UNKNOWN) { 2002 mutex_exit(&state->id_link_mutex); 2003 goto link_mod_return; 2004 } 2005 2006 /* 2007 * If this routine was called in response to a port down event, 2008 * we just need to see if this should be informed. 2009 */ 2010 if (code == IBT_ERROR_PORT_DOWN) { 2011 new_link_state = LINK_STATE_DOWN; 2012 goto update_link_state; 2013 } 2014 2015 /* 2016 * If it's not a port down event we've received, try to get the port 2017 * attributes first. If we fail here, the port is as good as down. 2018 * Otherwise, if the link went down by the time the handler gets 2019 * here, give up - we cannot even validate the pkey/gid since those 2020 * are not valid and this is as bad as a port down anyway. 2021 */ 2022 ibt_status = ibt_query_hca_ports(state->id_hca_hdl, state->id_port, 2023 &port_infop, &psize, &port_infosz); 2024 if ((ibt_status != IBT_SUCCESS) || (psize != 1) || 2025 (port_infop->p_linkstate != IBT_PORT_ACTIVE)) { 2026 new_link_state = LINK_STATE_DOWN; 2027 goto update_link_state; 2028 } 2029 2030 /* 2031 * Check the SM InitTypeReply flags. If both NoLoadReply and 2032 * PreserveContentReply are 0, we don't know anything about the 2033 * data loaded into the port attributes, so we need to verify 2034 * if gid0 and pkey are still valid. 2035 */ 2036 itreply = port_infop->p_init_type_reply; 2037 if (((itreply & SM_INIT_TYPE_REPLY_NO_LOAD_REPLY) == 0) && 2038 ((itreply & SM_INIT_TYPE_PRESERVE_CONTENT_REPLY) == 0)) { 2039 /* 2040 * Check to see if the subnet part of GID0 has changed. If 2041 * not, check the simple case first to see if the pkey 2042 * index is the same as before; finally check to see if the 2043 * pkey has been relocated to a different index in the table. 2044 */ 2045 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(state->id_sgid)) 2046 if (bcmp(port_infop->p_sgid_tbl, 2047 &state->id_sgid, sizeof (ib_gid_t)) != 0) { 2048 2049 new_link_state = LINK_STATE_DOWN; 2050 2051 } else if (port_infop->p_pkey_tbl[state->id_pkix] == 2052 state->id_pkey) { 2053 2054 new_link_state = LINK_STATE_UP; 2055 2056 } else if (ibd_locate_pkey(port_infop->p_pkey_tbl, 2057 port_infop->p_pkey_tbl_sz, state->id_pkey, &pkix) == 0) { 2058 2059 ibt_free_portinfo(port_infop, port_infosz); 2060 mutex_exit(&state->id_link_mutex); 2061 2062 /* 2063 * Currently a restart is required if our pkey has moved 2064 * in the pkey table. If we get the ibt_recycle_ud() to 2065 * work as documented (expected), we may be able to 2066 * avoid a complete restart. Note that we've already 2067 * marked both the start and stop 'in-progress' flags, 2068 * so it is ok to go ahead and do this restart. 
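 *
 * Recapping the outcomes of the checks in this block (sketch; the
 * code is authoritative):
 *
 *	port GID0 changed                -> treat the link as down
 *	pkey still at state->id_pkix     -> plain link up
 *	pkey found at a different index  -> full restart, below
 *	pkey no longer in the table      -> treat the link as down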
2069 */ 2070 (void) ibd_undo_start(state, LINK_STATE_DOWN); 2071 if ((ret = ibd_start(state)) != 0) { 2072 DPRINT(10, "ibd_restart: cannot restart, " 2073 "ret=%d", ret); 2074 } 2075 2076 goto link_mod_return; 2077 } else { 2078 new_link_state = LINK_STATE_DOWN; 2079 } 2080 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(state->id_sgid)) 2081 } 2082 2083 update_link_state: 2084 if (port_infop) { 2085 ibt_free_portinfo(port_infop, port_infosz); 2086 } 2087 2088 /* 2089 * If the old state is the same as the new state, nothing to do 2090 */ 2091 if (state->id_link_state == new_link_state) { 2092 mutex_exit(&state->id_link_mutex); 2093 goto link_mod_return; 2094 } 2095 2096 /* 2097 * Ok, so there was a link state change; see if it's safe to ask 2098 * the async thread to do the work 2099 */ 2100 if (!ibd_async_safe(state)) { 2101 state->id_link_state = new_link_state; 2102 mutex_exit(&state->id_link_mutex); 2103 goto link_mod_return; 2104 } 2105 2106 mutex_exit(&state->id_link_mutex); 2107 2108 /* 2109 * If we're reporting a link up, check InitTypeReply to see if 2110 * the SM has ensured that the port's presence in mcg, traps, 2111 * etc. is intact. 2112 */ 2113 if (new_link_state == LINK_STATE_DOWN) { 2114 opcode = IBD_LINK_DOWN; 2115 } else { 2116 if ((itreply & SM_INIT_TYPE_PRESERVE_PRESENCE_REPLY) == 2117 SM_INIT_TYPE_PRESERVE_PRESENCE_REPLY) { 2118 opcode = IBD_LINK_UP; 2119 } else { 2120 opcode = IBD_LINK_UP_ABSENT; 2121 } 2122 } 2123 2124 /* 2125 * Queue up a request for ibd_async_link() to handle this link 2126 * state change event 2127 */ 2128 req = kmem_cache_alloc(state->id_req_kmc, KM_SLEEP); 2129 req->rq_ptr = (void *)opcode; 2130 ibd_queue_work_slot(state, req, IBD_ASYNC_LINK); 2131 2132 link_mod_return: 2133 ibd_clr_mac_progress(state, IBD_DRV_RESTART_IN_PROGRESS); 2134 } 2135 2136 /* 2137 * For the port up/down events, IBTL guarantees there will not be concurrent 2138 * invocations of the handler. IBTL might coalesce link transition events, 2139 * and not invoke the handler for _each_ up/down transition, but it will 2140 * invoke the handler with last known state 2141 */ 2142 static void 2143 ibd_async_handler(void *clnt_private, ibt_hca_hdl_t hca_hdl, 2144 ibt_async_code_t code, ibt_async_event_t *event) 2145 { 2146 ibd_state_t *state = (ibd_state_t *)clnt_private; 2147 2148 switch (code) { 2149 case IBT_ERROR_CATASTROPHIC_CHAN: 2150 ibd_print_warn(state, "catastrophic channel error"); 2151 break; 2152 case IBT_ERROR_CQ: 2153 ibd_print_warn(state, "completion queue error"); 2154 break; 2155 case IBT_PORT_CHANGE_EVENT: 2156 /* 2157 * Events will be delivered to all instances that have 2158 * done ibt_open_hca() but not yet done ibt_close_hca(). 2159 * Only need to do work for our port; IBTF will deliver 2160 * events for other ports on the hca we have ibt_open_hca'ed 2161 * too. Note that id_port is initialized in ibd_attach() 2162 * before we do an ibt_open_hca() in ibd_attach(). 2163 */ 2164 ASSERT(state->id_hca_hdl == hca_hdl); 2165 if (state->id_port != event->ev_port) 2166 break; 2167 2168 if ((event->ev_port_flags & IBT_PORT_CHANGE_PKEY) == 2169 IBT_PORT_CHANGE_PKEY) { 2170 ibd_link_mod(state, code); 2171 } 2172 break; 2173 case IBT_ERROR_PORT_DOWN: 2174 case IBT_CLNT_REREG_EVENT: 2175 case IBT_EVENT_PORT_UP: 2176 /* 2177 * Events will be delivered to all instances that have 2178 * done ibt_open_hca() but not yet done ibt_close_hca(). 2179 * Only need to do work for our port; IBTF will deliver 2180 * events for other ports on the hca we have ibt_open_hca'ed 2181 * too. 
Note that id_port is initialized in ibd_attach() 2182 * before we do an ibt_open_hca() in ibd_attach(). 2183 */ 2184 ASSERT(state->id_hca_hdl == hca_hdl); 2185 if (state->id_port != event->ev_port) 2186 break; 2187 2188 ibd_link_mod(state, code); 2189 break; 2190 2191 case IBT_HCA_ATTACH_EVENT: 2192 case IBT_HCA_DETACH_EVENT: 2193 /* 2194 * When a new card is plugged to the system, attach_event is 2195 * invoked. Additionally, a cfgadm needs to be run to make the 2196 * card known to the system, and an ifconfig needs to be run to 2197 * plumb up any ibd interfaces on the card. In the case of card 2198 * unplug, a cfgadm is run that will trigger any RCM scripts to 2199 * unplumb the ibd interfaces on the card; when the card is 2200 * actually unplugged, the detach_event is invoked; 2201 * additionally, if any ibd instances are still active on the 2202 * card (eg there were no associated RCM scripts), driver's 2203 * detach routine is invoked. 2204 */ 2205 break; 2206 default: 2207 break; 2208 } 2209 } 2210 2211 static int 2212 ibd_register_mac(ibd_state_t *state, dev_info_t *dip) 2213 { 2214 mac_register_t *macp; 2215 int ret; 2216 2217 if ((macp = mac_alloc(MAC_VERSION)) == NULL) { 2218 DPRINT(10, "ibd_register_mac: mac_alloc() failed"); 2219 return (DDI_FAILURE); 2220 } 2221 2222 /* 2223 * Note that when we register with mac during attach, we don't 2224 * have the id_macaddr yet, so we'll simply be registering a 2225 * zero macaddr that we'll overwrite later during plumb (in 2226 * ibd_m_start()). Similar is the case with id_mtu - we'll 2227 * update the mac layer with the correct mtu during plumb. 2228 */ 2229 macp->m_type_ident = MAC_PLUGIN_IDENT_IB; 2230 macp->m_driver = state; 2231 macp->m_dip = dip; 2232 macp->m_src_addr = (uint8_t *)&state->id_macaddr; 2233 macp->m_callbacks = &ibd_m_callbacks; 2234 macp->m_min_sdu = 0; 2235 if (state->id_enable_rc) { 2236 macp->m_max_sdu = state->rc_mtu - IPOIB_HDRSIZE; 2237 } else { 2238 macp->m_max_sdu = IBD_DEF_MAX_SDU; 2239 } 2240 2241 /* 2242 * Register ourselves with the GLDv3 interface 2243 */ 2244 if ((ret = mac_register(macp, &state->id_mh)) != 0) { 2245 mac_free(macp); 2246 DPRINT(10, 2247 "ibd_register_mac: mac_register() failed, ret=%d", ret); 2248 return (DDI_FAILURE); 2249 } 2250 2251 mac_free(macp); 2252 return (DDI_SUCCESS); 2253 } 2254 2255 static int 2256 ibd_record_capab(ibd_state_t *state, dev_info_t *dip) 2257 { 2258 ibt_hca_attr_t hca_attrs; 2259 ibt_status_t ibt_status; 2260 2261 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*state)) 2262 2263 /* 2264 * Query the HCA and fetch its attributes 2265 */ 2266 ibt_status = ibt_query_hca(state->id_hca_hdl, &hca_attrs); 2267 ASSERT(ibt_status == IBT_SUCCESS); 2268 2269 /* 2270 * 1. Set the Hardware Checksum capability. Currently we only consider 2271 * full checksum offload. 2272 */ 2273 if (state->id_enable_rc) { 2274 state->id_hwcksum_capab = 0; 2275 } else { 2276 if ((hca_attrs.hca_flags & IBT_HCA_CKSUM_FULL) 2277 == IBT_HCA_CKSUM_FULL) { 2278 state->id_hwcksum_capab = IBT_HCA_CKSUM_FULL; 2279 } 2280 } 2281 2282 /* 2283 * 2. 
Set LSO policy, capability and maximum length 2284 */ 2285 if (state->id_enable_rc) { 2286 state->id_lso_policy = B_FALSE; 2287 state->id_lso_capable = B_FALSE; 2288 state->id_lso_maxlen = 0; 2289 } else { 2290 if (ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS 2291 |DDI_PROP_NOTPROM, IBD_PROP_LSO_POLICY, 1)) { 2292 state->id_lso_policy = B_TRUE; 2293 } else { 2294 state->id_lso_policy = B_FALSE; 2295 } 2296 2297 if (hca_attrs.hca_max_lso_size > 0) { 2298 state->id_lso_capable = B_TRUE; 2299 if (hca_attrs.hca_max_lso_size > IBD_LSO_MAXLEN) 2300 state->id_lso_maxlen = IBD_LSO_MAXLEN; 2301 else 2302 state->id_lso_maxlen = 2303 hca_attrs.hca_max_lso_size; 2304 } else { 2305 state->id_lso_capable = B_FALSE; 2306 state->id_lso_maxlen = 0; 2307 } 2308 } 2309 2310 /* 2311 * 3. Set Reserved L_Key capability 2312 */ 2313 if ((hca_attrs.hca_flags2 & IBT_HCA2_RES_LKEY) == IBT_HCA2_RES_LKEY) { 2314 state->id_hca_res_lkey_capab = 1; 2315 state->id_res_lkey = hca_attrs.hca_reserved_lkey; 2316 state->rc_enable_iov_map = B_TRUE; 2317 } else { 2318 /* If no reserved lkey, we will not use ibt_map_mem_iov */ 2319 state->rc_enable_iov_map = B_FALSE; 2320 } 2321 2322 /* 2323 * 4. Set maximum sqseg value after checking to see if extended sgl 2324 * size information is provided by the hca 2325 */ 2326 if (hca_attrs.hca_flags & IBT_HCA_WQE_SIZE_INFO) { 2327 state->id_max_sqseg = hca_attrs.hca_ud_send_sgl_sz; 2328 state->rc_tx_max_sqseg = hca_attrs.hca_conn_send_sgl_sz; 2329 } else { 2330 state->id_max_sqseg = hca_attrs.hca_max_sgl; 2331 state->rc_tx_max_sqseg = hca_attrs.hca_max_sgl; 2332 } 2333 if (state->id_max_sqseg > IBD_MAX_SQSEG) { 2334 state->id_max_sqseg = IBD_MAX_SQSEG; 2335 } else if (state->id_max_sqseg < IBD_MAX_SQSEG) { 2336 ibd_print_warn(state, "Set #sgl = %d instead of default %d", 2337 state->id_max_sqseg, IBD_MAX_SQSEG); 2338 } 2339 if (state->rc_tx_max_sqseg > IBD_MAX_SQSEG) { 2340 state->rc_tx_max_sqseg = IBD_MAX_SQSEG; 2341 } else if (state->rc_tx_max_sqseg < IBD_MAX_SQSEG) { 2342 ibd_print_warn(state, "RC mode: Set #sgl = %d instead of " 2343 "default %d", state->rc_tx_max_sqseg, IBD_MAX_SQSEG); 2344 } 2345 2346 /* 2347 * Translating the virtual address regions into physical regions 2348 * for using the Reserved LKey feature results in a wr sgl that 2349 * is a little longer. Since failing ibt_map_mem_iov() is costly, 2350 * we'll fix a high-water mark (65%) for when we should stop. 2351 */ 2352 state->id_max_sqseg_hiwm = (state->id_max_sqseg * 65) / 100; 2353 state->rc_max_sqseg_hiwm = (state->rc_tx_max_sqseg * 65) / 100; 2354 2355 /* 2356 * 5. 
Set number of recv and send wqes after checking hca maximum 2357 * channel size 2358 */ 2359 if (hca_attrs.hca_max_chan_sz < IBD_NUM_RWQE) { 2360 state->id_num_rwqe = hca_attrs.hca_max_chan_sz; 2361 } else { 2362 state->id_num_rwqe = IBD_NUM_RWQE; 2363 } 2364 state->id_rx_bufs_outstanding_limit = state->id_num_rwqe - IBD_RWQE_MIN; 2365 if (hca_attrs.hca_max_chan_sz < IBD_NUM_SWQE) { 2366 state->id_num_swqe = hca_attrs.hca_max_chan_sz; 2367 } else { 2368 state->id_num_swqe = IBD_NUM_SWQE; 2369 } 2370 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*state)) 2371 2372 return (DDI_SUCCESS); 2373 } 2374 2375 static int 2376 ibd_unattach(ibd_state_t *state, dev_info_t *dip) 2377 { 2378 int instance; 2379 uint32_t progress = state->id_mac_state; 2380 ibt_status_t ret; 2381 2382 if (atomic_add_32_nv(&state->id_rx_list.dl_bufs_outstanding, 0) != 0) { 2383 cmn_err(CE_CONT, "ibd_detach: failed: rx bufs outstanding\n"); 2384 return (DDI_FAILURE); 2385 } 2386 2387 if (state->rc_srq_rwqe_list.dl_bufs_outstanding != 0) { 2388 cmn_err(CE_CONT, "ibd_detach: failed: srq bufs outstanding\n"); 2389 return (DDI_FAILURE); 2390 } 2391 2392 /* make sure rx resources are freed */ 2393 ibd_free_rx_rsrcs(state); 2394 2395 if (progress & IBD_DRV_RC_SRQ_ALLOCD) { 2396 ASSERT(state->id_enable_rc); 2397 ibd_rc_fini_srq_list(state); 2398 state->id_mac_state &= (~IBD_DRV_RC_SRQ_ALLOCD); 2399 } 2400 2401 if (progress & IBD_DRV_MAC_REGISTERED) { 2402 (void) mac_unregister(state->id_mh); 2403 state->id_mac_state &= (~IBD_DRV_MAC_REGISTERED); 2404 } 2405 2406 if (progress & IBD_DRV_PD_ALLOCD) { 2407 if ((ret = ibt_free_pd(state->id_hca_hdl, 2408 state->id_pd_hdl)) != IBT_SUCCESS) { 2409 ibd_print_warn(state, "failed to free " 2410 "protection domain, ret=%d", ret); 2411 } 2412 state->id_pd_hdl = NULL; 2413 state->id_mac_state &= (~IBD_DRV_PD_ALLOCD); 2414 } 2415 2416 if (progress & IBD_DRV_HCA_OPENED) { 2417 if ((ret = ibt_close_hca(state->id_hca_hdl)) != 2418 IBT_SUCCESS) { 2419 ibd_print_warn(state, "failed to close " 2420 "HCA device, ret=%d", ret); 2421 } 2422 state->id_hca_hdl = NULL; 2423 state->id_mac_state &= (~IBD_DRV_HCA_OPENED); 2424 } 2425 2426 mutex_enter(&ibd_gstate.ig_mutex); 2427 if (progress & IBD_DRV_IBTL_ATTACH_DONE) { 2428 if ((ret = ibt_detach(state->id_ibt_hdl)) != 2429 IBT_SUCCESS) { 2430 ibd_print_warn(state, 2431 "ibt_detach() failed, ret=%d", ret); 2432 } 2433 state->id_ibt_hdl = NULL; 2434 state->id_mac_state &= (~IBD_DRV_IBTL_ATTACH_DONE); 2435 ibd_gstate.ig_ibt_hdl_ref_cnt--; 2436 } 2437 if ((ibd_gstate.ig_ibt_hdl_ref_cnt == 0) && 2438 (ibd_gstate.ig_ibt_hdl != NULL)) { 2439 if ((ret = ibt_detach(ibd_gstate.ig_ibt_hdl)) != 2440 IBT_SUCCESS) { 2441 ibd_print_warn(state, "ibt_detach(): global " 2442 "failed, ret=%d", ret); 2443 } 2444 ibd_gstate.ig_ibt_hdl = NULL; 2445 } 2446 mutex_exit(&ibd_gstate.ig_mutex); 2447 2448 if (progress & IBD_DRV_TXINTR_ADDED) { 2449 ddi_remove_softintr(state->id_tx); 2450 state->id_tx = NULL; 2451 state->id_mac_state &= (~IBD_DRV_TXINTR_ADDED); 2452 } 2453 2454 if (progress & IBD_DRV_RXINTR_ADDED) { 2455 ddi_remove_softintr(state->id_rx); 2456 state->id_rx = NULL; 2457 state->id_mac_state &= (~IBD_DRV_RXINTR_ADDED); 2458 } 2459 2460 #ifdef DEBUG 2461 if (progress & IBD_DRV_RC_PRIVATE_STATE) { 2462 kstat_delete(state->rc_ksp); 2463 state->id_mac_state &= (~IBD_DRV_RC_PRIVATE_STATE); 2464 } 2465 #endif 2466 2467 if (progress & IBD_DRV_STATE_INITIALIZED) { 2468 ibd_state_fini(state); 2469 state->id_mac_state &= (~IBD_DRV_STATE_INITIALIZED); 2470 } 2471 2472 instance = 
ddi_get_instance(dip); 2473 ddi_soft_state_free(ibd_list, instance); 2474 2475 return (DDI_SUCCESS); 2476 } 2477 2478 /* 2479 * Attach device to the IO framework. 2480 */ 2481 static int 2482 ibd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 2483 { 2484 ibd_state_t *state = NULL; 2485 ib_guid_t hca_guid; 2486 int instance; 2487 ibt_status_t ret; 2488 int rv; 2489 2490 /* 2491 * IBD doesn't support suspend/resume 2492 */ 2493 if (cmd != DDI_ATTACH) 2494 return (DDI_FAILURE); 2495 2496 /* 2497 * Allocate softstate structure 2498 */ 2499 instance = ddi_get_instance(dip); 2500 if (ddi_soft_state_zalloc(ibd_list, instance) == DDI_FAILURE) 2501 return (DDI_FAILURE); 2502 state = ddi_get_soft_state(ibd_list, instance); 2503 2504 /* 2505 * Initialize mutexes and condition variables 2506 */ 2507 if (ibd_state_init(state, dip) != DDI_SUCCESS) { 2508 DPRINT(10, "ibd_attach: failed in ibd_state_init()"); 2509 goto attach_fail; 2510 } 2511 state->id_mac_state |= IBD_DRV_STATE_INITIALIZED; 2512 2513 /* 2514 * Allocate rx,tx softintr 2515 */ 2516 if (ibd_rx_softintr == 1) { 2517 if ((rv = ddi_add_softintr(dip, DDI_SOFTINT_LOW, &state->id_rx, 2518 NULL, NULL, ibd_intr, (caddr_t)state)) != DDI_SUCCESS) { 2519 DPRINT(10, "ibd_attach: failed in " 2520 "ddi_add_softintr(id_rx), ret=%d", rv); 2521 goto attach_fail; 2522 } 2523 state->id_mac_state |= IBD_DRV_RXINTR_ADDED; 2524 } 2525 if (ibd_tx_softintr == 1) { 2526 if ((rv = ddi_add_softintr(dip, DDI_SOFTINT_LOW, &state->id_tx, 2527 NULL, NULL, ibd_tx_recycle, 2528 (caddr_t)state)) != DDI_SUCCESS) { 2529 DPRINT(10, "ibd_attach: failed in " 2530 "ddi_add_softintr(id_tx), ret=%d", rv); 2531 goto attach_fail; 2532 } 2533 state->id_mac_state |= IBD_DRV_TXINTR_ADDED; 2534 } 2535 2536 /* 2537 * Obtain IBA P_Key, port number and HCA guid and validate 2538 * them (for P_Key, only full members are allowed as per 2539 * IPoIB specification; neither port number nor HCA guid 2540 * can be zero) 2541 */ 2542 if ((state->id_pkey = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 2543 "port-pkey", IB_PKEY_INVALID_LIMITED)) <= IB_PKEY_INVALID_FULL) { 2544 DPRINT(10, "ibd_attach: port device has wrong partition (0x%x)", 2545 state->id_pkey); 2546 goto attach_fail; 2547 } 2548 if ((state->id_port = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 2549 "port-number", 0)) == 0) { 2550 DPRINT(10, "ibd_attach: invalid port number (%d)", 2551 state->id_port); 2552 goto attach_fail; 2553 } 2554 if ((hca_guid = ddi_prop_get_int64(DDI_DEV_T_ANY, dip, 0, 2555 "hca-guid", 0)) == 0) { 2556 DPRINT(10, "ibd_attach: port hca has invalid guid (0x%llx)", 2557 hca_guid); 2558 goto attach_fail; 2559 } 2560 2561 /* 2562 * Attach to IBTL 2563 */ 2564 mutex_enter(&ibd_gstate.ig_mutex); 2565 if (ibd_gstate.ig_ibt_hdl == NULL) { 2566 if ((ret = ibt_attach(&ibd_clnt_modinfo, dip, state, 2567 &ibd_gstate.ig_ibt_hdl)) != IBT_SUCCESS) { 2568 DPRINT(10, "ibd_attach: global: failed in " 2569 "ibt_attach(), ret=%d", ret); 2570 mutex_exit(&ibd_gstate.ig_mutex); 2571 goto attach_fail; 2572 } 2573 } 2574 if ((ret = ibt_attach(&ibd_clnt_modinfo, dip, state, 2575 &state->id_ibt_hdl)) != IBT_SUCCESS) { 2576 DPRINT(10, "ibd_attach: failed in ibt_attach(), ret=%d", 2577 ret); 2578 mutex_exit(&ibd_gstate.ig_mutex); 2579 goto attach_fail; 2580 } 2581 ibd_gstate.ig_ibt_hdl_ref_cnt++; 2582 mutex_exit(&ibd_gstate.ig_mutex); 2583 state->id_mac_state |= IBD_DRV_IBTL_ATTACH_DONE; 2584 2585 /* 2586 * Open the HCA 2587 */ 2588 if ((ret = ibt_open_hca(state->id_ibt_hdl, hca_guid, 2589 &state->id_hca_hdl)) != IBT_SUCCESS) { 2590 DPRINT(10, 
"ibd_attach: ibt_open_hca() failed, ret=%d", ret); 2591 goto attach_fail; 2592 } 2593 state->id_mac_state |= IBD_DRV_HCA_OPENED; 2594 2595 /* Get RC config before ibd_record_capab */ 2596 ibd_rc_get_conf(state); 2597 2598 #ifdef DEBUG 2599 /* Initialize Driver Counters for Reliable Connected Mode */ 2600 if (state->id_enable_rc) { 2601 if (ibd_rc_init_stats(state) != DDI_SUCCESS) { 2602 DPRINT(10, "ibd_attach: failed in ibd_rc_init_stats"); 2603 goto attach_fail; 2604 } 2605 state->id_mac_state |= IBD_DRV_RC_PRIVATE_STATE; 2606 } 2607 #endif 2608 2609 /* 2610 * Record capabilities 2611 */ 2612 (void) ibd_record_capab(state, dip); 2613 2614 /* 2615 * Allocate a protection domain on the HCA 2616 */ 2617 if ((ret = ibt_alloc_pd(state->id_hca_hdl, IBT_PD_NO_FLAGS, 2618 &state->id_pd_hdl)) != IBT_SUCCESS) { 2619 DPRINT(10, "ibd_attach: ibt_alloc_pd() failed, ret=%d", ret); 2620 goto attach_fail; 2621 } 2622 state->id_mac_state |= IBD_DRV_PD_ALLOCD; 2623 2624 2625 /* 2626 * Register ibd interfaces with the Nemo framework 2627 */ 2628 if (ibd_register_mac(state, dip) != IBT_SUCCESS) { 2629 DPRINT(10, "ibd_attach: failed in ibd_register_mac()"); 2630 goto attach_fail; 2631 } 2632 state->id_mac_state |= IBD_DRV_MAC_REGISTERED; 2633 2634 /* 2635 * We're done with everything we could to make the attach 2636 * succeed. All the buffer allocations and IPoIB broadcast 2637 * group joins are deferred to when the interface instance 2638 * is actually plumbed to avoid wasting memory. 2639 */ 2640 return (DDI_SUCCESS); 2641 2642 attach_fail: 2643 (void) ibd_unattach(state, dip); 2644 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*state)) 2645 return (DDI_FAILURE); 2646 } 2647 2648 /* 2649 * Detach device from the IO framework. 2650 */ 2651 static int 2652 ibd_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 2653 { 2654 ibd_state_t *state; 2655 int instance; 2656 2657 /* 2658 * IBD doesn't support suspend/resume 2659 */ 2660 if (cmd != DDI_DETACH) 2661 return (DDI_FAILURE); 2662 2663 /* 2664 * Get the instance softstate 2665 */ 2666 instance = ddi_get_instance(dip); 2667 state = ddi_get_soft_state(ibd_list, instance); 2668 2669 /* 2670 * Release all resources we're holding still. Note that if we'd 2671 * done ibd_attach(), ibd_m_start() and ibd_m_stop() correctly 2672 * so far, we should find all the flags we need in id_mac_state. 
2673 */ 2674 return (ibd_unattach(state, dip)); 2675 } 2676 2677 /* 2678 * Pre ibt_attach() driver initialization 2679 */ 2680 static int 2681 ibd_state_init(ibd_state_t *state, dev_info_t *dip) 2682 { 2683 char buf[64]; 2684 2685 mutex_init(&state->id_link_mutex, NULL, MUTEX_DRIVER, NULL); 2686 state->id_link_state = LINK_STATE_UNKNOWN; 2687 2688 mutex_init(&state->id_trap_lock, NULL, MUTEX_DRIVER, NULL); 2689 cv_init(&state->id_trap_cv, NULL, CV_DEFAULT, NULL); 2690 state->id_trap_stop = B_TRUE; 2691 state->id_trap_inprog = 0; 2692 2693 mutex_init(&state->id_scq_poll_lock, NULL, MUTEX_DRIVER, NULL); 2694 mutex_init(&state->id_rcq_poll_lock, NULL, MUTEX_DRIVER, NULL); 2695 state->id_dip = dip; 2696 2697 mutex_init(&state->id_sched_lock, NULL, MUTEX_DRIVER, NULL); 2698 2699 mutex_init(&state->id_tx_list.dl_mutex, NULL, MUTEX_DRIVER, NULL); 2700 mutex_init(&state->id_tx_rel_list.dl_mutex, NULL, MUTEX_DRIVER, NULL); 2701 mutex_init(&state->id_txpost_lock, NULL, MUTEX_DRIVER, NULL); 2702 state->id_tx_busy = 0; 2703 mutex_init(&state->id_lso_lock, NULL, MUTEX_DRIVER, NULL); 2704 2705 state->id_rx_list.dl_bufs_outstanding = 0; 2706 state->id_rx_list.dl_cnt = 0; 2707 mutex_init(&state->id_rx_list.dl_mutex, NULL, MUTEX_DRIVER, NULL); 2708 mutex_init(&state->id_rx_free_list.dl_mutex, NULL, MUTEX_DRIVER, NULL); 2709 (void) sprintf(buf, "ibd_req%d", ddi_get_instance(dip)); 2710 state->id_req_kmc = kmem_cache_create(buf, sizeof (ibd_req_t), 2711 0, NULL, NULL, NULL, NULL, NULL, 0); 2712 2713 mutex_init(&state->id_macst_lock, NULL, MUTEX_DRIVER, NULL); 2714 cv_init(&state->id_macst_cv, NULL, CV_DEFAULT, NULL); 2715 2716 /* For Reliable Connected Mode */ 2717 mutex_init(&state->rc_rx_lock, NULL, MUTEX_DRIVER, NULL); 2718 mutex_init(&state->rc_tx_large_bufs_lock, NULL, MUTEX_DRIVER, NULL); 2719 mutex_init(&state->rc_srq_rwqe_list.dl_mutex, NULL, MUTEX_DRIVER, NULL); 2720 mutex_init(&state->rc_srq_free_list.dl_mutex, NULL, MUTEX_DRIVER, NULL); 2721 mutex_init(&state->rc_pass_chan_list.chan_list_mutex, NULL, 2722 MUTEX_DRIVER, NULL); 2723 2724 return (DDI_SUCCESS); 2725 } 2726 2727 /* 2728 * Post ibt_detach() driver deconstruction 2729 */ 2730 static void 2731 ibd_state_fini(ibd_state_t *state) 2732 { 2733 cv_destroy(&state->id_macst_cv); 2734 mutex_destroy(&state->id_macst_lock); 2735 2736 kmem_cache_destroy(state->id_req_kmc); 2737 2738 mutex_destroy(&state->id_rx_list.dl_mutex); 2739 mutex_destroy(&state->id_rx_free_list.dl_mutex); 2740 2741 mutex_destroy(&state->id_txpost_lock); 2742 mutex_destroy(&state->id_tx_list.dl_mutex); 2743 mutex_destroy(&state->id_tx_rel_list.dl_mutex); 2744 mutex_destroy(&state->id_lso_lock); 2745 2746 mutex_destroy(&state->id_sched_lock); 2747 mutex_destroy(&state->id_scq_poll_lock); 2748 mutex_destroy(&state->id_rcq_poll_lock); 2749 2750 cv_destroy(&state->id_trap_cv); 2751 mutex_destroy(&state->id_trap_lock); 2752 mutex_destroy(&state->id_link_mutex); 2753 2754 /* For Reliable Connected Mode */ 2755 mutex_destroy(&state->rc_srq_free_list.dl_mutex); 2756 mutex_destroy(&state->rc_srq_rwqe_list.dl_mutex); 2757 mutex_destroy(&state->rc_pass_chan_list.chan_list_mutex); 2758 mutex_destroy(&state->rc_tx_large_bufs_lock); 2759 mutex_destroy(&state->rc_rx_lock); 2760 } 2761 2762 /* 2763 * Fetch link speed from SA for snmp ifspeed reporting. 
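 *
 * A worked example of the computation below: a 4X DDR link reports
 * IBT_SRATE_20 (20 Gbps signalling), which yields a multiplier of 8,
 * so ifspeed = 8 * 2000000000 = 16 Gbps of data rate once the
 * 8b/10b encoding overhead is accounted for.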
2764 */ 2765 static uint64_t 2766 ibd_get_portspeed(ibd_state_t *state) 2767 { 2768 int ret; 2769 ibt_path_info_t path; 2770 ibt_path_attr_t path_attr; 2771 uint8_t num_paths; 2772 uint64_t ifspeed; 2773 2774 /* 2775 * Due to serdes 8b10b encoding on the wire, 2.5 Gbps on wire 2776 * translates to 2 Gbps data rate. Thus, 1X single data rate is 2777 * 2000000000. Start with that as default. 2778 */ 2779 ifspeed = 2000000000; 2780 2781 bzero(&path_attr, sizeof (path_attr)); 2782 2783 /* 2784 * Get the port speed from Loopback path information. 2785 */ 2786 path_attr.pa_dgids = &state->id_sgid; 2787 path_attr.pa_num_dgids = 1; 2788 path_attr.pa_sgid = state->id_sgid; 2789 2790 if (ibt_get_paths(state->id_ibt_hdl, IBT_PATH_NO_FLAGS, 2791 &path_attr, 1, &path, &num_paths) != IBT_SUCCESS) 2792 goto earlydone; 2793 2794 if (num_paths < 1) 2795 goto earlydone; 2796 2797 /* 2798 * In case SA does not return an expected value, report the default 2799 * speed as 1X. 2800 */ 2801 ret = 1; 2802 switch (path.pi_prim_cep_path.cep_adds_vect.av_srate) { 2803 case IBT_SRATE_2: /* 1X SDR i.e 2.5 Gbps */ 2804 ret = 1; 2805 break; 2806 case IBT_SRATE_10: /* 4X SDR or 1X QDR i.e 10 Gbps */ 2807 ret = 4; 2808 break; 2809 case IBT_SRATE_30: /* 12X SDR i.e 30 Gbps */ 2810 ret = 12; 2811 break; 2812 case IBT_SRATE_5: /* 1X DDR i.e 5 Gbps */ 2813 ret = 2; 2814 break; 2815 case IBT_SRATE_20: /* 4X DDR or 8X SDR i.e 20 Gbps */ 2816 ret = 8; 2817 break; 2818 case IBT_SRATE_40: /* 8X DDR or 4X QDR i.e 40 Gbps */ 2819 ret = 16; 2820 break; 2821 case IBT_SRATE_60: /* 12X DDR i.e 60 Gbps */ 2822 ret = 24; 2823 break; 2824 case IBT_SRATE_80: /* 8X QDR i.e 80 Gbps */ 2825 ret = 32; 2826 break; 2827 case IBT_SRATE_120: /* 12X QDR i.e 120 Gbps */ 2828 ret = 48; 2829 break; 2830 } 2831 2832 ifspeed *= ret; 2833 2834 earlydone: 2835 return (ifspeed); 2836 } 2837 2838 /* 2839 * Search input mcg list (id_mc_full or id_mc_non) for an entry 2840 * representing the input mcg mgid. 2841 */ 2842 static ibd_mce_t * 2843 ibd_mcache_find(ib_gid_t mgid, struct list *mlist) 2844 { 2845 ibd_mce_t *ptr = list_head(mlist); 2846 2847 /* 2848 * Do plain linear search. 2849 */ 2850 while (ptr != NULL) { 2851 if (bcmp(&mgid, &ptr->mc_info.mc_adds_vect.av_dgid, 2852 sizeof (ib_gid_t)) == 0) 2853 return (ptr); 2854 ptr = list_next(mlist, ptr); 2855 } 2856 return (NULL); 2857 } 2858 2859 /* 2860 * Execute IBA JOIN. 2861 */ 2862 static ibt_status_t 2863 ibd_iba_join(ibd_state_t *state, ib_gid_t mgid, ibd_mce_t *mce) 2864 { 2865 ibt_mcg_attr_t mcg_attr; 2866 2867 bzero(&mcg_attr, sizeof (ibt_mcg_attr_t)); 2868 mcg_attr.mc_qkey = state->id_mcinfo->mc_qkey; 2869 mcg_attr.mc_mgid = mgid; 2870 mcg_attr.mc_join_state = mce->mc_jstate; 2871 mcg_attr.mc_scope = state->id_scope; 2872 mcg_attr.mc_pkey = state->id_pkey; 2873 mcg_attr.mc_flow = state->id_mcinfo->mc_adds_vect.av_flow; 2874 mcg_attr.mc_sl = state->id_mcinfo->mc_adds_vect.av_srvl; 2875 mcg_attr.mc_tclass = state->id_mcinfo->mc_adds_vect.av_tclass; 2876 return (ibt_join_mcg(state->id_sgid, &mcg_attr, &mce->mc_info, 2877 NULL, NULL)); 2878 } 2879 2880 /* 2881 * This code JOINs the port in the proper way (depending on the join 2882 * state) so that IBA fabric will forward mcg packets to/from the port. 2883 * It also attaches the QPN to the mcg so it can receive those mcg 2884 * packets. 
This code makes sure not to attach the mcg to the QP if 2885 * that has been previously done due to the mcg being joined with a 2886 * different join state, even though this is not required by SWG_0216, 2887 * refid 3610. 2888 */ 2889 static ibd_mce_t * 2890 ibd_join_group(ibd_state_t *state, ib_gid_t mgid, uint8_t jstate) 2891 { 2892 ibt_status_t ibt_status; 2893 ibd_mce_t *mce, *tmce, *omce = NULL; 2894 boolean_t do_attach = B_TRUE; 2895 2896 DPRINT(2, "ibd_join_group : join_group state %d : %016llx:%016llx\n", 2897 jstate, mgid.gid_prefix, mgid.gid_guid); 2898 2899 /* 2900 * For enable_multicast Full member joins, we need to do some 2901 * extra work. If there is already an mce on the list that 2902 * indicates full membership, that means the membership has 2903 * not yet been dropped (since the disable_multicast was issued) 2904 * because there are pending Tx's to the mcg; in that case, just 2905 * mark the mce not to be reaped when the Tx completion queues 2906 * an async reap operation. 2907 * 2908 * If there is already an mce on the list indicating sendonly 2909 * membership, try to promote to full membership. Be careful 2910 * not to deallocate the old mce, since there might be an AH 2911 * pointing to it; instead, update the old mce with new data 2912 * that tracks the full membership. 2913 */ 2914 if ((jstate == IB_MC_JSTATE_FULL) && ((omce = 2915 IBD_MCACHE_FIND_FULL(state, mgid)) != NULL)) { 2916 if (omce->mc_jstate == IB_MC_JSTATE_FULL) { 2917 ASSERT(omce->mc_fullreap); 2918 omce->mc_fullreap = B_FALSE; 2919 return (omce); 2920 } else { 2921 ASSERT(omce->mc_jstate == IB_MC_JSTATE_SEND_ONLY_NON); 2922 } 2923 } 2924 2925 /* 2926 * Allocate the ibd_mce_t to track this JOIN. 2927 */ 2928 mce = kmem_zalloc(sizeof (ibd_mce_t), KM_SLEEP); 2929 mce->mc_fullreap = B_FALSE; 2930 mce->mc_jstate = jstate; 2931 2932 if ((ibt_status = ibd_iba_join(state, mgid, mce)) != IBT_SUCCESS) { 2933 DPRINT(10, "ibd_join_group : failed ibt_join_mcg() %d", 2934 ibt_status); 2935 kmem_free(mce, sizeof (ibd_mce_t)); 2936 return (NULL); 2937 } 2938 2939 /* 2940 * Is an IBA attach required? Not if the interface is already joined 2941 * to the mcg in a different appropriate join state. 2942 */ 2943 if (jstate == IB_MC_JSTATE_NON) { 2944 tmce = IBD_MCACHE_FIND_FULL(state, mgid); 2945 if ((tmce != NULL) && (tmce->mc_jstate == IB_MC_JSTATE_FULL)) 2946 do_attach = B_FALSE; 2947 } else if (jstate == IB_MC_JSTATE_FULL) { 2948 if (IBD_MCACHE_FIND_NON(state, mgid) != NULL) 2949 do_attach = B_FALSE; 2950 } else { /* jstate == IB_MC_JSTATE_SEND_ONLY_NON */ 2951 do_attach = B_FALSE; 2952 } 2953 2954 if (do_attach) { 2955 /* 2956 * Do the IBA attach. 2957 */ 2958 DPRINT(10, "ibd_join_group: ibt_attach_mcg \n"); 2959 if ((ibt_status = ibt_attach_mcg(state->id_chnl_hdl, 2960 &mce->mc_info)) != IBT_SUCCESS) { 2961 DPRINT(10, "ibd_join_group : failed qp attachment " 2962 "%d\n", ibt_status); 2963 /* 2964 * NOTE that we should probably preserve the join info 2965 * in the list and later try to leave again at detach 2966 * time. 2967 */ 2968 (void) ibt_leave_mcg(state->id_sgid, mgid, 2969 state->id_sgid, jstate); 2970 kmem_free(mce, sizeof (ibd_mce_t)); 2971 return (NULL); 2972 } 2973 } 2974 2975 /* 2976 * Insert the ibd_mce_t in the proper list. 2977 */ 2978 if (jstate == IB_MC_JSTATE_NON) { 2979 IBD_MCACHE_INSERT_NON(state, mce); 2980 } else { 2981 /* 2982 * Set up the mc_req fields used for reaping the 2983 * mcg in case of delayed tx completion (see 2984 * ibd_tx_cleanup()). 
Also done for sendonly join in 2985 * case we are promoted to fullmembership later and 2986 * keep using the same mce. 2987 */ 2988 mce->mc_req.rq_gid = mgid; 2989 mce->mc_req.rq_ptr = mce; 2990 /* 2991 * Check whether this is the case of trying to join 2992 * full member, and we were already joined send only. 2993 * We try to drop our SendOnly membership, but it is 2994 * possible that the mcg does not exist anymore (and 2995 * the subnet trap never reached us), so the leave 2996 * operation might fail. 2997 */ 2998 if (omce != NULL) { 2999 (void) ibt_leave_mcg(state->id_sgid, mgid, 3000 state->id_sgid, IB_MC_JSTATE_SEND_ONLY_NON); 3001 omce->mc_jstate = IB_MC_JSTATE_FULL; 3002 bcopy(&mce->mc_info, &omce->mc_info, 3003 sizeof (ibt_mcg_info_t)); 3004 kmem_free(mce, sizeof (ibd_mce_t)); 3005 return (omce); 3006 } 3007 mutex_enter(&state->id_mc_mutex); 3008 IBD_MCACHE_INSERT_FULL(state, mce); 3009 mutex_exit(&state->id_mc_mutex); 3010 } 3011 3012 return (mce); 3013 } 3014 3015 /* 3016 * Called during port up event handling to attempt to reacquire full 3017 * membership to an mcg. Stripped down version of ibd_join_group(). 3018 * Note that it is possible that the mcg might have gone away, and 3019 * gets recreated at this point. 3020 */ 3021 static void 3022 ibd_reacquire_group(ibd_state_t *state, ibd_mce_t *mce) 3023 { 3024 ib_gid_t mgid; 3025 3026 /* 3027 * If the mc_fullreap flag is set, or this join fails, a subsequent 3028 * reap/leave is going to try to leave the group. We could prevent 3029 * that by adding a boolean flag into ibd_mce_t, if required. 3030 */ 3031 if (mce->mc_fullreap) 3032 return; 3033 3034 mgid = mce->mc_info.mc_adds_vect.av_dgid; 3035 3036 DPRINT(2, "ibd_reacquire_group : %016llx:%016llx\n", mgid.gid_prefix, 3037 mgid.gid_guid); 3038 3039 if (ibd_iba_join(state, mgid, mce) != IBT_SUCCESS) 3040 ibd_print_warn(state, "Failure on port up to rejoin " 3041 "multicast gid %016llx:%016llx", 3042 (u_longlong_t)mgid.gid_prefix, 3043 (u_longlong_t)mgid.gid_guid); 3044 } 3045 3046 /* 3047 * This code handles delayed Tx completion cleanups for mcg's to which 3048 * disable_multicast has been issued, regular mcg related cleanups during 3049 * disable_multicast, disable_promiscuous and mcg traps, as well as 3050 * cleanups during driver detach time. Depending on the join state, 3051 * it deletes the mce from the appropriate list and issues the IBA 3052 * leave/detach; except in the disable_multicast case when the mce 3053 * is left on the active list for a subsequent Tx completion cleanup. 3054 */ 3055 static void 3056 ibd_async_reap_group(ibd_state_t *state, ibd_mce_t *mce, ib_gid_t mgid, 3057 uint8_t jstate) 3058 { 3059 ibd_mce_t *tmce; 3060 boolean_t do_detach = B_TRUE; 3061 3062 /* 3063 * Before detaching, we must check whether the other list 3064 * contains the mcg; if we detach blindly, the consumer 3065 * who set up the other list will also stop receiving 3066 * traffic. 3067 */ 3068 if (jstate == IB_MC_JSTATE_FULL) { 3069 /* 3070 * The following check is only relevant while coming 3071 * from the Tx completion path in the reap case. 
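 *
 * While here, a summary of how each join state is handled by this
 * routine (sketch; the code is authoritative):
 *
 *	IB_MC_JSTATE_FULL           pull the mce off the full list;
 *	                            skip the IBA detach if a NON
 *	                            entry for the mgid still exists
 *	IB_MC_JSTATE_NON            pull the mce off the non list;
 *	                            skip the detach if a FULL entry
 *	                            still exists
 *	IB_MC_JSTATE_SEND_ONLY_NON  pull the mce off the full list;
 *	                            never detach
 *
 * In every case the IBA leave is then attempted and the mce freed.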
3072 */ 3073 if (!mce->mc_fullreap) 3074 return; 3075 mutex_enter(&state->id_mc_mutex); 3076 IBD_MCACHE_PULLOUT_FULL(state, mce); 3077 mutex_exit(&state->id_mc_mutex); 3078 if (IBD_MCACHE_FIND_NON(state, mgid) != NULL) 3079 do_detach = B_FALSE; 3080 } else if (jstate == IB_MC_JSTATE_NON) { 3081 IBD_MCACHE_PULLOUT_NON(state, mce); 3082 tmce = IBD_MCACHE_FIND_FULL(state, mgid); 3083 if ((tmce != NULL) && (tmce->mc_jstate == IB_MC_JSTATE_FULL)) 3084 do_detach = B_FALSE; 3085 } else { /* jstate == IB_MC_JSTATE_SEND_ONLY_NON */ 3086 mutex_enter(&state->id_mc_mutex); 3087 IBD_MCACHE_PULLOUT_FULL(state, mce); 3088 mutex_exit(&state->id_mc_mutex); 3089 do_detach = B_FALSE; 3090 } 3091 3092 /* 3093 * If we are reacting to a mcg trap and leaving our sendonly or 3094 * non membership, the mcg is possibly already gone, so attempting 3095 * to leave might fail. On the other hand, we must try to leave 3096 * anyway, since this might be a trap from long ago, and we could 3097 * have potentially sendonly joined to a recent incarnation of 3098 * the mcg and are about to loose track of this information. 3099 */ 3100 if (do_detach) { 3101 DPRINT(2, "ibd_async_reap_group : ibt_detach_mcg : " 3102 "%016llx:%016llx\n", mgid.gid_prefix, mgid.gid_guid); 3103 (void) ibt_detach_mcg(state->id_chnl_hdl, &mce->mc_info); 3104 } 3105 3106 (void) ibt_leave_mcg(state->id_sgid, mgid, state->id_sgid, jstate); 3107 kmem_free(mce, sizeof (ibd_mce_t)); 3108 } 3109 3110 /* 3111 * Async code executed due to multicast and promiscuous disable requests 3112 * and mcg trap handling; also executed during driver detach. Mostly, a 3113 * leave and detach is done; except for the fullmember case when Tx 3114 * requests are pending, whence arrangements are made for subsequent 3115 * cleanup on Tx completion. 3116 */ 3117 static void 3118 ibd_leave_group(ibd_state_t *state, ib_gid_t mgid, uint8_t jstate) 3119 { 3120 ipoib_mac_t mcmac; 3121 boolean_t recycled; 3122 ibd_mce_t *mce; 3123 3124 DPRINT(2, "ibd_leave_group : leave_group state %d : %016llx:%016llx\n", 3125 jstate, mgid.gid_prefix, mgid.gid_guid); 3126 3127 if (jstate == IB_MC_JSTATE_NON) { 3128 recycled = B_TRUE; 3129 mce = IBD_MCACHE_FIND_NON(state, mgid); 3130 /* 3131 * In case we are handling a mcg trap, we might not find 3132 * the mcg in the non list. 3133 */ 3134 if (mce == NULL) { 3135 return; 3136 } 3137 } else { 3138 mce = IBD_MCACHE_FIND_FULL(state, mgid); 3139 3140 /* 3141 * In case we are handling a mcg trap, make sure the trap 3142 * is not arriving late; if we have an mce that indicates 3143 * that we are already a fullmember, that would be a clear 3144 * indication that the trap arrived late (ie, is for a 3145 * previous incarnation of the mcg). 3146 */ 3147 if (jstate == IB_MC_JSTATE_SEND_ONLY_NON) { 3148 if ((mce == NULL) || (mce->mc_jstate == 3149 IB_MC_JSTATE_FULL)) { 3150 return; 3151 } 3152 } else { 3153 ASSERT(jstate == IB_MC_JSTATE_FULL); 3154 3155 /* 3156 * If join group failed, mce will be NULL here. 3157 * This is because in GLDv3 driver, set multicast 3158 * will always return success. 3159 */ 3160 if (mce == NULL) { 3161 return; 3162 } 3163 3164 mce->mc_fullreap = B_TRUE; 3165 } 3166 3167 /* 3168 * If no pending Tx's remain that reference the AH 3169 * for the mcg, recycle it from active to free list. 
3170 * Else in the IB_MC_JSTATE_FULL case, just mark the AH, 3171 * so the last completing Tx will cause an async reap 3172 * operation to be invoked, at which time we will drop our 3173 * membership to the mcg so that the pending Tx's complete 3174 * successfully. Refer to comments on "AH and MCE active 3175 * list manipulation" at top of this file. The lock protects 3176 * against Tx fast path and Tx cleanup code. 3177 */ 3178 mutex_enter(&state->id_ac_mutex); 3179 ibd_h2n_mac(&mcmac, IB_MC_QPN, mgid.gid_prefix, mgid.gid_guid); 3180 recycled = ibd_acache_recycle(state, &mcmac, (jstate == 3181 IB_MC_JSTATE_SEND_ONLY_NON)); 3182 mutex_exit(&state->id_ac_mutex); 3183 } 3184 3185 if (recycled) { 3186 DPRINT(2, "ibd_leave_group : leave_group reaping : " 3187 "%016llx:%016llx\n", mgid.gid_prefix, mgid.gid_guid); 3188 ibd_async_reap_group(state, mce, mgid, jstate); 3189 } 3190 } 3191 3192 /* 3193 * Find the broadcast address as defined by IPoIB; implicitly 3194 * determines the IBA scope, mtu, tclass etc of the link the 3195 * interface is going to be a member of. 3196 */ 3197 static ibt_status_t 3198 ibd_find_bgroup(ibd_state_t *state) 3199 { 3200 ibt_mcg_attr_t mcg_attr; 3201 uint_t numg; 3202 uchar_t scopes[] = { IB_MC_SCOPE_SUBNET_LOCAL, 3203 IB_MC_SCOPE_SITE_LOCAL, IB_MC_SCOPE_ORG_LOCAL, 3204 IB_MC_SCOPE_GLOBAL }; 3205 int i, mcgmtu; 3206 boolean_t found = B_FALSE; 3207 int ret; 3208 ibt_mcg_info_t mcg_info; 3209 3210 state->id_bgroup_created = B_FALSE; 3211 3212 query_bcast_grp: 3213 bzero(&mcg_attr, sizeof (ibt_mcg_attr_t)); 3214 mcg_attr.mc_pkey = state->id_pkey; 3215 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(state->id_mgid)) 3216 state->id_mgid.gid_guid = IB_MGID_IPV4_LOWGRP_MASK; 3217 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(state->id_mgid)) 3218 3219 for (i = 0; i < sizeof (scopes)/sizeof (scopes[0]); i++) { 3220 state->id_scope = mcg_attr.mc_scope = scopes[i]; 3221 3222 /* 3223 * Look for the IPoIB broadcast group. 3224 */ 3225 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(state->id_mgid)) 3226 state->id_mgid.gid_prefix = 3227 (((uint64_t)IB_MCGID_IPV4_PREFIX << 32) | 3228 ((uint64_t)state->id_scope << 48) | 3229 ((uint32_t)(state->id_pkey << 16))); 3230 mcg_attr.mc_mgid = state->id_mgid; 3231 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(state->id_mgid)) 3232 if (ibt_query_mcg(state->id_sgid, &mcg_attr, 1, 3233 &state->id_mcinfo, &numg) == IBT_SUCCESS) { 3234 found = B_TRUE; 3235 break; 3236 } 3237 } 3238 3239 if (!found) { 3240 if (ibd_create_broadcast_group) { 3241 /* 3242 * If we created the broadcast group, but failed to 3243 * find it, we can't do anything except leave the 3244 * one we created and return failure. 3245 */ 3246 if (state->id_bgroup_created) { 3247 ibd_print_warn(state, "IPoIB broadcast group " 3248 "absent. 
Unable to query after create."); 3249 goto find_bgroup_fail; 3250 } 3251 3252 /* 3253 * Create the ipoib broadcast group if it didn't exist 3254 */ 3255 bzero(&mcg_attr, sizeof (ibt_mcg_attr_t)); 3256 mcg_attr.mc_qkey = IBD_DEFAULT_QKEY; 3257 mcg_attr.mc_join_state = IB_MC_JSTATE_FULL; 3258 mcg_attr.mc_scope = IB_MC_SCOPE_SUBNET_LOCAL; 3259 mcg_attr.mc_pkey = state->id_pkey; 3260 mcg_attr.mc_flow = 0; 3261 mcg_attr.mc_sl = 0; 3262 mcg_attr.mc_tclass = 0; 3263 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(state->id_mgid)) 3264 state->id_mgid.gid_prefix = 3265 (((uint64_t)IB_MCGID_IPV4_PREFIX << 32) | 3266 ((uint64_t)IB_MC_SCOPE_SUBNET_LOCAL << 48) | 3267 ((uint32_t)(state->id_pkey << 16))); 3268 mcg_attr.mc_mgid = state->id_mgid; 3269 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(state->id_mgid)) 3270 3271 if ((ret = ibt_join_mcg(state->id_sgid, &mcg_attr, 3272 &mcg_info, NULL, NULL)) != IBT_SUCCESS) { 3273 ibd_print_warn(state, "IPoIB broadcast group " 3274 "absent, create failed: ret = %d\n", ret); 3275 state->id_bgroup_created = B_FALSE; 3276 return (IBT_FAILURE); 3277 } 3278 state->id_bgroup_created = B_TRUE; 3279 goto query_bcast_grp; 3280 } else { 3281 ibd_print_warn(state, "IPoIB broadcast group absent"); 3282 return (IBT_FAILURE); 3283 } 3284 } 3285 3286 /* 3287 * Assert that the mcg mtu <= id_mtu. Fill in updated id_mtu. 3288 */ 3289 mcgmtu = (128 << state->id_mcinfo->mc_mtu); 3290 if (state->id_mtu < mcgmtu) { 3291 ibd_print_warn(state, "IPoIB broadcast group MTU %d " 3292 "greater than port's maximum MTU %d", mcgmtu, 3293 state->id_mtu); 3294 ibt_free_mcg_info(state->id_mcinfo, 1); 3295 goto find_bgroup_fail; 3296 } 3297 state->id_mtu = mcgmtu; 3298 3299 return (IBT_SUCCESS); 3300 3301 find_bgroup_fail: 3302 if (state->id_bgroup_created) { 3303 (void) ibt_leave_mcg(state->id_sgid, 3304 mcg_info.mc_adds_vect.av_dgid, state->id_sgid, 3305 IB_MC_JSTATE_FULL); 3306 } 3307 3308 return (IBT_FAILURE); 3309 } 3310 3311 static int 3312 ibd_alloc_tx_copybufs(ibd_state_t *state) 3313 { 3314 ibt_mr_attr_t mem_attr; 3315 3316 /* 3317 * Allocate one big chunk for all regular tx copy bufs 3318 */ 3319 state->id_tx_buf_sz = state->id_mtu; 3320 if (state->id_lso_policy && state->id_lso_capable && 3321 (IBD_TX_BUF_SZ > state->id_mtu)) { 3322 state->id_tx_buf_sz = IBD_TX_BUF_SZ; 3323 } 3324 3325 state->id_tx_bufs = kmem_zalloc(state->id_num_swqe * 3326 state->id_tx_buf_sz, KM_SLEEP); 3327 3328 state->id_tx_wqes = kmem_zalloc(state->id_num_swqe * 3329 sizeof (ibd_swqe_t), KM_SLEEP); 3330 3331 /* 3332 * Do one memory registration on the entire txbuf area 3333 */ 3334 mem_attr.mr_vaddr = (uint64_t)(uintptr_t)state->id_tx_bufs; 3335 mem_attr.mr_len = state->id_num_swqe * state->id_tx_buf_sz; 3336 mem_attr.mr_as = NULL; 3337 mem_attr.mr_flags = IBT_MR_SLEEP; 3338 if (ibt_register_mr(state->id_hca_hdl, state->id_pd_hdl, &mem_attr, 3339 &state->id_tx_mr_hdl, &state->id_tx_mr_desc) != IBT_SUCCESS) { 3340 DPRINT(10, "ibd_alloc_tx_copybufs: ibt_register_mr failed"); 3341 kmem_free(state->id_tx_wqes, 3342 state->id_num_swqe * sizeof (ibd_swqe_t)); 3343 kmem_free(state->id_tx_bufs, 3344 state->id_num_swqe * state->id_tx_buf_sz); 3345 state->id_tx_bufs = NULL; 3346 return (DDI_FAILURE); 3347 } 3348 3349 return (DDI_SUCCESS); 3350 } 3351 3352 static int 3353 ibd_alloc_tx_lsobufs(ibd_state_t *state) 3354 { 3355 ibt_mr_attr_t mem_attr; 3356 ibd_lsobuf_t *buflist; 3357 ibd_lsobuf_t *lbufp; 3358 ibd_lsobuf_t *tail; 3359 ibd_lsobkt_t *bktp; 3360 uint8_t *membase; 3361 uint8_t *memp; 3362 uint_t memsz; 3363 int i; 3364 3365 /* 3366 * 
Allocate the lso bucket 3367 */ 3368 bktp = kmem_zalloc(sizeof (ibd_lsobkt_t), KM_SLEEP); 3369 3370 /* 3371 * Allocate the entire lso memory and register it 3372 */ 3373 memsz = IBD_NUM_LSO_BUFS * IBD_LSO_BUFSZ; 3374 membase = kmem_zalloc(memsz, KM_SLEEP); 3375 3376 mem_attr.mr_vaddr = (uint64_t)(uintptr_t)membase; 3377 mem_attr.mr_len = memsz; 3378 mem_attr.mr_as = NULL; 3379 mem_attr.mr_flags = IBT_MR_SLEEP; 3380 if (ibt_register_mr(state->id_hca_hdl, state->id_pd_hdl, 3381 &mem_attr, &bktp->bkt_mr_hdl, &bktp->bkt_mr_desc) != IBT_SUCCESS) { 3382 DPRINT(10, "ibd_alloc_tx_lsobufs: ibt_register_mr failed"); 3383 kmem_free(membase, memsz); 3384 kmem_free(bktp, sizeof (ibd_lsobkt_t)); 3385 return (DDI_FAILURE); 3386 } 3387 3388 mutex_enter(&state->id_lso_lock); 3389 3390 /* 3391 * Now allocate the buflist. Note that the elements in the buflist and 3392 * the buffers in the lso memory have a permanent 1-1 relation, so we 3393 * can always derive the address of a buflist entry from the address of 3394 * an lso buffer. 3395 */ 3396 buflist = kmem_zalloc(IBD_NUM_LSO_BUFS * sizeof (ibd_lsobuf_t), 3397 KM_SLEEP); 3398 3399 /* 3400 * Set up the lso buf chain 3401 */ 3402 memp = membase; 3403 lbufp = buflist; 3404 for (i = 0; i < IBD_NUM_LSO_BUFS; i++) { 3405 lbufp->lb_isfree = 1; 3406 lbufp->lb_buf = memp; 3407 lbufp->lb_next = lbufp + 1; 3408 3409 tail = lbufp; 3410 3411 memp += IBD_LSO_BUFSZ; 3412 lbufp++; 3413 } 3414 tail->lb_next = NULL; 3415 3416 /* 3417 * Set up the LSO buffer information in ibd state 3418 */ 3419 bktp->bkt_bufl = buflist; 3420 bktp->bkt_free_head = buflist; 3421 bktp->bkt_mem = membase; 3422 bktp->bkt_nelem = IBD_NUM_LSO_BUFS; 3423 bktp->bkt_nfree = bktp->bkt_nelem; 3424 3425 state->id_lso = bktp; 3426 mutex_exit(&state->id_lso_lock); 3427 3428 return (DDI_SUCCESS); 3429 } 3430 3431 /* 3432 * Statically allocate Tx buffer list(s). 
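 *
 * Each swqe set up below owns a fixed slice of the single
 * registered copybuf region allocated in ibd_alloc_tx_copybufs():
 * swqe i uses the buffer at id_tx_bufs + i * id_tx_buf_sz, and all
 * slices share the one lkey from id_tx_mr_desc, so no per-send
 * memory registration is needed. Sketch of the layout (illustrative
 * only):
 *
 *	id_tx_bufs:  | slot 0 | slot 1 | ... | slot id_num_swqe - 1 |
 *	swqe[i].swqe_copybuf.ic_sgl.ds_va  = id_tx_bufs + i * sz
 *	swqe[i].swqe_copybuf.ic_sgl.ds_key = id_tx_mr_desc.md_lkey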
3433 */ 3434 static int 3435 ibd_init_txlist(ibd_state_t *state) 3436 { 3437 ibd_swqe_t *swqe; 3438 ibt_lkey_t lkey; 3439 int i; 3440 uint_t len; 3441 uint8_t *bufaddr; 3442 3443 if (ibd_alloc_tx_copybufs(state) != DDI_SUCCESS) 3444 return (DDI_FAILURE); 3445 3446 if (state->id_lso_policy && state->id_lso_capable) { 3447 if (ibd_alloc_tx_lsobufs(state) != DDI_SUCCESS) 3448 state->id_lso_policy = B_FALSE; 3449 } 3450 3451 mutex_enter(&state->id_tx_list.dl_mutex); 3452 state->id_tx_list.dl_head = NULL; 3453 state->id_tx_list.dl_pending_sends = B_FALSE; 3454 state->id_tx_list.dl_cnt = 0; 3455 mutex_exit(&state->id_tx_list.dl_mutex); 3456 mutex_enter(&state->id_tx_rel_list.dl_mutex); 3457 state->id_tx_rel_list.dl_head = NULL; 3458 state->id_tx_rel_list.dl_pending_sends = B_FALSE; 3459 state->id_tx_rel_list.dl_cnt = 0; 3460 mutex_exit(&state->id_tx_rel_list.dl_mutex); 3461 3462 /* 3463 * Allocate and setup the swqe list 3464 */ 3465 lkey = state->id_tx_mr_desc.md_lkey; 3466 bufaddr = state->id_tx_bufs; 3467 len = state->id_tx_buf_sz; 3468 swqe = state->id_tx_wqes; 3469 mutex_enter(&state->id_tx_list.dl_mutex); 3470 for (i = 0; i < state->id_num_swqe; i++, swqe++, bufaddr += len) { 3471 swqe->swqe_next = NULL; 3472 swqe->swqe_im_mblk = NULL; 3473 3474 swqe->swqe_copybuf.ic_sgl.ds_va = (ib_vaddr_t)(uintptr_t) 3475 bufaddr; 3476 swqe->swqe_copybuf.ic_sgl.ds_key = lkey; 3477 swqe->swqe_copybuf.ic_sgl.ds_len = 0; /* set in send */ 3478 3479 swqe->w_swr.wr_id = (ibt_wrid_t)(uintptr_t)swqe; 3480 swqe->w_swr.wr_flags = IBT_WR_NO_FLAGS; 3481 swqe->w_swr.wr_trans = IBT_UD_SRV; 3482 3483 /* These are set in send */ 3484 swqe->w_swr.wr_nds = 0; 3485 swqe->w_swr.wr_sgl = NULL; 3486 swqe->w_swr.wr_opcode = IBT_WRC_SEND; 3487 3488 /* add to list */ 3489 state->id_tx_list.dl_cnt++; 3490 swqe->swqe_next = state->id_tx_list.dl_head; 3491 state->id_tx_list.dl_head = SWQE_TO_WQE(swqe); 3492 } 3493 mutex_exit(&state->id_tx_list.dl_mutex); 3494 3495 return (DDI_SUCCESS); 3496 } 3497 3498 static int 3499 ibd_acquire_lsobufs(ibd_state_t *state, uint_t req_sz, ibt_wr_ds_t *sgl_p, 3500 uint32_t *nds_p) 3501 { 3502 ibd_lsobkt_t *bktp; 3503 ibd_lsobuf_t *lbufp; 3504 ibd_lsobuf_t *nextp; 3505 ibt_lkey_t lso_lkey; 3506 uint_t frag_sz; 3507 uint_t num_needed; 3508 int i; 3509 3510 ASSERT(sgl_p != NULL); 3511 ASSERT(nds_p != NULL); 3512 ASSERT(req_sz != 0); 3513 3514 /* 3515 * Determine how many bufs we'd need for the size requested 3516 */ 3517 num_needed = req_sz / IBD_LSO_BUFSZ; 3518 if ((frag_sz = req_sz % IBD_LSO_BUFSZ) != 0) 3519 num_needed++; 3520 3521 mutex_enter(&state->id_lso_lock); 3522 3523 /* 3524 * If we don't have enough lso bufs, return failure 3525 */ 3526 ASSERT(state->id_lso != NULL); 3527 bktp = state->id_lso; 3528 if (bktp->bkt_nfree < num_needed) { 3529 mutex_exit(&state->id_lso_lock); 3530 return (-1); 3531 } 3532 3533 /* 3534 * Pick the first 'num_needed' bufs from the free list 3535 */ 3536 lso_lkey = bktp->bkt_mr_desc.md_lkey; 3537 lbufp = bktp->bkt_free_head; 3538 for (i = 0; i < num_needed; i++) { 3539 ASSERT(lbufp->lb_isfree != 0); 3540 ASSERT(lbufp->lb_buf != NULL); 3541 3542 nextp = lbufp->lb_next; 3543 3544 sgl_p[i].ds_va = (ib_vaddr_t)(uintptr_t)lbufp->lb_buf; 3545 sgl_p[i].ds_key = lso_lkey; 3546 sgl_p[i].ds_len = IBD_LSO_BUFSZ; 3547 3548 lbufp->lb_isfree = 0; 3549 lbufp->lb_next = NULL; 3550 3551 lbufp = nextp; 3552 } 3553 bktp->bkt_free_head = lbufp; 3554 3555 /* 3556 * If the requested size is not a multiple of IBD_LSO_BUFSZ, we need 3557 * to adjust the last sgl entry's length. 
Since we know we need atleast 3558 * one, the i-1 use below is ok. 3559 */ 3560 if (frag_sz) { 3561 sgl_p[i-1].ds_len = frag_sz; 3562 } 3563 3564 /* 3565 * Update nfree count and return 3566 */ 3567 bktp->bkt_nfree -= num_needed; 3568 3569 mutex_exit(&state->id_lso_lock); 3570 3571 *nds_p = num_needed; 3572 3573 return (0); 3574 } 3575 3576 static void 3577 ibd_release_lsobufs(ibd_state_t *state, ibt_wr_ds_t *sgl_p, uint32_t nds) 3578 { 3579 ibd_lsobkt_t *bktp; 3580 ibd_lsobuf_t *lbufp; 3581 uint8_t *lso_mem_end; 3582 uint_t ndx; 3583 int i; 3584 3585 mutex_enter(&state->id_lso_lock); 3586 3587 bktp = state->id_lso; 3588 ASSERT(bktp != NULL); 3589 3590 lso_mem_end = bktp->bkt_mem + bktp->bkt_nelem * IBD_LSO_BUFSZ; 3591 for (i = 0; i < nds; i++) { 3592 uint8_t *va; 3593 3594 va = (uint8_t *)(uintptr_t)sgl_p[i].ds_va; 3595 ASSERT(va >= bktp->bkt_mem && va < lso_mem_end); 3596 3597 /* 3598 * Figure out the buflist element this sgl buffer corresponds 3599 * to and put it back at the head 3600 */ 3601 ndx = (va - bktp->bkt_mem) / IBD_LSO_BUFSZ; 3602 lbufp = bktp->bkt_bufl + ndx; 3603 3604 ASSERT(lbufp->lb_isfree == 0); 3605 ASSERT(lbufp->lb_buf == va); 3606 3607 lbufp->lb_isfree = 1; 3608 lbufp->lb_next = bktp->bkt_free_head; 3609 bktp->bkt_free_head = lbufp; 3610 } 3611 bktp->bkt_nfree += nds; 3612 3613 mutex_exit(&state->id_lso_lock); 3614 } 3615 3616 static void 3617 ibd_free_tx_copybufs(ibd_state_t *state) 3618 { 3619 /* 3620 * Unregister txbuf mr 3621 */ 3622 if (ibt_deregister_mr(state->id_hca_hdl, 3623 state->id_tx_mr_hdl) != IBT_SUCCESS) { 3624 DPRINT(10, "ibd_free_tx_copybufs: ibt_deregister_mr failed"); 3625 } 3626 state->id_tx_mr_hdl = NULL; 3627 3628 /* 3629 * Free txbuf memory 3630 */ 3631 kmem_free(state->id_tx_wqes, state->id_num_swqe * sizeof (ibd_swqe_t)); 3632 kmem_free(state->id_tx_bufs, state->id_num_swqe * state->id_tx_buf_sz); 3633 state->id_tx_wqes = NULL; 3634 state->id_tx_bufs = NULL; 3635 } 3636 3637 static void 3638 ibd_free_tx_lsobufs(ibd_state_t *state) 3639 { 3640 ibd_lsobkt_t *bktp; 3641 3642 mutex_enter(&state->id_lso_lock); 3643 3644 if ((bktp = state->id_lso) == NULL) { 3645 mutex_exit(&state->id_lso_lock); 3646 return; 3647 } 3648 3649 /* 3650 * First, free the buflist 3651 */ 3652 ASSERT(bktp->bkt_bufl != NULL); 3653 kmem_free(bktp->bkt_bufl, bktp->bkt_nelem * sizeof (ibd_lsobuf_t)); 3654 3655 /* 3656 * Unregister the LSO memory and free it 3657 */ 3658 ASSERT(bktp->bkt_mr_hdl != NULL); 3659 if (ibt_deregister_mr(state->id_hca_hdl, 3660 bktp->bkt_mr_hdl) != IBT_SUCCESS) { 3661 DPRINT(10, 3662 "ibd_free_lsobufs: ibt_deregister_mr failed"); 3663 } 3664 ASSERT(bktp->bkt_mem); 3665 kmem_free(bktp->bkt_mem, bktp->bkt_nelem * IBD_LSO_BUFSZ); 3666 3667 /* 3668 * Finally free the bucket 3669 */ 3670 kmem_free(bktp, sizeof (ibd_lsobkt_t)); 3671 state->id_lso = NULL; 3672 3673 mutex_exit(&state->id_lso_lock); 3674 } 3675 3676 /* 3677 * Free the statically allocated Tx buffer list. 
3678 */ 3679 static void 3680 ibd_fini_txlist(ibd_state_t *state) 3681 { 3682 /* 3683 * Free the allocated swqes 3684 */ 3685 mutex_enter(&state->id_tx_list.dl_mutex); 3686 mutex_enter(&state->id_tx_rel_list.dl_mutex); 3687 state->id_tx_list.dl_head = NULL; 3688 state->id_tx_list.dl_pending_sends = B_FALSE; 3689 state->id_tx_list.dl_cnt = 0; 3690 state->id_tx_rel_list.dl_head = NULL; 3691 state->id_tx_rel_list.dl_pending_sends = B_FALSE; 3692 state->id_tx_rel_list.dl_cnt = 0; 3693 mutex_exit(&state->id_tx_rel_list.dl_mutex); 3694 mutex_exit(&state->id_tx_list.dl_mutex); 3695 3696 ibd_free_tx_lsobufs(state); 3697 ibd_free_tx_copybufs(state); 3698 } 3699 3700 /* 3701 * post a list of rwqes, NULL terminated. 3702 */ 3703 static void 3704 ibd_post_recv_list(ibd_state_t *state, ibd_rwqe_t *rwqe) 3705 { 3706 uint_t i; 3707 uint_t num_posted; 3708 ibt_status_t ibt_status; 3709 ibt_recv_wr_t wrs[IBD_RX_POST_CNT]; 3710 3711 while (rwqe) { 3712 /* Post up to IBD_RX_POST_CNT receive work requests */ 3713 for (i = 0; i < IBD_RX_POST_CNT; i++) { 3714 wrs[i] = rwqe->w_rwr; 3715 rwqe = WQE_TO_RWQE(rwqe->rwqe_next); 3716 if (rwqe == NULL) { 3717 i++; 3718 break; 3719 } 3720 } 3721 3722 /* 3723 * If posting fails for some reason, we'll never receive 3724 * completion intimation, so we'll need to cleanup. But 3725 * we need to make sure we don't clean up nodes whose 3726 * wrs have been successfully posted. We assume that the 3727 * hca driver returns on the first failure to post and 3728 * therefore the first 'num_posted' entries don't need 3729 * cleanup here. 3730 */ 3731 atomic_add_32(&state->id_rx_list.dl_cnt, i); 3732 3733 num_posted = 0; 3734 ibt_status = ibt_post_recv(state->id_chnl_hdl, wrs, i, 3735 &num_posted); 3736 if (ibt_status != IBT_SUCCESS) { 3737 /* This cannot happen unless the device has an error. */ 3738 ibd_print_warn(state, "ibd_post_recv: FATAL: " 3739 "posting multiple wrs failed: " 3740 "requested=%d, done=%d, ret=%d", 3741 IBD_RX_POST_CNT, num_posted, ibt_status); 3742 atomic_add_32(&state->id_rx_list.dl_cnt, 3743 num_posted - i); 3744 } 3745 } 3746 } 3747 3748 /* 3749 * Grab a list of rwqes from the array of lists, and post the list. 3750 */ 3751 static void 3752 ibd_post_recv_intr(ibd_state_t *state) 3753 { 3754 ibd_rx_queue_t *rxp; 3755 ibd_rwqe_t *list; 3756 3757 /* rotate through the rx_queue array, expecting an adequate number */ 3758 state->id_rx_post_queue_index = 3759 (state->id_rx_post_queue_index + 1) & 3760 (state->id_rx_nqueues - 1); 3761 3762 rxp = state->id_rx_queues + state->id_rx_post_queue_index; 3763 mutex_enter(&rxp->rx_post_lock); 3764 list = WQE_TO_RWQE(rxp->rx_head); 3765 rxp->rx_head = NULL; 3766 rxp->rx_cnt = 0; 3767 mutex_exit(&rxp->rx_post_lock); 3768 ibd_post_recv_list(state, list); 3769 } 3770 3771 /* macro explained below */ 3772 #define RX_QUEUE_HASH(rwqe) \ 3773 (((uintptr_t)(rwqe) >> 8) & (state->id_rx_nqueues - 1)) 3774 3775 /* 3776 * Add a rwqe to one of the the Rx lists. If the list is large enough 3777 * (exactly IBD_RX_POST_CNT), post the list to the hardware. 3778 * 3779 * Note: one of 2^N lists is chosen via a hash. This is done 3780 * because using one list is contentious. If the first list is busy 3781 * (mutex_tryenter fails), use a second list (just call mutex_enter). 3782 * 3783 * The number 8 in RX_QUEUE_HASH is a random choice that provides 3784 * even distribution of mapping rwqes to the 2^N queues. 
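 * Illustration (addresses and queue count are hypothetical): with
 * id_rx_nqueues = 16, a rwqe at address 0xffffff0012345a00 hashes to
 * ((0xffffff0012345a00 >> 8) & 0xf) = 0xa, i.e. rx queue index 10.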
3785 */ 3786 static void 3787 ibd_post_recv(ibd_state_t *state, ibd_rwqe_t *rwqe) 3788 { 3789 ibd_rx_queue_t *rxp; 3790 3791 rxp = state->id_rx_queues + RX_QUEUE_HASH(rwqe); 3792 3793 if (!mutex_tryenter(&rxp->rx_post_lock)) { 3794 /* Failed. Try a different queue ("ptr + 16" ensures that). */ 3795 rxp = state->id_rx_queues + RX_QUEUE_HASH(rwqe + 16); 3796 mutex_enter(&rxp->rx_post_lock); 3797 } 3798 rwqe->rwqe_next = rxp->rx_head; 3799 if (++rxp->rx_cnt >= IBD_RX_POST_CNT - 2) { 3800 uint_t active = atomic_inc_32_nv(&state->id_rx_post_active); 3801 3802 /* only call ibt_post_recv() every Nth time through here */ 3803 if ((active & (state->id_rx_nqueues - 1)) == 0) { 3804 rxp->rx_head = NULL; 3805 rxp->rx_cnt = 0; 3806 mutex_exit(&rxp->rx_post_lock); 3807 ibd_post_recv_list(state, rwqe); 3808 return; 3809 } 3810 } 3811 rxp->rx_head = RWQE_TO_WQE(rwqe); 3812 mutex_exit(&rxp->rx_post_lock); 3813 } 3814 3815 static int 3816 ibd_alloc_rx_copybufs(ibd_state_t *state) 3817 { 3818 ibt_mr_attr_t mem_attr; 3819 int i; 3820 3821 /* 3822 * Allocate one big chunk for all regular rx copy bufs 3823 */ 3824 state->id_rx_buf_sz = state->id_mtu + IPOIB_GRH_SIZE; 3825 3826 state->id_rx_bufs = kmem_zalloc(state->id_num_rwqe * 3827 state->id_rx_buf_sz, KM_SLEEP); 3828 3829 state->id_rx_wqes = kmem_zalloc(state->id_num_rwqe * 3830 sizeof (ibd_rwqe_t), KM_SLEEP); 3831 3832 state->id_rx_nqueues = 1 << IBD_LOG_RX_POST; 3833 state->id_rx_queues = kmem_zalloc(state->id_rx_nqueues * 3834 sizeof (ibd_rx_queue_t), KM_SLEEP); 3835 for (i = 0; i < state->id_rx_nqueues; i++) { 3836 ibd_rx_queue_t *rxp = state->id_rx_queues + i; 3837 mutex_init(&rxp->rx_post_lock, NULL, MUTEX_DRIVER, NULL); 3838 } 3839 3840 /* 3841 * Do one memory registration on the entire rxbuf area 3842 */ 3843 mem_attr.mr_vaddr = (uint64_t)(uintptr_t)state->id_rx_bufs; 3844 mem_attr.mr_len = state->id_num_rwqe * state->id_rx_buf_sz; 3845 mem_attr.mr_as = NULL; 3846 mem_attr.mr_flags = IBT_MR_SLEEP | IBT_MR_ENABLE_LOCAL_WRITE; 3847 if (ibt_register_mr(state->id_hca_hdl, state->id_pd_hdl, &mem_attr, 3848 &state->id_rx_mr_hdl, &state->id_rx_mr_desc) != IBT_SUCCESS) { 3849 DPRINT(10, "ibd_alloc_rx_copybufs: ibt_register_mr failed"); 3850 kmem_free(state->id_rx_wqes, 3851 state->id_num_rwqe * sizeof (ibd_rwqe_t)); 3852 kmem_free(state->id_rx_bufs, 3853 state->id_num_rwqe * state->id_rx_buf_sz); 3854 state->id_rx_bufs = NULL; 3855 state->id_rx_wqes = NULL; 3856 return (DDI_FAILURE); 3857 } 3858 3859 return (DDI_SUCCESS); 3860 } 3861 3862 /* 3863 * Allocate the statically allocated Rx buffer list. 3864 */ 3865 static int 3866 ibd_init_rxlist(ibd_state_t *state) 3867 { 3868 ibd_rwqe_t *rwqe, *next; 3869 ibd_wqe_t *list; 3870 ibt_lkey_t lkey; 3871 int i; 3872 uint_t len; 3873 uint8_t *bufaddr; 3874 3875 mutex_enter(&state->id_rx_free_list.dl_mutex); 3876 if (state->id_rx_free_list.dl_head != NULL) { 3877 /* rx rsrcs were never freed. 
Just repost them */ 3878 len = state->id_rx_buf_sz; 3879 list = state->id_rx_free_list.dl_head; 3880 state->id_rx_free_list.dl_head = NULL; 3881 state->id_rx_free_list.dl_cnt = 0; 3882 mutex_exit(&state->id_rx_free_list.dl_mutex); 3883 for (rwqe = WQE_TO_RWQE(list); rwqe != NULL; 3884 rwqe = WQE_TO_RWQE(rwqe->rwqe_next)) { 3885 if ((rwqe->rwqe_im_mblk = desballoc( 3886 rwqe->rwqe_copybuf.ic_bufaddr, len, 0, 3887 &rwqe->w_freemsg_cb)) == NULL) { 3888 /* allow freemsg_cb to free the rwqes */ 3889 if (atomic_dec_32_nv(&state->id_running) != 0) { 3890 cmn_err(CE_WARN, "ibd_init_rxlist: " 3891 "id_running was not 1\n"); 3892 } 3893 DPRINT(10, "ibd_init_rxlist : " 3894 "failed in desballoc()"); 3895 for (rwqe = WQE_TO_RWQE(list); rwqe != NULL; 3896 rwqe = next) { 3897 next = WQE_TO_RWQE(rwqe->rwqe_next); 3898 if (rwqe->rwqe_im_mblk) { 3899 atomic_inc_32(&state-> 3900 id_rx_list. 3901 dl_bufs_outstanding); 3902 freemsg(rwqe->rwqe_im_mblk); 3903 } else 3904 ibd_free_rwqe(state, rwqe); 3905 } 3906 atomic_inc_32(&state->id_running); 3907 return (DDI_FAILURE); 3908 } 3909 } 3910 ibd_post_recv_list(state, WQE_TO_RWQE(list)); 3911 return (DDI_SUCCESS); 3912 } 3913 mutex_exit(&state->id_rx_free_list.dl_mutex); 3914 3915 if (ibd_alloc_rx_copybufs(state) != DDI_SUCCESS) 3916 return (DDI_FAILURE); 3917 3918 /* 3919 * Allocate and setup the rwqe list 3920 */ 3921 len = state->id_rx_buf_sz; 3922 lkey = state->id_rx_mr_desc.md_lkey; 3923 rwqe = state->id_rx_wqes; 3924 bufaddr = state->id_rx_bufs; 3925 list = NULL; 3926 for (i = 0; i < state->id_num_rwqe; i++, rwqe++, bufaddr += len) { 3927 rwqe->w_state = state; 3928 rwqe->w_freemsg_cb.free_func = ibd_freemsg_cb; 3929 rwqe->w_freemsg_cb.free_arg = (char *)rwqe; 3930 3931 rwqe->rwqe_copybuf.ic_bufaddr = bufaddr; 3932 3933 if ((rwqe->rwqe_im_mblk = desballoc(bufaddr, len, 0, 3934 &rwqe->w_freemsg_cb)) == NULL) { 3935 DPRINT(10, "ibd_init_rxlist : failed in desballoc()"); 3936 /* allow freemsg_cb to free the rwqes */ 3937 if (atomic_dec_32_nv(&state->id_running) != 0) { 3938 cmn_err(CE_WARN, "ibd_init_rxlist: " 3939 "id_running was not 1\n"); 3940 } 3941 DPRINT(10, "ibd_init_rxlist : " 3942 "failed in desballoc()"); 3943 for (rwqe = WQE_TO_RWQE(list); rwqe != NULL; 3944 rwqe = next) { 3945 next = WQE_TO_RWQE(rwqe->rwqe_next); 3946 freemsg(rwqe->rwqe_im_mblk); 3947 } 3948 atomic_inc_32(&state->id_running); 3949 3950 /* remove reference to free'd rwqes */ 3951 mutex_enter(&state->id_rx_free_list.dl_mutex); 3952 state->id_rx_free_list.dl_head = NULL; 3953 state->id_rx_free_list.dl_cnt = 0; 3954 mutex_exit(&state->id_rx_free_list.dl_mutex); 3955 3956 ibd_fini_rxlist(state); 3957 return (DDI_FAILURE); 3958 } 3959 3960 rwqe->rwqe_copybuf.ic_sgl.ds_key = lkey; 3961 rwqe->rwqe_copybuf.ic_sgl.ds_va = 3962 (ib_vaddr_t)(uintptr_t)bufaddr; 3963 rwqe->rwqe_copybuf.ic_sgl.ds_len = len; 3964 rwqe->w_rwr.wr_id = (ibt_wrid_t)(uintptr_t)rwqe; 3965 rwqe->w_rwr.wr_nds = 1; 3966 rwqe->w_rwr.wr_sgl = &rwqe->rwqe_copybuf.ic_sgl; 3967 3968 rwqe->rwqe_next = list; 3969 list = RWQE_TO_WQE(rwqe); 3970 } 3971 ibd_post_recv_list(state, WQE_TO_RWQE(list)); 3972 3973 return (DDI_SUCCESS); 3974 } 3975 3976 static void 3977 ibd_free_rx_copybufs(ibd_state_t *state) 3978 { 3979 int i; 3980 3981 /* 3982 * Unregister rxbuf mr 3983 */ 3984 if (ibt_deregister_mr(state->id_hca_hdl, 3985 state->id_rx_mr_hdl) != IBT_SUCCESS) { 3986 DPRINT(10, "ibd_free_rx_copybufs: ibt_deregister_mr failed"); 3987 } 3988 state->id_rx_mr_hdl = NULL; 3989 3990 /* 3991 * Free rxbuf memory 3992 */ 3993 for (i = 0; i < 
state->id_rx_nqueues; i++) { 3994 ibd_rx_queue_t *rxp = state->id_rx_queues + i; 3995 mutex_destroy(&rxp->rx_post_lock); 3996 } 3997 kmem_free(state->id_rx_queues, state->id_rx_nqueues * 3998 sizeof (ibd_rx_queue_t)); 3999 kmem_free(state->id_rx_wqes, state->id_num_rwqe * sizeof (ibd_rwqe_t)); 4000 kmem_free(state->id_rx_bufs, state->id_num_rwqe * state->id_rx_buf_sz); 4001 state->id_rx_queues = NULL; 4002 state->id_rx_wqes = NULL; 4003 state->id_rx_bufs = NULL; 4004 } 4005 4006 static void 4007 ibd_free_rx_rsrcs(ibd_state_t *state) 4008 { 4009 mutex_enter(&state->id_rx_free_list.dl_mutex); 4010 if (state->id_rx_free_list.dl_head == NULL) { 4011 /* already freed */ 4012 mutex_exit(&state->id_rx_free_list.dl_mutex); 4013 return; 4014 } 4015 ASSERT(state->id_rx_free_list.dl_cnt == state->id_num_rwqe); 4016 ibd_free_rx_copybufs(state); 4017 state->id_rx_free_list.dl_cnt = 0; 4018 state->id_rx_free_list.dl_head = NULL; 4019 mutex_exit(&state->id_rx_free_list.dl_mutex); 4020 } 4021 4022 /* 4023 * Free the statically allocated Rx buffer list. 4024 */ 4025 static void 4026 ibd_fini_rxlist(ibd_state_t *state) 4027 { 4028 ibd_rwqe_t *rwqe; 4029 int i; 4030 4031 /* run through the rx_queue's, calling freemsg() */ 4032 for (i = 0; i < state->id_rx_nqueues; i++) { 4033 ibd_rx_queue_t *rxp = state->id_rx_queues + i; 4034 mutex_enter(&rxp->rx_post_lock); 4035 for (rwqe = WQE_TO_RWQE(rxp->rx_head); rwqe; 4036 rwqe = WQE_TO_RWQE(rwqe->rwqe_next)) { 4037 freemsg(rwqe->rwqe_im_mblk); 4038 rxp->rx_cnt--; 4039 } 4040 rxp->rx_head = NULL; 4041 mutex_exit(&rxp->rx_post_lock); 4042 } 4043 4044 /* cannot free rx resources unless gld returned everything */ 4045 if (atomic_add_32_nv(&state->id_rx_list.dl_bufs_outstanding, 0) == 0) 4046 ibd_free_rx_rsrcs(state); 4047 } 4048 4049 /* 4050 * Free an allocated recv wqe. 4051 */ 4052 /* ARGSUSED */ 4053 static void 4054 ibd_free_rwqe(ibd_state_t *state, ibd_rwqe_t *rwqe) 4055 { 4056 /* 4057 * desballoc() failed (no memory). 4058 * 4059 * This rwqe is placed on a free list so that it 4060 * can be reinstated when memory is available. 4061 * 4062 * NOTE: no code currently exists to reinstate 4063 * these "lost" rwqes. 4064 */ 4065 mutex_enter(&state->id_rx_free_list.dl_mutex); 4066 state->id_rx_free_list.dl_cnt++; 4067 rwqe->rwqe_next = state->id_rx_free_list.dl_head; 4068 state->id_rx_free_list.dl_head = RWQE_TO_WQE(rwqe); 4069 mutex_exit(&state->id_rx_free_list.dl_mutex); 4070 } 4071 4072 /* 4073 * IBA Rx completion queue handler. Guaranteed to be single 4074 * threaded and nonreentrant for this CQ. 4075 */ 4076 /* ARGSUSED */ 4077 static void 4078 ibd_rcq_handler(ibt_cq_hdl_t cq_hdl, void *arg) 4079 { 4080 ibd_state_t *state = (ibd_state_t *)arg; 4081 4082 atomic_inc_64(&state->id_num_intrs); 4083 4084 if (ibd_rx_softintr == 1) { 4085 mutex_enter(&state->id_rcq_poll_lock); 4086 if (state->id_rcq_poll_busy & IBD_CQ_POLLING) { 4087 state->id_rcq_poll_busy |= IBD_REDO_CQ_POLLING; 4088 mutex_exit(&state->id_rcq_poll_lock); 4089 return; 4090 } else { 4091 mutex_exit(&state->id_rcq_poll_lock); 4092 ddi_trigger_softintr(state->id_rx); 4093 } 4094 } else 4095 (void) ibd_intr((caddr_t)state); 4096 } 4097 4098 /* 4099 * CQ handler for Tx completions, when the Tx CQ is in 4100 * interrupt driven mode. 
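 * The handshake with the poll path mirrors the Rx side above: if a
 * poll is already in progress (IBD_CQ_POLLING is set), only the
 * IBD_REDO_CQ_POLLING flag is recorded and the active poller is
 * expected to go around again, so the completion event is not lost;
 * otherwise the softintr is triggered (or the recycle routine is
 * called directly when ibd_tx_softintr is disabled).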
4101 */ 4102 /* ARGSUSED */ 4103 static void 4104 ibd_scq_handler(ibt_cq_hdl_t cq_hdl, void *arg) 4105 { 4106 ibd_state_t *state = (ibd_state_t *)arg; 4107 4108 atomic_inc_64(&state->id_num_intrs); 4109 4110 if (ibd_tx_softintr == 1) { 4111 mutex_enter(&state->id_scq_poll_lock); 4112 if (state->id_scq_poll_busy & IBD_CQ_POLLING) { 4113 state->id_scq_poll_busy |= IBD_REDO_CQ_POLLING; 4114 mutex_exit(&state->id_scq_poll_lock); 4115 return; 4116 } else { 4117 mutex_exit(&state->id_scq_poll_lock); 4118 ddi_trigger_softintr(state->id_tx); 4119 } 4120 } else 4121 (void) ibd_tx_recycle((caddr_t)state); 4122 } 4123 4124 /* 4125 * Multicast group create/delete trap handler. These will be delivered 4126 * on a kernel thread (handling can thus block) and can be invoked 4127 * concurrently. The handler can be invoked anytime after it is 4128 * registered and before ibt_detach(). 4129 */ 4130 /* ARGSUSED */ 4131 static void 4132 ibd_snet_notices_handler(void *arg, ib_gid_t gid, ibt_subnet_event_code_t code, 4133 ibt_subnet_event_t *event) 4134 { 4135 ibd_state_t *state = (ibd_state_t *)arg; 4136 ibd_req_t *req; 4137 4138 /* 4139 * The trap handler will get invoked once for every event for 4140 * every port. The input "gid" is the GID0 of the port the 4141 * trap came in on; we just need to act on traps that came 4142 * to our port, meaning the port on which the ipoib interface 4143 * resides. Since ipoib uses GID0 of the port, we just match 4144 * the gids to check whether we need to handle the trap. 4145 */ 4146 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(state->id_sgid)) 4147 if (bcmp(&gid, &state->id_sgid, sizeof (ib_gid_t)) != 0) 4148 return; 4149 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(state->id_sgid)) 4150 4151 DPRINT(10, "ibd_notices_handler : %d\n", code); 4152 4153 switch (code) { 4154 case IBT_SM_EVENT_UNAVAILABLE: 4155 /* 4156 * If we are in promiscuous mode or have 4157 * sendnonmembers, we need to print a warning 4158 * message right now. Else, just store the 4159 * information, print when we enter promiscuous 4160 * mode or attempt nonmember send. We might 4161 * also want to stop caching sendnonmember. 4162 */ 4163 ibd_print_warn(state, "IBA multicast support " 4164 "degraded due to unavailability of multicast " 4165 "traps"); 4166 break; 4167 case IBT_SM_EVENT_AVAILABLE: 4168 /* 4169 * If we printed a warning message above or 4170 * while trying to nonmember send or get into 4171 * promiscuous mode, print an okay message. 4172 */ 4173 ibd_print_warn(state, "IBA multicast support " 4174 "restored due to availability of multicast " 4175 "traps"); 4176 break; 4177 case IBT_SM_EVENT_MCG_CREATED: 4178 case IBT_SM_EVENT_MCG_DELETED: 4179 /* 4180 * Common processing of creation/deletion traps. 4181 * First check if the instance is being 4182 * [de]initialized; back off then, without doing 4183 * anything more, since we are not sure if the 4184 * async thread is around, or whether we might 4185 * be racing with the detach code in ibd_m_stop() 4186 * that scans the mcg list. 
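 * If it is safe to proceed, the event code is handed to the async
 * thread by stashing it in the request's rq_ptr field; ibd_async_trap()
 * below casts it back to an ibt_subnet_event_code_t.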
4187 */ 4188 if (!ibd_async_safe(state)) 4189 return; 4190 4191 req = kmem_cache_alloc(state->id_req_kmc, KM_SLEEP); 4192 req->rq_gid = event->sm_notice_gid; 4193 req->rq_ptr = (void *)code; 4194 ibd_queue_work_slot(state, req, IBD_ASYNC_TRAP); 4195 break; 4196 } 4197 } 4198 4199 static void 4200 ibd_async_trap(ibd_state_t *state, ibd_req_t *req) 4201 { 4202 ib_gid_t mgid = req->rq_gid; 4203 ibt_subnet_event_code_t code = (ibt_subnet_event_code_t)req->rq_ptr; 4204 4205 DPRINT(10, "ibd_async_trap : %d\n", code); 4206 4207 /* 4208 * Atomically search the nonmember and sendonlymember lists and 4209 * delete. 4210 */ 4211 ibd_leave_group(state, mgid, IB_MC_JSTATE_SEND_ONLY_NON); 4212 4213 if (state->id_prom_op == IBD_OP_COMPLETED) { 4214 ibd_leave_group(state, mgid, IB_MC_JSTATE_NON); 4215 4216 /* 4217 * If in promiscuous mode, try to join/attach to the new 4218 * mcg. Given the unreliable out-of-order mode of trap 4219 * delivery, we can never be sure whether it is a problem 4220 * if the join fails. Thus, we warn the admin of a failure 4221 * if this was a creation trap. Note that the trap might 4222 * actually be reporting a long past event, and the mcg 4223 * might already have been deleted, thus we might be warning 4224 * in vain. 4225 */ 4226 if ((ibd_join_group(state, mgid, IB_MC_JSTATE_NON) == 4227 NULL) && (code == IBT_SM_EVENT_MCG_CREATED)) 4228 ibd_print_warn(state, "IBA promiscuous mode missed " 4229 "new multicast gid %016llx:%016llx", 4230 (u_longlong_t)mgid.gid_prefix, 4231 (u_longlong_t)mgid.gid_guid); 4232 } 4233 4234 /* 4235 * Free the request slot allocated by the subnet event thread. 4236 */ 4237 ibd_async_done(state); 4238 } 4239 4240 /* 4241 * GLDv3 entry point to get capabilities. 4242 */ 4243 static boolean_t 4244 ibd_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 4245 { 4246 ibd_state_t *state = arg; 4247 4248 switch (cap) { 4249 case MAC_CAPAB_HCKSUM: { 4250 uint32_t *txflags = cap_data; 4251 4252 /* 4253 * We either do full checksum or not do it at all 4254 */ 4255 if (state->id_hwcksum_capab & IBT_HCA_CKSUM_FULL) 4256 *txflags = HCK_FULLCKSUM | HCKSUM_INET_FULL_V4; 4257 else 4258 return (B_FALSE); 4259 break; 4260 } 4261 4262 case MAC_CAPAB_LSO: { 4263 mac_capab_lso_t *cap_lso = cap_data; 4264 4265 /* 4266 * In addition to the capability and policy, since LSO 4267 * relies on hw checksum, we'll not enable LSO if we 4268 * don't have hw checksum. Of course, if the HCA doesn't 4269 * provide the reserved lkey capability, enabling LSO will 4270 * actually affect performance adversely, so we'll disable 4271 * LSO even for that case. 
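 * In short, LSO is advertised to GLDv3 only when the administrative
 * policy allows it, the HCA reports the capability, full hardware
 * checksum is available and the reserved lkey is supported; failing
 * any of the checks below, we return B_FALSE for this capability.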
4272 */ 4273 if (!state->id_lso_policy || !state->id_lso_capable) 4274 return (B_FALSE); 4275 4276 if ((state->id_hwcksum_capab & IBT_HCA_CKSUM_FULL) == 0) 4277 return (B_FALSE); 4278 4279 if (state->id_hca_res_lkey_capab == 0) { 4280 ibd_print_warn(state, "no reserved-lkey capability, " 4281 "disabling LSO"); 4282 return (B_FALSE); 4283 } 4284 4285 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4; 4286 cap_lso->lso_basic_tcp_ipv4.lso_max = state->id_lso_maxlen - 1; 4287 break; 4288 } 4289 4290 default: 4291 return (B_FALSE); 4292 } 4293 4294 return (B_TRUE); 4295 } 4296 4297 static int 4298 ibd_get_port_details(ibd_state_t *state) 4299 { 4300 ibt_hca_portinfo_t *port_infop; 4301 ibt_status_t ret; 4302 uint_t psize, port_infosz; 4303 4304 mutex_enter(&state->id_link_mutex); 4305 4306 /* 4307 * Query for port information 4308 */ 4309 ret = ibt_query_hca_ports(state->id_hca_hdl, state->id_port, 4310 &port_infop, &psize, &port_infosz); 4311 if ((ret != IBT_SUCCESS) || (psize != 1)) { 4312 mutex_exit(&state->id_link_mutex); 4313 DPRINT(10, "ibd_get_port_details: ibt_query_hca_ports() " 4314 "failed, ret=%d", ret); 4315 return (ENETDOWN); 4316 } 4317 4318 /* 4319 * If the link already went down by the time we get here, 4320 * give up 4321 */ 4322 if (port_infop->p_linkstate != IBT_PORT_ACTIVE) { 4323 mutex_exit(&state->id_link_mutex); 4324 ibt_free_portinfo(port_infop, port_infosz); 4325 DPRINT(10, "ibd_get_port_details: port is not active"); 4326 return (ENETDOWN); 4327 } 4328 4329 /* 4330 * If the link is active, verify the pkey 4331 */ 4332 if ((ret = ibt_pkey2index(state->id_hca_hdl, state->id_port, 4333 state->id_pkey, &state->id_pkix)) != IBT_SUCCESS) { 4334 mutex_exit(&state->id_link_mutex); 4335 ibt_free_portinfo(port_infop, port_infosz); 4336 DPRINT(10, "ibd_get_port_details: ibt_pkey2index " 4337 "failed, ret=%d", ret); 4338 return (ENONET); 4339 } 4340 4341 state->id_mtu = (128 << port_infop->p_mtu); 4342 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(state->id_sgid)) 4343 state->id_sgid = *port_infop->p_sgid_tbl; 4344 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(state->id_sgid)) 4345 state->id_link_state = LINK_STATE_UP; 4346 4347 mutex_exit(&state->id_link_mutex); 4348 ibt_free_portinfo(port_infop, port_infosz); 4349 4350 /* 4351 * Now that the port is active, record the port speed 4352 */ 4353 state->id_link_speed = ibd_get_portspeed(state); 4354 4355 return (0); 4356 } 4357 4358 static int 4359 ibd_alloc_cqs(ibd_state_t *state) 4360 { 4361 ibt_hca_attr_t hca_attrs; 4362 ibt_cq_attr_t cq_attr; 4363 ibt_status_t ret; 4364 uint32_t real_size; 4365 4366 ret = ibt_query_hca(state->id_hca_hdl, &hca_attrs); 4367 ASSERT(ret == IBT_SUCCESS); 4368 4369 /* 4370 * Allocate Rx/combined CQ: 4371 * Theoretically, there is no point in having more than #rwqe 4372 * plus #swqe cqe's, except that the CQ will be signaled for 4373 * overflow when the last wqe completes, if none of the previous 4374 * cqe's have been polled. Thus, we allocate just a few less wqe's 4375 * to make sure such overflow does not occur. 4376 */ 4377 cq_attr.cq_sched = NULL; 4378 cq_attr.cq_flags = IBT_CQ_NO_FLAGS; 4379 4380 /* 4381 * Allocate Receive CQ. 
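 * For example (hypothetical sizes): with id_num_rwqe = 4000 and an HCA
 * limit of 16384 CQ entries the CQ is sized to 4001; with a limit of
 * only 2048 it is clamped to 2048 and id_num_rwqe is trimmed to 2047,
 * which later triggers the "Setting #rwqe" warning further down.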
4382 */ 4383 if (hca_attrs.hca_max_cq_sz >= (state->id_num_rwqe + 1)) { 4384 cq_attr.cq_size = state->id_num_rwqe + 1; 4385 } else { 4386 cq_attr.cq_size = hca_attrs.hca_max_cq_sz; 4387 state->id_num_rwqe = cq_attr.cq_size - 1; 4388 } 4389 4390 if ((ret = ibt_alloc_cq(state->id_hca_hdl, &cq_attr, 4391 &state->id_rcq_hdl, &real_size)) != IBT_SUCCESS) { 4392 DPRINT(10, "ibd_alloc_cqs: ibt_alloc_cq(rcq) " 4393 "failed, ret=%d\n", ret); 4394 return (DDI_FAILURE); 4395 } 4396 4397 if ((ret = ibt_modify_cq(state->id_rcq_hdl, 4398 ibd_rxcomp_count, ibd_rxcomp_usec, 0)) != IBT_SUCCESS) { 4399 DPRINT(10, "ibd_alloc_cqs: Receive CQ interrupt " 4400 "moderation failed, ret=%d\n", ret); 4401 } 4402 4403 /* make the #rx wc's the same as max rx chain size */ 4404 state->id_rxwcs_size = IBD_MAX_RX_MP_LEN; 4405 state->id_rxwcs = kmem_alloc(sizeof (ibt_wc_t) * 4406 state->id_rxwcs_size, KM_SLEEP); 4407 4408 /* 4409 * Allocate Send CQ. 4410 */ 4411 if (hca_attrs.hca_max_cq_sz >= (state->id_num_swqe + 1)) { 4412 cq_attr.cq_size = state->id_num_swqe + 1; 4413 } else { 4414 cq_attr.cq_size = hca_attrs.hca_max_cq_sz; 4415 state->id_num_swqe = cq_attr.cq_size - 1; 4416 } 4417 4418 if ((ret = ibt_alloc_cq(state->id_hca_hdl, &cq_attr, 4419 &state->id_scq_hdl, &real_size)) != IBT_SUCCESS) { 4420 DPRINT(10, "ibd_alloc_cqs: ibt_alloc_cq(scq) " 4421 "failed, ret=%d\n", ret); 4422 kmem_free(state->id_rxwcs, sizeof (ibt_wc_t) * 4423 state->id_rxwcs_size); 4424 (void) ibt_free_cq(state->id_rcq_hdl); 4425 return (DDI_FAILURE); 4426 } 4427 if ((ret = ibt_modify_cq(state->id_scq_hdl, 4428 ibd_txcomp_count, ibd_txcomp_usec, 0)) != IBT_SUCCESS) { 4429 DPRINT(10, "ibd_alloc_cqs: Send CQ interrupt " 4430 "moderation failed, ret=%d\n", ret); 4431 } 4432 4433 state->id_txwcs_size = IBD_TX_POLL_THRESH; 4434 state->id_txwcs = kmem_alloc(sizeof (ibt_wc_t) * 4435 state->id_txwcs_size, KM_SLEEP); 4436 4437 /* 4438 * Print message in case we could not allocate as many wqe's 4439 * as was requested. 
4440 */ 4441 if (state->id_num_rwqe != IBD_NUM_RWQE) { 4442 ibd_print_warn(state, "Setting #rwqe = %d instead of default " 4443 "%d", state->id_num_rwqe, IBD_NUM_RWQE); 4444 } 4445 if (state->id_num_swqe != IBD_NUM_SWQE) { 4446 ibd_print_warn(state, "Setting #swqe = %d instead of default " 4447 "%d", state->id_num_swqe, IBD_NUM_SWQE); 4448 } 4449 4450 return (DDI_SUCCESS); 4451 } 4452 4453 static int 4454 ibd_setup_ud_channel(ibd_state_t *state) 4455 { 4456 ibt_ud_chan_alloc_args_t ud_alloc_attr; 4457 ibt_ud_chan_query_attr_t ud_chan_attr; 4458 ibt_status_t ret; 4459 4460 ud_alloc_attr.ud_flags = IBT_ALL_SIGNALED; 4461 if (state->id_hca_res_lkey_capab) 4462 ud_alloc_attr.ud_flags |= IBT_FAST_REG_RES_LKEY; 4463 if (state->id_lso_policy && state->id_lso_capable) 4464 ud_alloc_attr.ud_flags |= IBT_USES_LSO; 4465 4466 ud_alloc_attr.ud_hca_port_num = state->id_port; 4467 ud_alloc_attr.ud_sizes.cs_sq_sgl = state->id_max_sqseg; 4468 ud_alloc_attr.ud_sizes.cs_rq_sgl = IBD_MAX_RQSEG; 4469 ud_alloc_attr.ud_sizes.cs_sq = state->id_num_swqe; 4470 ud_alloc_attr.ud_sizes.cs_rq = state->id_num_rwqe; 4471 ud_alloc_attr.ud_qkey = state->id_mcinfo->mc_qkey; 4472 ud_alloc_attr.ud_scq = state->id_scq_hdl; 4473 ud_alloc_attr.ud_rcq = state->id_rcq_hdl; 4474 ud_alloc_attr.ud_pd = state->id_pd_hdl; 4475 ud_alloc_attr.ud_pkey_ix = state->id_pkix; 4476 ud_alloc_attr.ud_clone_chan = NULL; 4477 4478 if ((ret = ibt_alloc_ud_channel(state->id_hca_hdl, IBT_ACHAN_NO_FLAGS, 4479 &ud_alloc_attr, &state->id_chnl_hdl, NULL)) != IBT_SUCCESS) { 4480 DPRINT(10, "ibd_setup_ud_channel: ibt_alloc_ud_channel() " 4481 "failed, ret=%d\n", ret); 4482 return (DDI_FAILURE); 4483 } 4484 4485 if ((ret = ibt_query_ud_channel(state->id_chnl_hdl, 4486 &ud_chan_attr)) != IBT_SUCCESS) { 4487 DPRINT(10, "ibd_setup_ud_channel: ibt_query_ud_channel() " 4488 "failed, ret=%d\n", ret); 4489 (void) ibt_free_channel(state->id_chnl_hdl); 4490 return (DDI_FAILURE); 4491 } 4492 4493 state->id_qpnum = ud_chan_attr.ud_qpn; 4494 4495 return (DDI_SUCCESS); 4496 } 4497 4498 static int 4499 ibd_undo_start(ibd_state_t *state, link_state_t cur_link_state) 4500 { 4501 uint32_t progress = state->id_mac_state; 4502 uint_t attempts; 4503 ibt_status_t ret; 4504 ib_gid_t mgid; 4505 ibd_mce_t *mce; 4506 uint8_t jstate; 4507 4508 if (atomic_dec_32_nv(&state->id_running) != 0) 4509 cmn_err(CE_WARN, "ibd_undo_start: id_running was not 1\n"); 4510 4511 /* 4512 * Before we try to stop/undo whatever we did in ibd_start(), 4513 * we need to mark the link state appropriately to prevent the 4514 * ip layer from using this instance for any new transfers. Note 4515 * that if the original state of the link was "up" when we're 4516 * here, we'll set the final link state to "unknown", to behave 4517 * in the same fashion as other ethernet drivers. 
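 * The rest of this routine then unwinds ibd_start() piece by piece,
 * keyed off the IBD_DRV_* progress bits recorded in id_mac_state as
 * each start step completed, so a partially completed start can be
 * undone by the same code.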
4518 */ 4519 mutex_enter(&state->id_link_mutex); 4520 if (cur_link_state == LINK_STATE_DOWN) { 4521 state->id_link_state = cur_link_state; 4522 } else { 4523 state->id_link_state = LINK_STATE_UNKNOWN; 4524 } 4525 mutex_exit(&state->id_link_mutex); 4526 mac_link_update(state->id_mh, state->id_link_state); 4527 4528 state->id_mac_state &= (~IBD_DRV_PORT_DETAILS_OBTAINED); 4529 if (progress & IBD_DRV_STARTED) { 4530 state->id_mac_state &= (~IBD_DRV_STARTED); 4531 } 4532 4533 /* Stop listen under Reliable Connected Mode */ 4534 if (progress & IBD_DRV_RC_LISTEN) { 4535 ASSERT(state->id_enable_rc); 4536 if (state->rc_listen_hdl != NULL) { 4537 ibd_rc_stop_listen(state); 4538 } 4539 state->id_mac_state &= (~IBD_DRV_RC_LISTEN); 4540 } 4541 4542 if ((state->id_enable_rc) && (progress & IBD_DRV_ACACHE_INITIALIZED)) { 4543 (void) ibd_rc_close_all_chan(state); 4544 } 4545 4546 /* 4547 * First, stop receive interrupts; this stops the driver from 4548 * handing up buffers to higher layers. Wait for receive buffers 4549 * to be returned and give up after 1 second. 4550 */ 4551 if (progress & IBD_DRV_RCQ_NOTIFY_ENABLED) { 4552 attempts = 10; 4553 while (atomic_add_32_nv(&state->id_rx_list.dl_bufs_outstanding, 4554 0) > 0) { 4555 delay(drv_usectohz(100000)); 4556 if (--attempts == 0) { 4557 /* 4558 * There are pending bufs with the network 4559 * layer and we have no choice but to wait 4560 * for them to be done with. Reap all the 4561 * Tx/Rx completions that were posted since 4562 * we turned off the notification and 4563 * return failure. 4564 */ 4565 cmn_err(CE_CONT, "!ibd: bufs outstanding\n"); 4566 DPRINT(2, "ibd_undo_start: " 4567 "reclaiming failed"); 4568 break; 4569 } 4570 } 4571 state->id_mac_state &= (~IBD_DRV_RCQ_NOTIFY_ENABLED); 4572 } 4573 4574 if (progress & IBD_DRV_RC_LARGEBUF_ALLOCD) { 4575 ibd_rc_fini_tx_largebuf_list(state); 4576 state->id_mac_state &= (~IBD_DRV_RC_LARGEBUF_ALLOCD); 4577 } 4578 4579 if (progress & IBD_DRV_RC_SRQ_ALLOCD) { 4580 ASSERT(state->id_enable_rc); 4581 if (state->rc_srq_rwqe_list.dl_bufs_outstanding == 0) { 4582 ibd_rc_fini_srq_list(state); 4583 state->id_mac_state &= (~IBD_DRV_RC_SRQ_ALLOCD); 4584 } else { 4585 cmn_err(CE_CONT, "ibd_undo_start: srq bufs " 4586 "outstanding\n"); 4587 } 4588 } 4589 4590 if (progress & IBD_DRV_SM_NOTICES_REGISTERED) { 4591 ibt_register_subnet_notices(state->id_ibt_hdl, NULL, NULL); 4592 4593 mutex_enter(&state->id_trap_lock); 4594 state->id_trap_stop = B_TRUE; 4595 while (state->id_trap_inprog > 0) 4596 cv_wait(&state->id_trap_cv, &state->id_trap_lock); 4597 mutex_exit(&state->id_trap_lock); 4598 4599 state->id_mac_state &= (~IBD_DRV_SM_NOTICES_REGISTERED); 4600 } 4601 4602 if (progress & IBD_DRV_SCQ_NOTIFY_ENABLED) { 4603 /* 4604 * Flushing the channel ensures that all pending WQE's 4605 * are marked with flush_error and handed to the CQ. It 4606 * does not guarantee the invocation of the CQ handler. 4607 * This call is guaranteed to return successfully for 4608 * UD QPNs. 4609 */ 4610 if ((ret = ibt_flush_channel(state->id_chnl_hdl)) != 4611 IBT_SUCCESS) { 4612 DPRINT(10, "ibd_undo_start: flush_channel " 4613 "failed, ret=%d", ret); 4614 } 4615 4616 /* 4617 * Give some time for the TX CQ handler to process the 4618 * completions. 
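 * (The loop below retries up to ten times with a 100ms delay each,
 * i.e. roughly one second, the same bound used for the receive
 * buffers above.)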
4619 */ 4620 mutex_enter(&state->id_tx_list.dl_mutex); 4621 mutex_enter(&state->id_tx_rel_list.dl_mutex); 4622 attempts = 10; 4623 while (state->id_tx_list.dl_cnt + state->id_tx_rel_list.dl_cnt 4624 != state->id_num_swqe) { 4625 if (--attempts == 0) 4626 break; 4627 mutex_exit(&state->id_tx_rel_list.dl_mutex); 4628 mutex_exit(&state->id_tx_list.dl_mutex); 4629 delay(drv_usectohz(100000)); 4630 mutex_enter(&state->id_tx_list.dl_mutex); 4631 mutex_enter(&state->id_tx_rel_list.dl_mutex); 4632 } 4633 ibt_set_cq_handler(state->id_scq_hdl, 0, 0); 4634 if (state->id_tx_list.dl_cnt + state->id_tx_rel_list.dl_cnt != 4635 state->id_num_swqe) { 4636 cmn_err(CE_WARN, "tx resources not freed\n"); 4637 } 4638 mutex_exit(&state->id_tx_rel_list.dl_mutex); 4639 mutex_exit(&state->id_tx_list.dl_mutex); 4640 4641 attempts = 10; 4642 while (atomic_add_32_nv(&state->id_rx_list.dl_cnt, 0) != 0) { 4643 if (--attempts == 0) 4644 break; 4645 delay(drv_usectohz(100000)); 4646 } 4647 ibt_set_cq_handler(state->id_rcq_hdl, 0, 0); 4648 if (atomic_add_32_nv(&state->id_rx_list.dl_cnt, 0) != 0) { 4649 cmn_err(CE_WARN, "rx resources not freed\n"); 4650 } 4651 4652 state->id_mac_state &= (~IBD_DRV_SCQ_NOTIFY_ENABLED); 4653 } 4654 4655 if (progress & IBD_DRV_ASYNC_THR_CREATED) { 4656 /* 4657 * No new async requests will be posted since the device 4658 * link state has been marked as unknown; completion handlers 4659 * have been turned off, so Tx handler will not cause any 4660 * more IBD_ASYNC_REAP requests. 4661 * 4662 * Queue a request for the async thread to exit, which will 4663 * be serviced after any pending ones. This can take a while, 4664 * specially if the SM is unreachable, since IBMF will slowly 4665 * timeout each SM request issued by the async thread. Reap 4666 * the thread before continuing on, we do not want it to be 4667 * lingering in modunloaded code (or we could move the reap 4668 * to ibd_detach(), provided we keep track of the current 4669 * id_async_thrid somewhere safe). 4670 */ 4671 ibd_queue_work_slot(state, &state->id_ah_req, IBD_ASYNC_EXIT); 4672 thread_join(state->id_async_thrid); 4673 4674 state->id_mac_state &= (~IBD_DRV_ASYNC_THR_CREATED); 4675 } 4676 4677 if (progress & IBD_DRV_BCAST_GROUP_JOINED) { 4678 /* 4679 * Drop all residual full/non membership. This includes full 4680 * membership to the broadcast group, and any nonmembership 4681 * acquired during transmits. We do this after the Tx completion 4682 * handlers are done, since those might result in some late 4683 * leaves; this also eliminates a potential race with that 4684 * path wrt the mc full list insert/delete. Trap handling 4685 * has also been suppressed at this point. Thus, no locks 4686 * are required while traversing the mc full list. 
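 * The walk below copies each entry's mgid and jstate and steps to the
 * next element before calling ibd_leave_group(), since the leave can
 * tear down the entry that is currently being looked at.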
4687 */ 4688 DPRINT(2, "ibd_undo_start: clear full cache entries"); 4689 mce = list_head(&state->id_mc_full); 4690 while (mce != NULL) { 4691 mgid = mce->mc_info.mc_adds_vect.av_dgid; 4692 jstate = mce->mc_jstate; 4693 mce = list_next(&state->id_mc_full, mce); 4694 ibd_leave_group(state, mgid, jstate); 4695 } 4696 state->id_mac_state &= (~IBD_DRV_BCAST_GROUP_JOINED); 4697 } 4698 4699 if (progress & IBD_DRV_RXLIST_ALLOCD) { 4700 ibd_fini_rxlist(state); 4701 state->id_mac_state &= (~IBD_DRV_RXLIST_ALLOCD); 4702 } 4703 4704 if (progress & IBD_DRV_TXLIST_ALLOCD) { 4705 ibd_fini_txlist(state); 4706 state->id_mac_state &= (~IBD_DRV_TXLIST_ALLOCD); 4707 } 4708 4709 if (progress & IBD_DRV_UD_CHANNEL_SETUP) { 4710 if ((ret = ibt_free_channel(state->id_chnl_hdl)) != 4711 IBT_SUCCESS) { 4712 DPRINT(10, "ibd_undo_start: free_channel " 4713 "failed, ret=%d", ret); 4714 } 4715 4716 state->id_mac_state &= (~IBD_DRV_UD_CHANNEL_SETUP); 4717 } 4718 4719 if (progress & IBD_DRV_CQS_ALLOCD) { 4720 kmem_free(state->id_txwcs, 4721 sizeof (ibt_wc_t) * state->id_txwcs_size); 4722 if ((ret = ibt_free_cq(state->id_scq_hdl)) != 4723 IBT_SUCCESS) { 4724 DPRINT(10, "ibd_undo_start: free_cq(scq) " 4725 "failed, ret=%d", ret); 4726 } 4727 4728 kmem_free(state->id_rxwcs, 4729 sizeof (ibt_wc_t) * state->id_rxwcs_size); 4730 if ((ret = ibt_free_cq(state->id_rcq_hdl)) != IBT_SUCCESS) { 4731 DPRINT(10, "ibd_undo_start: free_cq(rcq) failed, " 4732 "ret=%d", ret); 4733 } 4734 4735 state->id_txwcs = NULL; 4736 state->id_rxwcs = NULL; 4737 state->id_scq_hdl = NULL; 4738 state->id_rcq_hdl = NULL; 4739 4740 state->id_mac_state &= (~IBD_DRV_CQS_ALLOCD); 4741 } 4742 4743 if (progress & IBD_DRV_ACACHE_INITIALIZED) { 4744 mutex_enter(&state->id_ac_mutex); 4745 mod_hash_destroy_hash(state->id_ah_active_hash); 4746 mutex_exit(&state->id_ac_mutex); 4747 ibd_acache_fini(state); 4748 4749 state->id_mac_state &= (~IBD_DRV_ACACHE_INITIALIZED); 4750 } 4751 4752 if (progress & IBD_DRV_BCAST_GROUP_FOUND) { 4753 /* 4754 * If we'd created the ipoib broadcast group and had 4755 * successfully joined it, leave it now 4756 */ 4757 if (state->id_bgroup_created) { 4758 mgid = state->id_mcinfo->mc_adds_vect.av_dgid; 4759 jstate = IB_MC_JSTATE_FULL; 4760 (void) ibt_leave_mcg(state->id_sgid, mgid, 4761 state->id_sgid, jstate); 4762 } 4763 ibt_free_mcg_info(state->id_mcinfo, 1); 4764 4765 state->id_mac_state &= (~IBD_DRV_BCAST_GROUP_FOUND); 4766 } 4767 4768 return (DDI_SUCCESS); 4769 } 4770 4771 /* 4772 * These pair of routines are used to set/clear the condition that 4773 * the caller is likely to do something to change the id_mac_state. 4774 * If there's already someone doing either a start or a stop (possibly 4775 * due to the async handler detecting a pkey relocation event, a plumb 4776 * or dlpi_open, or an unplumb or dlpi_close coming in), we wait until 4777 * that's done. 4778 */ 4779 static void 4780 ibd_set_mac_progress(ibd_state_t *state, uint_t flag) 4781 { 4782 mutex_enter(&state->id_macst_lock); 4783 while (state->id_mac_state & IBD_DRV_RESTART_IN_PROGRESS) 4784 cv_wait(&state->id_macst_cv, &state->id_macst_lock); 4785 4786 state->id_mac_state |= flag; 4787 mutex_exit(&state->id_macst_lock); 4788 } 4789 4790 static void 4791 ibd_clr_mac_progress(ibd_state_t *state, uint_t flag) 4792 { 4793 mutex_enter(&state->id_macst_lock); 4794 state->id_mac_state &= (~flag); 4795 cv_signal(&state->id_macst_cv); 4796 mutex_exit(&state->id_macst_lock); 4797 } 4798 4799 /* 4800 * GLDv3 entry point to start hardware. 
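 * The start is bracketed by ibd_set_mac_progress() and
 * ibd_clr_mac_progress() so that it serializes with any stop or
 * restart that may be running concurrently.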
4801 */ 4802 /*ARGSUSED*/ 4803 static int 4804 ibd_m_start(void *arg) 4805 { 4806 ibd_state_t *state = arg; 4807 int ret; 4808 4809 ibd_set_mac_progress(state, IBD_DRV_START_IN_PROGRESS); 4810 4811 ret = ibd_start(state); 4812 4813 ibd_clr_mac_progress(state, IBD_DRV_START_IN_PROGRESS); 4814 4815 return (ret); 4816 } 4817 4818 static int 4819 ibd_start(ibd_state_t *state) 4820 { 4821 kthread_t *kht; 4822 int err; 4823 ibt_status_t ret; 4824 4825 if (state->id_mac_state & IBD_DRV_STARTED) 4826 return (DDI_SUCCESS); 4827 4828 if (atomic_inc_32_nv(&state->id_running) != 1) { 4829 DPRINT(10, "ibd_start: id_running is non-zero"); 4830 cmn_err(CE_WARN, "ibd_start: id_running was not 0\n"); 4831 atomic_dec_32(&state->id_running); 4832 return (EINVAL); 4833 } 4834 4835 /* 4836 * Get port details; if we fail here, very likely the port 4837 * state is inactive or the pkey can't be verified. 4838 */ 4839 if ((err = ibd_get_port_details(state)) != 0) { 4840 DPRINT(10, "ibd_start: ibd_get_port_details() failed"); 4841 goto start_fail; 4842 } 4843 state->id_mac_state |= IBD_DRV_PORT_DETAILS_OBTAINED; 4844 4845 /* 4846 * Find the IPoIB broadcast group 4847 */ 4848 if (ibd_find_bgroup(state) != IBT_SUCCESS) { 4849 DPRINT(10, "ibd_start: ibd_find_bgroup() failed"); 4850 err = ENOTACTIVE; 4851 goto start_fail; 4852 } 4853 state->id_mac_state |= IBD_DRV_BCAST_GROUP_FOUND; 4854 4855 /* 4856 * Initialize per-interface caches and lists; if we fail here, 4857 * it is most likely due to a lack of resources 4858 */ 4859 if (ibd_acache_init(state) != DDI_SUCCESS) { 4860 DPRINT(10, "ibd_start: ibd_acache_init() failed"); 4861 err = ENOMEM; 4862 goto start_fail; 4863 } 4864 state->id_mac_state |= IBD_DRV_ACACHE_INITIALIZED; 4865 4866 /* 4867 * Allocate send and receive completion queues 4868 */ 4869 if (ibd_alloc_cqs(state) != DDI_SUCCESS) { 4870 DPRINT(10, "ibd_start: ibd_alloc_cqs() failed"); 4871 err = ENOMEM; 4872 goto start_fail; 4873 } 4874 state->id_mac_state |= IBD_DRV_CQS_ALLOCD; 4875 4876 /* 4877 * Setup a UD channel 4878 */ 4879 if (ibd_setup_ud_channel(state) != DDI_SUCCESS) { 4880 err = ENOMEM; 4881 DPRINT(10, "ibd_start: ibd_setup_ud_channel() failed"); 4882 goto start_fail; 4883 } 4884 state->id_mac_state |= IBD_DRV_UD_CHANNEL_SETUP; 4885 4886 /* 4887 * Allocate and initialize the tx buffer list 4888 */ 4889 if (ibd_init_txlist(state) != DDI_SUCCESS) { 4890 DPRINT(10, "ibd_start: ibd_init_txlist() failed"); 4891 err = ENOMEM; 4892 goto start_fail; 4893 } 4894 state->id_mac_state |= IBD_DRV_TXLIST_ALLOCD; 4895 4896 /* 4897 * Create the send cq handler here 4898 */ 4899 ibt_set_cq_handler(state->id_scq_hdl, ibd_scq_handler, state); 4900 if ((ret = ibt_enable_cq_notify(state->id_scq_hdl, 4901 IBT_NEXT_COMPLETION)) != IBT_SUCCESS) { 4902 DPRINT(10, "ibd_start: ibt_enable_cq_notify(scq) " 4903 "failed, ret=%d", ret); 4904 err = EINVAL; 4905 goto start_fail; 4906 } 4907 state->id_mac_state |= IBD_DRV_SCQ_NOTIFY_ENABLED; 4908 4909 /* 4910 * Allocate and initialize the rx buffer list 4911 */ 4912 if (ibd_init_rxlist(state) != DDI_SUCCESS) { 4913 DPRINT(10, "ibd_start: ibd_init_rxlist() failed"); 4914 err = ENOMEM; 4915 goto start_fail; 4916 } 4917 state->id_mac_state |= IBD_DRV_RXLIST_ALLOCD; 4918 4919 /* 4920 * Join IPoIB broadcast group 4921 */ 4922 if (ibd_join_group(state, state->id_mgid, IB_MC_JSTATE_FULL) == NULL) { 4923 DPRINT(10, "ibd_start: ibd_join_group() failed"); 4924 err = ENOTACTIVE; 4925 goto start_fail; 4926 } 4927 state->id_mac_state |= IBD_DRV_BCAST_GROUP_JOINED; 4928 4929 /* 4930 * Create 
the async thread; thread_create never fails. 4931 */ 4932 kht = thread_create(NULL, 0, ibd_async_work, state, 0, &p0, 4933 TS_RUN, minclsyspri); 4934 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(state->id_async_thrid)) 4935 state->id_async_thrid = kht->t_did; 4936 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(state->id_async_thrid)) 4937 state->id_mac_state |= IBD_DRV_ASYNC_THR_CREATED; 4938 4939 /* 4940 * When we did mac_register() in ibd_attach(), we didn't register 4941 * the real macaddr and we didn't have the true port mtu. Now that 4942 * we're almost ready, set the local mac address and broadcast 4943 * addresses and update gldv3 about the real values of these 4944 * parameters. 4945 */ 4946 if (state->id_enable_rc) { 4947 ibd_h2n_mac(&state->id_macaddr, 4948 IBD_MAC_ADDR_RC + state->id_qpnum, 4949 state->id_sgid.gid_prefix, state->id_sgid.gid_guid); 4950 ibd_h2n_mac(&state->rc_macaddr_loopback, state->id_qpnum, 4951 state->id_sgid.gid_prefix, state->id_sgid.gid_guid); 4952 } else { 4953 ibd_h2n_mac(&state->id_macaddr, state->id_qpnum, 4954 state->id_sgid.gid_prefix, state->id_sgid.gid_guid); 4955 } 4956 ibd_h2n_mac(&state->id_bcaddr, IB_QPN_MASK, 4957 state->id_mgid.gid_prefix, state->id_mgid.gid_guid); 4958 4959 if (!state->id_enable_rc) { 4960 (void) mac_maxsdu_update(state->id_mh, state->id_mtu 4961 - IPOIB_HDRSIZE); 4962 } 4963 mac_unicst_update(state->id_mh, (uint8_t *)&state->id_macaddr); 4964 4965 /* 4966 * Setup the receive cq handler 4967 */ 4968 ibt_set_cq_handler(state->id_rcq_hdl, ibd_rcq_handler, state); 4969 if ((ret = ibt_enable_cq_notify(state->id_rcq_hdl, 4970 IBT_NEXT_COMPLETION)) != IBT_SUCCESS) { 4971 DPRINT(10, "ibd_start: ibt_enable_cq_notify(rcq) " 4972 "failed, ret=%d", ret); 4973 err = EINVAL; 4974 goto start_fail; 4975 } 4976 state->id_mac_state |= IBD_DRV_RCQ_NOTIFY_ENABLED; 4977 4978 /* 4979 * Setup the subnet notices handler after we've initialized the acache/ 4980 * mcache and started the async thread, both of which are required for 4981 * the trap handler to function properly. 4982 * 4983 * Now that the async thread has been started (and we've already done 4984 * a mac_register() during attach so mac_tx_update() can be called 4985 * if necessary without any problem), we can enable the trap handler 4986 * to queue requests to the async thread. 
4987 */ 4988 ibt_register_subnet_notices(state->id_ibt_hdl, 4989 ibd_snet_notices_handler, state); 4990 mutex_enter(&state->id_trap_lock); 4991 state->id_trap_stop = B_FALSE; 4992 mutex_exit(&state->id_trap_lock); 4993 state->id_mac_state |= IBD_DRV_SM_NOTICES_REGISTERED; 4994 4995 if (state->id_enable_rc) { 4996 if (state->rc_enable_srq) { 4997 if (state->id_mac_state & IBD_DRV_RC_SRQ_ALLOCD) { 4998 if (ibd_rc_repost_srq_free_list(state) != 4999 IBT_SUCCESS) { 5000 err = ENOMEM; 5001 goto start_fail; 5002 } 5003 } else { 5004 /* Allocate SRQ resource */ 5005 if (ibd_rc_init_srq_list(state) != 5006 IBT_SUCCESS) { 5007 err = ENOMEM; 5008 goto start_fail; 5009 } 5010 state->id_mac_state |= IBD_DRV_RC_SRQ_ALLOCD; 5011 } 5012 } 5013 5014 if (ibd_rc_init_tx_largebuf_list(state) != IBT_SUCCESS) { 5015 DPRINT(10, "ibd_start: ibd_rc_init_tx_largebuf_list() " 5016 "failed"); 5017 err = ENOMEM; 5018 goto start_fail; 5019 } 5020 state->id_mac_state |= IBD_DRV_RC_LARGEBUF_ALLOCD; 5021 5022 /* RC: begin to listen only after everything is available */ 5023 if (ibd_rc_listen(state) != IBT_SUCCESS) { 5024 DPRINT(10, "ibd_start: ibd_rc_listen() failed"); 5025 err = EINVAL; 5026 goto start_fail; 5027 } 5028 state->id_mac_state |= IBD_DRV_RC_LISTEN; 5029 } 5030 5031 /* 5032 * Indicate link status to GLDv3 and higher layers. By default, 5033 * we assume we are in up state (which must have been true at 5034 * least at the time the broadcast mcg's were probed); if there 5035 * were any up/down transitions till the time we come here, the 5036 * async handler will have updated last known state, which we 5037 * use to tell GLDv3. The async handler will not send any 5038 * notifications to GLDv3 till we reach here in the initialization 5039 * sequence. 5040 */ 5041 state->id_mac_state |= IBD_DRV_STARTED; 5042 mac_link_update(state->id_mh, state->id_link_state); 5043 5044 return (DDI_SUCCESS); 5045 5046 start_fail: 5047 /* 5048 * If we ran into a problem during ibd_start() and ran into 5049 * some other problem during undoing our partial work, we can't 5050 * do anything about it. Ignore any errors we might get from 5051 * ibd_undo_start() and just return the original error we got. 5052 */ 5053 (void) ibd_undo_start(state, LINK_STATE_DOWN); 5054 return (err); 5055 } 5056 5057 /* 5058 * GLDv3 entry point to stop hardware from receiving packets. 5059 */ 5060 /*ARGSUSED*/ 5061 static void 5062 ibd_m_stop(void *arg) 5063 { 5064 ibd_state_t *state = (ibd_state_t *)arg; 5065 5066 ibd_set_mac_progress(state, IBD_DRV_STOP_IN_PROGRESS); 5067 5068 (void) ibd_undo_start(state, state->id_link_state); 5069 5070 ibd_clr_mac_progress(state, IBD_DRV_STOP_IN_PROGRESS); 5071 } 5072 5073 /* 5074 * GLDv3 entry point to modify device's mac address. We do not 5075 * allow address modifications. 5076 */ 5077 static int 5078 ibd_m_unicst(void *arg, const uint8_t *macaddr) 5079 { 5080 ibd_state_t *state = arg; 5081 5082 /* 5083 * Don't bother even comparing the macaddr if we haven't 5084 * completed ibd_m_start(). 5085 */ 5086 if ((state->id_mac_state & IBD_DRV_STARTED) == 0) 5087 return (0); 5088 5089 if (bcmp(macaddr, &state->id_macaddr, IPOIB_ADDRL) == 0) 5090 return (0); 5091 else 5092 return (EINVAL); 5093 } 5094 5095 /* 5096 * The blocking part of the IBA join/leave operations are done out 5097 * of here on the async thread. 
5098 */ 5099 static void 5100 ibd_async_multicast(ibd_state_t *state, ib_gid_t mgid, int op) 5101 { 5102 DPRINT(3, "ibd_async_multicast : async_setmc op %d :" 5103 "%016llx:%016llx\n", op, mgid.gid_prefix, mgid.gid_guid); 5104 5105 if (op == IBD_ASYNC_JOIN) { 5106 if (ibd_join_group(state, mgid, IB_MC_JSTATE_FULL) == NULL) { 5107 ibd_print_warn(state, "Join multicast group failed :" 5108 "%016llx:%016llx", mgid.gid_prefix, mgid.gid_guid); 5109 } 5110 } else { 5111 /* 5112 * Here, we must search for the proper mcg_info and 5113 * use that to leave the group. 5114 */ 5115 ibd_leave_group(state, mgid, IB_MC_JSTATE_FULL); 5116 } 5117 } 5118 5119 /* 5120 * GLDv3 entry point for multicast enable/disable requests. 5121 * This function queues the operation to the async thread and 5122 * return success for a valid multicast address. 5123 */ 5124 static int 5125 ibd_m_multicst(void *arg, boolean_t add, const uint8_t *mcmac) 5126 { 5127 ibd_state_t *state = (ibd_state_t *)arg; 5128 ipoib_mac_t maddr, *mcast; 5129 ib_gid_t mgid; 5130 ibd_req_t *req; 5131 5132 /* 5133 * If we haven't completed ibd_m_start(), async thread wouldn't 5134 * have been started and id_bcaddr wouldn't be set, so there's 5135 * no point in continuing. 5136 */ 5137 if ((state->id_mac_state & IBD_DRV_STARTED) == 0) 5138 return (0); 5139 5140 /* 5141 * The incoming multicast address might not be aligned properly 5142 * on a 4 byte boundary to be considered an ipoib_mac_t. We force 5143 * it to look like one though, to get the offsets of the mc gid, 5144 * since we know we are not going to dereference any values with 5145 * the ipoib_mac_t pointer. 5146 */ 5147 bcopy(mcmac, &maddr, sizeof (ipoib_mac_t)); 5148 mcast = &maddr; 5149 5150 /* 5151 * Check validity of MCG address. We could additionally check 5152 * that a enable/disable is not being issued on the "broadcast" 5153 * mcg, but since this operation is only invokable by privileged 5154 * programs anyway, we allow the flexibility to those dlpi apps. 5155 * Note that we do not validate the "scope" of the IBA mcg. 5156 */ 5157 if ((ntohl(mcast->ipoib_qpn) & IB_QPN_MASK) != IB_MC_QPN) 5158 return (EINVAL); 5159 5160 /* 5161 * fill in multicast pkey and scope 5162 */ 5163 IBD_FILL_SCOPE_PKEY(mcast, state->id_scope, state->id_pkey); 5164 5165 /* 5166 * If someone is trying to JOIN/LEAVE the broadcast group, we do 5167 * nothing (i.e. we stay JOINed to the broadcast group done in 5168 * ibd_m_start()), to mimic ethernet behavior. IPv4 specifically 5169 * requires to be joined to broadcast groups at all times. 5170 * ibd_join_group() has an ASSERT(omce->mc_fullreap) that also 5171 * depends on this. 5172 */ 5173 if (bcmp(mcast, &state->id_bcaddr, IPOIB_ADDRL) == 0) 5174 return (0); 5175 5176 ibd_n2h_gid(mcast, &mgid); 5177 req = kmem_cache_alloc(state->id_req_kmc, KM_NOSLEEP); 5178 if (req == NULL) 5179 return (ENOMEM); 5180 5181 req->rq_gid = mgid; 5182 5183 if (add) { 5184 DPRINT(1, "ibd_m_multicst : %016llx:%016llx\n", 5185 mgid.gid_prefix, mgid.gid_guid); 5186 ibd_queue_work_slot(state, req, IBD_ASYNC_JOIN); 5187 } else { 5188 DPRINT(1, "ibd_m_multicst : unset_multicast : " 5189 "%016llx:%016llx", mgid.gid_prefix, mgid.gid_guid); 5190 ibd_queue_work_slot(state, req, IBD_ASYNC_LEAVE); 5191 } 5192 return (0); 5193 } 5194 5195 /* 5196 * The blocking part of the IBA promiscuous operations are done 5197 * out of here on the async thread. The dlpireq parameter indicates 5198 * whether this invocation is due to a dlpi request or due to 5199 * a port up/down event. 
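 * (In the current code these routines take only the state pointer;
 * the dlpi-request versus port-event distinction mentioned above is
 * not passed in as an argument.)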
5200 */ 5201 static void 5202 ibd_async_unsetprom(ibd_state_t *state) 5203 { 5204 ibd_mce_t *mce = list_head(&state->id_mc_non); 5205 ib_gid_t mgid; 5206 5207 DPRINT(2, "ibd_async_unsetprom : async_unset_promisc"); 5208 5209 while (mce != NULL) { 5210 mgid = mce->mc_info.mc_adds_vect.av_dgid; 5211 mce = list_next(&state->id_mc_non, mce); 5212 ibd_leave_group(state, mgid, IB_MC_JSTATE_NON); 5213 } 5214 state->id_prom_op = IBD_OP_NOTSTARTED; 5215 } 5216 5217 /* 5218 * The blocking part of the IBA promiscuous operations are done 5219 * out of here on the async thread. The dlpireq parameter indicates 5220 * whether this invocation is due to a dlpi request or due to 5221 * a port up/down event. 5222 */ 5223 static void 5224 ibd_async_setprom(ibd_state_t *state) 5225 { 5226 ibt_mcg_attr_t mcg_attr; 5227 ibt_mcg_info_t *mcg_info; 5228 ib_gid_t mgid; 5229 uint_t numg; 5230 int i; 5231 char ret = IBD_OP_COMPLETED; 5232 5233 DPRINT(2, "ibd_async_setprom : async_set_promisc"); 5234 5235 /* 5236 * Obtain all active MC groups on the IB fabric with 5237 * specified criteria (scope + Pkey + Qkey + mtu). 5238 */ 5239 bzero(&mcg_attr, sizeof (mcg_attr)); 5240 mcg_attr.mc_pkey = state->id_pkey; 5241 mcg_attr.mc_scope = state->id_scope; 5242 mcg_attr.mc_qkey = state->id_mcinfo->mc_qkey; 5243 mcg_attr.mc_mtu_req.r_mtu = state->id_mcinfo->mc_mtu; 5244 mcg_attr.mc_mtu_req.r_selector = IBT_EQU; 5245 if (ibt_query_mcg(state->id_sgid, &mcg_attr, 0, &mcg_info, &numg) != 5246 IBT_SUCCESS) { 5247 ibd_print_warn(state, "Could not get list of IBA multicast " 5248 "groups"); 5249 ret = IBD_OP_ERRORED; 5250 goto done; 5251 } 5252 5253 /* 5254 * Iterate over the returned mcg's and join as NonMember 5255 * to the IP mcg's. 5256 */ 5257 for (i = 0; i < numg; i++) { 5258 /* 5259 * Do a NonMember JOIN on the MC group. 5260 */ 5261 mgid = mcg_info[i].mc_adds_vect.av_dgid; 5262 if (ibd_join_group(state, mgid, IB_MC_JSTATE_NON) == NULL) 5263 ibd_print_warn(state, "IBA promiscuous mode missed " 5264 "multicast gid %016llx:%016llx", 5265 (u_longlong_t)mgid.gid_prefix, 5266 (u_longlong_t)mgid.gid_guid); 5267 } 5268 5269 ibt_free_mcg_info(mcg_info, numg); 5270 DPRINT(4, "ibd_async_setprom : async_set_promisc completes"); 5271 done: 5272 state->id_prom_op = ret; 5273 } 5274 5275 /* 5276 * GLDv3 entry point for multicast promiscuous enable/disable requests. 5277 * GLDv3 assumes phys state receives more packets than multi state, 5278 * which is not true for IPoIB. Thus, treat the multi and phys 5279 * promiscuous states the same way to work with GLDv3's assumption. 5280 */ 5281 static int 5282 ibd_m_promisc(void *arg, boolean_t on) 5283 { 5284 ibd_state_t *state = (ibd_state_t *)arg; 5285 ibd_req_t *req; 5286 5287 /* 5288 * Async thread wouldn't have been started if we haven't 5289 * passed ibd_m_start() 5290 */ 5291 if ((state->id_mac_state & IBD_DRV_STARTED) == 0) 5292 return (0); 5293 5294 req = kmem_cache_alloc(state->id_req_kmc, KM_NOSLEEP); 5295 if (req == NULL) 5296 return (ENOMEM); 5297 if (on) { 5298 DPRINT(1, "ibd_m_promisc : set_promisc : %d", on); 5299 ibd_queue_work_slot(state, req, IBD_ASYNC_PROMON); 5300 } else { 5301 DPRINT(1, "ibd_m_promisc : unset_promisc"); 5302 ibd_queue_work_slot(state, req, IBD_ASYNC_PROMOFF); 5303 } 5304 5305 return (0); 5306 } 5307 5308 /* 5309 * GLDv3 entry point for gathering statistics. 
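 * The Reliable Connected mode byte and packet counters are folded into
 * the corresponding UD counters below, so GLDv3 sees combined totals
 * for the interface.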
5310 */ 5311 static int 5312 ibd_m_stat(void *arg, uint_t stat, uint64_t *val) 5313 { 5314 ibd_state_t *state = (ibd_state_t *)arg; 5315 5316 switch (stat) { 5317 case MAC_STAT_IFSPEED: 5318 *val = state->id_link_speed; 5319 break; 5320 case MAC_STAT_MULTIRCV: 5321 *val = state->id_multi_rcv; 5322 break; 5323 case MAC_STAT_BRDCSTRCV: 5324 *val = state->id_brd_rcv; 5325 break; 5326 case MAC_STAT_MULTIXMT: 5327 *val = state->id_multi_xmt; 5328 break; 5329 case MAC_STAT_BRDCSTXMT: 5330 *val = state->id_brd_xmt; 5331 break; 5332 case MAC_STAT_RBYTES: 5333 *val = state->id_rcv_bytes + state->rc_rcv_trans_byte 5334 + state->rc_rcv_copy_byte; 5335 break; 5336 case MAC_STAT_IPACKETS: 5337 *val = state->id_rcv_pkt + state->rc_rcv_trans_pkt 5338 + state->rc_rcv_copy_pkt; 5339 break; 5340 case MAC_STAT_OBYTES: 5341 *val = state->id_xmt_bytes + state->rc_xmt_bytes; 5342 break; 5343 case MAC_STAT_OPACKETS: 5344 *val = state->id_xmt_pkt + state->rc_xmt_small_pkt + 5345 state->rc_xmt_fragmented_pkt + 5346 state->rc_xmt_map_fail_pkt + state->rc_xmt_map_succ_pkt; 5347 break; 5348 case MAC_STAT_OERRORS: 5349 *val = state->id_ah_error; /* failed AH translation */ 5350 break; 5351 case MAC_STAT_IERRORS: 5352 *val = 0; 5353 break; 5354 case MAC_STAT_NOXMTBUF: 5355 *val = state->id_tx_short + state->rc_swqe_short + 5356 state->rc_xmt_buf_short; 5357 break; 5358 case MAC_STAT_NORCVBUF: 5359 default: 5360 return (ENOTSUP); 5361 } 5362 5363 return (0); 5364 } 5365 5366 static void 5367 ibd_async_txsched(ibd_state_t *state) 5368 { 5369 ibd_resume_transmission(state); 5370 } 5371 5372 static void 5373 ibd_resume_transmission(ibd_state_t *state) 5374 { 5375 int flag; 5376 int met_thresh = 0; 5377 int thresh = 0; 5378 int ret = -1; 5379 5380 mutex_enter(&state->id_sched_lock); 5381 if (state->id_sched_needed & IBD_RSRC_SWQE) { 5382 mutex_enter(&state->id_tx_list.dl_mutex); 5383 mutex_enter(&state->id_tx_rel_list.dl_mutex); 5384 met_thresh = state->id_tx_list.dl_cnt + 5385 state->id_tx_rel_list.dl_cnt; 5386 mutex_exit(&state->id_tx_rel_list.dl_mutex); 5387 mutex_exit(&state->id_tx_list.dl_mutex); 5388 thresh = IBD_FREE_SWQES_THRESH; 5389 flag = IBD_RSRC_SWQE; 5390 } else if (state->id_sched_needed & IBD_RSRC_LSOBUF) { 5391 ASSERT(state->id_lso != NULL); 5392 mutex_enter(&state->id_lso_lock); 5393 met_thresh = state->id_lso->bkt_nfree; 5394 thresh = IBD_FREE_LSOS_THRESH; 5395 mutex_exit(&state->id_lso_lock); 5396 flag = IBD_RSRC_LSOBUF; 5397 if (met_thresh > thresh) 5398 state->id_sched_lso_cnt++; 5399 } 5400 if (met_thresh > thresh) { 5401 state->id_sched_needed &= ~flag; 5402 state->id_sched_cnt++; 5403 ret = 0; 5404 } 5405 mutex_exit(&state->id_sched_lock); 5406 5407 if (ret == 0) 5408 mac_tx_update(state->id_mh); 5409 } 5410 5411 /* 5412 * Release the send wqe back into free list. 5413 */ 5414 static void 5415 ibd_release_swqe(ibd_state_t *state, ibd_swqe_t *head, ibd_swqe_t *tail, int n) 5416 { 5417 /* 5418 * Add back on Tx list for reuse. 5419 */ 5420 ASSERT(tail->swqe_next == NULL); 5421 mutex_enter(&state->id_tx_rel_list.dl_mutex); 5422 state->id_tx_rel_list.dl_pending_sends = B_FALSE; 5423 tail->swqe_next = state->id_tx_rel_list.dl_head; 5424 state->id_tx_rel_list.dl_head = SWQE_TO_WQE(head); 5425 state->id_tx_rel_list.dl_cnt += n; 5426 mutex_exit(&state->id_tx_rel_list.dl_mutex); 5427 } 5428 5429 /* 5430 * Acquire a send wqe from free list. 5431 * Returns error number and send wqe pointer. 
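 * (As written below, only the swqe pointer is returned; a NULL return
 * means the free list is exhausted, in which case dl_pending_sends is
 * set and the id_tx_short counter is bumped.)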
5432 */ 5433 static ibd_swqe_t * 5434 ibd_acquire_swqe(ibd_state_t *state) 5435 { 5436 ibd_swqe_t *wqe; 5437 5438 mutex_enter(&state->id_tx_rel_list.dl_mutex); 5439 if (state->id_tx_rel_list.dl_head != NULL) { 5440 /* transfer id_tx_rel_list to id_tx_list */ 5441 state->id_tx_list.dl_head = 5442 state->id_tx_rel_list.dl_head; 5443 state->id_tx_list.dl_cnt = 5444 state->id_tx_rel_list.dl_cnt; 5445 state->id_tx_list.dl_pending_sends = B_FALSE; 5446 5447 /* clear id_tx_rel_list */ 5448 state->id_tx_rel_list.dl_head = NULL; 5449 state->id_tx_rel_list.dl_cnt = 0; 5450 mutex_exit(&state->id_tx_rel_list.dl_mutex); 5451 5452 wqe = WQE_TO_SWQE(state->id_tx_list.dl_head); 5453 state->id_tx_list.dl_cnt -= 1; 5454 state->id_tx_list.dl_head = wqe->swqe_next; 5455 } else { /* no free swqe */ 5456 mutex_exit(&state->id_tx_rel_list.dl_mutex); 5457 state->id_tx_list.dl_pending_sends = B_TRUE; 5458 DPRINT(5, "ibd_acquire_swqe: out of Tx wqe"); 5459 state->id_tx_short++; 5460 wqe = NULL; 5461 } 5462 return (wqe); 5463 } 5464 5465 static int 5466 ibd_setup_lso(ibd_swqe_t *node, mblk_t *mp, uint32_t mss, 5467 ibt_ud_dest_hdl_t ud_dest) 5468 { 5469 mblk_t *nmp; 5470 int iph_len, tcph_len; 5471 ibt_wr_lso_t *lso; 5472 uintptr_t ip_start, tcp_start; 5473 uint8_t *dst; 5474 uint_t pending, mblen; 5475 5476 /* 5477 * The code in ibd_send would've set 'wr.ud.udwr_dest' by default; 5478 * we need to adjust it here for lso. 5479 */ 5480 lso = &(node->w_swr.wr.ud_lso); 5481 lso->lso_ud_dest = ud_dest; 5482 lso->lso_mss = mss; 5483 5484 /* 5485 * Calculate the LSO header size and set it in the UD LSO structure. 5486 * Note that the only assumption we make is that each of the IPoIB, 5487 * IP and TCP headers will be contained in a single mblk fragment; 5488 * together, the headers may span multiple mblk fragments. 5489 */ 5490 nmp = mp; 5491 ip_start = (uintptr_t)(nmp->b_rptr) + IPOIB_HDRSIZE; 5492 if (ip_start >= (uintptr_t)(nmp->b_wptr)) { 5493 ip_start = (uintptr_t)nmp->b_cont->b_rptr 5494 + (ip_start - (uintptr_t)(nmp->b_wptr)); 5495 nmp = nmp->b_cont; 5496 5497 } 5498 iph_len = IPH_HDR_LENGTH((ipha_t *)ip_start); 5499 5500 tcp_start = ip_start + iph_len; 5501 if (tcp_start >= (uintptr_t)(nmp->b_wptr)) { 5502 tcp_start = (uintptr_t)nmp->b_cont->b_rptr 5503 + (tcp_start - (uintptr_t)(nmp->b_wptr)); 5504 nmp = nmp->b_cont; 5505 } 5506 tcph_len = TCP_HDR_LENGTH((tcph_t *)tcp_start); 5507 lso->lso_hdr_sz = IPOIB_HDRSIZE + iph_len + tcph_len; 5508 5509 /* 5510 * If the lso header fits entirely within a single mblk fragment, 5511 * we'll avoid an additional copy of the lso header here and just 5512 * pass the b_rptr of the mblk directly. 5513 * 5514 * If this isn't true, we'd have to allocate for it explicitly. 
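 * (For instance, assuming the usual 4-byte IPoIB encapsulation header
 * and minimum-length 20-byte IPv4 and TCP headers, lso_hdr_sz would be
 * 44 bytes, which ordinarily fits well within the first fragment.)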
5515 */ 5516 if (lso->lso_hdr_sz <= MBLKL(mp)) { 5517 lso->lso_hdr = mp->b_rptr; 5518 } else { 5519 /* On work completion, remember to free this allocated hdr */ 5520 lso->lso_hdr = kmem_zalloc(lso->lso_hdr_sz, KM_NOSLEEP); 5521 if (lso->lso_hdr == NULL) { 5522 DPRINT(10, "ibd_setup_lso: couldn't allocate lso hdr, " 5523 "sz = %d", lso->lso_hdr_sz); 5524 lso->lso_hdr_sz = 0; 5525 lso->lso_mss = 0; 5526 return (-1); 5527 } 5528 } 5529 5530 /* 5531 * Copy in the lso header only if we need to 5532 */ 5533 if (lso->lso_hdr != mp->b_rptr) { 5534 dst = lso->lso_hdr; 5535 pending = lso->lso_hdr_sz; 5536 5537 for (nmp = mp; nmp && pending; nmp = nmp->b_cont) { 5538 mblen = MBLKL(nmp); 5539 if (pending > mblen) { 5540 bcopy(nmp->b_rptr, dst, mblen); 5541 dst += mblen; 5542 pending -= mblen; 5543 } else { 5544 bcopy(nmp->b_rptr, dst, pending); 5545 break; 5546 } 5547 } 5548 } 5549 5550 return (0); 5551 } 5552 5553 static void 5554 ibd_free_lsohdr(ibd_swqe_t *node, mblk_t *mp) 5555 { 5556 ibt_wr_lso_t *lso; 5557 5558 if ((!node) || (!mp)) 5559 return; 5560 5561 /* 5562 * Free any header space that we might've allocated if we 5563 * did an LSO 5564 */ 5565 if (node->w_swr.wr_opcode == IBT_WRC_SEND_LSO) { 5566 lso = &(node->w_swr.wr.ud_lso); 5567 if ((lso->lso_hdr) && (lso->lso_hdr != mp->b_rptr)) { 5568 kmem_free(lso->lso_hdr, lso->lso_hdr_sz); 5569 lso->lso_hdr = NULL; 5570 lso->lso_hdr_sz = 0; 5571 } 5572 } 5573 } 5574 5575 static void 5576 ibd_post_send(ibd_state_t *state, ibd_swqe_t *node) 5577 { 5578 uint_t i; 5579 uint_t num_posted; 5580 uint_t n_wrs; 5581 ibt_status_t ibt_status; 5582 ibt_send_wr_t wrs[IBD_MAX_TX_POST_MULTIPLE]; 5583 ibd_swqe_t *tx_head, *elem; 5584 ibd_swqe_t *nodes[IBD_MAX_TX_POST_MULTIPLE]; 5585 5586 /* post the one request, then check for more */ 5587 ibt_status = ibt_post_send(state->id_chnl_hdl, 5588 &node->w_swr, 1, NULL); 5589 if (ibt_status != IBT_SUCCESS) { 5590 ibd_print_warn(state, "ibd_post_send: " 5591 "posting one wr failed: ret=%d", ibt_status); 5592 ibd_tx_cleanup(state, node); 5593 } 5594 5595 tx_head = NULL; 5596 for (;;) { 5597 if (tx_head == NULL) { 5598 mutex_enter(&state->id_txpost_lock); 5599 tx_head = state->id_tx_head; 5600 if (tx_head == NULL) { 5601 state->id_tx_busy = 0; 5602 mutex_exit(&state->id_txpost_lock); 5603 return; 5604 } 5605 state->id_tx_head = NULL; 5606 mutex_exit(&state->id_txpost_lock); 5607 } 5608 5609 /* 5610 * Collect pending requests, IBD_MAX_TX_POST_MULTIPLE wrs 5611 * at a time if possible, and keep posting them. 5612 */ 5613 for (n_wrs = 0, elem = tx_head; 5614 (elem) && (n_wrs < IBD_MAX_TX_POST_MULTIPLE); 5615 elem = WQE_TO_SWQE(elem->swqe_next), n_wrs++) { 5616 nodes[n_wrs] = elem; 5617 wrs[n_wrs] = elem->w_swr; 5618 } 5619 tx_head = elem; 5620 5621 ASSERT(n_wrs != 0); 5622 5623 /* 5624 * If posting fails for some reason, we'll never receive 5625 * completion intimation, so we'll need to cleanup. But 5626 * we need to make sure we don't clean up nodes whose 5627 * wrs have been successfully posted. We assume that the 5628 * hca driver returns on the first failure to post and 5629 * therefore the first 'num_posted' entries don't need 5630 * cleanup here. 
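 *
 * For example, if n_wrs is 8 and ibt_post_send() fails after
 * reporting num_posted == 5, only nodes[5] through nodes[7] are
 * handed to ibd_tx_cleanup() below; completions for the first five
 * will arrive through the send CQ as usual.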
5631 */ 5632 num_posted = 0; 5633 ibt_status = ibt_post_send(state->id_chnl_hdl, 5634 wrs, n_wrs, &num_posted); 5635 if (ibt_status != IBT_SUCCESS) { 5636 ibd_print_warn(state, "ibd_post_send: " 5637 "posting multiple wrs failed: " 5638 "requested=%d, done=%d, ret=%d", 5639 n_wrs, num_posted, ibt_status); 5640 5641 for (i = num_posted; i < n_wrs; i++) 5642 ibd_tx_cleanup(state, nodes[i]); 5643 } 5644 } 5645 } 5646 5647 static int 5648 ibd_prepare_sgl(ibd_state_t *state, mblk_t *mp, ibd_swqe_t *node, 5649 uint_t lsohdr_sz) 5650 { 5651 ibt_wr_ds_t *sgl; 5652 ibt_status_t ibt_status; 5653 mblk_t *nmp; 5654 mblk_t *data_mp; 5655 uchar_t *bufp; 5656 size_t blksize; 5657 size_t skip; 5658 size_t avail; 5659 uint_t pktsize; 5660 uint_t frag_len; 5661 uint_t pending_hdr; 5662 int nmblks; 5663 int i; 5664 5665 /* 5666 * Let's skip ahead to the data if this is LSO 5667 */ 5668 data_mp = mp; 5669 pending_hdr = 0; 5670 if (lsohdr_sz) { 5671 pending_hdr = lsohdr_sz; 5672 for (nmp = mp; nmp; nmp = nmp->b_cont) { 5673 frag_len = nmp->b_wptr - nmp->b_rptr; 5674 if (frag_len > pending_hdr) 5675 break; 5676 pending_hdr -= frag_len; 5677 } 5678 data_mp = nmp; /* start of data past lso header */ 5679 ASSERT(data_mp != NULL); 5680 } 5681 5682 /* 5683 * Calculate the size of message data and number of msg blocks 5684 */ 5685 pktsize = 0; 5686 for (nmblks = 0, nmp = data_mp; nmp != NULL; 5687 nmp = nmp->b_cont, nmblks++) { 5688 pktsize += MBLKL(nmp); 5689 } 5690 pktsize -= pending_hdr; 5691 5692 /* 5693 * We only do ibt_map_mem_iov() if the pktsize is above the 5694 * "copy-threshold", and if the number of mp fragments is less than 5695 * the maximum acceptable. 5696 */ 5697 if ((state->id_hca_res_lkey_capab) && 5698 (pktsize > IBD_TX_COPY_THRESH) && 5699 (nmblks < state->id_max_sqseg_hiwm)) { 5700 ibt_iov_t iov_arr[IBD_MAX_SQSEG]; 5701 ibt_iov_attr_t iov_attr; 5702 5703 iov_attr.iov_as = NULL; 5704 iov_attr.iov = iov_arr; 5705 iov_attr.iov_buf = NULL; 5706 iov_attr.iov_list_len = nmblks; 5707 iov_attr.iov_wr_nds = state->id_max_sqseg; 5708 iov_attr.iov_lso_hdr_sz = lsohdr_sz; 5709 iov_attr.iov_flags = IBT_IOV_SLEEP; 5710 5711 for (nmp = data_mp, i = 0; i < nmblks; i++, nmp = nmp->b_cont) { 5712 iov_arr[i].iov_addr = (caddr_t)(void *)nmp->b_rptr; 5713 iov_arr[i].iov_len = MBLKL(nmp); 5714 if (i == 0) { 5715 iov_arr[i].iov_addr += pending_hdr; 5716 iov_arr[i].iov_len -= pending_hdr; 5717 } 5718 } 5719 5720 node->w_buftype = IBD_WQE_MAPPED; 5721 node->w_swr.wr_sgl = node->w_sgl; 5722 5723 ibt_status = ibt_map_mem_iov(state->id_hca_hdl, &iov_attr, 5724 (ibt_all_wr_t *)&node->w_swr, &node->w_mi_hdl); 5725 if (ibt_status != IBT_SUCCESS) { 5726 ibd_print_warn(state, "ibd_send: ibt_map_mem_iov " 5727 "failed, nmblks=%d, ret=%d\n", nmblks, ibt_status); 5728 goto ibd_copy_path; 5729 } 5730 5731 return (0); 5732 } 5733 5734 ibd_copy_path: 5735 if (pktsize <= state->id_tx_buf_sz) { 5736 node->swqe_copybuf.ic_sgl.ds_len = pktsize; 5737 node->w_swr.wr_nds = 1; 5738 node->w_swr.wr_sgl = &node->swqe_copybuf.ic_sgl; 5739 node->w_buftype = IBD_WQE_TXBUF; 5740 5741 /* 5742 * Even though this is the copy path for transfers less than 5743 * id_tx_buf_sz, it could still be an LSO packet. If so, it 5744 * is possible the first data mblk fragment (data_mp) still 5745 * contains part of the LSO header that we need to skip. 
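 *
 * For example, with lsohdr_sz == 44 and a 40-byte first mblk, the
 * header-skip loop above leaves data_mp pointing at the second
 * fragment with pending_hdr == 4, so the first bcopy() below starts
 * 4 bytes into that fragment and copies MBLKL(nmp) - 4 bytes.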
5746 */ 5747 bufp = (uchar_t *)(uintptr_t)node->w_swr.wr_sgl->ds_va; 5748 for (nmp = data_mp; nmp != NULL; nmp = nmp->b_cont) { 5749 blksize = MBLKL(nmp) - pending_hdr; 5750 bcopy(nmp->b_rptr + pending_hdr, bufp, blksize); 5751 bufp += blksize; 5752 pending_hdr = 0; 5753 } 5754 5755 return (0); 5756 } 5757 5758 /* 5759 * Copy path for transfers greater than id_tx_buf_sz 5760 */ 5761 node->w_swr.wr_sgl = node->w_sgl; 5762 if (ibd_acquire_lsobufs(state, pktsize, 5763 node->w_swr.wr_sgl, &(node->w_swr.wr_nds)) != 0) { 5764 DPRINT(10, "ibd_prepare_sgl: lso bufs acquire failed"); 5765 return (-1); 5766 } 5767 node->w_buftype = IBD_WQE_LSOBUF; 5768 5769 /* 5770 * Copy the larger-than-id_tx_buf_sz packet into a set of 5771 * fixed-sized, pre-mapped LSO buffers. Note that we might 5772 * need to skip part of the LSO header in the first fragment 5773 * as before. 5774 */ 5775 nmp = data_mp; 5776 skip = pending_hdr; 5777 for (i = 0; i < node->w_swr.wr_nds; i++) { 5778 sgl = node->w_swr.wr_sgl + i; 5779 bufp = (uchar_t *)(uintptr_t)sgl->ds_va; 5780 avail = IBD_LSO_BUFSZ; 5781 while (nmp && avail) { 5782 blksize = MBLKL(nmp) - skip; 5783 if (blksize > avail) { 5784 bcopy(nmp->b_rptr + skip, bufp, avail); 5785 skip += avail; 5786 avail = 0; 5787 } else { 5788 bcopy(nmp->b_rptr + skip, bufp, blksize); 5789 skip = 0; 5790 avail -= blksize; 5791 bufp += blksize; 5792 nmp = nmp->b_cont; 5793 } 5794 } 5795 } 5796 5797 return (0); 5798 } 5799 5800 /* 5801 * Schedule a completion queue polling to reap the resource we're 5802 * short on. If we implement the change to reap tx completions 5803 * in a separate thread, we'll need to wake up that thread here. 5804 */ 5805 static int 5806 ibd_sched_poll(ibd_state_t *state, int resource_type, int q_flag) 5807 { 5808 ibd_req_t *req; 5809 5810 mutex_enter(&state->id_sched_lock); 5811 state->id_sched_needed |= resource_type; 5812 mutex_exit(&state->id_sched_lock); 5813 5814 /* 5815 * If we are asked to queue a work entry, we need to do it 5816 */ 5817 if (q_flag) { 5818 req = kmem_cache_alloc(state->id_req_kmc, KM_NOSLEEP); 5819 if (req == NULL) 5820 return (-1); 5821 5822 ibd_queue_work_slot(state, req, IBD_ASYNC_SCHED); 5823 } 5824 5825 return (0); 5826 } 5827 5828 /* 5829 * The passed in packet has this format: 5830 * IPOIB_ADDRL b dest addr :: 2b sap :: 2b 0's :: data 5831 */ 5832 static boolean_t 5833 ibd_send(ibd_state_t *state, mblk_t *mp) 5834 { 5835 ibd_ace_t *ace; 5836 ibd_swqe_t *node; 5837 ipoib_mac_t *dest; 5838 ib_header_info_t *ipibp; 5839 ip6_t *ip6h; 5840 uint_t pktsize; 5841 uint32_t mss; 5842 uint32_t hckflags; 5843 uint32_t lsoflags = 0; 5844 uint_t lsohdr_sz = 0; 5845 int ret, len; 5846 boolean_t dofree = B_FALSE; 5847 boolean_t rc; 5848 /* if (rc_chan == NULL) send by UD; else send by RC; */ 5849 ibd_rc_chan_t *rc_chan; 5850 int nmblks; 5851 mblk_t *nmp; 5852 5853 /* 5854 * If we aren't done with the device initialization and start, 5855 * we shouldn't be here. 5856 */ 5857 if ((state->id_mac_state & IBD_DRV_STARTED) == 0) 5858 return (B_FALSE); 5859 5860 /* 5861 * Obtain an address handle for the destination. 
5862 */ 5863 ipibp = (ib_header_info_t *)mp->b_rptr; 5864 dest = (ipoib_mac_t *)&ipibp->ib_dst; 5865 if ((ntohl(dest->ipoib_qpn) & IB_QPN_MASK) == IB_MC_QPN) 5866 IBD_FILL_SCOPE_PKEY(dest, state->id_scope, state->id_pkey); 5867 5868 rc_chan = NULL; 5869 ace = ibd_acache_lookup(state, dest, &ret, 1); 5870 if (state->id_enable_rc && (ace != NULL) && 5871 (ace->ac_mac.ipoib_qpn != htonl(IB_MC_QPN))) { 5872 if (ace->ac_chan == NULL) { 5873 state->rc_null_conn++; 5874 } else { 5875 if (ace->ac_chan->chan_state == 5876 IBD_RC_STATE_ACT_ESTAB) { 5877 rc_chan = ace->ac_chan; 5878 mutex_enter(&rc_chan->tx_wqe_list.dl_mutex); 5879 node = WQE_TO_SWQE( 5880 rc_chan->tx_wqe_list.dl_head); 5881 if (node != NULL) { 5882 rc_chan->tx_wqe_list.dl_cnt -= 1; 5883 rc_chan->tx_wqe_list.dl_head = 5884 node->swqe_next; 5885 } else { 5886 node = ibd_rc_acquire_swqes(rc_chan); 5887 } 5888 mutex_exit(&rc_chan->tx_wqe_list.dl_mutex); 5889 5890 if (node == NULL) { 5891 state->rc_swqe_short++; 5892 mutex_enter(&state->id_sched_lock); 5893 state->id_sched_needed |= 5894 IBD_RSRC_RC_SWQE; 5895 mutex_exit(&state->id_sched_lock); 5896 ibd_dec_ref_ace(state, ace); 5897 return (B_FALSE); 5898 } 5899 } else { 5900 state->rc_no_estab_conn++; 5901 } 5902 } 5903 } 5904 5905 if (rc_chan == NULL) { 5906 mutex_enter(&state->id_tx_list.dl_mutex); 5907 node = WQE_TO_SWQE(state->id_tx_list.dl_head); 5908 if (node != NULL) { 5909 state->id_tx_list.dl_cnt -= 1; 5910 state->id_tx_list.dl_head = node->swqe_next; 5911 } else { 5912 node = ibd_acquire_swqe(state); 5913 } 5914 mutex_exit(&state->id_tx_list.dl_mutex); 5915 if (node == NULL) { 5916 /* 5917 * If we don't have an swqe available, schedule a 5918 * transmit completion queue cleanup and hold off on 5919 * sending more packets until we have some free swqes 5920 */ 5921 if (ibd_sched_poll(state, IBD_RSRC_SWQE, 0) == 0) { 5922 if (ace != NULL) { 5923 ibd_dec_ref_ace(state, ace); 5924 } 5925 return (B_FALSE); 5926 } 5927 5928 /* 5929 * If a poll cannot be scheduled, we have no choice but 5930 * to drop this packet 5931 */ 5932 ibd_print_warn(state, "ibd_send: no swqe, pkt drop"); 5933 if (ace != NULL) { 5934 ibd_dec_ref_ace(state, ace); 5935 } 5936 return (B_TRUE); 5937 } 5938 } 5939 5940 /* 5941 * Initialize the commonly used fields in swqe to NULL to protect 5942 * against ibd_tx_cleanup accidentally misinterpreting these on a 5943 * failure. 5944 */ 5945 node->swqe_im_mblk = NULL; 5946 node->w_swr.wr_nds = 0; 5947 node->w_swr.wr_sgl = NULL; 5948 node->w_swr.wr_opcode = IBT_WRC_SEND; 5949 5950 /* 5951 * Calculate the size of message data and number of msg blocks 5952 */ 5953 pktsize = 0; 5954 for (nmblks = 0, nmp = mp; nmp != NULL; 5955 nmp = nmp->b_cont, nmblks++) { 5956 pktsize += MBLKL(nmp); 5957 } 5958 5959 if (bcmp(&ipibp->ib_dst, &state->id_bcaddr, IPOIB_ADDRL) == 0) 5960 atomic_inc_64(&state->id_brd_xmt); 5961 else if ((ntohl(ipibp->ib_dst.ipoib_qpn) & IB_QPN_MASK) == IB_MC_QPN) 5962 atomic_inc_64(&state->id_multi_xmt); 5963 5964 if (ace != NULL) { 5965 node->w_ahandle = ace; 5966 node->w_swr.wr.ud.udwr_dest = ace->ac_dest; 5967 } else { 5968 DPRINT(5, 5969 "ibd_send: acache lookup %s for %08X:%08X:%08X:%08X:%08X", 5970 ((ret == EFAULT) ? 
"failed" : "queued"), 5971 htonl(dest->ipoib_qpn), htonl(dest->ipoib_gidpref[0]), 5972 htonl(dest->ipoib_gidpref[1]), 5973 htonl(dest->ipoib_gidsuff[0]), 5974 htonl(dest->ipoib_gidsuff[1])); 5975 state->rc_ace_not_found++; 5976 node->w_ahandle = NULL; 5977 5978 /* 5979 * Here if ibd_acache_lookup() returns EFAULT, it means ibd 5980 * can not find a path for the specific dest address. We 5981 * should get rid of this kind of packet. We also should get 5982 * rid of the packet if we cannot schedule a poll via the 5983 * async thread. For the normal case, ibd will return the 5984 * packet to upper layer and wait for AH creating. 5985 * 5986 * Note that we always queue a work slot entry for the async 5987 * thread when we fail AH lookup (even in intr mode); this is 5988 * due to the convoluted way the code currently looks for AH. 5989 */ 5990 if (ret == EFAULT) { 5991 dofree = B_TRUE; 5992 rc = B_TRUE; 5993 } else if (ibd_sched_poll(state, IBD_RSRC_SWQE, 1) != 0) { 5994 dofree = B_TRUE; 5995 rc = B_TRUE; 5996 } else { 5997 dofree = B_FALSE; 5998 rc = B_FALSE; 5999 } 6000 goto ibd_send_fail; 6001 } 6002 6003 /* 6004 * For ND6 packets, padding is at the front of the source lladdr. 6005 * Insert the padding at front. 6006 */ 6007 if (ntohs(ipibp->ipib_rhdr.ipoib_type) == ETHERTYPE_IPV6) { 6008 if (MBLKL(mp) < sizeof (ib_header_info_t) + IPV6_HDR_LEN) { 6009 if (!pullupmsg(mp, IPV6_HDR_LEN + 6010 sizeof (ib_header_info_t))) { 6011 DPRINT(10, "ibd_send: pullupmsg failure "); 6012 dofree = B_TRUE; 6013 rc = B_TRUE; 6014 goto ibd_send_fail; 6015 } 6016 ipibp = (ib_header_info_t *)mp->b_rptr; 6017 } 6018 ip6h = (ip6_t *)((uchar_t *)ipibp + 6019 sizeof (ib_header_info_t)); 6020 len = ntohs(ip6h->ip6_plen); 6021 if (ip6h->ip6_nxt == IPPROTO_ICMPV6) { 6022 mblk_t *pad; 6023 6024 pad = allocb(4, 0); 6025 pad->b_wptr = (uchar_t *)pad->b_rptr + 4; 6026 linkb(mp, pad); 6027 if (MBLKL(mp) < sizeof (ib_header_info_t) + 6028 IPV6_HDR_LEN + len + 4) { 6029 if (!pullupmsg(mp, sizeof (ib_header_info_t) + 6030 IPV6_HDR_LEN + len + 4)) { 6031 DPRINT(10, "ibd_send: pullupmsg " 6032 "failure "); 6033 dofree = B_TRUE; 6034 rc = B_TRUE; 6035 goto ibd_send_fail; 6036 } 6037 ip6h = (ip6_t *)((uchar_t *)mp->b_rptr + 6038 sizeof (ib_header_info_t)); 6039 } 6040 6041 /* LINTED: E_CONSTANT_CONDITION */ 6042 IBD_PAD_NSNA(ip6h, len, IBD_SEND); 6043 } 6044 } 6045 6046 ASSERT(mp->b_wptr - mp->b_rptr >= sizeof (ib_addrs_t)); 6047 mp->b_rptr += sizeof (ib_addrs_t); 6048 pktsize -= sizeof (ib_addrs_t); 6049 6050 if (rc_chan) { /* send in RC mode */ 6051 ibt_iov_t iov_arr[IBD_MAX_SQSEG]; 6052 ibt_iov_attr_t iov_attr; 6053 uint_t i; 6054 size_t blksize; 6055 uchar_t *bufp; 6056 ibd_rc_tx_largebuf_t *lbufp; 6057 6058 atomic_add_64(&state->rc_xmt_bytes, pktsize); 6059 6060 /* 6061 * Upper layer does Tx checksum, we don't need do any 6062 * checksum here. 6063 */ 6064 ASSERT(node->w_swr.wr_trans == IBT_RC_SRV); 6065 6066 /* 6067 * We only do ibt_map_mem_iov() if the pktsize is above 6068 * the "copy-threshold", and if the number of mp 6069 * fragments is less than the maximum acceptable. 6070 */ 6071 if (pktsize <= ibd_rc_tx_copy_thresh) { 6072 atomic_inc_64(&state->rc_xmt_small_pkt); 6073 /* 6074 * Only process unicast packet in Reliable Connected 6075 * mode. 
6076 */ 6077 node->swqe_copybuf.ic_sgl.ds_len = pktsize; 6078 node->w_swr.wr_nds = 1; 6079 node->w_swr.wr_sgl = &node->swqe_copybuf.ic_sgl; 6080 node->w_buftype = IBD_WQE_TXBUF; 6081 6082 bufp = (uchar_t *)(uintptr_t)node->w_swr.wr_sgl->ds_va; 6083 for (nmp = mp; nmp != NULL; nmp = nmp->b_cont) { 6084 blksize = MBLKL(nmp); 6085 bcopy(nmp->b_rptr, bufp, blksize); 6086 bufp += blksize; 6087 } 6088 freemsg(mp); 6089 ASSERT(node->swqe_im_mblk == NULL); 6090 } else { 6091 if ((state->rc_enable_iov_map) && 6092 (nmblks < state->rc_max_sqseg_hiwm)) { 6093 6094 /* do ibt_map_mem_iov() */ 6095 iov_attr.iov_as = NULL; 6096 iov_attr.iov = iov_arr; 6097 iov_attr.iov_buf = NULL; 6098 iov_attr.iov_wr_nds = state->rc_tx_max_sqseg; 6099 iov_attr.iov_lso_hdr_sz = 0; 6100 iov_attr.iov_flags = IBT_IOV_SLEEP; 6101 6102 i = 0; 6103 for (nmp = mp; nmp != NULL; nmp = nmp->b_cont) { 6104 iov_arr[i].iov_len = MBLKL(nmp); 6105 if (iov_arr[i].iov_len != 0) { 6106 iov_arr[i].iov_addr = (caddr_t) 6107 (void *)nmp->b_rptr; 6108 i++; 6109 } 6110 } 6111 iov_attr.iov_list_len = i; 6112 node->w_swr.wr_sgl = node->w_sgl; 6113 6114 ret = ibt_map_mem_iov(state->id_hca_hdl, 6115 &iov_attr, (ibt_all_wr_t *)&node->w_swr, 6116 &node->w_mi_hdl); 6117 if (ret != IBT_SUCCESS) { 6118 atomic_inc_64( 6119 &state->rc_xmt_map_fail_pkt); 6120 DPRINT(30, "ibd_send: ibt_map_mem_iov(" 6121 ") failed, nmblks=%d, real_nmblks" 6122 "=%d, ret=0x%x", nmblks, i, ret); 6123 goto ibd_rc_large_copy; 6124 } 6125 6126 atomic_inc_64(&state->rc_xmt_map_succ_pkt); 6127 node->w_buftype = IBD_WQE_MAPPED; 6128 node->swqe_im_mblk = mp; 6129 } else { 6130 atomic_inc_64(&state->rc_xmt_fragmented_pkt); 6131 ibd_rc_large_copy: 6132 mutex_enter(&state->rc_tx_large_bufs_lock); 6133 if (state->rc_tx_largebuf_nfree == 0) { 6134 state->rc_xmt_buf_short++; 6135 mutex_exit 6136 (&state->rc_tx_large_bufs_lock); 6137 mutex_enter(&state->id_sched_lock); 6138 state->id_sched_needed |= 6139 IBD_RSRC_RC_TX_LARGEBUF; 6140 mutex_exit(&state->id_sched_lock); 6141 dofree = B_FALSE; 6142 rc = B_FALSE; 6143 /* 6144 * If we don't have Tx large bufs, 6145 * return failure. 
node->w_buftype 6146 * should not be IBD_WQE_RC_COPYBUF, 6147 * otherwise it will cause problems 6148 * in ibd_rc_tx_cleanup() 6149 */ 6150 node->w_buftype = IBD_WQE_TXBUF; 6151 goto ibd_send_fail; 6152 } 6153 6154 lbufp = state->rc_tx_largebuf_free_head; 6155 ASSERT(lbufp->lb_buf != NULL); 6156 state->rc_tx_largebuf_free_head = 6157 lbufp->lb_next; 6158 lbufp->lb_next = NULL; 6159 /* Update nfree count */ 6160 state->rc_tx_largebuf_nfree--; 6161 mutex_exit(&state->rc_tx_large_bufs_lock); 6162 bufp = lbufp->lb_buf; 6163 node->w_sgl[0].ds_va = 6164 (ib_vaddr_t)(uintptr_t)bufp; 6165 node->w_sgl[0].ds_key = 6166 state->rc_tx_mr_desc.md_lkey; 6167 node->w_sgl[0].ds_len = pktsize; 6168 node->w_swr.wr_sgl = node->w_sgl; 6169 node->w_swr.wr_nds = 1; 6170 node->w_buftype = IBD_WQE_RC_COPYBUF; 6171 node->w_rc_tx_largebuf = lbufp; 6172 6173 for (nmp = mp; nmp != NULL; nmp = nmp->b_cont) { 6174 blksize = MBLKL(nmp); 6175 if (blksize != 0) { 6176 bcopy(nmp->b_rptr, bufp, 6177 blksize); 6178 bufp += blksize; 6179 } 6180 } 6181 freemsg(mp); 6182 ASSERT(node->swqe_im_mblk == NULL); 6183 } 6184 } 6185 6186 node->swqe_next = NULL; 6187 mutex_enter(&rc_chan->tx_post_lock); 6188 if (rc_chan->tx_busy) { 6189 if (rc_chan->tx_head) { 6190 rc_chan->tx_tail->swqe_next = 6191 SWQE_TO_WQE(node); 6192 } else { 6193 rc_chan->tx_head = node; 6194 } 6195 rc_chan->tx_tail = node; 6196 mutex_exit(&rc_chan->tx_post_lock); 6197 } else { 6198 rc_chan->tx_busy = 1; 6199 mutex_exit(&rc_chan->tx_post_lock); 6200 ibd_rc_post_send(rc_chan, node); 6201 } 6202 6203 return (B_TRUE); 6204 } /* send by RC */ 6205 6206 if ((state->id_enable_rc) && (pktsize > state->id_mtu)) { 6207 /* 6208 * The packet is too long. The packet size from GLD should 6209 * be <= state->id_mtu + sizeof (ib_addrs_t). 6210 */ 6211 if (ace->ac_mac.ipoib_qpn != htonl(IB_MC_QPN)) { 6212 ibd_req_t *req; 6213 6214 mutex_enter(&ace->tx_too_big_mutex); 6215 if (ace->tx_too_big_ongoing) { 6216 mutex_exit(&ace->tx_too_big_mutex); 6217 state->rc_xmt_reenter_too_long_pkt++; 6218 dofree = B_TRUE; 6219 } else { 6220 ace->tx_too_big_ongoing = B_TRUE; 6221 mutex_exit(&ace->tx_too_big_mutex); 6222 state->rc_xmt_icmp_too_long_pkt++; 6223 6224 req = kmem_cache_alloc(state->id_req_kmc, 6225 KM_NOSLEEP); 6226 if (req == NULL) { 6227 ibd_print_warn(state, "ibd_send: alloc " 6228 "ibd_req_t failed"); 6229 /* Drop it. */ 6230 dofree = B_TRUE; 6231 } else { 6232 req->rq_ptr = mp; 6233 req->rq_ptr2 = ace; 6234 ibd_queue_work_slot(state, req, 6235 IBD_ASYNC_RC_TOO_BIG); 6236 dofree = B_FALSE; 6237 } 6238 } 6239 } else { 6240 ibd_print_warn(state, "Reliable Connected mode is on. " 6241 "Multicast packet length %d > %d is too long " 6242 "to send, drop it", 6243 pktsize, state->id_mtu); 6244 state->rc_xmt_drop_too_long_pkt++; 6245 /* Drop it. */ 6246 dofree = B_TRUE; 6247 } 6248 rc = B_TRUE; 6249 goto ibd_send_fail; 6250 } 6251 6252 atomic_add_64(&state->id_xmt_bytes, pktsize); 6253 atomic_inc_64(&state->id_xmt_pkt); 6254 6255 /* 6256 * Do LSO and checksum related work here. For LSO send, adjust the 6257 * ud destination, the opcode and the LSO header information in the 6258 * work request.
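 *
 * As a worked example of what LSO buys here: a 64KB TCP payload with
 * an MSS of 1460 would otherwise require roughly 45 separate sends;
 * with IBT_WRC_SEND_LSO the HCA segments it in hardware, replicating
 * the lso_hdr prepared by ibd_setup_lso() in front of each segment.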
6259 */ 6260 mac_lso_get(mp, &mss, &lsoflags); 6261 if ((lsoflags & HW_LSO) != HW_LSO) { 6262 node->w_swr.wr_opcode = IBT_WRC_SEND; 6263 lsohdr_sz = 0; 6264 } else { 6265 if (ibd_setup_lso(node, mp, mss, ace->ac_dest) != 0) { 6266 /* 6267 * The routine can only fail if there's no memory; we 6268 * can only drop the packet if this happens 6269 */ 6270 ibd_print_warn(state, 6271 "ibd_send: no memory, lso posting failed"); 6272 dofree = B_TRUE; 6273 rc = B_TRUE; 6274 goto ibd_send_fail; 6275 } 6276 6277 node->w_swr.wr_opcode = IBT_WRC_SEND_LSO; 6278 lsohdr_sz = (node->w_swr.wr.ud_lso).lso_hdr_sz; 6279 } 6280 6281 mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &hckflags); 6282 if ((hckflags & HCK_FULLCKSUM) == HCK_FULLCKSUM) 6283 node->w_swr.wr_flags |= IBT_WR_SEND_CKSUM; 6284 else 6285 node->w_swr.wr_flags &= ~IBT_WR_SEND_CKSUM; 6286 6287 /* 6288 * Prepare the sgl for posting; the routine can only fail if there's 6289 * no lso buf available for posting. If this is the case, we should 6290 * probably resched for lso bufs to become available and then try again. 6291 */ 6292 if (ibd_prepare_sgl(state, mp, node, lsohdr_sz) != 0) { 6293 if (ibd_sched_poll(state, IBD_RSRC_LSOBUF, 1) != 0) { 6294 dofree = B_TRUE; 6295 rc = B_TRUE; 6296 } else { 6297 dofree = B_FALSE; 6298 rc = B_FALSE; 6299 } 6300 goto ibd_send_fail; 6301 } 6302 node->swqe_im_mblk = mp; 6303 6304 /* 6305 * Queue the wqe to hardware; since we can now simply queue a 6306 * post instead of doing it serially, we cannot assume anything 6307 * about the 'node' after ibd_post_send() returns. 6308 */ 6309 node->swqe_next = NULL; 6310 6311 mutex_enter(&state->id_txpost_lock); 6312 if (state->id_tx_busy) { 6313 if (state->id_tx_head) { 6314 state->id_tx_tail->swqe_next = 6315 SWQE_TO_WQE(node); 6316 } else { 6317 state->id_tx_head = node; 6318 } 6319 state->id_tx_tail = node; 6320 mutex_exit(&state->id_txpost_lock); 6321 } else { 6322 state->id_tx_busy = 1; 6323 mutex_exit(&state->id_txpost_lock); 6324 ibd_post_send(state, node); 6325 } 6326 6327 return (B_TRUE); 6328 6329 ibd_send_fail: 6330 if (node && mp) 6331 ibd_free_lsohdr(node, mp); 6332 6333 if (dofree) 6334 freemsg(mp); 6335 6336 if (node != NULL) { 6337 if (rc_chan) { 6338 ibd_rc_tx_cleanup(node); 6339 } else { 6340 ibd_tx_cleanup(state, node); 6341 } 6342 } 6343 6344 return (rc); 6345 } 6346 6347 /* 6348 * GLDv3 entry point for transmitting datagram. 6349 */ 6350 static mblk_t * 6351 ibd_m_tx(void *arg, mblk_t *mp) 6352 { 6353 ibd_state_t *state = (ibd_state_t *)arg; 6354 mblk_t *next; 6355 6356 if (state->id_link_state != LINK_STATE_UP) { 6357 freemsgchain(mp); 6358 mp = NULL; 6359 } 6360 6361 while (mp != NULL) { 6362 next = mp->b_next; 6363 mp->b_next = NULL; 6364 if (ibd_send(state, mp) == B_FALSE) { 6365 /* Send fail */ 6366 mp->b_next = next; 6367 break; 6368 } 6369 mp = next; 6370 } 6371 6372 return (mp); 6373 } 6374 6375 /* 6376 * this handles Tx and Rx completions. With separate CQs, this handles 6377 * only Rx completions. 
6378 */ 6379 static uint_t 6380 ibd_intr(caddr_t arg) 6381 { 6382 ibd_state_t *state = (ibd_state_t *)arg; 6383 6384 ibd_poll_rcq(state, state->id_rcq_hdl); 6385 6386 return (DDI_INTR_CLAIMED); 6387 } 6388 6389 /* 6390 * Poll and fully drain the send cq 6391 */ 6392 static void 6393 ibd_drain_scq(ibd_state_t *state, ibt_cq_hdl_t cq_hdl) 6394 { 6395 ibt_wc_t *wcs = state->id_txwcs; 6396 uint_t numwcs = state->id_txwcs_size; 6397 ibd_wqe_t *wqe; 6398 ibd_swqe_t *head, *tail; 6399 ibt_wc_t *wc; 6400 uint_t num_polled; 6401 int i; 6402 6403 while (ibt_poll_cq(cq_hdl, wcs, numwcs, &num_polled) == IBT_SUCCESS) { 6404 head = tail = NULL; 6405 for (i = 0, wc = wcs; i < num_polled; i++, wc++) { 6406 wqe = (ibd_wqe_t *)(uintptr_t)wc->wc_id; 6407 if (wc->wc_status != IBT_WC_SUCCESS) { 6408 /* 6409 * Channel being torn down. 6410 */ 6411 if (wc->wc_status == IBT_WC_WR_FLUSHED_ERR) { 6412 DPRINT(5, "ibd_drain_scq: flush error"); 6413 DPRINT(10, "ibd_drain_scq: Bad " 6414 "status %d", wc->wc_status); 6415 } else { 6416 DPRINT(10, "ibd_drain_scq: " 6417 "unexpected wc_status %d", 6418 wc->wc_status); 6419 } 6420 /* 6421 * Fallthrough to invoke the Tx handler to 6422 * release held resources, e.g., AH refcount. 6423 */ 6424 } 6425 /* 6426 * Add this swqe to the list to be cleaned up. 6427 */ 6428 if (head) 6429 tail->swqe_next = wqe; 6430 else 6431 head = WQE_TO_SWQE(wqe); 6432 tail = WQE_TO_SWQE(wqe); 6433 } 6434 tail->swqe_next = NULL; 6435 ibd_tx_cleanup_list(state, head, tail); 6436 6437 /* 6438 * Resume any blocked transmissions if possible 6439 */ 6440 ibd_resume_transmission(state); 6441 } 6442 } 6443 6444 /* 6445 * Poll and fully drain the receive cq 6446 */ 6447 static void 6448 ibd_drain_rcq(ibd_state_t *state, ibt_cq_hdl_t cq_hdl) 6449 { 6450 ibt_wc_t *wcs = state->id_rxwcs; 6451 uint_t numwcs = state->id_rxwcs_size; 6452 ibd_rwqe_t *rwqe; 6453 ibt_wc_t *wc; 6454 uint_t num_polled; 6455 int i; 6456 mblk_t *head, *tail, *mp; 6457 6458 while (ibt_poll_cq(cq_hdl, wcs, numwcs, &num_polled) == IBT_SUCCESS) { 6459 head = tail = NULL; 6460 for (i = 0, wc = wcs; i < num_polled; i++, wc++) { 6461 rwqe = (ibd_rwqe_t *)(uintptr_t)wc->wc_id; 6462 if (wc->wc_status != IBT_WC_SUCCESS) { 6463 /* 6464 * Channel being torn down. 6465 */ 6466 if (wc->wc_status == IBT_WC_WR_FLUSHED_ERR) { 6467 DPRINT(5, "ibd_drain_rcq: " 6468 "expected flushed rwqe"); 6469 } else { 6470 DPRINT(5, "ibd_drain_rcq: " 6471 "unexpected wc_status %d", 6472 wc->wc_status); 6473 } 6474 atomic_inc_32( 6475 &state->id_rx_list.dl_bufs_outstanding); 6476 freemsg(rwqe->rwqe_im_mblk); 6477 continue; 6478 } 6479 mp = ibd_process_rx(state, rwqe, wc); 6480 if (mp == NULL) 6481 continue; 6482 6483 /* 6484 * Add this mp to the list to send to the nw layer. 6485 */ 6486 if (head) 6487 tail->b_next = mp; 6488 else 6489 head = mp; 6490 tail = mp; 6491 } 6492 if (head) 6493 mac_rx(state->id_mh, state->id_rh, head); 6494 6495 /* 6496 * Account for #rwqes polled. 6497 * Post more here, if less than one fourth full. 6498 */ 6499 if (atomic_add_32_nv(&state->id_rx_list.dl_cnt, -num_polled) < 6500 (state->id_num_rwqe / 4)) 6501 ibd_post_recv_intr(state); 6502 } 6503 } 6504 6505 /* 6506 * Common code for interrupt handling as well as for polling 6507 * for all completed wqe's while detaching. 
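 *
 * For example, if a second thread enters this routine while a first
 * thread is still inside ibd_drain_scq(), the second thread only sets
 * IBD_REDO_CQ_POLLING and returns; the first thread notices that flag
 * in its do/while loop and drains the CQ once more, so no completion
 * is left unreaped.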
6508 */ 6509 static void 6510 ibd_poll_scq(ibd_state_t *state, ibt_cq_hdl_t cq_hdl) 6511 { 6512 int flag, redo_flag; 6513 int redo = 1; 6514 6515 flag = IBD_CQ_POLLING; 6516 redo_flag = IBD_REDO_CQ_POLLING; 6517 6518 mutex_enter(&state->id_scq_poll_lock); 6519 if (state->id_scq_poll_busy & flag) { 6520 ibd_print_warn(state, "ibd_poll_scq: multiple polling threads"); 6521 state->id_scq_poll_busy |= redo_flag; 6522 mutex_exit(&state->id_scq_poll_lock); 6523 return; 6524 } 6525 state->id_scq_poll_busy |= flag; 6526 mutex_exit(&state->id_scq_poll_lock); 6527 6528 /* 6529 * In some cases (eg detaching), this code can be invoked on 6530 * any cpu after disabling cq notification (thus no concurrency 6531 * exists). Apart from that, the following applies normally: 6532 * Transmit completion handling could be from any cpu if 6533 * Tx CQ is poll driven, but always on Tx interrupt cpu if Tx CQ 6534 * is interrupt driven. 6535 */ 6536 6537 /* 6538 * Poll and drain the CQ 6539 */ 6540 ibd_drain_scq(state, cq_hdl); 6541 6542 /* 6543 * Enable CQ notifications and redrain the cq to catch any 6544 * completions we might have missed after the ibd_drain_scq() 6545 * above and before the ibt_enable_cq_notify() that follows. 6546 * Finally, service any new requests to poll the cq that 6547 * could've come in after the ibt_enable_cq_notify(). 6548 */ 6549 do { 6550 if (ibt_enable_cq_notify(cq_hdl, IBT_NEXT_COMPLETION) != 6551 IBT_SUCCESS) { 6552 DPRINT(10, "ibd_intr: ibt_enable_cq_notify() failed"); 6553 } 6554 6555 ibd_drain_scq(state, cq_hdl); 6556 6557 mutex_enter(&state->id_scq_poll_lock); 6558 if (state->id_scq_poll_busy & redo_flag) 6559 state->id_scq_poll_busy &= ~redo_flag; 6560 else { 6561 state->id_scq_poll_busy &= ~flag; 6562 redo = 0; 6563 } 6564 mutex_exit(&state->id_scq_poll_lock); 6565 6566 } while (redo); 6567 } 6568 6569 /* 6570 * Common code for interrupt handling as well as for polling 6571 * for all completed wqe's while detaching. 6572 */ 6573 static void 6574 ibd_poll_rcq(ibd_state_t *state, ibt_cq_hdl_t rcq) 6575 { 6576 int flag, redo_flag; 6577 int redo = 1; 6578 6579 flag = IBD_CQ_POLLING; 6580 redo_flag = IBD_REDO_CQ_POLLING; 6581 6582 mutex_enter(&state->id_rcq_poll_lock); 6583 if (state->id_rcq_poll_busy & flag) { 6584 ibd_print_warn(state, "ibd_poll_rcq: multiple polling threads"); 6585 state->id_rcq_poll_busy |= redo_flag; 6586 mutex_exit(&state->id_rcq_poll_lock); 6587 return; 6588 } 6589 state->id_rcq_poll_busy |= flag; 6590 mutex_exit(&state->id_rcq_poll_lock); 6591 6592 /* 6593 * Poll and drain the CQ 6594 */ 6595 ibd_drain_rcq(state, rcq); 6596 6597 /* 6598 * Enable CQ notifications and redrain the cq to catch any 6599 * completions we might have missed after the ibd_drain_cq() 6600 * above and before the ibt_enable_cq_notify() that follows. 6601 * Finally, service any new requests to poll the cq that 6602 * could've come in after the ibt_enable_cq_notify(). 6603 */ 6604 do { 6605 if (ibt_enable_cq_notify(rcq, IBT_NEXT_COMPLETION) != 6606 IBT_SUCCESS) { 6607 DPRINT(10, "ibd_intr: ibt_enable_cq_notify() failed"); 6608 } 6609 6610 ibd_drain_rcq(state, rcq); 6611 6612 mutex_enter(&state->id_rcq_poll_lock); 6613 if (state->id_rcq_poll_busy & redo_flag) 6614 state->id_rcq_poll_busy &= ~redo_flag; 6615 else { 6616 state->id_rcq_poll_busy &= ~flag; 6617 redo = 0; 6618 } 6619 mutex_exit(&state->id_rcq_poll_lock); 6620 6621 } while (redo); 6622 } 6623 6624 /* 6625 * Unmap the memory area associated with a given swqe. 
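 *
 * This is the teardown half of the ibt_map_mem_iov() call made in
 * ibd_prepare_sgl()/ibd_send() for IBD_WQE_MAPPED sends; sketched as
 * a pair (handles abbreviated):
 *
 *	ibt_map_mem_iov(hca, &iov_attr,
 *	    (ibt_all_wr_t *)&swqe->w_swr, &swqe->w_mi_hdl);	map at send
 *	ibt_unmap_mem_iov(hca, swqe->w_mi_hdl);			unmap here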
6626 */ 6627 void 6628 ibd_unmap_mem(ibd_state_t *state, ibd_swqe_t *swqe) 6629 { 6630 ibt_status_t stat; 6631 6632 DPRINT(20, "ibd_unmap_mem: wqe=%p, seg=%d\n", swqe, swqe->w_swr.wr_nds); 6633 6634 if (swqe->w_mi_hdl) { 6635 if ((stat = ibt_unmap_mem_iov(state->id_hca_hdl, 6636 swqe->w_mi_hdl)) != IBT_SUCCESS) { 6637 DPRINT(10, 6638 "failed in ibt_unmap_mem_iov, ret=%d\n", stat); 6639 } 6640 swqe->w_mi_hdl = NULL; 6641 } 6642 swqe->w_swr.wr_nds = 0; 6643 } 6644 6645 void 6646 ibd_dec_ref_ace(ibd_state_t *state, ibd_ace_t *ace) 6647 { 6648 /* 6649 * The recycling logic can be eliminated from here 6650 * and put into the async thread if we create another 6651 * list to hold ACE's for unjoined mcg's. 6652 */ 6653 if (DEC_REF_DO_CYCLE(ace)) { 6654 ibd_mce_t *mce; 6655 6656 /* 6657 * Check with the lock taken: we decremented 6658 * reference count without the lock, and some 6659 * transmitter might already have bumped the 6660 * reference count (possible in case of multicast 6661 * disable when we leave the AH on the active 6662 * list). If not still 0, get out, leaving the 6663 * recycle bit intact. 6664 * 6665 * Atomically transition the AH from active 6666 * to free list, and queue a work request to 6667 * leave the group and destroy the mce. No 6668 * transmitter can be looking at the AH or 6669 * the MCE in between, since we have the 6670 * ac_mutex lock. In the SendOnly reap case, 6671 * it is not necessary to hold the ac_mutex 6672 * and recheck the ref count (since the AH was 6673 * taken off the active list), we just do it 6674 * to have uniform processing with the Full 6675 * reap case. 6676 */ 6677 mutex_enter(&state->id_ac_mutex); 6678 mce = ace->ac_mce; 6679 if (GET_REF_CYCLE(ace) == 0) { 6680 CLEAR_REFCYCLE(ace); 6681 /* 6682 * Identify the case of fullmember reap as 6683 * opposed to mcg trap reap. Also, port up 6684 * might set ac_mce to NULL to indicate Tx 6685 * cleanup should do no more than put the 6686 * AH in the free list (see ibd_async_link). 6687 */ 6688 if (mce != NULL) { 6689 ace->ac_mce = NULL; 6690 IBD_ACACHE_PULLOUT_ACTIVE(state, ace); 6691 /* 6692 * mc_req was initialized at mce 6693 * creation time. 6694 */ 6695 ibd_queue_work_slot(state, 6696 &mce->mc_req, IBD_ASYNC_REAP); 6697 } 6698 IBD_ACACHE_INSERT_FREE(state, ace); 6699 } 6700 mutex_exit(&state->id_ac_mutex); 6701 } 6702 } 6703 6704 /* 6705 * Common code that deals with clean ups after a successful or 6706 * erroneous transmission attempt. 6707 */ 6708 static void 6709 ibd_tx_cleanup(ibd_state_t *state, ibd_swqe_t *swqe) 6710 { 6711 ibd_ace_t *ace = swqe->w_ahandle; 6712 6713 DPRINT(20, "ibd_tx_cleanup %p\n", swqe); 6714 6715 /* 6716 * If this was a dynamic mapping in ibd_send(), we need to 6717 * unmap here. If this was an lso buffer we'd used for sending, 6718 * we need to release the lso buf to the pool, since the resource 6719 * is scarce. However, if this was simply a normal send using 6720 * the copybuf (present in each swqe), we don't need to release it. 6721 */ 6722 if (swqe->swqe_im_mblk != NULL) { 6723 if (swqe->w_buftype == IBD_WQE_MAPPED) { 6724 ibd_unmap_mem(state, swqe); 6725 } else if (swqe->w_buftype == IBD_WQE_LSOBUF) { 6726 ibd_release_lsobufs(state, 6727 swqe->w_swr.wr_sgl, swqe->w_swr.wr_nds); 6728 } 6729 ibd_free_lsohdr(swqe, swqe->swqe_im_mblk); 6730 freemsg(swqe->swqe_im_mblk); 6731 swqe->swqe_im_mblk = NULL; 6732 } 6733 6734 /* 6735 * Drop the reference count on the AH; it can be reused 6736 * now for a different destination if there are no more 6737 * posted sends that will use it. 
This can be eliminated 6738 * if we can always associate each Tx buffer with an AH. 6739 * The ace can be null if we are cleaning up from the 6740 * ibd_send() error path. 6741 */ 6742 if (ace != NULL) { 6743 ibd_dec_ref_ace(state, ace); 6744 } 6745 6746 /* 6747 * Release the send wqe for reuse. 6748 */ 6749 swqe->swqe_next = NULL; 6750 ibd_release_swqe(state, swqe, swqe, 1); 6751 } 6752 6753 static void 6754 ibd_tx_cleanup_list(ibd_state_t *state, ibd_swqe_t *head, ibd_swqe_t *tail) 6755 { 6756 ibd_ace_t *ace; 6757 ibd_swqe_t *swqe; 6758 int n = 0; 6759 6760 DPRINT(20, "ibd_tx_cleanup_list %p %p\n", head, tail); 6761 6762 for (swqe = head; swqe != NULL; swqe = WQE_TO_SWQE(swqe->swqe_next)) { 6763 6764 /* 6765 * If this was a dynamic mapping in ibd_send(), we need to 6766 * unmap here. If this was an lso buffer we'd used for sending, 6767 * we need to release the lso buf to the pool, since the 6768 * resource is scarce. However, if this was simply a normal 6769 * send using the copybuf (present in each swqe), we don't need 6770 * to release it. 6771 */ 6772 if (swqe->swqe_im_mblk != NULL) { 6773 if (swqe->w_buftype == IBD_WQE_MAPPED) { 6774 ibd_unmap_mem(state, swqe); 6775 } else if (swqe->w_buftype == IBD_WQE_LSOBUF) { 6776 ibd_release_lsobufs(state, 6777 swqe->w_swr.wr_sgl, swqe->w_swr.wr_nds); 6778 } 6779 ibd_free_lsohdr(swqe, swqe->swqe_im_mblk); 6780 freemsg(swqe->swqe_im_mblk); 6781 swqe->swqe_im_mblk = NULL; 6782 } 6783 6784 /* 6785 * Drop the reference count on the AH; it can be reused 6786 * now for a different destination if there are no more 6787 * posted sends that will use it. This can be eliminated 6788 * if we can always associate each Tx buffer with an AH. 6789 * The ace can be null if we are cleaning up from the 6790 * ibd_send() error path. 6791 */ 6792 ace = swqe->w_ahandle; 6793 if (ace != NULL) { 6794 ibd_dec_ref_ace(state, ace); 6795 } 6796 n++; 6797 } 6798 6799 /* 6800 * Release the send wqes for reuse. 6801 */ 6802 ibd_release_swqe(state, head, tail, n); 6803 } 6804 6805 /* 6806 * Processing to be done after receipt of a packet; hand off to GLD 6807 * in the format expected by GLD. The received packet has this 6808 * format: 2b sap :: 00 :: data. 6809 */ 6810 static mblk_t * 6811 ibd_process_rx(ibd_state_t *state, ibd_rwqe_t *rwqe, ibt_wc_t *wc) 6812 { 6813 ib_header_info_t *phdr; 6814 mblk_t *mp; 6815 ipoib_hdr_t *ipibp; 6816 ipha_t *iphap; 6817 ip6_t *ip6h; 6818 int len; 6819 ib_msglen_t pkt_len = wc->wc_bytes_xfer; 6820 uint32_t bufs; 6821 6822 /* 6823 * Track number handed to upper layer that need to be returned. 6824 */ 6825 bufs = atomic_inc_32_nv(&state->id_rx_list.dl_bufs_outstanding); 6826 6827 /* Never run out of rwqes, use allocb when running low */ 6828 if (bufs >= state->id_rx_bufs_outstanding_limit) { 6829 atomic_dec_32(&state->id_rx_list.dl_bufs_outstanding); 6830 atomic_inc_32(&state->id_rx_allocb); 6831 mp = allocb(pkt_len, BPRI_HI); 6832 if (mp) { 6833 bcopy(rwqe->rwqe_im_mblk->b_rptr, mp->b_rptr, pkt_len); 6834 ibd_post_recv(state, rwqe); 6835 } else { /* no memory */ 6836 atomic_inc_32(&state->id_rx_allocb_failed); 6837 ibd_post_recv(state, rwqe); 6838 return (NULL); 6839 } 6840 } else { 6841 mp = rwqe->rwqe_im_mblk; 6842 } 6843 6844 6845 /* 6846 * Adjust write pointer depending on how much data came in. 6847 */ 6848 mp->b_wptr = mp->b_rptr + pkt_len; 6849 6850 /* 6851 * Make sure this is NULL or we're in trouble. 
6852 */ 6853 if (mp->b_next != NULL) { 6854 ibd_print_warn(state, 6855 "ibd_process_rx: got duplicate mp from rcq?"); 6856 mp->b_next = NULL; 6857 } 6858 6859 /* 6860 * The IB link will deliver one of the IB link layer 6861 * headers, the Global Routing Header (GRH). The ibd 6862 * driver uses the information in the GRH to build the 6863 * ib_header_info_t structure and passes it with the 6864 * datagram up to GLDv3. 6865 * If the GRH is not valid, indicate to GLDv3 by setting 6866 * the VerTcFlow field to 0. 6867 */ 6868 phdr = (ib_header_info_t *)mp->b_rptr; 6869 if (wc->wc_flags & IBT_WC_GRH_PRESENT) { 6870 phdr->ib_grh.ipoib_sqpn = htonl(wc->wc_qpn); 6871 6872 /* if it is a loopback packet, just drop it. */ 6873 if (state->id_enable_rc) { 6874 if (bcmp(&phdr->ib_grh.ipoib_sqpn, 6875 &state->rc_macaddr_loopback, 6876 IPOIB_ADDRL) == 0) { 6877 freemsg(mp); 6878 return (NULL); 6879 } 6880 } else { 6881 if (bcmp(&phdr->ib_grh.ipoib_sqpn, &state->id_macaddr, 6882 IPOIB_ADDRL) == 0) { 6883 freemsg(mp); 6884 return (NULL); 6885 } 6886 } 6887 6888 ovbcopy(&phdr->ib_grh.ipoib_sqpn, &phdr->ib_src, 6889 sizeof (ipoib_mac_t)); 6890 if (*(uint8_t *)(phdr->ib_grh.ipoib_dgid_pref) == 0xFF) { 6891 phdr->ib_dst.ipoib_qpn = htonl(IB_MC_QPN); 6892 IBD_CLEAR_SCOPE_PKEY(&phdr->ib_dst); 6893 } else { 6894 phdr->ib_dst.ipoib_qpn = state->id_macaddr.ipoib_qpn; 6895 } 6896 } else { 6897 /* 6898 * It cannot be an IBA multicast packet. It must have been 6899 * unicast to us. Just copy the interface address to dst. 6900 */ 6901 phdr->ib_grh.ipoib_vertcflow = 0; 6902 ovbcopy(&state->id_macaddr, &phdr->ib_dst, 6903 sizeof (ipoib_mac_t)); 6904 } 6905 6906 /* 6907 * For ND6 packets, padding is at the front of the source/target 6908 * lladdr. However, the inet6 layer is not aware of it, hence remove 6909 * the padding from such packets. 6910 */ 6911 ipibp = (ipoib_hdr_t *)((uchar_t *)mp->b_rptr + sizeof (ipoib_pgrh_t)); 6912 if (ntohs(ipibp->ipoib_type) == ETHERTYPE_IPV6) { 6913 ip6h = (ip6_t *)((uchar_t *)ipibp + sizeof (ipoib_hdr_t)); 6914 len = ntohs(ip6h->ip6_plen); 6915 if (ip6h->ip6_nxt == IPPROTO_ICMPV6) { 6916 /* LINTED: E_CONSTANT_CONDITION */ 6917 IBD_PAD_NSNA(ip6h, len, IBD_RECV); 6918 } 6919 } 6920 6921 /* 6922 * Update statistics 6923 */ 6924 atomic_add_64(&state->id_rcv_bytes, pkt_len); 6925 atomic_inc_64(&state->id_rcv_pkt); 6926 if (bcmp(&phdr->ib_dst, &state->id_bcaddr, IPOIB_ADDRL) == 0) 6927 atomic_inc_64(&state->id_brd_rcv); 6928 else if ((ntohl(phdr->ib_dst.ipoib_qpn) & IB_QPN_MASK) == IB_MC_QPN) 6929 atomic_inc_64(&state->id_multi_rcv); 6930 6931 iphap = (ipha_t *)((uchar_t *)ipibp + sizeof (ipoib_hdr_t)); 6932 /* 6933 * Set the receive checksum status in mp. 6934 * Hardware checksumming can be considered valid only if: 6935 * 1. The CQE.IP_OK bit is set 6936 * 2. CQE.CKSUM = 0xffff 6937 * 3. No IPv6 routing header is present in the packet 6938 * 4. There are no IP options in the IP header 6939 */ 6940 6941 if (((wc->wc_flags & IBT_WC_CKSUM_OK) == IBT_WC_CKSUM_OK) && 6942 (wc->wc_cksum == 0xFFFF) && 6943 (iphap->ipha_version_and_hdr_length == IP_SIMPLE_HDR_VERSION)) { 6944 mac_hcksum_set(mp, 0, 0, 0, 0, HCK_FULLCKSUM_OK); 6945 } 6946 6947 return (mp); 6948 } 6949 6950 /* 6951 * Callback code invoked from STREAMS when the receive data buffer is 6952 * free for recycling.
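 *
 * The hook-up happens through the frtn_t handed to desballoc(9F); a
 * minimal sketch of that pattern (assuming w_freemsg_cb is the rwqe's
 * embedded frtn_t, as the desballoc() call below suggests):
 *
 *	rwqe->w_freemsg_cb.free_func = ibd_freemsg_cb;
 *	rwqe->w_freemsg_cb.free_arg = (char *)rwqe;
 *	mp = desballoc(rwqe->rwqe_copybuf.ic_bufaddr,
 *	    state->id_mtu + IPOIB_GRH_SIZE, 0, &rwqe->w_freemsg_cb);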
6953 */ 6954 static void 6955 ibd_freemsg_cb(char *arg) 6956 { 6957 ibd_rwqe_t *rwqe = (ibd_rwqe_t *)arg; 6958 ibd_state_t *state = rwqe->w_state; 6959 6960 atomic_dec_32(&state->id_rx_list.dl_bufs_outstanding); 6961 6962 /* 6963 * If the driver is stopped, just free the rwqe. 6964 */ 6965 if (atomic_add_32_nv(&state->id_running, 0) == 0) { 6966 DPRINT(6, "ibd_freemsg: wqe being freed"); 6967 rwqe->rwqe_im_mblk = NULL; 6968 ibd_free_rwqe(state, rwqe); 6969 return; 6970 } 6971 6972 rwqe->rwqe_im_mblk = desballoc(rwqe->rwqe_copybuf.ic_bufaddr, 6973 state->id_mtu + IPOIB_GRH_SIZE, 0, &rwqe->w_freemsg_cb); 6974 if (rwqe->rwqe_im_mblk == NULL) { 6975 ibd_free_rwqe(state, rwqe); 6976 DPRINT(6, "ibd_freemsg: desballoc failed"); 6977 return; 6978 } 6979 6980 ibd_post_recv(state, rwqe); 6981 } 6982 6983 static uint_t 6984 ibd_tx_recycle(caddr_t arg) 6985 { 6986 ibd_state_t *state = (ibd_state_t *)arg; 6987 6988 /* 6989 * Poll for completed entries 6990 */ 6991 ibd_poll_scq(state, state->id_scq_hdl); 6992 6993 return (DDI_INTR_CLAIMED); 6994 } 6995 6996 #ifdef IBD_LOGGING 6997 static void 6998 ibd_log_init(void) 6999 { 7000 ibd_lbuf = kmem_zalloc(IBD_LOG_SZ, KM_SLEEP); 7001 ibd_lbuf_ndx = 0; 7002 7003 mutex_init(&ibd_lbuf_lock, NULL, MUTEX_DRIVER, NULL); 7004 } 7005 7006 static void 7007 ibd_log_fini(void) 7008 { 7009 if (ibd_lbuf) 7010 kmem_free(ibd_lbuf, IBD_LOG_SZ); 7011 ibd_lbuf_ndx = 0; 7012 ibd_lbuf = NULL; 7013 7014 mutex_destroy(&ibd_lbuf_lock); 7015 } 7016 7017 static void 7018 ibd_log(const char *fmt, ...) 7019 { 7020 va_list ap; 7021 uint32_t off; 7022 uint32_t msglen; 7023 char tmpbuf[IBD_DMAX_LINE]; 7024 7025 if (ibd_lbuf == NULL) 7026 return; 7027 7028 va_start(ap, fmt); 7029 msglen = vsnprintf(tmpbuf, IBD_DMAX_LINE, fmt, ap); 7030 va_end(ap); 7031 7032 if (msglen >= IBD_DMAX_LINE) 7033 msglen = IBD_DMAX_LINE - 1; 7034 7035 mutex_enter(&ibd_lbuf_lock); 7036 7037 off = ibd_lbuf_ndx; /* current msg should go here */ 7038 if ((ibd_lbuf_ndx) && (ibd_lbuf[ibd_lbuf_ndx-1] != '\n')) 7039 ibd_lbuf[ibd_lbuf_ndx-1] = '\n'; 7040 7041 ibd_lbuf_ndx += msglen; /* place where next msg should start */ 7042 ibd_lbuf[ibd_lbuf_ndx] = 0; /* current msg should terminate */ 7043 7044 if (ibd_lbuf_ndx >= (IBD_LOG_SZ - 2 * IBD_DMAX_LINE)) 7045 ibd_lbuf_ndx = 0; 7046 7047 mutex_exit(&ibd_lbuf_lock); 7048 7049 bcopy(tmpbuf, ibd_lbuf+off, msglen); /* no lock needed for this */ 7050 } 7051 #endif 7052
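/*
 * Observability notes (illustrative sketches, not part of the driver
 * logic): the counters returned by ibd_m_stat() surface through the
 * GLDv3 framework, so on a system with an IPoIB link named ibd0 they
 * can typically be inspected with, e.g.:
 *
 *	# dladm show-link -s ibd0
 *
 * and, where DTrace fbt probes are available and ibd_send() has not
 * been inlined, the rate at which the send path asks the framework to
 * retry (B_FALSE returns) can be estimated with:
 *
 *	# dtrace -n 'fbt:ibd:ibd_send:return /arg1 == 0/ { @ = count(); }'
 */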