/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/ib/ibtl/impl/ibtl.h>
#include <sys/ib/ibtl/impl/ibtl_cm.h>
#include <sys/taskq.h>
#include <sys/disp.h>
#include <sys/callb.h>
#include <sys/proc.h>

/*
 * ibtl_handlers.c
 */

/*
 * What's in this file?
 *
 *   This file started as an implementation of Asynchronous Event/Error
 *   handling and Completion Queue handling.  As the implementation
 *   evolved, code has been added for other ibc_* interfaces (resume,
 *   predetach, etc.) that use the same mechanisms as used for asyncs.
 *
 * Async and CQ handling at interrupt level.
 *
 *   CQ handling is normally done at interrupt level using the CQ callback
 *   handler to call the appropriate IBT Client (owner of the CQ).  For
 *   clients that would prefer a fully flexible non-interrupt context to
 *   do their CQ handling, a CQ can be created so that its handler is
 *   called from a non-interrupt thread.  CQ handling is done frequently
 *   whereas Async handling is expected to occur very infrequently.
 *
 *   Async handling is done by marking (or'ing in of an async_code of) the
 *   pertinent IBTL data structure, and then notifying the async_thread(s)
 *   that the data structure has async work to be done.  The notification
 *   occurs by linking the data structure through its async_link onto a
 *   list of like data structures and waking up an async_thread.  This
 *   list append is not done if there is already async work pending on
 *   this data structure (IBTL_ASYNC_PENDING).
 *
 * Async Mutex and CQ Mutex
 *
 *   The global ibtl_async_mutex is "the" mutex used to control access
 *   to all the data needed by ibc_async_handler.  All the threads that
 *   use this mutex are written so that the mutex is held for very short
 *   periods of time, and never held while making calls to functions
 *   that may block.
 *
 *   The global ibtl_cq_mutex is used similarly by ibc_cq_handler and
 *   the ibtl_cq_thread(s).
 *
 * Mutex hierarchy
 *
 *   The ibtl_clnt_list_mutex is above the ibtl_async_mutex.
 *   ibtl_clnt_list_mutex protects all of the various lists.
 *   The ibtl_async_mutex is below this in the hierarchy.
 *
 *   The ibtl_cq_mutex is independent of the above mutexes.
 *
 * Threads
 *
 *   There are "ibtl_cq_threads" number of threads created for handling
 *   Completion Queues in threads.  If this feature really gets used,
 *   then we will want to do some suitable tuning.  Similarly, we may
 *   want to tune the number of "ibtl_async_thread_init".
 *
 *   The function ibtl_cq_thread is the main loop for handling a CQ in a
 *   thread.  There can be multiple threads executing this same code.
 *   The code sleeps when there is no work to be done (list is empty),
 *   otherwise it pulls the first CQ structure off the list and performs
 *   the CQ handler callback to the client.  After that returns, a check
 *   is made, and if another ibc_cq_handler call was made for this CQ,
 *   the client is called again.
 *
 *   The function ibtl_async_thread is the main loop for handling async
 *   events/errors.  There can be multiple threads executing this same code.
 *   The code sleeps when there is no work to be done (lists are empty),
 *   otherwise it pulls the first structure off one of the lists and
 *   performs the async callback(s) to the client(s).  Note that HCA
 *   async handling is done by calling each of the clients using the HCA.
 *   When the async handling completes, the data structure having the async
 *   event/error is checked for more work before it's considered "done".
 *
 * Taskq
 *
 *   The async_taskq is used here for allowing async handler callbacks to
 *   occur simultaneously to multiple clients of an HCA.  This taskq could
 *   be used for other purposes, e.g., if all the async_threads are in
 *   use, but this is deemed as overkill since asyncs should occur rarely.
 */

/* Globals */
static char ibtf_handlers[] = "ibtl_handlers";

/* priority for IBTL threads (async, cq, and taskq) */
static pri_t ibtl_pri = MAXCLSYSPRI - 1; /* maybe override in /etc/system */

/* taskq used for HCA asyncs */
#define	ibtl_async_taskq system_taskq

/* data for async handling by threads */
static kmutex_t ibtl_async_mutex;	/* protects most *_async_* data */
static kcondvar_t ibtl_async_cv;	/* async_threads wait on this */
static kcondvar_t ibtl_clnt_cv;		/* ibt_detach might wait on this */
static void ibtl_dec_clnt_async_cnt(ibtl_clnt_t *clntp);
static void ibtl_inc_clnt_async_cnt(ibtl_clnt_t *clntp);

static kt_did_t *ibtl_async_did;	/* for thread_join() */
static int ibtl_async_thread_init = 4;	/* total # of async_threads to create */
static int ibtl_async_thread_exit = 0;	/* set if/when thread(s) should exit */

/* async lists for various structures */
static ibtl_hca_devinfo_t *ibtl_async_hca_list_start, *ibtl_async_hca_list_end;
static ibtl_eec_t *ibtl_async_eec_list_start, *ibtl_async_eec_list_end;
static ibtl_qp_t *ibtl_async_qp_list_start, *ibtl_async_qp_list_end;
static ibtl_cq_t *ibtl_async_cq_list_start, *ibtl_async_cq_list_end;
static ibtl_srq_t *ibtl_async_srq_list_start, *ibtl_async_srq_list_end;

/* data for CQ completion handling by threads */
static kmutex_t ibtl_cq_mutex;	/* protects the cv and the list below */
static kcondvar_t ibtl_cq_cv;
static ibtl_cq_t *ibtl_cq_list_start, *ibtl_cq_list_end;

static int ibtl_cq_threads = 0;		/* total # of cq threads */
static int ibtl_cqs_using_threads = 0;	/* total # of cqs using threads */
static int ibtl_cq_thread_exit = 0;	/* set if/when thread(s) should exit */

/* value used to tell IBTL threads to exit */
#define	IBTL_THREAD_EXIT 0x1b7fdead	/* IBTF DEAD */

int ibtl_eec_not_supported = 1;

char *ibtl_last_client_name;	/* may help debugging */

_NOTE(LOCK_ORDER(ibtl_clnt_list_mutex ibtl_async_mutex))

/*
 * ibc_async_handler()
 *
 * Asynchronous Event/Error Handler.
 *
 * This is the function called by HCA drivers to post the various async
 * events and errors mentioned in the IB architecture spec.  See
 * ibtl_types.h for additional details of this.
 *
 * This function marks the pertinent IBTF object with the async_code,
 * and queues the object for handling by an ibtl_async_thread.  If
 * the object is NOT already marked for async processing, it is added
 * to the associated list for that type of object, and an
 * ibtl_async_thread is signaled to finish the async work.
 */
void
ibc_async_handler(ibc_clnt_hdl_t hca_devp, ibt_async_code_t code,
    ibc_async_event_t *event_p)
{
	ibtl_qp_t	*ibtl_qp;
	ibtl_cq_t	*ibtl_cq;
	ibtl_srq_t	*ibtl_srq;
	ibtl_eec_t	*ibtl_eec;
	uint8_t		port_minus1;

	IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler(%p, 0x%x, %p)",
	    hca_devp, code, event_p);

	mutex_enter(&ibtl_async_mutex);

	switch (code) {
	case IBT_EVENT_PATH_MIGRATED_QP:
	case IBT_EVENT_SQD:
	case IBT_ERROR_CATASTROPHIC_QP:
	case IBT_ERROR_PATH_MIGRATE_REQ_QP:
	case IBT_EVENT_COM_EST_QP:
	case IBT_ERROR_INVALID_REQUEST_QP:
	case IBT_ERROR_ACCESS_VIOLATION_QP:
	case IBT_EVENT_EMPTY_QP:
		ibtl_qp = event_p->ev_qp_hdl;
		if (ibtl_qp == NULL) {
			IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler: "
			    "bad qp handle");
			break;
		}
		switch (code) {
		case IBT_ERROR_CATASTROPHIC_QP:
			ibtl_qp->qp_cat_fma_ena = event_p->ev_fma_ena; break;
		case IBT_ERROR_PATH_MIGRATE_REQ_QP:
			ibtl_qp->qp_pth_fma_ena = event_p->ev_fma_ena; break;
		case IBT_ERROR_INVALID_REQUEST_QP:
			ibtl_qp->qp_inv_fma_ena = event_p->ev_fma_ena; break;
		case IBT_ERROR_ACCESS_VIOLATION_QP:
			ibtl_qp->qp_acc_fma_ena = event_p->ev_fma_ena; break;
		}

		ibtl_qp->qp_async_codes |= code;
		if ((ibtl_qp->qp_async_flags & IBTL_ASYNC_PENDING) == 0) {
			ibtl_qp->qp_async_flags |= IBTL_ASYNC_PENDING;
			ibtl_qp->qp_async_link = NULL;
			if (ibtl_async_qp_list_end == NULL)
				ibtl_async_qp_list_start = ibtl_qp;
			else
				ibtl_async_qp_list_end->qp_async_link = ibtl_qp;
			ibtl_async_qp_list_end = ibtl_qp;
			cv_signal(&ibtl_async_cv);
		}
		break;

	case IBT_ERROR_CQ:
		ibtl_cq = event_p->ev_cq_hdl;
		if (ibtl_cq == NULL) {
			IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler: "
			    "bad cq handle");
			break;
		}
		ibtl_cq->cq_async_codes |= code;
		ibtl_cq->cq_fma_ena = event_p->ev_fma_ena;
		if ((ibtl_cq->cq_async_flags & IBTL_ASYNC_PENDING) == 0) {
			ibtl_cq->cq_async_flags |= IBTL_ASYNC_PENDING;
			ibtl_cq->cq_async_link = NULL;
			if (ibtl_async_cq_list_end == NULL)
				ibtl_async_cq_list_start = ibtl_cq;
			else
				ibtl_async_cq_list_end->cq_async_link = ibtl_cq;
			ibtl_async_cq_list_end = ibtl_cq;
			cv_signal(&ibtl_async_cv);
		}
		break;

	case IBT_ERROR_CATASTROPHIC_SRQ:
	case IBT_EVENT_LIMIT_REACHED_SRQ:
		ibtl_srq = event_p->ev_srq_hdl;
		if (ibtl_srq == NULL) {
			IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler: "
			    "bad srq handle");
			break;
		}
		ibtl_srq->srq_async_codes |= code;
		ibtl_srq->srq_fma_ena = event_p->ev_fma_ena;
		if ((ibtl_srq->srq_async_flags & IBTL_ASYNC_PENDING) == 0) {
			ibtl_srq->srq_async_flags |= IBTL_ASYNC_PENDING;
			ibtl_srq->srq_async_link = NULL;
			if (ibtl_async_srq_list_end == NULL)
				ibtl_async_srq_list_start = ibtl_srq;
			else
				ibtl_async_srq_list_end->srq_async_link =
				    ibtl_srq;
			ibtl_async_srq_list_end = ibtl_srq;
			cv_signal(&ibtl_async_cv);
		}
		break;

	case IBT_EVENT_PATH_MIGRATED_EEC:
	case IBT_ERROR_PATH_MIGRATE_REQ_EEC:
	case IBT_ERROR_CATASTROPHIC_EEC:
	case IBT_EVENT_COM_EST_EEC:
		if (ibtl_eec_not_supported) {
			IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler: "
			    "EEC events are disabled.");
			break;
		}
		ibtl_eec = event_p->ev_eec_hdl;
		if (ibtl_eec == NULL) {
			IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler: "
			    "bad eec handle");
			break;
		}
		switch (code) {
		case IBT_ERROR_PATH_MIGRATE_REQ_EEC:
			ibtl_eec->eec_pth_fma_ena = event_p->ev_fma_ena; break;
		case IBT_ERROR_CATASTROPHIC_EEC:
			ibtl_eec->eec_cat_fma_ena = event_p->ev_fma_ena; break;
		}
		ibtl_eec->eec_async_codes |= code;
		if ((ibtl_eec->eec_async_flags & IBTL_ASYNC_PENDING) == 0) {
			ibtl_eec->eec_async_flags |= IBTL_ASYNC_PENDING;
			ibtl_eec->eec_async_link = NULL;
			if (ibtl_async_eec_list_end == NULL)
				ibtl_async_eec_list_start = ibtl_eec;
			else
				ibtl_async_eec_list_end->eec_async_link =
				    ibtl_eec;
			ibtl_async_eec_list_end = ibtl_eec;
			cv_signal(&ibtl_async_cv);
		}
		break;

	case IBT_ERROR_LOCAL_CATASTROPHIC:
		hca_devp->hd_async_codes |= code;
		hca_devp->hd_fma_ena = event_p->ev_fma_ena;
		/* FALLTHROUGH */

	case IBT_EVENT_PORT_UP:
	case IBT_ERROR_PORT_DOWN:
		if ((code == IBT_EVENT_PORT_UP) ||
		    (code == IBT_ERROR_PORT_DOWN)) {
			if ((port_minus1 = event_p->ev_port - 1) >=
			    hca_devp->hd_hca_attr->hca_nports) {
				IBTF_DPRINTF_L2(ibtf_handlers,
				    "ibc_async_handler: bad port #: %d",
				    event_p->ev_port);
				break;
			}
			hca_devp->hd_async_port[port_minus1] =
			    ((code == IBT_EVENT_PORT_UP) ? IBTL_HCA_PORT_UP :
			    IBTL_HCA_PORT_DOWN) | IBTL_HCA_PORT_CHANGED;
			hca_devp->hd_async_codes |= code;
		}

		if ((hca_devp->hd_async_flags & IBTL_ASYNC_PENDING) == 0) {
			hca_devp->hd_async_flags |= IBTL_ASYNC_PENDING;
			hca_devp->hd_async_link = NULL;
			if (ibtl_async_hca_list_end == NULL)
				ibtl_async_hca_list_start = hca_devp;
			else
				ibtl_async_hca_list_end->hd_async_link =
				    hca_devp;
			ibtl_async_hca_list_end = hca_devp;
			cv_signal(&ibtl_async_cv);
		}

		break;

	default:
		IBTF_DPRINTF_L1(ibtf_handlers, "ibc_async_handler: "
		    "invalid code (0x%x)", code);
	}

	mutex_exit(&ibtl_async_mutex);
}

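/*
 * Example (hypothetical CI/HCA driver usage): a driver that detects a port
 * state change might post the async roughly as sketched below.  The "state"
 * pointer and its "hs_ibtfpriv" field are illustrative only; the real names
 * are whatever the driver saved from ibc_attach().  Only ibc_async_handler(),
 * ibc_async_event_t, and the event codes are IBTF interfaces.
 *
 *	ibc_async_event_t	ev;
 *
 *	bzero(&ev, sizeof (ev));
 *	ev.ev_port = port;			(1-based port number)
 *	ibc_async_handler(state->hs_ibtfpriv, IBT_ERROR_PORT_DOWN, &ev);
 *
 * The call itself only marks and queues the HCA object; the client
 * callbacks are made later from an ibtl_async_thread.
 */
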
/* Finally, make the async call to the client. */

static void
ibtl_async_client_call(ibtl_hca_t *ibt_hca, ibt_async_code_t code,
    ibt_async_event_t *event_p)
{
	ibtl_clnt_t		*clntp;
	void			*client_private;
	ibt_async_handler_t	async_handler;
	char			*client_name;

	IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_async_client_call(%p, 0x%x, %p)",
	    ibt_hca, code, event_p);

	clntp = ibt_hca->ha_clnt_devp;

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_last_client_name))
	/* Record who is being called (just a debugging aid) */
	ibtl_last_client_name = client_name = clntp->clnt_name;
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_last_client_name))

	client_private = clntp->clnt_private;
	async_handler = clntp->clnt_modinfop->mi_async_handler;

	if (code & (IBT_EVENT_COM_EST_QP | IBT_EVENT_COM_EST_EEC)) {
		mutex_enter(&ibtl_clnt_list_mutex);
		async_handler = ibtl_cm_async_handler;
		client_private = ibtl_cm_clnt_private;
		mutex_exit(&ibtl_clnt_list_mutex);
		ibt_hca = NULL;
		IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_async_client_call: "
		    "calling CM for COM_EST");
	} else {
		IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_async_client_call: "
		    "calling client '%s'", client_name);
	}
	if (async_handler != NULL)
		async_handler(client_private, ibt_hca, code, event_p);
	else
		IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_async_client_call: "
		    "client '%s' has no async handler", client_name);
}

/*
 * Inform CM or DM about HCA events.
 *
 * We use taskqs to allow simultaneous notification, with sleeping.
 * Since taskqs only allow one argument, we define a structure
 * because we need to pass in more than one argument.
 */
struct ibtl_mgr_s {
	ibtl_hca_devinfo_t	*mgr_hca_devp;
	ibt_async_handler_t	mgr_async_handler;
	void			*mgr_clnt_private;
};

/*
 * Asyncs of HCA level events for CM and DM.  Call CM or DM and tell them
 * about the HCA for the event recorded in the ibtl_hca_devinfo_t.
 */
static void
ibtl_do_mgr_async_task(void *arg)
{
	struct ibtl_mgr_s	*mgrp = (struct ibtl_mgr_s *)arg;
	ibtl_hca_devinfo_t	*hca_devp = mgrp->mgr_hca_devp;

	IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_do_mgr_async_task(0x%x)",
	    hca_devp->hd_async_code);

	mgrp->mgr_async_handler(mgrp->mgr_clnt_private, NULL,
	    hca_devp->hd_async_code, &hca_devp->hd_async_event);
	kmem_free(mgrp, sizeof (*mgrp));

	mutex_enter(&ibtl_clnt_list_mutex);
	if (--hca_devp->hd_async_task_cnt == 0)
		cv_signal(&hca_devp->hd_async_task_cv);
	mutex_exit(&ibtl_clnt_list_mutex);
}

static void
ibtl_tell_mgr(ibtl_hca_devinfo_t *hca_devp, ibt_async_handler_t async_handler,
    void *clnt_private)
{
	struct ibtl_mgr_s *mgrp;

	if (async_handler == NULL)
		return;

	_NOTE(NO_COMPETING_THREADS_NOW)
	mgrp = kmem_alloc(sizeof (*mgrp), KM_SLEEP);
	mgrp->mgr_hca_devp = hca_devp;
	mgrp->mgr_async_handler = async_handler;
	mgrp->mgr_clnt_private = clnt_private;
	hca_devp->hd_async_task_cnt++;

	(void) taskq_dispatch(ibtl_async_taskq, ibtl_do_mgr_async_task, mgrp,
	    TQ_SLEEP);
#ifndef lint
	_NOTE(COMPETING_THREADS_NOW)
#endif
}

/*
 * Per client-device asyncs for HCA level events.  Call each client that is
 * using the HCA for the event recorded in the ibtl_hca_devinfo_t.
 */
static void
ibtl_hca_client_async_task(void *arg)
{
	ibtl_hca_t		*ibt_hca = (ibtl_hca_t *)arg;
	ibtl_hca_devinfo_t	*hca_devp = ibt_hca->ha_hca_devp;
	ibtl_clnt_t		*clntp = ibt_hca->ha_clnt_devp;
	ibt_async_event_t	async_event;

	IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_hca_client_async_task(%p, 0x%x)",
	    ibt_hca, hca_devp->hd_async_code);

	bcopy(&hca_devp->hd_async_event, &async_event, sizeof (async_event));
	ibtl_async_client_call(ibt_hca, hca_devp->hd_async_code, &async_event);

	mutex_enter(&ibtl_async_mutex);
	if (--ibt_hca->ha_async_cnt == 0 &&
	    (ibt_hca->ha_async_flags & IBTL_ASYNC_FREE_OBJECT)) {
		mutex_exit(&ibtl_async_mutex);
		kmem_free(ibt_hca, sizeof (ibtl_hca_t));
	} else
		mutex_exit(&ibtl_async_mutex);

	mutex_enter(&ibtl_clnt_list_mutex);
	if (--hca_devp->hd_async_task_cnt == 0)
		cv_signal(&hca_devp->hd_async_task_cv);
	if (--clntp->clnt_async_cnt == 0)
		cv_broadcast(&ibtl_clnt_cv);

	mutex_exit(&ibtl_clnt_list_mutex);
}

/*
 * Asyncs for HCA level events.
 *
 * The function continues to run until there are no more async
 * events/errors for this HCA.  An event is chosen for dispatch
 * to all clients of this HCA.  This thread dispatches them via
 * the ibtl_async_taskq, then sleeps until all tasks are done.
 *
 * This thread records the async_code and async_event in the
 * ibtl_hca_devinfo_t for all client taskq threads to reference.
 *
 * This is called from an async or taskq thread with ibtl_async_mutex held.
 */
static void
ibtl_do_hca_asyncs(ibtl_hca_devinfo_t *hca_devp)
{
	ibtl_hca_t			*ibt_hca;
	ibt_async_code_t		code;
	ibtl_async_port_status_t	temp;
	uint8_t				nports;
	uint8_t				port_minus1;
	ibtl_async_port_status_t	*portp;

	mutex_exit(&ibtl_async_mutex);

	mutex_enter(&ibtl_clnt_list_mutex);
	while (hca_devp->hd_async_busy)
		cv_wait(&hca_devp->hd_async_busy_cv, &ibtl_clnt_list_mutex);
	hca_devp->hd_async_busy = 1;
	mutex_enter(&ibtl_async_mutex);

	bzero(&hca_devp->hd_async_event, sizeof (hca_devp->hd_async_event));
	for (;;) {

		hca_devp->hd_async_event.ev_fma_ena = 0;

		code = hca_devp->hd_async_codes;
		if (code & IBT_ERROR_LOCAL_CATASTROPHIC) {
			code = IBT_ERROR_LOCAL_CATASTROPHIC;
			hca_devp->hd_async_event.ev_fma_ena =
			    hca_devp->hd_fma_ena;
		} else if (code & IBT_ERROR_PORT_DOWN)
			code = IBT_ERROR_PORT_DOWN;
		else if (code & IBT_EVENT_PORT_UP)
			code = IBT_EVENT_PORT_UP;
		else {
			hca_devp->hd_async_codes = 0;
			code = 0;
		}

		if (code == 0) {
			hca_devp->hd_async_flags &= ~IBTL_ASYNC_PENDING;
			break;
		}
		hca_devp->hd_async_codes &= ~code;

		if ((code == IBT_EVENT_PORT_UP) ||
		    (code == IBT_ERROR_PORT_DOWN)) {
			/* PORT_UP or PORT_DOWN */
			portp = hca_devp->hd_async_port;
			nports = hca_devp->hd_hca_attr->hca_nports;
			for (port_minus1 = 0; port_minus1 < nports;
			    port_minus1++) {
				temp = ((code == IBT_EVENT_PORT_UP) ?
				    IBTL_HCA_PORT_UP : IBTL_HCA_PORT_DOWN) |
				    IBTL_HCA_PORT_CHANGED;
				if (portp[port_minus1] == temp)
					break;
			}
			if (port_minus1 >= nports) {
				/* we checked again, but found nothing */
				continue;
			}
			IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_do_hca_asyncs: "
			    "async: port# %x code %x", port_minus1 + 1, code);
			/* mark it to check for other ports after we're done */
			hca_devp->hd_async_codes |= code;

			hca_devp->hd_async_event.ev_port = port_minus1 + 1;
			hca_devp->hd_async_port[port_minus1] &=
			    ~IBTL_HCA_PORT_CHANGED;

			mutex_exit(&ibtl_async_mutex);
			ibtl_reinit_hca_portinfo(hca_devp, port_minus1 + 1);
			mutex_enter(&ibtl_async_mutex);
		}

		hca_devp->hd_async_code = code;
		hca_devp->hd_async_event.ev_hca_guid =
		    hca_devp->hd_hca_attr->hca_node_guid;
		mutex_exit(&ibtl_async_mutex);

		/*
		 * Make sure to inform CM, DM, and IBMA if we know of them.
		 * Also, make sure not to inform them a second time, which
		 * would occur if they have the HCA open.
		 */

		if (ibtl_ibma_async_handler)
			ibtl_tell_mgr(hca_devp, ibtl_ibma_async_handler,
			    ibtl_ibma_clnt_private);
		/* wait for all tasks to complete */
		while (hca_devp->hd_async_task_cnt != 0)
			cv_wait(&hca_devp->hd_async_task_cv,
			    &ibtl_clnt_list_mutex);

		if (ibtl_dm_async_handler)
			ibtl_tell_mgr(hca_devp, ibtl_dm_async_handler,
			    ibtl_dm_clnt_private);
		if (ibtl_cm_async_handler)
			ibtl_tell_mgr(hca_devp, ibtl_cm_async_handler,
			    ibtl_cm_clnt_private);
		/* wait for all tasks to complete */
		while (hca_devp->hd_async_task_cnt != 0)
			cv_wait(&hca_devp->hd_async_task_cv,
			    &ibtl_clnt_list_mutex);

		for (ibt_hca = hca_devp->hd_clnt_list;
		    ibt_hca != NULL;
		    ibt_hca = ibt_hca->ha_clnt_link) {

			/* Managers are handled above */
			if (IBTL_HCA2MODI_P(ibt_hca)->mi_async_handler ==
			    ibtl_cm_async_handler)
				continue;
			if (IBTL_HCA2MODI_P(ibt_hca)->mi_async_handler ==
			    ibtl_dm_async_handler)
				continue;
			if (IBTL_HCA2MODI_P(ibt_hca)->mi_async_handler ==
			    ibtl_ibma_async_handler)
				continue;
			++ibt_hca->ha_clnt_devp->clnt_async_cnt;

			mutex_enter(&ibtl_async_mutex);
			ibt_hca->ha_async_cnt++;
			mutex_exit(&ibtl_async_mutex);
			hca_devp->hd_async_task_cnt++;
			(void) taskq_dispatch(ibtl_async_taskq,
			    ibtl_hca_client_async_task, ibt_hca, TQ_SLEEP);
		}

		/* wait for all tasks to complete */
		while (hca_devp->hd_async_task_cnt != 0)
			cv_wait(&hca_devp->hd_async_task_cv,
			    &ibtl_clnt_list_mutex);

		mutex_enter(&ibtl_async_mutex);
	}
	hca_devp->hd_async_code = 0;
	hca_devp->hd_async_busy = 0;
	cv_broadcast(&hca_devp->hd_async_busy_cv);
	mutex_exit(&ibtl_clnt_list_mutex);
}

/*
 * Asyncs for QP objects.
 *
 * The function continues to run until there are no more async
 * events/errors for this object.
 */
static void
ibtl_do_qp_asyncs(ibtl_qp_t *ibtl_qp)
{
	ibt_async_code_t	code;
	ibt_async_event_t	async_event;

	ASSERT(MUTEX_HELD(&ibtl_async_mutex));
	bzero(&async_event, sizeof (async_event));
	async_event.ev_chan_hdl = IBTL_QP2CHAN(ibtl_qp);

	while ((code = ibtl_qp->qp_async_codes) != 0) {
		async_event.ev_fma_ena = 0;
		if (ibtl_qp->qp_async_flags & IBTL_ASYNC_FREE_OBJECT)
			code = 0;	/* fallthrough to "kmem_free" */
		else if (code & IBT_ERROR_CATASTROPHIC_QP) {
			code = IBT_ERROR_CATASTROPHIC_QP;
			async_event.ev_fma_ena = ibtl_qp->qp_cat_fma_ena;
		} else if (code & IBT_ERROR_INVALID_REQUEST_QP) {
			code = IBT_ERROR_INVALID_REQUEST_QP;
			async_event.ev_fma_ena = ibtl_qp->qp_inv_fma_ena;
		} else if (code & IBT_ERROR_ACCESS_VIOLATION_QP) {
			code = IBT_ERROR_ACCESS_VIOLATION_QP;
			async_event.ev_fma_ena = ibtl_qp->qp_acc_fma_ena;
		} else if (code & IBT_ERROR_PATH_MIGRATE_REQ_QP) {
			code = IBT_ERROR_PATH_MIGRATE_REQ_QP;
			async_event.ev_fma_ena = ibtl_qp->qp_pth_fma_ena;
		} else if (code & IBT_EVENT_PATH_MIGRATED_QP)
			code = IBT_EVENT_PATH_MIGRATED_QP;
		else if (code & IBT_EVENT_SQD)
			code = IBT_EVENT_SQD;
		else if (code & IBT_EVENT_COM_EST_QP)
			code = IBT_EVENT_COM_EST_QP;
		else if (code & IBT_EVENT_EMPTY_QP)
			code = IBT_EVENT_EMPTY_QP;
		else {
			IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_do_qp_asyncs: "
			    "async: unexpected QP async code 0x%x", code);
			ibtl_qp->qp_async_codes = 0;
			code = 0;
		}
		ibtl_qp->qp_async_codes &= ~code;

		if (code) {
			mutex_exit(&ibtl_async_mutex);
			ibtl_async_client_call(ibtl_qp->qp_hca,
			    code, &async_event);
			mutex_enter(&ibtl_async_mutex);
		}

		if (ibtl_qp->qp_async_flags & IBTL_ASYNC_FREE_OBJECT) {
			mutex_exit(&ibtl_async_mutex);
			cv_destroy(&(IBTL_QP2CHAN(ibtl_qp))->ch_cm_cv);
			mutex_destroy(&(IBTL_QP2CHAN(ibtl_qp))->ch_cm_mutex);
			kmem_free(IBTL_QP2CHAN(ibtl_qp),
			    sizeof (ibtl_channel_t));
			mutex_enter(&ibtl_async_mutex);
			return;
		}
	}
	ibtl_qp->qp_async_flags &= ~IBTL_ASYNC_PENDING;
}

/*
 * Asyncs for SRQ objects.
 *
 * The function continues to run until there are no more async
 * events/errors for this object.
 */
static void
ibtl_do_srq_asyncs(ibtl_srq_t *ibtl_srq)
{
	ibt_async_code_t	code;
	ibt_async_event_t	async_event;

	ASSERT(MUTEX_HELD(&ibtl_async_mutex));
	bzero(&async_event, sizeof (async_event));
	async_event.ev_srq_hdl = ibtl_srq;
	async_event.ev_fma_ena = ibtl_srq->srq_fma_ena;

	while ((code = ibtl_srq->srq_async_codes) != 0) {
		if (ibtl_srq->srq_async_flags & IBTL_ASYNC_FREE_OBJECT)
			code = 0;	/* fallthrough to "kmem_free" */
		else if (code & IBT_ERROR_CATASTROPHIC_SRQ)
			code = IBT_ERROR_CATASTROPHIC_SRQ;
		else if (code & IBT_EVENT_LIMIT_REACHED_SRQ)
			code = IBT_EVENT_LIMIT_REACHED_SRQ;
		else {
			IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_do_srq_asyncs: "
			    "async: unexpected SRQ async code 0x%x", code);
			ibtl_srq->srq_async_codes = 0;
			code = 0;
		}
		ibtl_srq->srq_async_codes &= ~code;

		if (code) {
			mutex_exit(&ibtl_async_mutex);
			ibtl_async_client_call(ibtl_srq->srq_hca,
			    code, &async_event);
			mutex_enter(&ibtl_async_mutex);
		}

		if (ibtl_srq->srq_async_flags & IBTL_ASYNC_FREE_OBJECT) {
			mutex_exit(&ibtl_async_mutex);
			kmem_free(ibtl_srq, sizeof (struct ibtl_srq_s));
			mutex_enter(&ibtl_async_mutex);
			return;
		}
	}
	ibtl_srq->srq_async_flags &= ~IBTL_ASYNC_PENDING;
}

/*
 * Asyncs for CQ objects.
 *
 * The function continues to run until there are no more async
 * events/errors for this object.
 */
static void
ibtl_do_cq_asyncs(ibtl_cq_t *ibtl_cq)
{
	ibt_async_code_t	code;
	ibt_async_event_t	async_event;

	ASSERT(MUTEX_HELD(&ibtl_async_mutex));
	bzero(&async_event, sizeof (async_event));
	async_event.ev_cq_hdl = ibtl_cq;
	async_event.ev_fma_ena = ibtl_cq->cq_fma_ena;

	while ((code = ibtl_cq->cq_async_codes) != 0) {
		if (ibtl_cq->cq_async_flags & IBTL_ASYNC_FREE_OBJECT)
			code = 0;	/* fallthrough to "kmem_free" */
		else if (code & IBT_ERROR_CQ)
			code = IBT_ERROR_CQ;
		else {
			IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_do_cq_asyncs: "
			    "async: unexpected CQ async code 0x%x", code);
			ibtl_cq->cq_async_codes = 0;
			code = 0;
		}
		ibtl_cq->cq_async_codes &= ~code;

		if (code) {
			mutex_exit(&ibtl_async_mutex);
			ibtl_async_client_call(ibtl_cq->cq_hca,
			    code, &async_event);
			mutex_enter(&ibtl_async_mutex);
		}

		if (ibtl_cq->cq_async_flags & IBTL_ASYNC_FREE_OBJECT) {
			mutex_exit(&ibtl_async_mutex);
			mutex_destroy(&ibtl_cq->cq_mutex);
			kmem_free(ibtl_cq, sizeof (struct ibtl_cq_s));
			mutex_enter(&ibtl_async_mutex);
			return;
		}
	}
	ibtl_cq->cq_async_flags &= ~IBTL_ASYNC_PENDING;
}

/*
 * Asyncs for EEC objects.
 *
 * The function continues to run until there are no more async
 * events/errors for this object.
 */
static void
ibtl_do_eec_asyncs(ibtl_eec_t *ibtl_eec)
{
	ibt_async_code_t	code;
	ibt_async_event_t	async_event;

	ASSERT(MUTEX_HELD(&ibtl_async_mutex));
	bzero(&async_event, sizeof (async_event));
	async_event.ev_chan_hdl = ibtl_eec->eec_channel;

	while ((code = ibtl_eec->eec_async_codes) != 0) {
		async_event.ev_fma_ena = 0;
		if (ibtl_eec->eec_async_flags & IBTL_ASYNC_FREE_OBJECT)
			code = 0;	/* fallthrough to "kmem_free" */
		else if (code & IBT_ERROR_CATASTROPHIC_EEC) {
			code = IBT_ERROR_CATASTROPHIC_CHAN;
			async_event.ev_fma_ena = ibtl_eec->eec_cat_fma_ena;
		} else if (code & IBT_ERROR_PATH_MIGRATE_REQ_EEC) {
			code = IBT_ERROR_PATH_MIGRATE_REQ;
			async_event.ev_fma_ena = ibtl_eec->eec_pth_fma_ena;
		} else if (code & IBT_EVENT_PATH_MIGRATED_EEC)
			code = IBT_EVENT_PATH_MIGRATED;
		else if (code & IBT_EVENT_COM_EST_EEC)
			code = IBT_EVENT_COM_EST;
		else {
			IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_do_eec_asyncs: "
			    "async: unexpected code 0x%x", code);
			ibtl_eec->eec_async_codes = 0;
			code = 0;
		}
		ibtl_eec->eec_async_codes &= ~code;

		if (code) {
			mutex_exit(&ibtl_async_mutex);
			ibtl_async_client_call(ibtl_eec->eec_hca,
			    code, &async_event);
			mutex_enter(&ibtl_async_mutex);
		}

		if (ibtl_eec->eec_async_flags & IBTL_ASYNC_FREE_OBJECT) {
			mutex_exit(&ibtl_async_mutex);
			kmem_free(ibtl_eec, sizeof (struct ibtl_eec_s));
			mutex_enter(&ibtl_async_mutex);
			return;
		}
	}
	ibtl_eec->eec_async_flags &= ~IBTL_ASYNC_PENDING;
}

#ifdef __lock_lint
kmutex_t	cpr_mutex;
#endif

/*
 * Loop forever, calling async_handlers until all of the async lists
 * are empty.
 */

static void
ibtl_async_thread(void)
{
#ifndef __lock_lint
	kmutex_t	cpr_mutex;
#endif
	callb_cpr_t	cprinfo;

	_NOTE(MUTEX_PROTECTS_DATA(cpr_mutex, cprinfo))
	_NOTE(NO_COMPETING_THREADS_NOW)
	mutex_init(&cpr_mutex, NULL, MUTEX_DRIVER, NULL);
	CALLB_CPR_INIT(&cprinfo, &cpr_mutex, callb_generic_cpr,
	    "ibtl_async_thread");
#ifndef lint
	_NOTE(COMPETING_THREADS_NOW)
#endif

	mutex_enter(&ibtl_async_mutex);

	for (;;) {
		if (ibtl_async_hca_list_start) {
			ibtl_hca_devinfo_t *hca_devp;

			/* remove first entry from list */
			hca_devp = ibtl_async_hca_list_start;
			ibtl_async_hca_list_start = hca_devp->hd_async_link;
			hca_devp->hd_async_link = NULL;
			if (ibtl_async_hca_list_start == NULL)
				ibtl_async_hca_list_end = NULL;

			ibtl_do_hca_asyncs(hca_devp);

		} else if (ibtl_async_qp_list_start) {
			ibtl_qp_t *ibtl_qp;

			/* remove from list */
			ibtl_qp = ibtl_async_qp_list_start;
			ibtl_async_qp_list_start = ibtl_qp->qp_async_link;
			ibtl_qp->qp_async_link = NULL;
			if (ibtl_async_qp_list_start == NULL)
				ibtl_async_qp_list_end = NULL;

			ibtl_do_qp_asyncs(ibtl_qp);

		} else if (ibtl_async_srq_list_start) {
			ibtl_srq_t *ibtl_srq;

			/* remove from list */
			ibtl_srq = ibtl_async_srq_list_start;
			ibtl_async_srq_list_start = ibtl_srq->srq_async_link;
			ibtl_srq->srq_async_link = NULL;
			if (ibtl_async_srq_list_start == NULL)
				ibtl_async_srq_list_end = NULL;

			ibtl_do_srq_asyncs(ibtl_srq);

		} else if (ibtl_async_eec_list_start) {
			ibtl_eec_t *ibtl_eec;

			/* remove from list */
			ibtl_eec = ibtl_async_eec_list_start;
			ibtl_async_eec_list_start = ibtl_eec->eec_async_link;
			ibtl_eec->eec_async_link = NULL;
			if (ibtl_async_eec_list_start == NULL)
				ibtl_async_eec_list_end = NULL;

			ibtl_do_eec_asyncs(ibtl_eec);

		} else if (ibtl_async_cq_list_start) {
			ibtl_cq_t *ibtl_cq;

			/* remove from list */
			ibtl_cq = ibtl_async_cq_list_start;
			ibtl_async_cq_list_start = ibtl_cq->cq_async_link;
			ibtl_cq->cq_async_link = NULL;
			if (ibtl_async_cq_list_start == NULL)
				ibtl_async_cq_list_end = NULL;

			ibtl_do_cq_asyncs(ibtl_cq);

		} else {
			if (ibtl_async_thread_exit == IBTL_THREAD_EXIT)
				break;
			mutex_enter(&cpr_mutex);
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			mutex_exit(&cpr_mutex);

			cv_wait(&ibtl_async_cv, &ibtl_async_mutex);

			mutex_exit(&ibtl_async_mutex);
			mutex_enter(&cpr_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &cpr_mutex);
			mutex_exit(&cpr_mutex);
			mutex_enter(&ibtl_async_mutex);
		}
	}

	mutex_exit(&ibtl_async_mutex);

#ifndef __lock_lint
	mutex_enter(&cpr_mutex);
	CALLB_CPR_EXIT(&cprinfo);
#endif
	mutex_destroy(&cpr_mutex);
}


void
ibtl_free_qp_async_check(ibtl_qp_t *ibtl_qp)
{
	IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_qp_async_check(%p)", ibtl_qp);

	mutex_enter(&ibtl_async_mutex);

	/*
	 * If there is an active async, mark this object to be freed
	 * by the async_thread when it's done.
	 */
	if (ibtl_qp->qp_async_flags & IBTL_ASYNC_PENDING) {
		ibtl_qp->qp_async_flags |= IBTL_ASYNC_FREE_OBJECT;
		mutex_exit(&ibtl_async_mutex);
	} else {	/* free the object now */
		mutex_exit(&ibtl_async_mutex);
		cv_destroy(&(IBTL_QP2CHAN(ibtl_qp))->ch_cm_cv);
		mutex_destroy(&(IBTL_QP2CHAN(ibtl_qp))->ch_cm_mutex);
		kmem_free(IBTL_QP2CHAN(ibtl_qp), sizeof (ibtl_channel_t));
	}
}

void
ibtl_free_cq_async_check(ibtl_cq_t *ibtl_cq)
{
	IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_cq_async_check(%p)", ibtl_cq);

	mutex_enter(&ibtl_async_mutex);

	/* if there is an active async, mark this object to be freed */
	if (ibtl_cq->cq_async_flags & IBTL_ASYNC_PENDING) {
		ibtl_cq->cq_async_flags |= IBTL_ASYNC_FREE_OBJECT;
		mutex_exit(&ibtl_async_mutex);
	} else {	/* free the object now */
		mutex_exit(&ibtl_async_mutex);
		mutex_destroy(&ibtl_cq->cq_mutex);
		kmem_free(ibtl_cq, sizeof (struct ibtl_cq_s));
	}
}

void
ibtl_free_srq_async_check(ibtl_srq_t *ibtl_srq)
{
	IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_srq_async_check(%p)",
	    ibtl_srq);

	mutex_enter(&ibtl_async_mutex);

	/* if there is an active async, mark this object to be freed */
	if (ibtl_srq->srq_async_flags & IBTL_ASYNC_PENDING) {
		ibtl_srq->srq_async_flags |= IBTL_ASYNC_FREE_OBJECT;
		mutex_exit(&ibtl_async_mutex);
	} else {	/* free the object now */
		mutex_exit(&ibtl_async_mutex);
		kmem_free(ibtl_srq, sizeof (struct ibtl_srq_s));
	}
}

void
ibtl_free_eec_async_check(ibtl_eec_t *ibtl_eec)
{
	IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_eec_async_check(%p)",
	    ibtl_eec);

	mutex_enter(&ibtl_async_mutex);

	/* if there is an active async, mark this object to be freed */
	if (ibtl_eec->eec_async_flags & IBTL_ASYNC_PENDING) {
		ibtl_eec->eec_async_flags |= IBTL_ASYNC_FREE_OBJECT;
		mutex_exit(&ibtl_async_mutex);
	} else {	/* free the object now */
		mutex_exit(&ibtl_async_mutex);
		kmem_free(ibtl_eec, sizeof (struct ibtl_eec_s));
	}
}

/*
 * This function differs from above in that we assume this is called
 * from non-interrupt context, and never called from the async_thread.
 */
void
ibtl_free_hca_async_check(ibtl_hca_t *ibt_hca)
{
	IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_hca_async_check(%p)",
	    ibt_hca);

	mutex_enter(&ibtl_async_mutex);

	/* if there is an active async, mark this object to be freed */
	if (ibt_hca->ha_async_cnt > 0) {
		ibt_hca->ha_async_flags |= IBTL_ASYNC_FREE_OBJECT;
		mutex_exit(&ibtl_async_mutex);
	} else {	/* free the object now */
		mutex_exit(&ibtl_async_mutex);
		kmem_free(ibt_hca, sizeof (ibtl_hca_t));
	}
}

/*
 * Completion Queue Handling.
 *
 * A completion queue can be handled through a simple callback
 * at interrupt level, or it may be queued for an ibtl_cq_thread
 * to handle.  The latter is chosen during ibt_alloc_cq when the
 * IBTF_CQ_HANDLER_IN_THREAD is specified.
 */

static void
ibtl_cq_handler_call(ibtl_cq_t *ibtl_cq)
{
	ibt_cq_handler_t	cq_handler;
	void			*arg;

	IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_cq_handler_call(%p)", ibtl_cq);

	mutex_enter(&ibtl_cq->cq_mutex);
	cq_handler = ibtl_cq->cq_comp_handler;
	arg = ibtl_cq->cq_arg;
	if (cq_handler != NULL)
		cq_handler(ibtl_cq, arg);
	else
		IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_cq_handler_call: "
		    "no cq_handler for cq %p", ibtl_cq);
	mutex_exit(&ibtl_cq->cq_mutex);
}

/*
 * Before ibt_free_cq can continue, we need to ensure no more cq_handler
 * callbacks can occur.  When we get the mutex, we know there are no
 * outstanding cq_handler callbacks.  We set the cq_handler to NULL to
 * prohibit future callbacks.
 */
void
ibtl_free_cq_check(ibtl_cq_t *ibtl_cq)
{
	mutex_enter(&ibtl_cq->cq_mutex);
	ibtl_cq->cq_comp_handler = NULL;
	mutex_exit(&ibtl_cq->cq_mutex);
	if (ibtl_cq->cq_in_thread) {
		mutex_enter(&ibtl_cq_mutex);
		--ibtl_cqs_using_threads;
		while (ibtl_cq->cq_impl_flags & IBTL_CQ_PENDING) {
			ibtl_cq->cq_impl_flags &= ~IBTL_CQ_CALL_CLIENT;
			ibtl_cq->cq_impl_flags |= IBTL_CQ_FREE;
			cv_wait(&ibtl_cq_cv, &ibtl_cq_mutex);
		}
		mutex_exit(&ibtl_cq_mutex);
	}
}

/*
 * Loop forever, calling cq_handlers until the cq list
 * is empty.
 */

static void
ibtl_cq_thread(void)
{
#ifndef __lock_lint
	kmutex_t	cpr_mutex;
#endif
	callb_cpr_t	cprinfo;

	_NOTE(MUTEX_PROTECTS_DATA(cpr_mutex, cprinfo))
	_NOTE(NO_COMPETING_THREADS_NOW)
	mutex_init(&cpr_mutex, NULL, MUTEX_DRIVER, NULL);
	CALLB_CPR_INIT(&cprinfo, &cpr_mutex, callb_generic_cpr,
	    "ibtl_cq_thread");
#ifndef lint
	_NOTE(COMPETING_THREADS_NOW)
#endif

	mutex_enter(&ibtl_cq_mutex);

	for (;;) {
		if (ibtl_cq_list_start) {
			ibtl_cq_t *ibtl_cq;

			ibtl_cq = ibtl_cq_list_start;
			ibtl_cq_list_start = ibtl_cq->cq_link;
			ibtl_cq->cq_link = NULL;
			if (ibtl_cq == ibtl_cq_list_end)
				ibtl_cq_list_end = NULL;

			while (ibtl_cq->cq_impl_flags & IBTL_CQ_CALL_CLIENT) {
				ibtl_cq->cq_impl_flags &= ~IBTL_CQ_CALL_CLIENT;
				mutex_exit(&ibtl_cq_mutex);
				ibtl_cq_handler_call(ibtl_cq);
				mutex_enter(&ibtl_cq_mutex);
			}
			ibtl_cq->cq_impl_flags &= ~IBTL_CQ_PENDING;
			if (ibtl_cq->cq_impl_flags & IBTL_CQ_FREE)
				cv_broadcast(&ibtl_cq_cv);
		} else {
			if (ibtl_cq_thread_exit == IBTL_THREAD_EXIT)
				break;
			mutex_enter(&cpr_mutex);
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			mutex_exit(&cpr_mutex);

			cv_wait(&ibtl_cq_cv, &ibtl_cq_mutex);

			mutex_exit(&ibtl_cq_mutex);
			mutex_enter(&cpr_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &cpr_mutex);
			mutex_exit(&cpr_mutex);
			mutex_enter(&ibtl_cq_mutex);
		}
	}

	mutex_exit(&ibtl_cq_mutex);
#ifndef __lock_lint
	mutex_enter(&cpr_mutex);
	CALLB_CPR_EXIT(&cprinfo);
#endif
	mutex_destroy(&cpr_mutex);
}


/*
 * ibc_cq_handler()
 *
 * Completion Queue Notification Handler.
 *
 */
/*ARGSUSED*/
void
ibc_cq_handler(ibc_clnt_hdl_t ibc_hdl, ibt_cq_hdl_t ibtl_cq)
{
	IBTF_DPRINTF_L4(ibtf_handlers, "ibc_cq_handler(%p, %p)",
	    ibc_hdl, ibtl_cq);

	if (ibtl_cq->cq_in_thread) {
		mutex_enter(&ibtl_cq_mutex);
		ibtl_cq->cq_impl_flags |= IBTL_CQ_CALL_CLIENT;
		if ((ibtl_cq->cq_impl_flags & IBTL_CQ_PENDING) == 0) {
			ibtl_cq->cq_impl_flags |= IBTL_CQ_PENDING;
			ibtl_cq->cq_link = NULL;
			if (ibtl_cq_list_end == NULL)
				ibtl_cq_list_start = ibtl_cq;
			else
				ibtl_cq_list_end->cq_link = ibtl_cq;
			ibtl_cq_list_end = ibtl_cq;
			cv_signal(&ibtl_cq_cv);
		}
		mutex_exit(&ibtl_cq_mutex);
		return;
	} else
		ibtl_cq_handler_call(ibtl_cq);
}


/*
 * ibt_enable_cq_notify()
 *	Enable Notification requests on the specified CQ.
 *
 *	ibt_cq		The CQ handle.
 *
 *	notify_type	Enable notifications for all (IBT_NEXT_COMPLETION)
 *			completions, or the next Solicited completion
 *			(IBT_NEXT_SOLICITED) only.
 *
 *	Completion notifications are disabled by setting the completion
 *	handler to NULL by calling ibt_set_cq_handler().
 */
ibt_status_t
ibt_enable_cq_notify(ibt_cq_hdl_t ibtl_cq, ibt_cq_notify_flags_t notify_type)
{
	IBTF_DPRINTF_L3(ibtf_handlers, "ibt_enable_cq_notify(%p, %d)",
	    ibtl_cq, notify_type);

	return (IBTL_CQ2CIHCAOPS_P(ibtl_cq)->ibc_notify_cq(
	    IBTL_CQ2CIHCA(ibtl_cq), ibtl_cq->cq_ibc_cq_hdl, notify_type));
}


/*
 * ibt_set_cq_handler()
 *	Register a work request completion handler with the IBTF.
 *
 *	ibt_cq			The CQ handle.
 *
 *	completion_handler	The completion handler.
 *
 *	arg			The IBTF client private argument to be passed
 *				back to the client when calling the CQ
 *				completion handler.
 *
 *	Completion notifications are disabled by setting the completion
 *	handler to NULL.  When setting the handler to NULL, no additional
 *	calls to the previous CQ handler will be initiated, but there may
 *	be one in progress.
 *
 *	This function does not otherwise change the state of previous
 *	calls to ibt_enable_cq_notify().
 */
void
ibt_set_cq_handler(ibt_cq_hdl_t ibtl_cq, ibt_cq_handler_t completion_handler,
    void *arg)
{
	IBTF_DPRINTF_L3(ibtf_handlers, "ibt_set_cq_handler(%p, %p, %p)",
	    ibtl_cq, completion_handler, arg);

	mutex_enter(&ibtl_cq->cq_mutex);
	ibtl_cq->cq_comp_handler = completion_handler;
	ibtl_cq->cq_arg = arg;
	mutex_exit(&ibtl_cq->cq_mutex);
}

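/*
 * Example (hypothetical client usage): a typical client drains the CQ in
 * its completion handler and then re-arms notifications, roughly as in the
 * sketch below.  The handler, state type, and helper names are illustrative
 * only; ibt_set_cq_handler(), ibt_enable_cq_notify(), and ibt_poll_cq() are
 * the IBTF calls involved.
 *
 *	static void
 *	my_cq_handler(ibt_cq_hdl_t cq_hdl, void *arg)
 *	{
 *		my_state_t	*statep = arg;
 *		ibt_wc_t	wc;
 *		uint_t		polled;
 *
 *		while (ibt_poll_cq(cq_hdl, &wc, 1, &polled) == IBT_SUCCESS)
 *			my_handle_completion(statep, &wc);
 *		(void) ibt_enable_cq_notify(cq_hdl, IBT_NEXT_COMPLETION);
 *	}
 *
 * and, after allocating the CQ:
 *
 *	ibt_set_cq_handler(cq_hdl, my_cq_handler, statep);
 *	(void) ibt_enable_cq_notify(cq_hdl, IBT_NEXT_COMPLETION);
 */
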
/*
 * Inform IBT clients about New HCAs.
 *
 * We use taskqs to allow simultaneous notification, with sleeping.
 * Since taskqs only allow one argument, we define a structure
 * because we need to pass in more than one argument.
 */
struct ibtl_new_hca_s {
	ibtl_clnt_t		*nh_clntp;
	ibtl_hca_devinfo_t	*nh_hca_devp;
	ibt_async_code_t	nh_code;
};

static void
ibtl_tell_client_about_new_hca(void *arg)
{
	struct ibtl_new_hca_s	*new_hcap = (struct ibtl_new_hca_s *)arg;
	ibtl_clnt_t		*clntp = new_hcap->nh_clntp;
	ibt_async_event_t	async_event;
	ibtl_hca_devinfo_t	*hca_devp = new_hcap->nh_hca_devp;

	bzero(&async_event, sizeof (async_event));
	async_event.ev_hca_guid = hca_devp->hd_hca_attr->hca_node_guid;
	clntp->clnt_modinfop->mi_async_handler(
	    clntp->clnt_private, NULL, new_hcap->nh_code, &async_event);
	kmem_free(new_hcap, sizeof (*new_hcap));
#ifdef __lock_lint
	{
		ibt_hca_hdl_t hca_hdl;
		(void) ibt_open_hca(clntp, 0ULL, &hca_hdl);
	}
#endif
	mutex_enter(&ibtl_clnt_list_mutex);
	if (--hca_devp->hd_async_task_cnt == 0)
		cv_signal(&hca_devp->hd_async_task_cv);
	if (--clntp->clnt_async_cnt == 0)
		cv_broadcast(&ibtl_clnt_cv);
	mutex_exit(&ibtl_clnt_list_mutex);
}

/*
 * ibtl_announce_new_hca:
 *
 * o First attach these clients in the given order
 *	IBMA
 *	IBCM
 *
 * o Next attach all other clients in parallel.
 *
 * NOTE: Use the taskq to simultaneously notify all clients of the new HCA.
 * Retval from clients is ignored.
 */
void
ibtl_announce_new_hca(ibtl_hca_devinfo_t *hca_devp)
{
	ibtl_clnt_t		*clntp;
	struct ibtl_new_hca_s	*new_hcap;

	IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_announce_new_hca(%p, %llX)",
	    hca_devp, hca_devp->hd_hca_attr->hca_node_guid);

	mutex_enter(&ibtl_clnt_list_mutex);

	clntp = ibtl_clnt_list;
	while (clntp != NULL) {
		if (clntp->clnt_modinfop->mi_clnt_class == IBT_IBMA) {
			IBTF_DPRINTF_L4(ibtf_handlers,
			    "ibtl_announce_new_hca: calling IBMF");
			if (clntp->clnt_modinfop->mi_async_handler) {
				_NOTE(NO_COMPETING_THREADS_NOW)
				new_hcap = kmem_alloc(sizeof (*new_hcap),
				    KM_SLEEP);
				new_hcap->nh_clntp = clntp;
				new_hcap->nh_hca_devp = hca_devp;
				new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
#ifndef lint
				_NOTE(COMPETING_THREADS_NOW)
#endif
				clntp->clnt_async_cnt++;
				hca_devp->hd_async_task_cnt++;

				(void) taskq_dispatch(ibtl_async_taskq,
				    ibtl_tell_client_about_new_hca, new_hcap,
				    TQ_SLEEP);
			}
			break;
		}
		clntp = clntp->clnt_list_link;
	}
	if (clntp != NULL)
		while (clntp->clnt_async_cnt > 0)
			cv_wait(&ibtl_clnt_cv, &ibtl_clnt_list_mutex);
	clntp = ibtl_clnt_list;
	while (clntp != NULL) {
		if ((clntp->clnt_modinfop->mi_clnt_class == IBT_DM) ||
		    (clntp->clnt_modinfop->mi_clnt_class == IBT_CM)) {
			IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_announce_new_hca: "
			    "calling %s", clntp->clnt_modinfop->mi_clnt_name);
			if (clntp->clnt_modinfop->mi_async_handler) {
				_NOTE(NO_COMPETING_THREADS_NOW)
				new_hcap = kmem_alloc(sizeof (*new_hcap),
				    KM_SLEEP);
				new_hcap->nh_clntp = clntp;
				new_hcap->nh_hca_devp = hca_devp;
				new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
#ifndef lint
				_NOTE(COMPETING_THREADS_NOW)
#endif
				clntp->clnt_async_cnt++;
				hca_devp->hd_async_task_cnt++;

				(void) taskq_dispatch(ibtl_async_taskq,
				    ibtl_tell_client_about_new_hca, new_hcap,
				    TQ_SLEEP);
			}
			break;
		}
		clntp = clntp->clnt_list_link;
	}
	if (clntp != NULL)
		while (clntp->clnt_async_cnt > 0)
			cv_wait(&ibtl_clnt_cv, &ibtl_clnt_list_mutex);
	clntp = ibtl_clnt_list;
	while (clntp != NULL) {
		if ((clntp->clnt_modinfop->mi_clnt_class != IBT_DM) &&
		    (clntp->clnt_modinfop->mi_clnt_class != IBT_CM) &&
		    (clntp->clnt_modinfop->mi_clnt_class != IBT_IBMA)) {
			IBTF_DPRINTF_L4(ibtf_handlers,
			    "ibtl_announce_new_hca: Calling %s ",
			    clntp->clnt_modinfop->mi_clnt_name);
			if (clntp->clnt_modinfop->mi_async_handler) {
				_NOTE(NO_COMPETING_THREADS_NOW)
				new_hcap = kmem_alloc(sizeof (*new_hcap),
				    KM_SLEEP);
				new_hcap->nh_clntp = clntp;
				new_hcap->nh_hca_devp = hca_devp;
				new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
#ifndef lint
				_NOTE(COMPETING_THREADS_NOW)
#endif
				clntp->clnt_async_cnt++;
				hca_devp->hd_async_task_cnt++;

				(void) taskq_dispatch(ibtl_async_taskq,
				    ibtl_tell_client_about_new_hca, new_hcap,
				    TQ_SLEEP);
			}
		}
		clntp = clntp->clnt_list_link;
	}

	/* wait for all tasks to complete */
	while (hca_devp->hd_async_task_cnt != 0)
		cv_wait(&hca_devp->hd_async_task_cv, &ibtl_clnt_list_mutex);

	/* wakeup thread that may be waiting to send an HCA async */
	ASSERT(hca_devp->hd_async_busy == 1);
	hca_devp->hd_async_busy = 0;
	cv_broadcast(&hca_devp->hd_async_busy_cv);
	mutex_exit(&ibtl_clnt_list_mutex);
}

/*
 * ibtl_detach_all_clients:
 *
 *	Return value - 0 for Success, 1 for Failure
 *
 * o First detach general clients.
 *
 * o Next detach these clients
 *	IBCM
 *	IBDM
 *
 * o Finally, detach this client
 *	IBMA
 */
int
ibtl_detach_all_clients(ibtl_hca_devinfo_t *hca_devp)
{
	ib_guid_t	hcaguid = hca_devp->hd_hca_attr->hca_node_guid;
	ibtl_hca_t	*ibt_hca;
	ibtl_clnt_t	*clntp;
	int		retval;

	IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_detach_all_clients(%llX)",
	    hcaguid);

	ASSERT(MUTEX_HELD(&ibtl_clnt_list_mutex));

	while (hca_devp->hd_async_busy)
		cv_wait(&hca_devp->hd_async_busy_cv, &ibtl_clnt_list_mutex);
	hca_devp->hd_async_busy = 1;

	/* First inform general clients asynchronously */
	hca_devp->hd_async_event.ev_hca_guid = hcaguid;
	hca_devp->hd_async_event.ev_fma_ena = 0;
	hca_devp->hd_async_event.ev_chan_hdl = NULL;
	hca_devp->hd_async_event.ev_cq_hdl = NULL;
	hca_devp->hd_async_code = IBT_HCA_DETACH_EVENT;

	ibt_hca = hca_devp->hd_clnt_list;
	while (ibt_hca != NULL) {
		clntp = ibt_hca->ha_clnt_devp;
		if (IBTL_GENERIC_CLIENT(clntp)) {
			++ibt_hca->ha_clnt_devp->clnt_async_cnt;
			mutex_enter(&ibtl_async_mutex);
			ibt_hca->ha_async_cnt++;
			mutex_exit(&ibtl_async_mutex);
			hca_devp->hd_async_task_cnt++;

			(void) taskq_dispatch(ibtl_async_taskq,
			    ibtl_hca_client_async_task, ibt_hca, TQ_SLEEP);
		}
		ibt_hca = ibt_hca->ha_clnt_link;
	}

	/* wait for all clients to complete */
	while (hca_devp->hd_async_task_cnt != 0) {
		cv_wait(&hca_devp->hd_async_task_cv, &ibtl_clnt_list_mutex);
	}
	/* Go thru the clients and check if any have not closed this HCA. */
	retval = 0;
	ibt_hca = hca_devp->hd_clnt_list;
	while (ibt_hca != NULL) {
		clntp = ibt_hca->ha_clnt_devp;
		if (IBTL_GENERIC_CLIENT(clntp)) {
			IBTF_DPRINTF_L2(ibtf_handlers,
			    "ibtl_detach_all_clients: "
			    "client '%s' failed to close the HCA.",
			    ibt_hca->ha_clnt_devp->clnt_modinfop->mi_clnt_name);
			retval = 1;
		}
		ibt_hca = ibt_hca->ha_clnt_link;
	}
	if (retval == 1)
		goto bailout;

	/* Next inform IBDM asynchronously */
	ibt_hca = hca_devp->hd_clnt_list;
	while (ibt_hca != NULL) {
		clntp = ibt_hca->ha_clnt_devp;
		if (clntp->clnt_modinfop->mi_clnt_class == IBT_DM) {
			++ibt_hca->ha_clnt_devp->clnt_async_cnt;
			mutex_enter(&ibtl_async_mutex);
			ibt_hca->ha_async_cnt++;
			mutex_exit(&ibtl_async_mutex);
			hca_devp->hd_async_task_cnt++;

			(void) taskq_dispatch(ibtl_async_taskq,
			    ibtl_hca_client_async_task, ibt_hca, TQ_SLEEP);
		}
		ibt_hca = ibt_hca->ha_clnt_link;
	}
	/* wait for IBDM to complete */
	while (hca_devp->hd_async_task_cnt != 0) {
		cv_wait(&hca_devp->hd_async_task_cv, &ibtl_clnt_list_mutex);
	}

	/*
	 * Next inform IBCM.
	 * As IBCM doesn't perform ibt_open_hca(), IBCM will not be
	 * accessible via hca_devp->hd_clnt_list.
	 * ibtl_cm_async_handler will NOT be NULL, if IBCM is registered.
	 */
	if (ibtl_cm_async_handler) {
		ibtl_tell_mgr(hca_devp, ibtl_cm_async_handler,
		    ibtl_cm_clnt_private);

		/* wait for all tasks to complete */
		while (hca_devp->hd_async_task_cnt != 0)
			cv_wait(&hca_devp->hd_async_task_cv,
			    &ibtl_clnt_list_mutex);
	}

	/* Go thru the clients and check if any have not closed this HCA. */
	retval = 0;
	ibt_hca = hca_devp->hd_clnt_list;
	while (ibt_hca != NULL) {
		clntp = ibt_hca->ha_clnt_devp;
		if (clntp->clnt_modinfop->mi_clnt_class != IBT_IBMA) {
			IBTF_DPRINTF_L2(ibtf_handlers,
			    "ibtl_detach_all_clients: "
			    "client '%s' failed to close the HCA.",
			    ibt_hca->ha_clnt_devp->clnt_modinfop->mi_clnt_name);
			retval = 1;
		}
		ibt_hca = ibt_hca->ha_clnt_link;
	}
	if (retval == 1)
		goto bailout;

	/* Finally, inform IBMA */
	ibt_hca = hca_devp->hd_clnt_list;
	while (ibt_hca != NULL) {
		clntp = ibt_hca->ha_clnt_devp;
		if (clntp->clnt_modinfop->mi_clnt_class == IBT_IBMA) {
			++ibt_hca->ha_clnt_devp->clnt_async_cnt;
			mutex_enter(&ibtl_async_mutex);
			ibt_hca->ha_async_cnt++;
			mutex_exit(&ibtl_async_mutex);
			hca_devp->hd_async_task_cnt++;

			(void) taskq_dispatch(ibtl_async_taskq,
			    ibtl_hca_client_async_task, ibt_hca, TQ_SLEEP);
		} else
			IBTF_DPRINTF_L2(ibtf_handlers,
			    "ibtl_detach_all_clients: "
			    "client '%s' is unexpectedly on the client list",
			    ibt_hca->ha_clnt_devp->clnt_modinfop->mi_clnt_name);
		ibt_hca = ibt_hca->ha_clnt_link;
	}

	/* wait for IBMA to complete */
	while (hca_devp->hd_async_task_cnt != 0) {
		cv_wait(&hca_devp->hd_async_task_cv, &ibtl_clnt_list_mutex);
	}

	/* Check if this HCA's client list is empty. */
	ibt_hca = hca_devp->hd_clnt_list;
	if (ibt_hca != NULL) {
		IBTF_DPRINTF_L2(ibtf_handlers,
		    "ibtl_detach_all_clients: "
		    "client '%s' failed to close the HCA.",
		    ibt_hca->ha_clnt_devp->clnt_modinfop->mi_clnt_name);
		retval = 1;
	} else
		retval = 0;

bailout:
	hca_devp->hd_async_busy = 0;
	cv_broadcast(&hca_devp->hd_async_busy_cv);
	return (retval);
}

void
ibtl_free_clnt_async_check(ibtl_clnt_t *clntp)
{
	IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_clnt_async_check(%p)", clntp);

	ASSERT(MUTEX_HELD(&ibtl_clnt_list_mutex));

	/* wait for all asyncs based on "ibtl_clnt_list" to complete */
	while (clntp->clnt_async_cnt != 0) {
		cv_wait(&ibtl_clnt_cv, &ibtl_clnt_list_mutex);
	}
}

static void
ibtl_dec_clnt_async_cnt(ibtl_clnt_t *clntp)
{
	mutex_enter(&ibtl_clnt_list_mutex);
	if (--clntp->clnt_async_cnt == 0) {
		cv_broadcast(&ibtl_clnt_cv);
	}
	mutex_exit(&ibtl_clnt_list_mutex);
}

static void
ibtl_inc_clnt_async_cnt(ibtl_clnt_t *clntp)
{
	mutex_enter(&ibtl_clnt_list_mutex);
	++clntp->clnt_async_cnt;
	mutex_exit(&ibtl_clnt_list_mutex);
}


/*
 * Functions and data structures to inform clients that a notification
 * has occurred about Multicast Groups that might interest them.
 */
struct ibtl_sm_notice {
	ibt_clnt_hdl_t		np_ibt_hdl;
	ib_gid_t		np_sgid;
	ibt_subnet_event_code_t	np_code;
	ibt_subnet_event_t	np_event;
};

static void
ibtl_sm_notice_task(void *arg)
{
	struct ibtl_sm_notice	*noticep = (struct ibtl_sm_notice *)arg;
	ibt_clnt_hdl_t		ibt_hdl = noticep->np_ibt_hdl;
	ibt_sm_notice_handler_t	sm_notice_handler;

	sm_notice_handler = ibt_hdl->clnt_sm_trap_handler;
	if (sm_notice_handler != NULL)
		sm_notice_handler(ibt_hdl->clnt_sm_trap_handler_arg,
		    noticep->np_sgid, noticep->np_code, &noticep->np_event);
	kmem_free(noticep, sizeof (*noticep));
	ibtl_dec_clnt_async_cnt(ibt_hdl);
}

/*
 * Inform the client that MCG notices are not working at this time.
 */
void
ibtl_cm_sm_notice_init_failure(ibtl_cm_sm_init_fail_t *ifail)
{
	ibt_clnt_hdl_t		ibt_hdl = ifail->smf_ibt_hdl;
	struct ibtl_sm_notice	*noticep;
	ib_gid_t		*sgidp = &ifail->smf_sgid[0];
	int			i;

	for (i = 0; i < ifail->smf_num_sgids; i++) {
		_NOTE(NO_COMPETING_THREADS_NOW)
		noticep = kmem_zalloc(sizeof (*noticep), KM_SLEEP);
		noticep->np_ibt_hdl = ibt_hdl;
		noticep->np_sgid = *sgidp++;
		noticep->np_code = IBT_SM_EVENT_UNAVAILABLE;
#ifndef lint
		_NOTE(COMPETING_THREADS_NOW)
#endif
		ibtl_inc_clnt_async_cnt(ibt_hdl);
		(void) taskq_dispatch(ibtl_async_taskq,
		    ibtl_sm_notice_task, noticep, TQ_SLEEP);
	}
}

/*
 * Inform all clients of the event.
 */
void
ibtl_cm_sm_notice_handler(ib_gid_t sgid, ibt_subnet_event_code_t code,
    ibt_subnet_event_t *event)
{
	_NOTE(NO_COMPETING_THREADS_NOW)
	struct ibtl_sm_notice	*noticep;
	ibtl_clnt_t		*clntp;

	mutex_enter(&ibtl_clnt_list_mutex);
	clntp = ibtl_clnt_list;
	while (clntp != NULL) {
		if (clntp->clnt_sm_trap_handler) {
			noticep = kmem_zalloc(sizeof (*noticep), KM_SLEEP);
			noticep->np_ibt_hdl = clntp;
			noticep->np_sgid = sgid;
			noticep->np_code = code;
			noticep->np_event = *event;
			++clntp->clnt_async_cnt;
			(void) taskq_dispatch(ibtl_async_taskq,
			    ibtl_sm_notice_task, noticep, TQ_SLEEP);
		}
		clntp = clntp->clnt_list_link;
	}
	mutex_exit(&ibtl_clnt_list_mutex);
#ifndef lint
	_NOTE(COMPETING_THREADS_NOW)
#endif
}

/*
 * Record the handler for this client.
 */
void
ibtl_cm_set_sm_notice_handler(ibt_clnt_hdl_t ibt_hdl,
    ibt_sm_notice_handler_t sm_notice_handler, void *private)
{
	_NOTE(NO_COMPETING_THREADS_NOW)
	ibt_hdl->clnt_sm_trap_handler = sm_notice_handler;
	ibt_hdl->clnt_sm_trap_handler_arg = private;
#ifndef lint
	_NOTE(COMPETING_THREADS_NOW)
#endif
}


/*
 * ibtl_another_cq_handler_in_thread()
 *
 * Conditionally increase the number of cq_threads.
 * The number of threads grows, based on the number of cqs using threads.
 *
 * The table below controls the number of threads as follows:
 *
 *	Number of CQs	Number of cq_threads
 *	    0		    0
 *	    1		    1
 *	    2-3		    2
 *	    4-5		    3
 *	    6-9		    4
 *	    10-15	    5
 *	    16-23	    6
 *	    24-31	    7
 *	    32+		    8
 */

#define	IBTL_CQ_MAXTHREADS 8
static uint8_t ibtl_cq_scaling[IBTL_CQ_MAXTHREADS] = {
	1, 2, 4, 6, 10, 16, 24, 32
};

static kt_did_t ibtl_cq_did[IBTL_CQ_MAXTHREADS];

void
ibtl_another_cq_handler_in_thread(void)
{
	kthread_t *t;
	int my_idx;

	mutex_enter(&ibtl_cq_mutex);
	if ((ibtl_cq_threads == IBTL_CQ_MAXTHREADS) ||
	    (++ibtl_cqs_using_threads < ibtl_cq_scaling[ibtl_cq_threads])) {
		mutex_exit(&ibtl_cq_mutex);
		return;
	}
	my_idx = ibtl_cq_threads++;
	mutex_exit(&ibtl_cq_mutex);
	t = thread_create(NULL, 0, ibtl_cq_thread, NULL, 0, &p0, TS_RUN,
	    ibtl_pri - 1);
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
	ibtl_cq_did[my_idx] = t->t_did;	/* save for thread_join() */
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
}

void
ibtl_thread_init(void)
{
	IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_thread_init()");

	mutex_init(&ibtl_async_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ibtl_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&ibtl_clnt_cv, NULL, CV_DEFAULT, NULL);

	mutex_init(&ibtl_cq_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ibtl_cq_cv, NULL, CV_DEFAULT, NULL);
}

void
ibtl_thread_init2(void)
{
	int		i;
	static int	initted = 0;
	kthread_t	*t;

	mutex_enter(&ibtl_async_mutex);
	if (initted == 1) {
		mutex_exit(&ibtl_async_mutex);
		return;
	}
	initted = 1;
	mutex_exit(&ibtl_async_mutex);
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_async_did))
	ibtl_async_did = kmem_zalloc(ibtl_async_thread_init * sizeof (kt_did_t),
	    KM_SLEEP);

	IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_thread_init2()");

	for (i = 0; i < ibtl_async_thread_init; i++) {
		t = thread_create(NULL, 0, ibtl_async_thread, NULL, 0, &p0,
		    TS_RUN, ibtl_pri - 1);
		ibtl_async_did[i] = t->t_did;	/* thread_join() */
	}
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_async_did))
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
	for (i = 0; i < ibtl_cq_threads; i++) {
		t = thread_create(NULL, 0, ibtl_cq_thread, NULL, 0, &p0,
		    TS_RUN, ibtl_pri - 1);
		_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
		ibtl_cq_did[i] = t->t_did;	/* save for thread_join() */
		_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
	}
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
}

void
ibtl_thread_fini(void)
{
	int i;

	IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_thread_fini()");

	/* undo the work done by ibtl_thread_init() */

	mutex_enter(&ibtl_cq_mutex);
	ibtl_cq_thread_exit = IBTL_THREAD_EXIT;
	cv_broadcast(&ibtl_cq_cv);
	mutex_exit(&ibtl_cq_mutex);

	mutex_enter(&ibtl_async_mutex);
	ibtl_async_thread_exit = IBTL_THREAD_EXIT;
	cv_broadcast(&ibtl_async_cv);
	mutex_exit(&ibtl_async_mutex);

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
	for (i = 0; i < ibtl_cq_threads; i++)
		thread_join(ibtl_cq_did[i]);
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))

	if (ibtl_async_did) {
		for (i = 0; i < ibtl_async_thread_init; i++)
			thread_join(ibtl_async_did[i]);

		kmem_free(ibtl_async_did,
		    ibtl_async_thread_init * sizeof (kt_did_t));
	}
	mutex_destroy(&ibtl_cq_mutex);
	cv_destroy(&ibtl_cq_cv);

	mutex_destroy(&ibtl_async_mutex);
	cv_destroy(&ibtl_async_cv);
	cv_destroy(&ibtl_clnt_cv);
}