/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/ib/ibtl/impl/ibtl.h>
#include <sys/ib/ibtl/impl/ibtl_cm.h>
#include <sys/taskq.h>
#include <sys/disp.h>
#include <sys/callb.h>
#include <sys/proc.h>

/*
 * ibtl_handlers.c
 */

/*
 * What's in this file?
 *
 *   This file started as an implementation of Asynchronous Event/Error
 *   handling and Completion Queue handling.  As the implementation
 *   evolved, code has been added for other ibc_* interfaces (resume,
 *   predetach, etc.) that use the same mechanisms as used for asyncs.
 *
 * Async and CQ handling at interrupt level.
 *
 *   CQ handling is normally done at interrupt level using the CQ callback
 *   handler to call the appropriate IBT Client (owner of the CQ).  For
 *   clients that would prefer a fully flexible non-interrupt context to
 *   do their CQ handling, a CQ can be created so that its handler is
 *   called from a non-interrupt thread.  CQ handling is done frequently,
 *   whereas Async handling is expected to occur very infrequently.
 *
 *   Async handling is done by marking (or'ing an async_code into) the
 *   pertinent IBTL data structure, and then notifying the async_thread(s)
 *   that the data structure has async work to be done.  The notification
 *   occurs by linking the data structure through its async_link onto a
 *   list of like data structures and waking up an async_thread.  This
 *   list append is not done if there is already async work pending on
 *   this data structure (IBTL_ASYNC_PENDING).
 *
 * Async Mutex and CQ Mutex
 *
 *   The global ibtl_async_mutex is "the" mutex used to control access
 *   to all the data needed by ibc_async_handler.  All the threads that
 *   use this mutex are written so that the mutex is held for very short
 *   periods of time, and never held while making calls to functions
 *   that may block.
 *
 *   The global ibtl_cq_mutex is used similarly by ibc_cq_handler and
 *   the ibtl_cq_thread(s).
 *
 * Mutex hierarchy
 *
 *   The ibtl_clnt_list_mutex is above the ibtl_async_mutex.
 *   ibtl_clnt_list_mutex protects all of the various lists.
 *   The ibtl_async_mutex is below this in the hierarchy.
 *
 *   The ibtl_cq_mutex is independent of the above mutexes.
 *
 * Threads
 *
 *   There are "ibtl_cq_threads" number of threads created for handling
 *   Completion Queues in threads.  If this feature really gets used,
 *   then we will want to do some suitable tuning.
 *   Similarly, we may want to tune the number of "ibtl_async_thread_init".
 *
 *   The function ibtl_cq_thread is the main loop for handling a CQ in a
 *   thread.  There can be multiple threads executing this same code.
 *   The code sleeps when there is no work to be done (list is empty),
 *   otherwise it pulls the first CQ structure off the list and performs
 *   the CQ handler callback to the client.  After that returns, a check
 *   is made, and if another ibc_cq_handler call was made for this CQ,
 *   the client is called again.
 *
 *   The function ibtl_async_thread is the main loop for handling async
 *   events/errors.  There can be multiple threads executing this same code.
 *   The code sleeps when there is no work to be done (lists are empty),
 *   otherwise it pulls the first structure off one of the lists and
 *   performs the async callback(s) to the client(s).  Note that HCA
 *   async handling is done by calling each of the clients using the HCA.
 *   When the async handling completes, the data structure having the async
 *   event/error is checked for more work before it's considered "done".
 *
 * Taskq
 *
 *   The async_taskq is used here to allow async handler callbacks to
 *   occur simultaneously to multiple clients of an HCA.  This taskq could
 *   be used for other purposes, e.g., if all the async_threads are in
 *   use, but this is deemed overkill since asyncs should occur rarely.
 */

/* Globals */
static char ibtf_handlers[] = "ibtl_handlers";

/* priority for IBTL threads (async, cq, and taskq) */
static pri_t ibtl_pri = MAXCLSYSPRI - 1; /* maybe override in /etc/system */

/* taskq used for HCA asyncs */
#define ibtl_async_taskq system_taskq

/* data for async handling by threads */
static kmutex_t ibtl_async_mutex;   /* protects most *_async_* data */
static kcondvar_t ibtl_async_cv;    /* async_threads wait on this */
static kcondvar_t ibtl_clnt_cv;     /* ibt_detach might wait on this */
static void ibtl_dec_clnt_async_cnt(ibtl_clnt_t *clntp);
static void ibtl_inc_clnt_async_cnt(ibtl_clnt_t *clntp);

static kt_did_t *ibtl_async_did;        /* for thread_join() */
static int ibtl_async_thread_init = 4;  /* total # of async_threads to create */
static int ibtl_async_thread_exit = 0;  /* set if/when thread(s) should exit */

/* async lists for various structures */
static ibtl_hca_devinfo_t *ibtl_async_hca_list_start, *ibtl_async_hca_list_end;
static ibtl_eec_t *ibtl_async_eec_list_start, *ibtl_async_eec_list_end;
static ibtl_qp_t *ibtl_async_qp_list_start, *ibtl_async_qp_list_end;
static ibtl_cq_t *ibtl_async_cq_list_start, *ibtl_async_cq_list_end;
static ibtl_srq_t *ibtl_async_srq_list_start, *ibtl_async_srq_list_end;

/* data for CQ completion handling by threads */
static kmutex_t ibtl_cq_mutex;  /* protects the cv and the list below */
static kcondvar_t ibtl_cq_cv;
static ibtl_cq_t *ibtl_cq_list_start, *ibtl_cq_list_end;

static int ibtl_cq_threads = 0;         /* total # of cq threads */
static int ibtl_cqs_using_threads = 0;  /* total # of cqs using threads */
static int ibtl_cq_thread_exit = 0;     /* set if/when thread(s) should exit */

/* value used to tell IBTL threads to exit */
#define IBTL_THREAD_EXIT 0x1b7fdead     /* IBTF DEAD */

int ibtl_eec_not_supported = 1;

char *ibtl_last_client_name;    /* may help debugging */

_NOTE(LOCK_ORDER(ibtl_clnt_list_mutex ibtl_async_mutex))
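
/*
 * For reference, the client-visible end of the async mechanism below is the
 * per-client async handler (clnt_modinfop->mi_async_handler), which
 * ibtl_async_client_call() invokes as handler(clnt_private, hca_hdl, code,
 * event_p).  A minimal, purely illustrative client handler might look like
 * the hedged sketch below; the xx_* names and state are hypothetical and
 * not part of this file:
 *
 *	static void
 *	xx_async_handler(void *clnt_private, ibt_hca_hdl_t hca_hdl,
 *	    ibt_async_code_t code, ibt_async_event_t *event_p)
 *	{
 *		switch (code) {
 *		case IBT_EVENT_PORT_UP:
 *		case IBT_ERROR_PORT_DOWN:
 *			xx_update_port_state(clnt_private, event_p->ev_port);
 *			break;
 *		case IBT_HCA_DETACH_EVENT:
 *			xx_quiesce_hca(clnt_private, event_p->ev_hca_guid);
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */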

/*
 * ibc_async_handler()
 *
 * Asynchronous Event/Error Handler.
 *
 *	This is the function called by HCA drivers to post the various async
 *	events and errors mentioned in the IB architecture spec.  See
 *	ibtl_types.h for additional details.
 *
 *	This function marks the pertinent IBTF object with the async_code,
 *	and queues the object for handling by an ibtl_async_thread.  If
 *	the object is NOT already marked for async processing, it is added
 *	to the associated list for that type of object, and an
 *	ibtl_async_thread is signaled to finish the async work.
 */
void
ibc_async_handler(ibc_clnt_hdl_t hca_devp, ibt_async_code_t code,
    ibc_async_event_t *event_p)
{
    ibtl_qp_t   *ibtl_qp;
    ibtl_cq_t   *ibtl_cq;
    ibtl_srq_t  *ibtl_srq;
    ibtl_eec_t  *ibtl_eec;
    uint8_t     port_minus1;

    IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler(%p, 0x%x, %p)",
        hca_devp, code, event_p);

    mutex_enter(&ibtl_async_mutex);

    switch (code) {
    case IBT_EVENT_PATH_MIGRATED_QP:
    case IBT_EVENT_SQD:
    case IBT_ERROR_CATASTROPHIC_QP:
    case IBT_ERROR_PATH_MIGRATE_REQ_QP:
    case IBT_EVENT_COM_EST_QP:
    case IBT_ERROR_INVALID_REQUEST_QP:
    case IBT_ERROR_ACCESS_VIOLATION_QP:
    case IBT_EVENT_EMPTY_QP:
        ibtl_qp = event_p->ev_qp_hdl;
        if (ibtl_qp == NULL) {
            IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler: "
                "bad qp handle");
            break;
        }
        switch (code) {
        case IBT_ERROR_CATASTROPHIC_QP:
            ibtl_qp->qp_cat_fma_ena = event_p->ev_fma_ena;
            break;
        case IBT_ERROR_PATH_MIGRATE_REQ_QP:
            ibtl_qp->qp_pth_fma_ena = event_p->ev_fma_ena;
            break;
        case IBT_ERROR_INVALID_REQUEST_QP:
            ibtl_qp->qp_inv_fma_ena = event_p->ev_fma_ena;
            break;
        case IBT_ERROR_ACCESS_VIOLATION_QP:
            ibtl_qp->qp_acc_fma_ena = event_p->ev_fma_ena;
            break;
        }

        ibtl_qp->qp_async_codes |= code;
        if ((ibtl_qp->qp_async_flags & IBTL_ASYNC_PENDING) == 0) {
            ibtl_qp->qp_async_flags |= IBTL_ASYNC_PENDING;
            ibtl_qp->qp_async_link = NULL;
            if (ibtl_async_qp_list_end == NULL)
                ibtl_async_qp_list_start = ibtl_qp;
            else
                ibtl_async_qp_list_end->qp_async_link = ibtl_qp;
            ibtl_async_qp_list_end = ibtl_qp;
            cv_signal(&ibtl_async_cv);
        }
        break;

    case IBT_ERROR_CQ:
        ibtl_cq = event_p->ev_cq_hdl;
        if (ibtl_cq == NULL) {
            IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler: "
                "bad cq handle");
            break;
        }
        ibtl_cq->cq_async_codes |= code;
        ibtl_cq->cq_fma_ena = event_p->ev_fma_ena;
        if ((ibtl_cq->cq_async_flags & IBTL_ASYNC_PENDING) == 0) {
            ibtl_cq->cq_async_flags |= IBTL_ASYNC_PENDING;
            ibtl_cq->cq_async_link = NULL;
            if (ibtl_async_cq_list_end == NULL)
                ibtl_async_cq_list_start = ibtl_cq;
            else
                ibtl_async_cq_list_end->cq_async_link = ibtl_cq;
            ibtl_async_cq_list_end = ibtl_cq;
            cv_signal(&ibtl_async_cv);
        }
        break;

    case IBT_ERROR_CATASTROPHIC_SRQ:
    case IBT_EVENT_LIMIT_REACHED_SRQ:
        ibtl_srq = event_p->ev_srq_hdl;
        if (ibtl_srq == NULL) {
            IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler: "
                "bad srq handle");
            break;
        }
        ibtl_srq->srq_async_codes |= code;
        ibtl_srq->srq_fma_ena = event_p->ev_fma_ena;
        if ((ibtl_srq->srq_async_flags & IBTL_ASYNC_PENDING) == 0) {
            ibtl_srq->srq_async_flags |= IBTL_ASYNC_PENDING;
            ibtl_srq->srq_async_link = NULL;
            if (ibtl_async_srq_list_end == NULL)
                ibtl_async_srq_list_start = ibtl_srq;
            else
                ibtl_async_srq_list_end->srq_async_link = ibtl_srq;
            ibtl_async_srq_list_end = ibtl_srq;
            cv_signal(&ibtl_async_cv);
        }
        break;

    case IBT_EVENT_PATH_MIGRATED_EEC:
    case IBT_ERROR_PATH_MIGRATE_REQ_EEC:
    case IBT_ERROR_CATASTROPHIC_EEC:
    case IBT_EVENT_COM_EST_EEC:
        if (ibtl_eec_not_supported) {
            IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler: "
                "EEC events are disabled.");
            break;
        }
        ibtl_eec = event_p->ev_eec_hdl;
        if (ibtl_eec == NULL) {
            IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler: "
                "bad eec handle");
            break;
        }
        switch (code) {
        case IBT_ERROR_PATH_MIGRATE_REQ_EEC:
            ibtl_eec->eec_pth_fma_ena = event_p->ev_fma_ena;
            break;
        case IBT_ERROR_CATASTROPHIC_EEC:
            ibtl_eec->eec_cat_fma_ena = event_p->ev_fma_ena;
            break;
        }
        ibtl_eec->eec_async_codes |= code;
        if ((ibtl_eec->eec_async_flags & IBTL_ASYNC_PENDING) == 0) {
            ibtl_eec->eec_async_flags |= IBTL_ASYNC_PENDING;
            ibtl_eec->eec_async_link = NULL;
            if (ibtl_async_eec_list_end == NULL)
                ibtl_async_eec_list_start = ibtl_eec;
            else
                ibtl_async_eec_list_end->eec_async_link = ibtl_eec;
            ibtl_async_eec_list_end = ibtl_eec;
            cv_signal(&ibtl_async_cv);
        }
        break;

    case IBT_ERROR_LOCAL_CATASTROPHIC:
        hca_devp->hd_async_codes |= code;
        hca_devp->hd_fma_ena = event_p->ev_fma_ena;
        /* FALLTHROUGH */

    case IBT_EVENT_PORT_UP:
    case IBT_ERROR_PORT_DOWN:
        if ((code == IBT_EVENT_PORT_UP) ||
            (code == IBT_ERROR_PORT_DOWN)) {
            if ((port_minus1 = event_p->ev_port - 1) >=
                hca_devp->hd_hca_attr->hca_nports) {
                IBTF_DPRINTF_L2(ibtf_handlers,
                    "ibc_async_handler: bad port #: %d",
                    event_p->ev_port);
                break;
            }
            hca_devp->hd_async_port[port_minus1] =
                ((code == IBT_EVENT_PORT_UP) ? IBTL_HCA_PORT_UP :
                IBTL_HCA_PORT_DOWN) | IBTL_HCA_PORT_CHANGED;
            hca_devp->hd_async_codes |= code;
        }

        if ((hca_devp->hd_async_flags & IBTL_ASYNC_PENDING) == 0) {
            hca_devp->hd_async_flags |= IBTL_ASYNC_PENDING;
            hca_devp->hd_async_link = NULL;
            if (ibtl_async_hca_list_end == NULL)
                ibtl_async_hca_list_start = hca_devp;
            else
                ibtl_async_hca_list_end->hd_async_link = hca_devp;
            ibtl_async_hca_list_end = hca_devp;
            cv_signal(&ibtl_async_cv);
        }

        break;

    default:
        IBTF_DPRINTF_L1(ibtf_handlers, "ibc_async_handler: "
            "invalid code (0x%x)", code);
    }

    mutex_exit(&ibtl_async_mutex);
}
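
/*
 * Illustrative (non-normative) sketch of how an HCA (CI) driver might post
 * one of the events handled above.  The driver handle and port variable are
 * hypothetical; only ibc_async_handler() itself and the ibc_async_event_t
 * fields it consumes are taken from this file:
 *
 *	ibc_async_event_t ev;
 *
 *	bzero(&ev, sizeof (ev));
 *	ev.ev_port = port;
 *	ibc_async_handler(ibc_hdl, IBT_ERROR_PORT_DOWN, &ev);
 *
 * The call only marks the ibtl_hca_devinfo_t and wakes an async_thread, so
 * it is safe from the CI driver's interrupt context.
 */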

/* Finally, make the async call to the client. */

static void
ibtl_async_client_call(ibtl_hca_t *ibt_hca, ibt_async_code_t code,
    ibt_async_event_t *event_p)
{
    ibtl_clnt_t         *clntp;
    void                *client_private;
    ibt_async_handler_t async_handler;
    char                *client_name;

    IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_async_client_call(%p, 0x%x, %p)",
        ibt_hca, code, event_p);

    clntp = ibt_hca->ha_clnt_devp;

    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_last_client_name))
    /* Record who is being called (just a debugging aid) */
    ibtl_last_client_name = client_name = clntp->clnt_name;
    _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_last_client_name))

    client_private = clntp->clnt_private;
    async_handler = clntp->clnt_modinfop->mi_async_handler;

    if (code & (IBT_EVENT_COM_EST_QP | IBT_EVENT_COM_EST_EEC)) {
        mutex_enter(&ibtl_clnt_list_mutex);
        async_handler = ibtl_cm_async_handler;
        client_private = ibtl_cm_clnt_private;
        mutex_exit(&ibtl_clnt_list_mutex);
        ibt_hca = NULL;
        IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_async_client_call: "
            "calling CM for COM_EST");
    } else {
        IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_async_client_call: "
            "calling client '%s'", client_name);
    }
    if (async_handler != NULL)
        async_handler(client_private, ibt_hca, code, event_p);
    else
        IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_async_client_call: "
            "client '%s' has no async handler", client_name);
}

/*
 * Inform CM or DM about HCA events.
 *
 *	We use taskqs to allow simultaneous notification, with sleeping.
 *	Since taskqs only allow one argument, we define a structure
 *	because we need to pass in more than one argument.
 */

struct ibtl_mgr_s {
    ibtl_hca_devinfo_t  *mgr_hca_devp;
    ibt_async_handler_t mgr_async_handler;
    void                *mgr_clnt_private;
};

/*
 * Asyncs of HCA level events for CM and DM.  Call CM or DM and tell them
 * about the HCA for the event recorded in the ibtl_hca_devinfo_t.
 */
static void
ibtl_do_mgr_async_task(void *arg)
{
    struct ibtl_mgr_s   *mgrp = (struct ibtl_mgr_s *)arg;
    ibtl_hca_devinfo_t  *hca_devp = mgrp->mgr_hca_devp;

    IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_do_mgr_async_task(0x%x)",
        hca_devp->hd_async_code);

    mgrp->mgr_async_handler(mgrp->mgr_clnt_private, NULL,
        hca_devp->hd_async_code, &hca_devp->hd_async_event);
    kmem_free(mgrp, sizeof (*mgrp));

    mutex_enter(&ibtl_clnt_list_mutex);
    if (--hca_devp->hd_async_task_cnt == 0)
        cv_signal(&hca_devp->hd_async_task_cv);
    mutex_exit(&ibtl_clnt_list_mutex);
}

static void
ibtl_tell_mgr(ibtl_hca_devinfo_t *hca_devp, ibt_async_handler_t async_handler,
    void *clnt_private)
{
    struct ibtl_mgr_s *mgrp;

    if (async_handler == NULL)
        return;

    _NOTE(NO_COMPETING_THREADS_NOW)
    mgrp = kmem_alloc(sizeof (*mgrp), KM_SLEEP);
    mgrp->mgr_hca_devp = hca_devp;
    mgrp->mgr_async_handler = async_handler;
    mgrp->mgr_clnt_private = clnt_private;
    hca_devp->hd_async_task_cnt++;

    (void) taskq_dispatch(ibtl_async_taskq, ibtl_do_mgr_async_task, mgrp,
        TQ_SLEEP);
    _NOTE(COMPETING_THREADS_NOW)
}

/*
 * Per client-device asyncs for HCA level events.  Call each client that is
 * using the HCA for the event recorded in the ibtl_hca_devinfo_t.
 */
static void
ibtl_hca_client_async_task(void *arg)
{
    ibtl_hca_t          *ibt_hca = (ibtl_hca_t *)arg;
    ibtl_hca_devinfo_t  *hca_devp = ibt_hca->ha_hca_devp;
    ibtl_clnt_t         *clntp = ibt_hca->ha_clnt_devp;
    ibt_async_event_t   async_event;

    IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_hca_client_async_task(%p, 0x%x)",
        ibt_hca, hca_devp->hd_async_code);

    bcopy(&hca_devp->hd_async_event, &async_event, sizeof (async_event));
    ibtl_async_client_call(ibt_hca, hca_devp->hd_async_code, &async_event);

    mutex_enter(&ibtl_async_mutex);
    if (--ibt_hca->ha_async_cnt == 0 &&
        (ibt_hca->ha_async_flags & IBTL_ASYNC_FREE_OBJECT)) {
        mutex_exit(&ibtl_async_mutex);
        kmem_free(ibt_hca, sizeof (ibtl_hca_t));
    } else
        mutex_exit(&ibtl_async_mutex);

    mutex_enter(&ibtl_clnt_list_mutex);
    if (--hca_devp->hd_async_task_cnt == 0)
        cv_signal(&hca_devp->hd_async_task_cv);
    if (--clntp->clnt_async_cnt == 0)
        cv_broadcast(&ibtl_clnt_cv);

    mutex_exit(&ibtl_clnt_list_mutex);
}

/*
 * Asyncs for HCA level events.
 *
 * The function continues to run until there are no more async
 * events/errors for this HCA.  An event is chosen for dispatch
 * to all clients of this HCA.  This thread dispatches them via
 * the ibtl_async_taskq, then sleeps until all tasks are done.
 *
 * This thread records the async_code and async_event in the
 * ibtl_hca_devinfo_t for all client taskq threads to reference.
 *
 * This is called from an async or taskq thread with ibtl_async_mutex held.
 */
static void
ibtl_do_hca_asyncs(ibtl_hca_devinfo_t *hca_devp)
{
    ibtl_hca_t                  *ibt_hca;
    ibt_async_code_t            code;
    ibtl_async_port_status_t    temp;
    uint8_t                     nports;
    uint8_t                     port_minus1;
    ibtl_async_port_status_t    *portp;

    mutex_exit(&ibtl_async_mutex);

    mutex_enter(&ibtl_clnt_list_mutex);
    while (hca_devp->hd_async_busy)
        cv_wait(&hca_devp->hd_async_busy_cv, &ibtl_clnt_list_mutex);
    hca_devp->hd_async_busy = 1;
    mutex_enter(&ibtl_async_mutex);

    bzero(&hca_devp->hd_async_event, sizeof (hca_devp->hd_async_event));
    for (;;) {

        hca_devp->hd_async_event.ev_fma_ena = 0;

        code = hca_devp->hd_async_codes;
        if (code & IBT_ERROR_LOCAL_CATASTROPHIC) {
            code = IBT_ERROR_LOCAL_CATASTROPHIC;
            hca_devp->hd_async_event.ev_fma_ena =
                hca_devp->hd_fma_ena;
        } else if (code & IBT_ERROR_PORT_DOWN)
            code = IBT_ERROR_PORT_DOWN;
        else if (code & IBT_EVENT_PORT_UP)
            code = IBT_EVENT_PORT_UP;
        else {
            hca_devp->hd_async_codes = 0;
            code = 0;
        }

        if (code == 0) {
            hca_devp->hd_async_flags &= ~IBTL_ASYNC_PENDING;
            break;
        }
        hca_devp->hd_async_codes &= ~code;

        if ((code == IBT_EVENT_PORT_UP) ||
            (code == IBT_ERROR_PORT_DOWN)) {
            /* PORT_UP or PORT_DOWN */
            portp = hca_devp->hd_async_port;
            nports = hca_devp->hd_hca_attr->hca_nports;
            for (port_minus1 = 0; port_minus1 < nports;
                port_minus1++) {
                temp = ((code == IBT_EVENT_PORT_UP) ?
                    IBTL_HCA_PORT_UP : IBTL_HCA_PORT_DOWN) |
                    IBTL_HCA_PORT_CHANGED;
                if (portp[port_minus1] == temp)
                    break;
            }
            if (port_minus1 >= nports) {
                /* we checked again, but found nothing */
                continue;
            }
            IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_do_hca_asyncs: "
                "async: port# %x code %x", port_minus1 + 1, code);
            /* mark it to check for other ports after we're done */
            hca_devp->hd_async_codes |= code;

            hca_devp->hd_async_event.ev_port = port_minus1 + 1;
            hca_devp->hd_async_port[port_minus1] &=
                ~IBTL_HCA_PORT_CHANGED;

            mutex_exit(&ibtl_async_mutex);
            ibtl_reinit_hca_portinfo(hca_devp, port_minus1 + 1);
            mutex_enter(&ibtl_async_mutex);
        }

        hca_devp->hd_async_code = code;
        hca_devp->hd_async_event.ev_hca_guid =
            hca_devp->hd_hca_attr->hca_node_guid;
        mutex_exit(&ibtl_async_mutex);

        /*
         * Make sure to inform CM, DM, and IBMA if we know of them.
         * Also, make sure not to inform them a second time, which
         * would occur if they have the HCA open.
         */

        if (ibtl_ibma_async_handler)
            ibtl_tell_mgr(hca_devp, ibtl_ibma_async_handler,
                ibtl_ibma_clnt_private);
        /* wait for all tasks to complete */
        while (hca_devp->hd_async_task_cnt != 0)
            cv_wait(&hca_devp->hd_async_task_cv,
                &ibtl_clnt_list_mutex);

        if (ibtl_dm_async_handler)
            ibtl_tell_mgr(hca_devp, ibtl_dm_async_handler,
                ibtl_dm_clnt_private);
        if (ibtl_cm_async_handler)
            ibtl_tell_mgr(hca_devp, ibtl_cm_async_handler,
                ibtl_cm_clnt_private);
        /* wait for all tasks to complete */
        while (hca_devp->hd_async_task_cnt != 0)
            cv_wait(&hca_devp->hd_async_task_cv,
                &ibtl_clnt_list_mutex);

        for (ibt_hca = hca_devp->hd_clnt_list;
            ibt_hca != NULL;
            ibt_hca = ibt_hca->ha_clnt_link) {

            /* Managers are handled above */
            if (IBTL_HCA2MODI_P(ibt_hca)->mi_async_handler ==
                ibtl_cm_async_handler)
                continue;
            if (IBTL_HCA2MODI_P(ibt_hca)->mi_async_handler ==
                ibtl_dm_async_handler)
                continue;
            if (IBTL_HCA2MODI_P(ibt_hca)->mi_async_handler ==
                ibtl_ibma_async_handler)
                continue;
            ++ibt_hca->ha_clnt_devp->clnt_async_cnt;

            mutex_enter(&ibtl_async_mutex);
            ibt_hca->ha_async_cnt++;
            mutex_exit(&ibtl_async_mutex);
            hca_devp->hd_async_task_cnt++;
            (void) taskq_dispatch(ibtl_async_taskq,
                ibtl_hca_client_async_task, ibt_hca, TQ_SLEEP);
        }

        /* wait for all tasks to complete */
        while (hca_devp->hd_async_task_cnt != 0)
            cv_wait(&hca_devp->hd_async_task_cv,
                &ibtl_clnt_list_mutex);

        mutex_enter(&ibtl_async_mutex);
    }
    hca_devp->hd_async_code = 0;
    hca_devp->hd_async_busy = 0;
    cv_broadcast(&hca_devp->hd_async_busy_cv);
    mutex_exit(&ibtl_clnt_list_mutex);
}

/*
 * Asyncs for QP objects.
 *
 * The function continues to run until there are no more async
 * events/errors for this object.
 */
static void
ibtl_do_qp_asyncs(ibtl_qp_t *ibtl_qp)
{
    ibt_async_code_t    code;
    ibt_async_event_t   async_event;

    ASSERT(MUTEX_HELD(&ibtl_async_mutex));
    bzero(&async_event, sizeof (async_event));
    async_event.ev_chan_hdl = IBTL_QP2CHAN(ibtl_qp);

    while ((code = ibtl_qp->qp_async_codes) != 0) {
        async_event.ev_fma_ena = 0;
        if (ibtl_qp->qp_async_flags & IBTL_ASYNC_FREE_OBJECT)
            code = 0;   /* fallthrough to "kmem_free" */
        else if (code & IBT_ERROR_CATASTROPHIC_QP) {
            code = IBT_ERROR_CATASTROPHIC_QP;
            async_event.ev_fma_ena = ibtl_qp->qp_cat_fma_ena;
        } else if (code & IBT_ERROR_INVALID_REQUEST_QP) {
            code = IBT_ERROR_INVALID_REQUEST_QP;
            async_event.ev_fma_ena = ibtl_qp->qp_inv_fma_ena;
        } else if (code & IBT_ERROR_ACCESS_VIOLATION_QP) {
            code = IBT_ERROR_ACCESS_VIOLATION_QP;
            async_event.ev_fma_ena = ibtl_qp->qp_acc_fma_ena;
        } else if (code & IBT_ERROR_PATH_MIGRATE_REQ_QP) {
            code = IBT_ERROR_PATH_MIGRATE_REQ_QP;
            async_event.ev_fma_ena = ibtl_qp->qp_pth_fma_ena;
        } else if (code & IBT_EVENT_PATH_MIGRATED_QP)
            code = IBT_EVENT_PATH_MIGRATED_QP;
        else if (code & IBT_EVENT_SQD)
            code = IBT_EVENT_SQD;
        else if (code & IBT_EVENT_COM_EST_QP)
            code = IBT_EVENT_COM_EST_QP;
        else if (code & IBT_EVENT_EMPTY_QP)
            code = IBT_EVENT_EMPTY_QP;
        else {
            IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_do_qp_asyncs: "
                "async: unexpected QP async code 0x%x", code);
            ibtl_qp->qp_async_codes = 0;
            code = 0;
        }
        ibtl_qp->qp_async_codes &= ~code;

        if (code) {
            mutex_exit(&ibtl_async_mutex);
            ibtl_async_client_call(ibtl_qp->qp_hca,
                code, &async_event);
            mutex_enter(&ibtl_async_mutex);
        }

        if (ibtl_qp->qp_async_flags & IBTL_ASYNC_FREE_OBJECT) {
            mutex_exit(&ibtl_async_mutex);
            cv_destroy(&(IBTL_QP2CHAN(ibtl_qp))->ch_cm_cv);
            mutex_destroy(&(IBTL_QP2CHAN(ibtl_qp))->ch_cm_mutex);
            kmem_free(IBTL_QP2CHAN(ibtl_qp),
                sizeof (ibtl_channel_t));
            mutex_enter(&ibtl_async_mutex);
            return;
        }
    }
    ibtl_qp->qp_async_flags &= ~IBTL_ASYNC_PENDING;
}

/*
 * Asyncs for SRQ objects.
 *
 * The function continues to run until there are no more async
 * events/errors for this object.
 */
static void
ibtl_do_srq_asyncs(ibtl_srq_t *ibtl_srq)
{
    ibt_async_code_t    code;
    ibt_async_event_t   async_event;

    ASSERT(MUTEX_HELD(&ibtl_async_mutex));
    bzero(&async_event, sizeof (async_event));
    async_event.ev_srq_hdl = ibtl_srq;
    async_event.ev_fma_ena = ibtl_srq->srq_fma_ena;

    while ((code = ibtl_srq->srq_async_codes) != 0) {
        if (ibtl_srq->srq_async_flags & IBTL_ASYNC_FREE_OBJECT)
            code = 0;   /* fallthrough to "kmem_free" */
        else if (code & IBT_ERROR_CATASTROPHIC_SRQ)
            code = IBT_ERROR_CATASTROPHIC_SRQ;
        else if (code & IBT_EVENT_LIMIT_REACHED_SRQ)
            code = IBT_EVENT_LIMIT_REACHED_SRQ;
        else {
            IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_do_srq_asyncs: "
                "async: unexpected SRQ async code 0x%x", code);
            ibtl_srq->srq_async_codes = 0;
            code = 0;
        }
        ibtl_srq->srq_async_codes &= ~code;

        if (code) {
            mutex_exit(&ibtl_async_mutex);
            ibtl_async_client_call(ibtl_srq->srq_hca,
                code, &async_event);
            mutex_enter(&ibtl_async_mutex);
        }

        if (ibtl_srq->srq_async_flags & IBTL_ASYNC_FREE_OBJECT) {
            mutex_exit(&ibtl_async_mutex);
            kmem_free(ibtl_srq, sizeof (struct ibtl_srq_s));
            mutex_enter(&ibtl_async_mutex);
            return;
        }
    }
    ibtl_srq->srq_async_flags &= ~IBTL_ASYNC_PENDING;
}

/*
 * Asyncs for CQ objects.
 *
 * The function continues to run until there are no more async
 * events/errors for this object.
 */
static void
ibtl_do_cq_asyncs(ibtl_cq_t *ibtl_cq)
{
    ibt_async_code_t    code;
    ibt_async_event_t   async_event;

    ASSERT(MUTEX_HELD(&ibtl_async_mutex));
    bzero(&async_event, sizeof (async_event));
    async_event.ev_cq_hdl = ibtl_cq;
    async_event.ev_fma_ena = ibtl_cq->cq_fma_ena;

    while ((code = ibtl_cq->cq_async_codes) != 0) {
        if (ibtl_cq->cq_async_flags & IBTL_ASYNC_FREE_OBJECT)
            code = 0;   /* fallthrough to "kmem_free" */
        else if (code & IBT_ERROR_CQ)
            code = IBT_ERROR_CQ;
        else {
            IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_do_cq_asyncs: "
                "async: unexpected CQ async code 0x%x", code);
            ibtl_cq->cq_async_codes = 0;
            code = 0;
        }
        ibtl_cq->cq_async_codes &= ~code;

        if (code) {
            mutex_exit(&ibtl_async_mutex);
            ibtl_async_client_call(ibtl_cq->cq_hca,
                code, &async_event);
            mutex_enter(&ibtl_async_mutex);
        }

        if (ibtl_cq->cq_async_flags & IBTL_ASYNC_FREE_OBJECT) {
            mutex_exit(&ibtl_async_mutex);
            mutex_destroy(&ibtl_cq->cq_mutex);
            kmem_free(ibtl_cq, sizeof (struct ibtl_cq_s));
            mutex_enter(&ibtl_async_mutex);
            return;
        }
    }
    ibtl_cq->cq_async_flags &= ~IBTL_ASYNC_PENDING;
}

/*
 * Asyncs for EEC objects.
 *
 * The function continues to run until there are no more async
 * events/errors for this object.
 */
static void
ibtl_do_eec_asyncs(ibtl_eec_t *ibtl_eec)
{
    ibt_async_code_t    code;
    ibt_async_event_t   async_event;

    ASSERT(MUTEX_HELD(&ibtl_async_mutex));
    bzero(&async_event, sizeof (async_event));
    async_event.ev_chan_hdl = ibtl_eec->eec_channel;

    while ((code = ibtl_eec->eec_async_codes) != 0) {
        async_event.ev_fma_ena = 0;
        if (ibtl_eec->eec_async_flags & IBTL_ASYNC_FREE_OBJECT)
            code = 0;   /* fallthrough to "kmem_free" */
        else if (code & IBT_ERROR_CATASTROPHIC_EEC) {
            code = IBT_ERROR_CATASTROPHIC_CHAN;
            async_event.ev_fma_ena = ibtl_eec->eec_cat_fma_ena;
        } else if (code & IBT_ERROR_PATH_MIGRATE_REQ_EEC) {
            code = IBT_ERROR_PATH_MIGRATE_REQ;
            async_event.ev_fma_ena = ibtl_eec->eec_pth_fma_ena;
        } else if (code & IBT_EVENT_PATH_MIGRATED_EEC)
            code = IBT_EVENT_PATH_MIGRATED;
        else if (code & IBT_EVENT_COM_EST_EEC)
            code = IBT_EVENT_COM_EST;
        else {
            IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_do_eec_asyncs: "
                "async: unexpected code 0x%x", code);
            ibtl_eec->eec_async_codes = 0;
            code = 0;
        }
        ibtl_eec->eec_async_codes &= ~code;

        if (code) {
            mutex_exit(&ibtl_async_mutex);
            ibtl_async_client_call(ibtl_eec->eec_hca,
                code, &async_event);
            mutex_enter(&ibtl_async_mutex);
        }

        if (ibtl_eec->eec_async_flags & IBTL_ASYNC_FREE_OBJECT) {
            mutex_exit(&ibtl_async_mutex);
            kmem_free(ibtl_eec, sizeof (struct ibtl_eec_s));
            mutex_enter(&ibtl_async_mutex);
            return;
        }
    }
    ibtl_eec->eec_async_flags &= ~IBTL_ASYNC_PENDING;
}

#ifdef __lock_lint
kmutex_t cpr_mutex;
#endif

/*
 * Loop forever, calling async_handlers until all of the async lists
 * are empty.
 */

static void
ibtl_async_thread(void)
{
#ifndef __lock_lint
    kmutex_t cpr_mutex;
#endif
    callb_cpr_t cprinfo;

    _NOTE(MUTEX_PROTECTS_DATA(cpr_mutex, cprinfo))
    _NOTE(NO_COMPETING_THREADS_NOW)
    mutex_init(&cpr_mutex, NULL, MUTEX_DRIVER, NULL);
    CALLB_CPR_INIT(&cprinfo, &cpr_mutex, callb_generic_cpr,
        "ibtl_async_thread");
    _NOTE(COMPETING_THREADS_NOW)

    mutex_enter(&ibtl_async_mutex);

    for (;;) {
        if (ibtl_async_hca_list_start) {
            ibtl_hca_devinfo_t *hca_devp;

            /* remove first entry from list */
            hca_devp = ibtl_async_hca_list_start;
            ibtl_async_hca_list_start = hca_devp->hd_async_link;
            hca_devp->hd_async_link = NULL;
            if (ibtl_async_hca_list_start == NULL)
                ibtl_async_hca_list_end = NULL;

            ibtl_do_hca_asyncs(hca_devp);

        } else if (ibtl_async_qp_list_start) {
            ibtl_qp_t *ibtl_qp;

            /* remove from list */
            ibtl_qp = ibtl_async_qp_list_start;
            ibtl_async_qp_list_start = ibtl_qp->qp_async_link;
            ibtl_qp->qp_async_link = NULL;
            if (ibtl_async_qp_list_start == NULL)
                ibtl_async_qp_list_end = NULL;

            ibtl_do_qp_asyncs(ibtl_qp);

        } else if (ibtl_async_srq_list_start) {
            ibtl_srq_t *ibtl_srq;

            /* remove from list */
            ibtl_srq = ibtl_async_srq_list_start;
            ibtl_async_srq_list_start = ibtl_srq->srq_async_link;
            ibtl_srq->srq_async_link = NULL;
            if (ibtl_async_srq_list_start == NULL)
                ibtl_async_srq_list_end = NULL;

            ibtl_do_srq_asyncs(ibtl_srq);

        } else if (ibtl_async_eec_list_start) {
            ibtl_eec_t *ibtl_eec;

            /* remove from list */
            ibtl_eec = ibtl_async_eec_list_start;
            ibtl_async_eec_list_start = ibtl_eec->eec_async_link;
            ibtl_eec->eec_async_link = NULL;
            if (ibtl_async_eec_list_start == NULL)
                ibtl_async_eec_list_end = NULL;

            ibtl_do_eec_asyncs(ibtl_eec);

        } else if (ibtl_async_cq_list_start) {
            ibtl_cq_t *ibtl_cq;

            /* remove from list */
            ibtl_cq = ibtl_async_cq_list_start;
            ibtl_async_cq_list_start = ibtl_cq->cq_async_link;
            ibtl_cq->cq_async_link = NULL;
            if (ibtl_async_cq_list_start == NULL)
                ibtl_async_cq_list_end = NULL;

            ibtl_do_cq_asyncs(ibtl_cq);

        } else {
            if (ibtl_async_thread_exit == IBTL_THREAD_EXIT)
                break;
            mutex_enter(&cpr_mutex);
            CALLB_CPR_SAFE_BEGIN(&cprinfo);
            mutex_exit(&cpr_mutex);

            cv_wait(&ibtl_async_cv, &ibtl_async_mutex);

            mutex_exit(&ibtl_async_mutex);
            mutex_enter(&cpr_mutex);
            CALLB_CPR_SAFE_END(&cprinfo, &cpr_mutex);
            mutex_exit(&cpr_mutex);
            mutex_enter(&ibtl_async_mutex);
        }
    }

    mutex_exit(&ibtl_async_mutex);

#ifndef __lock_lint
    mutex_enter(&cpr_mutex);
    CALLB_CPR_EXIT(&cprinfo);
#endif
    mutex_destroy(&cpr_mutex);
}


void
ibtl_free_qp_async_check(ibtl_qp_t *ibtl_qp)
{
    IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_qp_async_check(%p)", ibtl_qp);

    mutex_enter(&ibtl_async_mutex);

    /*
     * If there is an active async, mark this object to be freed
     * by the async_thread when it's done.
     */
    if (ibtl_qp->qp_async_flags & IBTL_ASYNC_PENDING) {
        ibtl_qp->qp_async_flags |= IBTL_ASYNC_FREE_OBJECT;
        mutex_exit(&ibtl_async_mutex);
    } else {    /* free the object now */
        mutex_exit(&ibtl_async_mutex);
        cv_destroy(&(IBTL_QP2CHAN(ibtl_qp))->ch_cm_cv);
        mutex_destroy(&(IBTL_QP2CHAN(ibtl_qp))->ch_cm_mutex);
        kmem_free(IBTL_QP2CHAN(ibtl_qp), sizeof (ibtl_channel_t));
    }
}

void
ibtl_free_cq_async_check(ibtl_cq_t *ibtl_cq)
{
    IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_cq_async_check(%p)", ibtl_cq);

    mutex_enter(&ibtl_async_mutex);

    /* if there is an active async, mark this object to be freed */
    if (ibtl_cq->cq_async_flags & IBTL_ASYNC_PENDING) {
        ibtl_cq->cq_async_flags |= IBTL_ASYNC_FREE_OBJECT;
        mutex_exit(&ibtl_async_mutex);
    } else {    /* free the object now */
        mutex_exit(&ibtl_async_mutex);
        mutex_destroy(&ibtl_cq->cq_mutex);
        kmem_free(ibtl_cq, sizeof (struct ibtl_cq_s));
    }
}

void
ibtl_free_srq_async_check(ibtl_srq_t *ibtl_srq)
{
    IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_srq_async_check(%p)",
        ibtl_srq);

    mutex_enter(&ibtl_async_mutex);

    /* if there is an active async, mark this object to be freed */
    if (ibtl_srq->srq_async_flags & IBTL_ASYNC_PENDING) {
        ibtl_srq->srq_async_flags |= IBTL_ASYNC_FREE_OBJECT;
        mutex_exit(&ibtl_async_mutex);
    } else {    /* free the object now */
        mutex_exit(&ibtl_async_mutex);
        kmem_free(ibtl_srq, sizeof (struct ibtl_srq_s));
    }
}

void
ibtl_free_eec_async_check(ibtl_eec_t *ibtl_eec)
{
    IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_eec_async_check(%p)",
        ibtl_eec);

    mutex_enter(&ibtl_async_mutex);

    /* if there is an active async, mark this object to be freed */
    if (ibtl_eec->eec_async_flags & IBTL_ASYNC_PENDING) {
        ibtl_eec->eec_async_flags |= IBTL_ASYNC_FREE_OBJECT;
        mutex_exit(&ibtl_async_mutex);
    } else {    /* free the object now */
        mutex_exit(&ibtl_async_mutex);
        kmem_free(ibtl_eec, sizeof (struct ibtl_eec_s));
    }
}

/*
 * This function differs from the above in that we assume this is called
 * from non-interrupt context, and never called from the async_thread.
 */

void
ibtl_free_hca_async_check(ibtl_hca_t *ibt_hca)
{
    IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_hca_async_check(%p)",
        ibt_hca);

    mutex_enter(&ibtl_async_mutex);

    /* if there is an active async, mark this object to be freed */
    if (ibt_hca->ha_async_cnt > 0) {
        ibt_hca->ha_async_flags |= IBTL_ASYNC_FREE_OBJECT;
        mutex_exit(&ibtl_async_mutex);
    } else {    /* free the object now */
        mutex_exit(&ibtl_async_mutex);
        kmem_free(ibt_hca, sizeof (ibtl_hca_t));
    }
}

/*
 * Completion Queue Handling.
 *
 *	A completion queue can be handled through a simple callback
 *	at interrupt level, or it may be queued for an ibtl_cq_thread
 *	to handle.  The latter is chosen during ibt_alloc_cq when the
 *	IBTF_CQ_HANDLER_IN_THREAD flag is specified.
 */

static void
ibtl_cq_handler_call(ibtl_cq_t *ibtl_cq)
{
    ibt_cq_handler_t    cq_handler;
    void                *arg;

    IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_cq_handler_call(%p)", ibtl_cq);

    mutex_enter(&ibtl_cq->cq_mutex);
    cq_handler = ibtl_cq->cq_comp_handler;
    arg = ibtl_cq->cq_arg;
    if (cq_handler != NULL)
        cq_handler(ibtl_cq, arg);
    else
        IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_cq_handler_call: "
            "no cq_handler for cq %p", ibtl_cq);
    mutex_exit(&ibtl_cq->cq_mutex);
}

/*
 * Before ibt_free_cq can continue, we need to ensure no more cq_handler
 * callbacks can occur.  When we get the mutex, we know there are no
 * outstanding cq_handler callbacks.  We set the cq_handler to NULL to
 * prohibit future callbacks.
 */
void
ibtl_free_cq_check(ibtl_cq_t *ibtl_cq)
{
    mutex_enter(&ibtl_cq->cq_mutex);
    ibtl_cq->cq_comp_handler = NULL;
    mutex_exit(&ibtl_cq->cq_mutex);
    if (ibtl_cq->cq_in_thread) {
        mutex_enter(&ibtl_cq_mutex);
        --ibtl_cqs_using_threads;
        while (ibtl_cq->cq_impl_flags & IBTL_CQ_PENDING) {
            ibtl_cq->cq_impl_flags &= ~IBTL_CQ_CALL_CLIENT;
            ibtl_cq->cq_impl_flags |= IBTL_CQ_FREE;
            cv_wait(&ibtl_cq_cv, &ibtl_cq_mutex);
        }
        mutex_exit(&ibtl_cq_mutex);
    }
}

/*
 * Loop forever, calling cq_handlers until the cq list
 * is empty.
 */

static void
ibtl_cq_thread(void)
{
#ifndef __lock_lint
    kmutex_t cpr_mutex;
#endif
    callb_cpr_t cprinfo;

    _NOTE(MUTEX_PROTECTS_DATA(cpr_mutex, cprinfo))
    _NOTE(NO_COMPETING_THREADS_NOW)
    mutex_init(&cpr_mutex, NULL, MUTEX_DRIVER, NULL);
    CALLB_CPR_INIT(&cprinfo, &cpr_mutex, callb_generic_cpr,
        "ibtl_cq_thread");
    _NOTE(COMPETING_THREADS_NOW)

    mutex_enter(&ibtl_cq_mutex);

    for (;;) {
        if (ibtl_cq_list_start) {
            ibtl_cq_t *ibtl_cq;

            ibtl_cq = ibtl_cq_list_start;
            ibtl_cq_list_start = ibtl_cq->cq_link;
            ibtl_cq->cq_link = NULL;
            if (ibtl_cq == ibtl_cq_list_end)
                ibtl_cq_list_end = NULL;

            while (ibtl_cq->cq_impl_flags & IBTL_CQ_CALL_CLIENT) {
                ibtl_cq->cq_impl_flags &= ~IBTL_CQ_CALL_CLIENT;
                mutex_exit(&ibtl_cq_mutex);
                ibtl_cq_handler_call(ibtl_cq);
                mutex_enter(&ibtl_cq_mutex);
            }
            ibtl_cq->cq_impl_flags &= ~IBTL_CQ_PENDING;
            if (ibtl_cq->cq_impl_flags & IBTL_CQ_FREE)
                cv_broadcast(&ibtl_cq_cv);
        } else {
            if (ibtl_cq_thread_exit == IBTL_THREAD_EXIT)
                break;
            mutex_enter(&cpr_mutex);
            CALLB_CPR_SAFE_BEGIN(&cprinfo);
            mutex_exit(&cpr_mutex);

            cv_wait(&ibtl_cq_cv, &ibtl_cq_mutex);

            mutex_exit(&ibtl_cq_mutex);
            mutex_enter(&cpr_mutex);
            CALLB_CPR_SAFE_END(&cprinfo, &cpr_mutex);
            mutex_exit(&cpr_mutex);
            mutex_enter(&ibtl_cq_mutex);
        }
    }

    mutex_exit(&ibtl_cq_mutex);
#ifndef __lock_lint
    mutex_enter(&cpr_mutex);
    CALLB_CPR_EXIT(&cprinfo);
#endif
    mutex_destroy(&cpr_mutex);
}


/*
 * ibc_cq_handler()
 *
 *	Completion Queue Notification Handler.
 */
/*ARGSUSED*/
void
ibc_cq_handler(ibc_clnt_hdl_t ibc_hdl, ibt_cq_hdl_t ibtl_cq)
{
    IBTF_DPRINTF_L4(ibtf_handlers, "ibc_cq_handler(%p, %p)",
        ibc_hdl, ibtl_cq);

    if (ibtl_cq->cq_in_thread) {
        mutex_enter(&ibtl_cq_mutex);
        ibtl_cq->cq_impl_flags |= IBTL_CQ_CALL_CLIENT;
        if ((ibtl_cq->cq_impl_flags & IBTL_CQ_PENDING) == 0) {
            ibtl_cq->cq_impl_flags |= IBTL_CQ_PENDING;
            ibtl_cq->cq_link = NULL;
            if (ibtl_cq_list_end == NULL)
                ibtl_cq_list_start = ibtl_cq;
            else
                ibtl_cq_list_end->cq_link = ibtl_cq;
            ibtl_cq_list_end = ibtl_cq;
            cv_signal(&ibtl_cq_cv);
        }
        mutex_exit(&ibtl_cq_mutex);
        return;
    } else
        ibtl_cq_handler_call(ibtl_cq);
}


/*
 * ibt_enable_cq_notify()
 *	Enable Notification requests on the specified CQ.
 *
 *	ibt_cq		The CQ handle.
 *
 *	notify_type	Enable notifications for all (IBT_NEXT_COMPLETION)
 *			completions, or the next Solicited completion
 *			(IBT_NEXT_SOLICITED) only.
 *
 *	Completion notifications are disabled by setting the completion
 *	handler to NULL by calling ibt_set_cq_handler().
 */
ibt_status_t
ibt_enable_cq_notify(ibt_cq_hdl_t ibtl_cq, ibt_cq_notify_flags_t notify_type)
{
    IBTF_DPRINTF_L3(ibtf_handlers, "ibt_enable_cq_notify(%p, %d)",
        ibtl_cq, notify_type);

    return (IBTL_CQ2CIHCAOPS_P(ibtl_cq)->ibc_notify_cq(
        IBTL_CQ2CIHCA(ibtl_cq), ibtl_cq->cq_ibc_cq_hdl, notify_type));
}


/*
 * ibt_set_cq_handler()
 *	Register a work request completion handler with the IBTF.
 *
 *	ibt_cq			The CQ handle.
 *
 *	completion_handler	The completion handler.
 *
 *	arg			The IBTF client private argument to be passed
 *				back to the client when calling the CQ
 *				completion handler.
 *
 *	Completion notifications are disabled by setting the completion
 *	handler to NULL.  When setting the handler to NULL, no additional
 *	calls to the previous CQ handler will be initiated, but there may
 *	be one in progress.
 *
 *	This function does not otherwise change the state of previous
 *	calls to ibt_enable_cq_notify().
 */
void
ibt_set_cq_handler(ibt_cq_hdl_t ibtl_cq, ibt_cq_handler_t completion_handler,
    void *arg)
{
    IBTF_DPRINTF_L3(ibtf_handlers, "ibt_set_cq_handler(%p, %p, %p)",
        ibtl_cq, completion_handler, arg);

    mutex_enter(&ibtl_cq->cq_mutex);
    ibtl_cq->cq_comp_handler = completion_handler;
    ibtl_cq->cq_arg = arg;
    mutex_exit(&ibtl_cq->cq_mutex);
}
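
/*
 * Illustrative (non-normative) usage of the two interfaces above, as a
 * client might combine them; the xx_* function, argument, and state names
 * are hypothetical:
 *
 *	static void
 *	xx_cq_handler(ibt_cq_hdl_t cq_hdl, void *arg)
 *	{
 *		xx_drain_cq(arg, cq_hdl);
 *		(void) ibt_enable_cq_notify(cq_hdl, IBT_NEXT_COMPLETION);
 *	}
 *
 *	ibt_set_cq_handler(cq_hdl, xx_cq_handler, xx_state);
 *	(void) ibt_enable_cq_notify(cq_hdl, IBT_NEXT_COMPLETION);
 *
 * Whether xx_cq_handler runs at interrupt level or in an ibtl_cq_thread
 * depends on how the CQ was allocated, as described earlier in this file.
 */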

/*
 * Inform IBT clients about New HCAs.
 *
 *	We use taskqs to allow simultaneous notification, with sleeping.
 *	Since taskqs only allow one argument, we define a structure
 *	because we need to pass in two arguments.
 */

struct ibtl_new_hca_s {
    ibtl_clnt_t         *nh_clntp;
    ibtl_hca_devinfo_t  *nh_hca_devp;
    ibt_async_code_t    nh_code;
};

static void
ibtl_tell_client_about_new_hca(void *arg)
{
    struct ibtl_new_hca_s   *new_hcap = (struct ibtl_new_hca_s *)arg;
    ibtl_clnt_t             *clntp = new_hcap->nh_clntp;
    ibt_async_event_t       async_event;
    ibtl_hca_devinfo_t      *hca_devp = new_hcap->nh_hca_devp;

    bzero(&async_event, sizeof (async_event));
    async_event.ev_hca_guid = hca_devp->hd_hca_attr->hca_node_guid;
    clntp->clnt_modinfop->mi_async_handler(
        clntp->clnt_private, NULL, new_hcap->nh_code, &async_event);
    kmem_free(new_hcap, sizeof (*new_hcap));
#ifdef __lock_lint
    {
        ibt_hca_hdl_t hca_hdl;
        (void) ibt_open_hca(clntp, 0ULL, &hca_hdl);
    }
#endif
    mutex_enter(&ibtl_clnt_list_mutex);
    if (--hca_devp->hd_async_task_cnt == 0)
        cv_signal(&hca_devp->hd_async_task_cv);
    if (--clntp->clnt_async_cnt == 0)
        cv_broadcast(&ibtl_clnt_cv);
    mutex_exit(&ibtl_clnt_list_mutex);
}

/*
 * ibtl_announce_new_hca:
 *
 *	o First attach these clients in the given order
 *		IBMA
 *		IBCM
 *
 *	o Next attach all other clients in parallel.
 *
 * NOTE: Use the taskq to simultaneously notify all clients of the new HCA.
 * Retval from clients is ignored.
 */
void
ibtl_announce_new_hca(ibtl_hca_devinfo_t *hca_devp)
{
    ibtl_clnt_t             *clntp;
    struct ibtl_new_hca_s   *new_hcap;

    IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_announce_new_hca(%p, %llX)",
        hca_devp, hca_devp->hd_hca_attr->hca_node_guid);

    mutex_enter(&ibtl_clnt_list_mutex);

    clntp = ibtl_clnt_list;
    while (clntp != NULL) {
        if (clntp->clnt_modinfop->mi_clnt_class == IBT_IBMA) {
            IBTF_DPRINTF_L4(ibtf_handlers,
                "ibtl_announce_new_hca: calling IBMF");
            if (clntp->clnt_modinfop->mi_async_handler) {
                _NOTE(NO_COMPETING_THREADS_NOW)
                new_hcap = kmem_alloc(sizeof (*new_hcap),
                    KM_SLEEP);
                new_hcap->nh_clntp = clntp;
                new_hcap->nh_hca_devp = hca_devp;
                new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
                _NOTE(COMPETING_THREADS_NOW)
                clntp->clnt_async_cnt++;
                hca_devp->hd_async_task_cnt++;

                (void) taskq_dispatch(ibtl_async_taskq,
                    ibtl_tell_client_about_new_hca, new_hcap,
                    TQ_SLEEP);
            }
            break;
        }
        clntp = clntp->clnt_list_link;
    }
    if (clntp != NULL)
        while (clntp->clnt_async_cnt > 0)
            cv_wait(&ibtl_clnt_cv, &ibtl_clnt_list_mutex);
    clntp = ibtl_clnt_list;
    while (clntp != NULL) {
        if (clntp->clnt_modinfop->mi_clnt_class == IBT_DM) {
            IBTF_DPRINTF_L4(ibtf_handlers,
                "ibtl_announce_new_hca: calling IBDM");
            if (clntp->clnt_modinfop->mi_async_handler) {
                _NOTE(NO_COMPETING_THREADS_NOW)
                new_hcap = kmem_alloc(sizeof (*new_hcap),
                    KM_SLEEP);
                new_hcap->nh_clntp = clntp;
                new_hcap->nh_hca_devp = hca_devp;
                new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
                _NOTE(COMPETING_THREADS_NOW)
                clntp->clnt_async_cnt++;
                hca_devp->hd_async_task_cnt++;

                (void) taskq_dispatch(ibtl_async_taskq,
                    ibtl_tell_client_about_new_hca, new_hcap,
                    TQ_SLEEP);
            }
            break;
        }
        clntp = clntp->clnt_list_link;
    }
    if (clntp != NULL)
        while (clntp->clnt_async_cnt > 0)
            cv_wait(&ibtl_clnt_cv, &ibtl_clnt_list_mutex);
    clntp = ibtl_clnt_list;
    while (clntp != NULL) {
        if ((clntp->clnt_modinfop->mi_clnt_class != IBT_DM) &&
            (clntp->clnt_modinfop->mi_clnt_class != IBT_IBMA)) {
            IBTF_DPRINTF_L4(ibtf_handlers,
                "ibtl_announce_new_hca: generic Class %x",
                clntp->clnt_modinfop->mi_clnt_class);
            if (clntp->clnt_modinfop->mi_async_handler) {
                _NOTE(NO_COMPETING_THREADS_NOW)
                new_hcap = kmem_alloc(sizeof (*new_hcap),
                    KM_SLEEP);
                new_hcap->nh_clntp = clntp;
                new_hcap->nh_hca_devp = hca_devp;
                new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
                _NOTE(COMPETING_THREADS_NOW)
                clntp->clnt_async_cnt++;
                hca_devp->hd_async_task_cnt++;

                (void) taskq_dispatch(ibtl_async_taskq,
                    ibtl_tell_client_about_new_hca, new_hcap,
                    TQ_SLEEP);
            }
        }
        clntp = clntp->clnt_list_link;
    }

    /* wait for all tasks to complete */
    while (hca_devp->hd_async_task_cnt != 0)
        cv_wait(&hca_devp->hd_async_task_cv, &ibtl_clnt_list_mutex);

    /* wakeup thread that may be waiting to send an HCA async */
    ASSERT(hca_devp->hd_async_busy == 1);
    hca_devp->hd_async_busy = 0;
    cv_broadcast(&hca_devp->hd_async_busy_cv);
    mutex_exit(&ibtl_clnt_list_mutex);
}

/*
 * ibtl_detach_all_clients:
 *
 *	Return value - 0 for Success, 1 for Failure
 *
 *	o First detach general clients.
 *
 *	o Next detach these clients
 *		IBCM
 *		IBDM
 *
 *	o Finally, detach this client
 *		IBMA
 */
int
ibtl_detach_all_clients(ibtl_hca_devinfo_t *hca_devp)
{
    ib_guid_t           hcaguid = hca_devp->hd_hca_attr->hca_node_guid;
    ibtl_hca_t          *ibt_hca;
    ibtl_clnt_t         *clntp;
    int                 retval;
    int                 ibdm_done = 0;
    int                 ibcm_done = 0;
#ifdef _NOT_READY
    ibt_async_event_t   async_event;
#endif

    IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_detach_all_clients(%llX)",
        hcaguid);

    ASSERT(MUTEX_HELD(&ibtl_clnt_list_mutex));

    while (hca_devp->hd_async_busy)
        cv_wait(&hca_devp->hd_async_busy_cv, &ibtl_clnt_list_mutex);
    hca_devp->hd_async_busy = 1;

    /* First inform general clients asynchronously */
    hca_devp->hd_async_event.ev_hca_guid = hcaguid;
    hca_devp->hd_async_event.ev_fma_ena = 0;
    hca_devp->hd_async_event.ev_chan_hdl = NULL;
    hca_devp->hd_async_event.ev_cq_hdl = NULL;
    hca_devp->hd_async_code = IBT_HCA_DETACH_EVENT;

    ibt_hca = hca_devp->hd_clnt_list;
    while (ibt_hca != NULL) {
        clntp = ibt_hca->ha_clnt_devp;
        if (IBTL_GENERIC_CLIENT(clntp)) {
            ++ibt_hca->ha_clnt_devp->clnt_async_cnt;
            mutex_enter(&ibtl_async_mutex);
            ibt_hca->ha_async_cnt++;
            mutex_exit(&ibtl_async_mutex);
            hca_devp->hd_async_task_cnt++;

            (void) taskq_dispatch(ibtl_async_taskq,
                ibtl_hca_client_async_task, ibt_hca, TQ_SLEEP);
        }
        ibt_hca = ibt_hca->ha_clnt_link;
    }

    /* wait for all clients to complete */
    while (hca_devp->hd_async_task_cnt != 0) {
        cv_wait(&hca_devp->hd_async_task_cv, &ibtl_clnt_list_mutex);
    }
    /* Go thru the clients and check if any have not closed this HCA. */
    ibt_hca = hca_devp->hd_clnt_list;
    while (ibt_hca != NULL) {
        clntp = ibt_hca->ha_clnt_devp;
        if (IBTL_GENERIC_CLIENT(clntp)) {
            IBTF_DPRINTF_L2(ibtf_handlers,
                "ibtl_detach_all_clients: "
                "client '%s' failed to close the HCA.",
                ibt_hca->ha_clnt_devp->clnt_modinfop->mi_clnt_name);
            retval = 1;
            goto bailout;
        }
        ibt_hca = ibt_hca->ha_clnt_link;
    }

    /* Next inform CM and DM asynchronously */
    ibt_hca = hca_devp->hd_clnt_list;
    while (ibt_hca != NULL) {
        clntp = ibt_hca->ha_clnt_devp;
        if (clntp->clnt_modinfop->mi_clnt_class != IBT_IBMA) {
            ++ibt_hca->ha_clnt_devp->clnt_async_cnt;
            mutex_enter(&ibtl_async_mutex);
            ibt_hca->ha_async_cnt++;
            mutex_exit(&ibtl_async_mutex);
            hca_devp->hd_async_task_cnt++;
            if (clntp->clnt_modinfop->mi_clnt_class == IBT_DM)
                ibdm_done = 1;
            if (clntp->clnt_modinfop->mi_clnt_class == IBT_CM)
                ibcm_done = 1;

            (void) taskq_dispatch(ibtl_async_taskq,
                ibtl_hca_client_async_task, ibt_hca, TQ_SLEEP);
        }
        ibt_hca = ibt_hca->ha_clnt_link;
    }
#ifdef _NOT_READY
    /* this code will likely cause a recursive mutex panic */
    mutex_enter(&ibtl_clnt_list_mutex);
    if (ibdm_done == 0 && ibtl_dm_async_handler != NULL) {
        bzero(&async_event, sizeof (async_event));
        async_event.ev_hca_guid = hcaguid;
        ibtl_dm_async_handler(ibtl_dm_clnt_private, NULL,
            IBT_HCA_DETACH_EVENT, &async_event);
    }
    if (ibcm_done == 0 && ibtl_cm_async_handler != NULL) {
        bzero(&async_event, sizeof (async_event));
        async_event.ev_hca_guid = hcaguid;
        ibtl_cm_async_handler(ibtl_cm_clnt_private, NULL,
            IBT_HCA_DETACH_EVENT, &async_event);
    }
    mutex_exit(&ibtl_clnt_list_mutex);
#else
    /* make sure lint does not complain */
    IBTF_DPRINTF_L5(ibtf_handlers, "ibtl_detach_all_clients: "
        "DM done %d, CM done %d", ibdm_done, ibcm_done);
#endif

    /* wait for CM and DM to complete */
    while (hca_devp->hd_async_task_cnt != 0) {
        cv_wait(&hca_devp->hd_async_task_cv, &ibtl_clnt_list_mutex);
    }

    /* Go thru the clients and check if any have not closed this HCA. */
    retval = 0;
    ibt_hca = hca_devp->hd_clnt_list;
    while (ibt_hca != NULL) {
        clntp = ibt_hca->ha_clnt_devp;
        if (clntp->clnt_modinfop->mi_clnt_class != IBT_IBMA) {
            IBTF_DPRINTF_L2(ibtf_handlers,
                "ibtl_detach_all_clients: "
                "client '%s' failed to close the HCA.",
                ibt_hca->ha_clnt_devp->clnt_modinfop->mi_clnt_name);
            retval = 1;
            goto bailout;
        }
        ibt_hca = ibt_hca->ha_clnt_link;
    }

    /* Finally, inform IBMA */
    ibt_hca = hca_devp->hd_clnt_list;
    while (ibt_hca != NULL) {
        clntp = ibt_hca->ha_clnt_devp;
        if (clntp->clnt_modinfop->mi_clnt_class == IBT_IBMA) {
            ++ibt_hca->ha_clnt_devp->clnt_async_cnt;
            mutex_enter(&ibtl_async_mutex);
            ibt_hca->ha_async_cnt++;
            mutex_exit(&ibtl_async_mutex);
            hca_devp->hd_async_task_cnt++;

            (void) taskq_dispatch(ibtl_async_taskq,
                ibtl_hca_client_async_task, ibt_hca, TQ_SLEEP);
        } else
            IBTF_DPRINTF_L2(ibtf_handlers,
                "ibtl_detach_all_clients: "
                "client '%s' is unexpectedly on the client list",
                ibt_hca->ha_clnt_devp->clnt_modinfop->mi_clnt_name);
        ibt_hca = ibt_hca->ha_clnt_link;
    }

    /* wait for IBMA to complete */
    while (hca_devp->hd_async_task_cnt != 0) {
        cv_wait(&hca_devp->hd_async_task_cv, &ibtl_clnt_list_mutex);
    }

    /* Check if this HCA's client list is empty. */
    ibt_hca = hca_devp->hd_clnt_list;
    if (ibt_hca != NULL) {
        IBTF_DPRINTF_L2(ibtf_handlers,
            "ibtl_detach_all_clients: "
            "client '%s' failed to close the HCA.",
            ibt_hca->ha_clnt_devp->clnt_modinfop->mi_clnt_name);
        retval = 1;
    } else
        retval = 0;

bailout:
    hca_devp->hd_async_busy = 0;
    cv_broadcast(&hca_devp->hd_async_busy_cv);
    return (retval);
}

void
ibtl_free_clnt_async_check(ibtl_clnt_t *clntp)
{
    IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_clnt_async_check(%p)", clntp);

    ASSERT(MUTEX_HELD(&ibtl_clnt_list_mutex));

    /* wait for all asyncs based on "ibtl_clnt_list" to complete */
    while (clntp->clnt_async_cnt != 0) {
        cv_wait(&ibtl_clnt_cv, &ibtl_clnt_list_mutex);
    }
}

static void
ibtl_dec_clnt_async_cnt(ibtl_clnt_t *clntp)
{
    mutex_enter(&ibtl_clnt_list_mutex);
    if (--clntp->clnt_async_cnt == 0) {
        cv_broadcast(&ibtl_clnt_cv);
    }
    mutex_exit(&ibtl_clnt_list_mutex);
}

static void
ibtl_inc_clnt_async_cnt(ibtl_clnt_t *clntp)
{
    mutex_enter(&ibtl_clnt_list_mutex);
    ++clntp->clnt_async_cnt;
    mutex_exit(&ibtl_clnt_list_mutex);
}


/*
 * Functions and data structures to inform clients that a notification
 * has occurred about Multicast Groups that might interest them.
 */
struct ibtl_sm_notice {
    ibt_clnt_hdl_t          np_ibt_hdl;
    ib_gid_t                np_sgid;
    ibt_subnet_event_code_t np_code;
    ibt_subnet_event_t      np_event;
};

static void
ibtl_sm_notice_task(void *arg)
{
    struct ibtl_sm_notice *noticep = (struct ibtl_sm_notice *)arg;
    ibt_clnt_hdl_t ibt_hdl = noticep->np_ibt_hdl;
    ibt_sm_notice_handler_t sm_notice_handler;

    sm_notice_handler = ibt_hdl->clnt_sm_trap_handler;
    if (sm_notice_handler != NULL)
        sm_notice_handler(ibt_hdl->clnt_sm_trap_handler_arg,
            noticep->np_sgid, noticep->np_code, &noticep->np_event);
    kmem_free(noticep, sizeof (*noticep));
    ibtl_dec_clnt_async_cnt(ibt_hdl);
}

/*
 * Inform the client that MCG notices are not working at this time.
 */
void
ibtl_cm_sm_notice_init_failure(ibtl_cm_sm_init_fail_t *ifail)
{
    ibt_clnt_hdl_t ibt_hdl = ifail->smf_ibt_hdl;
    struct ibtl_sm_notice *noticep;
    ib_gid_t *sgidp = &ifail->smf_sgid[0];
    int i;

    for (i = 0; i < ifail->smf_num_sgids; i++) {
        _NOTE(NO_COMPETING_THREADS_NOW)
        noticep = kmem_zalloc(sizeof (*noticep), KM_SLEEP);
        noticep->np_ibt_hdl = ibt_hdl;
        noticep->np_sgid = *sgidp++;
        noticep->np_code = IBT_SM_EVENT_UNAVAILABLE;
        _NOTE(COMPETING_THREADS_NOW)
        ibtl_inc_clnt_async_cnt(ibt_hdl);
        (void) taskq_dispatch(ibtl_async_taskq,
            ibtl_sm_notice_task, noticep, TQ_SLEEP);
    }
}

/*
 * Inform all clients of the event.
 */
void
ibtl_cm_sm_notice_handler(ib_gid_t sgid, ibt_subnet_event_code_t code,
    ibt_subnet_event_t *event)
{
    _NOTE(NO_COMPETING_THREADS_NOW)
    struct ibtl_sm_notice   *noticep;
    ibtl_clnt_t             *clntp;

    mutex_enter(&ibtl_clnt_list_mutex);
    clntp = ibtl_clnt_list;
    while (clntp != NULL) {
        if (clntp->clnt_sm_trap_handler) {
            noticep = kmem_zalloc(sizeof (*noticep), KM_SLEEP);
            noticep->np_ibt_hdl = clntp;
            noticep->np_sgid = sgid;
            noticep->np_code = code;
            noticep->np_event = *event;
            ++clntp->clnt_async_cnt;
            (void) taskq_dispatch(ibtl_async_taskq,
                ibtl_sm_notice_task, noticep, TQ_SLEEP);
        }
        clntp = clntp->clnt_list_link;
    }
    mutex_exit(&ibtl_clnt_list_mutex);
    _NOTE(COMPETING_THREADS_NOW)
}

/*
 * Record the handler for this client.
 */
void
ibtl_cm_set_sm_notice_handler(ibt_clnt_hdl_t ibt_hdl,
    ibt_sm_notice_handler_t sm_notice_handler, void *private)
{
    _NOTE(NO_COMPETING_THREADS_NOW)
    ibt_hdl->clnt_sm_trap_handler = sm_notice_handler;
    ibt_hdl->clnt_sm_trap_handler_arg = private;
    _NOTE(COMPETING_THREADS_NOW)
}
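
/*
 * Illustrative (non-normative) sketch of how a caller might register and
 * receive SM notices via the interfaces above.  The xx_* names are
 * hypothetical; the handler arguments match the call made from
 * ibtl_sm_notice_task() above:
 *
 *	static void
 *	xx_sm_notice_handler(void *arg, ib_gid_t sgid,
 *	    ibt_subnet_event_code_t code, ibt_subnet_event_t *event)
 *	{
 *		if (code == IBT_SM_EVENT_UNAVAILABLE)
 *			xx_disable_mcg_tracking(arg, sgid);
 *	}
 *
 *	ibtl_cm_set_sm_notice_handler(ibt_hdl, xx_sm_notice_handler, xx_state);
 */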

/*
 * ibtl_another_cq_handler_in_thread()
 *
 * Conditionally increase the number of cq_threads.
 * The number of threads grows, based on the number of cqs using threads.
 *
 * The table below controls the number of threads as follows:
 *
 *	Number of CQs	Number of cq_threads
 *		0		0
 *		1		1
 *		2-3		2
 *		4-5		3
 *		6-9		4
 *		10-15		5
 *		16-23		6
 *		24-31		7
 *		32+		8
 */

#define IBTL_CQ_MAXTHREADS 8
static uint8_t ibtl_cq_scaling[IBTL_CQ_MAXTHREADS] = {
    1, 2, 4, 6, 10, 16, 24, 32
};

static kt_did_t ibtl_cq_did[IBTL_CQ_MAXTHREADS];

void
ibtl_another_cq_handler_in_thread(void)
{
    kthread_t *t;
    int my_idx;

    mutex_enter(&ibtl_cq_mutex);
    if ((ibtl_cq_threads == IBTL_CQ_MAXTHREADS) ||
        (++ibtl_cqs_using_threads < ibtl_cq_scaling[ibtl_cq_threads])) {
        mutex_exit(&ibtl_cq_mutex);
        return;
    }
    my_idx = ibtl_cq_threads++;
    mutex_exit(&ibtl_cq_mutex);
    t = thread_create(NULL, 0, ibtl_cq_thread, NULL, 0, &p0, TS_RUN,
        ibtl_pri - 1);
    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
    ibtl_cq_did[my_idx] = t->t_did;     /* save for thread_join() */
    _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
}

void
ibtl_thread_init(void)
{
    IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_thread_init()");

    mutex_init(&ibtl_async_mutex, NULL, MUTEX_DEFAULT, NULL);
    cv_init(&ibtl_async_cv, NULL, CV_DEFAULT, NULL);
    cv_init(&ibtl_clnt_cv, NULL, CV_DEFAULT, NULL);

    mutex_init(&ibtl_cq_mutex, NULL, MUTEX_DEFAULT, NULL);
    cv_init(&ibtl_cq_cv, NULL, CV_DEFAULT, NULL);
}

void
ibtl_thread_init2(void)
{
    int i;
    static int initted = 0;
    kthread_t *t;

    mutex_enter(&ibtl_async_mutex);
    if (initted == 1) {
        mutex_exit(&ibtl_async_mutex);
        return;
    }
    initted = 1;
    mutex_exit(&ibtl_async_mutex);
    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_async_did))
    ibtl_async_did = kmem_zalloc(ibtl_async_thread_init * sizeof (kt_did_t),
        KM_SLEEP);

    IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_thread_init2()");

    for (i = 0; i < ibtl_async_thread_init; i++) {
        t = thread_create(NULL, 0, ibtl_async_thread, NULL, 0, &p0,
            TS_RUN, ibtl_pri - 1);
        ibtl_async_did[i] = t->t_did;   /* thread_join() */
    }
    _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_async_did))
    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
    for (i = 0; i < ibtl_cq_threads; i++) {
        t = thread_create(NULL, 0, ibtl_cq_thread, NULL, 0, &p0,
            TS_RUN, ibtl_pri - 1);
        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
        ibtl_cq_did[i] = t->t_did;      /* save for thread_join() */
        _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
    }
    _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
}

void
ibtl_thread_fini(void)
{
    int i;

    IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_thread_fini()");

    /* undo the work done by ibtl_thread_init() */

    mutex_enter(&ibtl_cq_mutex);
    ibtl_cq_thread_exit = IBTL_THREAD_EXIT;
    cv_broadcast(&ibtl_cq_cv);
    mutex_exit(&ibtl_cq_mutex);

    mutex_enter(&ibtl_async_mutex);
    ibtl_async_thread_exit = IBTL_THREAD_EXIT;
    cv_broadcast(&ibtl_async_cv);
    mutex_exit(&ibtl_async_mutex);

    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
    for (i = 0; i < ibtl_cq_threads; i++)
        thread_join(ibtl_cq_did[i]);
    _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))

    if (ibtl_async_did) {
        for (i = 0; i < ibtl_async_thread_init; i++)
            thread_join(ibtl_async_did[i]);

        kmem_free(ibtl_async_did,
            ibtl_async_thread_init * sizeof (kt_did_t));
    }
    mutex_destroy(&ibtl_cq_mutex);
    cv_destroy(&ibtl_cq_cv);

    mutex_destroy(&ibtl_async_mutex);
    cv_destroy(&ibtl_async_cv);
    cv_destroy(&ibtl_clnt_cv);
}