/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * InfiniBand Device Management Agent for IB storage.
 */

#include <sys/conf.h>
#include <sys/file.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/priv.h>
#include <sys/sysmacros.h>

#include <sys/ib/ibtl/ibti.h>		/* IB public interfaces */

#include <sys/ib/mgt/ibdma/ibdma.h>
#include <sys/ib/mgt/ibdma/ibdma_impl.h>

/*
 * NOTE: The IB Device Management Agent function, like other IB
 * managers and agents, is best implemented as a kernel misc.
 * module.
 * Eventually we could modify IBT_DM_AGENT so that we don't need to
 * open each HCA to receive asynchronous events.
 */

#define	IBDMA_NAME_VERSION	"IB Device Management Agent"

extern struct mod_ops mod_miscops;

static void ibdma_ibt_async_handler(void *clnt, ibt_hca_hdl_t hdl,
	ibt_async_code_t code, ibt_async_event_t *event);

static void ibdma_mad_recv_cb(ibmf_handle_t ibmf_hdl,
	ibmf_msg_t *msgp, void *args);
static void ibdma_create_resp_mad(ibmf_msg_t *msgp);

/*
 * Misc. kernel module for now.
 */
static struct modlmisc modlmisc = {
	&mod_miscops,
	IBDMA_NAME_VERSION
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlmisc, NULL
};

static ibt_clnt_modinfo_t ibdma_ibt_modinfo = {
	IBTI_V_CURR,
	IBT_DM_AGENT,
	ibdma_ibt_async_handler,
	NULL,
	"ibdma"
};

/*
 * Module global state allocated at init().
 */
static ibdma_mod_state_t	*ibdma = NULL;

/*
 * Init/Fini handlers and IBTL HCA management prototypes.
 */
static int ibdma_init();
static int ibdma_fini();
static int ibdma_ibt_init();
static void ibdma_ibt_fini();
static ibdma_hca_t *ibdma_hca_init(ib_guid_t guid);
static void ibdma_hca_fini(ibdma_hca_t *hca);
static ibdma_hca_t *ibdma_find_hca(ib_guid_t guid);

/*
 * DevMgmt Agent MAD attribute handler prototypes.
 */
static void ibdma_get_class_portinfo(ibmf_msg_t *msg);
static void ibdma_get_io_unitinfo(ibdma_hca_t *hca, ibmf_msg_t *msg);
static void ibdma_get_ioc_profile(ibdma_hca_t *hca, ibmf_msg_t *msg);
static void ibdma_get_ioc_services(ibdma_hca_t *hca, ibmf_msg_t *msg);
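
/*
 * Forward declarations for the controller-slot state helpers defined
 * near the end of this file; ibdma_get_ioc_state() is referenced by
 * the MAD attribute handlers above its definition.
 */
static void ibdma_set_ioc_state(ibdma_hca_t *hca, int slot,
	ibdma_ioc_state_t state);
static ibdma_ioc_state_t ibdma_get_ioc_state(ibdma_hca_t *hca, int slot);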

/*
 * _init()
 */
int
_init(void)
{
	int	status;

	ASSERT(ibdma == NULL);

	ibdma = kmem_zalloc(sizeof (*ibdma), KM_SLEEP);
	ASSERT(ibdma != NULL);

	status = ibdma_init();
	if (status != DDI_SUCCESS) {
		kmem_free(ibdma, sizeof (*ibdma));
		ibdma = NULL;
		return (status);
	}

	status = mod_install(&modlinkage);
	if (status != DDI_SUCCESS) {
		cmn_err(CE_NOTE, "_init, mod_install error (%d)", status);
		(void) ibdma_fini();
		kmem_free(ibdma, sizeof (*ibdma));
		ibdma = NULL;
	}
	return (status);
}

/*
 * _info()
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * _fini()
 */
int
_fini(void)
{
	int		status;
	int		slot;
	ibdma_hca_t	*hca;

	status = mod_remove(&modlinkage);
	if (status != DDI_SUCCESS) {
		cmn_err(CE_NOTE, "_fini, mod_remove error (%d)", status);
		return (status);
	}

	/*
	 * Sanity check to catch any consumer that has not cleaned
	 * up appropriately.
	 */
	mutex_enter(&ibdma->ms_hca_list_lock);
	hca = list_head(&ibdma->ms_hca_list);
	while (hca != NULL) {
		for (slot = 0; slot < IBDMA_MAX_IOC; slot++) {
			if (hca->ih_ioc[slot].ii_inuse) {
				cmn_err(CE_NOTE, "_fini, IOC %d still attached"
				    " for (0x%0llx)", slot+1,
				    (u_longlong_t)hca->ih_iou_guid);
			}
		}
		hca = list_next(&ibdma->ms_hca_list, hca);
	}
	mutex_exit(&ibdma->ms_hca_list_lock);

	(void) ibdma_fini();
	kmem_free(ibdma, sizeof (*ibdma));
	return (status);
}

/*
 * ibdma_init()
 *
 * Initialize the I/O Unit structure, generate the initial HCA list,
 * and register each HCA port with the IBMF.
 */
static int
ibdma_init()
{
	int	status;

	/*
	 * Global lock and I/O Unit initialization.
	 */
	mutex_init(&ibdma->ms_hca_list_lock, NULL, MUTEX_DRIVER, NULL);

	/*
	 * Discover IB hardware and set up for device management
	 * agent support.
	 */
	status = ibdma_ibt_init();
	if (status != DDI_SUCCESS) {
		cmn_err(CE_NOTE, "ibdma_init, ibt_attach failed (%d)",
		    status);
		mutex_destroy(&ibdma->ms_hca_list_lock);
		return (status);
	}

	return (status);
}

/*
 * ibdma_fini()
 *
 * Release resources when the module is no longer in use.
 */
static int
ibdma_fini()
{
	ibdma_ibt_fini();
	mutex_destroy(&ibdma->ms_hca_list_lock);
	return (DDI_SUCCESS);
}

/*
 * ibdma_ibt_async_handler()
 */
/* ARGSUSED */
static void
ibdma_ibt_async_handler(void *clnt, ibt_hca_hdl_t hdl,
	ibt_async_code_t code, ibt_async_event_t *event)
{
	ibdma_hca_t	*hca;

	switch (code) {

	case IBT_EVENT_PORT_UP:
	case IBT_ERROR_PORT_DOWN:
		break;

	case IBT_HCA_ATTACH_EVENT:
		mutex_enter(&ibdma->ms_hca_list_lock);
		hca = ibdma_hca_init(event->ev_hca_guid);
		if (hca != NULL) {
			list_insert_tail(&ibdma->ms_hca_list, hca);
			cmn_err(CE_NOTE, "hca ibt hdl (%p)",
			    (void *)hca->ih_ibt_hdl);
			ibdma->ms_num_hcas++;
		}
		mutex_exit(&ibdma->ms_hca_list_lock);
		break;

	case IBT_HCA_DETACH_EVENT:
		mutex_enter(&ibdma->ms_hca_list_lock);
		hca = ibdma_find_hca(event->ev_hca_guid);
		if (hca != NULL) {
			list_remove(&ibdma->ms_hca_list, hca);
			cmn_err(CE_NOTE, "removing hca (%p) (0x%llx)",
			    (void *)hca,
			    (u_longlong_t)hca->ih_iou_guid);
			ibdma_hca_fini(hca);
		}
		mutex_exit(&ibdma->ms_hca_list_lock);
		break;

	default:
		cmn_err(CE_NOTE, "ibt_async_handler, unhandled event (%d)",
		    code);
		break;
	}
}

/*
 * ibdma_ibt_init()
 */
static int
ibdma_ibt_init()
{
	int		status;
	int		hca_cnt;
	int		hca_ndx;
	ib_guid_t	*guid;
	ibdma_hca_t	*hca;

	/*
	 * Attach to IBTF and get the HCA list.
	 */
	status = ibt_attach(&ibdma_ibt_modinfo, NULL,
	    ibdma, &ibdma->ms_ibt_hdl);
	if (status != DDI_SUCCESS) {
		cmn_err(CE_NOTE, "ibt_init, ibt_attach failed (%d)",
		    status);
		return (status);
	}

	list_create(&ibdma->ms_hca_list, sizeof (ibdma_hca_t),
	    offsetof(ibdma_hca_t, ih_node));

	hca_cnt = ibt_get_hca_list(&guid);
	if (hca_cnt < 1) {
#ifdef DEBUG_IBDMA
		cmn_err(CE_NOTE, "ibt_init, no HCA(s) found");
#endif
		/* not an error if no HCAs, but nothing more to do here */
		return (DDI_SUCCESS);
	}

	mutex_enter(&ibdma->ms_hca_list_lock);

	for (hca_ndx = 0; hca_ndx < hca_cnt; hca_ndx++) {
#ifdef DEBUG_IBDMA
		cmn_err(CE_NOTE, "adding hca GUID(0x%llx)",
		    (u_longlong_t)guid[hca_ndx]);
#endif

		hca = ibdma_hca_init(guid[hca_ndx]);
		if (hca == NULL) {
			cmn_err(CE_NOTE, "ibt_init, hca_init GUID(0x%llx)"
			    " failed", (u_longlong_t)guid[hca_ndx]);
			continue;
		}
		list_insert_tail(&ibdma->ms_hca_list, hca);
		ibdma->ms_num_hcas++;
	}

	mutex_exit(&ibdma->ms_hca_list_lock);

	ibt_free_hca_list(guid, hca_cnt);
#ifdef DEBUG_IBDMA
	cmn_err(CE_NOTE, "Added %d HCA(s)",
	    ibdma->ms_num_hcas);
#endif
	return (DDI_SUCCESS);
}

/*
 * ibdma_ibt_fini()
 */
static void
ibdma_ibt_fini()
{
	ibdma_hca_t	*hca;
	ibdma_hca_t	*next;

	mutex_enter(&ibdma->ms_hca_list_lock);
	hca = list_head(&ibdma->ms_hca_list);
	while (hca != NULL) {
		next = list_next(&ibdma->ms_hca_list, hca);
		list_remove(&ibdma->ms_hca_list, hca);
#ifdef DEBUG_IBDMA
		cmn_err(CE_NOTE, "removing hca (%p) (0x%llx)",
		    (void *)hca,
		    (u_longlong_t)hca->ih_iou_guid);
		cmn_err(CE_NOTE, "hca ibt hdl (%p)",
		    (void *)hca->ih_ibt_hdl);
#endif
		ibdma_hca_fini(hca);
		hca = next;
	}
	list_destroy(&ibdma->ms_hca_list);

	(void) ibt_detach(ibdma->ms_ibt_hdl);
	ibdma->ms_ibt_hdl = NULL;
	ibdma->ms_num_hcas = 0;
	mutex_exit(&ibdma->ms_hca_list_lock);
}
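
/*
 * Note that ibdma_hca_init() sets the I/O Unit GUID to the HCA GUID,
 * so ibdma_find_hca() below is effectively a lookup by HCA GUID.
 */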
/*
 * ibdma_find_hca()
 */
static ibdma_hca_t *
ibdma_find_hca(ib_guid_t guid)
{
	ibdma_hca_t	*hca;

	ASSERT(mutex_owned(&ibdma->ms_hca_list_lock));

	hca = list_head(&ibdma->ms_hca_list);
	while (hca != NULL) {
		if (hca->ih_iou_guid == guid) {
			break;
		}
		hca = list_next(&ibdma->ms_hca_list, hca);
	}
	return (hca);
}

/*
 * ibdma_hca_init()
 */
static ibdma_hca_t *
ibdma_hca_init(ib_guid_t guid)
{
	ibt_status_t	status;
	ibdma_hca_t	*hca;
	ibdma_port_t	*port;
	ibt_hca_attr_t	hca_attr;
	int		ndx;

	ASSERT(mutex_owned(&ibdma->ms_hca_list_lock));

	status = ibt_query_hca_byguid(guid, &hca_attr);
	if (status != IBT_SUCCESS) {
		cmn_err(CE_NOTE, "hca_init HCA query error (%d)",
		    status);
		return (NULL);
	}

	if (ibdma_find_hca(guid) != NULL) {
#ifdef DEBUG_IBDMA
		cmn_err(CE_NOTE, "hca_init HCA already exists");
#endif
		return (NULL);
	}
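
	/*
	 * ibdma_hca_t evidently ends in a one-entry port array (see the
	 * ih_port[ndx] accesses below), so the allocation reserves in-line
	 * storage for the remaining (hca_nports - 1) ports; the error path
	 * here and ibdma_hca_fini() free with the same size arithmetic.
	 */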
	hca = kmem_zalloc(sizeof (ibdma_hca_t) +
	    (hca_attr.hca_nports-1)*sizeof (ibdma_port_t), KM_SLEEP);
	ASSERT(hca != NULL);

	hca->ih_nports = hca_attr.hca_nports;

	rw_init(&hca->ih_iou_rwlock, NULL, RW_DRIVER, NULL);
	rw_enter(&hca->ih_iou_rwlock, RW_WRITER);
	hca->ih_iou_guid = guid;
	hca->ih_iou.iou_changeid = h2b16(1);
	hca->ih_iou.iou_num_ctrl_slots = IBDMA_MAX_IOC;
	hca->ih_iou.iou_flag = IB_DM_IOU_OPTIONROM_ABSENT;

	list_create(&hca->ih_hdl_list, sizeof (ibdma_hdl_impl_t),
	    offsetof(ibdma_hdl_impl_t, ih_node));
	rw_exit(&hca->ih_iou_rwlock);

	/*
	 * We would prefer not to open the HCA, but IBTL only delivers
	 * asynchronous events to certain managers when the HCA is open.
	 */
	status = ibt_open_hca(ibdma->ms_ibt_hdl, guid, &hca->ih_ibt_hdl);
	if (status != IBT_SUCCESS) {
		cmn_err(CE_NOTE, "hca_init() IBT open failed (%d)",
		    status);

		list_destroy(&hca->ih_hdl_list);
		rw_destroy(&hca->ih_iou_rwlock);
		kmem_free(hca, sizeof (ibdma_hca_t) +
		    (hca_attr.hca_nports-1)*sizeof (ibdma_port_t));
		return (NULL);
	}

	/*
	 * Register with the IB Management Framework and set up the MAD
	 * call-back for each port.
	 */
	for (ndx = 0; ndx < hca->ih_nports; ndx++) {
		port = &hca->ih_port[ndx];
		port->ip_hcap = hca;
		port->ip_ibmf_reg.ir_ci_guid = hca->ih_iou_guid;
		port->ip_ibmf_reg.ir_port_num = ndx + 1;
		port->ip_ibmf_reg.ir_client_class = DEV_MGT_AGENT;

		status = ibmf_register(&port->ip_ibmf_reg, IBMF_VERSION,
		    0, NULL, NULL, &port->ip_ibmf_hdl, &port->ip_ibmf_caps);
		if (status != IBMF_SUCCESS) {
			cmn_err(CE_NOTE, "hca_init, IBMF register failed (%d)",
			    status);
			port->ip_ibmf_hdl = NULL;
			ibdma_hca_fini(hca);
			return (NULL);
		}

		status = ibmf_setup_async_cb(port->ip_ibmf_hdl,
		    IBMF_QP_HANDLE_DEFAULT, ibdma_mad_recv_cb, port, 0);
		if (status != IBMF_SUCCESS) {
			cmn_err(CE_NOTE, "hca_init, IBMF cb setup failed (%d)",
			    status);
			ibdma_hca_fini(hca);
			return (NULL);
		}

		status = ibt_modify_port_byguid(hca->ih_iou_guid,
		    ndx+1, IBT_PORT_SET_DEVMGT, 0);
		if (status != IBT_SUCCESS) {
			cmn_err(CE_NOTE, "hca_init, IBT modify port caps"
			    " error (%d)", status);
			ibdma_hca_fini(hca);
			return (NULL);
		}
	}
	return (hca);
}

/*
 * ibdma_hca_fini()
 */
static void
ibdma_hca_fini(ibdma_hca_t *hca)
{
	int			status;
	int			ndx;
	ibdma_port_t		*port;
	ibdma_hdl_impl_t	*hdl;
	ibdma_hdl_impl_t	*hdl_next;

	ASSERT(mutex_owned(&ibdma->ms_hca_list_lock));
	ASSERT(hca != NULL);

	rw_enter(&hca->ih_iou_rwlock, RW_WRITER);

	/*
	 * All handles should have been de-registered, but release
	 * any that are still outstanding.
	 */
	hdl = list_head(&hca->ih_hdl_list);
	while (hdl != NULL) {
		hdl_next = list_next(&hca->ih_hdl_list, hdl);
		list_remove(&hca->ih_hdl_list, hdl);
		cmn_err(CE_NOTE, "hca_fini, unexpected ibdma user handle"
		    " exists");
		kmem_free(hdl, sizeof (*hdl));
		hdl = hdl_next;
	}
	list_destroy(&hca->ih_hdl_list);

	/*
	 * Un-register with the IBMF.
	 */
	for (ndx = 0; ndx < hca->ih_nports; ndx++) {
		port = &hca->ih_port[ndx];
		port->ip_hcap = NULL;

		status = ibt_modify_port_byguid(hca->ih_iou_guid,
		    ndx+1, IBT_PORT_RESET_DEVMGT, 0);
		if (status != IBT_SUCCESS)
			cmn_err(CE_NOTE, "hca_fini, IBT modify port caps"
			    " error (%d)", status);

		if (port->ip_ibmf_hdl == NULL)
			continue;

		status = ibmf_tear_down_async_cb(port->ip_ibmf_hdl,
		    IBMF_QP_HANDLE_DEFAULT, 0);
		if (status != IBMF_SUCCESS)
			cmn_err(CE_NOTE, "hca_fini, IBMF tear down cb"
			    " error (%d)", status);

		status = ibmf_unregister(&port->ip_ibmf_hdl, 0);
		if (status != IBMF_SUCCESS)
			cmn_err(CE_NOTE, "hca_fini, IBMF un-register"
			    " error (%d)", status);
		port->ip_ibmf_hdl = NULL;
	}

	status = ibt_close_hca(hca->ih_ibt_hdl);
	if (status != IBT_SUCCESS)
		cmn_err(CE_NOTE, "hca_fini close error (%d)", status);

	rw_exit(&hca->ih_iou_rwlock);
	rw_destroy(&hca->ih_iou_rwlock);
	kmem_free(hca, sizeof (ibdma_hca_t) +
	    (hca->ih_nports-1) * sizeof (ibdma_port_t));
}

/* DM IBMF MAD handlers */
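
/*
 * Layout of the response send buffer as constructed by
 * ibdma_create_resp_mad() below (one IBDMA_MAD_SIZE allocation):
 *
 *	im_bufs_mad_hdr -> +-------------------------------------------+
 *	                   | ib_mad_hdr_t                              |
 *	im_bufs_cl_hdr  -> +-------------------------------------------+
 *	                   | DM class header (IBDMA_DM_MAD_HDR_SIZE)   |
 *	im_bufs_cl_data -> +-------------------------------------------+
 *	                   | class payload (remainder of the MAD)      |
 *	                   +-------------------------------------------+
 */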
/*
 * ibdma_create_resp_mad()
 */
static void
ibdma_create_resp_mad(ibmf_msg_t *msgp)
{
	/*
	 * Allocate the send buffer and fix up the header for the response.
	 */
	msgp->im_msgbufs_send.im_bufs_mad_hdr =
	    kmem_zalloc(IBDMA_MAD_SIZE, KM_SLEEP);

	msgp->im_msgbufs_send.im_bufs_cl_hdr = (uchar_t *)
	    msgp->im_msgbufs_send.im_bufs_mad_hdr + sizeof (ib_mad_hdr_t);
	msgp->im_msgbufs_send.im_bufs_cl_hdr_len = IBDMA_DM_MAD_HDR_SIZE;
	msgp->im_msgbufs_send.im_bufs_cl_data =
	    ((char *)msgp->im_msgbufs_send.im_bufs_cl_hdr +
	    IBDMA_DM_MAD_HDR_SIZE);
	msgp->im_msgbufs_send.im_bufs_cl_data_len =
	    IBDMA_MAD_SIZE - sizeof (ib_mad_hdr_t) - IBDMA_DM_MAD_HDR_SIZE;
	(void) memcpy(msgp->im_msgbufs_send.im_bufs_mad_hdr,
	    msgp->im_msgbufs_recv.im_bufs_mad_hdr, IBDMA_MAD_SIZE);

	/*
	 * We may want to support a GRH since this is a GMP; not
	 * required for current SRP device manager platforms.
	 */
#if 0
	if (msgp->im_msg_flags & IBMF_MSG_FLAGS_GLOBAL_ADDRESS) {
		ib_gid_t	temp = msgp->im_global_addr.ig_recver_gid;

		msgp->im_global_addr.ig_recver_gid =
		    msgp->im_global_addr.ig_sender_gid;
		msgp->im_global_addr.ig_sender_gid = temp;
	}
#endif
}

/*
 * ibdma_mad_send_cb()
 */
/* ARGSUSED */
static void
ibdma_mad_send_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msgp, void *arg)
{
	/*
	 * Just free the buffers and release the message.
	 */
	if (msgp->im_msgbufs_send.im_bufs_mad_hdr != NULL) {
		kmem_free(msgp->im_msgbufs_send.im_bufs_mad_hdr,
		    IBDMA_MAD_SIZE);
		msgp->im_msgbufs_send.im_bufs_mad_hdr = NULL;
	}
	if (ibmf_free_msg(ibmf_hdl, &msgp) != IBMF_SUCCESS) {
		cmn_err(CE_NOTE, "mad_send_cb, IBMF message free error");
	}
}

/*
 * ibdma_mad_recv_cb()
 */
static void
ibdma_mad_recv_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msgp, void *args)
{
	int		status;
	ib_mad_hdr_t	*in_mad;
	ib_mad_hdr_t	*out_mad;
	ibdma_port_t	*port = args;

	ASSERT(msgp != NULL);
	ASSERT(port != NULL);

	if (msgp->im_msg_status != IBMF_SUCCESS) {
		cmn_err(CE_NOTE, "mad_recv_cb, bad MAD receive status (%d)",
		    msgp->im_msg_status);
		goto drop;
	}

	in_mad = msgp->im_msgbufs_recv.im_bufs_mad_hdr;

	if (in_mad->MgmtClass != MAD_MGMT_CLASS_DEV_MGT) {
#ifdef DEBUG_IBDMA
		cmn_err(CE_NOTE, "mad_recv_cb, MAD not of Dev Mgmt Class");
#endif
		goto drop;
	}

	ibdma_create_resp_mad(msgp);
	out_mad = msgp->im_msgbufs_send.im_bufs_mad_hdr;

	out_mad->R_Method = IB_DM_DEVMGT_METHOD_GET_RESP;
	out_mad->Status = 0;

	if (in_mad->R_Method == MAD_METHOD_SET) {
#ifdef DEBUG_IBDMA
		cmn_err(CE_NOTE, "mad_recv_cb, no attributes supported"
		    " for set");
#endif
		out_mad->Status = MAD_STATUS_UNSUPP_METHOD_ATTR;
		goto send_resp;
	}

	if (in_mad->R_Method != MAD_METHOD_GET) {
#ifdef DEBUG_IBDMA
		cmn_err(CE_NOTE, "mad_recv_cb, unsupported method");
#endif
		out_mad->Status = MAD_STATUS_UNSUPP_METHOD;
		goto send_resp;
	}

	/*
	 * Process the GET method.
	 */
	switch (b2h16(in_mad->AttributeID)) {

	case IB_DM_ATTR_CLASSPORTINFO:
		ibdma_get_class_portinfo(msgp);
		break;

	case IB_DM_ATTR_IO_UNITINFO:
		ibdma_get_io_unitinfo(port->ip_hcap, msgp);
		break;

	case IB_DM_ATTR_IOC_CTRL_PROFILE:
		ibdma_get_ioc_profile(port->ip_hcap, msgp);
		break;

	case IB_DM_ATTR_SERVICE_ENTRIES:
		ibdma_get_ioc_services(port->ip_hcap, msgp);
		break;

	default:
		out_mad->Status = MAD_STATUS_UNSUPP_METHOD_ATTR;
		break;
	}

send_resp:
	status = ibmf_msg_transport(ibmf_hdl, IBMF_QP_HANDLE_DEFAULT,
	    msgp, NULL, ibdma_mad_send_cb, NULL, 0);
	if (status != IBMF_SUCCESS) {
		cmn_err(CE_NOTE, "mad_recv_cb, send error (%d)", status);
		ibdma_mad_send_cb(ibmf_hdl, msgp, NULL);
	}
	return;

drop:
	status = ibmf_free_msg(ibmf_hdl, &msgp);
	if (status != IBMF_SUCCESS) {
		cmn_err(CE_NOTE, "mad_recv_cb, error dropping (%d)",
		    status);
	}
}

/*
 * ibdma_get_class_portinfo()
 */
static void
ibdma_get_class_portinfo(ibmf_msg_t *msg)
{
	ib_mad_classportinfo_t	*cpip;

	cpip = (ib_mad_classportinfo_t *)msg->im_msgbufs_send.im_bufs_cl_data;
	bzero(cpip, sizeof (*cpip));
	cpip->BaseVersion = MAD_CLASS_BASE_VERS_1;
	cpip->ClassVersion = IB_DM_CLASS_VERSION_1;
	cpip->RespTimeValue = h2b32(IBDMA_DM_RESP_TIME);
}

/*
 * ibdma_get_io_unitinfo()
 */
static void
ibdma_get_io_unitinfo(ibdma_hca_t *hca, ibmf_msg_t *msg)
{
	ib_dm_io_unitinfo_t	*uip;

	uip = (ib_dm_io_unitinfo_t *)msg->im_msgbufs_send.im_bufs_cl_data;
	rw_enter(&hca->ih_iou_rwlock, RW_READER);
	bcopy(&hca->ih_iou, uip, sizeof (ib_dm_io_unitinfo_t));
	rw_exit(&hca->ih_iou_rwlock);
}
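
/*
 * For IOCControllerProfile (and ServiceEntries below), the MAD
 * AttributeModifier carries a 1-based controller slot, which is
 * validated against IBDMA_MAX_IOC and converted to the 0-based
 * index used internally.
 */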
/*
 * ibdma_get_ioc_profile()
 */
static void
ibdma_get_ioc_profile(ibdma_hca_t *hca, ibmf_msg_t *msg)
{
	ib_dm_ioc_ctrl_profile_t	*iocp;
	uint32_t			slot;

	ASSERT(msg != NULL);

	slot = b2h32(msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeModifier);
	iocp = (ib_dm_ioc_ctrl_profile_t *)
	    msg->im_msgbufs_send.im_bufs_cl_data;
	if (slot == 0 || slot > IBDMA_MAX_IOC) {
		msg->im_msgbufs_send.im_bufs_mad_hdr->Status =
		    MAD_STATUS_INVALID_FIELD;
		return;
	}

	slot--;
	rw_enter(&hca->ih_iou_rwlock, RW_READER);
	if (ibdma_get_ioc_state(hca, slot) == IBDMA_IOC_PRESENT) {
		bcopy(&hca->ih_ioc[slot].ii_profile, iocp,
		    sizeof (ib_dm_ioc_ctrl_profile_t));
	} else {
		msg->im_msgbufs_send.im_bufs_mad_hdr->Status =
		    IB_DM_DEVMGT_MAD_STAT_NORESP;
	}
	rw_exit(&hca->ih_iou_rwlock);
}

/*
 * ibdma_get_ioc_services()
 */
static void
ibdma_get_ioc_services(ibdma_hca_t *hca, ibmf_msg_t *msg)
{
	ib_dm_srv_t	*to_svcp;
	ib_dm_srv_t	*from_svcp;
	uint32_t	slot;
	uint8_t		hi;
	uint8_t		low;

	ASSERT(msg != NULL);

	slot = b2h32(msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeModifier);
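	/*
	 * The ServiceEntries AttributeModifier packs three fields:
	 * bits 31..16 carry the 1-based IOC slot, bits 15..8 the index
	 * of the last service entry requested, and bits 7..0 the index
	 * of the first.
	 */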
	hi = (slot >> 8) & 0x00FF;
	low = slot & 0x00FF;
	slot = (slot >> 16) & 0x0FFFF;
	if (slot == 0 || slot > IBDMA_MAX_IOC) {
		msg->im_msgbufs_send.im_bufs_mad_hdr->Status =
		    MAD_STATUS_INVALID_FIELD;
		return;
	}

	slot--;

	rw_enter(&hca->ih_iou_rwlock, RW_READER);
	if (ibdma_get_ioc_state(hca, slot) != IBDMA_IOC_PRESENT) {
		msg->im_msgbufs_send.im_bufs_mad_hdr->Status =
		    IB_DM_DEVMGT_MAD_STAT_NORESP;
		rw_exit(&hca->ih_iou_rwlock);
		return;
	}

	if ((low > hi) || (hi - low > 4)) {
		msg->im_msgbufs_send.im_bufs_mad_hdr->Status =
		    MAD_STATUS_INVALID_FIELD;
		rw_exit(&hca->ih_iou_rwlock);
		return;
	}

	/* hi is a 0-based entry index, so reject hi == entry count */
	if (hi >= hca->ih_ioc[slot].ii_profile.ioc_service_entries) {
		msg->im_msgbufs_send.im_bufs_mad_hdr->Status =
		    MAD_STATUS_INVALID_FIELD;
		rw_exit(&hca->ih_iou_rwlock);
		return;
	}

	to_svcp = (ib_dm_srv_t *)msg->im_msgbufs_send.im_bufs_cl_data;
	from_svcp = hca->ih_ioc[slot].ii_srvcs + low;
	bcopy(from_svcp, to_svcp, sizeof (ib_dm_srv_t) * (hi - low + 1));
	rw_exit(&hca->ih_iou_rwlock);
}


/*
 * Client API internal helpers
 */

/*
 * ibdma_get_hdl_impl()
 */
ibdma_hdl_impl_t *
ibdma_get_hdl_impl(ibdma_hdl_t hdl)
{
	ibdma_hca_t		*hca;
	ibdma_hdl_impl_t	*hdl_tmp = hdl;
	ibdma_hdl_impl_t	*hdl_impl;

	ASSERT(mutex_owned(&ibdma->ms_hca_list_lock));

	if (hdl_tmp == NULL) {
		cmn_err(CE_NOTE, "get_hdl_impl, NULL handle");
		return (NULL);
	}

	hca = ibdma_find_hca(hdl_tmp->ih_iou_guid);
	if (hca == NULL) {
		cmn_err(CE_NOTE, "get_hdl_impl, invalid handle, bad IOU");
		return (NULL);
	}

	hdl_impl = list_head(&hca->ih_hdl_list);
	while (hdl_impl != NULL) {
		if (hdl_impl == hdl_tmp) {
			break;
		}
		hdl_impl = list_next(&hca->ih_hdl_list, hdl_impl);
	}
	return (hdl_impl);
}
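
/*
 * IOC slot states are 4-bit values packed two per byte in the I/O
 * Unit's iou_ctrl_list[]; an even-numbered slot occupies the high
 * nibble of its byte and an odd-numbered slot the low nibble, which
 * is the encoding the two helpers below implement.
 */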
/*
 * ibdma_set_ioc_state()
 *
 * slot should be 0 based (not the DM 1 based slot).
 *
 * I/O Unit write lock should be held outside of this function.
 */
static void
ibdma_set_ioc_state(ibdma_hca_t *hca, int slot, ibdma_ioc_state_t state)
{
	uint8_t		cur;
	uint16_t	id;

	cur = hca->ih_iou.iou_ctrl_list[slot >> 1];
	if (slot & 1) {
		cur = (cur & 0xF0) | state;
	} else {
		cur = (cur & 0x0F) | (state << 4);
	}
	hca->ih_iou.iou_ctrl_list[slot >> 1] = cur;
	id = b2h16(hca->ih_iou.iou_changeid);
	id++;
	hca->ih_iou.iou_changeid = h2b16(id);
#ifdef DEBUG_IBDMA
	cmn_err(CE_NOTE, "set_ioc_state, slot offset(%d), value(%d)",
	    slot, hca->ih_iou.iou_ctrl_list[slot >> 1]);
#endif
}

/*
 * ibdma_get_ioc_state()
 *
 * slot should be 0 based (not the DM 1 based slot).
 *
 * I/O Unit read lock should be held outside of this function.
 */
static ibdma_ioc_state_t
ibdma_get_ioc_state(ibdma_hca_t *hca, int slot)
{
	uint8_t		cur;

	if (slot >= IBDMA_MAX_IOC)
		return (0xFF);

	cur = hca->ih_iou.iou_ctrl_list[slot >> 1];
	cur = slot & 1 ? cur & 0x0F : cur >> 4;
	return (cur);
}

/* CLIENT API Implementation */

/*
 * ibdma_ioc_register()
 */
ibdma_hdl_t
ibdma_ioc_register(ib_guid_t iou_guid, ib_dm_ioc_ctrl_profile_t *profile,
	ib_dm_srv_t *services)
{
	int		free_slot = -1;
	int		svc_entries;
	int		slot;
	ibdma_hca_t	*hca;
	ibdma_hdl_impl_t *hdl;

	if (profile == NULL || services == NULL) {
		cmn_err(CE_NOTE, "ioc_register, bad parameter");
		return (NULL);
	}

	svc_entries = profile->ioc_service_entries;
	if (svc_entries == 0) {
		cmn_err(CE_NOTE, "ioc_register, bad profile, no service"
		    " entries");
		return (NULL);
	}

	/*
	 * Find the associated I/O Unit.
	 */
	mutex_enter(&ibdma->ms_hca_list_lock);
	hca = ibdma_find_hca(iou_guid);
	if (hca == NULL) {
		mutex_exit(&ibdma->ms_hca_list_lock);
		cmn_err(CE_NOTE, "ioc_register, bad I/O Unit GUID (0x%llx)",
		    (u_longlong_t)iou_guid);
		return (NULL);
	}

	rw_enter(&hca->ih_iou_rwlock, RW_WRITER);
	for (slot = 0; slot < IBDMA_MAX_IOC; slot++) {
		if (hca->ih_ioc[slot].ii_inuse == 0) {
			if (free_slot == -1) {
				free_slot = slot;
			}
			continue;
		}

		if (profile->ioc_guid ==
		    hca->ih_ioc[slot].ii_profile.ioc_guid) {
			rw_exit(&hca->ih_iou_rwlock);
			mutex_exit(&ibdma->ms_hca_list_lock);
#ifdef DEBUG_IBDMA
			cmn_err(CE_NOTE, "ioc_register, IOC previously"
			    " registered");
#endif
			return (NULL);
		}
	}

	if (free_slot < 0) {
		rw_exit(&hca->ih_iou_rwlock);
		mutex_exit(&ibdma->ms_hca_list_lock);
		cmn_err(CE_NOTE, "ioc_register, error - I/O Unit full");
		return (NULL);
	}
#ifdef DEBUG_IBDMA
	cmn_err(CE_NOTE, "ibdma_ioc_register, assigned to 0 based slot (%d)",
	    free_slot);
#endif

	hca->ih_ioc[free_slot].ii_inuse = 1;
	hca->ih_ioc[free_slot].ii_slot = free_slot;
	hca->ih_ioc[free_slot].ii_hcap = hca;

	/*
	 * Allocate a local copy of the profile and services.
	 */
	hca->ih_ioc[free_slot].ii_srvcs =
	    kmem_zalloc(sizeof (ib_dm_srv_t) * svc_entries, KM_SLEEP);
	bcopy(profile, &hca->ih_ioc[free_slot].ii_profile,
	    sizeof (ib_dm_ioc_ctrl_profile_t));
	bcopy(services, hca->ih_ioc[free_slot].ii_srvcs,
	    sizeof (ib_dm_srv_t) * svc_entries);

	/*
	 * Update the caller's profile with the I/O controller slot
	 * assigned. The slot occupies the lower 8 bits of the vendor
	 * ID/slot 32-bit field.
	 */
	profile->ioc_vendorid |= h2b32(free_slot);

	ibdma_set_ioc_state(hca, free_slot, IBDMA_IOC_PRESENT);

	hdl = kmem_alloc(sizeof (*hdl), KM_SLEEP);
	hdl->ih_iou_guid = hca->ih_iou_guid;
	hdl->ih_ioc_ndx = (uint8_t)free_slot;
	list_insert_tail(&hca->ih_hdl_list, hdl);

	rw_exit(&hca->ih_iou_rwlock);
	mutex_exit(&ibdma->ms_hca_list_lock);

	return ((ibdma_hdl_t)hdl);
}
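
/*
 * Illustrative note on the slot packing above (the exact field layout
 * is an assumption based on the comment in ibdma_ioc_register()): with
 * a 24-bit vendor ID of 0xABCDEF already in the upper bits of the
 * big-endian ioc_vendorid field, assignment to 0-based slot 5 leaves
 * the field holding 0xABCDEF05.
 */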

/*
 * ibdma_ioc_unregister()
 */
ibdma_status_t
ibdma_ioc_unregister(ibdma_hdl_t hdl)
{
	ibdma_ioc_t		*ioc;
	ibdma_hca_t		*hca;
	int			slot;
	ibdma_hdl_impl_t	*hdl_tmp = hdl;
	ibdma_hdl_impl_t	*hdl_impl;

	if (hdl == NULL) {
		cmn_err(CE_NOTE, "ioc_unregister, NULL handle");
		return (IBDMA_BAD_PARAM);
	}

	mutex_enter(&ibdma->ms_hca_list_lock);
	hca = ibdma_find_hca(hdl_tmp->ih_iou_guid);
	if (hca == NULL) {
		cmn_err(CE_NOTE, "ioc_unregister, invalid handle, IOU"
		    " not found");
		mutex_exit(&ibdma->ms_hca_list_lock);
		return (IBDMA_BAD_PARAM);
	}

	hdl_impl = list_head(&hca->ih_hdl_list);
	while (hdl_impl != NULL) {
		if (hdl_impl == hdl_tmp) {
			break;
		}
		hdl_impl = list_next(&hca->ih_hdl_list, hdl_impl);
	}

	if (hdl_impl == NULL) {
		cmn_err(CE_NOTE, "ioc_unregister, invalid handle, not found");
		mutex_exit(&ibdma->ms_hca_list_lock);
		return (IBDMA_BAD_PARAM);
	}

	list_remove(&hca->ih_hdl_list, hdl_impl);

	if (hdl_impl->ih_ioc_ndx >= IBDMA_MAX_IOC) {
		cmn_err(CE_NOTE, "ioc_unregister, corrupted handle");
		kmem_free(hdl_impl, sizeof (*hdl_impl));
		mutex_exit(&ibdma->ms_hca_list_lock);
		return (IBDMA_BAD_PARAM);
	}
	ioc = &hca->ih_ioc[hdl_impl->ih_ioc_ndx];

	kmem_free(hdl_impl, sizeof (*hdl_impl));

	if (ioc->ii_slot >= IBDMA_MAX_IOC) {
		cmn_err(CE_NOTE, "ioc_unregister, IOC corrupted, bad"
		    " slot in IOC");
		mutex_exit(&ibdma->ms_hca_list_lock);
		return (IBDMA_BAD_PARAM);
	}

	rw_enter(&ioc->ii_hcap->ih_iou_rwlock, RW_WRITER);
	if (ioc->ii_inuse == 0) {
		rw_exit(&ioc->ii_hcap->ih_iou_rwlock);
		mutex_exit(&ibdma->ms_hca_list_lock);
		cmn_err(CE_NOTE, "ioc_unregister, slot not in use (%d)",
		    ioc->ii_slot+1);
		return (IBDMA_BAD_PARAM);
	}

	ASSERT(ioc->ii_srvcs != NULL);

	slot = ioc->ii_slot;
	hca = ioc->ii_hcap;
	kmem_free(ioc->ii_srvcs, sizeof (ib_dm_srv_t) *
	    ioc->ii_profile.ioc_service_entries);
	bzero(ioc, sizeof (ibdma_ioc_t));
	ibdma_set_ioc_state(hca, slot, IBDMA_IOC_NOT_INSTALLED);

	rw_exit(&hca->ih_iou_rwlock);
	mutex_exit(&ibdma->ms_hca_list_lock);

	return (IBDMA_SUCCESS);
}

/*
 * ibdma_ioc_update()
 */
ibdma_status_t
ibdma_ioc_update(ibdma_hdl_t hdl, ib_dm_ioc_ctrl_profile_t *profile,
	ib_dm_srv_t *services)
{
	ibdma_ioc_t		*ioc;
	ibdma_hca_t		*hca;
	ibdma_hdl_impl_t	*hdl_tmp = hdl;
	ibdma_hdl_impl_t	*hdl_impl;

	if (hdl == NULL) {
		cmn_err(CE_NOTE, "ioc_update, NULL handle");
		return (IBDMA_BAD_PARAM);
	}

	if (profile == NULL || services == NULL) {
		cmn_err(CE_NOTE, "ioc_update, NULL parameter");
		return (IBDMA_BAD_PARAM);
	}

	mutex_enter(&ibdma->ms_hca_list_lock);
	hca = ibdma_find_hca(hdl_tmp->ih_iou_guid);
	if (hca == NULL) {
		cmn_err(CE_NOTE, "ioc_update, invalid handle, IOU not found");
		mutex_exit(&ibdma->ms_hca_list_lock);
		return (IBDMA_BAD_PARAM);
	}

	hdl_impl = list_head(&hca->ih_hdl_list);
	while (hdl_impl != NULL) {
		if (hdl_impl == hdl_tmp) {
			break;
		}
		hdl_impl = list_next(&hca->ih_hdl_list, hdl_impl);
	}

	if (hdl_impl == NULL) {
		cmn_err(CE_NOTE, "ioc_update, invalid handle, not found");
		mutex_exit(&ibdma->ms_hca_list_lock);
		return (IBDMA_BAD_PARAM);
	}

	if (hdl_impl->ih_ioc_ndx >= IBDMA_MAX_IOC) {
		cmn_err(CE_NOTE, "ioc_update, corrupted handle");
		mutex_exit(&ibdma->ms_hca_list_lock);
		return (IBDMA_BAD_PARAM);
	}
	ioc = &hca->ih_ioc[hdl_impl->ih_ioc_ndx];

	if (ioc->ii_slot >= IBDMA_MAX_IOC || ioc->ii_hcap == NULL) {
		cmn_err(CE_NOTE, "ioc_update, bad handle (%p)",
		    (void *)hdl);
		mutex_exit(&ibdma->ms_hca_list_lock);
		return (IBDMA_BAD_PARAM);
	}

	rw_enter(&ioc->ii_hcap->ih_iou_rwlock, RW_WRITER);
	if (ioc->ii_inuse == 0) {
		rw_exit(&ioc->ii_hcap->ih_iou_rwlock);
		mutex_exit(&ibdma->ms_hca_list_lock);
		cmn_err(CE_NOTE, "ioc_update, slot not in use (%d)",
		    ioc->ii_slot+1);
		return (IBDMA_BAD_PARAM);
	}

	ASSERT(ioc->ii_srvcs != NULL);

	kmem_free(ioc->ii_srvcs, ioc->ii_profile.ioc_service_entries *
	    sizeof (ib_dm_srv_t));
	ioc->ii_srvcs = kmem_zalloc(profile->ioc_service_entries *
	    sizeof (ib_dm_srv_t), KM_SLEEP);

	bcopy(profile, &ioc->ii_profile, sizeof (ib_dm_ioc_ctrl_profile_t));
	bcopy(services, ioc->ii_srvcs, sizeof (ib_dm_srv_t) *
	    profile->ioc_service_entries);
	/*
	 * Update the caller's profile with the I/O controller slot
	 * assigned. The slot occupies the lower 8 bits of the vendor
	 * ID/slot 32-bit field.
	 */
	profile->ioc_vendorid |= h2b32(ioc->ii_slot);
	ibdma_set_ioc_state(ioc->ii_hcap, ioc->ii_slot, IBDMA_IOC_PRESENT);
	rw_exit(&ioc->ii_hcap->ih_iou_rwlock);
	mutex_exit(&ibdma->ms_hca_list_lock);

	return (IBDMA_SUCCESS);
}
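
/*
 * Illustrative sketch (not part of the module) of how a storage target
 * driver might drive the client API above; hca_guid, the profile
 * contents, and the service entry are assumed to be prepared by the
 * caller, and error handling is elided:
 *
 *	ib_dm_ioc_ctrl_profile_t	profile;    (filled in by caller)
 *	ib_dm_srv_t			srv;	    (filled in by caller)
 *	ibdma_hdl_t			hdl;
 *
 *	profile.ioc_service_entries = 1;
 *	hdl = ibdma_ioc_register(hca_guid, &profile, &srv);
 *	if (hdl == NULL)
 *		return (DDI_FAILURE);
 *
 *	... later, if the profile or services change ...
 *	(void) ibdma_ioc_update(hdl, &profile, &srv);
 *
 *	... at detach time ...
 *	(void) ibdma_ioc_unregister(hdl);
 */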