/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * ibdm.c
 *
 * This file contains the InfiniBand Device Manager (IBDM) support functions.
 * The IB nexus driver is the only client of the IBDM module.
 *
 * IBDM registers with IBTF for HCA arrival/removal notification.
 * IBDM registers with SA access to send DM MADs to discover the IOC's behind
 * the IOU's.
 *
 * The IB nexus driver registers with IBDM to find the information about the
 * HCA's and IOC's (behind the IOU) present on the IB fabric.
 */

#include <sys/systm.h>
#include <sys/taskq.h>
#include <sys/ib/mgt/ibdm/ibdm_impl.h>
#include <sys/modctl.h>

/* Function Prototype declarations */
static int ibdm_free_iou_info(ibdm_dp_gidinfo_t *);
static int ibdm_fini(void);
static int ibdm_init(void);
static int ibdm_get_reachable_ports(ibdm_port_attr_t *,
    ibdm_hca_list_t *);
static ibdm_dp_gidinfo_t *ibdm_check_dgid(ib_guid_t, ib_sn_prefix_t);
static ibdm_dp_gidinfo_t *ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *);
static int ibdm_send_classportinfo(ibdm_dp_gidinfo_t *);
static int ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *);
static int ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *);
static int ibdm_get_node_port_guids(ibmf_saa_handle_t, ib_lid_t,
    ib_guid_t *, ib_guid_t *);
static int ibdm_retry_command(ibdm_timeout_cb_args_t *);
static int ibdm_get_diagcode(ibdm_dp_gidinfo_t *, int);
static int ibdm_verify_mad_status(ib_mad_hdr_t *);
static int ibdm_handle_redirection(ibmf_msg_t *,
    ibdm_dp_gidinfo_t *, int *);
static void ibdm_wait_probe_completion(void);
static void ibdm_sweep_fabric(int);
static void ibdm_probe_gid_thread(void *);
static void ibdm_wakeup_probe_gid_cv(void);
static void ibdm_port_attr_ibmf_init(ibdm_port_attr_t *, ib_pkey_t, int);
static int ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *, int);
static void ibdm_update_port_attr(ibdm_port_attr_t *);
static void ibdm_handle_hca_attach(ib_guid_t);
static void ibdm_handle_srventry_mad(ibmf_msg_t *,
    ibdm_dp_gidinfo_t *, int *);
static void ibdm_ibmf_recv_cb(ibmf_handle_t, ibmf_msg_t *, void *);
static void ibdm_recv_incoming_mad(void *);
static void ibdm_process_incoming_mad(ibmf_handle_t, ibmf_msg_t *, void *);
static void ibdm_ibmf_send_cb(ibmf_handle_t, ibmf_msg_t *, void *);
static void ibdm_pkt_timeout_hdlr(void *arg);
static void ibdm_initialize_port(ibdm_port_attr_t *);
static void ibdm_handle_diagcode(ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *);
static void ibdm_probe_gid(ibdm_dp_gidinfo_t *);
static void ibdm_alloc_send_buffers(ibmf_msg_t *);
static void ibdm_free_send_buffers(ibmf_msg_t *);
static void ibdm_handle_hca_detach(ib_guid_t);
static int ibdm_fini_port(ibdm_port_attr_t *);
static int ibdm_uninit_hca(ibdm_hca_list_t *);
static void ibdm_handle_iounitinfo(ibmf_handle_t,
    ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *);
static void ibdm_handle_ioc_profile(ibmf_handle_t,
    ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *);
static void ibdm_event_hdlr(void *, ibt_hca_hdl_t,
    ibt_async_code_t, ibt_async_event_t *);
static void ibdm_handle_classportinfo(ibmf_handle_t,
    ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *);
static void ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *,
    ibdm_dp_gidinfo_t *);

static ibdm_hca_list_t *ibdm_dup_hca_attr(ibdm_hca_list_t *);
static ibdm_ioc_info_t *ibdm_dup_ioc_info(ibdm_ioc_info_t *,
    ibdm_dp_gidinfo_t *gid_list);
static void ibdm_probe_ioc(ib_guid_t, ib_guid_t, int);
static ibdm_ioc_info_t *ibdm_is_ioc_present(ib_guid_t,
    ibdm_dp_gidinfo_t *, int *);
static ibdm_port_attr_t *ibdm_get_port_attr(ibt_async_event_t *,
    ibdm_hca_list_t **);
static sa_node_record_t *ibdm_get_node_records(ibmf_saa_handle_t,
    size_t *, ib_guid_t);
static sa_portinfo_record_t *ibdm_get_portinfo(ibmf_saa_handle_t, size_t *,
    ib_lid_t);
static ibdm_dp_gidinfo_t *ibdm_create_gid_info(ibdm_port_attr_t *,
    ib_gid_t, ib_gid_t);
static ibdm_dp_gidinfo_t *ibdm_find_gid(ib_guid_t, ib_guid_t);
static int ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *, uint8_t);
static ibdm_ioc_info_t *ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *, int);
static void ibdm_saa_event_cb(ibmf_saa_handle_t, ibmf_saa_subnet_event_t,
    ibmf_saa_event_details_t *, void *);
static void ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *,
    ibdm_dp_gidinfo_t *);
static ibdm_dp_gidinfo_t *ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *);
static void ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *,
    ibdm_dp_gidinfo_t *);
static void ibdm_addto_gidlist(ibdm_gid_t **, ibdm_gid_t *);
static void ibdm_free_gid_list(ibdm_gid_t *);
static void ibdm_rescan_gidlist(ib_guid_t *ioc_guid);
static void ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *);
static void ibdm_saa_event_taskq(void *);
static void ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *);
static void ibdm_get_next_port(ibdm_hca_list_t **,
    ibdm_port_attr_t **, int);
static void ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *,
    ibdm_dp_gidinfo_t *);
static void ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *,
    ibdm_hca_list_t *);
static void ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *);
static void ibdm_saa_handle_new_gid(void *);
static void ibdm_reset_all_dgids(ibmf_saa_handle_t);
static void ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *);
static void ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *);
static void ibdm_fill_srv_attr_mod(ib_mad_hdr_t *, ibdm_timeout_cb_args_t *);
static void ibdm_bump_transactionID(ibdm_dp_gidinfo_t *);

int ibdm_dft_timeout = IBDM_DFT_TIMEOUT;
int ibdm_dft_retry_cnt = IBDM_DFT_NRETRIES;
#ifdef DEBUG
int ibdm_ignore_saa_event = 0;
#endif

/* Modload support */
static struct modlmisc ibdm_modlmisc = {
    &mod_miscops,
    "InfiniBand Device Manager %I%",
};

struct modlinkage ibdm_modlinkage = {
    MODREV_1,
    (void *)&ibdm_modlmisc,
    NULL
};

static ibt_clnt_modinfo_t ibdm_ibt_modinfo = {
    IBTI_V2,
    IBT_DM,
    ibdm_event_hdlr,
    NULL,
    "ibdm"
};

/* Global variables */
ibdm_t ibdm;
int ibdm_taskq_enable = IBDM_ENABLE_TASKQ_HANDLING;
char *ibdm_string = "ibdm";

_NOTE(SCHEME_PROTECTS_DATA("Serialized access by cv",
    ibdm.ibdm_dp_gidlist_head))

/*
 * _init
 *	Loadable module init, called before any other module.
 *	Initialize mutex
 *	Register with IBTF
 */
int
_init(void)
{
    int err;

    IBTF_DPRINTF_L4("ibdm", "\t_init: addr of ibdm %p", &ibdm);

    if ((err = ibdm_init()) != IBDM_SUCCESS) {
        IBTF_DPRINTF_L2("ibdm", "_init: ibdm_init failed 0x%x", err);
        (void) ibdm_fini();
        return (DDI_FAILURE);
    }

    if ((err = mod_install(&ibdm_modlinkage)) != 0) {
        IBTF_DPRINTF_L2("ibdm", "_init: mod_install failed 0x%x", err);
        (void) ibdm_fini();
    }
    return (err);
}


int
_fini(void)
{
    int err;

    if ((err = ibdm_fini()) != IBDM_SUCCESS) {
        IBTF_DPRINTF_L2("ibdm", "_fini: ibdm_fini failed 0x%x", err);
        (void) ibdm_init();
        return (EBUSY);
    }

    if ((err = mod_remove(&ibdm_modlinkage)) != 0) {
        IBTF_DPRINTF_L2("ibdm", "_fini: mod_remove failed 0x%x", err);
        (void) ibdm_init();
    }
    return (err);
}


int
_info(struct modinfo *modinfop)
{
    return (mod_info(&ibdm_modlinkage, modinfop));
}


/*
 * ibdm_init():
 *	Register with IBTF
 *	Allocate memory for the HCAs
 *	Allocate minor-nodes for the HCAs
 */
static int
ibdm_init(void)
{
    int i, hca_count;
    ib_guid_t *hca_guids;
    ibt_status_t status;

    IBTF_DPRINTF_L4("ibdm", "\tibdm_init:");
    if (!(ibdm.ibdm_state & IBDM_LOCKS_ALLOCED)) {
        mutex_init(&ibdm.ibdm_mutex, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&ibdm.ibdm_hl_mutex, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&ibdm.ibdm_ibnex_mutex, NULL, MUTEX_DEFAULT, NULL);
        mutex_enter(&ibdm.ibdm_mutex);
        ibdm.ibdm_state |= IBDM_LOCKS_ALLOCED;
    }

    if (!(ibdm.ibdm_state & IBDM_IBT_ATTACHED)) {
        if ((status = ibt_attach(&ibdm_ibt_modinfo, NULL, NULL,
            (void *)&ibdm.ibdm_ibt_clnt_hdl)) != IBT_SUCCESS) {
            IBTF_DPRINTF_L2("ibdm", "ibdm_init: ibt_attach "
                "failed %x", status);
            mutex_exit(&ibdm.ibdm_mutex);
            return (IBDM_FAILURE);
        }

        ibdm.ibdm_state |= IBDM_IBT_ATTACHED;
        mutex_exit(&ibdm.ibdm_mutex);
    }


    if (!(ibdm.ibdm_state & IBDM_HCA_ATTACHED)) {
        hca_count = ibt_get_hca_list(&hca_guids);
        IBTF_DPRINTF_L4("ibdm", "ibdm_init: num_hcas = %d", hca_count);
        for (i = 0; i < hca_count; i++)
            (void) ibdm_handle_hca_attach(hca_guids[i]);
        if (hca_count)
            ibt_free_hca_list(hca_guids, hca_count);

        mutex_enter(&ibdm.ibdm_mutex);
        ibdm.ibdm_state |= IBDM_HCA_ATTACHED;
        mutex_exit(&ibdm.ibdm_mutex);
    }

    if (!(ibdm.ibdm_state & IBDM_CVS_ALLOCED)) {
        cv_init(&ibdm.ibdm_probe_cv, NULL, CV_DRIVER, NULL);
        cv_init(&ibdm.ibdm_busy_cv, NULL, CV_DRIVER, NULL);
        mutex_enter(&ibdm.ibdm_mutex);
        ibdm.ibdm_state |= IBDM_CVS_ALLOCED;
        mutex_exit(&ibdm.ibdm_mutex);
    }
    return (IBDM_SUCCESS);
}
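
/*
 * Editor's note -- illustrative sketch, not part of the original driver:
 * each setup step in ibdm_init() is guarded by a bit in ibdm.ibdm_state,
 * which is what makes the "(void) ibdm_fini()" calls in the _init()/_fini()
 * failure paths safe: a partially completed init is unwound step by step,
 * and a later retry skips whatever already succeeded.  The pattern, shown
 * with the lock bit as an example, is:
 *
 *	init side:
 *		if (!(ibdm.ibdm_state & IBDM_LOCKS_ALLOCED)) {
 *			mutex_init(&ibdm.ibdm_mutex, NULL, MUTEX_DEFAULT, NULL);
 *			ibdm.ibdm_state |= IBDM_LOCKS_ALLOCED;
 *		}
 *
 *	fini side:
 *		if (ibdm.ibdm_state & IBDM_LOCKS_ALLOCED) {
 *			ibdm.ibdm_state &= ~IBDM_LOCKS_ALLOCED;
 *			mutex_destroy(&ibdm.ibdm_mutex);
 *		}
 */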

static int
ibdm_free_iou_info(ibdm_dp_gidinfo_t *gid_info)
{
    int ii, k, niocs;
    size_t size;
    ibdm_gid_t *delete, *head;
    timeout_id_t timeout_id;
    ibdm_ioc_info_t *ioc;

    ASSERT(mutex_owned(&gid_info->gl_mutex));
    if (gid_info->gl_iou == NULL) {
        IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: No IOU");
        return (0);
    }

    niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
    IBTF_DPRINTF_L4("ibdm", "\tfree_iou_info: gid_info = %p, niocs %d",
        gid_info, niocs);

    for (ii = 0; ii < niocs; ii++) {
        ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii);

        /* handle the case where an ioc_timeout_id is scheduled */
        if (ioc->ioc_timeout_id) {
            timeout_id = ioc->ioc_timeout_id;
            mutex_exit(&gid_info->gl_mutex);
            IBTF_DPRINTF_L5("ibdm", "free_iou_info: "
                "ioc_timeout_id = 0x%x", timeout_id);
            if (untimeout(timeout_id) == -1) {
                IBTF_DPRINTF_L2("ibdm", "free_iou_info: "
                    "untimeout ioc_timeout_id failed");
                mutex_enter(&gid_info->gl_mutex);
                return (-1);
            }
            mutex_enter(&gid_info->gl_mutex);
            ioc->ioc_timeout_id = 0;
        }

        /* handle the case where an ioc_dc_timeout_id is scheduled */
        if (ioc->ioc_dc_timeout_id) {
            timeout_id = ioc->ioc_dc_timeout_id;
            mutex_exit(&gid_info->gl_mutex);
            IBTF_DPRINTF_L5("ibdm", "free_iou_info: "
                "ioc_dc_timeout_id = 0x%x", timeout_id);
            if (untimeout(timeout_id) == -1) {
                IBTF_DPRINTF_L2("ibdm", "free_iou_info: "
                    "untimeout ioc_dc_timeout_id failed");
                mutex_enter(&gid_info->gl_mutex);
                return (-1);
            }
            mutex_enter(&gid_info->gl_mutex);
            ioc->ioc_dc_timeout_id = 0;
        }

        /* handle the case where serv[k].se_timeout_id is scheduled */
        for (k = 0; k < ioc->ioc_profile.ioc_service_entries; k++) {
            if (ioc->ioc_serv[k].se_timeout_id) {
                timeout_id = ioc->ioc_serv[k].se_timeout_id;
                mutex_exit(&gid_info->gl_mutex);
                IBTF_DPRINTF_L5("ibdm", "free_iou_info: "
                    "ioc->ioc_serv[%d].se_timeout_id = 0x%x",
                    k, timeout_id);
                if (untimeout(timeout_id) == -1) {
                    IBTF_DPRINTF_L2("ibdm", "free_iou_info:"
                        " untimeout se_timeout_id failed");
                    mutex_enter(&gid_info->gl_mutex);
                    return (-1);
                }
                mutex_enter(&gid_info->gl_mutex);
                ioc->ioc_serv[k].se_timeout_id = 0;
            }
        }

        /* delete GID list */
        head = ioc->ioc_gid_list;
        while (head) {
            IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: "
                "Deleting gid_list struct %p", head);
            delete = head;
            head = head->gid_next;
            kmem_free(delete, sizeof (ibdm_gid_t));
        }
        ioc->ioc_gid_list = NULL;

        /* delete ioc_serv */
        size = ioc->ioc_profile.ioc_service_entries *
            sizeof (ibdm_srvents_info_t);
        if (ioc->ioc_serv && size) {
            kmem_free(ioc->ioc_serv, size);
            ioc->ioc_serv = NULL;
        }
    }

    IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: deleting IOU & IOC");
    size = sizeof (ibdm_iou_info_t) + niocs * sizeof (ibdm_ioc_info_t);
    kmem_free(gid_info->gl_iou, size);
    gid_info->gl_iou = NULL;
    return (0);
}
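
/*
 * Editor's note -- illustrative sketch, not part of the original driver:
 * the untimeout() calls in ibdm_free_iou_info() above always drop gl_mutex
 * first and re-acquire it afterwards.  untimeout(9F) can block until an
 * already-running timeout handler returns, so holding a lock that the
 * handler itself needs could deadlock.  The general shape (assuming a
 * timeout handler that takes gl_mutex; the IBDM handlers are defined later
 * in this file) is:
 *
 *	timeout_id = ioc->ioc_timeout_id;
 *	mutex_exit(&gid_info->gl_mutex);
 *	(void) untimeout(timeout_id);	 may wait for the handler to finish
 *	mutex_enter(&gid_info->gl_mutex);
 *	ioc->ioc_timeout_id = 0;
 */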

/*
 * ibdm_fini():
 *	Un-register with IBTF
 *	Deallocate memory for the GID info
 */
static int
ibdm_fini()
{
    int ii;
    ibdm_hca_list_t *hca_list, *temp;
    ibdm_dp_gidinfo_t *gid_info, *tmp;
    ibdm_gid_t *head, *delete;

    IBTF_DPRINTF_L4("ibdm", "\tibdm_fini");

    mutex_enter(&ibdm.ibdm_hl_mutex);
    if (ibdm.ibdm_state & IBDM_IBT_ATTACHED) {
        if (ibt_detach(ibdm.ibdm_ibt_clnt_hdl) != IBT_SUCCESS) {
            IBTF_DPRINTF_L2("ibdm", "\t_fini: ibt_detach failed");
            mutex_exit(&ibdm.ibdm_hl_mutex);
            return (IBDM_FAILURE);
        }
        ibdm.ibdm_state &= ~IBDM_IBT_ATTACHED;
        ibdm.ibdm_ibt_clnt_hdl = NULL;
    }

    hca_list = ibdm.ibdm_hca_list_head;
    IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: nhcas %d", ibdm.ibdm_hca_count);
    for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) {
        temp = hca_list;
        hca_list = hca_list->hl_next;
        IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: hca %p", temp);
        if (ibdm_uninit_hca(temp) != IBDM_SUCCESS) {
            IBTF_DPRINTF_L2("ibdm", "\tibdm_fini: "
                "uninit_hca %p failed", temp);
            mutex_exit(&ibdm.ibdm_hl_mutex);
            return (IBDM_FAILURE);
        }
    }
    mutex_exit(&ibdm.ibdm_hl_mutex);

    mutex_enter(&ibdm.ibdm_mutex);
    if (ibdm.ibdm_state & IBDM_HCA_ATTACHED)
        ibdm.ibdm_state &= ~IBDM_HCA_ATTACHED;

    gid_info = ibdm.ibdm_dp_gidlist_head;
    while (gid_info) {
        mutex_enter(&gid_info->gl_mutex);
        (void) ibdm_free_iou_info(gid_info);
        mutex_exit(&gid_info->gl_mutex);
        ibdm_delete_glhca_list(gid_info);

        tmp = gid_info;
        gid_info = gid_info->gl_next;
        mutex_destroy(&tmp->gl_mutex);
        head = tmp->gl_gid;
        while (head) {
            IBTF_DPRINTF_L4("ibdm",
                "\tibdm_fini: Deleting gid structs");
            delete = head;
            head = head->gid_next;
            kmem_free(delete, sizeof (ibdm_gid_t));
        }
        kmem_free(tmp, sizeof (ibdm_dp_gidinfo_t));
    }
    mutex_exit(&ibdm.ibdm_mutex);

    if (ibdm.ibdm_state & IBDM_LOCKS_ALLOCED) {
        ibdm.ibdm_state &= ~IBDM_LOCKS_ALLOCED;
        mutex_destroy(&ibdm.ibdm_mutex);
        mutex_destroy(&ibdm.ibdm_hl_mutex);
        mutex_destroy(&ibdm.ibdm_ibnex_mutex);
    }
    if (ibdm.ibdm_state & IBDM_CVS_ALLOCED) {
        ibdm.ibdm_state &= ~IBDM_CVS_ALLOCED;
        cv_destroy(&ibdm.ibdm_probe_cv);
        cv_destroy(&ibdm.ibdm_busy_cv);
    }
    return (IBDM_SUCCESS);
}


/*
 * ibdm_event_hdlr()
 *
 *	IBDM registers this asynchronous event handler at the time of
 *	ibt_attach. IBDM supports the following async events; other
 *	events are simply ignored.
 *	IBT_HCA_ATTACH_EVENT:
 *		Retrieves the information about all the ports that are
 *		present on this HCA, allocates the port attributes
 *		structure and calls IB nexus callback routine with
 *		the port attributes structure as an input argument.
 *	IBT_HCA_DETACH_EVENT:
 *		Retrieves the information about all the ports that are
 *		present on this HCA and calls IB nexus callback with
 *		port guid as an argument
 *	IBT_EVENT_PORT_UP:
 *		Register with IBMF and SA access
 *		Setup IBMF receive callback routine
 *	IBT_EVENT_PORT_DOWN:
 *		Un-Register with IBMF and SA access
 *		Teardown IBMF receive callback routine
 */
/*ARGSUSED*/
static void
ibdm_event_hdlr(void *clnt_hdl,
    ibt_hca_hdl_t hca_hdl, ibt_async_code_t code, ibt_async_event_t *event)
{
    ibdm_hca_list_t *hca_list;
    ibdm_port_attr_t *port;
    ibmf_saa_handle_t port_sa_hdl;

    IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: async code 0x%x", code);

    switch (code) {
    case IBT_HCA_ATTACH_EVENT:	/* New HCA registered with IBTF */
        ibdm_handle_hca_attach(event->ev_hca_guid);
        break;

    case IBT_HCA_DETACH_EVENT:	/* HCA unregistered with IBTF */
        ibdm_handle_hca_detach(event->ev_hca_guid);
        mutex_enter(&ibdm.ibdm_ibnex_mutex);
        if (ibdm.ibdm_ibnex_callback != NULL) {
            (*ibdm.ibdm_ibnex_callback)((void *)
                &event->ev_hca_guid, IBDM_EVENT_HCA_REMOVED);
        }
        mutex_exit(&ibdm.ibdm_ibnex_mutex);
        break;

    case IBT_EVENT_PORT_UP:
        IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_UP");
        mutex_enter(&ibdm.ibdm_hl_mutex);
        port = ibdm_get_port_attr(event, &hca_list);
        if (port == NULL) {
            IBTF_DPRINTF_L2("ibdm",
                "\tevent_hdlr: HCA not present");
            mutex_exit(&ibdm.ibdm_hl_mutex);
            break;
        }
        ibdm_initialize_port(port);
        hca_list->hl_nports_active++;
        mutex_exit(&ibdm.ibdm_hl_mutex);
        break;

    case IBT_ERROR_PORT_DOWN:
        IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_DOWN");
        mutex_enter(&ibdm.ibdm_hl_mutex);
        port = ibdm_get_port_attr(event, &hca_list);
        if (port == NULL) {
            IBTF_DPRINTF_L2("ibdm",
                "\tevent_hdlr: HCA not present");
            mutex_exit(&ibdm.ibdm_hl_mutex);
            break;
        }
        hca_list->hl_nports_active--;
        port_sa_hdl = port->pa_sa_hdl;
        (void) ibdm_fini_port(port);
        port->pa_state = IBT_PORT_DOWN;
        mutex_exit(&ibdm.ibdm_hl_mutex);
        ibdm_reset_all_dgids(port_sa_hdl);
        break;

    default:		/* Ignore all other events/errors */
        break;
    }
}


/*
 * ibdm_initialize_port()
 *	Register with IBMF
 *	Register with SA access
 *	Register a receive callback routine with IBMF. IBMF invokes
 *	this routine whenever a MAD arrives at this port.
 *	Update the port attributes
 */
static void
ibdm_initialize_port(ibdm_port_attr_t *port)
{
    int ii;
    uint_t nports, size;
    uint_t pkey_idx;
    ib_pkey_t pkey;
    ibt_hca_portinfo_t *pinfop;
    ibmf_register_info_t ibmf_reg;
    ibmf_saa_subnet_event_args_t event_args;

    IBTF_DPRINTF_L4("ibdm", "\tinitialize_port:");
    ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));

    /* Check whether the port is active */
    if (ibt_get_port_state(port->pa_hca_hdl, port->pa_port_num, NULL,
        NULL) != IBT_SUCCESS)
        return;

    if (port->pa_sa_hdl != NULL)
        return;

    if (ibt_query_hca_ports(port->pa_hca_hdl, port->pa_port_num,
        &pinfop, &nports, &size) != IBT_SUCCESS) {
        /* This should not occur */
        port->pa_npkeys = 0;
        port->pa_pkey_tbl = NULL;
        return;
    }
    port->pa_sn_prefix = pinfop->p_sgid_tbl[0].gid_prefix;

    port->pa_state = pinfop->p_linkstate;
    port->pa_npkeys = pinfop->p_pkey_tbl_sz;
    port->pa_pkey_tbl = (ibdm_pkey_tbl_t *)kmem_zalloc(
        port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP);

    for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++)
        port->pa_pkey_tbl[pkey_idx].pt_pkey =
            pinfop->p_pkey_tbl[pkey_idx];

    ibt_free_portinfo(pinfop, size);

    event_args.is_event_callback = ibdm_saa_event_cb;
    event_args.is_event_callback_arg = port;
    if (ibmf_sa_session_open(port->pa_port_guid, 0, &event_args,
        IBMF_VERSION, 0, &port->pa_sa_hdl) != IBMF_SUCCESS) {
        IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: "
            "sa access registration failed");
        return;
    }
    ibmf_reg.ir_ci_guid = port->pa_hca_guid;
    ibmf_reg.ir_port_num = port->pa_port_num;
    ibmf_reg.ir_client_class = DEV_MGT_MANAGER;

    if (ibmf_register(&ibmf_reg, IBMF_VERSION, 0, NULL, NULL,
        &port->pa_ibmf_hdl, &port->pa_ibmf_caps) != IBMF_SUCCESS) {
        IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: "
            "IBMF registration failed");
        (void) ibdm_fini_port(port);
        return;
    }
    if (ibmf_setup_async_cb(port->pa_ibmf_hdl, IBMF_QP_HANDLE_DEFAULT,
        ibdm_ibmf_recv_cb, 0, 0) != IBMF_SUCCESS) {
        IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: "
            "IBMF setup recv cb failed");
        (void) ibdm_fini_port(port);
        return;
    }

    for (ii = 0; ii < port->pa_npkeys; ii++) {
        pkey = port->pa_pkey_tbl[ii].pt_pkey;
        if (IBDM_INVALID_PKEY(pkey)) {
            port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
            continue;
        }
        ibdm_port_attr_ibmf_init(port, pkey, ii);
    }
}


/*
 * ibdm_port_attr_ibmf_init:
 *	With IBMF - Alloc QP Handle and Setup Async callback
 */
static void
ibdm_port_attr_ibmf_init(ibdm_port_attr_t *port, ib_pkey_t pkey, int ii)
{
    int ret;

    if ((ret = ibmf_alloc_qp(port->pa_ibmf_hdl, pkey, IB_GSI_QKEY,
        IBMF_ALT_QP_MAD_NO_RMPP, &port->pa_pkey_tbl[ii].pt_qp_hdl)) !=
        IBMF_SUCCESS) {
        IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: "
            "IBMF failed to alloc qp %d", ret);
        port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
        return;
    }

    IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_init: QP handle is %p",
        port->pa_ibmf_hdl);

    if ((ret = ibmf_setup_async_cb(port->pa_ibmf_hdl,
        port->pa_pkey_tbl[ii].pt_qp_hdl, ibdm_ibmf_recv_cb, 0, 0)) !=
        IBMF_SUCCESS) {
        IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: "
            "IBMF setup recv cb failed %d", ret);
        (void) ibmf_free_qp(port->pa_ibmf_hdl,
            &port->pa_pkey_tbl[ii].pt_qp_hdl, 0);
        port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
    }
}
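
/*
 * Editor's note -- illustrative sketch, not part of the original driver:
 * one alternate QP is allocated per valid P_Key above, and the P_Key is
 * recorded next to the QP handle in pa_pkey_tbl[].  Later, when a path to a
 * remote GID is found, the sender picks the QP whose P_Key matches the path
 * record (see ibdm_get_reachable_ports() later in this file), roughly:
 *
 *	for (jj = 0; jj < portinfo->pa_npkeys; jj++) {
 *		pkey_tbl = &portinfo->pa_pkey_tbl[jj];
 *		if (gid_info->gl_p_key == pkey_tbl->pt_pkey &&
 *		    pkey_tbl->pt_qp_hdl != NULL) {
 *			gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl;
 *			break;
 *		}
 *	}
 *
 * The teardown side (ibdm_port_attr_ibmf_fini(), below) must undo the two
 * steps in reverse order: tear down the async callback, then free the QP.
 */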

/*
 * ibdm_get_port_attr()
 *	Get port attributes from HCA guid and port number
 *	Return pointer to ibdm_port_attr_t on Success
 *	and NULL on failure
 */
static ibdm_port_attr_t *
ibdm_get_port_attr(ibt_async_event_t *event, ibdm_hca_list_t **retval)
{
    ibdm_hca_list_t *hca_list;
    ibdm_port_attr_t *port_attr;
    int ii;

    IBTF_DPRINTF_L4("ibdm", "\tget_port_attr: port# %d", event->ev_port);
    ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));
    hca_list = ibdm.ibdm_hca_list_head;
    while (hca_list) {
        if (hca_list->hl_hca_guid == event->ev_hca_guid) {
            for (ii = 0; ii < hca_list->hl_nports; ii++) {
                port_attr = &hca_list->hl_port_attr[ii];
                if (port_attr->pa_port_num == event->ev_port) {
                    *retval = hca_list;
                    return (port_attr);
                }
            }
        }
        hca_list = hca_list->hl_next;
    }
    return (NULL);
}


/*
 * ibdm_update_port_attr()
 *	Update the port attributes
 */
static void
ibdm_update_port_attr(ibdm_port_attr_t *port)
{
    uint_t nports, size;
    uint_t pkey_idx;
    ibt_hca_portinfo_t *portinfop;

    IBTF_DPRINTF_L4("ibdm", "\tupdate_port_attr: Begin");
    if (ibt_query_hca_ports(port->pa_hca_hdl,
        port->pa_port_num, &portinfop, &nports, &size) != IBT_SUCCESS) {
        /* This should not occur */
        port->pa_npkeys = 0;
        port->pa_pkey_tbl = NULL;
        return;
    }
    port->pa_sn_prefix = portinfop->p_sgid_tbl[0].gid_prefix;

    port->pa_state = portinfop->p_linkstate;

    /*
     * PKey information in portinfo valid only if port is
     * ACTIVE. Bail out if not.
     */
    if (port->pa_state != IBT_PORT_ACTIVE) {
        port->pa_npkeys = 0;
        port->pa_pkey_tbl = NULL;
        ibt_free_portinfo(portinfop, size);
        return;
    }

    port->pa_npkeys = portinfop->p_pkey_tbl_sz;
    port->pa_pkey_tbl = (ibdm_pkey_tbl_t *)kmem_zalloc(
        port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP);

    for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++) {
        port->pa_pkey_tbl[pkey_idx].pt_pkey =
            portinfop->p_pkey_tbl[pkey_idx];
    }
    ibt_free_portinfo(portinfop, size);
}


/*
 * ibdm_handle_hca_attach()
 */
static void
ibdm_handle_hca_attach(ib_guid_t hca_guid)
{
    uint_t size;
    uint_t ii, nports;
    ibt_status_t status;
    ibt_hca_hdl_t hca_hdl;
    ibt_hca_attr_t *hca_attr;
    ibdm_hca_list_t *hca_list, *temp;
    ibdm_port_attr_t *port_attr;
    ibt_hca_portinfo_t *portinfop;

    IBTF_DPRINTF_L4("ibdm",
        "\thandle_hca_attach: hca_guid = 0x%llX", hca_guid);

    /* open the HCA first */
    if ((status = ibt_open_hca(ibdm.ibdm_ibt_clnt_hdl, hca_guid,
        &hca_hdl)) != IBT_SUCCESS) {
        IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: "
            "open_hca failed, status 0x%x", status);
        return;
    }

    hca_attr = (ibt_hca_attr_t *)
        kmem_alloc(sizeof (ibt_hca_attr_t), KM_SLEEP);
    /* ibt_query_hca always returns IBT_SUCCESS */
    (void) ibt_query_hca(hca_hdl, hca_attr);

    IBTF_DPRINTF_L4("ibdm", "\tvid: 0x%x, pid: 0x%x, ver: 0x%x,"
        " #ports: %d", hca_attr->hca_vendor_id, hca_attr->hca_device_id,
        hca_attr->hca_version_id, hca_attr->hca_nports);

    if ((status = ibt_query_hca_ports(hca_hdl, 0, &portinfop, &nports,
        &size)) != IBT_SUCCESS) {
        IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: "
            "ibt_query_hca_ports failed, status 0x%x", status);
        kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
        (void) ibt_close_hca(hca_hdl);
        return;
    }
    hca_list = (ibdm_hca_list_t *)
        kmem_zalloc((sizeof (ibdm_hca_list_t)), KM_SLEEP);
    hca_list->hl_port_attr = (ibdm_port_attr_t *)kmem_zalloc(
        (sizeof (ibdm_port_attr_t) * hca_attr->hca_nports), KM_SLEEP);
    hca_list->hl_hca_guid = hca_attr->hca_node_guid;
    hca_list->hl_nports = hca_attr->hca_nports;
    hca_list->hl_attach_time = ddi_get_time();
    hca_list->hl_hca_hdl = hca_hdl;

    /*
     * Init a dummy port attribute for the HCA node
     * This is for Per-HCA Node. Initialize port_attr :
     *		hca_guid & port_guid -> hca_guid
     *		npkeys, pkey_tbl is NULL
     *		port_num, sn_prefix is 0
     *		vendorid, product_id, dev_version from HCA
     *		pa_state is IBT_PORT_ACTIVE
     */
    hca_list->hl_hca_port_attr = (ibdm_port_attr_t *)kmem_zalloc(
        sizeof (ibdm_port_attr_t), KM_SLEEP);
    port_attr = hca_list->hl_hca_port_attr;
    port_attr->pa_vendorid = hca_attr->hca_vendor_id;
    port_attr->pa_productid = hca_attr->hca_device_id;
    port_attr->pa_dev_version = hca_attr->hca_version_id;
    port_attr->pa_hca_guid = hca_attr->hca_node_guid;
    port_attr->pa_hca_hdl = hca_list->hl_hca_hdl;
    port_attr->pa_port_guid = hca_attr->hca_node_guid;
    port_attr->pa_state = IBT_PORT_ACTIVE;


    for (ii = 0; ii < nports; ii++) {
        port_attr = &hca_list->hl_port_attr[ii];
        port_attr->pa_vendorid = hca_attr->hca_vendor_id;
        port_attr->pa_productid = hca_attr->hca_device_id;
        port_attr->pa_dev_version = hca_attr->hca_version_id;
        port_attr->pa_hca_guid = hca_attr->hca_node_guid;
        port_attr->pa_hca_hdl = hca_list->hl_hca_hdl;
        port_attr->pa_port_guid = portinfop[ii].p_sgid_tbl->gid_guid;
        port_attr->pa_sn_prefix = portinfop[ii].p_sgid_tbl->gid_prefix;
        port_attr->pa_port_num = portinfop[ii].p_port_num;
        port_attr->pa_state = portinfop[ii].p_linkstate;

        /*
         * Register with IBMF, SA access when the port is in
         * ACTIVE state. Also register a callback routine
         * with IBMF to receive incoming DM MAD's.
         * The IBDM event handler takes care of registration of
         * ports which are not active.
         */
        IBTF_DPRINTF_L4("ibdm",
            "\thandle_hca_attach: port guid %llx Port state 0x%x",
            port_attr->pa_port_guid, portinfop[ii].p_linkstate);

        if (portinfop[ii].p_linkstate == IBT_PORT_ACTIVE) {
            mutex_enter(&ibdm.ibdm_hl_mutex);
            hca_list->hl_nports_active++;
            ibdm_initialize_port(port_attr);
            mutex_exit(&ibdm.ibdm_hl_mutex);
        }
    }
    mutex_enter(&ibdm.ibdm_hl_mutex);
    for (temp = ibdm.ibdm_hca_list_head; temp; temp = temp->hl_next) {
        if (temp->hl_hca_guid == hca_guid) {
            IBTF_DPRINTF_L2("ibdm", "hca_attach: HCA %llX "
                "already seen by IBDM", hca_guid);
            mutex_exit(&ibdm.ibdm_hl_mutex);
            (void) ibdm_uninit_hca(hca_list);
            return;
        }
    }
    ibdm.ibdm_hca_count++;
    if (ibdm.ibdm_hca_list_head == NULL) {
        ibdm.ibdm_hca_list_head = hca_list;
        ibdm.ibdm_hca_list_tail = hca_list;
    } else {
        ibdm.ibdm_hca_list_tail->hl_next = hca_list;
        ibdm.ibdm_hca_list_tail = hca_list;
    }
    mutex_exit(&ibdm.ibdm_hl_mutex);
    mutex_enter(&ibdm.ibdm_ibnex_mutex);
    if (ibdm.ibdm_ibnex_callback != NULL) {
        (*ibdm.ibdm_ibnex_callback)((void *)
            &hca_guid, IBDM_EVENT_HCA_ADDED);
    }
    mutex_exit(&ibdm.ibdm_ibnex_mutex);

    kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
    ibt_free_portinfo(portinfop, size);
}


/*
 * ibdm_handle_hca_detach()
 */
static void
ibdm_handle_hca_detach(ib_guid_t hca_guid)
{
    ibdm_hca_list_t *head, *prev = NULL;
    size_t len;
    ibdm_dp_gidinfo_t *gidinfo;

    IBTF_DPRINTF_L4("ibdm",
        "\thandle_hca_detach: hca_guid = 0x%llx", hca_guid);

    /* Make sure no probes are running */
    mutex_enter(&ibdm.ibdm_mutex);
    while (ibdm.ibdm_busy & IBDM_BUSY)
        cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
    ibdm.ibdm_busy |= IBDM_BUSY;
    mutex_exit(&ibdm.ibdm_mutex);

    mutex_enter(&ibdm.ibdm_hl_mutex);
    head = ibdm.ibdm_hca_list_head;
    while (head) {
        if (head->hl_hca_guid == hca_guid) {
            if (prev == NULL)
                ibdm.ibdm_hca_list_head = head->hl_next;
            else
                prev->hl_next = head->hl_next;
            ibdm.ibdm_hca_count--;
            break;
        }
        prev = head;
        head = head->hl_next;
    }
    mutex_exit(&ibdm.ibdm_hl_mutex);
    if (ibdm_uninit_hca(head) != IBDM_SUCCESS)
        (void) ibdm_handle_hca_attach(hca_guid);

    /*
     * Now clean up the HCA lists in the gidlist.
     */
    for (gidinfo = ibdm.ibdm_dp_gidlist_head; gidinfo; gidinfo =
        gidinfo->gl_next) {
        prev = NULL;
        head = gidinfo->gl_hca_list;
        while (head) {
            if (head->hl_hca_guid == hca_guid) {
                if (prev == NULL)
                    gidinfo->gl_hca_list =
                        head->hl_next;
                else
                    prev->hl_next = head->hl_next;

                len = sizeof (ibdm_hca_list_t) +
                    (head->hl_nports *
                    sizeof (ibdm_port_attr_t));
                kmem_free(head, len);

                break;
            }
            prev = head;
            head = head->hl_next;
        }
    }

    mutex_enter(&ibdm.ibdm_mutex);
    ibdm.ibdm_busy &= ~IBDM_BUSY;
    cv_broadcast(&ibdm.ibdm_busy_cv);
    mutex_exit(&ibdm.ibdm_mutex);
}
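
/*
 * Editor's note -- illustrative recap, not part of the original driver:
 * ibdm_handle_hca_detach() above and ibdm_sweep_fabric() below serialize
 * against each other with the IBDM_BUSY bit and ibdm_busy_cv rather than by
 * holding ibdm_mutex across the whole (long-running) operation:
 *
 *	mutex_enter(&ibdm.ibdm_mutex);
 *	while (ibdm.ibdm_busy & IBDM_BUSY)
 *		cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
 *	ibdm.ibdm_busy |= IBDM_BUSY;
 *	mutex_exit(&ibdm.ibdm_mutex);
 *
 *	... exclusive section; other locks may be taken and dropped ...
 *
 *	mutex_enter(&ibdm.ibdm_mutex);
 *	ibdm.ibdm_busy &= ~IBDM_BUSY;
 *	cv_broadcast(&ibdm.ibdm_busy_cv);
 *	mutex_exit(&ibdm.ibdm_mutex);
 */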

static int
ibdm_uninit_hca(ibdm_hca_list_t *head)
{
    int ii;
    ibdm_port_attr_t *port_attr;

    for (ii = 0; ii < head->hl_nports; ii++) {
        port_attr = &head->hl_port_attr[ii];
        if (ibdm_fini_port(port_attr) != IBDM_SUCCESS) {
            IBTF_DPRINTF_L2("ibdm", "uninit_hca: HCA %p port 0x%x "
                "ibdm_fini_port() failed", head, ii);
            return (IBDM_FAILURE);
        }
    }
    if (head->hl_hca_hdl)
        if (ibt_close_hca(head->hl_hca_hdl) != IBT_SUCCESS)
            return (IBDM_FAILURE);
    kmem_free(head->hl_port_attr,
        head->hl_nports * sizeof (ibdm_port_attr_t));
    kmem_free(head->hl_hca_port_attr, sizeof (ibdm_port_attr_t));
    kmem_free(head, sizeof (ibdm_hca_list_t));
    return (IBDM_SUCCESS);
}


/*
 * For each port on the HCA,
 *	1) Teardown IBMF receive callback function
 *	2) Unregister with IBMF
 *	3) Unregister with SA access
 */
static int
ibdm_fini_port(ibdm_port_attr_t *port_attr)
{
    int ii, ibmf_status;

    for (ii = 0; ii < port_attr->pa_npkeys; ii++) {
        if (port_attr->pa_pkey_tbl == NULL)
            break;
        if (!port_attr->pa_pkey_tbl[ii].pt_qp_hdl)
            continue;
        if (ibdm_port_attr_ibmf_fini(port_attr, ii) != IBDM_SUCCESS) {
            IBTF_DPRINTF_L4("ibdm", "\tfini_port: "
                "ibdm_port_attr_ibmf_fini failed for "
                "port pkey 0x%x", ii);
            return (IBDM_FAILURE);
        }
    }

    if (port_attr->pa_ibmf_hdl) {
        ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl,
            IBMF_QP_HANDLE_DEFAULT, 0);
        if (ibmf_status != IBMF_SUCCESS) {
            IBTF_DPRINTF_L4("ibdm", "\tfini_port: "
                "ibmf_tear_down_async_cb failed %d", ibmf_status);
            return (IBDM_FAILURE);
        }

        ibmf_status = ibmf_unregister(&port_attr->pa_ibmf_hdl, 0);
        if (ibmf_status != IBMF_SUCCESS) {
            IBTF_DPRINTF_L4("ibdm", "\tfini_port: "
                "ibmf_unregister failed %d", ibmf_status);
            return (IBDM_FAILURE);
        }

        port_attr->pa_ibmf_hdl = NULL;
    }

    if (port_attr->pa_sa_hdl) {
        ibmf_status = ibmf_sa_session_close(&port_attr->pa_sa_hdl, 0);
        if (ibmf_status != IBMF_SUCCESS) {
            IBTF_DPRINTF_L4("ibdm", "\tfini_port: "
                "ibmf_sa_session_close failed %d", ibmf_status);
            return (IBDM_FAILURE);
        }
        port_attr->pa_sa_hdl = NULL;
    }

    if (port_attr->pa_pkey_tbl != NULL) {
        kmem_free(port_attr->pa_pkey_tbl,
            port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t));
        port_attr->pa_pkey_tbl = NULL;
        port_attr->pa_npkeys = 0;
    }

    return (IBDM_SUCCESS);
}


/*
 * ibdm_port_attr_ibmf_fini:
 *	With IBMF - Tear down Async callback and free QP Handle
 */
static int
ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *port_attr, int ii)
{
    int ibmf_status;

    IBTF_DPRINTF_L5("ibdm", "\tport_attr_ibmf_fini:");

    if (port_attr->pa_pkey_tbl[ii].pt_qp_hdl) {
        ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl,
            port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0);
        if (ibmf_status != IBMF_SUCCESS) {
            IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: "
                "ibmf_tear_down_async_cb failed %d", ibmf_status);
            return (IBDM_FAILURE);
        }
        ibmf_status = ibmf_free_qp(port_attr->pa_ibmf_hdl,
            &port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0);
        if (ibmf_status != IBMF_SUCCESS) {
            IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: "
                "ibmf_free_qp failed %d", ibmf_status);
            return (IBDM_FAILURE);
        }
        port_attr->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
    }
    return (IBDM_SUCCESS);
}


/*
 * ibdm_gid_decr_pending:
 *	decrement gl_pending_cmds. If zero, wake up sleeping threads
 */
static void
ibdm_gid_decr_pending(ibdm_dp_gidinfo_t *gidinfo)
{
    mutex_enter(&ibdm.ibdm_mutex);
    mutex_enter(&gidinfo->gl_mutex);
    if (--gidinfo->gl_pending_cmds == 0) {
        /*
         * Handle DGID getting removed.
         */
        if (gidinfo->gl_disconnected) {
            mutex_exit(&gidinfo->gl_mutex);
            mutex_exit(&ibdm.ibdm_mutex);

            IBTF_DPRINTF_L3(ibdm_string, "\tgid_decr_pending: "
                "gidinfo %p hot removal", gidinfo);
            ibdm_delete_gidinfo(gidinfo);

            mutex_enter(&ibdm.ibdm_mutex);
            ibdm.ibdm_ngid_probes_in_progress--;
            ibdm_wait_probe_completion();
            mutex_exit(&ibdm.ibdm_mutex);
            return;
        }
        mutex_exit(&gidinfo->gl_mutex);
        mutex_exit(&ibdm.ibdm_mutex);
        ibdm_notify_newgid_iocs(gidinfo);
        mutex_enter(&ibdm.ibdm_mutex);
        mutex_enter(&gidinfo->gl_mutex);

        ibdm.ibdm_ngid_probes_in_progress--;
        ibdm_wait_probe_completion();
    }
    mutex_exit(&gidinfo->gl_mutex);
    mutex_exit(&ibdm.ibdm_mutex);
}


/*
 * ibdm_wait_probe_completion:
 *	wait for probing to complete
 */
static void
ibdm_wait_probe_completion(void)
{
    ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));
    if (ibdm.ibdm_ngid_probes_in_progress) {
        IBTF_DPRINTF_L4("ibdm", "\twait for probe complete");
        ibdm.ibdm_busy |= IBDM_PROBE_IN_PROGRESS;
        while (ibdm.ibdm_busy & IBDM_PROBE_IN_PROGRESS)
            cv_wait(&ibdm.ibdm_probe_cv, &ibdm.ibdm_mutex);
    }
}


/*
 * ibdm_wakeup_probe_gid_cv:
 *	wakeup waiting threads (based on ibdm_ngid_probes_in_progress)
 */
static void
ibdm_wakeup_probe_gid_cv(void)
{
    ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));
    if (!ibdm.ibdm_ngid_probes_in_progress) {
        IBTF_DPRINTF_L4("ibdm", "wakeup_probe_gid_thread: Wakeup");
        ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS;
        cv_broadcast(&ibdm.ibdm_probe_cv);
    }

}
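
/*
 * Editor's note -- illustrative recap, not part of the original driver:
 * outstanding GID probes are tracked with a simple counter/CV pair.  The
 * fabric sweep bumps ibdm_ngid_probes_in_progress once per GID before the
 * probes are dispatched, and every completion or failure path decrements it
 * (directly or via ibdm_gid_decr_pending() above) and then calls
 * ibdm_wakeup_probe_gid_cv().  The sweep side does, roughly:
 *
 *	mutex_enter(&ibdm.ibdm_mutex);
 *	ibdm.ibdm_ngid_probes_in_progress += ibdm.ibdm_ngids;
 *	mutex_exit(&ibdm.ibdm_mutex);
 *	... dispatch one probe per GID ...
 *	mutex_enter(&ibdm.ibdm_mutex);
 *	ibdm_wait_probe_completion();	sleeps until the count reaches zero
 *	mutex_exit(&ibdm.ibdm_mutex);
 *
 * while each completion path, under ibdm_mutex, does:
 *
 *	--ibdm.ibdm_ngid_probes_in_progress;
 *	ibdm_wakeup_probe_gid_cv();	broadcasts once the count hits zero
 */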


/*
 * ibdm_sweep_fabric(reprobe_flag)
 *	Find all possible Managed IOU's and their IOC's that are visible
 *	to the host. The algorithm used is as follows
 *
 *	Send a "bus walk" request for each port on the host HCA to SA access
 *	SA returns complete set of GID's that are reachable from
 *	source port. This is done in parallel.
 *
 *	Initialize GID state to IBDM_GID_PROBE_NOT_DONE
 *
 *	Sort the GID list and eliminate duplicate GID's
 *		1) Use DGID for sorting
 *		2) use PortGuid for sorting
 *			Send SA query to retrieve NodeRecord and
 *			extract PortGuid from that.
 *
 *	Set GID state to IBDM_GID_PROBE_FAILED to all the ports that don't
 *	support DM MAD's
 *		Send a "Portinfo" query to get the port capabilities and
 *		then check for DM MAD's support
 *
 *	Send "ClassPortInfo" request for all the GID's in parallel,
 *	set the GID state to IBDM_GET_CLASSPORTINFO and wait on the
 *	cv_signal to complete.
 *
 *	When DM agent on the remote GID sends back the response, IBMF
 *	invokes DM callback routine.
 *
 *	If the response is proper, send "IOUnitInfo" request and set
 *	GID state to IBDM_GET_IOUNITINFO.
 *
 *	If the response is proper, send "IocProfileInfo" request to
 *	all the IOC simultaneously and set GID state to IBDM_GET_IOC_DETAILS.
 *
 *	Send request to get Service entries simultaneously
 *
 *	Signal the waiting thread when received response for all the commands.
 *
 *	Set the GID state to IBDM_GID_PROBE_FAILED when received a error
 *	response during the probing period.
 *
 *	Note:
 *	ibdm.ibdm_ngid_probes_in_progress and ibdm_gid_list_t:gl_pending_cmds
 *	keep track of the number of commands in progress at any point of time.
 *	MAD transaction ID is used to identify a particular GID
 *	TBD: Consider registering the IBMF receive callback on demand
 *
 *	Note: This routine must be called with ibdm.ibdm_mutex held
 *	TBD: Re probe the failure GID (for certain failures) when requested
 *	for fabric sweep next time
 *
 *	Parameters : If reprobe_flag is set, all IOCs will be reprobed.
 */
static void
ibdm_sweep_fabric(int reprobe_flag)
{
    int ii;
    int new_paths = 0;
    uint8_t niocs;
    taskqid_t tid;
    ibdm_ioc_info_t *ioc;
    ibdm_hca_list_t *hca_list = NULL;
    ibdm_port_attr_t *port = NULL;
    ibdm_dp_gidinfo_t *gid_info;

    IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: Enter");
    ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));

    /*
     * Check whether a sweep already in progress. If so, just
     * wait for the fabric sweep to complete
     */
    while (ibdm.ibdm_busy & IBDM_BUSY)
        cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
    ibdm.ibdm_busy |= IBDM_BUSY;
    mutex_exit(&ibdm.ibdm_mutex);

    ibdm_dump_sweep_fabric_timestamp(0);

    /* Rescan the GID list for any removed GIDs for reprobe */
    if (reprobe_flag)
        ibdm_rescan_gidlist(NULL);

    /*
     * Get list of all the ports reachable from the local known HCA
     * ports which are active
     */
    mutex_enter(&ibdm.ibdm_hl_mutex);
    for (ibdm_get_next_port(&hca_list, &port, 1); port;
        ibdm_get_next_port(&hca_list, &port, 1)) {
        /*
         * Get PATHS to all the reachable ports from
         * SGID and update the global ibdm structure.
         */
        new_paths = ibdm_get_reachable_ports(port, hca_list);
        ibdm.ibdm_ngids += new_paths;
    }
    mutex_exit(&ibdm.ibdm_hl_mutex);

    mutex_enter(&ibdm.ibdm_mutex);
    ibdm.ibdm_ngid_probes_in_progress += ibdm.ibdm_ngids;
    mutex_exit(&ibdm.ibdm_mutex);

    /* Send a request to probe GIDs asynchronously. */
    for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info;
        gid_info = gid_info->gl_next) {
        mutex_enter(&gid_info->gl_mutex);
        gid_info->gl_reprobe_flag = reprobe_flag;
        mutex_exit(&gid_info->gl_mutex);

        /* process newly encountered GIDs */
        tid = taskq_dispatch(system_taskq, ibdm_probe_gid_thread,
            (void *)gid_info, TQ_NOSLEEP);
        IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: gid_info = %p"
            " taskq_id = %x", gid_info, tid);
        /* taskq failed to dispatch call it directly */
        if (tid == NULL)
            ibdm_probe_gid_thread((void *)gid_info);
    }

    mutex_enter(&ibdm.ibdm_mutex);
    ibdm_wait_probe_completion();

    /*
     * Update the properties, if reprobe_flag is set
     * Skip if gl_reprobe_flag is set, this will be
     * a re-inserted / new GID, for which notifications
     * have already been send.
     */
    if (reprobe_flag) {
        for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info;
            gid_info = gid_info->gl_next) {
            if (gid_info->gl_iou == NULL)
                continue;
            if (gid_info->gl_reprobe_flag) {
                gid_info->gl_reprobe_flag = 0;
                continue;
            }

            niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
            for (ii = 0; ii < niocs; ii++) {
                ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii);
                if (ioc)
                    ibdm_reprobe_update_port_srv(ioc,
                        gid_info);
            }
        }
    }
    ibdm_dump_sweep_fabric_timestamp(1);

    ibdm.ibdm_busy &= ~IBDM_BUSY;
    cv_broadcast(&ibdm.ibdm_busy_cv);
    IBTF_DPRINTF_L5("ibdm", "\tsweep_fabric: EXIT");
}
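
/*
 * Editor's note -- illustrative sketch, not part of the original driver:
 * the sweep dispatches one probe per GID onto system_taskq with TQ_NOSLEEP
 * and falls back to running the probe inline when the dispatch fails (for
 * example under memory pressure), so no GID is silently skipped:
 *
 *	tid = taskq_dispatch(system_taskq, ibdm_probe_gid_thread,
 *	    (void *)gid_info, TQ_NOSLEEP);
 *	if (tid == NULL)
 *		ibdm_probe_gid_thread((void *)gid_info);
 *
 * The inline fallback appears safe here because ibdm_probe_gid_thread()
 * only issues its MADs asynchronously and does not block waiting for the
 * responses; completion is tracked through the probe counter and CV.
 */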


/*
 * ibdm_probe_gid_thread:
 *	thread that does the actual work for sweeping the fabric
 *	for a given GID
 */
static void
ibdm_probe_gid_thread(void *args)
{
    int reprobe_flag;
    ib_guid_t node_guid;
    ib_guid_t port_guid;
    ibdm_dp_gidinfo_t *gid_info;

    gid_info = (ibdm_dp_gidinfo_t *)args;
    reprobe_flag = gid_info->gl_reprobe_flag;
    IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: gid_info = %p, flag = %d",
        gid_info, reprobe_flag);
    ASSERT(gid_info != NULL);
    ASSERT(gid_info->gl_pending_cmds == 0);

    if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE &&
        reprobe_flag == 0) {
        /*
         * This GID may have been already probed. Send
         * in a CLP to check if IOUnitInfo changed?
         * Explicitly set gl_reprobe_flag to 0 so that
         * IBnex is not notified on completion
         */
        if (gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) {
            IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: "
                "get new IOCs information");
            mutex_enter(&gid_info->gl_mutex);
            gid_info->gl_pending_cmds++;
            gid_info->gl_state = IBDM_GET_IOUNITINFO;
            gid_info->gl_reprobe_flag = 0;
            mutex_exit(&gid_info->gl_mutex);
            if (ibdm_send_iounitinfo(gid_info) != IBDM_SUCCESS) {
                mutex_enter(&gid_info->gl_mutex);
                gid_info->gl_pending_cmds = 0;
                mutex_exit(&gid_info->gl_mutex);
                mutex_enter(&ibdm.ibdm_mutex);
                --ibdm.ibdm_ngid_probes_in_progress;
                ibdm_wakeup_probe_gid_cv();
                mutex_exit(&ibdm.ibdm_mutex);
            }
        } else {
            mutex_enter(&ibdm.ibdm_mutex);
            --ibdm.ibdm_ngid_probes_in_progress;
            ibdm_wakeup_probe_gid_cv();
            mutex_exit(&ibdm.ibdm_mutex);
        }
        return;
    } else if (reprobe_flag && gid_info->gl_state ==
        IBDM_GID_PROBING_COMPLETE) {
        /*
         * Reprobe all IOCs for the GID which has completed
         * probe. Skip other port GIDs to same IOU.
         * Explicitly set gl_reprobe_flag to 0 so that
         * IBnex is not notified on completion
         */
        ibdm_ioc_info_t *ioc_info;
        uint8_t niocs, ii;

        ASSERT(gid_info->gl_iou);
        mutex_enter(&gid_info->gl_mutex);
        niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
        gid_info->gl_state = IBDM_GET_IOC_DETAILS;
        gid_info->gl_pending_cmds += niocs;
        gid_info->gl_reprobe_flag = 0;
        mutex_exit(&gid_info->gl_mutex);
        for (ii = 0; ii < niocs; ii++) {
            uchar_t slot_info;
            ib_dm_io_unitinfo_t *giou_info;

            /*
             * Check whether IOC is present in the slot
             * Series of nibbles (in the field
             * iou_ctrl_list) represents a slot in the
             * IOU.
             * Byte format: 76543210
             * Bits 0-3 of first byte represent Slot 2
             * bits 4-7 of first byte represent slot 1,
             * bits 0-3 of second byte represent slot 4
             * and so on
             * Each 4-bit nibble has the following meaning
             * 0x0 : IOC not installed
             * 0x1 : IOC is present
             * 0xf : Slot does not exist
             * and all other values are reserved.
             */
            ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii);
            giou_info = &gid_info->gl_iou->iou_info;
            slot_info = giou_info->iou_ctrl_list[(ii/2)];
            if ((ii % 2) == 0)
                slot_info = (slot_info >> 4);

            if ((slot_info & 0xf) != 1) {
                ioc_info->ioc_state =
                    IBDM_IOC_STATE_PROBE_FAILED;
                ibdm_gid_decr_pending(gid_info);
                continue;
            }

            if (ibdm_send_ioc_profile(gid_info, ii) !=
                IBDM_SUCCESS) {
                ibdm_gid_decr_pending(gid_info);
            }
        }

        return;
    } else if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) {
        mutex_enter(&ibdm.ibdm_mutex);
        --ibdm.ibdm_ngid_probes_in_progress;
        ibdm_wakeup_probe_gid_cv();
        mutex_exit(&ibdm.ibdm_mutex);
        return;
    }

    mutex_enter(&gid_info->gl_mutex);
    gid_info->gl_pending_cmds++;
    gid_info->gl_state = IBDM_GET_CLASSPORTINFO;
    mutex_exit(&gid_info->gl_mutex);

    /*
     * Check whether the destination GID supports DM agents. If
     * not, stop probing the GID and continue with the next GID
     * in the list.
     */
    if (ibdm_is_dev_mgt_supported(gid_info) != IBDM_SUCCESS) {
        mutex_enter(&gid_info->gl_mutex);
        gid_info->gl_pending_cmds = 0;
        gid_info->gl_state = IBDM_GID_PROBING_FAILED;
        mutex_exit(&gid_info->gl_mutex);
        ibdm_delete_glhca_list(gid_info);
        mutex_enter(&ibdm.ibdm_mutex);
        --ibdm.ibdm_ngid_probes_in_progress;
        ibdm_wakeup_probe_gid_cv();
        mutex_exit(&ibdm.ibdm_mutex);
        return;
    }

    /* Get the nodeguid and portguid of the port */
    if (ibdm_get_node_port_guids(gid_info->gl_sa_hdl, gid_info->gl_dlid,
        &node_guid, &port_guid) != IBDM_SUCCESS) {
        mutex_enter(&gid_info->gl_mutex);
        gid_info->gl_pending_cmds = 0;
        gid_info->gl_state = IBDM_GID_PROBING_FAILED;
        mutex_exit(&gid_info->gl_mutex);
        ibdm_delete_glhca_list(gid_info);
        mutex_enter(&ibdm.ibdm_mutex);
        --ibdm.ibdm_ngid_probes_in_progress;
        ibdm_wakeup_probe_gid_cv();
        mutex_exit(&ibdm.ibdm_mutex);
        return;
    }

    /*
     * Check whether we already knew about this NodeGuid
     * If so, do not probe the GID and continue with the
     * next GID in the gid list. Set the GID state to
     * probing done.
     */
    mutex_enter(&ibdm.ibdm_mutex);
    gid_info->gl_nodeguid = node_guid;
    gid_info->gl_portguid = port_guid;
    if (ibdm_check_dest_nodeguid(gid_info) != NULL) {
        mutex_exit(&ibdm.ibdm_mutex);
        mutex_enter(&gid_info->gl_mutex);
        gid_info->gl_pending_cmds = 0;
        gid_info->gl_state = IBDM_GID_PROBING_SKIPPED;
        mutex_exit(&gid_info->gl_mutex);
        ibdm_delete_glhca_list(gid_info);
        mutex_enter(&ibdm.ibdm_mutex);
        --ibdm.ibdm_ngid_probes_in_progress;
        ibdm_wakeup_probe_gid_cv();
        mutex_exit(&ibdm.ibdm_mutex);
        return;
    }
    ibdm_add_to_gl_gid(gid_info, gid_info);
    mutex_exit(&ibdm.ibdm_mutex);

    /*
     * New or reinserted GID : Enable notification to IBnex
     */
    mutex_enter(&gid_info->gl_mutex);
    gid_info->gl_reprobe_flag = 1;
    mutex_exit(&gid_info->gl_mutex);

    /*
     * Send ClassPortInfo request to the GID asynchronously.
     */
    if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) {
        mutex_enter(&gid_info->gl_mutex);
        gid_info->gl_state = IBDM_GID_PROBING_FAILED;
        gid_info->gl_pending_cmds = 0;
        mutex_exit(&gid_info->gl_mutex);
        ibdm_delete_glhca_list(gid_info);
        mutex_enter(&ibdm.ibdm_mutex);
        --ibdm.ibdm_ngid_probes_in_progress;
        ibdm_wakeup_probe_gid_cv();
        mutex_exit(&ibdm.ibdm_mutex);
        return;
    }
}
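
/*
 * Editor's note -- illustrative sketch, not part of the original driver:
 * the IOU slot-occupancy test open-coded in ibdm_probe_gid_thread() above
 * packs two slots per byte of iou_ctrl_list, with the even-numbered index in
 * the high nibble and the value 0x1 meaning "IOC present".  A hypothetical
 * helper showing just the decoding would look like:
 *
 *	static boolean_t
 *	ibdm_slot_present(ib_dm_io_unitinfo_t *giou_info, uint_t idx)
 *	{
 *		uint8_t nibbles = giou_info->iou_ctrl_list[idx / 2];
 *
 *		if ((idx % 2) == 0)
 *			nibbles >>= 4;		even index: high nibble
 *		return (((nibbles & 0xf) == 0x1) ? B_TRUE : B_FALSE);
 *	}
 */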


/*
 * ibdm_check_dest_nodeguid
 *	Searches for the NodeGuid in the GID list
 *	Returns matching gid_info if found and otherwise NULL
 *
 *	This function is called to handle new GIDs discovered
 *	during device sweep / probe or for GID_AVAILABLE event.
 *
 *	Parameter :
 *		gid_info	GID to check
 */
static ibdm_dp_gidinfo_t *
ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *gid_info)
{
    ibdm_dp_gidinfo_t *gid_list;
    ibdm_gid_t *tmp;

    IBTF_DPRINTF_L4("ibdm", "\tcheck_dest_nodeguid");

    gid_list = ibdm.ibdm_dp_gidlist_head;
    while (gid_list) {
        if ((gid_list != gid_info) &&
            (gid_info->gl_nodeguid == gid_list->gl_nodeguid)) {
            IBTF_DPRINTF_L4("ibdm",
                "\tcheck_dest_nodeguid: NodeGuid is present");

            /* Add to gid_list */
            tmp = kmem_zalloc(sizeof (ibdm_gid_t),
                KM_SLEEP);
            tmp->gid_dgid_hi = gid_info->gl_dgid_hi;
            tmp->gid_dgid_lo = gid_info->gl_dgid_lo;
            tmp->gid_next = gid_list->gl_gid;
            gid_list->gl_gid = tmp;
            gid_list->gl_ngids++;
            return (gid_list);
        }

        gid_list = gid_list->gl_next;
    }

    return (NULL);
}


/*
 * ibdm_is_dev_mgt_supported
 *	Get the PortInfo attribute (SA Query)
 *	Check "CapabilityMask" field in the Portinfo.
 *	Return IBDM_SUCCESS if DM MAD's supported (if bit 19 set)
 *	by the port, otherwise IBDM_FAILURE
 */
static int
ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *gid_info)
{
    int ret;
    size_t length = 0;
    sa_portinfo_record_t req, *resp = NULL;
    ibmf_saa_access_args_t qargs;

    bzero(&req, sizeof (sa_portinfo_record_t));
    req.EndportLID = gid_info->gl_dlid;

    qargs.sq_attr_id = SA_PORTINFORECORD_ATTRID;
    qargs.sq_access_type = IBMF_SAA_RETRIEVE;
    qargs.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID;
    qargs.sq_template = &req;
    qargs.sq_callback = NULL;
    qargs.sq_callback_arg = NULL;

    ret = ibmf_sa_access(gid_info->gl_sa_hdl,
        &qargs, 0, &length, (void **)&resp);

    if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) {
        IBTF_DPRINTF_L2("ibdm", "\tis_dev_mgt_supported:"
            "failed to get PORTINFO attribute %d", ret);
        return (IBDM_FAILURE);
    }

    if (resp->PortInfo.CapabilityMask & SM_CAP_MASK_IS_DM_SUPPD) {
        IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: SUPPD !!");
        ret = IBDM_SUCCESS;
    } else {
        IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: "
            "Not SUPPD !!, cap 0x%x", resp->PortInfo.CapabilityMask);
        ret = IBDM_FAILURE;
    }
    kmem_free(resp, length);
    return (ret);
}


/*
 * ibdm_get_node_port_guids()
 *	Get the NodeInfoRecord of the port
 *	Save NodeGuid and PortGUID values in the GID list structure.
 *	Return IBDM_SUCCESS/IBDM_FAILURE
 */
static int
ibdm_get_node_port_guids(ibmf_saa_handle_t sa_hdl, ib_lid_t dlid,
    ib_guid_t *node_guid, ib_guid_t *port_guid)
{
    int ret;
    size_t length = 0;
    sa_node_record_t req, *resp = NULL;
    ibmf_saa_access_args_t qargs;

    IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids");

    bzero(&req, sizeof (sa_node_record_t));
    req.LID = dlid;

    qargs.sq_attr_id = SA_NODERECORD_ATTRID;
    qargs.sq_access_type = IBMF_SAA_RETRIEVE;
    qargs.sq_component_mask = SA_NODEINFO_COMPMASK_NODELID;
    qargs.sq_template = &req;
    qargs.sq_callback = NULL;
    qargs.sq_callback_arg = NULL;

    ret = ibmf_sa_access(sa_hdl, &qargs, 0, &length, (void **)&resp);
    if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) {
        IBTF_DPRINTF_L2("ibdm", "\tget_node_port_guids:"
            " SA Retrieve Failed: %d", ret);
        return (IBDM_FAILURE);
    }
    IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids: NodeGuid %llx Port"
        "GUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.PortGUID);

    *node_guid = resp->NodeInfo.NodeGUID;
    *port_guid = resp->NodeInfo.PortGUID;
    kmem_free(resp, length);
    return (IBDM_SUCCESS);
}


/*
 * ibdm_get_reachable_ports()
 *	Get list of the destination GID (and its path records) by
 *	querying the SA access.
 *
 *	Returns the number of paths
 */
static int
ibdm_get_reachable_ports(ibdm_port_attr_t *portinfo, ibdm_hca_list_t *hca)
{
    uint_t ii, jj, nrecs;
    uint_t npaths = 0;
    size_t length;
    ib_gid_t sgid;
    ibdm_pkey_tbl_t *pkey_tbl;
    sa_path_record_t *result;
    sa_path_record_t *precp;
    ibdm_dp_gidinfo_t *gid_info;

    ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));
    IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: portinfo %p", portinfo);

    sgid.gid_prefix = portinfo->pa_sn_prefix;
    sgid.gid_guid = portinfo->pa_port_guid;

    /* get reversible paths */
    if (portinfo->pa_sa_hdl && ibmf_saa_paths_from_gid(portinfo->pa_sa_hdl,
        sgid, IBMF_SAA_PKEY_WC, B_TRUE, 0, &nrecs, &length, &result)
        != IBMF_SUCCESS) {
        IBTF_DPRINTF_L2("ibdm",
            "\tget_reachable_ports: Getting path records failed");
        return (0);
    }

    for (ii = 0; ii < nrecs; ii++) {
        precp = &result[ii];
        if ((gid_info = ibdm_check_dgid(precp->DGID.gid_guid,
            precp->DGID.gid_prefix)) != NULL) {
            IBTF_DPRINTF_L5("ibdm", "\tget_reachable_ports: "
                "Already exists nrecs %d, ii %d", nrecs, ii);
            ibdm_addto_glhcalist(gid_info, hca);
            continue;
        }
        /*
         * This is a new GID. Allocate a GID structure and
         * initialize the structure
         * gl_state is initialized to IBDM_GID_PROBE_NOT_DONE (0)
         * by kmem_zalloc call
         */
        gid_info = kmem_zalloc(sizeof (ibdm_dp_gidinfo_t), KM_SLEEP);
        mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL);
        gid_info->gl_dgid_hi = precp->DGID.gid_prefix;
        gid_info->gl_dgid_lo = precp->DGID.gid_guid;
        gid_info->gl_sgid_hi = precp->SGID.gid_prefix;
        gid_info->gl_sgid_lo = precp->SGID.gid_guid;
        gid_info->gl_p_key = precp->P_Key;
        gid_info->gl_sa_hdl = portinfo->pa_sa_hdl;
        gid_info->gl_ibmf_hdl = portinfo->pa_ibmf_hdl;
        gid_info->gl_slid = precp->SLID;
        gid_info->gl_dlid = precp->DLID;
        gid_info->gl_transactionID = (++ibdm.ibdm_transactionID)
            << IBDM_GID_TRANSACTIONID_SHIFT;
        gid_info->gl_min_transactionID = gid_info->gl_transactionID;
        gid_info->gl_max_transactionID = (ibdm.ibdm_transactionID + 1)
            << IBDM_GID_TRANSACTIONID_SHIFT;
        ibdm_addto_glhcalist(gid_info, hca);

        ibdm_dump_path_info(precp);

        gid_info->gl_qp_hdl = NULL;
        ASSERT(portinfo->pa_pkey_tbl != NULL &&
            portinfo->pa_npkeys != 0);

        for (jj = 0; jj < portinfo->pa_npkeys; jj++) {
            pkey_tbl = &portinfo->pa_pkey_tbl[jj];
            if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) &&
                (pkey_tbl->pt_qp_hdl != NULL)) {
                gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl;
                break;
            }
        }

        /*
         * QP handle for GID not initialized. No matching Pkey
         * was found!! ibdm should *not* hit this case. Flag an
         * error and drop the GID if ibdm does encounter this.
         */
        if (gid_info->gl_qp_hdl == NULL) {
            IBTF_DPRINTF_L2(ibdm_string,
                "\tget_reachable_ports: No matching Pkey");
            ibdm_delete_gidinfo(gid_info);
            continue;
        }
        if (ibdm.ibdm_dp_gidlist_head == NULL) {
            ibdm.ibdm_dp_gidlist_head = gid_info;
            ibdm.ibdm_dp_gidlist_tail = gid_info;
        } else {
            ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info;
            gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail;
            ibdm.ibdm_dp_gidlist_tail = gid_info;
        }
        npaths++;
    }
    kmem_free(result, length);
    IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: npaths = %d", npaths);
    return (npaths);
}


/*
 * ibdm_check_dgid()
 *	Look in the global list to check whether we know this DGID already
 *	Returns a pointer to the matching gidinfo if found, otherwise NULL
 */
static ibdm_dp_gidinfo_t *
ibdm_check_dgid(ib_guid_t guid, ib_sn_prefix_t prefix)
{
    ibdm_dp_gidinfo_t *gid_list;

    for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list;
        gid_list = gid_list->gl_next) {
        if ((guid == gid_list->gl_dgid_lo) &&
            (prefix == gid_list->gl_dgid_hi)) {
            break;
        }
    }
    return (gid_list);
}


/*
 * ibdm_find_gid()
 *	Look in the global list to find a GID entry with matching
 *	port & node GUID.
 *	Return pointer to gidinfo if found, else return NULL
 */
static ibdm_dp_gidinfo_t *
ibdm_find_gid(ib_guid_t nodeguid, ib_guid_t portguid)
{
    ibdm_dp_gidinfo_t *gid_list;

    IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid(%llx, %llx)\n",
        nodeguid, portguid);

    for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list;
        gid_list = gid_list->gl_next) {
        if ((portguid == gid_list->gl_portguid) &&
            (nodeguid == gid_list->gl_nodeguid)) {
            break;
        }
    }

    IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid : returned %p\n",
        gid_list);
    return (gid_list);
}


/*
 * ibdm_send_classportinfo()
 *	Send classportinfo request. When the request is completed
 *	IBMF calls ibdm_classportinfo_cb routine to inform about
 *	the completion.
 *	Returns IBDM_SUCCESS/IBDM_FAILURE
 */
static int
ibdm_send_classportinfo(ibdm_dp_gidinfo_t *gid_info)
{
    ibmf_msg_t *msg;
    ib_mad_hdr_t *hdr;
    ibdm_timeout_cb_args_t *cb_args;

    IBTF_DPRINTF_L4("ibdm",
        "\tsend_classportinfo: gid info 0x%p", gid_info);

    /*
     * Send command to get classportinfo attribute. Allocate an IBMF
     * packet and initialize the packet.
1836 */ 1837 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 1838 &msg) != IBMF_SUCCESS) { 1839 IBTF_DPRINTF_L4("ibdm", "\tsend_classportinfo: pkt alloc fail"); 1840 return (IBDM_FAILURE); 1841 } 1842 1843 ibdm_alloc_send_buffers(msg); 1844 1845 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 1846 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 1847 msg->im_local_addr.ia_remote_qno = 1; 1848 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 1849 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 1850 1851 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 1852 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 1853 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 1854 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 1855 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 1856 hdr->Status = 0; 1857 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 1858 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 1859 hdr->AttributeModifier = 0; 1860 1861 cb_args = &gid_info->gl_cpi_cb_args; 1862 cb_args->cb_gid_info = gid_info; 1863 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 1864 cb_args->cb_req_type = IBDM_REQ_TYPE_CLASSPORTINFO; 1865 1866 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 1867 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 1868 1869 IBTF_DPRINTF_L5("ibdm", "\tsend_classportinfo: " 1870 "timeout id %x", gid_info->gl_timeout_id); 1871 1872 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 1873 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 1874 IBTF_DPRINTF_L2("ibdm", 1875 "\tsend_classportinfo: ibmf send failed"); 1876 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 1877 } 1878 1879 return (IBDM_SUCCESS); 1880 } 1881 1882 1883 /* 1884 * ibdm_handle_classportinfo() 1885 * Invoked by the IBMF when the classportinfo request is completed. 1886 */ 1887 static void 1888 ibdm_handle_classportinfo(ibmf_handle_t ibmf_hdl, 1889 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 1890 { 1891 void *data; 1892 timeout_id_t timeout_id; 1893 ib_mad_hdr_t *hdr; 1894 ibdm_mad_classportinfo_t *cpi; 1895 1896 IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo:ibmf hdl " 1897 "%p msg %p gid info %p", ibmf_hdl, msg, gid_info); 1898 1899 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO) { 1900 IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo: " 1901 "Not a ClassPortInfo resp"); 1902 *flag |= IBDM_IBMF_PKT_UNEXP_RESP; 1903 return; 1904 } 1905 1906 /* 1907 * Verify whether timeout handler is created/active. 
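 *
 * (Aside, not part of the driver: this cancel-then-clear sequence is a
 * recurring pattern in this file.  untimeout(9F) may block until a
 * running handler returns, and ibdm_pkt_timeout_hdlr() takes the same
 * per-GID mutex, so the lock is dropped around the cancellation.  A
 * sketch of the pattern, assuming the usual DDI headers; the field
 * names are illustrative:
 *
 *      static void
 *      cancel_pending_timeout(kmutex_t *lock, timeout_id_t *idp)
 *      {
 *              timeout_id_t id;
 *
 *              ASSERT(MUTEX_HELD(lock));
 *              if ((id = *idp) != 0) {
 *                      mutex_exit(lock);
 *                      (void) untimeout(id);   // -1 if it already fired
 *                      mutex_enter(lock);
 *                      *idp = 0;
 *              }
 *      }
 *
 * This mirrors the drop/untimeout/retake steps that follow.)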
1908 * If created/ active, cancel the timeout handler 1909 */ 1910 mutex_enter(&gid_info->gl_mutex); 1911 ibdm_bump_transactionID(gid_info); 1912 if (gid_info->gl_state != IBDM_GET_CLASSPORTINFO) { 1913 IBTF_DPRINTF_L2("ibdm", "\thandle_classportinfo:DUP resp"); 1914 *flag |= IBDM_IBMF_PKT_DUP_RESP; 1915 mutex_exit(&gid_info->gl_mutex); 1916 return; 1917 } 1918 gid_info->gl_iou_cb_args.cb_req_type = 0; 1919 if (gid_info->gl_timeout_id) { 1920 timeout_id = gid_info->gl_timeout_id; 1921 mutex_exit(&gid_info->gl_mutex); 1922 IBTF_DPRINTF_L5("ibdm", "handle_ioclassportinfo: " 1923 "gl_timeout_id = 0x%x", timeout_id); 1924 if (untimeout(timeout_id) == -1) { 1925 IBTF_DPRINTF_L2("ibdm", "handle_classportinfo: " 1926 "untimeout gl_timeout_id failed"); 1927 } 1928 mutex_enter(&gid_info->gl_mutex); 1929 gid_info->gl_timeout_id = 0; 1930 } 1931 gid_info->gl_state = IBDM_GET_IOUNITINFO; 1932 gid_info->gl_pending_cmds++; 1933 mutex_exit(&gid_info->gl_mutex); 1934 1935 data = msg->im_msgbufs_recv.im_bufs_cl_data; 1936 cpi = (ibdm_mad_classportinfo_t *)data; 1937 1938 /* 1939 * Cache the "RespTimeValue" and redirection information in the 1940 * global gid list data structure. This cached information will 1941 * be used to send any further requests to the GID. 1942 */ 1943 gid_info->gl_resp_timeout = 1944 (b2h32(cpi->RespTimeValue) & 0x1F); 1945 1946 gid_info->gl_redirected = ((IBDM_IN_IBMFMSG_STATUS(msg) & 1947 MAD_STATUS_REDIRECT_REQUIRED) ? B_TRUE: B_FALSE); 1948 gid_info->gl_redirect_dlid = b2h16(cpi->RedirectLID); 1949 gid_info->gl_redirect_QP = (b2h32(cpi->RedirectQP) & 0xffffff); 1950 gid_info->gl_redirect_pkey = b2h16(cpi->RedirectP_Key); 1951 gid_info->gl_redirect_qkey = b2h32(cpi->RedirectQ_Key); 1952 gid_info->gl_redirectGID_hi = b2h64(cpi->RedirectGID_hi); 1953 gid_info->gl_redirectGID_lo = b2h64(cpi->RedirectGID_lo); 1954 1955 ibdm_dump_classportinfo(cpi); 1956 1957 /* 1958 * Send IOUnitInfo request 1959 * Reuse previously allocated IBMF packet for sending ClassPortInfo 1960 * Check whether DM agent on the remote node requested redirection 1961 * If so, send the request to the redirect DGID/DLID/PKEY/QP. 
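 *
 * (Aside, not part of the driver: the choice of destination made below
 * is the same everywhere in this file.  A standalone sketch of that
 * selection; 0x80010000 is the well-known GSI Q_Key, and the parameter
 * names are illustrative:
 *
 *      #include <stdint.h>
 *
 *      #define GSI_QKEY        0x80010000u
 *
 *      struct dm_dest {
 *              uint16_t dlid;
 *              uint32_t qpn;
 *              uint16_t pkey;
 *              uint32_t qkey;
 *      };
 *
 *      static void
 *      dm_pick_dest(struct dm_dest *d, int redirected, uint16_t dflt_dlid,
 *          uint16_t dflt_pkey, uint16_t rdr_dlid, uint32_t rdr_qpn,
 *          uint16_t rdr_pkey, uint32_t rdr_qkey)
 *      {
 *              if (redirected) {
 *                      d->dlid = (rdr_dlid != 0) ? rdr_dlid : dflt_dlid;
 *                      d->qpn = rdr_qpn;       // 24-bit QPN from the CPI
 *                      d->pkey = rdr_pkey;
 *                      d->qkey = rdr_qkey;
 *              } else {
 *                      d->dlid = dflt_dlid;
 *                      d->qpn = 1;             // GSI is always QP1
 *                      d->pkey = dflt_pkey;
 *                      d->qkey = GSI_QKEY;
 *              }
 *      }
 *
 * The masking of RespTimeValue (low 5 bits) and RedirectQP (low 24
 * bits) above reflects the ClassPortInfo field widths.)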
1962 */ 1963 ibdm_alloc_send_buffers(msg); 1964 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 1965 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 1966 1967 if (gid_info->gl_redirected == B_TRUE) { 1968 if (gid_info->gl_redirect_dlid != 0) { 1969 msg->im_local_addr.ia_remote_lid = 1970 gid_info->gl_redirect_dlid; 1971 } 1972 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 1973 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 1974 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 1975 } else { 1976 msg->im_local_addr.ia_remote_qno = 1; 1977 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 1978 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 1979 } 1980 1981 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 1982 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 1983 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 1984 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 1985 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 1986 hdr->Status = 0; 1987 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 1988 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 1989 hdr->AttributeModifier = 0; 1990 1991 gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO; 1992 gid_info->gl_iou_cb_args.cb_gid_info = gid_info; 1993 gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt; 1994 1995 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 1996 &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 1997 1998 IBTF_DPRINTF_L5("ibdm", "handle_classportinfo:" 1999 "timeout %x", gid_info->gl_timeout_id); 2000 2001 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, NULL, 2002 ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) != IBMF_SUCCESS) { 2003 IBTF_DPRINTF_L2("ibdm", 2004 "\thandle_classportinfo: msg transport failed"); 2005 ibdm_ibmf_send_cb(ibmf_hdl, msg, &gid_info->gl_iou_cb_args); 2006 } 2007 (*flag) |= IBDM_IBMF_PKT_REUSED; 2008 } 2009 2010 2011 /* 2012 * ibdm_send_iounitinfo: 2013 * Sends a DM request to get IOU unitinfo. 2014 */ 2015 static int 2016 ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *gid_info) 2017 { 2018 ibmf_msg_t *msg; 2019 ib_mad_hdr_t *hdr; 2020 2021 IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: gid info 0x%p", gid_info); 2022 2023 /* 2024 * Send command to get iounitinfo attribute. Allocate a IBMF 2025 * packet and initialize the packet. 
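 *
 * (Aside, not part of the driver: the probe of a single GID walks a
 * small state machine: GET_CLASSPORTINFO -> GET_IOUNITINFO ->
 * GET_IOC_DETAILS -> PROBING_COMPLETE, with PROBING_FAILED reached
 * when a request exhausts its retries.  A sketch of the successful
 * path, using abbreviated state names:
 *
 *      enum probe_state {
 *              PROBE_NOT_DONE,         // initial (zeroed) state
 *              GET_CLASSPORTINFO,
 *              GET_IOUNITINFO,
 *              GET_IOC_DETAILS,
 *              PROBING_COMPLETE,
 *              PROBING_FAILED
 *      };
 *
 *      static enum probe_state
 *      probe_next_state(enum probe_state s)
 *      {
 *              switch (s) {
 *              case GET_CLASSPORTINFO: return (GET_IOUNITINFO);
 *              case GET_IOUNITINFO:    return (GET_IOC_DETAILS);
 *              case GET_IOC_DETAILS:   return (PROBING_COMPLETE);
 *              default:                return (s);
 *              }
 *      }
 *
 * The real transitions also account for the pending-command count, so
 * PROBING_COMPLETE is entered only after the last response or timeout
 * for this GID has been processed.)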
2026 */ 2027 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, &msg) != 2028 IBMF_SUCCESS) { 2029 IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: pkt alloc fail"); 2030 return (IBDM_FAILURE); 2031 } 2032 2033 mutex_enter(&gid_info->gl_mutex); 2034 ibdm_bump_transactionID(gid_info); 2035 mutex_exit(&gid_info->gl_mutex); 2036 2037 2038 ibdm_alloc_send_buffers(msg); 2039 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2040 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2041 msg->im_local_addr.ia_remote_qno = 1; 2042 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2043 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2044 2045 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2046 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2047 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2048 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2049 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2050 hdr->Status = 0; 2051 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2052 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 2053 hdr->AttributeModifier = 0; 2054 2055 gid_info->gl_iou_cb_args.cb_gid_info = gid_info; 2056 gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt; 2057 gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO; 2058 2059 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2060 &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2061 2062 IBTF_DPRINTF_L5("ibdm", "send_iouunitinfo:" 2063 "timeout %x", gid_info->gl_timeout_id); 2064 2065 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg, 2066 NULL, ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) != 2067 IBMF_SUCCESS) { 2068 IBTF_DPRINTF_L2("ibdm", "\tsend_iounitinfo: ibmf send failed"); 2069 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, 2070 msg, &gid_info->gl_iou_cb_args); 2071 } 2072 return (IBDM_SUCCESS); 2073 } 2074 2075 /* 2076 * ibdm_handle_iounitinfo() 2077 * Invoked by the IBMF when IO Unitinfo request is completed. 
2078 */ 2079 static void 2080 ibdm_handle_iounitinfo(ibmf_handle_t ibmf_hdl, 2081 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2082 { 2083 int ii, first = B_TRUE; 2084 int num_iocs; 2085 size_t size; 2086 uchar_t slot_info; 2087 timeout_id_t timeout_id; 2088 ib_mad_hdr_t *hdr; 2089 ibdm_ioc_info_t *ioc_info; 2090 ib_dm_io_unitinfo_t *iou_info; 2091 ib_dm_io_unitinfo_t *giou_info; 2092 ibdm_timeout_cb_args_t *cb_args; 2093 2094 IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo:" 2095 " ibmf hdl %p pkt %p gid info %p", ibmf_hdl, msg, gid_info); 2096 2097 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_IO_UNITINFO) { 2098 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: " 2099 "Unexpected response"); 2100 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2101 return; 2102 } 2103 2104 mutex_enter(&gid_info->gl_mutex); 2105 if (gid_info->gl_state != IBDM_GET_IOUNITINFO) { 2106 IBTF_DPRINTF_L4("ibdm", 2107 "\thandle_iounitinfo: DUP resp"); 2108 mutex_exit(&gid_info->gl_mutex); 2109 (*flag) = IBDM_IBMF_PKT_DUP_RESP; 2110 return; 2111 } 2112 gid_info->gl_iou_cb_args.cb_req_type = 0; 2113 if (gid_info->gl_timeout_id) { 2114 timeout_id = gid_info->gl_timeout_id; 2115 mutex_exit(&gid_info->gl_mutex); 2116 IBTF_DPRINTF_L5("ibdm", "handle_iounitinfo: " 2117 "gl_timeout_id = 0x%x", timeout_id); 2118 if (untimeout(timeout_id) == -1) { 2119 IBTF_DPRINTF_L2("ibdm", "handle_iounitinfo: " 2120 "untimeout gl_timeout_id failed"); 2121 } 2122 mutex_enter(&gid_info->gl_mutex); 2123 gid_info->gl_timeout_id = 0; 2124 } 2125 gid_info->gl_state = IBDM_GET_IOC_DETAILS; 2126 2127 iou_info = IBDM_IN_IBMFMSG2IOU(msg); 2128 ibdm_dump_iounitinfo(iou_info); 2129 num_iocs = iou_info->iou_num_ctrl_slots; 2130 /* 2131 * check if number of IOCs reported is zero? if yes, return. 2132 * when num_iocs are reported zero internal IOC database needs 2133 * to be updated. To ensure that save the number of IOCs in 2134 * the new field "gl_num_iocs". Use a new field instead of 2135 * "giou_info->iou_num_ctrl_slots" as that would prevent 2136 * an unnecessary kmem_alloc/kmem_free when num_iocs is 0. 2137 */ 2138 if (num_iocs == 0 && gid_info->gl_num_iocs == 0) { 2139 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: no IOC's"); 2140 mutex_exit(&gid_info->gl_mutex); 2141 return; 2142 } 2143 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: num_iocs = %d", num_iocs); 2144 2145 /* 2146 * if there is an existing gl_iou (IOU has been probed before) 2147 * check if the "iou_changeid" is same as saved entry in 2148 * "giou_info->iou_changeid". 2149 * (note: this logic can prevent IOC enumeration if a given 2150 * vendor doesn't support setting iou_changeid field for its IOU) 2151 * 2152 * if there is an existing gl_iou and iou_changeid has changed : 2153 * free up existing gl_iou info and its related structures. 2154 * reallocate gl_iou info all over again. 
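 *
 * (Aside, not part of the driver: a sketch of the change-ID check
 * described here, in userland C for clarity.  When the reported 16-bit
 * change ID still matches the cached one the old IOC array is kept;
 * otherwise it is torn down and rebuilt:
 *
 *      #include <stdint.h>
 *      #include <stdlib.h>
 *
 *      struct iou_cache {
 *              uint16_t changeid;      // saved at the last probe
 *              unsigned nslots;
 *              void    *slots;         // per-IOC state
 *      };
 *
 *      // Returns 1 when the cached snapshot is still current.
 *      static int
 *      iou_cache_revalidate(struct iou_cache *c, uint16_t reported)
 *      {
 *              if (c->slots != NULL && c->changeid == reported)
 *                      return (1);
 *              free(c->slots);         // stale: rebuild on this probe
 *              c->slots = NULL;
 *              c->nslots = 0;
 *              c->changeid = reported;
 *              return (0);
 *      }
 *
 * The driver's variant also frees the related per-IOC structures.)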
2155 * if we donot free this up; then this leads to memory leaks 2156 */ 2157 if (gid_info->gl_iou) { 2158 giou_info = &gid_info->gl_iou->iou_info; 2159 if (iou_info->iou_changeid == giou_info->iou_changeid) { 2160 IBTF_DPRINTF_L3("ibdm", 2161 "\thandle_iounitinfo: no IOCs changed"); 2162 gid_info->gl_state = IBDM_GID_PROBING_COMPLETE; 2163 mutex_exit(&gid_info->gl_mutex); 2164 return; 2165 } 2166 if (ibdm_free_iou_info(gid_info)) { 2167 IBTF_DPRINTF_L3("ibdm", 2168 "\thandle_iounitinfo: failed to cleanup resources"); 2169 gid_info->gl_state = IBDM_GID_PROBING_COMPLETE; 2170 mutex_exit(&gid_info->gl_mutex); 2171 return; 2172 } 2173 } 2174 2175 size = sizeof (ibdm_iou_info_t) + num_iocs * sizeof (ibdm_ioc_info_t); 2176 gid_info->gl_iou = (ibdm_iou_info_t *)kmem_zalloc(size, KM_SLEEP); 2177 giou_info = &gid_info->gl_iou->iou_info; 2178 gid_info->gl_iou->iou_ioc_info = (ibdm_ioc_info_t *) 2179 ((char *)gid_info->gl_iou + sizeof (ibdm_iou_info_t)); 2180 2181 giou_info->iou_num_ctrl_slots = gid_info->gl_num_iocs = num_iocs; 2182 giou_info->iou_flag = iou_info->iou_flag; 2183 bcopy(iou_info->iou_ctrl_list, giou_info->iou_ctrl_list, 128); 2184 giou_info->iou_changeid = b2h16(iou_info->iou_changeid); 2185 gid_info->gl_pending_cmds += num_iocs; 2186 gid_info->gl_pending_cmds += 1; /* for diag code */ 2187 mutex_exit(&gid_info->gl_mutex); 2188 2189 if (ibdm_get_diagcode(gid_info, 0) != IBDM_SUCCESS) { 2190 mutex_enter(&gid_info->gl_mutex); 2191 gid_info->gl_pending_cmds--; 2192 mutex_exit(&gid_info->gl_mutex); 2193 } 2194 /* 2195 * Parallelize getting IOC controller profiles from here. 2196 * Allocate IBMF packets and send commands to get IOC profile for 2197 * each IOC present on the IOU. 2198 */ 2199 for (ii = 0; ii < num_iocs; ii++) { 2200 /* 2201 * Check whether IOC is present in the slot 2202 * Series of nibbles (in the field iou_ctrl_list) represents 2203 * a slot in the IOU. 2204 * Byte format: 76543210 2205 * Bits 0-3 of first byte represent Slot 2 2206 * bits 4-7 of first byte represent slot 1, 2207 * bits 0-3 of second byte represent slot 4 and so on 2208 * Each 4-bit nibble has the following meaning 2209 * 0x0 : IOC not installed 2210 * 0x1 : IOC is present 2211 * 0xf : Slot does not exist 2212 * and all other values are reserved. 2213 */ 2214 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii); 2215 slot_info = giou_info->iou_ctrl_list[(ii/2)]; 2216 if ((ii % 2) == 0) 2217 slot_info = (slot_info >> 4); 2218 2219 if ((slot_info & 0xf) != 1) { 2220 IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo: " 2221 "No IOC is present in the slot = %d", ii); 2222 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_FAILED; 2223 mutex_enter(&gid_info->gl_mutex); 2224 gid_info->gl_pending_cmds--; 2225 mutex_exit(&gid_info->gl_mutex); 2226 continue; 2227 } 2228 2229 mutex_enter(&gid_info->gl_mutex); 2230 ibdm_bump_transactionID(gid_info); 2231 mutex_exit(&gid_info->gl_mutex); 2232 2233 /* 2234 * Re use the already allocated packet (for IOUnitinfo) to 2235 * send the first IOC controller attribute. 
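 *
 * (Aside, not part of the driver: the slot test a few lines above
 * decodes the ControllerList nibbles exactly as described in that
 * comment.  A standalone sketch of the decoding, using a 1-based slot
 * number:
 *
 *      #include <stdint.h>
 *
 *      // Two slots per byte, odd-numbered slot in the high nibble.
 *      static uint8_t
 *      iou_slot_nibble(const uint8_t ctrl_list[128], unsigned slot)
 *      {
 *              uint8_t b = ctrl_list[(slot - 1) / 2];
 *
 *              return ((slot & 1) ? (uint8_t)(b >> 4) : (uint8_t)(b & 0xf));
 *      }
 *
 *      // 0x1 = IOC installed, 0x0 = empty, 0xf = slot does not exist.
 *      static int
 *      iou_slot_has_ioc(const uint8_t ctrl_list[128], unsigned slot)
 *      {
 *              return (iou_slot_nibble(ctrl_list, slot) == 0x1);
 *      }
 *
 * Only slots whose nibble is 0x1 get an IOCControllerProfile request.)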
Allocate new 2236 * IBMF packets for the rest of the IOC's 2237 */ 2238 if (first != B_TRUE) { 2239 msg = NULL; 2240 if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP, 2241 &msg) != IBMF_SUCCESS) { 2242 IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo: " 2243 "IBMF packet allocation failed"); 2244 mutex_enter(&gid_info->gl_mutex); 2245 gid_info->gl_pending_cmds--; 2246 mutex_exit(&gid_info->gl_mutex); 2247 continue; 2248 } 2249 2250 } 2251 2252 /* allocate send buffers for all messages */ 2253 ibdm_alloc_send_buffers(msg); 2254 2255 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2256 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2257 if (gid_info->gl_redirected == B_TRUE) { 2258 if (gid_info->gl_redirect_dlid != 0) { 2259 msg->im_local_addr.ia_remote_lid = 2260 gid_info->gl_redirect_dlid; 2261 } 2262 msg->im_local_addr.ia_remote_qno = 2263 gid_info->gl_redirect_QP; 2264 msg->im_local_addr.ia_p_key = 2265 gid_info->gl_redirect_pkey; 2266 msg->im_local_addr.ia_q_key = 2267 gid_info->gl_redirect_qkey; 2268 } else { 2269 msg->im_local_addr.ia_remote_qno = 1; 2270 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2271 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2272 } 2273 2274 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2275 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2276 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2277 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2278 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2279 hdr->Status = 0; 2280 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2281 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 2282 hdr->AttributeModifier = h2b32(ii + 1); 2283 2284 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_INVALID; 2285 cb_args = &ioc_info->ioc_cb_args; 2286 cb_args->cb_gid_info = gid_info; 2287 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2288 cb_args->cb_req_type = IBDM_REQ_TYPE_IOCINFO; 2289 cb_args->cb_ioc_num = ii; 2290 2291 ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2292 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2293 2294 IBTF_DPRINTF_L5("ibdm", "\thandle_iounitinfo:" 2295 "timeout 0x%x, ioc_num %d", ioc_info->ioc_timeout_id, ii); 2296 2297 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, 2298 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2299 IBTF_DPRINTF_L2("ibdm", 2300 "\thandle_iounitinfo: msg transport failed"); 2301 ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args); 2302 } 2303 (*flag) |= IBDM_IBMF_PKT_REUSED; 2304 first = B_FALSE; 2305 gid_info->gl_iou->iou_niocs_probe_in_progress++; 2306 } 2307 } 2308 2309 2310 /* 2311 * ibdm_handle_ioc_profile() 2312 * Invoked by the IBMF when the IOCControllerProfile request 2313 * gets completed 2314 */ 2315 static void 2316 ibdm_handle_ioc_profile(ibmf_handle_t ibmf_hdl, 2317 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2318 { 2319 int first = B_TRUE, reprobe = 0; 2320 uint_t ii, ioc_no, srv_start; 2321 uint_t nserv_entries; 2322 timeout_id_t timeout_id; 2323 ib_mad_hdr_t *hdr; 2324 ibdm_ioc_info_t *ioc_info; 2325 ibdm_timeout_cb_args_t *cb_args; 2326 ib_dm_ioc_ctrl_profile_t *ioc, *gioc; 2327 2328 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile:" 2329 " ibmf hdl %p msg %p gid info %p", ibmf_hdl, msg, gid_info); 2330 2331 ioc = IBDM_IN_IBMFMSG2IOC(msg); 2332 /* 2333 * Check whether we know this IOC already 2334 * This will return NULL if reprobe is in progress 2335 * IBDM_IOC_STATE_REPROBE_PROGRESS will be set. 2336 * Do not hold mutexes here. 
2337 */ 2338 if (ibdm_is_ioc_present(ioc->ioc_guid, gid_info, flag) != NULL) { 2339 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile:" 2340 "IOC guid %llx is present", ioc->ioc_guid); 2341 return; 2342 } 2343 ioc_no = IBDM_IN_IBMFMSG_ATTRMOD(msg); 2344 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile: ioc_no = %d", ioc_no-1); 2345 2346 /* Make sure that IOC index is with the valid range */ 2347 if (IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) { 2348 IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: " 2349 "IOC index Out of range, index %d", ioc); 2350 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2351 return; 2352 } 2353 ioc_info = &gid_info->gl_iou->iou_ioc_info[ioc_no - 1]; 2354 ioc_info->ioc_iou_info = gid_info->gl_iou; 2355 2356 mutex_enter(&gid_info->gl_mutex); 2357 if (ioc_info->ioc_state == IBDM_IOC_STATE_REPROBE_PROGRESS) { 2358 reprobe = 1; 2359 ioc_info->ioc_prev_serv = ioc_info->ioc_serv; 2360 ioc_info->ioc_serv = NULL; 2361 ioc_info->ioc_prev_serv_cnt = 2362 ioc_info->ioc_profile.ioc_service_entries; 2363 } else if (ioc_info->ioc_state != IBDM_IOC_STATE_PROBE_INVALID) { 2364 IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: DUP response" 2365 "ioc %d, ioc_state %x", ioc_no - 1, ioc_info->ioc_state); 2366 mutex_exit(&gid_info->gl_mutex); 2367 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 2368 return; 2369 } 2370 ioc_info->ioc_cb_args.cb_req_type = 0; 2371 if (ioc_info->ioc_timeout_id) { 2372 timeout_id = ioc_info->ioc_timeout_id; 2373 mutex_exit(&gid_info->gl_mutex); 2374 IBTF_DPRINTF_L5("ibdm", "handle_ioc_profile: " 2375 "ioc_timeout_id = 0x%x", timeout_id); 2376 if (untimeout(timeout_id) == -1) { 2377 IBTF_DPRINTF_L2("ibdm", "handle_ioc_profile: " 2378 "untimeout ioc_timeout_id failed"); 2379 } 2380 mutex_enter(&gid_info->gl_mutex); 2381 ioc_info->ioc_timeout_id = 0; 2382 } 2383 2384 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_SUCCESS; 2385 if (reprobe == 0) { 2386 ioc_info->ioc_iou_guid = gid_info->gl_nodeguid; 2387 ioc_info->ioc_nodeguid = gid_info->gl_nodeguid; 2388 } 2389 2390 /* 2391 * Save all the IOC information in the global structures. 2392 * Note the wire format is Big Endian and the Sparc process also 2393 * big endian. So, there is no need to convert the data fields 2394 * The conversion routines used below are ineffective on Sparc 2395 * machines where as they will be effective on little endian 2396 * machines such as Intel processors. 
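 *
 * (Aside, not part of the driver: the b2h16()/b2h32() style helpers
 * referred to here convert big-endian wire fields to host order, which
 * is the identity on a big-endian host such as SPARC and a byte swap
 * on a little-endian one.  Reading the bytes explicitly gives the same
 * result on both, for example:
 *
 *      #include <stdint.h>
 *
 *      static uint16_t
 *      be16_to_host(const uint8_t *wire)
 *      {
 *              return ((uint16_t)((wire[0] << 8) | wire[1]));
 *      }
 *
 *      static uint32_t
 *      be32_to_host(const uint8_t *wire)
 *      {
 *              return (((uint32_t)wire[0] << 24) | ((uint32_t)wire[1] << 16) |
 *                  ((uint32_t)wire[2] << 8) | (uint32_t)wire[3]);
 *      }
 *
 * The vendor ID fields additionally need the mask and shift applied
 * below, since the vendor ID occupies only part of its 32-bit field.)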
2397 */ 2398 gioc = (ib_dm_ioc_ctrl_profile_t *)&ioc_info->ioc_profile; 2399 2400 /* 2401 * Restrict updates to onlyport GIDs and service entries during reprobe 2402 */ 2403 if (reprobe == 0) { 2404 gioc->ioc_guid = b2h64(ioc->ioc_guid); 2405 gioc->ioc_vendorid = 2406 ((b2h32(ioc->ioc_vendorid) & IB_DM_VENDORID_MASK) 2407 >> IB_DM_VENDORID_SHIFT); 2408 gioc->ioc_deviceid = b2h32(ioc->ioc_deviceid); 2409 gioc->ioc_device_ver = b2h16(ioc->ioc_device_ver); 2410 gioc->ioc_subsys_vendorid = 2411 ((b2h32(ioc->ioc_subsys_vendorid) & IB_DM_VENDORID_MASK) 2412 >> IB_DM_VENDORID_SHIFT); 2413 gioc->ioc_subsys_id = b2h32(ioc->ioc_subsys_id); 2414 gioc->ioc_io_class = b2h16(ioc->ioc_io_class); 2415 gioc->ioc_io_subclass = b2h16(ioc->ioc_io_subclass); 2416 gioc->ioc_protocol = b2h16(ioc->ioc_protocol); 2417 gioc->ioc_protocol_ver = b2h16(ioc->ioc_protocol_ver); 2418 gioc->ioc_send_msg_qdepth = 2419 b2h16(ioc->ioc_send_msg_qdepth); 2420 gioc->ioc_rdma_read_qdepth = 2421 b2h16(ioc->ioc_rdma_read_qdepth); 2422 gioc->ioc_send_msg_sz = b2h32(ioc->ioc_send_msg_sz); 2423 gioc->ioc_rdma_xfer_sz = b2h32(ioc->ioc_rdma_xfer_sz); 2424 gioc->ioc_ctrl_opcap_mask = ioc->ioc_ctrl_opcap_mask; 2425 bcopy(ioc->ioc_id_string, gioc->ioc_id_string, 2426 IB_DM_IOC_ID_STRING_LEN); 2427 2428 ioc_info->ioc_iou_diagcode = gid_info->gl_iou->iou_diagcode; 2429 ioc_info->ioc_iou_dc_valid = gid_info->gl_iou->iou_dc_valid; 2430 ioc_info->ioc_diagdeviceid = (IB_DM_IOU_DEVICEID_MASK & 2431 gid_info->gl_iou->iou_info.iou_flag) ? B_TRUE : B_FALSE; 2432 2433 if (ioc_info->ioc_diagdeviceid == B_TRUE) 2434 gid_info->gl_pending_cmds++; 2435 } 2436 gioc->ioc_service_entries = ioc->ioc_service_entries; 2437 gid_info->gl_pending_cmds += (gioc->ioc_service_entries/4); 2438 if (gioc->ioc_service_entries % 4) 2439 gid_info->gl_pending_cmds++; 2440 2441 mutex_exit(&gid_info->gl_mutex); 2442 2443 ibdm_dump_ioc_profile(gioc); 2444 2445 if ((ioc_info->ioc_diagdeviceid == B_TRUE) && (reprobe == 0)) { 2446 if (ibdm_get_diagcode(gid_info, ioc_no) != IBDM_SUCCESS) { 2447 mutex_enter(&gid_info->gl_mutex); 2448 gid_info->gl_pending_cmds--; 2449 mutex_exit(&gid_info->gl_mutex); 2450 } 2451 } 2452 ioc_info->ioc_serv = (ibdm_srvents_info_t *)kmem_zalloc( 2453 (gioc->ioc_service_entries * sizeof (ibdm_srvents_info_t)), 2454 KM_SLEEP); 2455 2456 /* 2457 * In one single request, maximum number of requests that can be 2458 * obtained is 4. If number of service entries are more than four, 2459 * calculate number requests needed and send them parallelly. 
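 *
 * (Aside, not part of the driver: a standalone sketch of that
 * batching.  Each ServiceEntries request returns at most four entries,
 * and its AttributeModifier packs the 1-based IOC slot in the upper 16
 * bits with the last/first entry indexes in bits 8..15 and 0..7, which
 * mirrors the decode done in ibdm_handle_srventry_mad() below:
 *
 *      #include <stdint.h>
 *
 *      #define SRV_PER_REQ     4
 *
 *      static unsigned
 *      srv_num_requests(unsigned nentries)
 *      {
 *              return ((nentries + SRV_PER_REQ - 1) / SRV_PER_REQ);
 *      }
 *
 *      static uint32_t
 *      srv_attr_mod(unsigned ioc_no, unsigned req, unsigned nentries)
 *      {
 *              unsigned start = req * SRV_PER_REQ;
 *              unsigned end = start + SRV_PER_REQ - 1;
 *
 *              if (end >= nentries)
 *                      end = nentries - 1;
 *              return (((uint32_t)ioc_no << 16) | (end << 8) | start);
 *      }
 *
 * The pending-command count bumped above is exactly
 * srv_num_requests(ioc_service_entries).)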
2460 */ 2461 nserv_entries = ioc->ioc_service_entries; 2462 ii = 0; 2463 while (nserv_entries) { 2464 mutex_enter(&gid_info->gl_mutex); 2465 ibdm_bump_transactionID(gid_info); 2466 mutex_exit(&gid_info->gl_mutex); 2467 2468 if (first != B_TRUE) { 2469 if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP, 2470 &msg) != IBMF_SUCCESS) { 2471 continue; 2472 } 2473 2474 } 2475 ibdm_alloc_send_buffers(msg); 2476 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2477 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2478 if (gid_info->gl_redirected == B_TRUE) { 2479 if (gid_info->gl_redirect_dlid != 0) { 2480 msg->im_local_addr.ia_remote_lid = 2481 gid_info->gl_redirect_dlid; 2482 } 2483 msg->im_local_addr.ia_remote_qno = 2484 gid_info->gl_redirect_QP; 2485 msg->im_local_addr.ia_p_key = 2486 gid_info->gl_redirect_pkey; 2487 msg->im_local_addr.ia_q_key = 2488 gid_info->gl_redirect_qkey; 2489 } else { 2490 msg->im_local_addr.ia_remote_qno = 1; 2491 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2492 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2493 } 2494 2495 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2496 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2497 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2498 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2499 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2500 hdr->Status = 0; 2501 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2502 hdr->AttributeID = h2b16(IB_DM_ATTR_SERVICE_ENTRIES); 2503 2504 srv_start = ii * 4; 2505 cb_args = &ioc_info->ioc_serv[srv_start].se_cb_args; 2506 cb_args->cb_gid_info = gid_info; 2507 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2508 cb_args->cb_req_type = IBDM_REQ_TYPE_SRVENTS; 2509 cb_args->cb_srvents_start = srv_start; 2510 cb_args->cb_ioc_num = ioc_no - 1; 2511 2512 if (nserv_entries >= IBDM_MAX_SERV_ENTRIES_PER_REQ) { 2513 nserv_entries -= IBDM_MAX_SERV_ENTRIES_PER_REQ; 2514 cb_args->cb_srvents_end = (cb_args->cb_srvents_start + 2515 IBDM_MAX_SERV_ENTRIES_PER_REQ - 1); 2516 } else { 2517 cb_args->cb_srvents_end = 2518 (cb_args->cb_srvents_start + nserv_entries - 1); 2519 nserv_entries = 0; 2520 } 2521 ibdm_fill_srv_attr_mod(hdr, cb_args); 2522 2523 ioc_info->ioc_serv[srv_start].se_timeout_id = timeout( 2524 ibdm_pkt_timeout_hdlr, cb_args, 2525 IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2526 2527 IBTF_DPRINTF_L5("ibdm", "\thandle_ioc_profile:" 2528 "timeout %x, ioc %d srv %d", 2529 ioc_info->ioc_serv[srv_start].se_timeout_id, 2530 ioc_no - 1, srv_start); 2531 2532 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, 2533 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2534 IBTF_DPRINTF_L2("ibdm", 2535 "\thandle_ioc_profile: msg send failed"); 2536 ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args); 2537 } 2538 (*flag) |= IBDM_IBMF_PKT_REUSED; 2539 first = B_FALSE; 2540 ii++; 2541 } 2542 } 2543 2544 2545 /* 2546 * ibdm_handle_srventry_mad() 2547 */ 2548 static void 2549 ibdm_handle_srventry_mad(ibmf_msg_t *msg, 2550 ibdm_dp_gidinfo_t *gid_info, int *flag) 2551 { 2552 uint_t ii, ioc_no, attrmod; 2553 uint_t nentries, start, end; 2554 timeout_id_t timeout_id; 2555 ib_dm_srv_t *srv_ents; 2556 ibdm_ioc_info_t *ioc_info; 2557 ibdm_srvents_info_t *gsrv_ents; 2558 2559 IBTF_DPRINTF_L4("ibdm", "\thandle_srventry_mad:" 2560 " IBMF msg %p gid info %p", msg, gid_info); 2561 2562 srv_ents = IBDM_IN_IBMFMSG2SRVENT(msg); 2563 /* 2564 * Get the start and end index of the service entries 2565 * Upper 16 bits identify the IOC 2566 * Lower 16 bits specify the range of service entries 2567 * LSB specifies (Big endian) end of the range 2568 * MSB 
specifies (Big endian) start of the range 2569 */ 2570 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg); 2571 ioc_no = ((attrmod >> 16) & IBDM_16_BIT_MASK); 2572 end = ((attrmod >> 8) & IBDM_8_BIT_MASK); 2573 start = (attrmod & IBDM_8_BIT_MASK); 2574 2575 /* Make sure that IOC index is with the valid range */ 2576 if ((ioc_no < 1) | 2577 (ioc_no > gid_info->gl_iou->iou_info.iou_num_ctrl_slots)) { 2578 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 2579 "IOC index Out of range, index %d", ioc_no); 2580 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2581 return; 2582 } 2583 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1)); 2584 2585 /* 2586 * Make sure that the "start" and "end" service indexes are 2587 * with in the valid range 2588 */ 2589 nentries = ioc_info->ioc_profile.ioc_service_entries; 2590 if ((start > end) | (start >= nentries) | (end >= nentries)) { 2591 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 2592 "Attr modifier 0x%x, #Serv entries %d", attrmod, nentries); 2593 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2594 return; 2595 } 2596 gsrv_ents = &ioc_info->ioc_serv[start]; 2597 mutex_enter(&gid_info->gl_mutex); 2598 if (gsrv_ents->se_state != IBDM_SE_INVALID) { 2599 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 2600 "already known, ioc %d, srv %d, se_state %x", 2601 ioc_no - 1, start, gsrv_ents->se_state); 2602 mutex_exit(&gid_info->gl_mutex); 2603 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 2604 return; 2605 } 2606 ioc_info->ioc_serv[start].se_cb_args.cb_req_type = 0; 2607 if (ioc_info->ioc_serv[start].se_timeout_id) { 2608 IBTF_DPRINTF_L2("ibdm", 2609 "\thandle_srventry_mad: ioc %d start %d", ioc_no, start); 2610 timeout_id = ioc_info->ioc_serv[start].se_timeout_id; 2611 mutex_exit(&gid_info->gl_mutex); 2612 IBTF_DPRINTF_L5("ibdm", "handle_srverntry_mad: " 2613 "se_timeout_id = 0x%x", timeout_id); 2614 if (untimeout(timeout_id) == -1) { 2615 IBTF_DPRINTF_L2("ibdm", "handle_srventry_mad: " 2616 "untimeout se_timeout_id failed"); 2617 } 2618 mutex_enter(&gid_info->gl_mutex); 2619 ioc_info->ioc_serv[start].se_timeout_id = 0; 2620 } 2621 2622 gsrv_ents->se_state = IBDM_SE_VALID; 2623 mutex_exit(&gid_info->gl_mutex); 2624 for (ii = start; ii <= end; ii++, srv_ents++, gsrv_ents++) { 2625 gsrv_ents->se_attr.srv_id = b2h64(srv_ents->srv_id); 2626 bcopy(srv_ents->srv_name, 2627 gsrv_ents->se_attr.srv_name, IB_DM_MAX_SVC_NAME_LEN); 2628 ibdm_dump_service_entries(&gsrv_ents->se_attr); 2629 } 2630 } 2631 2632 2633 /* 2634 * ibdm_get_diagcode: 2635 * Send request to get IOU/IOC diag code 2636 * Returns IBDM_SUCCESS/IBDM_FAILURE 2637 */ 2638 static int 2639 ibdm_get_diagcode(ibdm_dp_gidinfo_t *gid_info, int attr) 2640 { 2641 ibmf_msg_t *msg; 2642 ib_mad_hdr_t *hdr; 2643 ibdm_ioc_info_t *ioc; 2644 ibdm_timeout_cb_args_t *cb_args; 2645 timeout_id_t *timeout_id; 2646 2647 IBTF_DPRINTF_L4("ibdm", "\tget_diagcode: gid info %p, attr = %d", 2648 gid_info, attr); 2649 2650 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 2651 &msg) != IBMF_SUCCESS) { 2652 IBTF_DPRINTF_L4("ibdm", "\tget_diagcode: pkt alloc fail"); 2653 return (IBDM_FAILURE); 2654 } 2655 2656 ibdm_alloc_send_buffers(msg); 2657 2658 mutex_enter(&gid_info->gl_mutex); 2659 ibdm_bump_transactionID(gid_info); 2660 mutex_exit(&gid_info->gl_mutex); 2661 2662 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2663 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2664 if (gid_info->gl_redirected == B_TRUE) { 2665 if (gid_info->gl_redirect_dlid != 0) { 2666 msg->im_local_addr.ia_remote_lid = 2667 gid_info->gl_redirect_dlid; 2668 } 2669 2670 
msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 2671 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 2672 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 2673 } else { 2674 msg->im_local_addr.ia_remote_qno = 1; 2675 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2676 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2677 } 2678 2679 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2680 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2681 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2682 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2683 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2684 hdr->Status = 0; 2685 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2686 2687 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 2688 hdr->AttributeModifier = h2b32(attr); 2689 2690 if (attr == 0) { 2691 cb_args = &gid_info->gl_iou_cb_args; 2692 gid_info->gl_iou->iou_dc_valid = B_FALSE; 2693 cb_args->cb_ioc_num = 0; 2694 cb_args->cb_req_type = IBDM_REQ_TYPE_IOU_DIAGCODE; 2695 timeout_id = &gid_info->gl_timeout_id; 2696 } else { 2697 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attr - 1)); 2698 ioc->ioc_dc_valid = B_FALSE; 2699 cb_args = &ioc->ioc_dc_cb_args; 2700 cb_args->cb_ioc_num = attr - 1; 2701 cb_args->cb_req_type = IBDM_REQ_TYPE_IOC_DIAGCODE; 2702 timeout_id = &ioc->ioc_dc_timeout_id; 2703 } 2704 cb_args->cb_gid_info = gid_info; 2705 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2706 cb_args->cb_srvents_start = 0; 2707 2708 2709 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2710 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2711 2712 IBTF_DPRINTF_L5("ibdm", "\tget_diagcode:" 2713 "timeout %x, ioc %d", *timeout_id, cb_args->cb_ioc_num); 2714 2715 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 2716 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2717 IBTF_DPRINTF_L2("ibdm", "\tget_diagcode: ibmf send failed"); 2718 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 2719 } 2720 return (IBDM_SUCCESS); 2721 } 2722 2723 /* 2724 * ibdm_handle_diagcode: 2725 * Process the DiagCode MAD response and update local DM 2726 * data structure. 
2727 */ 2728 static void 2729 ibdm_handle_diagcode(ibmf_msg_t *ibmf_msg, 2730 ibdm_dp_gidinfo_t *gid_info, int *flag) 2731 { 2732 uint16_t attrmod, *diagcode; 2733 ibdm_iou_info_t *iou; 2734 ibdm_ioc_info_t *ioc; 2735 timeout_id_t timeout_id; 2736 ibdm_timeout_cb_args_t *cb_args; 2737 2738 diagcode = (uint16_t *)ibmf_msg->im_msgbufs_recv.im_bufs_cl_data; 2739 2740 mutex_enter(&gid_info->gl_mutex); 2741 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(ibmf_msg); 2742 iou = gid_info->gl_iou; 2743 if (attrmod == 0) { 2744 if (iou->iou_dc_valid != B_FALSE) { 2745 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 2746 IBTF_DPRINTF_L4("ibdm", 2747 "\thandle_diagcode: Duplicate IOU DiagCode"); 2748 mutex_exit(&gid_info->gl_mutex); 2749 return; 2750 } 2751 cb_args = &gid_info->gl_iou_cb_args; 2752 cb_args->cb_req_type = 0; 2753 iou->iou_diagcode = b2h16(*diagcode); 2754 iou->iou_dc_valid = B_TRUE; 2755 if (gid_info->gl_timeout_id) { 2756 timeout_id = gid_info->gl_timeout_id; 2757 mutex_exit(&gid_info->gl_mutex); 2758 IBTF_DPRINTF_L5("ibdm", "\thandle_diagcode: " 2759 "gl_timeout_id = 0x%x", timeout_id); 2760 if (untimeout(timeout_id) == -1) { 2761 IBTF_DPRINTF_L2("ibdm", "handle_diagcode: " 2762 "untimeout gl_timeout_id failed"); 2763 } 2764 mutex_enter(&gid_info->gl_mutex); 2765 gid_info->gl_timeout_id = 0; 2766 } 2767 } else { 2768 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod - 1)); 2769 if (ioc->ioc_dc_valid != B_FALSE) { 2770 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 2771 IBTF_DPRINTF_L4("ibdm", 2772 "\thandle_diagcode: Duplicate IOC DiagCode"); 2773 mutex_exit(&gid_info->gl_mutex); 2774 return; 2775 } 2776 cb_args = &ioc->ioc_dc_cb_args; 2777 cb_args->cb_req_type = 0; 2778 ioc->ioc_diagcode = b2h16(*diagcode); 2779 ioc->ioc_dc_valid = B_TRUE; 2780 timeout_id = iou->iou_ioc_info[attrmod - 1].ioc_dc_timeout_id; 2781 if (timeout_id) { 2782 mutex_exit(&gid_info->gl_mutex); 2783 IBTF_DPRINTF_L5("ibdm", "handle_diagcode: " 2784 "timeout_id = 0x%x", timeout_id); 2785 if (untimeout(timeout_id) == -1) { 2786 IBTF_DPRINTF_L2("ibdm", "\thandle_diagcode: " 2787 "untimeout ioc_dc_timeout_id failed"); 2788 } 2789 mutex_enter(&gid_info->gl_mutex); 2790 iou->iou_ioc_info[attrmod - 1].ioc_dc_timeout_id = 0; 2791 } 2792 } 2793 mutex_exit(&gid_info->gl_mutex); 2794 2795 IBTF_DPRINTF_L4("ibdm", "\thandle_diagcode: DiagCode : 0x%x" 2796 "attrmod : 0x%x", b2h16(*diagcode), attrmod); 2797 } 2798 2799 2800 /* 2801 * ibdm_is_ioc_present() 2802 * Return ibdm_ioc_info_t if IOC guid is found in the global gid list 2803 */ 2804 static ibdm_ioc_info_t * 2805 ibdm_is_ioc_present(ib_guid_t ioc_guid, 2806 ibdm_dp_gidinfo_t *gid_info, int *flag) 2807 { 2808 int ii; 2809 ibdm_ioc_info_t *ioc; 2810 ibdm_dp_gidinfo_t *head; 2811 ib_dm_io_unitinfo_t *iou; 2812 2813 mutex_enter(&ibdm.ibdm_mutex); 2814 head = ibdm.ibdm_dp_gidlist_head; 2815 while (head) { 2816 mutex_enter(&head->gl_mutex); 2817 if (head->gl_iou == NULL) { 2818 mutex_exit(&head->gl_mutex); 2819 head = head->gl_next; 2820 continue; 2821 } 2822 iou = &head->gl_iou->iou_info; 2823 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 2824 ioc = IBDM_GIDINFO2IOCINFO(head, ii); 2825 if ((ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) && 2826 (ioc->ioc_profile.ioc_guid == ioc_guid)) { 2827 if (gid_info == head) { 2828 *flag |= IBDM_IBMF_PKT_DUP_RESP; 2829 } else if (ibdm_check_dgid(head->gl_dgid_lo, 2830 head->gl_dgid_hi) != NULL) { 2831 IBTF_DPRINTF_L4("ibdm", "\tis_ioc_" 2832 "present: gid not present"); 2833 ibdm_add_to_gl_gid(gid_info, head); 2834 } 2835 mutex_exit(&head->gl_mutex); 2836 
mutex_exit(&ibdm.ibdm_mutex); 2837 return (ioc); 2838 } 2839 } 2840 mutex_exit(&head->gl_mutex); 2841 head = head->gl_next; 2842 } 2843 mutex_exit(&ibdm.ibdm_mutex); 2844 return (NULL); 2845 } 2846 2847 2848 /* 2849 * ibdm_ibmf_send_cb() 2850 * IBMF invokes this callback routine after posting the DM MAD to 2851 * the HCA. 2852 */ 2853 /*ARGSUSED*/ 2854 static void 2855 ibdm_ibmf_send_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *ibmf_msg, void *arg) 2856 { 2857 ibdm_dump_ibmf_msg(ibmf_msg, 1); 2858 ibdm_free_send_buffers(ibmf_msg); 2859 if (ibmf_free_msg(ibmf_hdl, &ibmf_msg) != IBMF_SUCCESS) { 2860 IBTF_DPRINTF_L4("ibdm", 2861 "\tibmf_send_cb: IBMF free msg failed"); 2862 } 2863 } 2864 2865 2866 /* 2867 * ibdm_ibmf_recv_cb() 2868 * Invoked by the IBMF when a response to the one of the DM requests 2869 * is received. 2870 */ 2871 /*ARGSUSED*/ 2872 static void 2873 ibdm_ibmf_recv_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msg, void *arg) 2874 { 2875 ibdm_taskq_args_t *taskq_args; 2876 2877 /* 2878 * If the taskq enable is set then dispatch a taskq to process 2879 * the MAD, otherwise just process it on this thread 2880 */ 2881 if (ibdm_taskq_enable != IBDM_ENABLE_TASKQ_HANDLING) { 2882 ibdm_process_incoming_mad(ibmf_hdl, msg, arg); 2883 return; 2884 } 2885 2886 /* 2887 * create a taskq and dispatch it to process the incoming MAD 2888 */ 2889 taskq_args = kmem_alloc(sizeof (ibdm_taskq_args_t), KM_NOSLEEP); 2890 if (taskq_args == NULL) { 2891 IBTF_DPRINTF_L2("ibdm", "ibmf_recv_cb: kmem_alloc failed for" 2892 "taskq_args"); 2893 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 2894 IBTF_DPRINTF_L4("ibmf_recv_cb", 2895 "\tibmf_recv_cb: IBMF free msg failed"); 2896 } 2897 return; 2898 } 2899 taskq_args->tq_ibmf_handle = ibmf_hdl; 2900 taskq_args->tq_ibmf_msg = msg; 2901 taskq_args->tq_args = arg; 2902 2903 if (taskq_dispatch(system_taskq, ibdm_recv_incoming_mad, taskq_args, 2904 TQ_NOSLEEP) == 0) { 2905 IBTF_DPRINTF_L2("ibdm", "ibmf_recv_cb: taskq_dispatch failed"); 2906 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 2907 IBTF_DPRINTF_L4("ibmf_recv_cb", 2908 "\tibmf_recv_cb: IBMF free msg failed"); 2909 } 2910 kmem_free(taskq_args, sizeof (ibdm_taskq_args_t)); 2911 return; 2912 } 2913 2914 /* taskq_args are deleted in ibdm_recv_incoming_mad() */ 2915 } 2916 2917 2918 void 2919 ibdm_recv_incoming_mad(void *args) 2920 { 2921 ibdm_taskq_args_t *taskq_args; 2922 2923 taskq_args = (ibdm_taskq_args_t *)args; 2924 2925 IBTF_DPRINTF_L4("ibdm", "\tibdm_recv_incoming_mad: " 2926 "Processing incoming MAD via taskq"); 2927 2928 ibdm_process_incoming_mad(taskq_args->tq_ibmf_handle, 2929 taskq_args->tq_ibmf_msg, taskq_args->tq_args); 2930 2931 kmem_free(taskq_args, sizeof (ibdm_taskq_args_t)); 2932 } 2933 2934 2935 /* 2936 * Calls ibdm_process_incoming_mad with all function arguments extracted 2937 * from args 2938 */ 2939 /*ARGSUSED*/ 2940 static void 2941 ibdm_process_incoming_mad(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msg, void *arg) 2942 { 2943 int flag = 0; 2944 int ret; 2945 uint64_t transaction_id; 2946 ib_mad_hdr_t *hdr; 2947 ibdm_dp_gidinfo_t *gid_info = NULL; 2948 2949 IBTF_DPRINTF_L4("ibdm", 2950 "\tprocess_incoming_mad: ibmf hdl %p pkt %p", ibmf_hdl, msg); 2951 ibdm_dump_ibmf_msg(msg, 0); 2952 2953 /* 2954 * IBMF calls this routine for every DM MAD that arrives at this port. 2955 * But we handle only the responses for requests we sent. We drop all 2956 * the DM packets that does not have response bit set in the MAD 2957 * header(this eliminates all the requests sent to this port). 
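 *
 * (Aside, not part of the driver: responses are matched to the GID
 * that issued them by comparing only the window bits of the 64-bit
 * transaction ID, since the low bits differ from request to request
 * within one probe.  A minimal sketch of that lookup, with an assumed
 * shift width matching the allocation sketch earlier in this file:
 *
 *      #include <stdint.h>
 *      #include <stddef.h>
 *
 *      #define TID_SHIFT       28
 *      #define TID_MASK        (~((1ULL << TID_SHIFT) - 1))
 *
 *      struct gid_ent {
 *              struct gid_ent *next;
 *              uint64_t tid_base;      // first ID of this GID's window
 *      };
 *
 *      static struct gid_ent *
 *      tid_lookup(struct gid_ent *head, uint64_t resp_tid)
 *      {
 *              struct gid_ent *g;
 *
 *              for (g = head; g != NULL; g = g->next) {
 *                      if ((g->tid_base & TID_MASK) ==
 *                          (resp_tid & TID_MASK))
 *                              return (g);
 *              }
 *              return (NULL);          // unknown TID: drop the MAD
 *      }
 *
 * The walk over ibdm_dp_gidlist_head below does the same thing with
 * IBDM_GID_TRANSACTIONID_MASK.)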
2958 * We handle only DM class version 1 MAD's 2959 */ 2960 hdr = IBDM_IN_IBMFMSG_MADHDR(msg); 2961 if (ibdm_verify_mad_status(hdr) != IBDM_SUCCESS) { 2962 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 2963 IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: " 2964 "IBMF free msg failed DM request drop it"); 2965 } 2966 return; 2967 } 2968 2969 transaction_id = b2h64(hdr->TransactionID); 2970 2971 mutex_enter(&ibdm.ibdm_mutex); 2972 gid_info = ibdm.ibdm_dp_gidlist_head; 2973 while (gid_info) { 2974 if ((gid_info->gl_transactionID & 2975 IBDM_GID_TRANSACTIONID_MASK) == 2976 (transaction_id & IBDM_GID_TRANSACTIONID_MASK)) 2977 break; 2978 gid_info = gid_info->gl_next; 2979 } 2980 mutex_exit(&ibdm.ibdm_mutex); 2981 2982 if (gid_info == NULL) { 2983 /* Drop the packet */ 2984 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: transaction ID" 2985 " does not match: 0x%llx", transaction_id); 2986 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 2987 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 2988 "IBMF free msg failed DM request drop it"); 2989 } 2990 return; 2991 } 2992 2993 /* Handle redirection for all the MAD's, except ClassPortInfo */ 2994 if (((IBDM_IN_IBMFMSG_STATUS(msg) & MAD_STATUS_REDIRECT_REQUIRED)) && 2995 (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO)) { 2996 ret = ibdm_handle_redirection(msg, gid_info, &flag); 2997 if (ret == IBDM_SUCCESS) { 2998 return; 2999 } 3000 } else { 3001 uint_t gl_state; 3002 3003 mutex_enter(&gid_info->gl_mutex); 3004 gl_state = gid_info->gl_state; 3005 mutex_exit(&gid_info->gl_mutex); 3006 3007 switch (gl_state) { 3008 case IBDM_GET_CLASSPORTINFO: 3009 ibdm_handle_classportinfo( 3010 ibmf_hdl, msg, gid_info, &flag); 3011 break; 3012 3013 case IBDM_GET_IOUNITINFO: 3014 ibdm_handle_iounitinfo(ibmf_hdl, msg, gid_info, &flag); 3015 break; 3016 3017 case IBDM_GET_IOC_DETAILS: 3018 switch (IBDM_IN_IBMFMSG_ATTR(msg)) { 3019 3020 case IB_DM_ATTR_SERVICE_ENTRIES: 3021 ibdm_handle_srventry_mad(msg, gid_info, &flag); 3022 break; 3023 3024 case IB_DM_ATTR_IOC_CTRL_PROFILE: 3025 ibdm_handle_ioc_profile( 3026 ibmf_hdl, msg, gid_info, &flag); 3027 break; 3028 3029 case IB_DM_ATTR_DIAG_CODE: 3030 ibdm_handle_diagcode(msg, gid_info, &flag); 3031 break; 3032 3033 default: 3034 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3035 "Error state, wrong attribute :-("); 3036 (void) ibmf_free_msg(ibmf_hdl, &msg); 3037 return; 3038 } 3039 break; 3040 default: 3041 IBTF_DPRINTF_L2("ibdm", 3042 "process_incoming_mad: Dropping the packet" 3043 " gl_state %x", gl_state); 3044 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3045 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3046 "IBMF free msg failed DM request drop it"); 3047 } 3048 return; 3049 } 3050 } 3051 3052 if ((flag & IBDM_IBMF_PKT_DUP_RESP) || 3053 (flag & IBDM_IBMF_PKT_UNEXP_RESP)) { 3054 IBTF_DPRINTF_L2("ibdm", 3055 "\tprocess_incoming_mad:Dup/unexp resp : 0x%x", flag); 3056 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3057 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3058 "IBMF free msg failed DM request drop it"); 3059 } 3060 return; 3061 } 3062 3063 mutex_enter(&gid_info->gl_mutex); 3064 if (gid_info->gl_pending_cmds < 1) { 3065 IBTF_DPRINTF_L2("ibdm", 3066 "\tprocess_incoming_mad: pending commands negative"); 3067 } 3068 if (--gid_info->gl_pending_cmds) { 3069 IBTF_DPRINTF_L4("ibdm", "\tprocess_incoming_mad: " 3070 "gid_info %p pending cmds %d", 3071 gid_info, gid_info->gl_pending_cmds); 3072 mutex_exit(&gid_info->gl_mutex); 3073 } else { 3074 IBTF_DPRINTF_L4("ibdm", 
"\tprocess_incoming_mad: Probing DONE"); 3075 gid_info->gl_state = IBDM_GID_PROBING_COMPLETE; 3076 mutex_exit(&gid_info->gl_mutex); 3077 ibdm_notify_newgid_iocs(gid_info); 3078 mutex_enter(&ibdm.ibdm_mutex); 3079 if (--ibdm.ibdm_ngid_probes_in_progress == 0) { 3080 IBTF_DPRINTF_L4("ibdm", 3081 "\tprocess_incoming_mad: Wakeup"); 3082 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS; 3083 cv_broadcast(&ibdm.ibdm_probe_cv); 3084 } 3085 mutex_exit(&ibdm.ibdm_mutex); 3086 } 3087 3088 /* 3089 * Do not deallocate the IBMF packet if atleast one request 3090 * is posted. IBMF packet is reused. 3091 */ 3092 if (!(flag & IBDM_IBMF_PKT_REUSED)) { 3093 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3094 IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: " 3095 "IBMF free msg failed DM request drop it"); 3096 } 3097 } 3098 } 3099 3100 3101 /* 3102 * ibdm_verify_mad_status() 3103 * Verifies the MAD status 3104 * Returns IBDM_SUCCESS if status is correct 3105 * Returns IBDM_FAILURE for bogus MAD status 3106 */ 3107 static int 3108 ibdm_verify_mad_status(ib_mad_hdr_t *hdr) 3109 { 3110 int ret = 0; 3111 3112 if ((hdr->R_Method != IB_DM_DEVMGT_METHOD_GET_RESP) || 3113 (hdr->ClassVersion != IB_DM_CLASS_VERSION_1)) { 3114 return (IBDM_FAILURE); 3115 } 3116 3117 if (b2h16(hdr->Status) == 0) 3118 ret = IBDM_SUCCESS; 3119 else if ((b2h16(hdr->Status) & 0x1f) == MAD_STATUS_REDIRECT_REQUIRED) 3120 ret = IBDM_SUCCESS; 3121 else { 3122 IBTF_DPRINTF_L2("ibdm", 3123 "\tverify_mad_status: Status : 0x%x", b2h16(hdr->Status)); 3124 ret = IBDM_FAILURE; 3125 } 3126 return (ret); 3127 } 3128 3129 3130 3131 /* 3132 * ibdm_handle_redirection() 3133 * Returns IBDM_SUCCESS/IBDM_FAILURE 3134 */ 3135 static int 3136 ibdm_handle_redirection(ibmf_msg_t *msg, 3137 ibdm_dp_gidinfo_t *gid_info, int *flag) 3138 { 3139 int attrmod, ioc_no, start; 3140 void *data; 3141 timeout_id_t *timeout_id; 3142 ib_mad_hdr_t *hdr; 3143 ibdm_ioc_info_t *ioc = NULL; 3144 ibdm_timeout_cb_args_t *cb_args; 3145 ibdm_mad_classportinfo_t *cpi; 3146 3147 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection: Enter"); 3148 mutex_enter(&gid_info->gl_mutex); 3149 switch (gid_info->gl_state) { 3150 case IBDM_GET_IOUNITINFO: 3151 cb_args = &gid_info->gl_iou_cb_args; 3152 timeout_id = &gid_info->gl_timeout_id; 3153 break; 3154 3155 case IBDM_GET_IOC_DETAILS: 3156 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg); 3157 switch (IBDM_IN_IBMFMSG_ATTR(msg)) { 3158 3159 case IB_DM_ATTR_DIAG_CODE: 3160 if (attrmod == 0) { 3161 cb_args = &gid_info->gl_iou_cb_args; 3162 timeout_id = &gid_info->gl_timeout_id; 3163 break; 3164 } 3165 if (IBDM_IS_IOC_NUM_INVALID(attrmod, gid_info)) { 3166 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3167 "IOC# Out of range %d", attrmod); 3168 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3169 mutex_exit(&gid_info->gl_mutex); 3170 return (IBDM_FAILURE); 3171 } 3172 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod -1)); 3173 cb_args = &ioc->ioc_dc_cb_args; 3174 timeout_id = &ioc->ioc_dc_timeout_id; 3175 break; 3176 3177 case IB_DM_ATTR_IOC_CTRL_PROFILE: 3178 if (IBDM_IS_IOC_NUM_INVALID(attrmod, gid_info)) { 3179 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3180 "IOC# Out of range %d", attrmod); 3181 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3182 mutex_exit(&gid_info->gl_mutex); 3183 return (IBDM_FAILURE); 3184 } 3185 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod -1)); 3186 cb_args = &ioc->ioc_cb_args; 3187 timeout_id = &ioc->ioc_timeout_id; 3188 break; 3189 3190 case IB_DM_ATTR_SERVICE_ENTRIES: 3191 ioc_no = ((attrmod >> 16) & IBDM_16_BIT_MASK); 3192 if 
(IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) { 3193 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3194 "IOC# Out of range %d", ioc_no); 3195 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3196 mutex_exit(&gid_info->gl_mutex); 3197 return (IBDM_FAILURE); 3198 } 3199 start = (attrmod & IBDM_8_BIT_MASK); 3200 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1)); 3201 if (start > ioc->ioc_profile.ioc_service_entries) { 3202 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3203 " SE index Out of range %d", start); 3204 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3205 mutex_exit(&gid_info->gl_mutex); 3206 return (IBDM_FAILURE); 3207 } 3208 cb_args = &ioc->ioc_serv[start].se_cb_args; 3209 timeout_id = &ioc->ioc_serv[start].se_timeout_id; 3210 break; 3211 3212 default: 3213 /* ERROR State */ 3214 IBTF_DPRINTF_L2("ibdm", 3215 "\thandle_redirection: wrong attribute :-("); 3216 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3217 mutex_exit(&gid_info->gl_mutex); 3218 return (IBDM_FAILURE); 3219 } 3220 break; 3221 default: 3222 /* ERROR State */ 3223 IBTF_DPRINTF_L2("ibdm", 3224 "\thandle_redirection: Error state :-("); 3225 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3226 mutex_exit(&gid_info->gl_mutex); 3227 return (IBDM_FAILURE); 3228 } 3229 if ((*timeout_id) != 0) { 3230 mutex_exit(&gid_info->gl_mutex); 3231 if (untimeout(*timeout_id) == -1) { 3232 IBTF_DPRINTF_L2("ibdm", "\thandle_redirection: " 3233 "untimeout failed %x", *timeout_id); 3234 } else { 3235 IBTF_DPRINTF_L5("ibdm", 3236 "\thandle_redirection: timeout %x", *timeout_id); 3237 } 3238 mutex_enter(&gid_info->gl_mutex); 3239 *timeout_id = 0; 3240 } 3241 3242 data = msg->im_msgbufs_recv.im_bufs_cl_data; 3243 cpi = (ibdm_mad_classportinfo_t *)data; 3244 3245 gid_info->gl_resp_timeout = 3246 (b2h32(cpi->RespTimeValue) & 0x1F); 3247 3248 gid_info->gl_redirected = B_TRUE; 3249 gid_info->gl_redirect_dlid = b2h16(cpi->RedirectLID); 3250 gid_info->gl_redirect_QP = (b2h32(cpi->RedirectQP) & 0xffffff); 3251 gid_info->gl_redirect_pkey = b2h16(cpi->RedirectP_Key); 3252 gid_info->gl_redirect_qkey = b2h32(cpi->RedirectQ_Key); 3253 gid_info->gl_redirectGID_hi = b2h64(cpi->RedirectGID_hi); 3254 gid_info->gl_redirectGID_lo = b2h64(cpi->RedirectGID_lo); 3255 3256 if (gid_info->gl_redirect_dlid != 0) { 3257 msg->im_local_addr.ia_remote_lid = 3258 gid_info->gl_redirect_dlid; 3259 } 3260 ibdm_bump_transactionID(gid_info); 3261 mutex_exit(&gid_info->gl_mutex); 3262 3263 ibdm_alloc_send_buffers(msg); 3264 3265 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 3266 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 3267 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 3268 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 3269 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 3270 hdr->Status = 0; 3271 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 3272 hdr->AttributeID = 3273 msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeID; 3274 hdr->AttributeModifier = 3275 msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeModifier; 3276 3277 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 3278 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 3279 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 3280 3281 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 3282 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 3283 3284 IBTF_DPRINTF_L5("ibdm", "\thandle_redirect:" 3285 "timeout %x", *timeout_id); 3286 3287 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 3288 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 3289 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection:" 3290 "message transport failed"); 3291 
ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 3292 } 3293 (*flag) |= IBDM_IBMF_PKT_REUSED; 3294 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection: Exit"); 3295 return (IBDM_SUCCESS); 3296 } 3297 3298 3299 /* 3300 * ibdm_pkt_timeout_hdlr 3301 * This timeout handler is registed for every IBMF packet that is 3302 * sent through the IBMF. It gets called when no response is received 3303 * within the specified time for the packet. No retries for the failed 3304 * commands currently. Drops the failed IBMF packet and update the 3305 * pending list commands. 3306 */ 3307 static void 3308 ibdm_pkt_timeout_hdlr(void *arg) 3309 { 3310 int probe_done = B_FALSE; 3311 ibdm_iou_info_t *iou; 3312 ibdm_ioc_info_t *ioc; 3313 ibdm_timeout_cb_args_t *cb_args = arg; 3314 ibdm_dp_gidinfo_t *gid_info; 3315 int srv_ent; 3316 uint_t new_gl_state; 3317 3318 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: gid_info: %p " 3319 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3320 cb_args->cb_req_type, cb_args->cb_ioc_num, 3321 cb_args->cb_srvents_start); 3322 3323 gid_info = cb_args->cb_gid_info; 3324 mutex_enter(&gid_info->gl_mutex); 3325 3326 if ((gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) || 3327 (cb_args->cb_req_type == 0)) { 3328 3329 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: req completed" 3330 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_req_type, 3331 cb_args->cb_ioc_num, cb_args->cb_srvents_start); 3332 3333 if (gid_info->gl_timeout_id) 3334 gid_info->gl_timeout_id = 0; 3335 mutex_exit(&gid_info->gl_mutex); 3336 return; 3337 } 3338 if (cb_args->cb_retry_count) { 3339 cb_args->cb_retry_count--; 3340 if (ibdm_retry_command(cb_args) == IBDM_SUCCESS) { 3341 if (gid_info->gl_timeout_id) 3342 gid_info->gl_timeout_id = 0; 3343 mutex_exit(&gid_info->gl_mutex); 3344 return; 3345 } 3346 cb_args->cb_retry_count = 0; 3347 } 3348 3349 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: command failed: gid %p" 3350 " rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3351 cb_args->cb_req_type, cb_args->cb_ioc_num, 3352 cb_args->cb_srvents_start); 3353 3354 new_gl_state = IBDM_GID_PROBING_COMPLETE; 3355 switch (cb_args->cb_req_type) { 3356 3357 case IBDM_REQ_TYPE_CLASSPORTINFO: 3358 case IBDM_REQ_TYPE_IOUINFO: 3359 new_gl_state = IBDM_GID_PROBING_FAILED; 3360 if (--gid_info->gl_pending_cmds == 0) 3361 probe_done = B_TRUE; 3362 if (gid_info->gl_timeout_id) 3363 gid_info->gl_timeout_id = 0; 3364 mutex_exit(&gid_info->gl_mutex); 3365 ibdm_delete_glhca_list(gid_info); 3366 mutex_enter(&gid_info->gl_mutex); 3367 break; 3368 case IBDM_REQ_TYPE_IOCINFO: 3369 iou = gid_info->gl_iou; 3370 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num]; 3371 ioc->ioc_state = IBDM_IOC_STATE_PROBE_FAILED; 3372 if (--gid_info->gl_pending_cmds == 0) 3373 probe_done = B_TRUE; 3374 #ifndef __lock_lint 3375 if (ioc->ioc_timeout_id) 3376 ioc->ioc_timeout_id = 0; 3377 #endif 3378 break; 3379 case IBDM_REQ_TYPE_SRVENTS: 3380 iou = gid_info->gl_iou; 3381 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num]; 3382 ioc->ioc_state = IBDM_IOC_STATE_PROBE_FAILED; 3383 if (--gid_info->gl_pending_cmds == 0) 3384 probe_done = B_TRUE; 3385 srv_ent = cb_args->cb_srvents_start; 3386 #ifndef __lock_lint 3387 if (ioc->ioc_serv[srv_ent].se_timeout_id) 3388 ioc->ioc_serv[srv_ent].se_timeout_id = 0; 3389 #endif 3390 break; 3391 case IBDM_REQ_TYPE_IOU_DIAGCODE: 3392 iou = gid_info->gl_iou; 3393 iou->iou_dc_valid = B_FALSE; 3394 if (--gid_info->gl_pending_cmds == 0) 3395 probe_done = B_TRUE; 3396 if (gid_info->gl_timeout_id) 3397 gid_info->gl_timeout_id = 0; 3398 
break; 3399 case IBDM_REQ_TYPE_IOC_DIAGCODE: 3400 iou = gid_info->gl_iou; 3401 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num]; 3402 ioc->ioc_dc_valid = B_FALSE; 3403 if (--gid_info->gl_pending_cmds == 0) 3404 probe_done = B_TRUE; 3405 #ifndef __lock_lint 3406 if (ioc->ioc_dc_timeout_id) 3407 ioc->ioc_dc_timeout_id = 0; 3408 #endif 3409 break; 3410 } 3411 if (probe_done == B_TRUE) { 3412 gid_info->gl_state = new_gl_state; 3413 mutex_exit(&gid_info->gl_mutex); 3414 ibdm_notify_newgid_iocs(gid_info); 3415 mutex_enter(&ibdm.ibdm_mutex); 3416 if (--ibdm.ibdm_ngid_probes_in_progress == 0) { 3417 IBTF_DPRINTF_L4("ibdm", "\tpkt_timeout_hdlr: Wakeup"); 3418 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS; 3419 cv_broadcast(&ibdm.ibdm_probe_cv); 3420 } 3421 mutex_exit(&ibdm.ibdm_mutex); 3422 } else 3423 mutex_exit(&gid_info->gl_mutex); 3424 } 3425 3426 3427 /* 3428 * ibdm_retry_command() 3429 * Retries the failed command. 3430 * Returns IBDM_FAILURE/IBDM_SUCCESS 3431 */ 3432 static int 3433 ibdm_retry_command(ibdm_timeout_cb_args_t *cb_args) 3434 { 3435 int ret; 3436 ibmf_msg_t *msg; 3437 ib_mad_hdr_t *hdr; 3438 ibdm_dp_gidinfo_t *gid_info = cb_args->cb_gid_info; 3439 timeout_id_t *timeout_id; 3440 ibdm_ioc_info_t *ioc; 3441 int ioc_no; 3442 3443 IBTF_DPRINTF_L2("ibdm", "\tretry_command: gid_info: %p " 3444 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3445 cb_args->cb_req_type, cb_args->cb_ioc_num, 3446 cb_args->cb_srvents_start); 3447 3448 ret = ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_NOSLEEP, &msg); 3449 3450 3451 /* 3452 * Reset the gid if alloc_msg failed with BAD_HANDLE 3453 * ibdm_reset_gidinfo reinits the gid_info 3454 */ 3455 if (ret == IBMF_BAD_HANDLE) { 3456 IBTF_DPRINTF_L3(ibdm_string, "\tretry_command: gid %p hdl bad", 3457 gid_info); 3458 3459 mutex_exit(&gid_info->gl_mutex); 3460 ibdm_reset_gidinfo(gid_info); 3461 mutex_enter(&gid_info->gl_mutex); 3462 3463 /* Retry alloc */ 3464 ret = ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_NOSLEEP, 3465 &msg); 3466 } 3467 3468 if (ret != IBDM_SUCCESS) { 3469 IBTF_DPRINTF_L2("ibdm", "\tretry_command: alloc failed: %p " 3470 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3471 cb_args->cb_req_type, cb_args->cb_ioc_num, 3472 cb_args->cb_srvents_start); 3473 return (IBDM_FAILURE); 3474 } 3475 3476 ibdm_alloc_send_buffers(msg); 3477 3478 ibdm_bump_transactionID(gid_info); 3479 3480 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 3481 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 3482 if (gid_info->gl_redirected == B_TRUE) { 3483 if (gid_info->gl_redirect_dlid != 0) { 3484 msg->im_local_addr.ia_remote_lid = 3485 gid_info->gl_redirect_dlid; 3486 } 3487 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 3488 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 3489 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 3490 } else { 3491 msg->im_local_addr.ia_remote_qno = 1; 3492 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 3493 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 3494 } 3495 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 3496 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 3497 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 3498 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 3499 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 3500 hdr->Status = 0; 3501 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 3502 3503 switch (cb_args->cb_req_type) { 3504 case IBDM_REQ_TYPE_CLASSPORTINFO: 3505 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 3506 hdr->AttributeModifier = 0; 3507 timeout_id = 
&gid_info->gl_timeout_id; 3508 break; 3509 case IBDM_REQ_TYPE_IOUINFO: 3510 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 3511 hdr->AttributeModifier = 0; 3512 timeout_id = &gid_info->gl_timeout_id; 3513 break; 3514 case IBDM_REQ_TYPE_IOCINFO: 3515 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 3516 hdr->AttributeModifier = h2b32(cb_args->cb_ioc_num + 1); 3517 ioc = IBDM_GIDINFO2IOCINFO(gid_info, cb_args->cb_ioc_num); 3518 timeout_id = &ioc->ioc_timeout_id; 3519 break; 3520 case IBDM_REQ_TYPE_SRVENTS: 3521 hdr->AttributeID = h2b16(IB_DM_ATTR_SERVICE_ENTRIES); 3522 ibdm_fill_srv_attr_mod(hdr, cb_args); 3523 ioc = IBDM_GIDINFO2IOCINFO(gid_info, cb_args->cb_ioc_num); 3524 timeout_id = 3525 &ioc->ioc_serv[cb_args->cb_srvents_start].se_timeout_id; 3526 break; 3527 case IBDM_REQ_TYPE_IOU_DIAGCODE: 3528 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 3529 hdr->AttributeModifier = 0; 3530 timeout_id = &gid_info->gl_timeout_id; 3531 break; 3532 case IBDM_REQ_TYPE_IOC_DIAGCODE: 3533 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 3534 hdr->AttributeModifier = h2b32(cb_args->cb_ioc_num + 1); 3535 ioc_no = cb_args->cb_ioc_num; 3536 ioc = &gid_info->gl_iou->iou_ioc_info[ioc_no]; 3537 timeout_id = &ioc->ioc_dc_timeout_id; 3538 break; 3539 } 3540 3541 mutex_exit(&gid_info->gl_mutex); 3542 3543 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 3544 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 3545 3546 IBTF_DPRINTF_L5("ibdm", "\tretry_command: %p,%x,%d,%d:" 3547 "timeout %x", cb_args->cb_req_type, cb_args->cb_ioc_num, 3548 cb_args->cb_srvents_start, *timeout_id); 3549 3550 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, 3551 gid_info->gl_qp_hdl, msg, NULL, ibdm_ibmf_send_cb, 3552 cb_args, 0) != IBMF_SUCCESS) { 3553 IBTF_DPRINTF_L2("ibdm", "\tretry_command: send failed: %p " 3554 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3555 cb_args->cb_req_type, cb_args->cb_ioc_num, 3556 cb_args->cb_srvents_start); 3557 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 3558 } 3559 mutex_enter(&gid_info->gl_mutex); 3560 return (IBDM_SUCCESS); 3561 } 3562 3563 3564 /* 3565 * ibdm_update_ioc_port_gidlist() 3566 */ 3567 static void 3568 ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *dest, 3569 ibdm_dp_gidinfo_t *gid_info) 3570 { 3571 int ii, ngid_ents; 3572 ibdm_gid_t *tmp; 3573 ibdm_hca_list_t *gid_hca_head, *temp; 3574 ibdm_hca_list_t *ioc_head = NULL; 3575 3576 IBTF_DPRINTF_L5("ibdm", "\tupdate_ioc_port_gidlist: Enter"); 3577 3578 ngid_ents = gid_info->gl_ngids; 3579 dest->ioc_nportgids = ngid_ents; 3580 dest->ioc_gid_list = kmem_zalloc(sizeof (ibdm_gid_t) * 3581 ngid_ents, KM_SLEEP); 3582 tmp = gid_info->gl_gid; 3583 for (ii = 0; (ii < ngid_ents) && (tmp); ii++) { 3584 dest->ioc_gid_list[ii].gid_dgid_hi = tmp->gid_dgid_hi; 3585 dest->ioc_gid_list[ii].gid_dgid_lo = tmp->gid_dgid_lo; 3586 tmp = tmp->gid_next; 3587 } 3588 3589 gid_hca_head = gid_info->gl_hca_list; 3590 while (gid_hca_head) { 3591 temp = ibdm_dup_hca_attr(gid_hca_head); 3592 temp->hl_next = ioc_head; 3593 ioc_head = temp; 3594 gid_hca_head = gid_hca_head->hl_next; 3595 } 3596 dest->ioc_hca_list = ioc_head; 3597 } 3598 3599 3600 /* 3601 * ibdm_alloc_send_buffers() 3602 * Allocates memory for the IBMF send buffer 3603 */ 3604 static void 3605 ibdm_alloc_send_buffers(ibmf_msg_t *msgp) 3606 { 3607 msgp->im_msgbufs_send.im_bufs_mad_hdr = 3608 kmem_zalloc(IBDM_MAD_SIZE, KM_SLEEP); 3609 msgp->im_msgbufs_send.im_bufs_cl_data = (uchar_t *) 3610 msgp->im_msgbufs_send.im_bufs_mad_hdr + sizeof (ib_mad_hdr_t); 3611 
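	/*
	 * The class data pointer set above points just past the MAD header
	 * within the single IBDM_MAD_SIZE allocation; the class data length
	 * set below is simply the remainder of that buffer.
	 */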
	msgp->im_msgbufs_send.im_bufs_cl_data_len =
	    IBDM_MAD_SIZE - sizeof (ib_mad_hdr_t);
}


/*
 * ibdm_free_send_buffers()
 *	De-allocates memory for the IBMF send buffer
 */
static void
ibdm_free_send_buffers(ibmf_msg_t *msgp)
{
	if (msgp->im_msgbufs_send.im_bufs_mad_hdr != NULL)
		kmem_free(msgp->im_msgbufs_send.im_bufs_mad_hdr, IBDM_MAD_SIZE);
}

/*
 * ibdm_probe_ioc()
 *	1. Gets the node records for the port GUID. This detects all the
 *	   ports to the IOU.
 *	2. Selectively probes all the IOCs, given the IOU's node GUID.
 *	3. In case of a reprobe, only the IOC to be reprobed is sent the
 *	   IOC Controller Profile request, asynchronously.
 */
/*ARGSUSED*/
static void
ibdm_probe_ioc(ib_guid_t nodeguid, ib_guid_t ioc_guid, int reprobe_flag)
{
	int			ii, nrecords;
	size_t			nr_len = 0, pi_len = 0;
	ib_gid_t		sgid, dgid;
	ibdm_hca_list_t		*hca_list = NULL;
	sa_node_record_t	*nr, *tmp;
	ibdm_port_attr_t	*port = NULL;
	ibdm_dp_gidinfo_t	*reprobe_gid, *new_gid, *node_gid;
	ibdm_dp_gidinfo_t	*temp_gidinfo;
	ibdm_gid_t		*temp_gid;
	sa_portinfo_record_t	*pi;

	IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc(%llx, %llx, %x): Begin",
	    nodeguid, ioc_guid, reprobe_flag);

	/* Rescan the GID list for any removed GIDs for reprobe */
	if (reprobe_flag)
		ibdm_rescan_gidlist(&ioc_guid);

	mutex_enter(&ibdm.ibdm_hl_mutex);
	for (ibdm_get_next_port(&hca_list, &port, 1); port;
	    ibdm_get_next_port(&hca_list, &port, 1)) {
		reprobe_gid = new_gid = node_gid = NULL;

		nr = ibdm_get_node_records(port->pa_sa_hdl, &nr_len, nodeguid);
		if (nr == NULL) {
			IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc: no records");
			continue;
		}
		nrecords = (nr_len / sizeof (sa_node_record_t));
		for (tmp = nr, ii = 0; (ii < nrecords); ii++, tmp++) {
			pi = ibdm_get_portinfo(
			    port->pa_sa_hdl, &pi_len, tmp->LID);

			if ((pi) && (pi->PortInfo.CapabilityMask &
			    SM_CAP_MASK_IS_DM_SUPPD)) {
				/*
				 * For reprobes: check if the GID is already in
				 * the list.
If so, set the state to SKIPPED 3677 */ 3678 if (((temp_gidinfo = ibdm_find_gid(nodeguid, 3679 tmp->NodeInfo.PortGUID)) != NULL) && 3680 temp_gidinfo->gl_state == 3681 IBDM_GID_PROBING_COMPLETE) { 3682 ASSERT(reprobe_gid == NULL); 3683 ibdm_addto_glhcalist(temp_gidinfo, 3684 hca_list); 3685 reprobe_gid = temp_gidinfo; 3686 kmem_free(pi, pi_len); 3687 continue; 3688 } else if (temp_gidinfo != NULL) { 3689 kmem_free(pi, pi_len); 3690 ibdm_addto_glhcalist(temp_gidinfo, 3691 hca_list); 3692 continue; 3693 } 3694 3695 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : " 3696 "create_gid : prefix %llx, guid %llx\n", 3697 pi->PortInfo.GidPrefix, 3698 tmp->NodeInfo.PortGUID); 3699 3700 sgid.gid_prefix = port->pa_sn_prefix; 3701 sgid.gid_guid = port->pa_port_guid; 3702 dgid.gid_prefix = pi->PortInfo.GidPrefix; 3703 dgid.gid_guid = tmp->NodeInfo.PortGUID; 3704 new_gid = ibdm_create_gid_info(port, sgid, 3705 dgid); 3706 if (new_gid == NULL) { 3707 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 3708 "create_gid_info failed\n"); 3709 kmem_free(pi, pi_len); 3710 continue; 3711 } 3712 if (node_gid == NULL) { 3713 node_gid = new_gid; 3714 ibdm_add_to_gl_gid(node_gid, node_gid); 3715 } else { 3716 IBTF_DPRINTF_L4("ibdm", 3717 "\tprobe_ioc: new gid"); 3718 temp_gid = kmem_zalloc( 3719 sizeof (ibdm_gid_t), KM_SLEEP); 3720 temp_gid->gid_dgid_hi = 3721 new_gid->gl_dgid_hi; 3722 temp_gid->gid_dgid_lo = 3723 new_gid->gl_dgid_lo; 3724 temp_gid->gid_next = node_gid->gl_gid; 3725 node_gid->gl_gid = temp_gid; 3726 node_gid->gl_ngids++; 3727 } 3728 new_gid->gl_nodeguid = nodeguid; 3729 new_gid->gl_portguid = dgid.gid_guid; 3730 ibdm_addto_glhcalist(new_gid, hca_list); 3731 3732 /* 3733 * Set the state to skipped as all these 3734 * gids point to the same node. 3735 * We (re)probe only one GID below and reset 3736 * state appropriately 3737 */ 3738 new_gid->gl_state = IBDM_GID_PROBING_SKIPPED; 3739 kmem_free(pi, pi_len); 3740 } 3741 } 3742 kmem_free(nr, nr_len); 3743 3744 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : reprobe_flag %d " 3745 "reprobe_gid %p new_gid %p node_gid %p", 3746 reprobe_flag, reprobe_gid, new_gid, node_gid); 3747 3748 if (reprobe_flag != 0 && reprobe_gid != NULL) { 3749 int niocs, jj; 3750 ibdm_ioc_info_t *tmp_ioc; 3751 int ioc_matched = 0; 3752 3753 mutex_exit(&ibdm.ibdm_hl_mutex); 3754 mutex_enter(&reprobe_gid->gl_mutex); 3755 reprobe_gid->gl_state = IBDM_GET_IOC_DETAILS; 3756 niocs = 3757 reprobe_gid->gl_iou->iou_info.iou_num_ctrl_slots; 3758 reprobe_gid->gl_pending_cmds++; 3759 mutex_exit(&reprobe_gid->gl_mutex); 3760 3761 for (jj = 0; jj < niocs; jj++) { 3762 tmp_ioc = 3763 IBDM_GIDINFO2IOCINFO(reprobe_gid, jj); 3764 if (tmp_ioc->ioc_profile.ioc_guid != ioc_guid) 3765 continue; 3766 3767 ioc_matched = 1; 3768 3769 /* 3770 * Explicitly set gl_reprobe_flag to 0 so that 3771 * IBnex is not notified on completion 3772 */ 3773 mutex_enter(&reprobe_gid->gl_mutex); 3774 reprobe_gid->gl_reprobe_flag = 0; 3775 mutex_exit(&reprobe_gid->gl_mutex); 3776 3777 mutex_enter(&ibdm.ibdm_mutex); 3778 ibdm.ibdm_ngid_probes_in_progress++; 3779 mutex_exit(&ibdm.ibdm_mutex); 3780 if (ibdm_send_ioc_profile(reprobe_gid, jj) != 3781 IBDM_SUCCESS) { 3782 IBTF_DPRINTF_L4("ibdm", 3783 "\tprobe_ioc: " 3784 "send_ioc_profile failed " 3785 "for ioc %d", jj); 3786 ibdm_gid_decr_pending(reprobe_gid); 3787 break; 3788 } 3789 mutex_enter(&ibdm.ibdm_mutex); 3790 ibdm_wait_probe_completion(); 3791 mutex_exit(&ibdm.ibdm_mutex); 3792 break; 3793 } 3794 if (ioc_matched == 0) 3795 ibdm_gid_decr_pending(reprobe_gid); 3796 else { 3797 
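				/*
				 * The IOC was found and reprobed; reacquire
				 * ibdm_hl_mutex (dropped before the reprobe)
				 * and stop scanning the remaining HCA ports.
				 */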
mutex_enter(&ibdm.ibdm_hl_mutex); 3798 break; 3799 } 3800 } else if (new_gid != NULL) { 3801 mutex_exit(&ibdm.ibdm_hl_mutex); 3802 node_gid = node_gid ? node_gid : new_gid; 3803 3804 /* 3805 * New or reinserted GID : Enable notification 3806 * to IBnex 3807 */ 3808 mutex_enter(&node_gid->gl_mutex); 3809 node_gid->gl_reprobe_flag = 1; 3810 mutex_exit(&node_gid->gl_mutex); 3811 3812 ibdm_probe_gid(node_gid); 3813 3814 mutex_enter(&ibdm.ibdm_hl_mutex); 3815 } 3816 } 3817 mutex_exit(&ibdm.ibdm_hl_mutex); 3818 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : End\n"); 3819 } 3820 3821 3822 /* 3823 * ibdm_probe_gid() 3824 * Selectively probes the GID 3825 */ 3826 static void 3827 ibdm_probe_gid(ibdm_dp_gidinfo_t *gid_info) 3828 { 3829 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid:"); 3830 mutex_enter(&gid_info->gl_mutex); 3831 gid_info->gl_pending_cmds++; 3832 gid_info->gl_state = IBDM_GET_CLASSPORTINFO; 3833 mutex_exit(&gid_info->gl_mutex); 3834 if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) { 3835 mutex_enter(&gid_info->gl_mutex); 3836 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 3837 --gid_info->gl_pending_cmds; 3838 mutex_exit(&gid_info->gl_mutex); 3839 ibdm_delete_glhca_list(gid_info); 3840 gid_info = gid_info->gl_next; 3841 return; 3842 } 3843 mutex_enter(&ibdm.ibdm_mutex); 3844 ibdm.ibdm_ngid_probes_in_progress++; 3845 gid_info = gid_info->gl_next; 3846 3847 ibdm_wait_probe_completion(); 3848 mutex_exit(&ibdm.ibdm_mutex); 3849 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid: Wakeup signal received"); 3850 } 3851 3852 3853 /* 3854 * ibdm_create_gid_info() 3855 * Allocates a gid_info structure and initializes 3856 * Returns pointer to the structure on success 3857 * and NULL on failure 3858 */ 3859 static ibdm_dp_gidinfo_t * 3860 ibdm_create_gid_info(ibdm_port_attr_t *port, ib_gid_t sgid, ib_gid_t dgid) 3861 { 3862 uint8_t ii, npaths; 3863 sa_path_record_t *path; 3864 size_t len; 3865 ibdm_pkey_tbl_t *pkey_tbl; 3866 ibdm_dp_gidinfo_t *gid_info = NULL; 3867 int ret; 3868 3869 IBTF_DPRINTF_L4("ibdm", "\tcreate_gid_info: Begin"); 3870 npaths = 1; 3871 3872 /* query for reversible paths */ 3873 if (port->pa_sa_hdl) 3874 ret = ibmf_saa_gid_to_pathrecords(port->pa_sa_hdl, 3875 sgid, dgid, IBMF_SAA_PKEY_WC, 0, B_TRUE, &npaths, 0, 3876 &len, &path); 3877 else 3878 return (NULL); 3879 3880 if (ret == IBMF_SUCCESS && path) { 3881 ibdm_dump_path_info(path); 3882 3883 gid_info = kmem_zalloc( 3884 sizeof (ibdm_dp_gidinfo_t), KM_SLEEP); 3885 mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL); 3886 gid_info->gl_dgid_hi = path->DGID.gid_prefix; 3887 gid_info->gl_dgid_lo = path->DGID.gid_guid; 3888 gid_info->gl_sgid_hi = path->SGID.gid_prefix; 3889 gid_info->gl_sgid_lo = path->SGID.gid_guid; 3890 gid_info->gl_p_key = path->P_Key; 3891 gid_info->gl_sa_hdl = port->pa_sa_hdl; 3892 gid_info->gl_ibmf_hdl = port->pa_ibmf_hdl; 3893 gid_info->gl_slid = path->SLID; 3894 gid_info->gl_dlid = path->DLID; 3895 gid_info->gl_transactionID = (++ibdm.ibdm_transactionID) 3896 << IBDM_GID_TRANSACTIONID_SHIFT; 3897 gid_info->gl_min_transactionID = gid_info->gl_transactionID; 3898 gid_info->gl_max_transactionID = (ibdm.ibdm_transactionID +1) 3899 << IBDM_GID_TRANSACTIONID_SHIFT; 3900 3901 gid_info->gl_qp_hdl = IBMF_QP_HANDLE_DEFAULT; 3902 for (ii = 0; ii < port->pa_npkeys; ii++) { 3903 if (port->pa_pkey_tbl == NULL) 3904 break; 3905 3906 pkey_tbl = &port->pa_pkey_tbl[ii]; 3907 if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) && 3908 (pkey_tbl->pt_qp_hdl != NULL)) { 3909 gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl; 3910 break; 3911 } 3912 } 3913 
kmem_free(path, len); 3914 3915 /* 3916 * QP handle for GID not initialized. No matching Pkey 3917 * was found!! ibdm should *not* hit this case. Flag an 3918 * error and drop the GID if ibdm does encounter this. 3919 */ 3920 if (gid_info->gl_qp_hdl == NULL) { 3921 IBTF_DPRINTF_L2(ibdm_string, 3922 "\tcreate_gid_info: No matching Pkey"); 3923 ibdm_delete_gidinfo(gid_info); 3924 return (NULL); 3925 } 3926 3927 ibdm.ibdm_ngids++; 3928 if (ibdm.ibdm_dp_gidlist_head == NULL) { 3929 ibdm.ibdm_dp_gidlist_head = gid_info; 3930 ibdm.ibdm_dp_gidlist_tail = gid_info; 3931 } else { 3932 ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info; 3933 gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail; 3934 ibdm.ibdm_dp_gidlist_tail = gid_info; 3935 } 3936 } 3937 3938 return (gid_info); 3939 } 3940 3941 3942 /* 3943 * ibdm_get_node_records 3944 * Sends a SA query to get the NODE record 3945 * Returns pointer to the sa_node_record_t on success 3946 * and NULL on failure 3947 */ 3948 static sa_node_record_t * 3949 ibdm_get_node_records(ibmf_saa_handle_t sa_hdl, size_t *length, ib_guid_t guid) 3950 { 3951 sa_node_record_t req, *resp = NULL; 3952 ibmf_saa_access_args_t args; 3953 int ret; 3954 3955 IBTF_DPRINTF_L4("ibdm", "\tget_node_records: Begin"); 3956 3957 bzero(&req, sizeof (sa_node_record_t)); 3958 req.NodeInfo.NodeGUID = guid; 3959 3960 args.sq_attr_id = SA_NODERECORD_ATTRID; 3961 args.sq_access_type = IBMF_SAA_RETRIEVE; 3962 args.sq_component_mask = SA_NODEINFO_COMPMASK_NODEGUID; 3963 args.sq_template = &req; 3964 args.sq_callback = NULL; 3965 args.sq_callback_arg = NULL; 3966 3967 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) &resp); 3968 if (ret != IBMF_SUCCESS) { 3969 IBTF_DPRINTF_L2("ibdm", "\tget_node_records:" 3970 " SA Retrieve Failed: %d", ret); 3971 return (NULL); 3972 } 3973 if ((resp == NULL) || (*length == 0)) { 3974 IBTF_DPRINTF_L2("ibdm", "\tget_node_records: No records"); 3975 return (NULL); 3976 } 3977 3978 IBTF_DPRINTF_L4("ibdm", "\tget_node_records: NodeGuid %llx " 3979 "PortGUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.PortGUID); 3980 3981 return (resp); 3982 } 3983 3984 3985 /* 3986 * ibdm_get_portinfo() 3987 * Sends a SA query to get the PortInfo record 3988 * Returns pointer to the sa_portinfo_record_t on success 3989 * and NULL on failure 3990 */ 3991 static sa_portinfo_record_t * 3992 ibdm_get_portinfo(ibmf_saa_handle_t sa_hdl, size_t *length, ib_lid_t lid) 3993 { 3994 sa_portinfo_record_t req, *resp = NULL; 3995 ibmf_saa_access_args_t args; 3996 int ret; 3997 3998 IBTF_DPRINTF_L4("ibdm", "\tget_portinfo: Begin"); 3999 4000 bzero(&req, sizeof (sa_portinfo_record_t)); 4001 req.EndportLID = lid; 4002 4003 args.sq_attr_id = SA_PORTINFORECORD_ATTRID; 4004 args.sq_access_type = IBMF_SAA_RETRIEVE; 4005 args.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID; 4006 args.sq_template = &req; 4007 args.sq_callback = NULL; 4008 args.sq_callback_arg = NULL; 4009 4010 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) &resp); 4011 if (ret != IBMF_SUCCESS) { 4012 IBTF_DPRINTF_L2("ibdm", "\tget_portinfo:" 4013 " SA Retrieve Failed: 0x%X", ret); 4014 return (NULL); 4015 } 4016 if ((*length == 0) || (resp == NULL)) 4017 return (NULL); 4018 4019 IBTF_DPRINTF_L4("ibdm", "\tget_portinfo: GidPrefix %llx Cap 0x%x", 4020 resp->PortInfo.GidPrefix, resp->PortInfo.CapabilityMask); 4021 return (resp); 4022 } 4023 4024 4025 /* 4026 * ibdm_ibnex_register_callback 4027 * IB nexus callback routine for HCA attach and detach notification 4028 */ 4029 void 4030 
ibdm_ibnex_register_callback(ibdm_callback_t ibnex_dm_callback) 4031 { 4032 IBTF_DPRINTF_L4("ibdm", "\tibnex_register_callbacks"); 4033 mutex_enter(&ibdm.ibdm_ibnex_mutex); 4034 ibdm.ibdm_ibnex_callback = ibnex_dm_callback; 4035 mutex_exit(&ibdm.ibdm_ibnex_mutex); 4036 } 4037 4038 4039 /* 4040 * ibdm_ibnex_unregister_callbacks 4041 */ 4042 void 4043 ibdm_ibnex_unregister_callback() 4044 { 4045 IBTF_DPRINTF_L4("ibdm", "\tibnex_unregister_callbacks"); 4046 mutex_enter(&ibdm.ibdm_ibnex_mutex); 4047 ibdm.ibdm_ibnex_callback = NULL; 4048 mutex_exit(&ibdm.ibdm_ibnex_mutex); 4049 } 4050 4051 4052 /* 4053 * ibdm_ibnex_get_waittime() 4054 * Calculates the wait time based on the last HCA attach time 4055 */ 4056 time_t 4057 ibdm_ibnex_get_waittime(ib_guid_t hca_guid, int *dft_wait) 4058 { 4059 int ii; 4060 time_t temp, wait_time = 0; 4061 ibdm_hca_list_t *hca; 4062 4063 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_waittime hcaguid:%llx" 4064 "\tport settling time %d", hca_guid, *dft_wait); 4065 4066 mutex_enter(&ibdm.ibdm_hl_mutex); 4067 hca = ibdm.ibdm_hca_list_head; 4068 4069 if (hca_guid) { 4070 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4071 if ((hca_guid == hca->hl_hca_guid) && 4072 (hca->hl_nports != hca->hl_nports_active)) { 4073 wait_time = 4074 ddi_get_time() - hca->hl_attach_time; 4075 wait_time = ((wait_time >= *dft_wait) ? 4076 0 : (*dft_wait - wait_time)); 4077 break; 4078 } 4079 hca = hca->hl_next; 4080 } 4081 mutex_exit(&ibdm.ibdm_hl_mutex); 4082 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_waittime %llx", wait_time); 4083 return (wait_time); 4084 } 4085 4086 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4087 if (hca->hl_nports != hca->hl_nports_active) { 4088 temp = ddi_get_time() - hca->hl_attach_time; 4089 temp = ((temp >= *dft_wait) ? 0 : (*dft_wait - temp)); 4090 wait_time = (temp > wait_time) ? temp : wait_time; 4091 } 4092 } 4093 mutex_exit(&ibdm.ibdm_hl_mutex); 4094 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_waittime %llx", wait_time); 4095 return (wait_time); 4096 } 4097 4098 4099 /* 4100 * ibdm_ibnex_probe_hcaport 4101 * Probes the presence of HCA port (with HCA dip and port number) 4102 * Returns port attributes structure on SUCCESS 4103 */ 4104 ibdm_port_attr_t * 4105 ibdm_ibnex_probe_hcaport(ib_guid_t hca_guid, uint8_t port_num) 4106 { 4107 int ii, jj; 4108 ibdm_hca_list_t *hca_list; 4109 ibdm_port_attr_t *port_attr; 4110 4111 IBTF_DPRINTF_L4("ibdm", "\tibnex_probe_hcaport:"); 4112 4113 mutex_enter(&ibdm.ibdm_hl_mutex); 4114 hca_list = ibdm.ibdm_hca_list_head; 4115 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4116 if (hca_list->hl_hca_guid == hca_guid) { 4117 for (jj = 0; jj < hca_list->hl_nports; jj++) { 4118 if (hca_list->hl_port_attr[jj].pa_port_num == 4119 port_num) { 4120 break; 4121 } 4122 } 4123 if (jj != hca_list->hl_nports) 4124 break; 4125 } 4126 hca_list = hca_list->hl_next; 4127 } 4128 if (ii == ibdm.ibdm_hca_count) { 4129 IBTF_DPRINTF_L2("ibdm", "\tibnex_probe_hcaport: not found"); 4130 mutex_exit(&ibdm.ibdm_hl_mutex); 4131 return (NULL); 4132 } 4133 port_attr = (ibdm_port_attr_t *)kmem_zalloc( 4134 sizeof (ibdm_port_attr_t), KM_SLEEP); 4135 bcopy((char *)&hca_list->hl_port_attr[jj], 4136 port_attr, sizeof (ibdm_port_attr_t)); 4137 ibdm_update_port_attr(port_attr); 4138 4139 mutex_exit(&ibdm.ibdm_hl_mutex); 4140 return (port_attr); 4141 } 4142 4143 4144 /* 4145 * ibdm_ibnex_get_port_attrs 4146 * Scan all HCAs for a matching port_guid. 4147 * Returns "port attributes" structure on success. 
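 *
 *	The returned structure is a copy allocated with KM_SLEEP; the caller
 *	must release it with ibdm_ibnex_free_port_attr(). A minimal caller
 *	sketch (illustrative only; "my_port_guid" is a hypothetical value
 *	supplied by the caller):
 *
 *		ibdm_port_attr_t *pa;
 *
 *		if ((pa = ibdm_ibnex_get_port_attrs(my_port_guid)) != NULL) {
 *			(use pa->pa_port_num, pa->pa_pkey_tbl, ...)
 *			ibdm_ibnex_free_port_attr(pa);
 *		}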
4148 */ 4149 ibdm_port_attr_t * 4150 ibdm_ibnex_get_port_attrs(ib_guid_t port_guid) 4151 { 4152 int ii, jj; 4153 ibdm_hca_list_t *hca_list; 4154 ibdm_port_attr_t *port_attr; 4155 4156 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_port_attrs:"); 4157 4158 mutex_enter(&ibdm.ibdm_hl_mutex); 4159 hca_list = ibdm.ibdm_hca_list_head; 4160 4161 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4162 for (jj = 0; jj < hca_list->hl_nports; jj++) { 4163 if (hca_list->hl_port_attr[jj].pa_port_guid == 4164 port_guid) { 4165 break; 4166 } 4167 } 4168 if (jj != hca_list->hl_nports) 4169 break; 4170 hca_list = hca_list->hl_next; 4171 } 4172 4173 if (ii == ibdm.ibdm_hca_count) { 4174 IBTF_DPRINTF_L2("ibdm", "\tibnex_get_port_attrs: not found"); 4175 mutex_exit(&ibdm.ibdm_hl_mutex); 4176 return (NULL); 4177 } 4178 4179 port_attr = (ibdm_port_attr_t *)kmem_alloc(sizeof (ibdm_port_attr_t), 4180 KM_SLEEP); 4181 bcopy((char *)&hca_list->hl_port_attr[jj], port_attr, 4182 sizeof (ibdm_port_attr_t)); 4183 ibdm_update_port_attr(port_attr); 4184 4185 mutex_exit(&ibdm.ibdm_hl_mutex); 4186 return (port_attr); 4187 } 4188 4189 4190 /* 4191 * ibdm_ibnex_free_port_attr() 4192 */ 4193 void 4194 ibdm_ibnex_free_port_attr(ibdm_port_attr_t *port_attr) 4195 { 4196 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_port_attr:"); 4197 if (port_attr) { 4198 if (port_attr->pa_pkey_tbl != NULL) { 4199 kmem_free(port_attr->pa_pkey_tbl, 4200 (port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t))); 4201 } 4202 kmem_free(port_attr, sizeof (ibdm_port_attr_t)); 4203 } 4204 } 4205 4206 4207 /* 4208 * ibdm_ibnex_get_hca_list() 4209 * Returns portinfo for all the port for all the HCA's 4210 */ 4211 void 4212 ibdm_ibnex_get_hca_list(ibdm_hca_list_t **hca, int *count) 4213 { 4214 ibdm_hca_list_t *head = NULL, *temp, *temp1; 4215 int ii; 4216 4217 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_list:"); 4218 4219 mutex_enter(&ibdm.ibdm_hl_mutex); 4220 temp = ibdm.ibdm_hca_list_head; 4221 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4222 temp1 = ibdm_dup_hca_attr(temp); 4223 temp1->hl_next = head; 4224 head = temp1; 4225 temp = temp->hl_next; 4226 } 4227 *count = ibdm.ibdm_hca_count; 4228 *hca = head; 4229 mutex_exit(&ibdm.ibdm_hl_mutex); 4230 } 4231 4232 4233 /* 4234 * ibdm_ibnex_get_hca_info_by_guid() 4235 */ 4236 ibdm_hca_list_t * 4237 ibdm_ibnex_get_hca_info_by_guid(ib_guid_t hca_guid) 4238 { 4239 ibdm_hca_list_t *head = NULL, *hca = NULL; 4240 4241 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_info_by_dip"); 4242 4243 mutex_enter(&ibdm.ibdm_hl_mutex); 4244 head = ibdm.ibdm_hca_list_head; 4245 while (head) { 4246 if (head->hl_hca_guid == hca_guid) { 4247 hca = ibdm_dup_hca_attr(head); 4248 hca->hl_next = NULL; 4249 break; 4250 } 4251 head = head->hl_next; 4252 } 4253 mutex_exit(&ibdm.ibdm_hl_mutex); 4254 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_info_by_dip %p", hca); 4255 return (hca); 4256 } 4257 4258 4259 /* 4260 * ibdm_dup_hca_attr() 4261 * Allocate a new HCA attribute strucuture and initialize 4262 * hca attribute structure with the incoming HCA attributes 4263 * returned the allocated hca attributes. 
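 *
 *	The copy is carved out of one allocation laid out as:
 *
 *		+-----------------+------------------------------------+
 *		| ibdm_hca_list_t | hl_nports x ibdm_port_attr_t array |
 *		+-----------------+------------------------------------+
 *
 *	with hl_port_attr pointing just past the list header.
 *	ibdm_ibnex_free_hca_list() frees each element using the same
 *	length computation, so the two must stay in sync.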
4264 */ 4265 static ibdm_hca_list_t * 4266 ibdm_dup_hca_attr(ibdm_hca_list_t *in_hca) 4267 { 4268 int len; 4269 ibdm_hca_list_t *out_hca; 4270 4271 len = sizeof (ibdm_hca_list_t) + 4272 (in_hca->hl_nports * sizeof (ibdm_port_attr_t)); 4273 IBTF_DPRINTF_L4("ibdm", "\tdup_hca_attr len %d", len); 4274 out_hca = (ibdm_hca_list_t *)kmem_alloc(len, KM_SLEEP); 4275 bcopy((char *)in_hca, 4276 (char *)out_hca, sizeof (ibdm_hca_list_t)); 4277 if (in_hca->hl_nports) { 4278 out_hca->hl_port_attr = (ibdm_port_attr_t *) 4279 ((char *)out_hca + sizeof (ibdm_hca_list_t)); 4280 bcopy((char *)in_hca->hl_port_attr, 4281 (char *)out_hca->hl_port_attr, 4282 (in_hca->hl_nports * sizeof (ibdm_port_attr_t))); 4283 for (len = 0; len < out_hca->hl_nports; len++) 4284 ibdm_update_port_attr(&out_hca->hl_port_attr[len]); 4285 } 4286 return (out_hca); 4287 } 4288 4289 4290 /* 4291 * ibdm_ibnex_free_hca_list() 4292 * Free one/more HCA lists 4293 */ 4294 void 4295 ibdm_ibnex_free_hca_list(ibdm_hca_list_t *hca_list) 4296 { 4297 int ii; 4298 size_t len; 4299 ibdm_hca_list_t *temp; 4300 ibdm_port_attr_t *port; 4301 4302 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_hca_list:"); 4303 ASSERT(hca_list); 4304 while (hca_list) { 4305 temp = hca_list; 4306 hca_list = hca_list->hl_next; 4307 for (ii = 0; ii < temp->hl_nports; ii++) { 4308 port = &temp->hl_port_attr[ii]; 4309 len = (port->pa_npkeys * sizeof (ibdm_pkey_tbl_t)); 4310 if (len != 0) 4311 kmem_free(port->pa_pkey_tbl, len); 4312 } 4313 len = sizeof (ibdm_hca_list_t) + (temp->hl_nports * 4314 sizeof (ibdm_port_attr_t)); 4315 kmem_free(temp, len); 4316 } 4317 } 4318 4319 4320 /* 4321 * ibdm_ibnex_probe_iocguid() 4322 * Probes the IOC on the fabric and returns the IOC information 4323 * if present. Otherwise, NULL is returned 4324 */ 4325 /* ARGSUSED */ 4326 ibdm_ioc_info_t * 4327 ibdm_ibnex_probe_ioc(ib_guid_t iou, ib_guid_t ioc_guid, int reprobe_flag) 4328 { 4329 int k; 4330 ibdm_ioc_info_t *ioc_info; 4331 ibdm_dp_gidinfo_t *gid_info; 4332 4333 IBTF_DPRINTF_L4("ibdm", "\tibnex_probe_ioc: (%llX, %llX, %d) Begin", 4334 iou, ioc_guid, reprobe_flag); 4335 /* Check whether we know this already */ 4336 ioc_info = ibdm_ibnex_get_ioc_info(ioc_guid); 4337 if (ioc_info == NULL) { 4338 mutex_enter(&ibdm.ibdm_mutex); 4339 while (ibdm.ibdm_busy & IBDM_BUSY) 4340 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4341 ibdm.ibdm_busy |= IBDM_BUSY; 4342 mutex_exit(&ibdm.ibdm_mutex); 4343 ibdm_probe_ioc(iou, ioc_guid, 0); 4344 mutex_enter(&ibdm.ibdm_mutex); 4345 ibdm.ibdm_busy &= ~IBDM_BUSY; 4346 cv_broadcast(&ibdm.ibdm_busy_cv); 4347 mutex_exit(&ibdm.ibdm_mutex); 4348 ioc_info = ibdm_ibnex_get_ioc_info(ioc_guid); 4349 } else if (reprobe_flag) { /* Handle Reprobe for the IOC */ 4350 /* Free the ioc_list before reprobe; and cancel any timers */ 4351 mutex_enter(&ibdm.ibdm_mutex); 4352 if (ioc_info->ioc_timeout_id) { 4353 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4354 "ioc_timeout_id = 0x%x", 4355 ioc_info->ioc_timeout_id); 4356 if (untimeout(ioc_info->ioc_timeout_id) == -1) { 4357 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4358 "untimeout ioc_timeout_id failed"); 4359 } 4360 ioc_info->ioc_timeout_id = 0; 4361 } 4362 if (ioc_info->ioc_dc_timeout_id) { 4363 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4364 "ioc_dc_timeout_id = 0x%x", 4365 ioc_info->ioc_dc_timeout_id); 4366 if (untimeout(ioc_info->ioc_dc_timeout_id) == -1) { 4367 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4368 "untimeout ioc_dc_timeout_id failed"); 4369 } 4370 ioc_info->ioc_dc_timeout_id = 0; 4371 } 4372 for (k = 0; k < 
ioc_info->ioc_profile.ioc_service_entries; k++) 4373 if (ioc_info->ioc_serv[k].se_timeout_id) { 4374 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4375 "ioc_info->ioc_serv[k].se_timeout_id = %x", 4376 k, ioc_info->ioc_serv[k].se_timeout_id); 4377 if (untimeout(ioc_info->ioc_serv[k]. 4378 se_timeout_id) == -1) { 4379 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4380 "untimeout se_timeout_id %d " 4381 "failed", k); 4382 } 4383 ioc_info->ioc_serv[k].se_timeout_id = 0; 4384 } 4385 mutex_exit(&ibdm.ibdm_mutex); 4386 ibdm_ibnex_free_ioc_list(ioc_info); 4387 4388 mutex_enter(&ibdm.ibdm_mutex); 4389 while (ibdm.ibdm_busy & IBDM_BUSY) 4390 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4391 ibdm.ibdm_busy |= IBDM_BUSY; 4392 mutex_exit(&ibdm.ibdm_mutex); 4393 4394 ibdm_probe_ioc(iou, ioc_guid, 1); 4395 4396 /* 4397 * Skip if gl_reprobe_flag is set, this will be 4398 * a re-inserted / new GID, for which notifications 4399 * have already been send. 4400 */ 4401 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 4402 gid_info = gid_info->gl_next) { 4403 uint8_t ii, niocs; 4404 ibdm_ioc_info_t *ioc; 4405 4406 if (gid_info->gl_iou == NULL) 4407 continue; 4408 4409 if (gid_info->gl_reprobe_flag) { 4410 gid_info->gl_reprobe_flag = 0; 4411 continue; 4412 } 4413 4414 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 4415 for (ii = 0; ii < niocs; ii++) { 4416 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii); 4417 if (ioc->ioc_profile.ioc_guid == ioc_guid) { 4418 mutex_enter(&ibdm.ibdm_mutex); 4419 ibdm_reprobe_update_port_srv(ioc, 4420 gid_info); 4421 mutex_exit(&ibdm.ibdm_mutex); 4422 } 4423 } 4424 } 4425 mutex_enter(&ibdm.ibdm_mutex); 4426 ibdm.ibdm_busy &= ~IBDM_BUSY; 4427 cv_broadcast(&ibdm.ibdm_busy_cv); 4428 mutex_exit(&ibdm.ibdm_mutex); 4429 4430 ioc_info = ibdm_ibnex_get_ioc_info(ioc_guid); 4431 } 4432 return (ioc_info); 4433 } 4434 4435 4436 /* 4437 * ibdm_ibnex_get_ioc_info() 4438 * Returns pointer to ibdm_ioc_info_t if it finds 4439 * matching record for the ioc_guid, otherwise NULL 4440 * is returned 4441 */ 4442 ibdm_ioc_info_t * 4443 ibdm_ibnex_get_ioc_info(ib_guid_t ioc_guid) 4444 { 4445 int ii; 4446 ibdm_ioc_info_t *ioc = NULL, *tmp = NULL; 4447 ibdm_dp_gidinfo_t *gid_list; 4448 ib_dm_io_unitinfo_t *iou; 4449 4450 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_ioc_info: GUID %llx", ioc_guid); 4451 4452 mutex_enter(&ibdm.ibdm_mutex); 4453 while (ibdm.ibdm_busy & IBDM_BUSY) 4454 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4455 ibdm.ibdm_busy |= IBDM_BUSY; 4456 4457 gid_list = ibdm.ibdm_dp_gidlist_head; 4458 while (gid_list) { 4459 mutex_enter(&gid_list->gl_mutex); 4460 if (gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) { 4461 mutex_exit(&gid_list->gl_mutex); 4462 gid_list = gid_list->gl_next; 4463 continue; 4464 } 4465 if (gid_list->gl_iou == NULL) { 4466 IBTF_DPRINTF_L2("ibdm", 4467 "\tget_ioc_info: No IOU info"); 4468 mutex_exit(&gid_list->gl_mutex); 4469 gid_list = gid_list->gl_next; 4470 continue; 4471 } 4472 iou = &gid_list->gl_iou->iou_info; 4473 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 4474 tmp = IBDM_GIDINFO2IOCINFO(gid_list, ii); 4475 if ((tmp->ioc_profile.ioc_guid == ioc_guid) && 4476 (tmp->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS)) { 4477 ioc = ibdm_dup_ioc_info(tmp, gid_list); 4478 mutex_exit(&gid_list->gl_mutex); 4479 ibdm.ibdm_busy &= ~IBDM_BUSY; 4480 cv_broadcast(&ibdm.ibdm_busy_cv); 4481 mutex_exit(&ibdm.ibdm_mutex); 4482 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_info: End"); 4483 return (ioc); 4484 } 4485 } 4486 if (ii == iou->iou_num_ctrl_slots) 4487 ioc = NULL; 4488 4489 
mutex_exit(&gid_list->gl_mutex); 4490 gid_list = gid_list->gl_next; 4491 } 4492 4493 ibdm.ibdm_busy &= ~IBDM_BUSY; 4494 cv_broadcast(&ibdm.ibdm_busy_cv); 4495 mutex_exit(&ibdm.ibdm_mutex); 4496 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_info: failure End"); 4497 return (ioc); 4498 } 4499 4500 4501 /* 4502 * ibdm_ibnex_get_ioc_count() 4503 * Returns number of ibdm_ioc_info_t it finds 4504 */ 4505 int 4506 ibdm_ibnex_get_ioc_count(void) 4507 { 4508 int count = 0, k; 4509 ibdm_ioc_info_t *ioc; 4510 ibdm_dp_gidinfo_t *gid_list; 4511 4512 mutex_enter(&ibdm.ibdm_mutex); 4513 ibdm_sweep_fabric(0); 4514 4515 while (ibdm.ibdm_busy & IBDM_BUSY) 4516 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4517 ibdm.ibdm_busy |= IBDM_BUSY; 4518 4519 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 4520 gid_list = gid_list->gl_next) { 4521 mutex_enter(&gid_list->gl_mutex); 4522 if ((gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) || 4523 (gid_list->gl_iou == NULL)) { 4524 mutex_exit(&gid_list->gl_mutex); 4525 continue; 4526 } 4527 for (k = 0; k < gid_list->gl_iou->iou_info.iou_num_ctrl_slots; 4528 k++) { 4529 ioc = IBDM_GIDINFO2IOCINFO(gid_list, k); 4530 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) 4531 ++count; 4532 } 4533 mutex_exit(&gid_list->gl_mutex); 4534 } 4535 ibdm.ibdm_busy &= ~IBDM_BUSY; 4536 cv_broadcast(&ibdm.ibdm_busy_cv); 4537 mutex_exit(&ibdm.ibdm_mutex); 4538 4539 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_count: count = %d", count); 4540 return (count); 4541 } 4542 4543 4544 /* 4545 * ibdm_ibnex_get_ioc_list() 4546 * Returns information about all the IOCs present on the fabric. 4547 * Reprobes the IOCs and the GID list if list_flag is set to REPROBE_ALL. 4548 * Does not sweep fabric if DONOT_PROBE is set 4549 */ 4550 ibdm_ioc_info_t * 4551 ibdm_ibnex_get_ioc_list(ibdm_ibnex_get_ioclist_mtd_t list_flag) 4552 { 4553 int ii; 4554 ibdm_ioc_info_t *ioc_list = NULL, *tmp, *ioc; 4555 ibdm_dp_gidinfo_t *gid_list; 4556 ib_dm_io_unitinfo_t *iou; 4557 4558 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_list: Enter"); 4559 4560 mutex_enter(&ibdm.ibdm_mutex); 4561 if (list_flag != IBDM_IBNEX_DONOT_PROBE) 4562 ibdm_sweep_fabric(list_flag == IBDM_IBNEX_REPROBE_ALL); 4563 4564 while (ibdm.ibdm_busy & IBDM_BUSY) 4565 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4566 ibdm.ibdm_busy |= IBDM_BUSY; 4567 4568 gid_list = ibdm.ibdm_dp_gidlist_head; 4569 while (gid_list) { 4570 mutex_enter(&gid_list->gl_mutex); 4571 if (gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) { 4572 mutex_exit(&gid_list->gl_mutex); 4573 gid_list = gid_list->gl_next; 4574 continue; 4575 } 4576 if (gid_list->gl_iou == NULL) { 4577 IBTF_DPRINTF_L2("ibdm", 4578 "\tget_ioc_list: No IOU info"); 4579 mutex_exit(&gid_list->gl_mutex); 4580 gid_list = gid_list->gl_next; 4581 continue; 4582 } 4583 iou = &gid_list->gl_iou->iou_info; 4584 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 4585 ioc = IBDM_GIDINFO2IOCINFO(gid_list, ii); 4586 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) { 4587 tmp = ibdm_dup_ioc_info(ioc, gid_list); 4588 tmp->ioc_next = ioc_list; 4589 ioc_list = tmp; 4590 } 4591 } 4592 mutex_exit(&gid_list->gl_mutex); 4593 gid_list = gid_list->gl_next; 4594 } 4595 ibdm.ibdm_busy &= ~IBDM_BUSY; 4596 cv_broadcast(&ibdm.ibdm_busy_cv); 4597 mutex_exit(&ibdm.ibdm_mutex); 4598 4599 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_list: End"); 4600 return (ioc_list); 4601 } 4602 4603 /* 4604 * ibdm_dup_ioc_info() 4605 * Duplicate the IOC information and return the IOC 4606 * information. 
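 *
 *	The duplicate also carries fresh copies of the port GID list and
 *	the HCA list (filled in by ibdm_update_ioc_port_gidlist()), so it
 *	must be released with ibdm_ibnex_free_ioc_list() rather than a
 *	plain kmem_free().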
4607 */ 4608 static ibdm_ioc_info_t * 4609 ibdm_dup_ioc_info(ibdm_ioc_info_t *in_ioc, ibdm_dp_gidinfo_t *gid_list) 4610 { 4611 ibdm_ioc_info_t *out_ioc; 4612 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*out_ioc)); 4613 4614 out_ioc = kmem_alloc(sizeof (ibdm_ioc_info_t), KM_SLEEP); 4615 bcopy(in_ioc, out_ioc, sizeof (ibdm_ioc_info_t)); 4616 ibdm_update_ioc_port_gidlist(out_ioc, gid_list); 4617 out_ioc->ioc_iou_dc_valid = gid_list->gl_iou->iou_dc_valid; 4618 out_ioc->ioc_iou_diagcode = gid_list->gl_iou->iou_diagcode; 4619 4620 return (out_ioc); 4621 } 4622 4623 4624 /* 4625 * ibdm_free_ioc_list() 4626 * Deallocate memory for IOC list structure 4627 */ 4628 void 4629 ibdm_ibnex_free_ioc_list(ibdm_ioc_info_t *ioc) 4630 { 4631 ibdm_ioc_info_t *temp; 4632 4633 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_ioc_list:"); 4634 while (ioc) { 4635 temp = ioc; 4636 ioc = ioc->ioc_next; 4637 kmem_free(temp->ioc_gid_list, 4638 (sizeof (ibdm_gid_t) * temp->ioc_nportgids)); 4639 if (temp->ioc_hca_list) 4640 ibdm_ibnex_free_hca_list(temp->ioc_hca_list); 4641 kmem_free(temp, sizeof (ibdm_ioc_info_t)); 4642 } 4643 } 4644 4645 4646 /* 4647 * ibdm_ibnex_update_pkey_tbls 4648 * Updates the DM P_Key database. 4649 * NOTE: Two cases are handled here: P_Key being added or removed. 4650 * 4651 * Arguments : NONE 4652 * Return Values : NONE 4653 */ 4654 void 4655 ibdm_ibnex_update_pkey_tbls(void) 4656 { 4657 int h, pp, pidx; 4658 uint_t nports; 4659 uint_t size; 4660 ib_pkey_t new_pkey; 4661 ib_pkey_t *orig_pkey; 4662 ibdm_hca_list_t *hca_list; 4663 ibdm_port_attr_t *port; 4664 ibt_hca_portinfo_t *pinfop; 4665 4666 IBTF_DPRINTF_L4("ibdm", "\tibnex_update_pkey_tbls:"); 4667 4668 mutex_enter(&ibdm.ibdm_hl_mutex); 4669 hca_list = ibdm.ibdm_hca_list_head; 4670 4671 for (h = 0; h < ibdm.ibdm_hca_count; h++) { 4672 4673 /* This updates P_Key Tables for all ports of this HCA */ 4674 (void) ibt_query_hca_ports(hca_list->hl_hca_hdl, 0, &pinfop, 4675 &nports, &size); 4676 4677 /* number of ports shouldn't have changed */ 4678 ASSERT(nports == hca_list->hl_nports); 4679 4680 for (pp = 0; pp < hca_list->hl_nports; pp++) { 4681 port = &hca_list->hl_port_attr[pp]; 4682 4683 /* 4684 * First figure out the P_Keys from IBTL. 4685 * Three things could have happened: 4686 * New P_Keys added 4687 * Existing P_Keys removed 4688 * Both of the above two 4689 * 4690 * Loop through the P_Key Indices and check if a 4691 * give P_Key_Ix matches that of the one seen by 4692 * IBDM. If they match no action is needed. 4693 * 4694 * If they don't match: 4695 * 1. if orig_pkey is invalid and new_pkey is valid 4696 * ---> add new_pkey to DM database 4697 * 2. if orig_pkey is valid and new_pkey is invalid 4698 * ---> remove orig_pkey from DM database 4699 * 3. if orig_pkey and new_pkey are both valid: 4700 * ---> remov orig_pkey from DM database 4701 * ---> add new_pkey to DM database 4702 * 4. if orig_pkey and new_pkey are both invalid: 4703 * ---> do nothing. Updated DM database. 
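			 *
			 * A compact sketch of the same decision, where
			 * valid()/invalid() stand for !IBDM_INVALID_PKEY()
			 * and IBDM_INVALID_PKEY() (illustrative only; the
			 * actual code follows below):
			 *
			 *   if (*orig_pkey == new_pkey)	 -> no action
			 *   else if invalid(orig) && valid(new) -> ibmf_init
			 *   else if valid(orig) && invalid(new) -> ibmf_fini
			 *   else if valid(orig) && valid(new)	 -> fini, init
			 *   else				 -> record only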
4704 */ 4705 4706 for (pidx = 0; pidx < port->pa_npkeys; pidx++) { 4707 new_pkey = pinfop[pp].p_pkey_tbl[pidx]; 4708 orig_pkey = &port->pa_pkey_tbl[pidx].pt_pkey; 4709 4710 /* keys match - do nothing */ 4711 if (*orig_pkey == new_pkey) 4712 continue; 4713 4714 if (IBDM_INVALID_PKEY(*orig_pkey) && 4715 !IBDM_INVALID_PKEY(new_pkey)) { 4716 /* P_Key was added */ 4717 IBTF_DPRINTF_L5("ibdm", 4718 "\tibnex_update_pkey_tbls: new " 4719 "P_Key added = 0x%x", new_pkey); 4720 *orig_pkey = new_pkey; 4721 ibdm_port_attr_ibmf_init(port, 4722 new_pkey, pp); 4723 } else if (!IBDM_INVALID_PKEY(*orig_pkey) && 4724 IBDM_INVALID_PKEY(new_pkey)) { 4725 /* P_Key was removed */ 4726 IBTF_DPRINTF_L5("ibdm", 4727 "\tibnex_update_pkey_tbls: P_Key " 4728 "removed = 0x%x", *orig_pkey); 4729 *orig_pkey = new_pkey; 4730 (void) ibdm_port_attr_ibmf_fini(port, 4731 pidx); 4732 } else if (!IBDM_INVALID_PKEY(*orig_pkey) && 4733 !IBDM_INVALID_PKEY(new_pkey)) { 4734 /* P_Key were replaced */ 4735 IBTF_DPRINTF_L5("ibdm", 4736 "\tibnex_update_pkey_tbls: P_Key " 4737 "replaced 0x%x with 0x%x", 4738 *orig_pkey, new_pkey); 4739 (void) ibdm_port_attr_ibmf_fini(port, 4740 pidx); 4741 *orig_pkey = new_pkey; 4742 ibdm_port_attr_ibmf_init(port, 4743 new_pkey, pp); 4744 } else { 4745 /* 4746 * P_Keys are invalid 4747 * set anyway to reflect if 4748 * INVALID_FULL was changed to 4749 * INVALID_LIMITED or vice-versa. 4750 */ 4751 *orig_pkey = new_pkey; 4752 } /* end of else */ 4753 4754 } /* loop of p_key index */ 4755 4756 } /* loop of #ports of HCA */ 4757 4758 ibt_free_portinfo(pinfop, size); 4759 hca_list = hca_list->hl_next; 4760 4761 } /* loop for all HCAs in the system */ 4762 4763 mutex_exit(&ibdm.ibdm_hl_mutex); 4764 } 4765 4766 4767 /* 4768 * ibdm_send_ioc_profile() 4769 * Send IOC Controller Profile request. When the request is completed 4770 * IBMF calls ibdm_process_incoming_mad routine to inform about 4771 * the completion. 4772 */ 4773 static int 4774 ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *gid_info, uint8_t ioc_no) 4775 { 4776 ibmf_msg_t *msg; 4777 ib_mad_hdr_t *hdr; 4778 ibdm_ioc_info_t *ioc_info = &(gid_info->gl_iou->iou_ioc_info[ioc_no]); 4779 ibdm_timeout_cb_args_t *cb_args; 4780 4781 IBTF_DPRINTF_L4("ibdm", "\tsend_ioc_profile: " 4782 "gid info 0x%p, ioc_no = %d", gid_info, ioc_no); 4783 4784 /* 4785 * Send command to get IOC profile. 4786 * Allocate a IBMF packet and initialize the packet. 
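	 * The request built below is a Device Management GET of the
	 * IOCControllerProfile attribute; its AttributeModifier is set to
	 * the 1-based controller slot number (ioc_no + 1).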
4787 */ 4788 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 4789 &msg) != IBMF_SUCCESS) { 4790 IBTF_DPRINTF_L4("ibdm", "\tsend_ioc_profile: pkt alloc fail"); 4791 return (IBDM_FAILURE); 4792 } 4793 4794 ibdm_alloc_send_buffers(msg); 4795 4796 mutex_enter(&gid_info->gl_mutex); 4797 ibdm_bump_transactionID(gid_info); 4798 mutex_exit(&gid_info->gl_mutex); 4799 4800 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 4801 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 4802 if (gid_info->gl_redirected == B_TRUE) { 4803 if (gid_info->gl_redirect_dlid != 0) { 4804 msg->im_local_addr.ia_remote_lid = 4805 gid_info->gl_redirect_dlid; 4806 } 4807 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 4808 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 4809 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 4810 } else { 4811 msg->im_local_addr.ia_remote_qno = 1; 4812 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 4813 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 4814 } 4815 4816 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 4817 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 4818 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 4819 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 4820 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 4821 hdr->Status = 0; 4822 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 4823 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 4824 hdr->AttributeModifier = h2b32(ioc_no + 1); 4825 4826 ioc_info->ioc_state = IBDM_IOC_STATE_REPROBE_PROGRESS; 4827 cb_args = &ioc_info->ioc_cb_args; 4828 cb_args->cb_gid_info = gid_info; 4829 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 4830 cb_args->cb_req_type = IBDM_REQ_TYPE_IOCINFO; 4831 cb_args->cb_ioc_num = ioc_no; 4832 4833 ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 4834 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 4835 4836 IBTF_DPRINTF_L5("ibdm", "\tsend_ioc_profile:" 4837 "timeout %x", ioc_info->ioc_timeout_id); 4838 4839 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg, 4840 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 4841 IBTF_DPRINTF_L2("ibdm", 4842 "\tsend_ioc_profile: msg transport failed"); 4843 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 4844 } 4845 ioc_info->ioc_state = IBDM_IOC_STATE_REPROBE_PROGRESS; 4846 return (IBDM_SUCCESS); 4847 } 4848 4849 4850 /* 4851 * ibdm_port_reachable 4852 * Sends a SA query to get the NODE record for port GUID 4853 * Returns IBDM_SUCCESS if the port GID is reachable 4854 */ 4855 static int 4856 ibdm_port_reachable(ibmf_saa_handle_t sa_hdl, ib_guid_t guid, 4857 ib_guid_t *node_guid) 4858 { 4859 sa_node_record_t req, *resp = NULL; 4860 ibmf_saa_access_args_t args; 4861 int ret; 4862 size_t length; 4863 4864 IBTF_DPRINTF_L4("ibdm", "\tport_reachable: port_guid %llx", 4865 guid); 4866 4867 bzero(&req, sizeof (sa_node_record_t)); 4868 req.NodeInfo.PortGUID = guid; 4869 4870 args.sq_attr_id = SA_NODERECORD_ATTRID; 4871 args.sq_access_type = IBMF_SAA_RETRIEVE; 4872 args.sq_component_mask = SA_NODEINFO_COMPMASK_PORTGUID; 4873 args.sq_template = &req; 4874 args.sq_callback = NULL; 4875 args.sq_callback_arg = NULL; 4876 4877 ret = ibmf_sa_access(sa_hdl, &args, 0, &length, (void **) &resp); 4878 if (ret != IBMF_SUCCESS) { 4879 IBTF_DPRINTF_L2("ibdm", "\tport_reachable:" 4880 " SA Retrieve Failed: %d", ret); 4881 return (IBDM_FAILURE); 4882 } 4883 4884 if ((resp == NULL) || (length == 0)) { 4885 IBTF_DPRINTF_L2("ibdm", "\tport_reachable: No records"); 4886 return (IBDM_FAILURE); 4887 } 4888 4889 if (node_guid != NULL) 
		*node_guid = resp->NodeInfo.NodeGUID;

	kmem_free(resp, length);

	return (IBDM_SUCCESS);
}

/*
 * Update the gidlist for all affected IOCs when a GID becomes
 * available/unavailable.
 *
 * Parameters :
 *	gid_info   - Incoming / Outgoing GID.
 *	avail_flag - 1 for GID added, 0 for GID removed.
 *		     (-1) : IOC gid list updated, ioc_list required.
 *
 * This function finds the gid_info of the node GUID corresponding to the
 * port GID and collects the affected IOCs from its IOU info.
 */
static ibdm_ioc_info_t *
ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *gid_info, int avail_flag)
{
	ibdm_dp_gidinfo_t	*node_gid = NULL;
	uint8_t			niocs, ii;
	ibdm_ioc_info_t		*ioc, *ioc_list = NULL, *tmp;

	IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist");

	switch (avail_flag) {
	case 1 :
		node_gid = ibdm_check_dest_nodeguid(gid_info);
		break;
	case 0 :
		node_gid = ibdm_handle_gid_rm(gid_info);
		break;
	case -1 :
		node_gid = gid_info;
		break;
	default :
		break;
	}

	if (node_gid == NULL) {
		IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist: "
		    "No node GID found, port gid 0x%p, avail_flag %d",
		    gid_info, avail_flag);
		return (NULL);
	}

	mutex_enter(&node_gid->gl_mutex);
	if ((node_gid->gl_state != IBDM_GID_PROBING_COMPLETE &&
	    node_gid->gl_state != IBDM_GID_PROBING_SKIPPED) ||
	    node_gid->gl_iou == NULL) {
		IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist "
		    "gl_state %x, gl_iou %p", node_gid->gl_state,
		    node_gid->gl_iou);
		mutex_exit(&node_gid->gl_mutex);
		return (NULL);
	}

	niocs = node_gid->gl_iou->iou_info.iou_num_ctrl_slots;
	IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist : niocs %x",
	    niocs);
	for (ii = 0; ii < niocs; ii++) {
		ioc = IBDM_GIDINFO2IOCINFO(node_gid, ii);
		/*
		 * Skip IOCs for which the probe is not complete or
		 * a reprobe is in progress.
		 */
		if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) {
			tmp = ibdm_dup_ioc_info(ioc, node_gid);
			tmp->ioc_info_updated.ib_gid_prop_updated = 1;
			tmp->ioc_next = ioc_list;
			ioc_list = tmp;
		}
	}
	mutex_exit(&node_gid->gl_mutex);

	IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist : return %p",
	    ioc_list);
	return (ioc_list);
}

/*
 * ibdm_saa_event_cb :
 *	Event handling which does *not* require ibdm_hl_mutex to be
 *	held is executed in the same thread. This is to prevent
 *	deadlocks with HCA port down notifications which hold the
 *	ibdm_hl_mutex.
 *
 *	GID_AVAILABLE event is handled here. A taskq is spawned to
 *	handle GID_UNAVAILABLE.
 *
 *	A new mutex ibdm_ibnex_mutex has been introduced to protect
 *	ibnex_callback. This has been done to prevent any possible
 *	deadlock (described above) while handling GID_AVAILABLE.
 *
 *	IBMF calls the event callback for an HCA port. The SA handle
 *	for this port remains valid until the callback returns, so
 *	IBDM calling IBMF with this SA handle from within the callback
 *	is valid.
 *
 *	IBDM will additionally check (SA handle != NULL) before
 *	calling IBMF.
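 *
 *	Note that GID_AVAILABLE handling may itself dispatch a taskq
 *	(ibdm_saa_handle_new_gid) when the GID belongs to an IOU that is
 *	not yet in the GID list, since filling gl_hca_list requires
 *	ibdm_hl_mutex to be held.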
4993 */ 4994 /*ARGSUSED*/ 4995 static void 4996 ibdm_saa_event_cb(ibmf_saa_handle_t ibmf_saa_handle, 4997 ibmf_saa_subnet_event_t ibmf_saa_event, 4998 ibmf_saa_event_details_t *event_details, void *callback_arg) 4999 { 5000 ibdm_saa_event_arg_t *event_arg; 5001 ib_gid_t sgid, dgid; 5002 ibdm_port_attr_t *hca_port; 5003 ibdm_dp_gidinfo_t *gid_info, *node_gid_info = NULL; 5004 ib_guid_t nodeguid; 5005 5006 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*event_arg)); 5007 5008 hca_port = (ibdm_port_attr_t *)callback_arg; 5009 5010 IBTF_DPRINTF_L4("ibdm", "\tsaa_event_cb(%x, %x, %x, %x)\n", 5011 ibmf_saa_handle, ibmf_saa_event, event_details, 5012 callback_arg); 5013 #ifdef DEBUG 5014 if (ibdm_ignore_saa_event) 5015 return; 5016 #endif 5017 5018 if (ibmf_saa_event == IBMF_SAA_EVENT_GID_AVAILABLE) { 5019 /* 5020 * Ensure no other probe / sweep fabric is in 5021 * progress. 5022 */ 5023 mutex_enter(&ibdm.ibdm_mutex); 5024 while (ibdm.ibdm_busy & IBDM_BUSY) 5025 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5026 ibdm.ibdm_busy |= IBDM_BUSY; 5027 mutex_exit(&ibdm.ibdm_mutex); 5028 5029 /* 5030 * If we already know about this GID, return. 5031 * GID_AVAILABLE may be reported for multiple HCA 5032 * ports. 5033 */ 5034 if ((ibdm_check_dgid(event_details->ie_gid.gid_guid, 5035 event_details->ie_gid.gid_prefix)) != NULL) { 5036 mutex_enter(&ibdm.ibdm_mutex); 5037 ibdm.ibdm_busy &= ~IBDM_BUSY; 5038 cv_broadcast(&ibdm.ibdm_busy_cv); 5039 mutex_exit(&ibdm.ibdm_mutex); 5040 return; 5041 } 5042 5043 IBTF_DPRINTF_L4("ibdm", "\tGID (prefix %x, guid %llx) " 5044 "Insertion notified", 5045 event_details->ie_gid.gid_prefix, 5046 event_details->ie_gid.gid_guid); 5047 5048 /* This is a new gid, insert it to GID list */ 5049 sgid.gid_prefix = hca_port->pa_sn_prefix; 5050 sgid.gid_guid = hca_port->pa_port_guid; 5051 dgid.gid_prefix = event_details->ie_gid.gid_prefix; 5052 dgid.gid_guid = event_details->ie_gid.gid_guid; 5053 gid_info = ibdm_create_gid_info(hca_port, sgid, dgid); 5054 if (gid_info == NULL) { 5055 IBTF_DPRINTF_L4("ibdm", "\tGID_AVAILABLE: " 5056 "create_gid_info returned NULL"); 5057 mutex_enter(&ibdm.ibdm_mutex); 5058 ibdm.ibdm_busy &= ~IBDM_BUSY; 5059 cv_broadcast(&ibdm.ibdm_busy_cv); 5060 mutex_exit(&ibdm.ibdm_mutex); 5061 return; 5062 } 5063 mutex_enter(&gid_info->gl_mutex); 5064 gid_info->gl_state = IBDM_GID_PROBING_SKIPPED; 5065 mutex_exit(&gid_info->gl_mutex); 5066 5067 /* Get the node GUID */ 5068 if (ibdm_port_reachable(ibmf_saa_handle, dgid.gid_guid, 5069 &nodeguid) != IBDM_SUCCESS) { 5070 /* 5071 * Set the state to PROBE_NOT_DONE for the 5072 * next sweep to probe it 5073 */ 5074 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_taskq: " 5075 "Skipping GID : port GUID not found"); 5076 mutex_enter(&gid_info->gl_mutex); 5077 gid_info->gl_state = IBDM_GID_PROBE_NOT_DONE; 5078 mutex_exit(&gid_info->gl_mutex); 5079 mutex_enter(&ibdm.ibdm_mutex); 5080 ibdm.ibdm_busy &= ~IBDM_BUSY; 5081 cv_broadcast(&ibdm.ibdm_busy_cv); 5082 mutex_exit(&ibdm.ibdm_mutex); 5083 return; 5084 } 5085 5086 gid_info->gl_nodeguid = nodeguid; 5087 gid_info->gl_portguid = dgid.gid_guid; 5088 5089 /* 5090 * Get the gid info with the same node GUID. 
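		 * If such a gid_info exists, the IOU behind this node GUID
		 * has already been probed through another port GID and no
		 * further probing is needed; otherwise the IOU is new and
		 * ibdm_saa_handle_new_gid is dispatched below to probe it.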
5091 */ 5092 mutex_enter(&ibdm.ibdm_mutex); 5093 node_gid_info = ibdm.ibdm_dp_gidlist_head; 5094 while (node_gid_info) { 5095 if (node_gid_info->gl_nodeguid == 5096 gid_info->gl_nodeguid && 5097 node_gid_info->gl_iou != NULL) { 5098 break; 5099 } 5100 node_gid_info = node_gid_info->gl_next; 5101 } 5102 mutex_exit(&ibdm.ibdm_mutex); 5103 5104 /* 5105 * Handling a new GID requires filling of gl_hca_list. 5106 * This require ibdm hca_list to be parsed and hence 5107 * holding the ibdm_hl_mutex. Spawning a new thread to 5108 * handle this. 5109 */ 5110 if (node_gid_info == NULL) { 5111 if (taskq_dispatch(system_taskq, 5112 ibdm_saa_handle_new_gid, (void *)gid_info, 5113 TQ_NOSLEEP) == NULL) { 5114 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: " 5115 "new_gid taskq_dispatch failed"); 5116 return; 5117 } 5118 } 5119 5120 mutex_enter(&ibdm.ibdm_mutex); 5121 ibdm.ibdm_busy &= ~IBDM_BUSY; 5122 cv_broadcast(&ibdm.ibdm_busy_cv); 5123 mutex_exit(&ibdm.ibdm_mutex); 5124 return; 5125 } 5126 5127 if (ibmf_saa_event != IBMF_SAA_EVENT_GID_UNAVAILABLE) 5128 return; 5129 5130 event_arg = (ibdm_saa_event_arg_t *)kmem_alloc( 5131 sizeof (ibdm_saa_event_arg_t), KM_SLEEP); 5132 event_arg->ibmf_saa_handle = ibmf_saa_handle; 5133 event_arg->ibmf_saa_event = ibmf_saa_event; 5134 bcopy(event_details, &event_arg->event_details, 5135 sizeof (ibmf_saa_event_details_t)); 5136 event_arg->callback_arg = callback_arg; 5137 5138 if (taskq_dispatch(system_taskq, ibdm_saa_event_taskq, 5139 (void *)event_arg, TQ_NOSLEEP) == NULL) { 5140 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: " 5141 "taskq_dispatch failed"); 5142 ibdm_free_saa_event_arg(event_arg); 5143 return; 5144 } 5145 } 5146 5147 /* 5148 * Handle a new GID discovered by GID_AVAILABLE saa event. 5149 */ 5150 void 5151 ibdm_saa_handle_new_gid(void *arg) 5152 { 5153 ibdm_dp_gidinfo_t *gid_info; 5154 ibdm_hca_list_t *hca_list = NULL; 5155 ibdm_port_attr_t *port = NULL; 5156 ibdm_ioc_info_t *ioc_list = NULL; 5157 5158 IBTF_DPRINTF_L4(ibdm_string, "\tsaa_handle_new_gid(%p)", arg); 5159 5160 gid_info = (ibdm_dp_gidinfo_t *)arg; 5161 5162 /* 5163 * Ensure that no other sweep / probe has completed 5164 * probing this gid. 5165 */ 5166 mutex_enter(&gid_info->gl_mutex); 5167 if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) { 5168 mutex_exit(&gid_info->gl_mutex); 5169 return; 5170 } 5171 mutex_exit(&gid_info->gl_mutex); 5172 5173 /* 5174 * Parse HCAs to fill gl_hca_list 5175 */ 5176 mutex_enter(&ibdm.ibdm_hl_mutex); 5177 for (ibdm_get_next_port(&hca_list, &port, 1); port; 5178 ibdm_get_next_port(&hca_list, &port, 1)) { 5179 if (ibdm_port_reachable(port->pa_sa_hdl, 5180 gid_info->gl_portguid, NULL) == 5181 IBDM_SUCCESS) { 5182 ibdm_addto_glhcalist(gid_info, hca_list); 5183 } 5184 } 5185 mutex_exit(&ibdm.ibdm_hl_mutex); 5186 5187 /* 5188 * Ensure no other probe / sweep fabric is in 5189 * progress. 
	 */
	mutex_enter(&ibdm.ibdm_mutex);
	while (ibdm.ibdm_busy & IBDM_BUSY)
		cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
	ibdm.ibdm_busy |= IBDM_BUSY;
	mutex_exit(&ibdm.ibdm_mutex);

	/*
	 * New IOU: probe it to check for new IOCs.
	 */
	IBTF_DPRINTF_L4(ibdm_string, "\tsaa_handle_new_gid: "
	    "new GID : probing");
	mutex_enter(&ibdm.ibdm_mutex);
	ibdm.ibdm_ngid_probes_in_progress++;
	mutex_exit(&ibdm.ibdm_mutex);
	mutex_enter(&gid_info->gl_mutex);
	gid_info->gl_reprobe_flag = 0;
	gid_info->gl_state = IBDM_GID_PROBE_NOT_DONE;
	mutex_exit(&gid_info->gl_mutex);
	ibdm_probe_gid_thread((void *)gid_info);

	mutex_enter(&ibdm.ibdm_mutex);
	ibdm_wait_probe_completion();
	mutex_exit(&ibdm.ibdm_mutex);

	if (gid_info->gl_iou == NULL) {
		mutex_enter(&ibdm.ibdm_mutex);
		ibdm.ibdm_busy &= ~IBDM_BUSY;
		cv_broadcast(&ibdm.ibdm_busy_cv);
		mutex_exit(&ibdm.ibdm_mutex);
		return;
	}

	/*
	 * Update the GID list in all IOCs affected by this GID.
	 */
	ioc_list = ibdm_update_ioc_gidlist(gid_info, 1);

	/*
	 * Pass on the IOCs with updated GIDs to IBnexus.
	 */
	if (ioc_list) {
		mutex_enter(&ibdm.ibdm_ibnex_mutex);
		if (ibdm.ibdm_ibnex_callback != NULL) {
			(*ibdm.ibdm_ibnex_callback)((void *)ioc_list,
			    IBDM_EVENT_IOC_PROP_UPDATE);
		}
		mutex_exit(&ibdm.ibdm_ibnex_mutex);
	}

	mutex_enter(&ibdm.ibdm_mutex);
	ibdm.ibdm_busy &= ~IBDM_BUSY;
	cv_broadcast(&ibdm.ibdm_busy_cv);
	mutex_exit(&ibdm.ibdm_mutex);
}

/*
 * ibdm_saa_event_taskq :
 *	GID_UNAVAILABLE event handling requires ibdm_hl_mutex to be
 *	held. The GID_UNAVAILABLE handling is done in a taskq to
 *	prevent deadlocks with HCA port down notifications which hold
 *	ibdm_hl_mutex.
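 *
 *	Processing outline (informal sketch of the code below):
 *	  1. Validate that the port_attr passed as the callback argument
 *	     and its SA handle are still registered with IBDM.
 *	  2. If the GID is still reachable through any other HCA port,
 *	     ignore the removal.
 *	  3. Otherwise unlink the gid_info from the global GID list,
 *	     collect the affected IOCs via ibdm_update_ioc_gidlist() and
 *	     notify IBnexus with IBDM_EVENT_IOC_PROP_UPDATE.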
5252 */ 5253 void 5254 ibdm_saa_event_taskq(void *arg) 5255 { 5256 ibdm_saa_event_arg_t *event_arg; 5257 ibmf_saa_handle_t ibmf_saa_handle; 5258 ibmf_saa_subnet_event_t ibmf_saa_event; 5259 ibmf_saa_event_details_t *event_details; 5260 void *callback_arg; 5261 5262 ibdm_dp_gidinfo_t *gid_info; 5263 ibdm_port_attr_t *hca_port, *port = NULL; 5264 ibdm_hca_list_t *hca_list = NULL; 5265 int sa_handle_valid = 0; 5266 ibdm_ioc_info_t *ioc_list = NULL; 5267 5268 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*event_arg)); 5269 5270 event_arg = (ibdm_saa_event_arg_t *)arg; 5271 ibmf_saa_handle = event_arg->ibmf_saa_handle; 5272 ibmf_saa_event = event_arg->ibmf_saa_event; 5273 event_details = &event_arg->event_details; 5274 callback_arg = event_arg->callback_arg; 5275 5276 ASSERT(callback_arg != NULL); 5277 ASSERT(ibmf_saa_event == IBMF_SAA_EVENT_GID_UNAVAILABLE); 5278 IBTF_DPRINTF_L4("ibdm", "\tsaa_event_taskq(%x, %x, %x, %x)", 5279 ibmf_saa_handle, ibmf_saa_event, event_details, 5280 callback_arg); 5281 5282 hca_port = (ibdm_port_attr_t *)callback_arg; 5283 5284 /* Check if the port_attr is still valid */ 5285 mutex_enter(&ibdm.ibdm_hl_mutex); 5286 for (ibdm_get_next_port(&hca_list, &port, 0); port; 5287 ibdm_get_next_port(&hca_list, &port, 0)) { 5288 if (port == hca_port && port->pa_port_guid == 5289 hca_port->pa_port_guid) { 5290 if (ibmf_saa_handle == hca_port->pa_sa_hdl) 5291 sa_handle_valid = 1; 5292 break; 5293 } 5294 } 5295 mutex_exit(&ibdm.ibdm_hl_mutex); 5296 if (sa_handle_valid == 0) { 5297 ibdm_free_saa_event_arg(event_arg); 5298 return; 5299 } 5300 5301 if (hca_port && (hca_port->pa_sa_hdl == NULL || 5302 ibmf_saa_handle != hca_port->pa_sa_hdl)) { 5303 ibdm_free_saa_event_arg(event_arg); 5304 return; 5305 } 5306 hca_list = NULL; 5307 port = NULL; 5308 5309 /* 5310 * Check if the GID is visible to other HCA ports. 5311 * Return if so. 5312 */ 5313 mutex_enter(&ibdm.ibdm_hl_mutex); 5314 for (ibdm_get_next_port(&hca_list, &port, 1); port; 5315 ibdm_get_next_port(&hca_list, &port, 1)) { 5316 if (ibdm_port_reachable(port->pa_sa_hdl, 5317 event_details->ie_gid.gid_guid, NULL) == 5318 IBDM_SUCCESS) { 5319 mutex_exit(&ibdm.ibdm_hl_mutex); 5320 ibdm_free_saa_event_arg(event_arg); 5321 return; 5322 } 5323 } 5324 mutex_exit(&ibdm.ibdm_hl_mutex); 5325 5326 /* 5327 * Ensure no other probe / sweep fabric is in 5328 * progress. 5329 */ 5330 mutex_enter(&ibdm.ibdm_mutex); 5331 while (ibdm.ibdm_busy & IBDM_BUSY) 5332 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5333 ibdm.ibdm_busy |= IBDM_BUSY; 5334 mutex_exit(&ibdm.ibdm_mutex); 5335 5336 /* 5337 * If this GID is no longer in GID list, return 5338 * GID_UNAVAILABLE may be reported for multiple HCA 5339 * ports. 
5340 */ 5341 mutex_enter(&ibdm.ibdm_mutex); 5342 gid_info = ibdm.ibdm_dp_gidlist_head; 5343 while (gid_info) { 5344 if (gid_info->gl_portguid == 5345 event_details->ie_gid.gid_guid) { 5346 break; 5347 } 5348 gid_info = gid_info->gl_next; 5349 } 5350 mutex_exit(&ibdm.ibdm_mutex); 5351 if (gid_info == NULL) { 5352 mutex_enter(&ibdm.ibdm_mutex); 5353 ibdm.ibdm_busy &= ~IBDM_BUSY; 5354 cv_broadcast(&ibdm.ibdm_busy_cv); 5355 mutex_exit(&ibdm.ibdm_mutex); 5356 ibdm_free_saa_event_arg(event_arg); 5357 return; 5358 } 5359 5360 IBTF_DPRINTF_L4("ibdm", "\tGID (prefix %x, guid %llx) " 5361 "Unavailable notification", 5362 event_details->ie_gid.gid_prefix, 5363 event_details->ie_gid.gid_guid); 5364 5365 /* 5366 * Update GID list in all IOCs affected by this 5367 */ 5368 if (gid_info->gl_state == IBDM_GID_PROBING_SKIPPED || 5369 gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) 5370 ioc_list = ibdm_update_ioc_gidlist(gid_info, 0); 5371 5372 /* 5373 * Remove GID from the global GID list 5374 * Handle the case where all port GIDs for an 5375 * IOU have been hot-removed. Check both gid_info 5376 * & ioc_info for checking ngids. 5377 */ 5378 mutex_enter(&ibdm.ibdm_mutex); 5379 if (gid_info->gl_iou != NULL && gid_info->gl_ngids == 0) { 5380 mutex_enter(&gid_info->gl_mutex); 5381 (void) ibdm_free_iou_info(gid_info); 5382 mutex_exit(&gid_info->gl_mutex); 5383 } 5384 if (gid_info->gl_prev != NULL) 5385 gid_info->gl_prev->gl_next = gid_info->gl_next; 5386 if (gid_info->gl_next != NULL) 5387 gid_info->gl_next->gl_prev = gid_info->gl_prev; 5388 5389 if (gid_info == ibdm.ibdm_dp_gidlist_head) 5390 ibdm.ibdm_dp_gidlist_head = gid_info->gl_next; 5391 if (gid_info == ibdm.ibdm_dp_gidlist_tail) 5392 ibdm.ibdm_dp_gidlist_tail = gid_info->gl_prev; 5393 ibdm.ibdm_ngids--; 5394 5395 ibdm.ibdm_busy &= ~IBDM_BUSY; 5396 cv_broadcast(&ibdm.ibdm_busy_cv); 5397 mutex_exit(&ibdm.ibdm_mutex); 5398 5399 mutex_destroy(&gid_info->gl_mutex); 5400 kmem_free(gid_info, sizeof (ibdm_dp_gidinfo_t)); 5401 5402 /* 5403 * Pass on the IOCs with updated GIDs to IBnexus 5404 */ 5405 if (ioc_list) { 5406 IBTF_DPRINTF_L4("ibdm", "\tGID_UNAVAILABLE " 5407 "IOC_PROP_UPDATE for %p\n", ioc_list); 5408 mutex_enter(&ibdm.ibdm_ibnex_mutex); 5409 if (ibdm.ibdm_ibnex_callback != NULL) { 5410 (*ibdm.ibdm_ibnex_callback)((void *) 5411 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE); 5412 } 5413 mutex_exit(&ibdm.ibdm_ibnex_mutex); 5414 } 5415 5416 ibdm_free_saa_event_arg(event_arg); 5417 } 5418 5419 5420 static int 5421 ibdm_cmp_gid_list(ibdm_gid_t *new, ibdm_gid_t *prev) 5422 { 5423 ibdm_gid_t *scan_new, *scan_prev; 5424 int cmp_failed = 0; 5425 5426 ASSERT(new != NULL); 5427 ASSERT(prev != NULL); 5428 5429 /* 5430 * Search for each new gid anywhere in the prev GID list. 5431 * Note that the gid list could have been re-ordered. 5432 */ 5433 for (scan_new = new; scan_new; scan_new = scan_new->gid_next) { 5434 for (scan_prev = prev, cmp_failed = 1; scan_prev; 5435 scan_prev = scan_prev->gid_next) { 5436 if (scan_prev->gid_dgid_hi == scan_new->gid_dgid_hi && 5437 scan_prev->gid_dgid_lo == scan_new->gid_dgid_lo) { 5438 cmp_failed = 0; 5439 break; 5440 } 5441 } 5442 5443 if (cmp_failed) 5444 return (1); 5445 } 5446 return (0); 5447 } 5448 5449 /* 5450 * This is always called in a single thread 5451 * This function updates the gid_list and serv_list of IOC 5452 * The current gid_list is in ioc_info_t(contains only port 5453 * guids for which probe is done) & gidinfo_t(other port gids) 5454 * The gids in both locations are used for comparision. 
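 *	A GID list is treated as changed when the number of port GIDs
 *	differs or when any previously known GID is missing from the
 *	current list (ibdm_cmp_gid_list() ignores ordering); the
 *	ioc_prev_* fields are then freed and reset for the next reprobe.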
5455 */ 5456 static void 5457 ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *ioc, ibdm_dp_gidinfo_t *gidinfo) 5458 { 5459 ibdm_gid_t *cur_gid_list; 5460 uint_t cur_nportgids; 5461 5462 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 5463 5464 ioc->ioc_info_updated.ib_prop_updated = 0; 5465 5466 5467 /* Current GID list is in gid_info only */ 5468 cur_gid_list = gidinfo->gl_gid; 5469 cur_nportgids = gidinfo->gl_ngids; 5470 5471 /* 5472 * Service entry names and IDs are not compared currently. 5473 * This may require change. 5474 */ 5475 if (ioc->ioc_prev_serv_cnt != ioc->ioc_profile.ioc_service_entries) 5476 ioc->ioc_info_updated.ib_srv_prop_updated = 1; 5477 5478 if (ioc->ioc_prev_nportgids != cur_nportgids || 5479 ioc->ioc_prev_gid_list == NULL || cur_gid_list == NULL) { 5480 ioc->ioc_info_updated.ib_gid_prop_updated = 1; 5481 } else if (ibdm_cmp_gid_list(ioc->ioc_prev_gid_list, cur_gid_list)) { 5482 ioc->ioc_info_updated.ib_gid_prop_updated = 1; 5483 } 5484 5485 /* Zero out previous entries */ 5486 ibdm_free_gid_list(ioc->ioc_prev_gid_list); 5487 if (ioc->ioc_prev_serv) 5488 kmem_free(ioc->ioc_prev_serv, ioc->ioc_prev_serv_cnt * 5489 sizeof (ibdm_srvents_info_t)); 5490 ioc->ioc_prev_serv_cnt = 0; 5491 ioc->ioc_prev_nportgids = 0; 5492 ioc->ioc_prev_serv = NULL; 5493 ioc->ioc_prev_gid_list = NULL; 5494 } 5495 5496 /* 5497 * Handle GID removal. This returns the gid_info of a GID with the same 5498 * node GUID, if one is found. For a GID with IOU information, rm_gid 5499 * itself is returned if no gid_info with the same node_guid is found. 5500 */ 5501 static ibdm_dp_gidinfo_t * 5502 ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *rm_gid) 5503 { 5504 ibdm_dp_gidinfo_t *gid_list; 5505 5506 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm(0x%p)", rm_gid); 5507 5508 if (rm_gid->gl_iou == NULL) { 5509 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm NO iou"); 5510 /* 5511 * Search for a GID with the same node_guid and 5512 * gl_iou != NULL 5513 */ 5514 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 5515 gid_list = gid_list->gl_next) { 5516 if (gid_list->gl_iou != NULL && (gid_list->gl_nodeguid 5517 == rm_gid->gl_nodeguid)) 5518 break; 5519 } 5520 5521 if (gid_list) 5522 ibdm_rmfrom_glgid_list(gid_list, rm_gid); 5523 5524 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm ret %p", gid_list); 5525 return (gid_list); 5526 } else { 5527 /* 5528 * Search for a GID with the same node_guid and 5529 * gl_iou == NULL 5530 */ 5531 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm with iou"); 5532 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 5533 gid_list = gid_list->gl_next) { 5534 if (gid_list->gl_iou == NULL && (gid_list->gl_nodeguid 5535 == rm_gid->gl_nodeguid)) 5536 break; 5537 } 5538 5539 if (gid_list) { 5540 /* 5541 * Copy the following fields from rm_gid : 5542 * 1. gl_state 5543 * 2. gl_iou 5544 * 3. gl_gid & gl_ngids 5545 * 5546 * Note : Function is synchronized by 5547 * ibdm_busy flag. 5548 * 5549 * Note : Redirect info is initialized if 5550 * any MADs for the GID fail 5551 */ 5552 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm " 5553 "copying info to GID with gl_iou != NULL"); 5554 gid_list->gl_state = rm_gid->gl_state; 5555 gid_list->gl_iou = rm_gid->gl_iou; 5556 gid_list->gl_gid = rm_gid->gl_gid; 5557 gid_list->gl_ngids = rm_gid->gl_ngids; 5558 5559 /* Remove the GID from gl_gid list */ 5560 ibdm_rmfrom_glgid_list(gid_list, rm_gid); 5561 } else { 5562 /* 5563 * Handle the case where all GIDs to the IOU have 5564 * been removed.
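 * In that case rm_gid is removed from its own gl_gid list and
 * rm_gid itself is returned.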
5565 */ 5566 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm 0 GID " 5567 "to IOU"); 5568 5569 ibdm_rmfrom_glgid_list(rm_gid, rm_gid); 5570 return (rm_gid); 5571 } 5572 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm ret %p", gid_list); 5573 return (gid_list); 5574 } 5575 } 5576 5577 static void 5578 ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *gid_info, 5579 ibdm_dp_gidinfo_t *rm_gid) 5580 { 5581 ibdm_gid_t *tmp, *prev; 5582 5583 IBTF_DPRINTF_L4("ibdm", "\trmfrom_glgid (%p, %p)", 5584 gid_info, rm_gid); 5585 5586 for (tmp = gid_info->gl_gid, prev = NULL; tmp; ) { 5587 if (tmp->gid_dgid_hi == rm_gid->gl_dgid_hi && 5588 tmp->gid_dgid_lo == rm_gid->gl_dgid_lo) { 5589 if (prev == NULL) 5590 gid_info->gl_gid = tmp->gid_next; 5591 else 5592 prev->gid_next = tmp->gid_next; 5593 5594 kmem_free(tmp, sizeof (ibdm_gid_t)); 5595 gid_info->gl_ngids--; 5596 break; 5597 } else { 5598 prev = tmp; 5599 tmp = tmp->gid_next; 5600 } 5601 } 5602 } 5603 5604 static void 5605 ibdm_addto_gidlist(ibdm_gid_t **src_ptr, ibdm_gid_t *dest) 5606 { 5607 ibdm_gid_t *head = NULL, *new, *tail; 5608 5609 /* First copy the destination list */ 5610 for (; dest; dest = dest->gid_next) { 5611 new = kmem_zalloc(sizeof (ibdm_gid_t), KM_SLEEP); 5612 new->gid_dgid_hi = dest->gid_dgid_hi; 5613 new->gid_dgid_lo = dest->gid_dgid_lo; 5614 new->gid_next = head; 5615 head = new; 5616 } 5617 5618 /* Append the copy to the source list */ 5619 if (*src_ptr == NULL) 5620 *src_ptr = head; 5621 else { 5622 for (tail = *src_ptr; tail->gid_next != NULL; 5623 tail = tail->gid_next) 5624 ; 5625 5626 tail->gid_next = head; 5627 } 5628 } 5629 5630 static void 5631 ibdm_free_gid_list(ibdm_gid_t *head) 5632 { 5633 ibdm_gid_t *delete; 5634 5635 for (delete = head; delete; ) { 5636 head = delete->gid_next; 5637 kmem_free(delete, sizeof (ibdm_gid_t)); 5638 delete = head; 5639 } 5640 } 5641 5642 /* 5643 * This function rescans the DM capable GIDs (gl_state is 5644 * IBDM_GID_PROBING_COMPLETE or IBDM_GID_PROBING_SKIPPED). This 5645 * basically checks if the DM capable GID is reachable. If 5646 * not, this is handled the same way as GID_UNAVAILABLE, 5647 * except that notifications are not sent to IBnexus. 5648 * 5649 * This function also initializes the ioc_prev_gid_list for 5650 * a particular IOC (when called from probe_ioc, with 5651 * ioc_guidp != NULL) or all IOCs for the gid (called from 5652 * sweep_fabric, ioc_guidp == NULL). 5653 */ 5654 static void 5655 ibdm_rescan_gidlist(ib_guid_t *ioc_guidp) 5656 { 5657 ibdm_dp_gidinfo_t *gid_info, *tmp; 5658 int ii, niocs, found; 5659 ibdm_hca_list_t *hca_list = NULL; 5660 ibdm_port_attr_t *port = NULL; 5661 ibdm_ioc_info_t *ioc_list; 5662 5663 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; ) { 5664 found = 0; 5665 if (gid_info->gl_state != IBDM_GID_PROBING_SKIPPED && 5666 gid_info->gl_state != IBDM_GID_PROBING_COMPLETE) { 5667 gid_info = gid_info->gl_next; 5668 continue; 5669 } 5670 5671 /* 5672 * Check if the GID is visible to any HCA ports. 5673 * Keep the GID if so.
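 * For a GID that is still reachable, only its ioc_prev_gid_list
 * and ioc_prev_nportgids are refreshed below.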
5674 */ 5675 mutex_enter(&ibdm.ibdm_hl_mutex); 5676 for (ibdm_get_next_port(&hca_list, &port, 1); port; 5677 ibdm_get_next_port(&hca_list, &port, 1)) { 5678 if (ibdm_port_reachable(port->pa_sa_hdl, 5679 gid_info->gl_dgid_lo, NULL) == IBDM_SUCCESS) { 5680 found = 1; 5681 break; 5682 } 5683 } 5684 mutex_exit(&ibdm.ibdm_hl_mutex); 5685 5686 if (found) { 5687 if (gid_info->gl_iou == NULL) { 5688 gid_info = gid_info->gl_next; 5689 continue; 5690 } 5691 5692 /* Initialize the ioc_prev_gid_list */ 5693 niocs = 5694 gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 5695 for (ii = 0; ii < niocs; ii++) { 5696 ioc_list = IBDM_GIDINFO2IOCINFO(gid_info, ii); 5697 5698 if (ioc_guidp == NULL || (*ioc_guidp == 5699 ioc_list->ioc_profile.ioc_guid)) { 5700 /* Add info of GIDs in gid_info also */ 5701 ibdm_addto_gidlist( 5702 &ioc_list->ioc_prev_gid_list, 5703 gid_info->gl_gid); 5704 ioc_list->ioc_prev_nportgids = 5705 gid_info->gl_ngids; 5706 } 5707 } 5708 gid_info = gid_info->gl_next; 5709 continue; 5710 } 5711 5712 IBTF_DPRINTF_L4("ibdm", "\trescan_gidlist " 5713 "deleted port GUID %llx", 5714 gid_info->gl_dgid_lo); 5715 5716 /* 5717 * Update the GID list in all IOCs affected by this GID. 5718 */ 5719 ioc_list = ibdm_update_ioc_gidlist(gid_info, 0); 5720 5721 /* 5722 * Remove the GID from the global GID list. 5723 * Handle the case where all port GIDs for an 5724 * IOU have been hot-removed. 5725 */ 5726 mutex_enter(&ibdm.ibdm_mutex); 5727 if (gid_info->gl_iou != NULL && gid_info->gl_ngids == 0) { 5728 mutex_enter(&gid_info->gl_mutex); 5729 (void) ibdm_free_iou_info(gid_info); 5730 mutex_exit(&gid_info->gl_mutex); 5731 } 5732 tmp = gid_info->gl_next; 5733 if (gid_info->gl_prev != NULL) 5734 gid_info->gl_prev->gl_next = gid_info->gl_next; 5735 if (gid_info->gl_next != NULL) 5736 gid_info->gl_next->gl_prev = gid_info->gl_prev; 5737 5738 if (gid_info == ibdm.ibdm_dp_gidlist_head) 5739 ibdm.ibdm_dp_gidlist_head = gid_info->gl_next; 5740 if (gid_info == ibdm.ibdm_dp_gidlist_tail) 5741 ibdm.ibdm_dp_gidlist_tail = gid_info->gl_prev; 5742 ibdm.ibdm_ngids--; 5743 5744 mutex_destroy(&gid_info->gl_mutex); 5745 kmem_free(gid_info, sizeof (ibdm_dp_gidinfo_t)); 5746 gid_info = tmp; 5747 5748 mutex_exit(&ibdm.ibdm_mutex); 5749 5750 /* 5751 * Pass on the IOCs with updated GIDs to IBnexus 5752 */ 5753 if (ioc_list) { 5754 IBTF_DPRINTF_L4("ibdm", "\trescan_gidlist " 5755 "IOC_PROP_UPDATE for %p\n", ioc_list); 5756 mutex_enter(&ibdm.ibdm_ibnex_mutex); 5757 if (ibdm.ibdm_ibnex_callback != NULL) { 5758 (*ibdm.ibdm_ibnex_callback)((void *) 5759 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE); 5760 } 5761 mutex_exit(&ibdm.ibdm_ibnex_mutex); 5762 } 5763 } 5764 } 5765 5766 /* 5767 * This function notifies IBnex of IOCs on this GID. 5768 * Notification is for GIDs with gl_reprobe_flag set. 5769 * The flag is set when IOC probe / fabric sweep 5770 * probes a GID starting from CLASS port info. 5771 * 5772 * IBnexus will have information about a reconnected IOC 5773 * if it has probed it before. If this is a new IOC, 5774 * IBnexus ignores the notification. 5775 * 5776 * This function should be called with no locks held.
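 * (ibdm_ibnex_mutex is acquired internally around the IBnexus callback.)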
5777 */ 5778 static void 5779 ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *gid_info) 5780 { 5781 ibdm_ioc_info_t *ioc_list; 5782 5783 if (gid_info->gl_reprobe_flag == 0 || 5784 gid_info->gl_iou == NULL) 5785 return; 5786 5787 ioc_list = ibdm_update_ioc_gidlist(gid_info, -1); 5788 5789 /* 5790 * Pass on the IOCs with updated GIDs to IBnexus 5791 */ 5792 if (ioc_list) { 5793 mutex_enter(&ibdm.ibdm_ibnex_mutex); 5794 if (ibdm.ibdm_ibnex_callback != NULL) { 5795 (*ibdm.ibdm_ibnex_callback)((void *)ioc_list, 5796 IBDM_EVENT_IOC_PROP_UPDATE); 5797 } 5798 mutex_exit(&ibdm.ibdm_ibnex_mutex); 5799 } 5800 } 5801 5802 5803 static void 5804 ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *arg) 5805 { 5806 if (arg != NULL) 5807 kmem_free(arg, sizeof (ibdm_saa_event_arg_t)); 5808 } 5809 5810 /* 5811 * This function parses the list of HCAs and HCA ports 5812 * to return the port_attr of the next HCA port. A port 5813 * connected to IB fabric (port_state active) is returned, 5814 * if connected_flag is set. 5815 */ 5816 static void 5817 ibdm_get_next_port(ibdm_hca_list_t **inp_hcap, 5818 ibdm_port_attr_t **inp_portp, int connect_flag) 5819 { 5820 int ii; 5821 ibdm_port_attr_t *port, *next_port = NULL; 5822 ibdm_port_attr_t *inp_port; 5823 ibdm_hca_list_t *hca_list; 5824 int found = 0; 5825 5826 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 5827 IBTF_DPRINTF_L4(ibdm_string, "\tget_next_port(%p, %p, %x)", 5828 inp_hcap, inp_portp, connect_flag); 5829 5830 hca_list = *inp_hcap; 5831 inp_port = *inp_portp; 5832 5833 if (hca_list == NULL) 5834 hca_list = ibdm.ibdm_hca_list_head; 5835 5836 for (; hca_list; hca_list = hca_list->hl_next) { 5837 for (ii = 0; ii < hca_list->hl_nports; ii++) { 5838 port = &hca_list->hl_port_attr[ii]; 5839 5840 /* 5841 * inp_port != NULL; 5842 * Skip till we find the matching port 5843 */ 5844 if (inp_port && !found) { 5845 if (inp_port == port) 5846 found = 1; 5847 continue; 5848 } 5849 5850 if (!connect_flag) { 5851 next_port = port; 5852 break; 5853 } 5854 5855 if (port->pa_sa_hdl == NULL) 5856 ibdm_initialize_port(port); 5857 if (port->pa_sa_hdl == NULL) 5858 (void) ibdm_fini_port(port); 5859 else if (next_port == NULL && 5860 port->pa_sa_hdl != NULL && 5861 port->pa_state == IBT_PORT_ACTIVE) { 5862 next_port = port; 5863 break; 5864 } 5865 } 5866 5867 if (next_port) 5868 break; 5869 } 5870 5871 IBTF_DPRINTF_L4(ibdm_string, "\tget_next_port : " 5872 "returns hca_list %p port %p", hca_list, next_port); 5873 *inp_hcap = hca_list; 5874 *inp_portp = next_port; 5875 } 5876 5877 static void 5878 ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *nodegid, ibdm_dp_gidinfo_t *addgid) 5879 { 5880 ibdm_gid_t *tmp; 5881 5882 tmp = kmem_zalloc(sizeof (ibdm_gid_t), KM_SLEEP); 5883 tmp->gid_dgid_hi = addgid->gl_dgid_hi; 5884 tmp->gid_dgid_lo = addgid->gl_dgid_lo; 5885 5886 mutex_enter(&nodegid->gl_mutex); 5887 tmp->gid_next = nodegid->gl_gid; 5888 nodegid->gl_gid = tmp; 5889 nodegid->gl_ngids++; 5890 mutex_exit(&nodegid->gl_mutex); 5891 } 5892 5893 static void 5894 ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *gid_info, 5895 ibdm_hca_list_t *hca) 5896 { 5897 ibdm_hca_list_t *head, *prev = NULL, *temp; 5898 5899 IBTF_DPRINTF_L4(ibdm_string, "\taddto_glhcalist(%p, %p) " 5900 ": gl_hca_list %p", gid_info, hca, gid_info->gl_hca_list); 5901 ASSERT(!MUTEX_HELD(&gid_info->gl_mutex)); 5902 mutex_enter(&gid_info->gl_mutex); 5903 head = gid_info->gl_hca_list; 5904 if (head == NULL) { 5905 head = ibdm_dup_hca_attr(hca); 5906 head->hl_next = NULL; 5907 gid_info->gl_hca_list = head; 5908 mutex_exit(&gid_info->gl_mutex); 5909 
IBTF_DPRINTF_L4(ibdm_string, "\tadd_to_glhcalist: " 5910 "gid %p, gl_hca_list %p", gid_info, 5911 gid_info->gl_hca_list); 5912 return; 5913 } 5914 5915 /* Check if already in the list */ 5916 while (head) { 5917 if (head->hl_hca_guid == hca->hl_hca_guid) { 5918 mutex_exit(&gid_info->gl_mutex); 5919 IBTF_DPRINTF_L4(ibdm_string, 5920 "\taddto_glhcalist : gid %p hca %p dup", 5921 gid_info, hca); 5922 return; 5923 } 5924 prev = head; 5925 head = head->hl_next; 5926 } 5927 5928 /* Add this HCA to gl_hca_list */ 5929 temp = ibdm_dup_hca_attr(hca); 5930 temp->hl_next = NULL; 5931 prev->hl_next = temp; 5932 5933 mutex_exit(&gid_info->gl_mutex); 5934 5935 IBTF_DPRINTF_L4(ibdm_string, "\tadd_to_glhcalist: " 5936 "gid %p, gl_hca_list %p", gid_info, gid_info->gl_hca_list); 5937 } 5938 5939 static void 5940 ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *gid_info) 5941 { 5942 ASSERT(!MUTEX_HELD(&gid_info->gl_mutex)); 5943 ASSERT(!MUTEX_HELD(&ibdm.ibdm_mutex)); 5944 5945 mutex_enter(&gid_info->gl_mutex); 5946 if (gid_info->gl_hca_list) 5947 ibdm_ibnex_free_hca_list(gid_info->gl_hca_list); 5948 gid_info->gl_hca_list = NULL; 5949 mutex_exit(&gid_info->gl_mutex); 5950 } 5951 5952 5953 static void 5954 ibdm_reset_all_dgids(ibmf_saa_handle_t port_sa_hdl) 5955 { 5956 IBTF_DPRINTF_L4(ibdm_string, "\treset_all_dgids(%X)", 5957 port_sa_hdl); 5958 5959 ASSERT(!MUTEX_HELD(&ibdm.ibdm_mutex)); 5960 ASSERT(!MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 5961 5962 /* Check : Not busy in another probe / sweep */ 5963 mutex_enter(&ibdm.ibdm_mutex); 5964 if ((ibdm.ibdm_busy & IBDM_BUSY) == 0) { 5965 ibdm_dp_gidinfo_t *gid_info; 5966 5967 ibdm.ibdm_busy |= IBDM_BUSY; 5968 mutex_exit(&ibdm.ibdm_mutex); 5969 5970 /* 5971 * Check if any GID is using the SA & IBMF handle 5972 * of the HCA port going down. Reset the ibdm_dp_gidinfo_t 5973 * using another HCA port which can reach the GID. 5974 * This is for DM capable GIDs only; no need to do 5975 * this for others. 5976 * 5977 * Delete the GID if no alternate HCA port to reach 5978 * it is found. 5979 */ 5980 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; ) { 5981 ibdm_dp_gidinfo_t *tmp; 5982 5983 IBTF_DPRINTF_L4(ibdm_string, "\tevent_hdlr " 5984 "checking gidinfo %p", gid_info); 5985 5986 if (gid_info->gl_sa_hdl == port_sa_hdl) { 5987 IBTF_DPRINTF_L3(ibdm_string, 5988 "\tevent_hdlr: down HCA port hdl " 5989 "matches gid %p", gid_info); 5990 5991 /* 5992 * The non-DM GIDs can come back 5993 * with a new subnet prefix when 5994 * the HCA port comes up again. To 5995 * avoid issues, delete non-DM 5996 * capable GIDs if the gid was 5997 * discovered using the HCA port 5998 * going down. This is ensured by 5999 * setting gl_disconnected to 1.
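 * (A GID with gl_nodeguid == 0 is treated as non-DM capable below.)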
6000 */ 6001 if (gid_info->gl_nodeguid == 0) 6002 gid_info->gl_disconnected = 1; 6003 else 6004 ibdm_reset_gidinfo(gid_info); 6005 6006 if (gid_info->gl_disconnected) { 6007 IBTF_DPRINTF_L3(ibdm_string, 6008 "\tevent_hdlr: deleting" 6009 " gid %p", gid_info); 6010 tmp = gid_info; 6011 gid_info = gid_info->gl_next; 6012 ibdm_delete_gidinfo(tmp); 6013 } else 6014 gid_info = gid_info->gl_next; 6015 } else 6016 gid_info = gid_info->gl_next; 6017 } 6018 6019 mutex_enter(&ibdm.ibdm_mutex); 6020 ibdm.ibdm_busy &= ~IBDM_BUSY; 6021 cv_signal(&ibdm.ibdm_busy_cv); 6022 } 6023 mutex_exit(&ibdm.ibdm_mutex); 6024 } 6025 6026 static void 6027 ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *gidinfo) 6028 { 6029 ibdm_hca_list_t *hca_list = NULL; 6030 ibdm_port_attr_t *port = NULL; 6031 int gid_reinited = 0; 6032 sa_node_record_t *nr, *tmp; 6033 sa_portinfo_record_t *pi; 6034 size_t nr_len = 0, pi_len = 0; 6035 size_t path_len; 6036 ib_gid_t sgid, dgid; 6037 int ret, ii, nrecords; 6038 sa_path_record_t *path; 6039 uint8_t npaths = 1; 6040 ibdm_pkey_tbl_t *pkey_tbl; 6041 6042 IBTF_DPRINTF_L4(ibdm_string, "\treset_gidinfo(%p)", gidinfo); 6043 6044 /* 6045 * Walk the active ports of the known local HCAs and try to 6046 * reach this GID through one of them. 6047 */ 6048 mutex_enter(&ibdm.ibdm_hl_mutex); 6049 for (ibdm_get_next_port(&hca_list, &port, 1); port; 6050 ibdm_get_next_port(&hca_list, &port, 1)) { 6051 6052 6053 /* 6054 * Get the path and re-populate the gidinfo. 6055 * Getting the path is the same as in probe_ioc. 6056 * Init the gid info as in ibdm_create_gid_info(). 6057 */ 6058 nr = ibdm_get_node_records(port->pa_sa_hdl, &nr_len, 6059 gidinfo->gl_nodeguid); 6060 if (nr == NULL) { 6061 IBTF_DPRINTF_L4(ibdm_string, 6062 "\treset_gidinfo : no records"); 6063 continue; 6064 } 6065 6066 nrecords = (nr_len / sizeof (sa_node_record_t)); 6067 for (tmp = nr, ii = 0; (ii < nrecords); ii++, tmp++) { 6068 if (tmp->NodeInfo.PortGUID == gidinfo->gl_portguid) 6069 break; 6070 } 6071 6072 if (ii == nrecords) { 6073 IBTF_DPRINTF_L4(ibdm_string, 6074 "\treset_gidinfo : no record for portguid"); 6075 kmem_free(nr, nr_len); 6076 continue; 6077 } 6078 6079 pi = ibdm_get_portinfo(port->pa_sa_hdl, &pi_len, tmp->LID); 6080 if (pi == NULL) { 6081 IBTF_DPRINTF_L4(ibdm_string, 6082 "\treset_gidinfo : no portinfo"); 6083 kmem_free(nr, nr_len); 6084 continue; 6085 } 6086 6087 sgid.gid_prefix = port->pa_sn_prefix; 6088 sgid.gid_guid = port->pa_port_guid; 6089 dgid.gid_prefix = pi->PortInfo.GidPrefix; 6090 dgid.gid_guid = tmp->NodeInfo.PortGUID; 6091 6092 ret = ibmf_saa_gid_to_pathrecords(port->pa_sa_hdl, sgid, dgid, 6093 IBMF_SAA_PKEY_WC, 0, B_TRUE, &npaths, 0, &path_len, &path); 6094 6095 if ((ret != IBMF_SUCCESS) || path == NULL) { 6096 IBTF_DPRINTF_L4(ibdm_string, 6097 "\treset_gidinfo : no paths"); 6098 kmem_free(pi, pi_len); 6099 kmem_free(nr, nr_len); 6100 continue; 6101 } 6102 6103 gidinfo->gl_dgid_hi = path->DGID.gid_prefix; 6104 gidinfo->gl_dgid_lo = path->DGID.gid_guid; 6105 gidinfo->gl_sgid_hi = path->SGID.gid_prefix; 6106 gidinfo->gl_sgid_lo = path->SGID.gid_guid; 6107 gidinfo->gl_p_key = path->P_Key; 6108 gidinfo->gl_sa_hdl = port->pa_sa_hdl; 6109 gidinfo->gl_ibmf_hdl = port->pa_ibmf_hdl; 6110 gidinfo->gl_slid = path->SLID; 6111 gidinfo->gl_dlid = path->DLID; 6112 /* Reset redirect info; the next MAD will set it if redirected */ 6113 gidinfo->gl_redirected = 0; 6114 6115 gidinfo->gl_qp_hdl = IBMF_QP_HANDLE_DEFAULT; 6116 for (ii = 0; ii < port->pa_npkeys; ii++) { 6117 if (port->pa_pkey_tbl == NULL) 6118 break; 6119 6120 pkey_tbl =
&port->pa_pkey_tbl[ii]; 6121 if ((gidinfo->gl_p_key == pkey_tbl->pt_pkey) && 6122 (pkey_tbl->pt_qp_hdl != NULL)) { 6123 gidinfo->gl_qp_hdl = pkey_tbl->pt_qp_hdl; 6124 break; 6125 } 6126 } 6127 6128 if (gidinfo->gl_qp_hdl == NULL) 6129 IBTF_DPRINTF_L2(ibdm_string, 6130 "\treset_gid_info: No matching Pkey"); 6131 else 6132 gid_reinited = 1; 6133 6134 kmem_free(path, path_len); 6135 kmem_free(pi, pi_len); 6136 kmem_free(nr, nr_len); 6137 break; 6138 } 6139 mutex_exit(&ibdm.ibdm_hl_mutex); 6140 6141 if (!gid_reinited) 6142 gidinfo->gl_disconnected = 1; 6143 } 6144 6145 static void 6146 ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *gidinfo) 6147 { 6148 ibdm_ioc_info_t *ioc_list; 6149 int in_gidlist = 0; 6150 6151 /* 6152 * Check if gidinfo has been inserted into the 6153 * ibdm_dp_gidlist_head list. gl_next or gl_prev is 6154 * non-NULL if gidinfo is in the list. 6155 */ 6156 if (gidinfo->gl_prev != NULL || 6157 gidinfo->gl_next != NULL || 6158 ibdm.ibdm_dp_gidlist_head == gidinfo) 6159 in_gidlist = 1; 6160 6161 ioc_list = ibdm_update_ioc_gidlist(gidinfo, 0); 6162 6163 /* 6164 * Remove the GID from the global GID list. 6165 * Handle the case where all port GIDs for an 6166 * IOU have been hot-removed. 6167 */ 6168 mutex_enter(&ibdm.ibdm_mutex); 6169 if (gidinfo->gl_iou != NULL && gidinfo->gl_ngids == 0) { 6170 mutex_enter(&gidinfo->gl_mutex); 6171 (void) ibdm_free_iou_info(gidinfo); 6172 mutex_exit(&gidinfo->gl_mutex); 6173 } 6174 6175 /* Delete gl_hca_list */ 6176 mutex_exit(&ibdm.ibdm_mutex); 6177 ibdm_delete_glhca_list(gidinfo); 6178 mutex_enter(&ibdm.ibdm_mutex); 6179 6180 if (in_gidlist) { 6181 if (gidinfo->gl_prev != NULL) 6182 gidinfo->gl_prev->gl_next = gidinfo->gl_next; 6183 if (gidinfo->gl_next != NULL) 6184 gidinfo->gl_next->gl_prev = gidinfo->gl_prev; 6185 6186 if (gidinfo == ibdm.ibdm_dp_gidlist_head) 6187 ibdm.ibdm_dp_gidlist_head = gidinfo->gl_next; 6188 if (gidinfo == ibdm.ibdm_dp_gidlist_tail) 6189 ibdm.ibdm_dp_gidlist_tail = gidinfo->gl_prev; 6190 ibdm.ibdm_ngids--; 6191 } 6192 mutex_exit(&ibdm.ibdm_mutex); 6193 6194 mutex_destroy(&gidinfo->gl_mutex); 6195 kmem_free(gidinfo, sizeof (ibdm_dp_gidinfo_t)); 6196 6197 /* 6198 * Pass on the IOCs with updated GIDs to IBnexus 6199 */ 6200 if (ioc_list) { 6201 IBTF_DPRINTF_L4("ibdm", "\tdelete_gidinfo " 6202 "IOC_PROP_UPDATE for %p\n", ioc_list); 6203 mutex_enter(&ibdm.ibdm_ibnex_mutex); 6204 if (ibdm.ibdm_ibnex_callback != NULL) { 6205 (*ibdm.ibdm_ibnex_callback)((void *) 6206 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE); 6207 } 6208 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6209 } 6210 } 6211 6212 6213 static void 6214 ibdm_fill_srv_attr_mod(ib_mad_hdr_t *hdr, ibdm_timeout_cb_args_t *cb_args) 6215 { 6216 uint32_t attr_mod; 6217 6218 attr_mod = (cb_args->cb_ioc_num + 1) << 16; 6219 attr_mod |= cb_args->cb_srvents_start; 6220 attr_mod |= (cb_args->cb_srvents_end) << 8; 6221 hdr->AttributeModifier = h2b32(attr_mod); 6222 } 6223 6224 static void 6225 ibdm_bump_transactionID(ibdm_dp_gidinfo_t *gid_info) 6226 { 6227 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 6228 gid_info->gl_transactionID++; 6229 if (gid_info->gl_transactionID == gid_info->gl_max_transactionID) { 6230 IBTF_DPRINTF_L4(ibdm_string, 6231 "\tbump_transactionID(%p), wrapup", gid_info); 6232 gid_info->gl_transactionID = gid_info->gl_min_transactionID; 6233 } 6234 } 6235 6236 /* For debugging purposes only */ 6237 #ifdef DEBUG 6238 void 6239 ibdm_dump_ibmf_msg(ibmf_msg_t *ibmf_msg, int flag) 6240 { 6241 ib_mad_hdr_t *mad_hdr; 6242 6243 IBTF_DPRINTF_L4("ibdm", "\t\t(IBMF_PKT): Local address info"); 6244
IBTF_DPRINTF_L4("ibdm", "\t\t ------------------"); 6245 6246 IBTF_DPRINTF_L4("ibdm", "\tLocal Lid : 0x%x\tRemote Lid : 0x%x" 6247 " Remote Qp : 0x%x", ibmf_msg->im_local_addr.ia_local_lid, 6248 ibmf_msg->im_local_addr.ia_remote_lid, 6249 ibmf_msg->im_local_addr.ia_remote_qno); 6250 IBTF_DPRINTF_L4("ibdm", "\tP_key : 0x%x\tQ_key : 0x%x", 6251 ibmf_msg->im_local_addr.ia_p_key, ibmf_msg->im_local_addr.ia_q_key); 6252 6253 if (flag) 6254 mad_hdr = (ib_mad_hdr_t *)IBDM_OUT_IBMFMSG_MADHDR(ibmf_msg); 6255 else 6256 mad_hdr = IBDM_IN_IBMFMSG_MADHDR(ibmf_msg); 6257 6258 IBTF_DPRINTF_L4("ibdm", "\t\t MAD Header info"); 6259 IBTF_DPRINTF_L4("ibdm", "\t\t ---------------"); 6260 6261 IBTF_DPRINTF_L4("ibdm", "\tBase version : 0x%x" 6262 "\tMgmt Class : 0x%x", mad_hdr->BaseVersion, mad_hdr->MgmtClass); 6263 IBTF_DPRINTF_L4("ibdm", "\tClass version : 0x%x" 6264 "\tR Method : 0x%x", 6265 mad_hdr->ClassVersion, mad_hdr->R_Method); 6266 IBTF_DPRINTF_L4("ibdm", "\tMAD Status : 0x%x" 6267 "\tTransaction ID : 0x%llx", 6268 mad_hdr->Status, mad_hdr->TransactionID); 6269 IBTF_DPRINTF_L4("ibdm", "\t Attribute ID : 0x%x" 6270 "\tAttribute Modified : 0x%lx", 6271 mad_hdr->AttributeID, mad_hdr->AttributeModifier); 6272 } 6273 6274 void 6275 ibdm_dump_path_info(sa_path_record_t *path) 6276 { 6277 IBTF_DPRINTF_L4("ibdm", "\t\t Path information"); 6278 IBTF_DPRINTF_L4("ibdm", "\t\t ----------------"); 6279 6280 IBTF_DPRINTF_L4("ibdm", "\t DGID hi : %llx\tDGID lo : %llx", 6281 path->DGID.gid_prefix, path->DGID.gid_guid); 6282 IBTF_DPRINTF_L4("ibdm", "\t SGID hi : %llx\tSGID lo : %llx", 6283 path->SGID.gid_prefix, path->SGID.gid_guid); 6284 IBTF_DPRINTF_L4("ibdm", "\t SLID : %x\tDlID : %x", 6285 path->SLID, path->DLID); 6286 IBTF_DPRINTF_L4("ibdm", "\t P Key : %x", path->P_Key); 6287 } 6288 6289 6290 void 6291 ibdm_dump_classportinfo(ibdm_mad_classportinfo_t *classportinfo) 6292 { 6293 IBTF_DPRINTF_L4("ibdm", "\t\t CLASSPORT INFO"); 6294 IBTF_DPRINTF_L4("ibdm", "\t\t --------------"); 6295 6296 IBTF_DPRINTF_L4("ibdm", "\t Response Time Value : 0x%x", 6297 ((b2h32(classportinfo->RespTimeValue)) & 0x1F)); 6298 6299 IBTF_DPRINTF_L4("ibdm", "\t Redirected QP : 0x%x", 6300 (b2h32(classportinfo->RedirectQP))); 6301 IBTF_DPRINTF_L4("ibdm", "\t Redirected P KEY : 0x%x", 6302 b2h16(classportinfo->RedirectP_Key)); 6303 IBTF_DPRINTF_L4("ibdm", "\t Redirected Q KEY : 0x%x", 6304 b2h16(classportinfo->RedirectQ_Key)); 6305 IBTF_DPRINTF_L4("ibdm", "\t Redirected GID hi : 0x%x", 6306 b2h64(classportinfo->RedirectGID_hi)); 6307 IBTF_DPRINTF_L4("ibdm", "\t Redirected GID lo : 0x%x", 6308 b2h64(classportinfo->RedirectGID_lo)); 6309 } 6310 6311 6312 void 6313 ibdm_dump_iounitinfo(ib_dm_io_unitinfo_t *iou_info) 6314 { 6315 IBTF_DPRINTF_L4("ibdm", "\t\t I/O UnitInfo"); 6316 IBTF_DPRINTF_L4("ibdm", "\t\t ------------"); 6317 6318 IBTF_DPRINTF_L4("ibdm", "\tChange ID : 0x%x", 6319 b2h16(iou_info->iou_changeid)); 6320 IBTF_DPRINTF_L4("ibdm", "\t#of ctrl slots : %d", 6321 iou_info->iou_num_ctrl_slots); 6322 IBTF_DPRINTF_L4("ibdm", "\tIOU flag : 0x%x", 6323 iou_info->iou_flag); 6324 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 0 : 0x%x", 6325 iou_info->iou_ctrl_list[0]); 6326 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 1 : 0x%x", 6327 iou_info->iou_ctrl_list[1]); 6328 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 2 : 0x%x", 6329 iou_info->iou_ctrl_list[2]); 6330 } 6331 6332 6333 void 6334 ibdm_dump_ioc_profile(ib_dm_ioc_ctrl_profile_t *ioc) 6335 { 6336 IBTF_DPRINTF_L4("ibdm", "\t\t IOC Controller Profile"); 6337 IBTF_DPRINTF_L4("ibdm", "\t\t 
----------------------"); 6338 6339 IBTF_DPRINTF_L4("ibdm", "\tIOC Guid : %llx", ioc->ioc_guid); 6340 IBTF_DPRINTF_L4("ibdm", "\tVendorID : 0x%x", ioc->ioc_vendorid); 6341 IBTF_DPRINTF_L4("ibdm", "\tDevice Id : 0x%x", ioc->ioc_deviceid); 6342 IBTF_DPRINTF_L4("ibdm", "\tDevice Ver : 0x%x", ioc->ioc_device_ver); 6343 IBTF_DPRINTF_L4("ibdm", "\tSubsys ID : 0x%x", ioc->ioc_subsys_id); 6344 IBTF_DPRINTF_L4("ibdm", "\tIO class : 0x%x", ioc->ioc_io_class); 6345 IBTF_DPRINTF_L4("ibdm", "\tIO subclass : 0x%x", ioc->ioc_io_subclass); 6346 IBTF_DPRINTF_L4("ibdm", "\tProtocol : 0x%x", ioc->ioc_protocol); 6347 IBTF_DPRINTF_L4("ibdm", "\tProtocolV : 0x%x", ioc->ioc_protocol_ver); 6348 IBTF_DPRINTF_L4("ibdm", "\tmsg qdepth : %d", ioc->ioc_send_msg_qdepth); 6349 IBTF_DPRINTF_L4("ibdm", "\trdma qdepth : %d", 6350 ioc->ioc_rdma_read_qdepth); 6351 IBTF_DPRINTF_L4("ibdm", "\tsndmsg sz : %d", ioc->ioc_send_msg_sz); 6352 IBTF_DPRINTF_L4("ibdm", "\trdma xfersz : %d", ioc->ioc_rdma_xfer_sz); 6353 IBTF_DPRINTF_L4("ibdm", "\topcal mask : 0x%x", 6354 ioc->ioc_ctrl_opcap_mask); 6355 IBTF_DPRINTF_L4("ibdm", "\tsrventries : %x", ioc->ioc_service_entries); 6356 } 6357 6358 6359 void 6360 ibdm_dump_service_entries(ib_dm_srv_t *srv_ents) 6361 { 6362 IBTF_DPRINTF_L4("ibdm", 6363 "\thandle_srventry_mad: service id : %llx", srv_ents->srv_id); 6364 6365 IBTF_DPRINTF_L4("ibdm", "\thandle_srventry_mad: " 6366 "Service Name : %s", srv_ents->srv_name); 6367 } 6368 6369 int ibdm_allow_sweep_fabric_timestamp = 1; 6370 6371 void 6372 ibdm_dump_sweep_fabric_timestamp(int flag) 6373 { 6374 static hrtime_t x; 6375 if (flag) { 6376 if (ibdm_allow_sweep_fabric_timestamp) { 6377 IBTF_DPRINTF_L4("ibdm", "\tTime taken to complete " 6378 "sweep %lld ms", ((gethrtime() - x)/ 1000000)); 6379 } 6380 x = 0; 6381 } else 6382 x = gethrtime(); 6383 } 6384 #endif 6385