/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * ibdm.c
 *
 * This file contains the InfiniBand Device Manager (IBDM) support functions.
 * The IB nexus driver is the only client of the IBDM module.
 *
 * IBDM registers with IBTF for HCA arrival/removal notification.
 * IBDM registers with SA access to send DM MADs to discover the IOCs behind
 * the IOUs.
 *
 * The IB nexus driver registers with IBDM to find information about the
 * HCAs and IOCs (behind the IOUs) present on the IB fabric.
 */

#include <sys/systm.h>
#include <sys/taskq.h>
#include <sys/ib/mgt/ibdm/ibdm_impl.h>
#include <sys/modctl.h>

/* Function Prototype declarations */
static int	ibdm_free_iou_info(ibdm_dp_gidinfo_t *);
static int	ibdm_fini(void);
static int	ibdm_init(void);
static int	ibdm_get_reachable_ports(ibdm_port_attr_t *,
		    ibdm_hca_list_t *);
static ibdm_dp_gidinfo_t *ibdm_check_dgid(ib_guid_t, ib_sn_prefix_t);
static ibdm_dp_gidinfo_t *ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *);
static int	ibdm_send_classportinfo(ibdm_dp_gidinfo_t *);
static int	ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *);
static int	ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *);
static int	ibdm_get_node_port_guids(ibmf_saa_handle_t, ib_lid_t,
		    ib_guid_t *, ib_guid_t *);
static int	ibdm_retry_command(ibdm_timeout_cb_args_t *);
static int	ibdm_get_diagcode(ibdm_dp_gidinfo_t *, int);
static int	ibdm_verify_mad_status(ib_mad_hdr_t *);
static int	ibdm_handle_redirection(ibmf_msg_t *,
		    ibdm_dp_gidinfo_t *, int *);
static void	ibdm_wait_probe_completion(void);
static void	ibdm_sweep_fabric(int);
static void	ibdm_probe_gid_thread(void *);
static void	ibdm_wakeup_probe_gid_cv(void);
static void	ibdm_port_attr_ibmf_init(ibdm_port_attr_t *, ib_pkey_t, int);
static int	ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *, int);
static void	ibdm_update_port_attr(ibdm_port_attr_t *);
static void	ibdm_handle_hca_attach(ib_guid_t);
static void	ibdm_handle_srventry_mad(ibmf_msg_t *,
		    ibdm_dp_gidinfo_t *, int *);
static void	ibdm_ibmf_recv_cb(ibmf_handle_t, ibmf_msg_t *, void *);
static void	ibdm_recv_incoming_mad(void *);
static void	ibdm_process_incoming_mad(ibmf_handle_t, ibmf_msg_t *, void *);
static void	ibdm_ibmf_send_cb(ibmf_handle_t, ibmf_msg_t *, void *);
static void	ibdm_pkt_timeout_hdlr(void *arg);
static void	ibdm_initialize_port(ibdm_port_attr_t *);
static void	ibdm_handle_diagcode(ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *);
static void	ibdm_probe_gid(ibdm_dp_gidinfo_t *);
static void	ibdm_alloc_send_buffers(ibmf_msg_t *);
static void	ibdm_free_send_buffers(ibmf_msg_t *);
static void	ibdm_handle_hca_detach(ib_guid_t);
static int	ibdm_fini_port(ibdm_port_attr_t *);
static int	ibdm_uninit_hca(ibdm_hca_list_t *);
static void	ibdm_handle_iounitinfo(ibmf_handle_t,
		    ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *);
static void	ibdm_handle_ioc_profile(ibmf_handle_t,
		    ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *);
static void	ibdm_event_hdlr(void *, ibt_hca_hdl_t,
		    ibt_async_code_t, ibt_async_event_t *);
static void	ibdm_handle_classportinfo(ibmf_handle_t,
		    ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *);
static void	ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *,
		    ibdm_dp_gidinfo_t *);

static ibdm_hca_list_t	*ibdm_dup_hca_attr(ibdm_hca_list_t *);
static ibdm_ioc_info_t	*ibdm_dup_ioc_info(ibdm_ioc_info_t *,
			    ibdm_dp_gidinfo_t *gid_list);
static void		ibdm_probe_ioc(ib_guid_t, ib_guid_t, int);
static ibdm_ioc_info_t	*ibdm_is_ioc_present(ib_guid_t,
			    ibdm_dp_gidinfo_t *, int *);
static ibdm_port_attr_t	*ibdm_get_port_attr(ibt_async_event_t *,
			    ibdm_hca_list_t **);
static sa_node_record_t	*ibdm_get_node_records(ibmf_saa_handle_t,
			    size_t *, ib_guid_t);
static sa_portinfo_record_t *ibdm_get_portinfo(ibmf_saa_handle_t, size_t *,
			    ib_lid_t);
static ibdm_dp_gidinfo_t *ibdm_create_gid_info(ibdm_port_attr_t *,
			    ib_gid_t, ib_gid_t);
static ibdm_dp_gidinfo_t *ibdm_find_gid(ib_guid_t, ib_guid_t);
static int		ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *, uint8_t);
static ibdm_ioc_info_t	*ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *, int);
static void		ibdm_saa_event_cb(ibmf_saa_handle_t,
			    ibmf_saa_subnet_event_t,
			    ibmf_saa_event_details_t *, void *);
static void		ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *,
			    ibdm_dp_gidinfo_t *);
static ibdm_dp_gidinfo_t *ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *);
static void		ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *,
			    ibdm_dp_gidinfo_t *);
static void		ibdm_addto_gidlist(ibdm_gid_t **, ibdm_gid_t *);
static void		ibdm_free_gid_list(ibdm_gid_t *);
static void		ibdm_rescan_gidlist(ib_guid_t *ioc_guid);
static void		ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *);
static void		ibdm_saa_event_taskq(void *);
static void		ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *);
static void		ibdm_get_next_port(ibdm_hca_list_t **,
			    ibdm_port_attr_t **, int);
static void		ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *,
			    ibdm_dp_gidinfo_t *);
static void		ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *,
			    ibdm_hca_list_t *);
static void		ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *);
static void		ibdm_saa_handle_new_gid(void *);
static void		ibdm_reset_all_dgids(ibmf_saa_handle_t);
static void		ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *);
static void		ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *);
static void		ibdm_fill_srv_attr_mod(ib_mad_hdr_t *,
			    ibdm_timeout_cb_args_t *);
static void		ibdm_bump_transactionID(ibdm_dp_gidinfo_t *);

int	ibdm_dft_timeout	= IBDM_DFT_TIMEOUT;
int	ibdm_dft_retry_cnt	= IBDM_DFT_NRETRIES;
#ifdef DEBUG
int	ibdm_ignore_saa_event = 0;
#endif

/* Modload support */
static struct modlmisc ibdm_modlmisc = {
	&mod_miscops,
	"InfiniBand Device Manager %I%",
};

struct modlinkage ibdm_modlinkage = {
	MODREV_1,
	(void *)&ibdm_modlmisc,
	NULL
};

static ibt_clnt_modinfo_t ibdm_ibt_modinfo = {
	IBTI_V2,
	IBT_DM,
	ibdm_event_hdlr,
	NULL,
	"ibdm"
};

/* Global variables */
ibdm_t	ibdm;
int	ibdm_taskq_enable = IBDM_ENABLE_TASKQ_HANDLING;
char	*ibdm_string = "ibdm";

_NOTE(SCHEME_PROTECTS_DATA("Serialized access by cv",
    ibdm.ibdm_dp_gidlist_head))

/*
 * _init
 *	Loadable module init, called before any other module.
 *	Initialize mutexes
 *	Register with IBTF
 */
int
_init(void)
{
	int		err;

	IBTF_DPRINTF_L4("ibdm", "\t_init: addr of ibdm %p", &ibdm);

	if ((err = ibdm_init()) != IBDM_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "_init: ibdm_init failed 0x%x", err);
		(void) ibdm_fini();
		return (DDI_FAILURE);
	}

	if ((err = mod_install(&ibdm_modlinkage)) != 0) {
		IBTF_DPRINTF_L2("ibdm", "_init: mod_install failed 0x%x", err);
		(void) ibdm_fini();
	}
	return (err);
}


int
_fini(void)
{
	int err;

	if ((err = ibdm_fini()) != IBDM_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "_fini: ibdm_fini failed 0x%x", err);
		(void) ibdm_init();
		return (EBUSY);
	}

	if ((err = mod_remove(&ibdm_modlinkage)) != 0) {
		IBTF_DPRINTF_L2("ibdm", "_fini: mod_remove failed 0x%x", err);
		(void) ibdm_init();
	}
	return (err);
}


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&ibdm_modlinkage, modinfop));
}


/*
 * ibdm_init():
 *	Register with IBTF
 *	Allocate memory for the HCAs
 *	Allocate minor-nodes for the HCAs
 */
static int
ibdm_init(void)
{
	int			i, hca_count;
	ib_guid_t		*hca_guids;
	ibt_status_t		status;

	IBTF_DPRINTF_L4("ibdm", "\tibdm_init:");
	if (!(ibdm.ibdm_state & IBDM_LOCKS_ALLOCED)) {
		mutex_init(&ibdm.ibdm_mutex, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ibdm.ibdm_hl_mutex, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ibdm.ibdm_ibnex_mutex, NULL, MUTEX_DEFAULT, NULL);
		mutex_enter(&ibdm.ibdm_mutex);
		ibdm.ibdm_state |= IBDM_LOCKS_ALLOCED;
	}

	if (!(ibdm.ibdm_state & IBDM_IBT_ATTACHED)) {
		if ((status = ibt_attach(&ibdm_ibt_modinfo, NULL, NULL,
		    (void *)&ibdm.ibdm_ibt_clnt_hdl)) != IBT_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "ibdm_init: ibt_attach "
			    "failed %x", status);
			mutex_exit(&ibdm.ibdm_mutex);
			return (IBDM_FAILURE);
		}

		ibdm.ibdm_state |= IBDM_IBT_ATTACHED;
		mutex_exit(&ibdm.ibdm_mutex);
	}


	if (!(ibdm.ibdm_state & IBDM_HCA_ATTACHED)) {
		hca_count = ibt_get_hca_list(&hca_guids);
		IBTF_DPRINTF_L4("ibdm", "ibdm_init: num_hcas = %d", hca_count);
		for (i = 0; i < hca_count; i++)
			(void) ibdm_handle_hca_attach(hca_guids[i]);
		if (hca_count)
			ibt_free_hca_list(hca_guids, hca_count);

		mutex_enter(&ibdm.ibdm_mutex);
		ibdm.ibdm_state |= IBDM_HCA_ATTACHED;
		mutex_exit(&ibdm.ibdm_mutex);
	}

	if (!(ibdm.ibdm_state & IBDM_CVS_ALLOCED)) {
		cv_init(&ibdm.ibdm_probe_cv, NULL, CV_DRIVER, NULL);
		cv_init(&ibdm.ibdm_busy_cv, NULL, CV_DRIVER, NULL);
		mutex_enter(&ibdm.ibdm_mutex);
		ibdm.ibdm_state |= IBDM_CVS_ALLOCED;
		mutex_exit(&ibdm.ibdm_mutex);
	}
	return (IBDM_SUCCESS);
}
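
/*
 * ibdm_free_iou_info():
 *	Cancel any outstanding IOC, diag-code and service-entry timeouts,
 *	free the per-IOC GID lists and service entry arrays, and then free
 *	the IOU info block itself. Called with gl_mutex held (the mutex is
 *	dropped and re-acquired around untimeout() calls).
 *	Returns 0 on success and -1 if an untimeout() fails.
 */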
static int
ibdm_free_iou_info(ibdm_dp_gidinfo_t *gid_info)
{
	int			ii, k, niocs;
	size_t			size;
	ibdm_gid_t		*delete, *head;
	timeout_id_t		timeout_id;
	ibdm_ioc_info_t		*ioc;

	ASSERT(mutex_owned(&gid_info->gl_mutex));
	if (gid_info->gl_iou == NULL) {
		IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: No IOU");
		return (0);
	}

	niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
	IBTF_DPRINTF_L4("ibdm", "\tfree_iou_info: gid_info = %p, niocs %d",
	    gid_info, niocs);

	for (ii = 0; ii < niocs; ii++) {
		ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii);

		/* handle the case where an ioc_timeout_id is scheduled */
		if (ioc->ioc_timeout_id) {
			timeout_id = ioc->ioc_timeout_id;
			mutex_exit(&gid_info->gl_mutex);
			IBTF_DPRINTF_L5("ibdm", "free_iou_info: "
			    "ioc_timeout_id = 0x%x", timeout_id);
			if (untimeout(timeout_id) == -1) {
				IBTF_DPRINTF_L2("ibdm", "free_iou_info: "
				    "untimeout ioc_timeout_id failed");
				mutex_enter(&gid_info->gl_mutex);
				return (-1);
			}
			mutex_enter(&gid_info->gl_mutex);
			ioc->ioc_timeout_id = 0;
		}

		/* handle the case where an ioc_dc_timeout_id is scheduled */
		if (ioc->ioc_dc_timeout_id) {
			timeout_id = ioc->ioc_dc_timeout_id;
			mutex_exit(&gid_info->gl_mutex);
			IBTF_DPRINTF_L5("ibdm", "free_iou_info: "
			    "ioc_dc_timeout_id = 0x%x", timeout_id);
			if (untimeout(timeout_id) == -1) {
				IBTF_DPRINTF_L2("ibdm", "free_iou_info: "
				    "untimeout ioc_dc_timeout_id failed");
				mutex_enter(&gid_info->gl_mutex);
				return (-1);
			}
			mutex_enter(&gid_info->gl_mutex);
			ioc->ioc_dc_timeout_id = 0;
		}

		/* handle the case where serv[k].se_timeout_id is scheduled */
		for (k = 0; k < ioc->ioc_profile.ioc_service_entries; k++) {
			if (ioc->ioc_serv[k].se_timeout_id) {
				timeout_id = ioc->ioc_serv[k].se_timeout_id;
				mutex_exit(&gid_info->gl_mutex);
				IBTF_DPRINTF_L5("ibdm", "free_iou_info: "
				    "ioc->ioc_serv[%d].se_timeout_id = 0x%x",
				    k, timeout_id);
				if (untimeout(timeout_id) == -1) {
					IBTF_DPRINTF_L2("ibdm", "free_iou_info:"
					    " untimeout se_timeout_id failed");
					mutex_enter(&gid_info->gl_mutex);
					return (-1);
				}
				mutex_enter(&gid_info->gl_mutex);
				ioc->ioc_serv[k].se_timeout_id = 0;
			}
		}

		/* delete GID list */
		head = ioc->ioc_gid_list;
		while (head) {
			IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: "
			    "Deleting gid_list struct %p", head);
			delete = head;
			head = head->gid_next;
			kmem_free(delete, sizeof (ibdm_gid_t));
		}
		ioc->ioc_gid_list = NULL;

		/* delete ioc_serv */
		size = ioc->ioc_profile.ioc_service_entries *
		    sizeof (ibdm_srvents_info_t);
		if (ioc->ioc_serv && size) {
			kmem_free(ioc->ioc_serv, size);
			ioc->ioc_serv = NULL;
		}
	}

	IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: deleting IOU & IOC");
	size = sizeof (ibdm_iou_info_t) + niocs * sizeof (ibdm_ioc_info_t);
	kmem_free(gid_info->gl_iou, size);
	gid_info->gl_iou = NULL;
	return (0);
}


/*
 * ibdm_fini():
 *	Un-register with IBTF
 *	De-allocate memory for the GID info
 */
static int
ibdm_fini()
{
	int			ii;
	ibdm_hca_list_t		*hca_list, *temp;
	ibdm_dp_gidinfo_t	*gid_info, *tmp;
	ibdm_gid_t		*head, *delete;

	IBTF_DPRINTF_L4("ibdm", "\tibdm_fini");

	mutex_enter(&ibdm.ibdm_hl_mutex);
	if (ibdm.ibdm_state & IBDM_IBT_ATTACHED) {
		if (ibt_detach(ibdm.ibdm_ibt_clnt_hdl) != IBT_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "\t_fini: ibt_detach failed");
			mutex_exit(&ibdm.ibdm_hl_mutex);
			return (IBDM_FAILURE);
		}
		ibdm.ibdm_state &= ~IBDM_IBT_ATTACHED;
		ibdm.ibdm_ibt_clnt_hdl = NULL;
	}

	hca_list = ibdm.ibdm_hca_list_head;
	IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: nhcas %d", ibdm.ibdm_hca_count);
	for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) {
		temp = hca_list;
		hca_list = hca_list->hl_next;
		IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: hca %p", temp);
		if (ibdm_uninit_hca(temp) != IBDM_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "\tibdm_fini: "
			    "uninit_hca %p failed", temp);
			mutex_exit(&ibdm.ibdm_hl_mutex);
			return (IBDM_FAILURE);
		}
	}
	mutex_exit(&ibdm.ibdm_hl_mutex);

	mutex_enter(&ibdm.ibdm_mutex);
	if (ibdm.ibdm_state & IBDM_HCA_ATTACHED)
		ibdm.ibdm_state &= ~IBDM_HCA_ATTACHED;

	gid_info = ibdm.ibdm_dp_gidlist_head;
	while (gid_info) {
		mutex_enter(&gid_info->gl_mutex);
		(void) ibdm_free_iou_info(gid_info);
		mutex_exit(&gid_info->gl_mutex);
		ibdm_delete_glhca_list(gid_info);

		tmp = gid_info;
		gid_info = gid_info->gl_next;
		mutex_destroy(&tmp->gl_mutex);
		head = tmp->gl_gid;
		while (head) {
			IBTF_DPRINTF_L4("ibdm",
			    "\tibdm_fini: Deleting gid structs");
			delete = head;
			head = head->gid_next;
			kmem_free(delete, sizeof (ibdm_gid_t));
		}
		kmem_free(tmp, sizeof (ibdm_dp_gidinfo_t));
	}
	mutex_exit(&ibdm.ibdm_mutex);

	if (ibdm.ibdm_state & IBDM_LOCKS_ALLOCED) {
		ibdm.ibdm_state &= ~IBDM_LOCKS_ALLOCED;
		mutex_destroy(&ibdm.ibdm_mutex);
		mutex_destroy(&ibdm.ibdm_hl_mutex);
		mutex_destroy(&ibdm.ibdm_ibnex_mutex);
	}
	if (ibdm.ibdm_state & IBDM_CVS_ALLOCED) {
		ibdm.ibdm_state &= ~IBDM_CVS_ALLOCED;
		cv_destroy(&ibdm.ibdm_probe_cv);
		cv_destroy(&ibdm.ibdm_busy_cv);
	}
	return (IBDM_SUCCESS);
}


/*
 * ibdm_event_hdlr()
 *
 *	IBDM registers this asynchronous event handler at the time of
 *	ibt_attach. IBDM supports the following async events; all other
 *	events are simply ignored (success is returned).
 *	IBT_HCA_ATTACH_EVENT:
 *		Retrieves the information about all the ports that are
 *		present on this HCA, allocates the port attributes
 *		structure and calls the IB nexus callback routine with
 *		the port attributes structure as an input argument.
 *	IBT_HCA_DETACH_EVENT:
 *		Retrieves the information about all the ports that are
 *		present on this HCA and calls IB nexus callback with
 *		port guid as an argument
 *	IBT_EVENT_PORT_UP:
 *		Register with IBMF and SA access
 *		Setup IBMF receive callback routine
 *	IBT_EVENT_PORT_DOWN:
 *		Un-Register with IBMF and SA access
 *		Teardown IBMF receive callback routine
 */
/*ARGSUSED*/
static void
ibdm_event_hdlr(void *clnt_hdl,
    ibt_hca_hdl_t hca_hdl, ibt_async_code_t code, ibt_async_event_t *event)
{
	ibdm_hca_list_t		*hca_list;
	ibdm_port_attr_t	*port;
	ibmf_saa_handle_t	port_sa_hdl;

	IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: async code 0x%x", code);

	switch (code) {
	case IBT_HCA_ATTACH_EVENT:	/* New HCA registered with IBTF */
		ibdm_handle_hca_attach(event->ev_hca_guid);
		break;

	case IBT_HCA_DETACH_EVENT:	/* HCA unregistered with IBTF */
		ibdm_handle_hca_detach(event->ev_hca_guid);
		mutex_enter(&ibdm.ibdm_ibnex_mutex);
		if (ibdm.ibdm_ibnex_callback != NULL) {
			(*ibdm.ibdm_ibnex_callback)((void *)
			    &event->ev_hca_guid, IBDM_EVENT_HCA_REMOVED);
		}
		mutex_exit(&ibdm.ibdm_ibnex_mutex);
		break;

	case IBT_EVENT_PORT_UP:
		IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_UP");
		mutex_enter(&ibdm.ibdm_hl_mutex);
		port = ibdm_get_port_attr(event, &hca_list);
		if (port == NULL) {
			IBTF_DPRINTF_L2("ibdm",
			    "\tevent_hdlr: HCA not present");
			mutex_exit(&ibdm.ibdm_hl_mutex);
			break;
		}
		ibdm_initialize_port(port);
		hca_list->hl_nports_active++;
		mutex_exit(&ibdm.ibdm_hl_mutex);
		break;

	case IBT_ERROR_PORT_DOWN:
		IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_DOWN");
		mutex_enter(&ibdm.ibdm_hl_mutex);
		port = ibdm_get_port_attr(event, &hca_list);
		if (port == NULL) {
			IBTF_DPRINTF_L2("ibdm",
			    "\tevent_hdlr: HCA not present");
			mutex_exit(&ibdm.ibdm_hl_mutex);
			break;
		}
		hca_list->hl_nports_active--;
		port_sa_hdl = port->pa_sa_hdl;
		(void) ibdm_fini_port(port);
		mutex_exit(&ibdm.ibdm_hl_mutex);
		ibdm_reset_all_dgids(port_sa_hdl);
		break;

	default:		/* Ignore all other events/errors */
		break;
	}
}


/*
 * ibdm_initialize_port()
 *	Register with IBMF
 *	Register with SA access
 *	Register a receive callback routine with IBMF. IBMF invokes
 *	this routine whenever a MAD arrives at this port.
 *	Update the port attributes
 */
static void
ibdm_initialize_port(ibdm_port_attr_t *port)
{
	int				ii;
	uint_t				nports, size;
	uint_t				pkey_idx;
	ib_pkey_t			pkey;
	ibt_hca_portinfo_t		*pinfop;
	ibmf_register_info_t		ibmf_reg;
	ibmf_saa_subnet_event_args_t	event_args;

	IBTF_DPRINTF_L4("ibdm", "\tinitialize_port:");
	ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));

	/* Check whether the port is active */
	if (ibt_get_port_state(port->pa_hca_hdl, port->pa_port_num, NULL,
	    NULL) != IBT_SUCCESS)
		return;

	if (port->pa_sa_hdl != NULL)
		return;

	if (ibt_query_hca_ports(port->pa_hca_hdl, port->pa_port_num,
	    &pinfop, &nports, &size) != IBT_SUCCESS) {
		/* This should not occur */
		port->pa_npkeys = 0;
		port->pa_pkey_tbl = NULL;
		return;
	}
	port->pa_sn_prefix = pinfop->p_sgid_tbl[0].gid_prefix;

	port->pa_state = pinfop->p_linkstate;
	port->pa_npkeys = pinfop->p_pkey_tbl_sz;
	port->pa_pkey_tbl = (ibdm_pkey_tbl_t *)kmem_zalloc(
	    port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP);

	for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++)
		port->pa_pkey_tbl[pkey_idx].pt_pkey =
		    pinfop->p_pkey_tbl[pkey_idx];

	ibt_free_portinfo(pinfop, size);

	event_args.is_event_callback = ibdm_saa_event_cb;
	event_args.is_event_callback_arg = port;
	if (ibmf_sa_session_open(port->pa_port_guid, 0, &event_args,
	    IBMF_VERSION, 0, &port->pa_sa_hdl) != IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: "
		    "sa access registration failed");
		return;
	}
	ibmf_reg.ir_ci_guid = port->pa_hca_guid;
	ibmf_reg.ir_port_num = port->pa_port_num;
	ibmf_reg.ir_client_class = DEV_MGT_MANAGER;

	if (ibmf_register(&ibmf_reg, IBMF_VERSION, 0, NULL, NULL,
	    &port->pa_ibmf_hdl, &port->pa_ibmf_caps) != IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: "
		    "IBMF registration failed");
		(void) ibdm_fini_port(port);
		return;
	}
	if (ibmf_setup_async_cb(port->pa_ibmf_hdl, IBMF_QP_HANDLE_DEFAULT,
	    ibdm_ibmf_recv_cb, 0, 0) != IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: "
		    "IBMF setup recv cb failed");
		(void) ibdm_fini_port(port);
		return;
	}

	for (ii = 0; ii < port->pa_npkeys; ii++) {
		pkey = port->pa_pkey_tbl[ii].pt_pkey;
		if (IBDM_INVALID_PKEY(pkey)) {
			port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
			continue;
		}
		ibdm_port_attr_ibmf_init(port, pkey, ii);
	}
}


/*
 * ibdm_port_attr_ibmf_init:
 *	With IBMF - Alloc QP Handle and Setup Async callback
 */
static void
ibdm_port_attr_ibmf_init(ibdm_port_attr_t *port, ib_pkey_t pkey, int ii)
{
	int ret;

	if ((ret = ibmf_alloc_qp(port->pa_ibmf_hdl, pkey, IB_GSI_QKEY,
	    IBMF_ALT_QP_MAD_NO_RMPP, &port->pa_pkey_tbl[ii].pt_qp_hdl)) !=
	    IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: "
		    "IBMF failed to alloc qp %d", ret);
		port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
		return;
	}

	IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_init: QP handle is %p",
	    port->pa_pkey_tbl[ii].pt_qp_hdl);

	if ((ret = ibmf_setup_async_cb(port->pa_ibmf_hdl,
	    port->pa_pkey_tbl[ii].pt_qp_hdl, ibdm_ibmf_recv_cb, 0, 0)) !=
	    IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: "
		    "IBMF setup recv cb failed %d", ret);
		(void) ibmf_free_qp(port->pa_ibmf_hdl,
		    &port->pa_pkey_tbl[ii].pt_qp_hdl, 0);
		port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
	}
}
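
/*
 * Note: every pt_qp_hdl allocated by ibdm_port_attr_ibmf_init() above is
 * released by ibdm_port_attr_ibmf_fini(), which tears down the async
 * callback and frees the alternate QP. ibdm_fini_port() walks the pkey
 * table and invokes it for each initialized entry.
 */
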
/*
 * ibdm_get_port_attr()
 *	Get port attributes from HCA guid and port number
 *	Return pointer to ibdm_port_attr_t on Success
 *	and NULL on failure
 */
static ibdm_port_attr_t *
ibdm_get_port_attr(ibt_async_event_t *event, ibdm_hca_list_t **retval)
{
	ibdm_hca_list_t		*hca_list;
	ibdm_port_attr_t	*port_attr;
	int			ii;

	IBTF_DPRINTF_L4("ibdm", "\tget_port_attr: port# %d", event->ev_port);
	ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));
	hca_list = ibdm.ibdm_hca_list_head;
	while (hca_list) {
		if (hca_list->hl_hca_guid == event->ev_hca_guid) {
			for (ii = 0; ii < hca_list->hl_nports; ii++) {
				port_attr = &hca_list->hl_port_attr[ii];
				if (port_attr->pa_port_num == event->ev_port) {
					*retval = hca_list;
					return (port_attr);
				}
			}
		}
		hca_list = hca_list->hl_next;
	}
	return (NULL);
}


/*
 * ibdm_update_port_attr()
 *	Update the port attributes
 */
static void
ibdm_update_port_attr(ibdm_port_attr_t *port)
{
	uint_t			nports, size;
	uint_t			pkey_idx;
	ibt_hca_portinfo_t	*portinfop;

	IBTF_DPRINTF_L4("ibdm", "\tupdate_port_attr: Begin");
	if (ibt_query_hca_ports(port->pa_hca_hdl,
	    port->pa_port_num, &portinfop, &nports, &size) != IBT_SUCCESS) {
		/* This should not occur */
		port->pa_npkeys = 0;
		port->pa_pkey_tbl = NULL;
		return;
	}
	port->pa_sn_prefix = portinfop->p_sgid_tbl[0].gid_prefix;

	port->pa_state = portinfop->p_linkstate;

	/*
	 * PKey information in portinfo valid only if port is
	 * ACTIVE. Bail out if not.
	 */
	if (port->pa_state != IBT_PORT_ACTIVE) {
		port->pa_npkeys = 0;
		port->pa_pkey_tbl = NULL;
		ibt_free_portinfo(portinfop, size);
		return;
	}

	port->pa_npkeys = portinfop->p_pkey_tbl_sz;
	port->pa_pkey_tbl = (ibdm_pkey_tbl_t *)kmem_zalloc(
	    port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP);

	for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++) {
		port->pa_pkey_tbl[pkey_idx].pt_pkey =
		    portinfop->p_pkey_tbl[pkey_idx];
	}
	ibt_free_portinfo(portinfop, size);
}


/*
 * ibdm_handle_hca_attach()
 */
static void
ibdm_handle_hca_attach(ib_guid_t hca_guid)
{
	uint_t			size;
	uint_t			ii, nports;
	ibt_status_t		status;
	ibt_hca_hdl_t		hca_hdl;
	ibt_hca_attr_t		*hca_attr;
	ibdm_hca_list_t		*hca_list, *temp;
	ibdm_port_attr_t	*port_attr;
	ibt_hca_portinfo_t	*portinfop;

	IBTF_DPRINTF_L4("ibdm",
	    "\thandle_hca_attach: hca_guid = 0x%llX", hca_guid);

	/* open the HCA first */
	if ((status = ibt_open_hca(ibdm.ibdm_ibt_clnt_hdl, hca_guid,
	    &hca_hdl)) != IBT_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: "
		    "open_hca failed, status 0x%x", status);
		return;
	}

	hca_attr = (ibt_hca_attr_t *)
	    kmem_alloc(sizeof (ibt_hca_attr_t), KM_SLEEP);
	/* ibt_query_hca always returns IBT_SUCCESS */
	(void) ibt_query_hca(hca_hdl, hca_attr);

	IBTF_DPRINTF_L4("ibdm", "\tvid: 0x%x, pid: 0x%x, ver: 0x%x,"
	    " #ports: %d", hca_attr->hca_vendor_id, hca_attr->hca_device_id,
	    hca_attr->hca_version_id, hca_attr->hca_nports);

	if ((status = ibt_query_hca_ports(hca_hdl, 0, &portinfop, &nports,
	    &size)) != IBT_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: "
		    "ibt_query_hca_ports failed, status 0x%x", status);
		kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
		(void) ibt_close_hca(hca_hdl);
		return;
	}
	hca_list = (ibdm_hca_list_t *)
	    kmem_zalloc((sizeof (ibdm_hca_list_t)), KM_SLEEP);
	hca_list->hl_port_attr = (ibdm_port_attr_t *)kmem_zalloc(
	    (sizeof (ibdm_port_attr_t) * hca_attr->hca_nports), KM_SLEEP);
	hca_list->hl_hca_guid = hca_attr->hca_node_guid;
	hca_list->hl_nports = hca_attr->hca_nports;
	hca_list->hl_attach_time = ddi_get_time();
	hca_list->hl_hca_hdl = hca_hdl;

	/*
	 * Init a dummy port attribute for the HCA node
	 * This is for Per-HCA Node. Initialize port_attr :
	 *	hca_guid & port_guid -> hca_guid
	 *	npkeys, pkey_tbl is NULL
	 *	port_num, sn_prefix is 0
	 *	vendorid, product_id, dev_version from HCA
	 *	pa_state is IBT_PORT_ACTIVE
	 */
	hca_list->hl_hca_port_attr = (ibdm_port_attr_t *)kmem_zalloc(
	    sizeof (ibdm_port_attr_t), KM_SLEEP);
	port_attr = hca_list->hl_hca_port_attr;
	port_attr->pa_vendorid = hca_attr->hca_vendor_id;
	port_attr->pa_productid = hca_attr->hca_device_id;
	port_attr->pa_dev_version = hca_attr->hca_version_id;
	port_attr->pa_hca_guid = hca_attr->hca_node_guid;
	port_attr->pa_hca_hdl = hca_list->hl_hca_hdl;
	port_attr->pa_port_guid = hca_attr->hca_node_guid;
	port_attr->pa_state = IBT_PORT_ACTIVE;


	for (ii = 0; ii < nports; ii++) {
		port_attr = &hca_list->hl_port_attr[ii];
		port_attr->pa_vendorid = hca_attr->hca_vendor_id;
		port_attr->pa_productid = hca_attr->hca_device_id;
		port_attr->pa_dev_version = hca_attr->hca_version_id;
		port_attr->pa_hca_guid = hca_attr->hca_node_guid;
		port_attr->pa_hca_hdl = hca_list->hl_hca_hdl;
		port_attr->pa_port_guid = portinfop[ii].p_sgid_tbl->gid_guid;
		port_attr->pa_sn_prefix = portinfop[ii].p_sgid_tbl->gid_prefix;
		port_attr->pa_port_num = portinfop[ii].p_port_num;
		port_attr->pa_state = portinfop[ii].p_linkstate;

		/*
		 * Register with IBMF, SA access when the port is in
		 * ACTIVE state. Also register a callback routine
		 * with IBMF to receive incoming DM MAD's.
		 * The IBDM event handler takes care of registration of
		 * ports which are not active.
		 */
		IBTF_DPRINTF_L4("ibdm",
		    "\thandle_hca_attach: port guid %llx Port state 0x%x",
		    port_attr->pa_port_guid, portinfop[ii].p_linkstate);

		if (portinfop[ii].p_linkstate == IBT_PORT_ACTIVE) {
			mutex_enter(&ibdm.ibdm_hl_mutex);
			hca_list->hl_nports_active++;
			ibdm_initialize_port(port_attr);
			mutex_exit(&ibdm.ibdm_hl_mutex);
		}
	}
	mutex_enter(&ibdm.ibdm_hl_mutex);
	for (temp = ibdm.ibdm_hca_list_head; temp; temp = temp->hl_next) {
		if (temp->hl_hca_guid == hca_guid) {
			IBTF_DPRINTF_L2("ibdm", "hca_attach: HCA %llX "
			    "already seen by IBDM", hca_guid);
			mutex_exit(&ibdm.ibdm_hl_mutex);
			(void) ibdm_uninit_hca(hca_list);
			return;
		}
	}
	ibdm.ibdm_hca_count++;
	if (ibdm.ibdm_hca_list_head == NULL) {
		ibdm.ibdm_hca_list_head = hca_list;
		ibdm.ibdm_hca_list_tail = hca_list;
	} else {
		ibdm.ibdm_hca_list_tail->hl_next = hca_list;
		ibdm.ibdm_hca_list_tail = hca_list;
	}
	mutex_exit(&ibdm.ibdm_hl_mutex);
	mutex_enter(&ibdm.ibdm_ibnex_mutex);
	if (ibdm.ibdm_ibnex_callback != NULL) {
		(*ibdm.ibdm_ibnex_callback)((void *)
		    &hca_guid, IBDM_EVENT_HCA_ADDED);
	}
	mutex_exit(&ibdm.ibdm_ibnex_mutex);

	kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
	ibt_free_portinfo(portinfop, size);
}
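
/*
 * Note on locking in the HCA attach/detach paths:
 *	ibdm_hl_mutex protects the global HCA list (ibdm_hca_list_head/tail)
 *	and the per-port attributes hanging off it.
 *	ibdm_mutex protects the IBDM_BUSY flag and the GID list; the detach
 *	path waits on ibdm_busy_cv so that it never runs while a fabric
 *	sweep or probe is in progress.
 *	ibdm_ibnex_mutex protects the IB nexus callback pointer used to
 *	notify ibnex of HCA arrival/removal.
 */
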
/*
 * ibdm_handle_hca_detach()
 */
static void
ibdm_handle_hca_detach(ib_guid_t hca_guid)
{
	ibdm_hca_list_t		*head, *prev = NULL;
	size_t			len;
	ibdm_dp_gidinfo_t	*gidinfo;

	IBTF_DPRINTF_L4("ibdm",
	    "\thandle_hca_detach: hca_guid = 0x%llx", hca_guid);

	/* Make sure no probes are running */
	mutex_enter(&ibdm.ibdm_mutex);
	while (ibdm.ibdm_busy & IBDM_BUSY)
		cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
	ibdm.ibdm_busy |= IBDM_BUSY;
	mutex_exit(&ibdm.ibdm_mutex);

	mutex_enter(&ibdm.ibdm_hl_mutex);
	head = ibdm.ibdm_hca_list_head;
	while (head) {
		if (head->hl_hca_guid == hca_guid) {
			if (prev == NULL)
				ibdm.ibdm_hca_list_head = head->hl_next;
			else
				prev->hl_next = head->hl_next;
			ibdm.ibdm_hca_count--;
			break;
		}
		prev = head;
		head = head->hl_next;
	}
	mutex_exit(&ibdm.ibdm_hl_mutex);
	if (ibdm_uninit_hca(head) != IBDM_SUCCESS)
		(void) ibdm_handle_hca_attach(hca_guid);

	/*
	 * Now clean up the HCA lists in the gidlist.
	 */
	for (gidinfo = ibdm.ibdm_dp_gidlist_head; gidinfo; gidinfo =
	    gidinfo->gl_next) {
		prev = NULL;
		head = gidinfo->gl_hca_list;
		while (head) {
			if (head->hl_hca_guid == hca_guid) {
				if (prev == NULL)
					gidinfo->gl_hca_list =
					    head->hl_next;
				else
					prev->hl_next = head->hl_next;

				len = sizeof (ibdm_hca_list_t) +
				    (head->hl_nports *
				    sizeof (ibdm_port_attr_t));
				kmem_free(head, len);

				break;
			}
			prev = head;
			head = head->hl_next;
		}
	}

	mutex_enter(&ibdm.ibdm_mutex);
	ibdm.ibdm_busy &= ~IBDM_BUSY;
	cv_broadcast(&ibdm.ibdm_busy_cv);
	mutex_exit(&ibdm.ibdm_mutex);
}


static int
ibdm_uninit_hca(ibdm_hca_list_t *head)
{
	int			ii;
	ibdm_port_attr_t	*port_attr;

	for (ii = 0; ii < head->hl_nports; ii++) {
		port_attr = &head->hl_port_attr[ii];
		if (ibdm_fini_port(port_attr) != IBDM_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "uninit_hca: HCA %p port 0x%x "
			    "ibdm_fini_port() failed", head, ii);
			return (IBDM_FAILURE);
		}
	}
	if (head->hl_hca_hdl)
		if (ibt_close_hca(head->hl_hca_hdl) != IBT_SUCCESS)
			return (IBDM_FAILURE);
	kmem_free(head->hl_port_attr,
	    head->hl_nports * sizeof (ibdm_port_attr_t));
	kmem_free(head->hl_hca_port_attr, sizeof (ibdm_port_attr_t));
	kmem_free(head, sizeof (ibdm_hca_list_t));
	return (IBDM_SUCCESS);
}


/*
 * For each port on the HCA,
 *	1) Teardown IBMF receive callback function
 *	2) Unregister with IBMF
 *	3) Unregister with SA access
 */
static int
ibdm_fini_port(ibdm_port_attr_t *port_attr)
{
	int	ii, ibmf_status;

	for (ii = 0; ii < port_attr->pa_npkeys; ii++) {
		if (port_attr->pa_pkey_tbl == NULL)
			break;
		if (!port_attr->pa_pkey_tbl[ii].pt_qp_hdl)
			continue;
		if (ibdm_port_attr_ibmf_fini(port_attr, ii) != IBDM_SUCCESS) {
			IBTF_DPRINTF_L4("ibdm", "\tfini_port: "
			    "ibdm_port_attr_ibmf_fini failed for "
			    "port pkey 0x%x", ii);
			return (IBDM_FAILURE);
		}
	}

	if (port_attr->pa_ibmf_hdl) {
		ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl,
		    IBMF_QP_HANDLE_DEFAULT, 0);
		if (ibmf_status != IBMF_SUCCESS) {
			IBTF_DPRINTF_L4("ibdm", "\tfini_port: "
			    "ibmf_tear_down_async_cb failed %d", ibmf_status);
			return (IBDM_FAILURE);
		}

		ibmf_status = ibmf_unregister(&port_attr->pa_ibmf_hdl, 0);
		if (ibmf_status != IBMF_SUCCESS) {
			IBTF_DPRINTF_L4("ibdm", "\tfini_port: "
			    "ibmf_unregister failed %d", ibmf_status);
			return (IBDM_FAILURE);
		}

		port_attr->pa_ibmf_hdl = NULL;
	}

	if (port_attr->pa_sa_hdl) {
		ibmf_status = ibmf_sa_session_close(&port_attr->pa_sa_hdl, 0);
		if (ibmf_status != IBMF_SUCCESS) {
			IBTF_DPRINTF_L4("ibdm", "\tfini_port: "
			    "ibmf_sa_session_close failed %d", ibmf_status);
			return (IBDM_FAILURE);
		}
		port_attr->pa_sa_hdl = NULL;
	}

	if (port_attr->pa_pkey_tbl != NULL) {
		kmem_free(port_attr->pa_pkey_tbl,
		    port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t));
		port_attr->pa_pkey_tbl = NULL;
		port_attr->pa_npkeys = 0;
	}

	return (IBDM_SUCCESS);
}


/*
 * ibdm_port_attr_ibmf_fini:
 *	With IBMF - Tear down Async callback and free QP Handle
 */
static int
ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *port_attr, int ii)
{
	int ibmf_status;

	IBTF_DPRINTF_L5("ibdm", "\tport_attr_ibmf_fini:");

	if (port_attr->pa_pkey_tbl[ii].pt_qp_hdl) {
		ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl,
		    port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0);
		if (ibmf_status != IBMF_SUCCESS) {
			IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: "
			    "ibmf_tear_down_async_cb failed %d", ibmf_status);
			return (IBDM_FAILURE);
		}
		ibmf_status = ibmf_free_qp(port_attr->pa_ibmf_hdl,
		    &port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0);
		if (ibmf_status != IBMF_SUCCESS) {
			IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: "
			    "ibmf_free_qp failed %d", ibmf_status);
			return (IBDM_FAILURE);
		}
		port_attr->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
	}
	return (IBDM_SUCCESS);
}


/*
 * ibdm_gid_decr_pending:
 *	Decrement gl_pending_cmds. If it reaches zero, wake up any
 *	sleeping threads.
 */
static void
ibdm_gid_decr_pending(ibdm_dp_gidinfo_t *gidinfo)
{
	mutex_enter(&ibdm.ibdm_mutex);
	mutex_enter(&gidinfo->gl_mutex);
	if (--gidinfo->gl_pending_cmds == 0) {
		/*
		 * Handle DGID getting removed.
		 */
		if (gidinfo->gl_disconnected) {
			mutex_exit(&gidinfo->gl_mutex);
			mutex_exit(&ibdm.ibdm_mutex);

			IBTF_DPRINTF_L3(ibdm_string, "\tgid_decr_pending: "
			    "gidinfo %p hot removal", gidinfo);
			ibdm_delete_gidinfo(gidinfo);

			mutex_enter(&ibdm.ibdm_mutex);
			ibdm.ibdm_ngid_probes_in_progress--;
			ibdm_wait_probe_completion();
			mutex_exit(&ibdm.ibdm_mutex);
			return;
		}
		mutex_exit(&gidinfo->gl_mutex);
		mutex_exit(&ibdm.ibdm_mutex);
		ibdm_notify_newgid_iocs(gidinfo);
		mutex_enter(&ibdm.ibdm_mutex);
		mutex_enter(&gidinfo->gl_mutex);

		ibdm.ibdm_ngid_probes_in_progress--;
		ibdm_wait_probe_completion();
	}
	mutex_exit(&gidinfo->gl_mutex);
	mutex_exit(&ibdm.ibdm_mutex);
}


/*
 * ibdm_wait_probe_completion:
 *	wait for probing to complete
 */
static void
ibdm_wait_probe_completion(void)
{
	ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));
	if (ibdm.ibdm_ngid_probes_in_progress) {
		IBTF_DPRINTF_L4("ibdm", "\twait for probe complete");
		ibdm.ibdm_busy |= IBDM_PROBE_IN_PROGRESS;
		while (ibdm.ibdm_busy & IBDM_PROBE_IN_PROGRESS)
			cv_wait(&ibdm.ibdm_probe_cv, &ibdm.ibdm_mutex);
	}
}


/*
 * ibdm_wakeup_probe_gid_cv:
 *	wakeup waiting threads (based on ibdm_ngid_probes_in_progress)
 */
static void
ibdm_wakeup_probe_gid_cv(void)
{
	ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));
	if (!ibdm.ibdm_ngid_probes_in_progress) {
		IBTF_DPRINTF_L4("ibdm", "wakeup_probe_gid_thread: Wakeup");
		ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS;
		cv_broadcast(&ibdm.ibdm_probe_cv);
	}

}


/*
 * ibdm_sweep_fabric(reprobe_flag)
 *	Find all possible Managed IOUs and their IOCs that are visible
 *	to the host. The algorithm used is as follows:
 *
 *	Send a "bus walk" request for each port on the host HCA to SA access
 *	SA returns the complete set of GIDs that are reachable from the
 *	source port. This is done in parallel.
 *
 *	Initialize GID state to IBDM_GID_PROBE_NOT_DONE
 *
 *	Sort the GID list and eliminate duplicate GIDs
 *		1) Use DGID for sorting
 *		2) Use PortGuid for sorting
 *		Send SA query to retrieve NodeRecord and
 *		extract PortGuid from that.
 *
 *	Set GID state to IBDM_GID_PROBE_FAILED for all the ports that don't
 *	support DM MADs
 *		Send a "Portinfo" query to get the port capabilities and
 *		then check for DM MAD support
 *
 *	Send a "ClassPortInfo" request for all the GIDs in parallel,
 *	set the GID state to IBDM_GET_CLASSPORTINFO and wait on the
 *	cv_signal to complete.
 *
 *	When the DM agent on the remote GID sends back the response, IBMF
 *	invokes the DM callback routine.
 *
 *	If the response is proper, send an "IOUnitInfo" request and set
 *	the GID state to IBDM_GET_IOUNITINFO.
 *
 *	If the response is proper, send an "IocProfileInfo" request to
 *	all the IOCs simultaneously and set the GID state to
 *	IBDM_GET_IOC_DETAILS.
 *
 *	Send requests to get the Service entries simultaneously
 *
 *	Signal the waiting thread when responses are received for all the
 *	commands.
 *
 *	Set the GID state to IBDM_GID_PROBE_FAILED when an error response
 *	is received during the probing period.
 *
 *	Note:
 *	ibdm.ibdm_ngid_probes_in_progress and ibdm_gid_list_t:gl_pending_cmds
 *	keep track of the number of commands in progress at any point of time.
 *	The MAD transaction ID is used to identify a particular GID.
 *	TBD: Consider registering the IBMF receive callback on demand
 *
 *	Note: This routine must be called with ibdm.ibdm_mutex held
 *	TBD: Re-probe the failed GIDs (for certain failures) when requested
 *	for fabric sweep next time
 *
 *	Parameters : If reprobe_flag is set, all IOCs will be reprobed.
 */
static void
ibdm_sweep_fabric(int reprobe_flag)
{
	int			ii;
	int			new_paths = 0;
	uint8_t			niocs;
	taskqid_t		tid;
	ibdm_ioc_info_t		*ioc;
	ibdm_hca_list_t		*hca_list = NULL;
	ibdm_port_attr_t	*port = NULL;
	ibdm_dp_gidinfo_t	*gid_info;

	IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: Enter");
	ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));

	/*
	 * Check whether a sweep is already in progress. If so, just
	 * wait for the fabric sweep to complete.
	 */
	while (ibdm.ibdm_busy & IBDM_BUSY)
		cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
	ibdm.ibdm_busy |= IBDM_BUSY;
	mutex_exit(&ibdm.ibdm_mutex);

	ibdm_dump_sweep_fabric_timestamp(0);

	/* Rescan the GID list for any removed GIDs for reprobe */
	if (reprobe_flag)
		ibdm_rescan_gidlist(NULL);

	/*
	 * Get list of all the ports reachable from the local known HCA
	 * ports which are active
	 */
	mutex_enter(&ibdm.ibdm_hl_mutex);
	for (ibdm_get_next_port(&hca_list, &port, 1); port;
	    ibdm_get_next_port(&hca_list, &port, 1)) {
		/*
		 * Get PATHS to all the reachable ports from
		 * SGID and update the global ibdm structure.
		 */
		new_paths = ibdm_get_reachable_ports(port, hca_list);
		ibdm.ibdm_ngids += new_paths;
	}
	mutex_exit(&ibdm.ibdm_hl_mutex);

	mutex_enter(&ibdm.ibdm_mutex);
	ibdm.ibdm_ngid_probes_in_progress += ibdm.ibdm_ngids;
	mutex_exit(&ibdm.ibdm_mutex);

	/* Send a request to probe GIDs asynchronously. */
	for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info;
	    gid_info = gid_info->gl_next) {
		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_reprobe_flag = reprobe_flag;
		mutex_exit(&gid_info->gl_mutex);

		/* process newly encountered GIDs */
		tid = taskq_dispatch(system_taskq, ibdm_probe_gid_thread,
		    (void *)gid_info, TQ_NOSLEEP);
		IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: gid_info = %p"
		    " taskq_id = %x", gid_info, tid);
		/* taskq failed to dispatch; call it directly */
		if (tid == NULL)
			ibdm_probe_gid_thread((void *)gid_info);
	}

	mutex_enter(&ibdm.ibdm_mutex);
	ibdm_wait_probe_completion();

	/*
	 * Update the properties, if reprobe_flag is set
	 * Skip if gl_reprobe_flag is set, this will be
	 * a re-inserted / new GID, for which notifications
	 * have already been sent.
	 */
	if (reprobe_flag) {
		for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info;
		    gid_info = gid_info->gl_next) {
			if (gid_info->gl_iou == NULL)
				continue;
			if (gid_info->gl_reprobe_flag) {
				gid_info->gl_reprobe_flag = 0;
				continue;
			}

			niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
			for (ii = 0; ii < niocs; ii++) {
				ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii);
				if (ioc)
					ibdm_reprobe_update_port_srv(ioc,
					    gid_info);
			}
		}
	}
	ibdm_dump_sweep_fabric_timestamp(1);

	ibdm.ibdm_busy &= ~IBDM_BUSY;
	cv_broadcast(&ibdm.ibdm_busy_cv);
	IBTF_DPRINTF_L5("ibdm", "\tsweep_fabric: EXIT");
}


/*
 * ibdm_probe_gid_thread:
 *	thread that does the actual work for sweeping the fabric
 *	for a given GID
 */
static void
ibdm_probe_gid_thread(void *args)
{
	int			reprobe_flag;
	ib_guid_t		node_guid;
	ib_guid_t		port_guid;
	ibdm_dp_gidinfo_t	*gid_info;

	gid_info = (ibdm_dp_gidinfo_t *)args;
	reprobe_flag = gid_info->gl_reprobe_flag;
	IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: gid_info = %p, flag = %d",
	    gid_info, reprobe_flag);
	ASSERT(gid_info != NULL);
	ASSERT(gid_info->gl_pending_cmds == 0);

	if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE &&
	    reprobe_flag == 0) {
		/*
		 * This GID may have been already probed. Send
		 * in a CLP to check if IOUnitInfo changed?
		 * Explicitly set gl_reprobe_flag to 0 so that
		 * IBnex is not notified on completion
		 */
		if (gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) {
			IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: "
			    "get new IOCs information");
			mutex_enter(&gid_info->gl_mutex);
			gid_info->gl_pending_cmds++;
			gid_info->gl_state = IBDM_GET_IOUNITINFO;
			gid_info->gl_reprobe_flag = 0;
			mutex_exit(&gid_info->gl_mutex);
			if (ibdm_send_iounitinfo(gid_info) != IBDM_SUCCESS) {
				mutex_enter(&gid_info->gl_mutex);
				gid_info->gl_pending_cmds = 0;
				mutex_exit(&gid_info->gl_mutex);
				mutex_enter(&ibdm.ibdm_mutex);
				--ibdm.ibdm_ngid_probes_in_progress;
				ibdm_wakeup_probe_gid_cv();
				mutex_exit(&ibdm.ibdm_mutex);
			}
		} else {
			mutex_enter(&ibdm.ibdm_mutex);
			--ibdm.ibdm_ngid_probes_in_progress;
			ibdm_wakeup_probe_gid_cv();
			mutex_exit(&ibdm.ibdm_mutex);
		}
		return;
	} else if (reprobe_flag && gid_info->gl_state ==
	    IBDM_GID_PROBING_COMPLETE) {
		/*
		 * Reprobe all IOCs for the GID which has completed
		 * probe. Skip other port GIDs to same IOU.
		 * Explicitly set gl_reprobe_flag to 0 so that
		 * IBnex is not notified on completion
		 */
		ibdm_ioc_info_t	*ioc_info;
		uint8_t		niocs, ii;

		ASSERT(gid_info->gl_iou);
		mutex_enter(&gid_info->gl_mutex);
		niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
		gid_info->gl_state = IBDM_GET_IOC_DETAILS;
		gid_info->gl_pending_cmds += niocs;
		gid_info->gl_reprobe_flag = 0;
		mutex_exit(&gid_info->gl_mutex);
		for (ii = 0; ii < niocs; ii++) {
			uchar_t			slot_info;
			ib_dm_io_unitinfo_t	*giou_info;

			/*
			 * Check whether IOC is present in the slot
			 * Series of nibbles (in the field
			 * iou_ctrl_list) represents a slot in the
			 * IOU.
			 * Byte format: 76543210
			 * Bits 0-3 of first byte represent Slot 2
			 * bits 4-7 of first byte represent slot 1,
			 * bits 0-3 of second byte represent slot 4
			 * and so on
			 * Each 4-bit nibble has the following meaning
			 * 0x0 : IOC not installed
			 * 0x1 : IOC is present
			 * 0xf : Slot does not exist
			 * and all other values are reserved.
			 */
			ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii);
			giou_info = &gid_info->gl_iou->iou_info;
			slot_info = giou_info->iou_ctrl_list[(ii/2)];
			if ((ii % 2) == 0)
				slot_info = (slot_info >> 4);

			if ((slot_info & 0xf) != 1) {
				ioc_info->ioc_state =
				    IBDM_IOC_STATE_PROBE_FAILED;
				ibdm_gid_decr_pending(gid_info);
				continue;
			}

			if (ibdm_send_ioc_profile(gid_info, ii) !=
			    IBDM_SUCCESS) {
				ibdm_gid_decr_pending(gid_info);
			}
		}

		return;
	} else if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) {
		mutex_enter(&ibdm.ibdm_mutex);
		--ibdm.ibdm_ngid_probes_in_progress;
		ibdm_wakeup_probe_gid_cv();
		mutex_exit(&ibdm.ibdm_mutex);
		return;
	}

	mutex_enter(&gid_info->gl_mutex);
	gid_info->gl_pending_cmds++;
	gid_info->gl_state = IBDM_GET_CLASSPORTINFO;
	mutex_exit(&gid_info->gl_mutex);

	/*
	 * Check whether the destination GID supports DM agents. If
	 * not, stop probing the GID and continue with the next GID
	 * in the list.
	 */
	if (ibdm_is_dev_mgt_supported(gid_info) != IBDM_SUCCESS) {
		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_pending_cmds = 0;
		gid_info->gl_state = IBDM_GID_PROBING_FAILED;
		mutex_exit(&gid_info->gl_mutex);
		ibdm_delete_glhca_list(gid_info);
		mutex_enter(&ibdm.ibdm_mutex);
		--ibdm.ibdm_ngid_probes_in_progress;
		ibdm_wakeup_probe_gid_cv();
		mutex_exit(&ibdm.ibdm_mutex);
		return;
	}

	/* Get the nodeguid and portguid of the port */
	if (ibdm_get_node_port_guids(gid_info->gl_sa_hdl, gid_info->gl_dlid,
	    &node_guid, &port_guid) != IBDM_SUCCESS) {
		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_pending_cmds = 0;
		gid_info->gl_state = IBDM_GID_PROBING_FAILED;
		mutex_exit(&gid_info->gl_mutex);
		ibdm_delete_glhca_list(gid_info);
		mutex_enter(&ibdm.ibdm_mutex);
		--ibdm.ibdm_ngid_probes_in_progress;
		ibdm_wakeup_probe_gid_cv();
		mutex_exit(&ibdm.ibdm_mutex);
		return;
	}

	/*
	 * Check whether we already knew about this NodeGuid
	 * If so, do not probe the GID and continue with the
	 * next GID in the gid list. Set the GID state to
	 * probing done.
	 */
	mutex_enter(&ibdm.ibdm_mutex);
	gid_info->gl_nodeguid = node_guid;
	gid_info->gl_portguid = port_guid;
	if (ibdm_check_dest_nodeguid(gid_info) != NULL) {
		mutex_exit(&ibdm.ibdm_mutex);
		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_pending_cmds = 0;
		gid_info->gl_state = IBDM_GID_PROBING_SKIPPED;
		mutex_exit(&gid_info->gl_mutex);
		ibdm_delete_glhca_list(gid_info);
		mutex_enter(&ibdm.ibdm_mutex);
		--ibdm.ibdm_ngid_probes_in_progress;
		ibdm_wakeup_probe_gid_cv();
		mutex_exit(&ibdm.ibdm_mutex);
		return;
	}
	ibdm_add_to_gl_gid(gid_info, gid_info);
	mutex_exit(&ibdm.ibdm_mutex);

	/*
	 * New or reinserted GID : Enable notification to IBnex
	 */
	mutex_enter(&gid_info->gl_mutex);
	gid_info->gl_reprobe_flag = 1;
	mutex_exit(&gid_info->gl_mutex);

	/*
	 * Send ClassPortInfo request to the GID asynchronously.
	 */
	if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) {
		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_state = IBDM_GID_PROBING_FAILED;
		gid_info->gl_pending_cmds = 0;
		mutex_exit(&gid_info->gl_mutex);
		ibdm_delete_glhca_list(gid_info);
		mutex_enter(&ibdm.ibdm_mutex);
		--ibdm.ibdm_ngid_probes_in_progress;
		ibdm_wakeup_probe_gid_cv();
		mutex_exit(&ibdm.ibdm_mutex);
		return;
	}
}


/*
 * ibdm_check_dest_nodeguid
 *	Searches for the NodeGuid in the GID list
 *	Returns the matching gid_info if found and otherwise NULL
 *
 *	This function is called to handle new GIDs discovered
 *	during device sweep / probe or for GID_AVAILABLE event.
 *
 *	Parameter :
 *		gid_info	GID to check
 */
static ibdm_dp_gidinfo_t *
ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *gid_info)
{
	ibdm_dp_gidinfo_t	*gid_list;
	ibdm_gid_t		*tmp;

	IBTF_DPRINTF_L4("ibdm", "\tcheck_dest_nodeguid");

	gid_list = ibdm.ibdm_dp_gidlist_head;
	while (gid_list) {
		if ((gid_list != gid_info) &&
		    (gid_info->gl_nodeguid == gid_list->gl_nodeguid)) {
			IBTF_DPRINTF_L4("ibdm",
			    "\tcheck_dest_nodeguid: NodeGuid is present");

			/* Add to gid_list */
			tmp = kmem_zalloc(sizeof (ibdm_gid_t),
			    KM_SLEEP);
			tmp->gid_dgid_hi = gid_info->gl_dgid_hi;
			tmp->gid_dgid_lo = gid_info->gl_dgid_lo;
			tmp->gid_next = gid_list->gl_gid;
			gid_list->gl_gid = tmp;
			gid_list->gl_ngids++;
			return (gid_list);
		}

		gid_list = gid_list->gl_next;
	}

	return (NULL);
}


/*
 * ibdm_is_dev_mgt_supported
 *	Get the PortInfo attribute (SA Query)
 *	Check the "CapabilityMask" field in the PortInfo.
 *	Return IBDM_SUCCESS if DM MADs are supported by the port (i.e. if
 *	bit 19, IsDeviceManagementSupported, is set), otherwise IBDM_FAILURE.
 */
static int
ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *gid_info)
{
	int			ret;
	size_t			length = 0;
	sa_portinfo_record_t	req, *resp = NULL;
	ibmf_saa_access_args_t	qargs;

	bzero(&req, sizeof (sa_portinfo_record_t));
	req.EndportLID = gid_info->gl_dlid;

	qargs.sq_attr_id = SA_PORTINFORECORD_ATTRID;
	qargs.sq_access_type = IBMF_SAA_RETRIEVE;
	qargs.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID;
	qargs.sq_template = &req;
	qargs.sq_callback = NULL;
	qargs.sq_callback_arg = NULL;

	ret = ibmf_sa_access(gid_info->gl_sa_hdl,
	    &qargs, 0, &length, (void **)&resp);

	if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) {
		IBTF_DPRINTF_L2("ibdm", "\tis_dev_mgt_supported:"
		    "failed to get PORTINFO attribute %d", ret);
		return (IBDM_FAILURE);
	}

	if (resp->PortInfo.CapabilityMask & SM_CAP_MASK_IS_DM_SUPPD) {
		IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: SUPPD !!");
		ret = IBDM_SUCCESS;
	} else {
		IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: "
		    "Not SUPPD !!, cap 0x%x", resp->PortInfo.CapabilityMask);
		ret = IBDM_FAILURE;
	}
	kmem_free(resp, length);
	return (ret);
}


/*
 * ibdm_get_node_port_guids()
 *	Get the NodeInfoRecord of the port
 *	Save NodeGuid and PortGUID values in the GID list structure.
 *	Return IBDM_SUCCESS/IBDM_FAILURE
 */
static int
ibdm_get_node_port_guids(ibmf_saa_handle_t sa_hdl, ib_lid_t dlid,
    ib_guid_t *node_guid, ib_guid_t *port_guid)
{
	int			ret;
	size_t			length = 0;
	sa_node_record_t	req, *resp = NULL;
	ibmf_saa_access_args_t	qargs;

	IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids");

	bzero(&req, sizeof (sa_node_record_t));
	req.LID = dlid;

	qargs.sq_attr_id = SA_NODERECORD_ATTRID;
	qargs.sq_access_type = IBMF_SAA_RETRIEVE;
	qargs.sq_component_mask = SA_NODEINFO_COMPMASK_NODELID;
	qargs.sq_template = &req;
	qargs.sq_callback = NULL;
	qargs.sq_callback_arg = NULL;

	ret = ibmf_sa_access(sa_hdl, &qargs, 0, &length, (void **)&resp);
	if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) {
		IBTF_DPRINTF_L2("ibdm", "\tget_node_port_guids:"
		    " SA Retrieve Failed: %d", ret);
		return (IBDM_FAILURE);
	}
	IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids: NodeGuid %llx Port"
	    "GUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.PortGUID);

	*node_guid = resp->NodeInfo.NodeGUID;
	*port_guid = resp->NodeInfo.PortGUID;
	kmem_free(resp, length);
	return (IBDM_SUCCESS);
}
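
/*
 * Note: each GID discovered by ibdm_get_reachable_ports() below is assigned
 * its own window of MAD TransactionIDs, [gl_min_transactionID,
 * gl_max_transactionID), carved out of the global ibdm_transactionID counter
 * using IBDM_GID_TRANSACTIONID_SHIFT. DM MADs sent to the GID draw their
 * TransactionID from this window (see ibdm_bump_transactionID()), which is
 * how the receive path maps a response back to the GID it belongs to.
 */
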
/*
 * ibdm_get_reachable_ports()
 *	Get the list of destination GIDs (and their path records) by
 *	querying SA access.
 *
 *	Returns the number of paths
 */
static int
ibdm_get_reachable_ports(ibdm_port_attr_t *portinfo, ibdm_hca_list_t *hca)
{
	uint_t			ii, jj, nrecs;
	uint_t			npaths = 0;
	size_t			length;
	ib_gid_t		sgid;
	ibdm_pkey_tbl_t		*pkey_tbl;
	sa_path_record_t	*result;
	sa_path_record_t	*precp;
	ibdm_dp_gidinfo_t	*gid_info;

	ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));
	IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: portinfo %p", portinfo);

	sgid.gid_prefix = portinfo->pa_sn_prefix;
	sgid.gid_guid = portinfo->pa_port_guid;

	/* get reversible paths */
	if (portinfo->pa_sa_hdl && ibmf_saa_paths_from_gid(portinfo->pa_sa_hdl,
	    sgid, IBMF_SAA_PKEY_WC, B_TRUE, 0, &nrecs, &length, &result)
	    != IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm",
		    "\tget_reachable_ports: Getting path records failed");
		return (0);
	}

	for (ii = 0; ii < nrecs; ii++) {
		precp = &result[ii];
		if ((gid_info = ibdm_check_dgid(precp->DGID.gid_guid,
		    precp->DGID.gid_prefix)) != NULL) {
			IBTF_DPRINTF_L2("ibdm", "\tget_reachable_ports: "
			    "Already exists nrecs %d, ii %d", nrecs, ii);
			ibdm_addto_glhcalist(gid_info, hca);
			continue;
		}
		/*
		 * This is a new GID. Allocate a GID structure and
		 * initialize the structure
		 * gl_state is initialized to IBDM_GID_PROBE_NOT_DONE (0)
		 * by kmem_zalloc call
		 */
		gid_info = kmem_zalloc(sizeof (ibdm_dp_gidinfo_t), KM_SLEEP);
		mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL);
		gid_info->gl_dgid_hi = precp->DGID.gid_prefix;
		gid_info->gl_dgid_lo = precp->DGID.gid_guid;
		gid_info->gl_sgid_hi = precp->SGID.gid_prefix;
		gid_info->gl_sgid_lo = precp->SGID.gid_guid;
		gid_info->gl_p_key = precp->P_Key;
		gid_info->gl_sa_hdl = portinfo->pa_sa_hdl;
		gid_info->gl_ibmf_hdl = portinfo->pa_ibmf_hdl;
		gid_info->gl_slid = precp->SLID;
		gid_info->gl_dlid = precp->DLID;
		gid_info->gl_transactionID = (++ibdm.ibdm_transactionID)
		    << IBDM_GID_TRANSACTIONID_SHIFT;
		gid_info->gl_min_transactionID = gid_info->gl_transactionID;
		gid_info->gl_max_transactionID = (ibdm.ibdm_transactionID + 1)
		    << IBDM_GID_TRANSACTIONID_SHIFT;
		ibdm_addto_glhcalist(gid_info, hca);

		ibdm_dump_path_info(precp);

		gid_info->gl_qp_hdl = NULL;
		ASSERT(portinfo->pa_pkey_tbl != NULL &&
		    portinfo->pa_npkeys != 0);

		for (jj = 0; jj < portinfo->pa_npkeys; jj++) {
			pkey_tbl = &portinfo->pa_pkey_tbl[jj];
			if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) &&
			    (pkey_tbl->pt_qp_hdl != NULL)) {
				gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl;
				break;
			}
		}

		/*
		 * QP handle for GID not initialized. No matching Pkey
		 * was found!! ibdm should *not* hit this case. Flag an
		 * error and drop the GID if ibdm does encounter this.
		 */
		if (gid_info->gl_qp_hdl == NULL) {
			IBTF_DPRINTF_L2(ibdm_string,
			    "\tget_reachable_ports: No matching Pkey");
			ibdm_delete_gidinfo(gid_info);
			continue;
		}
		if (ibdm.ibdm_dp_gidlist_head == NULL) {
			ibdm.ibdm_dp_gidlist_head = gid_info;
			ibdm.ibdm_dp_gidlist_tail = gid_info;
		} else {
			ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info;
			gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail;
			ibdm.ibdm_dp_gidlist_tail = gid_info;
		}
		npaths++;
	}
	kmem_free(result, length);
	IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: npaths = %d", npaths);
	return (npaths);
}


/*
 * ibdm_check_dgid()
 *	Look in the global list to check whether we know this DGID already.
 *	Returns the matching gidinfo if found, otherwise NULL.
 */
static ibdm_dp_gidinfo_t *
ibdm_check_dgid(ib_guid_t guid, ib_sn_prefix_t prefix)
{
	ibdm_dp_gidinfo_t	*gid_list;

	for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list;
	    gid_list = gid_list->gl_next) {
		if ((guid == gid_list->gl_dgid_lo) &&
		    (prefix == gid_list->gl_dgid_hi)) {
			break;
		}
	}
	return (gid_list);
}


/*
 * ibdm_find_gid()
 *	Look in the global list to find a GID entry with matching
 *	port & node GUID.
 *	Return pointer to gidinfo if found, else return NULL
 */
static ibdm_dp_gidinfo_t *
ibdm_find_gid(ib_guid_t nodeguid, ib_guid_t portguid)
{
	ibdm_dp_gidinfo_t	*gid_list;

	IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid(%llx, %llx)\n",
	    nodeguid, portguid);

	for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list;
	    gid_list = gid_list->gl_next) {
		if ((portguid == gid_list->gl_portguid) &&
		    (nodeguid == gid_list->gl_nodeguid)) {
			break;
		}
	}

	IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid : returned %p\n",
	    gid_list);
	return (gid_list);
}


/*
 * ibdm_send_classportinfo()
 *	Send a ClassPortInfo request. When the request is completed,
 *	IBMF calls the ibdm_classportinfo_cb routine to inform about
 *	the completion.
 *	Returns IBDM_SUCCESS/IBDM_FAILURE
 */
static int
ibdm_send_classportinfo(ibdm_dp_gidinfo_t *gid_info)
{
	ibmf_msg_t		*msg;
	ib_mad_hdr_t		*hdr;
	ibdm_timeout_cb_args_t	*cb_args;

	IBTF_DPRINTF_L4("ibdm",
	    "\tsend_classportinfo: gid info 0x%p", gid_info);

	/*
	 * Send command to get classportinfo attribute. Allocate an IBMF
	 * packet and initialize the packet.
1835 */ 1836 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 1837 &msg) != IBMF_SUCCESS) { 1838 IBTF_DPRINTF_L4("ibdm", "\tsend_classportinfo: pkt alloc fail"); 1839 return (IBDM_FAILURE); 1840 } 1841 1842 ibdm_alloc_send_buffers(msg); 1843 1844 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 1845 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 1846 msg->im_local_addr.ia_remote_qno = 1; 1847 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 1848 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 1849 1850 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 1851 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 1852 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 1853 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 1854 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 1855 hdr->Status = 0; 1856 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 1857 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 1858 hdr->AttributeModifier = 0; 1859 1860 cb_args = &gid_info->gl_cpi_cb_args; 1861 cb_args->cb_gid_info = gid_info; 1862 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 1863 cb_args->cb_req_type = IBDM_REQ_TYPE_CLASSPORTINFO; 1864 1865 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 1866 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 1867 1868 IBTF_DPRINTF_L5("ibdm", "\tsend_classportinfo: " 1869 "timeout id %x", gid_info->gl_timeout_id); 1870 1871 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 1872 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 1873 IBTF_DPRINTF_L2("ibdm", 1874 "\tsend_classportinfo: ibmf send failed"); 1875 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 1876 } 1877 1878 return (IBDM_SUCCESS); 1879 } 1880 1881 1882 /* 1883 * ibdm_handle_classportinfo() 1884 * Invoked by the IBMF when the classportinfo request is completed. 1885 */ 1886 static void 1887 ibdm_handle_classportinfo(ibmf_handle_t ibmf_hdl, 1888 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 1889 { 1890 void *data; 1891 timeout_id_t timeout_id; 1892 ib_mad_hdr_t *hdr; 1893 ibdm_mad_classportinfo_t *cpi; 1894 1895 IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo:ibmf hdl " 1896 "%p msg %p gid info %p", ibmf_hdl, msg, gid_info); 1897 1898 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO) { 1899 IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo: " 1900 "Not a ClassPortInfo resp"); 1901 *flag |= IBDM_IBMF_PKT_UNEXP_RESP; 1902 return; 1903 } 1904 1905 /* 1906 * Verify whether timeout handler is created/active. 
1907 * If created/ active, cancel the timeout handler 1908 */ 1909 mutex_enter(&gid_info->gl_mutex); 1910 ibdm_bump_transactionID(gid_info); 1911 if (gid_info->gl_state != IBDM_GET_CLASSPORTINFO) { 1912 IBTF_DPRINTF_L2("ibdm", "\thandle_classportinfo:DUP resp"); 1913 *flag |= IBDM_IBMF_PKT_DUP_RESP; 1914 mutex_exit(&gid_info->gl_mutex); 1915 return; 1916 } 1917 gid_info->gl_iou_cb_args.cb_req_type = 0; 1918 if (gid_info->gl_timeout_id) { 1919 timeout_id = gid_info->gl_timeout_id; 1920 mutex_exit(&gid_info->gl_mutex); 1921 IBTF_DPRINTF_L5("ibdm", "handle_ioclassportinfo: " 1922 "gl_timeout_id = 0x%x", timeout_id); 1923 if (untimeout(timeout_id) == -1) { 1924 IBTF_DPRINTF_L2("ibdm", "handle_classportinfo: " 1925 "untimeout gl_timeout_id failed"); 1926 } 1927 mutex_enter(&gid_info->gl_mutex); 1928 gid_info->gl_timeout_id = 0; 1929 } 1930 gid_info->gl_state = IBDM_GET_IOUNITINFO; 1931 gid_info->gl_pending_cmds++; 1932 mutex_exit(&gid_info->gl_mutex); 1933 1934 data = msg->im_msgbufs_recv.im_bufs_cl_data; 1935 cpi = (ibdm_mad_classportinfo_t *)data; 1936 1937 /* 1938 * Cache the "RespTimeValue" and redirection information in the 1939 * global gid list data structure. This cached information will 1940 * be used to send any further requests to the GID. 1941 */ 1942 gid_info->gl_resp_timeout = 1943 (b2h32(cpi->RespTimeValue) & 0x1F); 1944 1945 gid_info->gl_redirected = ((IBDM_IN_IBMFMSG_STATUS(msg) & 1946 MAD_STATUS_REDIRECT_REQUIRED) ? B_TRUE: B_FALSE); 1947 gid_info->gl_redirect_dlid = b2h16(cpi->RedirectLID); 1948 gid_info->gl_redirect_QP = (b2h32(cpi->RedirectQP) & 0xffffff); 1949 gid_info->gl_redirect_pkey = b2h16(cpi->RedirectP_Key); 1950 gid_info->gl_redirect_qkey = b2h32(cpi->RedirectQ_Key); 1951 gid_info->gl_redirectGID_hi = b2h64(cpi->RedirectGID_hi); 1952 gid_info->gl_redirectGID_lo = b2h64(cpi->RedirectGID_lo); 1953 1954 ibdm_dump_classportinfo(cpi); 1955 1956 /* 1957 * Send IOUnitInfo request 1958 * Reuse previously allocated IBMF packet for sending ClassPortInfo 1959 * Check whether DM agent on the remote node requested redirection 1960 * If so, send the request to the redirect DGID/DLID/PKEY/QP. 
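 * For example (purely illustrative values): if the DM agent redirected
 * us to LID 0x24 and QP 0x5, the IOUnitInfo request below is sent with
 * ia_remote_lid 0x24, ia_remote_qno 0x5 and the cached
 * gl_redirect_pkey/gl_redirect_qkey, instead of QP 1 and IB_GSI_QKEY.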
1961 */ 1962 ibdm_alloc_send_buffers(msg); 1963 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 1964 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 1965 1966 if (gid_info->gl_redirected == B_TRUE) { 1967 if (gid_info->gl_redirect_dlid != 0) { 1968 msg->im_local_addr.ia_remote_lid = 1969 gid_info->gl_redirect_dlid; 1970 } 1971 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 1972 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 1973 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 1974 } else { 1975 msg->im_local_addr.ia_remote_qno = 1; 1976 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 1977 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 1978 } 1979 1980 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 1981 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 1982 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 1983 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 1984 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 1985 hdr->Status = 0; 1986 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 1987 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 1988 hdr->AttributeModifier = 0; 1989 1990 gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO; 1991 gid_info->gl_iou_cb_args.cb_gid_info = gid_info; 1992 gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt; 1993 1994 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 1995 &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 1996 1997 IBTF_DPRINTF_L5("ibdm", "handle_classportinfo:" 1998 "timeout %x", gid_info->gl_timeout_id); 1999 2000 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, NULL, 2001 ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) != IBMF_SUCCESS) { 2002 IBTF_DPRINTF_L2("ibdm", 2003 "\thandle_classportinfo: msg transport failed"); 2004 ibdm_ibmf_send_cb(ibmf_hdl, msg, &gid_info->gl_iou_cb_args); 2005 } 2006 (*flag) |= IBDM_IBMF_PKT_REUSED; 2007 } 2008 2009 2010 /* 2011 * ibdm_send_iounitinfo: 2012 * Sends a DM request to get IOU unitinfo. 2013 */ 2014 static int 2015 ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *gid_info) 2016 { 2017 ibmf_msg_t *msg; 2018 ib_mad_hdr_t *hdr; 2019 2020 IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: gid info 0x%p", gid_info); 2021 2022 /* 2023 * Send command to get iounitinfo attribute. Allocate a IBMF 2024 * packet and initialize the packet. 
2025 */ 2026 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, &msg) != 2027 IBMF_SUCCESS) { 2028 IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: pkt alloc fail"); 2029 return (IBDM_FAILURE); 2030 } 2031 2032 mutex_enter(&gid_info->gl_mutex); 2033 ibdm_bump_transactionID(gid_info); 2034 mutex_exit(&gid_info->gl_mutex); 2035 2036 2037 ibdm_alloc_send_buffers(msg); 2038 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2039 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2040 msg->im_local_addr.ia_remote_qno = 1; 2041 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2042 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2043 2044 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2045 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2046 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2047 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2048 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2049 hdr->Status = 0; 2050 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2051 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 2052 hdr->AttributeModifier = 0; 2053 2054 gid_info->gl_iou_cb_args.cb_gid_info = gid_info; 2055 gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt; 2056 gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO; 2057 2058 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2059 &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2060 2061 IBTF_DPRINTF_L5("ibdm", "send_iouunitinfo:" 2062 "timeout %x", gid_info->gl_timeout_id); 2063 2064 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg, 2065 NULL, ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) != 2066 IBMF_SUCCESS) { 2067 IBTF_DPRINTF_L2("ibdm", "\tsend_iounitinfo: ibmf send failed"); 2068 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, 2069 msg, &gid_info->gl_iou_cb_args); 2070 } 2071 return (IBDM_SUCCESS); 2072 } 2073 2074 /* 2075 * ibdm_handle_iounitinfo() 2076 * Invoked by the IBMF when IO Unitinfo request is completed. 
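 * The handler bumps gl_pending_cmds by one for each reported controller
 * slot plus one for the IOU DiagCode request; e.g. an IOU reporting
 * four controller slots results in gl_pending_cmds += 5, and slots that
 * turn out to be empty are decremented back in the probe loop below.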
2077 */ 2078 static void 2079 ibdm_handle_iounitinfo(ibmf_handle_t ibmf_hdl, 2080 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2081 { 2082 int ii, first = B_TRUE; 2083 int num_iocs; 2084 size_t size; 2085 uchar_t slot_info; 2086 timeout_id_t timeout_id; 2087 ib_mad_hdr_t *hdr; 2088 ibdm_ioc_info_t *ioc_info; 2089 ib_dm_io_unitinfo_t *iou_info; 2090 ib_dm_io_unitinfo_t *giou_info; 2091 ibdm_timeout_cb_args_t *cb_args; 2092 2093 IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo:" 2094 " ibmf hdl %p pkt %p gid info %p", ibmf_hdl, msg, gid_info); 2095 2096 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_IO_UNITINFO) { 2097 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: " 2098 "Unexpected response"); 2099 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2100 return; 2101 } 2102 2103 mutex_enter(&gid_info->gl_mutex); 2104 if (gid_info->gl_state != IBDM_GET_IOUNITINFO) { 2105 IBTF_DPRINTF_L4("ibdm", 2106 "\thandle_iounitinfo: DUP resp"); 2107 mutex_exit(&gid_info->gl_mutex); 2108 (*flag) = IBDM_IBMF_PKT_DUP_RESP; 2109 return; 2110 } 2111 gid_info->gl_iou_cb_args.cb_req_type = 0; 2112 if (gid_info->gl_timeout_id) { 2113 timeout_id = gid_info->gl_timeout_id; 2114 mutex_exit(&gid_info->gl_mutex); 2115 IBTF_DPRINTF_L5("ibdm", "handle_iounitinfo: " 2116 "gl_timeout_id = 0x%x", timeout_id); 2117 if (untimeout(timeout_id) == -1) { 2118 IBTF_DPRINTF_L2("ibdm", "handle_iounitinfo: " 2119 "untimeout gl_timeout_id failed"); 2120 } 2121 mutex_enter(&gid_info->gl_mutex); 2122 gid_info->gl_timeout_id = 0; 2123 } 2124 gid_info->gl_state = IBDM_GET_IOC_DETAILS; 2125 2126 iou_info = IBDM_IN_IBMFMSG2IOU(msg); 2127 ibdm_dump_iounitinfo(iou_info); 2128 num_iocs = iou_info->iou_num_ctrl_slots; 2129 /* 2130 * check if number of IOCs reported is zero? if yes, return. 2131 * when num_iocs are reported zero internal IOC database needs 2132 * to be updated. To ensure that save the number of IOCs in 2133 * the new field "gl_num_iocs". Use a new field instead of 2134 * "giou_info->iou_num_ctrl_slots" as that would prevent 2135 * an unnecessary kmem_alloc/kmem_free when num_iocs is 0. 2136 */ 2137 if (num_iocs == 0 && gid_info->gl_num_iocs == 0) { 2138 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: no IOC's"); 2139 mutex_exit(&gid_info->gl_mutex); 2140 return; 2141 } 2142 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: num_iocs = %d", num_iocs); 2143 2144 /* 2145 * if there is an existing gl_iou (IOU has been probed before) 2146 * check if the "iou_changeid" is same as saved entry in 2147 * "giou_info->iou_changeid". 2148 * (note: this logic can prevent IOC enumeration if a given 2149 * vendor doesn't support setting iou_changeid field for its IOU) 2150 * 2151 * if there is an existing gl_iou and iou_changeid has changed : 2152 * free up existing gl_iou info and its related structures. 2153 * reallocate gl_iou info all over again. 
2154 * if we donot free this up; then this leads to memory leaks 2155 */ 2156 if (gid_info->gl_iou) { 2157 giou_info = &gid_info->gl_iou->iou_info; 2158 if (iou_info->iou_changeid == giou_info->iou_changeid) { 2159 IBTF_DPRINTF_L3("ibdm", 2160 "\thandle_iounitinfo: no IOCs changed"); 2161 gid_info->gl_state = IBDM_GID_PROBING_COMPLETE; 2162 mutex_exit(&gid_info->gl_mutex); 2163 return; 2164 } 2165 if (ibdm_free_iou_info(gid_info)) { 2166 IBTF_DPRINTF_L3("ibdm", 2167 "\thandle_iounitinfo: failed to cleanup resources"); 2168 gid_info->gl_state = IBDM_GID_PROBING_COMPLETE; 2169 mutex_exit(&gid_info->gl_mutex); 2170 return; 2171 } 2172 } 2173 2174 size = sizeof (ibdm_iou_info_t) + num_iocs * sizeof (ibdm_ioc_info_t); 2175 gid_info->gl_iou = (ibdm_iou_info_t *)kmem_zalloc(size, KM_SLEEP); 2176 giou_info = &gid_info->gl_iou->iou_info; 2177 gid_info->gl_iou->iou_ioc_info = (ibdm_ioc_info_t *) 2178 ((char *)gid_info->gl_iou + sizeof (ibdm_iou_info_t)); 2179 2180 giou_info->iou_num_ctrl_slots = gid_info->gl_num_iocs = num_iocs; 2181 giou_info->iou_flag = iou_info->iou_flag; 2182 bcopy(iou_info->iou_ctrl_list, giou_info->iou_ctrl_list, 128); 2183 giou_info->iou_changeid = b2h16(iou_info->iou_changeid); 2184 gid_info->gl_pending_cmds += num_iocs; 2185 gid_info->gl_pending_cmds += 1; /* for diag code */ 2186 mutex_exit(&gid_info->gl_mutex); 2187 2188 if (ibdm_get_diagcode(gid_info, 0) != IBDM_SUCCESS) { 2189 mutex_enter(&gid_info->gl_mutex); 2190 gid_info->gl_pending_cmds--; 2191 mutex_exit(&gid_info->gl_mutex); 2192 } 2193 /* 2194 * Parallelize getting IOC controller profiles from here. 2195 * Allocate IBMF packets and send commands to get IOC profile for 2196 * each IOC present on the IOU. 2197 */ 2198 for (ii = 0; ii < num_iocs; ii++) { 2199 /* 2200 * Check whether IOC is present in the slot 2201 * Series of nibbles (in the field iou_ctrl_list) represents 2202 * a slot in the IOU. 2203 * Byte format: 76543210 2204 * Bits 0-3 of first byte represent Slot 2 2205 * bits 4-7 of first byte represent slot 1, 2206 * bits 0-3 of second byte represent slot 4 and so on 2207 * Each 4-bit nibble has the following meaning 2208 * 0x0 : IOC not installed 2209 * 0x1 : IOC is present 2210 * 0xf : Slot does not exist 2211 * and all other values are reserved. 2212 */ 2213 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii); 2214 slot_info = giou_info->iou_ctrl_list[(ii/2)]; 2215 if ((ii % 2) == 0) 2216 slot_info = (slot_info >> 4); 2217 2218 if ((slot_info & 0xf) != 1) { 2219 IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo: " 2220 "No IOC is present in the slot = %d", ii); 2221 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_FAILED; 2222 mutex_enter(&gid_info->gl_mutex); 2223 gid_info->gl_pending_cmds--; 2224 mutex_exit(&gid_info->gl_mutex); 2225 continue; 2226 } 2227 2228 mutex_enter(&gid_info->gl_mutex); 2229 ibdm_bump_transactionID(gid_info); 2230 mutex_exit(&gid_info->gl_mutex); 2231 2232 /* 2233 * Re use the already allocated packet (for IOUnitinfo) to 2234 * send the first IOC controller attribute. 
Allocate new 2235 * IBMF packets for the rest of the IOC's 2236 */ 2237 if (first != B_TRUE) { 2238 msg = NULL; 2239 if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP, 2240 &msg) != IBMF_SUCCESS) { 2241 IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo: " 2242 "IBMF packet allocation failed"); 2243 mutex_enter(&gid_info->gl_mutex); 2244 gid_info->gl_pending_cmds--; 2245 mutex_exit(&gid_info->gl_mutex); 2246 continue; 2247 } 2248 2249 } 2250 2251 /* allocate send buffers for all messages */ 2252 ibdm_alloc_send_buffers(msg); 2253 2254 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2255 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2256 if (gid_info->gl_redirected == B_TRUE) { 2257 if (gid_info->gl_redirect_dlid != 0) { 2258 msg->im_local_addr.ia_remote_lid = 2259 gid_info->gl_redirect_dlid; 2260 } 2261 msg->im_local_addr.ia_remote_qno = 2262 gid_info->gl_redirect_QP; 2263 msg->im_local_addr.ia_p_key = 2264 gid_info->gl_redirect_pkey; 2265 msg->im_local_addr.ia_q_key = 2266 gid_info->gl_redirect_qkey; 2267 } else { 2268 msg->im_local_addr.ia_remote_qno = 1; 2269 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2270 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2271 } 2272 2273 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2274 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2275 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2276 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2277 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2278 hdr->Status = 0; 2279 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2280 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 2281 hdr->AttributeModifier = h2b32(ii + 1); 2282 2283 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_INVALID; 2284 cb_args = &ioc_info->ioc_cb_args; 2285 cb_args->cb_gid_info = gid_info; 2286 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2287 cb_args->cb_req_type = IBDM_REQ_TYPE_IOCINFO; 2288 cb_args->cb_ioc_num = ii; 2289 2290 ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2291 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2292 2293 IBTF_DPRINTF_L5("ibdm", "\thandle_iounitinfo:" 2294 "timeout 0x%x, ioc_num %d", ioc_info->ioc_timeout_id, ii); 2295 2296 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, 2297 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2298 IBTF_DPRINTF_L2("ibdm", 2299 "\thandle_iounitinfo: msg transport failed"); 2300 ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args); 2301 } 2302 (*flag) |= IBDM_IBMF_PKT_REUSED; 2303 first = B_FALSE; 2304 gid_info->gl_iou->iou_niocs_probe_in_progress++; 2305 } 2306 } 2307 2308 2309 /* 2310 * ibdm_handle_ioc_profile() 2311 * Invoked by the IBMF when the IOCControllerProfile request 2312 * gets completed 2313 */ 2314 static void 2315 ibdm_handle_ioc_profile(ibmf_handle_t ibmf_hdl, 2316 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2317 { 2318 int first = B_TRUE, reprobe = 0; 2319 uint_t ii, ioc_no, srv_start; 2320 uint_t nserv_entries; 2321 timeout_id_t timeout_id; 2322 ib_mad_hdr_t *hdr; 2323 ibdm_ioc_info_t *ioc_info; 2324 ibdm_timeout_cb_args_t *cb_args; 2325 ib_dm_ioc_ctrl_profile_t *ioc, *gioc; 2326 2327 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile:" 2328 " ibmf hdl %p msg %p gid info %p", ibmf_hdl, msg, gid_info); 2329 2330 ioc = IBDM_IN_IBMFMSG2IOC(msg); 2331 /* 2332 * Check whether we know this IOC already 2333 * This will return NULL if reprobe is in progress 2334 * IBDM_IOC_STATE_REPROBE_PROGRESS will be set. 2335 * Do not hold mutexes here. 
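 * The AttributeModifier of an IOCControllerProfile response carries the
 * 1-based controller slot number (the request sets it to slot index + 1),
 * so, for example, an attribute modifier of 3 selects iou_ioc_info[2].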
2336 */ 2337 if (ibdm_is_ioc_present(ioc->ioc_guid, gid_info, flag) != NULL) { 2338 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile:" 2339 "IOC guid %llx is present", ioc->ioc_guid); 2340 return; 2341 }
2342 ioc_no = IBDM_IN_IBMFMSG_ATTRMOD(msg); 2343 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile: ioc_no = %d", ioc_no-1); 2344
2345 /* Make sure that the IOC index is within the valid range */ 2346 if (IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) { 2347 IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: " 2348 "IOC index out of range, index %d", ioc_no); 2349 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2350 return; 2351 }
2352 ioc_info = &gid_info->gl_iou->iou_ioc_info[ioc_no - 1]; 2353 ioc_info->ioc_iou_info = gid_info->gl_iou; 2354
2355 mutex_enter(&gid_info->gl_mutex); 2356 if (ioc_info->ioc_state == IBDM_IOC_STATE_REPROBE_PROGRESS) { 2357 reprobe = 1; 2358 ioc_info->ioc_prev_serv = ioc_info->ioc_serv; 2359 ioc_info->ioc_serv = NULL; 2360 ioc_info->ioc_prev_serv_cnt = 2361 ioc_info->ioc_profile.ioc_service_entries; 2362 } else if (ioc_info->ioc_state != IBDM_IOC_STATE_PROBE_INVALID) { 2363 IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: DUP response " 2364 "ioc %d, ioc_state %x", ioc_no - 1, ioc_info->ioc_state); 2365 mutex_exit(&gid_info->gl_mutex); 2366 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 2367 return; 2368 }
2369 ioc_info->ioc_cb_args.cb_req_type = 0; 2370 if (ioc_info->ioc_timeout_id) { 2371 timeout_id = ioc_info->ioc_timeout_id; 2372 mutex_exit(&gid_info->gl_mutex); 2373 IBTF_DPRINTF_L5("ibdm", "handle_ioc_profile: " 2374 "ioc_timeout_id = 0x%x", timeout_id); 2375 if (untimeout(timeout_id) == -1) { 2376 IBTF_DPRINTF_L2("ibdm", "handle_ioc_profile: " 2377 "untimeout ioc_timeout_id failed"); 2378 } 2379 mutex_enter(&gid_info->gl_mutex); 2380 ioc_info->ioc_timeout_id = 0; 2381 } 2382
2383 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_SUCCESS; 2384 if (reprobe == 0) { 2385 ioc_info->ioc_iou_guid = gid_info->gl_nodeguid; 2386 ioc_info->ioc_nodeguid = gid_info->gl_nodeguid; 2387 } 2388
2389 /* 2390 * Save all the IOC information in the global structures. 2391 * Note that the wire format is big endian and the SPARC processor is 2392 * also big endian, so there is no need to convert the data fields. 2393 * The conversion routines used below are no-ops on SPARC machines, 2394 * whereas they perform the required byte swapping on little endian 2395 * machines such as Intel processors.
2396 */ 2397 gioc = (ib_dm_ioc_ctrl_profile_t *)&ioc_info->ioc_profile; 2398
2399 /* 2400 * Restrict updates to only the port GIDs and service entries during reprobe 2401 */
2402 if (reprobe == 0) { 2403 gioc->ioc_guid = b2h64(ioc->ioc_guid); 2404 gioc->ioc_vendorid = 2405 ((b2h32(ioc->ioc_vendorid) & IB_DM_VENDORID_MASK) 2406 >> IB_DM_VENDORID_SHIFT); 2407 gioc->ioc_deviceid = b2h32(ioc->ioc_deviceid); 2408 gioc->ioc_device_ver = b2h16(ioc->ioc_device_ver); 2409 gioc->ioc_subsys_vendorid = 2410 ((b2h32(ioc->ioc_subsys_vendorid) & IB_DM_VENDORID_MASK) 2411 >> IB_DM_VENDORID_SHIFT); 2412 gioc->ioc_subsys_id = b2h32(ioc->ioc_subsys_id); 2413 gioc->ioc_io_class = b2h16(ioc->ioc_io_class); 2414 gioc->ioc_io_subclass = b2h16(ioc->ioc_io_subclass); 2415 gioc->ioc_protocol = b2h16(ioc->ioc_protocol); 2416 gioc->ioc_protocol_ver = b2h16(ioc->ioc_protocol_ver); 2417 gioc->ioc_send_msg_qdepth = 2418 b2h16(ioc->ioc_send_msg_qdepth); 2419 gioc->ioc_rdma_read_qdepth = 2420 b2h16(ioc->ioc_rdma_read_qdepth); 2421 gioc->ioc_send_msg_sz = b2h32(ioc->ioc_send_msg_sz); 2422 gioc->ioc_rdma_xfer_sz = b2h32(ioc->ioc_rdma_xfer_sz); 2423 gioc->ioc_ctrl_opcap_mask = ioc->ioc_ctrl_opcap_mask; 2424 bcopy(ioc->ioc_id_string, gioc->ioc_id_string, 2425 IB_DM_IOC_ID_STRING_LEN); 2426
2427 ioc_info->ioc_iou_diagcode = gid_info->gl_iou->iou_diagcode; 2428 ioc_info->ioc_iou_dc_valid = gid_info->gl_iou->iou_dc_valid; 2429 ioc_info->ioc_diagdeviceid = (IB_DM_IOU_DEVICEID_MASK & 2430 gid_info->gl_iou->iou_info.iou_flag) ? B_TRUE : B_FALSE; 2431
2432 if (ioc_info->ioc_diagdeviceid == B_TRUE) 2433 gid_info->gl_pending_cmds++; 2434 }
2435 gioc->ioc_service_entries = ioc->ioc_service_entries; 2436 gid_info->gl_pending_cmds += (gioc->ioc_service_entries/4); 2437 if (gioc->ioc_service_entries % 4) 2438 gid_info->gl_pending_cmds++; 2439
2440 mutex_exit(&gid_info->gl_mutex); 2441 2442 ibdm_dump_ioc_profile(gioc); 2443
2444 if ((ioc_info->ioc_diagdeviceid == B_TRUE) && (reprobe == 0)) { 2445 if (ibdm_get_diagcode(gid_info, ioc_no) != IBDM_SUCCESS) { 2446 mutex_enter(&gid_info->gl_mutex); 2447 gid_info->gl_pending_cmds--; 2448 mutex_exit(&gid_info->gl_mutex); 2449 } 2450 }
2451 ioc_info->ioc_serv = (ibdm_srvents_info_t *)kmem_zalloc( 2452 (gioc->ioc_service_entries * sizeof (ibdm_srvents_info_t)), 2453 KM_SLEEP); 2454
2455 /* 2456 * A single request can fetch at most four service entries. If the 2457 * number of service entries is more than four, calculate the number 2458 * of requests needed and send them in parallel.
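 * Illustrative example (assuming IBDM_MAX_SERV_ENTRIES_PER_REQ is 4, as
 * noted above): an IOC with 10 service entries results in three
 * ServiceEntries requests covering entries 0-3, 4-7 and 8-9, with
 * cb_srvents_start/cb_srvents_end recording the range of each request.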
2459 */ 2460 nserv_entries = ioc->ioc_service_entries; 2461 ii = 0; 2462 while (nserv_entries) { 2463 mutex_enter(&gid_info->gl_mutex); 2464 ibdm_bump_transactionID(gid_info); 2465 mutex_exit(&gid_info->gl_mutex); 2466 2467 if (first != B_TRUE) { 2468 if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP, 2469 &msg) != IBMF_SUCCESS) { 2470 continue; 2471 } 2472 2473 } 2474 ibdm_alloc_send_buffers(msg); 2475 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2476 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2477 if (gid_info->gl_redirected == B_TRUE) { 2478 if (gid_info->gl_redirect_dlid != 0) { 2479 msg->im_local_addr.ia_remote_lid = 2480 gid_info->gl_redirect_dlid; 2481 } 2482 msg->im_local_addr.ia_remote_qno = 2483 gid_info->gl_redirect_QP; 2484 msg->im_local_addr.ia_p_key = 2485 gid_info->gl_redirect_pkey; 2486 msg->im_local_addr.ia_q_key = 2487 gid_info->gl_redirect_qkey; 2488 } else { 2489 msg->im_local_addr.ia_remote_qno = 1; 2490 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2491 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2492 } 2493 2494 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2495 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2496 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2497 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2498 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2499 hdr->Status = 0; 2500 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2501 hdr->AttributeID = h2b16(IB_DM_ATTR_SERVICE_ENTRIES); 2502 2503 srv_start = ii * 4; 2504 cb_args = &ioc_info->ioc_serv[srv_start].se_cb_args; 2505 cb_args->cb_gid_info = gid_info; 2506 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2507 cb_args->cb_req_type = IBDM_REQ_TYPE_SRVENTS; 2508 cb_args->cb_srvents_start = srv_start; 2509 cb_args->cb_ioc_num = ioc_no - 1; 2510 2511 if (nserv_entries >= IBDM_MAX_SERV_ENTRIES_PER_REQ) { 2512 nserv_entries -= IBDM_MAX_SERV_ENTRIES_PER_REQ; 2513 cb_args->cb_srvents_end = (cb_args->cb_srvents_start + 2514 IBDM_MAX_SERV_ENTRIES_PER_REQ - 1); 2515 } else { 2516 cb_args->cb_srvents_end = 2517 (cb_args->cb_srvents_start + nserv_entries - 1); 2518 nserv_entries = 0; 2519 } 2520 ibdm_fill_srv_attr_mod(hdr, cb_args); 2521 2522 ioc_info->ioc_serv[srv_start].se_timeout_id = timeout( 2523 ibdm_pkt_timeout_hdlr, cb_args, 2524 IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2525 2526 IBTF_DPRINTF_L5("ibdm", "\thandle_ioc_profile:" 2527 "timeout %x, ioc %d srv %d", 2528 ioc_info->ioc_serv[srv_start].se_timeout_id, 2529 ioc_no - 1, srv_start); 2530 2531 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, 2532 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2533 IBTF_DPRINTF_L2("ibdm", 2534 "\thandle_ioc_profile: msg send failed"); 2535 ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args); 2536 } 2537 (*flag) |= IBDM_IBMF_PKT_REUSED; 2538 first = B_FALSE; 2539 ii++; 2540 } 2541 } 2542 2543 2544 /* 2545 * ibdm_handle_srventry_mad() 2546 */ 2547 static void 2548 ibdm_handle_srventry_mad(ibmf_msg_t *msg, 2549 ibdm_dp_gidinfo_t *gid_info, int *flag) 2550 { 2551 uint_t ii, ioc_no, attrmod; 2552 uint_t nentries, start, end; 2553 timeout_id_t timeout_id; 2554 ib_dm_srv_t *srv_ents; 2555 ibdm_ioc_info_t *ioc_info; 2556 ibdm_srvents_info_t *gsrv_ents; 2557 2558 IBTF_DPRINTF_L4("ibdm", "\thandle_srventry_mad:" 2559 " IBMF msg %p gid info %p", msg, gid_info); 2560 2561 srv_ents = IBDM_IN_IBMFMSG2SRVENT(msg); 2562 /* 2563 * Get the start and end index of the service entries 2564 * Upper 16 bits identify the IOC 2565 * Lower 16 bits specify the range of service entries 2566 * LSB specifies (Big endian) end of the range 2567 * MSB 
specifies (Big endian) start of the range 2568 */ 2569 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg); 2570 ioc_no = ((attrmod >> 16) & IBDM_16_BIT_MASK); 2571 end = ((attrmod >> 8) & IBDM_8_BIT_MASK); 2572 start = (attrmod & IBDM_8_BIT_MASK); 2573 2574 /* Make sure that IOC index is with the valid range */ 2575 if ((ioc_no < 1) | 2576 (ioc_no > gid_info->gl_iou->iou_info.iou_num_ctrl_slots)) { 2577 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 2578 "IOC index Out of range, index %d", ioc_no); 2579 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2580 return; 2581 } 2582 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1)); 2583 2584 /* 2585 * Make sure that the "start" and "end" service indexes are 2586 * with in the valid range 2587 */ 2588 nentries = ioc_info->ioc_profile.ioc_service_entries; 2589 if ((start > end) | (start >= nentries) | (end >= nentries)) { 2590 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 2591 "Attr modifier 0x%x, #Serv entries %d", attrmod, nentries); 2592 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2593 return; 2594 } 2595 gsrv_ents = &ioc_info->ioc_serv[start]; 2596 mutex_enter(&gid_info->gl_mutex); 2597 if (gsrv_ents->se_state != IBDM_SE_INVALID) { 2598 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 2599 "already known, ioc %d, srv %d, se_state %x", 2600 ioc_no - 1, start, gsrv_ents->se_state); 2601 mutex_exit(&gid_info->gl_mutex); 2602 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 2603 return; 2604 } 2605 ioc_info->ioc_serv[start].se_cb_args.cb_req_type = 0; 2606 if (ioc_info->ioc_serv[start].se_timeout_id) { 2607 IBTF_DPRINTF_L2("ibdm", 2608 "\thandle_srventry_mad: ioc %d start %d", ioc_no, start); 2609 timeout_id = ioc_info->ioc_serv[start].se_timeout_id; 2610 mutex_exit(&gid_info->gl_mutex); 2611 IBTF_DPRINTF_L5("ibdm", "handle_srverntry_mad: " 2612 "se_timeout_id = 0x%x", timeout_id); 2613 if (untimeout(timeout_id) == -1) { 2614 IBTF_DPRINTF_L2("ibdm", "handle_srventry_mad: " 2615 "untimeout se_timeout_id failed"); 2616 } 2617 mutex_enter(&gid_info->gl_mutex); 2618 ioc_info->ioc_serv[start].se_timeout_id = 0; 2619 } 2620 2621 gsrv_ents->se_state = IBDM_SE_VALID; 2622 mutex_exit(&gid_info->gl_mutex); 2623 for (ii = start; ii <= end; ii++, srv_ents++, gsrv_ents++) { 2624 gsrv_ents->se_attr.srv_id = b2h64(srv_ents->srv_id); 2625 bcopy(srv_ents->srv_name, 2626 gsrv_ents->se_attr.srv_name, IB_DM_MAX_SVC_NAME_LEN); 2627 ibdm_dump_service_entries(&gsrv_ents->se_attr); 2628 } 2629 } 2630 2631 2632 /* 2633 * ibdm_get_diagcode: 2634 * Send request to get IOU/IOC diag code 2635 * Returns IBDM_SUCCESS/IBDM_FAILURE 2636 */ 2637 static int 2638 ibdm_get_diagcode(ibdm_dp_gidinfo_t *gid_info, int attr) 2639 { 2640 ibmf_msg_t *msg; 2641 ib_mad_hdr_t *hdr; 2642 ibdm_ioc_info_t *ioc; 2643 ibdm_timeout_cb_args_t *cb_args; 2644 timeout_id_t *timeout_id; 2645 2646 IBTF_DPRINTF_L4("ibdm", "\tget_diagcode: gid info %p, attr = %d", 2647 gid_info, attr); 2648 2649 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 2650 &msg) != IBMF_SUCCESS) { 2651 IBTF_DPRINTF_L4("ibdm", "\tget_diagcode: pkt alloc fail"); 2652 return (IBDM_FAILURE); 2653 } 2654 2655 ibdm_alloc_send_buffers(msg); 2656 2657 mutex_enter(&gid_info->gl_mutex); 2658 ibdm_bump_transactionID(gid_info); 2659 mutex_exit(&gid_info->gl_mutex); 2660 2661 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2662 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2663 if (gid_info->gl_redirected == B_TRUE) { 2664 if (gid_info->gl_redirect_dlid != 0) { 2665 msg->im_local_addr.ia_remote_lid = 2666 gid_info->gl_redirect_dlid; 2667 } 2668 2669 
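/*
 * The DM agent asked for redirection earlier; address this MAD to the
 * cached redirect QP, P_Key and Q_Key instead of the GSI defaults.
 */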
msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 2670 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 2671 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 2672 } else { 2673 msg->im_local_addr.ia_remote_qno = 1; 2674 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2675 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2676 } 2677 2678 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2679 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2680 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2681 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2682 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2683 hdr->Status = 0; 2684 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2685 2686 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 2687 hdr->AttributeModifier = h2b32(attr); 2688 2689 if (attr == 0) { 2690 cb_args = &gid_info->gl_iou_cb_args; 2691 gid_info->gl_iou->iou_dc_valid = B_FALSE; 2692 cb_args->cb_ioc_num = 0; 2693 cb_args->cb_req_type = IBDM_REQ_TYPE_IOU_DIAGCODE; 2694 timeout_id = &gid_info->gl_timeout_id; 2695 } else { 2696 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attr - 1)); 2697 ioc->ioc_dc_valid = B_FALSE; 2698 cb_args = &ioc->ioc_dc_cb_args; 2699 cb_args->cb_ioc_num = attr - 1; 2700 cb_args->cb_req_type = IBDM_REQ_TYPE_IOC_DIAGCODE; 2701 timeout_id = &ioc->ioc_dc_timeout_id; 2702 } 2703 cb_args->cb_gid_info = gid_info; 2704 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2705 cb_args->cb_srvents_start = 0; 2706 2707 2708 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2709 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2710 2711 IBTF_DPRINTF_L5("ibdm", "\tget_diagcode:" 2712 "timeout %x, ioc %d", *timeout_id, cb_args->cb_ioc_num); 2713 2714 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 2715 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2716 IBTF_DPRINTF_L2("ibdm", "\tget_diagcode: ibmf send failed"); 2717 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 2718 } 2719 return (IBDM_SUCCESS); 2720 } 2721 2722 /* 2723 * ibdm_handle_diagcode: 2724 * Process the DiagCode MAD response and update local DM 2725 * data structure. 
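 * The AttributeModifier identifies the target: 0 means the IOU DiagCode,
 * while a non-zero value N refers to the IOC in controller slot N
 * (stored at iou_ioc_info[N - 1]).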
2726 */ 2727 static void 2728 ibdm_handle_diagcode(ibmf_msg_t *ibmf_msg, 2729 ibdm_dp_gidinfo_t *gid_info, int *flag) 2730 { 2731 uint16_t attrmod, *diagcode; 2732 ibdm_iou_info_t *iou; 2733 ibdm_ioc_info_t *ioc; 2734 timeout_id_t timeout_id; 2735 ibdm_timeout_cb_args_t *cb_args; 2736 2737 diagcode = (uint16_t *)ibmf_msg->im_msgbufs_recv.im_bufs_cl_data; 2738 2739 mutex_enter(&gid_info->gl_mutex); 2740 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(ibmf_msg); 2741 iou = gid_info->gl_iou; 2742 if (attrmod == 0) { 2743 if (iou->iou_dc_valid != B_FALSE) { 2744 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 2745 IBTF_DPRINTF_L4("ibdm", 2746 "\thandle_diagcode: Duplicate IOU DiagCode"); 2747 mutex_exit(&gid_info->gl_mutex); 2748 return; 2749 } 2750 cb_args = &gid_info->gl_iou_cb_args; 2751 cb_args->cb_req_type = 0; 2752 iou->iou_diagcode = b2h16(*diagcode); 2753 iou->iou_dc_valid = B_TRUE; 2754 if (gid_info->gl_timeout_id) { 2755 timeout_id = gid_info->gl_timeout_id; 2756 mutex_exit(&gid_info->gl_mutex); 2757 IBTF_DPRINTF_L5("ibdm", "\thandle_diagcode: " 2758 "gl_timeout_id = 0x%x", timeout_id); 2759 if (untimeout(timeout_id) == -1) { 2760 IBTF_DPRINTF_L2("ibdm", "handle_diagcode: " 2761 "untimeout gl_timeout_id failed"); 2762 } 2763 mutex_enter(&gid_info->gl_mutex); 2764 gid_info->gl_timeout_id = 0; 2765 } 2766 } else { 2767 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod - 1)); 2768 if (ioc->ioc_dc_valid != B_FALSE) { 2769 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 2770 IBTF_DPRINTF_L4("ibdm", 2771 "\thandle_diagcode: Duplicate IOC DiagCode"); 2772 mutex_exit(&gid_info->gl_mutex); 2773 return; 2774 } 2775 cb_args = &ioc->ioc_dc_cb_args; 2776 cb_args->cb_req_type = 0; 2777 ioc->ioc_diagcode = b2h16(*diagcode); 2778 ioc->ioc_dc_valid = B_TRUE; 2779 timeout_id = iou->iou_ioc_info[attrmod - 1].ioc_dc_timeout_id; 2780 if (timeout_id) { 2781 mutex_exit(&gid_info->gl_mutex); 2782 IBTF_DPRINTF_L5("ibdm", "handle_diagcode: " 2783 "timeout_id = 0x%x", timeout_id); 2784 if (untimeout(timeout_id) == -1) { 2785 IBTF_DPRINTF_L2("ibdm", "\thandle_diagcode: " 2786 "untimeout ioc_dc_timeout_id failed"); 2787 } 2788 mutex_enter(&gid_info->gl_mutex); 2789 iou->iou_ioc_info[attrmod - 1].ioc_dc_timeout_id = 0; 2790 } 2791 } 2792 mutex_exit(&gid_info->gl_mutex); 2793 2794 IBTF_DPRINTF_L4("ibdm", "\thandle_diagcode: DiagCode : 0x%x" 2795 "attrmod : 0x%x", b2h16(*diagcode), attrmod); 2796 } 2797 2798 2799 /* 2800 * ibdm_is_ioc_present() 2801 * Return ibdm_ioc_info_t if IOC guid is found in the global gid list 2802 */ 2803 static ibdm_ioc_info_t * 2804 ibdm_is_ioc_present(ib_guid_t ioc_guid, 2805 ibdm_dp_gidinfo_t *gid_info, int *flag) 2806 { 2807 int ii; 2808 ibdm_ioc_info_t *ioc; 2809 ibdm_dp_gidinfo_t *head; 2810 ib_dm_io_unitinfo_t *iou; 2811 2812 mutex_enter(&ibdm.ibdm_mutex); 2813 head = ibdm.ibdm_dp_gidlist_head; 2814 while (head) { 2815 mutex_enter(&head->gl_mutex); 2816 if (head->gl_iou == NULL) { 2817 mutex_exit(&head->gl_mutex); 2818 head = head->gl_next; 2819 continue; 2820 } 2821 iou = &head->gl_iou->iou_info; 2822 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 2823 ioc = IBDM_GIDINFO2IOCINFO(head, ii); 2824 if ((ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) && 2825 (ioc->ioc_profile.ioc_guid == ioc_guid)) { 2826 if (gid_info == head) { 2827 *flag |= IBDM_IBMF_PKT_DUP_RESP; 2828 } else if (ibdm_check_dgid(head->gl_dgid_lo, 2829 head->gl_dgid_hi) != NULL) { 2830 IBTF_DPRINTF_L4("ibdm", "\tis_ioc_" 2831 "present: gid not present"); 2832 ibdm_add_to_gl_gid(gid_info, head); 2833 } 2834 mutex_exit(&head->gl_mutex); 2835 
mutex_exit(&ibdm.ibdm_mutex); 2836 return (ioc); 2837 } 2838 } 2839 mutex_exit(&head->gl_mutex); 2840 head = head->gl_next; 2841 } 2842 mutex_exit(&ibdm.ibdm_mutex); 2843 return (NULL); 2844 } 2845 2846 2847 /* 2848 * ibdm_ibmf_send_cb() 2849 * IBMF invokes this callback routine after posting the DM MAD to 2850 * the HCA. 2851 */ 2852 /*ARGSUSED*/ 2853 static void 2854 ibdm_ibmf_send_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *ibmf_msg, void *arg) 2855 { 2856 ibdm_dump_ibmf_msg(ibmf_msg, 1); 2857 ibdm_free_send_buffers(ibmf_msg); 2858 if (ibmf_free_msg(ibmf_hdl, &ibmf_msg) != IBMF_SUCCESS) { 2859 IBTF_DPRINTF_L4("ibdm", 2860 "\tibmf_send_cb: IBMF free msg failed"); 2861 } 2862 } 2863 2864 2865 /* 2866 * ibdm_ibmf_recv_cb() 2867 * Invoked by the IBMF when a response to the one of the DM requests 2868 * is received. 2869 */ 2870 /*ARGSUSED*/ 2871 static void 2872 ibdm_ibmf_recv_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msg, void *arg) 2873 { 2874 ibdm_taskq_args_t *taskq_args; 2875 2876 /* 2877 * If the taskq enable is set then dispatch a taskq to process 2878 * the MAD, otherwise just process it on this thread 2879 */ 2880 if (ibdm_taskq_enable != IBDM_ENABLE_TASKQ_HANDLING) { 2881 ibdm_process_incoming_mad(ibmf_hdl, msg, arg); 2882 return; 2883 } 2884 2885 /* 2886 * create a taskq and dispatch it to process the incoming MAD 2887 */ 2888 taskq_args = kmem_alloc(sizeof (ibdm_taskq_args_t), KM_NOSLEEP); 2889 if (taskq_args == NULL) { 2890 IBTF_DPRINTF_L2("ibdm", "ibmf_recv_cb: kmem_alloc failed for" 2891 "taskq_args"); 2892 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 2893 IBTF_DPRINTF_L4("ibmf_recv_cb", 2894 "\tibmf_recv_cb: IBMF free msg failed"); 2895 } 2896 return; 2897 } 2898 taskq_args->tq_ibmf_handle = ibmf_hdl; 2899 taskq_args->tq_ibmf_msg = msg; 2900 taskq_args->tq_args = arg; 2901 2902 if (taskq_dispatch(system_taskq, ibdm_recv_incoming_mad, taskq_args, 2903 TQ_NOSLEEP) == 0) { 2904 IBTF_DPRINTF_L2("ibdm", "ibmf_recv_cb: taskq_dispatch failed"); 2905 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 2906 IBTF_DPRINTF_L4("ibmf_recv_cb", 2907 "\tibmf_recv_cb: IBMF free msg failed"); 2908 } 2909 kmem_free(taskq_args, sizeof (ibdm_taskq_args_t)); 2910 return; 2911 } 2912 2913 /* taskq_args are deleted in ibdm_recv_incoming_mad() */ 2914 } 2915 2916 2917 void 2918 ibdm_recv_incoming_mad(void *args) 2919 { 2920 ibdm_taskq_args_t *taskq_args; 2921 2922 taskq_args = (ibdm_taskq_args_t *)args; 2923 2924 IBTF_DPRINTF_L4("ibdm", "\tibdm_recv_incoming_mad: " 2925 "Processing incoming MAD via taskq"); 2926 2927 ibdm_process_incoming_mad(taskq_args->tq_ibmf_handle, 2928 taskq_args->tq_ibmf_msg, taskq_args->tq_args); 2929 2930 kmem_free(taskq_args, sizeof (ibdm_taskq_args_t)); 2931 } 2932 2933 2934 /* 2935 * Calls ibdm_process_incoming_mad with all function arguments extracted 2936 * from args 2937 */ 2938 /*ARGSUSED*/ 2939 static void 2940 ibdm_process_incoming_mad(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msg, void *arg) 2941 { 2942 int flag = 0; 2943 int ret; 2944 uint64_t transaction_id; 2945 ib_mad_hdr_t *hdr; 2946 ibdm_dp_gidinfo_t *gid_info = NULL; 2947 2948 IBTF_DPRINTF_L4("ibdm", 2949 "\tprocess_incoming_mad: ibmf hdl %p pkt %p", ibmf_hdl, msg); 2950 ibdm_dump_ibmf_msg(msg, 0); 2951 2952 /* 2953 * IBMF calls this routine for every DM MAD that arrives at this port. 2954 * But we handle only the responses for requests we sent. We drop all 2955 * the DM packets that does not have response bit set in the MAD 2956 * header(this eliminates all the requests sent to this port). 
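 * A response is matched back to its GID by comparing its transaction ID
 * with each gid_info's gl_transactionID under IBDM_GID_TRANSACTIONID_MASK;
 * all requests for a GID are tagged from that GID's own transaction ID
 * range, so the masked compare identifies the owning GID.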
2957 * We handle only DM class version 1 MAD's 2958 */ 2959 hdr = IBDM_IN_IBMFMSG_MADHDR(msg); 2960 if (ibdm_verify_mad_status(hdr) != IBDM_SUCCESS) { 2961 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 2962 IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: " 2963 "IBMF free msg failed DM request drop it"); 2964 } 2965 return; 2966 } 2967 2968 transaction_id = b2h64(hdr->TransactionID); 2969 2970 mutex_enter(&ibdm.ibdm_mutex); 2971 gid_info = ibdm.ibdm_dp_gidlist_head; 2972 while (gid_info) { 2973 if ((gid_info->gl_transactionID & 2974 IBDM_GID_TRANSACTIONID_MASK) == 2975 (transaction_id & IBDM_GID_TRANSACTIONID_MASK)) 2976 break; 2977 gid_info = gid_info->gl_next; 2978 } 2979 mutex_exit(&ibdm.ibdm_mutex); 2980 2981 if (gid_info == NULL) { 2982 /* Drop the packet */ 2983 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: transaction ID" 2984 " does not match: 0x%llx", transaction_id); 2985 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 2986 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 2987 "IBMF free msg failed DM request drop it"); 2988 } 2989 return; 2990 } 2991 2992 /* Handle redirection for all the MAD's, except ClassPortInfo */ 2993 if (((IBDM_IN_IBMFMSG_STATUS(msg) & MAD_STATUS_REDIRECT_REQUIRED)) && 2994 (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO)) { 2995 ret = ibdm_handle_redirection(msg, gid_info, &flag); 2996 if (ret == IBDM_SUCCESS) { 2997 return; 2998 } 2999 } else { 3000 uint_t gl_state; 3001 3002 mutex_enter(&gid_info->gl_mutex); 3003 gl_state = gid_info->gl_state; 3004 mutex_exit(&gid_info->gl_mutex); 3005 3006 switch (gl_state) { 3007 case IBDM_GET_CLASSPORTINFO: 3008 ibdm_handle_classportinfo( 3009 ibmf_hdl, msg, gid_info, &flag); 3010 break; 3011 3012 case IBDM_GET_IOUNITINFO: 3013 ibdm_handle_iounitinfo(ibmf_hdl, msg, gid_info, &flag); 3014 break; 3015 3016 case IBDM_GET_IOC_DETAILS: 3017 switch (IBDM_IN_IBMFMSG_ATTR(msg)) { 3018 3019 case IB_DM_ATTR_SERVICE_ENTRIES: 3020 ibdm_handle_srventry_mad(msg, gid_info, &flag); 3021 break; 3022 3023 case IB_DM_ATTR_IOC_CTRL_PROFILE: 3024 ibdm_handle_ioc_profile( 3025 ibmf_hdl, msg, gid_info, &flag); 3026 break; 3027 3028 case IB_DM_ATTR_DIAG_CODE: 3029 ibdm_handle_diagcode(msg, gid_info, &flag); 3030 break; 3031 3032 default: 3033 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3034 "Error state, wrong attribute :-("); 3035 (void) ibmf_free_msg(ibmf_hdl, &msg); 3036 return; 3037 } 3038 break; 3039 default: 3040 IBTF_DPRINTF_L2("ibdm", 3041 "process_incoming_mad: Dropping the packet" 3042 " gl_state %x", gl_state); 3043 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3044 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3045 "IBMF free msg failed DM request drop it"); 3046 } 3047 return; 3048 } 3049 } 3050 3051 if ((flag & IBDM_IBMF_PKT_DUP_RESP) || 3052 (flag & IBDM_IBMF_PKT_UNEXP_RESP)) { 3053 IBTF_DPRINTF_L2("ibdm", 3054 "\tprocess_incoming_mad:Dup/unexp resp : 0x%x", flag); 3055 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3056 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3057 "IBMF free msg failed DM request drop it"); 3058 } 3059 return; 3060 } 3061 3062 mutex_enter(&gid_info->gl_mutex); 3063 if (gid_info->gl_pending_cmds < 1) { 3064 IBTF_DPRINTF_L2("ibdm", 3065 "\tprocess_incoming_mad: pending commands negative"); 3066 } 3067 if (--gid_info->gl_pending_cmds) { 3068 IBTF_DPRINTF_L4("ibdm", "\tprocess_incoming_mad: " 3069 "gid_info %p pending cmds %d", 3070 gid_info, gid_info->gl_pending_cmds); 3071 mutex_exit(&gid_info->gl_mutex); 3072 } else { 3073 IBTF_DPRINTF_L4("ibdm", 
"\tprocess_incoming_mad: Probing DONE"); 3074 gid_info->gl_state = IBDM_GID_PROBING_COMPLETE; 3075 mutex_exit(&gid_info->gl_mutex); 3076 ibdm_notify_newgid_iocs(gid_info); 3077 mutex_enter(&ibdm.ibdm_mutex); 3078 if (--ibdm.ibdm_ngid_probes_in_progress == 0) { 3079 IBTF_DPRINTF_L4("ibdm", 3080 "\tprocess_incoming_mad: Wakeup"); 3081 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS; 3082 cv_broadcast(&ibdm.ibdm_probe_cv); 3083 } 3084 mutex_exit(&ibdm.ibdm_mutex); 3085 } 3086 3087 /* 3088 * Do not deallocate the IBMF packet if atleast one request 3089 * is posted. IBMF packet is reused. 3090 */ 3091 if (!(flag & IBDM_IBMF_PKT_REUSED)) { 3092 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3093 IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: " 3094 "IBMF free msg failed DM request drop it"); 3095 } 3096 } 3097 } 3098 3099 3100 /* 3101 * ibdm_verify_mad_status() 3102 * Verifies the MAD status 3103 * Returns IBDM_SUCCESS if status is correct 3104 * Returns IBDM_FAILURE for bogus MAD status 3105 */ 3106 static int 3107 ibdm_verify_mad_status(ib_mad_hdr_t *hdr) 3108 { 3109 int ret = 0; 3110 3111 if ((hdr->R_Method != IB_DM_DEVMGT_METHOD_GET_RESP) || 3112 (hdr->ClassVersion != IB_DM_CLASS_VERSION_1)) { 3113 return (IBDM_FAILURE); 3114 } 3115 3116 if (b2h16(hdr->Status) == 0) 3117 ret = IBDM_SUCCESS; 3118 else if ((b2h16(hdr->Status) & 0x1f) == MAD_STATUS_REDIRECT_REQUIRED) 3119 ret = IBDM_SUCCESS; 3120 else { 3121 IBTF_DPRINTF_L4("ibdm", 3122 "\tverify_mad_status: Stauts : 0x%x", b2h16(hdr->Status)); 3123 ret = IBDM_FAILURE; 3124 } 3125 return (ret); 3126 } 3127 3128 3129 3130 /* 3131 * ibdm_handle_redirection() 3132 * Returns IBDM_SUCCESS/IBDM_FAILURE 3133 */ 3134 static int 3135 ibdm_handle_redirection(ibmf_msg_t *msg, 3136 ibdm_dp_gidinfo_t *gid_info, int *flag) 3137 { 3138 int attrmod, ioc_no, start; 3139 void *data; 3140 timeout_id_t *timeout_id; 3141 ib_mad_hdr_t *hdr; 3142 ibdm_ioc_info_t *ioc = NULL; 3143 ibdm_timeout_cb_args_t *cb_args; 3144 ibdm_mad_classportinfo_t *cpi; 3145 3146 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection: Enter"); 3147 mutex_enter(&gid_info->gl_mutex); 3148 switch (gid_info->gl_state) { 3149 case IBDM_GET_IOUNITINFO: 3150 cb_args = &gid_info->gl_iou_cb_args; 3151 timeout_id = &gid_info->gl_timeout_id; 3152 break; 3153 3154 case IBDM_GET_IOC_DETAILS: 3155 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg); 3156 switch (IBDM_IN_IBMFMSG_ATTR(msg)) { 3157 3158 case IB_DM_ATTR_DIAG_CODE: 3159 if (attrmod == 0) { 3160 cb_args = &gid_info->gl_iou_cb_args; 3161 timeout_id = &gid_info->gl_timeout_id; 3162 break; 3163 } 3164 if (IBDM_IS_IOC_NUM_INVALID(attrmod, gid_info)) { 3165 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3166 "IOC# Out of range %d", attrmod); 3167 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3168 mutex_exit(&gid_info->gl_mutex); 3169 return (IBDM_FAILURE); 3170 } 3171 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod -1)); 3172 cb_args = &ioc->ioc_dc_cb_args; 3173 timeout_id = &ioc->ioc_dc_timeout_id; 3174 break; 3175 3176 case IB_DM_ATTR_IOC_CTRL_PROFILE: 3177 if (IBDM_IS_IOC_NUM_INVALID(attrmod, gid_info)) { 3178 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3179 "IOC# Out of range %d", attrmod); 3180 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3181 mutex_exit(&gid_info->gl_mutex); 3182 return (IBDM_FAILURE); 3183 } 3184 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod -1)); 3185 cb_args = &ioc->ioc_cb_args; 3186 timeout_id = &ioc->ioc_timeout_id; 3187 break; 3188 3189 case IB_DM_ATTR_SERVICE_ENTRIES: 3190 ioc_no = ((attrmod >> 16) & IBDM_16_BIT_MASK); 3191 if 
(IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) { 3192 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3193 "IOC# Out of range %d", ioc_no); 3194 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3195 mutex_exit(&gid_info->gl_mutex); 3196 return (IBDM_FAILURE); 3197 } 3198 start = (attrmod & IBDM_8_BIT_MASK); 3199 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1)); 3200 if (start > ioc->ioc_profile.ioc_service_entries) { 3201 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3202 " SE index Out of range %d", start); 3203 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3204 mutex_exit(&gid_info->gl_mutex); 3205 return (IBDM_FAILURE); 3206 } 3207 cb_args = &ioc->ioc_serv[start].se_cb_args; 3208 timeout_id = &ioc->ioc_serv[start].se_timeout_id; 3209 break; 3210 3211 default: 3212 /* ERROR State */ 3213 IBTF_DPRINTF_L2("ibdm", 3214 "\thandle_redirection: wrong attribute :-("); 3215 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3216 mutex_exit(&gid_info->gl_mutex); 3217 return (IBDM_FAILURE); 3218 } 3219 break; 3220 default: 3221 /* ERROR State */ 3222 IBTF_DPRINTF_L2("ibdm", 3223 "\thandle_redirection: Error state :-("); 3224 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3225 mutex_exit(&gid_info->gl_mutex); 3226 return (IBDM_FAILURE); 3227 } 3228 if ((*timeout_id) != 0) { 3229 mutex_exit(&gid_info->gl_mutex); 3230 if (untimeout(*timeout_id) == -1) { 3231 IBTF_DPRINTF_L2("ibdm", "\thandle_redirection: " 3232 "untimeout failed %x", *timeout_id); 3233 } else { 3234 IBTF_DPRINTF_L5("ibdm", 3235 "\thandle_redirection: timeout %x", *timeout_id); 3236 } 3237 mutex_enter(&gid_info->gl_mutex); 3238 *timeout_id = 0; 3239 } 3240 3241 data = msg->im_msgbufs_recv.im_bufs_cl_data; 3242 cpi = (ibdm_mad_classportinfo_t *)data; 3243 3244 gid_info->gl_resp_timeout = 3245 (b2h32(cpi->RespTimeValue) & 0x1F); 3246 3247 gid_info->gl_redirected = B_TRUE; 3248 gid_info->gl_redirect_dlid = b2h16(cpi->RedirectLID); 3249 gid_info->gl_redirect_QP = (b2h32(cpi->RedirectQP) & 0xffffff); 3250 gid_info->gl_redirect_pkey = b2h16(cpi->RedirectP_Key); 3251 gid_info->gl_redirect_qkey = b2h32(cpi->RedirectQ_Key); 3252 gid_info->gl_redirectGID_hi = b2h64(cpi->RedirectGID_hi); 3253 gid_info->gl_redirectGID_lo = b2h64(cpi->RedirectGID_lo); 3254 3255 if (gid_info->gl_redirect_dlid != 0) { 3256 msg->im_local_addr.ia_remote_lid = 3257 gid_info->gl_redirect_dlid; 3258 } 3259 ibdm_bump_transactionID(gid_info); 3260 mutex_exit(&gid_info->gl_mutex); 3261 3262 ibdm_alloc_send_buffers(msg); 3263 3264 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 3265 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 3266 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 3267 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 3268 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 3269 hdr->Status = 0; 3270 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 3271 hdr->AttributeID = 3272 msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeID; 3273 hdr->AttributeModifier = 3274 msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeModifier; 3275 3276 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 3277 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 3278 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 3279 3280 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 3281 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 3282 3283 IBTF_DPRINTF_L5("ibdm", "\thandle_redirect:" 3284 "timeout %x", *timeout_id); 3285 3286 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 3287 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 3288 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection:" 3289 "message transport failed"); 3290 
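/*
 * The transport failed; invoke the send completion callback directly
 * so that the send buffers and the IBMF message get freed.
 */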
ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 3291 } 3292 (*flag) |= IBDM_IBMF_PKT_REUSED; 3293 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection: Exit"); 3294 return (IBDM_SUCCESS); 3295 } 3296 3297 3298 /* 3299 * ibdm_pkt_timeout_hdlr 3300 * This timeout handler is registed for every IBMF packet that is 3301 * sent through the IBMF. It gets called when no response is received 3302 * within the specified time for the packet. No retries for the failed 3303 * commands currently. Drops the failed IBMF packet and update the 3304 * pending list commands. 3305 */ 3306 static void 3307 ibdm_pkt_timeout_hdlr(void *arg) 3308 { 3309 int probe_done = B_FALSE; 3310 ibdm_iou_info_t *iou; 3311 ibdm_ioc_info_t *ioc; 3312 ibdm_timeout_cb_args_t *cb_args = arg; 3313 ibdm_dp_gidinfo_t *gid_info; 3314 int srv_ent; 3315 uint_t new_gl_state; 3316 3317 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: gid_info: %p " 3318 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3319 cb_args->cb_req_type, cb_args->cb_ioc_num, 3320 cb_args->cb_srvents_start); 3321 3322 gid_info = cb_args->cb_gid_info; 3323 mutex_enter(&gid_info->gl_mutex); 3324 3325 if ((gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) || 3326 (cb_args->cb_req_type == 0)) { 3327 3328 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: req completed" 3329 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_req_type, 3330 cb_args->cb_ioc_num, cb_args->cb_srvents_start); 3331 3332 if (gid_info->gl_timeout_id) 3333 gid_info->gl_timeout_id = 0; 3334 mutex_exit(&gid_info->gl_mutex); 3335 return; 3336 } 3337 if (cb_args->cb_retry_count) { 3338 cb_args->cb_retry_count--; 3339 if (ibdm_retry_command(cb_args) == IBDM_SUCCESS) { 3340 if (gid_info->gl_timeout_id) 3341 gid_info->gl_timeout_id = 0; 3342 mutex_exit(&gid_info->gl_mutex); 3343 return; 3344 } 3345 cb_args->cb_retry_count = 0; 3346 } 3347 3348 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: command failed: gid %p" 3349 " rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3350 cb_args->cb_req_type, cb_args->cb_ioc_num, 3351 cb_args->cb_srvents_start); 3352 3353 new_gl_state = IBDM_GID_PROBING_COMPLETE; 3354 switch (cb_args->cb_req_type) { 3355 3356 case IBDM_REQ_TYPE_CLASSPORTINFO: 3357 case IBDM_REQ_TYPE_IOUINFO: 3358 new_gl_state = IBDM_GID_PROBING_FAILED; 3359 if (--gid_info->gl_pending_cmds == 0) 3360 probe_done = B_TRUE; 3361 if (gid_info->gl_timeout_id) 3362 gid_info->gl_timeout_id = 0; 3363 mutex_exit(&gid_info->gl_mutex); 3364 ibdm_delete_glhca_list(gid_info); 3365 mutex_enter(&gid_info->gl_mutex); 3366 break; 3367 case IBDM_REQ_TYPE_IOCINFO: 3368 iou = gid_info->gl_iou; 3369 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num]; 3370 ioc->ioc_state = IBDM_IOC_STATE_PROBE_FAILED; 3371 if (--gid_info->gl_pending_cmds == 0) 3372 probe_done = B_TRUE; 3373 #ifndef __lock_lint 3374 if (ioc->ioc_timeout_id) 3375 ioc->ioc_timeout_id = 0; 3376 #endif 3377 break; 3378 case IBDM_REQ_TYPE_SRVENTS: 3379 iou = gid_info->gl_iou; 3380 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num]; 3381 ioc->ioc_state = IBDM_IOC_STATE_PROBE_FAILED; 3382 if (--gid_info->gl_pending_cmds == 0) 3383 probe_done = B_TRUE; 3384 srv_ent = cb_args->cb_srvents_start; 3385 #ifndef __lock_lint 3386 if (ioc->ioc_serv[srv_ent].se_timeout_id) 3387 ioc->ioc_serv[srv_ent].se_timeout_id = 0; 3388 #endif 3389 break; 3390 case IBDM_REQ_TYPE_IOU_DIAGCODE: 3391 iou = gid_info->gl_iou; 3392 iou->iou_dc_valid = B_FALSE; 3393 if (--gid_info->gl_pending_cmds == 0) 3394 probe_done = B_TRUE; 3395 if (gid_info->gl_timeout_id) 3396 gid_info->gl_timeout_id = 0; 3397 
break; 3398 case IBDM_REQ_TYPE_IOC_DIAGCODE: 3399 iou = gid_info->gl_iou; 3400 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num]; 3401 ioc->ioc_dc_valid = B_FALSE; 3402 if (--gid_info->gl_pending_cmds == 0) 3403 probe_done = B_TRUE; 3404 #ifndef __lock_lint 3405 if (ioc->ioc_dc_timeout_id) 3406 ioc->ioc_dc_timeout_id = 0; 3407 #endif 3408 break; 3409 } 3410 if (probe_done == B_TRUE) { 3411 gid_info->gl_state = new_gl_state; 3412 mutex_exit(&gid_info->gl_mutex); 3413 ibdm_notify_newgid_iocs(gid_info); 3414 mutex_enter(&ibdm.ibdm_mutex); 3415 if (--ibdm.ibdm_ngid_probes_in_progress == 0) { 3416 IBTF_DPRINTF_L4("ibdm", "\tpkt_timeout_hdlr: Wakeup"); 3417 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS; 3418 cv_broadcast(&ibdm.ibdm_probe_cv); 3419 } 3420 mutex_exit(&ibdm.ibdm_mutex); 3421 } else 3422 mutex_exit(&gid_info->gl_mutex); 3423 } 3424 3425 3426 /* 3427 * ibdm_retry_command() 3428 * Retries the failed command. 3429 * Returns IBDM_FAILURE/IBDM_SUCCESS 3430 */ 3431 static int 3432 ibdm_retry_command(ibdm_timeout_cb_args_t *cb_args) 3433 { 3434 int ret, rval = IBDM_SUCCESS; 3435 ibmf_msg_t *msg; 3436 ib_mad_hdr_t *hdr; 3437 ibdm_dp_gidinfo_t *gid_info = cb_args->cb_gid_info; 3438 timeout_id_t *timeout_id; 3439 ibdm_ioc_info_t *ioc; 3440 int ioc_no; 3441 3442 IBTF_DPRINTF_L2("ibdm", "\tretry_command: gid_info: %p " 3443 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3444 cb_args->cb_req_type, cb_args->cb_ioc_num, 3445 cb_args->cb_srvents_start); 3446 3447 ret = ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_NOSLEEP, &msg); 3448 3449 3450 /* 3451 * Reset the gid if alloc_msg failed with BAD_HANDLE 3452 * ibdm_reset_gidinfo reinits the gid_info 3453 */ 3454 if (ret == IBMF_BAD_HANDLE) { 3455 IBTF_DPRINTF_L3(ibdm_string, "\tretry_command: gid %p hdl bad", 3456 gid_info); 3457 3458 mutex_exit(&gid_info->gl_mutex); 3459 ibdm_reset_gidinfo(gid_info); 3460 mutex_enter(&gid_info->gl_mutex); 3461 3462 /* Retry alloc */ 3463 ret = ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_NOSLEEP, 3464 &msg); 3465 } 3466 3467 if (ret != IBDM_SUCCESS) { 3468 IBTF_DPRINTF_L2("ibdm", "\tretry_command: alloc failed: %p " 3469 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3470 cb_args->cb_req_type, cb_args->cb_ioc_num, 3471 cb_args->cb_srvents_start); 3472 return (IBDM_FAILURE); 3473 } 3474 3475 ibdm_alloc_send_buffers(msg); 3476 3477 ibdm_bump_transactionID(gid_info); 3478 3479 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 3480 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 3481 if (gid_info->gl_redirected == B_TRUE) { 3482 if (gid_info->gl_redirect_dlid != 0) { 3483 msg->im_local_addr.ia_remote_lid = 3484 gid_info->gl_redirect_dlid; 3485 } 3486 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 3487 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 3488 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 3489 } else { 3490 msg->im_local_addr.ia_remote_qno = 1; 3491 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 3492 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 3493 } 3494 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 3495 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 3496 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 3497 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 3498 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 3499 hdr->Status = 0; 3500 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 3501 3502 switch (cb_args->cb_req_type) { 3503 case IBDM_REQ_TYPE_CLASSPORTINFO: 3504 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 3505 hdr->AttributeModifier = 0; 3506 timeout_id = 
&gid_info->gl_timeout_id; 3507 break; 3508 case IBDM_REQ_TYPE_IOUINFO: 3509 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 3510 hdr->AttributeModifier = 0; 3511 timeout_id = &gid_info->gl_timeout_id; 3512 break; 3513 case IBDM_REQ_TYPE_IOCINFO: 3514 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 3515 hdr->AttributeModifier = h2b32(cb_args->cb_ioc_num + 1); 3516 ioc = IBDM_GIDINFO2IOCINFO(gid_info, cb_args->cb_ioc_num); 3517 timeout_id = &ioc->ioc_timeout_id; 3518 break; 3519 case IBDM_REQ_TYPE_SRVENTS: 3520 hdr->AttributeID = h2b16(IB_DM_ATTR_SERVICE_ENTRIES); 3521 ibdm_fill_srv_attr_mod(hdr, cb_args); 3522 ioc = IBDM_GIDINFO2IOCINFO(gid_info, cb_args->cb_ioc_num); 3523 timeout_id = 3524 &ioc->ioc_serv[cb_args->cb_srvents_start].se_timeout_id; 3525 break; 3526 case IBDM_REQ_TYPE_IOU_DIAGCODE: 3527 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 3528 hdr->AttributeModifier = 0; 3529 timeout_id = &gid_info->gl_timeout_id; 3530 break; 3531 case IBDM_REQ_TYPE_IOC_DIAGCODE: 3532 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 3533 hdr->AttributeModifier = h2b32(cb_args->cb_ioc_num + 1); 3534 ioc_no = cb_args->cb_ioc_num; 3535 ioc = &gid_info->gl_iou->iou_ioc_info[ioc_no]; 3536 timeout_id = &ioc->ioc_dc_timeout_id; 3537 break; 3538 } 3539 3540 mutex_exit(&gid_info->gl_mutex); 3541 3542 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 3543 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 3544 3545 IBTF_DPRINTF_L5("ibdm", "\tretry_command: %p,%x,%d,%d:" 3546 "timeout %x", cb_args->cb_req_type, cb_args->cb_ioc_num, 3547 cb_args->cb_srvents_start, *timeout_id); 3548 3549 if ((rval = ibmf_msg_transport(gid_info->gl_ibmf_hdl, 3550 gid_info->gl_qp_hdl, msg, NULL, ibdm_ibmf_send_cb, 3551 cb_args, 0)) != IBMF_SUCCESS) { 3552 IBTF_DPRINTF_L2("ibdm", "\tretry_command: send failed: %p " 3553 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3554 cb_args->cb_req_type, cb_args->cb_ioc_num, 3555 cb_args->cb_srvents_start); 3556 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 3557 rval = IBDM_FAILURE; 3558 } 3559 mutex_enter(&gid_info->gl_mutex); 3560 return (rval); 3561 } 3562 3563 3564 /* 3565 * ibdm_update_ioc_port_gidlist() 3566 */ 3567 static void 3568 ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *dest, 3569 ibdm_dp_gidinfo_t *gid_info) 3570 { 3571 int ii, ngid_ents; 3572 ibdm_gid_t *tmp; 3573 ibdm_hca_list_t *gid_hca_head, *temp; 3574 ibdm_hca_list_t *ioc_head = NULL; 3575 3576 IBTF_DPRINTF_L5("ibdm", "\tupdate_ioc_port_gidlist: Enter"); 3577 3578 ngid_ents = gid_info->gl_ngids; 3579 dest->ioc_nportgids = ngid_ents; 3580 dest->ioc_gid_list = kmem_zalloc(sizeof (ibdm_gid_t) * 3581 ngid_ents, KM_SLEEP); 3582 tmp = gid_info->gl_gid; 3583 for (ii = 0; (ii < ngid_ents) && (tmp); ii++) { 3584 dest->ioc_gid_list[ii].gid_dgid_hi = tmp->gid_dgid_hi; 3585 dest->ioc_gid_list[ii].gid_dgid_lo = tmp->gid_dgid_lo; 3586 tmp = tmp->gid_next; 3587 } 3588 3589 gid_hca_head = gid_info->gl_hca_list; 3590 while (gid_hca_head) { 3591 temp = ibdm_dup_hca_attr(gid_hca_head); 3592 temp->hl_next = ioc_head; 3593 ioc_head = temp; 3594 gid_hca_head = gid_hca_head->hl_next; 3595 } 3596 dest->ioc_hca_list = ioc_head; 3597 } 3598 3599 3600 /* 3601 * ibdm_alloc_send_buffers() 3602 * Allocates memory for the IBMF send buffer 3603 */ 3604 static void 3605 ibdm_alloc_send_buffers(ibmf_msg_t *msgp) 3606 { 3607 msgp->im_msgbufs_send.im_bufs_mad_hdr = 3608 kmem_zalloc(IBDM_MAD_SIZE, KM_SLEEP); 3609 msgp->im_msgbufs_send.im_bufs_cl_data = (uchar_t *) 3610 msgp->im_msgbufs_send.im_bufs_mad_hdr + sizeof (ib_mad_hdr_t); 3611 
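	/*
	 * The send buffer is one IBDM_MAD_SIZE allocation: the MAD header
	 * sits at offset zero and the Device Management class payload
	 * (im_bufs_cl_data) starts immediately after it, so the class data
	 * length below is the MAD size minus the header size.
	 */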
msgp->im_msgbufs_send.im_bufs_cl_data_len =
3612 	    IBDM_MAD_SIZE - sizeof (ib_mad_hdr_t);
3613 }
3614 
3615 
3616 /*
3617  * ibdm_free_send_buffers()
3618  *	De-allocates memory for the IBMF send buffer
3619  */
3620 static void
3621 ibdm_free_send_buffers(ibmf_msg_t *msgp)
3622 {
3623 	if (msgp->im_msgbufs_send.im_bufs_mad_hdr != NULL)
3624 		kmem_free(msgp->im_msgbufs_send.im_bufs_mad_hdr, IBDM_MAD_SIZE);
3625 }
3626 
3627 /*
3628  * ibdm_probe_ioc()
3629  *	1. Gets the node records for the node GUID. This detects all the
3630  *	   ports connected to the IOU.
3631  *	2. Selectively probes all the IOCs behind that node GUID.
3632  *	3. In case of reprobe, only the IOC to be reprobed is sent the IOC
3633  *	   Controller Profile request, asynchronously.
3634  */
3635 /*ARGSUSED*/
3636 static void
3637 ibdm_probe_ioc(ib_guid_t nodeguid, ib_guid_t ioc_guid, int reprobe_flag)
3638 {
3639 	int			ii, nrecords;
3640 	size_t			nr_len = 0, pi_len = 0;
3641 	ib_gid_t		sgid, dgid;
3642 	ibdm_hca_list_t		*hca_list = NULL;
3643 	sa_node_record_t	*nr, *tmp;
3644 	ibdm_port_attr_t	*port = NULL;
3645 	ibdm_dp_gidinfo_t	*reprobe_gid, *new_gid, *node_gid;
3646 	ibdm_dp_gidinfo_t	*temp_gidinfo;
3647 	ibdm_gid_t		*temp_gid;
3648 	sa_portinfo_record_t	*pi;
3649 
3650 	IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc(%x, %x, %x): Begin",
3651 	    nodeguid, ioc_guid, reprobe_flag);
3652 
3653 	/* Rescan the GID list for any removed GIDs for reprobe */
3654 	if (reprobe_flag)
3655 		ibdm_rescan_gidlist(&ioc_guid);
3656 
3657 	mutex_enter(&ibdm.ibdm_hl_mutex);
3658 	for (ibdm_get_next_port(&hca_list, &port, 1); port;
3659 	    ibdm_get_next_port(&hca_list, &port, 1)) {
3660 		reprobe_gid = new_gid = node_gid = NULL;
3661 
3662 		nr = ibdm_get_node_records(port->pa_sa_hdl, &nr_len, nodeguid);
3663 		if (nr == NULL) {
3664 			IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc: no records");
3665 			continue;
3666 		}
3667 		nrecords = (nr_len / sizeof (sa_node_record_t));
3668 		for (tmp = nr, ii = 0; (ii < nrecords); ii++, tmp++) {
3669 			pi = ibdm_get_portinfo(
3670 			    port->pa_sa_hdl, &pi_len, tmp->LID);
3671 
3672 			if ((pi) && (pi->PortInfo.CapabilityMask &
3673 			    SM_CAP_MASK_IS_DM_SUPPD)) {
3674 				/*
3675 				 * For reprobes: Check if the GID is already in
3676 				 * the list.
If so, set the state to SKIPPED 3677 */ 3678 if (((temp_gidinfo = ibdm_find_gid(nodeguid, 3679 tmp->NodeInfo.PortGUID)) != NULL) && 3680 temp_gidinfo->gl_state == 3681 IBDM_GID_PROBING_COMPLETE) { 3682 ASSERT(reprobe_gid == NULL); 3683 ibdm_addto_glhcalist(temp_gidinfo, 3684 hca_list); 3685 reprobe_gid = temp_gidinfo; 3686 kmem_free(pi, pi_len); 3687 continue; 3688 } else if (temp_gidinfo != NULL) { 3689 kmem_free(pi, pi_len); 3690 ibdm_addto_glhcalist(temp_gidinfo, 3691 hca_list); 3692 continue; 3693 } 3694 3695 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : " 3696 "create_gid : prefix %llx, guid %llx\n", 3697 pi->PortInfo.GidPrefix, 3698 tmp->NodeInfo.PortGUID); 3699 3700 sgid.gid_prefix = port->pa_sn_prefix; 3701 sgid.gid_guid = port->pa_port_guid; 3702 dgid.gid_prefix = pi->PortInfo.GidPrefix; 3703 dgid.gid_guid = tmp->NodeInfo.PortGUID; 3704 new_gid = ibdm_create_gid_info(port, sgid, 3705 dgid); 3706 if (new_gid == NULL) { 3707 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 3708 "create_gid_info failed\n"); 3709 kmem_free(pi, pi_len); 3710 continue; 3711 } 3712 if (node_gid == NULL) { 3713 node_gid = new_gid; 3714 ibdm_add_to_gl_gid(node_gid, node_gid); 3715 } else { 3716 IBTF_DPRINTF_L4("ibdm", 3717 "\tprobe_ioc: new gid"); 3718 temp_gid = kmem_zalloc( 3719 sizeof (ibdm_gid_t), KM_SLEEP); 3720 temp_gid->gid_dgid_hi = 3721 new_gid->gl_dgid_hi; 3722 temp_gid->gid_dgid_lo = 3723 new_gid->gl_dgid_lo; 3724 temp_gid->gid_next = node_gid->gl_gid; 3725 node_gid->gl_gid = temp_gid; 3726 node_gid->gl_ngids++; 3727 } 3728 new_gid->gl_nodeguid = nodeguid; 3729 new_gid->gl_portguid = dgid.gid_guid; 3730 ibdm_addto_glhcalist(new_gid, hca_list); 3731 3732 /* 3733 * Set the state to skipped as all these 3734 * gids point to the same node. 3735 * We (re)probe only one GID below and reset 3736 * state appropriately 3737 */ 3738 new_gid->gl_state = IBDM_GID_PROBING_SKIPPED; 3739 kmem_free(pi, pi_len); 3740 } 3741 } 3742 kmem_free(nr, nr_len); 3743 3744 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : reprobe_flag %d " 3745 "reprobe_gid %p new_gid %p node_gid %p", 3746 reprobe_flag, reprobe_gid, new_gid, node_gid); 3747 3748 if (reprobe_flag != 0 && reprobe_gid != NULL) { 3749 int niocs, jj; 3750 ibdm_ioc_info_t *tmp_ioc; 3751 int ioc_matched = 0; 3752 3753 mutex_exit(&ibdm.ibdm_hl_mutex); 3754 mutex_enter(&reprobe_gid->gl_mutex); 3755 reprobe_gid->gl_state = IBDM_GET_IOC_DETAILS; 3756 niocs = 3757 reprobe_gid->gl_iou->iou_info.iou_num_ctrl_slots; 3758 reprobe_gid->gl_pending_cmds++; 3759 mutex_exit(&reprobe_gid->gl_mutex); 3760 3761 for (jj = 0; jj < niocs; jj++) { 3762 tmp_ioc = 3763 IBDM_GIDINFO2IOCINFO(reprobe_gid, jj); 3764 if (tmp_ioc->ioc_profile.ioc_guid != ioc_guid) 3765 continue; 3766 3767 ioc_matched = 1; 3768 3769 /* 3770 * Explicitly set gl_reprobe_flag to 0 so that 3771 * IBnex is not notified on completion 3772 */ 3773 mutex_enter(&reprobe_gid->gl_mutex); 3774 reprobe_gid->gl_reprobe_flag = 0; 3775 mutex_exit(&reprobe_gid->gl_mutex); 3776 3777 mutex_enter(&ibdm.ibdm_mutex); 3778 ibdm.ibdm_ngid_probes_in_progress++; 3779 mutex_exit(&ibdm.ibdm_mutex); 3780 if (ibdm_send_ioc_profile(reprobe_gid, jj) != 3781 IBDM_SUCCESS) { 3782 IBTF_DPRINTF_L4("ibdm", 3783 "\tprobe_ioc: " 3784 "send_ioc_profile failed " 3785 "for ioc %d", jj); 3786 ibdm_gid_decr_pending(reprobe_gid); 3787 break; 3788 } 3789 mutex_enter(&ibdm.ibdm_mutex); 3790 ibdm_wait_probe_completion(); 3791 mutex_exit(&ibdm.ibdm_mutex); 3792 break; 3793 } 3794 if (ioc_matched == 0) 3795 ibdm_gid_decr_pending(reprobe_gid); 3796 else { 3797 
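				/*
				 * The matching IOC has been reprobed;
				 * reacquire ibdm_hl_mutex before breaking
				 * out, since the enclosing port loop and
				 * the final mutex_exit expect it to be held.
				 */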
mutex_enter(&ibdm.ibdm_hl_mutex); 3798 break; 3799 } 3800 } else if (new_gid != NULL) { 3801 mutex_exit(&ibdm.ibdm_hl_mutex); 3802 node_gid = node_gid ? node_gid : new_gid; 3803 3804 /* 3805 * New or reinserted GID : Enable notification 3806 * to IBnex 3807 */ 3808 mutex_enter(&node_gid->gl_mutex); 3809 node_gid->gl_reprobe_flag = 1; 3810 mutex_exit(&node_gid->gl_mutex); 3811 3812 ibdm_probe_gid(node_gid); 3813 3814 mutex_enter(&ibdm.ibdm_hl_mutex); 3815 } else { 3816 IBTF_DPRINTF_L2("ibdm", "\tibdm_probe_ioc " 3817 "Invalid state!"); 3818 } 3819 3820 } 3821 mutex_exit(&ibdm.ibdm_hl_mutex); 3822 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : End\n"); 3823 } 3824 3825 3826 /* 3827 * ibdm_probe_gid() 3828 * Selectively probes the GID 3829 */ 3830 static void 3831 ibdm_probe_gid(ibdm_dp_gidinfo_t *gid_info) 3832 { 3833 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid:"); 3834 mutex_enter(&gid_info->gl_mutex); 3835 gid_info->gl_pending_cmds++; 3836 gid_info->gl_state = IBDM_GET_CLASSPORTINFO; 3837 mutex_exit(&gid_info->gl_mutex); 3838 if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) { 3839 mutex_enter(&gid_info->gl_mutex); 3840 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 3841 --gid_info->gl_pending_cmds; 3842 mutex_exit(&gid_info->gl_mutex); 3843 ibdm_delete_glhca_list(gid_info); 3844 gid_info = gid_info->gl_next; 3845 return; 3846 } 3847 mutex_enter(&ibdm.ibdm_mutex); 3848 ibdm.ibdm_ngid_probes_in_progress++; 3849 gid_info = gid_info->gl_next; 3850 3851 ibdm_wait_probe_completion(); 3852 mutex_exit(&ibdm.ibdm_mutex); 3853 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid: Wakeup signal received"); 3854 } 3855 3856 3857 /* 3858 * ibdm_create_gid_info() 3859 * Allocates a gid_info structure and initializes 3860 * Returns pointer to the structure on success 3861 * and NULL on failure 3862 */ 3863 static ibdm_dp_gidinfo_t * 3864 ibdm_create_gid_info(ibdm_port_attr_t *port, ib_gid_t sgid, ib_gid_t dgid) 3865 { 3866 uint8_t ii, npaths; 3867 sa_path_record_t *path; 3868 size_t len; 3869 ibdm_pkey_tbl_t *pkey_tbl; 3870 ibdm_dp_gidinfo_t *gid_info = NULL; 3871 int ret; 3872 3873 IBTF_DPRINTF_L4("ibdm", "\tcreate_gid_info: Begin"); 3874 npaths = 1; 3875 3876 /* query for reversible paths */ 3877 if (port->pa_sa_hdl) 3878 ret = ibmf_saa_gid_to_pathrecords(port->pa_sa_hdl, 3879 sgid, dgid, IBMF_SAA_PKEY_WC, 0, B_TRUE, &npaths, 0, 3880 &len, &path); 3881 else 3882 return (NULL); 3883 3884 if (ret == IBMF_SUCCESS && path) { 3885 ibdm_dump_path_info(path); 3886 3887 gid_info = kmem_zalloc( 3888 sizeof (ibdm_dp_gidinfo_t), KM_SLEEP); 3889 mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL); 3890 gid_info->gl_dgid_hi = path->DGID.gid_prefix; 3891 gid_info->gl_dgid_lo = path->DGID.gid_guid; 3892 gid_info->gl_sgid_hi = path->SGID.gid_prefix; 3893 gid_info->gl_sgid_lo = path->SGID.gid_guid; 3894 gid_info->gl_p_key = path->P_Key; 3895 gid_info->gl_sa_hdl = port->pa_sa_hdl; 3896 gid_info->gl_ibmf_hdl = port->pa_ibmf_hdl; 3897 gid_info->gl_slid = path->SLID; 3898 gid_info->gl_dlid = path->DLID; 3899 gid_info->gl_transactionID = (++ibdm.ibdm_transactionID) 3900 << IBDM_GID_TRANSACTIONID_SHIFT; 3901 gid_info->gl_min_transactionID = gid_info->gl_transactionID; 3902 gid_info->gl_max_transactionID = (ibdm.ibdm_transactionID +1) 3903 << IBDM_GID_TRANSACTIONID_SHIFT; 3904 3905 gid_info->gl_qp_hdl = IBMF_QP_HANDLE_DEFAULT; 3906 for (ii = 0; ii < port->pa_npkeys; ii++) { 3907 if (port->pa_pkey_tbl == NULL) 3908 break; 3909 3910 pkey_tbl = &port->pa_pkey_tbl[ii]; 3911 if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) && 3912 (pkey_tbl->pt_qp_hdl 
!= NULL)) { 3913 gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl; 3914 break; 3915 } 3916 } 3917 kmem_free(path, len); 3918 3919 /* 3920 * QP handle for GID not initialized. No matching Pkey 3921 * was found!! ibdm should *not* hit this case. Flag an 3922 * error and drop the GID if ibdm does encounter this. 3923 */ 3924 if (gid_info->gl_qp_hdl == NULL) { 3925 IBTF_DPRINTF_L2(ibdm_string, 3926 "\tcreate_gid_info: No matching Pkey"); 3927 ibdm_delete_gidinfo(gid_info); 3928 return (NULL); 3929 } 3930 3931 ibdm.ibdm_ngids++; 3932 if (ibdm.ibdm_dp_gidlist_head == NULL) { 3933 ibdm.ibdm_dp_gidlist_head = gid_info; 3934 ibdm.ibdm_dp_gidlist_tail = gid_info; 3935 } else { 3936 ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info; 3937 gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail; 3938 ibdm.ibdm_dp_gidlist_tail = gid_info; 3939 } 3940 } 3941 3942 return (gid_info); 3943 } 3944 3945 3946 /* 3947 * ibdm_get_node_records 3948 * Sends a SA query to get the NODE record 3949 * Returns pointer to the sa_node_record_t on success 3950 * and NULL on failure 3951 */ 3952 static sa_node_record_t * 3953 ibdm_get_node_records(ibmf_saa_handle_t sa_hdl, size_t *length, ib_guid_t guid) 3954 { 3955 sa_node_record_t req, *resp = NULL; 3956 ibmf_saa_access_args_t args; 3957 int ret; 3958 3959 IBTF_DPRINTF_L4("ibdm", "\tget_node_records: Begin"); 3960 3961 bzero(&req, sizeof (sa_node_record_t)); 3962 req.NodeInfo.NodeGUID = guid; 3963 3964 args.sq_attr_id = SA_NODERECORD_ATTRID; 3965 args.sq_access_type = IBMF_SAA_RETRIEVE; 3966 args.sq_component_mask = SA_NODEINFO_COMPMASK_NODEGUID; 3967 args.sq_template = &req; 3968 args.sq_callback = NULL; 3969 args.sq_callback_arg = NULL; 3970 3971 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) &resp); 3972 if (ret != IBMF_SUCCESS) { 3973 IBTF_DPRINTF_L2("ibdm", "\tget_node_records:" 3974 " SA Retrieve Failed: %d", ret); 3975 return (NULL); 3976 } 3977 if ((resp == NULL) || (*length == 0)) { 3978 IBTF_DPRINTF_L2("ibdm", "\tget_node_records: No records"); 3979 return (NULL); 3980 } 3981 3982 IBTF_DPRINTF_L4("ibdm", "\tget_node_records: NodeGuid %llx " 3983 "PortGUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.PortGUID); 3984 3985 return (resp); 3986 } 3987 3988 3989 /* 3990 * ibdm_get_portinfo() 3991 * Sends a SA query to get the PortInfo record 3992 * Returns pointer to the sa_portinfo_record_t on success 3993 * and NULL on failure 3994 */ 3995 static sa_portinfo_record_t * 3996 ibdm_get_portinfo(ibmf_saa_handle_t sa_hdl, size_t *length, ib_lid_t lid) 3997 { 3998 sa_portinfo_record_t req, *resp = NULL; 3999 ibmf_saa_access_args_t args; 4000 int ret; 4001 4002 IBTF_DPRINTF_L4("ibdm", "\tget_portinfo: Begin"); 4003 4004 bzero(&req, sizeof (sa_portinfo_record_t)); 4005 req.EndportLID = lid; 4006 4007 args.sq_attr_id = SA_PORTINFORECORD_ATTRID; 4008 args.sq_access_type = IBMF_SAA_RETRIEVE; 4009 args.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID; 4010 args.sq_template = &req; 4011 args.sq_callback = NULL; 4012 args.sq_callback_arg = NULL; 4013 4014 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) &resp); 4015 if (ret != IBMF_SUCCESS) { 4016 IBTF_DPRINTF_L2("ibdm", "\tget_portinfo:" 4017 " SA Retrieve Failed: 0x%X", ret); 4018 return (NULL); 4019 } 4020 if ((*length == 0) || (resp == NULL)) 4021 return (NULL); 4022 4023 IBTF_DPRINTF_L4("ibdm", "\tget_portinfo: GidPrefix %llx Cap 0x%x", 4024 resp->PortInfo.GidPrefix, resp->PortInfo.CapabilityMask); 4025 return (resp); 4026 } 4027 4028 4029 /* 4030 * ibdm_ibnex_register_callback 4031 * IB nexus callback routine for HCA attach 
and detach notification 4032 */ 4033 void 4034 ibdm_ibnex_register_callback(ibdm_callback_t ibnex_dm_callback) 4035 { 4036 IBTF_DPRINTF_L4("ibdm", "\tibnex_register_callbacks"); 4037 mutex_enter(&ibdm.ibdm_ibnex_mutex); 4038 ibdm.ibdm_ibnex_callback = ibnex_dm_callback; 4039 mutex_exit(&ibdm.ibdm_ibnex_mutex); 4040 } 4041 4042 4043 /* 4044 * ibdm_ibnex_unregister_callbacks 4045 */ 4046 void 4047 ibdm_ibnex_unregister_callback() 4048 { 4049 IBTF_DPRINTF_L4("ibdm", "\tibnex_unregister_callbacks"); 4050 mutex_enter(&ibdm.ibdm_ibnex_mutex); 4051 ibdm.ibdm_ibnex_callback = NULL; 4052 mutex_exit(&ibdm.ibdm_ibnex_mutex); 4053 } 4054 4055 4056 /* 4057 * ibdm_ibnex_get_waittime() 4058 * Calculates the wait time based on the last HCA attach time 4059 */ 4060 time_t 4061 ibdm_ibnex_get_waittime(ib_guid_t hca_guid, int *dft_wait) 4062 { 4063 int ii; 4064 time_t temp, wait_time = 0; 4065 ibdm_hca_list_t *hca; 4066 4067 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_waittime hcaguid:%llx" 4068 "\tport settling time %d", hca_guid, *dft_wait); 4069 4070 mutex_enter(&ibdm.ibdm_hl_mutex); 4071 hca = ibdm.ibdm_hca_list_head; 4072 4073 if (hca_guid) { 4074 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4075 if ((hca_guid == hca->hl_hca_guid) && 4076 (hca->hl_nports != hca->hl_nports_active)) { 4077 wait_time = 4078 ddi_get_time() - hca->hl_attach_time; 4079 wait_time = ((wait_time >= *dft_wait) ? 4080 0 : (*dft_wait - wait_time)); 4081 break; 4082 } 4083 hca = hca->hl_next; 4084 } 4085 mutex_exit(&ibdm.ibdm_hl_mutex); 4086 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_waittime %llx", wait_time); 4087 return (wait_time); 4088 } 4089 4090 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4091 if (hca->hl_nports != hca->hl_nports_active) { 4092 temp = ddi_get_time() - hca->hl_attach_time; 4093 temp = ((temp >= *dft_wait) ? 0 : (*dft_wait - temp)); 4094 wait_time = (temp > wait_time) ? temp : wait_time; 4095 } 4096 } 4097 mutex_exit(&ibdm.ibdm_hl_mutex); 4098 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_waittime %llx", wait_time); 4099 return (wait_time); 4100 } 4101 4102 4103 /* 4104 * ibdm_ibnex_probe_hcaport 4105 * Probes the presence of HCA port (with HCA dip and port number) 4106 * Returns port attributes structure on SUCCESS 4107 */ 4108 ibdm_port_attr_t * 4109 ibdm_ibnex_probe_hcaport(ib_guid_t hca_guid, uint8_t port_num) 4110 { 4111 int ii, jj; 4112 ibdm_hca_list_t *hca_list; 4113 ibdm_port_attr_t *port_attr; 4114 4115 IBTF_DPRINTF_L4("ibdm", "\tibnex_probe_hcaport:"); 4116 4117 mutex_enter(&ibdm.ibdm_hl_mutex); 4118 hca_list = ibdm.ibdm_hca_list_head; 4119 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4120 if (hca_list->hl_hca_guid == hca_guid) { 4121 for (jj = 0; jj < hca_list->hl_nports; jj++) { 4122 if (hca_list->hl_port_attr[jj].pa_port_num == 4123 port_num) { 4124 break; 4125 } 4126 } 4127 if (jj != hca_list->hl_nports) 4128 break; 4129 } 4130 hca_list = hca_list->hl_next; 4131 } 4132 if (ii == ibdm.ibdm_hca_count) { 4133 IBTF_DPRINTF_L2("ibdm", "\tibnex_probe_hcaport: not found"); 4134 mutex_exit(&ibdm.ibdm_hl_mutex); 4135 return (NULL); 4136 } 4137 port_attr = (ibdm_port_attr_t *)kmem_zalloc( 4138 sizeof (ibdm_port_attr_t), KM_SLEEP); 4139 bcopy((char *)&hca_list->hl_port_attr[jj], 4140 port_attr, sizeof (ibdm_port_attr_t)); 4141 ibdm_update_port_attr(port_attr); 4142 4143 mutex_exit(&ibdm.ibdm_hl_mutex); 4144 return (port_attr); 4145 } 4146 4147 4148 /* 4149 * ibdm_ibnex_get_port_attrs 4150 * Scan all HCAs for a matching port_guid. 4151 * Returns "port attributes" structure on success. 
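 *	The structure returned is a snapshot copy; the caller is expected
 *	to release it with ibdm_ibnex_free_port_attr() when done.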
4152 */ 4153 ibdm_port_attr_t * 4154 ibdm_ibnex_get_port_attrs(ib_guid_t port_guid) 4155 { 4156 int ii, jj; 4157 ibdm_hca_list_t *hca_list; 4158 ibdm_port_attr_t *port_attr; 4159 4160 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_port_attrs:"); 4161 4162 mutex_enter(&ibdm.ibdm_hl_mutex); 4163 hca_list = ibdm.ibdm_hca_list_head; 4164 4165 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4166 for (jj = 0; jj < hca_list->hl_nports; jj++) { 4167 if (hca_list->hl_port_attr[jj].pa_port_guid == 4168 port_guid) { 4169 break; 4170 } 4171 } 4172 if (jj != hca_list->hl_nports) 4173 break; 4174 hca_list = hca_list->hl_next; 4175 } 4176 4177 if (ii == ibdm.ibdm_hca_count) { 4178 IBTF_DPRINTF_L2("ibdm", "\tibnex_get_port_attrs: not found"); 4179 mutex_exit(&ibdm.ibdm_hl_mutex); 4180 return (NULL); 4181 } 4182 4183 port_attr = (ibdm_port_attr_t *)kmem_alloc(sizeof (ibdm_port_attr_t), 4184 KM_SLEEP); 4185 bcopy((char *)&hca_list->hl_port_attr[jj], port_attr, 4186 sizeof (ibdm_port_attr_t)); 4187 ibdm_update_port_attr(port_attr); 4188 4189 mutex_exit(&ibdm.ibdm_hl_mutex); 4190 return (port_attr); 4191 } 4192 4193 4194 /* 4195 * ibdm_ibnex_free_port_attr() 4196 */ 4197 void 4198 ibdm_ibnex_free_port_attr(ibdm_port_attr_t *port_attr) 4199 { 4200 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_port_attr:"); 4201 if (port_attr) { 4202 if (port_attr->pa_pkey_tbl != NULL) { 4203 kmem_free(port_attr->pa_pkey_tbl, 4204 (port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t))); 4205 } 4206 kmem_free(port_attr, sizeof (ibdm_port_attr_t)); 4207 } 4208 } 4209 4210 4211 /* 4212 * ibdm_ibnex_get_hca_list() 4213 * Returns portinfo for all the port for all the HCA's 4214 */ 4215 void 4216 ibdm_ibnex_get_hca_list(ibdm_hca_list_t **hca, int *count) 4217 { 4218 ibdm_hca_list_t *head = NULL, *temp, *temp1; 4219 int ii; 4220 4221 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_list:"); 4222 4223 mutex_enter(&ibdm.ibdm_hl_mutex); 4224 temp = ibdm.ibdm_hca_list_head; 4225 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4226 temp1 = ibdm_dup_hca_attr(temp); 4227 temp1->hl_next = head; 4228 head = temp1; 4229 temp = temp->hl_next; 4230 } 4231 *count = ibdm.ibdm_hca_count; 4232 *hca = head; 4233 mutex_exit(&ibdm.ibdm_hl_mutex); 4234 } 4235 4236 4237 /* 4238 * ibdm_ibnex_get_hca_info_by_guid() 4239 */ 4240 ibdm_hca_list_t * 4241 ibdm_ibnex_get_hca_info_by_guid(ib_guid_t hca_guid) 4242 { 4243 ibdm_hca_list_t *head = NULL, *hca = NULL; 4244 4245 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_info_by_dip"); 4246 4247 mutex_enter(&ibdm.ibdm_hl_mutex); 4248 head = ibdm.ibdm_hca_list_head; 4249 while (head) { 4250 if (head->hl_hca_guid == hca_guid) { 4251 hca = ibdm_dup_hca_attr(head); 4252 hca->hl_next = NULL; 4253 break; 4254 } 4255 head = head->hl_next; 4256 } 4257 mutex_exit(&ibdm.ibdm_hl_mutex); 4258 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_info_by_dip %p", hca); 4259 return (hca); 4260 } 4261 4262 4263 /* 4264 * ibdm_dup_hca_attr() 4265 * Allocate a new HCA attribute strucuture and initialize 4266 * hca attribute structure with the incoming HCA attributes 4267 * returned the allocated hca attributes. 
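 *	The copy is made as a single allocation: the ibdm_hca_list_t header
 *	is followed in the same buffer by the array of ibdm_port_attr_t
 *	entries, with hl_port_attr pointed just past the header.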
4268 */ 4269 static ibdm_hca_list_t * 4270 ibdm_dup_hca_attr(ibdm_hca_list_t *in_hca) 4271 { 4272 int len; 4273 ibdm_hca_list_t *out_hca; 4274 4275 len = sizeof (ibdm_hca_list_t) + 4276 (in_hca->hl_nports * sizeof (ibdm_port_attr_t)); 4277 IBTF_DPRINTF_L4("ibdm", "\tdup_hca_attr len %d", len); 4278 out_hca = (ibdm_hca_list_t *)kmem_alloc(len, KM_SLEEP); 4279 bcopy((char *)in_hca, 4280 (char *)out_hca, sizeof (ibdm_hca_list_t)); 4281 if (in_hca->hl_nports) { 4282 out_hca->hl_port_attr = (ibdm_port_attr_t *) 4283 ((char *)out_hca + sizeof (ibdm_hca_list_t)); 4284 bcopy((char *)in_hca->hl_port_attr, 4285 (char *)out_hca->hl_port_attr, 4286 (in_hca->hl_nports * sizeof (ibdm_port_attr_t))); 4287 for (len = 0; len < out_hca->hl_nports; len++) 4288 ibdm_update_port_attr(&out_hca->hl_port_attr[len]); 4289 } 4290 return (out_hca); 4291 } 4292 4293 4294 /* 4295 * ibdm_ibnex_free_hca_list() 4296 * Free one/more HCA lists 4297 */ 4298 void 4299 ibdm_ibnex_free_hca_list(ibdm_hca_list_t *hca_list) 4300 { 4301 int ii; 4302 size_t len; 4303 ibdm_hca_list_t *temp; 4304 ibdm_port_attr_t *port; 4305 4306 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_hca_list:"); 4307 ASSERT(hca_list); 4308 while (hca_list) { 4309 temp = hca_list; 4310 hca_list = hca_list->hl_next; 4311 for (ii = 0; ii < temp->hl_nports; ii++) { 4312 port = &temp->hl_port_attr[ii]; 4313 len = (port->pa_npkeys * sizeof (ibdm_pkey_tbl_t)); 4314 if (len != 0) 4315 kmem_free(port->pa_pkey_tbl, len); 4316 } 4317 len = sizeof (ibdm_hca_list_t) + (temp->hl_nports * 4318 sizeof (ibdm_port_attr_t)); 4319 kmem_free(temp, len); 4320 } 4321 } 4322 4323 4324 /* 4325 * ibdm_ibnex_probe_iocguid() 4326 * Probes the IOC on the fabric and returns the IOC information 4327 * if present. Otherwise, NULL is returned 4328 */ 4329 /* ARGSUSED */ 4330 ibdm_ioc_info_t * 4331 ibdm_ibnex_probe_ioc(ib_guid_t iou, ib_guid_t ioc_guid, int reprobe_flag) 4332 { 4333 int k; 4334 ibdm_ioc_info_t *ioc_info; 4335 ibdm_dp_gidinfo_t *gid_info; 4336 4337 IBTF_DPRINTF_L4("ibdm", "\tibnex_probe_ioc: (%llX, %llX, %d) Begin", 4338 iou, ioc_guid, reprobe_flag); 4339 /* Check whether we know this already */ 4340 ioc_info = ibdm_ibnex_get_ioc_info(ioc_guid); 4341 if (ioc_info == NULL) { 4342 mutex_enter(&ibdm.ibdm_mutex); 4343 while (ibdm.ibdm_busy & IBDM_BUSY) 4344 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4345 ibdm.ibdm_busy |= IBDM_BUSY; 4346 mutex_exit(&ibdm.ibdm_mutex); 4347 ibdm_probe_ioc(iou, ioc_guid, 0); 4348 mutex_enter(&ibdm.ibdm_mutex); 4349 ibdm.ibdm_busy &= ~IBDM_BUSY; 4350 cv_broadcast(&ibdm.ibdm_busy_cv); 4351 mutex_exit(&ibdm.ibdm_mutex); 4352 ioc_info = ibdm_ibnex_get_ioc_info(ioc_guid); 4353 } else if (reprobe_flag) { /* Handle Reprobe for the IOC */ 4354 /* Free the ioc_list before reprobe; and cancel any timers */ 4355 mutex_enter(&ibdm.ibdm_mutex); 4356 if (ioc_info->ioc_timeout_id) { 4357 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4358 "ioc_timeout_id = 0x%x", 4359 ioc_info->ioc_timeout_id); 4360 if (untimeout(ioc_info->ioc_timeout_id) == -1) { 4361 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4362 "untimeout ioc_timeout_id failed"); 4363 } 4364 ioc_info->ioc_timeout_id = 0; 4365 } 4366 if (ioc_info->ioc_dc_timeout_id) { 4367 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4368 "ioc_dc_timeout_id = 0x%x", 4369 ioc_info->ioc_dc_timeout_id); 4370 if (untimeout(ioc_info->ioc_dc_timeout_id) == -1) { 4371 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4372 "untimeout ioc_dc_timeout_id failed"); 4373 } 4374 ioc_info->ioc_dc_timeout_id = 0; 4375 } 4376 for (k = 0; k < 
ioc_info->ioc_profile.ioc_service_entries; k++) 4377 if (ioc_info->ioc_serv[k].se_timeout_id) { 4378 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4379 "ioc_info->ioc_serv[k].se_timeout_id = %x", 4380 k, ioc_info->ioc_serv[k].se_timeout_id); 4381 if (untimeout(ioc_info->ioc_serv[k]. 4382 se_timeout_id) == -1) { 4383 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4384 "untimeout se_timeout_id %d " 4385 "failed", k); 4386 } 4387 ioc_info->ioc_serv[k].se_timeout_id = 0; 4388 } 4389 mutex_exit(&ibdm.ibdm_mutex); 4390 ibdm_ibnex_free_ioc_list(ioc_info); 4391 4392 mutex_enter(&ibdm.ibdm_mutex); 4393 while (ibdm.ibdm_busy & IBDM_BUSY) 4394 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4395 ibdm.ibdm_busy |= IBDM_BUSY; 4396 mutex_exit(&ibdm.ibdm_mutex); 4397 4398 ibdm_probe_ioc(iou, ioc_guid, 1); 4399 4400 /* 4401 * Skip if gl_reprobe_flag is set, this will be 4402 * a re-inserted / new GID, for which notifications 4403 * have already been send. 4404 */ 4405 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 4406 gid_info = gid_info->gl_next) { 4407 uint8_t ii, niocs; 4408 ibdm_ioc_info_t *ioc; 4409 4410 if (gid_info->gl_iou == NULL) 4411 continue; 4412 4413 if (gid_info->gl_reprobe_flag) { 4414 gid_info->gl_reprobe_flag = 0; 4415 continue; 4416 } 4417 4418 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 4419 for (ii = 0; ii < niocs; ii++) { 4420 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii); 4421 if (ioc->ioc_profile.ioc_guid == ioc_guid) { 4422 mutex_enter(&ibdm.ibdm_mutex); 4423 ibdm_reprobe_update_port_srv(ioc, 4424 gid_info); 4425 mutex_exit(&ibdm.ibdm_mutex); 4426 } 4427 } 4428 } 4429 mutex_enter(&ibdm.ibdm_mutex); 4430 ibdm.ibdm_busy &= ~IBDM_BUSY; 4431 cv_broadcast(&ibdm.ibdm_busy_cv); 4432 mutex_exit(&ibdm.ibdm_mutex); 4433 4434 ioc_info = ibdm_ibnex_get_ioc_info(ioc_guid); 4435 } 4436 return (ioc_info); 4437 } 4438 4439 4440 /* 4441 * ibdm_ibnex_get_ioc_info() 4442 * Returns pointer to ibdm_ioc_info_t if it finds 4443 * matching record for the ioc_guid, otherwise NULL 4444 * is returned 4445 */ 4446 ibdm_ioc_info_t * 4447 ibdm_ibnex_get_ioc_info(ib_guid_t ioc_guid) 4448 { 4449 int ii; 4450 ibdm_ioc_info_t *ioc = NULL, *tmp = NULL; 4451 ibdm_dp_gidinfo_t *gid_list; 4452 ib_dm_io_unitinfo_t *iou; 4453 4454 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_ioc_info: GUID %llx", ioc_guid); 4455 4456 mutex_enter(&ibdm.ibdm_mutex); 4457 while (ibdm.ibdm_busy & IBDM_BUSY) 4458 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4459 ibdm.ibdm_busy |= IBDM_BUSY; 4460 4461 gid_list = ibdm.ibdm_dp_gidlist_head; 4462 while (gid_list) { 4463 mutex_enter(&gid_list->gl_mutex); 4464 if (gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) { 4465 mutex_exit(&gid_list->gl_mutex); 4466 gid_list = gid_list->gl_next; 4467 continue; 4468 } 4469 if (gid_list->gl_iou == NULL) { 4470 IBTF_DPRINTF_L2("ibdm", 4471 "\tget_ioc_info: No IOU info"); 4472 mutex_exit(&gid_list->gl_mutex); 4473 gid_list = gid_list->gl_next; 4474 continue; 4475 } 4476 iou = &gid_list->gl_iou->iou_info; 4477 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 4478 tmp = IBDM_GIDINFO2IOCINFO(gid_list, ii); 4479 if ((tmp->ioc_profile.ioc_guid == ioc_guid) && 4480 (tmp->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS)) { 4481 ioc = ibdm_dup_ioc_info(tmp, gid_list); 4482 mutex_exit(&gid_list->gl_mutex); 4483 ibdm.ibdm_busy &= ~IBDM_BUSY; 4484 cv_broadcast(&ibdm.ibdm_busy_cv); 4485 mutex_exit(&ibdm.ibdm_mutex); 4486 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_info: End"); 4487 return (ioc); 4488 } 4489 } 4490 if (ii == iou->iou_num_ctrl_slots) 4491 ioc = NULL; 4492 4493 
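		/*
		 * No matching IOC on this IOU; drop the per-GID lock and
		 * continue the search with the next GID on the list.
		 */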
mutex_exit(&gid_list->gl_mutex); 4494 gid_list = gid_list->gl_next; 4495 } 4496 4497 ibdm.ibdm_busy &= ~IBDM_BUSY; 4498 cv_broadcast(&ibdm.ibdm_busy_cv); 4499 mutex_exit(&ibdm.ibdm_mutex); 4500 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_info: failure End"); 4501 return (ioc); 4502 } 4503 4504 4505 /* 4506 * ibdm_ibnex_get_ioc_count() 4507 * Returns number of ibdm_ioc_info_t it finds 4508 */ 4509 int 4510 ibdm_ibnex_get_ioc_count(void) 4511 { 4512 int count = 0, k; 4513 ibdm_ioc_info_t *ioc; 4514 ibdm_dp_gidinfo_t *gid_list; 4515 4516 mutex_enter(&ibdm.ibdm_mutex); 4517 ibdm_sweep_fabric(0); 4518 4519 while (ibdm.ibdm_busy & IBDM_BUSY) 4520 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4521 ibdm.ibdm_busy |= IBDM_BUSY; 4522 4523 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 4524 gid_list = gid_list->gl_next) { 4525 mutex_enter(&gid_list->gl_mutex); 4526 if ((gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) || 4527 (gid_list->gl_iou == NULL)) { 4528 mutex_exit(&gid_list->gl_mutex); 4529 continue; 4530 } 4531 for (k = 0; k < gid_list->gl_iou->iou_info.iou_num_ctrl_slots; 4532 k++) { 4533 ioc = IBDM_GIDINFO2IOCINFO(gid_list, k); 4534 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) 4535 ++count; 4536 } 4537 mutex_exit(&gid_list->gl_mutex); 4538 } 4539 ibdm.ibdm_busy &= ~IBDM_BUSY; 4540 cv_broadcast(&ibdm.ibdm_busy_cv); 4541 mutex_exit(&ibdm.ibdm_mutex); 4542 4543 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_count: count = %d", count); 4544 return (count); 4545 } 4546 4547 4548 /* 4549 * ibdm_ibnex_get_ioc_list() 4550 * Returns information about all the IOCs present on the fabric. 4551 * Reprobes the IOCs and the GID list if list_flag is set to REPROBE_ALL. 4552 * Does not sweep fabric if DONOT_PROBE is set 4553 */ 4554 ibdm_ioc_info_t * 4555 ibdm_ibnex_get_ioc_list(ibdm_ibnex_get_ioclist_mtd_t list_flag) 4556 { 4557 int ii; 4558 ibdm_ioc_info_t *ioc_list = NULL, *tmp, *ioc; 4559 ibdm_dp_gidinfo_t *gid_list; 4560 ib_dm_io_unitinfo_t *iou; 4561 4562 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_list: Enter"); 4563 4564 mutex_enter(&ibdm.ibdm_mutex); 4565 if (list_flag != IBDM_IBNEX_DONOT_PROBE) 4566 ibdm_sweep_fabric(list_flag == IBDM_IBNEX_REPROBE_ALL); 4567 4568 while (ibdm.ibdm_busy & IBDM_BUSY) 4569 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4570 ibdm.ibdm_busy |= IBDM_BUSY; 4571 4572 gid_list = ibdm.ibdm_dp_gidlist_head; 4573 while (gid_list) { 4574 mutex_enter(&gid_list->gl_mutex); 4575 if (gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) { 4576 mutex_exit(&gid_list->gl_mutex); 4577 gid_list = gid_list->gl_next; 4578 continue; 4579 } 4580 if (gid_list->gl_iou == NULL) { 4581 IBTF_DPRINTF_L2("ibdm", 4582 "\tget_ioc_list: No IOU info"); 4583 mutex_exit(&gid_list->gl_mutex); 4584 gid_list = gid_list->gl_next; 4585 continue; 4586 } 4587 iou = &gid_list->gl_iou->iou_info; 4588 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 4589 ioc = IBDM_GIDINFO2IOCINFO(gid_list, ii); 4590 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) { 4591 tmp = ibdm_dup_ioc_info(ioc, gid_list); 4592 tmp->ioc_next = ioc_list; 4593 ioc_list = tmp; 4594 } 4595 } 4596 mutex_exit(&gid_list->gl_mutex); 4597 gid_list = gid_list->gl_next; 4598 } 4599 ibdm.ibdm_busy &= ~IBDM_BUSY; 4600 cv_broadcast(&ibdm.ibdm_busy_cv); 4601 mutex_exit(&ibdm.ibdm_mutex); 4602 4603 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_list: End"); 4604 return (ioc_list); 4605 } 4606 4607 /* 4608 * ibdm_dup_ioc_info() 4609 * Duplicate the IOC information and return the IOC 4610 * information. 
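 *	The returned copy (and the port GID / HCA lists attached to it by
 *	ibdm_update_ioc_port_gidlist()) is meant to be released through
 *	ibdm_ibnex_free_ioc_list().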
4611  */
4612 static ibdm_ioc_info_t *
4613 ibdm_dup_ioc_info(ibdm_ioc_info_t *in_ioc, ibdm_dp_gidinfo_t *gid_list)
4614 {
4615 	ibdm_ioc_info_t	*out_ioc;
4616 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*out_ioc));
4617 
4618 	out_ioc = kmem_alloc(sizeof (ibdm_ioc_info_t), KM_SLEEP);
4619 	bcopy(in_ioc, out_ioc, sizeof (ibdm_ioc_info_t));
4620 	ibdm_update_ioc_port_gidlist(out_ioc, gid_list);
4621 	out_ioc->ioc_iou_dc_valid = gid_list->gl_iou->iou_dc_valid;
4622 	out_ioc->ioc_iou_diagcode = gid_list->gl_iou->iou_diagcode;
4623 
4624 	return (out_ioc);
4625 }
4626 
4627 
4628 /*
4629  * ibdm_ibnex_free_ioc_list()
4630  *	Deallocate memory for the IOC list structure
4631  */
4632 void
4633 ibdm_ibnex_free_ioc_list(ibdm_ioc_info_t *ioc)
4634 {
4635 	ibdm_ioc_info_t *temp;
4636 
4637 	IBTF_DPRINTF_L4("ibdm", "\tibnex_free_ioc_list:");
4638 	while (ioc) {
4639 		temp = ioc;
4640 		ioc = ioc->ioc_next;
4641 		kmem_free(temp->ioc_gid_list,
4642 		    (sizeof (ibdm_gid_t) * temp->ioc_nportgids));
4643 		if (temp->ioc_hca_list)
4644 			ibdm_ibnex_free_hca_list(temp->ioc_hca_list);
4645 		kmem_free(temp, sizeof (ibdm_ioc_info_t));
4646 	}
4647 }
4648 
4649 
4650 /*
4651  * ibdm_ibnex_update_pkey_tbls
4652  *	Updates the DM P_Key database.
4653  *	NOTE: Two cases are handled here: P_Key being added or removed.
4654  *
4655  *	Arguments	: NONE
4656  *	Return Values	: NONE
4657  */
4658 void
4659 ibdm_ibnex_update_pkey_tbls(void)
4660 {
4661 	int		h, pp, pidx;
4662 	uint_t		nports;
4663 	uint_t		size;
4664 	ib_pkey_t	new_pkey;
4665 	ib_pkey_t	*orig_pkey;
4666 	ibdm_hca_list_t	*hca_list;
4667 	ibdm_port_attr_t	*port;
4668 	ibt_hca_portinfo_t	*pinfop;
4669 
4670 	IBTF_DPRINTF_L4("ibdm", "\tibnex_update_pkey_tbls:");
4671 
4672 	mutex_enter(&ibdm.ibdm_hl_mutex);
4673 	hca_list = ibdm.ibdm_hca_list_head;
4674 
4675 	for (h = 0; h < ibdm.ibdm_hca_count; h++) {
4676 
4677 		/* This updates P_Key Tables for all ports of this HCA */
4678 		(void) ibt_query_hca_ports(hca_list->hl_hca_hdl, 0, &pinfop,
4679 		    &nports, &size);
4680 
4681 		/* number of ports shouldn't have changed */
4682 		ASSERT(nports == hca_list->hl_nports);
4683 
4684 		for (pp = 0; pp < hca_list->hl_nports; pp++) {
4685 			port = &hca_list->hl_port_attr[pp];
4686 
4687 			/*
4688 			 * First figure out the P_Keys from IBTL.
4689 			 * Three things could have happened:
4690 			 *	New P_Keys added
4691 			 *	Existing P_Keys removed
4692 			 *	Both of the above two
4693 			 *
4694 			 * Loop through the P_Key Indices and check if a
4695 			 * given P_Key_Ix matches that of the one seen by
4696 			 * IBDM. If they match, no action is needed.
4697 			 *
4698 			 * If they don't match:
4699 			 *	1. if orig_pkey is invalid and new_pkey is valid
4700 			 *	   ---> add new_pkey to DM database
4701 			 *	2. if orig_pkey is valid and new_pkey is invalid
4702 			 *	   ---> remove orig_pkey from DM database
4703 			 *	3. if orig_pkey and new_pkey are both valid:
4704 			 *	   ---> remove orig_pkey from DM database
4705 			 *	   ---> add new_pkey to DM database
4706 			 *	4. if orig_pkey and new_pkey are both invalid:
4707 			 *	   ---> do nothing; just update the stored DM database entry.
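			 *
			 * For example, if pa_pkey_tbl[pidx].pt_pkey held
			 * 0xFFFF and IBTL now reports a value that
			 * IBDM_INVALID_PKEY() rejects, case 2 applies: the
			 * IBMF state for that P_Key is torn down with
			 * ibdm_port_attr_ibmf_fini() and the stored entry
			 * is overwritten.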
4708 */ 4709 4710 for (pidx = 0; pidx < port->pa_npkeys; pidx++) { 4711 new_pkey = pinfop[pp].p_pkey_tbl[pidx]; 4712 orig_pkey = &port->pa_pkey_tbl[pidx].pt_pkey; 4713 4714 /* keys match - do nothing */ 4715 if (*orig_pkey == new_pkey) 4716 continue; 4717 4718 if (IBDM_INVALID_PKEY(*orig_pkey) && 4719 !IBDM_INVALID_PKEY(new_pkey)) { 4720 /* P_Key was added */ 4721 IBTF_DPRINTF_L5("ibdm", 4722 "\tibnex_update_pkey_tbls: new " 4723 "P_Key added = 0x%x", new_pkey); 4724 *orig_pkey = new_pkey; 4725 ibdm_port_attr_ibmf_init(port, 4726 new_pkey, pp); 4727 } else if (!IBDM_INVALID_PKEY(*orig_pkey) && 4728 IBDM_INVALID_PKEY(new_pkey)) { 4729 /* P_Key was removed */ 4730 IBTF_DPRINTF_L5("ibdm", 4731 "\tibnex_update_pkey_tbls: P_Key " 4732 "removed = 0x%x", *orig_pkey); 4733 *orig_pkey = new_pkey; 4734 (void) ibdm_port_attr_ibmf_fini(port, 4735 pidx); 4736 } else if (!IBDM_INVALID_PKEY(*orig_pkey) && 4737 !IBDM_INVALID_PKEY(new_pkey)) { 4738 /* P_Key were replaced */ 4739 IBTF_DPRINTF_L5("ibdm", 4740 "\tibnex_update_pkey_tbls: P_Key " 4741 "replaced 0x%x with 0x%x", 4742 *orig_pkey, new_pkey); 4743 (void) ibdm_port_attr_ibmf_fini(port, 4744 pidx); 4745 *orig_pkey = new_pkey; 4746 ibdm_port_attr_ibmf_init(port, 4747 new_pkey, pp); 4748 } else { 4749 /* 4750 * P_Keys are invalid 4751 * set anyway to reflect if 4752 * INVALID_FULL was changed to 4753 * INVALID_LIMITED or vice-versa. 4754 */ 4755 *orig_pkey = new_pkey; 4756 } /* end of else */ 4757 4758 } /* loop of p_key index */ 4759 4760 } /* loop of #ports of HCA */ 4761 4762 ibt_free_portinfo(pinfop, size); 4763 hca_list = hca_list->hl_next; 4764 4765 } /* loop for all HCAs in the system */ 4766 4767 mutex_exit(&ibdm.ibdm_hl_mutex); 4768 } 4769 4770 4771 /* 4772 * ibdm_send_ioc_profile() 4773 * Send IOC Controller Profile request. When the request is completed 4774 * IBMF calls ibdm_process_incoming_mad routine to inform about 4775 * the completion. 4776 */ 4777 static int 4778 ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *gid_info, uint8_t ioc_no) 4779 { 4780 ibmf_msg_t *msg; 4781 ib_mad_hdr_t *hdr; 4782 ibdm_ioc_info_t *ioc_info = &(gid_info->gl_iou->iou_ioc_info[ioc_no]); 4783 ibdm_timeout_cb_args_t *cb_args; 4784 4785 IBTF_DPRINTF_L4("ibdm", "\tsend_ioc_profile: " 4786 "gid info 0x%p, ioc_no = %d", gid_info, ioc_no); 4787 4788 /* 4789 * Send command to get IOC profile. 4790 * Allocate a IBMF packet and initialize the packet. 
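	 * The controller slot is selected via the MAD AttributeModifier,
	 * which is set to ioc_no + 1 below (Device Management controller
	 * slots are numbered starting at 1).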
4791 */ 4792 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 4793 &msg) != IBMF_SUCCESS) { 4794 IBTF_DPRINTF_L4("ibdm", "\tsend_ioc_profile: pkt alloc fail"); 4795 return (IBDM_FAILURE); 4796 } 4797 4798 ibdm_alloc_send_buffers(msg); 4799 4800 mutex_enter(&gid_info->gl_mutex); 4801 ibdm_bump_transactionID(gid_info); 4802 mutex_exit(&gid_info->gl_mutex); 4803 4804 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 4805 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 4806 if (gid_info->gl_redirected == B_TRUE) { 4807 if (gid_info->gl_redirect_dlid != 0) { 4808 msg->im_local_addr.ia_remote_lid = 4809 gid_info->gl_redirect_dlid; 4810 } 4811 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 4812 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 4813 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 4814 } else { 4815 msg->im_local_addr.ia_remote_qno = 1; 4816 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 4817 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 4818 } 4819 4820 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 4821 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 4822 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 4823 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 4824 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 4825 hdr->Status = 0; 4826 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 4827 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 4828 hdr->AttributeModifier = h2b32(ioc_no + 1); 4829 4830 ioc_info->ioc_state = IBDM_IOC_STATE_REPROBE_PROGRESS; 4831 cb_args = &ioc_info->ioc_cb_args; 4832 cb_args->cb_gid_info = gid_info; 4833 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 4834 cb_args->cb_req_type = IBDM_REQ_TYPE_IOCINFO; 4835 cb_args->cb_ioc_num = ioc_no; 4836 4837 ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 4838 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 4839 4840 IBTF_DPRINTF_L5("ibdm", "\tsend_ioc_profile:" 4841 "timeout %x", ioc_info->ioc_timeout_id); 4842 4843 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg, 4844 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 4845 IBTF_DPRINTF_L2("ibdm", 4846 "\tsend_ioc_profile: msg transport failed"); 4847 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 4848 } 4849 ioc_info->ioc_state = IBDM_IOC_STATE_REPROBE_PROGRESS; 4850 return (IBDM_SUCCESS); 4851 } 4852 4853 4854 /* 4855 * ibdm_port_reachable 4856 * Sends a SA query to get the NODE record for port GUID 4857 * Returns IBDM_SUCCESS if the port GID is reachable 4858 */ 4859 static int 4860 ibdm_port_reachable(ibmf_saa_handle_t sa_hdl, ib_guid_t guid, 4861 ib_guid_t *node_guid) 4862 { 4863 sa_node_record_t req, *resp = NULL; 4864 ibmf_saa_access_args_t args; 4865 int ret; 4866 size_t length; 4867 4868 IBTF_DPRINTF_L4("ibdm", "\tport_reachable: port_guid %llx", 4869 guid); 4870 4871 bzero(&req, sizeof (sa_node_record_t)); 4872 req.NodeInfo.PortGUID = guid; 4873 4874 args.sq_attr_id = SA_NODERECORD_ATTRID; 4875 args.sq_access_type = IBMF_SAA_RETRIEVE; 4876 args.sq_component_mask = SA_NODEINFO_COMPMASK_PORTGUID; 4877 args.sq_template = &req; 4878 args.sq_callback = NULL; 4879 args.sq_callback_arg = NULL; 4880 4881 ret = ibmf_sa_access(sa_hdl, &args, 0, &length, (void **) &resp); 4882 if (ret != IBMF_SUCCESS) { 4883 IBTF_DPRINTF_L2("ibdm", "\tport_reachable:" 4884 " SA Retrieve Failed: %d", ret); 4885 return (IBDM_FAILURE); 4886 } 4887 4888 if ((resp == NULL) || (length == 0)) { 4889 IBTF_DPRINTF_L2("ibdm", "\tport_reachable: No records"); 4890 return (IBDM_FAILURE); 4891 } 4892 4893 if (node_guid != NULL) 
4894 		*node_guid = resp->NodeInfo.NodeGUID;
4895 
4896 	kmem_free(resp, length);
4897 
4898 	return (IBDM_SUCCESS);
4899 }
4900 
4901 /*
4902  * Update the gidlist for all affected IOCs when a GID becomes
4903  * available/unavailable.
4904  *
4905  * Parameters :
4906  *	gidinfo	- Incoming / Outgoing GID.
4907  *	avail_flag - 1 for GID added, 0 for GID removed.
4908  *		- (-1) : IOC gid list updated, ioc_list required.
4909  *
4910  * This function looks up the gid_info of the node GUID corresponding
4911  * to the port GID and builds the IOC list from its IOU info.
4912  */
4913 static ibdm_ioc_info_t *
4914 ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *gid_info, int avail_flag)
4915 {
4916 	ibdm_dp_gidinfo_t	*node_gid = NULL;
4917 	uint8_t	niocs, ii;
4918 	ibdm_ioc_info_t	*ioc, *ioc_list = NULL, *tmp;
4919 
4920 	IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist");
4921 
4922 	switch (avail_flag) {
4923 		case 1 :
4924 			node_gid = ibdm_check_dest_nodeguid(gid_info);
4925 			break;
4926 		case 0 :
4927 			node_gid = ibdm_handle_gid_rm(gid_info);
4928 			break;
4929 		case -1 :
4930 			node_gid = gid_info;
4931 			break;
4932 		default :
4933 			break;
4934 	}
4935 
4936 	if (node_gid == NULL) {
4937 		IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist: "
4938 		    "No node GID found, port gid 0x%p, avail_flag %d",
4939 		    gid_info, avail_flag);
4940 		return (NULL);
4941 	}
4942 
4943 	mutex_enter(&node_gid->gl_mutex);
4944 	if ((node_gid->gl_state != IBDM_GID_PROBING_COMPLETE &&
4945 	    node_gid->gl_state != IBDM_GID_PROBING_SKIPPED) ||
4946 	    node_gid->gl_iou == NULL) {
4947 		IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist "
4948 		    "gl_state %x, gl_iou %p", node_gid->gl_state,
4949 		    node_gid->gl_iou);
4950 		mutex_exit(&node_gid->gl_mutex);
4951 		return (NULL);
4952 	}
4953 
4954 	niocs = node_gid->gl_iou->iou_info.iou_num_ctrl_slots;
4955 	IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist : niocs %x",
4956 	    niocs);
4957 	for (ii = 0; ii < niocs; ii++) {
4958 		ioc = IBDM_GIDINFO2IOCINFO(node_gid, ii);
4959 		/*
4960 		 * Skip IOCs for which the probe is not complete or
4961 		 * a reprobe is in progress.
4962 		 */
4963 		if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) {
4964 			tmp = ibdm_dup_ioc_info(ioc, node_gid);
4965 			tmp->ioc_info_updated.ib_gid_prop_updated = 1;
4966 			tmp->ioc_next = ioc_list;
4967 			ioc_list = tmp;
4968 		}
4969 	}
4970 	mutex_exit(&node_gid->gl_mutex);
4971 
4972 	IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist : return %p",
4973 	    ioc_list);
4974 	return (ioc_list);
4975 }
4976 
4977 /*
4978  * ibdm_saa_event_cb :
4979  *	Event handling which does *not* require ibdm_hl_mutex to be
4980  *	held is executed in the same thread. This is to prevent
4981  *	deadlocks with HCA port down notifications, which hold the
4982  *	ibdm_hl_mutex.
4983  *
4984  *	The GID_AVAILABLE event is handled here. A taskq is spawned to
4985  *	handle GID_UNAVAILABLE.
4986  *
4987  *	A new mutex ibdm_ibnex_mutex has been introduced to protect
4988  *	ibnex_callback. This has been done to prevent any possible
4989  *	deadlock (described above) while handling GID_AVAILABLE.
4990  *
4991  *	IBMF calls the event callback for an HCA port. The SA handle
4992  *	for this port remains valid until the callback returns, so
4993  *	IBDM calling into IBMF with this SA handle is safe here.
4994  *
4995  *	IBDM will additionally check (SA handle != NULL) before
4996  *	calling IBMF.
4997 */ 4998 /*ARGSUSED*/ 4999 static void 5000 ibdm_saa_event_cb(ibmf_saa_handle_t ibmf_saa_handle, 5001 ibmf_saa_subnet_event_t ibmf_saa_event, 5002 ibmf_saa_event_details_t *event_details, void *callback_arg) 5003 { 5004 ibdm_saa_event_arg_t *event_arg; 5005 ib_gid_t sgid, dgid; 5006 ibdm_port_attr_t *hca_port; 5007 ibdm_dp_gidinfo_t *gid_info, *node_gid_info = NULL; 5008 ib_guid_t nodeguid; 5009 5010 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*event_arg)); 5011 5012 hca_port = (ibdm_port_attr_t *)callback_arg; 5013 5014 IBTF_DPRINTF_L4("ibdm", "\tsaa_event_cb(%x, %x, %x, %x)\n", 5015 ibmf_saa_handle, ibmf_saa_event, event_details, 5016 callback_arg); 5017 #ifdef DEBUG 5018 if (ibdm_ignore_saa_event) 5019 return; 5020 #endif 5021 5022 if (ibmf_saa_event == IBMF_SAA_EVENT_GID_AVAILABLE) { 5023 /* 5024 * Ensure no other probe / sweep fabric is in 5025 * progress. 5026 */ 5027 mutex_enter(&ibdm.ibdm_mutex); 5028 while (ibdm.ibdm_busy & IBDM_BUSY) 5029 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5030 ibdm.ibdm_busy |= IBDM_BUSY; 5031 mutex_exit(&ibdm.ibdm_mutex); 5032 5033 /* 5034 * If we already know about this GID, return. 5035 * GID_AVAILABLE may be reported for multiple HCA 5036 * ports. 5037 */ 5038 if ((ibdm_check_dgid(event_details->ie_gid.gid_guid, 5039 event_details->ie_gid.gid_prefix)) != NULL) { 5040 mutex_enter(&ibdm.ibdm_mutex); 5041 ibdm.ibdm_busy &= ~IBDM_BUSY; 5042 cv_broadcast(&ibdm.ibdm_busy_cv); 5043 mutex_exit(&ibdm.ibdm_mutex); 5044 return; 5045 } 5046 5047 IBTF_DPRINTF_L4("ibdm", "\tGID (prefix %x, guid %llx) " 5048 "Insertion notified", 5049 event_details->ie_gid.gid_prefix, 5050 event_details->ie_gid.gid_guid); 5051 5052 /* This is a new gid, insert it to GID list */ 5053 sgid.gid_prefix = hca_port->pa_sn_prefix; 5054 sgid.gid_guid = hca_port->pa_port_guid; 5055 dgid.gid_prefix = event_details->ie_gid.gid_prefix; 5056 dgid.gid_guid = event_details->ie_gid.gid_guid; 5057 gid_info = ibdm_create_gid_info(hca_port, sgid, dgid); 5058 if (gid_info == NULL) { 5059 IBTF_DPRINTF_L4("ibdm", "\tGID_AVAILABLE: " 5060 "create_gid_info returned NULL"); 5061 mutex_enter(&ibdm.ibdm_mutex); 5062 ibdm.ibdm_busy &= ~IBDM_BUSY; 5063 cv_broadcast(&ibdm.ibdm_busy_cv); 5064 mutex_exit(&ibdm.ibdm_mutex); 5065 return; 5066 } 5067 mutex_enter(&gid_info->gl_mutex); 5068 gid_info->gl_state = IBDM_GID_PROBING_SKIPPED; 5069 mutex_exit(&gid_info->gl_mutex); 5070 5071 /* Get the node GUID */ 5072 if (ibdm_port_reachable(ibmf_saa_handle, dgid.gid_guid, 5073 &nodeguid) != IBDM_SUCCESS) { 5074 /* 5075 * Set the state to PROBE_NOT_DONE for the 5076 * next sweep to probe it 5077 */ 5078 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_taskq: " 5079 "Skipping GID : port GUID not found"); 5080 mutex_enter(&gid_info->gl_mutex); 5081 gid_info->gl_state = IBDM_GID_PROBE_NOT_DONE; 5082 mutex_exit(&gid_info->gl_mutex); 5083 mutex_enter(&ibdm.ibdm_mutex); 5084 ibdm.ibdm_busy &= ~IBDM_BUSY; 5085 cv_broadcast(&ibdm.ibdm_busy_cv); 5086 mutex_exit(&ibdm.ibdm_mutex); 5087 return; 5088 } 5089 5090 gid_info->gl_nodeguid = nodeguid; 5091 gid_info->gl_portguid = dgid.gid_guid; 5092 5093 /* 5094 * Get the gid info with the same node GUID. 
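	 * (i.e. another port of the same IOU that has already been
	 * probed), so that a multi-port IOU is not probed again for
	 * each of its port GIDs.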
5095 */ 5096 mutex_enter(&ibdm.ibdm_mutex); 5097 node_gid_info = ibdm.ibdm_dp_gidlist_head; 5098 while (node_gid_info) { 5099 if (node_gid_info->gl_nodeguid == 5100 gid_info->gl_nodeguid && 5101 node_gid_info->gl_iou != NULL) { 5102 break; 5103 } 5104 node_gid_info = node_gid_info->gl_next; 5105 } 5106 mutex_exit(&ibdm.ibdm_mutex); 5107 5108 /* 5109 * Handling a new GID requires filling of gl_hca_list. 5110 * This require ibdm hca_list to be parsed and hence 5111 * holding the ibdm_hl_mutex. Spawning a new thread to 5112 * handle this. 5113 */ 5114 if (node_gid_info == NULL) { 5115 if (taskq_dispatch(system_taskq, 5116 ibdm_saa_handle_new_gid, (void *)gid_info, 5117 TQ_NOSLEEP) == NULL) { 5118 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: " 5119 "new_gid taskq_dispatch failed"); 5120 return; 5121 } 5122 } 5123 5124 mutex_enter(&ibdm.ibdm_mutex); 5125 ibdm.ibdm_busy &= ~IBDM_BUSY; 5126 cv_broadcast(&ibdm.ibdm_busy_cv); 5127 mutex_exit(&ibdm.ibdm_mutex); 5128 return; 5129 } 5130 5131 if (ibmf_saa_event != IBMF_SAA_EVENT_GID_UNAVAILABLE) 5132 return; 5133 5134 event_arg = (ibdm_saa_event_arg_t *)kmem_alloc( 5135 sizeof (ibdm_saa_event_arg_t), KM_SLEEP); 5136 event_arg->ibmf_saa_handle = ibmf_saa_handle; 5137 event_arg->ibmf_saa_event = ibmf_saa_event; 5138 bcopy(event_details, &event_arg->event_details, 5139 sizeof (ibmf_saa_event_details_t)); 5140 event_arg->callback_arg = callback_arg; 5141 5142 if (taskq_dispatch(system_taskq, ibdm_saa_event_taskq, 5143 (void *)event_arg, TQ_NOSLEEP) == NULL) { 5144 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: " 5145 "taskq_dispatch failed"); 5146 ibdm_free_saa_event_arg(event_arg); 5147 return; 5148 } 5149 } 5150 5151 /* 5152 * Handle a new GID discovered by GID_AVAILABLE saa event. 5153 */ 5154 void 5155 ibdm_saa_handle_new_gid(void *arg) 5156 { 5157 ibdm_dp_gidinfo_t *gid_info; 5158 ibdm_hca_list_t *hca_list = NULL; 5159 ibdm_port_attr_t *port = NULL; 5160 ibdm_ioc_info_t *ioc_list = NULL; 5161 5162 IBTF_DPRINTF_L4(ibdm_string, "\tsaa_handle_new_gid(%p)", arg); 5163 5164 gid_info = (ibdm_dp_gidinfo_t *)arg; 5165 5166 /* 5167 * Ensure that no other sweep / probe has completed 5168 * probing this gid. 5169 */ 5170 mutex_enter(&gid_info->gl_mutex); 5171 if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) { 5172 mutex_exit(&gid_info->gl_mutex); 5173 return; 5174 } 5175 mutex_exit(&gid_info->gl_mutex); 5176 5177 /* 5178 * Parse HCAs to fill gl_hca_list 5179 */ 5180 mutex_enter(&ibdm.ibdm_hl_mutex); 5181 for (ibdm_get_next_port(&hca_list, &port, 1); port; 5182 ibdm_get_next_port(&hca_list, &port, 1)) { 5183 if (ibdm_port_reachable(port->pa_sa_hdl, 5184 gid_info->gl_portguid, NULL) == 5185 IBDM_SUCCESS) { 5186 ibdm_addto_glhcalist(gid_info, hca_list); 5187 } 5188 } 5189 mutex_exit(&ibdm.ibdm_hl_mutex); 5190 5191 /* 5192 * Ensure no other probe / sweep fabric is in 5193 * progress. 
5194  */
5195 	mutex_enter(&ibdm.ibdm_mutex);
5196 	while (ibdm.ibdm_busy & IBDM_BUSY)
5197 		cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
5198 	ibdm.ibdm_busy |= IBDM_BUSY;
5199 	mutex_exit(&ibdm.ibdm_mutex);
5200 
5201 	/*
5202 	 * This is a new IOU: probe it to check for new IOCs.
5203 	 */
5204 	IBTF_DPRINTF_L4(ibdm_string, "\tsaa_handle_new_gid: "
5205 	    "new GID : probing");
5206 	mutex_enter(&ibdm.ibdm_mutex);
5207 	ibdm.ibdm_ngid_probes_in_progress++;
5208 	mutex_exit(&ibdm.ibdm_mutex);
5209 	mutex_enter(&gid_info->gl_mutex);
5210 	gid_info->gl_reprobe_flag = 0;
5211 	gid_info->gl_state = IBDM_GID_PROBE_NOT_DONE;
5212 	mutex_exit(&gid_info->gl_mutex);
5213 	ibdm_probe_gid_thread((void *)gid_info);
5214 
5215 	mutex_enter(&ibdm.ibdm_mutex);
5216 	ibdm_wait_probe_completion();
5217 	mutex_exit(&ibdm.ibdm_mutex);
5218 
5219 	if (gid_info->gl_iou == NULL) {
5220 		mutex_enter(&ibdm.ibdm_mutex);
5221 		ibdm.ibdm_busy &= ~IBDM_BUSY;
5222 		cv_broadcast(&ibdm.ibdm_busy_cv);
5223 		mutex_exit(&ibdm.ibdm_mutex);
5224 		return;
5225 	}
5226 
5227 	/*
5228 	 * Update the GID list in all IOCs affected by this GID.
5229 	 */
5230 	ioc_list = ibdm_update_ioc_gidlist(gid_info, 1);
5231 
5232 	/*
5233 	 * Pass on the IOCs with updated GIDs to IBnexus.
5234 	 */
5235 	if (ioc_list) {
5236 		mutex_enter(&ibdm.ibdm_ibnex_mutex);
5237 		if (ibdm.ibdm_ibnex_callback != NULL) {
5238 			(*ibdm.ibdm_ibnex_callback)((void *)ioc_list,
5239 			    IBDM_EVENT_IOC_PROP_UPDATE);
5240 		}
5241 		mutex_exit(&ibdm.ibdm_ibnex_mutex);
5242 	}
5243 
5244 	mutex_enter(&ibdm.ibdm_mutex);
5245 	ibdm.ibdm_busy &= ~IBDM_BUSY;
5246 	cv_broadcast(&ibdm.ibdm_busy_cv);
5247 	mutex_exit(&ibdm.ibdm_mutex);
5248 }
5249 
5250 /*
5251  * ibdm_saa_event_taskq :
5252  *	GID_UNAVAILABLE event handling requires ibdm_hl_mutex to be
5253  *	held. The GID_UNAVAILABLE handling is therefore done in a taskq
5254  *	to prevent deadlocks with HCA port down notifications, which
5255  *	hold ibdm_hl_mutex.
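 *
 *	The event callback copies the event details into an
 *	ibdm_saa_event_arg_t; this taskq function owns that copy and
 *	frees it with ibdm_free_saa_event_arg() on every return path.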
5256 */ 5257 void 5258 ibdm_saa_event_taskq(void *arg) 5259 { 5260 ibdm_saa_event_arg_t *event_arg; 5261 ibmf_saa_handle_t ibmf_saa_handle; 5262 ibmf_saa_subnet_event_t ibmf_saa_event; 5263 ibmf_saa_event_details_t *event_details; 5264 void *callback_arg; 5265 5266 ibdm_dp_gidinfo_t *gid_info; 5267 ibdm_port_attr_t *hca_port, *port = NULL; 5268 ibdm_hca_list_t *hca_list = NULL; 5269 int sa_handle_valid = 0; 5270 ibdm_ioc_info_t *ioc_list = NULL; 5271 5272 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*event_arg)); 5273 5274 event_arg = (ibdm_saa_event_arg_t *)arg; 5275 ibmf_saa_handle = event_arg->ibmf_saa_handle; 5276 ibmf_saa_event = event_arg->ibmf_saa_event; 5277 event_details = &event_arg->event_details; 5278 callback_arg = event_arg->callback_arg; 5279 5280 ASSERT(callback_arg != NULL); 5281 ASSERT(ibmf_saa_event == IBMF_SAA_EVENT_GID_UNAVAILABLE); 5282 IBTF_DPRINTF_L4("ibdm", "\tsaa_event_taskq(%x, %x, %x, %x)", 5283 ibmf_saa_handle, ibmf_saa_event, event_details, 5284 callback_arg); 5285 5286 hca_port = (ibdm_port_attr_t *)callback_arg; 5287 5288 /* Check if the port_attr is still valid */ 5289 mutex_enter(&ibdm.ibdm_hl_mutex); 5290 for (ibdm_get_next_port(&hca_list, &port, 0); port; 5291 ibdm_get_next_port(&hca_list, &port, 0)) { 5292 if (port == hca_port && port->pa_port_guid == 5293 hca_port->pa_port_guid) { 5294 if (ibmf_saa_handle == hca_port->pa_sa_hdl) 5295 sa_handle_valid = 1; 5296 break; 5297 } 5298 } 5299 mutex_exit(&ibdm.ibdm_hl_mutex); 5300 if (sa_handle_valid == 0) { 5301 ibdm_free_saa_event_arg(event_arg); 5302 return; 5303 } 5304 5305 if (hca_port && (hca_port->pa_sa_hdl == NULL || 5306 ibmf_saa_handle != hca_port->pa_sa_hdl)) { 5307 ibdm_free_saa_event_arg(event_arg); 5308 return; 5309 } 5310 hca_list = NULL; 5311 port = NULL; 5312 5313 /* 5314 * Check if the GID is visible to other HCA ports. 5315 * Return if so. 5316 */ 5317 mutex_enter(&ibdm.ibdm_hl_mutex); 5318 for (ibdm_get_next_port(&hca_list, &port, 1); port; 5319 ibdm_get_next_port(&hca_list, &port, 1)) { 5320 if (ibdm_port_reachable(port->pa_sa_hdl, 5321 event_details->ie_gid.gid_guid, NULL) == 5322 IBDM_SUCCESS) { 5323 mutex_exit(&ibdm.ibdm_hl_mutex); 5324 ibdm_free_saa_event_arg(event_arg); 5325 return; 5326 } 5327 } 5328 mutex_exit(&ibdm.ibdm_hl_mutex); 5329 5330 /* 5331 * Ensure no other probe / sweep fabric is in 5332 * progress. 5333 */ 5334 mutex_enter(&ibdm.ibdm_mutex); 5335 while (ibdm.ibdm_busy & IBDM_BUSY) 5336 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5337 ibdm.ibdm_busy |= IBDM_BUSY; 5338 mutex_exit(&ibdm.ibdm_mutex); 5339 5340 /* 5341 * If this GID is no longer in GID list, return 5342 * GID_UNAVAILABLE may be reported for multiple HCA 5343 * ports. 
5344  */
5345 	mutex_enter(&ibdm.ibdm_mutex);
5346 	gid_info = ibdm.ibdm_dp_gidlist_head;
5347 	while (gid_info) {
5348 		if (gid_info->gl_portguid ==
5349 		    event_details->ie_gid.gid_guid) {
5350 			break;
5351 		}
5352 		gid_info = gid_info->gl_next;
5353 	}
5354 	mutex_exit(&ibdm.ibdm_mutex);
5355 	if (gid_info == NULL) {
5356 		mutex_enter(&ibdm.ibdm_mutex);
5357 		ibdm.ibdm_busy &= ~IBDM_BUSY;
5358 		cv_broadcast(&ibdm.ibdm_busy_cv);
5359 		mutex_exit(&ibdm.ibdm_mutex);
5360 		ibdm_free_saa_event_arg(event_arg);
5361 		return;
5362 	}
5363 
5364 	IBTF_DPRINTF_L4("ibdm", "\tGID (prefix %x, guid %llx) "
5365 	    "Unavailable notification",
5366 	    event_details->ie_gid.gid_prefix,
5367 	    event_details->ie_gid.gid_guid);
5368 
5369 	/*
5370 	 * Update the GID list in all IOCs affected by this GID.
5371 	 */
5372 	if (gid_info->gl_state == IBDM_GID_PROBING_SKIPPED ||
5373 	    gid_info->gl_state == IBDM_GID_PROBING_COMPLETE)
5374 		ioc_list = ibdm_update_ioc_gidlist(gid_info, 0);
5375 
5376 	/*
5377 	 * Remove the GID from the global GID list.
5378 	 * Handle the case where all port GIDs for an
5379 	 * IOU have been hot-removed. Check both gid_info
5380 	 * & ioc_info when checking ngids.
5381 	 */
5382 	mutex_enter(&ibdm.ibdm_mutex);
5383 	if (gid_info->gl_iou != NULL && gid_info->gl_ngids == 0) {
5384 		mutex_enter(&gid_info->gl_mutex);
5385 		(void) ibdm_free_iou_info(gid_info);
5386 		mutex_exit(&gid_info->gl_mutex);
5387 	}
5388 	if (gid_info->gl_prev != NULL)
5389 		gid_info->gl_prev->gl_next = gid_info->gl_next;
5390 	if (gid_info->gl_next != NULL)
5391 		gid_info->gl_next->gl_prev = gid_info->gl_prev;
5392 
5393 	if (gid_info == ibdm.ibdm_dp_gidlist_head)
5394 		ibdm.ibdm_dp_gidlist_head = gid_info->gl_next;
5395 	if (gid_info == ibdm.ibdm_dp_gidlist_tail)
5396 		ibdm.ibdm_dp_gidlist_tail = gid_info->gl_prev;
5397 	ibdm.ibdm_ngids--;
5398 
5399 	ibdm.ibdm_busy &= ~IBDM_BUSY;
5400 	cv_broadcast(&ibdm.ibdm_busy_cv);
5401 	mutex_exit(&ibdm.ibdm_mutex);
5402 
5403 	mutex_destroy(&gid_info->gl_mutex);
5404 	kmem_free(gid_info, sizeof (ibdm_dp_gidinfo_t));
5405 
5406 	/*
5407 	 * Pass on the IOCs with updated GIDs to IBnexus.
5408 	 */
5409 	if (ioc_list) {
5410 		IBTF_DPRINTF_L4("ibdm", "\tGID_UNAVAILABLE "
5411 		    "IOC_PROP_UPDATE for %p\n", ioc_list);
5412 		mutex_enter(&ibdm.ibdm_ibnex_mutex);
5413 		if (ibdm.ibdm_ibnex_callback != NULL) {
5414 			(*ibdm.ibdm_ibnex_callback)((void *)
5415 			    ioc_list, IBDM_EVENT_IOC_PROP_UPDATE);
5416 		}
5417 		mutex_exit(&ibdm.ibdm_ibnex_mutex);
5418 	}
5419 
5420 	ibdm_free_saa_event_arg(event_arg);
5421 }
5422 
5423 
5424 static int
5425 ibdm_cmp_gid_list(ibdm_gid_t *new, ibdm_gid_t *prev)
5426 {
5427 	ibdm_gid_t	*scan_new, *scan_prev;
5428 	int	cmp_failed = 0;
5429 
5430 	ASSERT(new != NULL);
5431 	ASSERT(prev != NULL);
5432 
5433 	/*
5434 	 * Search for each new gid anywhere in the prev GID list.
5435 	 * Note that the gid list could have been re-ordered.
5436 	 */
5437 	for (scan_new = new; scan_new; scan_new = scan_new->gid_next) {
5438 		for (scan_prev = prev, cmp_failed = 1; scan_prev;
5439 		    scan_prev = scan_prev->gid_next) {
5440 			if (scan_prev->gid_dgid_hi == scan_new->gid_dgid_hi &&
5441 			    scan_prev->gid_dgid_lo == scan_new->gid_dgid_lo) {
5442 				cmp_failed = 0;
5443 				break;
5444 			}
5445 		}
5446 
5447 		if (cmp_failed)
5448 			return (1);
5449 	}
5450 	return (0);
5451 }
5452 
5453 /*
5454  * This is always called from a single thread.
5455  * This function updates the gid_list and serv_list of the IOC.
5456  * The current gid_list lives partly in ioc_info_t (only the port
5457  * GUIDs for which the probe is done) and partly in gidinfo_t (the
5458  * other port GIDs); the GIDs in both locations are used for the comparison.
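 *
 * The result is recorded in ioc->ioc_info_updated (the
 * ib_srv_prop_updated and ib_gid_prop_updated bits), which tells
 * IB nexus which IOC properties need to be refreshed.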
 */
static void
ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *ioc, ibdm_dp_gidinfo_t *gidinfo)
{
	ibdm_gid_t *cur_gid_list;
	uint_t cur_nportgids;

	ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));

	ioc->ioc_info_updated.ib_prop_updated = 0;

	/* Current GID list in gid_info only */
	cur_gid_list = gidinfo->gl_gid;
	cur_nportgids = gidinfo->gl_ngids;

	/*
	 * Service entry names and IDs are not compared currently.
	 * This may require change.
	 */
	if (ioc->ioc_prev_serv_cnt != ioc->ioc_profile.ioc_service_entries)
		ioc->ioc_info_updated.ib_srv_prop_updated = 1;

	if (ioc->ioc_prev_nportgids != cur_nportgids ||
	    ioc->ioc_prev_gid_list == NULL || cur_gid_list == NULL) {
		ioc->ioc_info_updated.ib_gid_prop_updated = 1;
	} else if (ibdm_cmp_gid_list(ioc->ioc_prev_gid_list, cur_gid_list)) {
		ioc->ioc_info_updated.ib_gid_prop_updated = 1;
	}

	/* Zero out previous entries */
	ibdm_free_gid_list(ioc->ioc_prev_gid_list);
	if (ioc->ioc_prev_serv)
		kmem_free(ioc->ioc_prev_serv, ioc->ioc_prev_serv_cnt *
		    sizeof (ibdm_srvents_info_t));
	ioc->ioc_prev_serv_cnt = 0;
	ioc->ioc_prev_nportgids = 0;
	ioc->ioc_prev_serv = NULL;
	ioc->ioc_prev_gid_list = NULL;
}

/*
 * Handle GID removal. This returns the gid_info of a GID with the same
 * node GUID, if one is found. For a GID with IOU information, the same
 * gid_info is returned if no gid_info with the same node_guid is found.
 */
static ibdm_dp_gidinfo_t *
ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *rm_gid)
{
	ibdm_dp_gidinfo_t *gid_list;

	IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm(0x%p)", rm_gid);

	if (rm_gid->gl_iou == NULL) {
		IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm NO iou");
		/*
		 * Search for a GID with the same node_guid and
		 * gl_iou != NULL
		 */
		for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list;
		    gid_list = gid_list->gl_next) {
			if (gid_list->gl_iou != NULL && (gid_list->gl_nodeguid
			    == rm_gid->gl_nodeguid))
				break;
		}

		if (gid_list)
			ibdm_rmfrom_glgid_list(gid_list, rm_gid);

		IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm ret %p", gid_list);
		return (gid_list);
	} else {
		/*
		 * Search for a GID with the same node_guid and
		 * gl_iou == NULL
		 */
		IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm with iou");
		for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list;
		    gid_list = gid_list->gl_next) {
			if (gid_list->gl_iou == NULL && (gid_list->gl_nodeguid
			    == rm_gid->gl_nodeguid))
				break;
		}

		if (gid_list) {
			/*
			 * Copy the following fields from rm_gid :
			 *	1. gl_state
			 *	2. gl_iou
			 *	3. gl_gid & gl_ngids
			 *
			 * Note : This function is synchronized by the
			 *	ibdm_busy flag.
			 *
			 * Note : Redirect info is initialized if
			 *	any MADs for the GID fail.
			 */
			IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm "
			    "copying info to GID with gl_iou != NULL");
			gid_list->gl_state = rm_gid->gl_state;
			gid_list->gl_iou = rm_gid->gl_iou;
			gid_list->gl_gid = rm_gid->gl_gid;
			gid_list->gl_ngids = rm_gid->gl_ngids;

			/* Remove the GID from the gl_gid list */
			ibdm_rmfrom_glgid_list(gid_list, rm_gid);
		} else {
			/*
			 * Handle the case where all GIDs to the IOU have
			 * been removed.
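			 * (rm_gid is unlinked from its own gl_gid list
			 * and returned to the caller with its IOU
			 * information left intact.)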
 */
			IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm 0 GID "
			    "to IOU");

			ibdm_rmfrom_glgid_list(rm_gid, rm_gid);
			return (rm_gid);
		}
		IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm ret %p", gid_list);
		return (gid_list);
	}
}

static void
ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *gid_info,
    ibdm_dp_gidinfo_t *rm_gid)
{
	ibdm_gid_t *tmp, *prev;

	IBTF_DPRINTF_L4("ibdm", "\trmfrom_glgid (%p, %p)",
	    gid_info, rm_gid);

	for (tmp = gid_info->gl_gid, prev = NULL; tmp; ) {
		if (tmp->gid_dgid_hi == rm_gid->gl_dgid_hi &&
		    tmp->gid_dgid_lo == rm_gid->gl_dgid_lo) {
			if (prev == NULL)
				gid_info->gl_gid = tmp->gid_next;
			else
				prev->gid_next = tmp->gid_next;

			kmem_free(tmp, sizeof (ibdm_gid_t));
			gid_info->gl_ngids--;
			break;
		} else {
			prev = tmp;
			tmp = tmp->gid_next;
		}
	}
}

static void
ibdm_addto_gidlist(ibdm_gid_t **src_ptr, ibdm_gid_t *dest)
{
	ibdm_gid_t *head = NULL, *new, *tail;

	/* First copy the destination */
	for (; dest; dest = dest->gid_next) {
		new = kmem_zalloc(sizeof (ibdm_gid_t), KM_SLEEP);
		new->gid_dgid_hi = dest->gid_dgid_hi;
		new->gid_dgid_lo = dest->gid_dgid_lo;
		new->gid_next = head;
		head = new;
	}

	/* Insert this into the source */
	if (*src_ptr == NULL)
		*src_ptr = head;
	else {
		for (tail = *src_ptr; tail->gid_next != NULL;
		    tail = tail->gid_next)
			;

		tail->gid_next = head;
	}
}

static void
ibdm_free_gid_list(ibdm_gid_t *head)
{
	ibdm_gid_t *delete;

	for (delete = head; delete; ) {
		head = delete->gid_next;
		kmem_free(delete, sizeof (ibdm_gid_t));
		delete = head;
	}
}

/*
 * This function rescans the DM capable GIDs (gl_state is
 * IBDM_GID_PROBING_COMPLETE or IBDM_GID_PROBING_SKIPPED). It
 * basically checks whether the DM capable GID is still reachable. If
 * not, this is handled the same way as GID_UNAVAILABLE,
 * except that notifications are not sent to IBnexus.
 *
 * This function also initializes the ioc_prev_list for
 * a particular IOC (when called from probe_ioc, with
 * ioc_guidp != NULL) or for all IOCs on the gid (called from
 * sweep_fabric, ioc_guidp == NULL).
 */
static void
ibdm_rescan_gidlist(ib_guid_t *ioc_guidp)
{
	ibdm_dp_gidinfo_t *gid_info, *tmp;
	int ii, niocs, found;
	ibdm_hca_list_t *hca_list = NULL;
	ibdm_port_attr_t *port = NULL;
	ibdm_ioc_info_t *ioc_list;

	for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; ) {
		found = 0;
		if (gid_info->gl_state != IBDM_GID_PROBING_SKIPPED &&
		    gid_info->gl_state != IBDM_GID_PROBING_COMPLETE) {
			gid_info = gid_info->gl_next;
			continue;
		}

		/*
		 * Check if the GID is visible to any HCA ports.
		 * Move on to the next GID if so.
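		 * (Reachability is checked with ibdm_port_reachable()
		 * against each active local HCA port, under
		 * ibdm_hl_mutex.)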
		 */
		mutex_enter(&ibdm.ibdm_hl_mutex);
		for (ibdm_get_next_port(&hca_list, &port, 1); port;
		    ibdm_get_next_port(&hca_list, &port, 1)) {
			if (ibdm_port_reachable(port->pa_sa_hdl,
			    gid_info->gl_dgid_lo, NULL) == IBDM_SUCCESS) {
				found = 1;
				break;
			}
		}
		mutex_exit(&ibdm.ibdm_hl_mutex);

		if (found) {
			if (gid_info->gl_iou == NULL) {
				gid_info = gid_info->gl_next;
				continue;
			}

			/* Initialize the ioc_prev_gid_list */
			niocs =
			    gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
			for (ii = 0; ii < niocs; ii++) {
				ioc_list = IBDM_GIDINFO2IOCINFO(gid_info, ii);

				if (ioc_guidp == NULL || (*ioc_guidp ==
				    ioc_list->ioc_profile.ioc_guid)) {
					/* Add info of GIDs in gid_info also */
					ibdm_addto_gidlist(
					    &ioc_list->ioc_prev_gid_list,
					    gid_info->gl_gid);
					ioc_list->ioc_prev_nportgids =
					    gid_info->gl_ngids;
				}
			}
			gid_info = gid_info->gl_next;
			continue;
		}

		IBTF_DPRINTF_L4("ibdm", "\trescan_gidlist "
		    "deleted port GUID %llx",
		    gid_info->gl_dgid_lo);

		/*
		 * Update the GID list in all IOCs affected by this
		 */
		ioc_list = ibdm_update_ioc_gidlist(gid_info, 0);

		/*
		 * Remove the GID from the global GID list.
		 * Handle the case where all port GIDs for an
		 * IOU have been hot-removed.
		 */
		mutex_enter(&ibdm.ibdm_mutex);
		if (gid_info->gl_iou != NULL && gid_info->gl_ngids == 0) {
			mutex_enter(&gid_info->gl_mutex);
			(void) ibdm_free_iou_info(gid_info);
			mutex_exit(&gid_info->gl_mutex);
		}
		tmp = gid_info->gl_next;
		if (gid_info->gl_prev != NULL)
			gid_info->gl_prev->gl_next = gid_info->gl_next;
		if (gid_info->gl_next != NULL)
			gid_info->gl_next->gl_prev = gid_info->gl_prev;

		if (gid_info == ibdm.ibdm_dp_gidlist_head)
			ibdm.ibdm_dp_gidlist_head = gid_info->gl_next;
		if (gid_info == ibdm.ibdm_dp_gidlist_tail)
			ibdm.ibdm_dp_gidlist_tail = gid_info->gl_prev;
		ibdm.ibdm_ngids--;

		mutex_destroy(&gid_info->gl_mutex);
		kmem_free(gid_info, sizeof (ibdm_dp_gidinfo_t));
		gid_info = tmp;

		mutex_exit(&ibdm.ibdm_mutex);

		/*
		 * Pass on the IOCs with updated GIDs to IBnexus
		 */
		if (ioc_list) {
			IBTF_DPRINTF_L4("ibdm", "\trescan_gidlist "
			    "IOC_PROP_UPDATE for %p\n", ioc_list);
			mutex_enter(&ibdm.ibdm_ibnex_mutex);
			if (ibdm.ibdm_ibnex_callback != NULL) {
				(*ibdm.ibdm_ibnex_callback)((void *)
				    ioc_list, IBDM_EVENT_IOC_PROP_UPDATE);
			}
			mutex_exit(&ibdm.ibdm_ibnex_mutex);
		}
	}
}

/*
 * This function notifies IBnexus of the IOCs on this GID.
 * Notification is done only for GIDs with gl_reprobe_flag set.
 * The flag is set when an IOC probe / fabric sweep
 * probes a GID starting from the ClassPortInfo.
 *
 * IBnexus will have information about a reconnected IOC
 * if it had probed it before. If this is a new IOC,
 * IBnexus ignores the notification.
 *
 * This function should be called with no locks held.
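 * (It takes ibdm_ibnex_mutex internally before invoking the
 * registered IBnexus callback.)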
 */
static void
ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *gid_info)
{
	ibdm_ioc_info_t *ioc_list;

	if (gid_info->gl_reprobe_flag == 0 ||
	    gid_info->gl_iou == NULL)
		return;

	ioc_list = ibdm_update_ioc_gidlist(gid_info, -1);

	/*
	 * Pass on the IOCs with updated GIDs to IBnexus
	 */
	if (ioc_list) {
		mutex_enter(&ibdm.ibdm_ibnex_mutex);
		if (ibdm.ibdm_ibnex_callback != NULL) {
			(*ibdm.ibdm_ibnex_callback)((void *)ioc_list,
			    IBDM_EVENT_IOC_PROP_UPDATE);
		}
		mutex_exit(&ibdm.ibdm_ibnex_mutex);
	}
}


static void
ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *arg)
{
	if (arg != NULL)
		kmem_free(arg, sizeof (ibdm_saa_event_arg_t));
}

/*
 * This function parses the list of HCAs and HCA ports
 * to return the port_attr of the next HCA port. A port
 * connected to the IB fabric (port state active) is returned
 * only if connect_flag is set.
 */
static void
ibdm_get_next_port(ibdm_hca_list_t **inp_hcap,
    ibdm_port_attr_t **inp_portp, int connect_flag)
{
	int ii;
	ibdm_port_attr_t *port, *next_port = NULL;
	ibdm_port_attr_t *inp_port;
	ibdm_hca_list_t *hca_list;
	int found = 0;

	ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));
	IBTF_DPRINTF_L4(ibdm_string, "\tget_next_port(%p, %p, %x)",
	    inp_hcap, inp_portp, connect_flag);

	hca_list = *inp_hcap;
	inp_port = *inp_portp;

	if (hca_list == NULL)
		hca_list = ibdm.ibdm_hca_list_head;

	for (; hca_list; hca_list = hca_list->hl_next) {
		for (ii = 0; ii < hca_list->hl_nports; ii++) {
			port = &hca_list->hl_port_attr[ii];

			/*
			 * inp_port != NULL;
			 * skip till we find the matching port
			 */
			if (inp_port && !found) {
				if (inp_port == port)
					found = 1;
				continue;
			}

			if (!connect_flag) {
				next_port = port;
				break;
			}

			if (port->pa_sa_hdl == NULL)
				ibdm_initialize_port(port);
			if (port->pa_sa_hdl == NULL)
				(void) ibdm_fini_port(port);
			else if (next_port == NULL &&
			    port->pa_sa_hdl != NULL &&
			    port->pa_state == IBT_PORT_ACTIVE) {
				next_port = port;
				break;
			}
		}

		if (next_port)
			break;
	}

	IBTF_DPRINTF_L4(ibdm_string, "\tget_next_port : "
	    "returns hca_list %p port %p", hca_list, next_port);
	*inp_hcap = hca_list;
	*inp_portp = next_port;
}

static void
ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *nodegid, ibdm_dp_gidinfo_t *addgid)
{
	ibdm_gid_t *tmp;

	tmp = kmem_zalloc(sizeof (ibdm_gid_t), KM_SLEEP);
	tmp->gid_dgid_hi = addgid->gl_dgid_hi;
	tmp->gid_dgid_lo = addgid->gl_dgid_lo;

	mutex_enter(&nodegid->gl_mutex);
	tmp->gid_next = nodegid->gl_gid;
	nodegid->gl_gid = tmp;
	nodegid->gl_ngids++;
	mutex_exit(&nodegid->gl_mutex);
}

static void
ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *gid_info,
    ibdm_hca_list_t *hca)
{
	ibdm_hca_list_t *head, *prev = NULL, *temp;

	IBTF_DPRINTF_L4(ibdm_string, "\taddto_glhcalist(%p, %p) "
	    ": gl_hca_list %p", gid_info, hca, gid_info->gl_hca_list);
	ASSERT(!MUTEX_HELD(&gid_info->gl_mutex));
	mutex_enter(&gid_info->gl_mutex);
	head = gid_info->gl_hca_list;
	if (head == NULL) {
		head = ibdm_dup_hca_attr(hca);
		head->hl_next = NULL;
		gid_info->gl_hca_list = head;
		mutex_exit(&gid_info->gl_mutex);
		IBTF_DPRINTF_L4(ibdm_string, "\tadd_to_glhcalist: "
		    "gid %p, gl_hca_list %p", gid_info,
		    gid_info->gl_hca_list);
		return;
	}

	/* Check if it is already in the list */
	while (head) {
		if (head->hl_hca_guid == hca->hl_hca_guid) {
			mutex_exit(&gid_info->gl_mutex);
			IBTF_DPRINTF_L4(ibdm_string,
			    "\taddto_glhcalist : gid %x hca %x dup",
			    gid_info, hca);
			return;
		}
		prev = head;
		head = head->hl_next;
	}

	/* Add this HCA to gl_hca_list */
	temp = ibdm_dup_hca_attr(hca);
	temp->hl_next = NULL;
	prev->hl_next = temp;

	mutex_exit(&gid_info->gl_mutex);

	IBTF_DPRINTF_L4(ibdm_string, "\tadd_to_glhcalist: "
	    "gid %p, gl_hca_list %p", gid_info, gid_info->gl_hca_list);
}

static void
ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *gid_info)
{
	ASSERT(!MUTEX_HELD(&gid_info->gl_mutex));
	ASSERT(!MUTEX_HELD(&ibdm.ibdm_mutex));

	mutex_enter(&gid_info->gl_mutex);
	if (gid_info->gl_hca_list)
		ibdm_ibnex_free_hca_list(gid_info->gl_hca_list);
	gid_info->gl_hca_list = NULL;
	mutex_exit(&gid_info->gl_mutex);
}


static void
ibdm_reset_all_dgids(ibmf_saa_handle_t port_sa_hdl)
{
	IBTF_DPRINTF_L4(ibdm_string, "\treset_all_dgids(%X)",
	    port_sa_hdl);

	ASSERT(!MUTEX_HELD(&ibdm.ibdm_mutex));
	ASSERT(!MUTEX_HELD(&ibdm.ibdm_hl_mutex));

	/* Check : not busy with another probe / sweep */
	mutex_enter(&ibdm.ibdm_mutex);
	if ((ibdm.ibdm_busy & IBDM_BUSY) == 0) {
		ibdm_dp_gidinfo_t *gid_info;

		ibdm.ibdm_busy |= IBDM_BUSY;
		mutex_exit(&ibdm.ibdm_mutex);

		/*
		 * Check if any GID is using the SA & IBMF handles
		 * of the HCA port going down. Reset the ibdm_dp_gidinfo_t
		 * using another HCA port which can reach the GID.
		 * This is needed for DM capable GIDs only; there is
		 * no need to do this for others.
		 *
		 * Delete the GID if no alternate HCA port to reach
		 * it is found.
		 */
		for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; ) {
			ibdm_dp_gidinfo_t *tmp;

			IBTF_DPRINTF_L4(ibdm_string, "\tevent_hdlr "
			    "checking gidinfo %p", gid_info);

			if (gid_info->gl_sa_hdl == port_sa_hdl) {
				IBTF_DPRINTF_L3(ibdm_string,
				    "\tevent_hdlr: down HCA port hdl "
				    "matches gid %p", gid_info);

				/*
				 * The non-DM GIDs can come back
				 * with a new subnet prefix when
				 * the HCA port comes up again. To
				 * avoid issues, delete non-DM
				 * capable GIDs if the GID was
				 * discovered using the HCA port
				 * going down. This is ensured by
				 * setting gl_disconnected to 1.
				 */
				if (gid_info->gl_nodeguid != 0)
					gid_info->gl_disconnected = 1;
				else
					ibdm_reset_gidinfo(gid_info);

				if (gid_info->gl_disconnected) {
					IBTF_DPRINTF_L3(ibdm_string,
					    "\tevent_hdlr: deleting"
					    " gid %p", gid_info);
					tmp = gid_info;
					gid_info = gid_info->gl_next;
					ibdm_delete_gidinfo(tmp);
				} else
					gid_info = gid_info->gl_next;
			} else
				gid_info = gid_info->gl_next;
		}

		mutex_enter(&ibdm.ibdm_mutex);
		ibdm.ibdm_busy &= ~IBDM_BUSY;
		cv_signal(&ibdm.ibdm_busy_cv);
	}
	mutex_exit(&ibdm.ibdm_mutex);
}

static void
ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *gidinfo)
{
	ibdm_hca_list_t *hca_list = NULL;
	ibdm_port_attr_t *port = NULL;
	int gid_reinited = 0;
	sa_node_record_t *nr, *tmp;
	sa_portinfo_record_t *pi;
	size_t nr_len = 0, pi_len = 0;
	size_t path_len;
	ib_gid_t sgid, dgid;
	int ret, ii, nrecords;
	sa_path_record_t *path;
	uint8_t npaths = 1;

	IBTF_DPRINTF_L4(ibdm_string, "\treset_gidinfo(%p)", gidinfo);

	/*
	 * Get the list of all the ports reachable from the known local
	 * HCA ports which are active.
	 */
	mutex_enter(&ibdm.ibdm_hl_mutex);
	for (ibdm_get_next_port(&hca_list, &port, 1); port;
	    ibdm_get_next_port(&hca_list, &port, 1)) {

		/*
		 * Get the path and re-populate the gidinfo.
		 * Getting the path is the same as in probe_ioc.
		 * Init the gid info as in ibdm_create_gid_info().
		 */
		nr = ibdm_get_node_records(port->pa_sa_hdl, &nr_len,
		    gidinfo->gl_nodeguid);
		if (nr == NULL) {
			IBTF_DPRINTF_L4(ibdm_string,
			    "\treset_gidinfo : no records");
			continue;
		}

		nrecords = (nr_len / sizeof (sa_node_record_t));
		for (tmp = nr, ii = 0; (ii < nrecords); ii++, tmp++) {
			if (tmp->NodeInfo.PortGUID == gidinfo->gl_portguid)
				break;
		}

		if (ii == nrecords) {
			IBTF_DPRINTF_L4(ibdm_string,
			    "\treset_gidinfo : no record for portguid");
			kmem_free(nr, nr_len);
			continue;
		}

		pi = ibdm_get_portinfo(port->pa_sa_hdl, &pi_len, tmp->LID);
		if (pi == NULL) {
			IBTF_DPRINTF_L4(ibdm_string,
			    "\treset_gidinfo : no portinfo");
			kmem_free(nr, nr_len);
			continue;
		}

		sgid.gid_prefix = port->pa_sn_prefix;
		sgid.gid_guid = port->pa_port_guid;
		dgid.gid_prefix = pi->PortInfo.GidPrefix;
		dgid.gid_guid = tmp->NodeInfo.PortGUID;

		ret = ibmf_saa_gid_to_pathrecords(port->pa_sa_hdl, sgid, dgid,
		    IBMF_SAA_PKEY_WC, 0, B_TRUE, &npaths, 0, &path_len, &path);

		if ((ret != IBMF_SUCCESS) || path == NULL) {
			IBTF_DPRINTF_L4(ibdm_string,
			    "\treset_gidinfo : no paths");
			kmem_free(pi, pi_len);
			kmem_free(nr, nr_len);
			continue;
		}

		gidinfo->gl_dgid_hi = path->DGID.gid_prefix;
		gidinfo->gl_dgid_lo = path->DGID.gid_guid;
		gidinfo->gl_sgid_hi = path->SGID.gid_prefix;
		gidinfo->gl_sgid_lo = path->SGID.gid_guid;
		gidinfo->gl_p_key = path->P_Key;
		gidinfo->gl_sa_hdl = port->pa_sa_hdl;
		gidinfo->gl_ibmf_hdl = port->pa_ibmf_hdl;
		gidinfo->gl_slid = path->SLID;
		gidinfo->gl_dlid = path->DLID;

		/* Reset redirect info; the next MAD sets it if redirected */
		gidinfo->gl_redirected = 0;

		gid_reinited = 1;

		kmem_free(path, path_len);
		kmem_free(pi, pi_len);
		kmem_free(nr, nr_len);
		break;
	}
	mutex_exit(&ibdm.ibdm_hl_mutex);

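	/*
	 * No alternate active HCA port could reach this GID; mark it
	 * disconnected so that the caller deletes it.
	 */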
	if (!gid_reinited)
		gidinfo->gl_disconnected = 1;
}

static void
ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *gidinfo)
{
	ibdm_ioc_info_t *ioc_list;
	int in_gidlist = 0;

	/*
	 * Check if gidinfo has been inserted into the
	 * ibdm_dp_gidlist_head list. gl_next or gl_prev
	 * is != NULL if gidinfo is in the list.
	 */
	if (gidinfo->gl_prev != NULL ||
	    gidinfo->gl_next != NULL ||
	    ibdm.ibdm_dp_gidlist_head == gidinfo)
		in_gidlist = 1;

	ioc_list = ibdm_update_ioc_gidlist(gidinfo, 0);

	/*
	 * Remove the GID from the global GID list.
	 * Handle the case where all port GIDs for an
	 * IOU have been hot-removed.
	 */
	mutex_enter(&ibdm.ibdm_mutex);
	if (gidinfo->gl_iou != NULL && gidinfo->gl_ngids == 0) {
		mutex_enter(&gidinfo->gl_mutex);
		(void) ibdm_free_iou_info(gidinfo);
		mutex_exit(&gidinfo->gl_mutex);
	}

	/* Delete gl_hca_list */
	mutex_exit(&ibdm.ibdm_mutex);
	ibdm_delete_glhca_list(gidinfo);
	mutex_enter(&ibdm.ibdm_mutex);

	if (in_gidlist) {
		if (gidinfo->gl_prev != NULL)
			gidinfo->gl_prev->gl_next = gidinfo->gl_next;
		if (gidinfo->gl_next != NULL)
			gidinfo->gl_next->gl_prev = gidinfo->gl_prev;

		if (gidinfo == ibdm.ibdm_dp_gidlist_head)
			ibdm.ibdm_dp_gidlist_head = gidinfo->gl_next;
		if (gidinfo == ibdm.ibdm_dp_gidlist_tail)
			ibdm.ibdm_dp_gidlist_tail = gidinfo->gl_prev;
		ibdm.ibdm_ngids--;
	}
	mutex_exit(&ibdm.ibdm_mutex);

	mutex_destroy(&gidinfo->gl_mutex);
	kmem_free(gidinfo, sizeof (ibdm_dp_gidinfo_t));

	/*
	 * Pass on the IOCs with updated GIDs to IBnexus
	 */
	if (ioc_list) {
		IBTF_DPRINTF_L4("ibdm", "\tdelete_gidinfo "
		    "IOC_PROP_UPDATE for %p\n", ioc_list);
		mutex_enter(&ibdm.ibdm_ibnex_mutex);
		if (ibdm.ibdm_ibnex_callback != NULL) {
			(*ibdm.ibdm_ibnex_callback)((void *)
			    ioc_list, IBDM_EVENT_IOC_PROP_UPDATE);
		}
		mutex_exit(&ibdm.ibdm_ibnex_mutex);
	}
}


static void
ibdm_fill_srv_attr_mod(ib_mad_hdr_t *hdr, ibdm_timeout_cb_args_t *cb_args)
{
	uint32_t attr_mod;

	attr_mod = (cb_args->cb_ioc_num + 1) << 16;
	attr_mod |= cb_args->cb_srvents_start;
	attr_mod |= (cb_args->cb_srvents_end) << 8;
	hdr->AttributeModifier = h2b32(attr_mod);
}

static void
ibdm_bump_transactionID(ibdm_dp_gidinfo_t *gid_info)
{
	ASSERT(MUTEX_HELD(&gid_info->gl_mutex));
	gid_info->gl_transactionID++;
	if (gid_info->gl_transactionID == gid_info->gl_max_transactionID) {
		IBTF_DPRINTF_L4(ibdm_string,
		    "\tbump_transactionID(%p), wrapup", gid_info);
		gid_info->gl_transactionID = gid_info->gl_min_transactionID;
	}
}

/* For debugging purposes only */
#ifdef DEBUG
void
ibdm_dump_ibmf_msg(ibmf_msg_t *ibmf_msg, int flag)
{
	ib_mad_hdr_t *mad_hdr;

	IBTF_DPRINTF_L4("ibdm", "\t\t(IBMF_PKT): Local address info");
	IBTF_DPRINTF_L4("ibdm", "\t\t ------------------");

	IBTF_DPRINTF_L4("ibdm", "\tLocal Lid : 0x%x\tRemote Lid : 0x%x"
	    " Remote Qp : 0x%x", ibmf_msg->im_local_addr.ia_local_lid,
	    ibmf_msg->im_local_addr.ia_remote_lid,
	    ibmf_msg->im_local_addr.ia_remote_qno);
	IBTF_DPRINTF_L4("ibdm", "\tP_key : 0x%x\tQ_key : 0x%x",
	    ibmf_msg->im_local_addr.ia_p_key, ibmf_msg->im_local_addr.ia_q_key);

	if (flag)
		mad_hdr = (ib_mad_hdr_t *)IBDM_OUT_IBMFMSG_MADHDR(ibmf_msg);
	else
		mad_hdr =
		    IBDM_IN_IBMFMSG_MADHDR(ibmf_msg);

	IBTF_DPRINTF_L4("ibdm", "\t\t MAD Header info");
	IBTF_DPRINTF_L4("ibdm", "\t\t ---------------");

	IBTF_DPRINTF_L4("ibdm", "\tBase version : 0x%x"
	    "\tMgmt Class : 0x%x", mad_hdr->BaseVersion, mad_hdr->MgmtClass);
	IBTF_DPRINTF_L4("ibdm", "\tClass version : 0x%x"
	    "\tR Method : 0x%x",
	    mad_hdr->ClassVersion, mad_hdr->R_Method);
	IBTF_DPRINTF_L4("ibdm", "\tMAD Status : 0x%x"
	    "\tTransaction ID : 0x%llx",
	    mad_hdr->Status, mad_hdr->TransactionID);
	IBTF_DPRINTF_L4("ibdm", "\t Attribute ID : 0x%x"
	    "\tAttribute Modifier : 0x%lx",
	    mad_hdr->AttributeID, mad_hdr->AttributeModifier);
}

void
ibdm_dump_path_info(sa_path_record_t *path)
{
	IBTF_DPRINTF_L4("ibdm", "\t\t Path information");
	IBTF_DPRINTF_L4("ibdm", "\t\t ----------------");

	IBTF_DPRINTF_L4("ibdm", "\t DGID hi : %llx\tDGID lo : %llx",
	    path->DGID.gid_prefix, path->DGID.gid_guid);
	IBTF_DPRINTF_L4("ibdm", "\t SGID hi : %llx\tSGID lo : %llx",
	    path->SGID.gid_prefix, path->SGID.gid_guid);
	IBTF_DPRINTF_L4("ibdm", "\t SLID : %x\tDLID : %x",
	    path->SLID, path->DLID);
	IBTF_DPRINTF_L4("ibdm", "\t P Key : %x", path->P_Key);
}


void
ibdm_dump_classportinfo(ibdm_mad_classportinfo_t *classportinfo)
{
	IBTF_DPRINTF_L4("ibdm", "\t\t CLASSPORT INFO");
	IBTF_DPRINTF_L4("ibdm", "\t\t --------------");

	IBTF_DPRINTF_L4("ibdm", "\t Response Time Value : 0x%x",
	    ((b2h32(classportinfo->RespTimeValue)) & 0x1F));

	IBTF_DPRINTF_L4("ibdm", "\t Redirected QP : 0x%x",
	    (b2h32(classportinfo->RedirectQP)));
	IBTF_DPRINTF_L4("ibdm", "\t Redirected P KEY : 0x%x",
	    b2h16(classportinfo->RedirectP_Key));
	IBTF_DPRINTF_L4("ibdm", "\t Redirected Q KEY : 0x%x",
	    b2h16(classportinfo->RedirectQ_Key));
	IBTF_DPRINTF_L4("ibdm", "\t Redirected GID hi : 0x%x",
	    b2h64(classportinfo->RedirectGID_hi));
	IBTF_DPRINTF_L4("ibdm", "\t Redirected GID lo : 0x%x",
	    b2h64(classportinfo->RedirectGID_lo));
}


void
ibdm_dump_iounitinfo(ib_dm_io_unitinfo_t *iou_info)
{
	IBTF_DPRINTF_L4("ibdm", "\t\t I/O UnitInfo");
	IBTF_DPRINTF_L4("ibdm", "\t\t ------------");

	IBTF_DPRINTF_L4("ibdm", "\tChange ID : 0x%x",
	    b2h16(iou_info->iou_changeid));
	IBTF_DPRINTF_L4("ibdm", "\t#of ctrl slots : %d",
	    iou_info->iou_num_ctrl_slots);
	IBTF_DPRINTF_L4("ibdm", "\tIOU flag : 0x%x",
	    iou_info->iou_flag);
	IBTF_DPRINTF_L4("ibdm", "\tControl list byte 0 : 0x%x",
	    iou_info->iou_ctrl_list[0]);
	IBTF_DPRINTF_L4("ibdm", "\tControl list byte 1 : 0x%x",
	    iou_info->iou_ctrl_list[1]);
	IBTF_DPRINTF_L4("ibdm", "\tControl list byte 2 : 0x%x",
	    iou_info->iou_ctrl_list[2]);
}


void
ibdm_dump_ioc_profile(ib_dm_ioc_ctrl_profile_t *ioc)
{
	IBTF_DPRINTF_L4("ibdm", "\t\t IOC Controller Profile");
	IBTF_DPRINTF_L4("ibdm", "\t\t ----------------------");

	IBTF_DPRINTF_L4("ibdm", "\tIOC Guid : %llx", ioc->ioc_guid);
	IBTF_DPRINTF_L4("ibdm", "\tVendorID : 0x%x", ioc->ioc_vendorid);
	IBTF_DPRINTF_L4("ibdm", "\tDevice Id : 0x%x", ioc->ioc_deviceid);
	IBTF_DPRINTF_L4("ibdm", "\tDevice Ver : 0x%x", ioc->ioc_device_ver);
	IBTF_DPRINTF_L4("ibdm", "\tSubsys ID : 0x%x", ioc->ioc_subsys_id);
	IBTF_DPRINTF_L4("ibdm", "\tIO class : 0x%x", ioc->ioc_io_class);
	IBTF_DPRINTF_L4("ibdm", "\tIO subclass : 0x%x", ioc->ioc_io_subclass);
	IBTF_DPRINTF_L4("ibdm", "\tProtocol : 0x%x", ioc->ioc_protocol);
	IBTF_DPRINTF_L4("ibdm", "\tProtocolV : 0x%x", ioc->ioc_protocol_ver);
	IBTF_DPRINTF_L4("ibdm", "\tmsg qdepth : %d", ioc->ioc_send_msg_qdepth);
	IBTF_DPRINTF_L4("ibdm", "\trdma qdepth : %d",
	    ioc->ioc_rdma_read_qdepth);
	IBTF_DPRINTF_L4("ibdm", "\tsndmsg sz : %d", ioc->ioc_send_msg_sz);
	IBTF_DPRINTF_L4("ibdm", "\trdma xfersz : %d", ioc->ioc_rdma_xfer_sz);
	IBTF_DPRINTF_L4("ibdm", "\topcap mask : 0x%x",
	    ioc->ioc_ctrl_opcap_mask);
	IBTF_DPRINTF_L4("ibdm", "\tsrventries : %x", ioc->ioc_service_entries);
}


void
ibdm_dump_service_entries(ib_dm_srv_t *srv_ents)
{
	IBTF_DPRINTF_L4("ibdm",
	    "\thandle_srventry_mad: service id : %llx", srv_ents->srv_id);

	IBTF_DPRINTF_L4("ibdm", "\thandle_srventry_mad: "
	    "Service Name : %s", srv_ents->srv_name);
}

int ibdm_allow_sweep_fabric_timestamp = 1;

void
ibdm_dump_sweep_fabric_timestamp(int flag)
{
	static hrtime_t x;

	if (flag) {
		if (ibdm_allow_sweep_fabric_timestamp) {
			IBTF_DPRINTF_L4("ibdm", "\tTime taken to complete "
			    "sweep %lld ms", ((gethrtime() - x) / 1000000));
		}
		x = 0;
	} else
		x = gethrtime();
}
#endif