/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * ibdm.c
 *
 * This file contains the InfiniBand Device Manager (IBDM) support functions.
 * The IB nexus driver is the only client of the IBDM module.
 *
 * IBDM registers with IBTF for HCA arrival/removal notification.
 * IBDM registers with SA access to send DM MADs to discover the IOCs behind
 * the IOUs.
 *
 * The IB nexus driver registers with IBDM to find the information about the
 * HCAs and IOCs (behind the IOUs) present on the IB fabric.
 */

#include <sys/systm.h>
#include <sys/taskq.h>
#include <sys/ib/mgt/ibdm/ibdm_impl.h>
#include <sys/ib/mgt/ibmf/ibmf_impl.h>
#include <sys/modctl.h>

/* Function Prototype declarations */
static int ibdm_free_iou_info(ibdm_dp_gidinfo_t *, ibdm_iou_info_t **);
static int ibdm_fini(void);
static int ibdm_init(void);
static int ibdm_get_reachable_ports(ibdm_port_attr_t *,
		ibdm_hca_list_t *);
static ibdm_dp_gidinfo_t *ibdm_check_dgid(ib_guid_t, ib_sn_prefix_t);
static ibdm_dp_gidinfo_t *ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *);
static boolean_t ibdm_is_cisco(ib_guid_t);
static boolean_t ibdm_is_cisco_switch(ibdm_dp_gidinfo_t *);
static void ibdm_wait_cisco_probe_completion(ibdm_dp_gidinfo_t *);
static int ibdm_set_classportinfo(ibdm_dp_gidinfo_t *);
static int ibdm_send_classportinfo(ibdm_dp_gidinfo_t *);
static int ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *);
static int ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *);
static int ibdm_get_node_port_guids(ibmf_saa_handle_t, ib_lid_t,
		ib_guid_t *, ib_guid_t *);
static int ibdm_retry_command(ibdm_timeout_cb_args_t *);
static int ibdm_get_diagcode(ibdm_dp_gidinfo_t *, int);
static int ibdm_verify_mad_status(ib_mad_hdr_t *);
static int ibdm_handle_redirection(ibmf_msg_t *,
		ibdm_dp_gidinfo_t *, int *);
static void ibdm_wait_probe_completion(void);
static void ibdm_sweep_fabric(int);
static void ibdm_probe_gid_thread(void *);
static void ibdm_wakeup_probe_gid_cv(void);
static void ibdm_port_attr_ibmf_init(ibdm_port_attr_t *, ib_pkey_t, int);
static int ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *, int);
static void ibdm_update_port_attr(ibdm_port_attr_t *);
static void ibdm_handle_hca_attach(ib_guid_t);
static void ibdm_handle_srventry_mad(ibmf_msg_t *,
		ibdm_dp_gidinfo_t *, int *);
static void ibdm_ibmf_recv_cb(ibmf_handle_t, ibmf_msg_t *, void *);
static void ibdm_recv_incoming_mad(void *);
static void ibdm_process_incoming_mad(ibmf_handle_t, ibmf_msg_t *, void *);
static void
ibdm_ibmf_send_cb(ibmf_handle_t, ibmf_msg_t *, void *); 82 static void ibdm_pkt_timeout_hdlr(void *arg); 83 static void ibdm_initialize_port(ibdm_port_attr_t *); 84 static void ibdm_update_port_pkeys(ibdm_port_attr_t *port); 85 static void ibdm_handle_diagcode(ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 86 static void ibdm_probe_gid(ibdm_dp_gidinfo_t *); 87 static void ibdm_alloc_send_buffers(ibmf_msg_t *); 88 static void ibdm_free_send_buffers(ibmf_msg_t *); 89 static void ibdm_handle_hca_detach(ib_guid_t); 90 static int ibdm_fini_port(ibdm_port_attr_t *); 91 static int ibdm_uninit_hca(ibdm_hca_list_t *); 92 static void ibdm_handle_setclassportinfo(ibmf_handle_t, ibmf_msg_t *, 93 ibdm_dp_gidinfo_t *, int *); 94 static void ibdm_handle_iounitinfo(ibmf_handle_t, 95 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 96 static void ibdm_handle_ioc_profile(ibmf_handle_t, 97 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 98 static void ibdm_event_hdlr(void *, ibt_hca_hdl_t, 99 ibt_async_code_t, ibt_async_event_t *); 100 static void ibdm_handle_classportinfo(ibmf_handle_t, 101 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 102 static void ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *, 103 ibdm_dp_gidinfo_t *); 104 105 static ibdm_hca_list_t *ibdm_dup_hca_attr(ibdm_hca_list_t *); 106 static ibdm_ioc_info_t *ibdm_dup_ioc_info(ibdm_ioc_info_t *, 107 ibdm_dp_gidinfo_t *gid_list); 108 static void ibdm_probe_ioc(ib_guid_t, ib_guid_t, int); 109 static ibdm_ioc_info_t *ibdm_is_ioc_present(ib_guid_t, 110 ibdm_dp_gidinfo_t *, int *); 111 static ibdm_port_attr_t *ibdm_get_port_attr(ibt_async_event_t *, 112 ibdm_hca_list_t **); 113 static sa_node_record_t *ibdm_get_node_records(ibmf_saa_handle_t, 114 size_t *, ib_guid_t); 115 static int ibdm_get_node_record_by_port(ibmf_saa_handle_t, 116 ib_guid_t, sa_node_record_t **, size_t *); 117 static sa_portinfo_record_t *ibdm_get_portinfo(ibmf_saa_handle_t, size_t *, 118 ib_lid_t); 119 static ibdm_dp_gidinfo_t *ibdm_create_gid_info(ibdm_port_attr_t *, 120 ib_gid_t, ib_gid_t); 121 static ibdm_dp_gidinfo_t *ibdm_find_gid(ib_guid_t, ib_guid_t); 122 static int ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *, uint8_t); 123 static ibdm_ioc_info_t *ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *, int); 124 static void ibdm_saa_event_cb(ibmf_saa_handle_t, ibmf_saa_subnet_event_t, 125 ibmf_saa_event_details_t *, void *); 126 static void ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *, 127 ibdm_dp_gidinfo_t *); 128 static ibdm_dp_gidinfo_t *ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *); 129 static void ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *, 130 ibdm_dp_gidinfo_t *); 131 static void ibdm_addto_gidlist(ibdm_gid_t **, ibdm_gid_t *); 132 static void ibdm_free_gid_list(ibdm_gid_t *); 133 static void ibdm_rescan_gidlist(ib_guid_t *ioc_guid); 134 static void ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *); 135 static void ibdm_saa_event_taskq(void *); 136 static void ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *); 137 static void ibdm_get_next_port(ibdm_hca_list_t **, 138 ibdm_port_attr_t **, int); 139 static void ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *, 140 ibdm_dp_gidinfo_t *); 141 static void ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *, 142 ibdm_hca_list_t *); 143 static void ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *); 144 static void ibdm_saa_handle_new_gid(void *); 145 static void ibdm_reset_all_dgids(ibmf_saa_handle_t); 146 static void ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *); 147 static void ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *); 148 static void ibdm_fill_srv_attr_mod(ib_mad_hdr_t *, ibdm_timeout_cb_args_t 
*); 149 static void ibdm_bump_transactionID(ibdm_dp_gidinfo_t *); 150 static ibdm_ioc_info_t *ibdm_handle_prev_iou(); 151 static int ibdm_serv_cmp(ibdm_srvents_info_t *, ibdm_srvents_info_t *, 152 int); 153 static ibdm_ioc_info_t *ibdm_get_ioc_info_with_gid(ib_guid_t, 154 ibdm_dp_gidinfo_t **); 155 156 int ibdm_dft_timeout = IBDM_DFT_TIMEOUT; 157 int ibdm_dft_retry_cnt = IBDM_DFT_NRETRIES; 158 #ifdef DEBUG 159 int ibdm_ignore_saa_event = 0; 160 #endif 161 162 /* Modload support */ 163 static struct modlmisc ibdm_modlmisc = { 164 &mod_miscops, 165 "InfiniBand Device Manager" 166 }; 167 168 struct modlinkage ibdm_modlinkage = { 169 MODREV_1, 170 (void *)&ibdm_modlmisc, 171 NULL 172 }; 173 174 static ibt_clnt_modinfo_t ibdm_ibt_modinfo = { 175 IBTI_V_CURR, 176 IBT_DM, 177 ibdm_event_hdlr, 178 NULL, 179 "ibdm" 180 }; 181 182 /* Global variables */ 183 ibdm_t ibdm; 184 int ibdm_taskq_enable = IBDM_ENABLE_TASKQ_HANDLING; 185 char *ibdm_string = "ibdm"; 186 187 _NOTE(SCHEME_PROTECTS_DATA("Serialized access by cv", 188 ibdm.ibdm_dp_gidlist_head)) 189 190 /* 191 * _init 192 * Loadable module init, called before any other module. 193 * Initialize mutex 194 * Register with IBTF 195 */ 196 int 197 _init(void) 198 { 199 int err; 200 201 IBTF_DPRINTF_L4("ibdm", "\t_init: addr of ibdm %p", &ibdm); 202 203 if ((err = ibdm_init()) != IBDM_SUCCESS) { 204 IBTF_DPRINTF_L2("ibdm", "_init: ibdm_init failed 0x%x", err); 205 (void) ibdm_fini(); 206 return (DDI_FAILURE); 207 } 208 209 if ((err = mod_install(&ibdm_modlinkage)) != 0) { 210 IBTF_DPRINTF_L2("ibdm", "_init: mod_install failed 0x%x", err); 211 (void) ibdm_fini(); 212 } 213 return (err); 214 } 215 216 217 int 218 _fini(void) 219 { 220 int err; 221 222 if ((err = ibdm_fini()) != IBDM_SUCCESS) { 223 IBTF_DPRINTF_L2("ibdm", "_fini: ibdm_fini failed 0x%x", err); 224 (void) ibdm_init(); 225 return (EBUSY); 226 } 227 228 if ((err = mod_remove(&ibdm_modlinkage)) != 0) { 229 IBTF_DPRINTF_L2("ibdm", "_fini: mod_remove failed 0x%x", err); 230 (void) ibdm_init(); 231 } 232 return (err); 233 } 234 235 236 int 237 _info(struct modinfo *modinfop) 238 { 239 return (mod_info(&ibdm_modlinkage, modinfop)); 240 } 241 242 243 /* 244 * ibdm_init(): 245 * Register with IBTF 246 * Allocate memory for the HCAs 247 * Allocate minor-nodes for the HCAs 248 */ 249 static int 250 ibdm_init(void) 251 { 252 int i, hca_count; 253 ib_guid_t *hca_guids; 254 ibt_status_t status; 255 256 IBTF_DPRINTF_L4("ibdm", "\tibdm_init:"); 257 if (!(ibdm.ibdm_state & IBDM_LOCKS_ALLOCED)) { 258 mutex_init(&ibdm.ibdm_mutex, NULL, MUTEX_DEFAULT, NULL); 259 mutex_init(&ibdm.ibdm_hl_mutex, NULL, MUTEX_DEFAULT, NULL); 260 mutex_init(&ibdm.ibdm_ibnex_mutex, NULL, MUTEX_DEFAULT, NULL); 261 cv_init(&ibdm.ibdm_port_settle_cv, NULL, CV_DRIVER, NULL); 262 mutex_enter(&ibdm.ibdm_mutex); 263 ibdm.ibdm_state |= IBDM_LOCKS_ALLOCED; 264 } 265 266 if (!(ibdm.ibdm_state & IBDM_IBT_ATTACHED)) { 267 if ((status = ibt_attach(&ibdm_ibt_modinfo, NULL, NULL, 268 (void *)&ibdm.ibdm_ibt_clnt_hdl)) != IBT_SUCCESS) { 269 IBTF_DPRINTF_L2("ibdm", "ibdm_init: ibt_attach " 270 "failed %x", status); 271 mutex_exit(&ibdm.ibdm_mutex); 272 return (IBDM_FAILURE); 273 } 274 275 ibdm.ibdm_state |= IBDM_IBT_ATTACHED; 276 mutex_exit(&ibdm.ibdm_mutex); 277 } 278 279 280 if (!(ibdm.ibdm_state & IBDM_HCA_ATTACHED)) { 281 hca_count = ibt_get_hca_list(&hca_guids); 282 IBTF_DPRINTF_L4("ibdm", "ibdm_init: num_hcas = %d", hca_count); 283 for (i = 0; i < hca_count; i++) 284 (void) ibdm_handle_hca_attach(hca_guids[i]); 285 if (hca_count) 286 
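			/*
			 * Added note: the GUID array allocated by
			 * ibt_get_hca_list() above is handed back to IBTF
			 * via ibt_free_hca_list() once the HCAs have been
			 * processed.
			 */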
ibt_free_hca_list(hca_guids, hca_count); 287 288 mutex_enter(&ibdm.ibdm_mutex); 289 ibdm.ibdm_state |= IBDM_HCA_ATTACHED; 290 mutex_exit(&ibdm.ibdm_mutex); 291 } 292 293 if (!(ibdm.ibdm_state & IBDM_CVS_ALLOCED)) { 294 cv_init(&ibdm.ibdm_probe_cv, NULL, CV_DRIVER, NULL); 295 cv_init(&ibdm.ibdm_busy_cv, NULL, CV_DRIVER, NULL); 296 mutex_enter(&ibdm.ibdm_mutex); 297 ibdm.ibdm_state |= IBDM_CVS_ALLOCED; 298 mutex_exit(&ibdm.ibdm_mutex); 299 } 300 return (IBDM_SUCCESS); 301 } 302 303 304 static int 305 ibdm_free_iou_info(ibdm_dp_gidinfo_t *gid_info, ibdm_iou_info_t **ioup) 306 { 307 int ii, k, niocs; 308 size_t size; 309 ibdm_gid_t *delete, *head; 310 timeout_id_t timeout_id; 311 ibdm_ioc_info_t *ioc; 312 ibdm_iou_info_t *gl_iou = *ioup; 313 314 ASSERT(mutex_owned(&gid_info->gl_mutex)); 315 if (gl_iou == NULL) { 316 IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: No IOU"); 317 return (0); 318 } 319 320 niocs = gl_iou->iou_info.iou_num_ctrl_slots; 321 IBTF_DPRINTF_L4("ibdm", "\tfree_iou_info: gid_info = %p, niocs %d", 322 gid_info, niocs); 323 324 for (ii = 0; ii < niocs; ii++) { 325 ioc = (ibdm_ioc_info_t *)&gl_iou->iou_ioc_info[ii]; 326 327 /* handle the case where an ioc_timeout_id is scheduled */ 328 if (ioc->ioc_timeout_id) { 329 timeout_id = ioc->ioc_timeout_id; 330 ioc->ioc_timeout_id = 0; 331 mutex_exit(&gid_info->gl_mutex); 332 IBTF_DPRINTF_L5("ibdm", "free_iou_info: " 333 "ioc_timeout_id = 0x%x", timeout_id); 334 if (untimeout(timeout_id) == -1) { 335 IBTF_DPRINTF_L2("ibdm", "free_iou_info: " 336 "untimeout ioc_timeout_id failed"); 337 mutex_enter(&gid_info->gl_mutex); 338 return (-1); 339 } 340 mutex_enter(&gid_info->gl_mutex); 341 } 342 343 /* handle the case where an ioc_dc_timeout_id is scheduled */ 344 if (ioc->ioc_dc_timeout_id) { 345 timeout_id = ioc->ioc_dc_timeout_id; 346 ioc->ioc_dc_timeout_id = 0; 347 mutex_exit(&gid_info->gl_mutex); 348 IBTF_DPRINTF_L5("ibdm", "free_iou_info: " 349 "ioc_dc_timeout_id = 0x%x", timeout_id); 350 if (untimeout(timeout_id) == -1) { 351 IBTF_DPRINTF_L2("ibdm", "free_iou_info: " 352 "untimeout ioc_dc_timeout_id failed"); 353 mutex_enter(&gid_info->gl_mutex); 354 return (-1); 355 } 356 mutex_enter(&gid_info->gl_mutex); 357 } 358 359 /* handle the case where serv[k].se_timeout_id is scheduled */ 360 for (k = 0; k < ioc->ioc_profile.ioc_service_entries; k++) { 361 if (ioc->ioc_serv[k].se_timeout_id) { 362 timeout_id = ioc->ioc_serv[k].se_timeout_id; 363 ioc->ioc_serv[k].se_timeout_id = 0; 364 mutex_exit(&gid_info->gl_mutex); 365 IBTF_DPRINTF_L5("ibdm", "free_iou_info: " 366 "ioc->ioc_serv[%d].se_timeout_id = 0x%x", 367 k, timeout_id); 368 if (untimeout(timeout_id) == -1) { 369 IBTF_DPRINTF_L2("ibdm", "free_iou_info:" 370 " untimeout se_timeout_id failed"); 371 mutex_enter(&gid_info->gl_mutex); 372 return (-1); 373 } 374 mutex_enter(&gid_info->gl_mutex); 375 } 376 } 377 378 /* delete GID list in IOC */ 379 head = ioc->ioc_gid_list; 380 while (head) { 381 IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: " 382 "Deleting gid_list struct %p", head); 383 delete = head; 384 head = head->gid_next; 385 kmem_free(delete, sizeof (ibdm_gid_t)); 386 } 387 ioc->ioc_gid_list = NULL; 388 389 /* delete ioc_serv */ 390 size = ioc->ioc_profile.ioc_service_entries * 391 sizeof (ibdm_srvents_info_t); 392 if (ioc->ioc_serv && size) { 393 kmem_free(ioc->ioc_serv, size); 394 ioc->ioc_serv = NULL; 395 } 396 } 397 /* 398 * Clear the IBDM_CISCO_PROBE_DONE flag to get the IO Unit information 399 * via the switch during the probe process. 
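	 * (While IBDM_CISCO_PROBE_DONE is set, ibdm_is_cisco_switch()
	 * returns B_FALSE; clearing it here lets the next sweep redo the
	 * setclassportinfo handshake with the Cisco FC gateway.)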
400 */ 401 gid_info->gl_flag &= ~IBDM_CISCO_PROBE_DONE; 402 403 IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: deleting IOU & IOC"); 404 size = sizeof (ibdm_iou_info_t) + niocs * sizeof (ibdm_ioc_info_t); 405 kmem_free(gl_iou, size); 406 *ioup = NULL; 407 return (0); 408 } 409 410 411 /* 412 * ibdm_fini(): 413 * Un-register with IBTF 414 * De allocate memory for the GID info 415 */ 416 static int 417 ibdm_fini() 418 { 419 int ii; 420 ibdm_hca_list_t *hca_list, *temp; 421 ibdm_dp_gidinfo_t *gid_info, *tmp; 422 ibdm_gid_t *head, *delete; 423 424 IBTF_DPRINTF_L4("ibdm", "\tibdm_fini"); 425 426 mutex_enter(&ibdm.ibdm_hl_mutex); 427 if (ibdm.ibdm_state & IBDM_IBT_ATTACHED) { 428 if (ibt_detach(ibdm.ibdm_ibt_clnt_hdl) != IBT_SUCCESS) { 429 IBTF_DPRINTF_L2("ibdm", "\t_fini: ibt_detach failed"); 430 mutex_exit(&ibdm.ibdm_hl_mutex); 431 return (IBDM_FAILURE); 432 } 433 ibdm.ibdm_state &= ~IBDM_IBT_ATTACHED; 434 ibdm.ibdm_ibt_clnt_hdl = NULL; 435 } 436 437 hca_list = ibdm.ibdm_hca_list_head; 438 IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: nhcas %d", ibdm.ibdm_hca_count); 439 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 440 temp = hca_list; 441 hca_list = hca_list->hl_next; 442 IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: hca %p", temp); 443 if (ibdm_uninit_hca(temp) != IBDM_SUCCESS) { 444 IBTF_DPRINTF_L2("ibdm", "\tibdm_fini: " 445 "uninit_hca %p failed", temp); 446 mutex_exit(&ibdm.ibdm_hl_mutex); 447 return (IBDM_FAILURE); 448 } 449 } 450 mutex_exit(&ibdm.ibdm_hl_mutex); 451 452 mutex_enter(&ibdm.ibdm_mutex); 453 if (ibdm.ibdm_state & IBDM_HCA_ATTACHED) 454 ibdm.ibdm_state &= ~IBDM_HCA_ATTACHED; 455 456 gid_info = ibdm.ibdm_dp_gidlist_head; 457 while (gid_info) { 458 mutex_enter(&gid_info->gl_mutex); 459 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou); 460 mutex_exit(&gid_info->gl_mutex); 461 ibdm_delete_glhca_list(gid_info); 462 463 tmp = gid_info; 464 gid_info = gid_info->gl_next; 465 mutex_destroy(&tmp->gl_mutex); 466 head = tmp->gl_gid; 467 while (head) { 468 IBTF_DPRINTF_L4("ibdm", 469 "\tibdm_fini: Deleting gid structs"); 470 delete = head; 471 head = head->gid_next; 472 kmem_free(delete, sizeof (ibdm_gid_t)); 473 } 474 kmem_free(tmp, sizeof (ibdm_dp_gidinfo_t)); 475 } 476 mutex_exit(&ibdm.ibdm_mutex); 477 478 if (ibdm.ibdm_state & IBDM_LOCKS_ALLOCED) { 479 ibdm.ibdm_state &= ~IBDM_LOCKS_ALLOCED; 480 mutex_destroy(&ibdm.ibdm_mutex); 481 mutex_destroy(&ibdm.ibdm_hl_mutex); 482 mutex_destroy(&ibdm.ibdm_ibnex_mutex); 483 cv_destroy(&ibdm.ibdm_port_settle_cv); 484 } 485 if (ibdm.ibdm_state & IBDM_CVS_ALLOCED) { 486 ibdm.ibdm_state &= ~IBDM_CVS_ALLOCED; 487 cv_destroy(&ibdm.ibdm_probe_cv); 488 cv_destroy(&ibdm.ibdm_busy_cv); 489 } 490 return (IBDM_SUCCESS); 491 } 492 493 494 /* 495 * ibdm_event_hdlr() 496 * 497 * IBDM registers this asynchronous event handler at the time of 498 * ibt_attach. IBDM support the following async events. For other 499 * event, simply returns success. 500 * IBT_HCA_ATTACH_EVENT: 501 * Retrieves the information about all the port that are 502 * present on this HCA, allocates the port attributes 503 * structure and calls IB nexus callback routine with 504 * the port attributes structure as an input argument. 
505 * IBT_HCA_DETACH_EVENT: 506 * Retrieves the information about all the ports that are 507 * present on this HCA and calls IB nexus callback with 508 * port guid as an argument 509 * IBT_EVENT_PORT_UP: 510 * Register with IBMF and SA access 511 * Setup IBMF receive callback routine 512 * IBT_EVENT_PORT_DOWN: 513 * Un-Register with IBMF and SA access 514 * Teardown IBMF receive callback routine 515 */ 516 /*ARGSUSED*/ 517 static void 518 ibdm_event_hdlr(void *clnt_hdl, 519 ibt_hca_hdl_t hca_hdl, ibt_async_code_t code, ibt_async_event_t *event) 520 { 521 ibdm_hca_list_t *hca_list; 522 ibdm_port_attr_t *port; 523 ibmf_saa_handle_t port_sa_hdl; 524 525 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: async code 0x%x", code); 526 527 switch (code) { 528 case IBT_HCA_ATTACH_EVENT: /* New HCA registered with IBTF */ 529 ibdm_handle_hca_attach(event->ev_hca_guid); 530 break; 531 532 case IBT_HCA_DETACH_EVENT: /* HCA unregistered with IBTF */ 533 ibdm_handle_hca_detach(event->ev_hca_guid); 534 mutex_enter(&ibdm.ibdm_ibnex_mutex); 535 if (ibdm.ibdm_ibnex_callback != NULL) { 536 (*ibdm.ibdm_ibnex_callback)((void *) 537 &event->ev_hca_guid, IBDM_EVENT_HCA_REMOVED); 538 } 539 mutex_exit(&ibdm.ibdm_ibnex_mutex); 540 break; 541 542 case IBT_EVENT_PORT_UP: 543 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_UP"); 544 mutex_enter(&ibdm.ibdm_hl_mutex); 545 port = ibdm_get_port_attr(event, &hca_list); 546 if (port == NULL) { 547 IBTF_DPRINTF_L2("ibdm", 548 "\tevent_hdlr: HCA not present"); 549 mutex_exit(&ibdm.ibdm_hl_mutex); 550 break; 551 } 552 ibdm_initialize_port(port); 553 hca_list->hl_nports_active++; 554 cv_broadcast(&ibdm.ibdm_port_settle_cv); 555 mutex_exit(&ibdm.ibdm_hl_mutex); 556 break; 557 558 case IBT_ERROR_PORT_DOWN: 559 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_DOWN"); 560 mutex_enter(&ibdm.ibdm_hl_mutex); 561 port = ibdm_get_port_attr(event, &hca_list); 562 if (port == NULL) { 563 IBTF_DPRINTF_L2("ibdm", 564 "\tevent_hdlr: HCA not present"); 565 mutex_exit(&ibdm.ibdm_hl_mutex); 566 break; 567 } 568 hca_list->hl_nports_active--; 569 port_sa_hdl = port->pa_sa_hdl; 570 (void) ibdm_fini_port(port); 571 port->pa_state = IBT_PORT_DOWN; 572 cv_broadcast(&ibdm.ibdm_port_settle_cv); 573 mutex_exit(&ibdm.ibdm_hl_mutex); 574 ibdm_reset_all_dgids(port_sa_hdl); 575 break; 576 577 case IBT_PORT_CHANGE_EVENT: 578 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_CHANGE"); 579 if (event->ev_port_flags & IBT_PORT_CHANGE_PKEY) { 580 mutex_enter(&ibdm.ibdm_hl_mutex); 581 port = ibdm_get_port_attr(event, &hca_list); 582 if (port == NULL) { 583 IBTF_DPRINTF_L2("ibdm", 584 "\tevent_hdlr: HCA not present"); 585 mutex_exit(&ibdm.ibdm_hl_mutex); 586 break; 587 } 588 ibdm_update_port_pkeys(port); 589 cv_broadcast(&ibdm.ibdm_port_settle_cv); 590 mutex_exit(&ibdm.ibdm_hl_mutex); 591 } 592 break; 593 594 default: /* Ignore all other events/errors */ 595 break; 596 } 597 } 598 599 600 /* 601 * ibdm_update_port_pkeys() 602 * Update the pkey table 603 * Update the port attributes 604 */ 605 static void 606 ibdm_update_port_pkeys(ibdm_port_attr_t *port) 607 { 608 uint_t nports, size; 609 uint_t pkey_idx, opkey_idx; 610 uint16_t npkeys; 611 ibt_hca_portinfo_t *pinfop; 612 ib_pkey_t pkey; 613 ibdm_pkey_tbl_t *pkey_tbl; 614 ibdm_port_attr_t newport; 615 616 IBTF_DPRINTF_L4("ibdm", "\tupdate_port_pkeys:"); 617 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 618 619 /* Check whether the port is active */ 620 if (ibt_get_port_state(port->pa_hca_hdl, port->pa_port_num, NULL, 621 NULL) != IBT_SUCCESS) 622 return; 623 624 if 
(ibt_query_hca_ports(port->pa_hca_hdl, port->pa_port_num, 625 &pinfop, &nports, &size) != IBT_SUCCESS) { 626 /* This should not occur */ 627 port->pa_npkeys = 0; 628 port->pa_pkey_tbl = NULL; 629 return; 630 } 631 632 npkeys = pinfop->p_pkey_tbl_sz; 633 pkey_tbl = kmem_zalloc(npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP); 634 newport.pa_pkey_tbl = pkey_tbl; 635 newport.pa_ibmf_hdl = port->pa_ibmf_hdl; 636 637 for (pkey_idx = 0; pkey_idx < npkeys; pkey_idx++) { 638 pkey = pkey_tbl[pkey_idx].pt_pkey = 639 pinfop->p_pkey_tbl[pkey_idx]; 640 /* 641 * Is this pkey present in the current table ? 642 */ 643 for (opkey_idx = 0; opkey_idx < port->pa_npkeys; opkey_idx++) { 644 if (pkey == port->pa_pkey_tbl[opkey_idx].pt_pkey) { 645 pkey_tbl[pkey_idx].pt_qp_hdl = 646 port->pa_pkey_tbl[opkey_idx].pt_qp_hdl; 647 port->pa_pkey_tbl[opkey_idx].pt_qp_hdl = NULL; 648 break; 649 } 650 } 651 652 if (opkey_idx == port->pa_npkeys) { 653 pkey = pkey_tbl[pkey_idx].pt_pkey; 654 if (IBDM_INVALID_PKEY(pkey)) { 655 pkey_tbl[pkey_idx].pt_qp_hdl = NULL; 656 continue; 657 } 658 ibdm_port_attr_ibmf_init(&newport, pkey, pkey_idx); 659 } 660 } 661 662 for (opkey_idx = 0; opkey_idx < port->pa_npkeys; opkey_idx++) { 663 if (port->pa_pkey_tbl[opkey_idx].pt_qp_hdl != NULL) { 664 if (ibdm_port_attr_ibmf_fini(port, opkey_idx) != 665 IBDM_SUCCESS) { 666 IBTF_DPRINTF_L2("ibdm", "\tupdate_port_pkeys: " 667 "ibdm_port_attr_ibmf_fini failed for " 668 "port pkey 0x%x", 669 port->pa_pkey_tbl[opkey_idx].pt_pkey); 670 } 671 } 672 } 673 674 if (port->pa_pkey_tbl != NULL) { 675 kmem_free(port->pa_pkey_tbl, 676 port->pa_npkeys * sizeof (ibdm_pkey_tbl_t)); 677 } 678 679 port->pa_npkeys = npkeys; 680 port->pa_pkey_tbl = pkey_tbl; 681 port->pa_sn_prefix = pinfop->p_sgid_tbl[0].gid_prefix; 682 port->pa_state = pinfop->p_linkstate; 683 ibt_free_portinfo(pinfop, size); 684 } 685 686 /* 687 * ibdm_initialize_port() 688 * Register with IBMF 689 * Register with SA access 690 * Register a receive callback routine with IBMF. IBMF invokes 691 * this routine whenever a MAD arrives at this port. 
692 * Update the port attributes 693 */ 694 static void 695 ibdm_initialize_port(ibdm_port_attr_t *port) 696 { 697 int ii; 698 uint_t nports, size; 699 uint_t pkey_idx; 700 ib_pkey_t pkey; 701 ibt_hca_portinfo_t *pinfop; 702 ibmf_register_info_t ibmf_reg; 703 ibmf_saa_subnet_event_args_t event_args; 704 705 IBTF_DPRINTF_L4("ibdm", "\tinitialize_port:"); 706 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 707 708 /* Check whether the port is active */ 709 if (ibt_get_port_state(port->pa_hca_hdl, port->pa_port_num, NULL, 710 NULL) != IBT_SUCCESS) 711 return; 712 713 if (port->pa_sa_hdl != NULL) 714 return; 715 716 if (ibt_query_hca_ports(port->pa_hca_hdl, port->pa_port_num, 717 &pinfop, &nports, &size) != IBT_SUCCESS) { 718 /* This should not occur */ 719 port->pa_npkeys = 0; 720 port->pa_pkey_tbl = NULL; 721 return; 722 } 723 port->pa_sn_prefix = pinfop->p_sgid_tbl[0].gid_prefix; 724 725 port->pa_state = pinfop->p_linkstate; 726 port->pa_npkeys = pinfop->p_pkey_tbl_sz; 727 port->pa_pkey_tbl = (ibdm_pkey_tbl_t *)kmem_zalloc( 728 port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP); 729 730 for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++) 731 port->pa_pkey_tbl[pkey_idx].pt_pkey = 732 pinfop->p_pkey_tbl[pkey_idx]; 733 734 ibt_free_portinfo(pinfop, size); 735 736 event_args.is_event_callback = ibdm_saa_event_cb; 737 event_args.is_event_callback_arg = port; 738 if (ibmf_sa_session_open(port->pa_port_guid, 0, &event_args, 739 IBMF_VERSION, 0, &port->pa_sa_hdl) != IBMF_SUCCESS) { 740 IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: " 741 "sa access registration failed"); 742 return; 743 } 744 ibmf_reg.ir_ci_guid = port->pa_hca_guid; 745 ibmf_reg.ir_port_num = port->pa_port_num; 746 ibmf_reg.ir_client_class = DEV_MGT_MANAGER; 747 748 if (ibmf_register(&ibmf_reg, IBMF_VERSION, 0, NULL, NULL, 749 &port->pa_ibmf_hdl, &port->pa_ibmf_caps) != IBMF_SUCCESS) { 750 IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: " 751 "IBMF registration failed"); 752 (void) ibdm_fini_port(port); 753 return; 754 } 755 if (ibmf_setup_async_cb(port->pa_ibmf_hdl, IBMF_QP_HANDLE_DEFAULT, 756 ibdm_ibmf_recv_cb, 0, 0) != IBMF_SUCCESS) { 757 IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: " 758 "IBMF setup recv cb failed"); 759 (void) ibdm_fini_port(port); 760 return; 761 } 762 763 for (ii = 0; ii < port->pa_npkeys; ii++) { 764 pkey = port->pa_pkey_tbl[ii].pt_pkey; 765 if (IBDM_INVALID_PKEY(pkey)) { 766 port->pa_pkey_tbl[ii].pt_qp_hdl = NULL; 767 continue; 768 } 769 ibdm_port_attr_ibmf_init(port, pkey, ii); 770 } 771 } 772 773 774 /* 775 * ibdm_port_attr_ibmf_init: 776 * With IBMF - Alloc QP Handle and Setup Async callback 777 */ 778 static void 779 ibdm_port_attr_ibmf_init(ibdm_port_attr_t *port, ib_pkey_t pkey, int ii) 780 { 781 int ret; 782 783 if ((ret = ibmf_alloc_qp(port->pa_ibmf_hdl, pkey, IB_GSI_QKEY, 784 IBMF_ALT_QP_MAD_NO_RMPP, &port->pa_pkey_tbl[ii].pt_qp_hdl)) != 785 IBMF_SUCCESS) { 786 IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: " 787 "IBMF failed to alloc qp %d", ret); 788 port->pa_pkey_tbl[ii].pt_qp_hdl = NULL; 789 return; 790 } 791 792 IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_init: QP handle is %p", 793 port->pa_ibmf_hdl); 794 795 if ((ret = ibmf_setup_async_cb(port->pa_ibmf_hdl, 796 port->pa_pkey_tbl[ii].pt_qp_hdl, ibdm_ibmf_recv_cb, 0, 0)) != 797 IBMF_SUCCESS) { 798 IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: " 799 "IBMF setup recv cb failed %d", ret); 800 (void) ibmf_free_qp(port->pa_ibmf_hdl, 801 &port->pa_pkey_tbl[ii].pt_qp_hdl, 0); 802 port->pa_pkey_tbl[ii].pt_qp_hdl = NULL; 803 } 804 } 805 806 807 /* 808 * 
ibdm_get_port_attr() 809 * Get port attributes from HCA guid and port number 810 * Return pointer to ibdm_port_attr_t on Success 811 * and NULL on failure 812 */ 813 static ibdm_port_attr_t * 814 ibdm_get_port_attr(ibt_async_event_t *event, ibdm_hca_list_t **retval) 815 { 816 ibdm_hca_list_t *hca_list; 817 ibdm_port_attr_t *port_attr; 818 int ii; 819 820 IBTF_DPRINTF_L4("ibdm", "\tget_port_attr: port# %d", event->ev_port); 821 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 822 hca_list = ibdm.ibdm_hca_list_head; 823 while (hca_list) { 824 if (hca_list->hl_hca_guid == event->ev_hca_guid) { 825 for (ii = 0; ii < hca_list->hl_nports; ii++) { 826 port_attr = &hca_list->hl_port_attr[ii]; 827 if (port_attr->pa_port_num == event->ev_port) { 828 *retval = hca_list; 829 return (port_attr); 830 } 831 } 832 } 833 hca_list = hca_list->hl_next; 834 } 835 return (NULL); 836 } 837 838 839 /* 840 * ibdm_update_port_attr() 841 * Update the port attributes 842 */ 843 static void 844 ibdm_update_port_attr(ibdm_port_attr_t *port) 845 { 846 uint_t nports, size; 847 uint_t pkey_idx; 848 ibt_hca_portinfo_t *portinfop; 849 850 IBTF_DPRINTF_L4("ibdm", "\tupdate_port_attr: Begin"); 851 if (ibt_query_hca_ports(port->pa_hca_hdl, 852 port->pa_port_num, &portinfop, &nports, &size) != IBT_SUCCESS) { 853 /* This should not occur */ 854 port->pa_npkeys = 0; 855 port->pa_pkey_tbl = NULL; 856 return; 857 } 858 port->pa_sn_prefix = portinfop->p_sgid_tbl[0].gid_prefix; 859 860 port->pa_state = portinfop->p_linkstate; 861 862 /* 863 * PKey information in portinfo valid only if port is 864 * ACTIVE. Bail out if not. 865 */ 866 if (port->pa_state != IBT_PORT_ACTIVE) { 867 port->pa_npkeys = 0; 868 port->pa_pkey_tbl = NULL; 869 ibt_free_portinfo(portinfop, size); 870 return; 871 } 872 873 port->pa_npkeys = portinfop->p_pkey_tbl_sz; 874 port->pa_pkey_tbl = (ibdm_pkey_tbl_t *)kmem_zalloc( 875 port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP); 876 877 for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++) { 878 port->pa_pkey_tbl[pkey_idx].pt_pkey = 879 portinfop->p_pkey_tbl[pkey_idx]; 880 } 881 ibt_free_portinfo(portinfop, size); 882 } 883 884 885 /* 886 * ibdm_handle_hca_attach() 887 */ 888 static void 889 ibdm_handle_hca_attach(ib_guid_t hca_guid) 890 { 891 uint_t size; 892 uint_t ii, nports; 893 ibt_status_t status; 894 ibt_hca_hdl_t hca_hdl; 895 ibt_hca_attr_t *hca_attr; 896 ibdm_hca_list_t *hca_list, *temp; 897 ibdm_port_attr_t *port_attr; 898 ibt_hca_portinfo_t *portinfop; 899 900 IBTF_DPRINTF_L4("ibdm", 901 "\thandle_hca_attach: hca_guid = 0x%llX", hca_guid); 902 903 /* open the HCA first */ 904 if ((status = ibt_open_hca(ibdm.ibdm_ibt_clnt_hdl, hca_guid, 905 &hca_hdl)) != IBT_SUCCESS) { 906 IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: " 907 "open_hca failed, status 0x%x", status); 908 return; 909 } 910 911 hca_attr = (ibt_hca_attr_t *) 912 kmem_alloc(sizeof (ibt_hca_attr_t), KM_SLEEP); 913 /* ibt_query_hca always returns IBT_SUCCESS */ 914 (void) ibt_query_hca(hca_hdl, hca_attr); 915 916 IBTF_DPRINTF_L4("ibdm", "\tvid: 0x%x, pid: 0x%x, ver: 0x%x," 917 " #ports: %d", hca_attr->hca_vendor_id, hca_attr->hca_device_id, 918 hca_attr->hca_version_id, hca_attr->hca_nports); 919 920 if ((status = ibt_query_hca_ports(hca_hdl, 0, &portinfop, &nports, 921 &size)) != IBT_SUCCESS) { 922 IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: " 923 "ibt_query_hca_ports failed, status 0x%x", status); 924 kmem_free(hca_attr, sizeof (ibt_hca_attr_t)); 925 (void) ibt_close_hca(hca_hdl); 926 return; 927 } 928 hca_list = (ibdm_hca_list_t *) 929 
kmem_zalloc((sizeof (ibdm_hca_list_t)), KM_SLEEP); 930 hca_list->hl_port_attr = (ibdm_port_attr_t *)kmem_zalloc( 931 (sizeof (ibdm_port_attr_t) * hca_attr->hca_nports), KM_SLEEP); 932 hca_list->hl_hca_guid = hca_attr->hca_node_guid; 933 hca_list->hl_nports = hca_attr->hca_nports; 934 hca_list->hl_attach_time = ddi_get_time(); 935 hca_list->hl_hca_hdl = hca_hdl; 936 937 /* 938 * Init a dummy port attribute for the HCA node 939 * This is for Per-HCA Node. Initialize port_attr : 940 * hca_guid & port_guid -> hca_guid 941 * npkeys, pkey_tbl is NULL 942 * port_num, sn_prefix is 0 943 * vendorid, product_id, dev_version from HCA 944 * pa_state is IBT_PORT_ACTIVE 945 */ 946 hca_list->hl_hca_port_attr = (ibdm_port_attr_t *)kmem_zalloc( 947 sizeof (ibdm_port_attr_t), KM_SLEEP); 948 port_attr = hca_list->hl_hca_port_attr; 949 port_attr->pa_vendorid = hca_attr->hca_vendor_id; 950 port_attr->pa_productid = hca_attr->hca_device_id; 951 port_attr->pa_dev_version = hca_attr->hca_version_id; 952 port_attr->pa_hca_guid = hca_attr->hca_node_guid; 953 port_attr->pa_hca_hdl = hca_list->hl_hca_hdl; 954 port_attr->pa_port_guid = hca_attr->hca_node_guid; 955 port_attr->pa_state = IBT_PORT_ACTIVE; 956 957 958 for (ii = 0; ii < nports; ii++) { 959 port_attr = &hca_list->hl_port_attr[ii]; 960 port_attr->pa_vendorid = hca_attr->hca_vendor_id; 961 port_attr->pa_productid = hca_attr->hca_device_id; 962 port_attr->pa_dev_version = hca_attr->hca_version_id; 963 port_attr->pa_hca_guid = hca_attr->hca_node_guid; 964 port_attr->pa_hca_hdl = hca_list->hl_hca_hdl; 965 port_attr->pa_port_guid = portinfop[ii].p_sgid_tbl->gid_guid; 966 port_attr->pa_sn_prefix = portinfop[ii].p_sgid_tbl->gid_prefix; 967 port_attr->pa_port_num = portinfop[ii].p_port_num; 968 port_attr->pa_state = portinfop[ii].p_linkstate; 969 970 /* 971 * Register with IBMF, SA access when the port is in 972 * ACTIVE state. Also register a callback routine 973 * with IBMF to receive incoming DM MAD's. 974 * The IBDM event handler takes care of registration of 975 * port which are not active. 
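		 * (For a port that is not active at this point,
		 * ibdm_event_hdlr() calls ibdm_initialize_port() when it
		 * later reports IBT_EVENT_PORT_UP.)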
976 */ 977 IBTF_DPRINTF_L4("ibdm", 978 "\thandle_hca_attach: port guid %llx Port state 0x%x", 979 port_attr->pa_port_guid, portinfop[ii].p_linkstate); 980 981 if (portinfop[ii].p_linkstate == IBT_PORT_ACTIVE) { 982 mutex_enter(&ibdm.ibdm_hl_mutex); 983 hca_list->hl_nports_active++; 984 ibdm_initialize_port(port_attr); 985 cv_broadcast(&ibdm.ibdm_port_settle_cv); 986 mutex_exit(&ibdm.ibdm_hl_mutex); 987 } 988 } 989 mutex_enter(&ibdm.ibdm_hl_mutex); 990 for (temp = ibdm.ibdm_hca_list_head; temp; temp = temp->hl_next) { 991 if (temp->hl_hca_guid == hca_guid) { 992 IBTF_DPRINTF_L2("ibdm", "hca_attach: HCA %llX " 993 "already seen by IBDM", hca_guid); 994 mutex_exit(&ibdm.ibdm_hl_mutex); 995 (void) ibdm_uninit_hca(hca_list); 996 return; 997 } 998 } 999 ibdm.ibdm_hca_count++; 1000 if (ibdm.ibdm_hca_list_head == NULL) { 1001 ibdm.ibdm_hca_list_head = hca_list; 1002 ibdm.ibdm_hca_list_tail = hca_list; 1003 } else { 1004 ibdm.ibdm_hca_list_tail->hl_next = hca_list; 1005 ibdm.ibdm_hca_list_tail = hca_list; 1006 } 1007 mutex_exit(&ibdm.ibdm_hl_mutex); 1008 mutex_enter(&ibdm.ibdm_ibnex_mutex); 1009 if (ibdm.ibdm_ibnex_callback != NULL) { 1010 (*ibdm.ibdm_ibnex_callback)((void *) 1011 &hca_guid, IBDM_EVENT_HCA_ADDED); 1012 } 1013 mutex_exit(&ibdm.ibdm_ibnex_mutex); 1014 1015 kmem_free(hca_attr, sizeof (ibt_hca_attr_t)); 1016 ibt_free_portinfo(portinfop, size); 1017 } 1018 1019 1020 /* 1021 * ibdm_handle_hca_detach() 1022 */ 1023 static void 1024 ibdm_handle_hca_detach(ib_guid_t hca_guid) 1025 { 1026 ibdm_hca_list_t *head, *prev = NULL; 1027 size_t len; 1028 ibdm_dp_gidinfo_t *gidinfo; 1029 1030 IBTF_DPRINTF_L4("ibdm", 1031 "\thandle_hca_detach: hca_guid = 0x%llx", hca_guid); 1032 1033 /* Make sure no probes are running */ 1034 mutex_enter(&ibdm.ibdm_mutex); 1035 while (ibdm.ibdm_busy & IBDM_BUSY) 1036 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 1037 ibdm.ibdm_busy |= IBDM_BUSY; 1038 mutex_exit(&ibdm.ibdm_mutex); 1039 1040 mutex_enter(&ibdm.ibdm_hl_mutex); 1041 head = ibdm.ibdm_hca_list_head; 1042 while (head) { 1043 if (head->hl_hca_guid == hca_guid) { 1044 if (prev == NULL) 1045 ibdm.ibdm_hca_list_head = head->hl_next; 1046 else 1047 prev->hl_next = head->hl_next; 1048 if (ibdm.ibdm_hca_list_tail == head) 1049 ibdm.ibdm_hca_list_tail = prev; 1050 ibdm.ibdm_hca_count--; 1051 break; 1052 } 1053 prev = head; 1054 head = head->hl_next; 1055 } 1056 mutex_exit(&ibdm.ibdm_hl_mutex); 1057 if (ibdm_uninit_hca(head) != IBDM_SUCCESS) 1058 (void) ibdm_handle_hca_attach(hca_guid); 1059 1060 /* 1061 * Now clean up the HCA lists in the gidlist. 
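	 * Each gidinfo keeps a gl_hca_list of the local HCA/port entries
	 * that can reach that GID; unlink and free the entry for the
	 * departing HCA so no stale reference remains.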
1062 */ 1063 for (gidinfo = ibdm.ibdm_dp_gidlist_head; gidinfo; gidinfo = 1064 gidinfo->gl_next) { 1065 prev = NULL; 1066 head = gidinfo->gl_hca_list; 1067 while (head) { 1068 if (head->hl_hca_guid == hca_guid) { 1069 if (prev == NULL) 1070 gidinfo->gl_hca_list = 1071 head->hl_next; 1072 else 1073 prev->hl_next = head->hl_next; 1074 1075 len = sizeof (ibdm_hca_list_t) + 1076 (head->hl_nports * 1077 sizeof (ibdm_port_attr_t)); 1078 kmem_free(head, len); 1079 1080 break; 1081 } 1082 prev = head; 1083 head = head->hl_next; 1084 } 1085 } 1086 1087 mutex_enter(&ibdm.ibdm_mutex); 1088 ibdm.ibdm_busy &= ~IBDM_BUSY; 1089 cv_broadcast(&ibdm.ibdm_busy_cv); 1090 mutex_exit(&ibdm.ibdm_mutex); 1091 } 1092 1093 1094 static int 1095 ibdm_uninit_hca(ibdm_hca_list_t *head) 1096 { 1097 int ii; 1098 ibdm_port_attr_t *port_attr; 1099 1100 for (ii = 0; ii < head->hl_nports; ii++) { 1101 port_attr = &head->hl_port_attr[ii]; 1102 if (ibdm_fini_port(port_attr) != IBDM_SUCCESS) { 1103 IBTF_DPRINTF_L2("ibdm", "uninit_hca: HCA %p port 0x%x " 1104 "ibdm_fini_port() failed", head, ii); 1105 return (IBDM_FAILURE); 1106 } 1107 } 1108 if (head->hl_hca_hdl) 1109 if (ibt_close_hca(head->hl_hca_hdl) != IBT_SUCCESS) 1110 return (IBDM_FAILURE); 1111 kmem_free(head->hl_port_attr, 1112 head->hl_nports * sizeof (ibdm_port_attr_t)); 1113 kmem_free(head->hl_hca_port_attr, sizeof (ibdm_port_attr_t)); 1114 kmem_free(head, sizeof (ibdm_hca_list_t)); 1115 return (IBDM_SUCCESS); 1116 } 1117 1118 1119 /* 1120 * For each port on the HCA, 1121 * 1) Teardown IBMF receive callback function 1122 * 2) Unregister with IBMF 1123 * 3) Unregister with SA access 1124 */ 1125 static int 1126 ibdm_fini_port(ibdm_port_attr_t *port_attr) 1127 { 1128 int ii, ibmf_status; 1129 1130 for (ii = 0; ii < port_attr->pa_npkeys; ii++) { 1131 if (port_attr->pa_pkey_tbl == NULL) 1132 break; 1133 if (!port_attr->pa_pkey_tbl[ii].pt_qp_hdl) 1134 continue; 1135 if (ibdm_port_attr_ibmf_fini(port_attr, ii) != IBDM_SUCCESS) { 1136 IBTF_DPRINTF_L4("ibdm", "\tfini_port: " 1137 "ibdm_port_attr_ibmf_fini failed for " 1138 "port pkey 0x%x", ii); 1139 return (IBDM_FAILURE); 1140 } 1141 } 1142 1143 if (port_attr->pa_ibmf_hdl) { 1144 ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl, 1145 IBMF_QP_HANDLE_DEFAULT, 0); 1146 if (ibmf_status != IBMF_SUCCESS) { 1147 IBTF_DPRINTF_L4("ibdm", "\tfini_port: " 1148 "ibmf_tear_down_async_cb failed %d", ibmf_status); 1149 return (IBDM_FAILURE); 1150 } 1151 1152 ibmf_status = ibmf_unregister(&port_attr->pa_ibmf_hdl, 0); 1153 if (ibmf_status != IBMF_SUCCESS) { 1154 IBTF_DPRINTF_L4("ibdm", "\tfini_port: " 1155 "ibmf_unregister failed %d", ibmf_status); 1156 return (IBDM_FAILURE); 1157 } 1158 1159 port_attr->pa_ibmf_hdl = NULL; 1160 } 1161 1162 if (port_attr->pa_sa_hdl) { 1163 ibmf_status = ibmf_sa_session_close(&port_attr->pa_sa_hdl, 0); 1164 if (ibmf_status != IBMF_SUCCESS) { 1165 IBTF_DPRINTF_L4("ibdm", "\tfini_port: " 1166 "ibmf_sa_session_close failed %d", ibmf_status); 1167 return (IBDM_FAILURE); 1168 } 1169 port_attr->pa_sa_hdl = NULL; 1170 } 1171 1172 if (port_attr->pa_pkey_tbl != NULL) { 1173 kmem_free(port_attr->pa_pkey_tbl, 1174 port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t)); 1175 port_attr->pa_pkey_tbl = NULL; 1176 port_attr->pa_npkeys = 0; 1177 } 1178 1179 return (IBDM_SUCCESS); 1180 } 1181 1182 1183 /* 1184 * ibdm_port_attr_ibmf_fini: 1185 * With IBMF - Tear down Async callback and free QP Handle 1186 */ 1187 static int 1188 ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *port_attr, int ii) 1189 { 1190 int ibmf_status; 
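	/*
	 * Teardown mirrors ibdm_port_attr_ibmf_init() in reverse: tear down
	 * the async receive callback registered on the per-pkey alternate
	 * QP first, then free the QP handle itself.
	 */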
1191 1192 IBTF_DPRINTF_L5("ibdm", "\tport_attr_ibmf_fini:"); 1193 1194 if (port_attr->pa_pkey_tbl[ii].pt_qp_hdl) { 1195 ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl, 1196 port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0); 1197 if (ibmf_status != IBMF_SUCCESS) { 1198 IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: " 1199 "ibmf_tear_down_async_cb failed %d", ibmf_status); 1200 return (IBDM_FAILURE); 1201 } 1202 ibmf_status = ibmf_free_qp(port_attr->pa_ibmf_hdl, 1203 &port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0); 1204 if (ibmf_status != IBMF_SUCCESS) { 1205 IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: " 1206 "ibmf_free_qp failed %d", ibmf_status); 1207 return (IBDM_FAILURE); 1208 } 1209 port_attr->pa_pkey_tbl[ii].pt_qp_hdl = NULL; 1210 } 1211 return (IBDM_SUCCESS); 1212 } 1213 1214 1215 /* 1216 * ibdm_gid_decr_pending: 1217 * decrement gl_pending_cmds. If zero wakeup sleeping threads 1218 */ 1219 static void 1220 ibdm_gid_decr_pending(ibdm_dp_gidinfo_t *gidinfo) 1221 { 1222 mutex_enter(&ibdm.ibdm_mutex); 1223 mutex_enter(&gidinfo->gl_mutex); 1224 if (--gidinfo->gl_pending_cmds == 0) { 1225 /* 1226 * Handle DGID getting removed. 1227 */ 1228 if (gidinfo->gl_disconnected) { 1229 mutex_exit(&gidinfo->gl_mutex); 1230 mutex_exit(&ibdm.ibdm_mutex); 1231 1232 IBTF_DPRINTF_L3(ibdm_string, "\tgid_decr_pending: " 1233 "gidinfo %p hot removal", gidinfo); 1234 ibdm_delete_gidinfo(gidinfo); 1235 1236 mutex_enter(&ibdm.ibdm_mutex); 1237 ibdm.ibdm_ngid_probes_in_progress--; 1238 ibdm_wait_probe_completion(); 1239 mutex_exit(&ibdm.ibdm_mutex); 1240 return; 1241 } 1242 mutex_exit(&gidinfo->gl_mutex); 1243 mutex_exit(&ibdm.ibdm_mutex); 1244 ibdm_notify_newgid_iocs(gidinfo); 1245 mutex_enter(&ibdm.ibdm_mutex); 1246 mutex_enter(&gidinfo->gl_mutex); 1247 1248 ibdm.ibdm_ngid_probes_in_progress--; 1249 ibdm_wait_probe_completion(); 1250 } 1251 mutex_exit(&gidinfo->gl_mutex); 1252 mutex_exit(&ibdm.ibdm_mutex); 1253 } 1254 1255 1256 /* 1257 * ibdm_wait_probe_completion: 1258 * wait for probing to complete 1259 */ 1260 static void 1261 ibdm_wait_probe_completion(void) 1262 { 1263 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 1264 if (ibdm.ibdm_ngid_probes_in_progress) { 1265 IBTF_DPRINTF_L4("ibdm", "\twait for probe complete"); 1266 ibdm.ibdm_busy |= IBDM_PROBE_IN_PROGRESS; 1267 while (ibdm.ibdm_busy & IBDM_PROBE_IN_PROGRESS) 1268 cv_wait(&ibdm.ibdm_probe_cv, &ibdm.ibdm_mutex); 1269 } 1270 } 1271 1272 1273 /* 1274 * ibdm_wait_cisco_probe_completion: 1275 * wait for the reply from the Cisco FC GW switch after a setclassportinfo 1276 * request is sent. This wait can be achieved on each gid. 
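 *
 *	The wait is a flag/condvar handshake on the per-GID state: this
 *	routine (called with gl_mutex held) sets IBDM_CISCO_PROBE and blocks
 *	on gl_probe_cv until the flag is cleared, presumably by the MAD
 *	receive path once the switch responds, along the lines of:
 *
 *		gid_info->gl_flag &= ~IBDM_CISCO_PROBE;
 *		cv_broadcast(&gid_info->gl_probe_cv);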
 */
static void
ibdm_wait_cisco_probe_completion(ibdm_dp_gidinfo_t *gidinfo)
{
	ASSERT(MUTEX_HELD(&gidinfo->gl_mutex));
	IBTF_DPRINTF_L4("ibdm", "\twait for cisco probe complete");
	gidinfo->gl_flag |= IBDM_CISCO_PROBE;
	while (gidinfo->gl_flag & IBDM_CISCO_PROBE)
		cv_wait(&gidinfo->gl_probe_cv, &gidinfo->gl_mutex);
}


/*
 * ibdm_wakeup_probe_gid_cv:
 *	wakeup waiting threads (based on ibdm_ngid_probes_in_progress)
 */
static void
ibdm_wakeup_probe_gid_cv(void)
{
	ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));
	if (!ibdm.ibdm_ngid_probes_in_progress) {
		IBTF_DPRINTF_L4("ibdm", "wakeup_probe_gid_thread: Wakeup");
		ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS;
		cv_broadcast(&ibdm.ibdm_probe_cv);
	}

}


/*
 * ibdm_sweep_fabric(reprobe_flag)
 *	Find all possible Managed IOUs and their IOCs that are visible
 *	to the host. The algorithm used is as follows:
 *
 *	Send a "bus walk" request for each port on the host HCA to SA access
 *		SA returns the complete set of GIDs that are reachable from
 *		the source port. This is done in parallel.
 *
 *	Initialize the GID state to IBDM_GID_PROBE_NOT_DONE
 *
 *	Sort the GID list and eliminate duplicate GIDs
 *		1) Use DGID for sorting
 *		2) Use PortGuid for sorting
 *			Send an SA query to retrieve the NodeRecord and
 *			extract the PortGuid from that.
 *
 *	Set the GID state to IBDM_GID_PROBE_FAILED for all the ports that
 *	don't support DM MADs
 *		Send a "Portinfo" query to get the port capabilities and
 *		then check for DM MAD support
 *
 *	Send a "ClassPortInfo" request for all the GIDs in parallel,
 *	set the GID state to IBDM_GET_CLASSPORTINFO and wait on the
 *	cv_signal to complete.
 *
 *	When the DM agent on the remote GID sends back the response, IBMF
 *	invokes the DM callback routine.
 *
 *	If the response is proper, send an "IOUnitInfo" request and set
 *	the GID state to IBDM_GET_IOUNITINFO.
 *
 *	If the response is proper, send an "IocProfileInfo" request to
 *	all the IOCs simultaneously and set the GID state to
 *	IBDM_GET_IOC_DETAILS.
 *
 *	Send requests to get the Service entries simultaneously
 *
 *	Signal the waiting thread when responses for all the commands have
 *	been received.
 *
 *	Set the GID state to IBDM_GID_PROBE_FAILED when an error response
 *	is received during the probing period.
 *
 *	Note:
 *	ibdm.ibdm_ngid_probes_in_progress and ibdm_gid_list_t:gl_pending_cmds
 *	keep track of the number of commands in progress at any point in time.
 *	The MAD transaction ID is used to identify a particular GID.
 *	TBD: Consider registering the IBMF receive callback on demand
 *
 *	Note: This routine must be called with ibdm.ibdm_mutex held
 *	TBD: Re-probe the failed GID (for certain failures) when the next
 *	fabric sweep is requested
 *
 *	Parameters: If reprobe_flag is set, all IOCs will be reprobed.
 */
static void
ibdm_sweep_fabric(int reprobe_flag)
{
	int ii;
	int new_paths = 0;
	uint8_t niocs;
	taskqid_t tid;
	ibdm_ioc_info_t *ioc;
	ibdm_hca_list_t *hca_list = NULL;
	ibdm_port_attr_t *port = NULL;
	ibdm_dp_gidinfo_t *gid_info;

	IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: Enter");
	ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));

	/*
	 * Check whether a sweep is already in progress.
If so, just 1377 * wait for the fabric sweep to complete 1378 */ 1379 while (ibdm.ibdm_busy & IBDM_BUSY) 1380 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 1381 ibdm.ibdm_busy |= IBDM_BUSY; 1382 mutex_exit(&ibdm.ibdm_mutex); 1383 1384 ibdm_dump_sweep_fabric_timestamp(0); 1385 1386 /* Rescan the GID list for any removed GIDs for reprobe */ 1387 if (reprobe_flag) 1388 ibdm_rescan_gidlist(NULL); 1389 1390 /* 1391 * Get list of all the ports reachable from the local known HCA 1392 * ports which are active 1393 */ 1394 mutex_enter(&ibdm.ibdm_hl_mutex); 1395 for (ibdm_get_next_port(&hca_list, &port, 1); port; 1396 ibdm_get_next_port(&hca_list, &port, 1)) { 1397 /* 1398 * Get PATHS to all the reachable ports from 1399 * SGID and update the global ibdm structure. 1400 */ 1401 new_paths = ibdm_get_reachable_ports(port, hca_list); 1402 ibdm.ibdm_ngids += new_paths; 1403 } 1404 mutex_exit(&ibdm.ibdm_hl_mutex); 1405 1406 mutex_enter(&ibdm.ibdm_mutex); 1407 ibdm.ibdm_ngid_probes_in_progress += ibdm.ibdm_ngids; 1408 mutex_exit(&ibdm.ibdm_mutex); 1409 1410 /* Send a request to probe GIDs asynchronously. */ 1411 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 1412 gid_info = gid_info->gl_next) { 1413 mutex_enter(&gid_info->gl_mutex); 1414 gid_info->gl_reprobe_flag = reprobe_flag; 1415 mutex_exit(&gid_info->gl_mutex); 1416 1417 /* process newly encountered GIDs */ 1418 tid = taskq_dispatch(system_taskq, ibdm_probe_gid_thread, 1419 (void *)gid_info, TQ_NOSLEEP); 1420 IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: gid_info = %p" 1421 " taskq_id = %x", gid_info, tid); 1422 /* taskq failed to dispatch call it directly */ 1423 if (tid == NULL) 1424 ibdm_probe_gid_thread((void *)gid_info); 1425 } 1426 1427 mutex_enter(&ibdm.ibdm_mutex); 1428 ibdm_wait_probe_completion(); 1429 1430 /* 1431 * Update the properties, if reprobe_flag is set 1432 * Skip if gl_reprobe_flag is set, this will be 1433 * a re-inserted / new GID, for which notifications 1434 * have already been send. 1435 */ 1436 if (reprobe_flag) { 1437 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 1438 gid_info = gid_info->gl_next) { 1439 if (gid_info->gl_iou == NULL) 1440 continue; 1441 if (gid_info->gl_reprobe_flag) { 1442 gid_info->gl_reprobe_flag = 0; 1443 continue; 1444 } 1445 1446 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 1447 for (ii = 0; ii < niocs; ii++) { 1448 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii); 1449 if (ioc) 1450 ibdm_reprobe_update_port_srv(ioc, 1451 gid_info); 1452 } 1453 } 1454 } else if (ibdm.ibdm_prev_iou) { 1455 ibdm_ioc_info_t *ioc_list; 1456 1457 /* 1458 * Get the list of IOCs which have changed. 1459 * If any IOCs have changed, Notify IBNexus 1460 */ 1461 ibdm.ibdm_prev_iou = 0; 1462 ioc_list = ibdm_handle_prev_iou(); 1463 if (ioc_list) { 1464 if (ibdm.ibdm_ibnex_callback != NULL) { 1465 (*ibdm.ibdm_ibnex_callback)( 1466 (void *)ioc_list, 1467 IBDM_EVENT_IOC_PROP_UPDATE); 1468 } 1469 } 1470 } 1471 1472 ibdm_dump_sweep_fabric_timestamp(1); 1473 1474 ibdm.ibdm_busy &= ~IBDM_BUSY; 1475 cv_broadcast(&ibdm.ibdm_busy_cv); 1476 IBTF_DPRINTF_L5("ibdm", "\tsweep_fabric: EXIT"); 1477 } 1478 1479 1480 /* 1481 * ibdm_is_cisco: 1482 * Check if this is a Cisco device or not. 1483 */ 1484 static boolean_t 1485 ibdm_is_cisco(ib_guid_t guid) 1486 { 1487 if ((guid >> IBDM_OUI_GUID_SHIFT) == IBDM_CISCO_COMPANY_ID) 1488 return (B_TRUE); 1489 return (B_FALSE); 1490 } 1491 1492 1493 /* 1494 * ibdm_is_cisco_switch: 1495 * Check if this switch is a CISCO switch or not. 
1496 * Note that if this switch is already activated, ibdm_is_cisco_switch() 1497 * returns B_FALSE not to re-activate it again. 1498 */ 1499 static boolean_t 1500 ibdm_is_cisco_switch(ibdm_dp_gidinfo_t *gid_info) 1501 { 1502 int company_id, device_id; 1503 ASSERT(gid_info != 0); 1504 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 1505 1506 /* 1507 * If this switch is already activated, don't re-activate it. 1508 */ 1509 if (gid_info->gl_flag & IBDM_CISCO_PROBE_DONE) 1510 return (B_FALSE); 1511 1512 /* 1513 * Check if this switch is a Cisco FC GW or not. 1514 * Use the node guid (the OUI part) instead of the vendor id 1515 * since the vendor id is zero in practice. 1516 */ 1517 company_id = gid_info->gl_nodeguid >> IBDM_OUI_GUID_SHIFT; 1518 device_id = gid_info->gl_devid; 1519 1520 if (company_id == IBDM_CISCO_COMPANY_ID && 1521 device_id == IBDM_CISCO_DEVICE_ID) 1522 return (B_TRUE); 1523 return (B_FALSE); 1524 } 1525 1526 1527 /* 1528 * ibdm_probe_gid_thread: 1529 * thread that does the actual work for sweeping the fabric 1530 * for a given GID 1531 */ 1532 static void 1533 ibdm_probe_gid_thread(void *args) 1534 { 1535 int reprobe_flag; 1536 ib_guid_t node_guid; 1537 ib_guid_t port_guid; 1538 ibdm_dp_gidinfo_t *gid_info; 1539 1540 gid_info = (ibdm_dp_gidinfo_t *)args; 1541 reprobe_flag = gid_info->gl_reprobe_flag; 1542 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: gid_info = %p, flag = %d", 1543 gid_info, reprobe_flag); 1544 ASSERT(gid_info != NULL); 1545 ASSERT(gid_info->gl_pending_cmds == 0); 1546 1547 if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE && 1548 reprobe_flag == 0) { 1549 /* 1550 * This GID may have been already probed. Send 1551 * in a CLP to check if IOUnitInfo changed? 1552 * Explicitly set gl_reprobe_flag to 0 so that 1553 * IBnex is not notified on completion 1554 */ 1555 if (gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) { 1556 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: " 1557 "get new IOCs information"); 1558 mutex_enter(&gid_info->gl_mutex); 1559 gid_info->gl_pending_cmds++; 1560 gid_info->gl_state = IBDM_GET_IOUNITINFO; 1561 gid_info->gl_reprobe_flag = 0; 1562 mutex_exit(&gid_info->gl_mutex); 1563 if (ibdm_send_iounitinfo(gid_info) != IBDM_SUCCESS) { 1564 mutex_enter(&gid_info->gl_mutex); 1565 --gid_info->gl_pending_cmds; 1566 mutex_exit(&gid_info->gl_mutex); 1567 mutex_enter(&ibdm.ibdm_mutex); 1568 --ibdm.ibdm_ngid_probes_in_progress; 1569 ibdm_wakeup_probe_gid_cv(); 1570 mutex_exit(&ibdm.ibdm_mutex); 1571 } 1572 } else { 1573 mutex_enter(&ibdm.ibdm_mutex); 1574 --ibdm.ibdm_ngid_probes_in_progress; 1575 ibdm_wakeup_probe_gid_cv(); 1576 mutex_exit(&ibdm.ibdm_mutex); 1577 } 1578 return; 1579 } else if (reprobe_flag && gid_info->gl_state == 1580 IBDM_GID_PROBING_COMPLETE) { 1581 /* 1582 * Reprobe all IOCs for the GID which has completed 1583 * probe. Skip other port GIDs to same IOU. 
		 * Explicitly set gl_reprobe_flag to 0 so that
		 * IBnex is not notified on completion
		 */
		ibdm_ioc_info_t *ioc_info;
		uint8_t niocs, ii;

		ASSERT(gid_info->gl_iou);
		mutex_enter(&gid_info->gl_mutex);
		niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
		gid_info->gl_state = IBDM_GET_IOC_DETAILS;
		gid_info->gl_pending_cmds += niocs;
		gid_info->gl_reprobe_flag = 0;
		mutex_exit(&gid_info->gl_mutex);
		for (ii = 0; ii < niocs; ii++) {
			uchar_t slot_info;
			ib_dm_io_unitinfo_t *giou_info;

			/*
			 * Check whether an IOC is present in the slot.
			 * A series of nibbles (in the field
			 * iou_ctrl_list) represents the slots in the
			 * IOU.
			 * Byte format: 76543210
			 * Bits 0-3 of the first byte represent slot 2,
			 * bits 4-7 of the first byte represent slot 1,
			 * bits 0-3 of the second byte represent slot 4,
			 * and so on.
			 * Each 4-bit nibble has the following meaning:
			 * 0x0 : IOC not installed
			 * 0x1 : IOC is present
			 * 0xf : Slot does not exist
			 * and all other values are reserved.
			 * For example, iou_ctrl_list[0] == 0x1f means
			 * slot 1 has an IOC present and slot 2 does
			 * not exist.
			 */
			ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii);
			giou_info = &gid_info->gl_iou->iou_info;
			slot_info = giou_info->iou_ctrl_list[(ii/2)];
			if ((ii % 2) == 0)
				slot_info = (slot_info >> 4);

			if ((slot_info & 0xf) != 1) {
				ioc_info->ioc_state =
				    IBDM_IOC_STATE_PROBE_FAILED;
				ibdm_gid_decr_pending(gid_info);
				continue;
			}

			if (ibdm_send_ioc_profile(gid_info, ii) !=
			    IBDM_SUCCESS) {
				ibdm_gid_decr_pending(gid_info);
			}
		}

		return;
	} else if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) {
		mutex_enter(&ibdm.ibdm_mutex);
		--ibdm.ibdm_ngid_probes_in_progress;
		ibdm_wakeup_probe_gid_cv();
		mutex_exit(&ibdm.ibdm_mutex);
		return;
	}

	/*
	 * Check whether the destination GID supports DM agents. If
	 * not, stop probing the GID and continue with the next GID
	 * in the list.
	 */
	if (ibdm_is_dev_mgt_supported(gid_info) != IBDM_SUCCESS) {
		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_state = IBDM_GID_PROBING_FAILED;
		mutex_exit(&gid_info->gl_mutex);
		ibdm_delete_glhca_list(gid_info);
		mutex_enter(&ibdm.ibdm_mutex);
		--ibdm.ibdm_ngid_probes_in_progress;
		ibdm_wakeup_probe_gid_cv();
		mutex_exit(&ibdm.ibdm_mutex);
		return;
	}

	/* Get the nodeguid and portguid of the port */
	if (ibdm_get_node_port_guids(gid_info->gl_sa_hdl, gid_info->gl_dlid,
	    &node_guid, &port_guid) != IBDM_SUCCESS) {
		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_state = IBDM_GID_PROBING_FAILED;
		mutex_exit(&gid_info->gl_mutex);
		ibdm_delete_glhca_list(gid_info);
		mutex_enter(&ibdm.ibdm_mutex);
		--ibdm.ibdm_ngid_probes_in_progress;
		ibdm_wakeup_probe_gid_cv();
		mutex_exit(&ibdm.ibdm_mutex);
		return;
	}

	/*
	 * Check whether we already know about this NodeGuid.
	 * If so, do not probe the GID and continue with the
	 * next GID in the gid list. Set the GID state to
	 * probing done.
1681 */ 1682 mutex_enter(&ibdm.ibdm_mutex); 1683 gid_info->gl_nodeguid = node_guid; 1684 gid_info->gl_portguid = port_guid; 1685 if (ibdm_check_dest_nodeguid(gid_info) != NULL) { 1686 mutex_exit(&ibdm.ibdm_mutex); 1687 mutex_enter(&gid_info->gl_mutex); 1688 gid_info->gl_state = IBDM_GID_PROBING_SKIPPED; 1689 mutex_exit(&gid_info->gl_mutex); 1690 ibdm_delete_glhca_list(gid_info); 1691 mutex_enter(&ibdm.ibdm_mutex); 1692 --ibdm.ibdm_ngid_probes_in_progress; 1693 ibdm_wakeup_probe_gid_cv(); 1694 mutex_exit(&ibdm.ibdm_mutex); 1695 return; 1696 } 1697 ibdm_add_to_gl_gid(gid_info, gid_info); 1698 mutex_exit(&ibdm.ibdm_mutex); 1699 1700 /* 1701 * New or reinserted GID : Enable notification to IBnex 1702 */ 1703 mutex_enter(&gid_info->gl_mutex); 1704 gid_info->gl_reprobe_flag = 1; 1705 1706 /* 1707 * A Cisco FC GW needs the special handling to get IOUnitInfo. 1708 */ 1709 if (ibdm_is_cisco_switch(gid_info)) { 1710 gid_info->gl_pending_cmds++; 1711 gid_info->gl_state = IBDM_SET_CLASSPORTINFO; 1712 mutex_exit(&gid_info->gl_mutex); 1713 1714 if (ibdm_set_classportinfo(gid_info) != IBDM_SUCCESS) { 1715 mutex_enter(&gid_info->gl_mutex); 1716 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 1717 --gid_info->gl_pending_cmds; 1718 mutex_exit(&gid_info->gl_mutex); 1719 1720 /* free the hca_list on this gid_info */ 1721 ibdm_delete_glhca_list(gid_info); 1722 1723 mutex_enter(&ibdm.ibdm_mutex); 1724 --ibdm.ibdm_ngid_probes_in_progress; 1725 ibdm_wakeup_probe_gid_cv(); 1726 mutex_exit(&ibdm.ibdm_mutex); 1727 1728 return; 1729 } 1730 1731 mutex_enter(&gid_info->gl_mutex); 1732 ibdm_wait_cisco_probe_completion(gid_info); 1733 1734 IBTF_DPRINTF_L4("ibdm", "\tibdm_probe_gid_thread: " 1735 "CISCO Wakeup signal received"); 1736 } 1737 1738 /* move on to the 'GET_CLASSPORTINFO' stage */ 1739 gid_info->gl_pending_cmds++; 1740 gid_info->gl_state = IBDM_GET_CLASSPORTINFO; 1741 mutex_exit(&gid_info->gl_mutex); 1742 1743 IBTF_DPRINTF_L3(ibdm_string, "\tibdm_probe_gid_thread: " 1744 "%d: gid_info %p gl_state %d pending_cmds %d", 1745 __LINE__, gid_info, gid_info->gl_state, 1746 gid_info->gl_pending_cmds); 1747 1748 /* 1749 * Send ClassPortInfo request to the GID asynchronously. 1750 */ 1751 if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) { 1752 1753 mutex_enter(&gid_info->gl_mutex); 1754 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 1755 --gid_info->gl_pending_cmds; 1756 mutex_exit(&gid_info->gl_mutex); 1757 1758 /* free the hca_list on this gid_info */ 1759 ibdm_delete_glhca_list(gid_info); 1760 1761 mutex_enter(&ibdm.ibdm_mutex); 1762 --ibdm.ibdm_ngid_probes_in_progress; 1763 ibdm_wakeup_probe_gid_cv(); 1764 mutex_exit(&ibdm.ibdm_mutex); 1765 1766 return; 1767 } 1768 } 1769 1770 1771 /* 1772 * ibdm_check_dest_nodeguid 1773 * Searches for the NodeGuid in the GID list 1774 * Returns matching gid_info if found and otherwise NULL 1775 * 1776 * This function is called to handle new GIDs discovered 1777 * during device sweep / probe or for GID_AVAILABLE event. 
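 * When a match is found, the new DGID is appended to the existing entry's
 * gl_gid list (and gl_ngids is incremented) rather than creating a
 * duplicate gidinfo.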
1778 * 1779 * Parameter : 1780 * gid_info GID to check 1781 */ 1782 static ibdm_dp_gidinfo_t * 1783 ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *gid_info) 1784 { 1785 ibdm_dp_gidinfo_t *gid_list; 1786 ibdm_gid_t *tmp; 1787 1788 IBTF_DPRINTF_L4("ibdm", "\tcheck_dest_nodeguid"); 1789 1790 gid_list = ibdm.ibdm_dp_gidlist_head; 1791 while (gid_list) { 1792 if ((gid_list != gid_info) && 1793 (gid_info->gl_nodeguid == gid_list->gl_nodeguid)) { 1794 IBTF_DPRINTF_L4("ibdm", 1795 "\tcheck_dest_nodeguid: NodeGuid is present"); 1796 1797 /* Add to gid_list */ 1798 tmp = kmem_zalloc(sizeof (ibdm_gid_t), 1799 KM_SLEEP); 1800 tmp->gid_dgid_hi = gid_info->gl_dgid_hi; 1801 tmp->gid_dgid_lo = gid_info->gl_dgid_lo; 1802 tmp->gid_next = gid_list->gl_gid; 1803 gid_list->gl_gid = tmp; 1804 gid_list->gl_ngids++; 1805 return (gid_list); 1806 } 1807 1808 gid_list = gid_list->gl_next; 1809 } 1810 1811 return (NULL); 1812 } 1813 1814 1815 /* 1816 * ibdm_is_dev_mgt_supported 1817 * Get the PortInfo attribute (SA Query) 1818 * Check "CompatabilityMask" field in the Portinfo. 1819 * Return IBDM_SUCCESS if DM MAD's supported (if bit 19 set) 1820 * by the port, otherwise IBDM_FAILURE 1821 */ 1822 static int 1823 ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *gid_info) 1824 { 1825 int ret; 1826 size_t length = 0; 1827 sa_portinfo_record_t req, *resp = NULL; 1828 ibmf_saa_access_args_t qargs; 1829 1830 bzero(&req, sizeof (sa_portinfo_record_t)); 1831 req.EndportLID = gid_info->gl_dlid; 1832 1833 qargs.sq_attr_id = SA_PORTINFORECORD_ATTRID; 1834 qargs.sq_access_type = IBMF_SAA_RETRIEVE; 1835 qargs.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID; 1836 qargs.sq_template = &req; 1837 qargs.sq_callback = NULL; 1838 qargs.sq_callback_arg = NULL; 1839 1840 ret = ibmf_sa_access(gid_info->gl_sa_hdl, 1841 &qargs, 0, &length, (void **)&resp); 1842 1843 if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) { 1844 IBTF_DPRINTF_L2("ibdm", "\tis_dev_mgt_supported:" 1845 "failed to get PORTINFO attribute %d", ret); 1846 return (IBDM_FAILURE); 1847 } 1848 1849 if (resp->PortInfo.CapabilityMask & SM_CAP_MASK_IS_DM_SUPPD) { 1850 IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: SUPPD !!"); 1851 ret = IBDM_SUCCESS; 1852 } else { 1853 IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: " 1854 "Not SUPPD !!, cap 0x%x", resp->PortInfo.CapabilityMask); 1855 ret = IBDM_FAILURE; 1856 } 1857 kmem_free(resp, length); 1858 return (ret); 1859 } 1860 1861 1862 /* 1863 * ibdm_get_node_port_guids() 1864 * Get the NodeInfoRecord of the port 1865 * Save NodeGuid and PortGUID values in the GID list structure. 
1866  * Return IBDM_SUCCESS/IBDM_FAILURE
1867  */
1868 static int
1869 ibdm_get_node_port_guids(ibmf_saa_handle_t sa_hdl, ib_lid_t dlid,
1870 ib_guid_t *node_guid, ib_guid_t *port_guid)
1871 {
1872 int ret;
1873 size_t length = 0;
1874 sa_node_record_t req, *resp = NULL;
1875 ibmf_saa_access_args_t qargs;
1876
1877 IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids");
1878
1879 bzero(&req, sizeof (sa_node_record_t));
1880 req.LID = dlid;
1881
1882 qargs.sq_attr_id = SA_NODERECORD_ATTRID;
1883 qargs.sq_access_type = IBMF_SAA_RETRIEVE;
1884 qargs.sq_component_mask = SA_NODEINFO_COMPMASK_NODELID;
1885 qargs.sq_template = &req;
1886 qargs.sq_callback = NULL;
1887 qargs.sq_callback_arg = NULL;
1888
1889 ret = ibmf_sa_access(sa_hdl, &qargs, 0, &length, (void **)&resp);
1890 if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) {
1891 IBTF_DPRINTF_L2("ibdm", "\tget_node_port_guids:"
1892 " SA Retrieve Failed: %d", ret);
1893 return (IBDM_FAILURE);
1894 }
1895 IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids: NodeGuid %llx Port"
1896 "GUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.PortGUID);
1897
1898 *node_guid = resp->NodeInfo.NodeGUID;
1899 *port_guid = resp->NodeInfo.PortGUID;
1900 kmem_free(resp, length);
1901 return (IBDM_SUCCESS);
1902 }
1903
1904
1905 /*
1906  * ibdm_get_reachable_ports()
1907  * Get the list of destination GIDs (and their path records) by
1908  * querying the SA access.
1909  *
1910  * Returns the number of paths
1911  */
1912 static int
1913 ibdm_get_reachable_ports(ibdm_port_attr_t *portinfo, ibdm_hca_list_t *hca)
1914 {
1915 uint_t ii, jj, nrecs;
1916 uint_t npaths = 0;
1917 size_t length;
1918 ib_gid_t sgid;
1919 ibdm_pkey_tbl_t *pkey_tbl;
1920 sa_path_record_t *result;
1921 sa_path_record_t *precp;
1922 ibdm_dp_gidinfo_t *gid_info;
1923
1924 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));
1925 IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: portinfo %p", portinfo);
1926
1927 sgid.gid_prefix = portinfo->pa_sn_prefix;
1928 sgid.gid_guid = portinfo->pa_port_guid;
1929
1930 /* get reversible paths */
1931 if (portinfo->pa_sa_hdl && ibmf_saa_paths_from_gid(portinfo->pa_sa_hdl,
1932 sgid, IBMF_SAA_PKEY_WC, B_TRUE, 0, &nrecs, &length, &result)
1933 != IBMF_SUCCESS) {
1934 IBTF_DPRINTF_L2("ibdm",
1935 "\tget_reachable_ports: Getting path records failed");
1936 return (0);
1937 }
1938
1939 for (ii = 0; ii < nrecs; ii++) {
1940 sa_node_record_t *nrec;
1941 size_t length;
1942
1943 precp = &result[ii];
1944 if ((gid_info = ibdm_check_dgid(precp->DGID.gid_guid,
1945 precp->DGID.gid_prefix)) != NULL) {
1946 IBTF_DPRINTF_L5("ibdm", "\tget_reachable_ports: "
1947 "Already exists nrecs %d, ii %d", nrecs, ii);
1948 ibdm_addto_glhcalist(gid_info, hca);
1949 continue;
1950 }
1951 /*
1952  * This is a new GID.
Allocate a GID structure and 1953 * initialize the structure 1954 * gl_state is initialized to IBDM_GID_PROBE_NOT_DONE (0) 1955 * by kmem_zalloc call 1956 */ 1957 gid_info = kmem_zalloc(sizeof (ibdm_dp_gidinfo_t), KM_SLEEP); 1958 mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL); 1959 cv_init(&gid_info->gl_probe_cv, NULL, CV_DRIVER, NULL); 1960 gid_info->gl_dgid_hi = precp->DGID.gid_prefix; 1961 gid_info->gl_dgid_lo = precp->DGID.gid_guid; 1962 gid_info->gl_sgid_hi = precp->SGID.gid_prefix; 1963 gid_info->gl_sgid_lo = precp->SGID.gid_guid; 1964 gid_info->gl_p_key = precp->P_Key; 1965 gid_info->gl_sa_hdl = portinfo->pa_sa_hdl; 1966 gid_info->gl_ibmf_hdl = portinfo->pa_ibmf_hdl; 1967 gid_info->gl_slid = precp->SLID; 1968 gid_info->gl_dlid = precp->DLID; 1969 gid_info->gl_transactionID = (++ibdm.ibdm_transactionID) 1970 << IBDM_GID_TRANSACTIONID_SHIFT; 1971 gid_info->gl_min_transactionID = gid_info->gl_transactionID; 1972 gid_info->gl_max_transactionID = (ibdm.ibdm_transactionID +1) 1973 << IBDM_GID_TRANSACTIONID_SHIFT; 1974 gid_info->gl_SL = precp->SL; 1975 1976 /* 1977 * get the node record with this guid if the destination 1978 * device is a Cisco one. 1979 */ 1980 if (ibdm_is_cisco(precp->DGID.gid_guid) && 1981 (gid_info->gl_nodeguid == 0 || gid_info->gl_devid == 0) && 1982 ibdm_get_node_record_by_port(portinfo->pa_sa_hdl, 1983 precp->DGID.gid_guid, &nrec, &length) == IBDM_SUCCESS) { 1984 gid_info->gl_nodeguid = nrec->NodeInfo.NodeGUID; 1985 gid_info->gl_devid = nrec->NodeInfo.DeviceID; 1986 kmem_free(nrec, length); 1987 } 1988 1989 ibdm_addto_glhcalist(gid_info, hca); 1990 1991 ibdm_dump_path_info(precp); 1992 1993 gid_info->gl_qp_hdl = NULL; 1994 ASSERT(portinfo->pa_pkey_tbl != NULL && 1995 portinfo->pa_npkeys != 0); 1996 1997 for (jj = 0; jj < portinfo->pa_npkeys; jj++) { 1998 pkey_tbl = &portinfo->pa_pkey_tbl[jj]; 1999 if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) && 2000 (pkey_tbl->pt_qp_hdl != NULL)) { 2001 gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl; 2002 break; 2003 } 2004 } 2005 2006 /* 2007 * QP handle for GID not initialized. No matching Pkey 2008 * was found!! ibdm should *not* hit this case. Flag an 2009 * error and drop the GID if ibdm does encounter this. 2010 */ 2011 if (gid_info->gl_qp_hdl == NULL) { 2012 IBTF_DPRINTF_L2(ibdm_string, 2013 "\tget_reachable_ports: No matching Pkey"); 2014 ibdm_delete_gidinfo(gid_info); 2015 continue; 2016 } 2017 if (ibdm.ibdm_dp_gidlist_head == NULL) { 2018 ibdm.ibdm_dp_gidlist_head = gid_info; 2019 ibdm.ibdm_dp_gidlist_tail = gid_info; 2020 } else { 2021 ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info; 2022 gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail; 2023 ibdm.ibdm_dp_gidlist_tail = gid_info; 2024 } 2025 npaths++; 2026 } 2027 kmem_free(result, length); 2028 IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: npaths = %d", npaths); 2029 return (npaths); 2030 } 2031 2032 2033 /* 2034 * ibdm_check_dgid() 2035 * Look in the global list to check whether we know this DGID already 2036 * Return IBDM_GID_PRESENT/IBDM_GID_NOT_PRESENT 2037 */ 2038 static ibdm_dp_gidinfo_t * 2039 ibdm_check_dgid(ib_guid_t guid, ib_sn_prefix_t prefix) 2040 { 2041 ibdm_dp_gidinfo_t *gid_list; 2042 2043 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 2044 gid_list = gid_list->gl_next) { 2045 if ((guid == gid_list->gl_dgid_lo) && 2046 (prefix == gid_list->gl_dgid_hi)) { 2047 break; 2048 } 2049 } 2050 return (gid_list); 2051 } 2052 2053 2054 /* 2055 * ibdm_find_gid() 2056 * Look in the global list to find a GID entry with matching 2057 * port & node GUID. 
2058 * Return pointer to gidinfo if found, else return NULL 2059 */ 2060 static ibdm_dp_gidinfo_t * 2061 ibdm_find_gid(ib_guid_t nodeguid, ib_guid_t portguid) 2062 { 2063 ibdm_dp_gidinfo_t *gid_list; 2064 2065 IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid(%llx, %llx)\n", 2066 nodeguid, portguid); 2067 2068 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 2069 gid_list = gid_list->gl_next) { 2070 if ((portguid == gid_list->gl_portguid) && 2071 (nodeguid == gid_list->gl_nodeguid)) { 2072 break; 2073 } 2074 } 2075 2076 IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid : returned %p\n", 2077 gid_list); 2078 return (gid_list); 2079 } 2080 2081 2082 /* 2083 * ibdm_set_classportinfo() 2084 * ibdm_set_classportinfo() is a function to activate a Cisco FC GW 2085 * by sending the setClassPortInfo request with the trapLID, trapGID 2086 * and etc. to the gateway since the gateway doesn't provide the IO 2087 * Unit Information othewise. This behavior is the Cisco specific one, 2088 * and this function is called to a Cisco FC GW only. 2089 * Returns IBDM_SUCCESS/IBDM_FAILURE 2090 */ 2091 static int 2092 ibdm_set_classportinfo(ibdm_dp_gidinfo_t *gid_info) 2093 { 2094 ibmf_msg_t *msg; 2095 ib_mad_hdr_t *hdr; 2096 ibdm_timeout_cb_args_t *cb_args; 2097 void *data; 2098 ib_mad_classportinfo_t *cpi; 2099 2100 IBTF_DPRINTF_L4("ibdm", 2101 "\tset_classportinfo: gid info 0x%p", gid_info); 2102 2103 /* 2104 * Send command to set classportinfo attribute. Allocate a IBMF 2105 * packet and initialize the packet. 2106 */ 2107 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 2108 &msg) != IBMF_SUCCESS) { 2109 IBTF_DPRINTF_L4("ibdm", "\tset_classportinfo: pkt alloc fail"); 2110 return (IBDM_FAILURE); 2111 } 2112 2113 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2114 ibdm_alloc_send_buffers(msg); 2115 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2116 2117 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2118 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2119 msg->im_local_addr.ia_remote_qno = 1; 2120 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2121 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2122 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2123 2124 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2125 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2126 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2127 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2128 hdr->R_Method = IB_DM_DEVMGT_METHOD_SET; 2129 hdr->Status = 0; 2130 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2131 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 2132 hdr->AttributeModifier = 0; 2133 2134 data = msg->im_msgbufs_send.im_bufs_cl_data; 2135 cpi = (ib_mad_classportinfo_t *)data; 2136 2137 /* 2138 * Set the classportinfo values to activate this Cisco FC GW. 
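 *
 * (Note, based on the assignments below: the Trap GID/LID/SL/P_Key and
 * the Trap QP/Q_Key are pointed back at the local source port and the
 * alternate QP used by ibdm, presumably so that the gateway knows where
 * to direct its Device Management traffic.)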
2139 */ 2140 cpi->TrapGID_hi = h2b64(gid_info->gl_sgid_hi); 2141 cpi->TrapGID_lo = h2b64(gid_info->gl_sgid_lo); 2142 cpi->TrapLID = h2b16(gid_info->gl_slid); 2143 cpi->TrapSL = gid_info->gl_SL; 2144 cpi->TrapP_Key = h2b16(gid_info->gl_p_key); 2145 cpi->TrapQP = h2b32((((ibmf_alt_qp_t *)gid_info->gl_qp_hdl)->isq_qpn)); 2146 cpi->TrapQ_Key = h2b32((((ibmf_alt_qp_t *) 2147 gid_info->gl_qp_hdl)->isq_qkey)); 2148 2149 cb_args = &gid_info->gl_cpi_cb_args; 2150 cb_args->cb_gid_info = gid_info; 2151 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2152 cb_args->cb_req_type = IBDM_REQ_TYPE_CLASSPORTINFO; 2153 2154 mutex_enter(&gid_info->gl_mutex); 2155 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2156 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2157 mutex_exit(&gid_info->gl_mutex); 2158 2159 IBTF_DPRINTF_L5("ibdm", "\tset_classportinfo: " 2160 "timeout id %x", gid_info->gl_timeout_id); 2161 2162 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 2163 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2164 IBTF_DPRINTF_L2("ibdm", 2165 "\tset_classportinfo: ibmf send failed"); 2166 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 2167 } 2168 2169 return (IBDM_SUCCESS); 2170 } 2171 2172 2173 /* 2174 * ibdm_send_classportinfo() 2175 * Send classportinfo request. When the request is completed 2176 * IBMF calls ibdm_classportinfo_cb routine to inform about 2177 * the completion. 2178 * Returns IBDM_SUCCESS/IBDM_FAILURE 2179 */ 2180 static int 2181 ibdm_send_classportinfo(ibdm_dp_gidinfo_t *gid_info) 2182 { 2183 ibmf_msg_t *msg; 2184 ib_mad_hdr_t *hdr; 2185 ibdm_timeout_cb_args_t *cb_args; 2186 2187 IBTF_DPRINTF_L4("ibdm", 2188 "\tsend_classportinfo: gid info 0x%p", gid_info); 2189 2190 /* 2191 * Send command to get classportinfo attribute. Allocate a IBMF 2192 * packet and initialize the packet. 
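 *
 * (Unlike ibdm_set_classportinfo() above, which uses the DevMgt SET
 * method solely to activate a Cisco FC gateway, this request uses the
 * GET method for the same ClassPortInfo attribute.)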
2193 */ 2194 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 2195 &msg) != IBMF_SUCCESS) { 2196 IBTF_DPRINTF_L4("ibdm", "\tsend_classportinfo: pkt alloc fail"); 2197 return (IBDM_FAILURE); 2198 } 2199 2200 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2201 ibdm_alloc_send_buffers(msg); 2202 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2203 2204 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2205 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2206 msg->im_local_addr.ia_remote_qno = 1; 2207 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2208 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2209 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2210 2211 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2212 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2213 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2214 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2215 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2216 hdr->Status = 0; 2217 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2218 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 2219 hdr->AttributeModifier = 0; 2220 2221 cb_args = &gid_info->gl_cpi_cb_args; 2222 cb_args->cb_gid_info = gid_info; 2223 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2224 cb_args->cb_req_type = IBDM_REQ_TYPE_CLASSPORTINFO; 2225 2226 mutex_enter(&gid_info->gl_mutex); 2227 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2228 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2229 mutex_exit(&gid_info->gl_mutex); 2230 2231 IBTF_DPRINTF_L5("ibdm", "\tsend_classportinfo: " 2232 "timeout id %x", gid_info->gl_timeout_id); 2233 2234 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 2235 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2236 IBTF_DPRINTF_L2("ibdm", 2237 "\tsend_classportinfo: ibmf send failed"); 2238 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 2239 } 2240 2241 return (IBDM_SUCCESS); 2242 } 2243 2244 2245 /* 2246 * ibdm_handle_setclassportinfo() 2247 * Invoked by the IBMF when setClassPortInfo request is completed. 2248 */ 2249 static void 2250 ibdm_handle_setclassportinfo(ibmf_handle_t ibmf_hdl, 2251 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2252 { 2253 void *data; 2254 timeout_id_t timeout_id; 2255 ib_mad_classportinfo_t *cpi; 2256 2257 IBTF_DPRINTF_L4("ibdm", "\thandle_setclassportinfo:ibmf hdl " 2258 "%p msg %p gid info %p", ibmf_hdl, msg, gid_info); 2259 2260 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO) { 2261 IBTF_DPRINTF_L4("ibdm", "\thandle_setclassportinfo: " 2262 "Not a ClassPortInfo resp"); 2263 *flag |= IBDM_IBMF_PKT_UNEXP_RESP; 2264 return; 2265 } 2266 2267 /* 2268 * Verify whether timeout handler is created/active. 
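 * (The gl_mutex is dropped around the untimeout() call below, presumably
 * because untimeout() can wait for a running timeout handler and the
 * handler itself may need gl_mutex.)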
2269 * If created/ active, cancel the timeout handler 2270 */ 2271 mutex_enter(&gid_info->gl_mutex); 2272 if (gid_info->gl_state != IBDM_SET_CLASSPORTINFO) { 2273 IBTF_DPRINTF_L2("ibdm", "\thandle_setclassportinfo:DUP resp"); 2274 *flag |= IBDM_IBMF_PKT_DUP_RESP; 2275 mutex_exit(&gid_info->gl_mutex); 2276 return; 2277 } 2278 ibdm_bump_transactionID(gid_info); 2279 2280 gid_info->gl_iou_cb_args.cb_req_type = 0; 2281 if (gid_info->gl_timeout_id) { 2282 timeout_id = gid_info->gl_timeout_id; 2283 mutex_exit(&gid_info->gl_mutex); 2284 IBTF_DPRINTF_L5("ibdm", "handle_setlassportinfo: " 2285 "gl_timeout_id = 0x%x", timeout_id); 2286 if (untimeout(timeout_id) == -1) { 2287 IBTF_DPRINTF_L2("ibdm", "handle_setclassportinfo: " 2288 "untimeout gl_timeout_id failed"); 2289 } 2290 mutex_enter(&gid_info->gl_mutex); 2291 gid_info->gl_timeout_id = 0; 2292 } 2293 mutex_exit(&gid_info->gl_mutex); 2294 2295 data = msg->im_msgbufs_recv.im_bufs_cl_data; 2296 cpi = (ib_mad_classportinfo_t *)data; 2297 2298 ibdm_dump_classportinfo(cpi); 2299 } 2300 2301 2302 /* 2303 * ibdm_handle_classportinfo() 2304 * Invoked by the IBMF when the classportinfo request is completed. 2305 */ 2306 static void 2307 ibdm_handle_classportinfo(ibmf_handle_t ibmf_hdl, 2308 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2309 { 2310 void *data; 2311 timeout_id_t timeout_id; 2312 ib_mad_hdr_t *hdr; 2313 ib_mad_classportinfo_t *cpi; 2314 2315 IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo:ibmf hdl " 2316 "%p msg %p gid info %p", ibmf_hdl, msg, gid_info); 2317 2318 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO) { 2319 IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo: " 2320 "Not a ClassPortInfo resp"); 2321 *flag |= IBDM_IBMF_PKT_UNEXP_RESP; 2322 return; 2323 } 2324 2325 /* 2326 * Verify whether timeout handler is created/active. 2327 * If created/ active, cancel the timeout handler 2328 */ 2329 mutex_enter(&gid_info->gl_mutex); 2330 ibdm_bump_transactionID(gid_info); 2331 if (gid_info->gl_state != IBDM_GET_CLASSPORTINFO) { 2332 IBTF_DPRINTF_L2("ibdm", "\thandle_classportinfo:DUP resp"); 2333 *flag |= IBDM_IBMF_PKT_DUP_RESP; 2334 mutex_exit(&gid_info->gl_mutex); 2335 return; 2336 } 2337 gid_info->gl_iou_cb_args.cb_req_type = 0; 2338 if (gid_info->gl_timeout_id) { 2339 timeout_id = gid_info->gl_timeout_id; 2340 mutex_exit(&gid_info->gl_mutex); 2341 IBTF_DPRINTF_L5("ibdm", "handle_ioclassportinfo: " 2342 "gl_timeout_id = 0x%x", timeout_id); 2343 if (untimeout(timeout_id) == -1) { 2344 IBTF_DPRINTF_L2("ibdm", "handle_classportinfo: " 2345 "untimeout gl_timeout_id failed"); 2346 } 2347 mutex_enter(&gid_info->gl_mutex); 2348 gid_info->gl_timeout_id = 0; 2349 } 2350 gid_info->gl_state = IBDM_GET_IOUNITINFO; 2351 gid_info->gl_pending_cmds++; 2352 mutex_exit(&gid_info->gl_mutex); 2353 2354 data = msg->im_msgbufs_recv.im_bufs_cl_data; 2355 cpi = (ib_mad_classportinfo_t *)data; 2356 2357 /* 2358 * Cache the "RespTimeValue" and redirection information in the 2359 * global gid list data structure. This cached information will 2360 * be used to send any further requests to the GID. 2361 */ 2362 gid_info->gl_resp_timeout = 2363 (b2h32(cpi->RespTimeValue) & 0x1F); 2364 2365 gid_info->gl_redirected = ((IBDM_IN_IBMFMSG_STATUS(msg) & 2366 MAD_STATUS_REDIRECT_REQUIRED) ? 
B_TRUE: B_FALSE); 2367 gid_info->gl_redirect_dlid = b2h16(cpi->RedirectLID); 2368 gid_info->gl_redirect_QP = (b2h32(cpi->RedirectQP) & 0xffffff); 2369 gid_info->gl_redirect_pkey = b2h16(cpi->RedirectP_Key); 2370 gid_info->gl_redirect_qkey = b2h32(cpi->RedirectQ_Key); 2371 gid_info->gl_redirectGID_hi = b2h64(cpi->RedirectGID_hi); 2372 gid_info->gl_redirectGID_lo = b2h64(cpi->RedirectGID_lo); 2373 gid_info->gl_redirectSL = cpi->RedirectSL; 2374 2375 ibdm_dump_classportinfo(cpi); 2376 2377 /* 2378 * Send IOUnitInfo request 2379 * Reuse previously allocated IBMF packet for sending ClassPortInfo 2380 * Check whether DM agent on the remote node requested redirection 2381 * If so, send the request to the redirect DGID/DLID/PKEY/QP. 2382 */ 2383 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2384 ibdm_alloc_send_buffers(msg); 2385 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2386 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2387 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2388 2389 if (gid_info->gl_redirected == B_TRUE) { 2390 if (gid_info->gl_redirect_dlid != 0) { 2391 msg->im_local_addr.ia_remote_lid = 2392 gid_info->gl_redirect_dlid; 2393 } 2394 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 2395 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 2396 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 2397 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 2398 } else { 2399 msg->im_local_addr.ia_remote_qno = 1; 2400 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2401 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2402 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2403 } 2404 2405 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2406 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2407 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2408 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2409 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2410 hdr->Status = 0; 2411 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2412 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 2413 hdr->AttributeModifier = 0; 2414 2415 gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO; 2416 gid_info->gl_iou_cb_args.cb_gid_info = gid_info; 2417 gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt; 2418 2419 mutex_enter(&gid_info->gl_mutex); 2420 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2421 &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2422 mutex_exit(&gid_info->gl_mutex); 2423 2424 IBTF_DPRINTF_L5("ibdm", "handle_classportinfo:" 2425 "timeout %x", gid_info->gl_timeout_id); 2426 2427 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, NULL, 2428 ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) != IBMF_SUCCESS) { 2429 IBTF_DPRINTF_L2("ibdm", 2430 "\thandle_classportinfo: msg transport failed"); 2431 ibdm_ibmf_send_cb(ibmf_hdl, msg, &gid_info->gl_iou_cb_args); 2432 } 2433 (*flag) |= IBDM_IBMF_PKT_REUSED; 2434 } 2435 2436 2437 /* 2438 * ibdm_send_iounitinfo: 2439 * Sends a DM request to get IOU unitinfo. 2440 */ 2441 static int 2442 ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *gid_info) 2443 { 2444 ibmf_msg_t *msg; 2445 ib_mad_hdr_t *hdr; 2446 2447 IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: gid info 0x%p", gid_info); 2448 2449 /* 2450 * Send command to get iounitinfo attribute. Allocate a IBMF 2451 * packet and initialize the packet. 
2452 */ 2453 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, &msg) != 2454 IBMF_SUCCESS) { 2455 IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: pkt alloc fail"); 2456 return (IBDM_FAILURE); 2457 } 2458 2459 mutex_enter(&gid_info->gl_mutex); 2460 ibdm_bump_transactionID(gid_info); 2461 mutex_exit(&gid_info->gl_mutex); 2462 2463 2464 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2465 ibdm_alloc_send_buffers(msg); 2466 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2467 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2468 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2469 msg->im_local_addr.ia_remote_qno = 1; 2470 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2471 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2472 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2473 2474 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2475 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2476 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2477 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2478 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2479 hdr->Status = 0; 2480 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2481 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 2482 hdr->AttributeModifier = 0; 2483 2484 gid_info->gl_iou_cb_args.cb_gid_info = gid_info; 2485 gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt; 2486 gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO; 2487 2488 mutex_enter(&gid_info->gl_mutex); 2489 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2490 &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2491 mutex_exit(&gid_info->gl_mutex); 2492 2493 IBTF_DPRINTF_L5("ibdm", "send_iouunitinfo:" 2494 "timeout %x", gid_info->gl_timeout_id); 2495 2496 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg, 2497 NULL, ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) != 2498 IBMF_SUCCESS) { 2499 IBTF_DPRINTF_L2("ibdm", "\tsend_iounitinfo: ibmf send failed"); 2500 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, 2501 msg, &gid_info->gl_iou_cb_args); 2502 } 2503 return (IBDM_SUCCESS); 2504 } 2505 2506 /* 2507 * ibdm_handle_iounitinfo() 2508 * Invoked by the IBMF when IO Unitinfo request is completed. 
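 *
 * (In outline: the response is validated against the current probe
 * state, the reported iou_changeid is compared with any previously
 * cached IOU info, and an IOCControllerProfile request is then issued
 * for every slot that reports an installed IOC.)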
2509  */
2510 static void
2511 ibdm_handle_iounitinfo(ibmf_handle_t ibmf_hdl,
2512 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag)
2513 {
2514 int ii, first = B_TRUE;
2515 int num_iocs;
2516 size_t size;
2517 uchar_t slot_info;
2518 timeout_id_t timeout_id;
2519 ib_mad_hdr_t *hdr;
2520 ibdm_ioc_info_t *ioc_info;
2521 ib_dm_io_unitinfo_t *iou_info;
2522 ib_dm_io_unitinfo_t *giou_info;
2523 ibdm_timeout_cb_args_t *cb_args;
2524
2525 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo:"
2526 " ibmf hdl %p pkt %p gid info %p", ibmf_hdl, msg, gid_info);
2527
2528 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_IO_UNITINFO) {
2529 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: "
2530 "Unexpected response");
2531 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
2532 return;
2533 }
2534
2535 mutex_enter(&gid_info->gl_mutex);
2536 if (gid_info->gl_state != IBDM_GET_IOUNITINFO) {
2537 IBTF_DPRINTF_L4("ibdm",
2538 "\thandle_iounitinfo: DUP resp");
2539 mutex_exit(&gid_info->gl_mutex);
2540 (*flag) = IBDM_IBMF_PKT_DUP_RESP;
2541 return;
2542 }
2543 gid_info->gl_iou_cb_args.cb_req_type = 0;
2544 if (gid_info->gl_timeout_id) {
2545 timeout_id = gid_info->gl_timeout_id;
2546 mutex_exit(&gid_info->gl_mutex);
2547 IBTF_DPRINTF_L5("ibdm", "handle_iounitinfo: "
2548 "gl_timeout_id = 0x%x", timeout_id);
2549 if (untimeout(timeout_id) == -1) {
2550 IBTF_DPRINTF_L2("ibdm", "handle_iounitinfo: "
2551 "untimeout gl_timeout_id failed");
2552 }
2553 mutex_enter(&gid_info->gl_mutex);
2554 gid_info->gl_timeout_id = 0;
2555 }
2556 gid_info->gl_state = IBDM_GET_IOC_DETAILS;
2557
2558 iou_info = IBDM_IN_IBMFMSG2IOU(msg);
2559 ibdm_dump_iounitinfo(iou_info);
2560 num_iocs = iou_info->iou_num_ctrl_slots;
2561 /*
2562  * Check whether the number of IOCs reported is zero; if so (and no
2563  * IOCs were known previously), return. Even when zero IOCs are
2564  * reported, the internal IOC database may still need to be updated,
2565  * so the number of IOCs is saved in the field "gl_num_iocs". Using
2566  * this field instead of "giou_info->iou_num_ctrl_slots" avoids an
2567  * unnecessary kmem_alloc/kmem_free when num_iocs is 0.
2568  */
2569 if (num_iocs == 0 && gid_info->gl_num_iocs == 0) {
2570 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: no IOCs");
2571 mutex_exit(&gid_info->gl_mutex);
2572 return;
2573 }
2574 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: num_iocs = %d", num_iocs);
2575
2576 /*
2577  * If there is an existing gl_iou (the IOU has been probed before),
2578  * check whether "iou_changeid" is the same as the saved entry in
2579  * "giou_info->iou_changeid".
2580  * (Note: this logic can prevent IOC enumeration if a given vendor
2581  * doesn't support setting the iou_changeid field for its IOU.)
2582  *
2583  * If there is an existing gl_iou and iou_changeid has changed:
2584  * free up the existing gl_iou info and its related structures, and
2585  * reallocate the gl_iou info all over again. If the old info is not
2586  * freed here, the memory is leaked.
2587  */
2588 if (gid_info->gl_iou) {
2589 giou_info = &gid_info->gl_iou->iou_info;
2590 if (b2h16(iou_info->iou_changeid) ==
2591 giou_info->iou_changeid) {
2592 IBTF_DPRINTF_L3("ibdm",
2593 "\thandle_iounitinfo: no IOCs changed");
2594 gid_info->gl_state = IBDM_GID_PROBING_COMPLETE;
2595 mutex_exit(&gid_info->gl_mutex);
2596 return;
2597 }
2598
2599 /*
2600  * Store the iou info as prev_iou to be used after
2601  * sweep is done.
2602 */ 2603 ASSERT(gid_info->gl_prev_iou == NULL); 2604 IBTF_DPRINTF_L4(ibdm_string, 2605 "\thandle_iounitinfo: setting gl_prev_iou %p", 2606 gid_info->gl_prev_iou); 2607 gid_info->gl_prev_iou = gid_info->gl_iou; 2608 ibdm.ibdm_prev_iou = 1; 2609 gid_info->gl_iou = NULL; 2610 } 2611 2612 size = sizeof (ibdm_iou_info_t) + num_iocs * sizeof (ibdm_ioc_info_t); 2613 gid_info->gl_iou = (ibdm_iou_info_t *)kmem_zalloc(size, KM_SLEEP); 2614 giou_info = &gid_info->gl_iou->iou_info; 2615 gid_info->gl_iou->iou_ioc_info = (ibdm_ioc_info_t *) 2616 ((char *)gid_info->gl_iou + sizeof (ibdm_iou_info_t)); 2617 2618 giou_info->iou_num_ctrl_slots = gid_info->gl_num_iocs = num_iocs; 2619 giou_info->iou_flag = iou_info->iou_flag; 2620 bcopy(iou_info->iou_ctrl_list, giou_info->iou_ctrl_list, 128); 2621 giou_info->iou_changeid = b2h16(iou_info->iou_changeid); 2622 gid_info->gl_pending_cmds++; /* for diag code */ 2623 mutex_exit(&gid_info->gl_mutex); 2624 2625 if (ibdm_get_diagcode(gid_info, 0) != IBDM_SUCCESS) { 2626 mutex_enter(&gid_info->gl_mutex); 2627 gid_info->gl_pending_cmds--; 2628 mutex_exit(&gid_info->gl_mutex); 2629 } 2630 /* 2631 * Parallelize getting IOC controller profiles from here. 2632 * Allocate IBMF packets and send commands to get IOC profile for 2633 * each IOC present on the IOU. 2634 */ 2635 for (ii = 0; ii < num_iocs; ii++) { 2636 /* 2637 * Check whether IOC is present in the slot 2638 * Series of nibbles (in the field iou_ctrl_list) represents 2639 * a slot in the IOU. 2640 * Byte format: 76543210 2641 * Bits 0-3 of first byte represent Slot 2 2642 * bits 4-7 of first byte represent slot 1, 2643 * bits 0-3 of second byte represent slot 4 and so on 2644 * Each 4-bit nibble has the following meaning 2645 * 0x0 : IOC not installed 2646 * 0x1 : IOC is present 2647 * 0xf : Slot does not exist 2648 * and all other values are reserved. 2649 */ 2650 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii); 2651 slot_info = giou_info->iou_ctrl_list[(ii/2)]; 2652 if ((ii % 2) == 0) 2653 slot_info = (slot_info >> 4); 2654 2655 if ((slot_info & 0xf) != 1) { 2656 IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo: " 2657 "No IOC is present in the slot = %d", ii); 2658 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_FAILED; 2659 continue; 2660 } 2661 2662 mutex_enter(&gid_info->gl_mutex); 2663 ibdm_bump_transactionID(gid_info); 2664 mutex_exit(&gid_info->gl_mutex); 2665 2666 /* 2667 * Re use the already allocated packet (for IOUnitinfo) to 2668 * send the first IOC controller attribute. 
Allocate new 2669 * IBMF packets for the rest of the IOC's 2670 */ 2671 if (first != B_TRUE) { 2672 msg = NULL; 2673 if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP, 2674 &msg) != IBMF_SUCCESS) { 2675 IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo: " 2676 "IBMF packet allocation failed"); 2677 continue; 2678 } 2679 2680 } 2681 2682 /* allocate send buffers for all messages */ 2683 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2684 ibdm_alloc_send_buffers(msg); 2685 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2686 2687 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2688 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2689 if (gid_info->gl_redirected == B_TRUE) { 2690 if (gid_info->gl_redirect_dlid != 0) { 2691 msg->im_local_addr.ia_remote_lid = 2692 gid_info->gl_redirect_dlid; 2693 } 2694 msg->im_local_addr.ia_remote_qno = 2695 gid_info->gl_redirect_QP; 2696 msg->im_local_addr.ia_p_key = 2697 gid_info->gl_redirect_pkey; 2698 msg->im_local_addr.ia_q_key = 2699 gid_info->gl_redirect_qkey; 2700 msg->im_local_addr.ia_service_level = 2701 gid_info->gl_redirectSL; 2702 } else { 2703 msg->im_local_addr.ia_remote_qno = 1; 2704 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2705 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2706 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2707 } 2708 2709 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2710 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2711 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2712 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2713 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2714 hdr->Status = 0; 2715 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2716 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 2717 hdr->AttributeModifier = h2b32(ii + 1); 2718 2719 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_INVALID; 2720 cb_args = &ioc_info->ioc_cb_args; 2721 cb_args->cb_gid_info = gid_info; 2722 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2723 cb_args->cb_req_type = IBDM_REQ_TYPE_IOCINFO; 2724 cb_args->cb_ioc_num = ii; 2725 2726 mutex_enter(&gid_info->gl_mutex); 2727 gid_info->gl_pending_cmds++; /* for diag code */ 2728 2729 ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2730 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2731 mutex_exit(&gid_info->gl_mutex); 2732 2733 IBTF_DPRINTF_L5("ibdm", "\thandle_iounitinfo:" 2734 "timeout 0x%x, ioc_num %d", ioc_info->ioc_timeout_id, ii); 2735 2736 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, 2737 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2738 IBTF_DPRINTF_L2("ibdm", 2739 "\thandle_iounitinfo: msg transport failed"); 2740 ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args); 2741 } 2742 (*flag) |= IBDM_IBMF_PKT_REUSED; 2743 first = B_FALSE; 2744 gid_info->gl_iou->iou_niocs_probe_in_progress++; 2745 } 2746 } 2747 2748 2749 /* 2750 * ibdm_handle_ioc_profile() 2751 * Invoked by the IBMF when the IOCControllerProfile request 2752 * gets completed 2753 */ 2754 static void 2755 ibdm_handle_ioc_profile(ibmf_handle_t ibmf_hdl, 2756 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2757 { 2758 int first = B_TRUE, reprobe = 0; 2759 uint_t ii, ioc_no, srv_start; 2760 uint_t nserv_entries; 2761 timeout_id_t timeout_id; 2762 ib_mad_hdr_t *hdr; 2763 ibdm_ioc_info_t *ioc_info; 2764 ibdm_timeout_cb_args_t *cb_args; 2765 ib_dm_ioc_ctrl_profile_t *ioc, *gioc; 2766 2767 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile:" 2768 " ibmf hdl %p msg %p gid info %p", ibmf_hdl, msg, gid_info); 2769 2770 ioc = IBDM_IN_IBMFMSG2IOC(msg); 2771 /* 2772 * Check whether we know this IOC already 2773 * This 
will return NULL if a reprobe is in progress; in that case
2774  * IBDM_IOC_STATE_REPROBE_PROGRESS will already be set for the IOC.
2775  * Do not hold mutexes here.
2776  */
2777 if (ibdm_is_ioc_present(ioc->ioc_guid, gid_info, flag) != NULL) {
2778 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile:"
2779 "IOC guid %llx is present", ioc->ioc_guid);
2780 return;
2781 }
2782 ioc_no = IBDM_IN_IBMFMSG_ATTRMOD(msg);
2783 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile: ioc_no = %d", ioc_no-1);
2784
2785 /* Make sure that the IOC index is within the valid range */
2786 if (IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) {
2787 IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: "
2788 "IOC index Out of range, index %d", ioc_no);
2789 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
2790 return;
2791 }
2792 ioc_info = &gid_info->gl_iou->iou_ioc_info[ioc_no - 1];
2793 ioc_info->ioc_iou_info = gid_info->gl_iou;
2794
2795 mutex_enter(&gid_info->gl_mutex);
2796 if (ioc_info->ioc_state == IBDM_IOC_STATE_REPROBE_PROGRESS) {
2797 reprobe = 1;
2798 ioc_info->ioc_prev_serv = ioc_info->ioc_serv;
2799 ioc_info->ioc_serv = NULL;
2800 ioc_info->ioc_prev_serv_cnt =
2801 ioc_info->ioc_profile.ioc_service_entries;
2802 } else if (ioc_info->ioc_state != IBDM_IOC_STATE_PROBE_INVALID) {
2803 IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: DUP response "
2804 "ioc %d, ioc_state %x", ioc_no - 1, ioc_info->ioc_state);
2805 mutex_exit(&gid_info->gl_mutex);
2806 (*flag) |= IBDM_IBMF_PKT_DUP_RESP;
2807 return;
2808 }
2809 ioc_info->ioc_cb_args.cb_req_type = 0;
2810 if (ioc_info->ioc_timeout_id) {
2811 timeout_id = ioc_info->ioc_timeout_id;
2812 ioc_info->ioc_timeout_id = 0;
2813 mutex_exit(&gid_info->gl_mutex);
2814 IBTF_DPRINTF_L5("ibdm", "handle_ioc_profile: "
2815 "ioc_timeout_id = 0x%x", timeout_id);
2816 if (untimeout(timeout_id) == -1) {
2817 IBTF_DPRINTF_L2("ibdm", "handle_ioc_profile: "
2818 "untimeout ioc_timeout_id failed");
2819 }
2820 mutex_enter(&gid_info->gl_mutex);
2821 }
2822
2823 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_SUCCESS;
2824 if (reprobe == 0) {
2825 ioc_info->ioc_iou_guid = gid_info->gl_nodeguid;
2826 ioc_info->ioc_nodeguid = gid_info->gl_nodeguid;
2827 }
2828
2829 /*
2830  * Save all the IOC information in the global structures.
2831  * Note that the wire format is big endian and the SPARC processor is
2832  * also big endian, so no byte-order conversion is actually needed
2833  * there. The conversion routines used below are no-ops on SPARC
2834  * machines, whereas they do perform the conversion on little endian
2835  * machines such as Intel processors.
2836  */
2837 gioc = (ib_dm_ioc_ctrl_profile_t *)&ioc_info->ioc_profile;
2838
2839 /*
2840  * During a reprobe, restrict updates to only the port GIDs and service entries
2841  */
2842 if (reprobe == 0) {
2843 gioc->ioc_guid = b2h64(ioc->ioc_guid);
2844 gioc->ioc_vendorid =
2845 ((b2h32(ioc->ioc_vendorid) & IB_DM_VENDORID_MASK)
2846 >> IB_DM_VENDORID_SHIFT);
2847 gioc->ioc_deviceid = b2h32(ioc->ioc_deviceid);
2848 gioc->ioc_device_ver = b2h16(ioc->ioc_device_ver);
2849 gioc->ioc_subsys_vendorid =
2850 ((b2h32(ioc->ioc_subsys_vendorid) & IB_DM_VENDORID_MASK)
2851 >> IB_DM_VENDORID_SHIFT);
2852 gioc->ioc_subsys_id = b2h32(ioc->ioc_subsys_id);
2853 gioc->ioc_io_class = b2h16(ioc->ioc_io_class);
2854 gioc->ioc_io_subclass = b2h16(ioc->ioc_io_subclass);
2855 gioc->ioc_protocol = b2h16(ioc->ioc_protocol);
2856 gioc->ioc_protocol_ver = b2h16(ioc->ioc_protocol_ver);
2857 gioc->ioc_send_msg_qdepth =
2858 b2h16(ioc->ioc_send_msg_qdepth);
2859 gioc->ioc_rdma_read_qdepth =
2860 b2h16(ioc->ioc_rdma_read_qdepth);
2861 gioc->ioc_send_msg_sz = b2h32(ioc->ioc_send_msg_sz);
2862 gioc->ioc_rdma_xfer_sz = b2h32(ioc->ioc_rdma_xfer_sz);
2863 gioc->ioc_ctrl_opcap_mask = ioc->ioc_ctrl_opcap_mask;
2864 bcopy(ioc->ioc_id_string, gioc->ioc_id_string,
2865 IB_DM_IOC_ID_STRING_LEN);
2866
2867 ioc_info->ioc_iou_diagcode = gid_info->gl_iou->iou_diagcode;
2868 ioc_info->ioc_iou_dc_valid = gid_info->gl_iou->iou_dc_valid;
2869 ioc_info->ioc_diagdeviceid = (IB_DM_IOU_DEVICEID_MASK &
2870 gid_info->gl_iou->iou_info.iou_flag) ? B_TRUE : B_FALSE;
2871
2872 if (ioc_info->ioc_diagdeviceid == B_TRUE) {
2873 gid_info->gl_pending_cmds++;
2874 IBTF_DPRINTF_L3(ibdm_string,
2875 "\tibdm_handle_ioc_profile: "
2876 "%d: gid_info %p gl_state %d pending_cmds %d",
2877 __LINE__, gid_info, gid_info->gl_state,
2878 gid_info->gl_pending_cmds);
2879 }
2880 }
2881 gioc->ioc_service_entries = ioc->ioc_service_entries;
2882 mutex_exit(&gid_info->gl_mutex);
2883
2884 ibdm_dump_ioc_profile(gioc);
2885
2886 if ((ioc_info->ioc_diagdeviceid == B_TRUE) && (reprobe == 0)) {
2887 if (ibdm_get_diagcode(gid_info, ioc_no) != IBDM_SUCCESS) {
2888 mutex_enter(&gid_info->gl_mutex);
2889 gid_info->gl_pending_cmds--;
2890 mutex_exit(&gid_info->gl_mutex);
2891 }
2892 }
2893 ioc_info->ioc_serv = (ibdm_srvents_info_t *)kmem_zalloc(
2894 (gioc->ioc_service_entries * sizeof (ibdm_srvents_info_t)),
2895 KM_SLEEP);
2896
2897 /*
2898  * A single request can fetch at most four service entries. If an
2899  * IOC has more than four service entries, calculate the number of
2900  * requests needed and send them in parallel.
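 *
 * As an illustrative sketch (derived from the loop below, not from any
 * external documentation): an IOC reporting ten service entries would
 * be fetched in three requests covering entries 0-3, 4-7 and 8-9.
 * cb_srvents_start/cb_srvents_end record each sub-range, and the
 * attribute modifier (filled in by ibdm_fill_srv_attr_mod()) carries
 * the IOC number together with the start and end indexes, which
 * ibdm_handle_srventry_mad() decodes on the response path.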
2901 */ 2902 nserv_entries = ioc->ioc_service_entries; 2903 ii = 0; 2904 while (nserv_entries) { 2905 mutex_enter(&gid_info->gl_mutex); 2906 gid_info->gl_pending_cmds++; 2907 ibdm_bump_transactionID(gid_info); 2908 mutex_exit(&gid_info->gl_mutex); 2909 2910 if (first != B_TRUE) { 2911 if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP, 2912 &msg) != IBMF_SUCCESS) { 2913 continue; 2914 } 2915 2916 } 2917 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2918 ibdm_alloc_send_buffers(msg); 2919 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2920 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2921 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2922 if (gid_info->gl_redirected == B_TRUE) { 2923 if (gid_info->gl_redirect_dlid != 0) { 2924 msg->im_local_addr.ia_remote_lid = 2925 gid_info->gl_redirect_dlid; 2926 } 2927 msg->im_local_addr.ia_remote_qno = 2928 gid_info->gl_redirect_QP; 2929 msg->im_local_addr.ia_p_key = 2930 gid_info->gl_redirect_pkey; 2931 msg->im_local_addr.ia_q_key = 2932 gid_info->gl_redirect_qkey; 2933 msg->im_local_addr.ia_service_level = 2934 gid_info->gl_redirectSL; 2935 } else { 2936 msg->im_local_addr.ia_remote_qno = 1; 2937 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2938 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2939 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2940 } 2941 2942 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2943 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2944 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2945 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2946 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2947 hdr->Status = 0; 2948 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2949 hdr->AttributeID = h2b16(IB_DM_ATTR_SERVICE_ENTRIES); 2950 2951 srv_start = ii * 4; 2952 cb_args = &ioc_info->ioc_serv[srv_start].se_cb_args; 2953 cb_args->cb_gid_info = gid_info; 2954 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2955 cb_args->cb_req_type = IBDM_REQ_TYPE_SRVENTS; 2956 cb_args->cb_srvents_start = srv_start; 2957 cb_args->cb_ioc_num = ioc_no - 1; 2958 2959 if (nserv_entries >= IBDM_MAX_SERV_ENTRIES_PER_REQ) { 2960 nserv_entries -= IBDM_MAX_SERV_ENTRIES_PER_REQ; 2961 cb_args->cb_srvents_end = (cb_args->cb_srvents_start + 2962 IBDM_MAX_SERV_ENTRIES_PER_REQ - 1); 2963 } else { 2964 cb_args->cb_srvents_end = 2965 (cb_args->cb_srvents_start + nserv_entries - 1); 2966 nserv_entries = 0; 2967 } 2968 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hdr)) 2969 ibdm_fill_srv_attr_mod(hdr, cb_args); 2970 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hdr)) 2971 2972 mutex_enter(&gid_info->gl_mutex); 2973 ioc_info->ioc_serv[srv_start].se_timeout_id = timeout( 2974 ibdm_pkt_timeout_hdlr, cb_args, 2975 IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2976 mutex_exit(&gid_info->gl_mutex); 2977 2978 IBTF_DPRINTF_L5("ibdm", "\thandle_ioc_profile:" 2979 "timeout %x, ioc %d srv %d", 2980 ioc_info->ioc_serv[srv_start].se_timeout_id, 2981 ioc_no - 1, srv_start); 2982 2983 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, 2984 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2985 IBTF_DPRINTF_L2("ibdm", 2986 "\thandle_ioc_profile: msg send failed"); 2987 ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args); 2988 } 2989 (*flag) |= IBDM_IBMF_PKT_REUSED; 2990 first = B_FALSE; 2991 ii++; 2992 } 2993 } 2994 2995 2996 /* 2997 * ibdm_handle_srventry_mad() 2998 */ 2999 static void 3000 ibdm_handle_srventry_mad(ibmf_msg_t *msg, 3001 ibdm_dp_gidinfo_t *gid_info, int *flag) 3002 { 3003 uint_t ii, ioc_no, attrmod; 3004 uint_t nentries, start, end; 3005 timeout_id_t timeout_id; 3006 ib_dm_srv_t *srv_ents; 3007 
ibdm_ioc_info_t *ioc_info; 3008 ibdm_srvents_info_t *gsrv_ents; 3009 3010 IBTF_DPRINTF_L4("ibdm", "\thandle_srventry_mad:" 3011 " IBMF msg %p gid info %p", msg, gid_info); 3012 3013 srv_ents = IBDM_IN_IBMFMSG2SRVENT(msg); 3014 /* 3015 * Get the start and end index of the service entries 3016 * Upper 16 bits identify the IOC 3017 * Lower 16 bits specify the range of service entries 3018 * LSB specifies (Big endian) end of the range 3019 * MSB specifies (Big endian) start of the range 3020 */ 3021 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg); 3022 ioc_no = ((attrmod >> 16) & IBDM_16_BIT_MASK); 3023 end = ((attrmod >> 8) & IBDM_8_BIT_MASK); 3024 start = (attrmod & IBDM_8_BIT_MASK); 3025 3026 /* Make sure that IOC index is with the valid range */ 3027 if ((ioc_no < 1) | 3028 (ioc_no > gid_info->gl_iou->iou_info.iou_num_ctrl_slots)) { 3029 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 3030 "IOC index Out of range, index %d", ioc_no); 3031 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3032 return; 3033 } 3034 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1)); 3035 3036 /* 3037 * Make sure that the "start" and "end" service indexes are 3038 * with in the valid range 3039 */ 3040 nentries = ioc_info->ioc_profile.ioc_service_entries; 3041 if ((start > end) | (start >= nentries) | (end >= nentries)) { 3042 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 3043 "Attr modifier 0x%x, #Serv entries %d", attrmod, nentries); 3044 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3045 return; 3046 } 3047 gsrv_ents = &ioc_info->ioc_serv[start]; 3048 mutex_enter(&gid_info->gl_mutex); 3049 if (gsrv_ents->se_state != IBDM_SE_INVALID) { 3050 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 3051 "already known, ioc %d, srv %d, se_state %x", 3052 ioc_no - 1, start, gsrv_ents->se_state); 3053 mutex_exit(&gid_info->gl_mutex); 3054 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 3055 return; 3056 } 3057 ioc_info->ioc_serv[start].se_cb_args.cb_req_type = 0; 3058 if (ioc_info->ioc_serv[start].se_timeout_id) { 3059 IBTF_DPRINTF_L2("ibdm", 3060 "\thandle_srventry_mad: ioc %d start %d", ioc_no, start); 3061 timeout_id = ioc_info->ioc_serv[start].se_timeout_id; 3062 ioc_info->ioc_serv[start].se_timeout_id = 0; 3063 mutex_exit(&gid_info->gl_mutex); 3064 IBTF_DPRINTF_L5("ibdm", "handle_srverntry_mad: " 3065 "se_timeout_id = 0x%x", timeout_id); 3066 if (untimeout(timeout_id) == -1) { 3067 IBTF_DPRINTF_L2("ibdm", "handle_srventry_mad: " 3068 "untimeout se_timeout_id failed"); 3069 } 3070 mutex_enter(&gid_info->gl_mutex); 3071 } 3072 3073 gsrv_ents->se_state = IBDM_SE_VALID; 3074 mutex_exit(&gid_info->gl_mutex); 3075 for (ii = start; ii <= end; ii++, srv_ents++, gsrv_ents++) { 3076 gsrv_ents->se_attr.srv_id = b2h64(srv_ents->srv_id); 3077 bcopy(srv_ents->srv_name, 3078 gsrv_ents->se_attr.srv_name, IB_DM_MAX_SVC_NAME_LEN); 3079 ibdm_dump_service_entries(&gsrv_ents->se_attr); 3080 } 3081 } 3082 3083 3084 /* 3085 * ibdm_get_diagcode: 3086 * Send request to get IOU/IOC diag code 3087 * Returns IBDM_SUCCESS/IBDM_FAILURE 3088 */ 3089 static int 3090 ibdm_get_diagcode(ibdm_dp_gidinfo_t *gid_info, int attr) 3091 { 3092 ibmf_msg_t *msg; 3093 ib_mad_hdr_t *hdr; 3094 ibdm_ioc_info_t *ioc; 3095 ibdm_timeout_cb_args_t *cb_args; 3096 timeout_id_t *timeout_id; 3097 3098 IBTF_DPRINTF_L4("ibdm", "\tget_diagcode: gid info %p, attr = %d", 3099 gid_info, attr); 3100 3101 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 3102 &msg) != IBMF_SUCCESS) { 3103 IBTF_DPRINTF_L4("ibdm", "\tget_diagcode: pkt alloc fail"); 3104 return (IBDM_FAILURE); 3105 } 3106 3107 
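	/*
	 * Note on the "attr" argument (a description of the handling
	 * below, not an added behavior): attr == 0 requests the IOU
	 * DiagCode and tracks the request with the per-GID
	 * gl_iou_cb_args/gl_timeout_id, while attr == n (n > 0) requests
	 * the DiagCode of the IOC in slot n and tracks it with that IOC's
	 * ioc_dc_cb_args/ioc_dc_timeout_id, with cb_ioc_num set to n - 1.
	 */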
_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 3108 ibdm_alloc_send_buffers(msg); 3109 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 3110 3111 mutex_enter(&gid_info->gl_mutex); 3112 ibdm_bump_transactionID(gid_info); 3113 mutex_exit(&gid_info->gl_mutex); 3114 3115 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 3116 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 3117 if (gid_info->gl_redirected == B_TRUE) { 3118 if (gid_info->gl_redirect_dlid != 0) { 3119 msg->im_local_addr.ia_remote_lid = 3120 gid_info->gl_redirect_dlid; 3121 } 3122 3123 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 3124 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 3125 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 3126 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 3127 } else { 3128 msg->im_local_addr.ia_remote_qno = 1; 3129 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 3130 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 3131 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 3132 } 3133 3134 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 3135 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 3136 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 3137 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 3138 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 3139 hdr->Status = 0; 3140 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 3141 3142 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 3143 hdr->AttributeModifier = h2b32(attr); 3144 3145 if (attr == 0) { 3146 cb_args = &gid_info->gl_iou_cb_args; 3147 gid_info->gl_iou->iou_dc_valid = B_FALSE; 3148 cb_args->cb_ioc_num = 0; 3149 cb_args->cb_req_type = IBDM_REQ_TYPE_IOU_DIAGCODE; 3150 timeout_id = &gid_info->gl_timeout_id; 3151 } else { 3152 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attr - 1)); 3153 ioc->ioc_dc_valid = B_FALSE; 3154 cb_args = &ioc->ioc_dc_cb_args; 3155 cb_args->cb_ioc_num = attr - 1; 3156 cb_args->cb_req_type = IBDM_REQ_TYPE_IOC_DIAGCODE; 3157 timeout_id = &ioc->ioc_dc_timeout_id; 3158 } 3159 cb_args->cb_gid_info = gid_info; 3160 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 3161 cb_args->cb_srvents_start = 0; 3162 3163 mutex_enter(&gid_info->gl_mutex); 3164 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 3165 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 3166 mutex_exit(&gid_info->gl_mutex); 3167 3168 IBTF_DPRINTF_L5("ibdm", "\tget_diagcode:" 3169 "timeout %x, ioc %d", *timeout_id, cb_args->cb_ioc_num); 3170 3171 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 3172 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 3173 IBTF_DPRINTF_L2("ibdm", "\tget_diagcode: ibmf send failed"); 3174 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 3175 } 3176 return (IBDM_SUCCESS); 3177 } 3178 3179 /* 3180 * ibdm_handle_diagcode: 3181 * Process the DiagCode MAD response and update local DM 3182 * data structure. 
3183 */ 3184 static void 3185 ibdm_handle_diagcode(ibmf_msg_t *ibmf_msg, 3186 ibdm_dp_gidinfo_t *gid_info, int *flag) 3187 { 3188 uint16_t attrmod, *diagcode; 3189 ibdm_iou_info_t *iou; 3190 ibdm_ioc_info_t *ioc; 3191 timeout_id_t timeout_id; 3192 ibdm_timeout_cb_args_t *cb_args; 3193 3194 diagcode = (uint16_t *)ibmf_msg->im_msgbufs_recv.im_bufs_cl_data; 3195 3196 mutex_enter(&gid_info->gl_mutex); 3197 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(ibmf_msg); 3198 iou = gid_info->gl_iou; 3199 if (attrmod == 0) { 3200 if (iou->iou_dc_valid != B_FALSE) { 3201 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 3202 IBTF_DPRINTF_L4("ibdm", 3203 "\thandle_diagcode: Duplicate IOU DiagCode"); 3204 mutex_exit(&gid_info->gl_mutex); 3205 return; 3206 } 3207 cb_args = &gid_info->gl_iou_cb_args; 3208 cb_args->cb_req_type = 0; 3209 iou->iou_diagcode = b2h16(*diagcode); 3210 iou->iou_dc_valid = B_TRUE; 3211 if (gid_info->gl_timeout_id) { 3212 timeout_id = gid_info->gl_timeout_id; 3213 mutex_exit(&gid_info->gl_mutex); 3214 IBTF_DPRINTF_L5("ibdm", "\thandle_diagcode: " 3215 "gl_timeout_id = 0x%x", timeout_id); 3216 if (untimeout(timeout_id) == -1) { 3217 IBTF_DPRINTF_L2("ibdm", "handle_diagcode: " 3218 "untimeout gl_timeout_id failed"); 3219 } 3220 mutex_enter(&gid_info->gl_mutex); 3221 gid_info->gl_timeout_id = 0; 3222 } 3223 } else { 3224 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod - 1)); 3225 if (ioc->ioc_dc_valid != B_FALSE) { 3226 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 3227 IBTF_DPRINTF_L4("ibdm", 3228 "\thandle_diagcode: Duplicate IOC DiagCode"); 3229 mutex_exit(&gid_info->gl_mutex); 3230 return; 3231 } 3232 cb_args = &ioc->ioc_dc_cb_args; 3233 cb_args->cb_req_type = 0; 3234 ioc->ioc_diagcode = b2h16(*diagcode); 3235 ioc->ioc_dc_valid = B_TRUE; 3236 timeout_id = iou->iou_ioc_info[attrmod - 1].ioc_dc_timeout_id; 3237 if (timeout_id) { 3238 iou->iou_ioc_info[attrmod - 1].ioc_dc_timeout_id = 0; 3239 mutex_exit(&gid_info->gl_mutex); 3240 IBTF_DPRINTF_L5("ibdm", "handle_diagcode: " 3241 "timeout_id = 0x%x", timeout_id); 3242 if (untimeout(timeout_id) == -1) { 3243 IBTF_DPRINTF_L2("ibdm", "\thandle_diagcode: " 3244 "untimeout ioc_dc_timeout_id failed"); 3245 } 3246 mutex_enter(&gid_info->gl_mutex); 3247 } 3248 } 3249 mutex_exit(&gid_info->gl_mutex); 3250 3251 IBTF_DPRINTF_L4("ibdm", "\thandle_diagcode: DiagCode : 0x%x" 3252 "attrmod : 0x%x", b2h16(*diagcode), attrmod); 3253 } 3254 3255 3256 /* 3257 * ibdm_is_ioc_present() 3258 * Return ibdm_ioc_info_t if IOC guid is found in the global gid list 3259 */ 3260 static ibdm_ioc_info_t * 3261 ibdm_is_ioc_present(ib_guid_t ioc_guid, 3262 ibdm_dp_gidinfo_t *gid_info, int *flag) 3263 { 3264 int ii; 3265 ibdm_ioc_info_t *ioc; 3266 ibdm_dp_gidinfo_t *head; 3267 ib_dm_io_unitinfo_t *iou; 3268 3269 mutex_enter(&ibdm.ibdm_mutex); 3270 head = ibdm.ibdm_dp_gidlist_head; 3271 while (head) { 3272 mutex_enter(&head->gl_mutex); 3273 if (head->gl_iou == NULL) { 3274 mutex_exit(&head->gl_mutex); 3275 head = head->gl_next; 3276 continue; 3277 } 3278 iou = &head->gl_iou->iou_info; 3279 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 3280 ioc = IBDM_GIDINFO2IOCINFO(head, ii); 3281 if ((ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) && 3282 (ioc->ioc_profile.ioc_guid == ioc_guid)) { 3283 if (gid_info == head) { 3284 *flag |= IBDM_IBMF_PKT_DUP_RESP; 3285 } else if (ibdm_check_dgid(head->gl_dgid_lo, 3286 head->gl_dgid_hi) != NULL) { 3287 IBTF_DPRINTF_L4("ibdm", "\tis_ioc_" 3288 "present: gid not present"); 3289 ibdm_add_to_gl_gid(gid_info, head); 3290 } 3291 mutex_exit(&head->gl_mutex); 3292 
mutex_exit(&ibdm.ibdm_mutex); 3293 return (ioc); 3294 } 3295 } 3296 mutex_exit(&head->gl_mutex); 3297 head = head->gl_next; 3298 } 3299 mutex_exit(&ibdm.ibdm_mutex); 3300 return (NULL); 3301 } 3302 3303 3304 /* 3305 * ibdm_ibmf_send_cb() 3306 * IBMF invokes this callback routine after posting the DM MAD to 3307 * the HCA. 3308 */ 3309 /*ARGSUSED*/ 3310 static void 3311 ibdm_ibmf_send_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *ibmf_msg, void *arg) 3312 { 3313 ibdm_dump_ibmf_msg(ibmf_msg, 1); 3314 ibdm_free_send_buffers(ibmf_msg); 3315 if (ibmf_free_msg(ibmf_hdl, &ibmf_msg) != IBMF_SUCCESS) { 3316 IBTF_DPRINTF_L4("ibdm", 3317 "\tibmf_send_cb: IBMF free msg failed"); 3318 } 3319 } 3320 3321 3322 /* 3323 * ibdm_ibmf_recv_cb() 3324 * Invoked by the IBMF when a response to the one of the DM requests 3325 * is received. 3326 */ 3327 /*ARGSUSED*/ 3328 static void 3329 ibdm_ibmf_recv_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msg, void *arg) 3330 { 3331 ibdm_taskq_args_t *taskq_args; 3332 3333 /* 3334 * If the taskq enable is set then dispatch a taskq to process 3335 * the MAD, otherwise just process it on this thread 3336 */ 3337 if (ibdm_taskq_enable != IBDM_ENABLE_TASKQ_HANDLING) { 3338 ibdm_process_incoming_mad(ibmf_hdl, msg, arg); 3339 return; 3340 } 3341 3342 /* 3343 * create a taskq and dispatch it to process the incoming MAD 3344 */ 3345 taskq_args = kmem_alloc(sizeof (ibdm_taskq_args_t), KM_NOSLEEP); 3346 if (taskq_args == NULL) { 3347 IBTF_DPRINTF_L2("ibdm", "ibmf_recv_cb: kmem_alloc failed for" 3348 "taskq_args"); 3349 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3350 IBTF_DPRINTF_L4("ibmf_recv_cb", 3351 "\tibmf_recv_cb: IBMF free msg failed"); 3352 } 3353 return; 3354 } 3355 taskq_args->tq_ibmf_handle = ibmf_hdl; 3356 taskq_args->tq_ibmf_msg = msg; 3357 taskq_args->tq_args = arg; 3358 3359 if (taskq_dispatch(system_taskq, ibdm_recv_incoming_mad, taskq_args, 3360 TQ_NOSLEEP) == 0) { 3361 IBTF_DPRINTF_L2("ibdm", "ibmf_recv_cb: taskq_dispatch failed"); 3362 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3363 IBTF_DPRINTF_L4("ibmf_recv_cb", 3364 "\tibmf_recv_cb: IBMF free msg failed"); 3365 } 3366 kmem_free(taskq_args, sizeof (ibdm_taskq_args_t)); 3367 return; 3368 } 3369 3370 /* taskq_args are deleted in ibdm_recv_incoming_mad() */ 3371 } 3372 3373 3374 void 3375 ibdm_recv_incoming_mad(void *args) 3376 { 3377 ibdm_taskq_args_t *taskq_args; 3378 3379 taskq_args = (ibdm_taskq_args_t *)args; 3380 3381 IBTF_DPRINTF_L4("ibdm", "\tibdm_recv_incoming_mad: " 3382 "Processing incoming MAD via taskq"); 3383 3384 ibdm_process_incoming_mad(taskq_args->tq_ibmf_handle, 3385 taskq_args->tq_ibmf_msg, taskq_args->tq_args); 3386 3387 kmem_free(taskq_args, sizeof (ibdm_taskq_args_t)); 3388 } 3389 3390 3391 /* 3392 * Calls ibdm_process_incoming_mad with all function arguments extracted 3393 * from args 3394 */ 3395 /*ARGSUSED*/ 3396 static void 3397 ibdm_process_incoming_mad(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msg, void *arg) 3398 { 3399 int flag = 0; 3400 int ret; 3401 uint64_t transaction_id; 3402 ib_mad_hdr_t *hdr; 3403 ibdm_dp_gidinfo_t *gid_info = NULL; 3404 3405 IBTF_DPRINTF_L4("ibdm", 3406 "\tprocess_incoming_mad: ibmf hdl %p pkt %p", ibmf_hdl, msg); 3407 ibdm_dump_ibmf_msg(msg, 0); 3408 3409 /* 3410 * IBMF calls this routine for every DM MAD that arrives at this port. 3411 * But we handle only the responses for requests we sent. We drop all 3412 * the DM packets that does not have response bit set in the MAD 3413 * header(this eliminates all the requests sent to this port). 
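 *
 * Responses are matched back to their GID by transaction ID: each GID
 * owns a transaction-ID range (gl_min_transactionID through
 * gl_max_transactionID, assigned in ibdm_get_reachable_ports()), so the
 * incoming TransactionID masked with IBDM_GID_TRANSACTIONID_MASK is
 * compared against gl_transactionID in the lookup loop below.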
3414 * We handle only DM class version 1 MAD's 3415 */ 3416 hdr = IBDM_IN_IBMFMSG_MADHDR(msg); 3417 if (ibdm_verify_mad_status(hdr) != IBDM_SUCCESS) { 3418 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3419 IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: " 3420 "IBMF free msg failed DM request drop it"); 3421 } 3422 return; 3423 } 3424 3425 transaction_id = b2h64(hdr->TransactionID); 3426 3427 mutex_enter(&ibdm.ibdm_mutex); 3428 gid_info = ibdm.ibdm_dp_gidlist_head; 3429 while (gid_info) { 3430 if ((gid_info->gl_transactionID & 3431 IBDM_GID_TRANSACTIONID_MASK) == 3432 (transaction_id & IBDM_GID_TRANSACTIONID_MASK)) 3433 break; 3434 gid_info = gid_info->gl_next; 3435 } 3436 mutex_exit(&ibdm.ibdm_mutex); 3437 3438 if (gid_info == NULL) { 3439 /* Drop the packet */ 3440 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: transaction ID" 3441 " does not match: 0x%llx", transaction_id); 3442 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3443 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3444 "IBMF free msg failed DM request drop it"); 3445 } 3446 return; 3447 } 3448 3449 /* Handle redirection for all the MAD's, except ClassPortInfo */ 3450 if (((IBDM_IN_IBMFMSG_STATUS(msg) & MAD_STATUS_REDIRECT_REQUIRED)) && 3451 (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO)) { 3452 ret = ibdm_handle_redirection(msg, gid_info, &flag); 3453 if (ret == IBDM_SUCCESS) { 3454 return; 3455 } 3456 } else { 3457 uint_t gl_state; 3458 3459 mutex_enter(&gid_info->gl_mutex); 3460 gl_state = gid_info->gl_state; 3461 mutex_exit(&gid_info->gl_mutex); 3462 3463 switch (gl_state) { 3464 3465 case IBDM_SET_CLASSPORTINFO: 3466 ibdm_handle_setclassportinfo( 3467 ibmf_hdl, msg, gid_info, &flag); 3468 break; 3469 3470 case IBDM_GET_CLASSPORTINFO: 3471 ibdm_handle_classportinfo( 3472 ibmf_hdl, msg, gid_info, &flag); 3473 break; 3474 3475 case IBDM_GET_IOUNITINFO: 3476 ibdm_handle_iounitinfo(ibmf_hdl, msg, gid_info, &flag); 3477 break; 3478 3479 case IBDM_GET_IOC_DETAILS: 3480 switch (IBDM_IN_IBMFMSG_ATTR(msg)) { 3481 3482 case IB_DM_ATTR_SERVICE_ENTRIES: 3483 ibdm_handle_srventry_mad(msg, gid_info, &flag); 3484 break; 3485 3486 case IB_DM_ATTR_IOC_CTRL_PROFILE: 3487 ibdm_handle_ioc_profile( 3488 ibmf_hdl, msg, gid_info, &flag); 3489 break; 3490 3491 case IB_DM_ATTR_DIAG_CODE: 3492 ibdm_handle_diagcode(msg, gid_info, &flag); 3493 break; 3494 3495 default: 3496 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3497 "Error state, wrong attribute :-("); 3498 (void) ibmf_free_msg(ibmf_hdl, &msg); 3499 return; 3500 } 3501 break; 3502 default: 3503 IBTF_DPRINTF_L2("ibdm", 3504 "process_incoming_mad: Dropping the packet" 3505 " gl_state %x", gl_state); 3506 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3507 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3508 "IBMF free msg failed DM request drop it"); 3509 } 3510 return; 3511 } 3512 } 3513 3514 if ((flag & IBDM_IBMF_PKT_DUP_RESP) || 3515 (flag & IBDM_IBMF_PKT_UNEXP_RESP)) { 3516 IBTF_DPRINTF_L2("ibdm", 3517 "\tprocess_incoming_mad:Dup/unexp resp : 0x%x", flag); 3518 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3519 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3520 "IBMF free msg failed DM request drop it"); 3521 } 3522 return; 3523 } 3524 3525 mutex_enter(&gid_info->gl_mutex); 3526 if (gid_info->gl_pending_cmds < 1) { 3527 IBTF_DPRINTF_L2("ibdm", 3528 "\tprocess_incoming_mad: pending commands negative"); 3529 } 3530 if (--gid_info->gl_pending_cmds) { 3531 IBTF_DPRINTF_L4("ibdm", "\tprocess_incoming_mad: " 3532 "gid_info %p pending cmds %d", 3533 
gid_info, gid_info->gl_pending_cmds); 3534 mutex_exit(&gid_info->gl_mutex); 3535 } else { 3536 uint_t prev_state; 3537 IBTF_DPRINTF_L4("ibdm", "\tprocess_incoming_mad: Probing DONE"); 3538 prev_state = gid_info->gl_state; 3539 gid_info->gl_state = IBDM_GID_PROBING_COMPLETE; 3540 if (prev_state == IBDM_SET_CLASSPORTINFO) { 3541 IBTF_DPRINTF_L4("ibdm", 3542 "\tprocess_incoming_mad: " 3543 "Setclassportinfo for Cisco FC GW is done."); 3544 gid_info->gl_flag &= ~IBDM_CISCO_PROBE; 3545 gid_info->gl_flag |= IBDM_CISCO_PROBE_DONE; 3546 mutex_exit(&gid_info->gl_mutex); 3547 cv_broadcast(&gid_info->gl_probe_cv); 3548 } else { 3549 mutex_exit(&gid_info->gl_mutex); 3550 ibdm_notify_newgid_iocs(gid_info); 3551 mutex_enter(&ibdm.ibdm_mutex); 3552 if (--ibdm.ibdm_ngid_probes_in_progress == 0) { 3553 IBTF_DPRINTF_L4("ibdm", 3554 "\tprocess_incoming_mad: Wakeup"); 3555 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS; 3556 cv_broadcast(&ibdm.ibdm_probe_cv); 3557 } 3558 mutex_exit(&ibdm.ibdm_mutex); 3559 } 3560 } 3561 3562 /* 3563 * Do not deallocate the IBMF packet if atleast one request 3564 * is posted. IBMF packet is reused. 3565 */ 3566 if (!(flag & IBDM_IBMF_PKT_REUSED)) { 3567 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3568 IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: " 3569 "IBMF free msg failed DM request drop it"); 3570 } 3571 } 3572 } 3573 3574 3575 /* 3576 * ibdm_verify_mad_status() 3577 * Verifies the MAD status 3578 * Returns IBDM_SUCCESS if status is correct 3579 * Returns IBDM_FAILURE for bogus MAD status 3580 */ 3581 static int 3582 ibdm_verify_mad_status(ib_mad_hdr_t *hdr) 3583 { 3584 int ret = 0; 3585 3586 if ((hdr->R_Method != IB_DM_DEVMGT_METHOD_GET_RESP) || 3587 (hdr->ClassVersion != IB_DM_CLASS_VERSION_1)) { 3588 return (IBDM_FAILURE); 3589 } 3590 3591 if (b2h16(hdr->Status) == 0) 3592 ret = IBDM_SUCCESS; 3593 else if ((b2h16(hdr->Status) & 0x1f) == MAD_STATUS_REDIRECT_REQUIRED) 3594 ret = IBDM_SUCCESS; 3595 else { 3596 IBTF_DPRINTF_L2("ibdm", 3597 "\tverify_mad_status: Status : 0x%x", b2h16(hdr->Status)); 3598 ret = IBDM_FAILURE; 3599 } 3600 return (ret); 3601 } 3602 3603 3604 3605 /* 3606 * ibdm_handle_redirection() 3607 * Returns IBDM_SUCCESS/IBDM_FAILURE 3608 */ 3609 static int 3610 ibdm_handle_redirection(ibmf_msg_t *msg, 3611 ibdm_dp_gidinfo_t *gid_info, int *flag) 3612 { 3613 int attrmod, ioc_no, start; 3614 void *data; 3615 timeout_id_t *timeout_id; 3616 ib_mad_hdr_t *hdr; 3617 ibdm_ioc_info_t *ioc = NULL; 3618 ibdm_timeout_cb_args_t *cb_args; 3619 ib_mad_classportinfo_t *cpi; 3620 3621 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection: Enter"); 3622 mutex_enter(&gid_info->gl_mutex); 3623 switch (gid_info->gl_state) { 3624 case IBDM_GET_IOUNITINFO: 3625 cb_args = &gid_info->gl_iou_cb_args; 3626 timeout_id = &gid_info->gl_timeout_id; 3627 break; 3628 3629 case IBDM_GET_IOC_DETAILS: 3630 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg); 3631 switch (IBDM_IN_IBMFMSG_ATTR(msg)) { 3632 3633 case IB_DM_ATTR_DIAG_CODE: 3634 if (attrmod == 0) { 3635 cb_args = &gid_info->gl_iou_cb_args; 3636 timeout_id = &gid_info->gl_timeout_id; 3637 break; 3638 } 3639 if (IBDM_IS_IOC_NUM_INVALID(attrmod, gid_info)) { 3640 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3641 "IOC# Out of range %d", attrmod); 3642 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3643 mutex_exit(&gid_info->gl_mutex); 3644 return (IBDM_FAILURE); 3645 } 3646 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod -1)); 3647 cb_args = &ioc->ioc_dc_cb_args; 3648 timeout_id = &ioc->ioc_dc_timeout_id; 3649 break; 3650 3651 case 
IB_DM_ATTR_IOC_CTRL_PROFILE: 3652 if (IBDM_IS_IOC_NUM_INVALID(attrmod, gid_info)) { 3653 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3654 "IOC# Out of range %d", attrmod); 3655 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3656 mutex_exit(&gid_info->gl_mutex); 3657 return (IBDM_FAILURE); 3658 } 3659 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod -1)); 3660 cb_args = &ioc->ioc_cb_args; 3661 timeout_id = &ioc->ioc_timeout_id; 3662 break; 3663 3664 case IB_DM_ATTR_SERVICE_ENTRIES: 3665 ioc_no = ((attrmod >> 16) & IBDM_16_BIT_MASK); 3666 if (IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) { 3667 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3668 "IOC# Out of range %d", ioc_no); 3669 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3670 mutex_exit(&gid_info->gl_mutex); 3671 return (IBDM_FAILURE); 3672 } 3673 start = (attrmod & IBDM_8_BIT_MASK); 3674 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1)); 3675 if (start > ioc->ioc_profile.ioc_service_entries) { 3676 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3677 " SE index Out of range %d", start); 3678 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3679 mutex_exit(&gid_info->gl_mutex); 3680 return (IBDM_FAILURE); 3681 } 3682 cb_args = &ioc->ioc_serv[start].se_cb_args; 3683 timeout_id = &ioc->ioc_serv[start].se_timeout_id; 3684 break; 3685 3686 default: 3687 /* ERROR State */ 3688 IBTF_DPRINTF_L2("ibdm", 3689 "\thandle_redirection: wrong attribute :-("); 3690 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3691 mutex_exit(&gid_info->gl_mutex); 3692 return (IBDM_FAILURE); 3693 } 3694 break; 3695 default: 3696 /* ERROR State */ 3697 IBTF_DPRINTF_L2("ibdm", 3698 "\thandle_redirection: Error state :-("); 3699 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3700 mutex_exit(&gid_info->gl_mutex); 3701 return (IBDM_FAILURE); 3702 } 3703 if ((*timeout_id) != 0) { 3704 mutex_exit(&gid_info->gl_mutex); 3705 if (untimeout(*timeout_id) == -1) { 3706 IBTF_DPRINTF_L2("ibdm", "\thandle_redirection: " 3707 "untimeout failed %x", *timeout_id); 3708 } else { 3709 IBTF_DPRINTF_L5("ibdm", 3710 "\thandle_redirection: timeout %x", *timeout_id); 3711 } 3712 mutex_enter(&gid_info->gl_mutex); 3713 *timeout_id = 0; 3714 } 3715 3716 data = msg->im_msgbufs_recv.im_bufs_cl_data; 3717 cpi = (ib_mad_classportinfo_t *)data; 3718 3719 gid_info->gl_resp_timeout = 3720 (b2h32(cpi->RespTimeValue) & 0x1F); 3721 3722 gid_info->gl_redirected = B_TRUE; 3723 gid_info->gl_redirect_dlid = b2h16(cpi->RedirectLID); 3724 gid_info->gl_redirect_QP = (b2h32(cpi->RedirectQP) & 0xffffff); 3725 gid_info->gl_redirect_pkey = b2h16(cpi->RedirectP_Key); 3726 gid_info->gl_redirect_qkey = b2h32(cpi->RedirectQ_Key); 3727 gid_info->gl_redirectGID_hi = b2h64(cpi->RedirectGID_hi); 3728 gid_info->gl_redirectGID_lo = b2h64(cpi->RedirectGID_lo); 3729 gid_info->gl_redirectSL = cpi->RedirectSL; 3730 3731 if (gid_info->gl_redirect_dlid != 0) { 3732 msg->im_local_addr.ia_remote_lid = 3733 gid_info->gl_redirect_dlid; 3734 } 3735 ibdm_bump_transactionID(gid_info); 3736 mutex_exit(&gid_info->gl_mutex); 3737 3738 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg, *hdr)) 3739 ibdm_alloc_send_buffers(msg); 3740 3741 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 3742 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 3743 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 3744 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 3745 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 3746 hdr->Status = 0; 3747 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 3748 hdr->AttributeID = 3749 msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeID; 3750 hdr->AttributeModifier = 3751 
msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeModifier; 3752 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg, *hdr)) 3753 3754 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 3755 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 3756 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 3757 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 3758 3759 mutex_enter(&gid_info->gl_mutex); 3760 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 3761 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 3762 mutex_exit(&gid_info->gl_mutex); 3763 3764 IBTF_DPRINTF_L5("ibdm", "\thandle_redirect:" 3765 "timeout %x", *timeout_id); 3766 3767 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 3768 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 3769 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection:" 3770 "message transport failed"); 3771 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 3772 } 3773 (*flag) |= IBDM_IBMF_PKT_REUSED; 3774 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection: Exit"); 3775 return (IBDM_SUCCESS); 3776 } 3777 3778 3779 /* 3780 * ibdm_pkt_timeout_hdlr 3781 * This timeout handler is registered for every IBMF packet that is 3782 * sent through the IBMF. It gets called when no response is received 3783 * within the specified time for the packet. The command is retried 3784 * while cb_retry_count allows; after that the failed IBMF packet is 3785 * dropped and the pending command count is updated. 3786 */ 3787 static void 3788 ibdm_pkt_timeout_hdlr(void *arg) 3789 { 3790 ibdm_iou_info_t *iou; 3791 ibdm_ioc_info_t *ioc; 3792 ibdm_timeout_cb_args_t *cb_args = arg; 3793 ibdm_dp_gidinfo_t *gid_info; 3794 int srv_ent; 3795 uint_t new_gl_state; 3796 3797 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: gid_info: %p " 3798 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3799 cb_args->cb_req_type, cb_args->cb_ioc_num, 3800 cb_args->cb_srvents_start); 3801 3802 gid_info = cb_args->cb_gid_info; 3803 mutex_enter(&gid_info->gl_mutex); 3804 3805 if ((gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) || 3806 (cb_args->cb_req_type == 0)) { 3807 3808 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: req completed " 3809 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_req_type, 3810 cb_args->cb_ioc_num, cb_args->cb_srvents_start); 3811 3812 if (gid_info->gl_timeout_id) 3813 gid_info->gl_timeout_id = 0; 3814 mutex_exit(&gid_info->gl_mutex); 3815 return; 3816 } 3817 if (cb_args->cb_retry_count) { 3818 cb_args->cb_retry_count--; 3819 /* 3820 * A new timeout_id is set inside ibdm_retry_command(). 3821 * When the function returns an error, the timeout_id 3822 * is reset (to zero) in the switch statement below.
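 * (cb_retry_count is primed when the request is first posted -- for
 * example, ibdm_send_ioc_profile() initializes it from ibdm_dft_retry_cnt --
 * so each command gets a bounded number of retries before it is failed.)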
3823 */ 3824 if (ibdm_retry_command(cb_args) == IBDM_SUCCESS) { 3825 mutex_exit(&gid_info->gl_mutex); 3826 return; 3827 } 3828 cb_args->cb_retry_count = 0; 3829 } 3830 3831 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: command failed: gid %p" 3832 " rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3833 cb_args->cb_req_type, cb_args->cb_ioc_num, 3834 cb_args->cb_srvents_start); 3835 3836 switch (cb_args->cb_req_type) { 3837 3838 case IBDM_REQ_TYPE_CLASSPORTINFO: 3839 case IBDM_REQ_TYPE_IOUINFO: 3840 new_gl_state = IBDM_GID_PROBING_FAILED; 3841 if (gid_info->gl_timeout_id) 3842 gid_info->gl_timeout_id = 0; 3843 break; 3844 3845 case IBDM_REQ_TYPE_IOCINFO: 3846 new_gl_state = IBDM_GID_PROBING_COMPLETE; 3847 iou = gid_info->gl_iou; 3848 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num]; 3849 ioc->ioc_state = IBDM_IOC_STATE_PROBE_FAILED; 3850 if (ioc->ioc_timeout_id) 3851 ioc->ioc_timeout_id = 0; 3852 break; 3853 3854 case IBDM_REQ_TYPE_SRVENTS: 3855 new_gl_state = IBDM_GID_PROBING_COMPLETE; 3856 iou = gid_info->gl_iou; 3857 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num]; 3858 ioc->ioc_state = IBDM_IOC_STATE_PROBE_FAILED; 3859 srv_ent = cb_args->cb_srvents_start; 3860 if (ioc->ioc_serv[srv_ent].se_timeout_id) 3861 ioc->ioc_serv[srv_ent].se_timeout_id = 0; 3862 break; 3863 3864 case IBDM_REQ_TYPE_IOU_DIAGCODE: 3865 new_gl_state = IBDM_GID_PROBING_COMPLETE; 3866 iou = gid_info->gl_iou; 3867 iou->iou_dc_valid = B_FALSE; 3868 if (gid_info->gl_timeout_id) 3869 gid_info->gl_timeout_id = 0; 3870 break; 3871 3872 case IBDM_REQ_TYPE_IOC_DIAGCODE: 3873 new_gl_state = IBDM_GID_PROBING_COMPLETE; 3874 iou = gid_info->gl_iou; 3875 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num]; 3876 ioc->ioc_dc_valid = B_FALSE; 3877 if (ioc->ioc_dc_timeout_id) 3878 ioc->ioc_dc_timeout_id = 0; 3879 break; 3880 3881 default: /* ERROR State */ 3882 new_gl_state = IBDM_GID_PROBING_FAILED; 3883 if (gid_info->gl_timeout_id) 3884 gid_info->gl_timeout_id = 0; 3885 IBTF_DPRINTF_L2("ibdm", 3886 "\tpkt_timeout_hdlr: wrong request type."); 3887 break; 3888 } 3889 3890 --gid_info->gl_pending_cmds; /* decrease the counter */ 3891 3892 if (gid_info->gl_pending_cmds == 0) { 3893 gid_info->gl_state = new_gl_state; 3894 mutex_exit(&gid_info->gl_mutex); 3895 /* 3896 * Delete this gid_info if the gid probe fails. 3897 */ 3898 if (new_gl_state == IBDM_GID_PROBING_FAILED) { 3899 ibdm_delete_glhca_list(gid_info); 3900 } 3901 ibdm_notify_newgid_iocs(gid_info); 3902 mutex_enter(&ibdm.ibdm_mutex); 3903 if (--ibdm.ibdm_ngid_probes_in_progress == 0) { 3904 IBTF_DPRINTF_L4("ibdm", "\tpkt_timeout_hdlr: Wakeup"); 3905 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS; 3906 cv_broadcast(&ibdm.ibdm_probe_cv); 3907 } 3908 mutex_exit(&ibdm.ibdm_mutex); 3909 } else { 3910 /* 3911 * Reset gl_pending_cmd if the extra timeout happens since 3912 * gl_pending_cmd becomes negative as a result. 3913 */ 3914 if (gid_info->gl_pending_cmds < 0) { 3915 gid_info->gl_pending_cmds = 0; 3916 IBTF_DPRINTF_L2("ibdm", 3917 "\tpkt_timeout_hdlr: extra timeout request." 3918 " reset gl_pending_cmds"); 3919 } 3920 mutex_exit(&gid_info->gl_mutex); 3921 /* 3922 * Delete this gid_info if the gid probe fails. 3923 */ 3924 if (new_gl_state == IBDM_GID_PROBING_FAILED) { 3925 ibdm_delete_glhca_list(gid_info); 3926 } 3927 } 3928 } 3929 3930 3931 /* 3932 * ibdm_retry_command() 3933 * Retries the failed command. 
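 * The retry re-allocates an IBMF message, rebuilds the MAD header for the
 * original request type (honoring any redirection recorded in gid_info),
 * restarts the packet timeout and reposts the request via
 * ibmf_msg_transport().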
3934 * Returns IBDM_FAILURE/IBDM_SUCCESS 3935 */ 3936 static int 3937 ibdm_retry_command(ibdm_timeout_cb_args_t *cb_args) 3938 { 3939 int ret; 3940 ibmf_msg_t *msg; 3941 ib_mad_hdr_t *hdr; 3942 ibdm_dp_gidinfo_t *gid_info = cb_args->cb_gid_info; 3943 timeout_id_t *timeout_id; 3944 ibdm_ioc_info_t *ioc; 3945 int ioc_no; 3946 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 3947 3948 IBTF_DPRINTF_L2("ibdm", "\tretry_command: gid_info: %p " 3949 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3950 cb_args->cb_req_type, cb_args->cb_ioc_num, 3951 cb_args->cb_srvents_start); 3952 3953 ret = ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_NOSLEEP, &msg); 3954 3955 3956 /* 3957 * Reset the gid if alloc_msg failed with BAD_HANDLE 3958 * ibdm_reset_gidinfo reinits the gid_info 3959 */ 3960 if (ret == IBMF_BAD_HANDLE) { 3961 IBTF_DPRINTF_L3(ibdm_string, "\tretry_command: gid %p hdl bad", 3962 gid_info); 3963 3964 mutex_exit(&gid_info->gl_mutex); 3965 ibdm_reset_gidinfo(gid_info); 3966 mutex_enter(&gid_info->gl_mutex); 3967 3968 /* Retry alloc */ 3969 ret = ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_NOSLEEP, 3970 &msg); 3971 } 3972 3973 if (ret != IBDM_SUCCESS) { 3974 IBTF_DPRINTF_L2("ibdm", "\tretry_command: alloc failed: %p " 3975 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3976 cb_args->cb_req_type, cb_args->cb_ioc_num, 3977 cb_args->cb_srvents_start); 3978 return (IBDM_FAILURE); 3979 } 3980 3981 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 3982 ibdm_alloc_send_buffers(msg); 3983 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 3984 3985 ibdm_bump_transactionID(gid_info); 3986 3987 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 3988 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 3989 if (gid_info->gl_redirected == B_TRUE) { 3990 if (gid_info->gl_redirect_dlid != 0) { 3991 msg->im_local_addr.ia_remote_lid = 3992 gid_info->gl_redirect_dlid; 3993 } 3994 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 3995 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 3996 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 3997 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 3998 } else { 3999 msg->im_local_addr.ia_remote_qno = 1; 4000 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 4001 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 4002 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 4003 } 4004 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 4005 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hdr)) 4006 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 4007 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 4008 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 4009 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 4010 hdr->Status = 0; 4011 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 4012 4013 switch (cb_args->cb_req_type) { 4014 case IBDM_REQ_TYPE_CLASSPORTINFO: 4015 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 4016 hdr->AttributeModifier = 0; 4017 timeout_id = &gid_info->gl_timeout_id; 4018 break; 4019 case IBDM_REQ_TYPE_IOUINFO: 4020 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 4021 hdr->AttributeModifier = 0; 4022 timeout_id = &gid_info->gl_timeout_id; 4023 break; 4024 case IBDM_REQ_TYPE_IOCINFO: 4025 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 4026 hdr->AttributeModifier = h2b32(cb_args->cb_ioc_num + 1); 4027 ioc = IBDM_GIDINFO2IOCINFO(gid_info, cb_args->cb_ioc_num); 4028 timeout_id = &ioc->ioc_timeout_id; 4029 break; 4030 case IBDM_REQ_TYPE_SRVENTS: 4031 hdr->AttributeID = h2b16(IB_DM_ATTR_SERVICE_ENTRIES); 4032 ibdm_fill_srv_attr_mod(hdr, cb_args); 
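		/*
		 * ibdm_fill_srv_attr_mod() sets the attribute modifier for the
		 * service-entry query; on the receive side the IOC number is
		 * taken from the upper 16 bits and the starting entry from the
		 * low bits (see the IB_DM_ATTR_SERVICE_ENTRIES handling in
		 * ibdm_handle_redirection() above).
		 */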
4033 ioc = IBDM_GIDINFO2IOCINFO(gid_info, cb_args->cb_ioc_num); 4034 timeout_id = 4035 &ioc->ioc_serv[cb_args->cb_srvents_start].se_timeout_id; 4036 break; 4037 case IBDM_REQ_TYPE_IOU_DIAGCODE: 4038 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 4039 hdr->AttributeModifier = 0; 4040 timeout_id = &gid_info->gl_timeout_id; 4041 break; 4042 case IBDM_REQ_TYPE_IOC_DIAGCODE: 4043 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 4044 hdr->AttributeModifier = h2b32(cb_args->cb_ioc_num + 1); 4045 ioc_no = cb_args->cb_ioc_num; 4046 ioc = &gid_info->gl_iou->iou_ioc_info[ioc_no]; 4047 timeout_id = &ioc->ioc_dc_timeout_id; 4048 break; 4049 } 4050 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*hdr)) 4051 4052 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 4053 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 4054 4055 mutex_exit(&gid_info->gl_mutex); 4056 4057 IBTF_DPRINTF_L5("ibdm", "\tretry_command: %p,%x,%d,%d:" 4058 "timeout %x", cb_args->cb_req_type, cb_args->cb_ioc_num, 4059 cb_args->cb_srvents_start, *timeout_id); 4060 4061 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, 4062 gid_info->gl_qp_hdl, msg, NULL, ibdm_ibmf_send_cb, 4063 cb_args, 0) != IBMF_SUCCESS) { 4064 IBTF_DPRINTF_L2("ibdm", "\tretry_command: send failed: %p " 4065 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 4066 cb_args->cb_req_type, cb_args->cb_ioc_num, 4067 cb_args->cb_srvents_start); 4068 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 4069 } 4070 mutex_enter(&gid_info->gl_mutex); 4071 return (IBDM_SUCCESS); 4072 } 4073 4074 4075 /* 4076 * ibdm_update_ioc_port_gidlist() 4077 */ 4078 static void 4079 ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *dest, 4080 ibdm_dp_gidinfo_t *gid_info) 4081 { 4082 int ii, ngid_ents; 4083 ibdm_gid_t *tmp; 4084 ibdm_hca_list_t *gid_hca_head, *temp; 4085 ibdm_hca_list_t *ioc_head = NULL; 4086 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 4087 4088 IBTF_DPRINTF_L5("ibdm", "\tupdate_ioc_port_gidlist: Enter"); 4089 4090 ngid_ents = gid_info->gl_ngids; 4091 dest->ioc_nportgids = ngid_ents; 4092 dest->ioc_gid_list = kmem_zalloc(sizeof (ibdm_gid_t) * 4093 ngid_ents, KM_SLEEP); 4094 tmp = gid_info->gl_gid; 4095 for (ii = 0; (ii < ngid_ents) && (tmp); ii++) { 4096 dest->ioc_gid_list[ii].gid_dgid_hi = tmp->gid_dgid_hi; 4097 dest->ioc_gid_list[ii].gid_dgid_lo = tmp->gid_dgid_lo; 4098 tmp = tmp->gid_next; 4099 } 4100 4101 gid_hca_head = gid_info->gl_hca_list; 4102 while (gid_hca_head) { 4103 temp = ibdm_dup_hca_attr(gid_hca_head); 4104 temp->hl_next = ioc_head; 4105 ioc_head = temp; 4106 gid_hca_head = gid_hca_head->hl_next; 4107 } 4108 dest->ioc_hca_list = ioc_head; 4109 } 4110 4111 4112 /* 4113 * ibdm_alloc_send_buffers() 4114 * Allocates memory for the IBMF send buffer to send and/or receive 4115 * the Device Management MAD packet. 
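 * The buffer is a single IBDM_MAD_SIZE allocation carved into three
 * regions: the common MAD header (ib_mad_hdr_t), the DM class header of
 * IBDM_DM_MAD_HDR_SZ bytes and the remaining class data area.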
4116 */ 4117 static void 4118 ibdm_alloc_send_buffers(ibmf_msg_t *msgp) 4119 { 4120 msgp->im_msgbufs_send.im_bufs_mad_hdr = 4121 kmem_zalloc(IBDM_MAD_SIZE, KM_SLEEP); 4122 4123 msgp->im_msgbufs_send.im_bufs_cl_hdr = (uchar_t *) 4124 msgp->im_msgbufs_send.im_bufs_mad_hdr + sizeof (ib_mad_hdr_t); 4125 msgp->im_msgbufs_send.im_bufs_cl_hdr_len = IBDM_DM_MAD_HDR_SZ; 4126 4127 msgp->im_msgbufs_send.im_bufs_cl_data = 4128 ((char *)msgp->im_msgbufs_send.im_bufs_cl_hdr + IBDM_DM_MAD_HDR_SZ); 4129 msgp->im_msgbufs_send.im_bufs_cl_data_len = 4130 IBDM_MAD_SIZE - sizeof (ib_mad_hdr_t) - IBDM_DM_MAD_HDR_SZ; 4131 } 4132 4133 4134 /* 4135 * ibdm_free_send_buffers() 4136 * De-allocates memory for the IBMF send buffer 4137 */ 4138 static void 4139 ibdm_free_send_buffers(ibmf_msg_t *msgp) 4140 { 4141 if (msgp->im_msgbufs_send.im_bufs_mad_hdr != NULL) 4142 kmem_free(msgp->im_msgbufs_send.im_bufs_mad_hdr, IBDM_MAD_SIZE); 4143 } 4144 4145 /* 4146 * ibdm_probe_ioc() 4147 * 1. Gets the node records for the node GUID. This detects all the ports 4148 * to the IOU. 4149 * 2. Selectively probes all the IOCs, given the IOU's node GUID 4150 * 3. In case of reprobe, only the IOC to be reprobed is sent the IOC 4151 * Controller Profile request asynchronously 4152 */ 4153 /*ARGSUSED*/ 4154 static void 4155 ibdm_probe_ioc(ib_guid_t nodeguid, ib_guid_t ioc_guid, int reprobe_flag) 4156 { 4157 int ii, nrecords; 4158 size_t nr_len = 0, pi_len = 0; 4159 ib_gid_t sgid, dgid; 4160 ibdm_hca_list_t *hca_list = NULL; 4161 sa_node_record_t *nr, *tmp; 4162 ibdm_port_attr_t *port = NULL; 4163 ibdm_dp_gidinfo_t *reprobe_gid, *new_gid, *node_gid; 4164 ibdm_dp_gidinfo_t *temp_gidinfo; 4165 ibdm_gid_t *temp_gid; 4166 sa_portinfo_record_t *pi; 4167 4168 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc(%llx, %llx, %x): Begin", 4169 nodeguid, ioc_guid, reprobe_flag); 4170 4171 /* Rescan the GID list for any removed GIDs for reprobe */ 4172 if (reprobe_flag) 4173 ibdm_rescan_gidlist(&ioc_guid); 4174 4175 mutex_enter(&ibdm.ibdm_hl_mutex); 4176 for (ibdm_get_next_port(&hca_list, &port, 1); port; 4177 ibdm_get_next_port(&hca_list, &port, 1)) { 4178 reprobe_gid = new_gid = node_gid = NULL; 4179 4180 nr = ibdm_get_node_records(port->pa_sa_hdl, &nr_len, nodeguid); 4181 if (nr == NULL) { 4182 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc: no records"); 4183 continue; 4184 } 4185 nrecords = (nr_len / sizeof (sa_node_record_t)); 4186 for (tmp = nr, ii = 0; (ii < nrecords); ii++, tmp++) { 4187 if ((pi = ibdm_get_portinfo( 4188 port->pa_sa_hdl, &pi_len, tmp->LID)) == NULL) { 4189 IBTF_DPRINTF_L4("ibdm", 4190 "\tibdm_get_portinfo: no portinfo recs"); 4191 continue; 4192 } 4193 4194 /* 4195 * If Device Management is not supported on 4196 * this port, skip the rest. 4197 */ 4198 if (!(pi->PortInfo.CapabilityMask & 4199 SM_CAP_MASK_IS_DM_SUPPD)) { 4200 kmem_free(pi, pi_len); 4201 continue; 4202 } 4203 4204 /* 4205 * For reprobes: Check if the GID is already in 4206 * the list.
If so, set the state to SKIPPED 4207 */ 4208 if (((temp_gidinfo = ibdm_find_gid(nodeguid, 4209 tmp->NodeInfo.PortGUID)) != NULL) && 4210 temp_gidinfo->gl_state == 4211 IBDM_GID_PROBING_COMPLETE) { 4212 ASSERT(reprobe_gid == NULL); 4213 ibdm_addto_glhcalist(temp_gidinfo, 4214 hca_list); 4215 reprobe_gid = temp_gidinfo; 4216 kmem_free(pi, pi_len); 4217 continue; 4218 } else if (temp_gidinfo != NULL) { 4219 kmem_free(pi, pi_len); 4220 ibdm_addto_glhcalist(temp_gidinfo, 4221 hca_list); 4222 continue; 4223 } 4224 4225 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : " 4226 "create_gid : prefix %llx, guid %llx\n", 4227 pi->PortInfo.GidPrefix, 4228 tmp->NodeInfo.PortGUID); 4229 4230 sgid.gid_prefix = port->pa_sn_prefix; 4231 sgid.gid_guid = port->pa_port_guid; 4232 dgid.gid_prefix = pi->PortInfo.GidPrefix; 4233 dgid.gid_guid = tmp->NodeInfo.PortGUID; 4234 new_gid = ibdm_create_gid_info(port, sgid, 4235 dgid); 4236 if (new_gid == NULL) { 4237 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4238 "create_gid_info failed\n"); 4239 kmem_free(pi, pi_len); 4240 continue; 4241 } 4242 if (node_gid == NULL) { 4243 node_gid = new_gid; 4244 ibdm_add_to_gl_gid(node_gid, node_gid); 4245 } else { 4246 IBTF_DPRINTF_L4("ibdm", 4247 "\tprobe_ioc: new gid"); 4248 temp_gid = kmem_zalloc( 4249 sizeof (ibdm_gid_t), KM_SLEEP); 4250 temp_gid->gid_dgid_hi = 4251 new_gid->gl_dgid_hi; 4252 temp_gid->gid_dgid_lo = 4253 new_gid->gl_dgid_lo; 4254 temp_gid->gid_next = node_gid->gl_gid; 4255 node_gid->gl_gid = temp_gid; 4256 node_gid->gl_ngids++; 4257 } 4258 new_gid->gl_nodeguid = nodeguid; 4259 new_gid->gl_portguid = dgid.gid_guid; 4260 ibdm_addto_glhcalist(new_gid, hca_list); 4261 4262 /* 4263 * Set the state to skipped as all these 4264 * gids point to the same node. 4265 * We (re)probe only one GID below and reset 4266 * state appropriately 4267 */ 4268 new_gid->gl_state = IBDM_GID_PROBING_SKIPPED; 4269 new_gid->gl_devid = (*tmp).NodeInfo.DeviceID; 4270 kmem_free(pi, pi_len); 4271 } 4272 kmem_free(nr, nr_len); 4273 4274 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : reprobe_flag %d " 4275 "reprobe_gid %p new_gid %p node_gid %p", 4276 reprobe_flag, reprobe_gid, new_gid, node_gid); 4277 4278 if (reprobe_flag != 0 && reprobe_gid != NULL) { 4279 int niocs, jj; 4280 ibdm_ioc_info_t *tmp_ioc; 4281 int ioc_matched = 0; 4282 4283 mutex_exit(&ibdm.ibdm_hl_mutex); 4284 mutex_enter(&reprobe_gid->gl_mutex); 4285 reprobe_gid->gl_state = IBDM_GET_IOC_DETAILS; 4286 niocs = 4287 reprobe_gid->gl_iou->iou_info.iou_num_ctrl_slots; 4288 reprobe_gid->gl_pending_cmds++; 4289 mutex_exit(&reprobe_gid->gl_mutex); 4290 4291 for (jj = 0; jj < niocs; jj++) { 4292 tmp_ioc = 4293 IBDM_GIDINFO2IOCINFO(reprobe_gid, jj); 4294 if (tmp_ioc->ioc_profile.ioc_guid != ioc_guid) 4295 continue; 4296 4297 ioc_matched = 1; 4298 4299 /* 4300 * Explicitly set gl_reprobe_flag to 0 so that 4301 * IBnex is not notified on completion 4302 */ 4303 mutex_enter(&reprobe_gid->gl_mutex); 4304 reprobe_gid->gl_reprobe_flag = 0; 4305 mutex_exit(&reprobe_gid->gl_mutex); 4306 4307 mutex_enter(&ibdm.ibdm_mutex); 4308 ibdm.ibdm_ngid_probes_in_progress++; 4309 mutex_exit(&ibdm.ibdm_mutex); 4310 if (ibdm_send_ioc_profile(reprobe_gid, jj) != 4311 IBDM_SUCCESS) { 4312 IBTF_DPRINTF_L4("ibdm", 4313 "\tprobe_ioc: " 4314 "send_ioc_profile failed " 4315 "for ioc %d", jj); 4316 ibdm_gid_decr_pending(reprobe_gid); 4317 break; 4318 } 4319 mutex_enter(&ibdm.ibdm_mutex); 4320 ibdm_wait_probe_completion(); 4321 mutex_exit(&ibdm.ibdm_mutex); 4322 break; 4323 } 4324 if (ioc_matched == 0) 4325 
ibdm_gid_decr_pending(reprobe_gid); 4326 else { 4327 mutex_enter(&ibdm.ibdm_hl_mutex); 4328 break; 4329 } 4330 } else if (new_gid != NULL) { 4331 mutex_exit(&ibdm.ibdm_hl_mutex); 4332 node_gid = node_gid ? node_gid : new_gid; 4333 4334 /* 4335 * New or reinserted GID : Enable notification 4336 * to IBnex 4337 */ 4338 mutex_enter(&node_gid->gl_mutex); 4339 node_gid->gl_reprobe_flag = 1; 4340 mutex_exit(&node_gid->gl_mutex); 4341 4342 ibdm_probe_gid(node_gid); 4343 4344 mutex_enter(&ibdm.ibdm_hl_mutex); 4345 } 4346 } 4347 mutex_exit(&ibdm.ibdm_hl_mutex); 4348 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : End\n"); 4349 } 4350 4351 4352 /* 4353 * ibdm_probe_gid() 4354 * Selectively probes the GID 4355 */ 4356 static void 4357 ibdm_probe_gid(ibdm_dp_gidinfo_t *gid_info) 4358 { 4359 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid:"); 4360 4361 /* 4362 * A Cisco FC GW needs the special handling to get IOUnitInfo. 4363 */ 4364 mutex_enter(&gid_info->gl_mutex); 4365 if (ibdm_is_cisco_switch(gid_info)) { 4366 gid_info->gl_pending_cmds++; 4367 gid_info->gl_state = IBDM_SET_CLASSPORTINFO; 4368 mutex_exit(&gid_info->gl_mutex); 4369 4370 if (ibdm_set_classportinfo(gid_info) != IBDM_SUCCESS) { 4371 4372 mutex_enter(&gid_info->gl_mutex); 4373 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 4374 --gid_info->gl_pending_cmds; 4375 mutex_exit(&gid_info->gl_mutex); 4376 4377 /* free the hca_list on this gid_info */ 4378 ibdm_delete_glhca_list(gid_info); 4379 gid_info = gid_info->gl_next; 4380 return; 4381 } 4382 4383 mutex_enter(&gid_info->gl_mutex); 4384 ibdm_wait_cisco_probe_completion(gid_info); 4385 4386 IBTF_DPRINTF_L4("ibdm", 4387 "\tprobe_gid: CISCO Wakeup signal received"); 4388 } 4389 4390 /* move on to the 'GET_CLASSPORTINFO' stage */ 4391 gid_info->gl_pending_cmds++; 4392 gid_info->gl_state = IBDM_GET_CLASSPORTINFO; 4393 mutex_exit(&gid_info->gl_mutex); 4394 4395 if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) { 4396 4397 mutex_enter(&gid_info->gl_mutex); 4398 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 4399 --gid_info->gl_pending_cmds; 4400 mutex_exit(&gid_info->gl_mutex); 4401 4402 /* free the hca_list on this gid_info */ 4403 ibdm_delete_glhca_list(gid_info); 4404 gid_info = gid_info->gl_next; 4405 return; 4406 } 4407 4408 mutex_enter(&ibdm.ibdm_mutex); 4409 ibdm.ibdm_ngid_probes_in_progress++; 4410 gid_info = gid_info->gl_next; 4411 ibdm_wait_probe_completion(); 4412 mutex_exit(&ibdm.ibdm_mutex); 4413 4414 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid: Wakeup signal received"); 4415 } 4416 4417 4418 /* 4419 * ibdm_create_gid_info() 4420 * Allocates a gid_info structure and initializes 4421 * Returns pointer to the structure on success 4422 * and NULL on failure 4423 */ 4424 static ibdm_dp_gidinfo_t * 4425 ibdm_create_gid_info(ibdm_port_attr_t *port, ib_gid_t sgid, ib_gid_t dgid) 4426 { 4427 uint8_t ii, npaths; 4428 sa_path_record_t *path; 4429 size_t len; 4430 ibdm_pkey_tbl_t *pkey_tbl; 4431 ibdm_dp_gidinfo_t *gid_info = NULL; 4432 int ret; 4433 4434 IBTF_DPRINTF_L4("ibdm", "\tcreate_gid_info: Begin"); 4435 npaths = 1; 4436 4437 /* query for reversible paths */ 4438 if (port->pa_sa_hdl) 4439 ret = ibmf_saa_gid_to_pathrecords(port->pa_sa_hdl, 4440 sgid, dgid, IBMF_SAA_PKEY_WC, 0, B_TRUE, &npaths, 0, 4441 &len, &path); 4442 else 4443 return (NULL); 4444 4445 if (ret == IBMF_SUCCESS && path) { 4446 ibdm_dump_path_info(path); 4447 4448 gid_info = kmem_zalloc( 4449 sizeof (ibdm_dp_gidinfo_t), KM_SLEEP); 4450 mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL); 4451 cv_init(&gid_info->gl_probe_cv, NULL, CV_DRIVER, 
NULL); 4452 gid_info->gl_dgid_hi = path->DGID.gid_prefix; 4453 gid_info->gl_dgid_lo = path->DGID.gid_guid; 4454 gid_info->gl_sgid_hi = path->SGID.gid_prefix; 4455 gid_info->gl_sgid_lo = path->SGID.gid_guid; 4456 gid_info->gl_p_key = path->P_Key; 4457 gid_info->gl_sa_hdl = port->pa_sa_hdl; 4458 gid_info->gl_ibmf_hdl = port->pa_ibmf_hdl; 4459 gid_info->gl_slid = path->SLID; 4460 gid_info->gl_dlid = path->DLID; 4461 gid_info->gl_transactionID = (++ibdm.ibdm_transactionID) 4462 << IBDM_GID_TRANSACTIONID_SHIFT; 4463 gid_info->gl_min_transactionID = gid_info->gl_transactionID; 4464 gid_info->gl_max_transactionID = (ibdm.ibdm_transactionID +1) 4465 << IBDM_GID_TRANSACTIONID_SHIFT; 4466 gid_info->gl_SL = path->SL; 4467 4468 gid_info->gl_qp_hdl = IBMF_QP_HANDLE_DEFAULT; 4469 for (ii = 0; ii < port->pa_npkeys; ii++) { 4470 if (port->pa_pkey_tbl == NULL) 4471 break; 4472 4473 pkey_tbl = &port->pa_pkey_tbl[ii]; 4474 if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) && 4475 (pkey_tbl->pt_qp_hdl != NULL)) { 4476 gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl; 4477 break; 4478 } 4479 } 4480 kmem_free(path, len); 4481 4482 /* 4483 * QP handle for GID not initialized. No matching Pkey 4484 * was found!! ibdm should *not* hit this case. Flag an 4485 * error and drop the GID if ibdm does encounter this. 4486 */ 4487 if (gid_info->gl_qp_hdl == NULL) { 4488 IBTF_DPRINTF_L2(ibdm_string, 4489 "\tcreate_gid_info: No matching Pkey"); 4490 ibdm_delete_gidinfo(gid_info); 4491 return (NULL); 4492 } 4493 4494 ibdm.ibdm_ngids++; 4495 if (ibdm.ibdm_dp_gidlist_head == NULL) { 4496 ibdm.ibdm_dp_gidlist_head = gid_info; 4497 ibdm.ibdm_dp_gidlist_tail = gid_info; 4498 } else { 4499 ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info; 4500 gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail; 4501 ibdm.ibdm_dp_gidlist_tail = gid_info; 4502 } 4503 } 4504 4505 return (gid_info); 4506 } 4507 4508 4509 /* 4510 * ibdm_get_node_records 4511 * Sends a SA query to get the NODE record 4512 * Returns pointer to the sa_node_record_t on success 4513 * and NULL on failure 4514 */ 4515 static sa_node_record_t * 4516 ibdm_get_node_records(ibmf_saa_handle_t sa_hdl, size_t *length, ib_guid_t guid) 4517 { 4518 sa_node_record_t req, *resp = NULL; 4519 ibmf_saa_access_args_t args; 4520 int ret; 4521 4522 IBTF_DPRINTF_L4("ibdm", "\tget_node_records: Begin"); 4523 4524 bzero(&req, sizeof (sa_node_record_t)); 4525 req.NodeInfo.NodeGUID = guid; 4526 4527 args.sq_attr_id = SA_NODERECORD_ATTRID; 4528 args.sq_access_type = IBMF_SAA_RETRIEVE; 4529 args.sq_component_mask = SA_NODEINFO_COMPMASK_NODEGUID; 4530 args.sq_template = &req; 4531 args.sq_callback = NULL; 4532 args.sq_callback_arg = NULL; 4533 4534 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) &resp); 4535 if (ret != IBMF_SUCCESS) { 4536 IBTF_DPRINTF_L2("ibdm", "\tget_node_records:" 4537 " SA Retrieve Failed: %d", ret); 4538 return (NULL); 4539 } 4540 if ((resp == NULL) || (*length == 0)) { 4541 IBTF_DPRINTF_L2("ibdm", "\tget_node_records: No records"); 4542 return (NULL); 4543 } 4544 4545 IBTF_DPRINTF_L4("ibdm", "\tget_node_records: NodeGuid %llx " 4546 "PortGUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.PortGUID); 4547 4548 return (resp); 4549 } 4550 4551 4552 /* 4553 * ibdm_get_portinfo() 4554 * Sends a SA query to get the PortInfo record 4555 * Returns pointer to the sa_portinfo_record_t on success 4556 * and NULL on failure 4557 */ 4558 static sa_portinfo_record_t * 4559 ibdm_get_portinfo(ibmf_saa_handle_t sa_hdl, size_t *length, ib_lid_t lid) 4560 { 4561 sa_portinfo_record_t req, *resp = NULL; 
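	/*
	 * Like ibdm_get_node_records() above: build a template record, set
	 * the component mask and issue an IBMF_SAA_RETRIEVE query.  The
	 * caller is expected to free the returned record(s).
	 */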
4562 ibmf_saa_access_args_t args; 4563 int ret; 4564 4565 IBTF_DPRINTF_L4("ibdm", "\tget_portinfo: Begin"); 4566 4567 bzero(&req, sizeof (sa_portinfo_record_t)); 4568 req.EndportLID = lid; 4569 4570 args.sq_attr_id = SA_PORTINFORECORD_ATTRID; 4571 args.sq_access_type = IBMF_SAA_RETRIEVE; 4572 args.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID; 4573 args.sq_template = &req; 4574 args.sq_callback = NULL; 4575 args.sq_callback_arg = NULL; 4576 4577 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) &resp); 4578 if (ret != IBMF_SUCCESS) { 4579 IBTF_DPRINTF_L2("ibdm", "\tget_portinfo:" 4580 " SA Retrieve Failed: 0x%X", ret); 4581 return (NULL); 4582 } 4583 if ((*length == 0) || (resp == NULL)) 4584 return (NULL); 4585 4586 IBTF_DPRINTF_L4("ibdm", "\tget_portinfo: GidPrefix %llx Cap 0x%x", 4587 resp->PortInfo.GidPrefix, resp->PortInfo.CapabilityMask); 4588 return (resp); 4589 } 4590 4591 4592 /* 4593 * ibdm_ibnex_register_callback 4594 * IB nexus callback routine for HCA attach and detach notification 4595 */ 4596 void 4597 ibdm_ibnex_register_callback(ibdm_callback_t ibnex_dm_callback) 4598 { 4599 IBTF_DPRINTF_L4("ibdm", "\tibnex_register_callbacks"); 4600 mutex_enter(&ibdm.ibdm_ibnex_mutex); 4601 ibdm.ibdm_ibnex_callback = ibnex_dm_callback; 4602 mutex_exit(&ibdm.ibdm_ibnex_mutex); 4603 } 4604 4605 4606 /* 4607 * ibdm_ibnex_unregister_callbacks 4608 */ 4609 void 4610 ibdm_ibnex_unregister_callback() 4611 { 4612 IBTF_DPRINTF_L4("ibdm", "\tibnex_unregister_callbacks"); 4613 mutex_enter(&ibdm.ibdm_ibnex_mutex); 4614 ibdm.ibdm_ibnex_callback = NULL; 4615 mutex_exit(&ibdm.ibdm_ibnex_mutex); 4616 } 4617 4618 /* 4619 * ibdm_get_waittime() 4620 * Calculates the wait time based on the last HCA attach time 4621 */ 4622 static time_t 4623 ibdm_get_waittime(ib_guid_t hca_guid, int dft_wait) 4624 { 4625 int ii; 4626 time_t temp, wait_time = 0; 4627 ibdm_hca_list_t *hca; 4628 4629 IBTF_DPRINTF_L4("ibdm", "\tget_waittime hcaguid:%llx" 4630 "\tport settling time %d", hca_guid, dft_wait); 4631 4632 ASSERT(mutex_owned(&ibdm.ibdm_hl_mutex)); 4633 4634 hca = ibdm.ibdm_hca_list_head; 4635 4636 if (hca_guid) { 4637 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4638 if ((hca_guid == hca->hl_hca_guid) && 4639 (hca->hl_nports != hca->hl_nports_active)) { 4640 wait_time = 4641 ddi_get_time() - hca->hl_attach_time; 4642 wait_time = ((wait_time >= dft_wait) ? 4643 0 : (dft_wait - wait_time)); 4644 break; 4645 } 4646 hca = hca->hl_next; 4647 } 4648 IBTF_DPRINTF_L4("ibdm", "\tget_waittime %llx", wait_time); 4649 return (wait_time); 4650 } 4651 4652 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4653 if (hca->hl_nports != hca->hl_nports_active) { 4654 temp = ddi_get_time() - hca->hl_attach_time; 4655 temp = ((temp >= dft_wait) ? 0 : (dft_wait - temp)); 4656 wait_time = (temp > wait_time) ? 
temp : wait_time; 4657 } 4658 } 4659 IBTF_DPRINTF_L4("ibdm", "\tget_waittime %llx", wait_time); 4660 return (wait_time); 4661 } 4662 4663 void 4664 ibdm_ibnex_port_settle_wait(ib_guid_t hca_guid, int dft_wait) 4665 { 4666 time_t wait_time; 4667 4668 mutex_enter(&ibdm.ibdm_hl_mutex); 4669 4670 while ((wait_time = ibdm_get_waittime(hca_guid, dft_wait)) > 0) { 4671 (void) cv_timedwait(&ibdm.ibdm_port_settle_cv, 4672 &ibdm.ibdm_hl_mutex, 4673 ddi_get_lbolt() + drv_usectohz(wait_time * 1000000)); 4674 } 4675 4676 mutex_exit(&ibdm.ibdm_hl_mutex); 4677 } 4678 4679 4680 /* 4681 * ibdm_ibnex_probe_hcaport 4682 * Probes the presence of HCA port (with HCA dip and port number) 4683 * Returns port attributes structure on SUCCESS 4684 */ 4685 ibdm_port_attr_t * 4686 ibdm_ibnex_probe_hcaport(ib_guid_t hca_guid, uint8_t port_num) 4687 { 4688 int ii, jj; 4689 ibdm_hca_list_t *hca_list; 4690 ibdm_port_attr_t *port_attr; 4691 4692 IBTF_DPRINTF_L4("ibdm", "\tibnex_probe_hcaport:"); 4693 4694 mutex_enter(&ibdm.ibdm_hl_mutex); 4695 hca_list = ibdm.ibdm_hca_list_head; 4696 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4697 if (hca_list->hl_hca_guid == hca_guid) { 4698 for (jj = 0; jj < hca_list->hl_nports; jj++) { 4699 if (hca_list->hl_port_attr[jj].pa_port_num == 4700 port_num) { 4701 break; 4702 } 4703 } 4704 if (jj != hca_list->hl_nports) 4705 break; 4706 } 4707 hca_list = hca_list->hl_next; 4708 } 4709 if (ii == ibdm.ibdm_hca_count) { 4710 IBTF_DPRINTF_L2("ibdm", "\tibnex_probe_hcaport: not found"); 4711 mutex_exit(&ibdm.ibdm_hl_mutex); 4712 return (NULL); 4713 } 4714 port_attr = (ibdm_port_attr_t *)kmem_zalloc( 4715 sizeof (ibdm_port_attr_t), KM_SLEEP); 4716 bcopy((char *)&hca_list->hl_port_attr[jj], 4717 port_attr, sizeof (ibdm_port_attr_t)); 4718 ibdm_update_port_attr(port_attr); 4719 4720 mutex_exit(&ibdm.ibdm_hl_mutex); 4721 return (port_attr); 4722 } 4723 4724 4725 /* 4726 * ibdm_ibnex_get_port_attrs 4727 * Scan all HCAs for a matching port_guid. 4728 * Returns "port attributes" structure on success. 
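 * The structure returned is a private copy allocated with kmem_alloc();
 * the caller releases it with ibdm_ibnex_free_port_attr().  A minimal,
 * illustrative (hypothetical) caller:
 *
 *	ibdm_port_attr_t *pa = ibdm_ibnex_get_port_attrs(port_guid);
 *	if (pa != NULL) {
 *		(use pa->pa_sa_hdl, pa->pa_pkey_tbl, ... here)
 *		ibdm_ibnex_free_port_attr(pa);
 *	}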
4729 */ 4730 ibdm_port_attr_t * 4731 ibdm_ibnex_get_port_attrs(ib_guid_t port_guid) 4732 { 4733 int ii, jj; 4734 ibdm_hca_list_t *hca_list; 4735 ibdm_port_attr_t *port_attr; 4736 4737 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_port_attrs:"); 4738 4739 mutex_enter(&ibdm.ibdm_hl_mutex); 4740 hca_list = ibdm.ibdm_hca_list_head; 4741 4742 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4743 for (jj = 0; jj < hca_list->hl_nports; jj++) { 4744 if (hca_list->hl_port_attr[jj].pa_port_guid == 4745 port_guid) { 4746 break; 4747 } 4748 } 4749 if (jj != hca_list->hl_nports) 4750 break; 4751 hca_list = hca_list->hl_next; 4752 } 4753 4754 if (ii == ibdm.ibdm_hca_count) { 4755 IBTF_DPRINTF_L2("ibdm", "\tibnex_get_port_attrs: not found"); 4756 mutex_exit(&ibdm.ibdm_hl_mutex); 4757 return (NULL); 4758 } 4759 4760 port_attr = (ibdm_port_attr_t *)kmem_alloc(sizeof (ibdm_port_attr_t), 4761 KM_SLEEP); 4762 bcopy((char *)&hca_list->hl_port_attr[jj], port_attr, 4763 sizeof (ibdm_port_attr_t)); 4764 ibdm_update_port_attr(port_attr); 4765 4766 mutex_exit(&ibdm.ibdm_hl_mutex); 4767 return (port_attr); 4768 } 4769 4770 4771 /* 4772 * ibdm_ibnex_free_port_attr() 4773 */ 4774 void 4775 ibdm_ibnex_free_port_attr(ibdm_port_attr_t *port_attr) 4776 { 4777 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_port_attr:"); 4778 if (port_attr) { 4779 if (port_attr->pa_pkey_tbl != NULL) { 4780 kmem_free(port_attr->pa_pkey_tbl, 4781 (port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t))); 4782 } 4783 kmem_free(port_attr, sizeof (ibdm_port_attr_t)); 4784 } 4785 } 4786 4787 4788 /* 4789 * ibdm_ibnex_get_hca_list() 4790 * Returns port info for all the ports of all the HCAs 4791 */ 4792 void 4793 ibdm_ibnex_get_hca_list(ibdm_hca_list_t **hca, int *count) 4794 { 4795 ibdm_hca_list_t *head = NULL, *temp, *temp1; 4796 int ii; 4797 4798 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_list:"); 4799 4800 mutex_enter(&ibdm.ibdm_hl_mutex); 4801 temp = ibdm.ibdm_hca_list_head; 4802 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4803 temp1 = ibdm_dup_hca_attr(temp); 4804 temp1->hl_next = head; 4805 head = temp1; 4806 temp = temp->hl_next; 4807 } 4808 *count = ibdm.ibdm_hca_count; 4809 *hca = head; 4810 mutex_exit(&ibdm.ibdm_hl_mutex); 4811 } 4812 4813 4814 /* 4815 * ibdm_ibnex_get_hca_info_by_guid() 4816 */ 4817 ibdm_hca_list_t * 4818 ibdm_ibnex_get_hca_info_by_guid(ib_guid_t hca_guid) 4819 { 4820 ibdm_hca_list_t *head = NULL, *hca = NULL; 4821 4822 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_info_by_dip"); 4823 4824 mutex_enter(&ibdm.ibdm_hl_mutex); 4825 head = ibdm.ibdm_hca_list_head; 4826 while (head) { 4827 if (head->hl_hca_guid == hca_guid) { 4828 hca = ibdm_dup_hca_attr(head); 4829 hca->hl_next = NULL; 4830 break; 4831 } 4832 head = head->hl_next; 4833 } 4834 mutex_exit(&ibdm.ibdm_hl_mutex); 4835 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_info_by_dip %p", hca); 4836 return (hca); 4837 } 4838 4839 4840 /* 4841 * ibdm_dup_hca_attr() 4842 * Allocate a new HCA attribute structure and initialize it 4843 * with the incoming HCA attributes. 4844 * Returns the allocated HCA attributes.
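 * The copy is a single allocation holding the ibdm_hca_list_t header
 * followed by hl_nports ibdm_port_attr_t entries; it is released by
 * ibdm_ibnex_free_hca_list().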
4845 */ 4846 static ibdm_hca_list_t * 4847 ibdm_dup_hca_attr(ibdm_hca_list_t *in_hca) 4848 { 4849 int len; 4850 ibdm_hca_list_t *out_hca; 4851 4852 len = sizeof (ibdm_hca_list_t) + 4853 (in_hca->hl_nports * sizeof (ibdm_port_attr_t)); 4854 IBTF_DPRINTF_L4("ibdm", "\tdup_hca_attr len %d", len); 4855 out_hca = (ibdm_hca_list_t *)kmem_alloc(len, KM_SLEEP); 4856 bcopy((char *)in_hca, 4857 (char *)out_hca, sizeof (ibdm_hca_list_t)); 4858 if (in_hca->hl_nports) { 4859 out_hca->hl_port_attr = (ibdm_port_attr_t *) 4860 ((char *)out_hca + sizeof (ibdm_hca_list_t)); 4861 bcopy((char *)in_hca->hl_port_attr, 4862 (char *)out_hca->hl_port_attr, 4863 (in_hca->hl_nports * sizeof (ibdm_port_attr_t))); 4864 for (len = 0; len < out_hca->hl_nports; len++) 4865 ibdm_update_port_attr(&out_hca->hl_port_attr[len]); 4866 } 4867 return (out_hca); 4868 } 4869 4870 4871 /* 4872 * ibdm_ibnex_free_hca_list() 4873 * Free one/more HCA lists 4874 */ 4875 void 4876 ibdm_ibnex_free_hca_list(ibdm_hca_list_t *hca_list) 4877 { 4878 int ii; 4879 size_t len; 4880 ibdm_hca_list_t *temp; 4881 ibdm_port_attr_t *port; 4882 4883 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_hca_list:"); 4884 ASSERT(hca_list); 4885 while (hca_list) { 4886 temp = hca_list; 4887 hca_list = hca_list->hl_next; 4888 for (ii = 0; ii < temp->hl_nports; ii++) { 4889 port = &temp->hl_port_attr[ii]; 4890 len = (port->pa_npkeys * sizeof (ibdm_pkey_tbl_t)); 4891 if (len != 0) 4892 kmem_free(port->pa_pkey_tbl, len); 4893 } 4894 len = sizeof (ibdm_hca_list_t) + (temp->hl_nports * 4895 sizeof (ibdm_port_attr_t)); 4896 kmem_free(temp, len); 4897 } 4898 } 4899 4900 4901 /* 4902 * ibdm_ibnex_probe_iocguid() 4903 * Probes the IOC on the fabric and returns the IOC information 4904 * if present. Otherwise, NULL is returned 4905 */ 4906 /* ARGSUSED */ 4907 ibdm_ioc_info_t * 4908 ibdm_ibnex_probe_ioc(ib_guid_t iou, ib_guid_t ioc_guid, int reprobe_flag) 4909 { 4910 int k; 4911 ibdm_ioc_info_t *ioc_info; 4912 ibdm_dp_gidinfo_t *gid_info; /* used as index and arg */ 4913 timeout_id_t *timeout_id; 4914 4915 IBTF_DPRINTF_L4("ibdm", "\tibnex_probe_ioc: (%llX, %llX, %d) Begin", 4916 iou, ioc_guid, reprobe_flag); 4917 /* Check whether we know this already */ 4918 ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info); 4919 if (ioc_info == NULL) { 4920 mutex_enter(&ibdm.ibdm_mutex); 4921 while (ibdm.ibdm_busy & IBDM_BUSY) 4922 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4923 ibdm.ibdm_busy |= IBDM_BUSY; 4924 mutex_exit(&ibdm.ibdm_mutex); 4925 ibdm_probe_ioc(iou, ioc_guid, 0); 4926 mutex_enter(&ibdm.ibdm_mutex); 4927 ibdm.ibdm_busy &= ~IBDM_BUSY; 4928 cv_broadcast(&ibdm.ibdm_busy_cv); 4929 mutex_exit(&ibdm.ibdm_mutex); 4930 ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info); 4931 } else if (reprobe_flag) { /* Handle Reprobe for the IOC */ 4932 ASSERT(gid_info != NULL); 4933 /* Free the ioc_list before reprobe; and cancel any timers */ 4934 mutex_enter(&ibdm.ibdm_mutex); 4935 mutex_enter(&gid_info->gl_mutex); 4936 if (ioc_info->ioc_timeout_id) { 4937 timeout_id = ioc_info->ioc_timeout_id; 4938 ioc_info->ioc_timeout_id = 0; 4939 mutex_exit(&gid_info->gl_mutex); 4940 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4941 "ioc_timeout_id = 0x%x", timeout_id); 4942 if (untimeout(timeout_id) == -1) { 4943 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4944 "untimeout ioc_timeout_id failed"); 4945 } 4946 mutex_enter(&gid_info->gl_mutex); 4947 } 4948 if (ioc_info->ioc_dc_timeout_id) { 4949 timeout_id = ioc_info->ioc_dc_timeout_id; 4950 ioc_info->ioc_dc_timeout_id = 0; 4951 
mutex_exit(&gid_info->gl_mutex); 4952 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4953 "ioc_dc_timeout_id = 0x%x", timeout_id); 4954 if (untimeout(timeout_id) == -1) { 4955 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4956 "untimeout ioc_dc_timeout_id failed"); 4957 } 4958 mutex_enter(&gid_info->gl_mutex); 4959 } 4960 for (k = 0; k < ioc_info->ioc_profile.ioc_service_entries; k++) 4961 if (ioc_info->ioc_serv[k].se_timeout_id) { 4962 timeout_id = ioc_info->ioc_serv[k]. 4963 se_timeout_id; 4964 ioc_info->ioc_serv[k].se_timeout_id = 0; 4965 mutex_exit(&gid_info->gl_mutex); 4966 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4967 "ioc_info->ioc_serv[k].se_timeout_id = %x", 4968 k, timeout_id); 4969 if (untimeout(timeout_id) == -1) { 4970 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4971 "untimeout se_timeout_id %d " 4972 "failed", k); 4973 } 4974 mutex_enter(&gid_info->gl_mutex); 4975 } 4976 mutex_exit(&gid_info->gl_mutex); 4977 mutex_exit(&ibdm.ibdm_mutex); 4978 ibdm_ibnex_free_ioc_list(ioc_info); 4979 4980 mutex_enter(&ibdm.ibdm_mutex); 4981 while (ibdm.ibdm_busy & IBDM_BUSY) 4982 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4983 ibdm.ibdm_busy |= IBDM_BUSY; 4984 mutex_exit(&ibdm.ibdm_mutex); 4985 4986 ibdm_probe_ioc(iou, ioc_guid, 1); 4987 4988 /* 4989 * Skip if gl_reprobe_flag is set, this will be 4990 * a re-inserted / new GID, for which notifications 4991 * have already been send. 4992 */ 4993 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 4994 gid_info = gid_info->gl_next) { 4995 uint8_t ii, niocs; 4996 ibdm_ioc_info_t *ioc; 4997 4998 if (gid_info->gl_iou == NULL) 4999 continue; 5000 5001 if (gid_info->gl_reprobe_flag) { 5002 gid_info->gl_reprobe_flag = 0; 5003 continue; 5004 } 5005 5006 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 5007 for (ii = 0; ii < niocs; ii++) { 5008 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii); 5009 if (ioc->ioc_profile.ioc_guid == ioc_guid) { 5010 mutex_enter(&ibdm.ibdm_mutex); 5011 ibdm_reprobe_update_port_srv(ioc, 5012 gid_info); 5013 mutex_exit(&ibdm.ibdm_mutex); 5014 } 5015 } 5016 } 5017 mutex_enter(&ibdm.ibdm_mutex); 5018 ibdm.ibdm_busy &= ~IBDM_BUSY; 5019 cv_broadcast(&ibdm.ibdm_busy_cv); 5020 mutex_exit(&ibdm.ibdm_mutex); 5021 5022 ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info); 5023 } 5024 return (ioc_info); 5025 } 5026 5027 5028 /* 5029 * ibdm_get_ioc_info_with_gid() 5030 * Returns pointer to ibdm_ioc_info_t if it finds 5031 * matching record for the ioc_guid. Otherwise NULL is returned. 5032 * The pointer to gid_info is set to the second argument in case that 5033 * the non-NULL value returns (and the second argument is not NULL). 5034 * 5035 * Note. use the same strings as "ibnex_get_ioc_info" in 5036 * IBTF_DPRINTF() to keep compatibility. 
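 * The ibdm_ioc_info_t returned is a duplicate created by
 * ibdm_dup_ioc_info(); the caller is expected to release it with
 * ibdm_ibnex_free_ioc_list().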
5037 */ 5038 static ibdm_ioc_info_t * 5039 ibdm_get_ioc_info_with_gid(ib_guid_t ioc_guid, 5040 ibdm_dp_gidinfo_t **gid_info) 5041 { 5042 int ii; 5043 ibdm_ioc_info_t *ioc = NULL, *tmp = NULL; 5044 ibdm_dp_gidinfo_t *gid_list; 5045 ib_dm_io_unitinfo_t *iou; 5046 5047 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_ioc_info: GUID %llx", ioc_guid); 5048 5049 mutex_enter(&ibdm.ibdm_mutex); 5050 while (ibdm.ibdm_busy & IBDM_BUSY) 5051 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5052 ibdm.ibdm_busy |= IBDM_BUSY; 5053 5054 if (gid_info) 5055 *gid_info = NULL; /* clear the value of gid_info */ 5056 5057 gid_list = ibdm.ibdm_dp_gidlist_head; 5058 while (gid_list) { 5059 mutex_enter(&gid_list->gl_mutex); 5060 if (gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) { 5061 mutex_exit(&gid_list->gl_mutex); 5062 gid_list = gid_list->gl_next; 5063 continue; 5064 } 5065 if (gid_list->gl_iou == NULL) { 5066 IBTF_DPRINTF_L2("ibdm", 5067 "\tget_ioc_info: No IOU info"); 5068 mutex_exit(&gid_list->gl_mutex); 5069 gid_list = gid_list->gl_next; 5070 continue; 5071 } 5072 iou = &gid_list->gl_iou->iou_info; 5073 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 5074 tmp = IBDM_GIDINFO2IOCINFO(gid_list, ii); 5075 if ((tmp->ioc_profile.ioc_guid == ioc_guid) && 5076 (tmp->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS)) { 5077 ioc = ibdm_dup_ioc_info(tmp, gid_list); 5078 if (gid_info) 5079 *gid_info = gid_list; /* set this ptr */ 5080 mutex_exit(&gid_list->gl_mutex); 5081 ibdm.ibdm_busy &= ~IBDM_BUSY; 5082 cv_broadcast(&ibdm.ibdm_busy_cv); 5083 mutex_exit(&ibdm.ibdm_mutex); 5084 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_info: End"); 5085 return (ioc); 5086 } 5087 } 5088 if (ii == iou->iou_num_ctrl_slots) 5089 ioc = NULL; 5090 5091 mutex_exit(&gid_list->gl_mutex); 5092 gid_list = gid_list->gl_next; 5093 } 5094 5095 ibdm.ibdm_busy &= ~IBDM_BUSY; 5096 cv_broadcast(&ibdm.ibdm_busy_cv); 5097 mutex_exit(&ibdm.ibdm_mutex); 5098 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_info: failure End"); 5099 return (ioc); 5100 } 5101 5102 /* 5103 * ibdm_ibnex_get_ioc_info() 5104 * Returns pointer to ibdm_ioc_info_t if it finds 5105 * matching record for the ioc_guid, otherwise NULL 5106 * is returned 5107 * 5108 * Note. this is a wrapper function to ibdm_get_ioc_info_with_gid() now. 
5109 */ 5110 ibdm_ioc_info_t * 5111 ibdm_ibnex_get_ioc_info(ib_guid_t ioc_guid) 5112 { 5113 /* will not use the gid_info pointer, so the second arg is NULL */ 5114 return (ibdm_get_ioc_info_with_gid(ioc_guid, NULL)); 5115 } 5116 5117 /* 5118 * ibdm_ibnex_get_ioc_count() 5119 * Returns number of ibdm_ioc_info_t it finds 5120 */ 5121 int 5122 ibdm_ibnex_get_ioc_count(void) 5123 { 5124 int count = 0, k; 5125 ibdm_ioc_info_t *ioc; 5126 ibdm_dp_gidinfo_t *gid_list; 5127 5128 mutex_enter(&ibdm.ibdm_mutex); 5129 ibdm_sweep_fabric(0); 5130 5131 while (ibdm.ibdm_busy & IBDM_BUSY) 5132 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5133 ibdm.ibdm_busy |= IBDM_BUSY; 5134 5135 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 5136 gid_list = gid_list->gl_next) { 5137 mutex_enter(&gid_list->gl_mutex); 5138 if ((gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) || 5139 (gid_list->gl_iou == NULL)) { 5140 mutex_exit(&gid_list->gl_mutex); 5141 continue; 5142 } 5143 for (k = 0; k < gid_list->gl_iou->iou_info.iou_num_ctrl_slots; 5144 k++) { 5145 ioc = IBDM_GIDINFO2IOCINFO(gid_list, k); 5146 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) 5147 ++count; 5148 } 5149 mutex_exit(&gid_list->gl_mutex); 5150 } 5151 ibdm.ibdm_busy &= ~IBDM_BUSY; 5152 cv_broadcast(&ibdm.ibdm_busy_cv); 5153 mutex_exit(&ibdm.ibdm_mutex); 5154 5155 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_count: count = %d", count); 5156 return (count); 5157 } 5158 5159 5160 /* 5161 * ibdm_ibnex_get_ioc_list() 5162 * Returns information about all the IOCs present on the fabric. 5163 * Reprobes the IOCs and the GID list if list_flag is set to REPROBE_ALL. 5164 * Does not sweep fabric if DONOT_PROBE is set 5165 */ 5166 ibdm_ioc_info_t * 5167 ibdm_ibnex_get_ioc_list(ibdm_ibnex_get_ioclist_mtd_t list_flag) 5168 { 5169 int ii; 5170 ibdm_ioc_info_t *ioc_list = NULL, *tmp, *ioc; 5171 ibdm_dp_gidinfo_t *gid_list; 5172 ib_dm_io_unitinfo_t *iou; 5173 5174 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_list: Enter"); 5175 5176 mutex_enter(&ibdm.ibdm_mutex); 5177 if (list_flag != IBDM_IBNEX_DONOT_PROBE) 5178 ibdm_sweep_fabric(list_flag == IBDM_IBNEX_REPROBE_ALL); 5179 5180 while (ibdm.ibdm_busy & IBDM_BUSY) 5181 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5182 ibdm.ibdm_busy |= IBDM_BUSY; 5183 5184 gid_list = ibdm.ibdm_dp_gidlist_head; 5185 while (gid_list) { 5186 mutex_enter(&gid_list->gl_mutex); 5187 if (gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) { 5188 mutex_exit(&gid_list->gl_mutex); 5189 gid_list = gid_list->gl_next; 5190 continue; 5191 } 5192 if (gid_list->gl_iou == NULL) { 5193 IBTF_DPRINTF_L2("ibdm", 5194 "\tget_ioc_list: No IOU info"); 5195 mutex_exit(&gid_list->gl_mutex); 5196 gid_list = gid_list->gl_next; 5197 continue; 5198 } 5199 iou = &gid_list->gl_iou->iou_info; 5200 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 5201 ioc = IBDM_GIDINFO2IOCINFO(gid_list, ii); 5202 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) { 5203 tmp = ibdm_dup_ioc_info(ioc, gid_list); 5204 tmp->ioc_next = ioc_list; 5205 ioc_list = tmp; 5206 } 5207 } 5208 mutex_exit(&gid_list->gl_mutex); 5209 gid_list = gid_list->gl_next; 5210 } 5211 ibdm.ibdm_busy &= ~IBDM_BUSY; 5212 cv_broadcast(&ibdm.ibdm_busy_cv); 5213 mutex_exit(&ibdm.ibdm_mutex); 5214 5215 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_list: End"); 5216 return (ioc_list); 5217 } 5218 5219 /* 5220 * ibdm_dup_ioc_info() 5221 * Duplicate the IOC information and return the IOC 5222 * information. 
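 * Besides the flat ibdm_ioc_info_t copy, the duplicate gets its own port
 * GID list and HCA list (via ibdm_update_ioc_port_gidlist()) plus the IOU
 * diag code fields, so the caller can keep using it after gl_mutex is
 * dropped.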
5223 */ 5224 static ibdm_ioc_info_t * 5225 ibdm_dup_ioc_info(ibdm_ioc_info_t *in_ioc, ibdm_dp_gidinfo_t *gid_list) 5226 { 5227 ibdm_ioc_info_t *out_ioc; 5228 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*out_ioc)); 5229 ASSERT(MUTEX_HELD(&gid_list->gl_mutex)); 5230 5231 out_ioc = kmem_alloc(sizeof (ibdm_ioc_info_t), KM_SLEEP); 5232 bcopy(in_ioc, out_ioc, sizeof (ibdm_ioc_info_t)); 5233 ibdm_update_ioc_port_gidlist(out_ioc, gid_list); 5234 out_ioc->ioc_iou_dc_valid = gid_list->gl_iou->iou_dc_valid; 5235 out_ioc->ioc_iou_diagcode = gid_list->gl_iou->iou_diagcode; 5236 5237 return (out_ioc); 5238 } 5239 5240 5241 /* 5242 * ibdm_ibnex_free_ioc_list() 5243 * Deallocates memory for the IOC list structure 5244 */ 5245 void 5246 ibdm_ibnex_free_ioc_list(ibdm_ioc_info_t *ioc) 5247 { 5248 ibdm_ioc_info_t *temp; 5249 5250 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_ioc_list:"); 5251 while (ioc) { 5252 temp = ioc; 5253 ioc = ioc->ioc_next; 5254 kmem_free(temp->ioc_gid_list, 5255 (sizeof (ibdm_gid_t) * temp->ioc_nportgids)); 5256 if (temp->ioc_hca_list) 5257 ibdm_ibnex_free_hca_list(temp->ioc_hca_list); 5258 kmem_free(temp, sizeof (ibdm_ioc_info_t)); 5259 } 5260 } 5261 5262 5263 /* 5264 * ibdm_ibnex_update_pkey_tbls 5265 * Updates the DM P_Key database. 5266 * NOTE: P_Keys being added, removed or replaced are all handled here. 5267 * 5268 * Arguments : NONE 5269 * Return Values : NONE 5270 */ 5271 void 5272 ibdm_ibnex_update_pkey_tbls(void) 5273 { 5274 int h, pp, pidx; 5275 uint_t nports; 5276 uint_t size; 5277 ib_pkey_t new_pkey; 5278 ib_pkey_t *orig_pkey; 5279 ibdm_hca_list_t *hca_list; 5280 ibdm_port_attr_t *port; 5281 ibt_hca_portinfo_t *pinfop; 5282 5283 IBTF_DPRINTF_L4("ibdm", "\tibnex_update_pkey_tbls:"); 5284 5285 mutex_enter(&ibdm.ibdm_hl_mutex); 5286 hca_list = ibdm.ibdm_hca_list_head; 5287 5288 for (h = 0; h < ibdm.ibdm_hca_count; h++) { 5289 5290 /* This updates P_Key Tables for all ports of this HCA */ 5291 (void) ibt_query_hca_ports(hca_list->hl_hca_hdl, 0, &pinfop, 5292 &nports, &size); 5293 5294 /* number of ports shouldn't have changed */ 5295 ASSERT(nports == hca_list->hl_nports); 5296 5297 for (pp = 0; pp < hca_list->hl_nports; pp++) { 5298 port = &hca_list->hl_port_attr[pp]; 5299 5300 /* 5301 * First figure out the P_Keys from IBTL. 5302 * Three things could have happened: 5303 * New P_Keys added 5304 * Existing P_Keys removed 5305 * Both of the above two 5306 * 5307 * Loop through the P_Key Indices and check if a 5308 * given P_Key_Ix matches that of the one seen by 5309 * IBDM. If they match no action is needed. 5310 * 5311 * If they don't match: 5312 * 1. if orig_pkey is invalid and new_pkey is valid 5313 * ---> add new_pkey to DM database 5314 * 2. if orig_pkey is valid and new_pkey is invalid 5315 * ---> remove orig_pkey from DM database 5316 * 3. if orig_pkey and new_pkey are both valid: 5317 * ---> remove orig_pkey from DM database 5318 * ---> add new_pkey to DM database 5319 * 4. if orig_pkey and new_pkey are both invalid: 5320 * ---> just record new_pkey in the DM database.
5321 */ 5322 5323 for (pidx = 0; pidx < port->pa_npkeys; pidx++) { 5324 new_pkey = pinfop[pp].p_pkey_tbl[pidx]; 5325 orig_pkey = &port->pa_pkey_tbl[pidx].pt_pkey; 5326 5327 /* keys match - do nothing */ 5328 if (*orig_pkey == new_pkey) 5329 continue; 5330 5331 if (IBDM_INVALID_PKEY(*orig_pkey) && 5332 !IBDM_INVALID_PKEY(new_pkey)) { 5333 /* P_Key was added */ 5334 IBTF_DPRINTF_L5("ibdm", 5335 "\tibnex_update_pkey_tbls: new " 5336 "P_Key added = 0x%x", new_pkey); 5337 *orig_pkey = new_pkey; 5338 ibdm_port_attr_ibmf_init(port, 5339 new_pkey, pp); 5340 } else if (!IBDM_INVALID_PKEY(*orig_pkey) && 5341 IBDM_INVALID_PKEY(new_pkey)) { 5342 /* P_Key was removed */ 5343 IBTF_DPRINTF_L5("ibdm", 5344 "\tibnex_update_pkey_tbls: P_Key " 5345 "removed = 0x%x", *orig_pkey); 5346 *orig_pkey = new_pkey; 5347 (void) ibdm_port_attr_ibmf_fini(port, 5348 pidx); 5349 } else if (!IBDM_INVALID_PKEY(*orig_pkey) && 5350 !IBDM_INVALID_PKEY(new_pkey)) { 5351 /* P_Key were replaced */ 5352 IBTF_DPRINTF_L5("ibdm", 5353 "\tibnex_update_pkey_tbls: P_Key " 5354 "replaced 0x%x with 0x%x", 5355 *orig_pkey, new_pkey); 5356 (void) ibdm_port_attr_ibmf_fini(port, 5357 pidx); 5358 *orig_pkey = new_pkey; 5359 ibdm_port_attr_ibmf_init(port, 5360 new_pkey, pp); 5361 } else { 5362 /* 5363 * P_Keys are invalid 5364 * set anyway to reflect if 5365 * INVALID_FULL was changed to 5366 * INVALID_LIMITED or vice-versa. 5367 */ 5368 *orig_pkey = new_pkey; 5369 } /* end of else */ 5370 5371 } /* loop of p_key index */ 5372 5373 } /* loop of #ports of HCA */ 5374 5375 ibt_free_portinfo(pinfop, size); 5376 hca_list = hca_list->hl_next; 5377 5378 } /* loop for all HCAs in the system */ 5379 5380 mutex_exit(&ibdm.ibdm_hl_mutex); 5381 } 5382 5383 5384 /* 5385 * ibdm_send_ioc_profile() 5386 * Send IOC Controller Profile request. When the request is completed 5387 * IBMF calls ibdm_process_incoming_mad routine to inform about 5388 * the completion. 5389 */ 5390 static int 5391 ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *gid_info, uint8_t ioc_no) 5392 { 5393 ibmf_msg_t *msg; 5394 ib_mad_hdr_t *hdr; 5395 ibdm_ioc_info_t *ioc_info = &(gid_info->gl_iou->iou_ioc_info[ioc_no]); 5396 ibdm_timeout_cb_args_t *cb_args; 5397 5398 IBTF_DPRINTF_L4("ibdm", "\tsend_ioc_profile: " 5399 "gid info 0x%p, ioc_no = %d", gid_info, ioc_no); 5400 5401 /* 5402 * Send command to get IOC profile. 5403 * Allocate a IBMF packet and initialize the packet. 
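 * Note that the IOC slot is addressed with a 1-based attribute modifier
 * (ioc_no + 1 is what goes on the wire), while ioc_no itself is the
 * 0-based index into the IOU controller list.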
5404 */ 5405 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 5406 &msg) != IBMF_SUCCESS) { 5407 IBTF_DPRINTF_L4("ibdm", "\tsend_ioc_profile: pkt alloc fail"); 5408 return (IBDM_FAILURE); 5409 } 5410 5411 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 5412 ibdm_alloc_send_buffers(msg); 5413 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 5414 5415 mutex_enter(&gid_info->gl_mutex); 5416 ibdm_bump_transactionID(gid_info); 5417 mutex_exit(&gid_info->gl_mutex); 5418 5419 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 5420 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 5421 if (gid_info->gl_redirected == B_TRUE) { 5422 if (gid_info->gl_redirect_dlid != 0) { 5423 msg->im_local_addr.ia_remote_lid = 5424 gid_info->gl_redirect_dlid; 5425 } 5426 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 5427 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 5428 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 5429 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 5430 } else { 5431 msg->im_local_addr.ia_remote_qno = 1; 5432 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 5433 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 5434 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 5435 } 5436 5437 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 5438 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 5439 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 5440 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 5441 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 5442 hdr->Status = 0; 5443 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 5444 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 5445 hdr->AttributeModifier = h2b32(ioc_no + 1); 5446 5447 ioc_info->ioc_state = IBDM_IOC_STATE_REPROBE_PROGRESS; 5448 cb_args = &ioc_info->ioc_cb_args; 5449 cb_args->cb_gid_info = gid_info; 5450 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 5451 cb_args->cb_req_type = IBDM_REQ_TYPE_IOCINFO; 5452 cb_args->cb_ioc_num = ioc_no; 5453 5454 mutex_enter(&gid_info->gl_mutex); 5455 ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 5456 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 5457 mutex_exit(&gid_info->gl_mutex); 5458 5459 IBTF_DPRINTF_L5("ibdm", "\tsend_ioc_profile:" 5460 "timeout %x", ioc_info->ioc_timeout_id); 5461 5462 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg, 5463 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 5464 IBTF_DPRINTF_L2("ibdm", 5465 "\tsend_ioc_profile: msg transport failed"); 5466 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 5467 } 5468 ioc_info->ioc_state = IBDM_IOC_STATE_REPROBE_PROGRESS; 5469 return (IBDM_SUCCESS); 5470 } 5471 5472 5473 /* 5474 * ibdm_port_reachable 5475 * Returns B_TRUE if the port GID is reachable by sending 5476 * a SA query to get the NODE record for this port GUID. 5477 */ 5478 static boolean_t 5479 ibdm_port_reachable(ibmf_saa_handle_t sa_hdl, ib_guid_t guid) 5480 { 5481 sa_node_record_t *resp; 5482 size_t length; 5483 5484 /* 5485 * Verify if it's reachable by getting the node record. 5486 */ 5487 if (ibdm_get_node_record_by_port(sa_hdl, guid, &resp, &length) == 5488 IBDM_SUCCESS) { 5489 kmem_free(resp, length); 5490 return (B_TRUE); 5491 } 5492 return (B_FALSE); 5493 } 5494 5495 /* 5496 * ibdm_get_node_record_by_port 5497 * Sends a SA query to get the NODE record for port GUID 5498 * Returns IBDM_SUCCESS if the port GID is reachable. 5499 * 5500 * Note: the caller must be responsible for freeing the resource 5501 * by calling kmem_free(resp, length) later. 
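 *
 * A minimal usage sketch (ibdm_port_reachable() above is the
 * simplest real caller):
 *
 *	sa_node_record_t *resp;
 *	size_t len;
 *
 *	if (ibdm_get_node_record_by_port(sa_hdl, guid, &resp, &len) ==
 *	    IBDM_SUCCESS) {
 *		... use resp->NodeInfo.NodeGUID ...
 *		kmem_free(resp, len);
 *	}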
5502 */ 5503 static int 5504 ibdm_get_node_record_by_port(ibmf_saa_handle_t sa_hdl, ib_guid_t guid, 5505 sa_node_record_t **resp, size_t *length) 5506 { 5507 sa_node_record_t req; 5508 ibmf_saa_access_args_t args; 5509 int ret; 5510 ASSERT(resp != NULL && length != NULL); 5511 5512 IBTF_DPRINTF_L4("ibdm", "\tport_reachable: port_guid %llx", 5513 guid); 5514 5515 bzero(&req, sizeof (sa_node_record_t)); 5516 req.NodeInfo.PortGUID = guid; 5517 5518 args.sq_attr_id = SA_NODERECORD_ATTRID; 5519 args.sq_access_type = IBMF_SAA_RETRIEVE; 5520 args.sq_component_mask = SA_NODEINFO_COMPMASK_PORTGUID; 5521 args.sq_template = &req; 5522 args.sq_callback = NULL; 5523 args.sq_callback_arg = NULL; 5524 5525 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) resp); 5526 if (ret != IBMF_SUCCESS) { 5527 IBTF_DPRINTF_L2("ibdm", "\tport_reachable:" 5528 " SA Retrieve Failed: %d", ret); 5529 return (IBDM_FAILURE); 5530 } 5531 if (*resp == NULL || *length == 0) { 5532 IBTF_DPRINTF_L2("ibdm", "\tport_reachable: No records"); 5533 return (IBDM_FAILURE); 5534 } 5535 /* 5536 * There is one NodeRecord on each endport on a subnet. 5537 */ 5538 ASSERT(*length == sizeof (sa_node_record_t)); 5539 5540 return (IBDM_SUCCESS); 5541 } 5542 5543 5544 /* 5545 * Update the gidlist for all affected IOCs when GID becomes 5546 * available/unavailable. 5547 * 5548 * Parameters : 5549 * gidinfo - Incoming / Outgoing GID. 5550 * add_flag - 1 for GID added, 0 for GID removed. 5551 * - (-1) : IOC gid list updated, ioc_list required. 5552 * 5553 * This function gets the GID for the node GUID corresponding to the 5554 * port GID. Gets the IOU info 5555 */ 5556 static ibdm_ioc_info_t * 5557 ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *gid_info, int avail_flag) 5558 { 5559 ibdm_dp_gidinfo_t *node_gid = NULL; 5560 uint8_t niocs, ii; 5561 ibdm_ioc_info_t *ioc, *ioc_list = NULL, *tmp; 5562 5563 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist"); 5564 5565 switch (avail_flag) { 5566 case 1 : 5567 node_gid = ibdm_check_dest_nodeguid(gid_info); 5568 break; 5569 case 0 : 5570 node_gid = ibdm_handle_gid_rm(gid_info); 5571 break; 5572 case -1 : 5573 node_gid = gid_info; 5574 break; 5575 default : 5576 break; 5577 } 5578 5579 if (node_gid == NULL) { 5580 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist: " 5581 "No node GID found, port gid 0x%p, avail_flag %d", 5582 gid_info, avail_flag); 5583 return (NULL); 5584 } 5585 5586 mutex_enter(&node_gid->gl_mutex); 5587 if ((node_gid->gl_state != IBDM_GID_PROBING_COMPLETE && 5588 node_gid->gl_state != IBDM_GID_PROBING_SKIPPED) || 5589 node_gid->gl_iou == NULL) { 5590 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist " 5591 "gl_state %x, gl_iou %p", node_gid->gl_state, 5592 node_gid->gl_iou); 5593 mutex_exit(&node_gid->gl_mutex); 5594 return (NULL); 5595 } 5596 5597 niocs = node_gid->gl_iou->iou_info.iou_num_ctrl_slots; 5598 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist : niocs %x", 5599 niocs); 5600 for (ii = 0; ii < niocs; ii++) { 5601 ioc = IBDM_GIDINFO2IOCINFO(node_gid, ii); 5602 /* 5603 * Skip IOCs for which probe is not complete or 5604 * reprobe is progress 5605 */ 5606 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) { 5607 tmp = ibdm_dup_ioc_info(ioc, node_gid); 5608 tmp->ioc_info_updated.ib_gid_prop_updated = 1; 5609 tmp->ioc_next = ioc_list; 5610 ioc_list = tmp; 5611 } 5612 } 5613 mutex_exit(&node_gid->gl_mutex); 5614 5615 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist : return %p", 5616 ioc_list); 5617 return (ioc_list); 5618 } 5619 5620 /* 5621 * ibdm_saa_event_cb : 5622 * Event handling which does 
*not* require ibdm_hl_mutex to be 5623 * held are executed in the same thread. This is to prevent 5624 * deadlocks with HCA port down notifications which hold the 5625 * ibdm_hl_mutex. 5626 * 5627 * GID_AVAILABLE event is handled here. A taskq is spawned to 5628 * handle GID_UNAVAILABLE. 5629 * 5630 * A new mutex ibdm_ibnex_mutex has been introduced to protect 5631 * ibnex_callback. This has been done to prevent any possible 5632 * deadlock (described above) while handling GID_AVAILABLE. 5633 * 5634 * IBMF calls the event callback for a HCA port. The SA handle 5635 * for this port would be valid, till the callback returns. 5636 * IBDM calling IBDM using the above SA handle should be valid. 5637 * 5638 * IBDM will additionally check (SA handle != NULL), before 5639 * calling IBMF. 5640 */ 5641 /*ARGSUSED*/ 5642 static void 5643 ibdm_saa_event_cb(ibmf_saa_handle_t ibmf_saa_handle, 5644 ibmf_saa_subnet_event_t ibmf_saa_event, 5645 ibmf_saa_event_details_t *event_details, void *callback_arg) 5646 { 5647 ibdm_saa_event_arg_t *event_arg; 5648 ib_gid_t sgid, dgid; 5649 ibdm_port_attr_t *hca_port; 5650 ibdm_dp_gidinfo_t *gid_info, *node_gid_info = NULL; 5651 sa_node_record_t *nrec; 5652 size_t length; 5653 5654 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*event_arg)); 5655 5656 hca_port = (ibdm_port_attr_t *)callback_arg; 5657 5658 IBTF_DPRINTF_L4("ibdm", "\tsaa_event_cb(%x, %x, %x, %x)\n", 5659 ibmf_saa_handle, ibmf_saa_event, event_details, 5660 callback_arg); 5661 #ifdef DEBUG 5662 if (ibdm_ignore_saa_event) 5663 return; 5664 #endif 5665 5666 if (ibmf_saa_event == IBMF_SAA_EVENT_GID_AVAILABLE) { 5667 /* 5668 * Ensure no other probe / sweep fabric is in 5669 * progress. 5670 */ 5671 mutex_enter(&ibdm.ibdm_mutex); 5672 while (ibdm.ibdm_busy & IBDM_BUSY) 5673 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5674 ibdm.ibdm_busy |= IBDM_BUSY; 5675 mutex_exit(&ibdm.ibdm_mutex); 5676 5677 /* 5678 * If we already know about this GID, return. 5679 * GID_AVAILABLE may be reported for multiple HCA 5680 * ports. 
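 *
 * ibdm_check_dgid() below looks the GID up by <guid, prefix> in the
 * GID list already known to IBDM, so a duplicate notification is
 * simply dropped after releasing IBDM_BUSY.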
5681 */ 5682 if ((ibdm_check_dgid(event_details->ie_gid.gid_guid, 5683 event_details->ie_gid.gid_prefix)) != NULL) { 5684 mutex_enter(&ibdm.ibdm_mutex); 5685 ibdm.ibdm_busy &= ~IBDM_BUSY; 5686 cv_broadcast(&ibdm.ibdm_busy_cv); 5687 mutex_exit(&ibdm.ibdm_mutex); 5688 return; 5689 } 5690 5691 IBTF_DPRINTF_L4("ibdm", "\tGID (prefix %x, guid %llx) " 5692 "Insertion notified", 5693 event_details->ie_gid.gid_prefix, 5694 event_details->ie_gid.gid_guid); 5695 5696 /* This is a new gid, insert it to GID list */ 5697 sgid.gid_prefix = hca_port->pa_sn_prefix; 5698 sgid.gid_guid = hca_port->pa_port_guid; 5699 dgid.gid_prefix = event_details->ie_gid.gid_prefix; 5700 dgid.gid_guid = event_details->ie_gid.gid_guid; 5701 gid_info = ibdm_create_gid_info(hca_port, sgid, dgid); 5702 if (gid_info == NULL) { 5703 IBTF_DPRINTF_L4("ibdm", "\tGID_AVAILABLE: " 5704 "create_gid_info returned NULL"); 5705 mutex_enter(&ibdm.ibdm_mutex); 5706 ibdm.ibdm_busy &= ~IBDM_BUSY; 5707 cv_broadcast(&ibdm.ibdm_busy_cv); 5708 mutex_exit(&ibdm.ibdm_mutex); 5709 return; 5710 } 5711 mutex_enter(&gid_info->gl_mutex); 5712 gid_info->gl_state = IBDM_GID_PROBING_SKIPPED; 5713 mutex_exit(&gid_info->gl_mutex); 5714 5715 /* Get the node GUID */ 5716 if (ibdm_get_node_record_by_port(ibmf_saa_handle, dgid.gid_guid, 5717 &nrec, &length) != IBDM_SUCCESS) { 5718 /* 5719 * Set the state to PROBE_NOT_DONE for the 5720 * next sweep to probe it 5721 */ 5722 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_taskq: " 5723 "Skipping GID : port GUID not found"); 5724 mutex_enter(&gid_info->gl_mutex); 5725 gid_info->gl_state = IBDM_GID_PROBE_NOT_DONE; 5726 mutex_exit(&gid_info->gl_mutex); 5727 mutex_enter(&ibdm.ibdm_mutex); 5728 ibdm.ibdm_busy &= ~IBDM_BUSY; 5729 cv_broadcast(&ibdm.ibdm_busy_cv); 5730 mutex_exit(&ibdm.ibdm_mutex); 5731 return; 5732 } 5733 gid_info->gl_nodeguid = nrec->NodeInfo.NodeGUID; 5734 gid_info->gl_devid = nrec->NodeInfo.DeviceID; 5735 kmem_free(nrec, length); 5736 gid_info->gl_portguid = dgid.gid_guid; 5737 5738 /* 5739 * Get the gid info with the same node GUID. 5740 */ 5741 mutex_enter(&ibdm.ibdm_mutex); 5742 node_gid_info = ibdm.ibdm_dp_gidlist_head; 5743 while (node_gid_info) { 5744 if (node_gid_info->gl_nodeguid == 5745 gid_info->gl_nodeguid && 5746 node_gid_info->gl_iou != NULL) { 5747 break; 5748 } 5749 node_gid_info = node_gid_info->gl_next; 5750 } 5751 mutex_exit(&ibdm.ibdm_mutex); 5752 5753 /* 5754 * Handling a new GID requires filling of gl_hca_list. 5755 * This require ibdm hca_list to be parsed and hence 5756 * holding the ibdm_hl_mutex. Spawning a new thread to 5757 * handle this. 
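 *
 * ibdm_saa_handle_new_gid() (dispatched on system_taskq below) walks
 * the HCA list under ibdm_hl_mutex, fills gl_hca_list for every HCA
 * port that can reach the new port GUID and then probes the GID.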
5758 */ 5759 if (node_gid_info == NULL) { 5760 if (taskq_dispatch(system_taskq, 5761 ibdm_saa_handle_new_gid, (void *)gid_info, 5762 TQ_NOSLEEP) == NULL) { 5763 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: " 5764 "new_gid taskq_dispatch failed"); 5765 return; 5766 } 5767 } 5768 5769 mutex_enter(&ibdm.ibdm_mutex); 5770 ibdm.ibdm_busy &= ~IBDM_BUSY; 5771 cv_broadcast(&ibdm.ibdm_busy_cv); 5772 mutex_exit(&ibdm.ibdm_mutex); 5773 return; 5774 } 5775 5776 if (ibmf_saa_event != IBMF_SAA_EVENT_GID_UNAVAILABLE) 5777 return; 5778 5779 event_arg = (ibdm_saa_event_arg_t *)kmem_alloc( 5780 sizeof (ibdm_saa_event_arg_t), KM_SLEEP); 5781 event_arg->ibmf_saa_handle = ibmf_saa_handle; 5782 event_arg->ibmf_saa_event = ibmf_saa_event; 5783 bcopy(event_details, &event_arg->event_details, 5784 sizeof (ibmf_saa_event_details_t)); 5785 event_arg->callback_arg = callback_arg; 5786 5787 if (taskq_dispatch(system_taskq, ibdm_saa_event_taskq, 5788 (void *)event_arg, TQ_NOSLEEP) == NULL) { 5789 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: " 5790 "taskq_dispatch failed"); 5791 ibdm_free_saa_event_arg(event_arg); 5792 return; 5793 } 5794 } 5795 5796 /* 5797 * Handle a new GID discovered by GID_AVAILABLE saa event. 5798 */ 5799 void 5800 ibdm_saa_handle_new_gid(void *arg) 5801 { 5802 ibdm_dp_gidinfo_t *gid_info; 5803 ibdm_hca_list_t *hca_list = NULL; 5804 ibdm_port_attr_t *port = NULL; 5805 ibdm_ioc_info_t *ioc_list = NULL; 5806 5807 IBTF_DPRINTF_L4(ibdm_string, "\tsaa_handle_new_gid(%p)", arg); 5808 5809 gid_info = (ibdm_dp_gidinfo_t *)arg; 5810 5811 /* 5812 * Ensure that no other sweep / probe has completed 5813 * probing this gid. 5814 */ 5815 mutex_enter(&gid_info->gl_mutex); 5816 if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) { 5817 mutex_exit(&gid_info->gl_mutex); 5818 return; 5819 } 5820 mutex_exit(&gid_info->gl_mutex); 5821 5822 /* 5823 * Parse HCAs to fill gl_hca_list 5824 */ 5825 mutex_enter(&ibdm.ibdm_hl_mutex); 5826 for (ibdm_get_next_port(&hca_list, &port, 1); port; 5827 ibdm_get_next_port(&hca_list, &port, 1)) { 5828 if (ibdm_port_reachable(port->pa_sa_hdl, 5829 gid_info->gl_portguid) == B_TRUE) { 5830 ibdm_addto_glhcalist(gid_info, hca_list); 5831 } 5832 } 5833 mutex_exit(&ibdm.ibdm_hl_mutex); 5834 5835 /* 5836 * Ensure no other probe / sweep fabric is in 5837 * progress. 
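 *
 * The serialization handshake used here (and throughout this file)
 * is, in sketch form:
 *
 *	mutex_enter(&ibdm.ibdm_mutex);
 *	while (ibdm.ibdm_busy & IBDM_BUSY)
 *		cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
 *	ibdm.ibdm_busy |= IBDM_BUSY;
 *	mutex_exit(&ibdm.ibdm_mutex);
 *
 *	... probe / sweep work ...
 *
 *	mutex_enter(&ibdm.ibdm_mutex);
 *	ibdm.ibdm_busy &= ~IBDM_BUSY;
 *	cv_broadcast(&ibdm.ibdm_busy_cv);
 *	mutex_exit(&ibdm.ibdm_mutex);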
5838 */ 5839 mutex_enter(&ibdm.ibdm_mutex); 5840 while (ibdm.ibdm_busy & IBDM_BUSY) 5841 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5842 ibdm.ibdm_busy |= IBDM_BUSY; 5843 mutex_exit(&ibdm.ibdm_mutex); 5844 5845 /* 5846 * New IOU probe it, to check if new IOCs 5847 */ 5848 IBTF_DPRINTF_L4(ibdm_string, "\tsaa_handle_new_gid: " 5849 "new GID : probing"); 5850 mutex_enter(&ibdm.ibdm_mutex); 5851 ibdm.ibdm_ngid_probes_in_progress++; 5852 mutex_exit(&ibdm.ibdm_mutex); 5853 mutex_enter(&gid_info->gl_mutex); 5854 gid_info->gl_reprobe_flag = 0; 5855 gid_info->gl_state = IBDM_GID_PROBE_NOT_DONE; 5856 mutex_exit(&gid_info->gl_mutex); 5857 ibdm_probe_gid_thread((void *)gid_info); 5858 5859 mutex_enter(&ibdm.ibdm_mutex); 5860 ibdm_wait_probe_completion(); 5861 mutex_exit(&ibdm.ibdm_mutex); 5862 5863 if (gid_info->gl_iou == NULL) { 5864 mutex_enter(&ibdm.ibdm_mutex); 5865 ibdm.ibdm_busy &= ~IBDM_BUSY; 5866 cv_broadcast(&ibdm.ibdm_busy_cv); 5867 mutex_exit(&ibdm.ibdm_mutex); 5868 return; 5869 } 5870 5871 /* 5872 * Update GID list in all IOCs affected by this 5873 */ 5874 ioc_list = ibdm_update_ioc_gidlist(gid_info, 1); 5875 5876 /* 5877 * Pass on the IOCs with updated GIDs to IBnexus 5878 */ 5879 if (ioc_list) { 5880 mutex_enter(&ibdm.ibdm_ibnex_mutex); 5881 if (ibdm.ibdm_ibnex_callback != NULL) { 5882 (*ibdm.ibdm_ibnex_callback)((void *)ioc_list, 5883 IBDM_EVENT_IOC_PROP_UPDATE); 5884 } 5885 mutex_exit(&ibdm.ibdm_ibnex_mutex); 5886 } 5887 5888 mutex_enter(&ibdm.ibdm_mutex); 5889 ibdm.ibdm_busy &= ~IBDM_BUSY; 5890 cv_broadcast(&ibdm.ibdm_busy_cv); 5891 mutex_exit(&ibdm.ibdm_mutex); 5892 } 5893 5894 /* 5895 * ibdm_saa_event_taskq : 5896 * GID_UNAVAILABLE Event handling requires ibdm_hl_mutex to be 5897 * held. The GID_UNAVAILABLE handling is done in a taskq to 5898 * prevent deadlocks with HCA port down notifications which hold 5899 * ibdm_hl_mutex. 
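 *
 * The ibdm_saa_event_arg_t handed in here is allocated by
 * ibdm_saa_event_cb(); every return path below frees it through
 * ibdm_free_saa_event_arg().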
5900 */ 5901 void 5902 ibdm_saa_event_taskq(void *arg) 5903 { 5904 ibdm_saa_event_arg_t *event_arg; 5905 ibmf_saa_handle_t ibmf_saa_handle; 5906 ibmf_saa_subnet_event_t ibmf_saa_event; 5907 ibmf_saa_event_details_t *event_details; 5908 void *callback_arg; 5909 5910 ibdm_dp_gidinfo_t *gid_info; 5911 ibdm_port_attr_t *hca_port, *port = NULL; 5912 ibdm_hca_list_t *hca_list = NULL; 5913 int sa_handle_valid = 0; 5914 ibdm_ioc_info_t *ioc_list = NULL; 5915 5916 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*event_arg)); 5917 5918 event_arg = (ibdm_saa_event_arg_t *)arg; 5919 ibmf_saa_handle = event_arg->ibmf_saa_handle; 5920 ibmf_saa_event = event_arg->ibmf_saa_event; 5921 event_details = &event_arg->event_details; 5922 callback_arg = event_arg->callback_arg; 5923 5924 ASSERT(callback_arg != NULL); 5925 ASSERT(ibmf_saa_event == IBMF_SAA_EVENT_GID_UNAVAILABLE); 5926 IBTF_DPRINTF_L4("ibdm", "\tsaa_event_taskq(%x, %x, %x, %x)", 5927 ibmf_saa_handle, ibmf_saa_event, event_details, 5928 callback_arg); 5929 5930 hca_port = (ibdm_port_attr_t *)callback_arg; 5931 5932 /* Check if the port_attr is still valid */ 5933 mutex_enter(&ibdm.ibdm_hl_mutex); 5934 for (ibdm_get_next_port(&hca_list, &port, 0); port; 5935 ibdm_get_next_port(&hca_list, &port, 0)) { 5936 if (port == hca_port && port->pa_port_guid == 5937 hca_port->pa_port_guid) { 5938 if (ibmf_saa_handle == hca_port->pa_sa_hdl) 5939 sa_handle_valid = 1; 5940 break; 5941 } 5942 } 5943 mutex_exit(&ibdm.ibdm_hl_mutex); 5944 if (sa_handle_valid == 0) { 5945 ibdm_free_saa_event_arg(event_arg); 5946 return; 5947 } 5948 5949 if (hca_port && (hca_port->pa_sa_hdl == NULL || 5950 ibmf_saa_handle != hca_port->pa_sa_hdl)) { 5951 ibdm_free_saa_event_arg(event_arg); 5952 return; 5953 } 5954 hca_list = NULL; 5955 port = NULL; 5956 5957 /* 5958 * Check if the GID is visible to other HCA ports. 5959 * Return if so. 5960 */ 5961 mutex_enter(&ibdm.ibdm_hl_mutex); 5962 for (ibdm_get_next_port(&hca_list, &port, 1); port; 5963 ibdm_get_next_port(&hca_list, &port, 1)) { 5964 if (ibdm_port_reachable(port->pa_sa_hdl, 5965 event_details->ie_gid.gid_guid) == B_TRUE) { 5966 mutex_exit(&ibdm.ibdm_hl_mutex); 5967 ibdm_free_saa_event_arg(event_arg); 5968 return; 5969 } 5970 } 5971 mutex_exit(&ibdm.ibdm_hl_mutex); 5972 5973 /* 5974 * Ensure no other probe / sweep fabric is in 5975 * progress. 5976 */ 5977 mutex_enter(&ibdm.ibdm_mutex); 5978 while (ibdm.ibdm_busy & IBDM_BUSY) 5979 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5980 ibdm.ibdm_busy |= IBDM_BUSY; 5981 mutex_exit(&ibdm.ibdm_mutex); 5982 5983 /* 5984 * If this GID is no longer in GID list, return 5985 * GID_UNAVAILABLE may be reported for multiple HCA 5986 * ports. 
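 *
 * The walk below matches on gl_portguid only; when no entry is
 * found the IBDM_BUSY flag is dropped, the event argument freed and
 * the notification ignored.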
5987 */ 5988 mutex_enter(&ibdm.ibdm_mutex); 5989 gid_info = ibdm.ibdm_dp_gidlist_head; 5990 while (gid_info) { 5991 if (gid_info->gl_portguid == 5992 event_details->ie_gid.gid_guid) { 5993 break; 5994 } 5995 gid_info = gid_info->gl_next; 5996 } 5997 mutex_exit(&ibdm.ibdm_mutex); 5998 if (gid_info == NULL) { 5999 mutex_enter(&ibdm.ibdm_mutex); 6000 ibdm.ibdm_busy &= ~IBDM_BUSY; 6001 cv_broadcast(&ibdm.ibdm_busy_cv); 6002 mutex_exit(&ibdm.ibdm_mutex); 6003 ibdm_free_saa_event_arg(event_arg); 6004 return; 6005 } 6006 6007 IBTF_DPRINTF_L4("ibdm", "\tGID (prefix %x, guid %llx) " 6008 "Unavailable notification", 6009 event_details->ie_gid.gid_prefix, 6010 event_details->ie_gid.gid_guid); 6011 6012 /* 6013 * Update GID list in all IOCs affected by this 6014 */ 6015 if (gid_info->gl_state == IBDM_GID_PROBING_SKIPPED || 6016 gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) 6017 ioc_list = ibdm_update_ioc_gidlist(gid_info, 0); 6018 6019 /* 6020 * Remove GID from the global GID list 6021 * Handle the case where all port GIDs for an 6022 * IOU have been hot-removed. Check both gid_info 6023 * & ioc_info for checking ngids. 6024 */ 6025 mutex_enter(&ibdm.ibdm_mutex); 6026 if (gid_info->gl_iou != NULL && gid_info->gl_ngids == 0) { 6027 mutex_enter(&gid_info->gl_mutex); 6028 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou); 6029 mutex_exit(&gid_info->gl_mutex); 6030 } 6031 if (gid_info->gl_prev != NULL) 6032 gid_info->gl_prev->gl_next = gid_info->gl_next; 6033 if (gid_info->gl_next != NULL) 6034 gid_info->gl_next->gl_prev = gid_info->gl_prev; 6035 6036 if (gid_info == ibdm.ibdm_dp_gidlist_head) 6037 ibdm.ibdm_dp_gidlist_head = gid_info->gl_next; 6038 if (gid_info == ibdm.ibdm_dp_gidlist_tail) 6039 ibdm.ibdm_dp_gidlist_tail = gid_info->gl_prev; 6040 ibdm.ibdm_ngids--; 6041 6042 ibdm.ibdm_busy &= ~IBDM_BUSY; 6043 cv_broadcast(&ibdm.ibdm_busy_cv); 6044 mutex_exit(&ibdm.ibdm_mutex); 6045 6046 /* free the hca_list on this gid_info */ 6047 ibdm_delete_glhca_list(gid_info); 6048 6049 mutex_destroy(&gid_info->gl_mutex); 6050 kmem_free(gid_info, sizeof (ibdm_dp_gidinfo_t)); 6051 6052 /* 6053 * Pass on the IOCs with updated GIDs to IBnexus 6054 */ 6055 if (ioc_list) { 6056 IBTF_DPRINTF_L4("ibdm", "\tGID_UNAVAILABLE " 6057 "IOC_PROP_UPDATE for %p\n", ioc_list); 6058 mutex_enter(&ibdm.ibdm_ibnex_mutex); 6059 if (ibdm.ibdm_ibnex_callback != NULL) { 6060 (*ibdm.ibdm_ibnex_callback)((void *) 6061 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE); 6062 } 6063 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6064 } 6065 6066 ibdm_free_saa_event_arg(event_arg); 6067 } 6068 6069 6070 static int 6071 ibdm_cmp_gid_list(ibdm_gid_t *new, ibdm_gid_t *prev) 6072 { 6073 ibdm_gid_t *scan_new, *scan_prev; 6074 int cmp_failed = 0; 6075 6076 ASSERT(new != NULL); 6077 ASSERT(prev != NULL); 6078 6079 /* 6080 * Search for each new gid anywhere in the prev GID list. 6081 * Note that the gid list could have been re-ordered. 
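 *
 * This is a subset test: 1 is returned as soon as a gid in 'new' is
 * missing from 'prev'. Combined with the element-count check done by
 * the caller it amounts to a set comparison (assuming no duplicate
 * gids in either list).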
6082 */ 6083 for (scan_new = new; scan_new; scan_new = scan_new->gid_next) { 6084 for (scan_prev = prev, cmp_failed = 1; scan_prev; 6085 scan_prev = scan_prev->gid_next) { 6086 if (scan_prev->gid_dgid_hi == scan_new->gid_dgid_hi && 6087 scan_prev->gid_dgid_lo == scan_new->gid_dgid_lo) { 6088 cmp_failed = 0; 6089 break; 6090 } 6091 } 6092 6093 if (cmp_failed) 6094 return (1); 6095 } 6096 return (0); 6097 } 6098 6099 /* 6100 * This is always called in a single thread 6101 * This function updates the gid_list and serv_list of IOC 6102 * The current gid_list is in ioc_info_t(contains only port 6103 * guids for which probe is done) & gidinfo_t(other port gids) 6104 * The gids in both locations are used for comparision. 6105 */ 6106 static void 6107 ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *ioc, ibdm_dp_gidinfo_t *gidinfo) 6108 { 6109 ibdm_gid_t *cur_gid_list; 6110 uint_t cur_nportgids; 6111 6112 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 6113 6114 ioc->ioc_info_updated.ib_prop_updated = 0; 6115 6116 6117 /* Current GID list in gid_info only */ 6118 cur_gid_list = gidinfo->gl_gid; 6119 cur_nportgids = gidinfo->gl_ngids; 6120 6121 if (ioc->ioc_prev_serv_cnt != 6122 ioc->ioc_profile.ioc_service_entries || 6123 ibdm_serv_cmp(&ioc->ioc_serv[0], &ioc->ioc_prev_serv[0], 6124 ioc->ioc_prev_serv_cnt)) 6125 ioc->ioc_info_updated.ib_srv_prop_updated = 1; 6126 6127 if (ioc->ioc_prev_nportgids != cur_nportgids || 6128 ioc->ioc_prev_gid_list == NULL || cur_gid_list == NULL) { 6129 ioc->ioc_info_updated.ib_gid_prop_updated = 1; 6130 } else if (ibdm_cmp_gid_list(ioc->ioc_prev_gid_list, cur_gid_list)) { 6131 ioc->ioc_info_updated.ib_gid_prop_updated = 1; 6132 } 6133 6134 /* Zero out previous entries */ 6135 ibdm_free_gid_list(ioc->ioc_prev_gid_list); 6136 if (ioc->ioc_prev_serv) 6137 kmem_free(ioc->ioc_prev_serv, ioc->ioc_prev_serv_cnt * 6138 sizeof (ibdm_srvents_info_t)); 6139 ioc->ioc_prev_serv_cnt = 0; 6140 ioc->ioc_prev_nportgids = 0; 6141 ioc->ioc_prev_serv = NULL; 6142 ioc->ioc_prev_gid_list = NULL; 6143 } 6144 6145 /* 6146 * Handle GID removal. This returns gid_info of an GID for the same 6147 * node GUID, if found. For an GID with IOU information, the same 6148 * gid_info is returned if no gid_info with same node_guid is found. 6149 */ 6150 static ibdm_dp_gidinfo_t * 6151 ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *rm_gid) 6152 { 6153 ibdm_dp_gidinfo_t *gid_list; 6154 6155 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm(0x%p)", rm_gid); 6156 6157 if (rm_gid->gl_iou == NULL) { 6158 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm NO iou"); 6159 /* 6160 * Search for a GID with same node_guid and 6161 * gl_iou != NULL 6162 */ 6163 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 6164 gid_list = gid_list->gl_next) { 6165 if (gid_list->gl_iou != NULL && (gid_list->gl_nodeguid 6166 == rm_gid->gl_nodeguid)) 6167 break; 6168 } 6169 6170 if (gid_list) 6171 ibdm_rmfrom_glgid_list(gid_list, rm_gid); 6172 6173 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm ret %p", gid_list); 6174 return (gid_list); 6175 } else { 6176 /* 6177 * Search for a GID with same node_guid and 6178 * gl_iou == NULL 6179 */ 6180 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm with iou"); 6181 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 6182 gid_list = gid_list->gl_next) { 6183 if (gid_list->gl_iou == NULL && (gid_list->gl_nodeguid 6184 == rm_gid->gl_nodeguid)) 6185 break; 6186 } 6187 6188 if (gid_list) { 6189 /* 6190 * Copy the following fields from rm_gid : 6191 * 1. gl_state 6192 * 2. gl_iou 6193 * 3. 
gl_gid & gl_ngids 6194 * 6195 * Note : Function is synchronized by 6196 * ibdm_busy flag. 6197 * 6198 * Note : Redirect info is initialized if 6199 * any MADs for the GID fail 6200 */ 6201 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm " 6202 "copying info to GID with gl_iou != NULl"); 6203 gid_list->gl_state = rm_gid->gl_state; 6204 gid_list->gl_iou = rm_gid->gl_iou; 6205 gid_list->gl_gid = rm_gid->gl_gid; 6206 gid_list->gl_ngids = rm_gid->gl_ngids; 6207 6208 /* Remove the GID from gl_gid list */ 6209 ibdm_rmfrom_glgid_list(gid_list, rm_gid); 6210 } else { 6211 /* 6212 * Handle a case where all GIDs to the IOU have 6213 * been removed. 6214 */ 6215 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm 0 GID " 6216 "to IOU"); 6217 6218 ibdm_rmfrom_glgid_list(rm_gid, rm_gid); 6219 return (rm_gid); 6220 } 6221 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm ret %p", gid_list); 6222 return (gid_list); 6223 } 6224 } 6225 6226 static void 6227 ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *gid_info, 6228 ibdm_dp_gidinfo_t *rm_gid) 6229 { 6230 ibdm_gid_t *tmp, *prev; 6231 6232 IBTF_DPRINTF_L4("ibdm", "\trmfrom_glgid (%p, %p)", 6233 gid_info, rm_gid); 6234 6235 for (tmp = gid_info->gl_gid, prev = NULL; tmp; ) { 6236 if (tmp->gid_dgid_hi == rm_gid->gl_dgid_hi && 6237 tmp->gid_dgid_lo == rm_gid->gl_dgid_lo) { 6238 if (prev == NULL) 6239 gid_info->gl_gid = tmp->gid_next; 6240 else 6241 prev->gid_next = tmp->gid_next; 6242 6243 kmem_free(tmp, sizeof (ibdm_gid_t)); 6244 gid_info->gl_ngids--; 6245 break; 6246 } else { 6247 prev = tmp; 6248 tmp = tmp->gid_next; 6249 } 6250 } 6251 } 6252 6253 static void 6254 ibdm_addto_gidlist(ibdm_gid_t **src_ptr, ibdm_gid_t *dest) 6255 { 6256 ibdm_gid_t *head = NULL, *new, *tail; 6257 6258 /* First copy the destination */ 6259 for (; dest; dest = dest->gid_next) { 6260 new = kmem_zalloc(sizeof (ibdm_gid_t), KM_SLEEP); 6261 new->gid_dgid_hi = dest->gid_dgid_hi; 6262 new->gid_dgid_lo = dest->gid_dgid_lo; 6263 new->gid_next = head; 6264 head = new; 6265 } 6266 6267 /* Insert this to the source */ 6268 if (*src_ptr == NULL) 6269 *src_ptr = head; 6270 else { 6271 for (tail = *src_ptr; tail->gid_next != NULL; 6272 tail = tail->gid_next) 6273 ; 6274 6275 tail->gid_next = head; 6276 } 6277 } 6278 6279 static void 6280 ibdm_free_gid_list(ibdm_gid_t *head) 6281 { 6282 ibdm_gid_t *delete; 6283 6284 for (delete = head; delete; ) { 6285 head = delete->gid_next; 6286 kmem_free(delete, sizeof (ibdm_gid_t)); 6287 delete = head; 6288 } 6289 } 6290 6291 /* 6292 * This function rescans the DM capable GIDs (gl_state is 6293 * GID_PROBE_COMPLETE or IBDM_GID_PROBING_SKIPPED.This 6294 * basically checks if the DM capable GID is reachable. If 6295 * not this is handled the same way as GID_UNAVAILABLE, 6296 * except that notifications are not send to IBnexus. 6297 * 6298 * This function also initializes the ioc_prev_list for 6299 * a particular IOC (when called from probe_ioc, with 6300 * ioc_guidp != NULL) or all IOCs for the gid (called from 6301 * sweep_fabric, ioc_guidp == NULL). 
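 *
 * Expected call patterns for the two callers named above (sketch):
 *
 *	ibdm_rescan_gidlist(&ioc_guid);		from ibdm_probe_ioc()
 *	ibdm_rescan_gidlist(NULL);		from ibdm_sweep_fabric()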
6302 */ 6303 static void 6304 ibdm_rescan_gidlist(ib_guid_t *ioc_guidp) 6305 { 6306 ibdm_dp_gidinfo_t *gid_info, *tmp; 6307 int ii, niocs, found; 6308 ibdm_hca_list_t *hca_list = NULL; 6309 ibdm_port_attr_t *port = NULL; 6310 ibdm_ioc_info_t *ioc_list; 6311 6312 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; ) { 6313 found = 0; 6314 if (gid_info->gl_state != IBDM_GID_PROBING_SKIPPED && 6315 gid_info->gl_state != IBDM_GID_PROBING_COMPLETE) { 6316 gid_info = gid_info->gl_next; 6317 continue; 6318 } 6319 6320 /* 6321 * Check if the GID is visible to any HCA ports. 6322 * Return if so. 6323 */ 6324 mutex_enter(&ibdm.ibdm_hl_mutex); 6325 for (ibdm_get_next_port(&hca_list, &port, 1); port; 6326 ibdm_get_next_port(&hca_list, &port, 1)) { 6327 if (ibdm_port_reachable(port->pa_sa_hdl, 6328 gid_info->gl_dgid_lo) == B_TRUE) { 6329 found = 1; 6330 break; 6331 } 6332 } 6333 mutex_exit(&ibdm.ibdm_hl_mutex); 6334 6335 if (found) { 6336 if (gid_info->gl_iou == NULL) { 6337 gid_info = gid_info->gl_next; 6338 continue; 6339 } 6340 6341 /* Intialize the ioc_prev_gid_list */ 6342 niocs = 6343 gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 6344 for (ii = 0; ii < niocs; ii++) { 6345 ioc_list = IBDM_GIDINFO2IOCINFO(gid_info, ii); 6346 6347 if (ioc_guidp == NULL || (*ioc_guidp == 6348 ioc_list->ioc_profile.ioc_guid)) { 6349 /* Add info of GIDs in gid_info also */ 6350 ibdm_addto_gidlist( 6351 &ioc_list->ioc_prev_gid_list, 6352 gid_info->gl_gid); 6353 ioc_list->ioc_prev_nportgids = 6354 gid_info->gl_ngids; 6355 } 6356 } 6357 gid_info = gid_info->gl_next; 6358 continue; 6359 } 6360 6361 IBTF_DPRINTF_L4("ibdm", "\trescan_gidlist " 6362 "deleted port GUID %llx", 6363 gid_info->gl_dgid_lo); 6364 6365 /* 6366 * Update GID list in all IOCs affected by this 6367 */ 6368 ioc_list = ibdm_update_ioc_gidlist(gid_info, 0); 6369 6370 /* 6371 * Remove GID from the global GID list 6372 * Handle the case where all port GIDs for an 6373 * IOU have been hot-removed. 6374 */ 6375 mutex_enter(&ibdm.ibdm_mutex); 6376 if (gid_info->gl_iou != NULL && gid_info->gl_ngids == 0) { 6377 mutex_enter(&gid_info->gl_mutex); 6378 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou); 6379 mutex_exit(&gid_info->gl_mutex); 6380 } 6381 6382 tmp = gid_info->gl_next; 6383 if (gid_info->gl_prev != NULL) 6384 gid_info->gl_prev->gl_next = gid_info->gl_next; 6385 if (gid_info->gl_next != NULL) 6386 gid_info->gl_next->gl_prev = gid_info->gl_prev; 6387 6388 if (gid_info == ibdm.ibdm_dp_gidlist_head) 6389 ibdm.ibdm_dp_gidlist_head = gid_info->gl_next; 6390 if (gid_info == ibdm.ibdm_dp_gidlist_tail) 6391 ibdm.ibdm_dp_gidlist_tail = gid_info->gl_prev; 6392 ibdm.ibdm_ngids--; 6393 mutex_exit(&ibdm.ibdm_mutex); 6394 6395 /* free the hca_list on this gid_info */ 6396 ibdm_delete_glhca_list(gid_info); 6397 6398 mutex_destroy(&gid_info->gl_mutex); 6399 kmem_free(gid_info, sizeof (ibdm_dp_gidinfo_t)); 6400 6401 gid_info = tmp; 6402 6403 /* 6404 * Pass on the IOCs with updated GIDs to IBnexus 6405 */ 6406 if (ioc_list) { 6407 IBTF_DPRINTF_L4("ibdm", "\trescan_gidlist " 6408 "IOC_PROP_UPDATE for %p\n", ioc_list); 6409 mutex_enter(&ibdm.ibdm_ibnex_mutex); 6410 if (ibdm.ibdm_ibnex_callback != NULL) { 6411 (*ibdm.ibdm_ibnex_callback)((void *) 6412 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE); 6413 } 6414 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6415 } 6416 } 6417 } 6418 6419 /* 6420 * This function notifies IBnex of IOCs on this GID. 6421 * Notification is for GIDs with gl_reprobe_flag set. 
6422 * The flag is set when IOC probe / fabric sweep 6423 * probes a GID starting from CLASS port info. 6424 * 6425 * IBnexus will have information of a reconnected IOC 6426 * if it had probed it before. If this is a new IOC, 6427 * IBnexus ignores the notification. 6428 * 6429 * This function should be called with no locks held. 6430 */ 6431 static void 6432 ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *gid_info) 6433 { 6434 ibdm_ioc_info_t *ioc_list; 6435 6436 if (gid_info->gl_reprobe_flag == 0 || 6437 gid_info->gl_iou == NULL) 6438 return; 6439 6440 ioc_list = ibdm_update_ioc_gidlist(gid_info, -1); 6441 6442 /* 6443 * Pass on the IOCs with updated GIDs to IBnexus 6444 */ 6445 if (ioc_list) { 6446 mutex_enter(&ibdm.ibdm_ibnex_mutex); 6447 if (ibdm.ibdm_ibnex_callback != NULL) { 6448 (*ibdm.ibdm_ibnex_callback)((void *)ioc_list, 6449 IBDM_EVENT_IOC_PROP_UPDATE); 6450 } 6451 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6452 } 6453 } 6454 6455 6456 static void 6457 ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *arg) 6458 { 6459 if (arg != NULL) 6460 kmem_free(arg, sizeof (ibdm_saa_event_arg_t)); 6461 } 6462 6463 /* 6464 * This function parses the list of HCAs and HCA ports 6465 * to return the port_attr of the next HCA port. A port 6466 * connected to IB fabric (port_state active) is returned, 6467 * if connected_flag is set. 6468 */ 6469 static void 6470 ibdm_get_next_port(ibdm_hca_list_t **inp_hcap, 6471 ibdm_port_attr_t **inp_portp, int connect_flag) 6472 { 6473 int ii; 6474 ibdm_port_attr_t *port, *next_port = NULL; 6475 ibdm_port_attr_t *inp_port; 6476 ibdm_hca_list_t *hca_list; 6477 int found = 0; 6478 6479 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 6480 IBTF_DPRINTF_L4(ibdm_string, "\tget_next_port(%p, %p, %x)", 6481 inp_hcap, inp_portp, connect_flag); 6482 6483 hca_list = *inp_hcap; 6484 inp_port = *inp_portp; 6485 6486 if (hca_list == NULL) 6487 hca_list = ibdm.ibdm_hca_list_head; 6488 6489 for (; hca_list; hca_list = hca_list->hl_next) { 6490 for (ii = 0; ii < hca_list->hl_nports; ii++) { 6491 port = &hca_list->hl_port_attr[ii]; 6492 6493 /* 6494 * inp_port != NULL; 6495 * Skip till we find the matching port 6496 */ 6497 if (inp_port && !found) { 6498 if (inp_port == port) 6499 found = 1; 6500 continue; 6501 } 6502 6503 if (!connect_flag) { 6504 next_port = port; 6505 break; 6506 } 6507 6508 if (port->pa_sa_hdl == NULL) 6509 ibdm_initialize_port(port); 6510 if (port->pa_sa_hdl == NULL) 6511 (void) ibdm_fini_port(port); 6512 else if (next_port == NULL && 6513 port->pa_sa_hdl != NULL && 6514 port->pa_state == IBT_PORT_ACTIVE) { 6515 next_port = port; 6516 break; 6517 } 6518 } 6519 6520 if (next_port) 6521 break; 6522 } 6523 6524 IBTF_DPRINTF_L4(ibdm_string, "\tget_next_port : " 6525 "returns hca_list %p port %p", hca_list, next_port); 6526 *inp_hcap = hca_list; 6527 *inp_portp = next_port; 6528 } 6529 6530 static void 6531 ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *nodegid, ibdm_dp_gidinfo_t *addgid) 6532 { 6533 ibdm_gid_t *tmp; 6534 6535 tmp = kmem_zalloc(sizeof (ibdm_gid_t), KM_SLEEP); 6536 tmp->gid_dgid_hi = addgid->gl_dgid_hi; 6537 tmp->gid_dgid_lo = addgid->gl_dgid_lo; 6538 6539 mutex_enter(&nodegid->gl_mutex); 6540 tmp->gid_next = nodegid->gl_gid; 6541 nodegid->gl_gid = tmp; 6542 nodegid->gl_ngids++; 6543 mutex_exit(&nodegid->gl_mutex); 6544 } 6545 6546 static void 6547 ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *gid_info, 6548 ibdm_hca_list_t *hca) 6549 { 6550 ibdm_hca_list_t *head, *prev = NULL, *temp; 6551 6552 IBTF_DPRINTF_L4(ibdm_string, "\taddto_glhcalist(%p, %p) " 6553 ": gl_hca_list %p", 
gid_info, hca, gid_info->gl_hca_list); 6554 ASSERT(!MUTEX_HELD(&gid_info->gl_mutex)); 6555 6556 mutex_enter(&gid_info->gl_mutex); 6557 head = gid_info->gl_hca_list; 6558 if (head == NULL) { 6559 head = ibdm_dup_hca_attr(hca); 6560 head->hl_next = NULL; 6561 gid_info->gl_hca_list = head; 6562 mutex_exit(&gid_info->gl_mutex); 6563 IBTF_DPRINTF_L4(ibdm_string, "\tadd_to_glhcalist: " 6564 "gid %p, gl_hca_list %p", gid_info, 6565 gid_info->gl_hca_list); 6566 return; 6567 } 6568 6569 /* Check if already in the list */ 6570 while (head) { 6571 if (head->hl_hca_guid == hca->hl_hca_guid) { 6572 mutex_exit(&gid_info->gl_mutex); 6573 IBTF_DPRINTF_L4(ibdm_string, 6574 "\taddto_glhcalist : gid %p hca %p dup", 6575 gid_info, hca); 6576 return; 6577 } 6578 prev = head; 6579 head = head->hl_next; 6580 } 6581 6582 /* Add this HCA to gl_hca_list */ 6583 temp = ibdm_dup_hca_attr(hca); 6584 temp->hl_next = NULL; 6585 prev->hl_next = temp; 6586 mutex_exit(&gid_info->gl_mutex); 6587 6588 IBTF_DPRINTF_L4(ibdm_string, "\tadd_to_glhcalist: " 6589 "gid %p, gl_hca_list %p", gid_info, gid_info->gl_hca_list); 6590 } 6591 6592 static void 6593 ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *gid_info) 6594 { 6595 ASSERT(!MUTEX_HELD(&gid_info->gl_mutex)); 6596 ASSERT(!MUTEX_HELD(&ibdm.ibdm_mutex)); 6597 6598 mutex_enter(&gid_info->gl_mutex); 6599 if (gid_info->gl_hca_list) 6600 ibdm_ibnex_free_hca_list(gid_info->gl_hca_list); 6601 gid_info->gl_hca_list = NULL; 6602 mutex_exit(&gid_info->gl_mutex); 6603 } 6604 6605 6606 static void 6607 ibdm_reset_all_dgids(ibmf_saa_handle_t port_sa_hdl) 6608 { 6609 IBTF_DPRINTF_L4(ibdm_string, "\treset_all_dgids(%X)", 6610 port_sa_hdl); 6611 6612 ASSERT(!MUTEX_HELD(&ibdm.ibdm_mutex)); 6613 ASSERT(!MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 6614 6615 /* Check : Not busy in another probe / sweep */ 6616 mutex_enter(&ibdm.ibdm_mutex); 6617 if ((ibdm.ibdm_busy & IBDM_BUSY) == 0) { 6618 ibdm_dp_gidinfo_t *gid_info; 6619 6620 ibdm.ibdm_busy |= IBDM_BUSY; 6621 mutex_exit(&ibdm.ibdm_mutex); 6622 6623 /* 6624 * Check if any GID is using the SA & IBMF handle 6625 * of HCA port going down. Reset ibdm_dp_gidinfo_t 6626 * using another HCA port which can reach the GID. 6627 * This is for DM capable GIDs only, no need to do 6628 * this for others 6629 * 6630 * Delete the GID if no alternate HCA port to reach 6631 * it is found. 6632 */ 6633 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; ) { 6634 ibdm_dp_gidinfo_t *tmp; 6635 6636 IBTF_DPRINTF_L4(ibdm_string, "\tevent_hdlr " 6637 "checking gidinfo %p", gid_info); 6638 6639 if (gid_info->gl_sa_hdl == port_sa_hdl) { 6640 IBTF_DPRINTF_L3(ibdm_string, 6641 "\tevent_hdlr: down HCA port hdl " 6642 "matches gid %p", gid_info); 6643 6644 /* 6645 * The non-DM GIDs can come back 6646 * with a new subnet prefix, when 6647 * the HCA port commes up again. To 6648 * avoid issues, delete non-DM 6649 * capable GIDs, if the gid was 6650 * discovered using the HCA port 6651 * going down. This is ensured by 6652 * setting gl_disconnected to 1. 
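 *
 * DM capable GIDs (gl_nodeguid != 0) are instead re-pathed through
 * ibdm_reset_gidinfo() and are only deleted when no alternate HCA
 * port can reach them (that routine sets gl_disconnected in such a
 * case).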
6653 */ 6654 if (gid_info->gl_nodeguid == 0) 6655 gid_info->gl_disconnected = 1; 6656 else 6657 ibdm_reset_gidinfo(gid_info); 6658 6659 if (gid_info->gl_disconnected) { 6660 IBTF_DPRINTF_L3(ibdm_string, 6661 "\tevent_hdlr: deleting" 6662 " gid %p", gid_info); 6663 tmp = gid_info; 6664 gid_info = gid_info->gl_next; 6665 ibdm_delete_gidinfo(tmp); 6666 } else 6667 gid_info = gid_info->gl_next; 6668 } else 6669 gid_info = gid_info->gl_next; 6670 } 6671 6672 mutex_enter(&ibdm.ibdm_mutex); 6673 ibdm.ibdm_busy &= ~IBDM_BUSY; 6674 cv_signal(&ibdm.ibdm_busy_cv); 6675 } 6676 mutex_exit(&ibdm.ibdm_mutex); 6677 } 6678 6679 static void 6680 ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *gidinfo) 6681 { 6682 ibdm_hca_list_t *hca_list = NULL; 6683 ibdm_port_attr_t *port = NULL; 6684 int gid_reinited = 0; 6685 sa_node_record_t *nr, *tmp; 6686 sa_portinfo_record_t *pi; 6687 size_t nr_len = 0, pi_len = 0; 6688 size_t path_len; 6689 ib_gid_t sgid, dgid; 6690 int ret, ii, nrecords; 6691 sa_path_record_t *path; 6692 uint8_t npaths = 1; 6693 ibdm_pkey_tbl_t *pkey_tbl; 6694 6695 IBTF_DPRINTF_L4(ibdm_string, "\treset_gidinfo(%p)", gidinfo); 6696 6697 /* 6698 * Get list of all the ports reachable from the local known HCA 6699 * ports which are active 6700 */ 6701 mutex_enter(&ibdm.ibdm_hl_mutex); 6702 for (ibdm_get_next_port(&hca_list, &port, 1); port; 6703 ibdm_get_next_port(&hca_list, &port, 1)) { 6704 6705 6706 /* 6707 * Get the path and re-populate the gidinfo. 6708 * Getting the path is the same probe_ioc 6709 * Init the gid info as in ibdm_create_gidinfo() 6710 */ 6711 nr = ibdm_get_node_records(port->pa_sa_hdl, &nr_len, 6712 gidinfo->gl_nodeguid); 6713 if (nr == NULL) { 6714 IBTF_DPRINTF_L4(ibdm_string, 6715 "\treset_gidinfo : no records"); 6716 continue; 6717 } 6718 6719 nrecords = (nr_len / sizeof (sa_node_record_t)); 6720 for (tmp = nr, ii = 0; (ii < nrecords); ii++, tmp++) { 6721 if (tmp->NodeInfo.PortGUID == gidinfo->gl_portguid) 6722 break; 6723 } 6724 6725 if (ii == nrecords) { 6726 IBTF_DPRINTF_L4(ibdm_string, 6727 "\treset_gidinfo : no record for portguid"); 6728 kmem_free(nr, nr_len); 6729 continue; 6730 } 6731 6732 pi = ibdm_get_portinfo(port->pa_sa_hdl, &pi_len, tmp->LID); 6733 if (pi == NULL) { 6734 IBTF_DPRINTF_L4(ibdm_string, 6735 "\treset_gidinfo : no portinfo"); 6736 kmem_free(nr, nr_len); 6737 continue; 6738 } 6739 6740 sgid.gid_prefix = port->pa_sn_prefix; 6741 sgid.gid_guid = port->pa_port_guid; 6742 dgid.gid_prefix = pi->PortInfo.GidPrefix; 6743 dgid.gid_guid = tmp->NodeInfo.PortGUID; 6744 6745 ret = ibmf_saa_gid_to_pathrecords(port->pa_sa_hdl, sgid, dgid, 6746 IBMF_SAA_PKEY_WC, 0, B_TRUE, &npaths, 0, &path_len, &path); 6747 6748 if ((ret != IBMF_SUCCESS) || path == NULL) { 6749 IBTF_DPRINTF_L4(ibdm_string, 6750 "\treset_gidinfo : no paths"); 6751 kmem_free(pi, pi_len); 6752 kmem_free(nr, nr_len); 6753 continue; 6754 } 6755 6756 gidinfo->gl_dgid_hi = path->DGID.gid_prefix; 6757 gidinfo->gl_dgid_lo = path->DGID.gid_guid; 6758 gidinfo->gl_sgid_hi = path->SGID.gid_prefix; 6759 gidinfo->gl_sgid_lo = path->SGID.gid_guid; 6760 gidinfo->gl_p_key = path->P_Key; 6761 gidinfo->gl_sa_hdl = port->pa_sa_hdl; 6762 gidinfo->gl_ibmf_hdl = port->pa_ibmf_hdl; 6763 gidinfo->gl_slid = path->SLID; 6764 gidinfo->gl_dlid = path->DLID; 6765 /* Reset redirect info, next MAD will set if redirected */ 6766 gidinfo->gl_redirected = 0; 6767 gidinfo->gl_devid = (*tmp).NodeInfo.DeviceID; 6768 gidinfo->gl_SL = path->SL; 6769 6770 gidinfo->gl_qp_hdl = IBMF_QP_HANDLE_DEFAULT; 6771 for (ii = 0; ii < port->pa_npkeys; ii++) { 6772 
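		/*
		 * Find the IBMF QP handle that was opened for this
		 * path's P_Key; if no P_Key table entry matches,
		 * gl_qp_hdl is left at the default set above.
		 */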
if (port->pa_pkey_tbl == NULL) 6773 break; 6774 6775 pkey_tbl = &port->pa_pkey_tbl[ii]; 6776 if ((gidinfo->gl_p_key == pkey_tbl->pt_pkey) && 6777 (pkey_tbl->pt_qp_hdl != NULL)) { 6778 gidinfo->gl_qp_hdl = pkey_tbl->pt_qp_hdl; 6779 break; 6780 } 6781 } 6782 6783 if (gidinfo->gl_qp_hdl == NULL) 6784 IBTF_DPRINTF_L2(ibdm_string, 6785 "\treset_gid_info: No matching Pkey"); 6786 else 6787 gid_reinited = 1; 6788 6789 kmem_free(path, path_len); 6790 kmem_free(pi, pi_len); 6791 kmem_free(nr, nr_len); 6792 break; 6793 } 6794 mutex_exit(&ibdm.ibdm_hl_mutex); 6795 6796 if (!gid_reinited) 6797 gidinfo->gl_disconnected = 1; 6798 } 6799 6800 static void 6801 ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *gidinfo) 6802 { 6803 ibdm_ioc_info_t *ioc_list; 6804 int in_gidlist = 0; 6805 6806 /* 6807 * Check if gidinfo has been inserted into the 6808 * ibdm_dp_gidlist_head list. gl_next or gl_prev 6809 * != NULL, if gidinfo is the list. 6810 */ 6811 if (gidinfo->gl_prev != NULL || 6812 gidinfo->gl_next != NULL || 6813 ibdm.ibdm_dp_gidlist_head == gidinfo) 6814 in_gidlist = 1; 6815 6816 ioc_list = ibdm_update_ioc_gidlist(gidinfo, 0); 6817 6818 /* 6819 * Remove GID from the global GID list 6820 * Handle the case where all port GIDs for an 6821 * IOU have been hot-removed. 6822 */ 6823 mutex_enter(&ibdm.ibdm_mutex); 6824 if (gidinfo->gl_iou != NULL && gidinfo->gl_ngids == 0) { 6825 mutex_enter(&gidinfo->gl_mutex); 6826 (void) ibdm_free_iou_info(gidinfo, &gidinfo->gl_iou); 6827 mutex_exit(&gidinfo->gl_mutex); 6828 } 6829 6830 /* Delete gl_hca_list */ 6831 mutex_exit(&ibdm.ibdm_mutex); 6832 ibdm_delete_glhca_list(gidinfo); 6833 mutex_enter(&ibdm.ibdm_mutex); 6834 6835 if (in_gidlist) { 6836 if (gidinfo->gl_prev != NULL) 6837 gidinfo->gl_prev->gl_next = gidinfo->gl_next; 6838 if (gidinfo->gl_next != NULL) 6839 gidinfo->gl_next->gl_prev = gidinfo->gl_prev; 6840 6841 if (gidinfo == ibdm.ibdm_dp_gidlist_head) 6842 ibdm.ibdm_dp_gidlist_head = gidinfo->gl_next; 6843 if (gidinfo == ibdm.ibdm_dp_gidlist_tail) 6844 ibdm.ibdm_dp_gidlist_tail = gidinfo->gl_prev; 6845 ibdm.ibdm_ngids--; 6846 } 6847 mutex_exit(&ibdm.ibdm_mutex); 6848 6849 mutex_destroy(&gidinfo->gl_mutex); 6850 cv_destroy(&gidinfo->gl_probe_cv); 6851 kmem_free(gidinfo, sizeof (ibdm_dp_gidinfo_t)); 6852 6853 /* 6854 * Pass on the IOCs with updated GIDs to IBnexus 6855 */ 6856 if (ioc_list) { 6857 IBTF_DPRINTF_L4("ibdm", "\tdelete_gidinfo " 6858 "IOC_PROP_UPDATE for %p\n", ioc_list); 6859 mutex_enter(&ibdm.ibdm_ibnex_mutex); 6860 if (ibdm.ibdm_ibnex_callback != NULL) { 6861 (*ibdm.ibdm_ibnex_callback)((void *) 6862 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE); 6863 } 6864 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6865 } 6866 } 6867 6868 6869 static void 6870 ibdm_fill_srv_attr_mod(ib_mad_hdr_t *hdr, ibdm_timeout_cb_args_t *cb_args) 6871 { 6872 uint32_t attr_mod; 6873 6874 attr_mod = (cb_args->cb_ioc_num + 1) << 16; 6875 attr_mod |= cb_args->cb_srvents_start; 6876 attr_mod |= (cb_args->cb_srvents_end) << 8; 6877 hdr->AttributeModifier = h2b32(attr_mod); 6878 } 6879 6880 static void 6881 ibdm_bump_transactionID(ibdm_dp_gidinfo_t *gid_info) 6882 { 6883 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 6884 gid_info->gl_transactionID++; 6885 if (gid_info->gl_transactionID == gid_info->gl_max_transactionID) { 6886 IBTF_DPRINTF_L4(ibdm_string, 6887 "\tbump_transactionID(%p), wrapup", gid_info); 6888 gid_info->gl_transactionID = gid_info->gl_min_transactionID; 6889 } 6890 } 6891 6892 /* 6893 * gl_prev_iou is set for *non-reprobe* sweeep requests, which 6894 * detected that ChangeID in IOU info has 
changed. The service 6895 * entry also may have changed. Check if service entry in IOC 6896 * has changed wrt the prev iou, if so notify to IB Nexus. 6897 */ 6898 static ibdm_ioc_info_t * 6899 ibdm_handle_prev_iou() 6900 { 6901 ibdm_dp_gidinfo_t *gid_info; 6902 ibdm_ioc_info_t *ioc_list_head = NULL, *ioc_list; 6903 ibdm_ioc_info_t *prev_ioc, *ioc; 6904 int ii, jj, niocs, prev_niocs; 6905 6906 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 6907 6908 IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iou enter"); 6909 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 6910 gid_info = gid_info->gl_next) { 6911 if (gid_info->gl_prev_iou == NULL) 6912 continue; 6913 6914 IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iou gid %p", 6915 gid_info); 6916 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 6917 prev_niocs = 6918 gid_info->gl_prev_iou->iou_info.iou_num_ctrl_slots; 6919 for (ii = 0; ii < niocs; ii++) { 6920 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii); 6921 6922 /* Find matching IOC */ 6923 for (jj = 0; jj < prev_niocs; jj++) { 6924 prev_ioc = (ibdm_ioc_info_t *) 6925 &gid_info->gl_prev_iou->iou_ioc_info[jj]; 6926 if (prev_ioc->ioc_profile.ioc_guid == 6927 ioc->ioc_profile.ioc_guid) 6928 break; 6929 } 6930 if (jj == prev_niocs) 6931 prev_ioc = NULL; 6932 if (ioc == NULL || prev_ioc == NULL) 6933 continue; 6934 if ((ioc->ioc_profile.ioc_service_entries != 6935 prev_ioc->ioc_profile.ioc_service_entries) || 6936 ibdm_serv_cmp(&ioc->ioc_serv[0], 6937 &prev_ioc->ioc_serv[0], 6938 ioc->ioc_profile.ioc_service_entries) != 0) { 6939 IBTF_DPRINTF_L4(ibdm_string, 6940 "/thandle_prev_iou modified IOC: " 6941 "current ioc %p, old ioc %p", 6942 ioc, prev_ioc); 6943 mutex_enter(&gid_info->gl_mutex); 6944 ioc_list = ibdm_dup_ioc_info(ioc, gid_info); 6945 mutex_exit(&gid_info->gl_mutex); 6946 ioc_list->ioc_info_updated.ib_prop_updated 6947 = 0; 6948 ioc_list->ioc_info_updated.ib_srv_prop_updated 6949 = 1; 6950 6951 if (ioc_list_head == NULL) 6952 ioc_list_head = ioc_list; 6953 else { 6954 ioc_list_head->ioc_next = ioc_list; 6955 ioc_list_head = ioc_list; 6956 } 6957 } 6958 } 6959 6960 mutex_enter(&gid_info->gl_mutex); 6961 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_prev_iou); 6962 mutex_exit(&gid_info->gl_mutex); 6963 } 6964 IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iouret %p", 6965 ioc_list_head); 6966 return (ioc_list_head); 6967 } 6968 6969 /* 6970 * Compares two service entries lists, returns 0 if same, returns 1 6971 * if no match. 
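 *
 * The comparison is positional: entry i of serv1 is checked against
 * entry i of serv2 on srv_id and srv_name only, so a permuted but
 * otherwise identical list compares as different.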
6972 */ 6973 static int 6974 ibdm_serv_cmp(ibdm_srvents_info_t *serv1, ibdm_srvents_info_t *serv2, 6975 int nserv) 6976 { 6977 int ii; 6978 6979 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: enter"); 6980 for (ii = 0; ii < nserv; ii++, serv1++, serv2++) { 6981 if (serv1->se_attr.srv_id != serv2->se_attr.srv_id || 6982 bcmp(serv1->se_attr.srv_name, 6983 serv2->se_attr.srv_name, 6984 IB_DM_MAX_SVC_NAME_LEN) != 0) { 6985 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: ret 1"); 6986 return (1); 6987 } 6988 } 6989 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: ret 0"); 6990 return (0); 6991 } 6992 6993 /* For debugging purpose only */ 6994 #ifdef DEBUG 6995 void 6996 ibdm_dump_mad_hdr(ib_mad_hdr_t *mad_hdr) 6997 { 6998 IBTF_DPRINTF_L4("ibdm", "\t\t MAD Header info"); 6999 IBTF_DPRINTF_L4("ibdm", "\t\t ---------------"); 7000 7001 IBTF_DPRINTF_L4("ibdm", "\tBase version : 0x%x" 7002 "\tMgmt Class : 0x%x", mad_hdr->BaseVersion, mad_hdr->MgmtClass); 7003 IBTF_DPRINTF_L4("ibdm", "\tClass version : 0x%x" 7004 "\tR Method : 0x%x", 7005 mad_hdr->ClassVersion, mad_hdr->R_Method); 7006 IBTF_DPRINTF_L4("ibdm", "\tMAD Status : 0x%x" 7007 "\tTransaction ID : 0x%llx", 7008 b2h16(mad_hdr->Status), b2h64(mad_hdr->TransactionID)); 7009 IBTF_DPRINTF_L4("ibdm", "\t Attribute ID : 0x%x" 7010 "\tAttribute Modified : 0x%lx", 7011 b2h16(mad_hdr->AttributeID), b2h32(mad_hdr->AttributeModifier)); 7012 } 7013 7014 7015 void 7016 ibdm_dump_ibmf_msg(ibmf_msg_t *ibmf_msg, int flag) 7017 { 7018 ib_mad_hdr_t *mad_hdr; 7019 7020 IBTF_DPRINTF_L4("ibdm", "\t\t(IBMF_PKT): Local address info"); 7021 IBTF_DPRINTF_L4("ibdm", "\t\t ------------------"); 7022 7023 IBTF_DPRINTF_L4("ibdm", "\tLocal Lid : 0x%x\tRemote Lid : 0x%x" 7024 " Remote Qp : 0x%x", ibmf_msg->im_local_addr.ia_local_lid, 7025 ibmf_msg->im_local_addr.ia_remote_lid, 7026 ibmf_msg->im_local_addr.ia_remote_qno); 7027 IBTF_DPRINTF_L4("ibdm", "\tP_key : 0x%x\tQ_key : 0x%x" 7028 " SL : 0x%x", ibmf_msg->im_local_addr.ia_p_key, 7029 ibmf_msg->im_local_addr.ia_q_key, 7030 ibmf_msg->im_local_addr.ia_service_level); 7031 7032 if (flag) 7033 mad_hdr = (ib_mad_hdr_t *)IBDM_OUT_IBMFMSG_MADHDR(ibmf_msg); 7034 else 7035 mad_hdr = IBDM_IN_IBMFMSG_MADHDR(ibmf_msg); 7036 7037 ibdm_dump_mad_hdr(mad_hdr); 7038 } 7039 7040 7041 void 7042 ibdm_dump_path_info(sa_path_record_t *path) 7043 { 7044 IBTF_DPRINTF_L4("ibdm", "\t\t Path information"); 7045 IBTF_DPRINTF_L4("ibdm", "\t\t ----------------"); 7046 7047 IBTF_DPRINTF_L4("ibdm", "\t DGID hi : %llx\tDGID lo : %llx", 7048 path->DGID.gid_prefix, path->DGID.gid_guid); 7049 IBTF_DPRINTF_L4("ibdm", "\t SGID hi : %llx\tSGID lo : %llx", 7050 path->SGID.gid_prefix, path->SGID.gid_guid); 7051 IBTF_DPRINTF_L4("ibdm", "\t SLID : %x\t\tDlID : %x", 7052 path->SLID, path->DLID); 7053 IBTF_DPRINTF_L4("ibdm", "\t P Key : %x\t\tSL : %x", 7054 path->P_Key, path->SL); 7055 } 7056 7057 7058 void 7059 ibdm_dump_classportinfo(ib_mad_classportinfo_t *classportinfo) 7060 { 7061 IBTF_DPRINTF_L4("ibdm", "\t\t CLASSPORT INFO"); 7062 IBTF_DPRINTF_L4("ibdm", "\t\t --------------"); 7063 7064 IBTF_DPRINTF_L4("ibdm", "\t Response Time Value : 0x%x", 7065 ((b2h32(classportinfo->RespTimeValue)) & 0x1F)); 7066 7067 IBTF_DPRINTF_L4("ibdm", "\t Redirected GID hi : 0x%llx", 7068 b2h64(classportinfo->RedirectGID_hi)); 7069 IBTF_DPRINTF_L4("ibdm", "\t Redirected GID lo : 0x%llx", 7070 b2h64(classportinfo->RedirectGID_lo)); 7071 IBTF_DPRINTF_L4("ibdm", "\t Redirected TC : 0x%x", 7072 classportinfo->RedirectTC); 7073 IBTF_DPRINTF_L4("ibdm", "\t Redirected SL : 0x%x", 7074 
classportinfo->RedirectSL); 7075 IBTF_DPRINTF_L4("ibdm", "\t Redirected FL : 0x%x", 7076 classportinfo->RedirectFL); 7077 IBTF_DPRINTF_L4("ibdm", "\t Redirected LID : 0x%x", 7078 b2h16(classportinfo->RedirectLID)); 7079 IBTF_DPRINTF_L4("ibdm", "\t Redirected P KEY : 0x%x", 7080 b2h16(classportinfo->RedirectP_Key)); 7081 IBTF_DPRINTF_L4("ibdm", "\t Redirected QP : 0x%x", 7082 classportinfo->RedirectQP); 7083 IBTF_DPRINTF_L4("ibdm", "\t Redirected Q KEY : 0x%x", 7084 b2h32(classportinfo->RedirectQ_Key)); 7085 IBTF_DPRINTF_L4("ibdm", "\t Trap GID hi : 0x%llx", 7086 b2h64(classportinfo->TrapGID_hi)); 7087 IBTF_DPRINTF_L4("ibdm", "\t Trap GID lo : 0x%llx", 7088 b2h64(classportinfo->TrapGID_lo)); 7089 IBTF_DPRINTF_L4("ibdm", "\t Trap TC : 0x%x", 7090 classportinfo->TrapTC); 7091 IBTF_DPRINTF_L4("ibdm", "\t Trap SL : 0x%x", 7092 classportinfo->TrapSL); 7093 IBTF_DPRINTF_L4("ibdm", "\t Trap FL : 0x%x", 7094 classportinfo->TrapFL); 7095 IBTF_DPRINTF_L4("ibdm", "\t Trap LID : 0x%x", 7096 b2h16(classportinfo->TrapLID)); 7097 IBTF_DPRINTF_L4("ibdm", "\t Trap P_Key : 0x%x", 7098 b2h16(classportinfo->TrapP_Key)); 7099 IBTF_DPRINTF_L4("ibdm", "\t Trap HL : 0x%x", 7100 classportinfo->TrapHL); 7101 IBTF_DPRINTF_L4("ibdm", "\t Trap QP : 0x%x", 7102 classportinfo->TrapQP); 7103 IBTF_DPRINTF_L4("ibdm", "\t Trap Q_Key : 0x%x", 7104 b2h32(classportinfo->TrapQ_Key)); 7105 } 7106 7107 7108 void 7109 ibdm_dump_iounitinfo(ib_dm_io_unitinfo_t *iou_info) 7110 { 7111 IBTF_DPRINTF_L4("ibdm", "\t\t I/O UnitInfo"); 7112 IBTF_DPRINTF_L4("ibdm", "\t\t ------------"); 7113 7114 IBTF_DPRINTF_L4("ibdm", "\tChange ID : 0x%x", 7115 b2h16(iou_info->iou_changeid)); 7116 IBTF_DPRINTF_L4("ibdm", "\t#of ctrl slots : %d", 7117 iou_info->iou_num_ctrl_slots); 7118 IBTF_DPRINTF_L4("ibdm", "\tIOU flag : 0x%x", 7119 iou_info->iou_flag); 7120 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 0 : 0x%x", 7121 iou_info->iou_ctrl_list[0]); 7122 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 1 : 0x%x", 7123 iou_info->iou_ctrl_list[1]); 7124 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 2 : 0x%x", 7125 iou_info->iou_ctrl_list[2]); 7126 } 7127 7128 7129 void 7130 ibdm_dump_ioc_profile(ib_dm_ioc_ctrl_profile_t *ioc) 7131 { 7132 IBTF_DPRINTF_L4("ibdm", "\t\t IOC Controller Profile"); 7133 IBTF_DPRINTF_L4("ibdm", "\t\t ----------------------"); 7134 7135 IBTF_DPRINTF_L4("ibdm", "\tIOC Guid : %llx", ioc->ioc_guid); 7136 IBTF_DPRINTF_L4("ibdm", "\tVendorID : 0x%x", ioc->ioc_vendorid); 7137 IBTF_DPRINTF_L4("ibdm", "\tDevice Id : 0x%x", ioc->ioc_deviceid); 7138 IBTF_DPRINTF_L4("ibdm", "\tDevice Ver : 0x%x", ioc->ioc_device_ver); 7139 IBTF_DPRINTF_L4("ibdm", "\tSubsys ID : 0x%x", ioc->ioc_subsys_id); 7140 IBTF_DPRINTF_L4("ibdm", "\tIO class : 0x%x", ioc->ioc_io_class); 7141 IBTF_DPRINTF_L4("ibdm", "\tIO subclass : 0x%x", ioc->ioc_io_subclass); 7142 IBTF_DPRINTF_L4("ibdm", "\tProtocol : 0x%x", ioc->ioc_protocol); 7143 IBTF_DPRINTF_L4("ibdm", "\tProtocolV : 0x%x", ioc->ioc_protocol_ver); 7144 IBTF_DPRINTF_L4("ibdm", "\tmsg qdepth : %d", ioc->ioc_send_msg_qdepth); 7145 IBTF_DPRINTF_L4("ibdm", "\trdma qdepth : %d", 7146 ioc->ioc_rdma_read_qdepth); 7147 IBTF_DPRINTF_L4("ibdm", "\tsndmsg sz : %d", ioc->ioc_send_msg_sz); 7148 IBTF_DPRINTF_L4("ibdm", "\trdma xfersz : %d", ioc->ioc_rdma_xfer_sz); 7149 IBTF_DPRINTF_L4("ibdm", "\topcal mask : 0x%x", 7150 ioc->ioc_ctrl_opcap_mask); 7151 IBTF_DPRINTF_L4("ibdm", "\tsrventries : %x", ioc->ioc_service_entries); 7152 } 7153 7154 7155 void 7156 ibdm_dump_service_entries(ib_dm_srv_t *srv_ents) 7157 { 7158 IBTF_DPRINTF_L4("ibdm", 7159 
"\thandle_srventry_mad: service id : %llx", srv_ents->srv_id); 7160 7161 IBTF_DPRINTF_L4("ibdm", "\thandle_srventry_mad: " 7162 "Service Name : %s", srv_ents->srv_name); 7163 } 7164 7165 int ibdm_allow_sweep_fabric_timestamp = 1; 7166 7167 void 7168 ibdm_dump_sweep_fabric_timestamp(int flag) 7169 { 7170 static hrtime_t x; 7171 if (flag) { 7172 if (ibdm_allow_sweep_fabric_timestamp) { 7173 IBTF_DPRINTF_L4("ibdm", "\tTime taken to complete " 7174 "sweep %lld ms", ((gethrtime() - x)/ 1000000)); 7175 } 7176 x = 0; 7177 } else 7178 x = gethrtime(); 7179 } 7180 #endif 7181